repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
box/genty
|
genty/genty.py
|
_build_final_method_name
|
python
|
def _build_final_method_name(
method_name,
dataset_name,
dataprovider_name,
repeat_suffix,
):
# For tests using a dataprovider, append "_<dataprovider_name>" to
# the test method name
suffix = ''
if dataprovider_name:
suffix = '_{0}'.format(dataprovider_name)
if not dataset_name and not repeat_suffix:
return '{0}{1}'.format(method_name, suffix)
if dataset_name:
# Nosetest multi-processing code parses the full test name
# to discern package/module names. Thus any periods in the test-name
# causes that code to fail. So replace any periods with the unicode
# middle-dot character. Yes, this change is applied independent
# of the test runner being used... and that's fine since there is
# no real contract as to how the fabricated tests are named.
dataset_name = dataset_name.replace('.', REPLACE_FOR_PERIOD_CHAR)
# Place data_set info inside parens, as if it were a function call
suffix = '{0}({1})'.format(suffix, dataset_name or "")
if repeat_suffix:
suffix = '{0} {1}'.format(suffix, repeat_suffix)
test_method_name_for_dataset = "{0}{1}".format(
method_name,
suffix,
)
return test_method_name_for_dataset
|
Return a nice human friendly name, that almost looks like code.
Example: a test called 'test_something' with a dataset of (5, 'hello')
Return: "test_something(5, 'hello')"
Example: a test called 'test_other_stuff' with dataset of (9) and repeats
Return: "test_other_stuff(9) iteration_<X>"
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataprovider_name:
If there's a dataprovider involved, then this is its name.
:type dataprovider_name:
`unicode` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:return:
The fully composed name of the generated test method.
:rtype:
`unicode`
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty.py#L270-L335
| null |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import functools
from itertools import chain
import math
import re
import sys
import types
import six
from .genty_args import GentyArgs
from .private import encode_non_ascii_string
REPLACE_FOR_PERIOD_CHAR = '\xb7'
def genty(target_cls):
    """
    This decorator takes the information provided by @genty_dataset,
    @genty_dataprovider, and @genty_repeat and generates the corresponding
    test methods.
    :param target_cls:
        Test class whose test methods have been decorated.
    :type target_cls:
        `class`
    :return:
        The same class, with the generated test methods attached.
    :rtype:
        `class`
    """
    # Pipeline: discover tests -> expand datasets -> unroll repeats,
    # then attach every resulting method to the class.
    expanded_tests = _expand_repeats(
        _expand_datasets(
            _expand_tests(target_cls),
        ),
    )
    _add_new_test_methods(target_cls, expanded_tests)
    return target_cls
def _expand_tests(target_cls):
    """
    Generator of all the test unbound functions in the given class.
    :param target_cls:
        Target test class.
    :type target_cls:
        `class`
    :return:
        Generator of all the test_methods in the given class yielding
        tuples of method name and unbound function.
    :rtype:
        `generator` of `tuple` of (`unicode`, `function`)
    """
    # Snapshot the class dict first: the caller will mutate the class
    # while consuming this generator.
    snapshot = dict(six.iteritems(target_cls.__dict__))
    for attr_name, attr_value in six.iteritems(snapshot):
        looks_like_test = (
            attr_name.startswith('test')
            and isinstance(attr_value, types.FunctionType)
        )
        # Skip methods genty itself fabricated on a previous pass.
        if looks_like_test and not hasattr(attr_value, 'genty_generated_test'):
            yield attr_name, attr_value
def _expand_datasets(test_functions):
    """
    Generator producing test_methods, with an optional dataset.
    :param test_functions:
        Iterator over tuples of test name and test unbound function.
    :type test_functions:
        `iterator` of `tuple` of (`unicode`, `function`)
    :return:
        Generator yielding a tuple of
        - method_name : Name of the test method
        - unbound function : Unbound function that will be the test method.
        - dataset name : String representation of the given dataset
        - dataset : Tuple representing the args for a test
        - param factory : Function that returns params for the test method
    :rtype:
        `generator` of `tuple` of (
            `unicode`,
            `function`,
            `unicode` or None,
            `tuple` or None,
            `function` or None,
        )
    """
    for name, func in test_functions:
        # Plain datasets (no dataprovider) come first, then each
        # dataprovider with its own datasets.
        dataset_sources = chain(
            [(None, getattr(func, 'genty_datasets', {}))],
            getattr(func, 'genty_dataproviders', []),
        )
        found_any = False
        for dataprovider, datasets in dataset_sources:
            for dataset_name, dataset in six.iteritems(datasets):
                found_any = True
                yield name, func, dataset_name, dataset, dataprovider
        if not found_any:
            # No datasets anywhere: pass the original test through unaltered.
            yield name, func, None, None, None
def _expand_repeats(test_functions):
    """
    Generator producing test_methods, with any repeat count unrolled.
    :param test_functions:
        Sequence of tuples of
        - method_name : Name of the test method
        - unbound function : Unbound function that will be the test method.
        - dataset name : String representation of the given dataset
        - dataset : Tuple representing the args for a test
        - param factory : Function that returns params for the test method
    :type test_functions:
        `iterator` of `tuple` of
        (`unicode`, `function`, `unicode` or None, `tuple` or None, `function`)
    :return:
        Generator yielding a tuple of
        (method_name, unbound function, dataset, name dataset, repeat_suffix)
    :rtype:
        `generator` of `tuple` of (`unicode`, `function`,
        `unicode` or None, `tuple` or None, `function`, `unicode`)
    """
    for name, func, dataset_name, dataset, dataprovider in test_functions:
        repeat_count = getattr(func, 'genty_repeat_count', 0)
        if not repeat_count:
            # Not repeated: a single entry with no repeat suffix.
            yield name, func, dataset_name, dataset, dataprovider, None
            continue
        # Unroll: one entry per iteration, each tagged "iteration_<i>".
        for iteration in range(1, repeat_count + 1):
            yield (
                name,
                func,
                dataset_name,
                dataset,
                dataprovider,
                _build_repeat_suffix(iteration, repeat_count),
            )
def _add_new_test_methods(target_cls, tests_with_datasets_and_repeats):
    """Define the given tests in the given class.
    :param target_cls:
        Test class where to define the given test methods.
    :type target_cls:
        `class`
    :param tests_with_datasets_and_repeats:
        Sequence of tuples describing the new test to add to the class.
        (method_name, unbound function, dataset name, dataset,
        dataprovider, repeat_suffix)
    :type tests_with_datasets_and_repeats:
        Sequence of `tuple` of (`unicode`, `function`,
        `unicode` or None, `tuple` or None, `function`, `unicode`)
    """
    for (method_name, func, dataset_name, dataset,
            dataprovider, repeat_suffix) in tests_with_datasets_and_repeats:
        # The generated methods supersede the original test method, so
        # drop the original from the class first.
        is_first_reference = _delete_original_test_method(
            target_cls,
            method_name,
        )
        # If the original name is referenced explicitly in sys.argv, give
        # the first generated method that exact name (by clearing the
        # name-decorating fields) so the command-line reference still
        # resolves to a real test method.
        if is_first_reference and _is_referenced_in_argv(method_name):
            dataset_name = None
            repeat_suffix = None
        _add_method_to_class(
            target_cls,
            method_name,
            func,
            dataset_name,
            dataset,
            dataprovider,
            repeat_suffix,
        )
def _is_referenced_in_argv(method_name):
"""
Various test runners allow one to run a specific test like so:
python -m unittest -v <test_module>.<test_name>
Return True is the given method name is so referenced.
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:return:
Is the given method referenced by the command line.
:rtype:
`bool`
"""
expr = '.*[:.]{0}$'.format(method_name)
regex = re.compile(expr)
return any(regex.match(arg) for arg in sys.argv)
def _build_repeat_suffix(iteration, count):
"""
Return the suffix string to identify iteration X out of Y.
For example, with a count of 100, this will build strings like
"iteration_053" or "iteration_008".
:param iteration:
Current iteration.
:type iteration:
`int`
:param count:
Total number of iterations.
:type count:
`int`
:return:
Repeat suffix.
:rtype:
`unicode`
"""
format_width = int(math.ceil(math.log(count + 1, 10)))
new_suffix = 'iteration_{0:0{width}d}'.format(
iteration,
width=format_width
)
return new_suffix
def _delete_original_test_method(target_cls, name):
"""
Delete an original test method with the given name.
:param target_cls:
Target class.
:type target_cls:
`class`
:param name:
Name of the method to remove.
:type name:
`unicode`
:return:
True if the original method existed
:rtype:
`bool`
"""
attribute = getattr(target_cls, name, None)
if attribute and not getattr(attribute, 'genty_generated_test', None):
try:
delattr(target_cls, name)
except AttributeError:
pass
return True
else:
return False
def _build_dataset_method(method, dataset):
    """
    Return a fabricated method that marshals the dataset into parameters
    for given 'method'
    :param method:
        The underlying test method.
    :type method:
        `callable`
    :param dataset:
        Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset:
        `tuple` or :class:`GentyArgs`
    :return:
        Return an unbound function that will become a test method
    :rtype:
        `function`
    """
    if isinstance(dataset, GentyArgs):
        # GentyArgs carries both positional and keyword arguments.
        def test_method(my_self):
            return method(my_self, *dataset.args, **dataset.kwargs)
    else:
        # A plain tuple supplies positional arguments only.
        def test_method(my_self):
            return method(my_self, *dataset)
    return test_method
def _build_dataprovider_method(method, dataset, dataprovider):
    """
    Return a fabricated method that calls the dataprovider with the given
    dataset, and marshals the return value from that into params to the
    underlying test 'method'.
    :param method:
        The underlying test method.
    :type method:
        `callable`
    :param dataset:
        Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset:
        `tuple` or :class:`GentyArgs`
    :param dataprovider:
        The unbound function that's responsible for generating the actual
        params that will be passed to the test function.
    :type dataprovider:
        `callable`
    :return:
        Return an unbound function that will become a test method
    :rtype:
        `function`
    """
    # Unpack the dataset once, up front, into the args/kwargs that will be
    # fed to the dataprovider on every call.
    if isinstance(dataset, GentyArgs):
        provider_args = dataset.args
        provider_kwargs = dataset.kwargs
    else:
        provider_args = dataset
        provider_kwargs = {}

    def test_method_wrapper(my_self):
        produced = dataprovider(my_self, *provider_args, **provider_kwargs)
        extra_kwargs = {}
        if isinstance(produced, GentyArgs):
            extra_kwargs = produced.kwargs
            produced = produced.args
        elif not isinstance(produced, (tuple, list)):
            # A bare return value becomes a single positional argument.
            produced = (produced, )
        return method(my_self, *produced, **extra_kwargs)
    return test_method_wrapper
def _build_test_method(method, dataset, dataprovider=None):
    """
    Return a fabricated method that marshals the dataset into parameters
    for given 'method'
    :param method:
        The underlying test method.
    :type method:
        `callable`
    :param dataset:
        Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset:
        `tuple` or :class:`GentyArgs` or None
    :param dataprovider:
        The unbound function that's responsible for generating the actual
        params that will be passed to the test function. Can be None
    :type dataprovider:
        `callable` or None
    :return:
        Return an unbound function that will become a test method
    :rtype:
        `function`
    """
    # Dataprovider takes precedence; plain datasets come next; with
    # neither, the original method is used as-is.
    if dataprovider:
        return _build_dataprovider_method(method, dataset, dataprovider)
    if dataset:
        return _build_dataset_method(method, dataset)
    return method
def _add_method_to_class(
        target_cls,
        method_name,
        func,
        dataset_name,
        dataset,
        dataprovider,
        repeat_suffix,
):
    """
    Add the described method to the given class.
    :param target_cls:
        Test class to which to add a method.
    :type target_cls:
        `class`
    :param method_name:
        Base name of the method to add.
    :type method_name:
        `unicode`
    :param func:
        The underlying test function to call.
    :type func:
        `callable`
    :param dataset_name:
        Base name of the data set.
    :type dataset_name:
        `unicode` or None
    :param dataset:
        Tuple containing the args of the dataset.
    :type dataset:
        `tuple` or None
    :param repeat_suffix:
        Suffix to append to the name of the generated method.
    :type repeat_suffix:
        `unicode` or None
    :param dataprovider:
        The unbound function that's responsible for generating the actual
        params that will be passed to the test function. Can be None.
    :type dataprovider:
        `callable`
    """
    # pylint: disable=too-many-arguments
    final_name = _build_final_method_name(
        method_name,
        dataset_name,
        dataprovider.__name__ if dataprovider else None,
        repeat_suffix,
    )
    new_method = _build_test_method(func, dataset, dataprovider)
    # Copy metadata (docstring, module, etc.) from the original function.
    new_method = functools.update_wrapper(new_method, func)
    encoded_name = encode_non_ascii_string(final_name)
    new_method.__name__ = encoded_name
    # Mark the method so a later @genty pass never re-expands it.
    new_method.genty_generated_test = True
    # Attach the method to the class under its fabricated name.
    setattr(target_cls, encoded_name, new_method)
|
box/genty
|
genty/genty.py
|
_build_dataset_method
|
python
|
def _build_dataset_method(method, dataset):
if isinstance(dataset, GentyArgs):
test_method = lambda my_self: method(
my_self,
*dataset.args,
**dataset.kwargs
)
else:
test_method = lambda my_self: method(
my_self,
*dataset
)
return test_method
|
Return a fabricated method that marshals the dataset into parameters
for given 'method'
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs`
:return:
Return an unbound function that will become a test method
:rtype:
`function`
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty.py#L338-L366
| null |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import functools
from itertools import chain
import math
import re
import sys
import types
import six
from .genty_args import GentyArgs
from .private import encode_non_ascii_string
REPLACE_FOR_PERIOD_CHAR = '\xb7'
def genty(target_cls):
"""
This decorator takes the information provided by @genty_dataset,
@genty_dataprovider, and @genty_repeat and generates the corresponding
test methods.
:param target_cls:
Test class whose test methods have been decorated.
:type target_cls:
`class`
"""
tests = _expand_tests(target_cls)
tests_with_datasets = _expand_datasets(tests)
tests_with_datasets_and_repeats = _expand_repeats(tests_with_datasets)
_add_new_test_methods(target_cls, tests_with_datasets_and_repeats)
return target_cls
def _expand_tests(target_cls):
"""
Generator of all the test unbound functions in the given class.
:param target_cls:
Target test class.
:type target_cls:
`class`
:return:
Generator of all the test_methods in the given class yielding
tuples of method name and unbound function.
:rtype:
`generator` of `tuple` of (`unicode`, `function`)
"""
entries = dict(six.iteritems(target_cls.__dict__))
for key, value in six.iteritems(entries):
if key.startswith('test') and isinstance(value, types.FunctionType):
if not hasattr(value, 'genty_generated_test'):
yield key, value
def _expand_datasets(test_functions):
"""
Generator producing test_methods, with an optional dataset.
:param test_functions:
Iterator over tuples of test name and test unbound function.
:type test_functions:
`iterator` of `tuple` of (`unicode`, `function`)
:return:
Generator yielding a tuple of
- method_name : Name of the test method
- unbound function : Unbound function that will be the test method.
- dataset name : String representation of the given dataset
- dataset : Tuple representing the args for a test
- param factory : Function that returns params for the test method
:rtype:
`generator` of `tuple` of (
`unicode`,
`function`,
`unicode` or None,
`tuple` or None,
`function` or None,
)
"""
for name, func in test_functions:
dataset_tuples = chain(
[(None, getattr(func, 'genty_datasets', {}))],
getattr(func, 'genty_dataproviders', []),
)
no_datasets = True
for dataprovider, datasets in dataset_tuples:
for dataset_name, dataset in six.iteritems(datasets):
no_datasets = False
yield name, func, dataset_name, dataset, dataprovider
if no_datasets:
# yield the original test method, unaltered
yield name, func, None, None, None
def _expand_repeats(test_functions):
"""
Generator producing test_methods, with any repeat count unrolled.
:param test_functions:
Sequence of tuples of
- method_name : Name of the test method
- unbound function : Unbound function that will be the test method.
- dataset name : String representation of the given dataset
- dataset : Tuple representing the args for a test
- param factory : Function that returns params for the test method
:type test_functions:
`iterator` of `tuple` of
(`unicode`, `function`, `unicode` or None, `tuple` or None, `function`)
:return:
Generator yielding a tuple of
(method_name, unbound function, dataset, name dataset, repeat_suffix)
:rtype:
`generator` of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `function`, `unicode`)
"""
for name, func, dataset_name, dataset, dataprovider in test_functions:
repeat_count = getattr(func, 'genty_repeat_count', 0)
if repeat_count:
for i in range(1, repeat_count + 1):
repeat_suffix = _build_repeat_suffix(i, repeat_count)
yield (
name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
)
else:
yield name, func, dataset_name, dataset, dataprovider, None
def _add_new_test_methods(target_cls, tests_with_datasets_and_repeats):
"""Define the given tests in the given class.
:param target_cls:
Test class where to define the given test methods.
:type target_cls:
`class`
:param tests_with_datasets_and_repeats:
Sequence of tuples describing the new test to add to the class.
(method_name, unbound function, dataset name, dataset,
dataprovider, repeat_suffix)
:type tests_with_datasets_and_repeats:
Sequence of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `function`, `unicode`)
"""
for test_info in tests_with_datasets_and_repeats:
(
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
) = test_info
# Remove the original test_method as it's superseded by this
# generated method.
is_first_reference = _delete_original_test_method(
target_cls,
method_name,
)
# However, if that test_method is referenced by name in sys.argv
# Then take 1 of the generated methods (we take the first) and
# give that generated method the original name... so that the reference
# can find an actual test method.
if is_first_reference and _is_referenced_in_argv(method_name):
dataset_name = None
repeat_suffix = None
_add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
)
def _is_referenced_in_argv(method_name):
"""
Various test runners allow one to run a specific test like so:
python -m unittest -v <test_module>.<test_name>
Return True is the given method name is so referenced.
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:return:
Is the given method referenced by the command line.
:rtype:
`bool`
"""
expr = '.*[:.]{0}$'.format(method_name)
regex = re.compile(expr)
return any(regex.match(arg) for arg in sys.argv)
def _build_repeat_suffix(iteration, count):
"""
Return the suffix string to identify iteration X out of Y.
For example, with a count of 100, this will build strings like
"iteration_053" or "iteration_008".
:param iteration:
Current iteration.
:type iteration:
`int`
:param count:
Total number of iterations.
:type count:
`int`
:return:
Repeat suffix.
:rtype:
`unicode`
"""
format_width = int(math.ceil(math.log(count + 1, 10)))
new_suffix = 'iteration_{0:0{width}d}'.format(
iteration,
width=format_width
)
return new_suffix
def _delete_original_test_method(target_cls, name):
"""
Delete an original test method with the given name.
:param target_cls:
Target class.
:type target_cls:
`class`
:param name:
Name of the method to remove.
:type name:
`unicode`
:return:
True if the original method existed
:rtype:
`bool`
"""
attribute = getattr(target_cls, name, None)
if attribute and not getattr(attribute, 'genty_generated_test', None):
try:
delattr(target_cls, name)
except AttributeError:
pass
return True
else:
return False
def _build_final_method_name(
method_name,
dataset_name,
dataprovider_name,
repeat_suffix,
):
"""
Return a nice human friendly name, that almost looks like code.
Example: a test called 'test_something' with a dataset of (5, 'hello')
Return: "test_something(5, 'hello')"
Example: a test called 'test_other_stuff' with dataset of (9) and repeats
Return: "test_other_stuff(9) iteration_<X>"
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataprovider_name:
If there's a dataprovider involved, then this is its name.
:type dataprovider_name:
`unicode` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:return:
The fully composed name of the generated test method.
:rtype:
`unicode`
"""
# For tests using a dataprovider, append "_<dataprovider_name>" to
# the test method name
suffix = ''
if dataprovider_name:
suffix = '_{0}'.format(dataprovider_name)
if not dataset_name and not repeat_suffix:
return '{0}{1}'.format(method_name, suffix)
if dataset_name:
# Nosetest multi-processing code parses the full test name
# to discern package/module names. Thus any periods in the test-name
# causes that code to fail. So replace any periods with the unicode
# middle-dot character. Yes, this change is applied independent
# of the test runner being used... and that's fine since there is
# no real contract as to how the fabricated tests are named.
dataset_name = dataset_name.replace('.', REPLACE_FOR_PERIOD_CHAR)
# Place data_set info inside parens, as if it were a function call
suffix = '{0}({1})'.format(suffix, dataset_name or "")
if repeat_suffix:
suffix = '{0} {1}'.format(suffix, repeat_suffix)
test_method_name_for_dataset = "{0}{1}".format(
method_name,
suffix,
)
return test_method_name_for_dataset
def _build_dataprovider_method(method, dataset, dataprovider):
"""
Return a fabricated method that calls the dataprovider with the given
dataset, and marshals the return value from that into params to the
underlying test 'method'.
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs`
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function.
:type dataprovider:
`callable`
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if isinstance(dataset, GentyArgs):
final_args = dataset.args
final_kwargs = dataset.kwargs
else:
final_args = dataset
final_kwargs = {}
def test_method_wrapper(my_self):
args = dataprovider(
my_self,
*final_args,
**final_kwargs
)
kwargs = {}
if isinstance(args, GentyArgs):
kwargs = args.kwargs
args = args.args
elif not isinstance(args, (tuple, list)):
args = (args, )
return method(my_self, *args, **kwargs)
return test_method_wrapper
def _build_test_method(method, dataset, dataprovider=None):
"""
Return a fabricated method that marshals the dataset into parameters
for given 'method'
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs` or None
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function. Can be None
:type dataprovider:
`callable` or None
:return:
Return an unbound function that will become a test method
:rtype:
`function`
"""
if dataprovider:
test_method = _build_dataprovider_method(method, dataset, dataprovider)
elif dataset:
test_method = _build_dataset_method(method, dataset)
else:
test_method = method
return test_method
def _add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
):
"""
Add the described method to the given class.
:param target_cls:
Test class to which to add a method.
:type target_cls:
`class`
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param func:
The underlying test function to call.
:type func:
`callable`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataset:
Tuple containing the args of the dataset.
:type dataset:
`tuple` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function. Can be None.
:type dataprovider:
`callable`
"""
# pylint: disable=too-many-arguments
test_method_name_for_dataset = _build_final_method_name(
method_name,
dataset_name,
dataprovider.__name__ if dataprovider else None,
repeat_suffix,
)
test_method_for_dataset = _build_test_method(func, dataset, dataprovider)
test_method_for_dataset = functools.update_wrapper(
test_method_for_dataset,
func,
)
test_method_name_for_dataset = encode_non_ascii_string(
test_method_name_for_dataset,
)
test_method_for_dataset.__name__ = test_method_name_for_dataset
test_method_for_dataset.genty_generated_test = True
# Add the method to the class under the proper name
setattr(target_cls, test_method_name_for_dataset, test_method_for_dataset)
|
box/genty
|
genty/genty.py
|
_build_dataprovider_method
|
python
|
def _build_dataprovider_method(method, dataset, dataprovider):
if isinstance(dataset, GentyArgs):
final_args = dataset.args
final_kwargs = dataset.kwargs
else:
final_args = dataset
final_kwargs = {}
def test_method_wrapper(my_self):
args = dataprovider(
my_self,
*final_args,
**final_kwargs
)
kwargs = {}
if isinstance(args, GentyArgs):
kwargs = args.kwargs
args = args.args
elif not isinstance(args, (tuple, list)):
args = (args, )
return method(my_self, *args, **kwargs)
return test_method_wrapper
|
Return a fabricated method that calls the dataprovider with the given
dataset, and marshals the return value from that into params to the
underlying test 'method'.
:param method:
The underlying test method.
:type method:
`callable`
:param dataset:
Tuple or GentyArgs instance containing the args of the dataset.
:type dataset:
`tuple` or :class:`GentyArgs`
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function.
:type dataprovider:
`callable`
:return:
Return an unbound function that will become a test method
:rtype:
`function`
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty.py#L369-L416
| null |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import functools
from itertools import chain
import math
import re
import sys
import types
import six
from .genty_args import GentyArgs
from .private import encode_non_ascii_string
REPLACE_FOR_PERIOD_CHAR = '\xb7'
def genty(target_cls):
"""
This decorator takes the information provided by @genty_dataset,
@genty_dataprovider, and @genty_repeat and generates the corresponding
test methods.
:param target_cls:
Test class whose test methods have been decorated.
:type target_cls:
`class`
"""
tests = _expand_tests(target_cls)
tests_with_datasets = _expand_datasets(tests)
tests_with_datasets_and_repeats = _expand_repeats(tests_with_datasets)
_add_new_test_methods(target_cls, tests_with_datasets_and_repeats)
return target_cls
def _expand_tests(target_cls):
"""
Generator of all the test unbound functions in the given class.
:param target_cls:
Target test class.
:type target_cls:
`class`
:return:
Generator of all the test_methods in the given class yielding
tuples of method name and unbound function.
:rtype:
`generator` of `tuple` of (`unicode`, `function`)
"""
entries = dict(six.iteritems(target_cls.__dict__))
for key, value in six.iteritems(entries):
if key.startswith('test') and isinstance(value, types.FunctionType):
if not hasattr(value, 'genty_generated_test'):
yield key, value
def _expand_datasets(test_functions):
"""
Generator producing test_methods, with an optional dataset.
:param test_functions:
Iterator over tuples of test name and test unbound function.
:type test_functions:
`iterator` of `tuple` of (`unicode`, `function`)
:return:
Generator yielding a tuple of
- method_name : Name of the test method
- unbound function : Unbound function that will be the test method.
- dataset name : String representation of the given dataset
- dataset : Tuple representing the args for a test
- param factory : Function that returns params for the test method
:rtype:
`generator` of `tuple` of (
`unicode`,
`function`,
`unicode` or None,
`tuple` or None,
`function` or None,
)
"""
for name, func in test_functions:
dataset_tuples = chain(
[(None, getattr(func, 'genty_datasets', {}))],
getattr(func, 'genty_dataproviders', []),
)
no_datasets = True
for dataprovider, datasets in dataset_tuples:
for dataset_name, dataset in six.iteritems(datasets):
no_datasets = False
yield name, func, dataset_name, dataset, dataprovider
if no_datasets:
# yield the original test method, unaltered
yield name, func, None, None, None
def _expand_repeats(test_functions):
"""
Generator producing test_methods, with any repeat count unrolled.
:param test_functions:
Sequence of tuples of
- method_name : Name of the test method
- unbound function : Unbound function that will be the test method.
- dataset name : String representation of the given dataset
- dataset : Tuple representing the args for a test
- param factory : Function that returns params for the test method
:type test_functions:
`iterator` of `tuple` of
(`unicode`, `function`, `unicode` or None, `tuple` or None, `function`)
:return:
Generator yielding a tuple of
(method_name, unbound function, dataset, name dataset, repeat_suffix)
:rtype:
`generator` of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `function`, `unicode`)
"""
for name, func, dataset_name, dataset, dataprovider in test_functions:
repeat_count = getattr(func, 'genty_repeat_count', 0)
if repeat_count:
for i in range(1, repeat_count + 1):
repeat_suffix = _build_repeat_suffix(i, repeat_count)
yield (
name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
)
else:
yield name, func, dataset_name, dataset, dataprovider, None
def _add_new_test_methods(target_cls, tests_with_datasets_and_repeats):
"""Define the given tests in the given class.
:param target_cls:
Test class where to define the given test methods.
:type target_cls:
`class`
:param tests_with_datasets_and_repeats:
Sequence of tuples describing the new test to add to the class.
(method_name, unbound function, dataset name, dataset,
dataprovider, repeat_suffix)
:type tests_with_datasets_and_repeats:
Sequence of `tuple` of (`unicode`, `function`,
`unicode` or None, `tuple` or None, `function`, `unicode`)
"""
for test_info in tests_with_datasets_and_repeats:
(
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
) = test_info
# Remove the original test_method as it's superseded by this
# generated method.
is_first_reference = _delete_original_test_method(
target_cls,
method_name,
)
# However, if that test_method is referenced by name in sys.argv
# Then take 1 of the generated methods (we take the first) and
# give that generated method the original name... so that the reference
# can find an actual test method.
if is_first_reference and _is_referenced_in_argv(method_name):
dataset_name = None
repeat_suffix = None
_add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
)
def _is_referenced_in_argv(method_name):
"""
Various test runners allow one to run a specific test like so:
python -m unittest -v <test_module>.<test_name>
Return True is the given method name is so referenced.
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:return:
Is the given method referenced by the command line.
:rtype:
`bool`
"""
expr = '.*[:.]{0}$'.format(method_name)
regex = re.compile(expr)
return any(regex.match(arg) for arg in sys.argv)
def _build_repeat_suffix(iteration, count):
"""
Return the suffix string to identify iteration X out of Y.
For example, with a count of 100, this will build strings like
"iteration_053" or "iteration_008".
:param iteration:
Current iteration.
:type iteration:
`int`
:param count:
Total number of iterations.
:type count:
`int`
:return:
Repeat suffix.
:rtype:
`unicode`
"""
format_width = int(math.ceil(math.log(count + 1, 10)))
new_suffix = 'iteration_{0:0{width}d}'.format(
iteration,
width=format_width
)
return new_suffix
def _delete_original_test_method(target_cls, name):
"""
Delete an original test method with the given name.
:param target_cls:
Target class.
:type target_cls:
`class`
:param name:
Name of the method to remove.
:type name:
`unicode`
:return:
True if the original method existed
:rtype:
`bool`
"""
attribute = getattr(target_cls, name, None)
if attribute and not getattr(attribute, 'genty_generated_test', None):
try:
delattr(target_cls, name)
except AttributeError:
pass
return True
else:
return False
def _build_final_method_name(
method_name,
dataset_name,
dataprovider_name,
repeat_suffix,
):
"""
Return a nice human friendly name, that almost looks like code.
Example: a test called 'test_something' with a dataset of (5, 'hello')
Return: "test_something(5, 'hello')"
Example: a test called 'test_other_stuff' with dataset of (9) and repeats
Return: "test_other_stuff(9) iteration_<X>"
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataprovider_name:
If there's a dataprovider involved, then this is its name.
:type dataprovider_name:
`unicode` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:return:
The fully composed name of the generated test method.
:rtype:
`unicode`
"""
# For tests using a dataprovider, append "_<dataprovider_name>" to
# the test method name
suffix = ''
if dataprovider_name:
suffix = '_{0}'.format(dataprovider_name)
if not dataset_name and not repeat_suffix:
return '{0}{1}'.format(method_name, suffix)
if dataset_name:
# Nosetest multi-processing code parses the full test name
# to discern package/module names. Thus any periods in the test-name
# causes that code to fail. So replace any periods with the unicode
# middle-dot character. Yes, this change is applied independent
# of the test runner being used... and that's fine since there is
# no real contract as to how the fabricated tests are named.
dataset_name = dataset_name.replace('.', REPLACE_FOR_PERIOD_CHAR)
# Place data_set info inside parens, as if it were a function call
suffix = '{0}({1})'.format(suffix, dataset_name or "")
if repeat_suffix:
suffix = '{0} {1}'.format(suffix, repeat_suffix)
test_method_name_for_dataset = "{0}{1}".format(
method_name,
suffix,
)
return test_method_name_for_dataset
def _build_dataset_method(method, dataset):
    """
    Fabricate a method that invokes 'method' with the dataset unpacked
    as its arguments.

    :param method:
        The underlying test method.
    :type method:
        `callable`
    :param dataset:
        Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset:
        `tuple` or :class:`GentyArgs`
    :return:
        An unbound function destined to become a test method.
    :rtype:
        `function`
    """
    if isinstance(dataset, GentyArgs):
        def test_method(my_self):
            return method(my_self, *dataset.args, **dataset.kwargs)
    else:
        def test_method(my_self):
            return method(my_self, *dataset)
    return test_method
def _build_test_method(method, dataset, dataprovider=None):
    """
    Fabricate the function that will become a generated test method.

    :param method:
        The underlying test method.
    :type method:
        `callable`
    :param dataset:
        Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset:
        `tuple` or :class:`GentyArgs` or None
    :param dataprovider:
        The unbound function that's responsible for generating the actual
        params that will be passed to the test function. Can be None
    :type dataprovider:
        `callable` or None
    :return:
        Return an unbound function that will become a test method
    :rtype:
        `function`
    """
    if dataprovider:
        return _build_dataprovider_method(method, dataset, dataprovider)
    if dataset:
        return _build_dataset_method(method, dataset)
    # No dataset and no provider: the original method is used untouched.
    return method
def _add_method_to_class(
    target_cls,
    method_name,
    func,
    dataset_name,
    dataset,
    dataprovider,
    repeat_suffix,
):
    """
    Attach one generated test method to the given class.

    :param target_cls:
        Test class to which to add a method.
    :type target_cls:
        `class`
    :param method_name:
        Base name of the method to add.
    :type method_name:
        `unicode`
    :param func:
        The underlying test function to call.
    :type func:
        `callable`
    :param dataset_name:
        Base name of the data set.
    :type dataset_name:
        `unicode` or None
    :param dataset:
        Tuple containing the args of the dataset.
    :type dataset:
        `tuple` or None
    :param dataprovider:
        The unbound function that's responsible for generating the actual
        params that will be passed to the test function. Can be None.
    :type dataprovider:
        `callable` or None
    :param repeat_suffix:
        Suffix to append to the name of the generated method.
    :type repeat_suffix:
        `unicode` or None
    """
    # pylint: disable=too-many-arguments
    final_name = _build_final_method_name(
        method_name,
        dataset_name,
        dataprovider.__name__ if dataprovider else None,
        repeat_suffix,
    )
    new_method = functools.update_wrapper(
        _build_test_method(func, dataset, dataprovider),
        func,
    )
    final_name = encode_non_ascii_string(final_name)
    new_method.__name__ = final_name
    # Mark the method so a later @genty pass will not expand it again.
    new_method.genty_generated_test = True
    # Add the method to the class under the composed name.
    setattr(target_cls, final_name, new_method)
|
box/genty
|
genty/genty.py
|
_add_method_to_class
|
python
|
def _add_method_to_class(
target_cls,
method_name,
func,
dataset_name,
dataset,
dataprovider,
repeat_suffix,
):
# pylint: disable=too-many-arguments
test_method_name_for_dataset = _build_final_method_name(
method_name,
dataset_name,
dataprovider.__name__ if dataprovider else None,
repeat_suffix,
)
test_method_for_dataset = _build_test_method(func, dataset, dataprovider)
test_method_for_dataset = functools.update_wrapper(
test_method_for_dataset,
func,
)
test_method_name_for_dataset = encode_non_ascii_string(
test_method_name_for_dataset,
)
test_method_for_dataset.__name__ = test_method_name_for_dataset
test_method_for_dataset.genty_generated_test = True
# Add the method to the class under the proper name
setattr(target_cls, test_method_name_for_dataset, test_method_for_dataset)
|
Add the described method to the given class.
:param target_cls:
Test class to which to add a method.
:type target_cls:
`class`
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param func:
The underlying test function to call.
:type func:
`callable`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataset:
Tuple containing the args of the dataset.
:type dataset:
`tuple` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:param dataprovider:
The unbound function that's responsible for generating the actual
params that will be passed to the test function. Can be None.
:type dataprovider:
`callable`
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty.py#L450-L514
| null |
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import functools
from itertools import chain
import math
import re
import sys
import types
import six
from .genty_args import GentyArgs
from .private import encode_non_ascii_string
REPLACE_FOR_PERIOD_CHAR = '\xb7'
def genty(target_cls):
    """
    Class decorator that materializes the test methods described by
    @genty_dataset, @genty_dataprovider, and @genty_repeat.

    :param target_cls:
        Test class whose test methods have been decorated.
    :type target_cls:
        `class`
    :return:
        The same class, with the generated test methods attached.
    :rtype:
        `class`
    """
    expanded_tests = _expand_repeats(_expand_datasets(_expand_tests(target_cls)))
    _add_new_test_methods(target_cls, expanded_tests)
    return target_cls
def _expand_tests(target_cls):
    """
    Yield every plain (not yet generated) test function defined on the class.

    :param target_cls:
        Target test class.
    :type target_cls:
        `class`
    :return:
        Generator over the test methods in the given class, yielding
        tuples of method name and unbound function.
    :rtype:
        `generator` of `tuple` of (`unicode`, `function`)
    """
    # Snapshot the class dict first, since generated methods will be added
    # to the class while this generator is being consumed.
    snapshot = dict(six.iteritems(target_cls.__dict__))
    for attr_name, attr_value in six.iteritems(snapshot):
        if not attr_name.startswith('test'):
            continue
        if not isinstance(attr_value, types.FunctionType):
            continue
        # Skip methods fabricated by a previous @genty application.
        if hasattr(attr_value, 'genty_generated_test'):
            continue
        yield attr_name, attr_value
def _expand_datasets(test_functions):
    """
    For each test function, yield one entry per attached dataset.

    Datasets come from two sources: @genty_dataset decorations stored
    directly on the function, and @genty_dataprovider decorations (each of
    which carries its own datasets). A function with no datasets at all is
    yielded once, unaltered.

    :param test_functions:
        Iterator over tuples of test name and test unbound function.
    :type test_functions:
        `iterator` of `tuple` of (`unicode`, `function`)
    :return:
        Generator yielding a tuple of
        - method_name : Name of the test method
        - unbound function : Unbound function that will be the test method.
        - dataset name : String representation of the given dataset
        - dataset : Tuple representing the args for a test
        - param factory : Function that returns params for the test method
    :rtype:
        `generator` of `tuple` of (
            `unicode`,
            `function`,
            `unicode` or None,
            `tuple` or None,
            `function` or None,
        )
    """
    for name, func in test_functions:
        sources = chain(
            [(None, getattr(func, 'genty_datasets', {}))],
            getattr(func, 'genty_dataproviders', []),
        )
        found_any = False
        for provider, datasets in sources:
            for dataset_name, dataset in six.iteritems(datasets):
                found_any = True
                yield name, func, dataset_name, dataset, provider
        if not found_any:
            # No datasets anywhere: keep the original test method as-is.
            yield name, func, None, None, None
def _expand_repeats(test_functions):
    """
    Unroll any @genty_repeat count into one entry per iteration.

    :param test_functions:
        Sequence of tuples of
        - method_name : Name of the test method
        - unbound function : Unbound function that will be the test method.
        - dataset name : String representation of the given dataset
        - dataset : Tuple representing the args for a test
        - param factory : Function that returns params for the test method
    :type test_functions:
        `iterator` of `tuple` of
        (`unicode`, `function`, `unicode` or None, `tuple` or None, `function`)
    :return:
        Generator yielding a tuple of
        (method_name, unbound function, dataset, name dataset, repeat_suffix)
    :rtype:
        `generator` of `tuple` of (`unicode`, `function`,
        `unicode` or None, `tuple` or None, `function`, `unicode`)
    """
    for name, func, dataset_name, dataset, dataprovider in test_functions:
        count = getattr(func, 'genty_repeat_count', 0)
        if not count:
            # No repeat decoration: a single entry with no repeat suffix.
            yield name, func, dataset_name, dataset, dataprovider, None
            continue
        for iteration in range(1, count + 1):
            yield (
                name,
                func,
                dataset_name,
                dataset,
                dataprovider,
                _build_repeat_suffix(iteration, count),
            )
def _add_new_test_methods(target_cls, tests_with_datasets_and_repeats):
    """Attach every described generated test method to the class.

    :param target_cls:
        Test class where to define the given test methods.
    :type target_cls:
        `class`
    :param tests_with_datasets_and_repeats:
        Sequence of tuples describing the new tests to add to the class:
        (method_name, unbound function, dataset name, dataset,
        dataprovider, repeat_suffix)
    :type tests_with_datasets_and_repeats:
        Sequence of `tuple` of (`unicode`, `function`,
        `unicode` or None, `tuple` or None, `function`, `unicode`)
    """
    for (method_name, func, dataset_name, dataset,
            dataprovider, repeat_suffix) in tests_with_datasets_and_repeats:
        # The generated methods supersede the original test method, so the
        # original is removed the first time it is encountered.
        first_removal = _delete_original_test_method(target_cls, method_name)
        # If the original name is explicitly referenced on the command line,
        # give the first generated method that exact name so the requested
        # test can still be found by the runner.
        if first_removal and _is_referenced_in_argv(method_name):
            dataset_name = None
            repeat_suffix = None
        _add_method_to_class(
            target_cls,
            method_name,
            func,
            dataset_name,
            dataset,
            dataprovider,
            repeat_suffix,
        )
def _is_referenced_in_argv(method_name):
"""
Various test runners allow one to run a specific test like so:
python -m unittest -v <test_module>.<test_name>
Return True is the given method name is so referenced.
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:return:
Is the given method referenced by the command line.
:rtype:
`bool`
"""
expr = '.*[:.]{0}$'.format(method_name)
regex = re.compile(expr)
return any(regex.match(arg) for arg in sys.argv)
def _build_repeat_suffix(iteration, count):
"""
Return the suffix string to identify iteration X out of Y.
For example, with a count of 100, this will build strings like
"iteration_053" or "iteration_008".
:param iteration:
Current iteration.
:type iteration:
`int`
:param count:
Total number of iterations.
:type count:
`int`
:return:
Repeat suffix.
:rtype:
`unicode`
"""
format_width = int(math.ceil(math.log(count + 1, 10)))
new_suffix = 'iteration_{0:0{width}d}'.format(
iteration,
width=format_width
)
return new_suffix
def _delete_original_test_method(target_cls, name):
"""
Delete an original test method with the given name.
:param target_cls:
Target class.
:type target_cls:
`class`
:param name:
Name of the method to remove.
:type name:
`unicode`
:return:
True if the original method existed
:rtype:
`bool`
"""
attribute = getattr(target_cls, name, None)
if attribute and not getattr(attribute, 'genty_generated_test', None):
try:
delattr(target_cls, name)
except AttributeError:
pass
return True
else:
return False
def _build_final_method_name(
method_name,
dataset_name,
dataprovider_name,
repeat_suffix,
):
"""
Return a nice human friendly name, that almost looks like code.
Example: a test called 'test_something' with a dataset of (5, 'hello')
Return: "test_something(5, 'hello')"
Example: a test called 'test_other_stuff' with dataset of (9) and repeats
Return: "test_other_stuff(9) iteration_<X>"
:param method_name:
Base name of the method to add.
:type method_name:
`unicode`
:param dataset_name:
Base name of the data set.
:type dataset_name:
`unicode` or None
:param dataprovider_name:
If there's a dataprovider involved, then this is its name.
:type dataprovider_name:
`unicode` or None
:param repeat_suffix:
Suffix to append to the name of the generated method.
:type repeat_suffix:
`unicode` or None
:return:
The fully composed name of the generated test method.
:rtype:
`unicode`
"""
# For tests using a dataprovider, append "_<dataprovider_name>" to
# the test method name
suffix = ''
if dataprovider_name:
suffix = '_{0}'.format(dataprovider_name)
if not dataset_name and not repeat_suffix:
return '{0}{1}'.format(method_name, suffix)
if dataset_name:
# Nosetest multi-processing code parses the full test name
# to discern package/module names. Thus any periods in the test-name
# causes that code to fail. So replace any periods with the unicode
# middle-dot character. Yes, this change is applied independent
# of the test runner being used... and that's fine since there is
# no real contract as to how the fabricated tests are named.
dataset_name = dataset_name.replace('.', REPLACE_FOR_PERIOD_CHAR)
# Place data_set info inside parens, as if it were a function call
suffix = '{0}({1})'.format(suffix, dataset_name or "")
if repeat_suffix:
suffix = '{0} {1}'.format(suffix, repeat_suffix)
test_method_name_for_dataset = "{0}{1}".format(
method_name,
suffix,
)
return test_method_name_for_dataset
def _build_dataset_method(method, dataset):
    """
    Fabricate a method that invokes 'method' with the dataset unpacked
    as its arguments.

    :param method:
        The underlying test method.
    :type method:
        `callable`
    :param dataset:
        Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset:
        `tuple` or :class:`GentyArgs`
    :return:
        An unbound function destined to become a test method.
    :rtype:
        `function`
    """
    if isinstance(dataset, GentyArgs):
        def test_method(my_self):
            return method(my_self, *dataset.args, **dataset.kwargs)
    else:
        def test_method(my_self):
            return method(my_self, *dataset)
    return test_method
def _build_dataprovider_method(method, dataset, dataprovider):
    """
    Fabricate a method that calls the dataprovider with the given dataset,
    then marshals whatever the dataprovider returns into params for the
    underlying test 'method'.

    :param method:
        The underlying test method.
    :type method:
        `callable`
    :param dataset:
        Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset:
        `tuple` or :class:`GentyArgs`
    :param dataprovider:
        The unbound function that's responsible for generating the actual
        params that will be passed to the test function.
    :type dataprovider:
        `callable`
    :return:
        An unbound function destined to become a test method.
    :rtype:
        `function`
    """
    if isinstance(dataset, GentyArgs):
        provider_args, provider_kwargs = dataset.args, dataset.kwargs
    else:
        provider_args, provider_kwargs = dataset, {}

    def test_method_wrapper(my_self):
        produced = dataprovider(my_self, *provider_args, **provider_kwargs)
        method_kwargs = {}
        if isinstance(produced, GentyArgs):
            # The provider spelled out args and kwargs explicitly.
            method_kwargs = produced.kwargs
            produced = produced.args
        elif not isinstance(produced, (tuple, list)):
            # A lone return value becomes the single positional argument.
            produced = (produced, )
        return method(my_self, *produced, **method_kwargs)

    return test_method_wrapper
def _build_test_method(method, dataset, dataprovider=None):
    """
    Fabricate the function that will become a generated test method.

    :param method:
        The underlying test method.
    :type method:
        `callable`
    :param dataset:
        Tuple or GentyArgs instance containing the args of the dataset.
    :type dataset:
        `tuple` or :class:`GentyArgs` or None
    :param dataprovider:
        The unbound function that's responsible for generating the actual
        params that will be passed to the test function. Can be None
    :type dataprovider:
        `callable` or None
    :return:
        Return an unbound function that will become a test method
    :rtype:
        `function`
    """
    if dataprovider:
        return _build_dataprovider_method(method, dataset, dataprovider)
    if dataset:
        return _build_dataset_method(method, dataset)
    # No dataset and no provider: the original method is used untouched.
    return method
|
box/genty
|
genty/private/__init__.py
|
format_kwarg
|
python
|
def format_kwarg(key, value):
translator = repr if isinstance(value, six.string_types) else six.text_type
arg_value = translator(value)
return '{0}={1}'.format(key, arg_value)
|
Return a string of form: "key=<value>"
If 'value' is a string, we want it quoted. The goal is to make
the string a named parameter in a method call.
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/private/__init__.py#L7-L17
| null |
# coding: utf-8
from __future__ import unicode_literals
import six
def format_arg(value):
"""
:param value:
Some value in a dataset.
:type value:
varies
:return:
unicode representation of that value
:rtype:
`unicode`
"""
translator = repr if isinstance(value, six.string_types) else six.text_type
return translator(value)
def encode_non_ascii_string(string):
"""
:param string:
The string to be encoded
:type string:
unicode or str
:return:
The encoded string
:rtype:
str
"""
encoded_string = string.encode('utf-8', 'replace')
if six.PY3:
encoded_string = encoded_string.decode()
return encoded_string
|
box/genty
|
genty/private/__init__.py
|
format_arg
|
python
|
def format_arg(value):
translator = repr if isinstance(value, six.string_types) else six.text_type
return translator(value)
|
:param value:
Some value in a dataset.
:type value:
varies
:return:
unicode representation of that value
:rtype:
`unicode`
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/private/__init__.py#L20-L32
| null |
# coding: utf-8
from __future__ import unicode_literals
import six
def format_kwarg(key, value):
    """
    Return a string of form: "key=<value>"

    If 'value' is a string it is quoted, so that the result reads like a
    named parameter in a method call.
    """
    if isinstance(value, six.string_types):
        rendered = repr(value)
    else:
        rendered = six.text_type(value)
    return '{0}={1}'.format(key, rendered)
def encode_non_ascii_string(string):
    """
    Encode the given string as UTF-8, replacing unencodable characters.

    On Python 3 the bytes are decoded back, so a native `str` is returned
    on both Python 2 and Python 3.

    :param string:
        The string to be encoded
    :type string:
        unicode or str
    :return:
        The encoded string
    :rtype:
        str
    """
    result = string.encode('utf-8', 'replace')
    return result.decode() if six.PY3 else result
|
box/genty
|
genty/private/__init__.py
|
encode_non_ascii_string
|
python
|
def encode_non_ascii_string(string):
encoded_string = string.encode('utf-8', 'replace')
if six.PY3:
encoded_string = encoded_string.decode()
return encoded_string
|
:param string:
The string to be encoded
:type string:
unicode or str
:return:
The encoded string
:rtype:
str
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/private/__init__.py#L35-L50
| null |
# coding: utf-8
from __future__ import unicode_literals
import six
def format_kwarg(key, value):
"""
Return a string of form: "key=<value>"
If 'value' is a string, we want it quoted. The goal is to make
the string a named parameter in a method call.
"""
translator = repr if isinstance(value, six.string_types) else six.text_type
arg_value = translator(value)
return '{0}={1}'.format(key, arg_value)
def format_arg(value):
    """
    Return the unicode rendering of a single dataset value.

    Strings are rendered via repr() so they appear quoted; every other
    type is converted with the unicode constructor.

    :param value:
        Some value in a dataset.
    :type value:
        varies
    :return:
        unicode representation of that value
    :rtype:
        `unicode`
    """
    if isinstance(value, six.string_types):
        return repr(value)
    return six.text_type(value)
|
box/genty
|
genty/genty_dataset.py
|
genty_dataprovider
|
python
|
def genty_dataprovider(builder_function):
datasets = getattr(builder_function, 'genty_datasets', {None: ()})
def wrap(test_method):
# Save the data providers in the test method. This data will be
# consumed by the @genty decorator.
if not hasattr(test_method, 'genty_dataproviders'):
test_method.genty_dataproviders = []
test_method.genty_dataproviders.append(
(builder_function, datasets),
)
return test_method
return wrap
|
Decorator defining that this test gets parameters from the given
build_function.
:param builder_function:
A callable that returns parameters that will be passed to the method
decorated by this decorator.
If the builder_function returns a tuple or list, then that will be
passed as *args to the decorated method.
If the builder_function returns a :class:`GentyArgs`, then that will
be used to pass *args and **kwargs to the decorated method.
Any other return value will be treated as a single parameter, and
passed as such to the decorated method.
:type builder_function:
`callable`
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty_dataset.py#L15-L47
| null |
# coding: utf-8
from __future__ import unicode_literals
try:
from collections import OrderedDict
except ImportError:
# pylint:disable=import-error
from ordereddict import OrderedDict
# pylint:enable=import-error
import six
from .genty_args import GentyArgs
from .private import format_arg
def genty_dataset(*args, **kwargs):
    """Decorator attaching data sets to a test method.

    Inspired by http://sebastian-bergmann.de/archives/
    702-Data-Providers-in-PHPUnit-3.2.html

    Each positional argument is one data set, injected into the test
    method call:

        @genty_dataset(
            ('a1', 'b1'),
            ('a2', 'b2'),
        )
        def test_some_function(a, b)
            ...

    A non-tuple argument stands for a 1-tuple, so single-parameter tests
    can simply be written as:

        @genty_dataset('c1', 'c2')
        def test_some_other_function(c)
            ...

    For each data set, a test-name suffix is built by concatenating the
    string representations of its arguments. A keyword argument instead
    names its data set explicitly, e.g.:

        @genty_dataset(
            happy_path=('a1', 'b1'),
        )

    produces a test named test_function_for_happy_path. Unnamed and named
    data sets can be freely mixed in one call.

    Applications of @genty_dataset may be stacked (chained) on the same
    test method; if data set names collide across stacked decorators, the
    key & value pair from the outer (first) decorator overrides the one
    from the inner.

    :param args:
        Tuple of unnamed data sets.
    :type args:
        `tuple` of varies
    :param kwargs:
        Dict of pre-named data sets.
    :type kwargs:
        `dict` of `unicode` to varies
    """
    named_datasets = _build_datasets(*args, **kwargs)

    def wrap(test_method):
        # Accumulate the datasets on the test method itself; they are
        # consumed later by the @genty decorator.
        existing = getattr(test_method, 'genty_datasets', None)
        if existing is None:
            existing = OrderedDict()
            test_method.genty_datasets = existing
        existing.update(named_datasets)
        return test_method
    return wrap
def _build_datasets(*args, **kwargs):
"""Build the datasets into a dict, where the keys are the name of the
data set and the values are the data sets themselves.
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
:return:
The dataset dict.
:rtype:
`dict`
"""
datasets = OrderedDict()
_add_arg_datasets(datasets, args)
_add_kwarg_datasets(datasets, kwargs)
return datasets
def _add_arg_datasets(datasets, args):
"""Add data sets of the given args.
:param datasets:
The dict where to accumulate data sets.
:type datasets:
`dict`
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
"""
for dataset in args:
# turn a value into a 1-tuple.
if not isinstance(dataset, (tuple, GentyArgs)):
dataset = (dataset,)
# Create a test_name_suffix - basically the parameter list
if isinstance(dataset, GentyArgs):
dataset_strings = dataset # GentyArgs supports iteration
else:
dataset_strings = [format_arg(data) for data in dataset]
test_method_suffix = ", ".join(dataset_strings)
datasets[test_method_suffix] = dataset
def _add_kwarg_datasets(datasets, kwargs):
"""Add data sets of the given kwargs.
:param datasets:
The dict where to accumulate data sets.
:type datasets:
`dict`
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
"""
for test_method_suffix, dataset in six.iteritems(kwargs):
datasets[test_method_suffix] = dataset
|
box/genty
|
genty/genty_dataset.py
|
genty_dataset
|
python
|
def genty_dataset(*args, **kwargs):
datasets = _build_datasets(*args, **kwargs)
def wrap(test_method):
# Save the datasets in the test method. This data will be consumed
# by the @genty decorator.
if not hasattr(test_method, 'genty_datasets'):
test_method.genty_datasets = OrderedDict()
test_method.genty_datasets.update(datasets)
return test_method
return wrap
|
Decorator defining data sets to provide to a test.
Inspired by http://sebastian-bergmann.de/archives/
702-Data-Providers-in-PHPUnit-3.2.html
The canonical way to call @genty_dataset, with each argument each
representing a data set to be injected in the test method call:
@genty_dataset(
('a1', 'b1'),
('a2', 'b2'),
)
def test_some_function(a, b)
...
If the test function takes only one parameter, you can replace the tuples
by a single value. So instead of the more verbose:
@genty_dataset(
('c1',),
('c2',),
)
def test_some_other_function(c)
...
One can write:
@genty_dataset('c1', 'c2')
def test_some_other_function(c)
...
For each set of arguments, a suffix identifying that argument set is
built by concatenating the string representation of the arguments
together. You can control the test names for each data set by passing
the data sets as keyword args, where the keyword is the desired suffix.
For example:
@genty_dataset(
('a1', 'b1'),
)
def test_function(a, b)
...
produces a test named 'test_function_for_a1_and_b1', while
@genty_dataset(
happy_path=('a1', 'b1'),
)
def test_function(a, b)
...
produces a test named test_function_for_happy_path. These are just
parameters to a method call, so one can have unnamed args first
followed by keyword args
@genty_dataset(
('x', 'y'),
('p', 'q'),
Monday=('a1', 'b1'),
Tuesday=('t1', 't2'),
)
def test_function(a, b)
...
Finally, datasets can be chained. Useful for example if there are
distinct sets of params that make sense (cleaner, more readable, or
semantically nicer) if kept separate. A fabricated example:
@genty_dataset(
*([i for i in range(10)] + [(i, i) for i in range(10)])
)
def test_some_other_function(param1, param2=None)
...
-- vs --
@genty_dataset(*[i for i in range(10)])
@genty_dataset(*[(i, i) for i in range(10)])
def test_some_other_function(param1, param2=None)
...
If the names of datasets conflict across chained genty_datasets, the
key&value pair from the outer (first) decorator will override the
data from the inner.
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty_dataset.py#L50-L148
|
[
"def _build_datasets(*args, **kwargs):\n \"\"\"Build the datasets into a dict, where the keys are the name of the\n data set and the values are the data sets themselves.\n\n :param args:\n Tuple of unnamed data sets.\n :type args:\n `tuple` of varies\n :param kwargs:\n Dict of pre-named data sets.\n :type kwargs:\n `dict` of `unicode` to varies\n :return:\n The dataset dict.\n :rtype:\n `dict`\n \"\"\"\n datasets = OrderedDict()\n _add_arg_datasets(datasets, args)\n _add_kwarg_datasets(datasets, kwargs)\n return datasets\n"
] |
# coding: utf-8
from __future__ import unicode_literals
try:
from collections import OrderedDict
except ImportError:
# pylint:disable=import-error
from ordereddict import OrderedDict
# pylint:enable=import-error
import six
from .genty_args import GentyArgs
from .private import format_arg
def genty_dataprovider(builder_function):
    """Decorator hooking a test method up to the given builder_function.

    :param builder_function:
        A callable that returns parameters that will be passed to the method
        decorated by this decorator.
        If the builder_function returns a tuple or list, then that will be
        passed as *args to the decorated method.
        If the builder_function returns a :class:`GentyArgs`, then that will
        be used to pass *args and **kwargs to the decorated method.
        Any other return value will be treated as a single parameter, and
        passed as such to the decorated method.
    :type builder_function:
        `callable`
    """
    # A builder without datasets of its own is invoked once, with no args.
    datasets = getattr(builder_function, 'genty_datasets', {None: ()})

    def wrap(test_method):
        # Accumulate the providers on the test method itself; they are
        # consumed later by the @genty decorator.
        providers = getattr(test_method, 'genty_dataproviders', None)
        if providers is None:
            providers = []
            test_method.genty_dataproviders = providers
        providers.append((builder_function, datasets))
        return test_method
    return wrap
def _build_datasets(*args, **kwargs):
    """Collect all data sets into one ordered name -> data set mapping.

    Unnamed (positional) data sets are registered first, followed by the
    pre-named (keyword) ones.

    :param args:
        Tuple of unnamed data sets.
    :type args:
        `tuple` of varies
    :param kwargs:
        Dict of pre-named data sets.
    :type kwargs:
        `dict` of `unicode` to varies
    :return:
        The dataset dict.
    :rtype:
        `dict`
    """
    result = OrderedDict()
    for add, source in (
            (_add_arg_datasets, args),
            (_add_kwarg_datasets, kwargs),
    ):
        add(result, source)
    return result
def _add_arg_datasets(datasets, args):
    """Add data sets of the given args.

    Each data set is stored under an auto-generated name: the
    comma-separated formatted parameter list, so the resulting test name
    reads like a function call.

    :param datasets:
        The dict where to accumulate data sets.
    :type datasets:
        `dict`
    :param args:
        Tuple of unnamed data sets.
    :type args:
        `tuple` of varies
    """
    for entry in args:
        # Promote a bare value to a 1-tuple so it acts as a 1-arg set.
        if not isinstance(entry, (tuple, GentyArgs)):
            entry = (entry,)
        # GentyArgs instances yield their own pre-formatted arg strings.
        if isinstance(entry, GentyArgs):
            formatted = entry
        else:
            formatted = (format_arg(item) for item in entry)
        datasets[", ".join(formatted)] = entry
def _add_kwarg_datasets(datasets, kwargs):
    """Add data sets of the given kwargs.

    The keyword itself is the data set's name (and thus the generated
    test-name suffix).

    :param datasets:
        The dict where to accumulate data sets.
    :type datasets:
        `dict`
    :param kwargs:
        Dict of pre-named data sets.
    :type kwargs:
        `dict` of `unicode` to varies
    """
    datasets.update(six.iteritems(kwargs))
|
box/genty
|
genty/genty_dataset.py
|
_build_datasets
|
python
|
def _build_datasets(*args, **kwargs):
datasets = OrderedDict()
_add_arg_datasets(datasets, args)
_add_kwarg_datasets(datasets, kwargs)
return datasets
|
Build the datasets into a dict, where the keys are the name of the
data set and the values are the data sets themselves.
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
:return:
The dataset dict.
:rtype:
`dict`
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty_dataset.py#L151-L171
|
[
"def _add_arg_datasets(datasets, args):\n \"\"\"Add data sets of the given args.\n\n :param datasets:\n The dict where to accumulate data sets.\n :type datasets:\n `dict`\n :param args:\n Tuple of unnamed data sets.\n :type args:\n `tuple` of varies\n \"\"\"\n for dataset in args:\n # turn a value into a 1-tuple.\n if not isinstance(dataset, (tuple, GentyArgs)):\n dataset = (dataset,)\n\n # Create a test_name_suffix - basically the parameter list\n if isinstance(dataset, GentyArgs):\n dataset_strings = dataset # GentyArgs supports iteration\n else:\n dataset_strings = [format_arg(data) for data in dataset]\n test_method_suffix = \", \".join(dataset_strings)\n\n datasets[test_method_suffix] = dataset\n",
"def _add_kwarg_datasets(datasets, kwargs):\n \"\"\"Add data sets of the given kwargs.\n\n :param datasets:\n The dict where to accumulate data sets.\n :type datasets:\n `dict`\n :param kwargs:\n Dict of pre-named data sets.\n :type kwargs:\n `dict` of `unicode` to varies\n \"\"\"\n for test_method_suffix, dataset in six.iteritems(kwargs):\n datasets[test_method_suffix] = dataset\n"
] |
# coding: utf-8
from __future__ import unicode_literals
try:
from collections import OrderedDict
except ImportError:
# pylint:disable=import-error
from ordereddict import OrderedDict
# pylint:enable=import-error
import six
from .genty_args import GentyArgs
from .private import format_arg
def genty_dataprovider(builder_function):
"""Decorator defining that this test gets parameters from the given
build_function.
:param builder_function:
A callable that returns parameters that will be passed to the method
decorated by this decorator.
If the builder_function returns a tuple or list, then that will be
passed as *args to the decorated method.
If the builder_function returns a :class:`GentyArgs`, then that will
be used to pass *args and **kwargs to the decorated method.
Any other return value will be treated as a single parameter, and
passed as such to the decorated method.
:type builder_function:
`callable`
"""
datasets = getattr(builder_function, 'genty_datasets', {None: ()})
def wrap(test_method):
# Save the data providers in the test method. This data will be
# consumed by the @genty decorator.
if not hasattr(test_method, 'genty_dataproviders'):
test_method.genty_dataproviders = []
test_method.genty_dataproviders.append(
(builder_function, datasets),
)
return test_method
return wrap
def genty_dataset(*args, **kwargs):
    """Decorator attaching data sets to a test method.
    Inspired by http://sebastian-bergmann.de/archives/
    702-Data-Providers-in-PHPUnit-3.2.html

    Each positional argument is one data set: a tuple of parameters to
    inject into the test method call, or a single non-tuple value which
    is treated as a 1-tuple. Each keyword argument is a pre-named data
    set, where the keyword becomes the test-name suffix; unnamed data
    sets get a suffix built by concatenating the string representation
    of their arguments. Positional and keyword data sets may be mixed,
    e.g.:

        @genty_dataset(
            ('x', 'y'),
            happy_path=('a1', 'b1'),
        )
        def test_function(a, b)
            ...

    @genty_dataset decorators can be chained; if data set names conflict
    across chained decorators, the outer (first) decorator's key&value
    pair overrides the inner's.

    :param args:
        Tuple of unnamed data sets.
    :type args:
        `tuple` of varies
    :param kwargs:
        Dict of pre-named data sets.
    :type kwargs:
        `dict` of `unicode` to varies
    """
    named_datasets = _build_datasets(*args, **kwargs)

    def wrap(test_method):
        # Stash the datasets on the test method; the @genty decorator
        # consumes them later.
        existing = getattr(test_method, 'genty_datasets', None)
        if existing is None:
            existing = OrderedDict()
            test_method.genty_datasets = existing
        existing.update(named_datasets)
        return test_method

    return wrap
def _add_arg_datasets(datasets, args):
"""Add data sets of the given args.
:param datasets:
The dict where to accumulate data sets.
:type datasets:
`dict`
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
"""
for dataset in args:
# turn a value into a 1-tuple.
if not isinstance(dataset, (tuple, GentyArgs)):
dataset = (dataset,)
# Create a test_name_suffix - basically the parameter list
if isinstance(dataset, GentyArgs):
dataset_strings = dataset # GentyArgs supports iteration
else:
dataset_strings = [format_arg(data) for data in dataset]
test_method_suffix = ", ".join(dataset_strings)
datasets[test_method_suffix] = dataset
def _add_kwarg_datasets(datasets, kwargs):
"""Add data sets of the given kwargs.
:param datasets:
The dict where to accumulate data sets.
:type datasets:
`dict`
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
"""
for test_method_suffix, dataset in six.iteritems(kwargs):
datasets[test_method_suffix] = dataset
|
box/genty
|
genty/genty_dataset.py
|
_add_arg_datasets
|
python
|
def _add_arg_datasets(datasets, args):
for dataset in args:
# turn a value into a 1-tuple.
if not isinstance(dataset, (tuple, GentyArgs)):
dataset = (dataset,)
# Create a test_name_suffix - basically the parameter list
if isinstance(dataset, GentyArgs):
dataset_strings = dataset # GentyArgs supports iteration
else:
dataset_strings = [format_arg(data) for data in dataset]
test_method_suffix = ", ".join(dataset_strings)
datasets[test_method_suffix] = dataset
|
Add data sets of the given args.
:param datasets:
The dict where to accumulate data sets.
:type datasets:
`dict`
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty_dataset.py#L174-L198
| null |
# coding: utf-8
from __future__ import unicode_literals
try:
from collections import OrderedDict
except ImportError:
# pylint:disable=import-error
from ordereddict import OrderedDict
# pylint:enable=import-error
import six
from .genty_args import GentyArgs
from .private import format_arg
def genty_dataprovider(builder_function):
"""Decorator defining that this test gets parameters from the given
build_function.
:param builder_function:
A callable that returns parameters that will be passed to the method
decorated by this decorator.
If the builder_function returns a tuple or list, then that will be
passed as *args to the decorated method.
If the builder_function returns a :class:`GentyArgs`, then that will
be used to pass *args and **kwargs to the decorated method.
Any other return value will be treated as a single parameter, and
passed as such to the decorated method.
:type builder_function:
`callable`
"""
datasets = getattr(builder_function, 'genty_datasets', {None: ()})
def wrap(test_method):
# Save the data providers in the test method. This data will be
# consumed by the @genty decorator.
if not hasattr(test_method, 'genty_dataproviders'):
test_method.genty_dataproviders = []
test_method.genty_dataproviders.append(
(builder_function, datasets),
)
return test_method
return wrap
def genty_dataset(*args, **kwargs):
"""Decorator defining data sets to provide to a test.
Inspired by http://sebastian-bergmann.de/archives/
702-Data-Providers-in-PHPUnit-3.2.html
The canonical way to call @genty_dataset, with each argument each
representing a data set to be injected in the test method call:
@genty_dataset(
('a1', 'b1'),
('a2', 'b2'),
)
def test_some_function(a, b)
...
If the test function takes only one parameter, you can replace the tuples
by a single value. So instead of the more verbose:
@genty_dataset(
('c1',),
('c2',),
)
def test_some_other_function(c)
...
One can write:
@genty_dataset('c1', 'c2')
def test_some_other_function(c)
...
For each set of arguments, a suffix identifying that argument set is
built by concatenating the string representation of the arguments
together. You can control the test names for each data set by passing
the data sets as keyword args, where the keyword is the desired suffix.
For example:
@genty_dataset(
        ('a1', 'b1'),
)
def test_function(a, b)
...
produces a test named 'test_function_for_a1_and_b1', while
@genty_dataset(
happy_path=('a1', 'b1'),
)
def test_function(a, b)
...
produces a test named test_function_for_happy_path. These are just
parameters to a method call, so one can have unnamed args first
followed by keyword args
@genty_dataset(
('x', 'y'),
('p', 'q'),
Monday=('a1', 'b1'),
Tuesday=('t1', 't2'),
)
def test_function(a, b)
...
Finally, datasets can be chained. Useful for example if there are
distinct sets of params that make sense (cleaner, more readable, or
semantically nicer) if kept separate. A fabricated example:
@genty_dataset(
*([i for i in range(10)] + [(i, i) for i in range(10)])
)
def test_some_other_function(param1, param2=None)
...
-- vs --
@genty_dataset(*[i for i in range(10)])
@genty_dataset(*[(i, i) for i in range(10)])
def test_some_other_function(param1, param2=None)
...
If the names of datasets conflict across chained genty_datasets, the
key&value pair from the outer (first) decorator will override the
data from the inner.
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
"""
datasets = _build_datasets(*args, **kwargs)
def wrap(test_method):
# Save the datasets in the test method. This data will be consumed
# by the @genty decorator.
if not hasattr(test_method, 'genty_datasets'):
test_method.genty_datasets = OrderedDict()
test_method.genty_datasets.update(datasets)
return test_method
return wrap
def _build_datasets(*args, **kwargs):
"""Build the datasets into a dict, where the keys are the name of the
data set and the values are the data sets themselves.
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
:return:
The dataset dict.
:rtype:
`dict`
"""
datasets = OrderedDict()
_add_arg_datasets(datasets, args)
_add_kwarg_datasets(datasets, kwargs)
return datasets
def _add_kwarg_datasets(datasets, kwargs):
"""Add data sets of the given kwargs.
:param datasets:
The dict where to accumulate data sets.
:type datasets:
`dict`
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
"""
for test_method_suffix, dataset in six.iteritems(kwargs):
datasets[test_method_suffix] = dataset
|
box/genty
|
genty/genty_dataset.py
|
_add_kwarg_datasets
|
python
|
def _add_kwarg_datasets(datasets, kwargs):
for test_method_suffix, dataset in six.iteritems(kwargs):
datasets[test_method_suffix] = dataset
|
Add data sets of the given kwargs.
:param datasets:
The dict where to accumulate data sets.
:type datasets:
`dict`
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
|
train
|
https://github.com/box/genty/blob/85f7c960a2b67cf9e58e0d9e677e4a0bc4f05081/genty/genty_dataset.py#L201-L214
| null |
# coding: utf-8
from __future__ import unicode_literals
try:
from collections import OrderedDict
except ImportError:
# pylint:disable=import-error
from ordereddict import OrderedDict
# pylint:enable=import-error
import six
from .genty_args import GentyArgs
from .private import format_arg
def genty_dataprovider(builder_function):
"""Decorator defining that this test gets parameters from the given
build_function.
:param builder_function:
A callable that returns parameters that will be passed to the method
decorated by this decorator.
If the builder_function returns a tuple or list, then that will be
passed as *args to the decorated method.
If the builder_function returns a :class:`GentyArgs`, then that will
be used to pass *args and **kwargs to the decorated method.
Any other return value will be treated as a single parameter, and
passed as such to the decorated method.
:type builder_function:
`callable`
"""
datasets = getattr(builder_function, 'genty_datasets', {None: ()})
def wrap(test_method):
# Save the data providers in the test method. This data will be
# consumed by the @genty decorator.
if not hasattr(test_method, 'genty_dataproviders'):
test_method.genty_dataproviders = []
test_method.genty_dataproviders.append(
(builder_function, datasets),
)
return test_method
return wrap
def genty_dataset(*args, **kwargs):
"""Decorator defining data sets to provide to a test.
Inspired by http://sebastian-bergmann.de/archives/
702-Data-Providers-in-PHPUnit-3.2.html
The canonical way to call @genty_dataset, with each argument each
representing a data set to be injected in the test method call:
@genty_dataset(
('a1', 'b1'),
('a2', 'b2'),
)
def test_some_function(a, b)
...
If the test function takes only one parameter, you can replace the tuples
by a single value. So instead of the more verbose:
@genty_dataset(
('c1',),
('c2',),
)
def test_some_other_function(c)
...
One can write:
@genty_dataset('c1', 'c2')
def test_some_other_function(c)
...
For each set of arguments, a suffix identifying that argument set is
built by concatenating the string representation of the arguments
together. You can control the test names for each data set by passing
the data sets as keyword args, where the keyword is the desired suffix.
For example:
@genty_dataset(
        ('a1', 'b1'),
)
def test_function(a, b)
...
produces a test named 'test_function_for_a1_and_b1', while
@genty_dataset(
happy_path=('a1', 'b1'),
)
def test_function(a, b)
...
produces a test named test_function_for_happy_path. These are just
parameters to a method call, so one can have unnamed args first
followed by keyword args
@genty_dataset(
('x', 'y'),
('p', 'q'),
Monday=('a1', 'b1'),
Tuesday=('t1', 't2'),
)
def test_function(a, b)
...
Finally, datasets can be chained. Useful for example if there are
distinct sets of params that make sense (cleaner, more readable, or
semantically nicer) if kept separate. A fabricated example:
@genty_dataset(
*([i for i in range(10)] + [(i, i) for i in range(10)])
)
def test_some_other_function(param1, param2=None)
...
-- vs --
@genty_dataset(*[i for i in range(10)])
@genty_dataset(*[(i, i) for i in range(10)])
def test_some_other_function(param1, param2=None)
...
If the names of datasets conflict across chained genty_datasets, the
key&value pair from the outer (first) decorator will override the
data from the inner.
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
"""
datasets = _build_datasets(*args, **kwargs)
def wrap(test_method):
# Save the datasets in the test method. This data will be consumed
# by the @genty decorator.
if not hasattr(test_method, 'genty_datasets'):
test_method.genty_datasets = OrderedDict()
test_method.genty_datasets.update(datasets)
return test_method
return wrap
def _build_datasets(*args, **kwargs):
"""Build the datasets into a dict, where the keys are the name of the
data set and the values are the data sets themselves.
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
:param kwargs:
Dict of pre-named data sets.
:type kwargs:
`dict` of `unicode` to varies
:return:
The dataset dict.
:rtype:
`dict`
"""
datasets = OrderedDict()
_add_arg_datasets(datasets, args)
_add_kwarg_datasets(datasets, kwargs)
return datasets
def _add_arg_datasets(datasets, args):
"""Add data sets of the given args.
:param datasets:
The dict where to accumulate data sets.
:type datasets:
`dict`
:param args:
Tuple of unnamed data sets.
:type args:
`tuple` of varies
"""
for dataset in args:
# turn a value into a 1-tuple.
if not isinstance(dataset, (tuple, GentyArgs)):
dataset = (dataset,)
# Create a test_name_suffix - basically the parameter list
if isinstance(dataset, GentyArgs):
dataset_strings = dataset # GentyArgs supports iteration
else:
dataset_strings = [format_arg(data) for data in dataset]
test_method_suffix = ", ".join(dataset_strings)
datasets[test_method_suffix] = dataset
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
getInterfaceInAllSpeeds
|
python
|
def getInterfaceInAllSpeeds(interface, endpoint_list, class_descriptor_list=()):
    """
    Produce similar full-, high- and super-speed interface and endpoint
    descriptors, reducing data duplication from descriptor declarations.

    interface (dict)
        Keyword arguments for getDescriptor(USBInterfaceDescriptor, ...);
        bNumEndpoints must not be provided.
    endpoint_list (list of dicts)
        Each dict describes one endpoint:
        - "endpoint": required, keyword arguments for the endpoint
          descriptor. bmAttributes must be provided. wMaxPacketSize may be
          missing, in which case the per-speed maximum for the transfer
          type is used; when provided, it is clamped to each speed's
          maximum. If bEndpointAddress is zero (direction bit excluded) on
          the first endpoint, endpoints are auto-numbered from 1, keeping
          their direction bit. bInterval, when present on an INT or ISO
          endpoint, is in millisecond units and is converted per speed.
        - "superspeed": optional, keyword arguments for
          getDescriptor(USBSSEPCompDescriptor, ...)
        - "superspeed_iso": optional, keyword arguments for
          getDescriptor(USBSSPIsocEndpointDescriptor, ...); must be
          provided and non-empty only for an isochronous endpoint whose
          companion has the SSP ISOC companion bit set.
    class_descriptor_list (list of descriptors)
        Inserted in all speeds between the interface descriptor and the
        endpoint descriptors.

    Returns a 3-tuple of lists: (fs descriptors, hs descriptors,
    ss descriptors).

    Raises ValueError if "superspeed_iso" presence is inconsistent with
    the endpoint type and its superspeed companion attributes.
    """
    interface = getDescriptor(
        USBInterfaceDescriptor,
        bNumEndpoints=len(endpoint_list),
        **interface
    )
    class_descriptor_list = list(class_descriptor_list)
    fs_list = [interface] + class_descriptor_list
    hs_list = [interface] + class_descriptor_list
    ss_list = [interface] + class_descriptor_list
    # Auto-number endpoints when the first one has address zero
    # (direction bit excluded).
    need_address = (
        endpoint_list[0]['endpoint'].get(
            'bEndpointAddress',
            0,
        ) & ~ch9.USB_DIR_IN == 0
    )
    for index, endpoint in enumerate(endpoint_list, 1):
        endpoint_kw = endpoint['endpoint'].copy()
        transfer_type = endpoint_kw[
            'bmAttributes'
        ] & ch9.USB_ENDPOINT_XFERTYPE_MASK
        fs_max, hs_max, ss_max = _MAX_PACKET_SIZE_DICT[transfer_type]
        if need_address:
            endpoint_kw['bEndpointAddress'] = index | (
                endpoint_kw.get('bEndpointAddress', 0) & ch9.USB_DIR_IN
            )
        # Pick the audio variant only when its extra fields are used.
        klass = (
            USBEndpointDescriptor
            if 'bRefresh' in endpoint_kw or 'bSynchAddress' in endpoint_kw else
            USBEndpointDescriptorNoAudio
        )
        interval = endpoint_kw.pop('bInterval', _MARKER)
        if interval is _MARKER:
            fs_interval = hs_interval = 0
        else:
            if transfer_type == ch9.USB_ENDPOINT_XFER_BULK:
                fs_interval = 0
                hs_interval = interval
            else: # USB_ENDPOINT_XFER_ISOC or USB_ENDPOINT_XFER_INT
                # Full-speed: whole milliseconds, clamped to [1..255].
                fs_interval = max(1, min(255, round(interval)))
                # High-speed: 2**(bInterval - 1) microframes;
                # 8 is the number of microframes in a millisecond.
                hs_interval = max(
                    1,
                    min(16, int(round(1 + math.log(interval * 8, 2)))),
                )
        packet_size = endpoint_kw.pop('wMaxPacketSize', _MARKER)
        if packet_size is _MARKER:
            fs_packet_size = fs_max
            hs_packet_size = hs_max
            ss_packet_size = ss_max
        else:
            fs_packet_size = min(fs_max, packet_size)
            hs_packet_size = min(hs_max, packet_size)
            ss_packet_size = min(ss_max, packet_size)
        # BUGFIX: use the clamped per-speed packet sizes computed above;
        # previously the per-speed maxima were used unconditionally,
        # silently ignoring a caller-provided wMaxPacketSize.
        fs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=fs_packet_size,
            bInterval=fs_interval,
            **endpoint_kw
        ))
        hs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=hs_packet_size,
            bInterval=hs_interval,
            **endpoint_kw
        ))
        ss_list.append(getDescriptor(
            klass,
            wMaxPacketSize=ss_packet_size,
            # Super-speed uses the same bInterval encoding as high-speed.
            bInterval=hs_interval,
            **endpoint_kw
        ))
        ss_companion_kw = endpoint.get('superspeed', _EMPTY_DICT)
        ss_list.append(getDescriptor(
            USBSSEPCompDescriptor,
            **ss_companion_kw
        ))
        ssp_iso_kw = endpoint.get('superspeed_iso', _EMPTY_DICT)
        # "superspeed_iso" must be present exactly when the endpoint is
        # isochronous and its companion advertises an SSP ISOC companion.
        if bool(ssp_iso_kw) != (
            endpoint_kw.get('bmAttributes', 0) &
            ch9.USB_ENDPOINT_XFERTYPE_MASK ==
            ch9.USB_ENDPOINT_XFER_ISOC and
            bool(ch9.USB_SS_SSP_ISOC_COMP(
                ss_companion_kw.get('bmAttributes', 0),
            ))
        ):
            raise ValueError('Inconsistent isochronous companion')
        if ssp_iso_kw:
            ss_list.append(getDescriptor(
                USBSSPIsocEndpointDescriptor,
                **ssp_iso_kw
            ))
    return (fs_list, hs_list, ss_list)
|
Produce similar fs, hs and ss interface and endpoints descriptors.
Should be useful for devices desiring to work in all 3 speeds with maximum
endpoint wMaxPacketSize. Reduces data duplication from descriptor
declarations.
Not intended to cover fancy combinations.
interface (dict):
Keyword arguments for
getDescriptor(USBInterfaceDescriptor, ...)
in all speeds.
bNumEndpoints must not be provided.
endpoint_list (list of dicts)
Each dict represents an endpoint, and may contain the following items:
- "endpoint": required, contains keyword arguments for
getDescriptor(USBEndpointDescriptorNoAudio, ...)
or
getDescriptor(USBEndpointDescriptor, ...)
The with-audio variant is picked when its extra fields are assigned a
value.
wMaxPacketSize may be missing, in which case it will be set to the
maximum size for given speed and endpoint type.
bmAttributes must be provided.
If bEndpointAddress is zero (excluding direction bit) on the first
endpoint, endpoints will be assigned their rank in this list,
starting at 1. Their direction bit is preserved.
If bInterval is present on a INT or ISO endpoint, it must be in
millisecond units (but may not be an integer), and will be converted
to the nearest integer millisecond for full-speed descriptor, and
nearest possible interval for high- and super-speed descriptors.
If bInterval is present on a BULK endpoint, it is set to zero on
full-speed descriptor and used as provided on high- and super-speed
descriptors.
- "superspeed": optional, contains keyword arguments for
getDescriptor(USBSSEPCompDescriptor, ...)
- "superspeed_iso": optional, contains keyword arguments for
getDescriptor(USBSSPIsocEndpointDescriptor, ...)
Must be provided and non-empty only when endpoint is isochronous and
"superspeed" dict has "bmAttributes" bit 7 set.
class_descriptor (list of descriptors of any type)
Descriptors to insert in all speeds between the interface descriptor and
endpoint descriptors.
Returns a 3-tuple of lists:
- fs descriptors
- hs descriptors
- ss descriptors
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L120-L260
|
[
"def getDescriptor(klass, **kw):\n \"\"\"\n Automatically fills bLength and bDescriptorType.\n \"\"\"\n # XXX: ctypes Structure.__init__ ignores arguments which do not exist\n # as structure fields. So check it.\n # This is annoying, but not doing it is a huge waste of time for the\n # developer.\n empty = klass()\n assert hasattr(empty, 'bLength')\n assert hasattr(empty, 'bDescriptorType')\n unknown = [x for x in kw if not hasattr(empty, x)]\n if unknown:\n raise TypeError('Unknown fields %r' % (unknown, ))\n # XXX: not very pythonic...\n return klass(\n bLength=ctypes.sizeof(klass),\n # pylint: disable=protected-access\n bDescriptorType=klass._bDescriptorType,\n # pylint: enable=protected-access\n **kw\n )\n"
] |
# This file is part of python-functionfs
# Copyright (C) 2016-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# python-functionfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-functionfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-functionfs. If not, see <http://www.gnu.org/licenses/>.
"""
Interfaces with functionfs to simplify USB gadget function declaration and
implementation on linux.
Defines standard USB descriptors (see "ch9" submodule) and sends them to the
kernel to declare function's structure.
Provides methods for accessing each endpoint and to react to events.
"""
import ctypes
import errno
import fcntl
import io
import itertools
import math
import os
import struct
import warnings
from .common import (
USBDescriptorHeader,
le32,
)
from . import ch9
from .ch9 import (
USBInterfaceDescriptor,
USBEndpointDescriptorNoAudio,
USBEndpointDescriptor,
USBSSEPCompDescriptor,
# USBSSPIsocEndpointDescriptor is not implemented in kernel as of this
# writing.
USBSSPIsocEndpointDescriptor,
# USBQualifierDescriptor is reserved for gadgets, so don't expose it.
USBOTGDescriptor,
USBOTG20Descriptor,
    # USBDebugDescriptor is not implemented in kernel as of this writing.
USBDebugDescriptor,
USBInterfaceAssocDescriptor,
)
from .functionfs import (
DESCRIPTORS_MAGIC, STRINGS_MAGIC, DESCRIPTORS_MAGIC_V2,
FLAGS,
DescsHeadV2,
DescsHead,
OSDescHeader,
OSDescHeaderBCount,
OSExtCompatDesc,
OSExtPropDescHead,
StringsHead,
StringBase,
Event,
FIFO_STATUS, FIFO_FLUSH, CLEAR_HALT, INTERFACE_REVMAP, ENDPOINT_REVMAP, ENDPOINT_DESC,
)
# pylint: disable=no-name-in-module
from .functionfs import (
HAS_FS_DESC,
HAS_HS_DESC,
HAS_SS_DESC,
HAS_MS_OS_DESC,
ALL_CTRL_RECIP,
CONFIG0_SETUP,
BIND, UNBIND, ENABLE, DISABLE, SETUP, SUSPEND, RESUME,
)
# pylint: enable=no-name-in-module
__all__ = (
'ch9',
'Function',
# XXX: Not very pythonic...
'getInterfaceInAllSpeeds',
'getDescriptor',
'getOSDesc',
'getOSExtPropDesc',
'USBInterfaceDescriptor',
'USBEndpointDescriptorNoAudio',
'USBEndpointDescriptor',
'USBSSEPCompDescriptor',
'USBSSPIsocEndpointDescriptor',
'USBOTGDescriptor',
'USBOTG20Descriptor',
'USBDebugDescriptor',
'USBInterfaceAssocDescriptor',
'OSExtCompatDesc',
)
_MAX_PACKET_SIZE_DICT = {
ch9.USB_ENDPOINT_XFER_ISOC: (
1023, # 0..1023
1024, # 0..1024
1024, # 0..1024
),
ch9.USB_ENDPOINT_XFER_BULK: (
64, # 8, 16, 32, 64
512, # 512 only
1024, # 1024 only
),
ch9.USB_ENDPOINT_XFER_INT: (
64, # 0..64
1024, # 0..1024
1024, # 1..1024
),
}
_MARKER = object()
_EMPTY_DICT = {} # For internal ** fallback usage
def getDescriptor(klass, **kw):
    """
    Instantiate klass with bLength and bDescriptorType filled in
    automatically.

    Raises TypeError when kw contains a name that is not a field of
    klass, because ctypes Structure.__init__ would otherwise silently
    ignore it — a huge waste of developer time.
    """
    probe = klass()
    assert hasattr(probe, 'bLength')
    assert hasattr(probe, 'bDescriptorType')
    bogus = [name for name in kw if not hasattr(probe, name)]
    if bogus:
        raise TypeError('Unknown fields %r' % (bogus, ))
    # pylint: disable=protected-access
    descriptor_type = klass._bDescriptorType
    # pylint: enable=protected-access
    # XXX: not very pythonic...
    return klass(
        bLength=ctypes.sizeof(klass),
        bDescriptorType=descriptor_type,
        **kw
    )
def getOSDesc(interface, ext_list):
    """
    Build an OS description header wrapping ext_list.

    interface (int)
        Related interface number.
    ext_list (list of OSExtCompatDesc or OSExtPropDesc)
        List of instances of extended descriptors; all items must be of
        a single type.
    """
    try:
        ext_type, = {type(x) for x in ext_list}
    except ValueError:
        raise TypeError('Extensions of a single type are required.')
    if issubclass(ext_type, OSExtCompatDesc):
        # Extended compatibility: wIndex 4, carries a bCount byte.
        wIndex = 4
        extra_kw = {
            'b': OSDescHeaderBCount(
                bCount=len(ext_list),
                Reserved=0,
            ),
        }
    elif issubclass(ext_type, OSExtPropDescHead):
        # Extended properties: wIndex 5, carries a wCount word.
        wIndex = 5
        extra_kw = {
            'wCount': len(ext_list),
        }
    else:
        raise TypeError('Extensions of unexpected type')
    # The array length depends on ext_list, so a dedicated subclass is
    # generated per call.
    array_type = ext_type * len(ext_list)
    klass = type(
        'OSDesc',
        (OSDescHeader, ),
        {
            '_fields_': [
                ('ext_list', array_type),
            ],
        },
    )
    return klass(
        interface=interface,
        dwLength=ctypes.sizeof(klass),
        bcdVersion=1,
        wIndex=wIndex,
        ext_list=array_type(*ext_list),
        **extra_kw
    )
def getOSExtPropDesc(data_type, name, value):
    """
    Build an OS extension property descriptor.

    data_type (int)
        See wPropertyDataType documentation.
    name (string)
        See PropertyName documentation.
    value (string)
        See PropertyData documentation.
        NULL chars must be explicitely included in the value when needed,
        this function does not add any terminating NULL for example.
    """
    # Field sizes depend on the payload, so a dedicated subclass is
    # generated for each call.
    fields = [
        ('bPropertyName', ctypes.c_char * len(name)),
        ('dwPropertyDataLength', le32),
        ('bProperty', ctypes.c_char * len(value)),
    ]
    klass = type(
        'OSExtPropDesc',
        (OSExtPropDescHead, ),
        {'_fields_': fields},
    )
    return klass(
        dwSize=ctypes.sizeof(klass),
        dwPropertyDataType=data_type,
        wPropertyNameLength=len(name),
        bPropertyName=name,
        dwPropertyDataLength=len(value),
        bProperty=value,
    )
#def getDescs(*args, **kw):
# """
# Return a legacy format FunctionFS suitable for serialisation.
# Deprecated as of 3.14 .
#
# NOT IMPLEMENTED
# """
# warnings.warn(
# DeprecationWarning,
# 'Legacy format, deprecated as of 3.14.',
# )
# raise NotImplementedError('TODO')
# klass = type(
# 'Descs',
# (DescsHead, ),
# {
# 'fs_descrs': None, # TODO
# 'hs_descrs': None, # TODO
# },
# )
# return klass(
# magic=DESCRIPTORS_MAGIC,
# length=ctypes.sizeof(klass),
# **kw
# )
def getDescsV2(flags, fs_list=(), hs_list=(), ss_list=(), os_list=()):
    """
    Return a FunctionFS descriptor suitable for serialisation.

    flags (int)
        Any combination of VIRTUAL_ADDR, EVENTFD, ALL_CTRL_RECIP,
        CONFIG0_SETUP. The HAS_*_DESC flags are set automatically for
        every non-empty descriptor list.
    {fs,hs,ss,os}_list (list of descriptors)
        Instances of the following classes:
        {fs,hs,ss}_list:
            USBInterfaceDescriptor
            USBEndpointDescriptorNoAudio
            USBEndpointDescriptor
            USBSSEPCompDescriptor
            USBSSPIsocEndpointDescriptor
            USBOTGDescriptor
            USBOTG20Descriptor
            USBInterfaceAssocDescriptor
            TODO: HID
        All (non-empty) lists must define the same number of interfaces
        and endpoints, and endpoint descriptors must be given in the same
        order, bEndpointAddress-wise.
        os_list:
            OSDesc

    Raises TypeError when a list item is not of that list's allowed type,
    and ValueError when a HAS_*_DESC flag is already set in *flags* but
    the matching list is empty.
    """
    count_field_list = []
    descr_field_list = []
    kw = {}
    # For each speed (and the OS descriptors), validate the list, then
    # build a dedicated packed ctypes structure type holding its
    # descriptors, and record the matching count/descr header fields.
    for descriptor_list, flag, prefix, allowed_descriptor_klass in (
        (fs_list, HAS_FS_DESC, 'fs', USBDescriptorHeader),
        (hs_list, HAS_HS_DESC, 'hs', USBDescriptorHeader),
        (ss_list, HAS_SS_DESC, 'ss', USBDescriptorHeader),
        (os_list, HAS_MS_OS_DESC, 'os', OSDescHeader),
    ):
        if descriptor_list:
            for index, descriptor in enumerate(descriptor_list):
                if not isinstance(descriptor, allowed_descriptor_klass):
                    raise TypeError(
                        'Descriptor %r of unexpected type: %r' % (
                            index,
                            type(descriptor),
                        ),
                    )
            # Name each descriptor by its position, so the generated
            # structure fields stay in list order.
            descriptor_map = [
                ('desc_%i' % x, y)
                for x, y in enumerate(descriptor_list)
            ]
            flags |= flag
            count_name = prefix + 'count'
            descr_name = prefix + 'descr'
            count_field_list.append((count_name, le32))
            # One-off packed structure type containing exactly these
            # descriptors, in order.
            descr_type = type(
                't_' + descr_name,
                (ctypes.LittleEndianStructure, ),
                {
                    '_pack_': 1,
                    '_fields_': [
                        (x, type(y))
                        for x, y in descriptor_map
                    ],
                }
            )
            descr_field_list.append((descr_name, descr_type))
            kw[count_name] = len(descriptor_map)
            kw[descr_name] = descr_type(**dict(descriptor_map))
        elif flags & flag:
            # Caller claimed descriptors for this speed but gave none.
            raise ValueError(
                'Flag %r set but descriptor list empty, cannot generate type.' % (
                    FLAGS.get(flag),
                )
            )
    # Final header type: DescsHeadV2 followed by all counts, then all
    # per-speed descriptor structures. Counts must precede descriptors,
    # as required by the FunctionFS v2 binary layout.
    klass = type(
        'DescsV2_0x%02x' % (
            flags & (
                HAS_FS_DESC |
                HAS_HS_DESC |
                HAS_SS_DESC |
                HAS_MS_OS_DESC
            ),
            # XXX: include contained descriptors type information ? (and name ?)
        ),
        (DescsHeadV2, ),
        {
            '_fields_': count_field_list + descr_field_list,
        },
    )
    return klass(
        magic=DESCRIPTORS_MAGIC_V2,
        length=ctypes.sizeof(klass),
        flags=flags,
        **kw
    )
def getStrings(lang_dict):
    """
    Return a FunctionFS strings block suitable for serialisation.
    lang_dict (dict)
        Key: language ID (ex: 0x0409 for en-us)
        Value: list of unicode objects
        All values must have the same number of items.
    Raises ValueError when the per-language string lists have differing
    lengths.
    """
    field_list = []
    kw = {}
    try:
        str_count = len(next(iter(lang_dict.values())))
    except StopIteration:
        # Empty lang_dict: emit a header-only strings block.
        str_count = 0
    else:
        for lang, string_list in lang_dict.items():
            if len(string_list) != str_count:
                raise ValueError('All values must have the same string count.')
            field_id = 'strings_%04x' % lang
            # Strings are serialised as consecutive NUL-terminated UTF-8
            # sequences.
            strings = b'\x00'.join(x.encode('utf-8') for x in string_list) + b'\x00'
            field_type = type(
                'String',
                (StringBase, ),
                {
                    '_fields_': [
                        ('strings', ctypes.c_char * len(strings)),
                    ],
                },
            )
            field_list.append((field_id, field_type))
            kw[field_id] = field_type(
                lang=lang,
                strings=strings,
            )
    # One dynamically-typed field per language, appended after the head.
    klass = type(
        'Strings',
        (StringsHead, ),
        {
            '_fields_': field_list,
        },
    )
    return klass(
        magic=STRINGS_MAGIC,
        length=ctypes.sizeof(klass),
        str_count=str_count,
        lang_count=len(lang_dict),
        **kw
    )
def serialise(structure):
    """
    Expose the raw bytes of a ctypes structure without copying.

    structure (ctypes.Structure)
        The structure to serialise.
    Returns a ctypes.c_char array aliasing the structure's own memory, so
    later mutations of the structure are visible through the result.
    """
    char_array_type = ctypes.c_char * ctypes.sizeof(structure)
    array_pointer = ctypes.cast(
        ctypes.pointer(structure),
        ctypes.POINTER(char_array_type),
    )
    return array_pointer.contents
class EndpointFileBase(io.FileIO):
    """
    Abstract base for endpoint file objects.
    """
    def _ioctl(self, func, *args, **kw):
        # fcntl.ioctl raises on OS-level failure; additionally treat any
        # negative return value as an error.
        status = fcntl.ioctl(self, func, *args, **kw)
        if status < 0:
            raise IOError(status)
        return status
class Endpoint0File(EndpointFileBase):
    """
    File object exposing ioctls available on endpoint zero.
    """
    def halt(self, request_type):
        """
        Halt current endpoint.

        request_type (int)
            bmRequestType of the transaction being answered; only its
            direction bit is used.
        """
        # Stall by doing a zero-length transfer; direction depends on the
        # request's direction bit. The kernel is expected to fail the
        # transfer with EL2HLT — presumably FunctionFS's acknowledgement of
        # the stall (confirm against FunctionFS docs).
        try:
            if request_type & ch9.USB_DIR_IN:
                self.read(0)
            else:
                self.write(b'')
        except IOError as exc:
            if exc.errno != errno.EL2HLT:
                raise
        else:
            raise ValueError('halt did not return EL2HLT ?')
    def getRealInterfaceNumber(self, interface):
        """
        Returns the host-visible interface number, or None if there is no such
        interface.
        """
        try:
            return self._ioctl(INTERFACE_REVMAP, interface)
        except IOError as exc:
            # EDOM: no such interface on the host side.
            if exc.errno == errno.EDOM:
                return None
            raise
    # TODO: Add any standard IOCTL in usb_gadget_ops.ioctl ?
class EndpointFile(EndpointFileBase):
    """
    File object exposing ioctls available on non-zero endpoints.
    """
    # Locally-tracked halt state; the kernel is not queried back.
    _halted = False
    def getRealEndpointNumber(self):
        """
        Returns the host-visible endpoint number.
        """
        return self._ioctl(ENDPOINT_REVMAP)
    def clearHalt(self):
        """
        Clears endpoint halt, and resets toggle.
        See drivers/usb/gadget/udc/core.c:usb_ep_clear_halt
        """
        self._ioctl(CLEAR_HALT)
        self._halted = False
    def getFIFOStatus(self):
        """
        Returns the number of bytes in fifo.
        """
        return self._ioctl(FIFO_STATUS)
    def flushFIFO(self):
        """
        Discards Endpoint FIFO content.
        """
        self._ioctl(FIFO_FLUSH)
    def getDescriptor(self):
        """
        Returns the currently active endpoint descriptor
        (depending on current USB speed).
        """
        result = USBEndpointDescriptor()
        # mutate_flag=True: the ioctl fills "result" in place.
        self._ioctl(ENDPOINT_DESC, result, True)
        return result
    def _halt(self):
        # Direction-specific; implemented by EndpointINFile/EndpointOUTFile.
        raise NotImplementedError
    def halt(self):
        """
        Halt current endpoint.
        Raises ValueError when the kernel accepted the transfer instead of
        failing it with EBADMSG.
        """
        try:
            self._halt()
        except IOError as exc:
            if exc.errno != errno.EBADMSG:
                raise
        else:
            raise ValueError('halt did not return EBADMSG ?')
        self._halted = True
    def isHalted(self):
        """
        Whether endpoint is currently halted.
        """
        return self._halted
class EndpointINFile(EndpointFile):
    """
    Write-only endpoint file.
    """
    @staticmethod
    def read(*_, **__):
        """
        Always raises IOError.
        """
        raise IOError('File not open for reading')
    # Every read variant shares the same always-raising implementation.
    readinto = read
    readall = read
    readlines = read
    readline = read
    @staticmethod
    def readable():
        """
        Never readable.
        """
        return False
    def _halt(self):
        # Deliberately bypass this class' read() override: a real
        # zero-length read on an IN endpoint is what requests the halt.
        super(EndpointINFile, self).read(0)
class EndpointOUTFile(EndpointFile):
    """
    Read-only endpoint file.
    """
    @staticmethod
    def write(*_, **__):
        """
        Always raises IOError.
        """
        raise IOError('File not open for writing')
    # Alias shares the always-raising implementation.
    writelines = write
    @staticmethod
    def writable():
        """
        Never writable.
        """
        return False
    def _halt(self):
        # Deliberately bypass this class' write() override: a real
        # zero-length write on an OUT endpoint is what requests the halt.
        super(EndpointOUTFile, self).write(b'')
# Iterables driving Function.__process: loop until closed vs. one pass.
_INFINITY = itertools.repeat(None)
_ONCE = (None, )
class Function(object):
    """
    Pythonic class for interfacing with FunctionFS.
    Properties available:
    function_remote_wakeup_capable (bool)
        Whether the function wishes to be allowed to wake host.
    function_remote_wakeup (bool)
        Whether host has allowed the function to wake it up.
        Set and cleared by onSetup by calling enableRemoteWakeup and
        disableRemoteWakeup, respectively.
    """
    # True once close() ran; checked by the event loop in __process.
    _closed = False
    _ep_list = () # Avoids failing in __del__ when (subclass') __init__ fails.
    function_remote_wakeup_capable = False
    function_remote_wakeup = False
    def __init__(
        self,
        path,
        fs_list=(), hs_list=(), ss_list=(),
        os_list=(),
        # NOTE(review): mutable default; safe only because it is never
        # mutated (getStrings only reads it).
        lang_dict={},
        all_ctrl_recip=False, config0_setup=False,
    ):
        """
        path (string)
            Path to the functionfs mountpoint (where the ep* files are
            located).
        {fs,hs,ss}_list (list of descriptors)
            XXX: may change to avoid requiring ctype objects.
        os_list (list of descriptors)
            XXX: may change to avoid requiring ctype objects.
        lang_dict (dict)
            Keys: language id (ex: 0x0402 for "us-en").
            Values: List of unicode objects. First item becomes string
                    descriptor 1, and so on. Must contain at least as many
                    string descriptors as the highest string index declared
                    in all descriptors.
        all_ctrl_recip (bool)
            When true, this function will receive all control transactions.
            Useful when implementing non-standard control transactions.
        config0_setup (bool)
            When true, this function will receive control transactions before
            any configuration gets enabled.
        """
        self._path = path
        ep0 = Endpoint0File(os.path.join(path, 'ep0'), 'r+')
        self._ep_list = ep_list = [ep0]
        self._ep_address_dict = ep_address_dict = {}
        flags = 0
        if all_ctrl_recip:
            flags |= ALL_CTRL_RECIP
        if config0_setup:
            flags |= CONFIG0_SETUP
        # Note: serialise does not prevent its argument from being freed and
        # reallocated. Keep strong references to to-serialise values until
        # after they get written.
        desc = getDescsV2(
            flags,
            fs_list=fs_list,
            hs_list=hs_list,
            ss_list=ss_list,
            os_list=os_list,
        )
        ep0.write(serialise(desc))
        # TODO: try v1 on failure ?
        del desc
        # Note: see above.
        strings = getStrings(lang_dict)
        ep0.write(serialise(strings))
        del strings
        # Open one ep%u file per declared endpoint descriptor; assumes
        # FunctionFS numbers them in descriptor declaration order.
        for descriptor in ss_list or hs_list or fs_list:
            if descriptor.bDescriptorType == ch9.USB_DT_ENDPOINT:
                assert descriptor.bEndpointAddress not in ep_address_dict, (
                    descriptor,
                    ep_address_dict[descriptor.bEndpointAddress],
                )
                index = len(ep_list)
                ep_address_dict[descriptor.bEndpointAddress] = index
                ep_list.append(
                    (
                        EndpointINFile
                        if descriptor.bEndpointAddress & ch9.USB_DIR_IN
                        else EndpointOUTFile
                    )(
                        os.path.join(path, 'ep%u' % (index, )),
                        'r+',
                    )
                )
    @property
    def ep0(self):
        """
        Endpoint 0, use when handling setup transactions.
        """
        # First entry of _ep_list, opened in __init__.
        return self._ep_list[0]
    def close(self):
        """
        Close all endpoint file descriptors.
        Safe to call multiple times.
        """
        ep_list = self._ep_list
        while ep_list:
            ep_list.pop().close()
        self._closed = True
    def __del__(self):
        # Best-effort cleanup; _ep_list defaults to () so this is safe even
        # when (a subclass') __init__ failed early.
        self.close()
    # Maps kernel event types to handler method names; dispatched from
    # __process.
    __event_dict = {
        BIND: 'onBind',
        UNBIND: 'onUnbind',
        ENABLE: 'onEnable',
        DISABLE: 'onDisable',
        # SETUP: handled specially
        SUSPEND: 'onSuspend',
        RESUME: 'onResume',
    }
    def __process(self, iterator):
        # One pass of the loop body per item in "iterator": an infinite
        # iterable means "serve events until closed", a one-item iterable
        # means "serve at most one read's worth of events".
        readinto = self.ep0.readinto
        # FunctionFS can queue up to 4 events, so let's read that much.
        event_len = ctypes.sizeof(Event)
        array_type = Event * 4
        buf = bytearray(ctypes.sizeof(array_type))
        event_list = array_type.from_buffer(buf)
        event_dict = self.__event_dict
        for _ in iterator:
            if self._closed:
                break
            try:
                length = readinto(buf)
            except IOError as exc:
                # Interrupted by a signal: retry.
                if exc.errno == errno.EINTR:
                    continue
                raise
            if not length:
                # Note: also catches None, returned when ep0 is non-blocking
                break # TODO: test if this happens when ep0 gets closed
                # (by FunctionFS or in another thread or in a handler)
            count, remainder = divmod(length, event_len)
            # ep0 is expected to hand out a whole number of events.
            assert remainder == 0, (length, event_len)
            for index in range(count):
                event = event_list[index]
                event_type = event.type
                if event_type == SETUP:
                    setup = event.u.setup
                    try:
                        self.onSetup(
                            setup.bRequestType,
                            setup.bRequest,
                            setup.wValue,
                            setup.wIndex,
                            setup.wLength,
                        )
                    except:
                        # On *ANY* exception, halt endpoint
                        self.ep0.halt(setup.bRequestType)
                        raise
                else:
                    getattr(self, event_dict[event.type])()
    def processEventsForever(self):
        """
        Process kernel ep0 events until closed.
        ep0 must be in blocking mode, otherwise behaves like `processEvents`.
        """
        self.__process(_INFINITY)
    def processEvents(self):
        """
        Process at least one kernel event if ep0 is in blocking mode.
        Process any already available event if ep0 is in non-blocking mode.
        """
        self.__process(_ONCE)
    def getEndpoint(self, index):
        """
        Return a file object corresponding to given endpoint index,
        in descriptor list order.
        Index 0 is endpoint zero; data endpoints start at 1.
        """
        return self._ep_list[index]
    def getEndpointByAddress(self, address):
        """
        Return a file object corresponding to given endpoint address.
        Raises KeyError when no declared endpoint has that address.
        """
        return self.getEndpoint(self._ep_address_dict[address])
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def onBind(self):
        """
        Triggered when FunctionFS signals gadget binding.
        May be overridden in subclass.
        """
        pass
    def onUnbind(self):
        """
        Triggered when FunctionFS signals gadget unbinding.
        May be overridden in subclass.
        """
        pass
    def onEnable(self):
        """
        Called when FunctionFS signals the function was (re)enabled.
        This may happen several times without onDisable being called.
        It must reset the function to its default state.
        May be overridden in subclass.
        """
        self.disableRemoteWakeup()
    def onDisable(self):
        """
        Called when FunctionFS signals the function was (re)disabled.
        This may happen several times without onEnable being called.
        May be overridden in subclass.
        """
        pass
    def disableRemoteWakeup(self):
        """
        Called when host issues a clearFeature request of the "suspend" flag
        on this interface.
        Sets function_remote_wakeup property to False so subsequent getStatus
        requests will return expected value.
        May be overridden in subclass.
        """
        self.function_remote_wakeup = False
    def enableRemoteWakeup(self):
        """
        Called when host issues a setFeature request of the "suspend" flag
        on this interface.
        Sets function_remote_wakeup property to True so subsequent getStatus
        requests will return expected value.
        May be overridden in subclass.
        """
        self.function_remote_wakeup = True
    def onSetup(self, request_type, request, value, index, length):
        """
        Called when a setup USB transaction was received.
        Default implementation:
        - handles USB_REQ_GET_STATUS on interface and endpoints
        - handles USB_REQ_CLEAR_FEATURE(USB_ENDPOINT_HALT) on endpoints
        - handles USB_REQ_SET_FEATURE(USB_ENDPOINT_HALT) on endpoints
        - halts on everything else
        If this method raises anything, endpoint 0 is halted by its caller and
        exception is let through.
        May be overridden in subclass.
        """
        # Only standard requests get a default implementation; anything else
        # falls through to the final stall.
        if (request_type & ch9.USB_TYPE_MASK) == ch9.USB_TYPE_STANDARD:
            recipient = request_type & ch9.USB_RECIP_MASK
            is_in = (request_type & ch9.USB_DIR_IN) == ch9.USB_DIR_IN
            if request == ch9.USB_REQ_GET_STATUS:
                if is_in and length == 2:
                    if recipient == ch9.USB_RECIP_INTERFACE:
                        if value == 0:
                            status = 0
                            # Bit 0: remote-wakeup capable; bit 1: remote
                            # wakeup currently enabled.
                            if index == 0:
                                if self.function_remote_wakeup_capable:
                                    status |= 1 << 0
                                if self.function_remote_wakeup:
                                    status |= 1 << 1
                            self.ep0.write(struct.pack('<H', status)[:length])
                            return
                    elif recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == 0:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                # Unknown endpoint: fall through to stall.
                                pass
                            else:
                                status = 0
                                if endpoint.isHalted():
                                    status |= 1 << 0
                                self.ep0.write(
                                    struct.pack('<H', status)[:length],
                                )
                                return
            elif request == ch9.USB_REQ_CLEAR_FEATURE:
                if not is_in and length == 0:
                    if recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == ch9.USB_ENDPOINT_HALT:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                pass
                            else:
                                endpoint.clearHalt()
                                # Zero-length read — presumably completes the
                                # status stage; confirm against FunctionFS
                                # docs.
                                self.ep0.read(0)
                                return
                    elif recipient == ch9.USB_RECIP_INTERFACE:
                        if value == ch9.USB_INTRF_FUNC_SUSPEND:
                            if self.function_remote_wakeup_capable:
                                self.disableRemoteWakeup()
                                self.ep0.read(0)
                                return
            elif request == ch9.USB_REQ_SET_FEATURE:
                if not is_in and length == 0:
                    if recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == ch9.USB_ENDPOINT_HALT:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                pass
                            else:
                                endpoint.halt()
                                self.ep0.read(0)
                                return
                    elif recipient == ch9.USB_RECIP_INTERFACE:
                        if value == ch9.USB_INTRF_FUNC_SUSPEND:
                            if self.function_remote_wakeup_capable:
                                self.enableRemoteWakeup()
                                self.ep0.read(0)
                                return
        # Unhandled or malformed request: stall endpoint 0.
        self.ep0.halt(request_type)
    def onSuspend(self):
        """
        Called when FunctionFS signals the host stops USB traffic.
        May be overridden in subclass.
        """
        pass
    def onResume(self):
        """
        Called when FunctionFS signals the host restarts USB traffic.
        May be overridden in subclass.
        """
        pass
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
getDescriptor
|
python
|
def getDescriptor(klass, **kw):
    """
    Instantiate klass with bLength and bDescriptorType filled automatically.

    klass (ctypes.Structure subclass)
        Must have bLength and bDescriptorType fields and declare a
        _bDescriptorType class attribute.
    kw
        Remaining field values; each must name an existing field.
    """
    # ctypes Structure.__init__ silently ignores keyword arguments which do
    # not match any field, which wastes developer time; validate up-front.
    probe = klass()
    assert hasattr(probe, 'bLength')
    assert hasattr(probe, 'bDescriptorType')
    unknown = [field_name for field_name in kw if not hasattr(probe, field_name)]
    if unknown:
        raise TypeError('Unknown fields %r' % (unknown, ))
    field_value_dict = dict(
        kw,
        bLength=ctypes.sizeof(klass),
        # pylint: disable=protected-access
        bDescriptorType=klass._bDescriptorType,
        # pylint: enable=protected-access
    )
    return klass(**field_value_dict)
|
Automatically fills bLength and bDescriptorType.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L262-L283
| null |
# This file is part of python-functionfs
# Copyright (C) 2016-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# python-functionfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-functionfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-functionfs. If not, see <http://www.gnu.org/licenses/>.
"""
Interfaces with functionfs to simplify USB gadget function declaration and
implementation on linux.
Defines standard USB descriptors (see "ch9" submodule) and sends them to the
kernel to declare function's structure.
Provides methods for accessing each endpoint and to react to events.
"""
import ctypes
import errno
import fcntl
import io
import itertools
import math
import os
import struct
import warnings
from .common import (
USBDescriptorHeader,
le32,
)
from . import ch9
from .ch9 import (
USBInterfaceDescriptor,
USBEndpointDescriptorNoAudio,
USBEndpointDescriptor,
USBSSEPCompDescriptor,
# USBSSPIsocEndpointDescriptor is not implemented in kernel as of this
# writing.
USBSSPIsocEndpointDescriptor,
# USBQualifierDescriptor is reserved for gadgets, so don't expose it.
USBOTGDescriptor,
USBOTG20Descriptor,
    # USBDebugDescriptor is not implemented in kernel as of this writing.
USBDebugDescriptor,
USBInterfaceAssocDescriptor,
)
from .functionfs import (
DESCRIPTORS_MAGIC, STRINGS_MAGIC, DESCRIPTORS_MAGIC_V2,
FLAGS,
DescsHeadV2,
DescsHead,
OSDescHeader,
OSDescHeaderBCount,
OSExtCompatDesc,
OSExtPropDescHead,
StringsHead,
StringBase,
Event,
FIFO_STATUS, FIFO_FLUSH, CLEAR_HALT, INTERFACE_REVMAP, ENDPOINT_REVMAP, ENDPOINT_DESC,
)
# pylint: disable=no-name-in-module
from .functionfs import (
HAS_FS_DESC,
HAS_HS_DESC,
HAS_SS_DESC,
HAS_MS_OS_DESC,
ALL_CTRL_RECIP,
CONFIG0_SETUP,
BIND, UNBIND, ENABLE, DISABLE, SETUP, SUSPEND, RESUME,
)
# pylint: enable=no-name-in-module
# Names exported by "from functionfs import *"; doubles as this module's
# public API declaration.
__all__ = (
    'ch9',
    'Function',
    # XXX: Not very pythonic...
    'getInterfaceInAllSpeeds',
    'getDescriptor',
    'getOSDesc',
    'getOSExtPropDesc',
    'USBInterfaceDescriptor',
    'USBEndpointDescriptorNoAudio',
    'USBEndpointDescriptor',
    'USBSSEPCompDescriptor',
    'USBSSPIsocEndpointDescriptor',
    'USBOTGDescriptor',
    'USBOTG20Descriptor',
    'USBDebugDescriptor',
    'USBInterfaceAssocDescriptor',
    'OSExtCompatDesc',
)
# Maximum wMaxPacketSize per transfer type, as a
# (full-speed, high-speed, super-speed) tuple; legal ranges noted per entry.
_MAX_PACKET_SIZE_DICT = {
    ch9.USB_ENDPOINT_XFER_ISOC: (
        1023, # 0..1023
        1024, # 0..1024
        1024, # 0..1024
    ),
    ch9.USB_ENDPOINT_XFER_BULK: (
        64, # 8, 16, 32, 64
        512, # 512 only
        1024, # 1024 only
    ),
    ch9.USB_ENDPOINT_XFER_INT: (
        64, # 0..64
        1024, # 0..1024
        1024, # 1..1024
    ),
}
# Sentinel distinguishing "argument absent" from any legitimate value.
_MARKER = object()
_EMPTY_DICT = {} # For internal ** fallback usage
def getInterfaceInAllSpeeds(interface, endpoint_list, class_descriptor_list=()):
    """
    Produce similar fs, hs and ss interface and endpoints descriptors.
    Should be useful for devices desiring to work in all 3 speeds with maximum
    endpoint wMaxPacketSize. Reduces data duplication from descriptor
    declarations.
    Not intended to cover fancy combinations.
    interface (dict):
        Keyword arguments for
            getDescriptor(USBInterfaceDescriptor, ...)
        in all speeds.
        bNumEndpoints must not be provided.
    endpoint_list (list of dicts)
        Each dict represents an endpoint, and may contain the following items:
        - "endpoint": required, contains keyword arguments for
          getDescriptor(USBEndpointDescriptorNoAudio, ...)
          or
          getDescriptor(USBEndpointDescriptor, ...)
          The with-audio variant is picked when its extra fields are assigned a
          value.
          wMaxPacketSize may be missing, in which case it will be set to the
          maximum size for given speed and endpoint type. When present, it is
          capped to that same per-speed maximum.
          bmAttributes must be provided.
          If bEndpointAddress is zero (excluding direction bit) on the first
          endpoint, endpoints will be assigned their rank in this list,
          starting at 1. Their direction bit is preserved.
          If bInterval is present on a INT or ISO endpoint, it must be in
          millisecond units (but may not be an integer), and will be converted
          to the nearest integer millisecond for full-speed descriptor, and
          nearest possible interval for high- and super-speed descriptors.
          If bInterval is present on a BULK endpoint, it is set to zero on
          full-speed descriptor and used as provided on high- and super-speed
          descriptors.
        - "superspeed": optional, contains keyword arguments for
          getDescriptor(USBSSEPCompDescriptor, ...)
        - "superspeed_iso": optional, contains keyword arguments for
          getDescriptor(USBSSPIsocEndpointDescriptor, ...)
          Must be provided and non-empty only when endpoint is isochronous and
          "superspeed" dict has "bmAttributes" bit 7 set.
    class_descriptor_list (list of descriptors of any type)
        Descriptors to insert in all speeds between the interface descriptor
        and endpoint descriptors.
    Returns a 3-tuple of lists:
    - fs descriptors
    - hs descriptors
    - ss descriptors
    """
    interface = getDescriptor(
        USBInterfaceDescriptor,
        bNumEndpoints=len(endpoint_list),
        **interface
    )
    class_descriptor_list = list(class_descriptor_list)
    fs_list = [interface] + class_descriptor_list
    hs_list = [interface] + class_descriptor_list
    ss_list = [interface] + class_descriptor_list
    # Auto-number endpoints only when the first endpoint's address is zero
    # once its direction bit is masked off.
    need_address = (
        endpoint_list[0]['endpoint'].get(
            'bEndpointAddress',
            0,
        ) & ~ch9.USB_DIR_IN == 0
    )
    for index, endpoint in enumerate(endpoint_list, 1):
        endpoint_kw = endpoint['endpoint'].copy()
        transfer_type = endpoint_kw[
            'bmAttributes'
        ] & ch9.USB_ENDPOINT_XFERTYPE_MASK
        fs_max, hs_max, ss_max = _MAX_PACKET_SIZE_DICT[transfer_type]
        if need_address:
            endpoint_kw['bEndpointAddress'] = index | (
                endpoint_kw.get('bEndpointAddress', 0) & ch9.USB_DIR_IN
            )
        # The with-audio descriptor variant is needed as soon as one of its
        # extra fields is provided.
        klass = (
            USBEndpointDescriptor
            if 'bRefresh' in endpoint_kw or 'bSynchAddress' in endpoint_kw else
            USBEndpointDescriptorNoAudio
        )
        interval = endpoint_kw.pop('bInterval', _MARKER)
        if interval is _MARKER:
            fs_interval = hs_interval = 0
        else:
            if transfer_type == ch9.USB_ENDPOINT_XFER_BULK:
                fs_interval = 0
                hs_interval = interval
            else: # USB_ENDPOINT_XFER_ISOC or USB_ENDPOINT_XFER_INT
                # fs expresses the interval in whole milliseconds.
                fs_interval = max(1, min(255, round(interval)))
                # hs/ss express it as 2 ** (bInterval - 1) microframes;
                # 8 is the number of microframes in a millisecond.
                hs_interval = max(
                    1,
                    min(16, int(round(1 + math.log(interval * 8, 2)))),
                )
        packet_size = endpoint_kw.pop('wMaxPacketSize', _MARKER)
        if packet_size is _MARKER:
            fs_packet_size = fs_max
            hs_packet_size = hs_max
            ss_packet_size = ss_max
        else:
            fs_packet_size = min(fs_max, packet_size)
            hs_packet_size = min(hs_max, packet_size)
            ss_packet_size = min(ss_max, packet_size)
        fs_list.append(getDescriptor(
            klass,
            # BUGFIX: previously fs_max/hs_max/ss_max were passed here,
            # silently ignoring any caller-provided wMaxPacketSize.
            wMaxPacketSize=fs_packet_size,
            bInterval=fs_interval,
            **endpoint_kw
        ))
        hs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=hs_packet_size,
            bInterval=hs_interval,
            **endpoint_kw
        ))
        ss_list.append(getDescriptor(
            klass,
            wMaxPacketSize=ss_packet_size,
            # ss uses the same bInterval encoding as hs.
            bInterval=hs_interval,
            **endpoint_kw
        ))
        ss_companion_kw = endpoint.get('superspeed', _EMPTY_DICT)
        ss_list.append(getDescriptor(
            USBSSEPCompDescriptor,
            **ss_companion_kw
        ))
        # A superspeed isochronous companion must be present exactly when the
        # endpoint is isochronous and the ss companion announces one.
        ssp_iso_kw = endpoint.get('superspeed_iso', _EMPTY_DICT)
        if bool(ssp_iso_kw) != (
            transfer_type == ch9.USB_ENDPOINT_XFER_ISOC and
            bool(ch9.USB_SS_SSP_ISOC_COMP(
                ss_companion_kw.get('bmAttributes', 0),
            ))
        ):
            raise ValueError('Inconsistent isochronous companion')
        if ssp_iso_kw:
            ss_list.append(getDescriptor(
                USBSSPIsocEndpointDescriptor,
                **ssp_iso_kw
            ))
    return (fs_list, hs_list, ss_list)
def getOSDesc(interface, ext_list):
    """
    Return an OS description header.
    interface (int)
        Related interface number.
    ext_list (list of OSExtCompatDesc or OSExtPropDesc)
        List of instances of extended descriptors.
        All items must be of the same type, and the list must be non-empty.
    """
    try:
        # Single-element unpacking asserts exactly one distinct type is
        # present (empty list or mixed types raise ValueError).
        ext_type, = {type(x) for x in ext_list}
    except ValueError:
        raise TypeError('Extensions of a single type are required.')
    if issubclass(ext_type, OSExtCompatDesc):
        # NOTE(review): wIndex 4/5 presumably follow the Microsoft OS
        # descriptor convention (4: extended compat ID, 5: extended
        # properties) — confirm against that specification.
        wIndex = 4
        kw = {
            'b': OSDescHeaderBCount(
                bCount=len(ext_list),
                Reserved=0,
            ),
        }
    elif issubclass(ext_type, OSExtPropDescHead):
        wIndex = 5
        kw = {
            'wCount': len(ext_list),
        }
    else:
        raise TypeError('Extensions of unexpected type')
    ext_list_type = ext_type * len(ext_list)
    # Build a header subclass whose trailing field holds the extension array.
    klass = type(
        'OSDesc',
        (OSDescHeader, ),
        {
            '_fields_': [
                ('ext_list', ext_list_type),
            ],
        },
    )
    return klass(
        interface=interface,
        dwLength=ctypes.sizeof(klass),
        bcdVersion=1,
        wIndex=wIndex,
        ext_list=ext_list_type(*ext_list),
        **kw
    )
def getOSExtPropDesc(data_type, name, value):
    """
    Returns an OS extension property descriptor.
    data_type (int)
        See wPropertyDataType documentation.
    name (string)
        See PropertyName documentation.
    value (string)
        See PropertyData documentation.
        NULL chars must be explicitly included in the value when needed,
        this function does not add any terminating NULL for example.
    """
    name_length = len(name)
    value_length = len(value)
    # Synthesise a descriptor type sized for this exact name/value pair.
    descriptor_type = type(
        'OSExtPropDesc',
        (OSExtPropDescHead, ),
        {
            '_fields_': [
                ('bPropertyName', ctypes.c_char * name_length),
                ('dwPropertyDataLength', le32),
                ('bProperty', ctypes.c_char * value_length),
            ],
        },
    )
    return descriptor_type(
        dwSize=ctypes.sizeof(descriptor_type),
        dwPropertyDataType=data_type,
        wPropertyNameLength=name_length,
        bPropertyName=name,
        dwPropertyDataLength=value_length,
        bProperty=value,
    )
#def getDescs(*args, **kw):
# """
# Return a legacy format FunctionFS suitable for serialisation.
# Deprecated as of 3.14 .
#
# NOT IMPLEMENTED
# """
# warnings.warn(
# DeprecationWarning,
# 'Legacy format, deprecated as of 3.14.',
# )
# raise NotImplementedError('TODO')
# klass = type(
# 'Descs',
# (DescsHead, ),
# {
# 'fs_descrs': None, # TODO
# 'hs_descrs': None, # TODO
# },
# )
# return klass(
# magic=DESCRIPTORS_MAGIC,
# length=ctypes.sizeof(klass),
# **kw
# )
def getDescsV2(flags, fs_list=(), hs_list=(), ss_list=(), os_list=()):
    """
    Return a FunctionFS descriptor suitable for serialisation.
    flags (int)
        Any combination of VIRTUAL_ADDR, EVENTFD, ALL_CTRL_RECIP,
        CONFIG0_SETUP.
        The HAS_*_DESC flags are set automatically from the non-empty lists.
    {fs,hs,ss,os}_list (list of descriptors)
        Instances of the following classes:
        {fs,hs,ss}_list:
            USBInterfaceDescriptor
            USBEndpointDescriptorNoAudio
            USBEndpointDescriptor
            USBSSEPCompDescriptor
            USBSSPIsocEndpointDescriptor
            USBOTGDescriptor
            USBOTG20Descriptor
            USBInterfaceAssocDescriptor
            TODO: HID
            All (non-empty) lists must define the same number of interfaces
            and endpoints, and endpoint descriptors must be given in the same
            order, bEndpointAddress-wise.
        os_list:
            OSDesc
    """
    count_field_list = []
    descr_field_list = []
    kw = {}
    # For each speed: validate descriptor types, synthesise a packed ctypes
    # struct holding that speed's descriptors, and set the matching
    # HAS_*_DESC flag.
    for descriptor_list, flag, prefix, allowed_descriptor_klass in (
        (fs_list, HAS_FS_DESC, 'fs', USBDescriptorHeader),
        (hs_list, HAS_HS_DESC, 'hs', USBDescriptorHeader),
        (ss_list, HAS_SS_DESC, 'ss', USBDescriptorHeader),
        (os_list, HAS_MS_OS_DESC, 'os', OSDescHeader),
    ):
        if descriptor_list:
            for index, descriptor in enumerate(descriptor_list):
                if not isinstance(descriptor, allowed_descriptor_klass):
                    raise TypeError(
                        'Descriptor %r of unexpected type: %r' % (
                            index,
                            type(descriptor),
                        ),
                    )
            descriptor_map = [
                ('desc_%i' % x, y)
                for x, y in enumerate(descriptor_list)
            ]
            flags |= flag
            count_name = prefix + 'count'
            descr_name = prefix + 'descr'
            count_field_list.append((count_name, le32))
            descr_type = type(
                't_' + descr_name,
                (ctypes.LittleEndianStructure, ),
                {
                    '_pack_': 1,
                    '_fields_': [
                        (x, type(y))
                        for x, y in descriptor_map
                    ],
                }
            )
            descr_field_list.append((descr_name, descr_type))
            kw[count_name] = len(descriptor_map)
            kw[descr_name] = descr_type(**dict(descriptor_map))
        elif flags & flag:
            # Caller set a HAS_*_DESC flag without providing descriptors.
            raise ValueError(
                'Flag %r set but descriptor list empty, cannot generate type.' % (
                    FLAGS.get(flag),
                )
            )
    # Layout: all count fields first, then one struct per populated speed.
    klass = type(
        'DescsV2_0x%02x' % (
            flags & (
                HAS_FS_DESC |
                HAS_HS_DESC |
                HAS_SS_DESC |
                HAS_MS_OS_DESC
            ),
            # XXX: include contained descriptors type information ? (and name ?)
        ),
        (DescsHeadV2, ),
        {
            '_fields_': count_field_list + descr_field_list,
        },
    )
    return klass(
        magic=DESCRIPTORS_MAGIC_V2,
        length=ctypes.sizeof(klass),
        flags=flags,
        **kw
    )
def getStrings(lang_dict):
    """
    Return a FunctionFS strings block suitable for serialisation.
    lang_dict (dict)
        Key: language ID (ex: 0x0409 for en-us)
        Value: list of unicode objects
        All values must have the same number of items.
    """
    field_list = []
    field_value_dict = {}
    if lang_dict:
        # Every language must declare the same number of strings; use the
        # first one as the reference count.
        str_count = len(next(iter(lang_dict.values())))
        for lang, string_list in lang_dict.items():
            if len(string_list) != str_count:
                raise ValueError('All values must have the same string count.')
            field_id = 'strings_%04x' % lang
            encoded = b'\x00'.join(
                string.encode('utf-8') for string in string_list
            ) + b'\x00'
            field_type = type(
                'String',
                (StringBase, ),
                {
                    '_fields_': [
                        ('strings', ctypes.c_char * len(encoded)),
                    ],
                },
            )
            field_list.append((field_id, field_type))
            field_value_dict[field_id] = field_type(
                lang=lang,
                strings=encoded,
            )
    else:
        str_count = 0
    klass = type(
        'Strings',
        (StringsHead, ),
        {
            '_fields_': field_list,
        },
    )
    return klass(
        magic=STRINGS_MAGIC,
        length=ctypes.sizeof(klass),
        str_count=str_count,
        lang_count=len(lang_dict),
        **field_value_dict
    )
def serialise(structure):
    """
    structure (ctypes.Structure)
        The structure to serialise.
    Returns a ctypes.c_char array.
    Does not copy memory: the result aliases the structure's own storage,
    so the structure must outlive any use of the returned array.
    """
    return ctypes.cast(
        ctypes.pointer(structure),
        ctypes.POINTER(ctypes.c_char * ctypes.sizeof(structure)),
    ).contents
class EndpointFileBase(io.FileIO):
    """
    File object representing a endpoint. Abstract.
    """
    def _ioctl(self, func, *args, **kw):
        # fcntl.ioctl raises on OS-level failure; additionally treat any
        # negative return value as an error.
        result = fcntl.ioctl(self, func, *args, **kw)
        if result < 0:
            raise IOError(result)
        return result
class Endpoint0File(EndpointFileBase):
    """
    File object exposing ioctls available on endpoint zero.
    """
    def halt(self, request_type):
        """
        Halt current endpoint.
        """
        # Stall by doing a zero-length transfer whose direction follows the
        # request's direction bit; the kernel signals the stall as EL2HLT.
        try:
            if request_type & ch9.USB_DIR_IN:
                self.read(0)
            else:
                self.write(b'')
        except IOError as exc:
            if exc.errno == errno.EL2HLT:
                return
            raise
        raise ValueError('halt did not return EL2HLT ?')
    def getRealInterfaceNumber(self, interface):
        """
        Returns the host-visible interface number, or None if there is no such
        interface.
        """
        try:
            return self._ioctl(INTERFACE_REVMAP, interface)
        except IOError as exc:
            if exc.errno != errno.EDOM:
                raise
            # EDOM: no such interface on the host side.
            return None
    # TODO: Add any standard IOCTL in usb_gadget_ops.ioctl ?
class EndpointFile(EndpointFileBase):
    """
    File object exposing ioctls available on non-zero endpoints.
    """
    # Locally-tracked halt state; the kernel is not queried back.
    _halted = False
    def getRealEndpointNumber(self):
        """
        Returns the host-visible endpoint number.
        """
        return self._ioctl(ENDPOINT_REVMAP)
    def clearHalt(self):
        """
        Clears endpoint halt, and resets toggle.
        See drivers/usb/gadget/udc/core.c:usb_ep_clear_halt
        """
        self._ioctl(CLEAR_HALT)
        self._halted = False
    def getFIFOStatus(self):
        """
        Returns the number of bytes in fifo.
        """
        return self._ioctl(FIFO_STATUS)
    def flushFIFO(self):
        """
        Discards Endpoint FIFO content.
        """
        self._ioctl(FIFO_FLUSH)
    def getDescriptor(self):
        """
        Returns the currently active endpoint descriptor
        (depending on current USB speed).
        """
        result = USBEndpointDescriptor()
        # mutate_flag=True: the ioctl fills "result" in place.
        self._ioctl(ENDPOINT_DESC, result, True)
        return result
    def _halt(self):
        # Direction-specific; implemented by EndpointINFile/EndpointOUTFile.
        raise NotImplementedError
    def halt(self):
        """
        Halt current endpoint.
        Raises ValueError when the kernel accepted the transfer instead of
        failing it with EBADMSG.
        """
        try:
            self._halt()
        except IOError as exc:
            if exc.errno != errno.EBADMSG:
                raise
        else:
            raise ValueError('halt did not return EBADMSG ?')
        self._halted = True
    def isHalted(self):
        """
        Whether endpoint is currently halted.
        """
        return self._halted
class EndpointINFile(EndpointFile):
    """
    Write-only endpoint file.
    """
    @staticmethod
    def read(*_, **__):
        """
        Always raises IOError.
        """
        raise IOError('File not open for reading')
    # Every read variant shares the same always-raising implementation.
    readinto = read
    readall = read
    readlines = read
    readline = read
    @staticmethod
    def readable():
        """
        Never readable.
        """
        return False
    def _halt(self):
        # Deliberately bypass this class' read() override: a real
        # zero-length read on an IN endpoint is what requests the halt.
        super(EndpointINFile, self).read(0)
class EndpointOUTFile(EndpointFile):
    """
    Read-only endpoint file.
    """
    @staticmethod
    def write(*_, **__):
        """
        Always raises IOError.
        """
        raise IOError('File not open for writing')
    # Alias shares the always-raising implementation.
    writelines = write
    @staticmethod
    def writable():
        """
        Never writable.
        """
        return False
    def _halt(self):
        # Deliberately bypass this class' write() override: a real
        # zero-length write on an OUT endpoint is what requests the halt.
        super(EndpointOUTFile, self).write(b'')
# Iterables driving Function.__process: loop until closed vs. one pass.
_INFINITY = itertools.repeat(None)
_ONCE = (None, )
class Function(object):
    """
    Pythonic class for interfacing with FunctionFS.
    Properties available:
    function_remote_wakeup_capable (bool)
        Whether the function wishes to be allowed to wake host.
    function_remote_wakeup (bool)
        Whether host has allowed the function to wake it up.
        Set and cleared by onSetup by calling enableRemoteWakeup and
        disableRemoteWakeup, respectively.
    """
    # True once close() ran; checked by the event-processing loop.
    _closed = False
    _ep_list = () # Avoids failing in __del__ when (subclass') __init__ fails.
    function_remote_wakeup_capable = False
    function_remote_wakeup = False
    def __init__(
        self,
        path,
        fs_list=(), hs_list=(), ss_list=(),
        os_list=(),
        # NOTE(review): mutable default; safe only because it is never
        # mutated (getStrings only reads it).
        lang_dict={},
        all_ctrl_recip=False, config0_setup=False,
    ):
        """
        path (string)
            Path to the functionfs mountpoint (where the ep* files are
            located).
        {fs,hs,ss}_list (list of descriptors)
            XXX: may change to avoid requiring ctype objects.
        os_list (list of descriptors)
            XXX: may change to avoid requiring ctype objects.
        lang_dict (dict)
            Keys: language id (ex: 0x0402 for "us-en").
            Values: List of unicode objects. First item becomes string
                    descriptor 1, and so on. Must contain at least as many
                    string descriptors as the highest string index declared
                    in all descriptors.
        all_ctrl_recip (bool)
            When true, this function will receive all control transactions.
            Useful when implementing non-standard control transactions.
        config0_setup (bool)
            When true, this function will receive control transactions before
            any configuration gets enabled.
        """
        self._path = path
        ep0 = Endpoint0File(os.path.join(path, 'ep0'), 'r+')
        self._ep_list = ep_list = [ep0]
        self._ep_address_dict = ep_address_dict = {}
        flags = 0
        if all_ctrl_recip:
            flags |= ALL_CTRL_RECIP
        if config0_setup:
            flags |= CONFIG0_SETUP
        # Note: serialise does not prevent its argument from being freed and
        # reallocated. Keep strong references to to-serialise values until
        # after they get written.
        desc = getDescsV2(
            flags,
            fs_list=fs_list,
            hs_list=hs_list,
            ss_list=ss_list,
            os_list=os_list,
        )
        ep0.write(serialise(desc))
        # TODO: try v1 on failure ?
        del desc
        # Note: see above.
        strings = getStrings(lang_dict)
        ep0.write(serialise(strings))
        del strings
        # Open one ep%u file per declared endpoint descriptor; assumes
        # FunctionFS numbers them in descriptor declaration order.
        for descriptor in ss_list or hs_list or fs_list:
            if descriptor.bDescriptorType == ch9.USB_DT_ENDPOINT:
                assert descriptor.bEndpointAddress not in ep_address_dict, (
                    descriptor,
                    ep_address_dict[descriptor.bEndpointAddress],
                )
                index = len(ep_list)
                ep_address_dict[descriptor.bEndpointAddress] = index
                ep_list.append(
                    (
                        EndpointINFile
                        if descriptor.bEndpointAddress & ch9.USB_DIR_IN
                        else EndpointOUTFile
                    )(
                        os.path.join(path, 'ep%u' % (index, )),
                        'r+',
                    )
                )
    @property
    def ep0(self):
        """
        Endpoint 0, use when handling setup transactions.
        """
        # First entry of _ep_list, opened in __init__.
        return self._ep_list[0]
    def close(self):
        """
        Close all endpoint file descriptors.
        Safe to call multiple times.
        """
        ep_list = self._ep_list
        while ep_list:
            ep_list.pop().close()
        self._closed = True
def __del__(self):
    # Best-effort cleanup on garbage collection; close() tolerates
    # repeated calls and a partially-initialised instance.
    self.close()
__event_dict = {
BIND: 'onBind',
UNBIND: 'onUnbind',
ENABLE: 'onEnable',
DISABLE: 'onDisable',
# SETUP: handled specially
SUSPEND: 'onSuspend',
RESUME: 'onResume',
}
def __process(self, iterator):
    """
    Read functionfs events from ep0 and dispatch them to handler methods.

    iterator
        Iterated once per read attempt; the loop ends when it is
        exhausted, when close() was called, or when ep0 returns no data.
    """
    readinto = self.ep0.readinto
    # FunctionFS can queue up to 4 events, so let's read that much.
    event_len = ctypes.sizeof(Event)
    array_type = Event * 4
    buf = bytearray(ctypes.sizeof(array_type))
    # The Event array aliases buf, so events are parsed in place without
    # copying.
    event_list = array_type.from_buffer(buf)
    event_dict = self.__event_dict
    for _ in iterator:
        if self._closed:
            break
        try:
            length = readinto(buf)
        except IOError as exc:
            # Interrupted system call: retry the read.
            if exc.errno == errno.EINTR:
                continue
            raise
        if not length:
            # Note: also catches None, returned when ep0 is non-blocking
            break # TODO: test if this happens when ep0 gets closed
            # (by FunctionFS or in another thread or in a handler)
        # The read is expected to contain only whole events.
        count, remainder = divmod(length, event_len)
        assert remainder == 0, (length, event_len)
        for index in range(count):
            event = event_list[index]
            event_type = event.type
            if event_type == SETUP:
                # SETUP carries a payload, so it cannot go through the
                # simple name-based dispatch in event_dict.
                setup = event.u.setup
                try:
                    self.onSetup(
                        setup.bRequestType,
                        setup.bRequest,
                        setup.wValue,
                        setup.wIndex,
                        setup.wLength,
                    )
                except:
                    # On *ANY* exception, halt endpoint
                    self.ep0.halt(setup.bRequestType)
                    raise
            else:
                getattr(self, event_dict[event.type])()
def processEventsForever(self):
    """
    Process kernel ep0 events until closed.
    ep0 must be in blocking mode, otherwise behaves like `processEvents`.
    """
    # _INFINITY is an endless iterator: only close() or EOF stops the loop.
    self.__process(_INFINITY)
def processEvents(self):
    """
    Process at least one kernel event if ep0 is in blocking mode.
    Process any already available event if ep0 is in non-blocking mode.
    """
    # _ONCE is a single-item iterator: at most one read of ep0.
    self.__process(_ONCE)
def getEndpoint(self, index):
    """
    Return a file object corresponding to given endpoint index,
    in descriptor list order.

    Index 0 is endpoint zero; data endpoints start at index 1.
    Raises IndexError for out-of-range indices.
    """
    return self._ep_list[index]
def getEndpointByAddress(self, address):
    """
    Return a file object corresponding to given endpoint address.

    Raises KeyError if no endpoint was declared with that address.
    """
    return self.getEndpoint(self._ep_address_dict[address])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def onBind(self):
"""
Triggered when FunctionFS signals gadget binding.
May be overridden in subclass.
"""
pass
def onUnbind(self):
"""
Triggered when FunctionFS signals gadget unbinding.
May be overridden in subclass.
"""
pass
def onEnable(self):
"""
Called when FunctionFS signals the function was (re)enabled.
This may happen several times without onDisable being called.
It must reset the function to its default state.
May be overridden in subclass.
"""
self.disableRemoteWakeup()
def onDisable(self):
"""
Called when FunctionFS signals the function was (re)disabled.
This may happen several times without onEnable being called.
May be overridden in subclass.
"""
pass
def disableRemoteWakeup(self):
"""
Called when host issues a clearFeature request of the "suspend" flag
on this interface.
Sets function_remote_wakeup property to False so subsequent getStatus
requests will return expected value.
May be overridden in subclass.
"""
self.function_remote_wakeup = False
def enableRemoteWakeup(self):
"""
Called when host issues a setFeature request of the "suspend" flag
on this interface.
Sets function_remote_wakeup property to True so subsequent getStatus
requests will return expected value.
May be overridden in subclass.
"""
self.function_remote_wakeup = True
def onSetup(self, request_type, request, value, index, length):
"""
Called when a setup USB transaction was received.
Default implementation:
- handles USB_REQ_GET_STATUS on interface and endpoints
- handles USB_REQ_CLEAR_FEATURE(USB_ENDPOINT_HALT) on endpoints
- handles USB_REQ_SET_FEATURE(USB_ENDPOINT_HALT) on endpoints
- halts on everything else
If this method raises anything, endpoint 0 is halted by its caller and
exception is let through.
May be overridden in subclass.
"""
if (request_type & ch9.USB_TYPE_MASK) == ch9.USB_TYPE_STANDARD:
recipient = request_type & ch9.USB_RECIP_MASK
is_in = (request_type & ch9.USB_DIR_IN) == ch9.USB_DIR_IN
if request == ch9.USB_REQ_GET_STATUS:
if is_in and length == 2:
if recipient == ch9.USB_RECIP_INTERFACE:
if value == 0:
status = 0
if index == 0:
if self.function_remote_wakeup_capable:
status |= 1 << 0
if self.function_remote_wakeup:
status |= 1 << 1
self.ep0.write(struct.pack('<H', status)[:length])
return
elif recipient == ch9.USB_RECIP_ENDPOINT:
if value == 0:
try:
endpoint = self.getEndpoint(index)
except IndexError:
pass
else:
status = 0
if endpoint.isHalted():
status |= 1 << 0
self.ep0.write(
struct.pack('<H', status)[:length],
)
return
elif request == ch9.USB_REQ_CLEAR_FEATURE:
if not is_in and length == 0:
if recipient == ch9.USB_RECIP_ENDPOINT:
if value == ch9.USB_ENDPOINT_HALT:
try:
endpoint = self.getEndpoint(index)
except IndexError:
pass
else:
endpoint.clearHalt()
self.ep0.read(0)
return
elif recipient == ch9.USB_RECIP_INTERFACE:
if value == ch9.USB_INTRF_FUNC_SUSPEND:
if self.function_remote_wakeup_capable:
self.disableRemoteWakeup()
self.ep0.read(0)
return
elif request == ch9.USB_REQ_SET_FEATURE:
if not is_in and length == 0:
if recipient == ch9.USB_RECIP_ENDPOINT:
if value == ch9.USB_ENDPOINT_HALT:
try:
endpoint = self.getEndpoint(index)
except IndexError:
pass
else:
endpoint.halt()
self.ep0.read(0)
return
elif recipient == ch9.USB_RECIP_INTERFACE:
if value == ch9.USB_INTRF_FUNC_SUSPEND:
if self.function_remote_wakeup_capable:
self.enableRemoteWakeup()
self.ep0.read(0)
return
self.ep0.halt(request_type)
def onSuspend(self):
"""
Called when FunctionFS signals the host stops USB traffic.
May be overridden in subclass.
"""
pass
def onResume(self):
"""
Called when FunctionFS signals the host restarts USB traffic.
May be overridden in subclass.
"""
pass
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
getOSDesc
|
python
|
def getOSDesc(interface, ext_list):
try:
ext_type, = {type(x) for x in ext_list}
except ValueError:
raise TypeError('Extensions of a single type are required.')
if issubclass(ext_type, OSExtCompatDesc):
wIndex = 4
kw = {
'b': OSDescHeaderBCount(
bCount=len(ext_list),
Reserved=0,
),
}
elif issubclass(ext_type, OSExtPropDescHead):
wIndex = 5
kw = {
'wCount': len(ext_list),
}
else:
raise TypeError('Extensions of unexpected type')
ext_list_type = ext_type * len(ext_list)
klass = type(
'OSDesc',
(OSDescHeader, ),
{
'_fields_': [
('ext_list', ext_list_type),
],
},
)
return klass(
interface=interface,
dwLength=ctypes.sizeof(klass),
bcdVersion=1,
wIndex=wIndex,
ext_list=ext_list_type(*ext_list),
**kw
)
|
Return an OS description header.
interface (int)
Related interface number.
ext_list (list of OSExtCompatDesc or OSExtPropDesc)
List of instances of extended descriptors.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L285-L329
| null |
# This file is part of python-functionfs
# Copyright (C) 2016-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# python-functionfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-functionfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-functionfs. If not, see <http://www.gnu.org/licenses/>.
"""
Interfaces with functionfs to simplify USB gadget function declaration and
implementation on linux.
Defines standard USB descriptors (see "ch9" submodule) and sends them to the
kernel to declare function's structure.
Provides methods for accessing each endpoint and to react to events.
"""
import ctypes
import errno
import fcntl
import io
import itertools
import math
import os
import struct
import warnings
from .common import (
USBDescriptorHeader,
le32,
)
from . import ch9
from .ch9 import (
USBInterfaceDescriptor,
USBEndpointDescriptorNoAudio,
USBEndpointDescriptor,
USBSSEPCompDescriptor,
# USBSSPIsocEndpointDescriptor is not implemented in kernel as of this
# writing.
USBSSPIsocEndpointDescriptor,
# USBQualifierDescriptor is reserved for gadgets, so don't expose it.
USBOTGDescriptor,
USBOTG20Descriptor,
# USBDebugDescriptor is not implemented in kernel as of this writing.
USBDebugDescriptor,
USBInterfaceAssocDescriptor,
)
from .functionfs import (
DESCRIPTORS_MAGIC, STRINGS_MAGIC, DESCRIPTORS_MAGIC_V2,
FLAGS,
DescsHeadV2,
DescsHead,
OSDescHeader,
OSDescHeaderBCount,
OSExtCompatDesc,
OSExtPropDescHead,
StringsHead,
StringBase,
Event,
FIFO_STATUS, FIFO_FLUSH, CLEAR_HALT, INTERFACE_REVMAP, ENDPOINT_REVMAP, ENDPOINT_DESC,
)
# pylint: disable=no-name-in-module
from .functionfs import (
HAS_FS_DESC,
HAS_HS_DESC,
HAS_SS_DESC,
HAS_MS_OS_DESC,
ALL_CTRL_RECIP,
CONFIG0_SETUP,
BIND, UNBIND, ENABLE, DISABLE, SETUP, SUSPEND, RESUME,
)
# pylint: enable=no-name-in-module
__all__ = (
'ch9',
'Function',
# XXX: Not very pythonic...
'getInterfaceInAllSpeeds',
'getDescriptor',
'getOSDesc',
'getOSExtPropDesc',
'USBInterfaceDescriptor',
'USBEndpointDescriptorNoAudio',
'USBEndpointDescriptor',
'USBSSEPCompDescriptor',
'USBSSPIsocEndpointDescriptor',
'USBOTGDescriptor',
'USBOTG20Descriptor',
'USBDebugDescriptor',
'USBInterfaceAssocDescriptor',
'OSExtCompatDesc',
)
_MAX_PACKET_SIZE_DICT = {
ch9.USB_ENDPOINT_XFER_ISOC: (
1023, # 0..1023
1024, # 0..1024
1024, # 0..1024
),
ch9.USB_ENDPOINT_XFER_BULK: (
64, # 8, 16, 32, 64
512, # 512 only
1024, # 1024 only
),
ch9.USB_ENDPOINT_XFER_INT: (
64, # 0..64
1024, # 0..1024
1024, # 1..1024
),
}
_MARKER = object()
_EMPTY_DICT = {} # For internal ** fallback usage
def getInterfaceInAllSpeeds(interface, endpoint_list, class_descriptor_list=()):
    """
    Produce similar fs, hs and ss interface and endpoints descriptors.
    Should be useful for devices desiring to work in all 3 speeds with maximum
    endpoint wMaxPacketSize. Reduces data duplication from descriptor
    declarations.
    Not intended to cover fancy combinations.
    interface (dict):
        Keyword arguments for
            getDescriptor(USBInterfaceDescriptor, ...)
        in all speeds.
        bNumEndpoints must not be provided.
    endpoint_list (list of dicts)
        Each dict represents an endpoint, and may contain the following items:
        - "endpoint": required, contains keyword arguments for
            getDescriptor(USBEndpointDescriptorNoAudio, ...)
          or
            getDescriptor(USBEndpointDescriptor, ...)
          The with-audio variant is picked when its extra fields are assigned a
          value.
          wMaxPacketSize may be missing, in which case it will be set to the
          maximum size for given speed and endpoint type; when present, it is
          clamped to that per-speed maximum.
          bmAttributes must be provided.
          If bEndpointAddress is zero (excluding direction bit) on the first
          endpoint, endpoints will be assigned their rank in this list,
          starting at 1. Their direction bit is preserved.
          If bInterval is present on a INT or ISO endpoint, it must be in
          millisecond units (but may not be an integer), and will be converted
          to the nearest integer millisecond for full-speed descriptor, and
          nearest possible interval for high- and super-speed descriptors.
          If bInterval is present on a BULK endpoint, it is set to zero on
          full-speed descriptor and used as provided on high- and super-speed
          descriptors.
        - "superspeed": optional, contains keyword arguments for
            getDescriptor(USBSSEPCompDescriptor, ...)
        - "superspeed_iso": optional, contains keyword arguments for
            getDescriptor(USBSSPIsocEndpointDescriptor, ...)
          Must be provided and non-empty only when endpoint is isochronous and
          "superspeed" dict has "bmAttributes" bit 7 set.
    class_descriptor_list (list of descriptors of any type)
        Descriptors to insert in all speeds between the interface descriptor
        and endpoint descriptors.
    Returns a 3-tuple of lists:
    - fs descriptors
    - hs descriptors
    - ss descriptors
    """
    interface = getDescriptor(
        USBInterfaceDescriptor,
        bNumEndpoints=len(endpoint_list),
        **interface
    )
    class_descriptor_list = list(class_descriptor_list)
    fs_list = [interface] + class_descriptor_list
    hs_list = [interface] + class_descriptor_list
    ss_list = [interface] + class_descriptor_list
    # Auto-assign endpoint addresses when the first endpoint's address is
    # zero once the direction bit is masked out.
    need_address = (
        endpoint_list[0]['endpoint'].get(
            'bEndpointAddress',
            0,
        ) & ~ch9.USB_DIR_IN == 0
    )
    for index, endpoint in enumerate(endpoint_list, 1):
        endpoint_kw = endpoint['endpoint'].copy()
        transfer_type = endpoint_kw[
            'bmAttributes'
        ] & ch9.USB_ENDPOINT_XFERTYPE_MASK
        fs_max, hs_max, ss_max = _MAX_PACKET_SIZE_DICT[transfer_type]
        if need_address:
            endpoint_kw['bEndpointAddress'] = index | (
                endpoint_kw.get('bEndpointAddress', 0) & ch9.USB_DIR_IN
            )
        # Pick the with-audio descriptor only when its extra fields are used.
        klass = (
            USBEndpointDescriptor
            if 'bRefresh' in endpoint_kw or 'bSynchAddress' in endpoint_kw else
            USBEndpointDescriptorNoAudio
        )
        interval = endpoint_kw.pop('bInterval', _MARKER)
        if interval is _MARKER:
            fs_interval = hs_interval = 0
        else:
            if transfer_type == ch9.USB_ENDPOINT_XFER_BULK:
                fs_interval = 0
                hs_interval = interval
            else: # USB_ENDPOINT_XFER_ISOC or USB_ENDPOINT_XFER_INT
                # Full-speed bInterval is in milliseconds.
                fs_interval = max(1, min(255, round(interval)))
                # High-speed bInterval is an exponent of microframes;
                # 8 is the number of microframes in a millisecond.
                hs_interval = max(
                    1,
                    min(16, int(round(1 + math.log(interval * 8, 2)))),
                )
        packet_size = endpoint_kw.pop('wMaxPacketSize', _MARKER)
        if packet_size is _MARKER:
            fs_packet_size = fs_max
            hs_packet_size = hs_max
            ss_packet_size = ss_max
        else:
            fs_packet_size = min(fs_max, packet_size)
            hs_packet_size = min(hs_max, packet_size)
            ss_packet_size = min(ss_max, packet_size)
        # BUG FIX: the per-speed packet sizes computed above were previously
        # discarded, and the per-speed maxima always used, silently ignoring
        # a caller-provided wMaxPacketSize.
        fs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=fs_packet_size,
            bInterval=fs_interval,
            **endpoint_kw
        ))
        hs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=hs_packet_size,
            bInterval=hs_interval,
            **endpoint_kw
        ))
        ss_list.append(getDescriptor(
            klass,
            wMaxPacketSize=ss_packet_size,
            # Super-speed reuses the high-speed bInterval encoding.
            bInterval=hs_interval,
            **endpoint_kw
        ))
        ss_companion_kw = endpoint.get('superspeed', _EMPTY_DICT)
        ss_list.append(getDescriptor(
            USBSSEPCompDescriptor,
            **ss_companion_kw
        ))
        ssp_iso_kw = endpoint.get('superspeed_iso', _EMPTY_DICT)
        # "superspeed_iso" must be present exactly when the endpoint is
        # isochronous AND the companion descriptor announces SSP ISO.
        if bool(ssp_iso_kw) != (
            endpoint_kw.get('bmAttributes', 0) &
            ch9.USB_ENDPOINT_XFERTYPE_MASK ==
            ch9.USB_ENDPOINT_XFER_ISOC and
            bool(ch9.USB_SS_SSP_ISOC_COMP(
                ss_companion_kw.get('bmAttributes', 0),
            ))
        ):
            raise ValueError('Inconsistent isochronous companion')
        if ssp_iso_kw:
            ss_list.append(getDescriptor(
                USBSSPIsocEndpointDescriptor,
                **ssp_iso_kw
            ))
    return (fs_list, hs_list, ss_list)
def getDescriptor(klass, **kw):
    """
    Instanciate klass with bLength and bDescriptorType filled in
    automatically, forwarding all other keyword arguments.
    """
    # ctypes.Structure.__init__ silently ignores keyword arguments which
    # do not match a declared field, which makes typos painful to track
    # down. Probe an empty instance and reject unknown names explicitly.
    probe = klass()
    assert hasattr(probe, 'bLength')
    assert hasattr(probe, 'bDescriptorType')
    bogus_name_list = [
        field_name
        for field_name in kw
        if not hasattr(probe, field_name)
    ]
    if bogus_name_list:
        raise TypeError('Unknown fields %r' % (bogus_name_list, ))
    # XXX: not very pythonic...
    return klass(
        bLength=ctypes.sizeof(klass),
        # pylint: disable=protected-access
        bDescriptorType=klass._bDescriptorType,
        # pylint: enable=protected-access
        **kw
    )
def getOSExtPropDesc(data_type, name, value):
    """
    Build an OS extension property descriptor.
    data_type (int)
        See wPropertyDataType documentation.
    name (string)
        See PropertyName documentation.
    value (string)
        See PropertyData documentation.
        NULL chars must be explicitely included in the value when needed,
        this function does not add any terminating NULL for example.
    """
    name_length = len(name)
    value_length = len(value)
    # Field sizes depend on the arguments, so a dedicated ctypes subclass
    # is generated per call.
    descriptor_class = type(
        'OSExtPropDesc',
        (OSExtPropDescHead, ),
        {
            '_fields_': [
                ('bPropertyName', ctypes.c_char * name_length),
                ('dwPropertyDataLength', le32),
                ('bProperty', ctypes.c_char * value_length),
            ],
        }
    )
    return descriptor_class(
        dwSize=ctypes.sizeof(descriptor_class),
        dwPropertyDataType=data_type,
        wPropertyNameLength=name_length,
        bPropertyName=name,
        dwPropertyDataLength=value_length,
        bProperty=value,
    )
#def getDescs(*args, **kw):
# """
# Return a legacy format FunctionFS suitable for serialisation.
# Deprecated as of 3.14 .
#
# NOT IMPLEMENTED
# """
# warnings.warn(
# DeprecationWarning,
# 'Legacy format, deprecated as of 3.14.',
# )
# raise NotImplementedError('TODO')
# klass = type(
# 'Descs',
# (DescsHead, ),
# {
# 'fs_descrs': None, # TODO
# 'hs_descrs': None, # TODO
# },
# )
# return klass(
# magic=DESCRIPTORS_MAGIC,
# length=ctypes.sizeof(klass),
# **kw
# )
def getDescsV2(flags, fs_list=(), hs_list=(), ss_list=(), os_list=()):
    """
    Return a FunctionFS descriptor suitable for serialisation.
    flags (int)
        Any combination of VIRTUAL_ADDR, EVENTFD, ALL_CTRL_RECIP,
        CONFIG0_SETUP.
        The HAS_{FS,HS,SS,MS_OS}_DESC flags are set automatically from the
        non-empty lists; setting one without a matching list is an error.
    {fs,hs,ss,os}_list (list of descriptors)
        Instances of the following classes:
        {fs,hs,ss}_list:
            USBInterfaceDescriptor
            USBEndpointDescriptorNoAudio
            USBEndpointDescriptor
            USBSSEPCompDescriptor
            USBSSPIsocEndpointDescriptor
            USBOTGDescriptor
            USBOTG20Descriptor
            USBInterfaceAssocDescriptor
            TODO: HID
        All (non-empty) lists must define the same number of interfaces
        and endpoints, and endpoint descriptors must be given in the same
        order, bEndpointAddress-wise.
        os_list:
            OSDesc
    Raises TypeError on a descriptor of unexpected type, ValueError when a
    HAS_*_DESC flag is set without descriptors.
    """
    count_field_list = []
    descr_field_list = []
    kw = {}
    # For each speed (plus the OS descriptors), build a dedicated ctypes
    # structure type on the fly whose fields are the given descriptors.
    for descriptor_list, flag, prefix, allowed_descriptor_klass in (
        (fs_list, HAS_FS_DESC, 'fs', USBDescriptorHeader),
        (hs_list, HAS_HS_DESC, 'hs', USBDescriptorHeader),
        (ss_list, HAS_SS_DESC, 'ss', USBDescriptorHeader),
        (os_list, HAS_MS_OS_DESC, 'os', OSDescHeader),
    ):
        if descriptor_list:
            for index, descriptor in enumerate(descriptor_list):
                if not isinstance(descriptor, allowed_descriptor_klass):
                    raise TypeError(
                        'Descriptor %r of unexpected type: %r' % (
                            index,
                            type(descriptor),
                        ),
                    )
            # Give each descriptor a distinct field name in the generated
            # structure type.
            descriptor_map = [
                ('desc_%i' % x, y)
                for x, y in enumerate(descriptor_list)
            ]
            flags |= flag
            count_name = prefix + 'count'
            descr_name = prefix + 'descr'
            count_field_list.append((count_name, le32))
            descr_type = type(
                't_' + descr_name,
                (ctypes.LittleEndianStructure, ),
                {
                    '_pack_': 1,
                    '_fields_': [
                        (x, type(y))
                        for x, y in descriptor_map
                    ],
                }
            )
            descr_field_list.append((descr_name, descr_type))
            kw[count_name] = len(descriptor_map)
            kw[descr_name] = descr_type(**dict(descriptor_map))
        elif flags & flag:
            # Caller claimed this speed is present but provided no
            # descriptors: the structure type cannot be generated.
            raise ValueError(
                'Flag %r set but descriptor list empty, cannot generate type.' % (
                    FLAGS.get(flag),
                )
            )
    # All counts come first, then the per-speed descriptor blobs, appended
    # after the DescsHeadV2 fields.
    klass = type(
        'DescsV2_0x%02x' % (
            flags & (
                HAS_FS_DESC |
                HAS_HS_DESC |
                HAS_SS_DESC |
                HAS_MS_OS_DESC
            ),
            # XXX: include contained descriptors type information ? (and name ?)
        ),
        (DescsHeadV2, ),
        {
            '_fields_': count_field_list + descr_field_list,
        },
    )
    return klass(
        magic=DESCRIPTORS_MAGIC_V2,
        length=ctypes.sizeof(klass),
        flags=flags,
        **kw
    )
def getStrings(lang_dict):
    """
    Return a FunctionFS strings descriptor suitable for serialisation.
    lang_dict (dict)
        Key: language ID (ex: 0x0409 for en-us)
        Value: list of unicode objects
        All values must have the same number of items.
    Raises ValueError when the per-language string counts differ.
    """
    field_list = []
    kw = {}
    # The string count is taken from an arbitrary language; an empty dict
    # means no string block at all.
    try:
        str_count = len(next(iter(lang_dict.values())))
    except StopIteration:
        str_count = 0
    else:
        for lang, string_list in lang_dict.items():
            if len(string_list) != str_count:
                raise ValueError('All values must have the same string count.')
            field_id = 'strings_%04x' % lang
            # Strings are NUL-separated and NUL-terminated UTF-8.
            strings = b'\x00'.join(
                string.encode('utf-8')
                for string in string_list
            ) + b'\x00'
            # Size depends on encoded length, so build a per-language type.
            field_type = type(
                'String',
                (StringBase, ),
                {
                    '_fields_': [
                        ('strings', ctypes.c_char * len(strings)),
                    ],
                },
            )
            field_list.append((field_id, field_type))
            kw[field_id] = field_type(
                lang=lang,
                strings=strings,
            )
    strings_class = type(
        'Strings',
        (StringsHead, ),
        {
            '_fields_': field_list,
        },
    )
    return strings_class(
        magic=STRINGS_MAGIC,
        length=ctypes.sizeof(strings_class),
        str_count=str_count,
        lang_count=len(lang_dict),
        **kw
    )
def serialise(structure):
    """
    Expose the raw memory of a ctypes structure as a c_char array.
    structure (ctypes.Structure)
        The structure to serialise.
    Returns a ctypes.c_char array sharing the structure's memory:
    no data is copied, so the structure must outlive the returned array.
    """
    byte_array_type = ctypes.c_char * ctypes.sizeof(structure)
    pointer = ctypes.cast(
        ctypes.pointer(structure),
        ctypes.POINTER(byte_array_type),
    )
    return pointer.contents
class EndpointFileBase(io.FileIO):
    """
    File object representing a endpoint. Abstract.
    """
    def _ioctl(self, func, *args, **kw):
        # Thin wrapper over fcntl.ioctl which raises IOError on negative
        # return values instead of returning them to the caller.
        result = fcntl.ioctl(self, func, *args, **kw)
        if result < 0:
            raise IOError(result)
        return result
class Endpoint0File(EndpointFileBase):
    """
    File object exposing ioctls available on endpoint zero.
    """
    def halt(self, request_type):
        """
        Halt current endpoint.
        request_type (int)
            bRequestType of the transaction being halted; only its
            direction bit is used, to pick the stalling direction.
        """
        try:
            # A zero-length transfer in the transaction's direction is
            # expected to fail with EL2HLT once the endpoint is halted.
            if request_type & ch9.USB_DIR_IN:
                self.read(0)
            else:
                self.write(b'')
        except IOError as exc:
            if exc.errno != errno.EL2HLT:
                raise
        else:
            # Succeeding without EL2HLT means the halt did not happen.
            raise ValueError('halt did not return EL2HLT ?')
    def getRealInterfaceNumber(self, interface):
        """
        Returns the host-visible interface number, or None if there is no such
        interface.
        """
        try:
            return self._ioctl(INTERFACE_REVMAP, interface)
        except IOError as exc:
            # EDOM signals "no such interface".
            if exc.errno == errno.EDOM:
                return None
            raise
    # TODO: Add any standard IOCTL in usb_gadget_ops.ioctl ?
class EndpointFile(EndpointFileBase):
    """
    File object exposing ioctls available on non-zero endpoints.
    """
    # Local shadow of the halted state: maintained by halt()/clearHalt(),
    # not read back from the kernel.
    _halted = False
    def getRealEndpointNumber(self):
        """
        Returns the host-visible endpoint number.
        """
        return self._ioctl(ENDPOINT_REVMAP)
    def clearHalt(self):
        """
        Clears endpoint halt, and resets toggle.
        See drivers/usb/gadget/udc/core.c:usb_ep_clear_halt
        """
        self._ioctl(CLEAR_HALT)
        self._halted = False
    def getFIFOStatus(self):
        """
        Returns the number of bytes in fifo.
        """
        return self._ioctl(FIFO_STATUS)
    def flushFIFO(self):
        """
        Discards Endpoint FIFO content.
        """
        self._ioctl(FIFO_FLUSH)
    def getDescriptor(self):
        """
        Returns the currently active endpoint descriptor
        (depending on current USB speed).
        """
        result = USBEndpointDescriptor()
        # Third argument True: the ioctl fills the structure in place.
        self._ioctl(ENDPOINT_DESC, result, True)
        return result
    def _halt(self):
        # Direction-specific zero-length transfer; provided by subclasses.
        raise NotImplementedError
    def halt(self):
        """
        Halt current endpoint.
        """
        try:
            # The zero-length transfer is expected to fail with EBADMSG
            # once the endpoint is halted.
            self._halt()
        except IOError as exc:
            if exc.errno != errno.EBADMSG:
                raise
        else:
            raise ValueError('halt did not return EBADMSG ?')
        self._halted = True
    def isHalted(self):
        """
        Whether endpoint is currently halted.
        """
        return self._halted
class EndpointINFile(EndpointFile):
    """
    File object for an IN (device-to-host) endpoint: write-only.
    Every read-family method raises unconditionally.
    """
    @staticmethod
    def read(*_args, **_kw):
        """
        Always raises IOError.
        """
        raise IOError('File not open for reading')
    # All read variants share the same always-raising implementation.
    readinto = readall = readlines = readline = read
    @staticmethod
    def readable():
        """
        Never readable.
        """
        return False
    def _halt(self):
        # Bypass the class-level read() override: a real zero-length read
        # is how an IN endpoint halt is requested.
        super(EndpointINFile, self).read(0)
class EndpointOUTFile(EndpointFile):
    """
    File object for an OUT (host-to-device) endpoint: read-only.
    Every write-family method raises unconditionally.
    """
    @staticmethod
    def write(*_args, **_kw):
        """
        Always raises IOError.
        """
        raise IOError('File not open for writing')
    # The write variant shares the same always-raising implementation.
    writelines = write
    @staticmethod
    def writable():
        """
        Never writable.
        """
        return False
    def _halt(self):
        # Bypass the class-level write() override: a real zero-length
        # write is how an OUT endpoint halt is requested.
        super(EndpointOUTFile, self).write(b'')
_INFINITY = itertools.repeat(None)
_ONCE = (None, )
class Function(object):
    """
    Pythonic class for interfacing with FunctionFS.
    Properties available:
    function_remote_wakeup_capable (bool)
        Whether the function wishes to be allowed to wake host.
    function_remote_wakeup (bool)
        Whether host has allowed the function to wake it up.
        Set and cleared by onSetup by calling enableRemoteWakeup and
        disableRemoteWakeup, respectively.
    """
    _closed = False
    _ep_list = () # Avoids failing in __del__ when (subclass') __init__ fails.
    function_remote_wakeup_capable = False
    function_remote_wakeup = False
    def __init__(
        self,
        path,
        fs_list=(), hs_list=(), ss_list=(),
        os_list=(),
        lang_dict=None,
        all_ctrl_recip=False, config0_setup=False,
    ):
        """
        path (string)
            Path to the functionfs mountpoint (where the ep* files are
            located).
        {fs,hs,ss}_list (list of descriptors)
            XXX: may change to avoid requiring ctype objects.
        os_list (list of descriptors)
            XXX: may change to avoid requiring ctype objects.
        lang_dict (dict or None)
            Keys: language id (ex: 0x0402 for "us-en").
            Values: List of unicode objects. First item becomes string
                    descriptor 1, and so on. Must contain at least as many
                    string descriptors as the highest string index declared
                    in all descriptors.
            None (the default) means no string descriptors.
        all_ctrl_recip (bool)
            When true, this function will receive all control transactions.
            Useful when implementing non-standard control transactions.
        config0_setup (bool)
            When true, this function will receive control transactions before
            any configuration gets enabled.
        """
        # Fix: avoid a mutable default argument; None means empty.
        if lang_dict is None:
            lang_dict = {}
        self._path = path
        ep0 = Endpoint0File(os.path.join(path, 'ep0'), 'r+')
        self._ep_list = ep_list = [ep0]
        self._ep_address_dict = ep_address_dict = {}
        flags = 0
        if all_ctrl_recip:
            flags |= ALL_CTRL_RECIP
        if config0_setup:
            flags |= CONFIG0_SETUP
        # Note: serialise does not prevent its argument from being freed and
        # reallocated. Keep strong references to to-serialise values until
        # after they get written.
        desc = getDescsV2(
            flags,
            fs_list=fs_list,
            hs_list=hs_list,
            ss_list=ss_list,
            os_list=os_list,
        )
        ep0.write(serialise(desc))
        # TODO: try v1 on failure ?
        del desc
        # Note: see above.
        strings = getStrings(lang_dict)
        ep0.write(serialise(strings))
        del strings
        # Open one file per endpoint declared in the highest-speed
        # descriptor list provided.
        for descriptor in ss_list or hs_list or fs_list:
            if descriptor.bDescriptorType == ch9.USB_DT_ENDPOINT:
                assert descriptor.bEndpointAddress not in ep_address_dict, (
                    descriptor,
                    ep_address_dict[descriptor.bEndpointAddress],
                )
                index = len(ep_list)
                ep_address_dict[descriptor.bEndpointAddress] = index
                ep_list.append(
                    (
                        EndpointINFile
                        if descriptor.bEndpointAddress & ch9.USB_DIR_IN
                        else EndpointOUTFile
                    )(
                        os.path.join(path, 'ep%u' % (index, )),
                        'r+',
                    )
                )
    @property
    def ep0(self):
        """
        Endpoint 0, use when handling setup transactions.
        """
        return self._ep_list[0]
    def close(self):
        """
        Close all endpoint file descriptors.
        Safe to call more than once.
        """
        ep_list = self._ep_list
        while ep_list:
            ep_list.pop().close()
        self._closed = True
    def __del__(self):
        self.close()
    # Event-type to handler-method-name dispatch table. SETUP is handled
    # separately, as it carries a payload.
    __event_dict = {
        BIND: 'onBind',
        UNBIND: 'onUnbind',
        ENABLE: 'onEnable',
        DISABLE: 'onDisable',
        # SETUP: handled specially
        SUSPEND: 'onSuspend',
        RESUME: 'onResume',
    }
    def __process(self, iterator):
        readinto = self.ep0.readinto
        # FunctionFS can queue up to 4 events, so let's read that much.
        event_len = ctypes.sizeof(Event)
        array_type = Event * 4
        buf = bytearray(ctypes.sizeof(array_type))
        # The Event array aliases buf: events are parsed in place.
        event_list = array_type.from_buffer(buf)
        event_dict = self.__event_dict
        for _ in iterator:
            if self._closed:
                break
            try:
                length = readinto(buf)
            except IOError as exc:
                if exc.errno == errno.EINTR:
                    continue
                raise
            if not length:
                # Note: also catches None, returned when ep0 is non-blocking
                break # TODO: test if this happens when ep0 gets closed
                # (by FunctionFS or in another thread or in a handler)
            count, remainder = divmod(length, event_len)
            assert remainder == 0, (length, event_len)
            for index in range(count):
                event = event_list[index]
                event_type = event.type
                if event_type == SETUP:
                    setup = event.u.setup
                    try:
                        self.onSetup(
                            setup.bRequestType,
                            setup.bRequest,
                            setup.wValue,
                            setup.wIndex,
                            setup.wLength,
                        )
                    except:
                        # On *ANY* exception, halt endpoint
                        self.ep0.halt(setup.bRequestType)
                        raise
                else:
                    # Consistency fix: reuse the already-fetched event_type
                    # instead of re-reading event.type.
                    getattr(self, event_dict[event_type])()
    def processEventsForever(self):
        """
        Process kernel ep0 events until closed.
        ep0 must be in blocking mode, otherwise behaves like `processEvents`.
        """
        self.__process(_INFINITY)
    def processEvents(self):
        """
        Process at least one kernel event if ep0 is in blocking mode.
        Process any already available event if ep0 is in non-blocking mode.
        """
        self.__process(_ONCE)
    def getEndpoint(self, index):
        """
        Return a file object corresponding to given endpoint index,
        in descriptor list order.
        """
        return self._ep_list[index]
    def getEndpointByAddress(self, address):
        """
        Return a file object corresponding to given endpoint address.
        """
        return self.getEndpoint(self._ep_address_dict[address])
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def onBind(self):
        """
        Triggered when FunctionFS signals gadget binding.
        May be overridden in subclass.
        """
        pass
    def onUnbind(self):
        """
        Triggered when FunctionFS signals gadget unbinding.
        May be overridden in subclass.
        """
        pass
    def onEnable(self):
        """
        Called when FunctionFS signals the function was (re)enabled.
        This may happen several times without onDisable being called.
        It must reset the function to its default state.
        May be overridden in subclass.
        """
        self.disableRemoteWakeup()
    def onDisable(self):
        """
        Called when FunctionFS signals the function was (re)disabled.
        This may happen several times without onEnable being called.
        May be overridden in subclass.
        """
        pass
    def disableRemoteWakeup(self):
        """
        Called when host issues a clearFeature request of the "suspend" flag
        on this interface.
        Sets function_remote_wakeup property to False so subsequent getStatus
        requests will return expected value.
        May be overridden in subclass.
        """
        self.function_remote_wakeup = False
    def enableRemoteWakeup(self):
        """
        Called when host issues a setFeature request of the "suspend" flag
        on this interface.
        Sets function_remote_wakeup property to True so subsequent getStatus
        requests will return expected value.
        May be overridden in subclass.
        """
        self.function_remote_wakeup = True
    def onSetup(self, request_type, request, value, index, length):
        """
        Called when a setup USB transaction was received.
        Default implementation:
        - handles USB_REQ_GET_STATUS on interface and endpoints
        - handles USB_REQ_CLEAR_FEATURE(USB_ENDPOINT_HALT) on endpoints
        - handles USB_REQ_SET_FEATURE(USB_ENDPOINT_HALT) on endpoints
        - halts on everything else
        If this method raises anything, endpoint 0 is halted by its caller and
        exception is let through.
        May be overridden in subclass.
        """
        if (request_type & ch9.USB_TYPE_MASK) == ch9.USB_TYPE_STANDARD:
            recipient = request_type & ch9.USB_RECIP_MASK
            is_in = (request_type & ch9.USB_DIR_IN) == ch9.USB_DIR_IN
            if request == ch9.USB_REQ_GET_STATUS:
                if is_in and length == 2:
                    if recipient == ch9.USB_RECIP_INTERFACE:
                        if value == 0:
                            status = 0
                            if index == 0:
                                if self.function_remote_wakeup_capable:
                                    status |= 1 << 0
                                if self.function_remote_wakeup:
                                    status |= 1 << 1
                            self.ep0.write(struct.pack('<H', status)[:length])
                            return
                    elif recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == 0:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                pass
                            else:
                                status = 0
                                if endpoint.isHalted():
                                    status |= 1 << 0
                                self.ep0.write(
                                    struct.pack('<H', status)[:length],
                                )
                                return
            elif request == ch9.USB_REQ_CLEAR_FEATURE:
                if not is_in and length == 0:
                    if recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == ch9.USB_ENDPOINT_HALT:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                pass
                            else:
                                endpoint.clearHalt()
                                self.ep0.read(0)
                                return
                    elif recipient == ch9.USB_RECIP_INTERFACE:
                        if value == ch9.USB_INTRF_FUNC_SUSPEND:
                            if self.function_remote_wakeup_capable:
                                self.disableRemoteWakeup()
                                self.ep0.read(0)
                                return
            elif request == ch9.USB_REQ_SET_FEATURE:
                if not is_in and length == 0:
                    if recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == ch9.USB_ENDPOINT_HALT:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                pass
                            else:
                                endpoint.halt()
                                self.ep0.read(0)
                                return
                    elif recipient == ch9.USB_RECIP_INTERFACE:
                        if value == ch9.USB_INTRF_FUNC_SUSPEND:
                            if self.function_remote_wakeup_capable:
                                self.enableRemoteWakeup()
                                self.ep0.read(0)
                                return
        # Unrecognised or unsupported request: stall.
        self.ep0.halt(request_type)
    def onSuspend(self):
        """
        Called when FunctionFS signals the host stops USB traffic.
        May be overridden in subclass.
        """
        pass
    def onResume(self):
        """
        Called when FunctionFS signals the host restarts USB traffic.
        May be overridden in subclass.
        """
        pass
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
getOSExtPropDesc
|
python
|
def getOSExtPropDesc(data_type, name, value):
klass = type(
'OSExtPropDesc',
(OSExtPropDescHead, ),
{
'_fields_': [
('bPropertyName', ctypes.c_char * len(name)),
('dwPropertyDataLength', le32),
('bProperty', ctypes.c_char * len(value)),
],
}
)
return klass(
dwSize=ctypes.sizeof(klass),
dwPropertyDataType=data_type,
wPropertyNameLength=len(name),
bPropertyName=name,
dwPropertyDataLength=len(value),
bProperty=value,
)
|
Returns an OS extension property descriptor.
data_type (int)
See wPropertyDataType documentation.
name (string)
See PropertyName documentation.
value (string)
See PropertyData documentation.
NULL chars must be explicitely included in the value when needed,
this function does not add any terminating NULL for example.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L331-L361
| null |
# This file is part of python-functionfs
# Copyright (C) 2016-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# python-functionfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-functionfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-functionfs. If not, see <http://www.gnu.org/licenses/>.
"""
Interfaces with functionfs to simplify USB gadget function declaration and
implementation on linux.
Defines standard USB descriptors (see "ch9" submodule) and sends them to the
kernel to declare function's structure.
Provides methods for accessing each endpoint and to react to events.
"""
import ctypes
import errno
import fcntl
import io
import itertools
import math
import os
import struct
import warnings
from .common import (
USBDescriptorHeader,
le32,
)
from . import ch9
from .ch9 import (
USBInterfaceDescriptor,
USBEndpointDescriptorNoAudio,
USBEndpointDescriptor,
USBSSEPCompDescriptor,
# USBSSPIsocEndpointDescriptor is not implemented in kernel as of this
# writing.
USBSSPIsocEndpointDescriptor,
# USBQualifierDescriptor is reserved for gadgets, so don't expose it.
USBOTGDescriptor,
USBOTG20Descriptor,
    # USBDebugDescriptor is not implemented in kernel as of this writing.
USBDebugDescriptor,
USBInterfaceAssocDescriptor,
)
from .functionfs import (
DESCRIPTORS_MAGIC, STRINGS_MAGIC, DESCRIPTORS_MAGIC_V2,
FLAGS,
DescsHeadV2,
DescsHead,
OSDescHeader,
OSDescHeaderBCount,
OSExtCompatDesc,
OSExtPropDescHead,
StringsHead,
StringBase,
Event,
FIFO_STATUS, FIFO_FLUSH, CLEAR_HALT, INTERFACE_REVMAP, ENDPOINT_REVMAP, ENDPOINT_DESC,
)
# pylint: disable=no-name-in-module
from .functionfs import (
HAS_FS_DESC,
HAS_HS_DESC,
HAS_SS_DESC,
HAS_MS_OS_DESC,
ALL_CTRL_RECIP,
CONFIG0_SETUP,
BIND, UNBIND, ENABLE, DISABLE, SETUP, SUSPEND, RESUME,
)
# pylint: enable=no-name-in-module
__all__ = (
'ch9',
'Function',
# XXX: Not very pythonic...
'getInterfaceInAllSpeeds',
'getDescriptor',
'getOSDesc',
'getOSExtPropDesc',
'USBInterfaceDescriptor',
'USBEndpointDescriptorNoAudio',
'USBEndpointDescriptor',
'USBSSEPCompDescriptor',
'USBSSPIsocEndpointDescriptor',
'USBOTGDescriptor',
'USBOTG20Descriptor',
'USBDebugDescriptor',
'USBInterfaceAssocDescriptor',
'OSExtCompatDesc',
)
_MAX_PACKET_SIZE_DICT = {
ch9.USB_ENDPOINT_XFER_ISOC: (
1023, # 0..1023
1024, # 0..1024
1024, # 0..1024
),
ch9.USB_ENDPOINT_XFER_BULK: (
64, # 8, 16, 32, 64
512, # 512 only
1024, # 1024 only
),
ch9.USB_ENDPOINT_XFER_INT: (
64, # 0..64
1024, # 0..1024
1024, # 1..1024
),
}
_MARKER = object()
_EMPTY_DICT = {} # For internal ** fallback usage
def getInterfaceInAllSpeeds(interface, endpoint_list, class_descriptor_list=()):
    """
    Produce similar fs, hs and ss interface and endpoints descriptors.
    Should be useful for devices desiring to work in all 3 speeds with maximum
    endpoint wMaxPacketSize. Reduces data duplication from descriptor
    declarations.
    Not intended to cover fancy combinations.
    interface (dict):
        Keyword arguments for
            getDescriptor(USBInterfaceDescriptor, ...)
        in all speeds.
        bNumEndpoints must not be provided.
    endpoint_list (list of dicts)
        Each dict represents an endpoint, and may contain the following items:
        - "endpoint": required, contains keyword arguments for
            getDescriptor(USBEndpointDescriptorNoAudio, ...)
          or
            getDescriptor(USBEndpointDescriptor, ...)
          The with-audio variant is picked when its extra fields are assigned a
          value.
          wMaxPacketSize may be missing, in which case it will be set to the
          maximum size for given speed and endpoint type; when present it is
          clamped to that per-speed maximum.
          bmAttributes must be provided.
          If bEndpointAddress is zero (excluding direction bit) on the first
          endpoint, endpoints will be assigned their rank in this list,
          starting at 1. Their direction bit is preserved.
          If bInterval is present on a INT or ISO endpoint, it must be in
          millisecond units (but may not be an integer), and will be converted
          to the nearest integer millisecond for full-speed descriptor, and
          nearest possible interval for high- and super-speed descriptors.
          If bInterval is present on a BULK endpoint, it is set to zero on
          full-speed descriptor and used as provided on high- and super-speed
          descriptors.
        - "superspeed": optional, contains keyword arguments for
            getDescriptor(USBSSEPCompDescriptor, ...)
        - "superspeed_iso": optional, contains keyword arguments for
            getDescriptor(USBSSPIsocEndpointDescriptor, ...)
          Must be provided and non-empty only when endpoint is isochronous and
          "superspeed" dict has "bmAttributes" bit 7 set.
    class_descriptor_list (list of descriptors of any type)
        Descriptors to insert in all speeds between the interface descriptor
        and endpoint descriptors.
    Returns a 3-tuple of lists:
    - fs descriptors
    - hs descriptors
    - ss descriptors
    """
    interface = getDescriptor(
        USBInterfaceDescriptor,
        bNumEndpoints=len(endpoint_list),
        **interface
    )
    class_descriptor_list = list(class_descriptor_list)
    fs_list = [interface] + class_descriptor_list
    hs_list = [interface] + class_descriptor_list
    ss_list = [interface] + class_descriptor_list
    # Auto-number endpoints only when the first one has no address
    # (ignoring the direction bit).
    need_address = (
        endpoint_list[0]['endpoint'].get(
            'bEndpointAddress',
            0,
        ) & ~ch9.USB_DIR_IN == 0
    )
    for index, endpoint in enumerate(endpoint_list, 1):
        endpoint_kw = endpoint['endpoint'].copy()
        transfer_type = endpoint_kw[
            'bmAttributes'
        ] & ch9.USB_ENDPOINT_XFERTYPE_MASK
        fs_max, hs_max, ss_max = _MAX_PACKET_SIZE_DICT[transfer_type]
        if need_address:
            # Assign rank as endpoint number, preserving the direction bit.
            endpoint_kw['bEndpointAddress'] = index | (
                endpoint_kw.get('bEndpointAddress', 0) & ch9.USB_DIR_IN
            )
        klass = (
            USBEndpointDescriptor
            if 'bRefresh' in endpoint_kw or 'bSynchAddress' in endpoint_kw else
            USBEndpointDescriptorNoAudio
        )
        interval = endpoint_kw.pop('bInterval', _MARKER)
        if interval is _MARKER:
            fs_interval = hs_interval = 0
        else:
            if transfer_type == ch9.USB_ENDPOINT_XFER_BULK:
                fs_interval = 0
                hs_interval = interval
            else: # USB_ENDPOINT_XFER_ISOC or USB_ENDPOINT_XFER_INT
                fs_interval = max(1, min(255, round(interval)))
                # 8 is the number of microframes in a millisecond; hs encodes
                # the interval as 2**(bInterval-1) microframes.
                hs_interval = max(
                    1,
                    min(16, int(round(1 + math.log(interval * 8, 2)))),
                )
        packet_size = endpoint_kw.pop('wMaxPacketSize', _MARKER)
        if packet_size is _MARKER:
            fs_packet_size = fs_max
            hs_packet_size = hs_max
            ss_packet_size = ss_max
        else:
            fs_packet_size = min(fs_max, packet_size)
            hs_packet_size = min(hs_max, packet_size)
            ss_packet_size = min(ss_max, packet_size)
        # BUGFIX: use the per-speed clamped sizes computed above. Previously
        # the raw speed maxima (fs_max/hs_max/ss_max) were passed, silently
        # ignoring any caller-provided wMaxPacketSize.
        fs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=fs_packet_size,
            bInterval=fs_interval,
            **endpoint_kw
        ))
        hs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=hs_packet_size,
            bInterval=hs_interval,
            **endpoint_kw
        ))
        ss_list.append(getDescriptor(
            klass,
            wMaxPacketSize=ss_packet_size,
            # Super-speed reuses the high-speed bInterval encoding.
            bInterval=hs_interval,
            **endpoint_kw
        ))
        # Every super-speed endpoint needs a companion descriptor.
        ss_companion_kw = endpoint.get('superspeed', _EMPTY_DICT)
        ss_list.append(getDescriptor(
            USBSSEPCompDescriptor,
            **ss_companion_kw
        ))
        # superspeed_iso must be present exactly when the endpoint is
        # isochronous and the companion's SSP ISO companion bit is set.
        ssp_iso_kw = endpoint.get('superspeed_iso', _EMPTY_DICT)
        if bool(ssp_iso_kw) != (
            endpoint_kw.get('bmAttributes', 0) &
            ch9.USB_ENDPOINT_XFERTYPE_MASK ==
            ch9.USB_ENDPOINT_XFER_ISOC and
            bool(ch9.USB_SS_SSP_ISOC_COMP(
                ss_companion_kw.get('bmAttributes', 0),
            ))
        ):
            raise ValueError('Inconsistent isochronous companion')
        if ssp_iso_kw:
            ss_list.append(getDescriptor(
                USBSSPIsocEndpointDescriptor,
                **ssp_iso_kw
            ))
    return (fs_list, hs_list, ss_list)
def getDescriptor(klass, **kw):
    """
    Build an instance of *klass*, automatically filling in bLength (from the
    structure size) and bDescriptorType (from the class attribute).

    Raises TypeError when a keyword does not name a structure field: ctypes
    Structure.__init__ silently ignores unknown keyword arguments, which
    wastes developer time, so reject them explicitly here.
    """
    probe = klass()
    assert hasattr(probe, 'bLength')
    assert hasattr(probe, 'bDescriptorType')
    bogus_field_list = [
        field_name
        for field_name in kw
        if not hasattr(probe, field_name)
    ]
    if bogus_field_list:
        raise TypeError('Unknown fields %r' % (bogus_field_list, ))
    # pylint: disable=protected-access
    descriptor_type = klass._bDescriptorType
    # pylint: enable=protected-access
    # XXX: not very pythonic...
    return klass(
        bLength=ctypes.sizeof(klass),
        bDescriptorType=descriptor_type,
        **kw
    )
def getOSDesc(interface, ext_list):
    """
    Return an OS description header.
    interface (int)
        Related interface number.
    ext_list (list of OSExtCompatDesc or OSExtPropDesc)
        List of instances of extended descriptors.
        All items must be of a single type.
    """
    try:
        # Unpacking succeeds only when exactly one distinct type is present.
        ext_type, = {type(x) for x in ext_list}
    except ValueError:
        raise TypeError('Extensions of a single type are required.')
    if issubclass(ext_type, OSExtCompatDesc):
        # wIndex 4: extended compatibility ID feature descriptor.
        wIndex = 4
        kw = {
            'b': OSDescHeaderBCount(
                bCount=len(ext_list),
                Reserved=0,
            ),
        }
    elif issubclass(ext_type, OSExtPropDescHead):
        # wIndex 5: extended properties feature descriptor.
        wIndex = 5
        kw = {
            'wCount': len(ext_list),
        }
    else:
        raise TypeError('Extensions of unexpected type')
    # Fabricate a concrete ctypes type embedding the extension array, as its
    # length depends on the arguments.
    ext_list_type = ext_type * len(ext_list)
    klass = type(
        'OSDesc',
        (OSDescHeader, ),
        {
            '_fields_': [
                ('ext_list', ext_list_type),
            ],
        },
    )
    return klass(
        interface=interface,
        # dwLength covers the header and the whole extension array.
        dwLength=ctypes.sizeof(klass),
        bcdVersion=1,
        wIndex=wIndex,
        ext_list=ext_list_type(*ext_list),
        **kw
    )
#def getDescs(*args, **kw):
# """
# Return a legacy format FunctionFS suitable for serialisation.
# Deprecated as of 3.14 .
#
# NOT IMPLEMENTED
# """
# warnings.warn(
# DeprecationWarning,
# 'Legacy format, deprecated as of 3.14.',
# )
# raise NotImplementedError('TODO')
# klass = type(
# 'Descs',
# (DescsHead, ),
# {
# 'fs_descrs': None, # TODO
# 'hs_descrs': None, # TODO
# },
# )
# return klass(
# magic=DESCRIPTORS_MAGIC,
# length=ctypes.sizeof(klass),
# **kw
# )
def getDescsV2(flags, fs_list=(), hs_list=(), ss_list=(), os_list=()):
    """
    Return a FunctionFS descriptor suitable for serialisation.
    flags (int)
        Any combination of VIRTUAL_ADDR, EVENTFD, ALL_CTRL_RECIP,
        CONFIG0_SETUP.
        Speed flags (HAS_*_DESC) are added automatically for each non-empty
        descriptor list; setting one without providing descriptors raises
        ValueError.
    {fs,hs,ss,os}_list (list of descriptors)
        Instances of the following classes:
        {fs,hs,ss}_list:
            USBInterfaceDescriptor
            USBEndpointDescriptorNoAudio
            USBEndpointDescriptor
            USBSSEPCompDescriptor
            USBSSPIsocEndpointDescriptor
            USBOTGDescriptor
            USBOTG20Descriptor
            USBInterfaceAssocDescriptor
            TODO: HID
            All (non-empty) lists must define the same number of interfaces
            and endpoints, and endpoint descriptors must be given in the same
            order, bEndpointAddress-wise.
        os_list:
            OSDesc
    """
    count_field_list = []
    descr_field_list = []
    kw = {}
    for descriptor_list, flag, prefix, allowed_descriptor_klass in (
        (fs_list, HAS_FS_DESC, 'fs', USBDescriptorHeader),
        (hs_list, HAS_HS_DESC, 'hs', USBDescriptorHeader),
        (ss_list, HAS_SS_DESC, 'ss', USBDescriptorHeader),
        (os_list, HAS_MS_OS_DESC, 'os', OSDescHeader),
    ):
        if descriptor_list:
            # Validate descriptor types before building ctypes fields.
            for index, descriptor in enumerate(descriptor_list):
                if not isinstance(descriptor, allowed_descriptor_klass):
                    raise TypeError(
                        'Descriptor %r of unexpected type: %r' % (
                            index,
                            type(descriptor),
                        ),
                    )
            descriptor_map = [
                ('desc_%i' % x, y)
                for x, y in enumerate(descriptor_list)
            ]
            # Providing descriptors implies the corresponding speed flag.
            flags |= flag
            count_name = prefix + 'count'
            descr_name = prefix + 'descr'
            count_field_list.append((count_name, le32))
            # Fabricate a packed structure holding this speed's descriptors,
            # one field per descriptor, preserving declaration order.
            descr_type = type(
                't_' + descr_name,
                (ctypes.LittleEndianStructure, ),
                {
                    '_pack_': 1,
                    '_fields_': [
                        (x, type(y))
                        for x, y in descriptor_map
                    ],
                }
            )
            descr_field_list.append((descr_name, descr_type))
            kw[count_name] = len(descriptor_map)
            kw[descr_name] = descr_type(**dict(descriptor_map))
        elif flags & flag:
            raise ValueError(
                'Flag %r set but descriptor list empty, cannot generate type.' % (
                    FLAGS.get(flag),
                )
            )
    # Name the fabricated type after the effective speed flags, for easier
    # debugging of the resulting objects.
    klass = type(
        'DescsV2_0x%02x' % (
            flags & (
                HAS_FS_DESC |
                HAS_HS_DESC |
                HAS_SS_DESC |
                HAS_MS_OS_DESC
            ),
            # XXX: include contained descriptors type information ? (and name ?)
        ),
        (DescsHeadV2, ),
        {
            '_fields_': count_field_list + descr_field_list,
        },
    )
    return klass(
        magic=DESCRIPTORS_MAGIC_V2,
        length=ctypes.sizeof(klass),
        flags=flags,
        **kw
    )
def getStrings(lang_dict):
    """
    Return a FunctionFS strings descriptor suitable for serialisation.
    lang_dict (dict)
        Key: language ID (ex: 0x0409 for en-us)
        Value: list of unicode objects
        All values must have the same number of items.
    """
    field_list = []
    kw = {}
    try:
        # Use the first language's list to fix the per-language string count.
        str_count = len(next(iter(lang_dict.values())))
    except StopIteration:
        # Empty lang_dict: no string descriptors at all.
        str_count = 0
    else:
        for lang, string_list in lang_dict.items():
            if len(string_list) != str_count:
                raise ValueError('All values must have the same string count.')
            field_id = 'strings_%04x' % lang
            # Strings are serialised as NUL-terminated UTF-8, concatenated.
            strings = b'\x00'.join(x.encode('utf-8') for x in string_list) + b'\x00'
            # Fabricate a per-language ctypes type sized for this payload.
            field_type = type(
                'String',
                (StringBase, ),
                {
                    '_fields_': [
                        ('strings', ctypes.c_char * len(strings)),
                    ],
                },
            )
            field_list.append((field_id, field_type))
            kw[field_id] = field_type(
                lang=lang,
                strings=strings,
            )
    klass = type(
        'Strings',
        (StringsHead, ),
        {
            '_fields_': field_list,
        },
    )
    return klass(
        magic=STRINGS_MAGIC,
        length=ctypes.sizeof(klass),
        str_count=str_count,
        lang_count=len(lang_dict),
        **kw
    )
def serialise(structure):
    """
    Expose *structure* (a ctypes.Structure) as a ctypes.c_char array of the
    same size, sharing the same memory.

    No memory is copied: mutations of the structure are visible through the
    returned array, and the array must not outlive the structure.
    """
    char_array_type = ctypes.c_char * ctypes.sizeof(structure)
    return ctypes.cast(
        ctypes.pointer(structure),
        ctypes.POINTER(char_array_type),
    ).contents
class EndpointFileBase(io.FileIO):
    """
    File object representing a endpoint. Abstract.
    """
    def _ioctl(self, func, *args, **kw):
        # Thin wrapper over fcntl.ioctl, turning negative return values into
        # IOError instead of returning them to the caller.
        result = fcntl.ioctl(self, func, *args, **kw)
        if result < 0:
            raise IOError(result)
        return result
class Endpoint0File(EndpointFileBase):
    """
    File object exposing ioctls available on endpoint zero.
    """
    def halt(self, request_type):
        """
        Halt current endpoint.

        request_type (int)
            bmRequestType of the setup transaction being rejected; only its
            direction bit is used.
        """
        # Halting ep0 is requested by performing a zero-length transfer in
        # the direction of the current setup transaction; the kernel then
        # reports EL2HLT, which is the expected outcome here.
        try:
            if request_type & ch9.USB_DIR_IN:
                self.read(0)
            else:
                self.write(b'')
        except IOError as exc:
            if exc.errno != errno.EL2HLT:
                raise
        else:
            raise ValueError('halt did not return EL2HLT ?')
    def getRealInterfaceNumber(self, interface):
        """
        Returns the host-visible interface number, or None if there is no such
        interface.
        """
        try:
            return self._ioctl(INTERFACE_REVMAP, interface)
        except IOError as exc:
            # EDOM from the kernel means: no such interface mapping.
            if exc.errno == errno.EDOM:
                return None
            raise
    # TODO: Add any standard IOCTL in usb_gadget_ops.ioctl ?
class EndpointFile(EndpointFileBase):
    """
    File object exposing ioctls available on non-zero endpoints.
    """
    # Halt state is tracked locally, as it is set/cleared by this class'
    # own halt()/clearHalt() calls.
    _halted = False
    def getRealEndpointNumber(self):
        """
        Returns the host-visible endpoint number.
        """
        return self._ioctl(ENDPOINT_REVMAP)
    def clearHalt(self):
        """
        Clears endpoint halt, and resets toggle.
        See drivers/usb/gadget/udc/core.c:usb_ep_clear_halt
        """
        self._ioctl(CLEAR_HALT)
        self._halted = False
    def getFIFOStatus(self):
        """
        Returns the number of bytes in fifo.
        """
        return self._ioctl(FIFO_STATUS)
    def flushFIFO(self):
        """
        Discards Endpoint FIFO content.
        """
        self._ioctl(FIFO_FLUSH)
    def getDescriptor(self):
        """
        Returns the currently active endpoint descriptor
        (depending on current USB speed).
        """
        result = USBEndpointDescriptor()
        # mutate_flag=True lets the kernel fill <result> in place.
        self._ioctl(ENDPOINT_DESC, result, True)
        return result
    def _halt(self):
        # Direction-specific; implemented by EndpointINFile/EndpointOUTFile.
        raise NotImplementedError
    def halt(self):
        """
        Halt current endpoint.
        """
        # A successful halt request surfaces as EBADMSG from the kernel.
        try:
            self._halt()
        except IOError as exc:
            if exc.errno != errno.EBADMSG:
                raise
        else:
            raise ValueError('halt did not return EBADMSG ?')
        self._halted = True
    def isHalted(self):
        """
        Whether endpoint is currently halted.
        """
        return self._halted
class EndpointINFile(EndpointFile):
    """
    Endpoint file on which only writes are allowed (IN: device-to-host).
    """
    @staticmethod
    def read(*args, **kw):
        """
        Reading an IN endpoint is meaningless: always raises IOError.
        """
        raise IOError('File not open for reading')
    # Every read flavour shares the same rejection.
    readinto = read
    readall = read
    readlines = read
    readline = read
    @staticmethod
    def readable():
        """
        This file can never be read from.
        """
        return False
    def _halt(self):
        # A zero-length read on an IN endpoint requests the kernel to halt it.
        super(EndpointINFile, self).read(0)
class EndpointOUTFile(EndpointFile):
    """
    Endpoint file on which only reads are allowed (OUT: host-to-device).
    """
    @staticmethod
    def write(*args, **kw):
        """
        Writing an OUT endpoint is meaningless: always raises IOError.
        """
        raise IOError('File not open for writing')
    # Every write flavour shares the same rejection.
    writelines = write
    @staticmethod
    def writable():
        """
        This file can never be written to.
        """
        return False
    def _halt(self):
        # A zero-length write on an OUT endpoint requests the kernel to halt
        # it.
        super(EndpointOUTFile, self).write(b'')
_INFINITY = itertools.repeat(None)
_ONCE = (None, )
class Function(object):
    """
    Pythonic class for interfacing with FunctionFS.
    Properties available:
    function_remote_wakeup_capable (bool)
        Whether the function wishes to be allowed to wake host.
    function_remote_wakeup (bool)
        Whether host has allowed the function to wake it up.
        Set and cleared by onSetup by calling enableRemoteWakeup and
        disableRemoteWakeup, respectively.
    """
    # Set by close(); checked by the event loop so it can exit.
    _closed = False
    _ep_list = () # Avoids failing in __del__ when (subclass') __init__ fails.
    function_remote_wakeup_capable = False
    function_remote_wakeup = False
    def __init__(
        self,
        path,
        fs_list=(), hs_list=(), ss_list=(),
        os_list=(),
        lang_dict={},
        all_ctrl_recip=False, config0_setup=False,
    ):
        """
        path (string)
            Path to the functionfs mountpoint (where the ep* files are
            located).
        {fs,hs,ss}_list (list of descriptors)
            XXX: may change to avoid requiring ctype objects.
        os_list (list of descriptors)
            XXX: may change to avoid requiring ctype objects.
        lang_dict (dict)
            Keys: language id (ex: 0x0402 for "us-en").
            Values: List of unicode objects. First item becomes string
                    descriptor 1, and so on. Must contain at least as many
                    string descriptors as the highest string index declared
                    in all descriptors.
        all_ctrl_recip (bool)
            When true, this function will receive all control transactions.
            Useful when implementing non-standard control transactions.
        config0_setup (bool)
            When true, this function will receive control transactions before
            any configuration gets enabled.
        """
        self._path = path
        ep0 = Endpoint0File(os.path.join(path, 'ep0'), 'r+')
        self._ep_list = ep_list = [ep0]
        self._ep_address_dict = ep_address_dict = {}
        flags = 0
        if all_ctrl_recip:
            flags |= ALL_CTRL_RECIP
        if config0_setup:
            flags |= CONFIG0_SETUP
        # Note: serialise does not prevent its argument from being freed and
        # reallocated. Keep strong references to to-serialise values until
        # after they get written.
        desc = getDescsV2(
            flags,
            fs_list=fs_list,
            hs_list=hs_list,
            ss_list=ss_list,
            os_list=os_list,
        )
        ep0.write(serialise(desc))
        # TODO: try v1 on failure ?
        del desc
        # Note: see above.
        strings = getStrings(lang_dict)
        ep0.write(serialise(strings))
        del strings
        # Open one file per declared endpoint, in declaration order: the
        # ep%u file names follow the order endpoints were declared in.
        for descriptor in ss_list or hs_list or fs_list:
            if descriptor.bDescriptorType == ch9.USB_DT_ENDPOINT:
                assert descriptor.bEndpointAddress not in ep_address_dict, (
                    descriptor,
                    ep_address_dict[descriptor.bEndpointAddress],
                )
                index = len(ep_list)
                ep_address_dict[descriptor.bEndpointAddress] = index
                ep_list.append(
                    (
                        EndpointINFile
                        if descriptor.bEndpointAddress & ch9.USB_DIR_IN
                        else EndpointOUTFile
                    )(
                        os.path.join(path, 'ep%u' % (index, )),
                        'r+',
                    )
                )
    @property
    def ep0(self):
        """
        Endpoint 0, use when handling setup transactions.
        """
        return self._ep_list[0]
    def close(self):
        """
        Close all endpoint file descriptors.
        """
        ep_list = self._ep_list
        while ep_list:
            ep_list.pop().close()
        self._closed = True
    def __del__(self):
        self.close()
    # Maps non-SETUP event types to handler method names.
    __event_dict = {
        BIND: 'onBind',
        UNBIND: 'onUnbind',
        ENABLE: 'onEnable',
        DISABLE: 'onDisable',
        # SETUP: handled specially
        SUSPEND: 'onSuspend',
        RESUME: 'onResume',
    }
    def __process(self, iterator):
        # One read attempt per item yielded by <iterator>: _INFINITY loops
        # until closed, _ONCE reads (at most) once.
        readinto = self.ep0.readinto
        # FunctionFS can queue up to 4 events, so let's read that much.
        event_len = ctypes.sizeof(Event)
        array_type = Event * 4
        buf = bytearray(ctypes.sizeof(array_type))
        event_list = array_type.from_buffer(buf)
        event_dict = self.__event_dict
        for _ in iterator:
            if self._closed:
                break
            try:
                length = readinto(buf)
            except IOError as exc:
                # Interrupted system call: retry the read.
                if exc.errno == errno.EINTR:
                    continue
                raise
            if not length:
                # Note: also catches None, returned when ep0 is non-blocking
                break # TODO: test if this happens when ep0 gets closed
                # (by FunctionFS or in another thread or in a handler)
            # The kernel only returns whole events.
            count, remainder = divmod(length, event_len)
            assert remainder == 0, (length, event_len)
            for index in range(count):
                event = event_list[index]
                event_type = event.type
                if event_type == SETUP:
                    setup = event.u.setup
                    try:
                        self.onSetup(
                            setup.bRequestType,
                            setup.bRequest,
                            setup.wValue,
                            setup.wIndex,
                            setup.wLength,
                        )
                    except:
                        # On *ANY* exception, halt endpoint
                        self.ep0.halt(setup.bRequestType)
                        raise
                else:
                    getattr(self, event_dict[event.type])()
    def processEventsForever(self):
        """
        Process kernel ep0 events until closed.
        ep0 must be in blocking mode, otherwise behaves like `processEvents`.
        """
        self.__process(_INFINITY)
    def processEvents(self):
        """
        Process at least one kernel event if ep0 is in blocking mode.
        Process any already available event if ep0 is in non-blocking mode.
        """
        self.__process(_ONCE)
    def getEndpoint(self, index):
        """
        Return a file object corresponding to given endpoint index,
        in descriptor list order.
        """
        return self._ep_list[index]
    def getEndpointByAddress(self, address):
        """
        Return a file object corresponding to given endpoint address.
        """
        return self.getEndpoint(self._ep_address_dict[address])
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def onBind(self):
        """
        Triggered when FunctionFS signals gadget binding.
        May be overridden in subclass.
        """
        pass
    def onUnbind(self):
        """
        Triggered when FunctionFS signals gadget unbinding.
        May be overridden in subclass.
        """
        pass
    def onEnable(self):
        """
        Called when FunctionFS signals the function was (re)enabled.
        This may happen several times without onDisable being called.
        It must reset the function to its default state.
        May be overridden in subclass.
        """
        self.disableRemoteWakeup()
    def onDisable(self):
        """
        Called when FunctionFS signals the function was (re)disabled.
        This may happen several times without onEnable being called.
        May be overridden in subclass.
        """
        pass
    def disableRemoteWakeup(self):
        """
        Called when host issues a clearFeature request of the "suspend" flag
        on this interface.
        Sets function_remote_wakeup property to False so subsequent getStatus
        requests will return expected value.
        May be overridden in subclass.
        """
        self.function_remote_wakeup = False
    def enableRemoteWakeup(self):
        """
        Called when host issues a setFeature request of the "suspend" flag
        on this interface.
        Sets function_remote_wakeup property to True so subsequent getStatus
        requests will return expected value.
        May be overridden in subclass.
        """
        self.function_remote_wakeup = True
    def onSetup(self, request_type, request, value, index, length):
        """
        Called when a setup USB transaction was received.
        Default implementation:
        - handles USB_REQ_GET_STATUS on interface and endpoints
        - handles USB_REQ_CLEAR_FEATURE(USB_ENDPOINT_HALT) on endpoints
        - handles USB_REQ_SET_FEATURE(USB_ENDPOINT_HALT) on endpoints
        - halts on everything else
        If this method raises anything, endpoint 0 is halted by its caller and
        exception is let through.
        May be overridden in subclass.
        """
        if (request_type & ch9.USB_TYPE_MASK) == ch9.USB_TYPE_STANDARD:
            recipient = request_type & ch9.USB_RECIP_MASK
            is_in = (request_type & ch9.USB_DIR_IN) == ch9.USB_DIR_IN
            if request == ch9.USB_REQ_GET_STATUS:
                if is_in and length == 2:
                    if recipient == ch9.USB_RECIP_INTERFACE:
                        if value == 0:
                            status = 0
                            if index == 0:
                                # Bit 0: remote-wakeup capable,
                                # bit 1: remote-wakeup currently enabled.
                                if self.function_remote_wakeup_capable:
                                    status |= 1 << 0
                                if self.function_remote_wakeup:
                                    status |= 1 << 1
                            self.ep0.write(struct.pack('<H', status)[:length])
                            return
                    elif recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == 0:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                # Unknown endpoint: fall through to halt.
                                pass
                            else:
                                status = 0
                                if endpoint.isHalted():
                                    status |= 1 << 0
                                self.ep0.write(
                                    struct.pack('<H', status)[:length],
                                )
                                return
            elif request == ch9.USB_REQ_CLEAR_FEATURE:
                if not is_in and length == 0:
                    if recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == ch9.USB_ENDPOINT_HALT:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                pass
                            else:
                                endpoint.clearHalt()
                                # Zero-length read acknowledges the transfer.
                                self.ep0.read(0)
                                return
                    elif recipient == ch9.USB_RECIP_INTERFACE:
                        if value == ch9.USB_INTRF_FUNC_SUSPEND:
                            if self.function_remote_wakeup_capable:
                                self.disableRemoteWakeup()
                                self.ep0.read(0)
                                return
            elif request == ch9.USB_REQ_SET_FEATURE:
                if not is_in and length == 0:
                    if recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == ch9.USB_ENDPOINT_HALT:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                pass
                            else:
                                endpoint.halt()
                                self.ep0.read(0)
                                return
                    elif recipient == ch9.USB_RECIP_INTERFACE:
                        if value == ch9.USB_INTRF_FUNC_SUSPEND:
                            if self.function_remote_wakeup_capable:
                                self.enableRemoteWakeup()
                                self.ep0.read(0)
                                return
        # Anything unhandled above: reject the transaction.
        self.ep0.halt(request_type)
    def onSuspend(self):
        """
        Called when FunctionFS signals the host stops USB traffic.
        May be overridden in subclass.
        """
        pass
    def onResume(self):
        """
        Called when FunctionFS signals the host restarts USB traffic.
        May be overridden in subclass.
        """
        pass
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
getDescsV2
|
python
|
def getDescsV2(flags, fs_list=(), hs_list=(), ss_list=(), os_list=()):
    """
    Return a FunctionFS V2 descriptor suitable for serialisation.
    Speed flags (HAS_*_DESC) are added automatically for each non-empty
    descriptor list; setting one without providing descriptors raises
    ValueError.
    """
    count_field_list = []
    descr_field_list = []
    kw = {}
    for descriptor_list, flag, prefix, allowed_descriptor_klass in (
        (fs_list, HAS_FS_DESC, 'fs', USBDescriptorHeader),
        (hs_list, HAS_HS_DESC, 'hs', USBDescriptorHeader),
        (ss_list, HAS_SS_DESC, 'ss', USBDescriptorHeader),
        (os_list, HAS_MS_OS_DESC, 'os', OSDescHeader),
    ):
        if descriptor_list:
            # Validate descriptor types before building ctypes fields.
            for index, descriptor in enumerate(descriptor_list):
                if not isinstance(descriptor, allowed_descriptor_klass):
                    raise TypeError(
                        'Descriptor %r of unexpected type: %r' % (
                            index,
                            type(descriptor),
                        ),
                    )
            descriptor_map = [
                ('desc_%i' % x, y)
                for x, y in enumerate(descriptor_list)
            ]
            # Providing descriptors implies the corresponding speed flag.
            flags |= flag
            count_name = prefix + 'count'
            descr_name = prefix + 'descr'
            count_field_list.append((count_name, le32))
            # Fabricate a packed structure holding this speed's descriptors,
            # one field per descriptor, preserving declaration order.
            descr_type = type(
                't_' + descr_name,
                (ctypes.LittleEndianStructure, ),
                {
                    '_pack_': 1,
                    '_fields_': [
                        (x, type(y))
                        for x, y in descriptor_map
                    ],
                }
            )
            descr_field_list.append((descr_name, descr_type))
            kw[count_name] = len(descriptor_map)
            kw[descr_name] = descr_type(**dict(descriptor_map))
        elif flags & flag:
            raise ValueError(
                'Flag %r set but descriptor list empty, cannot generate type.' % (
                    FLAGS.get(flag),
                )
            )
    # Name the fabricated type after the effective speed flags, for easier
    # debugging of the resulting objects.
    klass = type(
        'DescsV2_0x%02x' % (
            flags & (
                HAS_FS_DESC |
                HAS_HS_DESC |
                HAS_SS_DESC |
                HAS_MS_OS_DESC
            ),
            # XXX: include contained descriptors type information ? (and name ?)
        ),
        (DescsHeadV2, ),
        {
            '_fields_': count_field_list + descr_field_list,
        },
    )
    return klass(
        magic=DESCRIPTORS_MAGIC_V2,
        length=ctypes.sizeof(klass),
        flags=flags,
        **kw
    )
|
Return a FunctionFS descriptor suitable for serialisation.
flags (int)
Any combination of VIRTUAL_ADDR, EVENTFD, ALL_CTRL_RECIP,
CONFIG0_SETUP.
{fs,hs,ss,os}_list (list of descriptors)
Instances of the following classes:
{fs,hs,ss}_list:
USBInterfaceDescriptor
USBEndpointDescriptorNoAudio
USBEndpointDescriptor
USBSSEPCompDescriptor
USBSSPIsocEndpointDescriptor
USBOTGDescriptor
USBOTG20Descriptor
USBInterfaceAssocDescriptor
TODO: HID
All (non-empty) lists must define the same number of interfaces
and endpoints, and endpoint descriptors must be given in the same
order, bEndpointAddress-wise.
os_list:
OSDesc
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L389-L480
|
[
"def get(self, value, default=None):\n return self.reverse_dict.get(value, default)\n"
] |
# This file is part of python-functionfs
# Copyright (C) 2016-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# python-functionfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-functionfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-functionfs. If not, see <http://www.gnu.org/licenses/>.
"""
Interfaces with functionfs to simplify USB gadget function declaration and
implementation on linux.
Defines standard USB descriptors (see "ch9" submodule) and sends them to the
kernel to declare function's structure.
Provides methods for accessing each endpoint and to react to events.
"""
import ctypes
import errno
import fcntl
import io
import itertools
import math
import os
import struct
import warnings
from .common import (
USBDescriptorHeader,
le32,
)
from . import ch9
from .ch9 import (
USBInterfaceDescriptor,
USBEndpointDescriptorNoAudio,
USBEndpointDescriptor,
USBSSEPCompDescriptor,
# USBSSPIsocEndpointDescriptor is not implemented in kernel as of this
# writing.
USBSSPIsocEndpointDescriptor,
# USBQualifierDescriptor is reserved for gadgets, so don't expose it.
USBOTGDescriptor,
USBOTG20Descriptor,
    # USBDebugDescriptor is not implemented in kernel as of this writing.
USBDebugDescriptor,
USBInterfaceAssocDescriptor,
)
from .functionfs import (
DESCRIPTORS_MAGIC, STRINGS_MAGIC, DESCRIPTORS_MAGIC_V2,
FLAGS,
DescsHeadV2,
DescsHead,
OSDescHeader,
OSDescHeaderBCount,
OSExtCompatDesc,
OSExtPropDescHead,
StringsHead,
StringBase,
Event,
FIFO_STATUS, FIFO_FLUSH, CLEAR_HALT, INTERFACE_REVMAP, ENDPOINT_REVMAP, ENDPOINT_DESC,
)
# pylint: disable=no-name-in-module
from .functionfs import (
HAS_FS_DESC,
HAS_HS_DESC,
HAS_SS_DESC,
HAS_MS_OS_DESC,
ALL_CTRL_RECIP,
CONFIG0_SETUP,
BIND, UNBIND, ENABLE, DISABLE, SETUP, SUSPEND, RESUME,
)
# pylint: enable=no-name-in-module
__all__ = (
'ch9',
'Function',
# XXX: Not very pythonic...
'getInterfaceInAllSpeeds',
'getDescriptor',
'getOSDesc',
'getOSExtPropDesc',
'USBInterfaceDescriptor',
'USBEndpointDescriptorNoAudio',
'USBEndpointDescriptor',
'USBSSEPCompDescriptor',
'USBSSPIsocEndpointDescriptor',
'USBOTGDescriptor',
'USBOTG20Descriptor',
'USBDebugDescriptor',
'USBInterfaceAssocDescriptor',
'OSExtCompatDesc',
)
_MAX_PACKET_SIZE_DICT = {
ch9.USB_ENDPOINT_XFER_ISOC: (
1023, # 0..1023
1024, # 0..1024
1024, # 0..1024
),
ch9.USB_ENDPOINT_XFER_BULK: (
64, # 8, 16, 32, 64
512, # 512 only
1024, # 1024 only
),
ch9.USB_ENDPOINT_XFER_INT: (
64, # 0..64
1024, # 0..1024
1024, # 1..1024
),
}
_MARKER = object()
_EMPTY_DICT = {} # For internal ** fallback usage
def getInterfaceInAllSpeeds(interface, endpoint_list, class_descriptor_list=()):
"""
Produce similar fs, hs and ss interface and endpoints descriptors.
Should be useful for devices desiring to work in all 3 speeds with maximum
endpoint wMaxPacketSize. Reduces data duplication from descriptor
declarations.
Not intended to cover fancy combinations.
interface (dict):
Keyword arguments for
getDescriptor(USBInterfaceDescriptor, ...)
in all speeds.
bNumEndpoints must not be provided.
endpoint_list (list of dicts)
Each dict represents an endpoint, and may contain the following items:
- "endpoint": required, contains keyword arguments for
getDescriptor(USBEndpointDescriptorNoAudio, ...)
or
getDescriptor(USBEndpointDescriptor, ...)
The with-audio variant is picked when its extra fields are assigned a
value.
wMaxPacketSize may be missing, in which case it will be set to the
maximum size for given speed and endpoint type.
bmAttributes must be provided.
If bEndpointAddress is zero (excluding direction bit) on the first
endpoint, endpoints will be assigned their rank in this list,
starting at 1. Their direction bit is preserved.
If bInterval is present on a INT or ISO endpoint, it must be in
millisecond units (but may not be an integer), and will be converted
to the nearest integer millisecond for full-speed descriptor, and
nearest possible interval for high- and super-speed descriptors.
If bInterval is present on a BULK endpoint, it is set to zero on
full-speed descriptor and used as provided on high- and super-speed
descriptors.
- "superspeed": optional, contains keyword arguments for
getDescriptor(USBSSEPCompDescriptor, ...)
- "superspeed_iso": optional, contains keyword arguments for
getDescriptor(USBSSPIsocEndpointDescriptor, ...)
Must be provided and non-empty only when endpoint is isochronous and
"superspeed" dict has "bmAttributes" bit 7 set.
class_descriptor (list of descriptors of any type)
Descriptors to insert in all speeds between the interface descriptor and
endpoint descriptors.
Returns a 3-tuple of lists:
- fs descriptors
- hs descriptors
- ss descriptors
"""
interface = getDescriptor(
USBInterfaceDescriptor,
bNumEndpoints=len(endpoint_list),
**interface
)
class_descriptor_list = list(class_descriptor_list)
fs_list = [interface] + class_descriptor_list
hs_list = [interface] + class_descriptor_list
ss_list = [interface] + class_descriptor_list
need_address = (
endpoint_list[0]['endpoint'].get(
'bEndpointAddress',
0,
) & ~ch9.USB_DIR_IN == 0
)
for index, endpoint in enumerate(endpoint_list, 1):
endpoint_kw = endpoint['endpoint'].copy()
transfer_type = endpoint_kw[
'bmAttributes'
] & ch9.USB_ENDPOINT_XFERTYPE_MASK
fs_max, hs_max, ss_max = _MAX_PACKET_SIZE_DICT[transfer_type]
if need_address:
endpoint_kw['bEndpointAddress'] = index | (
endpoint_kw.get('bEndpointAddress', 0) & ch9.USB_DIR_IN
)
klass = (
USBEndpointDescriptor
if 'bRefresh' in endpoint_kw or 'bSynchAddress' in endpoint_kw else
USBEndpointDescriptorNoAudio
)
interval = endpoint_kw.pop('bInterval', _MARKER)
if interval is _MARKER:
fs_interval = hs_interval = 0
else:
if transfer_type == ch9.USB_ENDPOINT_XFER_BULK:
fs_interval = 0
hs_interval = interval
else: # USB_ENDPOINT_XFER_ISOC or USB_ENDPOINT_XFER_INT
fs_interval = max(1, min(255, round(interval)))
# 8 is the number of microframes in a millisecond
hs_interval = max(
1,
min(16, int(round(1 + math.log(interval * 8, 2)))),
)
packet_size = endpoint_kw.pop('wMaxPacketSize', _MARKER)
if packet_size is _MARKER:
fs_packet_size = fs_max
hs_packet_size = hs_max
ss_packet_size = ss_max
else:
fs_packet_size = min(fs_max, packet_size)
hs_packet_size = min(hs_max, packet_size)
ss_packet_size = min(ss_max, packet_size)
fs_list.append(getDescriptor(
klass,
wMaxPacketSize=fs_max,
bInterval=fs_interval,
**endpoint_kw
))
hs_list.append(getDescriptor(
klass,
wMaxPacketSize=hs_max,
bInterval=hs_interval,
**endpoint_kw
))
ss_list.append(getDescriptor(
klass,
wMaxPacketSize=ss_max,
bInterval=hs_interval,
**endpoint_kw
))
ss_companion_kw = endpoint.get('superspeed', _EMPTY_DICT)
ss_list.append(getDescriptor(
USBSSEPCompDescriptor,
**ss_companion_kw
))
ssp_iso_kw = endpoint.get('superspeed_iso', _EMPTY_DICT)
if bool(ssp_iso_kw) != (
endpoint_kw.get('bmAttributes', 0) &
ch9.USB_ENDPOINT_XFERTYPE_MASK ==
ch9.USB_ENDPOINT_XFER_ISOC and
bool(ch9.USB_SS_SSP_ISOC_COMP(
ss_companion_kw.get('bmAttributes', 0),
))
):
raise ValueError('Inconsistent isochronous companion')
if ssp_iso_kw:
ss_list.append(getDescriptor(
USBSSPIsocEndpointDescriptor,
**ssp_iso_kw
))
return (fs_list, hs_list, ss_list)
def getDescriptor(klass, **kw):
    """
    Instantiate <klass>, automatically filling bLength and bDescriptorType.

    Raises TypeError if any keyword argument does not name a field of the
    descriptor, as ctypes would otherwise silently drop it.
    """
    # ctypes Structure.__init__ silently ignores keyword arguments which do
    # not match a structure field, which makes typos painful to track down.
    # Probe a blank instance first and reject anything unrecognised.
    probe = klass()
    assert hasattr(probe, 'bLength')
    assert hasattr(probe, 'bDescriptorType')
    bogus = [name for name in kw if not hasattr(probe, name)]
    if bogus:
        raise TypeError('Unknown fields %r' % (bogus, ))
    # XXX: not very pythonic...
    return klass(
        bLength=ctypes.sizeof(klass),
        # pylint: disable=protected-access
        bDescriptorType=klass._bDescriptorType,
        # pylint: enable=protected-access
        **kw
    )
def getOSDesc(interface, ext_list):
    """
    Build an OS description header for one interface.

    interface (int)
        Related interface number.
    ext_list (list of OSExtCompatDesc or OSExtPropDesc)
        List of instances of extended descriptors. All entries must be of
        the same type.
    """
    # An empty list or a mix of extension types is equally invalid.
    type_set = {type(x) for x in ext_list}
    if len(type_set) != 1:
        raise TypeError('Extensions of a single type are required.')
    ext_type = type_set.pop()
    if issubclass(ext_type, OSExtCompatDesc):
        wIndex = 4
        extra_kw = {
            'b': OSDescHeaderBCount(
                bCount=len(ext_list),
                Reserved=0,
            ),
        }
    elif issubclass(ext_type, OSExtPropDescHead):
        wIndex = 5
        extra_kw = {
            'wCount': len(ext_list),
        }
    else:
        raise TypeError('Extensions of unexpected type')
    # The tail is variable-length, so fabricate a dedicated ctypes type.
    array_type = ext_type * len(ext_list)
    header_type = type(
        'OSDesc',
        (OSDescHeader, ),
        {
            '_fields_': [
                ('ext_list', array_type),
            ],
        },
    )
    return header_type(
        interface=interface,
        dwLength=ctypes.sizeof(header_type),
        bcdVersion=1,
        wIndex=wIndex,
        ext_list=array_type(*ext_list),
        **extra_kw
    )
def getOSExtPropDesc(data_type, name, value):
    """
    Build an OS extension property descriptor.

    data_type (int)
        See wPropertyDataType documentation.
    name (string)
        See PropertyName documentation.
    value (string)
        See PropertyData documentation.
        NULL chars must be explicitely included in the value when needed,
        this function does not add any terminating NULL for example.
    """
    # Name and value lengths vary per call, so a dedicated ctypes type is
    # fabricated each time.
    prop_type = type(
        'OSExtPropDesc',
        (OSExtPropDescHead, ),
        {
            '_fields_': [
                ('bPropertyName', ctypes.c_char * len(name)),
                ('dwPropertyDataLength', le32),
                ('bProperty', ctypes.c_char * len(value)),
            ],
        }
    )
    return prop_type(
        dwSize=ctypes.sizeof(prop_type),
        dwPropertyDataType=data_type,
        wPropertyNameLength=len(name),
        bPropertyName=name,
        dwPropertyDataLength=len(value),
        bProperty=value,
    )
#def getDescs(*args, **kw):
# """
# Return a legacy format FunctionFS suitable for serialisation.
# Deprecated as of 3.14 .
#
# NOT IMPLEMENTED
# """
# warnings.warn(
# DeprecationWarning,
# 'Legacy format, deprecated as of 3.14.',
# )
# raise NotImplementedError('TODO')
# klass = type(
# 'Descs',
# (DescsHead, ),
# {
# 'fs_descrs': None, # TODO
# 'hs_descrs': None, # TODO
# },
# )
# return klass(
# magic=DESCRIPTORS_MAGIC,
# length=ctypes.sizeof(klass),
# **kw
# )
def getStrings(lang_dict):
    """
    Return a FunctionFS strings descriptor suitable for serialisation.

    lang_dict (dict)
        Key: language ID (ex: 0x0409 for en-us)
        Value: list of unicode objects
        All values must have the same number of items.
    """
    # Use the first language's string count as the reference; every other
    # language must provide exactly as many strings.
    try:
        str_count = len(next(iter(lang_dict.values())))
    except StopIteration:
        str_count = 0
    field_list = []
    kw = {}
    for lang, string_list in lang_dict.items():
        if len(string_list) != str_count:
            raise ValueError('All values must have the same string count.')
        # Strings are NUL-terminated and concatenated, per FunctionFS format.
        blob = b'\x00'.join(s.encode('utf-8') for s in string_list) + b'\x00'
        attr_name = 'strings_%04x' % lang
        attr_type = type(
            'String',
            (StringBase, ),
            {
                '_fields_': [
                    ('strings', ctypes.c_char * len(blob)),
                ],
            },
        )
        field_list.append((attr_name, attr_type))
        kw[attr_name] = attr_type(
            lang=lang,
            strings=blob,
        )
    klass = type(
        'Strings',
        (StringsHead, ),
        {
            '_fields_': field_list,
        },
    )
    return klass(
        magic=STRINGS_MAGIC,
        length=ctypes.sizeof(klass),
        str_count=str_count,
        lang_count=len(lang_dict),
        **kw
    )
def serialise(structure):
    """
    Expose <structure> as a raw byte array sharing its memory.

    structure (ctypes.Structure)
        The structure to serialise.

    Returns a ctypes.c_char array.
    Does not copy memory.
    """
    byte_array_type = ctypes.c_char * ctypes.sizeof(structure)
    return ctypes.cast(
        ctypes.pointer(structure),
        ctypes.POINTER(byte_array_type),
    ).contents
class EndpointFileBase(io.FileIO):
    """
    File object representing a endpoint. Abstract.
    """
    def _ioctl(self, func, *args, **kw):
        # Surface negative ioctl return values as IOError; on success the
        # (non-negative) value is returned to the caller.
        status = fcntl.ioctl(self, func, *args, **kw)
        if status < 0:
            raise IOError(status)
        return status
class Endpoint0File(EndpointFileBase):
    """
    File object exposing ioctls available on endpoint zero.
    """
    def halt(self, request_type):
        """
        Halt current endpoint.
        """
        # Stalling ep0 is requested by reading (IN) or writing (OUT) while a
        # setup transaction is pending; the kernel reports success as EL2HLT.
        try:
            if request_type & ch9.USB_DIR_IN:
                self.read(0)
            else:
                self.write(b'')
        except IOError as exc:
            if exc.errno != errno.EL2HLT:
                raise
        else:
            raise ValueError('halt did not return EL2HLT ?')
    def getRealInterfaceNumber(self, interface):
        """
        Returns the host-visible interface number, or None if there is no such
        interface.
        """
        try:
            return self._ioctl(INTERFACE_REVMAP, interface)
        except IOError as exc:
            if exc.errno != errno.EDOM:
                raise
            return None
    # TODO: Add any standard IOCTL in usb_gadget_ops.ioctl ?
class EndpointFile(EndpointFileBase):
    """
    File object exposing ioctls available on non-zero endpoints.
    """
    # Locally-tracked halt state, reported by isHalted.
    _halted = False
    def getRealEndpointNumber(self):
        """
        Returns the host-visible endpoint number.
        """
        return self._ioctl(ENDPOINT_REVMAP)
    def clearHalt(self):
        """
        Clears endpoint halt, and resets toggle.
        See drivers/usb/gadget/udc/core.c:usb_ep_clear_halt
        """
        self._ioctl(CLEAR_HALT)
        self._halted = False
    def getFIFOStatus(self):
        """
        Returns the number of bytes in fifo.
        """
        return self._ioctl(FIFO_STATUS)
    def flushFIFO(self):
        """
        Discards Endpoint FIFO content.
        """
        self._ioctl(FIFO_FLUSH)
    def getDescriptor(self):
        """
        Returns the currently active endpoint descriptor
        (depending on current USB speed).
        """
        descriptor = USBEndpointDescriptor()
        self._ioctl(ENDPOINT_DESC, descriptor, True)
        return descriptor
    def _halt(self):
        # Direction-specific; implemented by EndpointINFile/EndpointOUTFile.
        raise NotImplementedError
    def halt(self):
        """
        Halt current endpoint.
        """
        # A successful stall is reported by the kernel as EBADMSG.
        try:
            self._halt()
        except IOError as exc:
            if exc.errno != errno.EBADMSG:
                raise
        else:
            raise ValueError('halt did not return EBADMSG ?')
        self._halted = True
    def isHalted(self):
        """
        Whether endpoint is currently halted.
        """
        return self._halted
class EndpointINFile(EndpointFile):
    """
    Write-only endpoint file.
    """
    @staticmethod
    def read(*_, **__):
        """
        Always raises IOError.
        """
        raise IOError('File not open for reading')
    # Every read-flavoured entry point shares the same always-raising stub.
    readinto = read
    readall = read
    readlines = read
    readline = read
    @staticmethod
    def readable():
        """
        Never readable.
        """
        return False
    def _halt(self):
        # A zero-length read on an IN endpoint asks the kernel to stall it;
        # bypass the always-raising override above via super().
        super(EndpointINFile, self).read(0)
class EndpointOUTFile(EndpointFile):
    """
    Read-only endpoint file.
    """
    @staticmethod
    def write(*_, **__):
        """
        Always raises IOError.
        """
        raise IOError('File not open for writing')
    # writelines shares the same always-raising stub.
    writelines = write
    @staticmethod
    def writable():
        """
        Never writable.
        """
        return False
    def _halt(self):
        # A zero-length write on an OUT endpoint asks the kernel to stall it;
        # bypass the always-raising override above via super().
        super(EndpointOUTFile, self).write(b'')
# Iteration drivers for Function event processing: an endless iterator for
# processEventsForever, and a single-pass one for processEvents.
_INFINITY = itertools.repeat(None)
_ONCE = (None, )
class Function(object):
    """
    Pythonic class for interfacing with FunctionFS.
    Properties available:
    function_remote_wakeup_capable (bool)
        Whether the function wishes to be allowed to wake host.
    function_remote_wakeup (bool)
        Whether host has allowed the function to wake it up.
        Set and cleared by onSetup by calling enableRemoteWakeup and
        disableRemoteWakeup, respectively.
    """
    _closed = False
    _ep_list = () # Avoids failing in __del__ when (subclass') __init__ fails.
    function_remote_wakeup_capable = False
    function_remote_wakeup = False
    # NOTE(review): lang_dict uses a mutable default argument; harmless here
    # because it is only read (by getStrings), never mutated.
    def __init__(
        self,
        path,
        fs_list=(), hs_list=(), ss_list=(),
        os_list=(),
        lang_dict={},
        all_ctrl_recip=False, config0_setup=False,
    ):
        """
        path (string)
            Path to the functionfs mountpoint (where the ep* files are
            located).
        {fs,hs,ss}_list (list of descriptors)
            XXX: may change to avoid requiring ctype objects.
        os_list (list of descriptors)
            XXX: may change to avoid requiring ctype objects.
        lang_dict (dict)
            Keys: language id (ex: 0x0402 for "us-en").
            Values: List of unicode objects. First item becomes string
                    descriptor 1, and so on. Must contain at least as many
                    string descriptors as the highest string index declared
                    in all descriptors.
        all_ctrl_recip (bool)
            When true, this function will receive all control transactions.
            Useful when implementing non-standard control transactions.
        config0_setup (bool)
            When true, this function will receive control transactions before
            any configuration gets enabled.
        """
        self._path = path
        # Writing descriptors then strings to ep0 registers the function
        # with the kernel.
        ep0 = Endpoint0File(os.path.join(path, 'ep0'), 'r+')
        self._ep_list = ep_list = [ep0]
        self._ep_address_dict = ep_address_dict = {}
        flags = 0
        if all_ctrl_recip:
            flags |= ALL_CTRL_RECIP
        if config0_setup:
            flags |= CONFIG0_SETUP
        # Note: serialise does not prevent its argument from being freed and
        # reallocated. Keep strong references to to-serialise values until
        # after they get written.
        desc = getDescsV2(
            flags,
            fs_list=fs_list,
            hs_list=hs_list,
            ss_list=ss_list,
            os_list=os_list,
        )
        ep0.write(serialise(desc))
        # TODO: try v1 on failure ?
        del desc
        # Note: see above.
        strings = getStrings(lang_dict)
        ep0.write(serialise(strings))
        del strings
        # Open one file per declared endpoint; ep<N> files appear in the
        # functionfs mountpoint in descriptor order, starting at 1.
        for descriptor in ss_list or hs_list or fs_list:
            if descriptor.bDescriptorType == ch9.USB_DT_ENDPOINT:
                assert descriptor.bEndpointAddress not in ep_address_dict, (
                    descriptor,
                    ep_address_dict[descriptor.bEndpointAddress],
                )
                index = len(ep_list)
                ep_address_dict[descriptor.bEndpointAddress] = index
                ep_list.append(
                    (
                        EndpointINFile
                        if descriptor.bEndpointAddress & ch9.USB_DIR_IN
                        else EndpointOUTFile
                    )(
                        os.path.join(path, 'ep%u' % (index, )),
                        'r+',
                    )
                )
    @property
    def ep0(self):
        """
        Endpoint 0, use when handling setup transactions.
        """
        return self._ep_list[0]
    def close(self):
        """
        Close all endpoint file descriptors.
        """
        ep_list = self._ep_list
        while ep_list:
            ep_list.pop().close()
        self._closed = True
    def __del__(self):
        self.close()
    # Maps kernel event types to handler method names. SETUP is dispatched
    # inline in __process because it carries a payload.
    __event_dict = {
        BIND: 'onBind',
        UNBIND: 'onUnbind',
        ENABLE: 'onEnable',
        DISABLE: 'onDisable',
        # SETUP: handled specially
        SUSPEND: 'onSuspend',
        RESUME: 'onResume',
    }
    def __process(self, iterator):
        # Read kernel events from ep0 and dispatch them to handlers; the
        # iterator controls how many read attempts are made (one per item).
        readinto = self.ep0.readinto
        # FunctionFS can queue up to 4 events, so let's read that much.
        event_len = ctypes.sizeof(Event)
        array_type = Event * 4
        buf = bytearray(ctypes.sizeof(array_type))
        event_list = array_type.from_buffer(buf)
        event_dict = self.__event_dict
        for _ in iterator:
            if self._closed:
                break
            try:
                length = readinto(buf)
            except IOError as exc:
                # Interrupted reads are retried, anything else propagates.
                if exc.errno == errno.EINTR:
                    continue
                raise
            if not length:
                # Note: also catches None, returned when ep0 is non-blocking
                break # TODO: test if this happens when ep0 gets closed
                      # (by FunctionFS or in another thread or in a handler)
            count, remainder = divmod(length, event_len)
            assert remainder == 0, (length, event_len)
            for index in range(count):
                event = event_list[index]
                event_type = event.type
                if event_type == SETUP:
                    setup = event.u.setup
                    try:
                        self.onSetup(
                            setup.bRequestType,
                            setup.bRequest,
                            setup.wValue,
                            setup.wIndex,
                            setup.wLength,
                        )
                    except:
                        # On *ANY* exception, halt endpoint
                        self.ep0.halt(setup.bRequestType)
                        raise
                else:
                    getattr(self, event_dict[event.type])()
    def processEventsForever(self):
        """
        Process kernel ep0 events until closed.
        ep0 must be in blocking mode, otherwise behaves like `processEvents`.
        """
        self.__process(_INFINITY)
    def processEvents(self):
        """
        Process at least one kernel event if ep0 is in blocking mode.
        Process any already available event if ep0 is in non-blocking mode.
        """
        self.__process(_ONCE)
    def getEndpoint(self, index):
        """
        Return a file object corresponding to given endpoint index,
        in descriptor list order.
        """
        return self._ep_list[index]
    def getEndpointByAddress(self, address):
        """
        Return a file object corresponding to given endpoint address.
        """
        return self.getEndpoint(self._ep_address_dict[address])
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def onBind(self):
        """
        Triggered when FunctionFS signals gadget binding.
        May be overridden in subclass.
        """
        pass
    def onUnbind(self):
        """
        Triggered when FunctionFS signals gadget unbinding.
        May be overridden in subclass.
        """
        pass
    def onEnable(self):
        """
        Called when FunctionFS signals the function was (re)enabled.
        This may happen several times without onDisable being called.
        It must reset the function to its default state.
        May be overridden in subclass.
        """
        self.disableRemoteWakeup()
    def onDisable(self):
        """
        Called when FunctionFS signals the function was (re)disabled.
        This may happen several times without onEnable being called.
        May be overridden in subclass.
        """
        pass
    def disableRemoteWakeup(self):
        """
        Called when host issues a clearFeature request of the "suspend" flag
        on this interface.
        Sets function_remote_wakeup property to False so subsequent getStatus
        requests will return expected value.
        May be overridden in subclass.
        """
        self.function_remote_wakeup = False
    def enableRemoteWakeup(self):
        """
        Called when host issues a setFeature request of the "suspend" flag
        on this interface.
        Sets function_remote_wakeup property to True so subsequent getStatus
        requests will return expected value.
        May be overridden in subclass.
        """
        self.function_remote_wakeup = True
    def onSetup(self, request_type, request, value, index, length):
        """
        Called when a setup USB transaction was received.
        Default implementation:
        - handles USB_REQ_GET_STATUS on interface and endpoints
        - handles USB_REQ_CLEAR_FEATURE(USB_ENDPOINT_HALT) on endpoints
        - handles USB_REQ_SET_FEATURE(USB_ENDPOINT_HALT) on endpoints
        - halts on everything else
        If this method raises anything, endpoint 0 is halted by its caller and
        exception is let through.
        May be overridden in subclass.
        """
        if (request_type & ch9.USB_TYPE_MASK) == ch9.USB_TYPE_STANDARD:
            recipient = request_type & ch9.USB_RECIP_MASK
            is_in = (request_type & ch9.USB_DIR_IN) == ch9.USB_DIR_IN
            if request == ch9.USB_REQ_GET_STATUS:
                if is_in and length == 2:
                    if recipient == ch9.USB_RECIP_INTERFACE:
                        if value == 0:
                            status = 0
                            if index == 0:
                                if self.function_remote_wakeup_capable:
                                    status |= 1 << 0
                                if self.function_remote_wakeup:
                                    status |= 1 << 1
                            self.ep0.write(struct.pack('<H', status)[:length])
                            return
                    elif recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == 0:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                # Unknown endpoint: fall through to stall.
                                pass
                            else:
                                status = 0
                                if endpoint.isHalted():
                                    status |= 1 << 0
                                self.ep0.write(
                                    struct.pack('<H', status)[:length],
                                )
                                return
            elif request == ch9.USB_REQ_CLEAR_FEATURE:
                if not is_in and length == 0:
                    if recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == ch9.USB_ENDPOINT_HALT:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                pass
                            else:
                                endpoint.clearHalt()
                                # Zero-length status phase acknowledges.
                                self.ep0.read(0)
                                return
                    elif recipient == ch9.USB_RECIP_INTERFACE:
                        if value == ch9.USB_INTRF_FUNC_SUSPEND:
                            if self.function_remote_wakeup_capable:
                                self.disableRemoteWakeup()
                                self.ep0.read(0)
                                return
            elif request == ch9.USB_REQ_SET_FEATURE:
                if not is_in and length == 0:
                    if recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == ch9.USB_ENDPOINT_HALT:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                pass
                            else:
                                endpoint.halt()
                                self.ep0.read(0)
                                return
                    elif recipient == ch9.USB_RECIP_INTERFACE:
                        if value == ch9.USB_INTRF_FUNC_SUSPEND:
                            if self.function_remote_wakeup_capable:
                                self.enableRemoteWakeup()
                                self.ep0.read(0)
                                return
        # Anything unhandled stalls endpoint zero.
        self.ep0.halt(request_type)
    def onSuspend(self):
        """
        Called when FunctionFS signals the host stops USB traffic.
        May be overridden in subclass.
        """
        pass
    def onResume(self):
        """
        Called when FunctionFS signals the host restarts USB traffic.
        May be overridden in subclass.
        """
        pass
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
getStrings
|
python
|
def getStrings(lang_dict):
field_list = []
kw = {}
try:
str_count = len(next(iter(lang_dict.values())))
except StopIteration:
str_count = 0
else:
for lang, string_list in lang_dict.items():
if len(string_list) != str_count:
raise ValueError('All values must have the same string count.')
field_id = 'strings_%04x' % lang
strings = b'\x00'.join(x.encode('utf-8') for x in string_list) + b'\x00'
field_type = type(
'String',
(StringBase, ),
{
'_fields_': [
('strings', ctypes.c_char * len(strings)),
],
},
)
field_list.append((field_id, field_type))
kw[field_id] = field_type(
lang=lang,
strings=strings,
)
klass = type(
'Strings',
(StringsHead, ),
{
'_fields_': field_list,
},
)
return klass(
magic=STRINGS_MAGIC,
length=ctypes.sizeof(klass),
str_count=str_count,
lang_count=len(lang_dict),
**kw
)
|
Return a FunctionFS descriptor suitable for serialisation.
lang_dict (dict)
Key: language ID (ex: 0x0409 for en-us)
Value: list of unicode objects
All values must have the same number of items.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L482-L530
| null |
# This file is part of python-functionfs
# Copyright (C) 2016-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# python-functionfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-functionfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-functionfs. If not, see <http://www.gnu.org/licenses/>.
"""
Interfaces with functionfs to simplify USB gadget function declaration and
implementation on linux.
Defines standard USB descriptors (see "ch9" submodule) and sends them to the
kernel to declare function's structure.
Provides methods for accessing each endpoint and to react to events.
"""
import ctypes
import errno
import fcntl
import io
import itertools
import math
import os
import struct
import warnings
from .common import (
USBDescriptorHeader,
le32,
)
from . import ch9
from .ch9 import (
USBInterfaceDescriptor,
USBEndpointDescriptorNoAudio,
USBEndpointDescriptor,
USBSSEPCompDescriptor,
# USBSSPIsocEndpointDescriptor is not implemented in kernel as of this
# writing.
USBSSPIsocEndpointDescriptor,
# USBQualifierDescriptor is reserved for gadgets, so don't expose it.
USBOTGDescriptor,
USBOTG20Descriptor,
# USBDebugDescriptor is not implemented in kernelas of this writing.
USBDebugDescriptor,
USBInterfaceAssocDescriptor,
)
from .functionfs import (
DESCRIPTORS_MAGIC, STRINGS_MAGIC, DESCRIPTORS_MAGIC_V2,
FLAGS,
DescsHeadV2,
DescsHead,
OSDescHeader,
OSDescHeaderBCount,
OSExtCompatDesc,
OSExtPropDescHead,
StringsHead,
StringBase,
Event,
FIFO_STATUS, FIFO_FLUSH, CLEAR_HALT, INTERFACE_REVMAP, ENDPOINT_REVMAP, ENDPOINT_DESC,
)
# pylint: disable=no-name-in-module
from .functionfs import (
HAS_FS_DESC,
HAS_HS_DESC,
HAS_SS_DESC,
HAS_MS_OS_DESC,
ALL_CTRL_RECIP,
CONFIG0_SETUP,
BIND, UNBIND, ENABLE, DISABLE, SETUP, SUSPEND, RESUME,
)
# pylint: enable=no-name-in-module
__all__ = (
'ch9',
'Function',
# XXX: Not very pythonic...
'getInterfaceInAllSpeeds',
'getDescriptor',
'getOSDesc',
'getOSExtPropDesc',
'USBInterfaceDescriptor',
'USBEndpointDescriptorNoAudio',
'USBEndpointDescriptor',
'USBSSEPCompDescriptor',
'USBSSPIsocEndpointDescriptor',
'USBOTGDescriptor',
'USBOTG20Descriptor',
'USBDebugDescriptor',
'USBInterfaceAssocDescriptor',
'OSExtCompatDesc',
)
# Maximum wMaxPacketSize per endpoint transfer type, as a 3-tuple indexed by
# speed: (full-speed, high-speed, super-speed). Inline comments list the
# ranges allowed by the USB specifications.
_MAX_PACKET_SIZE_DICT = {
    ch9.USB_ENDPOINT_XFER_ISOC: (
        1023, # 0..1023
        1024, # 0..1024
        1024, # 0..1024
    ),
    ch9.USB_ENDPOINT_XFER_BULK: (
        64, # 8, 16, 32, 64
        512, # 512 only
        1024, # 1024 only
    ),
    ch9.USB_ENDPOINT_XFER_INT: (
        64, # 0..64
        1024, # 0..1024
        1024, # 1..1024
    ),
}
# Sentinel distinguishing "argument not provided" from any real value.
_MARKER = object()
_EMPTY_DICT = {} # For internal ** fallback usage; must never be mutated.
def getInterfaceInAllSpeeds(interface, endpoint_list, class_descriptor_list=()):
    """
    Produce similar fs, hs and ss interface and endpoints descriptors.
    Should be useful for devices desiring to work in all 3 speeds with maximum
    endpoint wMaxPacketSize. Reduces data duplication from descriptor
    declarations.
    Not intended to cover fancy combinations.
    interface (dict):
        Keyword arguments for
            getDescriptor(USBInterfaceDescriptor, ...)
        in all speeds.
        bNumEndpoints must not be provided.
    endpoint_list (list of dicts)
        Each dict represents an endpoint, and may contain the following items:
        - "endpoint": required, contains keyword arguments for
            getDescriptor(USBEndpointDescriptorNoAudio, ...)
          or
            getDescriptor(USBEndpointDescriptor, ...)
          The with-audio variant is picked when its extra fields are assigned a
          value.
          wMaxPacketSize may be missing, in which case it will be set to the
          maximum size for given speed and endpoint type. When provided, it is
          clamped to that per-speed maximum.
          bmAttributes must be provided.
          If bEndpointAddress is zero (excluding direction bit) on the first
          endpoint, endpoints will be assigned their rank in this list,
          starting at 1. Their direction bit is preserved.
          If bInterval is present on a INT or ISO endpoint, it must be in
          millisecond units (but may not be an integer), and will be converted
          to the nearest integer millisecond for full-speed descriptor, and
          nearest possible interval for high- and super-speed descriptors.
          If bInterval is present on a BULK endpoint, it is set to zero on
          full-speed descriptor and used as provided on high- and super-speed
          descriptors.
        - "superspeed": optional, contains keyword arguments for
            getDescriptor(USBSSEPCompDescriptor, ...)
        - "superspeed_iso": optional, contains keyword arguments for
            getDescriptor(USBSSPIsocEndpointDescriptor, ...)
          Must be provided and non-empty only when endpoint is isochronous and
          "superspeed" dict has "bmAttributes" bit 7 set.
    class_descriptor_list (list of descriptors of any type)
        Descriptors to insert in all speeds between the interface descriptor
        and endpoint descriptors.
    Returns a 3-tuple of lists:
    - fs descriptors
    - hs descriptors
    - ss descriptors
    """
    interface = getDescriptor(
        USBInterfaceDescriptor,
        bNumEndpoints=len(endpoint_list),
        **interface
    )
    class_descriptor_list = list(class_descriptor_list)
    fs_list = [interface] + class_descriptor_list
    hs_list = [interface] + class_descriptor_list
    ss_list = [interface] + class_descriptor_list
    # Auto-number endpoints only when the first one has no address
    # (direction bit excluded).
    need_address = (
        endpoint_list[0]['endpoint'].get(
            'bEndpointAddress',
            0,
        ) & ~ch9.USB_DIR_IN == 0
    )
    for index, endpoint in enumerate(endpoint_list, 1):
        endpoint_kw = endpoint['endpoint'].copy()
        transfer_type = endpoint_kw[
            'bmAttributes'
        ] & ch9.USB_ENDPOINT_XFERTYPE_MASK
        fs_max, hs_max, ss_max = _MAX_PACKET_SIZE_DICT[transfer_type]
        if need_address:
            endpoint_kw['bEndpointAddress'] = index | (
                endpoint_kw.get('bEndpointAddress', 0) & ch9.USB_DIR_IN
            )
        # The with-audio descriptor variant is needed only when its extra
        # fields are used.
        klass = (
            USBEndpointDescriptor
            if 'bRefresh' in endpoint_kw or 'bSynchAddress' in endpoint_kw else
            USBEndpointDescriptorNoAudio
        )
        interval = endpoint_kw.pop('bInterval', _MARKER)
        if interval is _MARKER:
            fs_interval = hs_interval = 0
        else:
            if transfer_type == ch9.USB_ENDPOINT_XFER_BULK:
                fs_interval = 0
                hs_interval = interval
            else: # USB_ENDPOINT_XFER_ISOC or USB_ENDPOINT_XFER_INT
                # Full-speed: whole milliseconds, 1..255.
                fs_interval = max(1, min(255, round(interval)))
                # High-speed: 2**(bInterval-1) microframe units, 1..16.
                # 8 is the number of microframes in a millisecond
                hs_interval = max(
                    1,
                    min(16, int(round(1 + math.log(interval * 8, 2)))),
                )
        packet_size = endpoint_kw.pop('wMaxPacketSize', _MARKER)
        if packet_size is _MARKER:
            fs_packet_size = fs_max
            hs_packet_size = hs_max
            ss_packet_size = ss_max
        else:
            fs_packet_size = min(fs_max, packet_size)
            hs_packet_size = min(hs_max, packet_size)
            ss_packet_size = min(ss_max, packet_size)
        # BUGFIX: use the per-speed packet sizes computed above. They were
        # previously computed then ignored in favour of the per-speed maxima,
        # silently discarding any caller-provided wMaxPacketSize.
        fs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=fs_packet_size,
            bInterval=fs_interval,
            **endpoint_kw
        ))
        hs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=hs_packet_size,
            bInterval=hs_interval,
            **endpoint_kw
        ))
        # Super-speed intentionally reuses the high-speed bInterval encoding
        # (2**(n-1) microframe units).
        ss_list.append(getDescriptor(
            klass,
            wMaxPacketSize=ss_packet_size,
            bInterval=hs_interval,
            **endpoint_kw
        ))
        ss_companion_kw = endpoint.get('superspeed', _EMPTY_DICT)
        ss_list.append(getDescriptor(
            USBSSEPCompDescriptor,
            **ss_companion_kw
        ))
        ssp_iso_kw = endpoint.get('superspeed_iso', _EMPTY_DICT)
        # superspeed_iso must be present exactly when the endpoint is
        # isochronous and the companion declares SSP ISO companionship.
        if bool(ssp_iso_kw) != (
            endpoint_kw.get('bmAttributes', 0) &
            ch9.USB_ENDPOINT_XFERTYPE_MASK ==
            ch9.USB_ENDPOINT_XFER_ISOC and
            bool(ch9.USB_SS_SSP_ISOC_COMP(
                ss_companion_kw.get('bmAttributes', 0),
            ))
        ):
            raise ValueError('Inconsistent isochronous companion')
        if ssp_iso_kw:
            ss_list.append(getDescriptor(
                USBSSPIsocEndpointDescriptor,
                **ssp_iso_kw
            ))
    return (fs_list, hs_list, ss_list)
def getDescriptor(klass, **kw):
    """
    Instantiate <klass>, automatically filling bLength and bDescriptorType.

    Raises TypeError if any keyword argument does not name a field of the
    descriptor, as ctypes would otherwise silently drop it.
    """
    # ctypes Structure.__init__ silently ignores keyword arguments which do
    # not match a structure field, which makes typos painful to track down.
    # Probe a blank instance first and reject anything unrecognised.
    probe = klass()
    assert hasattr(probe, 'bLength')
    assert hasattr(probe, 'bDescriptorType')
    bogus = [name for name in kw if not hasattr(probe, name)]
    if bogus:
        raise TypeError('Unknown fields %r' % (bogus, ))
    # XXX: not very pythonic...
    return klass(
        bLength=ctypes.sizeof(klass),
        # pylint: disable=protected-access
        bDescriptorType=klass._bDescriptorType,
        # pylint: enable=protected-access
        **kw
    )
def getOSDesc(interface, ext_list):
    """
    Build an OS description header for one interface.

    interface (int)
        Related interface number.
    ext_list (list of OSExtCompatDesc or OSExtPropDesc)
        List of instances of extended descriptors. All entries must be of
        the same type.
    """
    # An empty list or a mix of extension types is equally invalid.
    type_set = {type(x) for x in ext_list}
    if len(type_set) != 1:
        raise TypeError('Extensions of a single type are required.')
    ext_type = type_set.pop()
    if issubclass(ext_type, OSExtCompatDesc):
        wIndex = 4
        extra_kw = {
            'b': OSDescHeaderBCount(
                bCount=len(ext_list),
                Reserved=0,
            ),
        }
    elif issubclass(ext_type, OSExtPropDescHead):
        wIndex = 5
        extra_kw = {
            'wCount': len(ext_list),
        }
    else:
        raise TypeError('Extensions of unexpected type')
    # The tail is variable-length, so fabricate a dedicated ctypes type.
    array_type = ext_type * len(ext_list)
    header_type = type(
        'OSDesc',
        (OSDescHeader, ),
        {
            '_fields_': [
                ('ext_list', array_type),
            ],
        },
    )
    return header_type(
        interface=interface,
        dwLength=ctypes.sizeof(header_type),
        bcdVersion=1,
        wIndex=wIndex,
        ext_list=array_type(*ext_list),
        **extra_kw
    )
def getOSExtPropDesc(data_type, name, value):
    """
    Build an OS extension property descriptor.

    data_type (int)
        See wPropertyDataType documentation.
    name (string)
        See PropertyName documentation.
    value (string)
        See PropertyData documentation.
        NULL chars must be explicitely included in the value when needed,
        this function does not add any terminating NULL for example.
    """
    # Name and value lengths vary per call, so a dedicated ctypes type is
    # fabricated each time.
    prop_type = type(
        'OSExtPropDesc',
        (OSExtPropDescHead, ),
        {
            '_fields_': [
                ('bPropertyName', ctypes.c_char * len(name)),
                ('dwPropertyDataLength', le32),
                ('bProperty', ctypes.c_char * len(value)),
            ],
        }
    )
    return prop_type(
        dwSize=ctypes.sizeof(prop_type),
        dwPropertyDataType=data_type,
        wPropertyNameLength=len(name),
        bPropertyName=name,
        dwPropertyDataLength=len(value),
        bProperty=value,
    )
#def getDescs(*args, **kw):
# """
# Return a legacy format FunctionFS suitable for serialisation.
# Deprecated as of 3.14 .
#
# NOT IMPLEMENTED
# """
# warnings.warn(
# DeprecationWarning,
# 'Legacy format, deprecated as of 3.14.',
# )
# raise NotImplementedError('TODO')
# klass = type(
# 'Descs',
# (DescsHead, ),
# {
# 'fs_descrs': None, # TODO
# 'hs_descrs': None, # TODO
# },
# )
# return klass(
# magic=DESCRIPTORS_MAGIC,
# length=ctypes.sizeof(klass),
# **kw
# )
def getDescsV2(flags, fs_list=(), hs_list=(), ss_list=(), os_list=()):
    """
    Return a FunctionFS descriptor suitable for serialisation.
    flags (int)
        Any combination of VIRTUAL_ADDR, EVENTFD, ALL_CTRL_RECIP,
        CONFIG0_SETUP.
    {fs,hs,ss,os}_list (list of descriptors)
        Instances of the following classes:
        {fs,hs,ss}_list:
            USBInterfaceDescriptor
            USBEndpointDescriptorNoAudio
            USBEndpointDescriptor
            USBSSEPCompDescriptor
            USBSSPIsocEndpointDescriptor
            USBOTGDescriptor
            USBOTG20Descriptor
            USBInterfaceAssocDescriptor
            TODO: HID
            All (non-empty) lists must define the same number of interfaces
            and endpoints, and endpoint descriptors must be given in the same
            order, bEndpointAddress-wise.
        os_list:
            OSDesc
    """
    count_field_list = []
    descr_field_list = []
    kw = {}
    # For each non-empty per-speed list, fabricate a packed ctypes structure
    # holding its descriptors, and record the matching count field. The
    # HAS_*_DESC flag is set automatically for every non-empty list.
    for descriptor_list, flag, prefix, allowed_descriptor_klass in (
        (fs_list, HAS_FS_DESC, 'fs', USBDescriptorHeader),
        (hs_list, HAS_HS_DESC, 'hs', USBDescriptorHeader),
        (ss_list, HAS_SS_DESC, 'ss', USBDescriptorHeader),
        (os_list, HAS_MS_OS_DESC, 'os', OSDescHeader),
    ):
        if descriptor_list:
            # Fail early on descriptors of the wrong kind for this list.
            for index, descriptor in enumerate(descriptor_list):
                if not isinstance(descriptor, allowed_descriptor_klass):
                    raise TypeError(
                        'Descriptor %r of unexpected type: %r' % (
                            index,
                            type(descriptor),
                        ),
                    )
            descriptor_map = [
                ('desc_%i' % x, y)
                for x, y in enumerate(descriptor_list)
            ]
            flags |= flag
            count_name = prefix + 'count'
            descr_name = prefix + 'descr'
            count_field_list.append((count_name, le32))
            descr_type = type(
                't_' + descr_name,
                (ctypes.LittleEndianStructure, ),
                {
                    '_pack_': 1,
                    '_fields_': [
                        (x, type(y))
                        for x, y in descriptor_map
                    ],
                }
            )
            descr_field_list.append((descr_name, descr_type))
            kw[count_name] = len(descriptor_map)
            kw[descr_name] = descr_type(**dict(descriptor_map))
        elif flags & flag:
            # A caller-provided flag without descriptors is a contradiction.
            raise ValueError(
                'Flag %r set but descriptor list empty, cannot generate type.' % (
                    FLAGS.get(flag),
                )
            )
    # Counts come first, then descriptor blobs, matching the kernel layout.
    klass = type(
        'DescsV2_0x%02x' % (
            flags & (
                HAS_FS_DESC |
                HAS_HS_DESC |
                HAS_SS_DESC |
                HAS_MS_OS_DESC
            ),
            # XXX: include contained descriptors type information ? (and name ?)
        ),
        (DescsHeadV2, ),
        {
            '_fields_': count_field_list + descr_field_list,
        },
    )
    return klass(
        magic=DESCRIPTORS_MAGIC_V2,
        length=ctypes.sizeof(klass),
        flags=flags,
        **kw
    )
def serialise(structure):
    """
    Expose <structure> as a raw byte array sharing its memory.

    structure (ctypes.Structure)
        The structure to serialise.

    Returns a ctypes.c_char array.
    Does not copy memory.
    """
    byte_array_type = ctypes.c_char * ctypes.sizeof(structure)
    return ctypes.cast(
        ctypes.pointer(structure),
        ctypes.POINTER(byte_array_type),
    ).contents
class EndpointFileBase(io.FileIO):
    """
    File object representing a endpoint. Abstract.
    """
    def _ioctl(self, func, *args, **kw):
        # Surface negative ioctl return values as IOError; on success the
        # (non-negative) value is returned to the caller.
        status = fcntl.ioctl(self, func, *args, **kw)
        if status < 0:
            raise IOError(status)
        return status
class Endpoint0File(EndpointFileBase):
    """
    File object exposing ioctls available on endpoint zero.
    """
    def halt(self, request_type):
        """
        Halt current endpoint.
        """
        # Stalling ep0 is requested by reading (IN) or writing (OUT) while a
        # setup transaction is pending; the kernel reports success as EL2HLT.
        try:
            if request_type & ch9.USB_DIR_IN:
                self.read(0)
            else:
                self.write(b'')
        except IOError as exc:
            if exc.errno != errno.EL2HLT:
                raise
        else:
            raise ValueError('halt did not return EL2HLT ?')
    def getRealInterfaceNumber(self, interface):
        """
        Returns the host-visible interface number, or None if there is no such
        interface.
        """
        try:
            return self._ioctl(INTERFACE_REVMAP, interface)
        except IOError as exc:
            if exc.errno != errno.EDOM:
                raise
            return None
    # TODO: Add any standard IOCTL in usb_gadget_ops.ioctl ?
class EndpointFile(EndpointFileBase):
    """
    File object exposing ioctls available on non-zero endpoints.
    """
    # Local halt bookkeeping: the kernel offers no "is halted" query.
    _halted = False
    def getRealEndpointNumber(self):
        """
        Returns the host-visible endpoint number.
        """
        return self._ioctl(ENDPOINT_REVMAP)
    def clearHalt(self):
        """
        Clears endpoint halt, and resets toggle.
        See drivers/usb/gadget/udc/core.c:usb_ep_clear_halt
        """
        self._ioctl(CLEAR_HALT)
        self._halted = False
    def getFIFOStatus(self):
        """
        Returns the number of bytes in fifo.
        """
        return self._ioctl(FIFO_STATUS)
    def flushFIFO(self):
        """
        Discards Endpoint FIFO content.
        """
        self._ioctl(FIFO_FLUSH)
    def getDescriptor(self):
        """
        Returns the currently active endpoint descriptor
        (depending on current USB speed).
        """
        descriptor = USBEndpointDescriptor()
        self._ioctl(ENDPOINT_DESC, descriptor, True)
        return descriptor
    def _halt(self):
        # Direction-specific: implemented by IN/OUT subclasses.
        raise NotImplementedError
    def halt(self):
        """
        Halt current endpoint.
        The kernel signals a successful halt with EBADMSG.
        """
        try:
            self._halt()
        except IOError as exc:
            if exc.errno != errno.EBADMSG:
                raise
        else:
            raise ValueError('halt did not return EBADMSG ?')
        self._halted = True
    def isHalted(self):
        """
        Whether endpoint is currently halted.
        """
        return self._halted
class EndpointINFile(EndpointFile):
    """
    Write-only endpoint file.
    """
    @staticmethod
    def readable():
        """
        Never readable.
        """
        return False
    @staticmethod
    def read(*_, **__):
        """
        Always raises IOError.
        """
        raise IOError('File not open for reading')
    # All read flavours share the same always-raising implementation.
    readinto = read
    readall = read
    readlines = read
    readline = read
    def _halt(self):
        # A zero-length read in the IN direction triggers the halt.
        super(EndpointINFile, self).read(0)
class EndpointOUTFile(EndpointFile):
    """
    Read-only endpoint file.
    """
    @staticmethod
    def writable():
        """
        Never writable.
        """
        return False
    @staticmethod
    def write(*_, **__):
        """
        Always raises IOError.
        """
        raise IOError('File not open for writing')
    # writelines shares the always-raising implementation.
    writelines = write
    def _halt(self):
        # An empty write in the OUT direction triggers the halt.
        super(EndpointOUTFile, self).write(b'')
# Iterator driving processEventsForever: yields forever, loop exits only
# via break (close) or exception.
_INFINITY = itertools.repeat(None)
# Iterator driving processEvents: exactly one iteration.
_ONCE = (None, )
class Function(object):
    """
    Pythonic class for interfacing with FunctionFS.
    Properties available:
    function_remote_wakeup_capable (bool)
        Whether the function wishes to be allowed to wake host.
    function_remote_wakeup (bool)
        Whether host has allowed the function to wake it up.
        Set and cleared by onSetup by calling enableRemoteWakeup and
        disableRemoteWakeup, respectively.
    """
    # Set by close(); makes the event loop in __process exit.
    _closed = False
    _ep_list = () # Avoids failing in __del__ when (subclass') __init__ fails.
    function_remote_wakeup_capable = False
    function_remote_wakeup = False
    def __init__(
        self,
        path,
        fs_list=(), hs_list=(), ss_list=(),
        os_list=(),
        lang_dict={},
        all_ctrl_recip=False, config0_setup=False,
    ):
        """
        path (string)
            Path to the functionfs mountpoint (where the ep* files are
            located).
        {fs,hs,ss}_list (list of descriptors)
            XXX: may change to avoid requiring ctype objects.
        os_list (list of descriptors)
            XXX: may change to avoid requiring ctype objects.
        lang_dict (dict)
            Keys: language id (ex: 0x0409 for "en-us").
            Values: List of unicode objects. First item becomes string
            descriptor 1, and so on. Must contain at least as many
            string descriptors as the highest string index declared
            in all descriptors.
        all_ctrl_recip (bool)
            When true, this function will receive all control transactions.
            Useful when implementing non-standard control transactions.
        config0_setup (bool)
            When true, this function will receive control transactions before
            any configuration gets enabled.
        NOTE(review): lang_dict uses a mutable default; safe here because it
        is only iterated, never mutated.
        """
        self._path = path
        ep0 = Endpoint0File(os.path.join(path, 'ep0'), 'r+')
        self._ep_list = ep_list = [ep0]
        self._ep_address_dict = ep_address_dict = {}
        flags = 0
        if all_ctrl_recip:
            flags |= ALL_CTRL_RECIP
        if config0_setup:
            flags |= CONFIG0_SETUP
        # Note: serialise does not prevent its argument from being freed and
        # reallocated. Keep strong references to to-serialise values until
        # after they get written.
        desc = getDescsV2(
            flags,
            fs_list=fs_list,
            hs_list=hs_list,
            ss_list=ss_list,
            os_list=os_list,
        )
        ep0.write(serialise(desc))
        # TODO: try v1 on failure ?
        del desc
        # Note: see above.
        strings = getStrings(lang_dict)
        ep0.write(serialise(strings))
        del strings
        # Open one file per declared endpoint. FunctionFS exposes them as
        # ep1, ep2, ... in descriptor declaration order (ep0 is index 0).
        for descriptor in ss_list or hs_list or fs_list:
            if descriptor.bDescriptorType == ch9.USB_DT_ENDPOINT:
                assert descriptor.bEndpointAddress not in ep_address_dict, (
                    descriptor,
                    ep_address_dict[descriptor.bEndpointAddress],
                )
                index = len(ep_list)
                ep_address_dict[descriptor.bEndpointAddress] = index
                ep_list.append(
                    (
                        EndpointINFile
                        if descriptor.bEndpointAddress & ch9.USB_DIR_IN
                        else EndpointOUTFile
                    )(
                        os.path.join(path, 'ep%u' % (index, )),
                        'r+',
                    )
                )
    @property
    def ep0(self):
        """
        Endpoint 0, use when handling setup transactions.
        """
        return self._ep_list[0]
    def close(self):
        """
        Close all endpoint file descriptors.
        """
        ep_list = self._ep_list
        while ep_list:
            ep_list.pop().close()
        self._closed = True
    def __del__(self):
        self.close()
    # Maps kernel event types to handler method names.
    __event_dict = {
        BIND: 'onBind',
        UNBIND: 'onUnbind',
        ENABLE: 'onEnable',
        DISABLE: 'onDisable',
        # SETUP: handled specially
        SUSPEND: 'onSuspend',
        RESUME: 'onResume',
    }
    def __process(self, iterator):
        # Read batches of events from ep0 and dispatch them to handlers,
        # once per item yielded by *iterator* (see _INFINITY / _ONCE).
        readinto = self.ep0.readinto
        # FunctionFS can queue up to 4 events, so let's read that much.
        event_len = ctypes.sizeof(Event)
        array_type = Event * 4
        buf = bytearray(ctypes.sizeof(array_type))
        # Zero-copy view: events are decoded in place from the read buffer.
        event_list = array_type.from_buffer(buf)
        event_dict = self.__event_dict
        for _ in iterator:
            if self._closed:
                break
            try:
                length = readinto(buf)
            except IOError as exc:
                if exc.errno == errno.EINTR:
                    continue
                raise
            if not length:
                # Note: also catches None, returned when ep0 is non-blocking
                break # TODO: test if this happens when ep0 gets closed
                # (by FunctionFS or in another thread or in a handler)
            count, remainder = divmod(length, event_len)
            assert remainder == 0, (length, event_len)
            for index in range(count):
                event = event_list[index]
                event_type = event.type
                if event_type == SETUP:
                    setup = event.u.setup
                    try:
                        self.onSetup(
                            setup.bRequestType,
                            setup.bRequest,
                            setup.wValue,
                            setup.wIndex,
                            setup.wLength,
                        )
                    except:
                        # On *ANY* exception, halt endpoint
                        self.ep0.halt(setup.bRequestType)
                        raise
                else:
                    getattr(self, event_dict[event.type])()
    def processEventsForever(self):
        """
        Process kernel ep0 events until closed.
        ep0 must be in blocking mode, otherwise behaves like `processEvents`.
        """
        self.__process(_INFINITY)
    def processEvents(self):
        """
        Process at least one kernel event if ep0 is in blocking mode.
        Process any already available event if ep0 is in non-blocking mode.
        """
        self.__process(_ONCE)
    def getEndpoint(self, index):
        """
        Return a file object corresponding to given endpoint index,
        in descriptor list order.
        """
        return self._ep_list[index]
    def getEndpointByAddress(self, address):
        """
        Return a file object corresponding to given endpoint address.
        """
        return self.getEndpoint(self._ep_address_dict[address])
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    def onBind(self):
        """
        Triggered when FunctionFS signals gadget binding.
        May be overridden in subclass.
        """
        pass
    def onUnbind(self):
        """
        Triggered when FunctionFS signals gadget unbinding.
        May be overridden in subclass.
        """
        pass
    def onEnable(self):
        """
        Called when FunctionFS signals the function was (re)enabled.
        This may happen several times without onDisable being called.
        It must reset the function to its default state.
        May be overridden in subclass.
        """
        self.disableRemoteWakeup()
    def onDisable(self):
        """
        Called when FunctionFS signals the function was (re)disabled.
        This may happen several times without onEnable being called.
        May be overridden in subclass.
        """
        pass
    def disableRemoteWakeup(self):
        """
        Called when host issues a clearFeature request of the "suspend" flag
        on this interface.
        Sets function_remote_wakeup property to False so subsequent getStatus
        requests will return expected value.
        May be overridden in subclass.
        """
        self.function_remote_wakeup = False
    def enableRemoteWakeup(self):
        """
        Called when host issues a setFeature request of the "suspend" flag
        on this interface.
        Sets function_remote_wakeup property to True so subsequent getStatus
        requests will return expected value.
        May be overridden in subclass.
        """
        self.function_remote_wakeup = True
    def onSetup(self, request_type, request, value, index, length):
        """
        Called when a setup USB transaction was received.
        Default implementation:
        - handles USB_REQ_GET_STATUS on interface and endpoints
        - handles USB_REQ_CLEAR_FEATURE(USB_ENDPOINT_HALT) on endpoints
        - handles USB_REQ_SET_FEATURE(USB_ENDPOINT_HALT) on endpoints
        - halts on everything else
        If this method raises anything, endpoint 0 is halted by its caller and
        exception is let through.
        May be overridden in subclass.
        """
        if (request_type & ch9.USB_TYPE_MASK) == ch9.USB_TYPE_STANDARD:
            recipient = request_type & ch9.USB_RECIP_MASK
            is_in = (request_type & ch9.USB_DIR_IN) == ch9.USB_DIR_IN
            if request == ch9.USB_REQ_GET_STATUS:
                if is_in and length == 2:
                    if recipient == ch9.USB_RECIP_INTERFACE:
                        if value == 0:
                            status = 0
                            if index == 0:
                                # Bit 0: remote wakeup capability,
                                # bit 1: remote wakeup currently enabled.
                                if self.function_remote_wakeup_capable:
                                    status |= 1 << 0
                                    if self.function_remote_wakeup:
                                        status |= 1 << 1
                            self.ep0.write(struct.pack('<H', status)[:length])
                            return
                    elif recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == 0:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                pass
                            else:
                                status = 0
                                if endpoint.isHalted():
                                    status |= 1 << 0
                                self.ep0.write(
                                    struct.pack('<H', status)[:length],
                                )
                                return
            elif request == ch9.USB_REQ_CLEAR_FEATURE:
                if not is_in and length == 0:
                    if recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == ch9.USB_ENDPOINT_HALT:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                pass
                            else:
                                endpoint.clearHalt()
                                # Zero-length read acknowledges the request.
                                self.ep0.read(0)
                                return
                    elif recipient == ch9.USB_RECIP_INTERFACE:
                        if value == ch9.USB_INTRF_FUNC_SUSPEND:
                            if self.function_remote_wakeup_capable:
                                self.disableRemoteWakeup()
                                self.ep0.read(0)
                                return
            elif request == ch9.USB_REQ_SET_FEATURE:
                if not is_in and length == 0:
                    if recipient == ch9.USB_RECIP_ENDPOINT:
                        if value == ch9.USB_ENDPOINT_HALT:
                            try:
                                endpoint = self.getEndpoint(index)
                            except IndexError:
                                pass
                            else:
                                endpoint.halt()
                                self.ep0.read(0)
                                return
                    elif recipient == ch9.USB_RECIP_INTERFACE:
                        if value == ch9.USB_INTRF_FUNC_SUSPEND:
                            if self.function_remote_wakeup_capable:
                                self.enableRemoteWakeup()
                                self.ep0.read(0)
                                return
        # Anything unhandled: stall.
        self.ep0.halt(request_type)
    def onSuspend(self):
        """
        Called when FunctionFS signals the host stops USB traffic.
        May be overridden in subclass.
        """
        pass
    def onResume(self):
        """
        Called when FunctionFS signals the host restarts USB traffic.
        May be overridden in subclass.
        """
        pass
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
serialise
|
python
|
def serialise(structure):
return ctypes.cast(
ctypes.pointer(structure),
ctypes.POINTER(ctypes.c_char * ctypes.sizeof(structure)),
).contents
|
structure (ctypes.Structure)
The structure to serialise.
Returns a ctypes.c_char array.
Does not copy memory.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L532-L543
| null |
# This file is part of python-functionfs
# Copyright (C) 2016-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# python-functionfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-functionfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-functionfs. If not, see <http://www.gnu.org/licenses/>.
"""
Interfaces with functionfs to simplify USB gadget function declaration and
implementation on linux.
Defines standard USB descriptors (see "ch9" submodule) and sends them to the
kernel to declare function's structure.
Provides methods for accessing each endpoint and to react to events.
"""
import ctypes
import errno
import fcntl
import io
import itertools
import math
import os
import struct
import warnings
from .common import (
USBDescriptorHeader,
le32,
)
from . import ch9
from .ch9 import (
USBInterfaceDescriptor,
USBEndpointDescriptorNoAudio,
USBEndpointDescriptor,
USBSSEPCompDescriptor,
# USBSSPIsocEndpointDescriptor is not implemented in kernel as of this
# writing.
USBSSPIsocEndpointDescriptor,
# USBQualifierDescriptor is reserved for gadgets, so don't expose it.
USBOTGDescriptor,
USBOTG20Descriptor,
# USBDebugDescriptor is not implemented in kernel as of this writing.
USBDebugDescriptor,
USBInterfaceAssocDescriptor,
)
from .functionfs import (
DESCRIPTORS_MAGIC, STRINGS_MAGIC, DESCRIPTORS_MAGIC_V2,
FLAGS,
DescsHeadV2,
DescsHead,
OSDescHeader,
OSDescHeaderBCount,
OSExtCompatDesc,
OSExtPropDescHead,
StringsHead,
StringBase,
Event,
FIFO_STATUS, FIFO_FLUSH, CLEAR_HALT, INTERFACE_REVMAP, ENDPOINT_REVMAP, ENDPOINT_DESC,
)
# pylint: disable=no-name-in-module
from .functionfs import (
HAS_FS_DESC,
HAS_HS_DESC,
HAS_SS_DESC,
HAS_MS_OS_DESC,
ALL_CTRL_RECIP,
CONFIG0_SETUP,
BIND, UNBIND, ENABLE, DISABLE, SETUP, SUSPEND, RESUME,
)
# pylint: enable=no-name-in-module
# Public API of this module.
__all__ = (
    'ch9',
    'Function',
    # XXX: Not very pythonic...
    'getInterfaceInAllSpeeds',
    'getDescriptor',
    'getOSDesc',
    'getOSExtPropDesc',
    'USBInterfaceDescriptor',
    'USBEndpointDescriptorNoAudio',
    'USBEndpointDescriptor',
    'USBSSEPCompDescriptor',
    'USBSSPIsocEndpointDescriptor',
    'USBOTGDescriptor',
    'USBOTG20Descriptor',
    'USBDebugDescriptor',
    'USBInterfaceAssocDescriptor',
    'OSExtCompatDesc',
)
# Maximum wMaxPacketSize per transfer type, as a
# (full-speed, high-speed, super-speed) triplet.
_MAX_PACKET_SIZE_DICT = {
    ch9.USB_ENDPOINT_XFER_ISOC: (
        1023, # 0..1023
        1024, # 0..1024
        1024, # 0..1024
    ),
    ch9.USB_ENDPOINT_XFER_BULK: (
        64, # 8, 16, 32, 64
        512, # 512 only
        1024, # 1024 only
    ),
    ch9.USB_ENDPOINT_XFER_INT: (
        64, # 0..64
        1024, # 0..1024
        1024, # 1..1024
    ),
}
# Sentinel distinguishing "argument absent" from any real value (incl. None).
_MARKER = object()
_EMPTY_DICT = {} # For internal ** fallback usage
def getInterfaceInAllSpeeds(interface, endpoint_list, class_descriptor_list=()):
    """
    Produce similar fs, hs and ss interface and endpoints descriptors.
    Should be useful for devices desiring to work in all 3 speeds with maximum
    endpoint wMaxPacketSize. Reduces data duplication from descriptor
    declarations.
    Not intended to cover fancy combinations.
    interface (dict):
        Keyword arguments for
            getDescriptor(USBInterfaceDescriptor, ...)
        in all speeds.
        bNumEndpoints must not be provided.
    endpoint_list (list of dicts)
        Each dict represents an endpoint, and may contain the following items:
        - "endpoint": required, contains keyword arguments for
            getDescriptor(USBEndpointDescriptorNoAudio, ...)
          or
            getDescriptor(USBEndpointDescriptor, ...)
          The with-audio variant is picked when its extra fields are assigned a
          value.
          wMaxPacketSize may be missing, in which case it will be set to the
          maximum size for given speed and endpoint type. When provided, it is
          clamped to each speed's maximum.
          bmAttributes must be provided.
          If bEndpointAddress is zero (excluding direction bit) on the first
          endpoint, endpoints will be assigned their rank in this list,
          starting at 1. Their direction bit is preserved.
          If bInterval is present on a INT or ISO endpoint, it must be in
          millisecond units (but may not be an integer), and will be converted
          to the nearest integer millisecond for full-speed descriptor, and
          nearest possible interval for high- and super-speed descriptors.
          If bInterval is present on a BULK endpoint, it is set to zero on
          full-speed descriptor and used as provided on high- and super-speed
          descriptors.
        - "superspeed": optional, contains keyword arguments for
            getDescriptor(USBSSEPCompDescriptor, ...)
        - "superspeed_iso": optional, contains keyword arguments for
            getDescriptor(USBSSPIsocEndpointDescriptor, ...)
          Must be provided and non-empty only when endpoint is isochronous and
          "superspeed" dict has "bmAttributes" bit 7 set.
    class_descriptor (list of descriptors of any type)
        Descriptors to insert in all speeds between the interface descriptor and
        endpoint descriptors.
    Returns a 3-tuple of lists:
    - fs descriptors
    - hs descriptors
    - ss descriptors
    """
    interface = getDescriptor(
        USBInterfaceDescriptor,
        bNumEndpoints=len(endpoint_list),
        **interface
    )
    class_descriptor_list = list(class_descriptor_list)
    fs_list = [interface] + class_descriptor_list
    hs_list = [interface] + class_descriptor_list
    ss_list = [interface] + class_descriptor_list
    # Auto-number endpoints when the first one has address 0 (direction bit
    # excluded).
    need_address = (
        endpoint_list[0]['endpoint'].get(
            'bEndpointAddress',
            0,
        ) & ~ch9.USB_DIR_IN == 0
    )
    for index, endpoint in enumerate(endpoint_list, 1):
        endpoint_kw = endpoint['endpoint'].copy()
        transfer_type = endpoint_kw[
            'bmAttributes'
        ] & ch9.USB_ENDPOINT_XFERTYPE_MASK
        fs_max, hs_max, ss_max = _MAX_PACKET_SIZE_DICT[transfer_type]
        if need_address:
            endpoint_kw['bEndpointAddress'] = index | (
                endpoint_kw.get('bEndpointAddress', 0) & ch9.USB_DIR_IN
            )
        klass = (
            USBEndpointDescriptor
            if 'bRefresh' in endpoint_kw or 'bSynchAddress' in endpoint_kw else
            USBEndpointDescriptorNoAudio
        )
        interval = endpoint_kw.pop('bInterval', _MARKER)
        if interval is _MARKER:
            fs_interval = hs_interval = 0
        else:
            if transfer_type == ch9.USB_ENDPOINT_XFER_BULK:
                fs_interval = 0
                hs_interval = interval
            else: # USB_ENDPOINT_XFER_ISOC or USB_ENDPOINT_XFER_INT
                # Full-speed: whole milliseconds, 1..255.
                fs_interval = max(1, min(255, round(interval)))
                # High-speed: 2**(bInterval - 1) microframes.
                # 8 is the number of microframes in a millisecond
                hs_interval = max(
                    1,
                    min(16, int(round(1 + math.log(interval * 8, 2)))),
                )
        packet_size = endpoint_kw.pop('wMaxPacketSize', _MARKER)
        if packet_size is _MARKER:
            fs_packet_size = fs_max
            hs_packet_size = hs_max
            ss_packet_size = ss_max
        else:
            fs_packet_size = min(fs_max, packet_size)
            hs_packet_size = min(hs_max, packet_size)
            ss_packet_size = min(ss_max, packet_size)
        # BUGFIX: the clamped per-speed packet sizes computed above are now
        # actually used; previously the *_max values were passed, silently
        # discarding any caller-provided wMaxPacketSize.
        fs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=fs_packet_size,
            bInterval=fs_interval,
            **endpoint_kw
        ))
        hs_list.append(getDescriptor(
            klass,
            wMaxPacketSize=hs_packet_size,
            bInterval=hs_interval,
            **endpoint_kw
        ))
        # Super-speed uses the same bInterval encoding as high-speed.
        ss_list.append(getDescriptor(
            klass,
            wMaxPacketSize=ss_packet_size,
            bInterval=hs_interval,
            **endpoint_kw
        ))
        ss_companion_kw = endpoint.get('superspeed', _EMPTY_DICT)
        ss_list.append(getDescriptor(
            USBSSEPCompDescriptor,
            **ss_companion_kw
        ))
        # "superspeed_iso" must be present exactly when the endpoint is ISO
        # and the companion descriptor announces SSP ISO capability.
        ssp_iso_kw = endpoint.get('superspeed_iso', _EMPTY_DICT)
        if bool(ssp_iso_kw) != (
            endpoint_kw.get('bmAttributes', 0) &
            ch9.USB_ENDPOINT_XFERTYPE_MASK ==
            ch9.USB_ENDPOINT_XFER_ISOC and
            bool(ch9.USB_SS_SSP_ISOC_COMP(
                ss_companion_kw.get('bmAttributes', 0),
            ))
        ):
            raise ValueError('Inconsistent isochronous companion')
        if ssp_iso_kw:
            ss_list.append(getDescriptor(
                USBSSPIsocEndpointDescriptor,
                **ssp_iso_kw
            ))
    return (fs_list, hs_list, ss_list)
def getDescriptor(klass, **kw):
    """
    Instantiate klass, automatically filling bLength and bDescriptorType.
    Raises TypeError when a keyword does not match any field of klass.
    """
    # ctypes Structure.__init__ silently ignores keyword arguments which do
    # not exist as structure fields, hiding typos from the developer. Probe
    # an empty instance first and reject unknown names explicitly.
    probe = klass()
    assert hasattr(probe, 'bLength')
    assert hasattr(probe, 'bDescriptorType')
    unknown = [name for name in kw if not hasattr(probe, name)]
    if unknown:
        raise TypeError('Unknown fields %r' % (unknown, ))
    # XXX: not very pythonic...
    return klass(
        bLength=ctypes.sizeof(klass),
        # pylint: disable=protected-access
        bDescriptorType=klass._bDescriptorType,
        # pylint: enable=protected-access
        **kw
    )
def getOSDesc(interface, ext_list):
    """
    Return an OS description header wrapping the given extensions.
    interface (int)
        Related interface number.
    ext_list (list of OSExtCompatDesc or OSExtPropDesc)
        List of instances of extended descriptors.
        Must be non-empty and homogeneous in type.
    """
    try:
        # Unpacking fails (ValueError) when the list is empty or mixes types.
        ext_type, = {type(x) for x in ext_list}
    except ValueError:
        raise TypeError('Extensions of a single type are required.')
    if issubclass(ext_type, OSExtCompatDesc):
        wIndex = 4
        header_kw = {
            'b': OSDescHeaderBCount(
                bCount=len(ext_list),
                Reserved=0,
            ),
        }
    elif issubclass(ext_type, OSExtPropDescHead):
        wIndex = 5
        header_kw = {
            'wCount': len(ext_list),
        }
    else:
        raise TypeError('Extensions of unexpected type')
    array_type = ext_type * len(ext_list)
    os_desc_type = type(
        'OSDesc',
        (OSDescHeader, ),
        {
            '_fields_': [
                ('ext_list', array_type),
            ],
        },
    )
    return os_desc_type(
        interface=interface,
        dwLength=ctypes.sizeof(os_desc_type),
        bcdVersion=1,
        wIndex=wIndex,
        ext_list=array_type(*ext_list),
        **header_kw
    )
def getOSExtPropDesc(data_type, name, value):
    """
    Returns an OS extension property descriptor.
    data_type (int)
        See wPropertyDataType documentation.
    name (string)
        See PropertyName documentation.
    value (string)
        See PropertyData documentation.
        NULL chars must be explicitely included in the value when needed,
        this function does not add any terminating NULL for example.
    """
    name_length = len(name)
    value_length = len(value)
    # Forge a structure type sized for this exact name/value pair.
    descriptor_type = type(
        'OSExtPropDesc',
        (OSExtPropDescHead, ),
        {
            '_fields_': [
                ('bPropertyName', ctypes.c_char * name_length),
                ('dwPropertyDataLength', le32),
                ('bProperty', ctypes.c_char * value_length),
            ],
        },
    )
    return descriptor_type(
        dwSize=ctypes.sizeof(descriptor_type),
        dwPropertyDataType=data_type,
        wPropertyNameLength=name_length,
        bPropertyName=name,
        dwPropertyDataLength=value_length,
        bProperty=value,
    )
#def getDescs(*args, **kw):
# """
# Return a legacy format FunctionFS suitable for serialisation.
# Deprecated as of 3.14 .
#
# NOT IMPLEMENTED
# """
# warnings.warn(
# DeprecationWarning,
# 'Legacy format, deprecated as of 3.14.',
# )
# raise NotImplementedError('TODO')
# klass = type(
# 'Descs',
# (DescsHead, ),
# {
# 'fs_descrs': None, # TODO
# 'hs_descrs': None, # TODO
# },
# )
# return klass(
# magic=DESCRIPTORS_MAGIC,
# length=ctypes.sizeof(klass),
# **kw
# )
def getDescsV2(flags, fs_list=(), hs_list=(), ss_list=(), os_list=()):
    """
    Return a FunctionFS descriptor suitable for serialisation.
    flags (int)
        Any combination of VIRTUAL_ADDR, EVENTFD, ALL_CTRL_RECIP,
        CONFIG0_SETUP.
    {fs,hs,ss,os}_list (list of descriptors)
        Instances of the following classes:
        {fs,hs,ss}_list:
            USBInterfaceDescriptor
            USBEndpointDescriptorNoAudio
            USBEndpointDescriptor
            USBSSEPCompDescriptor
            USBSSPIsocEndpointDescriptor
            USBOTGDescriptor
            USBOTG20Descriptor
            USBInterfaceAssocDescriptor
            TODO: HID
        All (non-empty) lists must define the same number of interfaces
        and endpoints, and endpoint descriptors must be given in the same
        order, bEndpointAddress-wise.
        os_list:
            OSDesc
    Raises TypeError when a descriptor is not of an allowed class, and
    ValueError when a HAS_*_DESC flag is set with an empty matching list.
    """
    count_field_list = []
    descr_field_list = []
    kw = {}
    # For each speed, build a "<prefix>count" le32 field and a
    # "<prefix>descr" sub-structure holding the concatenated descriptors,
    # and set the matching HAS_*_DESC flag.
    for descriptor_list, flag, prefix, allowed_descriptor_klass in (
        (fs_list, HAS_FS_DESC, 'fs', USBDescriptorHeader),
        (hs_list, HAS_HS_DESC, 'hs', USBDescriptorHeader),
        (ss_list, HAS_SS_DESC, 'ss', USBDescriptorHeader),
        (os_list, HAS_MS_OS_DESC, 'os', OSDescHeader),
    ):
        if descriptor_list:
            for index, descriptor in enumerate(descriptor_list):
                if not isinstance(descriptor, allowed_descriptor_klass):
                    raise TypeError(
                        'Descriptor %r of unexpected type: %r' % (
                            index,
                            type(descriptor),
                        ),
                    )
            descriptor_map = [
                ('desc_%i' % x, y)
                for x, y in enumerate(descriptor_list)
            ]
            flags |= flag
            count_name = prefix + 'count'
            descr_name = prefix + 'descr'
            count_field_list.append((count_name, le32))
            # Forge a packed structure type with one field per descriptor,
            # since each descriptor may be of a different concrete type.
            descr_type = type(
                't_' + descr_name,
                (ctypes.LittleEndianStructure, ),
                {
                    '_pack_': 1,
                    '_fields_': [
                        (x, type(y))
                        for x, y in descriptor_map
                    ],
                }
            )
            descr_field_list.append((descr_name, descr_type))
            kw[count_name] = len(descriptor_map)
            kw[descr_name] = descr_type(**dict(descriptor_map))
        elif flags & flag:
            raise ValueError(
                'Flag %r set but descriptor list empty, cannot generate type.' % (
                    FLAGS.get(flag),
                )
            )
    # All count fields come first, then all descriptor blobs, matching the
    # layout expected by the kernel for DESCRIPTORS_MAGIC_V2.
    klass = type(
        'DescsV2_0x%02x' % (
            flags & (
                HAS_FS_DESC |
                HAS_HS_DESC |
                HAS_SS_DESC |
                HAS_MS_OS_DESC
            ),
            # XXX: include contained descriptors type information ? (and name ?)
        ),
        (DescsHeadV2, ),
        {
            '_fields_': count_field_list + descr_field_list,
        },
    )
    return klass(
        magic=DESCRIPTORS_MAGIC_V2,
        length=ctypes.sizeof(klass),
        flags=flags,
        **kw
    )
def getStrings(lang_dict):
    """
    Return a FunctionFS strings blob suitable for serialisation.
    lang_dict (dict)
        Key: language ID (ex: 0x0409 for en-us)
        Value: list of unicode objects
        All values must have the same number of items.
    """
    field_list = []
    field_value_dict = {}
    try:
        str_count = len(next(iter(lang_dict.values())))
    except StopIteration:
        # Empty dict: no string table at all.
        str_count = 0
    else:
        for lang, string_list in lang_dict.items():
            if len(string_list) != str_count:
                raise ValueError('All values must have the same string count.')
            field_id = 'strings_%04x' % lang
            # Strings are NUL-separated and NUL-terminated UTF-8.
            strings = b'\x00'.join(
                entry.encode('utf-8')
                for entry in string_list
            ) + b'\x00'
            field_type = type(
                'String',
                (StringBase, ),
                {
                    '_fields_': [
                        ('strings', ctypes.c_char * len(strings)),
                    ],
                },
            )
            field_list.append((field_id, field_type))
            field_value_dict[field_id] = field_type(
                lang=lang,
                strings=strings,
            )
    strings_type = type(
        'Strings',
        (StringsHead, ),
        {
            '_fields_': field_list,
        },
    )
    return strings_type(
        magic=STRINGS_MAGIC,
        length=ctypes.sizeof(strings_type),
        str_count=str_count,
        lang_count=len(lang_dict),
        **field_value_dict
    )
class EndpointFileBase(io.FileIO):
    """
    File object representing a endpoint. Abstract.
    """
    def _ioctl(self, func, *args, **kw):
        """
        Run an ioctl on this file, converting negative status into IOError.
        """
        status = fcntl.ioctl(self, func, *args, **kw)
        if status < 0:
            raise IOError(status)
        return status
class Endpoint0File(EndpointFileBase):
    """
    File object exposing ioctls available on endpoint zero.
    """
    def halt(self, request_type):
        """
        Halt current endpoint.
        Answering a setup transaction in the "wrong" direction is how
        FunctionFS signals a protocol stall; the kernel reports EL2HLT.
        """
        try:
            if request_type & ch9.USB_DIR_IN:
                self.read(0)
            else:
                self.write(b'')
        except IOError as exc:
            if exc.errno != errno.EL2HLT:
                raise
        else:
            raise ValueError('halt did not return EL2HLT ?')
    def getRealInterfaceNumber(self, interface):
        """
        Returns the host-visible interface number, or None if there is no such
        interface.
        """
        try:
            return self._ioctl(INTERFACE_REVMAP, interface)
        except IOError as exc:
            if exc.errno != errno.EDOM:
                raise
            return None
    # TODO: Add any standard IOCTL in usb_gadget_ops.ioctl ?
class EndpointFile(EndpointFileBase):
    """
    File object exposing ioctls available on non-zero endpoints.
    """
    # Local halt bookkeeping: the kernel offers no "is halted" query.
    _halted = False
    def getRealEndpointNumber(self):
        """
        Returns the host-visible endpoint number.
        """
        return self._ioctl(ENDPOINT_REVMAP)
    def clearHalt(self):
        """
        Clears endpoint halt, and resets toggle.
        See drivers/usb/gadget/udc/core.c:usb_ep_clear_halt
        """
        self._ioctl(CLEAR_HALT)
        self._halted = False
    def getFIFOStatus(self):
        """
        Returns the number of bytes in fifo.
        """
        return self._ioctl(FIFO_STATUS)
    def flushFIFO(self):
        """
        Discards Endpoint FIFO content.
        """
        self._ioctl(FIFO_FLUSH)
    def getDescriptor(self):
        """
        Returns the currently active endpoint descriptor
        (depending on current USB speed).
        """
        descriptor = USBEndpointDescriptor()
        self._ioctl(ENDPOINT_DESC, descriptor, True)
        return descriptor
    def _halt(self):
        # Direction-specific: implemented by IN/OUT subclasses.
        raise NotImplementedError
    def halt(self):
        """
        Halt current endpoint.
        The kernel signals a successful halt with EBADMSG.
        """
        try:
            self._halt()
        except IOError as exc:
            if exc.errno != errno.EBADMSG:
                raise
        else:
            raise ValueError('halt did not return EBADMSG ?')
        self._halted = True
    def isHalted(self):
        """
        Whether endpoint is currently halted.
        """
        return self._halted
class EndpointINFile(EndpointFile):
    """
    Write-only endpoint file.
    """
    @staticmethod
    def readable():
        """
        Never readable.
        """
        return False
    @staticmethod
    def read(*_, **__):
        """
        Always raises IOError.
        """
        raise IOError('File not open for reading')
    # All read flavours share the same always-raising implementation.
    readinto = read
    readall = read
    readlines = read
    readline = read
    def _halt(self):
        # A zero-length read in the IN direction triggers the halt.
        super(EndpointINFile, self).read(0)
class EndpointOUTFile(EndpointFile):
    """
    Read-only endpoint file.
    """
    @staticmethod
    def writable():
        """
        Never writable.
        """
        return False
    @staticmethod
    def write(*_, **__):
        """
        Always raises IOError.
        """
        raise IOError('File not open for writing')
    # writelines shares the always-raising implementation.
    writelines = write
    def _halt(self):
        # An empty write in the OUT direction triggers the halt.
        super(EndpointOUTFile, self).write(b'')
# Iterator driving processEventsForever: yields forever, loop exits only
# via break (close) or exception.
_INFINITY = itertools.repeat(None)
# Iterator driving processEvents: exactly one iteration.
_ONCE = (None, )
class Function(object):
"""
Pythonic class for interfacing with FunctionFS.
Properties available:
function_remote_wakeup_capable (bool)
Whether the function wishes to be allowed to wake host.
function_remote_wakeup (bool)
Whether host has allowed the function to wake it up.
Set and cleared by onSetup by calling enableRemoteWakeup and
disableRemoteWakeup, respectively.
"""
_closed = False
_ep_list = () # Avoids failing in __del__ when (subclass') __init__ fails.
function_remote_wakeup_capable = False
function_remote_wakeup = False
def __init__(
self,
path,
fs_list=(), hs_list=(), ss_list=(),
os_list=(),
lang_dict={},
all_ctrl_recip=False, config0_setup=False,
):
"""
path (string)
Path to the functionfs mountpoint (where the ep* files are
located).
{fs,hs,ss}_list (list of descriptors)
XXX: may change to avoid requiring ctype objects.
os_list (list of descriptors)
XXX: may change to avoid requiring ctype objects.
lang_dict (dict)
Keys: language id (ex: 0x0402 for "us-en").
Values: List of unicode objects. First item becomes string
descriptor 1, and so on. Must contain at least as many
string descriptors as the highest string index declared
in all descriptors.
all_ctrl_recip (bool)
When true, this function will receive all control transactions.
Useful when implementing non-standard control transactions.
config0_setup (bool)
When true, this function will receive control transactions before
any configuration gets enabled.
"""
self._path = path
ep0 = Endpoint0File(os.path.join(path, 'ep0'), 'r+')
self._ep_list = ep_list = [ep0]
self._ep_address_dict = ep_address_dict = {}
flags = 0
if all_ctrl_recip:
flags |= ALL_CTRL_RECIP
if config0_setup:
flags |= CONFIG0_SETUP
# Note: serialise does not prevent its argument from being freed and
# reallocated. Keep strong references to to-serialise values until
# after they get written.
desc = getDescsV2(
flags,
fs_list=fs_list,
hs_list=hs_list,
ss_list=ss_list,
os_list=os_list,
)
ep0.write(serialise(desc))
# TODO: try v1 on failure ?
del desc
# Note: see above.
strings = getStrings(lang_dict)
ep0.write(serialise(strings))
del strings
for descriptor in ss_list or hs_list or fs_list:
if descriptor.bDescriptorType == ch9.USB_DT_ENDPOINT:
assert descriptor.bEndpointAddress not in ep_address_dict, (
descriptor,
ep_address_dict[descriptor.bEndpointAddress],
)
index = len(ep_list)
ep_address_dict[descriptor.bEndpointAddress] = index
ep_list.append(
(
EndpointINFile
if descriptor.bEndpointAddress & ch9.USB_DIR_IN
else EndpointOUTFile
)(
os.path.join(path, 'ep%u' % (index, )),
'r+',
)
)
@property
def ep0(self):
"""
Endpoint 0, use when handling setup transactions.
"""
return self._ep_list[0]
def close(self):
"""
Close all endpoint file descriptors.
"""
ep_list = self._ep_list
while ep_list:
ep_list.pop().close()
self._closed = True
def __del__(self):
self.close()
__event_dict = {
BIND: 'onBind',
UNBIND: 'onUnbind',
ENABLE: 'onEnable',
DISABLE: 'onDisable',
# SETUP: handled specially
SUSPEND: 'onSuspend',
RESUME: 'onResume',
}
def __process(self, iterator):
readinto = self.ep0.readinto
# FunctionFS can queue up to 4 events, so let's read that much.
event_len = ctypes.sizeof(Event)
array_type = Event * 4
buf = bytearray(ctypes.sizeof(array_type))
event_list = array_type.from_buffer(buf)
event_dict = self.__event_dict
for _ in iterator:
if self._closed:
break
try:
length = readinto(buf)
except IOError as exc:
if exc.errno == errno.EINTR:
continue
raise
if not length:
# Note: also catches None, returned when ep0 is non-blocking
break # TODO: test if this happens when ep0 gets closed
# (by FunctionFS or in another thread or in a handler)
count, remainder = divmod(length, event_len)
assert remainder == 0, (length, event_len)
for index in range(count):
event = event_list[index]
event_type = event.type
if event_type == SETUP:
setup = event.u.setup
try:
self.onSetup(
setup.bRequestType,
setup.bRequest,
setup.wValue,
setup.wIndex,
setup.wLength,
)
except:
# On *ANY* exception, halt endpoint
self.ep0.halt(setup.bRequestType)
raise
else:
getattr(self, event_dict[event.type])()
def processEventsForever(self):
"""
Process kernel ep0 events until closed.
ep0 must be in blocking mode, otherwise behaves like `processEvents`.
"""
self.__process(_INFINITY)
def processEvents(self):
"""
Process at least one kernel event if ep0 is in blocking mode.
Process any already available event if ep0 is in non-blocking mode.
"""
self.__process(_ONCE)
def getEndpoint(self, index):
"""
Return a file object corresponding to given endpoint index,
in descriptor list order.
"""
return self._ep_list[index]
def getEndpointByAddress(self, address):
"""
Return a file object corresponding to given endpoint address.
"""
return self.getEndpoint(self._ep_address_dict[address])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def onBind(self):
"""
Triggered when FunctionFS signals gadget binding.
May be overridden in subclass.
"""
pass
def onUnbind(self):
"""
Triggered when FunctionFS signals gadget unbinding.
May be overridden in subclass.
"""
pass
def onEnable(self):
"""
Called when FunctionFS signals the function was (re)enabled.
This may happen several times without onDisable being called.
It must reset the function to its default state.
May be overridden in subclass.
"""
self.disableRemoteWakeup()
def onDisable(self):
"""
Called when FunctionFS signals the function was (re)disabled.
This may happen several times without onEnable being called.
May be overridden in subclass.
"""
pass
def disableRemoteWakeup(self):
"""
Called when host issues a clearFeature request of the "suspend" flag
on this interface.
Sets function_remote_wakeup property to False so subsequent getStatus
requests will return expected value.
May be overridden in subclass.
"""
self.function_remote_wakeup = False
def enableRemoteWakeup(self):
"""
Called when host issues a setFeature request of the "suspend" flag
on this interface.
Sets function_remote_wakeup property to True so subsequent getStatus
requests will return expected value.
May be overridden in subclass.
"""
self.function_remote_wakeup = True
def onSetup(self, request_type, request, value, index, length):
"""
Called when a setup USB transaction was received.
Default implementation:
- handles USB_REQ_GET_STATUS on interface and endpoints
- handles USB_REQ_CLEAR_FEATURE(USB_ENDPOINT_HALT) on endpoints
- handles USB_REQ_SET_FEATURE(USB_ENDPOINT_HALT) on endpoints
- halts on everything else
If this method raises anything, endpoint 0 is halted by its caller and
exception is let through.
May be overridden in subclass.
"""
if (request_type & ch9.USB_TYPE_MASK) == ch9.USB_TYPE_STANDARD:
recipient = request_type & ch9.USB_RECIP_MASK
is_in = (request_type & ch9.USB_DIR_IN) == ch9.USB_DIR_IN
if request == ch9.USB_REQ_GET_STATUS:
if is_in and length == 2:
if recipient == ch9.USB_RECIP_INTERFACE:
if value == 0:
status = 0
if index == 0:
if self.function_remote_wakeup_capable:
status |= 1 << 0
if self.function_remote_wakeup:
status |= 1 << 1
self.ep0.write(struct.pack('<H', status)[:length])
return
elif recipient == ch9.USB_RECIP_ENDPOINT:
if value == 0:
try:
endpoint = self.getEndpoint(index)
except IndexError:
pass
else:
status = 0
if endpoint.isHalted():
status |= 1 << 0
self.ep0.write(
struct.pack('<H', status)[:length],
)
return
elif request == ch9.USB_REQ_CLEAR_FEATURE:
if not is_in and length == 0:
if recipient == ch9.USB_RECIP_ENDPOINT:
if value == ch9.USB_ENDPOINT_HALT:
try:
endpoint = self.getEndpoint(index)
except IndexError:
pass
else:
endpoint.clearHalt()
self.ep0.read(0)
return
elif recipient == ch9.USB_RECIP_INTERFACE:
if value == ch9.USB_INTRF_FUNC_SUSPEND:
if self.function_remote_wakeup_capable:
self.disableRemoteWakeup()
self.ep0.read(0)
return
elif request == ch9.USB_REQ_SET_FEATURE:
if not is_in and length == 0:
if recipient == ch9.USB_RECIP_ENDPOINT:
if value == ch9.USB_ENDPOINT_HALT:
try:
endpoint = self.getEndpoint(index)
except IndexError:
pass
else:
endpoint.halt()
self.ep0.read(0)
return
elif recipient == ch9.USB_RECIP_INTERFACE:
if value == ch9.USB_INTRF_FUNC_SUSPEND:
if self.function_remote_wakeup_capable:
self.enableRemoteWakeup()
self.ep0.read(0)
return
self.ep0.halt(request_type)
def onSuspend(self):
"""
Called when FunctionFS signals the host stops USB traffic.
May be overridden in subclass.
"""
pass
def onResume(self):
"""
Called when FunctionFS signals the host restarts USB traffic.
May be overridden in subclass.
"""
pass
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
Endpoint0File.halt
|
python
|
def halt(self, request_type):
try:
if request_type & ch9.USB_DIR_IN:
self.read(0)
else:
self.write(b'')
except IOError as exc:
if exc.errno != errno.EL2HLT:
raise
else:
raise ValueError('halt did not return EL2HLT ?')
|
Halt current endpoint.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L559-L572
| null |
class Endpoint0File(EndpointFileBase):
"""
File object exposing ioctls available on endpoint zero.
"""
def getRealInterfaceNumber(self, interface):
"""
Returns the host-visible interface number, or None if there is no such
interface.
"""
try:
return self._ioctl(INTERFACE_REVMAP, interface)
except IOError as exc:
if exc.errno == errno.EDOM:
return None
raise
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
Endpoint0File.getRealInterfaceNumber
|
python
|
def getRealInterfaceNumber(self, interface):
try:
return self._ioctl(INTERFACE_REVMAP, interface)
except IOError as exc:
if exc.errno == errno.EDOM:
return None
raise
|
Returns the host-visible interface number, or None if there is no such
interface.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L574-L584
|
[
"def _ioctl(self, func, *args, **kw):\n result = fcntl.ioctl(self, func, *args, **kw)\n if result < 0:\n raise IOError(result)\n return result\n"
] |
class Endpoint0File(EndpointFileBase):
"""
File object exposing ioctls available on endpoint zero.
"""
def halt(self, request_type):
"""
Halt current endpoint.
"""
try:
if request_type & ch9.USB_DIR_IN:
self.read(0)
else:
self.write(b'')
except IOError as exc:
if exc.errno != errno.EL2HLT:
raise
else:
raise ValueError('halt did not return EL2HLT ?')
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
EndpointFile.getDescriptor
|
python
|
def getDescriptor(self):
result = USBEndpointDescriptor()
self._ioctl(ENDPOINT_DESC, result, True)
return result
|
Returns the currently active endpoint descriptor
(depending on current USB speed).
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L621-L628
|
[
"def _ioctl(self, func, *args, **kw):\n result = fcntl.ioctl(self, func, *args, **kw)\n if result < 0:\n raise IOError(result)\n return result\n"
] |
class EndpointFile(EndpointFileBase):
"""
File object exposing ioctls available on non-zero endpoints.
"""
_halted = False
def getRealEndpointNumber(self):
"""
Returns the host-visible endpoint number.
"""
return self._ioctl(ENDPOINT_REVMAP)
def clearHalt(self):
"""
Clears endpoint halt, and resets toggle.
See drivers/usb/gadget/udc/core.c:usb_ep_clear_halt
"""
self._ioctl(CLEAR_HALT)
self._halted = False
def getFIFOStatus(self):
"""
Returns the number of bytes in fifo.
"""
return self._ioctl(FIFO_STATUS)
def flushFIFO(self):
"""
Discards Endpoint FIFO content.
"""
self._ioctl(FIFO_FLUSH)
def _halt(self):
raise NotImplementedError
def halt(self):
"""
Halt current endpoint.
"""
try:
self._halt()
except IOError as exc:
if exc.errno != errno.EBADMSG:
raise
else:
raise ValueError('halt did not return EBADMSG ?')
self._halted = True
def isHalted(self):
"""
Whether endpoint is currently halted.
"""
return self._halted
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
EndpointFile.halt
|
python
|
def halt(self):
try:
self._halt()
except IOError as exc:
if exc.errno != errno.EBADMSG:
raise
else:
raise ValueError('halt did not return EBADMSG ?')
self._halted = True
|
Halt current endpoint.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L633-L644
|
[
"def _halt(self):\n raise NotImplementedError\n"
] |
class EndpointFile(EndpointFileBase):
"""
File object exposing ioctls available on non-zero endpoints.
"""
_halted = False
def getRealEndpointNumber(self):
"""
Returns the host-visible endpoint number.
"""
return self._ioctl(ENDPOINT_REVMAP)
def clearHalt(self):
"""
Clears endpoint halt, and resets toggle.
See drivers/usb/gadget/udc/core.c:usb_ep_clear_halt
"""
self._ioctl(CLEAR_HALT)
self._halted = False
def getFIFOStatus(self):
"""
Returns the number of bytes in fifo.
"""
return self._ioctl(FIFO_STATUS)
def flushFIFO(self):
"""
Discards Endpoint FIFO content.
"""
self._ioctl(FIFO_FLUSH)
def getDescriptor(self):
"""
Returns the currently active endpoint descriptor
(depending on current USB speed).
"""
result = USBEndpointDescriptor()
self._ioctl(ENDPOINT_DESC, result, True)
return result
def _halt(self):
raise NotImplementedError
def isHalted(self):
"""
Whether endpoint is currently halted.
"""
return self._halted
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
Function.close
|
python
|
def close(self):
ep_list = self._ep_list
while ep_list:
ep_list.pop().close()
self._closed = True
|
Close all endpoint file descriptors.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L800-L807
| null |
class Function(object):
"""
Pythonic class for interfacing with FunctionFS.
Properties available:
function_remote_wakeup_capable (bool)
Whether the function wishes to be allowed to wake host.
function_remote_wakeup (bool)
Whether host has allowed the function to wake it up.
Set and cleared by onSetup by calling enableRemoteWakeup and
disableRemoteWakeup, respectively.
"""
_closed = False
_ep_list = () # Avoids failing in __del__ when (subclass') __init__ fails.
function_remote_wakeup_capable = False
function_remote_wakeup = False
def __init__(
self,
path,
fs_list=(), hs_list=(), ss_list=(),
os_list=(),
lang_dict={},
all_ctrl_recip=False, config0_setup=False,
):
"""
path (string)
Path to the functionfs mountpoint (where the ep* files are
located).
{fs,hs,ss}_list (list of descriptors)
XXX: may change to avoid requiring ctype objects.
os_list (list of descriptors)
XXX: may change to avoid requiring ctype objects.
lang_dict (dict)
Keys: language id (ex: 0x0402 for "us-en").
Values: List of unicode objects. First item becomes string
descriptor 1, and so on. Must contain at least as many
string descriptors as the highest string index declared
in all descriptors.
all_ctrl_recip (bool)
When true, this function will receive all control transactions.
Useful when implementing non-standard control transactions.
config0_setup (bool)
When true, this function will receive control transactions before
any configuration gets enabled.
"""
self._path = path
ep0 = Endpoint0File(os.path.join(path, 'ep0'), 'r+')
self._ep_list = ep_list = [ep0]
self._ep_address_dict = ep_address_dict = {}
flags = 0
if all_ctrl_recip:
flags |= ALL_CTRL_RECIP
if config0_setup:
flags |= CONFIG0_SETUP
# Note: serialise does not prevent its argument from being freed and
# reallocated. Keep strong references to to-serialise values until
# after they get written.
desc = getDescsV2(
flags,
fs_list=fs_list,
hs_list=hs_list,
ss_list=ss_list,
os_list=os_list,
)
ep0.write(serialise(desc))
# TODO: try v1 on failure ?
del desc
# Note: see above.
strings = getStrings(lang_dict)
ep0.write(serialise(strings))
del strings
for descriptor in ss_list or hs_list or fs_list:
if descriptor.bDescriptorType == ch9.USB_DT_ENDPOINT:
assert descriptor.bEndpointAddress not in ep_address_dict, (
descriptor,
ep_address_dict[descriptor.bEndpointAddress],
)
index = len(ep_list)
ep_address_dict[descriptor.bEndpointAddress] = index
ep_list.append(
(
EndpointINFile
if descriptor.bEndpointAddress & ch9.USB_DIR_IN
else EndpointOUTFile
)(
os.path.join(path, 'ep%u' % (index, )),
'r+',
)
)
@property
def ep0(self):
"""
Endpoint 0, use when handling setup transactions.
"""
return self._ep_list[0]
def __del__(self):
self.close()
__event_dict = {
BIND: 'onBind',
UNBIND: 'onUnbind',
ENABLE: 'onEnable',
DISABLE: 'onDisable',
# SETUP: handled specially
SUSPEND: 'onSuspend',
RESUME: 'onResume',
}
def __process(self, iterator):
readinto = self.ep0.readinto
# FunctionFS can queue up to 4 events, so let's read that much.
event_len = ctypes.sizeof(Event)
array_type = Event * 4
buf = bytearray(ctypes.sizeof(array_type))
event_list = array_type.from_buffer(buf)
event_dict = self.__event_dict
for _ in iterator:
if self._closed:
break
try:
length = readinto(buf)
except IOError as exc:
if exc.errno == errno.EINTR:
continue
raise
if not length:
# Note: also catches None, returned when ep0 is non-blocking
break # TODO: test if this happens when ep0 gets closed
# (by FunctionFS or in another thread or in a handler)
count, remainder = divmod(length, event_len)
assert remainder == 0, (length, event_len)
for index in range(count):
event = event_list[index]
event_type = event.type
if event_type == SETUP:
setup = event.u.setup
try:
self.onSetup(
setup.bRequestType,
setup.bRequest,
setup.wValue,
setup.wIndex,
setup.wLength,
)
except:
# On *ANY* exception, halt endpoint
self.ep0.halt(setup.bRequestType)
raise
else:
getattr(self, event_dict[event.type])()
def processEventsForever(self):
"""
Process kernel ep0 events until closed.
ep0 must be in blocking mode, otherwise behaves like `processEvents`.
"""
self.__process(_INFINITY)
def processEvents(self):
"""
Process at least one kernel event if ep0 is in blocking mode.
Process any already available event if ep0 is in non-blocking mode.
"""
self.__process(_ONCE)
def getEndpoint(self, index):
"""
Return a file object corresponding to given endpoint index,
in descriptor list order.
"""
return self._ep_list[index]
def getEndpointByAddress(self, address):
"""
Return a file object corresponding to given endpoint address.
"""
return self.getEndpoint(self._ep_address_dict[address])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def onBind(self):
"""
Triggered when FunctionFS signals gadget binding.
May be overridden in subclass.
"""
pass
def onUnbind(self):
"""
Triggered when FunctionFS signals gadget unbinding.
May be overridden in subclass.
"""
pass
def onEnable(self):
"""
Called when FunctionFS signals the function was (re)enabled.
This may happen several times without onDisable being called.
It must reset the function to its default state.
May be overridden in subclass.
"""
self.disableRemoteWakeup()
def onDisable(self):
"""
Called when FunctionFS signals the function was (re)disabled.
This may happen several times without onEnable being called.
May be overridden in subclass.
"""
pass
def disableRemoteWakeup(self):
"""
Called when host issues a clearFeature request of the "suspend" flag
on this interface.
Sets function_remote_wakeup property to False so subsequent getStatus
requests will return expected value.
May be overridden in subclass.
"""
self.function_remote_wakeup = False
def enableRemoteWakeup(self):
"""
Called when host issues a setFeature request of the "suspend" flag
on this interface.
Sets function_remote_wakeup property to True so subsequent getStatus
requests will return expected value.
May be overridden in subclass.
"""
self.function_remote_wakeup = True
def onSetup(self, request_type, request, value, index, length):
"""
Called when a setup USB transaction was received.
Default implementation:
- handles USB_REQ_GET_STATUS on interface and endpoints
- handles USB_REQ_CLEAR_FEATURE(USB_ENDPOINT_HALT) on endpoints
- handles USB_REQ_SET_FEATURE(USB_ENDPOINT_HALT) on endpoints
- halts on everything else
If this method raises anything, endpoint 0 is halted by its caller and
exception is let through.
May be overridden in subclass.
"""
if (request_type & ch9.USB_TYPE_MASK) == ch9.USB_TYPE_STANDARD:
recipient = request_type & ch9.USB_RECIP_MASK
is_in = (request_type & ch9.USB_DIR_IN) == ch9.USB_DIR_IN
if request == ch9.USB_REQ_GET_STATUS:
if is_in and length == 2:
if recipient == ch9.USB_RECIP_INTERFACE:
if value == 0:
status = 0
if index == 0:
if self.function_remote_wakeup_capable:
status |= 1 << 0
if self.function_remote_wakeup:
status |= 1 << 1
self.ep0.write(struct.pack('<H', status)[:length])
return
elif recipient == ch9.USB_RECIP_ENDPOINT:
if value == 0:
try:
endpoint = self.getEndpoint(index)
except IndexError:
pass
else:
status = 0
if endpoint.isHalted():
status |= 1 << 0
self.ep0.write(
struct.pack('<H', status)[:length],
)
return
elif request == ch9.USB_REQ_CLEAR_FEATURE:
if not is_in and length == 0:
if recipient == ch9.USB_RECIP_ENDPOINT:
if value == ch9.USB_ENDPOINT_HALT:
try:
endpoint = self.getEndpoint(index)
except IndexError:
pass
else:
endpoint.clearHalt()
self.ep0.read(0)
return
elif recipient == ch9.USB_RECIP_INTERFACE:
if value == ch9.USB_INTRF_FUNC_SUSPEND:
if self.function_remote_wakeup_capable:
self.disableRemoteWakeup()
self.ep0.read(0)
return
elif request == ch9.USB_REQ_SET_FEATURE:
if not is_in and length == 0:
if recipient == ch9.USB_RECIP_ENDPOINT:
if value == ch9.USB_ENDPOINT_HALT:
try:
endpoint = self.getEndpoint(index)
except IndexError:
pass
else:
endpoint.halt()
self.ep0.read(0)
return
elif recipient == ch9.USB_RECIP_INTERFACE:
if value == ch9.USB_INTRF_FUNC_SUSPEND:
if self.function_remote_wakeup_capable:
self.enableRemoteWakeup()
self.ep0.read(0)
return
self.ep0.halt(request_type)
def onSuspend(self):
"""
Called when FunctionFS signals the host stops USB traffic.
May be overridden in subclass.
"""
pass
def onResume(self):
"""
Called when FunctionFS signals the host restarts USB traffic.
May be overridden in subclass.
"""
pass
|
vpelletier/python-functionfs
|
functionfs/__init__.py
|
Function.onSetup
|
python
|
def onSetup(self, request_type, request, value, index, length):
if (request_type & ch9.USB_TYPE_MASK) == ch9.USB_TYPE_STANDARD:
recipient = request_type & ch9.USB_RECIP_MASK
is_in = (request_type & ch9.USB_DIR_IN) == ch9.USB_DIR_IN
if request == ch9.USB_REQ_GET_STATUS:
if is_in and length == 2:
if recipient == ch9.USB_RECIP_INTERFACE:
if value == 0:
status = 0
if index == 0:
if self.function_remote_wakeup_capable:
status |= 1 << 0
if self.function_remote_wakeup:
status |= 1 << 1
self.ep0.write(struct.pack('<H', status)[:length])
return
elif recipient == ch9.USB_RECIP_ENDPOINT:
if value == 0:
try:
endpoint = self.getEndpoint(index)
except IndexError:
pass
else:
status = 0
if endpoint.isHalted():
status |= 1 << 0
self.ep0.write(
struct.pack('<H', status)[:length],
)
return
elif request == ch9.USB_REQ_CLEAR_FEATURE:
if not is_in and length == 0:
if recipient == ch9.USB_RECIP_ENDPOINT:
if value == ch9.USB_ENDPOINT_HALT:
try:
endpoint = self.getEndpoint(index)
except IndexError:
pass
else:
endpoint.clearHalt()
self.ep0.read(0)
return
elif recipient == ch9.USB_RECIP_INTERFACE:
if value == ch9.USB_INTRF_FUNC_SUSPEND:
if self.function_remote_wakeup_capable:
self.disableRemoteWakeup()
self.ep0.read(0)
return
elif request == ch9.USB_REQ_SET_FEATURE:
if not is_in and length == 0:
if recipient == ch9.USB_RECIP_ENDPOINT:
if value == ch9.USB_ENDPOINT_HALT:
try:
endpoint = self.getEndpoint(index)
except IndexError:
pass
else:
endpoint.halt()
self.ep0.read(0)
return
elif recipient == ch9.USB_RECIP_INTERFACE:
if value == ch9.USB_INTRF_FUNC_SUSPEND:
if self.function_remote_wakeup_capable:
self.enableRemoteWakeup()
self.ep0.read(0)
return
self.ep0.halt(request_type)
|
Called when a setup USB transaction was received.
Default implementation:
- handles USB_REQ_GET_STATUS on interface and endpoints
- handles USB_REQ_CLEAR_FEATURE(USB_ENDPOINT_HALT) on endpoints
- handles USB_REQ_SET_FEATURE(USB_ENDPOINT_HALT) on endpoints
- halts on everything else
If this method raises anything, endpoint 0 is halted by its caller and
exception is let through.
May be overridden in subclass.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/functionfs/__init__.py#L956-L1036
|
[
"def getEndpoint(self, index):\n \"\"\"\n Return a file object corresponding to given endpoint index,\n in descriptor list order.\n \"\"\"\n return self._ep_list[index]\n",
"def disableRemoteWakeup(self):\n \"\"\"\n Called when host issues a clearFeature request of the \"suspend\" flag\n on this interface.\n Sets function_remote_wakeup property to False so subsequent getStatus\n requests will return expected value.\n\n May be overridden in subclass.\n \"\"\"\n self.function_remote_wakeup = False\n",
"def enableRemoteWakeup(self):\n \"\"\"\n Called when host issues a setFeature request of the \"suspend\" flag\n on this interface.\n Sets function_remote_wakeup property to True so subsequent getStatus\n requests will return expected value.\n\n May be overridden in subclass.\n \"\"\"\n self.function_remote_wakeup = True\n"
] |
class Function(object):
"""
Pythonic class for interfacing with FunctionFS.
Properties available:
function_remote_wakeup_capable (bool)
Whether the function wishes to be allowed to wake host.
function_remote_wakeup (bool)
Whether host has allowed the function to wake it up.
Set and cleared by onSetup by calling enableRemoteWakeup and
disableRemoteWakeup, respectively.
"""
_closed = False
_ep_list = () # Avoids failing in __del__ when (subclass') __init__ fails.
function_remote_wakeup_capable = False
function_remote_wakeup = False
def __init__(
self,
path,
fs_list=(), hs_list=(), ss_list=(),
os_list=(),
lang_dict={},
all_ctrl_recip=False, config0_setup=False,
):
"""
path (string)
Path to the functionfs mountpoint (where the ep* files are
located).
{fs,hs,ss}_list (list of descriptors)
XXX: may change to avoid requiring ctype objects.
os_list (list of descriptors)
XXX: may change to avoid requiring ctype objects.
lang_dict (dict)
Keys: language id (ex: 0x0402 for "us-en").
Values: List of unicode objects. First item becomes string
descriptor 1, and so on. Must contain at least as many
string descriptors as the highest string index declared
in all descriptors.
all_ctrl_recip (bool)
When true, this function will receive all control transactions.
Useful when implementing non-standard control transactions.
config0_setup (bool)
When true, this function will receive control transactions before
any configuration gets enabled.
"""
self._path = path
ep0 = Endpoint0File(os.path.join(path, 'ep0'), 'r+')
self._ep_list = ep_list = [ep0]
self._ep_address_dict = ep_address_dict = {}
flags = 0
if all_ctrl_recip:
flags |= ALL_CTRL_RECIP
if config0_setup:
flags |= CONFIG0_SETUP
# Note: serialise does not prevent its argument from being freed and
# reallocated. Keep strong references to to-serialise values until
# after they get written.
desc = getDescsV2(
flags,
fs_list=fs_list,
hs_list=hs_list,
ss_list=ss_list,
os_list=os_list,
)
ep0.write(serialise(desc))
# TODO: try v1 on failure ?
del desc
# Note: see above.
strings = getStrings(lang_dict)
ep0.write(serialise(strings))
del strings
for descriptor in ss_list or hs_list or fs_list:
if descriptor.bDescriptorType == ch9.USB_DT_ENDPOINT:
assert descriptor.bEndpointAddress not in ep_address_dict, (
descriptor,
ep_address_dict[descriptor.bEndpointAddress],
)
index = len(ep_list)
ep_address_dict[descriptor.bEndpointAddress] = index
ep_list.append(
(
EndpointINFile
if descriptor.bEndpointAddress & ch9.USB_DIR_IN
else EndpointOUTFile
)(
os.path.join(path, 'ep%u' % (index, )),
'r+',
)
)
@property
def ep0(self):
"""
Endpoint 0, use when handling setup transactions.
"""
return self._ep_list[0]
def close(self):
"""
Close all endpoint file descriptors.
"""
ep_list = self._ep_list
while ep_list:
ep_list.pop().close()
self._closed = True
def __del__(self):
self.close()
__event_dict = {
BIND: 'onBind',
UNBIND: 'onUnbind',
ENABLE: 'onEnable',
DISABLE: 'onDisable',
# SETUP: handled specially
SUSPEND: 'onSuspend',
RESUME: 'onResume',
}
def __process(self, iterator):
readinto = self.ep0.readinto
# FunctionFS can queue up to 4 events, so let's read that much.
event_len = ctypes.sizeof(Event)
array_type = Event * 4
buf = bytearray(ctypes.sizeof(array_type))
event_list = array_type.from_buffer(buf)
event_dict = self.__event_dict
for _ in iterator:
if self._closed:
break
try:
length = readinto(buf)
except IOError as exc:
if exc.errno == errno.EINTR:
continue
raise
if not length:
# Note: also catches None, returned when ep0 is non-blocking
break # TODO: test if this happens when ep0 gets closed
# (by FunctionFS or in another thread or in a handler)
count, remainder = divmod(length, event_len)
assert remainder == 0, (length, event_len)
for index in range(count):
event = event_list[index]
event_type = event.type
if event_type == SETUP:
setup = event.u.setup
try:
self.onSetup(
setup.bRequestType,
setup.bRequest,
setup.wValue,
setup.wIndex,
setup.wLength,
)
except:
# On *ANY* exception, halt endpoint
self.ep0.halt(setup.bRequestType)
raise
else:
getattr(self, event_dict[event.type])()
def processEventsForever(self):
"""
Process kernel ep0 events until closed.
ep0 must be in blocking mode, otherwise behaves like `processEvents`.
"""
self.__process(_INFINITY)
def processEvents(self):
"""
Process at least one kernel event if ep0 is in blocking mode.
Process any already available event if ep0 is in non-blocking mode.
"""
self.__process(_ONCE)
def getEndpoint(self, index):
"""
Return a file object corresponding to given endpoint index,
in descriptor list order.
"""
return self._ep_list[index]
def getEndpointByAddress(self, address):
"""
Return a file object corresponding to given endpoint address.
"""
return self.getEndpoint(self._ep_address_dict[address])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def onBind(self):
"""
Triggered when FunctionFS signals gadget binding.
May be overridden in subclass.
"""
pass
def onUnbind(self):
"""
Triggered when FunctionFS signals gadget unbinding.
May be overridden in subclass.
"""
pass
def onEnable(self):
"""
Called when FunctionFS signals the function was (re)enabled.
This may happen several times without onDisable being called.
It must reset the function to its default state.
May be overridden in subclass.
"""
self.disableRemoteWakeup()
def onDisable(self):
"""
Called when FunctionFS signals the function was (re)disabled.
This may happen several times without onEnable being called.
May be overridden in subclass.
"""
pass
def disableRemoteWakeup(self):
"""
Called when host issues a clearFeature request of the "suspend" flag
on this interface.
Sets function_remote_wakeup property to False so subsequent getStatus
requests will return expected value.
May be overridden in subclass.
"""
self.function_remote_wakeup = False
def enableRemoteWakeup(self):
"""
Called when host issues a setFeature request of the "suspend" flag
on this interface.
Sets function_remote_wakeup property to True so subsequent getStatus
requests will return expected value.
May be overridden in subclass.
"""
self.function_remote_wakeup = True
def onSuspend(self):
"""
Called when FunctionFS signals the host stops USB traffic.
May be overridden in subclass.
"""
pass
def onResume(self):
"""
Called when FunctionFS signals the host restarts USB traffic.
May be overridden in subclass.
"""
pass
|
vpelletier/python-functionfs
|
examples/usbcat/slowprinter.py
|
main
|
python
|
def main():
now = datetime.datetime.now
try:
while True:
sys.stdout.write(str(now()) + ' ')
time.sleep(1)
except KeyboardInterrupt:
pass
except IOError as exc:
if exc.errno != errno.EPIPE:
raise
|
Slowly writes to stdout, without emitting a newline so any output
buffering (or input for next pipeline command) can be detected.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/examples/usbcat/slowprinter.py#L22-L36
| null |
#!/usr/bin/env python -u
# This file is part of python-functionfs
# Copyright (C) 2016-2018 Vincent Pelletier <plr.vincent@gmail.com>
#
# python-functionfs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-functionfs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-functionfs. If not, see <http://www.gnu.org/licenses/>.
import datetime
import errno
import sys
import time
if __name__ == '__main__':
main()
|
vpelletier/python-functionfs
|
examples/usbcat/device.py
|
USBCat.onEnable
|
python
|
def onEnable(self):
trace('onEnable')
self._disable()
self._aio_context.submit(self._aio_recv_block_list)
self._real_onCanSend()
self._enabled = True
|
The configuration containing this function has been enabled by host.
Endpoints become working files, so submit some read operations.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/examples/usbcat/device.py#L121-L130
|
[
"def _disable(self):\n \"\"\"\n The configuration containing this function has been disabled by host.\n Endpoint do not work anymore, so cancel AIO operation blocks.\n \"\"\"\n if self._enabled:\n self._real_onCannotSend()\n has_cancelled = 0\n for block in self._aio_recv_block_list + self._aio_send_block_list:\n try:\n self._aio_context.cancel(block)\n except OSError as exc:\n trace(\n 'cancelling %r raised: %s' % (block, exc),\n )\n else:\n has_cancelled += 1\n if has_cancelled:\n noIntr(functools.partial(self._aio_context.getEvents, min_nr=None))\n self._enabled = False\n"
] |
class USBCat(functionfs.Function):
_enabled = False
def __init__(self, path, writer, onCanSend, onCannotSend):
self._aio_context = libaio.AIOContext(
PENDING_READ_COUNT + MAX_PENDING_WRITE_COUNT,
)
self.eventfd = eventfd = libaio.EventFD()
self._writer = writer
fs_list, hs_list, ss_list = functionfs.getInterfaceInAllSpeeds(
interface={
'bInterfaceClass': functionfs.ch9.USB_CLASS_VENDOR_SPEC,
'iInterface': 1,
},
endpoint_list=[
{
'endpoint': {
'bEndpointAddress': functionfs.ch9.USB_DIR_IN,
'bmAttributes': functionfs.ch9.USB_ENDPOINT_XFER_BULK,
},
}, {
'endpoint': {
'bEndpointAddress': functionfs.ch9.USB_DIR_OUT,
'bmAttributes': functionfs.ch9.USB_ENDPOINT_XFER_BULK,
},
},
],
)
super(USBCat, self).__init__(
path,
fs_list=fs_list,
hs_list=hs_list,
ss_list=ss_list,
lang_dict={
0x0409: [
u"USBCat",
],
}
)
to_host = self.getEndpoint(2)
self._aio_recv_block_list = [
libaio.AIOBlock(
mode=libaio.AIOBLOCK_MODE_READ,
target_file=to_host,
buffer_list=[bytearray(BUF_SIZE)],
offset=0,
eventfd=eventfd,
onCompletion=self._onReceived,
)
for _ in xrange(PENDING_READ_COUNT)
]
self._aio_send_block_list = []
self._real_onCanSend = onCanSend
self._real_onCannotSend = onCannotSend
self._need_resume = False
def close(self):
self._disable()
self._aio_context.close()
super(USBCat, self).close()
def onBind(self):
"""
Just for tracing purposes.
"""
trace('onBind')
def onUnbind(self):
"""
Kernel may unbind us without calling disable.
It does cancel all pending IOs before signaling unbinding, so it would
be sufficient to mark us as disabled... Except we need to call
onCannotSend ourselves.
"""
trace('onUnbind')
self._disable()
def onDisable(self):
trace('onDisable')
self._disable()
def _disable(self):
"""
The configuration containing this function has been disabled by host.
Endpoint do not work anymore, so cancel AIO operation blocks.
"""
if self._enabled:
self._real_onCannotSend()
has_cancelled = 0
for block in self._aio_recv_block_list + self._aio_send_block_list:
try:
self._aio_context.cancel(block)
except OSError as exc:
trace(
'cancelling %r raised: %s' % (block, exc),
)
else:
has_cancelled += 1
if has_cancelled:
noIntr(functools.partial(self._aio_context.getEvents, min_nr=None))
self._enabled = False
def onAIOCompletion(self):
"""
Call when eventfd notified events are available.
"""
event_count = self.eventfd.read()
trace('eventfd reports %i events' % event_count)
# Even though eventfd signaled activity, even though it may give us
# some number of pending events, some events seem to have been already
# processed (maybe during io_cancel call ?).
# So do not trust eventfd value, and do not even trust that there must
# be even one event to process.
self._aio_context.getEvents(0)
def _onReceived(self, block, res, res2):
if res != -errno.ESHUTDOWN:
# XXX: is it good to resubmit on any other error ?
self._aio_context.submit([block])
if res < 0:
trace('aio read completion error:', -res)
else:
trace('aio read completion received', res, 'bytes')
self._writer(block.buffer_list[0][:res])
def _onCanSend(self, block, res, res2):
if res < 0:
trace('aio write completion error:', -res)
else:
trace('aio write completion sent', res, 'bytes')
self._aio_send_block_list.remove(block)
if self._need_resume:
trace('send queue has room, resume sending')
self._real_onCanSend()
self._need_resume = False
def _onCannotSend(self):
trace('send queue full, pause sending')
self._real_onCannotSend()
self._need_resume = True
def write(self, value):
"""
Queue write in kernel.
value (bytes)
Value to send.
"""
aio_block = libaio.AIOBlock(
mode=libaio.AIOBLOCK_MODE_WRITE,
target_file=self.getEndpoint(1),
buffer_list=[bytearray(value)],
offset=0,
eventfd=self.eventfd,
onCompletion=self._onCanSend,
)
self._aio_send_block_list.append(aio_block)
self._aio_context.submit([aio_block])
if len(self._aio_send_block_list) == MAX_PENDING_WRITE_COUNT:
self._onCannotSend()
|
vpelletier/python-functionfs
|
examples/usbcat/device.py
|
USBCat._disable
|
python
|
def _disable(self):
if self._enabled:
self._real_onCannotSend()
has_cancelled = 0
for block in self._aio_recv_block_list + self._aio_send_block_list:
try:
self._aio_context.cancel(block)
except OSError as exc:
trace(
'cancelling %r raised: %s' % (block, exc),
)
else:
has_cancelled += 1
if has_cancelled:
noIntr(functools.partial(self._aio_context.getEvents, min_nr=None))
self._enabled = False
|
The configuration containing this function has been disabled by host.
Endpoint do not work anymore, so cancel AIO operation blocks.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/examples/usbcat/device.py#L136-L155
|
[
"def noIntr(func):\n while True:\n try:\n return func()\n except (IOError, OSError) as exc:\n if exc.errno != errno.EINTR:\n raise\n"
] |
class USBCat(functionfs.Function):
_enabled = False
def __init__(self, path, writer, onCanSend, onCannotSend):
self._aio_context = libaio.AIOContext(
PENDING_READ_COUNT + MAX_PENDING_WRITE_COUNT,
)
self.eventfd = eventfd = libaio.EventFD()
self._writer = writer
fs_list, hs_list, ss_list = functionfs.getInterfaceInAllSpeeds(
interface={
'bInterfaceClass': functionfs.ch9.USB_CLASS_VENDOR_SPEC,
'iInterface': 1,
},
endpoint_list=[
{
'endpoint': {
'bEndpointAddress': functionfs.ch9.USB_DIR_IN,
'bmAttributes': functionfs.ch9.USB_ENDPOINT_XFER_BULK,
},
}, {
'endpoint': {
'bEndpointAddress': functionfs.ch9.USB_DIR_OUT,
'bmAttributes': functionfs.ch9.USB_ENDPOINT_XFER_BULK,
},
},
],
)
super(USBCat, self).__init__(
path,
fs_list=fs_list,
hs_list=hs_list,
ss_list=ss_list,
lang_dict={
0x0409: [
u"USBCat",
],
}
)
to_host = self.getEndpoint(2)
self._aio_recv_block_list = [
libaio.AIOBlock(
mode=libaio.AIOBLOCK_MODE_READ,
target_file=to_host,
buffer_list=[bytearray(BUF_SIZE)],
offset=0,
eventfd=eventfd,
onCompletion=self._onReceived,
)
for _ in xrange(PENDING_READ_COUNT)
]
self._aio_send_block_list = []
self._real_onCanSend = onCanSend
self._real_onCannotSend = onCannotSend
self._need_resume = False
def close(self):
self._disable()
self._aio_context.close()
super(USBCat, self).close()
def onBind(self):
"""
Just for tracing purposes.
"""
trace('onBind')
def onUnbind(self):
"""
Kernel may unbind us without calling disable.
It does cancel all pending IOs before signaling unbinding, so it would
be sufficient to mark us as disabled... Except we need to call
onCannotSend ourselves.
"""
trace('onUnbind')
self._disable()
def onEnable(self):
"""
The configuration containing this function has been enabled by host.
Endpoints become working files, so submit some read operations.
"""
trace('onEnable')
self._disable()
self._aio_context.submit(self._aio_recv_block_list)
self._real_onCanSend()
self._enabled = True
def onDisable(self):
trace('onDisable')
self._disable()
def onAIOCompletion(self):
"""
Call when eventfd notified events are available.
"""
event_count = self.eventfd.read()
trace('eventfd reports %i events' % event_count)
# Even though eventfd signaled activity, even though it may give us
# some number of pending events, some events seem to have been already
# processed (maybe during io_cancel call ?).
# So do not trust eventfd value, and do not even trust that there must
# be even one event to process.
self._aio_context.getEvents(0)
def _onReceived(self, block, res, res2):
if res != -errno.ESHUTDOWN:
# XXX: is it good to resubmit on any other error ?
self._aio_context.submit([block])
if res < 0:
trace('aio read completion error:', -res)
else:
trace('aio read completion received', res, 'bytes')
self._writer(block.buffer_list[0][:res])
def _onCanSend(self, block, res, res2):
if res < 0:
trace('aio write completion error:', -res)
else:
trace('aio write completion sent', res, 'bytes')
self._aio_send_block_list.remove(block)
if self._need_resume:
trace('send queue has room, resume sending')
self._real_onCanSend()
self._need_resume = False
def _onCannotSend(self):
trace('send queue full, pause sending')
self._real_onCannotSend()
self._need_resume = True
def write(self, value):
"""
Queue write in kernel.
value (bytes)
Value to send.
"""
aio_block = libaio.AIOBlock(
mode=libaio.AIOBLOCK_MODE_WRITE,
target_file=self.getEndpoint(1),
buffer_list=[bytearray(value)],
offset=0,
eventfd=self.eventfd,
onCompletion=self._onCanSend,
)
self._aio_send_block_list.append(aio_block)
self._aio_context.submit([aio_block])
if len(self._aio_send_block_list) == MAX_PENDING_WRITE_COUNT:
self._onCannotSend()
|
vpelletier/python-functionfs
|
examples/usbcat/device.py
|
USBCat.onAIOCompletion
|
python
|
def onAIOCompletion(self):
event_count = self.eventfd.read()
trace('eventfd reports %i events' % event_count)
# Even though eventfd signaled activity, even though it may give us
# some number of pending events, some events seem to have been already
# processed (maybe during io_cancel call ?).
# So do not trust eventfd value, and do not even trust that there must
# be even one event to process.
self._aio_context.getEvents(0)
|
Call when eventfd notified events are available.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/examples/usbcat/device.py#L157-L168
| null |
class USBCat(functionfs.Function):
_enabled = False
def __init__(self, path, writer, onCanSend, onCannotSend):
self._aio_context = libaio.AIOContext(
PENDING_READ_COUNT + MAX_PENDING_WRITE_COUNT,
)
self.eventfd = eventfd = libaio.EventFD()
self._writer = writer
fs_list, hs_list, ss_list = functionfs.getInterfaceInAllSpeeds(
interface={
'bInterfaceClass': functionfs.ch9.USB_CLASS_VENDOR_SPEC,
'iInterface': 1,
},
endpoint_list=[
{
'endpoint': {
'bEndpointAddress': functionfs.ch9.USB_DIR_IN,
'bmAttributes': functionfs.ch9.USB_ENDPOINT_XFER_BULK,
},
}, {
'endpoint': {
'bEndpointAddress': functionfs.ch9.USB_DIR_OUT,
'bmAttributes': functionfs.ch9.USB_ENDPOINT_XFER_BULK,
},
},
],
)
super(USBCat, self).__init__(
path,
fs_list=fs_list,
hs_list=hs_list,
ss_list=ss_list,
lang_dict={
0x0409: [
u"USBCat",
],
}
)
to_host = self.getEndpoint(2)
self._aio_recv_block_list = [
libaio.AIOBlock(
mode=libaio.AIOBLOCK_MODE_READ,
target_file=to_host,
buffer_list=[bytearray(BUF_SIZE)],
offset=0,
eventfd=eventfd,
onCompletion=self._onReceived,
)
for _ in xrange(PENDING_READ_COUNT)
]
self._aio_send_block_list = []
self._real_onCanSend = onCanSend
self._real_onCannotSend = onCannotSend
self._need_resume = False
def close(self):
self._disable()
self._aio_context.close()
super(USBCat, self).close()
def onBind(self):
"""
Just for tracing purposes.
"""
trace('onBind')
def onUnbind(self):
"""
Kernel may unbind us without calling disable.
It does cancel all pending IOs before signaling unbinding, so it would
be sufficient to mark us as disabled... Except we need to call
onCannotSend ourselves.
"""
trace('onUnbind')
self._disable()
def onEnable(self):
"""
The configuration containing this function has been enabled by host.
Endpoints become working files, so submit some read operations.
"""
trace('onEnable')
self._disable()
self._aio_context.submit(self._aio_recv_block_list)
self._real_onCanSend()
self._enabled = True
def onDisable(self):
trace('onDisable')
self._disable()
def _disable(self):
"""
The configuration containing this function has been disabled by host.
Endpoint do not work anymore, so cancel AIO operation blocks.
"""
if self._enabled:
self._real_onCannotSend()
has_cancelled = 0
for block in self._aio_recv_block_list + self._aio_send_block_list:
try:
self._aio_context.cancel(block)
except OSError as exc:
trace(
'cancelling %r raised: %s' % (block, exc),
)
else:
has_cancelled += 1
if has_cancelled:
noIntr(functools.partial(self._aio_context.getEvents, min_nr=None))
self._enabled = False
def _onReceived(self, block, res, res2):
if res != -errno.ESHUTDOWN:
# XXX: is it good to resubmit on any other error ?
self._aio_context.submit([block])
if res < 0:
trace('aio read completion error:', -res)
else:
trace('aio read completion received', res, 'bytes')
self._writer(block.buffer_list[0][:res])
def _onCanSend(self, block, res, res2):
if res < 0:
trace('aio write completion error:', -res)
else:
trace('aio write completion sent', res, 'bytes')
self._aio_send_block_list.remove(block)
if self._need_resume:
trace('send queue has room, resume sending')
self._real_onCanSend()
self._need_resume = False
def _onCannotSend(self):
trace('send queue full, pause sending')
self._real_onCannotSend()
self._need_resume = True
def write(self, value):
"""
Queue write in kernel.
value (bytes)
Value to send.
"""
aio_block = libaio.AIOBlock(
mode=libaio.AIOBLOCK_MODE_WRITE,
target_file=self.getEndpoint(1),
buffer_list=[bytearray(value)],
offset=0,
eventfd=self.eventfd,
onCompletion=self._onCanSend,
)
self._aio_send_block_list.append(aio_block)
self._aio_context.submit([aio_block])
if len(self._aio_send_block_list) == MAX_PENDING_WRITE_COUNT:
self._onCannotSend()
|
vpelletier/python-functionfs
|
examples/usbcat/device.py
|
USBCat.write
|
python
|
def write(self, value):
aio_block = libaio.AIOBlock(
mode=libaio.AIOBLOCK_MODE_WRITE,
target_file=self.getEndpoint(1),
buffer_list=[bytearray(value)],
offset=0,
eventfd=self.eventfd,
onCompletion=self._onCanSend,
)
self._aio_send_block_list.append(aio_block)
self._aio_context.submit([aio_block])
if len(self._aio_send_block_list) == MAX_PENDING_WRITE_COUNT:
self._onCannotSend()
|
Queue write in kernel.
value (bytes)
Value to send.
|
train
|
https://github.com/vpelletier/python-functionfs/blob/e19f729bb47a7d1edd2488531af24551bb86726f/examples/usbcat/device.py#L196-L213
|
[
"def getEndpoint(self, index):\n \"\"\"\n Return a file object corresponding to given endpoint index,\n in descriptor list order.\n \"\"\"\n return self._ep_list[index]\n",
"def _onCannotSend(self):\n trace('send queue full, pause sending')\n self._real_onCannotSend()\n self._need_resume = True\n"
] |
class USBCat(functionfs.Function):
_enabled = False
def __init__(self, path, writer, onCanSend, onCannotSend):
self._aio_context = libaio.AIOContext(
PENDING_READ_COUNT + MAX_PENDING_WRITE_COUNT,
)
self.eventfd = eventfd = libaio.EventFD()
self._writer = writer
fs_list, hs_list, ss_list = functionfs.getInterfaceInAllSpeeds(
interface={
'bInterfaceClass': functionfs.ch9.USB_CLASS_VENDOR_SPEC,
'iInterface': 1,
},
endpoint_list=[
{
'endpoint': {
'bEndpointAddress': functionfs.ch9.USB_DIR_IN,
'bmAttributes': functionfs.ch9.USB_ENDPOINT_XFER_BULK,
},
}, {
'endpoint': {
'bEndpointAddress': functionfs.ch9.USB_DIR_OUT,
'bmAttributes': functionfs.ch9.USB_ENDPOINT_XFER_BULK,
},
},
],
)
super(USBCat, self).__init__(
path,
fs_list=fs_list,
hs_list=hs_list,
ss_list=ss_list,
lang_dict={
0x0409: [
u"USBCat",
],
}
)
to_host = self.getEndpoint(2)
self._aio_recv_block_list = [
libaio.AIOBlock(
mode=libaio.AIOBLOCK_MODE_READ,
target_file=to_host,
buffer_list=[bytearray(BUF_SIZE)],
offset=0,
eventfd=eventfd,
onCompletion=self._onReceived,
)
for _ in xrange(PENDING_READ_COUNT)
]
self._aio_send_block_list = []
self._real_onCanSend = onCanSend
self._real_onCannotSend = onCannotSend
self._need_resume = False
def close(self):
self._disable()
self._aio_context.close()
super(USBCat, self).close()
def onBind(self):
"""
Just for tracing purposes.
"""
trace('onBind')
def onUnbind(self):
"""
Kernel may unbind us without calling disable.
It does cancel all pending IOs before signaling unbinding, so it would
be sufficient to mark us as disabled... Except we need to call
onCannotSend ourselves.
"""
trace('onUnbind')
self._disable()
def onEnable(self):
"""
The configuration containing this function has been enabled by host.
Endpoints become working files, so submit some read operations.
"""
trace('onEnable')
self._disable()
self._aio_context.submit(self._aio_recv_block_list)
self._real_onCanSend()
self._enabled = True
def onDisable(self):
trace('onDisable')
self._disable()
def _disable(self):
"""
The configuration containing this function has been disabled by host.
Endpoint do not work anymore, so cancel AIO operation blocks.
"""
if self._enabled:
self._real_onCannotSend()
has_cancelled = 0
for block in self._aio_recv_block_list + self._aio_send_block_list:
try:
self._aio_context.cancel(block)
except OSError as exc:
trace(
'cancelling %r raised: %s' % (block, exc),
)
else:
has_cancelled += 1
if has_cancelled:
noIntr(functools.partial(self._aio_context.getEvents, min_nr=None))
self._enabled = False
def onAIOCompletion(self):
"""
Call when eventfd notified events are available.
"""
event_count = self.eventfd.read()
trace('eventfd reports %i events' % event_count)
# Even though eventfd signaled activity, even though it may give us
# some number of pending events, some events seem to have been already
# processed (maybe during io_cancel call ?).
# So do not trust eventfd value, and do not even trust that there must
# be even one event to process.
self._aio_context.getEvents(0)
def _onReceived(self, block, res, res2):
if res != -errno.ESHUTDOWN:
# XXX: is it good to resubmit on any other error ?
self._aio_context.submit([block])
if res < 0:
trace('aio read completion error:', -res)
else:
trace('aio read completion received', res, 'bytes')
self._writer(block.buffer_list[0][:res])
def _onCanSend(self, block, res, res2):
if res < 0:
trace('aio write completion error:', -res)
else:
trace('aio write completion sent', res, 'bytes')
self._aio_send_block_list.remove(block)
if self._need_resume:
trace('send queue has room, resume sending')
self._real_onCanSend()
self._need_resume = False
def _onCannotSend(self):
trace('send queue full, pause sending')
self._real_onCannotSend()
self._need_resume = True
|
tuxu/python-samplerate
|
examples/play_modulation.py
|
get_input_callback
|
python
|
def get_input_callback(samplerate, params, num_samples=256):
amplitude = params['mod_amplitude']
frequency = params['mod_frequency']
def producer():
"""Generate samples.
Yields
------
samples : ndarray
A number of samples (`num_samples`) of the sine.
"""
start_time = 0
while True:
time = start_time + np.arange(num_samples) / samplerate
start_time += num_samples / samplerate
output = amplitude * np.cos(2 * np.pi * frequency * time)
yield output
return lambda p=producer(): next(p)
|
Return a function that produces samples of a sine.
Parameters
----------
samplerate : float
The sample rate.
params : dict
Parameters for FM generation.
num_samples : int, optional
Number of samples to be generated on each call.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/examples/play_modulation.py#L27-L57
|
[
"def producer():\n \"\"\"Generate samples.\n\n Yields\n ------\n samples : ndarray\n A number of samples (`num_samples`) of the sine.\n \"\"\"\n start_time = 0\n while True:\n time = start_time + np.arange(num_samples) / samplerate\n start_time += num_samples / samplerate\n output = amplitude * np.cos(2 * np.pi * frequency * time)\n yield output\n"
] |
#!/usr/bin/env python
"""Demonstrate realtime audio resampling and playback using the callback API.
A carrier frequency is modulated by a sine wave, and the resulting signal is
played back on the default sound output. During playback, the modulation signal
is generated at source samplerate, then resampled to target samplerate, and
mixed onto the carrier.
"""
from __future__ import print_function, division
import numpy as np
import sounddevice as sd
import samplerate as sr
source_samplerate = 3600
target_samplerate = 44100
converter_type = 'sinc_fastest'
params = {
'mod_amplitude': 1, # Modulation amplitude (Hz)
'mod_frequency': 1, # Modulation frequency (Hz)
'fm_gain': 20, # FM gain (Hz/Hz)
'output_volume': 0.1, # Output volume
'carrier_frequency': 500, # Carrier frequency (Hz)
}
def get_playback_callback(resampler, samplerate, params):
"""Return a sound playback callback.
Parameters
----------
resampler
The resampler from which samples are read.
samplerate : float
The sample rate.
params : dict
Parameters for FM generation.
"""
def callback(outdata, frames, time, _):
"""Playback callback.
Read samples from the resampler and modulate them onto a carrier
frequency.
"""
last_fmphase = getattr(callback, 'last_fmphase', 0)
df = params['fm_gain'] * resampler.read(frames)
df = np.pad(df, (0, frames - len(df)), mode='constant')
t = time.outputBufferDacTime + np.arange(frames) / samplerate
phase = 2 * np.pi * params['carrier_frequency'] * t
fmphase = last_fmphase + 2 * np.pi * np.cumsum(df) / samplerate
outdata[:, 0] = params['output_volume'] * np.cos(phase + fmphase)
callback.last_fmphase = fmphase[-1]
return callback
def main(source_samplerate, target_samplerate, params, converter_type):
"""Setup the resampling and audio output callbacks and start playback."""
from time import sleep
ratio = target_samplerate / source_samplerate
with sr.CallbackResampler(get_input_callback(source_samplerate, params),
ratio, converter_type) as resampler, \
sd.OutputStream(channels=1, samplerate=target_samplerate,
callback=get_playback_callback(
resampler, target_samplerate, params)):
print("Playing back... Ctrl+C to stop.")
try:
while True:
sleep(1)
except KeyboardInterrupt:
print("Aborting.")
if __name__ == '__main__':
main(
source_samplerate=source_samplerate,
target_samplerate=target_samplerate,
params=params,
converter_type=converter_type)
|
tuxu/python-samplerate
|
examples/play_modulation.py
|
get_playback_callback
|
python
|
def get_playback_callback(resampler, samplerate, params):
def callback(outdata, frames, time, _):
"""Playback callback.
Read samples from the resampler and modulate them onto a carrier
frequency.
"""
last_fmphase = getattr(callback, 'last_fmphase', 0)
df = params['fm_gain'] * resampler.read(frames)
df = np.pad(df, (0, frames - len(df)), mode='constant')
t = time.outputBufferDacTime + np.arange(frames) / samplerate
phase = 2 * np.pi * params['carrier_frequency'] * t
fmphase = last_fmphase + 2 * np.pi * np.cumsum(df) / samplerate
outdata[:, 0] = params['output_volume'] * np.cos(phase + fmphase)
callback.last_fmphase = fmphase[-1]
return callback
|
Return a sound playback callback.
Parameters
----------
resampler
The resampler from which samples are read.
samplerate : float
The sample rate.
params : dict
Parameters for FM generation.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/examples/play_modulation.py#L60-L88
| null |
#!/usr/bin/env python
"""Demonstrate realtime audio resampling and playback using the callback API.
A carrier frequency is modulated by a sine wave, and the resulting signal is
played back on the default sound output. During playback, the modulation signal
is generated at source samplerate, then resampled to target samplerate, and
mixed onto the carrier.
"""
from __future__ import print_function, division
import numpy as np
import sounddevice as sd
import samplerate as sr
source_samplerate = 3600
target_samplerate = 44100
converter_type = 'sinc_fastest'
params = {
'mod_amplitude': 1, # Modulation amplitude (Hz)
'mod_frequency': 1, # Modulation frequency (Hz)
'fm_gain': 20, # FM gain (Hz/Hz)
'output_volume': 0.1, # Output volume
'carrier_frequency': 500, # Carrier frequency (Hz)
}
def get_input_callback(samplerate, params, num_samples=256):
"""Return a function that produces samples of a sine.
Parameters
----------
samplerate : float
The sample rate.
params : dict
Parameters for FM generation.
num_samples : int, optional
Number of samples to be generated on each call.
"""
amplitude = params['mod_amplitude']
frequency = params['mod_frequency']
def producer():
"""Generate samples.
Yields
------
samples : ndarray
A number of samples (`num_samples`) of the sine.
"""
start_time = 0
while True:
time = start_time + np.arange(num_samples) / samplerate
start_time += num_samples / samplerate
output = amplitude * np.cos(2 * np.pi * frequency * time)
yield output
return lambda p=producer(): next(p)
def main(source_samplerate, target_samplerate, params, converter_type):
"""Setup the resampling and audio output callbacks and start playback."""
from time import sleep
ratio = target_samplerate / source_samplerate
with sr.CallbackResampler(get_input_callback(source_samplerate, params),
ratio, converter_type) as resampler, \
sd.OutputStream(channels=1, samplerate=target_samplerate,
callback=get_playback_callback(
resampler, target_samplerate, params)):
print("Playing back... Ctrl+C to stop.")
try:
while True:
sleep(1)
except KeyboardInterrupt:
print("Aborting.")
if __name__ == '__main__':
main(
source_samplerate=source_samplerate,
target_samplerate=target_samplerate,
params=params,
converter_type=converter_type)
|
tuxu/python-samplerate
|
examples/play_modulation.py
|
main
|
python
|
def main(source_samplerate, target_samplerate, params, converter_type):
from time import sleep
ratio = target_samplerate / source_samplerate
with sr.CallbackResampler(get_input_callback(source_samplerate, params),
ratio, converter_type) as resampler, \
sd.OutputStream(channels=1, samplerate=target_samplerate,
callback=get_playback_callback(
resampler, target_samplerate, params)):
print("Playing back... Ctrl+C to stop.")
try:
while True:
sleep(1)
except KeyboardInterrupt:
print("Aborting.")
|
Setup the resampling and audio output callbacks and start playback.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/examples/play_modulation.py#L91-L107
|
[
"def get_input_callback(samplerate, params, num_samples=256):\n \"\"\"Return a function that produces samples of a sine.\n\n Parameters\n ----------\n samplerate : float\n The sample rate.\n params : dict\n Parameters for FM generation.\n num_samples : int, optional\n Number of samples to be generated on each call.\n \"\"\"\n amplitude = params['mod_amplitude']\n frequency = params['mod_frequency']\n\n def producer():\n \"\"\"Generate samples.\n\n Yields\n ------\n samples : ndarray\n A number of samples (`num_samples`) of the sine.\n \"\"\"\n start_time = 0\n while True:\n time = start_time + np.arange(num_samples) / samplerate\n start_time += num_samples / samplerate\n output = amplitude * np.cos(2 * np.pi * frequency * time)\n yield output\n\n return lambda p=producer(): next(p)\n",
"def get_playback_callback(resampler, samplerate, params):\n \"\"\"Return a sound playback callback.\n\n Parameters\n ----------\n resampler\n The resampler from which samples are read.\n samplerate : float\n The sample rate.\n params : dict\n Parameters for FM generation.\n \"\"\"\n\n def callback(outdata, frames, time, _):\n \"\"\"Playback callback.\n\n Read samples from the resampler and modulate them onto a carrier\n frequency.\n \"\"\"\n last_fmphase = getattr(callback, 'last_fmphase', 0)\n df = params['fm_gain'] * resampler.read(frames)\n df = np.pad(df, (0, frames - len(df)), mode='constant')\n t = time.outputBufferDacTime + np.arange(frames) / samplerate\n phase = 2 * np.pi * params['carrier_frequency'] * t\n fmphase = last_fmphase + 2 * np.pi * np.cumsum(df) / samplerate\n outdata[:, 0] = params['output_volume'] * np.cos(phase + fmphase)\n callback.last_fmphase = fmphase[-1]\n\n return callback\n"
] |
#!/usr/bin/env python
"""Demonstrate realtime audio resampling and playback using the callback API.
A carrier frequency is modulated by a sine wave, and the resulting signal is
played back on the default sound output. During playback, the modulation signal
is generated at source samplerate, then resampled to target samplerate, and
mixed onto the carrier.
"""
from __future__ import print_function, division
import numpy as np
import sounddevice as sd
import samplerate as sr
source_samplerate = 3600
target_samplerate = 44100
converter_type = 'sinc_fastest'
params = {
'mod_amplitude': 1, # Modulation amplitude (Hz)
'mod_frequency': 1, # Modulation frequency (Hz)
'fm_gain': 20, # FM gain (Hz/Hz)
'output_volume': 0.1, # Output volume
'carrier_frequency': 500, # Carrier frequency (Hz)
}
def get_input_callback(samplerate, params, num_samples=256):
"""Return a function that produces samples of a sine.
Parameters
----------
samplerate : float
The sample rate.
params : dict
Parameters for FM generation.
num_samples : int, optional
Number of samples to be generated on each call.
"""
amplitude = params['mod_amplitude']
frequency = params['mod_frequency']
def producer():
"""Generate samples.
Yields
------
samples : ndarray
A number of samples (`num_samples`) of the sine.
"""
start_time = 0
while True:
time = start_time + np.arange(num_samples) / samplerate
start_time += num_samples / samplerate
output = amplitude * np.cos(2 * np.pi * frequency * time)
yield output
return lambda p=producer(): next(p)
def get_playback_callback(resampler, samplerate, params):
"""Return a sound playback callback.
Parameters
----------
resampler
The resampler from which samples are read.
samplerate : float
The sample rate.
params : dict
Parameters for FM generation.
"""
def callback(outdata, frames, time, _):
"""Playback callback.
Read samples from the resampler and modulate them onto a carrier
frequency.
"""
last_fmphase = getattr(callback, 'last_fmphase', 0)
df = params['fm_gain'] * resampler.read(frames)
df = np.pad(df, (0, frames - len(df)), mode='constant')
t = time.outputBufferDacTime + np.arange(frames) / samplerate
phase = 2 * np.pi * params['carrier_frequency'] * t
fmphase = last_fmphase + 2 * np.pi * np.cumsum(df) / samplerate
outdata[:, 0] = params['output_volume'] * np.cos(phase + fmphase)
callback.last_fmphase = fmphase[-1]
return callback
if __name__ == '__main__':
main(
source_samplerate=source_samplerate,
target_samplerate=target_samplerate,
params=params,
converter_type=converter_type)
|
tuxu/python-samplerate
|
samplerate/converters.py
|
_get_converter_type
|
python
|
def _get_converter_type(identifier):
if isinstance(identifier, str):
return ConverterType[identifier]
if isinstance(identifier, ConverterType):
return identifier
return ConverterType(identifier)
|
Return the converter type for `identifier`.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/converters.py#L22-L28
| null |
"""Converters
"""
from __future__ import print_function, division
from enum import Enum
import numpy as np
class ConverterType(Enum):
"""Enum of samplerate converter types.
Pass any of the members, or their string or value representation, as
``converter_type`` in the resamplers.
"""
sinc_best = 0
sinc_medium = 1
sinc_fastest = 2
zero_order_hold = 3
linear = 4
def resample(input_data, ratio, converter_type='sinc_best', verbose=False):
"""Resample the signal in `input_data` at once.
Parameters
----------
input_data : ndarray
Input data. A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
verbose : bool
If `True`, print additional information about the conversion.
Returns
-------
output_data : ndarray
Resampled input data.
Note
----
If samples are to be processed in chunks, `Resampler` and
`CallbackResampler` will provide better results and allow for variable
conversion ratios.
"""
from samplerate.lowlevel import src_simple
from samplerate.exceptions import ResamplingError
input_data = np.require(input_data, requirements='C', dtype=np.float32)
if input_data.ndim == 2:
num_frames, channels = input_data.shape
output_shape = (int(num_frames * ratio), channels)
elif input_data.ndim == 1:
num_frames, channels = input_data.size, 1
output_shape = (int(num_frames * ratio), )
else:
raise ValueError('rank > 2 not supported')
output_data = np.empty(output_shape, dtype=np.float32)
converter_type = _get_converter_type(converter_type)
(error, input_frames_used, output_frames_gen) \
= src_simple(input_data, output_data, ratio,
converter_type.value, channels)
if error != 0:
raise ResamplingError(error)
if verbose:
info = ('samplerate info:\n'
'{} input frames used\n'
'{} output frames generated\n'
.format(input_frames_used, output_frames_gen))
print(info)
return (output_data[:output_frames_gen, :]
if channels > 1 else output_data[:output_frames_gen])
class Resampler(object):
"""Resampler.
Parameters
----------
converter_type : ConverterType, str, or int
Sample rate converter.
num_channels : int
Number of channels.
"""
def __init__(self, converter_type='sinc_fastest', channels=1):
from samplerate.lowlevel import ffi, src_new, src_delete
from samplerate.exceptions import ResamplingError
converter_type = _get_converter_type(converter_type)
state, error = src_new(converter_type.value, channels)
self._state = ffi.gc(state, src_delete)
self._converter_type = converter_type
self._channels = channels
if error != 0:
raise ResamplingError(error)
@property
def converter_type(self):
"""Converter type."""
return self._converter_type
@property
def channels(self):
"""Number of channels."""
return self._channels
def reset(self):
"""Reset internal state."""
from samplerate.lowlevel import src_reset
return src_reset(self._state)
def set_ratio(self, new_ratio):
"""Set a new conversion ratio immediately."""
from samplerate.lowlevel import src_set_ratio
return src_set_ratio(self._state, new_ratio)
def process(self, input_data, ratio, end_of_input=False, verbose=False):
"""Resample the signal in `input_data`.
Parameters
----------
input_data : ndarray
Input data. A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
end_of_input : int
Set to `True` if no more data is available, or to `False` otherwise.
verbose : bool
If `True`, print additional information about the conversion.
Returns
-------
output_data : ndarray
Resampled input data.
"""
from samplerate.lowlevel import src_process
from samplerate.exceptions import ResamplingError
input_data = np.require(input_data, requirements='C', dtype=np.float32)
if input_data.ndim == 2:
num_frames, channels = input_data.shape
output_shape = (int(num_frames * ratio), channels)
elif input_data.ndim == 1:
num_frames, channels = input_data.size, 1
output_shape = (int(num_frames * ratio), )
else:
raise ValueError('rank > 2 not supported')
if channels != self._channels:
raise ValueError('Invalid number of channels in input data.')
output_data = np.empty(output_shape, dtype=np.float32)
(error, input_frames_used, output_frames_gen) = src_process(
self._state, input_data, output_data, ratio, end_of_input)
if error != 0:
raise ResamplingError(error)
if verbose:
info = ('samplerate info:\n'
'{} input frames used\n'
'{} output frames generated\n'
.format(input_frames_used, output_frames_gen))
print(info)
return (output_data[:output_frames_gen, :]
if channels > 1 else output_data[:output_frames_gen])
class CallbackResampler(object):
"""CallbackResampler.
Parameters
----------
callback : function
Function that returns new frames on each call, or `None` otherwise.
A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
channels : int
Number of channels.
"""
def __init__(self, callback, ratio, converter_type='sinc_fastest',
channels=1):
if channels < 1:
raise ValueError('Invalid number of channels.')
self._callback = callback
self._ratio = ratio
self._converter_type = _get_converter_type(converter_type)
self._channels = channels
self._state = None
self._handle = None
self._create()
def _create(self):
"""Create new callback resampler."""
from samplerate.lowlevel import ffi, src_callback_new, src_delete
from samplerate.exceptions import ResamplingError
state, handle, error = src_callback_new(
self._callback, self._converter_type.value, self._channels)
if error != 0:
raise ResamplingError(error)
self._state = ffi.gc(state, src_delete)
self._handle = handle
def _destroy(self):
"""Destroy resampler state."""
if self._state:
self._state = None
self._handle = None
def __enter__(self):
return self
def __exit__(self, *args):
self._destroy()
def set_starting_ratio(self, ratio):
""" Set the starting conversion ratio for the next `read` call. """
from samplerate.lowlevel import src_set_ratio
if self._state is None:
self._create()
src_set_ratio(self._state, ratio)
self.ratio = ratio
def reset(self):
"""Reset state."""
from samplerate.lowlevel import src_reset
if self._state is None:
self._create()
src_reset(self._state)
@property
def ratio(self):
"""Conversion ratio = output sample rate / input sample rate."""
return self._ratio
@ratio.setter
def ratio(self, ratio):
self._ratio = ratio
def read(self, num_frames):
"""Read a number of frames from the resampler.
Parameters
----------
num_frames : int
Number of frames to read.
Returns
-------
output_data : ndarray
Resampled frames as a (`num_output_frames`, `num_channels`) or
(`num_output_frames`,) array. Note that this may return fewer frames
than requested, for example when no more input is available.
"""
from samplerate.lowlevel import src_callback_read, src_error
from samplerate.exceptions import ResamplingError
if self._state is None:
self._create()
if self._channels > 1:
output_shape = (num_frames, self._channels)
elif self._channels == 1:
output_shape = (num_frames, )
output_data = np.empty(output_shape, dtype=np.float32)
ret = src_callback_read(self._state, self._ratio, num_frames,
output_data)
if ret == 0:
error = src_error(self._state)
if error:
raise ResamplingError(error)
return (output_data[:ret, :]
if self._channels > 1 else output_data[:ret])
|
tuxu/python-samplerate
|
samplerate/converters.py
|
resample
|
python
|
def resample(input_data, ratio, converter_type='sinc_best', verbose=False):
from samplerate.lowlevel import src_simple
from samplerate.exceptions import ResamplingError
input_data = np.require(input_data, requirements='C', dtype=np.float32)
if input_data.ndim == 2:
num_frames, channels = input_data.shape
output_shape = (int(num_frames * ratio), channels)
elif input_data.ndim == 1:
num_frames, channels = input_data.size, 1
output_shape = (int(num_frames * ratio), )
else:
raise ValueError('rank > 2 not supported')
output_data = np.empty(output_shape, dtype=np.float32)
converter_type = _get_converter_type(converter_type)
(error, input_frames_used, output_frames_gen) \
= src_simple(input_data, output_data, ratio,
converter_type.value, channels)
if error != 0:
raise ResamplingError(error)
if verbose:
info = ('samplerate info:\n'
'{} input frames used\n'
'{} output frames generated\n'
.format(input_frames_used, output_frames_gen))
print(info)
return (output_data[:output_frames_gen, :]
if channels > 1 else output_data[:output_frames_gen])
|
Resample the signal in `input_data` at once.
Parameters
----------
input_data : ndarray
Input data. A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
verbose : bool
If `True`, print additional information about the conversion.
Returns
-------
output_data : ndarray
Resampled input data.
Note
----
If samples are to be processed in chunks, `Resampler` and
`CallbackResampler` will provide better results and allow for variable
conversion ratios.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/converters.py#L31-L90
|
[
"def _get_converter_type(identifier):\n \"\"\"Return the converter type for `identifier`.\"\"\"\n if isinstance(identifier, str):\n return ConverterType[identifier]\n if isinstance(identifier, ConverterType):\n return identifier\n return ConverterType(identifier)\n",
"def src_simple(input_data, output_data, ratio, converter_type, channels):\n \"\"\"Perform a single conversion from an input buffer to an output buffer.\n\n Simple interface for performing a single conversion from input buffer to\n output buffer at a fixed conversion ratio. Simple interface does not require\n initialisation as it can only operate on a single buffer worth of audio.\n \"\"\"\n input_frames, _ = _check_data(input_data)\n output_frames, _ = _check_data(output_data)\n data = ffi.new('SRC_DATA*')\n data.input_frames = input_frames\n data.output_frames = output_frames\n data.src_ratio = ratio\n data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))\n data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))\n error = _lib.src_simple(data, converter_type, channels)\n return error, data.input_frames_used, data.output_frames_gen\n"
] |
"""Converters
"""
from __future__ import print_function, division
from enum import Enum
import numpy as np
class ConverterType(Enum):
"""Enum of samplerate converter types.
Pass any of the members, or their string or value representation, as
``converter_type`` in the resamplers.
"""
sinc_best = 0
sinc_medium = 1
sinc_fastest = 2
zero_order_hold = 3
linear = 4
def _get_converter_type(identifier):
"""Return the converter type for `identifier`."""
if isinstance(identifier, str):
return ConverterType[identifier]
if isinstance(identifier, ConverterType):
return identifier
return ConverterType(identifier)
class Resampler(object):
"""Resampler.
Parameters
----------
converter_type : ConverterType, str, or int
Sample rate converter.
num_channels : int
Number of channels.
"""
def __init__(self, converter_type='sinc_fastest', channels=1):
from samplerate.lowlevel import ffi, src_new, src_delete
from samplerate.exceptions import ResamplingError
converter_type = _get_converter_type(converter_type)
state, error = src_new(converter_type.value, channels)
self._state = ffi.gc(state, src_delete)
self._converter_type = converter_type
self._channels = channels
if error != 0:
raise ResamplingError(error)
@property
def converter_type(self):
"""Converter type."""
return self._converter_type
@property
def channels(self):
"""Number of channels."""
return self._channels
def reset(self):
"""Reset internal state."""
from samplerate.lowlevel import src_reset
return src_reset(self._state)
def set_ratio(self, new_ratio):
"""Set a new conversion ratio immediately."""
from samplerate.lowlevel import src_set_ratio
return src_set_ratio(self._state, new_ratio)
def process(self, input_data, ratio, end_of_input=False, verbose=False):
"""Resample the signal in `input_data`.
Parameters
----------
input_data : ndarray
Input data. A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
end_of_input : int
Set to `True` if no more data is available, or to `False` otherwise.
verbose : bool
If `True`, print additional information about the conversion.
Returns
-------
output_data : ndarray
Resampled input data.
"""
from samplerate.lowlevel import src_process
from samplerate.exceptions import ResamplingError
input_data = np.require(input_data, requirements='C', dtype=np.float32)
if input_data.ndim == 2:
num_frames, channels = input_data.shape
output_shape = (int(num_frames * ratio), channels)
elif input_data.ndim == 1:
num_frames, channels = input_data.size, 1
output_shape = (int(num_frames * ratio), )
else:
raise ValueError('rank > 2 not supported')
if channels != self._channels:
raise ValueError('Invalid number of channels in input data.')
output_data = np.empty(output_shape, dtype=np.float32)
(error, input_frames_used, output_frames_gen) = src_process(
self._state, input_data, output_data, ratio, end_of_input)
if error != 0:
raise ResamplingError(error)
if verbose:
info = ('samplerate info:\n'
'{} input frames used\n'
'{} output frames generated\n'
.format(input_frames_used, output_frames_gen))
print(info)
return (output_data[:output_frames_gen, :]
if channels > 1 else output_data[:output_frames_gen])
class CallbackResampler(object):
"""CallbackResampler.
Parameters
----------
callback : function
Function that returns new frames on each call, or `None` otherwise.
A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
channels : int
Number of channels.
"""
def __init__(self, callback, ratio, converter_type='sinc_fastest',
channels=1):
if channels < 1:
raise ValueError('Invalid number of channels.')
self._callback = callback
self._ratio = ratio
self._converter_type = _get_converter_type(converter_type)
self._channels = channels
self._state = None
self._handle = None
self._create()
def _create(self):
"""Create new callback resampler."""
from samplerate.lowlevel import ffi, src_callback_new, src_delete
from samplerate.exceptions import ResamplingError
state, handle, error = src_callback_new(
self._callback, self._converter_type.value, self._channels)
if error != 0:
raise ResamplingError(error)
self._state = ffi.gc(state, src_delete)
self._handle = handle
def _destroy(self):
"""Destroy resampler state."""
if self._state:
self._state = None
self._handle = None
def __enter__(self):
return self
def __exit__(self, *args):
self._destroy()
def set_starting_ratio(self, ratio):
""" Set the starting conversion ratio for the next `read` call. """
from samplerate.lowlevel import src_set_ratio
if self._state is None:
self._create()
src_set_ratio(self._state, ratio)
self.ratio = ratio
def reset(self):
"""Reset state."""
from samplerate.lowlevel import src_reset
if self._state is None:
self._create()
src_reset(self._state)
@property
def ratio(self):
"""Conversion ratio = output sample rate / input sample rate."""
return self._ratio
@ratio.setter
def ratio(self, ratio):
self._ratio = ratio
def read(self, num_frames):
"""Read a number of frames from the resampler.
Parameters
----------
num_frames : int
Number of frames to read.
Returns
-------
output_data : ndarray
Resampled frames as a (`num_output_frames`, `num_channels`) or
(`num_output_frames`,) array. Note that this may return fewer frames
than requested, for example when no more input is available.
"""
from samplerate.lowlevel import src_callback_read, src_error
from samplerate.exceptions import ResamplingError
if self._state is None:
self._create()
if self._channels > 1:
output_shape = (num_frames, self._channels)
elif self._channels == 1:
output_shape = (num_frames, )
output_data = np.empty(output_shape, dtype=np.float32)
ret = src_callback_read(self._state, self._ratio, num_frames,
output_data)
if ret == 0:
error = src_error(self._state)
if error:
raise ResamplingError(error)
return (output_data[:ret, :]
if self._channels > 1 else output_data[:ret])
|
tuxu/python-samplerate
|
samplerate/converters.py
|
Resampler.set_ratio
|
python
|
def set_ratio(self, new_ratio):
from samplerate.lowlevel import src_set_ratio
return src_set_ratio(self._state, new_ratio)
|
Set a new conversion ratio immediately.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/converters.py#L130-L133
|
[
"def src_set_ratio(state, new_ratio):\n \"\"\"Set a new SRC ratio.\n\n This allows step responses in the conversion ratio.\n Returns non zero on error.\n \"\"\"\n return _lib.src_set_ratio(state, new_ratio) if state else None\n"
] |
class Resampler(object):
"""Resampler.
Parameters
----------
converter_type : ConverterType, str, or int
Sample rate converter.
num_channels : int
Number of channels.
"""
def __init__(self, converter_type='sinc_fastest', channels=1):
from samplerate.lowlevel import ffi, src_new, src_delete
from samplerate.exceptions import ResamplingError
converter_type = _get_converter_type(converter_type)
state, error = src_new(converter_type.value, channels)
self._state = ffi.gc(state, src_delete)
self._converter_type = converter_type
self._channels = channels
if error != 0:
raise ResamplingError(error)
@property
def converter_type(self):
"""Converter type."""
return self._converter_type
@property
def channels(self):
"""Number of channels."""
return self._channels
def reset(self):
"""Reset internal state."""
from samplerate.lowlevel import src_reset
return src_reset(self._state)
def process(self, input_data, ratio, end_of_input=False, verbose=False):
"""Resample the signal in `input_data`.
Parameters
----------
input_data : ndarray
Input data. A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
end_of_input : int
Set to `True` if no more data is available, or to `False` otherwise.
verbose : bool
If `True`, print additional information about the conversion.
Returns
-------
output_data : ndarray
Resampled input data.
"""
from samplerate.lowlevel import src_process
from samplerate.exceptions import ResamplingError
input_data = np.require(input_data, requirements='C', dtype=np.float32)
if input_data.ndim == 2:
num_frames, channels = input_data.shape
output_shape = (int(num_frames * ratio), channels)
elif input_data.ndim == 1:
num_frames, channels = input_data.size, 1
output_shape = (int(num_frames * ratio), )
else:
raise ValueError('rank > 2 not supported')
if channels != self._channels:
raise ValueError('Invalid number of channels in input data.')
output_data = np.empty(output_shape, dtype=np.float32)
(error, input_frames_used, output_frames_gen) = src_process(
self._state, input_data, output_data, ratio, end_of_input)
if error != 0:
raise ResamplingError(error)
if verbose:
info = ('samplerate info:\n'
'{} input frames used\n'
'{} output frames generated\n'
.format(input_frames_used, output_frames_gen))
print(info)
return (output_data[:output_frames_gen, :]
if channels > 1 else output_data[:output_frames_gen])
|
tuxu/python-samplerate
|
samplerate/converters.py
|
Resampler.process
|
python
|
def process(self, input_data, ratio, end_of_input=False, verbose=False):
from samplerate.lowlevel import src_process
from samplerate.exceptions import ResamplingError
input_data = np.require(input_data, requirements='C', dtype=np.float32)
if input_data.ndim == 2:
num_frames, channels = input_data.shape
output_shape = (int(num_frames * ratio), channels)
elif input_data.ndim == 1:
num_frames, channels = input_data.size, 1
output_shape = (int(num_frames * ratio), )
else:
raise ValueError('rank > 2 not supported')
if channels != self._channels:
raise ValueError('Invalid number of channels in input data.')
output_data = np.empty(output_shape, dtype=np.float32)
(error, input_frames_used, output_frames_gen) = src_process(
self._state, input_data, output_data, ratio, end_of_input)
if error != 0:
raise ResamplingError(error)
if verbose:
info = ('samplerate info:\n'
'{} input frames used\n'
'{} output frames generated\n'
.format(input_frames_used, output_frames_gen))
print(info)
return (output_data[:output_frames_gen, :]
if channels > 1 else output_data[:output_frames_gen])
|
Resample the signal in `input_data`.
Parameters
----------
input_data : ndarray
Input data. A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
end_of_input : int
Set to `True` if no more data is available, or to `False` otherwise.
verbose : bool
If `True`, print additional information about the conversion.
Returns
-------
output_data : ndarray
Resampled input data.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/converters.py#L135-L189
|
[
"def src_process(state, input_data, output_data, ratio, end_of_input=0):\n \"\"\"Standard processing function.\n\n Returns non zero on error.\n \"\"\"\n input_frames, _ = _check_data(input_data)\n output_frames, _ = _check_data(output_data)\n data = ffi.new('SRC_DATA*')\n data.input_frames = input_frames\n data.output_frames = output_frames\n data.src_ratio = ratio\n data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))\n data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))\n data.end_of_input = end_of_input\n error = _lib.src_process(state, data)\n return error, data.input_frames_used, data.output_frames_gen\n"
] |
class Resampler(object):
"""Resampler.
Parameters
----------
converter_type : ConverterType, str, or int
Sample rate converter.
num_channels : int
Number of channels.
"""
def __init__(self, converter_type='sinc_fastest', channels=1):
from samplerate.lowlevel import ffi, src_new, src_delete
from samplerate.exceptions import ResamplingError
converter_type = _get_converter_type(converter_type)
state, error = src_new(converter_type.value, channels)
self._state = ffi.gc(state, src_delete)
self._converter_type = converter_type
self._channels = channels
if error != 0:
raise ResamplingError(error)
@property
def converter_type(self):
"""Converter type."""
return self._converter_type
@property
def channels(self):
"""Number of channels."""
return self._channels
def reset(self):
"""Reset internal state."""
from samplerate.lowlevel import src_reset
return src_reset(self._state)
def set_ratio(self, new_ratio):
"""Set a new conversion ratio immediately."""
from samplerate.lowlevel import src_set_ratio
return src_set_ratio(self._state, new_ratio)
|
tuxu/python-samplerate
|
samplerate/converters.py
|
CallbackResampler._create
|
python
|
def _create(self):
from samplerate.lowlevel import ffi, src_callback_new, src_delete
from samplerate.exceptions import ResamplingError
state, handle, error = src_callback_new(
self._callback, self._converter_type.value, self._channels)
if error != 0:
raise ResamplingError(error)
self._state = ffi.gc(state, src_delete)
self._handle = handle
|
Create new callback resampler.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/converters.py#L222-L232
|
[
"def src_callback_new(callback, converter_type, channels):\n \"\"\"Initialisation for the callback based API.\n\n Parameters\n ----------\n callback : function\n Called whenever new frames are to be read. Must return a NumPy array\n of shape (num_frames, channels).\n converter_type : int\n Converter to be used.\n channels : int\n Number of channels.\n\n Returns\n -------\n state\n An anonymous pointer to the internal state of the converter.\n handle\n A CFFI handle to the callback data.\n error : int\n Error code.\n\n \"\"\"\n cb_data = {'callback': callback, 'channels': channels}\n handle = ffi.new_handle(cb_data)\n error = ffi.new('int*')\n state = _lib.src_callback_new(_src_input_callback, converter_type,\n channels, error, handle)\n if state == ffi.NULL:\n return None, handle, error[0]\n return state, handle, error[0]\n"
] |
class CallbackResampler(object):
"""CallbackResampler.
Parameters
----------
callback : function
Function that returns new frames on each call, or `None` otherwise.
A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
channels : int
Number of channels.
"""
def __init__(self, callback, ratio, converter_type='sinc_fastest',
channels=1):
if channels < 1:
raise ValueError('Invalid number of channels.')
self._callback = callback
self._ratio = ratio
self._converter_type = _get_converter_type(converter_type)
self._channels = channels
self._state = None
self._handle = None
self._create()
def _destroy(self):
"""Destroy resampler state."""
if self._state:
self._state = None
self._handle = None
def __enter__(self):
return self
def __exit__(self, *args):
self._destroy()
def set_starting_ratio(self, ratio):
""" Set the starting conversion ratio for the next `read` call. """
from samplerate.lowlevel import src_set_ratio
if self._state is None:
self._create()
src_set_ratio(self._state, ratio)
self.ratio = ratio
def reset(self):
"""Reset state."""
from samplerate.lowlevel import src_reset
if self._state is None:
self._create()
src_reset(self._state)
@property
def ratio(self):
"""Conversion ratio = output sample rate / input sample rate."""
return self._ratio
@ratio.setter
def ratio(self, ratio):
self._ratio = ratio
def read(self, num_frames):
"""Read a number of frames from the resampler.
Parameters
----------
num_frames : int
Number of frames to read.
Returns
-------
output_data : ndarray
Resampled frames as a (`num_output_frames`, `num_channels`) or
(`num_output_frames`,) array. Note that this may return fewer frames
than requested, for example when no more input is available.
"""
from samplerate.lowlevel import src_callback_read, src_error
from samplerate.exceptions import ResamplingError
if self._state is None:
self._create()
if self._channels > 1:
output_shape = (num_frames, self._channels)
elif self._channels == 1:
output_shape = (num_frames, )
output_data = np.empty(output_shape, dtype=np.float32)
ret = src_callback_read(self._state, self._ratio, num_frames,
output_data)
if ret == 0:
error = src_error(self._state)
if error:
raise ResamplingError(error)
return (output_data[:ret, :]
if self._channels > 1 else output_data[:ret])
|
tuxu/python-samplerate
|
samplerate/converters.py
|
CallbackResampler.set_starting_ratio
|
python
|
def set_starting_ratio(self, ratio):
from samplerate.lowlevel import src_set_ratio
if self._state is None:
self._create()
src_set_ratio(self._state, ratio)
self.ratio = ratio
|
Set the starting conversion ratio for the next `read` call.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/converters.py#L246-L252
|
[
"def src_set_ratio(state, new_ratio):\n \"\"\"Set a new SRC ratio.\n\n This allows step responses in the conversion ratio.\n Returns non zero on error.\n \"\"\"\n return _lib.src_set_ratio(state, new_ratio) if state else None\n",
"def _create(self):\n \"\"\"Create new callback resampler.\"\"\"\n from samplerate.lowlevel import ffi, src_callback_new, src_delete\n from samplerate.exceptions import ResamplingError\n\n state, handle, error = src_callback_new(\n self._callback, self._converter_type.value, self._channels)\n if error != 0:\n raise ResamplingError(error)\n self._state = ffi.gc(state, src_delete)\n self._handle = handle\n"
] |
class CallbackResampler(object):
"""CallbackResampler.
Parameters
----------
callback : function
Function that returns new frames on each call, or `None` otherwise.
A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
channels : int
Number of channels.
"""
def __init__(self, callback, ratio, converter_type='sinc_fastest',
channels=1):
if channels < 1:
raise ValueError('Invalid number of channels.')
self._callback = callback
self._ratio = ratio
self._converter_type = _get_converter_type(converter_type)
self._channels = channels
self._state = None
self._handle = None
self._create()
def _create(self):
"""Create new callback resampler."""
from samplerate.lowlevel import ffi, src_callback_new, src_delete
from samplerate.exceptions import ResamplingError
state, handle, error = src_callback_new(
self._callback, self._converter_type.value, self._channels)
if error != 0:
raise ResamplingError(error)
self._state = ffi.gc(state, src_delete)
self._handle = handle
def _destroy(self):
"""Destroy resampler state."""
if self._state:
self._state = None
self._handle = None
def __enter__(self):
return self
def __exit__(self, *args):
self._destroy()
def reset(self):
"""Reset state."""
from samplerate.lowlevel import src_reset
if self._state is None:
self._create()
src_reset(self._state)
@property
def ratio(self):
"""Conversion ratio = output sample rate / input sample rate."""
return self._ratio
@ratio.setter
def ratio(self, ratio):
self._ratio = ratio
def read(self, num_frames):
"""Read a number of frames from the resampler.
Parameters
----------
num_frames : int
Number of frames to read.
Returns
-------
output_data : ndarray
Resampled frames as a (`num_output_frames`, `num_channels`) or
(`num_output_frames`,) array. Note that this may return fewer frames
than requested, for example when no more input is available.
"""
from samplerate.lowlevel import src_callback_read, src_error
from samplerate.exceptions import ResamplingError
if self._state is None:
self._create()
if self._channels > 1:
output_shape = (num_frames, self._channels)
elif self._channels == 1:
output_shape = (num_frames, )
output_data = np.empty(output_shape, dtype=np.float32)
ret = src_callback_read(self._state, self._ratio, num_frames,
output_data)
if ret == 0:
error = src_error(self._state)
if error:
raise ResamplingError(error)
return (output_data[:ret, :]
if self._channels > 1 else output_data[:ret])
|
tuxu/python-samplerate
|
samplerate/converters.py
|
CallbackResampler.reset
|
python
|
def reset(self):
from samplerate.lowlevel import src_reset
if self._state is None:
self._create()
src_reset(self._state)
|
Reset state.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/converters.py#L254-L259
|
[
"def src_reset(state):\n \"\"\"Reset the internal SRC state.\n\n Does not modify the quality settings.\n Does not free any memory allocations.\n Returns non zero on error.\n \"\"\"\n return _lib.src_reset(state) if state else None\n",
"def _create(self):\n \"\"\"Create new callback resampler.\"\"\"\n from samplerate.lowlevel import ffi, src_callback_new, src_delete\n from samplerate.exceptions import ResamplingError\n\n state, handle, error = src_callback_new(\n self._callback, self._converter_type.value, self._channels)\n if error != 0:\n raise ResamplingError(error)\n self._state = ffi.gc(state, src_delete)\n self._handle = handle\n"
] |
class CallbackResampler(object):
"""CallbackResampler.
Parameters
----------
callback : function
Function that returns new frames on each call, or `None` otherwise.
A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
channels : int
Number of channels.
"""
def __init__(self, callback, ratio, converter_type='sinc_fastest',
channels=1):
if channels < 1:
raise ValueError('Invalid number of channels.')
self._callback = callback
self._ratio = ratio
self._converter_type = _get_converter_type(converter_type)
self._channels = channels
self._state = None
self._handle = None
self._create()
def _create(self):
"""Create new callback resampler."""
from samplerate.lowlevel import ffi, src_callback_new, src_delete
from samplerate.exceptions import ResamplingError
state, handle, error = src_callback_new(
self._callback, self._converter_type.value, self._channels)
if error != 0:
raise ResamplingError(error)
self._state = ffi.gc(state, src_delete)
self._handle = handle
def _destroy(self):
"""Destroy resampler state."""
if self._state:
self._state = None
self._handle = None
def __enter__(self):
return self
def __exit__(self, *args):
self._destroy()
def set_starting_ratio(self, ratio):
""" Set the starting conversion ratio for the next `read` call. """
from samplerate.lowlevel import src_set_ratio
if self._state is None:
self._create()
src_set_ratio(self._state, ratio)
self.ratio = ratio
@property
def ratio(self):
"""Conversion ratio = output sample rate / input sample rate."""
return self._ratio
@ratio.setter
def ratio(self, ratio):
self._ratio = ratio
def read(self, num_frames):
"""Read a number of frames from the resampler.
Parameters
----------
num_frames : int
Number of frames to read.
Returns
-------
output_data : ndarray
Resampled frames as a (`num_output_frames`, `num_channels`) or
(`num_output_frames`,) array. Note that this may return fewer frames
than requested, for example when no more input is available.
"""
from samplerate.lowlevel import src_callback_read, src_error
from samplerate.exceptions import ResamplingError
if self._state is None:
self._create()
if self._channels > 1:
output_shape = (num_frames, self._channels)
elif self._channels == 1:
output_shape = (num_frames, )
output_data = np.empty(output_shape, dtype=np.float32)
ret = src_callback_read(self._state, self._ratio, num_frames,
output_data)
if ret == 0:
error = src_error(self._state)
if error:
raise ResamplingError(error)
return (output_data[:ret, :]
if self._channels > 1 else output_data[:ret])
|
tuxu/python-samplerate
|
samplerate/converters.py
|
CallbackResampler.read
|
python
|
def read(self, num_frames):
from samplerate.lowlevel import src_callback_read, src_error
from samplerate.exceptions import ResamplingError
if self._state is None:
self._create()
if self._channels > 1:
output_shape = (num_frames, self._channels)
elif self._channels == 1:
output_shape = (num_frames, )
output_data = np.empty(output_shape, dtype=np.float32)
ret = src_callback_read(self._state, self._ratio, num_frames,
output_data)
if ret == 0:
error = src_error(self._state)
if error:
raise ResamplingError(error)
return (output_data[:ret, :]
if self._channels > 1 else output_data[:ret])
|
Read a number of frames from the resampler.
Parameters
----------
num_frames : int
Number of frames to read.
Returns
-------
output_data : ndarray
Resampled frames as a (`num_output_frames`, `num_channels`) or
(`num_output_frames`,) array. Note that this may return fewer frames
than requested, for example when no more input is available.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/converters.py#L270-L304
|
[
"def src_callback_read(state, ratio, frames, data):\n \"\"\"Read up to `frames` worth of data using the callback API.\n\n Returns\n -------\n frames : int\n Number of frames read or -1 on error.\n \"\"\"\n data_ptr = ffi.cast('float*f', ffi.from_buffer(data))\n return _lib.src_callback_read(state, ratio, frames, data_ptr)\n",
"def src_error(state):\n \"\"\"Return an error number.\"\"\"\n return _lib.src_error(state) if state else None\n",
"def _create(self):\n \"\"\"Create new callback resampler.\"\"\"\n from samplerate.lowlevel import ffi, src_callback_new, src_delete\n from samplerate.exceptions import ResamplingError\n\n state, handle, error = src_callback_new(\n self._callback, self._converter_type.value, self._channels)\n if error != 0:\n raise ResamplingError(error)\n self._state = ffi.gc(state, src_delete)\n self._handle = handle\n"
] |
class CallbackResampler(object):
"""CallbackResampler.
Parameters
----------
callback : function
Function that returns new frames on each call, or `None` otherwise.
A single channel is provided as a 1D array of `num_frames` length.
Input data with several channels is represented as a 2D array of shape
(`num_frames`, `num_channels`). For use with `libsamplerate`, `input_data`
is converted to 32-bit float and C (row-major) memory order.
ratio : float
Conversion ratio = output sample rate / input sample rate.
converter_type : ConverterType, str, or int
Sample rate converter.
channels : int
Number of channels.
"""
def __init__(self, callback, ratio, converter_type='sinc_fastest',
channels=1):
if channels < 1:
raise ValueError('Invalid number of channels.')
self._callback = callback
self._ratio = ratio
self._converter_type = _get_converter_type(converter_type)
self._channels = channels
self._state = None
self._handle = None
self._create()
def _create(self):
"""Create new callback resampler."""
from samplerate.lowlevel import ffi, src_callback_new, src_delete
from samplerate.exceptions import ResamplingError
state, handle, error = src_callback_new(
self._callback, self._converter_type.value, self._channels)
if error != 0:
raise ResamplingError(error)
self._state = ffi.gc(state, src_delete)
self._handle = handle
def _destroy(self):
"""Destroy resampler state."""
if self._state:
self._state = None
self._handle = None
def __enter__(self):
return self
def __exit__(self, *args):
self._destroy()
def set_starting_ratio(self, ratio):
""" Set the starting conversion ratio for the next `read` call. """
from samplerate.lowlevel import src_set_ratio
if self._state is None:
self._create()
src_set_ratio(self._state, ratio)
self.ratio = ratio
def reset(self):
"""Reset state."""
from samplerate.lowlevel import src_reset
if self._state is None:
self._create()
src_reset(self._state)
@property
def ratio(self):
"""Conversion ratio = output sample rate / input sample rate."""
return self._ratio
@ratio.setter
def ratio(self, ratio):
self._ratio = ratio
|
tuxu/python-samplerate
|
samplerate/lowlevel.py
|
_check_data
|
python
|
def _check_data(data):
if not (data.dtype == _np.float32 and data.flags.c_contiguous):
raise ValueError('supplied data must be float32 and C contiguous')
if data.ndim == 2:
num_frames, channels = data.shape
elif data.ndim == 1:
num_frames, channels = data.size, 1
else:
raise ValueError('rank > 2 not supported')
return num_frames, channels
|
Check whether `data` is a valid input/output for libsamplerate.
Returns
-------
num_frames
Number of frames in `data`.
channels
Number of channels in `data`.
Raises
------
ValueError: If invalid data is supplied.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L41-L63
| null |
"""Lowlevel wrappers around libsamplerate.
The docstrings of the `src_*` functions are adapted from the libsamplerate
header file.
"""
import os as _os
import sys as _sys
from ctypes.util import find_library as _find_library
import numpy as _np
# Locate and load libsamplerate
from samplerate._src import ffi
lib_basename = 'libsamplerate'
lib_filename = _find_library('samplerate')
if _os.environ.get('READTHEDOCS') == 'True':
# Mock minimum C API for Read the Docs
class MockLib(object):
@classmethod
def src_get_version(cls):
return ffi.new('char[]', 'libsamplerate-0.1.9 (c) ...')
lib_filename = 'mock'
_lib = MockLib()
elif lib_filename is None:
if _sys.platform == 'darwin':
lib_filename = '{}.dylib'.format(lib_basename)
elif _sys.platform == 'win32':
from platform import architecture
lib_filename = '{}-{}.dll'.format(lib_basename, architecture()[0])
else:
raise OSError('{} not found'.format(lib_basename))
lib_filename = _os.path.join(
_os.path.dirname(_os.path.abspath(__file__)), '_samplerate_data',
lib_filename)
_lib = ffi.dlopen(lib_filename)
else:
_lib = ffi.dlopen(lib_filename)
def src_strerror(error):
"""Convert the error number into a string."""
return ffi.string(_lib.src_strerror(error)).decode()
def src_get_name(converter_type):
"""Return the name of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_name(converter_type)).decode()
def src_get_description(converter_type):
"""Return the description of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_description(converter_type)).decode()
def src_get_version():
"""Return the version string of libsamplerate."""
return ffi.string(_lib.src_get_version()).decode()
def src_simple(input_data, output_data, ratio, converter_type, channels):
"""Perform a single conversion from an input buffer to an output buffer.
Simple interface for performing a single conversion from input buffer to
output buffer at a fixed conversion ratio. Simple interface does not require
initialisation as it can only operate on a single buffer worth of audio.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
error = _lib.src_simple(data, converter_type, channels)
return error, data.input_frames_used, data.output_frames_gen
def src_new(converter_type, channels):
"""Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
"""
error = ffi.new('int*')
state = _lib.src_new(converter_type, channels, error)
return state, error[0]
def src_delete(state):
"""Release `state`.
Cleanup all internal allocations.
"""
_lib.src_delete(state)
def src_process(state, input_data, output_data, ratio, end_of_input=0):
"""Standard processing function.
Returns non zero on error.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
data.end_of_input = end_of_input
error = _lib.src_process(state, data)
return error, data.input_frames_used, data.output_frames_gen
def src_error(state):
"""Return an error number."""
return _lib.src_error(state) if state else None
def src_reset(state):
"""Reset the internal SRC state.
Does not modify the quality settings.
Does not free any memory allocations.
Returns non zero on error.
"""
return _lib.src_reset(state) if state else None
def src_set_ratio(state, new_ratio):
"""Set a new SRC ratio.
This allows step responses in the conversion ratio.
Returns non zero on error.
"""
return _lib.src_set_ratio(state, new_ratio) if state else None
def src_is_valid_ratio(ratio):
"""Return `True` if ratio is a valid conversion ratio, `False` otherwise.
"""
return bool(_lib.src_is_valid_ratio(ratio))
@ffi.callback('src_callback_t')
def _src_input_callback(cb_data, data):
"""Internal callback function to be used with the callback API.
Pulls the Python callback function from the handle contained in `cb_data`
and calls it to fetch frames. Frames are converted to the format required by
the API (float, interleaved channels). A reference to these data is kept
internally.
Returns
-------
frames : int
The number of frames supplied.
"""
cb_data = ffi.from_handle(cb_data)
ret = cb_data['callback']()
if ret is None:
cb_data['last_input'] = None
return 0 # No frames supplied
input_data = _np.require(ret, requirements='C', dtype=_np.float32)
input_frames, channels = _check_data(input_data)
# Check whether the correct number of channels is supplied by user.
if cb_data['channels'] != channels:
raise ValueError('Invalid number of channels in callback.')
# Store a reference of the input data to ensure it is still alive when
# accessed by libsamplerate.
cb_data['last_input'] = input_data
data[0] = ffi.cast('float*', ffi.from_buffer(input_data))
return input_frames
def src_callback_new(callback, converter_type, channels):
"""Initialisation for the callback based API.
Parameters
----------
callback : function
Called whenever new frames are to be read. Must return a NumPy array
of shape (num_frames, channels).
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
handle
A CFFI handle to the callback data.
error : int
Error code.
"""
cb_data = {'callback': callback, 'channels': channels}
handle = ffi.new_handle(cb_data)
error = ffi.new('int*')
state = _lib.src_callback_new(_src_input_callback, converter_type,
channels, error, handle)
if state == ffi.NULL:
return None, handle, error[0]
return state, handle, error[0]
def src_callback_read(state, ratio, frames, data):
"""Read up to `frames` worth of data using the callback API.
Returns
-------
frames : int
Number of frames read or -1 on error.
"""
data_ptr = ffi.cast('float*f', ffi.from_buffer(data))
return _lib.src_callback_read(state, ratio, frames, data_ptr)
__libsamplerate_version__ = src_get_version()
if __libsamplerate_version__.startswith(lib_basename):
__libsamplerate_version__ = __libsamplerate_version__[len(
lib_basename) + 1:__libsamplerate_version__.find(' ')]
|
tuxu/python-samplerate
|
samplerate/lowlevel.py
|
src_simple
|
python
|
def src_simple(input_data, output_data, ratio, converter_type, channels):
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
error = _lib.src_simple(data, converter_type, channels)
return error, data.input_frames_used, data.output_frames_gen
|
Perform a single conversion from an input buffer to an output buffer.
Simple interface for performing a single conversion from input buffer to
output buffer at a fixed conversion ratio. Simple interface does not require
initialisation as it can only operate on a single buffer worth of audio.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L86-L102
|
[
"def _check_data(data):\n \"\"\"Check whether `data` is a valid input/output for libsamplerate.\n\n Returns\n -------\n num_frames\n Number of frames in `data`.\n channels\n Number of channels in `data`.\n\n Raises\n ------\n ValueError: If invalid data is supplied.\n \"\"\"\n if not (data.dtype == _np.float32 and data.flags.c_contiguous):\n raise ValueError('supplied data must be float32 and C contiguous')\n if data.ndim == 2:\n num_frames, channels = data.shape\n elif data.ndim == 1:\n num_frames, channels = data.size, 1\n else:\n raise ValueError('rank > 2 not supported')\n return num_frames, channels\n"
] |
"""Lowlevel wrappers around libsamplerate.
The docstrings of the `src_*` functions are adapted from the libsamplerate
header file.
"""
import os as _os
import sys as _sys
from ctypes.util import find_library as _find_library
import numpy as _np
# Locate and load libsamplerate
from samplerate._src import ffi
lib_basename = 'libsamplerate'
lib_filename = _find_library('samplerate')
if _os.environ.get('READTHEDOCS') == 'True':
# Mock minimum C API for Read the Docs
class MockLib(object):
@classmethod
def src_get_version(cls):
return ffi.new('char[]', 'libsamplerate-0.1.9 (c) ...')
lib_filename = 'mock'
_lib = MockLib()
elif lib_filename is None:
if _sys.platform == 'darwin':
lib_filename = '{}.dylib'.format(lib_basename)
elif _sys.platform == 'win32':
from platform import architecture
lib_filename = '{}-{}.dll'.format(lib_basename, architecture()[0])
else:
raise OSError('{} not found'.format(lib_basename))
lib_filename = _os.path.join(
_os.path.dirname(_os.path.abspath(__file__)), '_samplerate_data',
lib_filename)
_lib = ffi.dlopen(lib_filename)
else:
_lib = ffi.dlopen(lib_filename)
def _check_data(data):
"""Check whether `data` is a valid input/output for libsamplerate.
Returns
-------
num_frames
Number of frames in `data`.
channels
Number of channels in `data`.
Raises
------
ValueError: If invalid data is supplied.
"""
if not (data.dtype == _np.float32 and data.flags.c_contiguous):
raise ValueError('supplied data must be float32 and C contiguous')
if data.ndim == 2:
num_frames, channels = data.shape
elif data.ndim == 1:
num_frames, channels = data.size, 1
else:
raise ValueError('rank > 2 not supported')
return num_frames, channels
def src_strerror(error):
"""Convert the error number into a string."""
return ffi.string(_lib.src_strerror(error)).decode()
def src_get_name(converter_type):
"""Return the name of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_name(converter_type)).decode()
def src_get_description(converter_type):
"""Return the description of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_description(converter_type)).decode()
def src_get_version():
"""Return the version string of libsamplerate."""
return ffi.string(_lib.src_get_version()).decode()
def src_new(converter_type, channels):
"""Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
"""
error = ffi.new('int*')
state = _lib.src_new(converter_type, channels, error)
return state, error[0]
def src_delete(state):
"""Release `state`.
Cleanup all internal allocations.
"""
_lib.src_delete(state)
def src_process(state, input_data, output_data, ratio, end_of_input=0):
"""Standard processing function.
Returns non zero on error.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
data.end_of_input = end_of_input
error = _lib.src_process(state, data)
return error, data.input_frames_used, data.output_frames_gen
def src_error(state):
"""Return an error number."""
return _lib.src_error(state) if state else None
def src_reset(state):
"""Reset the internal SRC state.
Does not modify the quality settings.
Does not free any memory allocations.
Returns non zero on error.
"""
return _lib.src_reset(state) if state else None
def src_set_ratio(state, new_ratio):
"""Set a new SRC ratio.
This allows step responses in the conversion ratio.
Returns non zero on error.
"""
return _lib.src_set_ratio(state, new_ratio) if state else None
def src_is_valid_ratio(ratio):
"""Return `True` if ratio is a valid conversion ratio, `False` otherwise.
"""
return bool(_lib.src_is_valid_ratio(ratio))
@ffi.callback('src_callback_t')
def _src_input_callback(cb_data, data):
"""Internal callback function to be used with the callback API.
Pulls the Python callback function from the handle contained in `cb_data`
and calls it to fetch frames. Frames are converted to the format required by
the API (float, interleaved channels). A reference to these data is kept
internally.
Returns
-------
frames : int
The number of frames supplied.
"""
cb_data = ffi.from_handle(cb_data)
ret = cb_data['callback']()
if ret is None:
cb_data['last_input'] = None
return 0 # No frames supplied
input_data = _np.require(ret, requirements='C', dtype=_np.float32)
input_frames, channels = _check_data(input_data)
# Check whether the correct number of channels is supplied by user.
if cb_data['channels'] != channels:
raise ValueError('Invalid number of channels in callback.')
# Store a reference of the input data to ensure it is still alive when
# accessed by libsamplerate.
cb_data['last_input'] = input_data
data[0] = ffi.cast('float*', ffi.from_buffer(input_data))
return input_frames
def src_callback_new(callback, converter_type, channels):
"""Initialisation for the callback based API.
Parameters
----------
callback : function
Called whenever new frames are to be read. Must return a NumPy array
of shape (num_frames, channels).
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
handle
A CFFI handle to the callback data.
error : int
Error code.
"""
cb_data = {'callback': callback, 'channels': channels}
handle = ffi.new_handle(cb_data)
error = ffi.new('int*')
state = _lib.src_callback_new(_src_input_callback, converter_type,
channels, error, handle)
if state == ffi.NULL:
return None, handle, error[0]
return state, handle, error[0]
def src_callback_read(state, ratio, frames, data):
"""Read up to `frames` worth of data using the callback API.
Returns
-------
frames : int
Number of frames read or -1 on error.
"""
data_ptr = ffi.cast('float*f', ffi.from_buffer(data))
return _lib.src_callback_read(state, ratio, frames, data_ptr)
__libsamplerate_version__ = src_get_version()
if __libsamplerate_version__.startswith(lib_basename):
__libsamplerate_version__ = __libsamplerate_version__[len(
lib_basename) + 1:__libsamplerate_version__.find(' ')]
|
tuxu/python-samplerate
|
samplerate/lowlevel.py
|
src_new
|
python
|
def src_new(converter_type, channels):
error = ffi.new('int*')
state = _lib.src_new(converter_type, channels, error)
return state, error[0]
|
Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L105-L124
| null |
"""Lowlevel wrappers around libsamplerate.
The docstrings of the `src_*` functions are adapted from the libsamplerate
header file.
"""
import os as _os
import sys as _sys
from ctypes.util import find_library as _find_library
import numpy as _np
# Locate and load libsamplerate
from samplerate._src import ffi
lib_basename = 'libsamplerate'
lib_filename = _find_library('samplerate')
if _os.environ.get('READTHEDOCS') == 'True':
# Mock minimum C API for Read the Docs
class MockLib(object):
@classmethod
def src_get_version(cls):
return ffi.new('char[]', 'libsamplerate-0.1.9 (c) ...')
lib_filename = 'mock'
_lib = MockLib()
elif lib_filename is None:
if _sys.platform == 'darwin':
lib_filename = '{}.dylib'.format(lib_basename)
elif _sys.platform == 'win32':
from platform import architecture
lib_filename = '{}-{}.dll'.format(lib_basename, architecture()[0])
else:
raise OSError('{} not found'.format(lib_basename))
lib_filename = _os.path.join(
_os.path.dirname(_os.path.abspath(__file__)), '_samplerate_data',
lib_filename)
_lib = ffi.dlopen(lib_filename)
else:
_lib = ffi.dlopen(lib_filename)
def _check_data(data):
"""Check whether `data` is a valid input/output for libsamplerate.
Returns
-------
num_frames
Number of frames in `data`.
channels
Number of channels in `data`.
Raises
------
ValueError: If invalid data is supplied.
"""
if not (data.dtype == _np.float32 and data.flags.c_contiguous):
raise ValueError('supplied data must be float32 and C contiguous')
if data.ndim == 2:
num_frames, channels = data.shape
elif data.ndim == 1:
num_frames, channels = data.size, 1
else:
raise ValueError('rank > 2 not supported')
return num_frames, channels
def src_strerror(error):
"""Convert the error number into a string."""
return ffi.string(_lib.src_strerror(error)).decode()
def src_get_name(converter_type):
"""Return the name of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_name(converter_type)).decode()
def src_get_description(converter_type):
"""Return the description of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_description(converter_type)).decode()
def src_get_version():
"""Return the version string of libsamplerate."""
return ffi.string(_lib.src_get_version()).decode()
def src_simple(input_data, output_data, ratio, converter_type, channels):
"""Perform a single conversion from an input buffer to an output buffer.
Simple interface for performing a single conversion from input buffer to
output buffer at a fixed conversion ratio. Simple interface does not require
initialisation as it can only operate on a single buffer worth of audio.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
error = _lib.src_simple(data, converter_type, channels)
return error, data.input_frames_used, data.output_frames_gen
def src_delete(state):
"""Release `state`.
Cleanup all internal allocations.
"""
_lib.src_delete(state)
def src_process(state, input_data, output_data, ratio, end_of_input=0):
"""Standard processing function.
Returns non zero on error.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
data.end_of_input = end_of_input
error = _lib.src_process(state, data)
return error, data.input_frames_used, data.output_frames_gen
def src_error(state):
"""Return an error number."""
return _lib.src_error(state) if state else None
def src_reset(state):
"""Reset the internal SRC state.
Does not modify the quality settings.
Does not free any memory allocations.
Returns non zero on error.
"""
return _lib.src_reset(state) if state else None
def src_set_ratio(state, new_ratio):
"""Set a new SRC ratio.
This allows step responses in the conversion ratio.
Returns non zero on error.
"""
return _lib.src_set_ratio(state, new_ratio) if state else None
def src_is_valid_ratio(ratio):
"""Return `True` if ratio is a valid conversion ratio, `False` otherwise.
"""
return bool(_lib.src_is_valid_ratio(ratio))
@ffi.callback('src_callback_t')
def _src_input_callback(cb_data, data):
"""Internal callback function to be used with the callback API.
Pulls the Python callback function from the handle contained in `cb_data`
and calls it to fetch frames. Frames are converted to the format required by
the API (float, interleaved channels). A reference to these data is kept
internally.
Returns
-------
frames : int
The number of frames supplied.
"""
cb_data = ffi.from_handle(cb_data)
ret = cb_data['callback']()
if ret is None:
cb_data['last_input'] = None
return 0 # No frames supplied
input_data = _np.require(ret, requirements='C', dtype=_np.float32)
input_frames, channels = _check_data(input_data)
# Check whether the correct number of channels is supplied by user.
if cb_data['channels'] != channels:
raise ValueError('Invalid number of channels in callback.')
# Store a reference of the input data to ensure it is still alive when
# accessed by libsamplerate.
cb_data['last_input'] = input_data
data[0] = ffi.cast('float*', ffi.from_buffer(input_data))
return input_frames
def src_callback_new(callback, converter_type, channels):
"""Initialisation for the callback based API.
Parameters
----------
callback : function
Called whenever new frames are to be read. Must return a NumPy array
of shape (num_frames, channels).
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
handle
A CFFI handle to the callback data.
error : int
Error code.
"""
cb_data = {'callback': callback, 'channels': channels}
handle = ffi.new_handle(cb_data)
error = ffi.new('int*')
state = _lib.src_callback_new(_src_input_callback, converter_type,
channels, error, handle)
if state == ffi.NULL:
return None, handle, error[0]
return state, handle, error[0]
def src_callback_read(state, ratio, frames, data):
"""Read up to `frames` worth of data using the callback API.
Returns
-------
frames : int
Number of frames read or -1 on error.
"""
data_ptr = ffi.cast('float*f', ffi.from_buffer(data))
return _lib.src_callback_read(state, ratio, frames, data_ptr)
__libsamplerate_version__ = src_get_version()
if __libsamplerate_version__.startswith(lib_basename):
__libsamplerate_version__ = __libsamplerate_version__[len(
lib_basename) + 1:__libsamplerate_version__.find(' ')]
|
tuxu/python-samplerate
|
samplerate/lowlevel.py
|
src_process
|
python
|
def src_process(state, input_data, output_data, ratio, end_of_input=0):
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
data.end_of_input = end_of_input
error = _lib.src_process(state, data)
return error, data.input_frames_used, data.output_frames_gen
|
Standard processing function.
Returns non zero on error.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L135-L150
|
[
"def _check_data(data):\n \"\"\"Check whether `data` is a valid input/output for libsamplerate.\n\n Returns\n -------\n num_frames\n Number of frames in `data`.\n channels\n Number of channels in `data`.\n\n Raises\n ------\n ValueError: If invalid data is supplied.\n \"\"\"\n if not (data.dtype == _np.float32 and data.flags.c_contiguous):\n raise ValueError('supplied data must be float32 and C contiguous')\n if data.ndim == 2:\n num_frames, channels = data.shape\n elif data.ndim == 1:\n num_frames, channels = data.size, 1\n else:\n raise ValueError('rank > 2 not supported')\n return num_frames, channels\n"
] |
"""Lowlevel wrappers around libsamplerate.
The docstrings of the `src_*` functions are adapted from the libsamplerate
header file.
"""
import os as _os
import sys as _sys
from ctypes.util import find_library as _find_library
import numpy as _np
# Locate and load libsamplerate
from samplerate._src import ffi
lib_basename = 'libsamplerate'
lib_filename = _find_library('samplerate')
if _os.environ.get('READTHEDOCS') == 'True':
# Mock minimum C API for Read the Docs
class MockLib(object):
@classmethod
def src_get_version(cls):
return ffi.new('char[]', 'libsamplerate-0.1.9 (c) ...')
lib_filename = 'mock'
_lib = MockLib()
elif lib_filename is None:
if _sys.platform == 'darwin':
lib_filename = '{}.dylib'.format(lib_basename)
elif _sys.platform == 'win32':
from platform import architecture
lib_filename = '{}-{}.dll'.format(lib_basename, architecture()[0])
else:
raise OSError('{} not found'.format(lib_basename))
lib_filename = _os.path.join(
_os.path.dirname(_os.path.abspath(__file__)), '_samplerate_data',
lib_filename)
_lib = ffi.dlopen(lib_filename)
else:
_lib = ffi.dlopen(lib_filename)
def _check_data(data):
"""Check whether `data` is a valid input/output for libsamplerate.
Returns
-------
num_frames
Number of frames in `data`.
channels
Number of channels in `data`.
Raises
------
ValueError: If invalid data is supplied.
"""
if not (data.dtype == _np.float32 and data.flags.c_contiguous):
raise ValueError('supplied data must be float32 and C contiguous')
if data.ndim == 2:
num_frames, channels = data.shape
elif data.ndim == 1:
num_frames, channels = data.size, 1
else:
raise ValueError('rank > 2 not supported')
return num_frames, channels
def src_strerror(error):
"""Convert the error number into a string."""
return ffi.string(_lib.src_strerror(error)).decode()
def src_get_name(converter_type):
"""Return the name of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_name(converter_type)).decode()
def src_get_description(converter_type):
"""Return the description of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_description(converter_type)).decode()
def src_get_version():
"""Return the version string of libsamplerate."""
return ffi.string(_lib.src_get_version()).decode()
def src_simple(input_data, output_data, ratio, converter_type, channels):
"""Perform a single conversion from an input buffer to an output buffer.
Simple interface for performing a single conversion from input buffer to
output buffer at a fixed conversion ratio. Simple interface does not require
initialisation as it can only operate on a single buffer worth of audio.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
error = _lib.src_simple(data, converter_type, channels)
return error, data.input_frames_used, data.output_frames_gen
def src_new(converter_type, channels):
"""Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
"""
error = ffi.new('int*')
state = _lib.src_new(converter_type, channels, error)
return state, error[0]
def src_delete(state):
"""Release `state`.
Cleanup all internal allocations.
"""
_lib.src_delete(state)
def src_error(state):
"""Return an error number."""
return _lib.src_error(state) if state else None
def src_reset(state):
"""Reset the internal SRC state.
Does not modify the quality settings.
Does not free any memory allocations.
Returns non zero on error.
"""
return _lib.src_reset(state) if state else None
def src_set_ratio(state, new_ratio):
    """Set a new SRC ratio.

    This allows step responses in the conversion ratio.
    Returns non zero on error, or ``None`` if `state` is falsy.
    """
    if not state:
        return None
    return _lib.src_set_ratio(state, new_ratio)
def src_is_valid_ratio(ratio):
    """Return `True` if `ratio` is a valid conversion ratio, `False` otherwise."""
    # The C function returns a C int (0 or non-zero); map it to a Python bool.
    return _lib.src_is_valid_ratio(ratio) != 0
@ffi.callback('src_callback_t')
def _src_input_callback(cb_data, data):
    """Internal callback function to be used with the callback API.

    Pulls the Python callback function from the handle contained in `cb_data`
    and calls it to fetch frames. Frames are converted to the format required
    by the API (float32, C-contiguous). A reference to these data is kept
    internally so libsamplerate can read them after this function returns.

    Parameters
    ----------
    cb_data
        CFFI handle wrapping a dict with keys ``'callback'`` and
        ``'channels'`` (created in ``src_callback_new``).
    data
        Out-parameter (``float**``) that receives a pointer to the frames.

    Returns
    -------
    frames : int
        The number of frames supplied (0 signals end of input).
    """
    cb_data = ffi.from_handle(cb_data)
    ret = cb_data['callback']()
    if ret is None:
        cb_data['last_input'] = None
        return 0  # No frames supplied
    # Coerce whatever the user returned into float32, C-contiguous memory.
    input_data = _np.require(ret, requirements='C', dtype=_np.float32)
    input_frames, channels = _check_data(input_data)
    # Check whether the correct number of channels is supplied by user.
    if cb_data['channels'] != channels:
        raise ValueError('Invalid number of channels in callback.')
    # Store a reference of the input data to ensure it is still alive when
    # accessed by libsamplerate.
    cb_data['last_input'] = input_data
    data[0] = ffi.cast('float*', ffi.from_buffer(input_data))
    return input_frames
def src_callback_new(callback, converter_type, channels):
    """Initialisation for the callback based API.

    Parameters
    ----------
    callback : function
        Called whenever new frames are to be read. Must return a NumPy array
        of shape (num_frames, channels).
    converter_type : int
        Converter to be used.
    channels : int
        Number of channels.

    Returns
    -------
    state
        An anonymous pointer to the internal state of the converter, or
        ``None`` if creation failed.
    handle
        A CFFI handle to the callback data. The caller must keep it alive
        for as long as the converter is used.
    error : int
        Error code.
    """
    payload = {'callback': callback, 'channels': channels}
    payload_handle = ffi.new_handle(payload)
    error_ptr = ffi.new('int*')
    state = _lib.src_callback_new(_src_input_callback, converter_type,
                                  channels, error_ptr, payload_handle)
    # Normalise a NULL state pointer to None for Python callers.
    result = None if state == ffi.NULL else state
    return result, payload_handle, error_ptr[0]
def src_callback_read(state, ratio, frames, data):
    """Read up to `frames` worth of data using the callback API.

    Parameters
    ----------
    state
        Converter state created by ``src_callback_new``.
    ratio : float
        Conversion ratio.
    frames : int
        Maximum number of frames to read.
    data
        Pre-allocated float32, C-contiguous output buffer.

    Returns
    -------
    frames : int
        Number of frames read or -1 on error.
    """
    # Bug fix: the C type string was 'float*f', which is not a valid ctype
    # and makes ffi.cast raise on every call; 'float*' is the correct type.
    data_ptr = ffi.cast('float*', ffi.from_buffer(data))
    return _lib.src_callback_read(state, ratio, frames, data_ptr)
# Extract a bare version number (e.g. "0.1.9") from a full version string of
# the form "libsamplerate-0.1.9 (c) ...": strip the "libsamplerate-" prefix
# and everything from the first space onwards.
__libsamplerate_version__ = src_get_version()
if __libsamplerate_version__.startswith(lib_basename):
    __libsamplerate_version__ = __libsamplerate_version__[len(
        lib_basename) + 1:__libsamplerate_version__.find(' ')]
|
tuxu/python-samplerate
|
samplerate/lowlevel.py
|
_src_input_callback
|
python
|
def _src_input_callback(cb_data, data):
cb_data = ffi.from_handle(cb_data)
ret = cb_data['callback']()
if ret is None:
cb_data['last_input'] = None
return 0 # No frames supplied
input_data = _np.require(ret, requirements='C', dtype=_np.float32)
input_frames, channels = _check_data(input_data)
# Check whether the correct number of channels is supplied by user.
if cb_data['channels'] != channels:
raise ValueError('Invalid number of channels in callback.')
# Store a reference of the input data to ensure it is still alive when
# accessed by libsamplerate.
cb_data['last_input'] = input_data
data[0] = ffi.cast('float*', ffi.from_buffer(input_data))
return input_frames
|
Internal callback function to be used with the callback API.
Pulls the Python callback function from the handle contained in `cb_data`
and calls it to fetch frames. Frames are converted to the format required by
the API (float, interleaved channels). A reference to these data is kept
internally.
Returns
-------
frames : int
The number of frames supplied.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L184-L214
| null |
"""Lowlevel wrappers around libsamplerate.
The docstrings of the `src_*` functions are adapted from the libsamplerate
header file.
"""
import os as _os
import sys as _sys
from ctypes.util import find_library as _find_library
import numpy as _np
# Locate and load libsamplerate
from samplerate._src import ffi
lib_basename = 'libsamplerate'
lib_filename = _find_library('samplerate')
if _os.environ.get('READTHEDOCS') == 'True':
# Mock minimum C API for Read the Docs
class MockLib(object):
@classmethod
def src_get_version(cls):
return ffi.new('char[]', 'libsamplerate-0.1.9 (c) ...')
lib_filename = 'mock'
_lib = MockLib()
elif lib_filename is None:
if _sys.platform == 'darwin':
lib_filename = '{}.dylib'.format(lib_basename)
elif _sys.platform == 'win32':
from platform import architecture
lib_filename = '{}-{}.dll'.format(lib_basename, architecture()[0])
else:
raise OSError('{} not found'.format(lib_basename))
lib_filename = _os.path.join(
_os.path.dirname(_os.path.abspath(__file__)), '_samplerate_data',
lib_filename)
_lib = ffi.dlopen(lib_filename)
else:
_lib = ffi.dlopen(lib_filename)
def _check_data(data):
"""Check whether `data` is a valid input/output for libsamplerate.
Returns
-------
num_frames
Number of frames in `data`.
channels
Number of channels in `data`.
Raises
------
ValueError: If invalid data is supplied.
"""
if not (data.dtype == _np.float32 and data.flags.c_contiguous):
raise ValueError('supplied data must be float32 and C contiguous')
if data.ndim == 2:
num_frames, channels = data.shape
elif data.ndim == 1:
num_frames, channels = data.size, 1
else:
raise ValueError('rank > 2 not supported')
return num_frames, channels
def src_strerror(error):
"""Convert the error number into a string."""
return ffi.string(_lib.src_strerror(error)).decode()
def src_get_name(converter_type):
"""Return the name of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_name(converter_type)).decode()
def src_get_description(converter_type):
"""Return the description of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_description(converter_type)).decode()
def src_get_version():
"""Return the version string of libsamplerate."""
return ffi.string(_lib.src_get_version()).decode()
def src_simple(input_data, output_data, ratio, converter_type, channels):
"""Perform a single conversion from an input buffer to an output buffer.
Simple interface for performing a single conversion from input buffer to
output buffer at a fixed conversion ratio. Simple interface does not require
initialisation as it can only operate on a single buffer worth of audio.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
error = _lib.src_simple(data, converter_type, channels)
return error, data.input_frames_used, data.output_frames_gen
def src_new(converter_type, channels):
"""Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
"""
error = ffi.new('int*')
state = _lib.src_new(converter_type, channels, error)
return state, error[0]
def src_delete(state):
"""Release `state`.
Cleanup all internal allocations.
"""
_lib.src_delete(state)
def src_process(state, input_data, output_data, ratio, end_of_input=0):
"""Standard processing function.
Returns non zero on error.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
data.end_of_input = end_of_input
error = _lib.src_process(state, data)
return error, data.input_frames_used, data.output_frames_gen
def src_error(state):
"""Return an error number."""
return _lib.src_error(state) if state else None
def src_reset(state):
"""Reset the internal SRC state.
Does not modify the quality settings.
Does not free any memory allocations.
Returns non zero on error.
"""
return _lib.src_reset(state) if state else None
def src_set_ratio(state, new_ratio):
"""Set a new SRC ratio.
This allows step responses in the conversion ratio.
Returns non zero on error.
"""
return _lib.src_set_ratio(state, new_ratio) if state else None
def src_is_valid_ratio(ratio):
"""Return `True` if ratio is a valid conversion ratio, `False` otherwise.
"""
return bool(_lib.src_is_valid_ratio(ratio))
def src_callback_new(callback, converter_type, channels):
    """Initialisation for the callback based API.

    Note: a stray ``@ffi.callback('src_callback_t')`` decorator was removed
    here — this is a plain Python constructor, and wrapping it as a C
    callback (whose signature it does not match) would make it unusable.

    Parameters
    ----------
    callback : function
        Called whenever new frames are to be read. Must return a NumPy array
        of shape (num_frames, channels).
    converter_type : int
        Converter to be used.
    channels : int
        Number of channels.

    Returns
    -------
    state
        An anonymous pointer to the internal state of the converter, or
        ``None`` if creation failed.
    handle
        A CFFI handle to the callback data.
    error : int
        Error code.
    """
    cb_data = {'callback': callback, 'channels': channels}
    handle = ffi.new_handle(cb_data)
    error = ffi.new('int*')
    state = _lib.src_callback_new(_src_input_callback, converter_type,
                                  channels, error, handle)
    if state == ffi.NULL:
        return None, handle, error[0]
    return state, handle, error[0]
def src_callback_read(state, ratio, frames, data):
    """Read up to `frames` worth of data using the callback API.

    Returns
    -------
    frames : int
        Number of frames read or -1 on error.
    """
    # Bug fix: 'float*f' is not a valid C type string and would make
    # ffi.cast raise on every call; the correct type is 'float*'.
    data_ptr = ffi.cast('float*', ffi.from_buffer(data))
    return _lib.src_callback_read(state, ratio, frames, data_ptr)
__libsamplerate_version__ = src_get_version()
if __libsamplerate_version__.startswith(lib_basename):
__libsamplerate_version__ = __libsamplerate_version__[len(
lib_basename) + 1:__libsamplerate_version__.find(' ')]
|
tuxu/python-samplerate
|
samplerate/lowlevel.py
|
src_callback_new
|
python
|
def src_callback_new(callback, converter_type, channels):
cb_data = {'callback': callback, 'channels': channels}
handle = ffi.new_handle(cb_data)
error = ffi.new('int*')
state = _lib.src_callback_new(_src_input_callback, converter_type,
channels, error, handle)
if state == ffi.NULL:
return None, handle, error[0]
return state, handle, error[0]
|
Initialisation for the callback based API.
Parameters
----------
callback : function
Called whenever new frames are to be read. Must return a NumPy array
of shape (num_frames, channels).
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
handle
A CFFI handle to the callback data.
error : int
Error code.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L217-L247
| null |
"""Lowlevel wrappers around libsamplerate.
The docstrings of the `src_*` functions are adapted from the libsamplerate
header file.
"""
import os as _os
import sys as _sys
from ctypes.util import find_library as _find_library
import numpy as _np
# Locate and load libsamplerate
from samplerate._src import ffi
lib_basename = 'libsamplerate'
lib_filename = _find_library('samplerate')
if _os.environ.get('READTHEDOCS') == 'True':
# Mock minimum C API for Read the Docs
class MockLib(object):
@classmethod
def src_get_version(cls):
return ffi.new('char[]', 'libsamplerate-0.1.9 (c) ...')
lib_filename = 'mock'
_lib = MockLib()
elif lib_filename is None:
if _sys.platform == 'darwin':
lib_filename = '{}.dylib'.format(lib_basename)
elif _sys.platform == 'win32':
from platform import architecture
lib_filename = '{}-{}.dll'.format(lib_basename, architecture()[0])
else:
raise OSError('{} not found'.format(lib_basename))
lib_filename = _os.path.join(
_os.path.dirname(_os.path.abspath(__file__)), '_samplerate_data',
lib_filename)
_lib = ffi.dlopen(lib_filename)
else:
_lib = ffi.dlopen(lib_filename)
def _check_data(data):
"""Check whether `data` is a valid input/output for libsamplerate.
Returns
-------
num_frames
Number of frames in `data`.
channels
Number of channels in `data`.
Raises
------
ValueError: If invalid data is supplied.
"""
if not (data.dtype == _np.float32 and data.flags.c_contiguous):
raise ValueError('supplied data must be float32 and C contiguous')
if data.ndim == 2:
num_frames, channels = data.shape
elif data.ndim == 1:
num_frames, channels = data.size, 1
else:
raise ValueError('rank > 2 not supported')
return num_frames, channels
def src_strerror(error):
"""Convert the error number into a string."""
return ffi.string(_lib.src_strerror(error)).decode()
def src_get_name(converter_type):
"""Return the name of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_name(converter_type)).decode()
def src_get_description(converter_type):
"""Return the description of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_description(converter_type)).decode()
def src_get_version():
"""Return the version string of libsamplerate."""
return ffi.string(_lib.src_get_version()).decode()
def src_simple(input_data, output_data, ratio, converter_type, channels):
"""Perform a single conversion from an input buffer to an output buffer.
Simple interface for performing a single conversion from input buffer to
output buffer at a fixed conversion ratio. Simple interface does not require
initialisation as it can only operate on a single buffer worth of audio.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
error = _lib.src_simple(data, converter_type, channels)
return error, data.input_frames_used, data.output_frames_gen
def src_new(converter_type, channels):
"""Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
"""
error = ffi.new('int*')
state = _lib.src_new(converter_type, channels, error)
return state, error[0]
def src_delete(state):
"""Release `state`.
Cleanup all internal allocations.
"""
_lib.src_delete(state)
def src_process(state, input_data, output_data, ratio, end_of_input=0):
"""Standard processing function.
Returns non zero on error.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
data.end_of_input = end_of_input
error = _lib.src_process(state, data)
return error, data.input_frames_used, data.output_frames_gen
def src_error(state):
"""Return an error number."""
return _lib.src_error(state) if state else None
def src_reset(state):
"""Reset the internal SRC state.
Does not modify the quality settings.
Does not free any memory allocations.
Returns non zero on error.
"""
return _lib.src_reset(state) if state else None
def src_set_ratio(state, new_ratio):
"""Set a new SRC ratio.
This allows step responses in the conversion ratio.
Returns non zero on error.
"""
return _lib.src_set_ratio(state, new_ratio) if state else None
def src_is_valid_ratio(ratio):
"""Return `True` if ratio is a valid conversion ratio, `False` otherwise.
"""
return bool(_lib.src_is_valid_ratio(ratio))
@ffi.callback('src_callback_t')
def _src_input_callback(cb_data, data):
"""Internal callback function to be used with the callback API.
Pulls the Python callback function from the handle contained in `cb_data`
and calls it to fetch frames. Frames are converted to the format required by
the API (float, interleaved channels). A reference to these data is kept
internally.
Returns
-------
frames : int
The number of frames supplied.
"""
cb_data = ffi.from_handle(cb_data)
ret = cb_data['callback']()
if ret is None:
cb_data['last_input'] = None
return 0 # No frames supplied
input_data = _np.require(ret, requirements='C', dtype=_np.float32)
input_frames, channels = _check_data(input_data)
# Check whether the correct number of channels is supplied by user.
if cb_data['channels'] != channels:
raise ValueError('Invalid number of channels in callback.')
# Store a reference of the input data to ensure it is still alive when
# accessed by libsamplerate.
cb_data['last_input'] = input_data
data[0] = ffi.cast('float*', ffi.from_buffer(input_data))
return input_frames
def src_callback_read(state, ratio, frames, data):
    """Read up to `frames` worth of data using the callback API.

    Returns
    -------
    frames : int
        Number of frames read or -1 on error.
    """
    # Bug fix: 'float*f' is not a valid C type string and would make
    # ffi.cast raise on every call; the correct type is 'float*'.
    data_ptr = ffi.cast('float*', ffi.from_buffer(data))
    return _lib.src_callback_read(state, ratio, frames, data_ptr)
__libsamplerate_version__ = src_get_version()
if __libsamplerate_version__.startswith(lib_basename):
__libsamplerate_version__ = __libsamplerate_version__[len(
lib_basename) + 1:__libsamplerate_version__.find(' ')]
|
tuxu/python-samplerate
|
samplerate/lowlevel.py
|
src_callback_read
|
python
|
def src_callback_read(state, ratio, frames, data):
data_ptr = ffi.cast('float*f', ffi.from_buffer(data))
return _lib.src_callback_read(state, ratio, frames, data_ptr)
|
Read up to `frames` worth of data using the callback API.
Returns
-------
frames : int
Number of frames read or -1 on error.
|
train
|
https://github.com/tuxu/python-samplerate/blob/ed73d7a39e61bfb34b03dade14ffab59aa27922a/samplerate/lowlevel.py#L250-L259
| null |
"""Lowlevel wrappers around libsamplerate.
The docstrings of the `src_*` functions are adapted from the libsamplerate
header file.
"""
import os as _os
import sys as _sys
from ctypes.util import find_library as _find_library
import numpy as _np
# Locate and load libsamplerate
from samplerate._src import ffi
lib_basename = 'libsamplerate'
lib_filename = _find_library('samplerate')
if _os.environ.get('READTHEDOCS') == 'True':
# Mock minimum C API for Read the Docs
class MockLib(object):
@classmethod
def src_get_version(cls):
return ffi.new('char[]', 'libsamplerate-0.1.9 (c) ...')
lib_filename = 'mock'
_lib = MockLib()
elif lib_filename is None:
if _sys.platform == 'darwin':
lib_filename = '{}.dylib'.format(lib_basename)
elif _sys.platform == 'win32':
from platform import architecture
lib_filename = '{}-{}.dll'.format(lib_basename, architecture()[0])
else:
raise OSError('{} not found'.format(lib_basename))
lib_filename = _os.path.join(
_os.path.dirname(_os.path.abspath(__file__)), '_samplerate_data',
lib_filename)
_lib = ffi.dlopen(lib_filename)
else:
_lib = ffi.dlopen(lib_filename)
def _check_data(data):
"""Check whether `data` is a valid input/output for libsamplerate.
Returns
-------
num_frames
Number of frames in `data`.
channels
Number of channels in `data`.
Raises
------
ValueError: If invalid data is supplied.
"""
if not (data.dtype == _np.float32 and data.flags.c_contiguous):
raise ValueError('supplied data must be float32 and C contiguous')
if data.ndim == 2:
num_frames, channels = data.shape
elif data.ndim == 1:
num_frames, channels = data.size, 1
else:
raise ValueError('rank > 2 not supported')
return num_frames, channels
def src_strerror(error):
"""Convert the error number into a string."""
return ffi.string(_lib.src_strerror(error)).decode()
def src_get_name(converter_type):
"""Return the name of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_name(converter_type)).decode()
def src_get_description(converter_type):
"""Return the description of the converter given by `converter_type`."""
return ffi.string(_lib.src_get_description(converter_type)).decode()
def src_get_version():
"""Return the version string of libsamplerate."""
return ffi.string(_lib.src_get_version()).decode()
def src_simple(input_data, output_data, ratio, converter_type, channels):
"""Perform a single conversion from an input buffer to an output buffer.
Simple interface for performing a single conversion from input buffer to
output buffer at a fixed conversion ratio. Simple interface does not require
initialisation as it can only operate on a single buffer worth of audio.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
error = _lib.src_simple(data, converter_type, channels)
return error, data.input_frames_used, data.output_frames_gen
def src_new(converter_type, channels):
"""Initialise a new sample rate converter.
Parameters
----------
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
error : int
Error code.
"""
error = ffi.new('int*')
state = _lib.src_new(converter_type, channels, error)
return state, error[0]
def src_delete(state):
"""Release `state`.
Cleanup all internal allocations.
"""
_lib.src_delete(state)
def src_process(state, input_data, output_data, ratio, end_of_input=0):
"""Standard processing function.
Returns non zero on error.
"""
input_frames, _ = _check_data(input_data)
output_frames, _ = _check_data(output_data)
data = ffi.new('SRC_DATA*')
data.input_frames = input_frames
data.output_frames = output_frames
data.src_ratio = ratio
data.data_in = ffi.cast('float*', ffi.from_buffer(input_data))
data.data_out = ffi.cast('float*', ffi.from_buffer(output_data))
data.end_of_input = end_of_input
error = _lib.src_process(state, data)
return error, data.input_frames_used, data.output_frames_gen
def src_error(state):
"""Return an error number."""
return _lib.src_error(state) if state else None
def src_reset(state):
"""Reset the internal SRC state.
Does not modify the quality settings.
Does not free any memory allocations.
Returns non zero on error.
"""
return _lib.src_reset(state) if state else None
def src_set_ratio(state, new_ratio):
"""Set a new SRC ratio.
This allows step responses in the conversion ratio.
Returns non zero on error.
"""
return _lib.src_set_ratio(state, new_ratio) if state else None
def src_is_valid_ratio(ratio):
"""Return `True` if ratio is a valid conversion ratio, `False` otherwise.
"""
return bool(_lib.src_is_valid_ratio(ratio))
@ffi.callback('src_callback_t')
def _src_input_callback(cb_data, data):
"""Internal callback function to be used with the callback API.
Pulls the Python callback function from the handle contained in `cb_data`
and calls it to fetch frames. Frames are converted to the format required by
the API (float, interleaved channels). A reference to these data is kept
internally.
Returns
-------
frames : int
The number of frames supplied.
"""
cb_data = ffi.from_handle(cb_data)
ret = cb_data['callback']()
if ret is None:
cb_data['last_input'] = None
return 0 # No frames supplied
input_data = _np.require(ret, requirements='C', dtype=_np.float32)
input_frames, channels = _check_data(input_data)
# Check whether the correct number of channels is supplied by user.
if cb_data['channels'] != channels:
raise ValueError('Invalid number of channels in callback.')
# Store a reference of the input data to ensure it is still alive when
# accessed by libsamplerate.
cb_data['last_input'] = input_data
data[0] = ffi.cast('float*', ffi.from_buffer(input_data))
return input_frames
def src_callback_new(callback, converter_type, channels):
"""Initialisation for the callback based API.
Parameters
----------
callback : function
Called whenever new frames are to be read. Must return a NumPy array
of shape (num_frames, channels).
converter_type : int
Converter to be used.
channels : int
Number of channels.
Returns
-------
state
An anonymous pointer to the internal state of the converter.
handle
A CFFI handle to the callback data.
error : int
Error code.
"""
cb_data = {'callback': callback, 'channels': channels}
handle = ffi.new_handle(cb_data)
error = ffi.new('int*')
state = _lib.src_callback_new(_src_input_callback, converter_type,
channels, error, handle)
if state == ffi.NULL:
return None, handle, error[0]
return state, handle, error[0]
__libsamplerate_version__ = src_get_version()
if __libsamplerate_version__.startswith(lib_basename):
__libsamplerate_version__ = __libsamplerate_version__[len(
lib_basename) + 1:__libsamplerate_version__.find(' ')]
|
bd808/python-iptools
|
iptools/ipv6.py
|
validate_ip
|
python
|
def validate_ip(s):
if _HEX_RE.match(s):
return len(s.split('::')) <= 2
if _DOTTED_QUAD_RE.match(s):
halves = s.split('::')
if len(halves) > 2:
return False
hextets = s.split(':')
quads = hextets[-1].split('.')
for q in quads:
if int(q) > 255:
return False
return True
return False
|
Validate a hexidecimal IPv6 ip address.
>>> validate_ip('::')
True
>>> validate_ip('::1')
True
>>> validate_ip('2001:db8:85a3::8a2e:370:7334')
True
>>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334')
True
>>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334')
True
>>> validate_ip('2001:db8::1:0:0:1')
True
>>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
True
>>> validate_ip('::ffff:192.0.2.128')
True
>>> validate_ip('::ff::ff')
False
>>> validate_ip('::fffff')
False
>>> validate_ip('::ffff:192.0.2.300')
False
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
>>> validate_ip('1080:0:0:0:8:800:200c:417a')
True
:param s: String to validate as a hexidecimal IPv6 ip address.
:type s: str
:returns: ``True`` if a valid hexidecimal IPv6 ip address,
``False`` otherwise.
:raises: TypeError
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L157-L209
| null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from . import ipv4
__all__ = (
'cidr2block',
'ip2long',
'long2ip',
'long2rfc1924',
'rfc19242long',
'validate_cidr',
'validate_ip',
'DOCUMENTATION_NETWORK',
'IPV4_MAPPED',
'IPV6_TO_IPV4_NETWORK',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_GLOBAL',
'MULTICAST_LOCAL',
'MULTICAST_LOCAL_DHCP',
'MULTICAST_LOCAL_NODES',
'MULTICAST_LOCAL_ROUTERS',
'MULTICAST_LOOPBACK',
'MULTICAST_SITE',
'MULTICAST_SITE',
'MULTICAST_SITE_DHCP',
'PRIVATE_NETWORK',
'TEREDO_NETWORK',
'UNSPECIFIED_ADDRESS',
)
#: Regex for validating an IPv6 in hex notation
_HEX_RE = re.compile(r'^([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}$')
#: Regex for validating an IPv6 in dotted-quad notation
_DOTTED_QUAD_RE = re.compile(r'^([0-9a-f]{0,4}:){2,6}(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}/\d{1,3}$')
#: Mamimum IPv6 integer
MAX_IP = 0xffffffffffffffffffffffffffffffff
#: Minimum IPv6 integer
MIN_IP = 0x0
#: Absence of an address (only valid as source address)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
UNSPECIFIED_ADDRESS = "::/128"
#: Loopback addresses on the local host
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOOPBACK = "::1/128"
#: Common `localhost` address
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOCALHOST = LOOPBACK
#: IPv4 mapped to IPv6 (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
IPV4_MAPPED = "::ffff:0:0/96"
#: Documentation and example network
#: (`RFC 3849 <https://tools.ietf.org/html/rfc3849>`_)
# Bug fix: the previous value "2001::db8::/32" contained two '::' groups and
# is not a valid IPv6 literal; RFC 3849 reserves 2001:db8::/32.
DOCUMENTATION_NETWORK = "2001:db8::/32"
#: 6to4 Address block
#: (`RFC 3056 <https://tools.ietf.org/html/rfc3056>`_)
IPV6_TO_IPV4_NETWORK = "2002::/16"
#: Teredo addresses
#: (`RFC 4380 <https://tools.ietf.org/html/rfc4380>`_)
TEREDO_NETWORK = "2001::/32"
#: Private network
#: (`RFC 4193 <https://tools.ietf.org/html/rfc4193>`_)
PRIVATE_NETWORK = "fd00::/8"
#: Link-Local unicast networks (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LINK_LOCAL = "fe80::/10"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "ff00::/8"
#: Interface-Local multicast
MULTICAST_LOOPBACK = "ff01::/16"
#: Link-Local multicast
MULTICAST_LOCAL = "ff02::/16"
#: Site-Local multicast
MULTICAST_SITE = "ff05::/16"
#: Organization-Local multicast
# NOTE(review): this rebinding clobbers the Site-Local value above; the
# second assignment was presumably meant to be MULTICAST_ORGANIZATION
# (ff08::/16). Renaming would change the public API (`__all__` also lists
# MULTICAST_SITE twice) — confirm upstream intent before fixing.
MULTICAST_SITE = "ff08::/16"
#: Organization-Local multicast
MULTICAST_GLOBAL = "ff0e::/16"
#: All nodes on the local segment
MULTICAST_LOCAL_NODES = "ff02::1"
#: All routers on the local segment
MULTICAST_LOCAL_ROUTERS = "ff02::2"
#: All DHCP servers and relay agents on the local segment
MULTICAST_LOCAL_DHCP = "ff02::1:2"
#: All DHCP servers and relay agents on the local site
MULTICAST_SITE_DHCP = "ff05::1:3"
#: RFC 1924 alphabet
_RFC1924_ALPHABET = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'!', '#', '$', '%', '&', '(', ')', '*', '+', '-', ';', '<', '=',
'>', '?', '@', '^', '_', '`', '{', '|', '}', '~',
]
#: RFC 1924 reverse lookup
_RFC1924_REV = None
#: Regex for validating an IPv6 in hex notation
_RFC1924_RE = re.compile(r'^[0-9A-Za-z!#$%&()*+-;<=>?@^_`{|}~]{20}$')
# end validate_ip
def ip2long(ip):
    """Convert a hexadecimal IPv6 address to a network byte order 128-bit
    integer.
    >>> ip2long('::') == 0
    True
    >>> ip2long('::1') == 1
    True
    >>> expect = 0x20010db885a3000000008a2e03707334
    >>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect
    True
    >>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect
    True
    >>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect
    True
    >>> expect = 0x20010db8000000000001000000000001
    >>> ip2long('2001:db8::1:0:0:1') == expect
    True
    >>> expect = 281473902969472
    >>> ip2long('::ffff:192.0.2.128') == expect
    True
    >>> expect = 0xffffffffffffffffffffffffffffffff
    >>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect
    True
    >>> ip2long('ff::ff::ff') == None
    True
    >>> expect = 21932261930451111902915077091070067066
    >>> ip2long('1080:0:0:0:8:800:200C:417A') == expect
    True
    :param ip: Hexadecimal IPv6 address
    :type ip: str
    :returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    if '.' in ip:
        # Rewrite the trailing dotted-quad IPv4 part as two hex hextets.
        head, _, quad = ip.rpartition(':')
        v4 = ipv4.ip2long(quad)
        if v4 is None:
            return None
        ip = '{0}:{1:x}:{2:x}'.format(head, (v4 >> 16) & 0xffff, v4 & 0xffff)
    # Expand a '::' marker (validate_ip guarantees at most one) by padding
    # with enough '0' hextets to reach eight in total.
    left, marker, right = ip.partition('::')
    parts = left.split(':')
    if marker:
        tail = right.split(':')
        parts += ['0'] * (8 - len(parts) - len(tail)) + tail
    # Fold the eight 16-bit groups into one integer; empty groups (from a
    # leading or trailing '::') count as zero.
    value = 0
    for part in parts:
        value = (value << 16) | int(part or '0', 16)
    return value
# end ip2long
def long2ip(l, rfc1924=False):
    """Convert a network byte order 128-bit integer to a canonical IPv6
    address.
    >>> long2ip(2130706433)
    '::7f00:1'
    >>> long2ip(42540766411282592856904266426630537217)
    '2001:db8::1:0:0:1'
    >>> long2ip(MIN_IP)
    '::'
    >>> long2ip(MAX_IP)
    'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
    >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
    >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and <really big int> inclusive
    >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and <really big int> inclusive
    >>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True)
    '4)+k&C#VzJ4br>0wv%Yp'
    >>> long2ip(ip2long('::'), rfc1924=True)
    '00000000000000000000'
    :param l: Network byte order 128-bit integer.
    :type l: int
    :param rfc1924: Encode in RFC 1924 notation (base 85)
    :type rfc1924: bool
    :returns: Canonical IPv6 address (eg. '::1').
    :raises: TypeError
    """
    # Range check doubles as a type check: comparing a non-int (e.g. None)
    # raises TypeError from the comparison itself.
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    if rfc1924:
        return long2rfc1924(l)
    # format as one big hex value
    hex_str = '%032x' % l
    # split into double octet chunks without padding zeros
    hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
    # find and remove left most longest run of zeros
    # dc_* track the best (longest) zero run found so far, run_* the current
    # run.  Ties keep the earlier run because the update below requires
    # run_len to strictly exceed dc_len.
    dc_start, dc_len = (-1, 0)
    run_start, run_len = (-1, 0)
    for idx, hextet in enumerate(hextets):
        if '0' == hextet:
            run_len += 1
            if -1 == run_start:
                run_start = idx
            if run_len > dc_len:
                dc_len, dc_start = (run_len, run_start)
        else:
            run_len, run_start = (0, -1)
    # end for
    # Only runs of two or more zero hextets are compressed to '::'; a lone
    # zero hextet is kept as '0'.
    if dc_len > 1:
        dc_end = dc_start + dc_len
        if dc_end == len(hextets):
            # Run touches the right edge: add a trailing '' so that join()
            # produces a terminating '::'.
            hextets += ['']
        # Replace the whole run with a single '' -> join() emits '::'.
        hextets[dc_start:dc_end] = ['']
        if dc_start == 0:
            # Run touches the left edge: add a leading '' for a leading '::'.
            hextets = [''] + hextets
    # end if
    return ':'.join(hextets)
# end long2ip
def long2rfc1924(l):
    """Convert a network byte order 128-bit integer to an rfc1924 IPv6
    address.
    >>> long2rfc1924(ip2long('1080::8:800:200C:417A'))
    '4)+k&C#VzJ4br>0wv%Yp'
    >>> long2rfc1924(ip2long('::'))
    '00000000000000000000'
    >>> long2rfc1924(85)
    '00000000000000000010'
    >>> long2rfc1924(MAX_IP)
    '=r54lj&NUUO~Hi%c2ym0'
    :param l: Network byte order 128-bit integer.
    :type l: int
    :returns: RFC 1924 IPv6 address
    :raises: TypeError
    """
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    # Emit base-85 digits least-significant first, then reverse.
    # Fixed off-by-one: the loop previously ran only while r > 85, so any
    # value whose remaining quotient was exactly 85 (e.g. l == 85) indexed
    # _RFC1924_ALPHABET[85], out of range for the 85-symbol alphabet.
    o = []
    r = l
    while r >= 85:
        o.append(_RFC1924_ALPHABET[r % 85])
        r = r // 85
    o.append(_RFC1924_ALPHABET[r])
    # zfill pads with '0', which is digit zero in the RFC 1924 alphabet.
    return ''.join(reversed(o)).zfill(20)
def rfc19242long(s):
    """Convert an RFC 1924 IPv6 address to a network byte order 128-bit
    integer.
    >>> expect = 0
    >>> rfc19242long('00000000000000000000') == expect
    True
    >>> expect = 21932261930451111902915077091070067066
    >>> rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == expect
    True
    >>> rfc19242long('pizza') == None
    True
    >>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None
    True
    >>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP
    True
    :param s: RFC 1924 IPv6 address
    :type s: str
    :returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
    """
    global _RFC1924_REV
    if not _RFC1924_RE.match(s):
        return None
    # Build the char -> digit-value table once, on first use.
    if _RFC1924_REV is None:
        _RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)}
    x = 0
    for c in s:
        # Fixed: guard the lookup instead of indexing directly.  The
        # validation regex contains an unescaped '-' range ("+-;") that
        # admits characters such as ',', '.', '/' and ':' which are not in
        # the RFC 1924 alphabet; a direct _RFC1924_REV[c] raised KeyError
        # for those instead of returning None as documented.
        d = _RFC1924_REV.get(c)
        if d is None:
            return None
        x = x * 85 + d
        # Reject values that overflow 128 bits (e.g. all-'~' input).
        if x > MAX_IP:
            return None
    return x
def validate_cidr(s):
    """Validate a CIDR notation ip address.
    The string is considered a valid CIDR address if it consists of a valid
    IPv6 address in hextet format followed by a forward slash (/) and a bit
    mask length (0-128).
    >>> validate_cidr('::/128')
    True
    >>> validate_cidr('::/0')
    True
    >>> validate_cidr('fc00::/7')
    True
    >>> validate_cidr('::ffff:0:0/96')
    True
    >>> validate_cidr('::')
    False
    >>> validate_cidr('::/129')
    False
    >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer
    :param s: String to validate as a CIDR notation ip address.
    :type s: str
    :returns: ``True`` if a valid CIDR address, ``False`` otherwise.
    :raises: TypeError
    """
    # Non-string input raises TypeError from the regex match itself.
    if not _CIDR_RE.match(s):
        return False
    address, prefix = s.split('/')
    # The regex guarantees prefix is 1-3 digits, so int() cannot fail; the
    # only remaining checks are address validity and the 0-128 prefix range.
    return validate_ip(address) and int(prefix) <= 128
# end validate_cidr
def cidr2block(cidr):
    """Convert a CIDR notation ip address into a tuple containing the network
    block start and end addresses.
    >>> cidr2block('2001:db8::/48')
    ('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff')
    >>> cidr2block('::/0')
    ('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
    :param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
    :type cidr: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_cidr(cidr):
        return None
    address, prefix = cidr.split('/')
    host_bits = 128 - int(prefix)
    # Network address: clear every host bit of the given address.
    start = (ip2long(address) >> host_bits) << host_bits
    # Top of the block: set every host bit.
    end = start | ((1 << host_bits) - 1)
    return (long2ip(start), long2ip(end))
# end cidr2block
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv6.py
|
ip2long
|
python
|
def ip2long(ip):
if not validate_ip(ip):
return None
if '.' in ip:
# convert IPv4 suffix to hex
chunks = ip.split(':')
v4_int = ipv4.ip2long(chunks.pop())
if v4_int is None:
return None
chunks.append('%x' % ((v4_int >> 16) & 0xffff))
chunks.append('%x' % (v4_int & 0xffff))
ip = ':'.join(chunks)
halves = ip.split('::')
hextets = halves[0].split(':')
if len(halves) == 2:
h2 = halves[1].split(':')
for z in range(8 - (len(hextets) + len(h2))):
hextets.append('0')
for h in h2:
hextets.append(h)
# end if
lngip = 0
for h in hextets:
if '' == h:
h = '0'
lngip = (lngip << 16) | int(h, 16)
return lngip
|
Convert a hexidecimal IPv6 address to a network byte order 128-bit
integer.
>>> ip2long('::') == 0
True
>>> ip2long('::1') == 1
True
>>> expect = 0x20010db885a3000000008a2e03707334
>>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect
True
>>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect
True
>>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect
True
>>> expect = 0x20010db8000000000001000000000001
>>> ip2long('2001:db8::1:0:0:1') == expect
True
>>> expect = 281473902969472
>>> ip2long('::ffff:192.0.2.128') == expect
True
>>> expect = 0xffffffffffffffffffffffffffffffff
>>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect
True
>>> ip2long('ff::ff::ff') == None
True
>>> expect = 21932261930451111902915077091070067066
>>> ip2long('1080:0:0:0:8:800:200C:417A') == expect
True
:param ip: Hexidecimal IPv6 address
:type ip: str
:returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L213-L277
|
[
"def ip2long(ip):\n \"\"\"Convert a dotted-quad ip address to a network byte order 32-bit\n integer.\n\n\n >>> ip2long('127.0.0.1')\n 2130706433\n >>> ip2long('127.1')\n 2130706433\n >>> ip2long('127')\n 2130706432\n >>> ip2long('127.0.0.256') is None\n True\n\n\n :param ip: Dotted-quad ip address (eg. '127.0.0.1').\n :type ip: str\n :returns: Network byte order 32-bit integer or ``None`` if ip is invalid.\n \"\"\"\n if not validate_ip(ip):\n return None\n quads = ip.split('.')\n if len(quads) == 1:\n # only a network quad\n quads = quads + [0, 0, 0]\n elif len(quads) < 4:\n # partial form, last supplied quad is host address, rest is network\n host = quads[-1:]\n quads = quads[:-1] + [0, ] * (4 - len(quads)) + host\n\n lngip = 0\n for q in quads:\n lngip = (lngip << 8) | int(q)\n return lngip\n",
"def validate_ip(s):\n \"\"\"Validate a hexidecimal IPv6 ip address.\n\n\n >>> validate_ip('::')\n True\n >>> validate_ip('::1')\n True\n >>> validate_ip('2001:db8:85a3::8a2e:370:7334')\n True\n >>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334')\n True\n >>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334')\n True\n >>> validate_ip('2001:db8::1:0:0:1')\n True\n >>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')\n True\n >>> validate_ip('::ffff:192.0.2.128')\n True\n >>> validate_ip('::ff::ff')\n False\n >>> validate_ip('::fffff')\n False\n >>> validate_ip('::ffff:192.0.2.300')\n False\n >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n TypeError: expected string or buffer\n >>> validate_ip('1080:0:0:0:8:800:200c:417a')\n True\n\n\n :param s: String to validate as a hexidecimal IPv6 ip address.\n :type s: str\n :returns: ``True`` if a valid hexidecimal IPv6 ip address,\n ``False`` otherwise.\n :raises: TypeError\n \"\"\"\n if _HEX_RE.match(s):\n return len(s.split('::')) <= 2\n if _DOTTED_QUAD_RE.match(s):\n halves = s.split('::')\n if len(halves) > 2:\n return False\n hextets = s.split(':')\n quads = hextets[-1].split('.')\n for q in quads:\n if int(q) > 255:\n return False\n return True\n return False\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from . import ipv4
__all__ = (
'cidr2block',
'ip2long',
'long2ip',
'long2rfc1924',
'rfc19242long',
'validate_cidr',
'validate_ip',
'DOCUMENTATION_NETWORK',
'IPV4_MAPPED',
'IPV6_TO_IPV4_NETWORK',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_GLOBAL',
'MULTICAST_LOCAL',
'MULTICAST_LOCAL_DHCP',
'MULTICAST_LOCAL_NODES',
'MULTICAST_LOCAL_ROUTERS',
'MULTICAST_LOOPBACK',
'MULTICAST_SITE',
'MULTICAST_SITE',
'MULTICAST_SITE_DHCP',
'PRIVATE_NETWORK',
'TEREDO_NETWORK',
'UNSPECIFIED_ADDRESS',
)
#: Regex for validating an IPv6 in hex notation
_HEX_RE = re.compile(r'^([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}$')
#: Regex for validating an IPv6 in dotted-quad notation
_DOTTED_QUAD_RE = re.compile(r'^([0-9a-f]{0,4}:){2,6}(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}/\d{1,3}$')
#: Mamimum IPv6 integer
MAX_IP = 0xffffffffffffffffffffffffffffffff
#: Minimum IPv6 integer
MIN_IP = 0x0
#: Absence of an address (only valid as source address)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
UNSPECIFIED_ADDRESS = "::/128"
#: Loopback addresses on the local host
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOOPBACK = "::1/128"
#: Common `localhost` address
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOCALHOST = LOOPBACK
#: IPv4 mapped to IPv6 (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
IPV4_MAPPED = "::ffff:0:0/96"
#: Documentation and example network
#: (`RFC 3849 <https://tools.ietf.org/html/rfc3849>`_)
DOCUMENTATION_NETWORK = "2001::db8::/32"
#: 6to4 Address block
#: (`RFC 3056 <https://tools.ietf.org/html/rfc3056>`_)
IPV6_TO_IPV4_NETWORK = "2002::/16"
#: Teredo addresses
#: (`RFC 4380 <https://tools.ietf.org/html/rfc4380>`_)
TEREDO_NETWORK = "2001::/32"
#: Private network
#: (`RFC 4193 <https://tools.ietf.org/html/rfc4193>`_)
PRIVATE_NETWORK = "fd00::/8"
#: Link-Local unicast networks (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LINK_LOCAL = "fe80::/10"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "ff00::/8"
#: Interface-Local multicast
MULTICAST_LOOPBACK = "ff01::/16"
#: Link-Local multicast
MULTICAST_LOCAL = "ff02::/16"
#: Site-Local multicast
MULTICAST_SITE = "ff05::/16"
#: Organization-Local multicast
MULTICAST_SITE = "ff08::/16"
#: Organization-Local multicast
MULTICAST_GLOBAL = "ff0e::/16"
#: All nodes on the local segment
MULTICAST_LOCAL_NODES = "ff02::1"
#: All routers on the local segment
MULTICAST_LOCAL_ROUTERS = "ff02::2"
#: All DHCP servers and relay agents on the local segment
MULTICAST_LOCAL_DHCP = "ff02::1:2"
#: All DHCP servers and relay agents on the local site
MULTICAST_SITE_DHCP = "ff05::1:3"
#: RFC 1924 alphabet
_RFC1924_ALPHABET = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'!', '#', '$', '%', '&', '(', ')', '*', '+', '-', ';', '<', '=',
'>', '?', '@', '^', '_', '`', '{', '|', '}', '~',
]
#: RFC 1924 reverse lookup
_RFC1924_REV = None
#: Regex for validating an IPv6 in hex notation
_RFC1924_RE = re.compile(r'^[0-9A-Za-z!#$%&()*+-;<=>?@^_`{|}~]{20}$')
def validate_ip(s):
    """Validate a hexadecimal IPv6 ip address.
    >>> validate_ip('::')
    True
    >>> validate_ip('::1')
    True
    >>> validate_ip('2001:db8:85a3::8a2e:370:7334')
    True
    >>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334')
    True
    >>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334')
    True
    >>> validate_ip('2001:db8::1:0:0:1')
    True
    >>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
    True
    >>> validate_ip('::ffff:192.0.2.128')
    True
    >>> validate_ip('::ff::ff')
    False
    >>> validate_ip('::fffff')
    False
    >>> validate_ip('::ffff:192.0.2.300')
    False
    >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer
    >>> validate_ip('1080:0:0:0:8:800:200c:417a')
    True
    :param s: String to validate as a hexadecimal IPv6 ip address.
    :type s: str
    :returns: ``True`` if a valid hexadecimal IPv6 ip address,
        ``False`` otherwise.
    :raises: TypeError
    """
    # Non-string input raises TypeError from the regex match itself.
    if _HEX_RE.match(s):
        # Pure hex form: valid iff there is at most one '::' marker.
        return s.count('::') < 2
    if _DOTTED_QUAD_RE.match(s):
        # Hex form with a trailing dotted-quad IPv4 part.
        if s.count('::') > 1:
            return False
        # Every dotted-quad byte must fit in 0..255.
        return all(int(quad) <= 255 for quad in s.split(':')[-1].split('.'))
    return False
# end validate_ip
# end ip2long
def long2ip(l, rfc1924=False):
    """Convert a network byte order 128-bit integer to a canonical IPv6
    address.
    >>> long2ip(2130706433)
    '::7f00:1'
    >>> long2ip(42540766411282592856904266426630537217)
    '2001:db8::1:0:0:1'
    >>> long2ip(MIN_IP)
    '::'
    >>> long2ip(MAX_IP)
    'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
    >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
    >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and <really big int> inclusive
    >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and <really big int> inclusive
    >>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True)
    '4)+k&C#VzJ4br>0wv%Yp'
    >>> long2ip(ip2long('::'), rfc1924=True)
    '00000000000000000000'
    :param l: Network byte order 128-bit integer.
    :type l: int
    :param rfc1924: Encode in RFC 1924 notation (base 85)
    :type rfc1924: bool
    :returns: Canonical IPv6 address (eg. '::1').
    :raises: TypeError
    """
    # Range check doubles as a type check: comparing a non-int (e.g. None)
    # raises TypeError from the comparison itself.
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    if rfc1924:
        return long2rfc1924(l)
    # format as one big hex value
    hex_str = '%032x' % l
    # split into double octet chunks without padding zeros
    hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
    # find and remove left most longest run of zeros
    # dc_* track the best (longest) zero run found so far, run_* the current
    # run.  Ties keep the earlier run because the update below requires
    # run_len to strictly exceed dc_len.
    dc_start, dc_len = (-1, 0)
    run_start, run_len = (-1, 0)
    for idx, hextet in enumerate(hextets):
        if '0' == hextet:
            run_len += 1
            if -1 == run_start:
                run_start = idx
            if run_len > dc_len:
                dc_len, dc_start = (run_len, run_start)
        else:
            run_len, run_start = (0, -1)
    # end for
    # Only runs of two or more zero hextets are compressed to '::'; a lone
    # zero hextet is kept as '0'.
    if dc_len > 1:
        dc_end = dc_start + dc_len
        if dc_end == len(hextets):
            # Run touches the right edge: add a trailing '' so that join()
            # produces a terminating '::'.
            hextets += ['']
        # Replace the whole run with a single '' -> join() emits '::'.
        hextets[dc_start:dc_end] = ['']
        if dc_start == 0:
            # Run touches the left edge: add a leading '' for a leading '::'.
            hextets = [''] + hextets
    # end if
    return ':'.join(hextets)
# end long2ip
def long2rfc1924(l):
    """Convert a network byte order 128-bit integer to an rfc1924 IPv6
    address.
    >>> long2rfc1924(ip2long('1080::8:800:200C:417A'))
    '4)+k&C#VzJ4br>0wv%Yp'
    >>> long2rfc1924(ip2long('::'))
    '00000000000000000000'
    >>> long2rfc1924(85)
    '00000000000000000010'
    >>> long2rfc1924(MAX_IP)
    '=r54lj&NUUO~Hi%c2ym0'
    :param l: Network byte order 128-bit integer.
    :type l: int
    :returns: RFC 1924 IPv6 address
    :raises: TypeError
    """
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    # Emit base-85 digits least-significant first, then reverse.
    # Fixed off-by-one: the loop previously ran only while r > 85, so any
    # value whose remaining quotient was exactly 85 (e.g. l == 85) indexed
    # _RFC1924_ALPHABET[85], out of range for the 85-symbol alphabet.
    o = []
    r = l
    while r >= 85:
        o.append(_RFC1924_ALPHABET[r % 85])
        r = r // 85
    o.append(_RFC1924_ALPHABET[r])
    # zfill pads with '0', which is digit zero in the RFC 1924 alphabet.
    return ''.join(reversed(o)).zfill(20)
def rfc19242long(s):
    """Convert an RFC 1924 IPv6 address to a network byte order 128-bit
    integer.
    >>> expect = 0
    >>> rfc19242long('00000000000000000000') == expect
    True
    >>> expect = 21932261930451111902915077091070067066
    >>> rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == expect
    True
    >>> rfc19242long('pizza') == None
    True
    >>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None
    True
    >>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP
    True
    :param s: RFC 1924 IPv6 address
    :type s: str
    :returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
    """
    global _RFC1924_REV
    if not _RFC1924_RE.match(s):
        return None
    # Build the char -> digit-value table once, on first use.
    if _RFC1924_REV is None:
        _RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)}
    x = 0
    for c in s:
        # Fixed: guard the lookup instead of indexing directly.  The
        # validation regex contains an unescaped '-' range ("+-;") that
        # admits characters such as ',', '.', '/' and ':' which are not in
        # the RFC 1924 alphabet; a direct _RFC1924_REV[c] raised KeyError
        # for those instead of returning None as documented.
        d = _RFC1924_REV.get(c)
        if d is None:
            return None
        x = x * 85 + d
        # Reject values that overflow 128 bits (e.g. all-'~' input).
        if x > MAX_IP:
            return None
    return x
def validate_cidr(s):
    """Validate a CIDR notation ip address.
    The string is considered a valid CIDR address if it consists of a valid
    IPv6 address in hextet format followed by a forward slash (/) and a bit
    mask length (0-128).
    >>> validate_cidr('::/128')
    True
    >>> validate_cidr('::/0')
    True
    >>> validate_cidr('fc00::/7')
    True
    >>> validate_cidr('::ffff:0:0/96')
    True
    >>> validate_cidr('::')
    False
    >>> validate_cidr('::/129')
    False
    >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer
    :param s: String to validate as a CIDR notation ip address.
    :type s: str
    :returns: ``True`` if a valid CIDR address, ``False`` otherwise.
    :raises: TypeError
    """
    # Non-string input raises TypeError from the regex match itself.
    if not _CIDR_RE.match(s):
        return False
    address, prefix = s.split('/')
    # The regex guarantees prefix is 1-3 digits, so int() cannot fail; the
    # only remaining checks are address validity and the 0-128 prefix range.
    return validate_ip(address) and int(prefix) <= 128
# end validate_cidr
def cidr2block(cidr):
    """Convert a CIDR notation ip address into a tuple containing the network
    block start and end addresses.
    >>> cidr2block('2001:db8::/48')
    ('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff')
    >>> cidr2block('::/0')
    ('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
    :param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
    :type cidr: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_cidr(cidr):
        return None
    address, prefix = cidr.split('/')
    host_bits = 128 - int(prefix)
    # Network address: clear every host bit of the given address.
    start = (ip2long(address) >> host_bits) << host_bits
    # Top of the block: set every host bit.
    end = start | ((1 << host_bits) - 1)
    return (long2ip(start), long2ip(end))
# end cidr2block
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv6.py
|
long2ip
|
python
|
def long2ip(l, rfc1924=False):
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
if rfc1924:
return long2rfc1924(l)
# format as one big hex value
hex_str = '%032x' % l
# split into double octet chunks without padding zeros
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
# find and remove left most longest run of zeros
dc_start, dc_len = (-1, 0)
run_start, run_len = (-1, 0)
for idx, hextet in enumerate(hextets):
if '0' == hextet:
run_len += 1
if -1 == run_start:
run_start = idx
if run_len > dc_len:
dc_len, dc_start = (run_len, run_start)
else:
run_len, run_start = (0, -1)
# end for
if dc_len > 1:
dc_end = dc_start + dc_len
if dc_end == len(hextets):
hextets += ['']
hextets[dc_start:dc_end] = ['']
if dc_start == 0:
hextets = [''] + hextets
# end if
return ':'.join(hextets)
|
Convert a network byte order 128-bit integer to a canonical IPv6
address.
>>> long2ip(2130706433)
'::7f00:1'
>>> long2ip(42540766411282592856904266426630537217)
'2001:db8::1:0:0:1'
>>> long2ip(MIN_IP)
'::'
>>> long2ip(MAX_IP)
'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True)
'4)+k&C#VzJ4br>0wv%Yp'
>>> long2ip(ip2long('::'), rfc1924=True)
'00000000000000000000'
:param l: Network byte order 128-bit integer.
:type l: int
:param rfc1924: Encode in RFC 1924 notation (base 85)
:type rfc1924: bool
:returns: Canonical IPv6 address (eg. '::1').
:raises: TypeError
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L281-L353
|
[
"def long2rfc1924(l):\n \"\"\"Convert a network byte order 128-bit integer to an rfc1924 IPv6\n address.\n\n\n >>> long2rfc1924(ip2long('1080::8:800:200C:417A'))\n '4)+k&C#VzJ4br>0wv%Yp'\n >>> long2rfc1924(ip2long('::'))\n '00000000000000000000'\n >>> long2rfc1924(MAX_IP)\n '=r54lj&NUUO~Hi%c2ym0'\n\n\n :param l: Network byte order 128-bit integer.\n :type l: int\n :returns: RFC 1924 IPv6 address\n :raises: TypeError\n \"\"\"\n if MAX_IP < l or l < MIN_IP:\n raise TypeError(\n \"expected int between %d and %d inclusive\" % (MIN_IP, MAX_IP))\n o = []\n r = l\n while r > 85:\n o.append(_RFC1924_ALPHABET[r % 85])\n r = r // 85\n o.append(_RFC1924_ALPHABET[r])\n return ''.join(reversed(o)).zfill(20)\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from . import ipv4
__all__ = (
'cidr2block',
'ip2long',
'long2ip',
'long2rfc1924',
'rfc19242long',
'validate_cidr',
'validate_ip',
'DOCUMENTATION_NETWORK',
'IPV4_MAPPED',
'IPV6_TO_IPV4_NETWORK',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_GLOBAL',
'MULTICAST_LOCAL',
'MULTICAST_LOCAL_DHCP',
'MULTICAST_LOCAL_NODES',
'MULTICAST_LOCAL_ROUTERS',
'MULTICAST_LOOPBACK',
'MULTICAST_SITE',
'MULTICAST_SITE',
'MULTICAST_SITE_DHCP',
'PRIVATE_NETWORK',
'TEREDO_NETWORK',
'UNSPECIFIED_ADDRESS',
)
#: Regex for validating an IPv6 in hex notation
_HEX_RE = re.compile(r'^([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}$')
#: Regex for validating an IPv6 in dotted-quad notation
_DOTTED_QUAD_RE = re.compile(r'^([0-9a-f]{0,4}:){2,6}(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}/\d{1,3}$')
#: Mamimum IPv6 integer
MAX_IP = 0xffffffffffffffffffffffffffffffff
#: Minimum IPv6 integer
MIN_IP = 0x0
#: Absence of an address (only valid as source address)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
UNSPECIFIED_ADDRESS = "::/128"
#: Loopback addresses on the local host
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOOPBACK = "::1/128"
#: Common `localhost` address
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOCALHOST = LOOPBACK
#: IPv4 mapped to IPv6 (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
IPV4_MAPPED = "::ffff:0:0/96"
#: Documentation and example network
#: (`RFC 3849 <https://tools.ietf.org/html/rfc3849>`_)
DOCUMENTATION_NETWORK = "2001::db8::/32"
#: 6to4 Address block
#: (`RFC 3056 <https://tools.ietf.org/html/rfc3056>`_)
IPV6_TO_IPV4_NETWORK = "2002::/16"
#: Teredo addresses
#: (`RFC 4380 <https://tools.ietf.org/html/rfc4380>`_)
TEREDO_NETWORK = "2001::/32"
#: Private network
#: (`RFC 4193 <https://tools.ietf.org/html/rfc4193>`_)
PRIVATE_NETWORK = "fd00::/8"
#: Link-Local unicast networks (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LINK_LOCAL = "fe80::/10"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "ff00::/8"
#: Interface-Local multicast
MULTICAST_LOOPBACK = "ff01::/16"
#: Link-Local multicast
MULTICAST_LOCAL = "ff02::/16"
#: Site-Local multicast
MULTICAST_SITE = "ff05::/16"
#: Organization-Local multicast
MULTICAST_SITE = "ff08::/16"
#: Organization-Local multicast
MULTICAST_GLOBAL = "ff0e::/16"
#: All nodes on the local segment
MULTICAST_LOCAL_NODES = "ff02::1"
#: All routers on the local segment
MULTICAST_LOCAL_ROUTERS = "ff02::2"
#: All DHCP servers and relay agents on the local segment
MULTICAST_LOCAL_DHCP = "ff02::1:2"
#: All DHCP servers and relay agents on the local site
MULTICAST_SITE_DHCP = "ff05::1:3"
#: RFC 1924 alphabet
_RFC1924_ALPHABET = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'!', '#', '$', '%', '&', '(', ')', '*', '+', '-', ';', '<', '=',
'>', '?', '@', '^', '_', '`', '{', '|', '}', '~',
]
#: RFC 1924 reverse lookup
_RFC1924_REV = None
#: Regex for validating an IPv6 in hex notation
_RFC1924_RE = re.compile(r'^[0-9A-Za-z!#$%&()*+-;<=>?@^_`{|}~]{20}$')
def validate_ip(s):
    """Validate a hexadecimal IPv6 ip address.
    >>> validate_ip('::')
    True
    >>> validate_ip('::1')
    True
    >>> validate_ip('2001:db8:85a3::8a2e:370:7334')
    True
    >>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334')
    True
    >>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334')
    True
    >>> validate_ip('2001:db8::1:0:0:1')
    True
    >>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
    True
    >>> validate_ip('::ffff:192.0.2.128')
    True
    >>> validate_ip('::ff::ff')
    False
    >>> validate_ip('::fffff')
    False
    >>> validate_ip('::ffff:192.0.2.300')
    False
    >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer
    >>> validate_ip('1080:0:0:0:8:800:200c:417a')
    True
    :param s: String to validate as a hexadecimal IPv6 ip address.
    :type s: str
    :returns: ``True`` if a valid hexadecimal IPv6 ip address,
        ``False`` otherwise.
    :raises: TypeError
    """
    # Non-string input raises TypeError from the regex match itself.
    if _HEX_RE.match(s):
        # Pure hex form: valid iff there is at most one '::' marker.
        return s.count('::') < 2
    if _DOTTED_QUAD_RE.match(s):
        # Hex form with a trailing dotted-quad IPv4 part.
        if s.count('::') > 1:
            return False
        # Every dotted-quad byte must fit in 0..255.
        return all(int(quad) <= 255 for quad in s.split(':')[-1].split('.'))
    return False
# end validate_ip
def ip2long(ip):
    """Convert a hexadecimal IPv6 address to a network byte order 128-bit
    integer.
    >>> ip2long('::') == 0
    True
    >>> ip2long('::1') == 1
    True
    >>> expect = 0x20010db885a3000000008a2e03707334
    >>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect
    True
    >>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect
    True
    >>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect
    True
    >>> expect = 0x20010db8000000000001000000000001
    >>> ip2long('2001:db8::1:0:0:1') == expect
    True
    >>> expect = 281473902969472
    >>> ip2long('::ffff:192.0.2.128') == expect
    True
    >>> expect = 0xffffffffffffffffffffffffffffffff
    >>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect
    True
    >>> ip2long('ff::ff::ff') == None
    True
    >>> expect = 21932261930451111902915077091070067066
    >>> ip2long('1080:0:0:0:8:800:200C:417A') == expect
    True
    :param ip: Hexadecimal IPv6 address
    :type ip: str
    :returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    if '.' in ip:
        # Rewrite the trailing dotted-quad IPv4 part as two hex hextets.
        head, _, quad = ip.rpartition(':')
        v4 = ipv4.ip2long(quad)
        if v4 is None:
            return None
        ip = '{0}:{1:x}:{2:x}'.format(head, (v4 >> 16) & 0xffff, v4 & 0xffff)
    # Expand a '::' marker (validate_ip guarantees at most one) by padding
    # with enough '0' hextets to reach eight in total.
    left, marker, right = ip.partition('::')
    parts = left.split(':')
    if marker:
        tail = right.split(':')
        parts += ['0'] * (8 - len(parts) - len(tail)) + tail
    # Fold the eight 16-bit groups into one integer; empty groups (from a
    # leading or trailing '::') count as zero.
    value = 0
    for part in parts:
        value = (value << 16) | int(part or '0', 16)
    return value
# end ip2long
# end long2ip
def long2rfc1924(l):
"""Convert a network byte order 128-bit integer to an rfc1924 IPv6
address.
>>> long2rfc1924(ip2long('1080::8:800:200C:417A'))
'4)+k&C#VzJ4br>0wv%Yp'
>>> long2rfc1924(ip2long('::'))
'00000000000000000000'
>>> long2rfc1924(MAX_IP)
'=r54lj&NUUO~Hi%c2ym0'
:param l: Network byte order 128-bit integer.
:type l: int
:returns: RFC 1924 IPv6 address
:raises: TypeError
"""
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
o = []
r = l
while r > 85:
o.append(_RFC1924_ALPHABET[r % 85])
r = r // 85
o.append(_RFC1924_ALPHABET[r])
return ''.join(reversed(o)).zfill(20)
def rfc19242long(s):
"""Convert an RFC 1924 IPv6 address to a network byte order 128-bit
integer.
>>> expect = 0
>>> rfc19242long('00000000000000000000') == expect
True
>>> expect = 21932261930451111902915077091070067066
>>> rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == expect
True
>>> rfc19242long('pizza') == None
True
>>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None
True
>>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP
True
:param ip: RFC 1924 IPv6 address
:type ip: str
:returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
"""
global _RFC1924_REV
if not _RFC1924_RE.match(s):
return None
if _RFC1924_REV is None:
_RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)}
x = 0
for c in s:
x = x * 85 + _RFC1924_REV[c]
if x > MAX_IP:
return None
return x
def validate_cidr(s):
"""Validate a CIDR notation ip address.
The string is considered a valid CIDR address if it consists of a valid
IPv6 address in hextet format followed by a forward slash (/) and a bit
mask length (0-128).
>>> validate_cidr('::/128')
True
>>> validate_cidr('::/0')
True
>>> validate_cidr('fc00::/7')
True
>>> validate_cidr('::ffff:0:0/96')
True
>>> validate_cidr('::')
False
>>> validate_cidr('::/129')
False
>>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a CIDR notation ip address.
:type s: str
:returns: ``True`` if a valid CIDR address, ``False`` otherwise.
:raises: TypeError
"""
if _CIDR_RE.match(s):
ip, mask = s.split('/')
if validate_ip(ip):
if int(mask) > 128:
return False
else:
return False
return True
return False
# end validate_cidr
def cidr2block(cidr):
"""Convert a CIDR notation ip address into a tuple containing the network
block start and end addresses.
>>> cidr2block('2001:db8::/48')
('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff')
>>> cidr2block('::/0')
('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
:param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
:type cidr: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
"""
if not validate_cidr(cidr):
return None
ip, prefix = cidr.split('/')
prefix = int(prefix)
ip = ip2long(ip)
# keep left most prefix bits of ip
shift = 128 - prefix
block_start = ip >> shift << shift
# expand right most 128 - prefix bits to 1
mask = (1 << shift) - 1
block_end = block_start | mask
return (long2ip(block_start), long2ip(block_end))
# end cidr2block
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv6.py
|
long2rfc1924
|
python
|
def long2rfc1924(l):
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
o = []
r = l
while r > 85:
o.append(_RFC1924_ALPHABET[r % 85])
r = r // 85
o.append(_RFC1924_ALPHABET[r])
return ''.join(reversed(o)).zfill(20)
|
Convert a network byte order 128-bit integer to an rfc1924 IPv6
address.
>>> long2rfc1924(ip2long('1080::8:800:200C:417A'))
'4)+k&C#VzJ4br>0wv%Yp'
>>> long2rfc1924(ip2long('::'))
'00000000000000000000'
>>> long2rfc1924(MAX_IP)
'=r54lj&NUUO~Hi%c2ym0'
:param l: Network byte order 128-bit integer.
:type l: int
:returns: RFC 1924 IPv6 address
:raises: TypeError
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L357-L384
| null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from . import ipv4
__all__ = (
'cidr2block',
'ip2long',
'long2ip',
'long2rfc1924',
'rfc19242long',
'validate_cidr',
'validate_ip',
'DOCUMENTATION_NETWORK',
'IPV4_MAPPED',
'IPV6_TO_IPV4_NETWORK',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_GLOBAL',
'MULTICAST_LOCAL',
'MULTICAST_LOCAL_DHCP',
'MULTICAST_LOCAL_NODES',
'MULTICAST_LOCAL_ROUTERS',
'MULTICAST_LOOPBACK',
'MULTICAST_SITE',
'MULTICAST_SITE',
'MULTICAST_SITE_DHCP',
'PRIVATE_NETWORK',
'TEREDO_NETWORK',
'UNSPECIFIED_ADDRESS',
)
#: Regex for validating an IPv6 in hex notation
_HEX_RE = re.compile(r'^([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}$')
#: Regex for validating an IPv6 in dotted-quad notation
_DOTTED_QUAD_RE = re.compile(r'^([0-9a-f]{0,4}:){2,6}(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}/\d{1,3}$')
#: Mamimum IPv6 integer
MAX_IP = 0xffffffffffffffffffffffffffffffff
#: Minimum IPv6 integer
MIN_IP = 0x0
#: Absence of an address (only valid as source address)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
UNSPECIFIED_ADDRESS = "::/128"
#: Loopback addresses on the local host
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOOPBACK = "::1/128"
#: Common `localhost` address
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOCALHOST = LOOPBACK
#: IPv4 mapped to IPv6 (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
IPV4_MAPPED = "::ffff:0:0/96"
#: Documentation and example network
#: (`RFC 3849 <https://tools.ietf.org/html/rfc3849>`_)
DOCUMENTATION_NETWORK = "2001::db8::/32"
#: 6to4 Address block
#: (`RFC 3056 <https://tools.ietf.org/html/rfc3056>`_)
IPV6_TO_IPV4_NETWORK = "2002::/16"
#: Teredo addresses
#: (`RFC 4380 <https://tools.ietf.org/html/rfc4380>`_)
TEREDO_NETWORK = "2001::/32"
#: Private network
#: (`RFC 4193 <https://tools.ietf.org/html/rfc4193>`_)
PRIVATE_NETWORK = "fd00::/8"
#: Link-Local unicast networks (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LINK_LOCAL = "fe80::/10"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "ff00::/8"
#: Interface-Local multicast
MULTICAST_LOOPBACK = "ff01::/16"
#: Link-Local multicast
MULTICAST_LOCAL = "ff02::/16"
#: Site-Local multicast
MULTICAST_SITE = "ff05::/16"
#: Organization-Local multicast
MULTICAST_SITE = "ff08::/16"
#: Organization-Local multicast
MULTICAST_GLOBAL = "ff0e::/16"
#: All nodes on the local segment
MULTICAST_LOCAL_NODES = "ff02::1"
#: All routers on the local segment
MULTICAST_LOCAL_ROUTERS = "ff02::2"
#: All DHCP servers and relay agents on the local segment
MULTICAST_LOCAL_DHCP = "ff02::1:2"
#: All DHCP servers and relay agents on the local site
MULTICAST_SITE_DHCP = "ff05::1:3"
#: RFC 1924 alphabet
_RFC1924_ALPHABET = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'!', '#', '$', '%', '&', '(', ')', '*', '+', '-', ';', '<', '=',
'>', '?', '@', '^', '_', '`', '{', '|', '}', '~',
]
#: RFC 1924 reverse lookup
_RFC1924_REV = None
#: Regex for validating an IPv6 in hex notation
_RFC1924_RE = re.compile(r'^[0-9A-Za-z!#$%&()*+-;<=>?@^_`{|}~]{20}$')
def validate_ip(s):
"""Validate a hexidecimal IPv6 ip address.
>>> validate_ip('::')
True
>>> validate_ip('::1')
True
>>> validate_ip('2001:db8:85a3::8a2e:370:7334')
True
>>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334')
True
>>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334')
True
>>> validate_ip('2001:db8::1:0:0:1')
True
>>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
True
>>> validate_ip('::ffff:192.0.2.128')
True
>>> validate_ip('::ff::ff')
False
>>> validate_ip('::fffff')
False
>>> validate_ip('::ffff:192.0.2.300')
False
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
>>> validate_ip('1080:0:0:0:8:800:200c:417a')
True
:param s: String to validate as a hexidecimal IPv6 ip address.
:type s: str
:returns: ``True`` if a valid hexidecimal IPv6 ip address,
``False`` otherwise.
:raises: TypeError
"""
if _HEX_RE.match(s):
return len(s.split('::')) <= 2
if _DOTTED_QUAD_RE.match(s):
halves = s.split('::')
if len(halves) > 2:
return False
hextets = s.split(':')
quads = hextets[-1].split('.')
for q in quads:
if int(q) > 255:
return False
return True
return False
# end validate_ip
def ip2long(ip):
"""Convert a hexidecimal IPv6 address to a network byte order 128-bit
integer.
>>> ip2long('::') == 0
True
>>> ip2long('::1') == 1
True
>>> expect = 0x20010db885a3000000008a2e03707334
>>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect
True
>>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect
True
>>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect
True
>>> expect = 0x20010db8000000000001000000000001
>>> ip2long('2001:db8::1:0:0:1') == expect
True
>>> expect = 281473902969472
>>> ip2long('::ffff:192.0.2.128') == expect
True
>>> expect = 0xffffffffffffffffffffffffffffffff
>>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect
True
>>> ip2long('ff::ff::ff') == None
True
>>> expect = 21932261930451111902915077091070067066
>>> ip2long('1080:0:0:0:8:800:200C:417A') == expect
True
:param ip: Hexidecimal IPv6 address
:type ip: str
:returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
"""
if not validate_ip(ip):
return None
if '.' in ip:
# convert IPv4 suffix to hex
chunks = ip.split(':')
v4_int = ipv4.ip2long(chunks.pop())
if v4_int is None:
return None
chunks.append('%x' % ((v4_int >> 16) & 0xffff))
chunks.append('%x' % (v4_int & 0xffff))
ip = ':'.join(chunks)
halves = ip.split('::')
hextets = halves[0].split(':')
if len(halves) == 2:
h2 = halves[1].split(':')
for z in range(8 - (len(hextets) + len(h2))):
hextets.append('0')
for h in h2:
hextets.append(h)
# end if
lngip = 0
for h in hextets:
if '' == h:
h = '0'
lngip = (lngip << 16) | int(h, 16)
return lngip
# end ip2long
def long2ip(l, rfc1924=False):
"""Convert a network byte order 128-bit integer to a canonical IPv6
address.
>>> long2ip(2130706433)
'::7f00:1'
>>> long2ip(42540766411282592856904266426630537217)
'2001:db8::1:0:0:1'
>>> long2ip(MIN_IP)
'::'
>>> long2ip(MAX_IP)
'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True)
'4)+k&C#VzJ4br>0wv%Yp'
>>> long2ip(ip2long('::'), rfc1924=True)
'00000000000000000000'
:param l: Network byte order 128-bit integer.
:type l: int
:param rfc1924: Encode in RFC 1924 notation (base 85)
:type rfc1924: bool
:returns: Canonical IPv6 address (eg. '::1').
:raises: TypeError
"""
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
if rfc1924:
return long2rfc1924(l)
# format as one big hex value
hex_str = '%032x' % l
# split into double octet chunks without padding zeros
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
# find and remove left most longest run of zeros
dc_start, dc_len = (-1, 0)
run_start, run_len = (-1, 0)
for idx, hextet in enumerate(hextets):
if '0' == hextet:
run_len += 1
if -1 == run_start:
run_start = idx
if run_len > dc_len:
dc_len, dc_start = (run_len, run_start)
else:
run_len, run_start = (0, -1)
# end for
if dc_len > 1:
dc_end = dc_start + dc_len
if dc_end == len(hextets):
hextets += ['']
hextets[dc_start:dc_end] = ['']
if dc_start == 0:
hextets = [''] + hextets
# end if
return ':'.join(hextets)
# end long2ip
def rfc19242long(s):
"""Convert an RFC 1924 IPv6 address to a network byte order 128-bit
integer.
>>> expect = 0
>>> rfc19242long('00000000000000000000') == expect
True
>>> expect = 21932261930451111902915077091070067066
>>> rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == expect
True
>>> rfc19242long('pizza') == None
True
>>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None
True
>>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP
True
:param ip: RFC 1924 IPv6 address
:type ip: str
:returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
"""
global _RFC1924_REV
if not _RFC1924_RE.match(s):
return None
if _RFC1924_REV is None:
_RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)}
x = 0
for c in s:
x = x * 85 + _RFC1924_REV[c]
if x > MAX_IP:
return None
return x
def validate_cidr(s):
"""Validate a CIDR notation ip address.
The string is considered a valid CIDR address if it consists of a valid
IPv6 address in hextet format followed by a forward slash (/) and a bit
mask length (0-128).
>>> validate_cidr('::/128')
True
>>> validate_cidr('::/0')
True
>>> validate_cidr('fc00::/7')
True
>>> validate_cidr('::ffff:0:0/96')
True
>>> validate_cidr('::')
False
>>> validate_cidr('::/129')
False
>>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a CIDR notation ip address.
:type s: str
:returns: ``True`` if a valid CIDR address, ``False`` otherwise.
:raises: TypeError
"""
if _CIDR_RE.match(s):
ip, mask = s.split('/')
if validate_ip(ip):
if int(mask) > 128:
return False
else:
return False
return True
return False
# end validate_cidr
def cidr2block(cidr):
"""Convert a CIDR notation ip address into a tuple containing the network
block start and end addresses.
>>> cidr2block('2001:db8::/48')
('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff')
>>> cidr2block('::/0')
('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
:param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
:type cidr: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
"""
if not validate_cidr(cidr):
return None
ip, prefix = cidr.split('/')
prefix = int(prefix)
ip = ip2long(ip)
# keep left most prefix bits of ip
shift = 128 - prefix
block_start = ip >> shift << shift
# expand right most 128 - prefix bits to 1
mask = (1 << shift) - 1
block_end = block_start | mask
return (long2ip(block_start), long2ip(block_end))
# end cidr2block
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv6.py
|
rfc19242long
|
python
|
def rfc19242long(s):
global _RFC1924_REV
if not _RFC1924_RE.match(s):
return None
if _RFC1924_REV is None:
_RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)}
x = 0
for c in s:
x = x * 85 + _RFC1924_REV[c]
if x > MAX_IP:
return None
return x
|
Convert an RFC 1924 IPv6 address to a network byte order 128-bit
integer.
>>> expect = 0
>>> rfc19242long('00000000000000000000') == expect
True
>>> expect = 21932261930451111902915077091070067066
>>> rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == expect
True
>>> rfc19242long('pizza') == None
True
>>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None
True
>>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP
True
:param ip: RFC 1924 IPv6 address
:type ip: str
:returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L387-L420
| null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from . import ipv4
__all__ = (
'cidr2block',
'ip2long',
'long2ip',
'long2rfc1924',
'rfc19242long',
'validate_cidr',
'validate_ip',
'DOCUMENTATION_NETWORK',
'IPV4_MAPPED',
'IPV6_TO_IPV4_NETWORK',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_GLOBAL',
'MULTICAST_LOCAL',
'MULTICAST_LOCAL_DHCP',
'MULTICAST_LOCAL_NODES',
'MULTICAST_LOCAL_ROUTERS',
'MULTICAST_LOOPBACK',
'MULTICAST_SITE',
'MULTICAST_SITE',
'MULTICAST_SITE_DHCP',
'PRIVATE_NETWORK',
'TEREDO_NETWORK',
'UNSPECIFIED_ADDRESS',
)
#: Regex for validating an IPv6 in hex notation
_HEX_RE = re.compile(r'^([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}$')
#: Regex for validating an IPv6 in dotted-quad notation
_DOTTED_QUAD_RE = re.compile(r'^([0-9a-f]{0,4}:){2,6}(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}/\d{1,3}$')
#: Mamimum IPv6 integer
MAX_IP = 0xffffffffffffffffffffffffffffffff
#: Minimum IPv6 integer
MIN_IP = 0x0
#: Absence of an address (only valid as source address)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
UNSPECIFIED_ADDRESS = "::/128"
#: Loopback addresses on the local host
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOOPBACK = "::1/128"
#: Common `localhost` address
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOCALHOST = LOOPBACK
#: IPv4 mapped to IPv6 (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
IPV4_MAPPED = "::ffff:0:0/96"
#: Documentation and example network
#: (`RFC 3849 <https://tools.ietf.org/html/rfc3849>`_)
DOCUMENTATION_NETWORK = "2001::db8::/32"
#: 6to4 Address block
#: (`RFC 3056 <https://tools.ietf.org/html/rfc3056>`_)
IPV6_TO_IPV4_NETWORK = "2002::/16"
#: Teredo addresses
#: (`RFC 4380 <https://tools.ietf.org/html/rfc4380>`_)
TEREDO_NETWORK = "2001::/32"
#: Private network
#: (`RFC 4193 <https://tools.ietf.org/html/rfc4193>`_)
PRIVATE_NETWORK = "fd00::/8"
#: Link-Local unicast networks (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LINK_LOCAL = "fe80::/10"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "ff00::/8"
#: Interface-Local multicast
MULTICAST_LOOPBACK = "ff01::/16"
#: Link-Local multicast
MULTICAST_LOCAL = "ff02::/16"
#: Site-Local multicast
MULTICAST_SITE = "ff05::/16"
#: Organization-Local multicast
MULTICAST_SITE = "ff08::/16"
#: Organization-Local multicast
MULTICAST_GLOBAL = "ff0e::/16"
#: All nodes on the local segment
MULTICAST_LOCAL_NODES = "ff02::1"
#: All routers on the local segment
MULTICAST_LOCAL_ROUTERS = "ff02::2"
#: All DHCP servers and relay agents on the local segment
MULTICAST_LOCAL_DHCP = "ff02::1:2"
#: All DHCP servers and relay agents on the local site
MULTICAST_SITE_DHCP = "ff05::1:3"
#: RFC 1924 alphabet
_RFC1924_ALPHABET = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'!', '#', '$', '%', '&', '(', ')', '*', '+', '-', ';', '<', '=',
'>', '?', '@', '^', '_', '`', '{', '|', '}', '~',
]
#: RFC 1924 reverse lookup
_RFC1924_REV = None
#: Regex for validating an IPv6 in hex notation
_RFC1924_RE = re.compile(r'^[0-9A-Za-z!#$%&()*+-;<=>?@^_`{|}~]{20}$')
def validate_ip(s):
"""Validate a hexidecimal IPv6 ip address.
>>> validate_ip('::')
True
>>> validate_ip('::1')
True
>>> validate_ip('2001:db8:85a3::8a2e:370:7334')
True
>>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334')
True
>>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334')
True
>>> validate_ip('2001:db8::1:0:0:1')
True
>>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
True
>>> validate_ip('::ffff:192.0.2.128')
True
>>> validate_ip('::ff::ff')
False
>>> validate_ip('::fffff')
False
>>> validate_ip('::ffff:192.0.2.300')
False
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
>>> validate_ip('1080:0:0:0:8:800:200c:417a')
True
:param s: String to validate as a hexidecimal IPv6 ip address.
:type s: str
:returns: ``True`` if a valid hexidecimal IPv6 ip address,
``False`` otherwise.
:raises: TypeError
"""
if _HEX_RE.match(s):
return len(s.split('::')) <= 2
if _DOTTED_QUAD_RE.match(s):
halves = s.split('::')
if len(halves) > 2:
return False
hextets = s.split(':')
quads = hextets[-1].split('.')
for q in quads:
if int(q) > 255:
return False
return True
return False
# end validate_ip
def ip2long(ip):
"""Convert a hexidecimal IPv6 address to a network byte order 128-bit
integer.
>>> ip2long('::') == 0
True
>>> ip2long('::1') == 1
True
>>> expect = 0x20010db885a3000000008a2e03707334
>>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect
True
>>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect
True
>>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect
True
>>> expect = 0x20010db8000000000001000000000001
>>> ip2long('2001:db8::1:0:0:1') == expect
True
>>> expect = 281473902969472
>>> ip2long('::ffff:192.0.2.128') == expect
True
>>> expect = 0xffffffffffffffffffffffffffffffff
>>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect
True
>>> ip2long('ff::ff::ff') == None
True
>>> expect = 21932261930451111902915077091070067066
>>> ip2long('1080:0:0:0:8:800:200C:417A') == expect
True
:param ip: Hexidecimal IPv6 address
:type ip: str
:returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
"""
if not validate_ip(ip):
return None
if '.' in ip:
# convert IPv4 suffix to hex
chunks = ip.split(':')
v4_int = ipv4.ip2long(chunks.pop())
if v4_int is None:
return None
chunks.append('%x' % ((v4_int >> 16) & 0xffff))
chunks.append('%x' % (v4_int & 0xffff))
ip = ':'.join(chunks)
halves = ip.split('::')
hextets = halves[0].split(':')
if len(halves) == 2:
h2 = halves[1].split(':')
for z in range(8 - (len(hextets) + len(h2))):
hextets.append('0')
for h in h2:
hextets.append(h)
# end if
lngip = 0
for h in hextets:
if '' == h:
h = '0'
lngip = (lngip << 16) | int(h, 16)
return lngip
# end ip2long
def long2ip(l, rfc1924=False):
"""Convert a network byte order 128-bit integer to a canonical IPv6
address.
>>> long2ip(2130706433)
'::7f00:1'
>>> long2ip(42540766411282592856904266426630537217)
'2001:db8::1:0:0:1'
>>> long2ip(MIN_IP)
'::'
>>> long2ip(MAX_IP)
'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and <really big int> inclusive
>>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True)
'4)+k&C#VzJ4br>0wv%Yp'
>>> long2ip(ip2long('::'), rfc1924=True)
'00000000000000000000'
:param l: Network byte order 128-bit integer.
:type l: int
:param rfc1924: Encode in RFC 1924 notation (base 85)
:type rfc1924: bool
:returns: Canonical IPv6 address (eg. '::1').
:raises: TypeError
"""
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
if rfc1924:
return long2rfc1924(l)
# format as one big hex value
hex_str = '%032x' % l
# split into double octet chunks without padding zeros
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
# find and remove left most longest run of zeros
dc_start, dc_len = (-1, 0)
run_start, run_len = (-1, 0)
for idx, hextet in enumerate(hextets):
if '0' == hextet:
run_len += 1
if -1 == run_start:
run_start = idx
if run_len > dc_len:
dc_len, dc_start = (run_len, run_start)
else:
run_len, run_start = (0, -1)
# end for
if dc_len > 1:
dc_end = dc_start + dc_len
if dc_end == len(hextets):
hextets += ['']
hextets[dc_start:dc_end] = ['']
if dc_start == 0:
hextets = [''] + hextets
# end if
return ':'.join(hextets)
# end long2ip
def long2rfc1924(l):
"""Convert a network byte order 128-bit integer to an rfc1924 IPv6
address.
>>> long2rfc1924(ip2long('1080::8:800:200C:417A'))
'4)+k&C#VzJ4br>0wv%Yp'
>>> long2rfc1924(ip2long('::'))
'00000000000000000000'
>>> long2rfc1924(MAX_IP)
'=r54lj&NUUO~Hi%c2ym0'
:param l: Network byte order 128-bit integer.
:type l: int
:returns: RFC 1924 IPv6 address
:raises: TypeError
"""
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
o = []
r = l
while r > 85:
o.append(_RFC1924_ALPHABET[r % 85])
r = r // 85
o.append(_RFC1924_ALPHABET[r])
return ''.join(reversed(o)).zfill(20)
def validate_cidr(s):
"""Validate a CIDR notation ip address.
The string is considered a valid CIDR address if it consists of a valid
IPv6 address in hextet format followed by a forward slash (/) and a bit
mask length (0-128).
>>> validate_cidr('::/128')
True
>>> validate_cidr('::/0')
True
>>> validate_cidr('fc00::/7')
True
>>> validate_cidr('::ffff:0:0/96')
True
>>> validate_cidr('::')
False
>>> validate_cidr('::/129')
False
>>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a CIDR notation ip address.
:type s: str
:returns: ``True`` if a valid CIDR address, ``False`` otherwise.
:raises: TypeError
"""
if _CIDR_RE.match(s):
ip, mask = s.split('/')
if validate_ip(ip):
if int(mask) > 128:
return False
else:
return False
return True
return False
# end validate_cidr
def cidr2block(cidr):
"""Convert a CIDR notation ip address into a tuple containing the network
block start and end addresses.
>>> cidr2block('2001:db8::/48')
('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff')
>>> cidr2block('::/0')
('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
:param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
:type cidr: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
"""
if not validate_cidr(cidr):
return None
ip, prefix = cidr.split('/')
prefix = int(prefix)
ip = ip2long(ip)
# keep left most prefix bits of ip
shift = 128 - prefix
block_start = ip >> shift << shift
# expand right most 128 - prefix bits to 1
mask = (1 << shift) - 1
block_end = block_start | mask
return (long2ip(block_start), long2ip(block_end))
# end cidr2block
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv6.py
|
validate_cidr
|
python
|
def validate_cidr(s):
if _CIDR_RE.match(s):
ip, mask = s.split('/')
if validate_ip(ip):
if int(mask) > 128:
return False
else:
return False
return True
return False
|
Validate a CIDR notation ip address.
The string is considered a valid CIDR address if it consists of a valid
IPv6 address in hextet format followed by a forward slash (/) and a bit
mask length (0-128).
>>> validate_cidr('::/128')
True
>>> validate_cidr('::/0')
True
>>> validate_cidr('fc00::/7')
True
>>> validate_cidr('::ffff:0:0/96')
True
>>> validate_cidr('::')
False
>>> validate_cidr('::/129')
False
>>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a CIDR notation ip address.
:type s: str
:returns: ``True`` if a valid CIDR address, ``False`` otherwise.
:raises: TypeError
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L423-L462
|
[
"def validate_ip(s):\n \"\"\"Validate a hexidecimal IPv6 ip address.\n\n\n >>> validate_ip('::')\n True\n >>> validate_ip('::1')\n True\n >>> validate_ip('2001:db8:85a3::8a2e:370:7334')\n True\n >>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334')\n True\n >>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334')\n True\n >>> validate_ip('2001:db8::1:0:0:1')\n True\n >>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')\n True\n >>> validate_ip('::ffff:192.0.2.128')\n True\n >>> validate_ip('::ff::ff')\n False\n >>> validate_ip('::fffff')\n False\n >>> validate_ip('::ffff:192.0.2.300')\n False\n >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n TypeError: expected string or buffer\n >>> validate_ip('1080:0:0:0:8:800:200c:417a')\n True\n\n\n :param s: String to validate as a hexidecimal IPv6 ip address.\n :type s: str\n :returns: ``True`` if a valid hexidecimal IPv6 ip address,\n ``False`` otherwise.\n :raises: TypeError\n \"\"\"\n if _HEX_RE.match(s):\n return len(s.split('::')) <= 2\n if _DOTTED_QUAD_RE.match(s):\n halves = s.split('::')\n if len(halves) > 2:\n return False\n hextets = s.split(':')\n quads = hextets[-1].split('.')\n for q in quads:\n if int(q) > 255:\n return False\n return True\n return False\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from . import ipv4
__all__ = (
'cidr2block',
'ip2long',
'long2ip',
'long2rfc1924',
'rfc19242long',
'validate_cidr',
'validate_ip',
'DOCUMENTATION_NETWORK',
'IPV4_MAPPED',
'IPV6_TO_IPV4_NETWORK',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_GLOBAL',
'MULTICAST_LOCAL',
'MULTICAST_LOCAL_DHCP',
'MULTICAST_LOCAL_NODES',
'MULTICAST_LOCAL_ROUTERS',
'MULTICAST_LOOPBACK',
'MULTICAST_SITE',
'MULTICAST_SITE',
'MULTICAST_SITE_DHCP',
'PRIVATE_NETWORK',
'TEREDO_NETWORK',
'UNSPECIFIED_ADDRESS',
)
#: Regex for validating an IPv6 in hex notation
_HEX_RE = re.compile(r'^([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}$')
#: Regex for validating an IPv6 in dotted-quad notation
_DOTTED_QUAD_RE = re.compile(r'^([0-9a-f]{0,4}:){2,6}(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}/\d{1,3}$')
#: Mamimum IPv6 integer
MAX_IP = 0xffffffffffffffffffffffffffffffff
#: Minimum IPv6 integer
MIN_IP = 0x0
#: Absence of an address (only valid as source address)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
UNSPECIFIED_ADDRESS = "::/128"
#: Loopback addresses on the local host
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOOPBACK = "::1/128"
#: Common `localhost` address
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOCALHOST = LOOPBACK
#: IPv4 mapped to IPv6 (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
IPV4_MAPPED = "::ffff:0:0/96"
#: Documentation and example network
#: (`RFC 3849 <https://tools.ietf.org/html/rfc3849>`_)
DOCUMENTATION_NETWORK = "2001::db8::/32"
#: 6to4 Address block
#: (`RFC 3056 <https://tools.ietf.org/html/rfc3056>`_)
IPV6_TO_IPV4_NETWORK = "2002::/16"
#: Teredo addresses
#: (`RFC 4380 <https://tools.ietf.org/html/rfc4380>`_)
TEREDO_NETWORK = "2001::/32"
#: Private network
#: (`RFC 4193 <https://tools.ietf.org/html/rfc4193>`_)
PRIVATE_NETWORK = "fd00::/8"
#: Link-Local unicast networks (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LINK_LOCAL = "fe80::/10"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "ff00::/8"
#: Interface-Local multicast
MULTICAST_LOOPBACK = "ff01::/16"
#: Link-Local multicast
MULTICAST_LOCAL = "ff02::/16"
#: Site-Local multicast
MULTICAST_SITE = "ff05::/16"
#: Organization-Local multicast
MULTICAST_SITE = "ff08::/16"
#: Organization-Local multicast
MULTICAST_GLOBAL = "ff0e::/16"
#: All nodes on the local segment
MULTICAST_LOCAL_NODES = "ff02::1"
#: All routers on the local segment
MULTICAST_LOCAL_ROUTERS = "ff02::2"
#: All DHCP servers and relay agents on the local segment
MULTICAST_LOCAL_DHCP = "ff02::1:2"
#: All DHCP servers and relay agents on the local site
MULTICAST_SITE_DHCP = "ff05::1:3"
#: RFC 1924 alphabet
_RFC1924_ALPHABET = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'!', '#', '$', '%', '&', '(', ')', '*', '+', '-', ';', '<', '=',
'>', '?', '@', '^', '_', '`', '{', '|', '}', '~',
]
#: RFC 1924 reverse lookup
_RFC1924_REV = None
#: Regex for validating an IPv6 in hex notation
_RFC1924_RE = re.compile(r'^[0-9A-Za-z!#$%&()*+-;<=>?@^_`{|}~]{20}$')
def validate_ip(s):
    """Validate a hexidecimal IPv6 ip address.

    >>> validate_ip('::1')
    True
    >>> validate_ip('2001:db8:85a3::8a2e:370:7334')
    True
    >>> validate_ip('::ffff:192.0.2.128')
    True
    >>> validate_ip('::ff::ff')
    False
    >>> validate_ip('::fffff')
    False
    >>> validate_ip('::ffff:192.0.2.300')
    False

    :param s: String to validate as a hexidecimal IPv6 ip address.
    :type s: str
    :returns: ``True`` if a valid hexidecimal IPv6 ip address,
        ``False`` otherwise.
    :raises: TypeError
    """
    if _HEX_RE.match(s):
        # pure hex form: valid as long as '::' appears at most once
        return s.count('::') < 2
    if not _DOTTED_QUAD_RE.match(s):
        return False
    # mixed form ending in an IPv4 dotted-quad
    if s.count('::') > 1:
        return False
    ipv4_part = s.rsplit(':', 1)[-1]
    # every IPv4 octet must fit in a byte
    return all(int(octet) <= 255 for octet in ipv4_part.split('.'))
# end validate_ip
def ip2long(ip):
    """Convert a hexidecimal IPv6 address to a network byte order 128-bit
    integer.
    >>> ip2long('::') == 0
    True
    >>> ip2long('::1') == 1
    True
    >>> expect = 0x20010db885a3000000008a2e03707334
    >>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect
    True
    >>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect
    True
    >>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect
    True
    >>> expect = 0x20010db8000000000001000000000001
    >>> ip2long('2001:db8::1:0:0:1') == expect
    True
    >>> expect = 281473902969472
    >>> ip2long('::ffff:192.0.2.128') == expect
    True
    >>> expect = 0xffffffffffffffffffffffffffffffff
    >>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect
    True
    >>> ip2long('ff::ff::ff') == None
    True
    >>> expect = 21932261930451111902915077091070067066
    >>> ip2long('1080:0:0:0:8:800:200C:417A') == expect
    True
    :param ip: Hexidecimal IPv6 address
    :type ip: str
    :returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    if '.' in ip:
        # convert IPv4 suffix to hex
        chunks = ip.split(':')
        v4_int = ipv4.ip2long(chunks.pop())
        if v4_int is None:
            return None
        # split the 32-bit IPv4 value into two 16-bit hextets
        chunks.append('%x' % ((v4_int >> 16) & 0xffff))
        chunks.append('%x' % (v4_int & 0xffff))
        ip = ':'.join(chunks)
    # expand a '::' zero-compression (at most one, guaranteed by
    # validate_ip above) into explicit zero hextets
    halves = ip.split('::')
    hextets = halves[0].split(':')
    if len(halves) == 2:
        h2 = halves[1].split(':')
        for z in range(8 - (len(hextets) + len(h2))):
            hextets.append('0')
        for h in h2:
            hextets.append(h)
    # end if
    lngip = 0
    # fold the hextets into one 128-bit integer; empty strings (from a
    # leading or trailing ':') count as zero
    for h in hextets:
        if '' == h:
            h = '0'
        lngip = (lngip << 16) | int(h, 16)
    return lngip
# end ip2long
def long2ip(l, rfc1924=False):
    """Convert a network byte order 128-bit integer to a canonical IPv6
    address.
    >>> long2ip(2130706433)
    '::7f00:1'
    >>> long2ip(42540766411282592856904266426630537217)
    '2001:db8::1:0:0:1'
    >>> long2ip(MIN_IP)
    '::'
    >>> long2ip(MAX_IP)
    'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
    >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
    >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and <really big int> inclusive
    >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and <really big int> inclusive
    >>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True)
    '4)+k&C#VzJ4br>0wv%Yp'
    >>> long2ip(ip2long('::'), rfc1924=True)
    '00000000000000000000'
    :param l: Network byte order 128-bit integer.
    :type l: int
    :param rfc1924: Encode in RFC 1924 notation (base 85)
    :type rfc1924: bool
    :returns: Canonical IPv6 address (eg. '::1').
    :raises: TypeError
    """
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    if rfc1924:
        return long2rfc1924(l)
    # format as one big hex value
    hex_str = '%032x' % l
    # split into double octet chunks without padding zeros
    hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
    # find and remove left most longest run of zeros
    # (dc_start, dc_len): best run found so far;
    # (run_start, run_len): zero run currently being scanned
    dc_start, dc_len = (-1, 0)
    run_start, run_len = (-1, 0)
    for idx, hextet in enumerate(hextets):
        if '0' == hextet:
            run_len += 1
            if -1 == run_start:
                run_start = idx
            # strict '>' keeps the left-most run on a tie
            if run_len > dc_len:
                dc_len, dc_start = (run_len, run_start)
        else:
            run_len, run_start = (0, -1)
    # end for
    # only runs of two or more zero hextets are compressed to '::'
    if dc_len > 1:
        dc_end = dc_start + dc_len
        # pad with an extra empty string when the run touches an end of
        # the address so the joined result still shows '::' there
        if dc_end == len(hextets):
            hextets += ['']
        # collapse the run to a single empty slot; ':'.join() then
        # renders it as '::'
        hextets[dc_start:dc_end] = ['']
        if dc_start == 0:
            hextets = [''] + hextets
    # end if
    return ':'.join(hextets)
# end long2ip
def long2rfc1924(l):
    """Convert a network byte order 128-bit integer to an rfc1924 IPv6
    address.

    >>> long2rfc1924(ip2long('1080::8:800:200C:417A'))
    '4)+k&C#VzJ4br>0wv%Yp'
    >>> long2rfc1924(ip2long('::'))
    '00000000000000000000'
    >>> long2rfc1924(85)
    '00000000000000000010'
    >>> long2rfc1924(MAX_IP)
    '=r54lj&NUUO~Hi%c2ym0'

    :param l: Network byte order 128-bit integer.
    :type l: int
    :returns: RFC 1924 IPv6 address
    :raises: TypeError
    """
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    o = []
    r = l
    # Peel off base-85 digits, least significant first.  The loop must
    # run while r >= 85: the previous ``r > 85`` condition left a final
    # remainder of exactly 85, which then indexed one past the end of
    # the 85-element alphabet (IndexError for inputs such as l == 85).
    while r >= 85:
        o.append(_RFC1924_ALPHABET[r % 85])
        r = r // 85
    o.append(_RFC1924_ALPHABET[r])
    # left-pad with '0' to the fixed 20-character RFC 1924 width
    return ''.join(reversed(o)).zfill(20)
def rfc19242long(s):
    """Convert an RFC 1924 IPv6 address to a network byte order 128-bit
    integer.

    >>> rfc19242long('00000000000000000000') == 0
    True
    >>> rfc19242long('pizza') == None
    True
    >>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None
    True
    >>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP
    True

    :param s: RFC 1924 IPv6 address
    :type s: str
    :returns: Network byte order 128-bit integer or ``None`` if the
        address is invalid.
    """
    global _RFC1924_REV
    if _RFC1924_RE.match(s) is None:
        return None
    if _RFC1924_REV is None:
        # lazily build the digit-value lookup table on first use
        _RFC1924_REV = dict(zip(_RFC1924_ALPHABET, range(85)))
    value = 0
    for ch in s:
        value = value * 85 + _RFC1924_REV[ch]
    # 20 base-85 digits can encode more than 128 bits; reject overflow
    return None if value > MAX_IP else value
# end rfc19242long
def cidr2block(cidr):
    """Convert a CIDR notation ip address into a tuple containing the
    network block start and end addresses.

    >>> cidr2block('2001:db8::/48')
    ('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff')
    >>> cidr2block('::/0')
    ('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')

    :param cidr: CIDR notation ip address (eg. '::1/128').
    :type cidr: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_cidr(cidr):
        return None
    addr, prefix_len = cidr.split('/')
    host_bits = 128 - int(prefix_len)
    # network address: clear the low host_bits bits
    start = (ip2long(addr) >> host_bits) << host_bits
    # end of block: set every host bit
    end = start | ((1 << host_bits) - 1)
    return (long2ip(start), long2ip(end))
# end cidr2block
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv6.py
|
cidr2block
|
python
|
def cidr2block(cidr):
if not validate_cidr(cidr):
return None
ip, prefix = cidr.split('/')
prefix = int(prefix)
ip = ip2long(ip)
# keep left most prefix bits of ip
shift = 128 - prefix
block_start = ip >> shift << shift
# expand right most 128 - prefix bits to 1
mask = (1 << shift) - 1
block_end = block_start | mask
return (long2ip(block_start), long2ip(block_end))
|
Convert a CIDR notation ip address into a tuple containing the network
block start and end addresses.
>>> cidr2block('2001:db8::/48')
('2001:db8::', '2001:db8:0:ffff:ffff:ffff:ffff:ffff')
>>> cidr2block('::/0')
('::', 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
:param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
:type cidr: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv6.py#L466-L496
|
[
"def validate_cidr(s):\n \"\"\"Validate a CIDR notation ip address.\n\n The string is considered a valid CIDR address if it consists of a valid\n IPv6 address in hextet format followed by a forward slash (/) and a bit\n mask length (0-128).\n\n\n >>> validate_cidr('::/128')\n True\n >>> validate_cidr('::/0')\n True\n >>> validate_cidr('fc00::/7')\n True\n >>> validate_cidr('::ffff:0:0/96')\n True\n >>> validate_cidr('::')\n False\n >>> validate_cidr('::/129')\n False\n >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n TypeError: expected string or buffer\n\n\n :param s: String to validate as a CIDR notation ip address.\n :type s: str\n :returns: ``True`` if a valid CIDR address, ``False`` otherwise.\n :raises: TypeError\n \"\"\"\n if _CIDR_RE.match(s):\n ip, mask = s.split('/')\n if validate_ip(ip):\n if int(mask) > 128:\n return False\n else:\n return False\n return True\n return False\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
from . import ipv4
__all__ = (
'cidr2block',
'ip2long',
'long2ip',
'long2rfc1924',
'rfc19242long',
'validate_cidr',
'validate_ip',
'DOCUMENTATION_NETWORK',
'IPV4_MAPPED',
'IPV6_TO_IPV4_NETWORK',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_GLOBAL',
'MULTICAST_LOCAL',
'MULTICAST_LOCAL_DHCP',
'MULTICAST_LOCAL_NODES',
'MULTICAST_LOCAL_ROUTERS',
'MULTICAST_LOOPBACK',
'MULTICAST_SITE',
'MULTICAST_SITE',
'MULTICAST_SITE_DHCP',
'PRIVATE_NETWORK',
'TEREDO_NETWORK',
'UNSPECIFIED_ADDRESS',
)
#: Regex for validating an IPv6 in hex notation
_HEX_RE = re.compile(r'^([0-9a-fA-F]{0,4}:){2,7}[0-9a-fA-F]{0,4}$')
#: Regex for validating an IPv6 in dotted-quad notation
_DOTTED_QUAD_RE = re.compile(r'^([0-9a-f]{0,4}:){2,6}(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}/\d{1,3}$')
#: Mamimum IPv6 integer
MAX_IP = 0xffffffffffffffffffffffffffffffff
#: Minimum IPv6 integer
MIN_IP = 0x0
#: Absence of an address (only valid as source address)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
UNSPECIFIED_ADDRESS = "::/128"
#: Loopback addresses on the local host
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOOPBACK = "::1/128"
#: Common `localhost` address
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LOCALHOST = LOOPBACK
#: IPv4 mapped to IPv6 (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
IPV4_MAPPED = "::ffff:0:0/96"
#: Documentation and example network
#: (`RFC 3849 <https://tools.ietf.org/html/rfc3849>`_)
DOCUMENTATION_NETWORK = "2001::db8::/32"
#: 6to4 Address block
#: (`RFC 3056 <https://tools.ietf.org/html/rfc3056>`_)
IPV6_TO_IPV4_NETWORK = "2002::/16"
#: Teredo addresses
#: (`RFC 4380 <https://tools.ietf.org/html/rfc4380>`_)
TEREDO_NETWORK = "2001::/32"
#: Private network
#: (`RFC 4193 <https://tools.ietf.org/html/rfc4193>`_)
PRIVATE_NETWORK = "fd00::/8"
#: Link-Local unicast networks (not globally routable)
#: (`RFC 4291 <https://tools.ietf.org/html/rfc4291>`_)
LINK_LOCAL = "fe80::/10"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "ff00::/8"
#: Interface-Local multicast
MULTICAST_LOOPBACK = "ff01::/16"
#: Link-Local multicast
MULTICAST_LOCAL = "ff02::/16"
#: Site-Local multicast
MULTICAST_SITE = "ff05::/16"
#: Organization-Local multicast
MULTICAST_SITE = "ff08::/16"
#: Organization-Local multicast
MULTICAST_GLOBAL = "ff0e::/16"
#: All nodes on the local segment
MULTICAST_LOCAL_NODES = "ff02::1"
#: All routers on the local segment
MULTICAST_LOCAL_ROUTERS = "ff02::2"
#: All DHCP servers and relay agents on the local segment
MULTICAST_LOCAL_DHCP = "ff02::1:2"
#: All DHCP servers and relay agents on the local site
MULTICAST_SITE_DHCP = "ff05::1:3"
#: RFC 1924 alphabet
_RFC1924_ALPHABET = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'!', '#', '$', '%', '&', '(', ')', '*', '+', '-', ';', '<', '=',
'>', '?', '@', '^', '_', '`', '{', '|', '}', '~',
]
#: RFC 1924 reverse lookup
_RFC1924_REV = None
#: Regex for validating an IPv6 in hex notation
_RFC1924_RE = re.compile(r'^[0-9A-Za-z!#$%&()*+-;<=>?@^_`{|}~]{20}$')
def validate_ip(s):
    """Validate a hexidecimal IPv6 ip address.
    >>> validate_ip('::')
    True
    >>> validate_ip('::1')
    True
    >>> validate_ip('2001:db8:85a3::8a2e:370:7334')
    True
    >>> validate_ip('2001:db8:85a3:0:0:8a2e:370:7334')
    True
    >>> validate_ip('2001:0db8:85a3:0000:0000:8a2e:0370:7334')
    True
    >>> validate_ip('2001:db8::1:0:0:1')
    True
    >>> validate_ip('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
    True
    >>> validate_ip('::ffff:192.0.2.128')
    True
    >>> validate_ip('::ff::ff')
    False
    >>> validate_ip('::fffff')
    False
    >>> validate_ip('::ffff:192.0.2.300')
    False
    >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer
    >>> validate_ip('1080:0:0:0:8:800:200c:417a')
    True
    :param s: String to validate as a hexidecimal IPv6 ip address.
    :type s: str
    :returns: ``True`` if a valid hexidecimal IPv6 ip address,
    ``False`` otherwise.
    :raises: TypeError
    """
    if _HEX_RE.match(s):
        # the regex allows any number of ':' groups; a real address may
        # contain at most one '::' zero-compression
        return len(s.split('::')) <= 2
    if _DOTTED_QUAD_RE.match(s):
        # mixed notation ending in an IPv4 dotted-quad
        halves = s.split('::')
        if len(halves) > 2:
            return False
        hextets = s.split(':')
        quads = hextets[-1].split('.')
        # every IPv4 octet must fit in a byte
        for q in quads:
            if int(q) > 255:
                return False
        return True
    return False
# end validate_ip
def ip2long(ip):
    """Convert a hexidecimal IPv6 address to a network byte order 128-bit
    integer.
    >>> ip2long('::') == 0
    True
    >>> ip2long('::1') == 1
    True
    >>> expect = 0x20010db885a3000000008a2e03707334
    >>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect
    True
    >>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect
    True
    >>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect
    True
    >>> expect = 0x20010db8000000000001000000000001
    >>> ip2long('2001:db8::1:0:0:1') == expect
    True
    >>> expect = 281473902969472
    >>> ip2long('::ffff:192.0.2.128') == expect
    True
    >>> expect = 0xffffffffffffffffffffffffffffffff
    >>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect
    True
    >>> ip2long('ff::ff::ff') == None
    True
    >>> expect = 21932261930451111902915077091070067066
    >>> ip2long('1080:0:0:0:8:800:200C:417A') == expect
    True
    :param ip: Hexidecimal IPv6 address
    :type ip: str
    :returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    if '.' in ip:
        # convert IPv4 suffix to hex
        chunks = ip.split(':')
        v4_int = ipv4.ip2long(chunks.pop())
        if v4_int is None:
            return None
        # split the 32-bit IPv4 value into two 16-bit hextets
        chunks.append('%x' % ((v4_int >> 16) & 0xffff))
        chunks.append('%x' % (v4_int & 0xffff))
        ip = ':'.join(chunks)
    # expand a '::' zero-compression (at most one, guaranteed by
    # validate_ip above) into explicit zero hextets
    halves = ip.split('::')
    hextets = halves[0].split(':')
    if len(halves) == 2:
        h2 = halves[1].split(':')
        for z in range(8 - (len(hextets) + len(h2))):
            hextets.append('0')
        for h in h2:
            hextets.append(h)
    # end if
    lngip = 0
    # fold the hextets into one 128-bit integer; empty strings (from a
    # leading or trailing ':') count as zero
    for h in hextets:
        if '' == h:
            h = '0'
        lngip = (lngip << 16) | int(h, 16)
    return lngip
# end ip2long
def long2ip(l, rfc1924=False):
    """Convert a network byte order 128-bit integer to a canonical IPv6
    address.
    >>> long2ip(2130706433)
    '::7f00:1'
    >>> long2ip(42540766411282592856904266426630537217)
    '2001:db8::1:0:0:1'
    >>> long2ip(MIN_IP)
    '::'
    >>> long2ip(MAX_IP)
    'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'
    >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
    >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and <really big int> inclusive
    >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and <really big int> inclusive
    >>> long2ip(ip2long('1080::8:800:200C:417A'), rfc1924=True)
    '4)+k&C#VzJ4br>0wv%Yp'
    >>> long2ip(ip2long('::'), rfc1924=True)
    '00000000000000000000'
    :param l: Network byte order 128-bit integer.
    :type l: int
    :param rfc1924: Encode in RFC 1924 notation (base 85)
    :type rfc1924: bool
    :returns: Canonical IPv6 address (eg. '::1').
    :raises: TypeError
    """
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    if rfc1924:
        return long2rfc1924(l)
    # format as one big hex value
    hex_str = '%032x' % l
    # split into double octet chunks without padding zeros
    hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
    # find and remove left most longest run of zeros
    # (dc_start, dc_len): best run found so far;
    # (run_start, run_len): zero run currently being scanned
    dc_start, dc_len = (-1, 0)
    run_start, run_len = (-1, 0)
    for idx, hextet in enumerate(hextets):
        if '0' == hextet:
            run_len += 1
            if -1 == run_start:
                run_start = idx
            # strict '>' keeps the left-most run on a tie
            if run_len > dc_len:
                dc_len, dc_start = (run_len, run_start)
        else:
            run_len, run_start = (0, -1)
    # end for
    # only runs of two or more zero hextets are compressed to '::'
    if dc_len > 1:
        dc_end = dc_start + dc_len
        # pad with an extra empty string when the run touches an end of
        # the address so the joined result still shows '::' there
        if dc_end == len(hextets):
            hextets += ['']
        # collapse the run to a single empty slot; ':'.join() then
        # renders it as '::'
        hextets[dc_start:dc_end] = ['']
        if dc_start == 0:
            hextets = [''] + hextets
    # end if
    return ':'.join(hextets)
# end long2ip
def long2rfc1924(l):
    """Convert a network byte order 128-bit integer to an rfc1924 IPv6
    address.

    >>> long2rfc1924(ip2long('1080::8:800:200C:417A'))
    '4)+k&C#VzJ4br>0wv%Yp'
    >>> long2rfc1924(ip2long('::'))
    '00000000000000000000'
    >>> long2rfc1924(85)
    '00000000000000000010'
    >>> long2rfc1924(MAX_IP)
    '=r54lj&NUUO~Hi%c2ym0'

    :param l: Network byte order 128-bit integer.
    :type l: int
    :returns: RFC 1924 IPv6 address
    :raises: TypeError
    """
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    o = []
    r = l
    # Peel off base-85 digits, least significant first.  The loop must
    # run while r >= 85: the previous ``r > 85`` condition left a final
    # remainder of exactly 85, which then indexed one past the end of
    # the 85-element alphabet (IndexError for inputs such as l == 85).
    while r >= 85:
        o.append(_RFC1924_ALPHABET[r % 85])
        r = r // 85
    o.append(_RFC1924_ALPHABET[r])
    # left-pad with '0' to the fixed 20-character RFC 1924 width
    return ''.join(reversed(o)).zfill(20)
def rfc19242long(s):
    """Convert an RFC 1924 IPv6 address to a network byte order 128-bit
    integer.
    >>> expect = 0
    >>> rfc19242long('00000000000000000000') == expect
    True
    >>> expect = 21932261930451111902915077091070067066
    >>> rfc19242long('4)+k&C#VzJ4br>0wv%Yp') == expect
    True
    >>> rfc19242long('pizza') == None
    True
    >>> rfc19242long('~~~~~~~~~~~~~~~~~~~~') == None
    True
    >>> rfc19242long('=r54lj&NUUO~Hi%c2ym0') == MAX_IP
    True
    :param s: RFC 1924 IPv6 address
    :type s: str
    :returns: Network byte order 128-bit integer or ``None`` if ip is invalid.
    """
    global _RFC1924_REV
    if not _RFC1924_RE.match(s):
        return None
    if _RFC1924_REV is None:
        # lazily build the digit-value lookup table on first use
        _RFC1924_REV = {v: k for k, v in enumerate(_RFC1924_ALPHABET)}
    x = 0
    # NOTE(review): _RFC1924_RE contains an unescaped '+-;' character
    # range, so characters such as ',', '.', '/' and ':' can pass the
    # regex yet raise KeyError in the lookup below -- confirm the regex
    # is intended to mirror _RFC1924_ALPHABET exactly.
    for c in s:
        x = x * 85 + _RFC1924_REV[c]
    if x > MAX_IP:
        # 20 base-85 digits can encode values larger than 128 bits
        return None
    return x
def validate_cidr(s):
    """Validate a CIDR notation ip address.

    The string is considered valid if it consists of a valid IPv6
    address in hextet format followed by a forward slash (/) and a bit
    mask length (0-128).

    >>> validate_cidr('::/128')
    True
    >>> validate_cidr('fc00::/7')
    True
    >>> validate_cidr('::ffff:0:0/96')
    True
    >>> validate_cidr('::')
    False
    >>> validate_cidr('::/129')
    False

    :param s: String to validate as a CIDR notation ip address.
    :type s: str
    :returns: ``True`` if a valid CIDR address, ``False`` otherwise.
    :raises: TypeError
    """
    if not _CIDR_RE.match(s):
        return False
    addr, prefix_len = s.split('/')
    return validate_ip(addr) and int(prefix_len) <= 128
# end validate_cidr
# end cidr2block
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv4.py
|
validate_ip
|
python
|
def validate_ip(s):
if _DOTTED_QUAD_RE.match(s):
quads = s.split('.')
for q in quads:
if int(q) > 255:
return False
return True
return False
|
Validate a dotted-quad ip address.
The string is considered a valid dotted-quad address if it consists of
one to four octets (0-255) seperated by periods (.).
>>> validate_ip('127.0.0.1')
True
>>> validate_ip('127.0')
True
>>> validate_ip('127.0.0.256')
False
>>> validate_ip(LOCALHOST)
True
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a dotted-quad ip address.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
:raises: TypeError
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv4.py#L190-L222
| null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
# sniff for python2.x / python3k compatibility "fixes'
try:
    # Python 2: keep the builtin
    basestring = basestring
except NameError:
    # 'basestring' is undefined, must be python3k
    basestring = str
try:
    # use the builtin bin() where available (Python >= 2.6)
    bin = bin
except NameError:
    # builtin bin function doesn't exist
    def bin(x):
        """Return the binary representation of an integer as a string.

        Fallback for interpreters that lack the builtin ``bin``.
        From http://code.activestate.com/recipes/219300/#c7
        """
        if x < 0:
            # handle negatives by prefixing the positive expansion
            return '-' + bin(-x)
        out = []
        if x == 0:
            out.append('0')
        while x > 0:
            # collect bits least significant first
            out.append('01'[x & 1])
            x >>= 1
            pass
        try:
            return '0b' + ''.join(reversed(out))
        except NameError:
            # very old interpreters also lack the builtin reversed()
            out.reverse()
            return '0b' + ''.join(out)
    # end bin
# end compatibility "fixes'
__all__ = (
'cidr2block',
'hex2ip',
'ip2hex',
'ip2long',
'ip2network',
'long2ip',
'netmask2prefix',
'subnet2block',
'validate_cidr',
'validate_ip',
'validate_netmask',
'validate_subnet',
'BENCHMARK_TESTS',
'BROADCAST',
'CURRENT_NETWORK',
'DUAL_STACK_LITE',
'IETF_PROTOCOL_RESERVED',
'IPV6_TO_IPV4_RELAY',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_INTERNETWORK',
'MULTICAST_LOCAL',
'PRIVATE_NETWORK_10',
'PRIVATE_NETWORK_172_16',
'PRIVATE_NETWORK_192_168',
'RESERVED',
'SHARED_ADDRESS_SPACE',
'TEST_NET_1',
'TEST_NET_2',
'TEST_NET_3',
)
#: Regex for validating an IPv4 address
_DOTTED_QUAD_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}/\d{1,2}$')
#: Maximum IPv4 integer
MAX_IP = 0xffffffff
#: Minimum IPv4 integer
MIN_IP = 0x0
#: Broadcast messages to the current network (only valid as source address)
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
CURRENT_NETWORK = "0.0.0.0/8"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_10 = "10.0.0.0/8"
#: Carrier-grade NAT private network
#: (`RFC 6598 <https://tools.ietf.org/html/rfc6598>`_)
SHARED_ADDRESS_SPACE = "100.64.0.0/10"
#: Loopback addresses on the local host
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOOPBACK = "127.0.0.0/8"
#: Common `localhost` address
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOCALHOST = "127.0.0.1"
#: Autoconfiguration when no IP address available
#: (`RFC 3927 <https://tools.ietf.org/html/rfc3927>`_)
LINK_LOCAL = "169.254.0.0/16"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_172_16 = "172.16.0.0/12"
#: IETF protocol assignments reserved block
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
IETF_PROTOCOL_RESERVED = "192.0.0.0/24"
#: Dual-Stack Lite link address
#: (`RFC 6333 <https://tools.ietf.org/html/rfc6333>`_)
DUAL_STACK_LITE = "192.0.0.0/29"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_1 = "192.0.2.0/24"
#: 6to4 anycast relay
#: (`RFC 3068 <https://tools.ietf.org/html/rfc3068>`_)
IPV6_TO_IPV4_RELAY = "192.88.99.0/24"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_192_168 = "192.168.0.0/16"
#: Inter-network communications testing
#: (`RFC 2544 <https://tools.ietf.org/html/rfc2544>`_)
BENCHMARK_TESTS = "198.18.0.0/15"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_2 = "198.51.100.0/24"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_3 = "203.0.113.0/24"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "224.0.0.0/4"
#: Link local multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_LOCAL = "224.0.0.0/24"
#: Forwardable multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_INTERNETWORK = "224.0.1.0/24"
#: Former Class E address space. Reserved for future use
#: (`RFC 1700 <https://tools.ietf.org/html/rfc1700>`_)
RESERVED = "240.0.0.0/4"
#: Broadcast messages to the current network
#: (only valid as destination address)
#: (`RFC 919 <https://tools.ietf.org/html/rfc919>`_)
BROADCAST = "255.255.255.255"
# end validate_ip
def validate_cidr(s):
    """Validate a CIDR notation ip address.

    The string is considered valid if it consists of a valid IPv4
    address in dotted-quad format followed by a forward slash (/) and a
    bit mask length (1-32).

    >>> validate_cidr('127.0.0.1/32')
    True
    >>> validate_cidr('127.0/8')
    True
    >>> validate_cidr('127.0.0.256/32')
    False
    >>> validate_cidr('127.0.0.0')
    False
    >>> validate_cidr('127.0.0.1/33')
    False

    :param s: String to validate as a CIDR notation ip address.
    :type s: str
    :returns: ``True`` if a valid CIDR address, ``False`` otherwise.
    :raises: TypeError
    """
    if not _CIDR_RE.match(s):
        return False
    addr, prefix_len = s.split('/')
    return validate_ip(addr) and int(prefix_len) <= 32
# end validate_cidr
def validate_netmask(s):
    """Validate that a dotted-quad ip address is a valid netmask.

    >>> validate_netmask('0.0.0.0')
    True
    >>> validate_netmask('255.0.0.0')
    True
    >>> validate_netmask('255.255.255.255')
    True
    >>> validate_netmask('128.0.0.1')
    False
    >>> validate_netmask('0.255.255.0')
    False

    :param s: String to validate as a dotted-quad notation netmask.
    :type s: str
    :returns: ``True`` if a valid netmask, ``False`` otherwise.
    :raises: TypeError
    """
    if not validate_ip(s):
        return False
    # 32-character binary expansion of the mask, e.g. '111111110000...0'
    bits = bin(ip2network(s))[2:].zfill(32)
    # a valid mask is a run of 1s followed only by 0s; equivalently,
    # no '0' bit is ever followed by a '1' bit
    return '01' not in bits
# end validate_netmask
def validate_subnet(s):
    """Validate a dotted-quad ip address including a netmask.

    The string is considered a valid dotted-quad address with netmask if it
    consists of one to four octets (0-255) separated by periods (.) followed
    by a forward slash (/) and a subnet bitmask which is expressed in
    dotted-quad format.

    >>> validate_subnet('127.0.0.1/255.255.255.255')
    True
    >>> validate_subnet('127.0/255.0.0.0')
    True
    >>> validate_subnet('127.0/255')
    True
    >>> validate_subnet('127.0.0.256/255.255.255.255')
    False
    >>> validate_subnet('127.0.0.1/255.255.255.256')
    False
    >>> validate_subnet('127.0.0.0')
    False
    >>> validate_subnet(None)
    Traceback (most recent call last):
    ...
    TypeError: expected string or unicode
    :param s: String to validate as a dotted-quad ip address with netmask.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address with netmask,
      ``False`` otherwise.
    :raises: TypeError
    """
    if not isinstance(s, basestring):
        raise TypeError("expected string or unicode")
    if '/' not in s:
        return False
    # Split only on the first slash so malformed input containing extra
    # slashes (eg. '127.0.0.1/255.0.0.0/x') fails validation cleanly
    # instead of raising ValueError from tuple unpacking, as the previous
    # maxsplit=2 version did.
    start, mask = s.split('/', 1)
    return validate_ip(start) and validate_netmask(mask)
# end validate_subnet
def ip2long(ip):
    """Convert a dotted-quad ip address to a network byte order 32-bit
    integer.

    >>> ip2long('127.0.0.1')
    2130706433
    >>> ip2long('127.1')
    2130706433
    >>> ip2long('127')
    2130706432
    >>> ip2long('127.0.0.256') is None
    True
    :param ip: Dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    octets = [int(q) for q in ip.split('.')]
    if len(octets) == 1:
        # A single quad names a network; the host octets are all zero.
        octets += [0, 0, 0]
    elif len(octets) < 4:
        # Partial form: the last supplied quad is the host portion, the
        # rest is network, with zero padding in between.
        octets = octets[:-1] + [0] * (4 - len(octets)) + octets[-1:]
    value = 0
    for octet in octets:
        value = (value << 8) | octet
    return value
# end ip2long
def ip2network(ip):
    """Convert a dotted-quad ip to base network number.

    This differs from :func:`ip2long` in that partial addresses are treated
    as all network instead of network plus host (eg. '127.1' expands to
    '127.1.0.0')

    :param ip: dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or `None` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    quads = ip.split('.')
    # Missing quads pad the right-hand side with zeros.
    quads += ['0'] * (4 - len(quads))
    network = 0
    for quad in quads:
        network = (network << 8) | int(quad)
    return network
# end ip2network
def long2ip(l):
    """Convert a network byte order 32-bit integer to a dotted quad ip
    address.

    >>> long2ip(2130706433)
    '127.0.0.1'
    >>> long2ip(MIN_IP)
    '0.0.0.0'
    >>> long2ip(MAX_IP)
    '255.255.255.255'
    >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
    >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    :param l: Network byte order 32-bit integer.
    :type l: int
    :returns: Dotted-quad ip address (eg. '127.0.0.1').
    :raises: TypeError
    """
    # Range check first; comparing a non-int (eg. None) raises TypeError
    # here, which is the documented behaviour.
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    # Peel off each octet from most to least significant.
    octets = [str((l >> shift) & 255) for shift in (24, 16, 8, 0)]
    return '.'.join(octets)
# end long2ip
def ip2hex(addr):
    """Convert a dotted-quad ip address to a hex encoded number.

    >>> ip2hex('0.0.0.1')
    '00000001'
    >>> ip2hex('127.0.0.1')
    '7f000001'
    >>> ip2hex('127.255.255.255')
    '7fffffff'
    >>> ip2hex('128.0.0.1')
    '80000001'
    >>> ip2hex('128.1')
    '80000001'
    >>> ip2hex('255.255.255.255')
    'ffffffff'
    :param addr: Dotted-quad ip address.
    :type addr: str
    :returns: Numeric ip address as a hex-encoded string or ``None`` if
      invalid.
    """
    numeric = ip2long(addr)
    # Propagate the invalid-address sentinel from ip2long unchanged.
    return None if numeric is None else "%08x" % numeric
# end ip2hex
def hex2ip(hex_str):
    """Convert a hex encoded integer to a dotted-quad ip address.

    >>> hex2ip('00000001')
    '0.0.0.1'
    >>> hex2ip('7f000001')
    '127.0.0.1'
    >>> hex2ip('7fffffff')
    '127.255.255.255'
    >>> hex2ip('80000001')
    '128.0.0.1'
    >>> hex2ip('ffffffff')
    '255.255.255.255'
    :param hex_str: Numeric ip address as a hex-encoded string.
    :type hex_str: str
    :returns: Dotted-quad ip address or ``None`` if invalid.
    """
    try:
        # int() raises ValueError for non-hex input; long2ip itself only
        # raises TypeError, so nothing else is swallowed by this handler.
        return long2ip(int(hex_str, 16))
    except ValueError:
        return None
# end hex2ip
def cidr2block(cidr):
    """Convert a CIDR notation ip address into a tuple containing the network
    block start and end addresses.

    >>> cidr2block('127.0.0.1/32')
    ('127.0.0.1', '127.0.0.1')
    >>> cidr2block('127/8')
    ('127.0.0.0', '127.255.255.255')
    >>> cidr2block('127.0.1/16')
    ('127.0.0.0', '127.0.255.255')
    >>> cidr2block('127.1/24')
    ('127.1.0.0', '127.1.0.255')
    >>> cidr2block('127.0.0.3/29')
    ('127.0.0.0', '127.0.0.7')
    >>> cidr2block('127/0')
    ('0.0.0.0', '255.255.255.255')
    :param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
    :type cidr: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_cidr(cidr):
        return None
    addr, size = cidr.split('/')
    # The base network number plus the prefix length define the block.
    return _block_from_ip_and_prefix(ip2network(addr), int(size))
# end cidr2block
def netmask2prefix(mask):
    """Convert a dotted-quad netmask into a CIDR prefix.

    >>> netmask2prefix('255.0.0.0')
    8
    >>> netmask2prefix('255.128.0.0')
    9
    >>> netmask2prefix('255.255.255.254')
    31
    >>> netmask2prefix('255.255.255.255')
    32
    >>> netmask2prefix('0.0.0.0')
    0
    >>> netmask2prefix('127.0.0.1')
    0
    :param mask: Netmask in dotted-quad notation.
    :type mask: str
    :returns: CIDR prefix corresponding to netmask or `0` if invalid.
    """
    if not validate_netmask(mask):
        return 0
    # For a valid netmask the prefix length equals the count of one bits.
    return bin(ip2network(mask)).count('1')
# end netmask2prefix
def subnet2block(subnet):
    """Convert a dotted-quad ip address including a netmask into a tuple
    containing the network block start and end addresses.

    >>> subnet2block('127.0.0.1/255.255.255.255')
    ('127.0.0.1', '127.0.0.1')
    >>> subnet2block('127/255')
    ('127.0.0.0', '127.255.255.255')
    >>> subnet2block('127.0.1/255.255')
    ('127.0.0.0', '127.0.255.255')
    >>> subnet2block('127.1/255.255.255.0')
    ('127.1.0.0', '127.1.0.255')
    >>> subnet2block('127.0.0.3/255.255.255.248')
    ('127.0.0.0', '127.0.0.7')
    >>> subnet2block('127/0')
    ('0.0.0.0', '255.255.255.255')
    :param subnet: dotted-quad ip address with netmask
      (eg. '127.0.0.1/255.0.0.0').
    :type subnet: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_subnet(subnet):
        return None
    addr, netmask = subnet.split('/')
    # Translate the dotted-quad netmask into a prefix length and reuse
    # the common block calculation.
    return _block_from_ip_and_prefix(ip2network(addr), netmask2prefix(netmask))
# end subnet2block
def _block_from_ip_and_prefix(ip, prefix):
    """Create a tuple of (start, end) dotted-quad addresses from the given
    ip address and prefix length.

    :param ip: Ip address in block
    :type ip: long
    :param prefix: Prefix size for block
    :type prefix: int
    :returns: Tuple of block (start, end)
    """
    host_bits = 32 - prefix
    # Clearing the host bits yields the first address of the block.
    start = (ip >> host_bits) << host_bits
    # Setting every host bit yields the last address of the block.
    end = start | ((1 << host_bits) - 1)
    return (long2ip(start), long2ip(end))
# end _block_from_ip_and_prefix
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv4.py
|
validate_netmask
|
python
|
def validate_netmask(s):
if validate_ip(s):
# Convert to binary string, strip '0b' prefix, 0 pad to 32 bits
mask = bin(ip2network(s))[2:].zfill(32)
# all left most bits must be 1, all right most must be 0
seen0 = False
for c in mask:
if '1' == c:
if seen0:
return False
else:
seen0 = True
return True
else:
return False
|
Validate that a dotted-quad ip address is a valid netmask.
>>> validate_netmask('0.0.0.0')
True
>>> validate_netmask('128.0.0.0')
True
>>> validate_netmask('255.0.0.0')
True
>>> validate_netmask('255.255.255.255')
True
>>> validate_netmask(BROADCAST)
True
>>> validate_netmask('128.0.0.1')
False
>>> validate_netmask('1.255.255.0')
False
>>> validate_netmask('0.255.255.0')
False
:param s: String to validate as a dotted-quad notation netmask.
:type s: str
:returns: ``True`` if a valid netmask, ``False`` otherwise.
:raises: TypeError
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv4.py#L269-L309
|
[
"def ip2network(ip):\n \"\"\"Convert a dotted-quad ip to base network number.\n\n This differs from :func:`ip2long` in that partial addresses as treated as\n all network instead of network plus host (eg. '127.1' expands to\n '127.1.0.0')\n\n :param ip: dotted-quad ip address (eg. ‘127.0.0.1’).\n :type ip: str\n :returns: Network byte order 32-bit integer or `None` if ip is invalid.\n \"\"\"\n if not validate_ip(ip):\n return None\n quads = ip.split('.')\n netw = 0\n for i in range(4):\n netw = (netw << 8) | int(len(quads) > i and quads[i] or 0)\n return netw\n",
"def validate_ip(s):\n \"\"\"Validate a dotted-quad ip address.\n\n The string is considered a valid dotted-quad address if it consists of\n one to four octets (0-255) seperated by periods (.).\n\n\n >>> validate_ip('127.0.0.1')\n True\n >>> validate_ip('127.0')\n True\n >>> validate_ip('127.0.0.256')\n False\n >>> validate_ip(LOCALHOST)\n True\n >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n TypeError: expected string or buffer\n\n\n :param s: String to validate as a dotted-quad ip address.\n :type s: str\n :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.\n :raises: TypeError\n \"\"\"\n if _DOTTED_QUAD_RE.match(s):\n quads = s.split('.')\n for q in quads:\n if int(q) > 255:\n return False\n return True\n return False\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
# sniff for python2.x / python3k compatibility "fixes'
try:
basestring = basestring
except NameError:
# 'basestring' is undefined, must be python3k
basestring = str
try:
bin = bin
except NameError:
# builtin bin function doesn't exist
def bin(x):
"""
From http://code.activestate.com/recipes/219300/#c7
"""
if x < 0:
return '-' + bin(-x)
out = []
if x == 0:
out.append('0')
while x > 0:
out.append('01'[x & 1])
x >>= 1
pass
try:
return '0b' + ''.join(reversed(out))
except NameError:
out.reverse()
return '0b' + ''.join(out)
# end bin
# end compatibility "fixes'
__all__ = (
'cidr2block',
'hex2ip',
'ip2hex',
'ip2long',
'ip2network',
'long2ip',
'netmask2prefix',
'subnet2block',
'validate_cidr',
'validate_ip',
'validate_netmask',
'validate_subnet',
'BENCHMARK_TESTS',
'BROADCAST',
'CURRENT_NETWORK',
'DUAL_STACK_LITE',
'IETF_PROTOCOL_RESERVED',
'IPV6_TO_IPV4_RELAY',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_INTERNETWORK',
'MULTICAST_LOCAL',
'PRIVATE_NETWORK_10',
'PRIVATE_NETWORK_172_16',
'PRIVATE_NETWORK_192_168',
'RESERVED',
'SHARED_ADDRESS_SPACE',
'TEST_NET_1',
'TEST_NET_2',
'TEST_NET_3',
)
#: Regex for validating an IPv4 address
_DOTTED_QUAD_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}/\d{1,2}$')
#: Maximum IPv4 integer
MAX_IP = 0xffffffff
#: Minimum IPv4 integer
MIN_IP = 0x0
#: Broadcast messages to the current network (only valid as source address)
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
CURRENT_NETWORK = "0.0.0.0/8"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_10 = "10.0.0.0/8"
#: Carrier-grade NAT private network
#: (`RFC 6598 <https://tools.ietf.org/html/rfc6598>`_)
SHARED_ADDRESS_SPACE = "100.64.0.0/10"
#: Loopback addresses on the local host
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOOPBACK = "127.0.0.0/8"
#: Common `localhost` address
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOCALHOST = "127.0.0.1"
#: Autoconfiguration when no IP address available
#: (`RFC 3972 <https://tools.ietf.org/html/rfc3972>`_)
LINK_LOCAL = "169.254.0.0/16"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_172_16 = "172.16.0.0/12"
#: IETF protocol assignments reserved block
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
IETF_PROTOCOL_RESERVED = "192.0.0.0/24"
#: Dual-Stack Lite link address
#: (`RFC 6333 <https://tools.ietf.org/html/rfc6333>`_)
DUAL_STACK_LITE = "192.0.0.0/29"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_1 = "192.0.2.0/24"
#: 6to4 anycast relay
#: (`RFC 3068 <https://tools.ietf.org/html/rfc3068>`_)
IPV6_TO_IPV4_RELAY = "192.88.99.0/24"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_192_168 = "192.168.0.0/16"
#: Inter-network communications testing
#: (`RFC 2544 <https://tools.ietf.org/html/rfc2544>`_)
BENCHMARK_TESTS = "198.18.0.0/15"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_2 = "198.51.100.0/24"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_3 = "203.0.113.0/24"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "224.0.0.0/4"
#: Link local multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_LOCAL = "224.0.0.0/24"
#: Forwardable multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_INTERNETWORK = "224.0.1.0/24"
#: Former Class E address space. Reserved for future use
#: (`RFC 1700 <https://tools.ietf.org/html/rfc1700>`_)
RESERVED = "240.0.0.0/4"
#: Broadcast messages to the current network
#: (only valid as destination address)
#: (`RFC 919 <https://tools.ietf.org/html/rfc919>`_)
BROADCAST = "255.255.255.255"
def validate_ip(s):
    """Validate a dotted-quad ip address.

    The string is considered a valid dotted-quad address if it consists of
    one to four octets (0-255) separated by periods (.).

    >>> validate_ip('127.0.0.1')
    True
    >>> validate_ip('127.0')
    True
    >>> validate_ip('127.0.0.256')
    False
    >>> validate_ip(LOCALHOST)
    True
    >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer
    :param s: String to validate as a dotted-quad ip address.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
    :raises: TypeError
    """
    # The regex only constrains the shape (one to four groups of up to
    # three digits); the per-octet range check follows.  A non-string
    # argument raises TypeError here, matching the doctest.
    if not _DOTTED_QUAD_RE.match(s):
        return False
    return all(int(quad) <= 255 for quad in s.split('.'))
# end validate_ip
def validate_cidr(s):
"""Validate a CIDR notation ip address.
The string is considered a valid CIDR address if it consists of a valid
IPv4 address in dotted-quad format followed by a forward slash (/) and
a bit mask length (1-32).
>>> validate_cidr('127.0.0.1/32')
True
>>> validate_cidr('127.0/8')
True
>>> validate_cidr('127.0.0.256/32')
False
>>> validate_cidr('127.0.0.0')
False
>>> validate_cidr(LOOPBACK)
True
>>> validate_cidr('127.0.0.1/33')
False
>>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a CIDR notation ip address.
:type s: str
:returns: ``True`` if a valid CIDR address, ``False`` otherwise.
:raises: TypeError
"""
if _CIDR_RE.match(s):
ip, mask = s.split('/')
if validate_ip(ip):
if int(mask) > 32:
return False
else:
return False
return True
return False
# end validate_cidr
# end validate_netmask
def validate_subnet(s):
"""Validate a dotted-quad ip address including a netmask.
The string is considered a valid dotted-quad address with netmask if it
consists of one to four octets (0-255) seperated by periods (.) followed
by a forward slash (/) and a subnet bitmask which is expressed in
dotted-quad format.
>>> validate_subnet('127.0.0.1/255.255.255.255')
True
>>> validate_subnet('127.0/255.0.0.0')
True
>>> validate_subnet('127.0/255')
True
>>> validate_subnet('127.0.0.256/255.255.255.255')
False
>>> validate_subnet('127.0.0.1/255.255.255.256')
False
>>> validate_subnet('127.0.0.0')
False
>>> validate_subnet(None)
Traceback (most recent call last):
...
TypeError: expected string or unicode
:param s: String to validate as a dotted-quad ip address with netmask.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address with netmask,
``False`` otherwise.
:raises: TypeError
"""
if isinstance(s, basestring):
if '/' in s:
start, mask = s.split('/', 2)
return validate_ip(start) and validate_netmask(mask)
else:
return False
raise TypeError("expected string or unicode")
# end validate_subnet
def ip2long(ip):
"""Convert a dotted-quad ip address to a network byte order 32-bit
integer.
>>> ip2long('127.0.0.1')
2130706433
>>> ip2long('127.1')
2130706433
>>> ip2long('127')
2130706432
>>> ip2long('127.0.0.256') is None
True
:param ip: Dotted-quad ip address (eg. '127.0.0.1').
:type ip: str
:returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
"""
if not validate_ip(ip):
return None
quads = ip.split('.')
if len(quads) == 1:
# only a network quad
quads = quads + [0, 0, 0]
elif len(quads) < 4:
# partial form, last supplied quad is host address, rest is network
host = quads[-1:]
quads = quads[:-1] + [0, ] * (4 - len(quads)) + host
lngip = 0
for q in quads:
lngip = (lngip << 8) | int(q)
return lngip
# end ip2long
def ip2network(ip):
"""Convert a dotted-quad ip to base network number.
This differs from :func:`ip2long` in that partial addresses as treated as
all network instead of network plus host (eg. '127.1' expands to
'127.1.0.0')
:param ip: dotted-quad ip address (eg. ‘127.0.0.1’).
:type ip: str
:returns: Network byte order 32-bit integer or `None` if ip is invalid.
"""
if not validate_ip(ip):
return None
quads = ip.split('.')
netw = 0
for i in range(4):
netw = (netw << 8) | int(len(quads) > i and quads[i] or 0)
return netw
# end ip2network
def long2ip(l):
"""Convert a network byte order 32-bit integer to a dotted quad ip
address.
>>> long2ip(2130706433)
'127.0.0.1'
>>> long2ip(MIN_IP)
'0.0.0.0'
>>> long2ip(MAX_IP)
'255.255.255.255'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
:param l: Network byte order 32-bit integer.
:type l: int
:returns: Dotted-quad ip address (eg. '127.0.0.1').
:raises: TypeError
"""
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
return '%d.%d.%d.%d' % (
l >> 24 & 255, l >> 16 & 255, l >> 8 & 255, l & 255)
# end long2ip
def ip2hex(addr):
"""Convert a dotted-quad ip address to a hex encoded number.
>>> ip2hex('0.0.0.1')
'00000001'
>>> ip2hex('127.0.0.1')
'7f000001'
>>> ip2hex('127.255.255.255')
'7fffffff'
>>> ip2hex('128.0.0.1')
'80000001'
>>> ip2hex('128.1')
'80000001'
>>> ip2hex('255.255.255.255')
'ffffffff'
:param addr: Dotted-quad ip address.
:type addr: str
:returns: Numeric ip address as a hex-encoded string or ``None`` if
invalid.
"""
netip = ip2long(addr)
if netip is None:
return None
return "%08x" % netip
# end ip2hex
def hex2ip(hex_str):
"""Convert a hex encoded integer to a dotted-quad ip address.
>>> hex2ip('00000001')
'0.0.0.1'
>>> hex2ip('7f000001')
'127.0.0.1'
>>> hex2ip('7fffffff')
'127.255.255.255'
>>> hex2ip('80000001')
'128.0.0.1'
>>> hex2ip('ffffffff')
'255.255.255.255'
:param hex_str: Numeric ip address as a hex-encoded string.
:type hex_str: str
:returns: Dotted-quad ip address or ``None`` if invalid.
"""
try:
netip = int(hex_str, 16)
except ValueError:
return None
return long2ip(netip)
# end hex2ip
def cidr2block(cidr):
"""Convert a CIDR notation ip address into a tuple containing the network
block start and end addresses.
>>> cidr2block('127.0.0.1/32')
('127.0.0.1', '127.0.0.1')
>>> cidr2block('127/8')
('127.0.0.0', '127.255.255.255')
>>> cidr2block('127.0.1/16')
('127.0.0.0', '127.0.255.255')
>>> cidr2block('127.1/24')
('127.1.0.0', '127.1.0.255')
>>> cidr2block('127.0.0.3/29')
('127.0.0.0', '127.0.0.7')
>>> cidr2block('127/0')
('0.0.0.0', '255.255.255.255')
:param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
:type cidr: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
"""
if not validate_cidr(cidr):
return None
ip, prefix = cidr.split('/')
prefix = int(prefix)
# convert dotted-quad ip to base network number
network = ip2network(ip)
return _block_from_ip_and_prefix(network, prefix)
# end cidr2block
def netmask2prefix(mask):
"""Convert a dotted-quad netmask into a CIDR prefix.
>>> netmask2prefix('255.0.0.0')
8
>>> netmask2prefix('255.128.0.0')
9
>>> netmask2prefix('255.255.255.254')
31
>>> netmask2prefix('255.255.255.255')
32
>>> netmask2prefix('0.0.0.0')
0
>>> netmask2prefix('127.0.0.1')
0
:param mask: Netmask in dotted-quad notation.
:type mask: str
:returns: CIDR prefix corresponding to netmask or `0` if invalid.
"""
if validate_netmask(mask):
return bin(ip2network(mask)).count('1')
return 0
# end netmask2prefix
def subnet2block(subnet):
"""Convert a dotted-quad ip address including a netmask into a tuple
containing the network block start and end addresses.
>>> subnet2block('127.0.0.1/255.255.255.255')
('127.0.0.1', '127.0.0.1')
>>> subnet2block('127/255')
('127.0.0.0', '127.255.255.255')
>>> subnet2block('127.0.1/255.255')
('127.0.0.0', '127.0.255.255')
>>> subnet2block('127.1/255.255.255.0')
('127.1.0.0', '127.1.0.255')
>>> subnet2block('127.0.0.3/255.255.255.248')
('127.0.0.0', '127.0.0.7')
>>> subnet2block('127/0')
('0.0.0.0', '255.255.255.255')
:param subnet: dotted-quad ip address with netmask
(eg. '127.0.0.1/255.0.0.0').
:type subnet: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
"""
if not validate_subnet(subnet):
return None
ip, netmask = subnet.split('/')
prefix = netmask2prefix(netmask)
# convert dotted-quad ip to base network number
network = ip2network(ip)
return _block_from_ip_and_prefix(network, prefix)
# end subnet2block
def _block_from_ip_and_prefix(ip, prefix):
"""Create a tuple of (start, end) dotted-quad addresses from the given
ip address and prefix length.
:param ip: Ip address in block
:type ip: long
:param prefix: Prefix size for block
:type prefix: int
:returns: Tuple of block (start, end)
"""
# keep left most prefix bits of ip
shift = 32 - prefix
block_start = ip >> shift << shift
# expand right most 32 - prefix bits to 1
mask = (1 << shift) - 1
block_end = block_start | mask
return (long2ip(block_start), long2ip(block_end))
# end _block_from_ip_and_prefix
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv4.py
|
validate_subnet
|
python
|
def validate_subnet(s):
if isinstance(s, basestring):
if '/' in s:
start, mask = s.split('/', 2)
return validate_ip(start) and validate_netmask(mask)
else:
return False
raise TypeError("expected string or unicode")
|
Validate a dotted-quad ip address including a netmask.
The string is considered a valid dotted-quad address with netmask if it
consists of one to four octets (0-255) seperated by periods (.) followed
by a forward slash (/) and a subnet bitmask which is expressed in
dotted-quad format.
>>> validate_subnet('127.0.0.1/255.255.255.255')
True
>>> validate_subnet('127.0/255.0.0.0')
True
>>> validate_subnet('127.0/255')
True
>>> validate_subnet('127.0.0.256/255.255.255.255')
False
>>> validate_subnet('127.0.0.1/255.255.255.256')
False
>>> validate_subnet('127.0.0.0')
False
>>> validate_subnet(None)
Traceback (most recent call last):
...
TypeError: expected string or unicode
:param s: String to validate as a dotted-quad ip address with netmask.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address with netmask,
``False`` otherwise.
:raises: TypeError
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv4.py#L313-L352
|
[
"def validate_ip(s):\n \"\"\"Validate a dotted-quad ip address.\n\n The string is considered a valid dotted-quad address if it consists of\n one to four octets (0-255) seperated by periods (.).\n\n\n >>> validate_ip('127.0.0.1')\n True\n >>> validate_ip('127.0')\n True\n >>> validate_ip('127.0.0.256')\n False\n >>> validate_ip(LOCALHOST)\n True\n >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n TypeError: expected string or buffer\n\n\n :param s: String to validate as a dotted-quad ip address.\n :type s: str\n :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.\n :raises: TypeError\n \"\"\"\n if _DOTTED_QUAD_RE.match(s):\n quads = s.split('.')\n for q in quads:\n if int(q) > 255:\n return False\n return True\n return False\n",
"def validate_netmask(s):\n \"\"\"Validate that a dotted-quad ip address is a valid netmask.\n\n\n >>> validate_netmask('0.0.0.0')\n True\n >>> validate_netmask('128.0.0.0')\n True\n >>> validate_netmask('255.0.0.0')\n True\n >>> validate_netmask('255.255.255.255')\n True\n >>> validate_netmask(BROADCAST)\n True\n >>> validate_netmask('128.0.0.1')\n False\n >>> validate_netmask('1.255.255.0')\n False\n >>> validate_netmask('0.255.255.0')\n False\n\n\n :param s: String to validate as a dotted-quad notation netmask.\n :type s: str\n :returns: ``True`` if a valid netmask, ``False`` otherwise.\n :raises: TypeError\n \"\"\"\n if validate_ip(s):\n # Convert to binary string, strip '0b' prefix, 0 pad to 32 bits\n mask = bin(ip2network(s))[2:].zfill(32)\n # all left most bits must be 1, all right most must be 0\n seen0 = False\n for c in mask:\n if '1' == c:\n if seen0:\n return False\n else:\n seen0 = True\n return True\n else:\n return False\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
# sniff for python2.x / python3k compatibility "fixes'
try:
basestring = basestring
except NameError:
# 'basestring' is undefined, must be python3k
basestring = str
try:
bin = bin
except NameError:
# builtin bin function doesn't exist
def bin(x):
"""
From http://code.activestate.com/recipes/219300/#c7
"""
if x < 0:
return '-' + bin(-x)
out = []
if x == 0:
out.append('0')
while x > 0:
out.append('01'[x & 1])
x >>= 1
pass
try:
return '0b' + ''.join(reversed(out))
except NameError:
out.reverse()
return '0b' + ''.join(out)
# end bin
# end compatibility "fixes'
__all__ = (
'cidr2block',
'hex2ip',
'ip2hex',
'ip2long',
'ip2network',
'long2ip',
'netmask2prefix',
'subnet2block',
'validate_cidr',
'validate_ip',
'validate_netmask',
'validate_subnet',
'BENCHMARK_TESTS',
'BROADCAST',
'CURRENT_NETWORK',
'DUAL_STACK_LITE',
'IETF_PROTOCOL_RESERVED',
'IPV6_TO_IPV4_RELAY',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_INTERNETWORK',
'MULTICAST_LOCAL',
'PRIVATE_NETWORK_10',
'PRIVATE_NETWORK_172_16',
'PRIVATE_NETWORK_192_168',
'RESERVED',
'SHARED_ADDRESS_SPACE',
'TEST_NET_1',
'TEST_NET_2',
'TEST_NET_3',
)
#: Regex for validating an IPv4 address
_DOTTED_QUAD_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}/\d{1,2}$')
#: Maximum IPv4 integer
MAX_IP = 0xffffffff
#: Minimum IPv4 integer
MIN_IP = 0x0
#: Broadcast messages to the current network (only valid as source address)
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
CURRENT_NETWORK = "0.0.0.0/8"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_10 = "10.0.0.0/8"
#: Carrier-grade NAT private network
#: (`RFC 6598 <https://tools.ietf.org/html/rfc6598>`_)
SHARED_ADDRESS_SPACE = "100.64.0.0/10"
#: Loopback addresses on the local host
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOOPBACK = "127.0.0.0/8"
#: Common `localhost` address
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOCALHOST = "127.0.0.1"
#: Autoconfiguration when no IP address available
#: (`RFC 3972 <https://tools.ietf.org/html/rfc3972>`_)
LINK_LOCAL = "169.254.0.0/16"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_172_16 = "172.16.0.0/12"
#: IETF protocol assignments reserved block
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
IETF_PROTOCOL_RESERVED = "192.0.0.0/24"
#: Dual-Stack Lite link address
#: (`RFC 6333 <https://tools.ietf.org/html/rfc6333>`_)
DUAL_STACK_LITE = "192.0.0.0/29"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_1 = "192.0.2.0/24"
#: 6to4 anycast relay
#: (`RFC 3068 <https://tools.ietf.org/html/rfc3068>`_)
IPV6_TO_IPV4_RELAY = "192.88.99.0/24"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_192_168 = "192.168.0.0/16"
#: Inter-network communications testing
#: (`RFC 2544 <https://tools.ietf.org/html/rfc2544>`_)
BENCHMARK_TESTS = "198.18.0.0/15"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_2 = "198.51.100.0/24"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_3 = "203.0.113.0/24"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "224.0.0.0/4"
#: Link local multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_LOCAL = "224.0.0.0/24"
#: Forwardable multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_INTERNETWORK = "224.0.1.0/24"
#: Former Class E address space. Reserved for future use
#: (`RFC 1700 <https://tools.ietf.org/html/rfc1700>`_)
RESERVED = "240.0.0.0/4"
#: Broadcast messages to the current network
#: (only valid as destination address)
#: (`RFC 919 <https://tools.ietf.org/html/rfc919>`_)
BROADCAST = "255.255.255.255"
def validate_ip(s):
    """Check whether *s* is a valid dotted-quad ip address.

    A string qualifies when it consists of one to four octets (0-255)
    separated by periods (.).

    >>> validate_ip('127.0.0.1')
    True
    >>> validate_ip('127.0')
    True
    >>> validate_ip('127.0.0.256')
    False
    >>> validate_ip(LOCALHOST)
    True
    >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer

    :param s: String to validate as a dotted-quad ip address.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
    :raises: TypeError
    """
    # A non-string argument makes the regex raise TypeError, as documented.
    if not _DOTTED_QUAD_RE.match(s):
        return False
    # The regex only limits digit count; each octet must still fit 0-255.
    return all(int(octet) <= 255 for octet in s.split('.'))
# end validate_ip
def validate_cidr(s):
    """Check whether *s* is a valid CIDR-notation ip address.

    A string qualifies when it is a valid IPv4 address in dotted-quad
    format followed by a forward slash (/) and a bit mask length (0-32).

    >>> validate_cidr('127.0.0.1/32')
    True
    >>> validate_cidr('127.0/8')
    True
    >>> validate_cidr('127.0.0.256/32')
    False
    >>> validate_cidr('127.0.0.0')
    False
    >>> validate_cidr(LOOPBACK)
    True
    >>> validate_cidr('127.0.0.1/33')
    False
    >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer

    :param s: String to validate as a CIDR notation ip address.
    :type s: str
    :returns: ``True`` if a valid CIDR address, ``False`` otherwise.
    :raises: TypeError
    """
    if not _CIDR_RE.match(s):
        return False
    addr, prefix = s.split('/')
    # The regex caps the prefix at two digits, so only the 33-99 range
    # still needs rejecting here.
    return validate_ip(addr) and int(prefix) <= 32
# end validate_cidr
def validate_netmask(s):
    """Check whether the dotted-quad address *s* is a valid netmask.

    >>> validate_netmask('0.0.0.0')
    True
    >>> validate_netmask('128.0.0.0')
    True
    >>> validate_netmask('255.0.0.0')
    True
    >>> validate_netmask('255.255.255.255')
    True
    >>> validate_netmask(BROADCAST)
    True
    >>> validate_netmask('128.0.0.1')
    False
    >>> validate_netmask('1.255.255.0')
    False
    >>> validate_netmask('0.255.255.0')
    False

    :param s: String to validate as a dotted-quad notation netmask.
    :type s: str
    :returns: ``True`` if a valid netmask, ``False`` otherwise.
    :raises: TypeError
    """
    if not validate_ip(s):
        return False
    # Binary form with the '0b' prefix stripped, left-padded to 32 bits.
    bits = bin(ip2network(s))[2:].zfill(32)
    # A valid mask is a run of 1s followed only by 0s, which holds exactly
    # when no '0' is ever immediately followed by a '1'.
    return '01' not in bits
# end validate_netmask
# end validate_subnet
def ip2long(ip):
    """Convert a dotted-quad ip address to a network byte order 32-bit
    integer.

    >>> ip2long('127.0.0.1')
    2130706433
    >>> ip2long('127.1')
    2130706433
    >>> ip2long('127')
    2130706432
    >>> ip2long('127.0.0.256') is None
    True

    :param ip: Dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    parts = ip.split('.')
    if len(parts) == 1:
        # bare network quad: pad with three zero quads
        parts = parts + [0, 0, 0]
    elif len(parts) < 4:
        # partial form: the final quad is the host part, zero-fill between
        parts = parts[:-1] + [0] * (4 - len(parts)) + parts[-1:]
    value = 0
    for part in parts:
        value = (value << 8) | int(part)
    return value
# end ip2long
def ip2network(ip):
    """Convert a dotted-quad ip to its base network number.

    This differs from :func:`ip2long` in that partial addresses are treated
    as all network instead of network plus host (eg. '127.1' expands to
    '127.1.0.0').

    :param ip: dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or `None` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    parts = ip.split('.')
    netw = 0
    # Pad missing quads on the right with zeros so the supplied quads
    # become the high-order bytes of the network number.
    for i in range(4):
        quad = parts[i] if i < len(parts) else 0
        netw = (netw << 8) | int(quad)
    return netw
# end ip2network
def long2ip(l):
    """Convert a network byte order 32-bit integer to a dotted quad ip
    address.

    >>> long2ip(2130706433)
    '127.0.0.1'
    >>> long2ip(MIN_IP)
    '0.0.0.0'
    >>> long2ip(MAX_IP)
    '255.255.255.255'
    >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
    >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive

    :param l: Network byte order 32-bit integer.
    :type l: int
    :returns: Dotted-quad ip address (eg. '127.0.0.1').
    :raises: TypeError
    """
    if l < MIN_IP or l > MAX_IP:
        # TypeError (not ValueError) is kept for backwards compatibility.
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    return '%d.%d.%d.%d' % (
        (l >> 24) & 255, (l >> 16) & 255, (l >> 8) & 255, l & 255)
# end long2ip
def ip2hex(addr):
    """Convert a dotted-quad ip address to a hex encoded number.

    >>> ip2hex('0.0.0.1')
    '00000001'
    >>> ip2hex('127.0.0.1')
    '7f000001'
    >>> ip2hex('127.255.255.255')
    '7fffffff'
    >>> ip2hex('128.0.0.1')
    '80000001'
    >>> ip2hex('128.1')
    '80000001'
    >>> ip2hex('255.255.255.255')
    'ffffffff'

    :param addr: Dotted-quad ip address.
    :type addr: str
    :returns: Numeric ip address as a hex-encoded string or ``None`` if
        invalid.
    """
    # ip2long already rejects malformed addresses by returning None.
    numeric = ip2long(addr)
    return None if numeric is None else "%08x" % numeric
# end ip2hex
def hex2ip(hex_str):
    """Convert a hex encoded integer to a dotted-quad ip address.

    >>> hex2ip('00000001')
    '0.0.0.1'
    >>> hex2ip('7f000001')
    '127.0.0.1'
    >>> hex2ip('7fffffff')
    '127.255.255.255'
    >>> hex2ip('80000001')
    '128.0.0.1'
    >>> hex2ip('ffffffff')
    '255.255.255.255'

    :param hex_str: Numeric ip address as a hex-encoded string.
    :type hex_str: str
    :returns: Dotted-quad ip address or ``None`` if invalid.
    """
    try:
        numeric = int(hex_str, 16)
    except ValueError:
        # not parseable as hexadecimal
        return None
    return long2ip(numeric)
# end hex2ip
def cidr2block(cidr):
    """Convert a CIDR notation ip address into a tuple containing the
    network block start and end addresses.

    >>> cidr2block('127.0.0.1/32')
    ('127.0.0.1', '127.0.0.1')
    >>> cidr2block('127/8')
    ('127.0.0.0', '127.255.255.255')
    >>> cidr2block('127.0.1/16')
    ('127.0.0.0', '127.0.255.255')
    >>> cidr2block('127.1/24')
    ('127.1.0.0', '127.1.0.255')
    >>> cidr2block('127.0.0.3/29')
    ('127.0.0.0', '127.0.0.7')
    >>> cidr2block('127/0')
    ('0.0.0.0', '255.255.255.255')

    :param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
    :type cidr: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_cidr(cidr):
        return None
    addr, bits = cidr.split('/')
    # partial addresses expand to their base network number
    return _block_from_ip_and_prefix(ip2network(addr), int(bits))
# end cidr2block
def netmask2prefix(mask):
    """Convert a dotted-quad netmask into a CIDR prefix.

    >>> netmask2prefix('255.0.0.0')
    8
    >>> netmask2prefix('255.128.0.0')
    9
    >>> netmask2prefix('255.255.255.254')
    31
    >>> netmask2prefix('255.255.255.255')
    32
    >>> netmask2prefix('0.0.0.0')
    0
    >>> netmask2prefix('127.0.0.1')
    0

    :param mask: Netmask in dotted-quad notation.
    :type mask: str
    :returns: CIDR prefix corresponding to netmask or `0` if invalid.
    """
    if not validate_netmask(mask):
        return 0
    # The prefix length is the number of set bits in the mask.
    return bin(ip2network(mask)).count('1')
# end netmask2prefix
def subnet2block(subnet):
    """Convert a dotted-quad ip address including a netmask into a tuple
    containing the network block start and end addresses.

    >>> subnet2block('127.0.0.1/255.255.255.255')
    ('127.0.0.1', '127.0.0.1')
    >>> subnet2block('127/255')
    ('127.0.0.0', '127.255.255.255')
    >>> subnet2block('127.0.1/255.255')
    ('127.0.0.0', '127.0.255.255')
    >>> subnet2block('127.1/255.255.255.0')
    ('127.1.0.0', '127.1.0.255')
    >>> subnet2block('127.0.0.3/255.255.255.248')
    ('127.0.0.0', '127.0.0.7')
    >>> subnet2block('127/0')
    ('0.0.0.0', '255.255.255.255')

    :param subnet: dotted-quad ip address with netmask
        (eg. '127.0.0.1/255.0.0.0').
    :type subnet: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_subnet(subnet):
        return None
    addr, mask = subnet.split('/')
    # reuse the CIDR machinery by converting the netmask to a prefix first
    return _block_from_ip_and_prefix(ip2network(addr), netmask2prefix(mask))
# end subnet2block
def _block_from_ip_and_prefix(ip, prefix):
    """Build a (start, end) dotted-quad tuple for the block that contains
    *ip* given the prefix length.

    :param ip: Ip address in block
    :type ip: long
    :param prefix: Prefix size for block
    :type prefix: int
    :returns: Tuple of block (start, end)
    """
    host_bits = 32 - prefix
    # clearing the host bits yields the first address of the block
    start = (ip >> host_bits) << host_bits
    # setting every host bit yields the last address of the block
    end = start | ((1 << host_bits) - 1)
    return (long2ip(start), long2ip(end))
# end _block_from_ip_and_prefix
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv4.py
|
ip2long
|
python
|
def ip2long(ip):
if not validate_ip(ip):
return None
quads = ip.split('.')
if len(quads) == 1:
# only a network quad
quads = quads + [0, 0, 0]
elif len(quads) < 4:
# partial form, last supplied quad is host address, rest is network
host = quads[-1:]
quads = quads[:-1] + [0, ] * (4 - len(quads)) + host
lngip = 0
for q in quads:
lngip = (lngip << 8) | int(q)
return lngip
|
Convert a dotted-quad ip address to a network byte order 32-bit
integer.
>>> ip2long('127.0.0.1')
2130706433
>>> ip2long('127.1')
2130706433
>>> ip2long('127')
2130706432
>>> ip2long('127.0.0.256') is None
True
:param ip: Dotted-quad ip address (eg. '127.0.0.1').
:type ip: str
:returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv4.py#L356-L389
|
[
"def validate_ip(s):\n \"\"\"Validate a dotted-quad ip address.\n\n The string is considered a valid dotted-quad address if it consists of\n one to four octets (0-255) seperated by periods (.).\n\n\n >>> validate_ip('127.0.0.1')\n True\n >>> validate_ip('127.0')\n True\n >>> validate_ip('127.0.0.256')\n False\n >>> validate_ip(LOCALHOST)\n True\n >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n TypeError: expected string or buffer\n\n\n :param s: String to validate as a dotted-quad ip address.\n :type s: str\n :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.\n :raises: TypeError\n \"\"\"\n if _DOTTED_QUAD_RE.match(s):\n quads = s.split('.')\n for q in quads:\n if int(q) > 255:\n return False\n return True\n return False\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
# sniff for python2.x / python3k compatibility "fixes'
try:
basestring = basestring
except NameError:
# 'basestring' is undefined, must be python3k
basestring = str
try:
bin = bin
except NameError:
# builtin bin function doesn't exist
    def bin(x):
        """Pure-Python fallback for the builtin ``bin``.

        From http://code.activestate.com/recipes/219300/#c7
        """
        if x < 0:
            # negative values: format the magnitude and prepend the sign
            return '-' + bin(-x)
        out = []
        if x == 0:
            out.append('0')
        while x > 0:
            # collect bits least-significant first
            out.append('01'[x & 1])
            x >>= 1
            pass
        try:
            # reversed() exists on Python 2.4+
            return '0b' + ''.join(reversed(out))
        except NameError:
            # very old interpreters: reverse the list in place instead
            out.reverse()
            return '0b' + ''.join(out)
    # end bin
# end compatibility "fixes'
__all__ = (
'cidr2block',
'hex2ip',
'ip2hex',
'ip2long',
'ip2network',
'long2ip',
'netmask2prefix',
'subnet2block',
'validate_cidr',
'validate_ip',
'validate_netmask',
'validate_subnet',
'BENCHMARK_TESTS',
'BROADCAST',
'CURRENT_NETWORK',
'DUAL_STACK_LITE',
'IETF_PROTOCOL_RESERVED',
'IPV6_TO_IPV4_RELAY',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_INTERNETWORK',
'MULTICAST_LOCAL',
'PRIVATE_NETWORK_10',
'PRIVATE_NETWORK_172_16',
'PRIVATE_NETWORK_192_168',
'RESERVED',
'SHARED_ADDRESS_SPACE',
'TEST_NET_1',
'TEST_NET_2',
'TEST_NET_3',
)
#: Regex for validating an IPv4 address
_DOTTED_QUAD_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}/\d{1,2}$')
#: Maximum IPv4 integer
MAX_IP = 0xffffffff
#: Minimum IPv4 integer
MIN_IP = 0x0
#: Broadcast messages to the current network (only valid as source address)
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
CURRENT_NETWORK = "0.0.0.0/8"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_10 = "10.0.0.0/8"
#: Carrier-grade NAT private network
#: (`RFC 6598 <https://tools.ietf.org/html/rfc6598>`_)
SHARED_ADDRESS_SPACE = "100.64.0.0/10"
#: Loopback addresses on the local host
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOOPBACK = "127.0.0.0/8"
#: Common `localhost` address
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOCALHOST = "127.0.0.1"
#: Autoconfiguration when no IP address available
#: (`RFC 3972 <https://tools.ietf.org/html/rfc3972>`_)
LINK_LOCAL = "169.254.0.0/16"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_172_16 = "172.16.0.0/12"
#: IETF protocol assignments reserved block
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
IETF_PROTOCOL_RESERVED = "192.0.0.0/24"
#: Dual-Stack Lite link address
#: (`RFC 6333 <https://tools.ietf.org/html/rfc6333>`_)
DUAL_STACK_LITE = "192.0.0.0/29"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_1 = "192.0.2.0/24"
#: 6to4 anycast relay
#: (`RFC 3068 <https://tools.ietf.org/html/rfc3068>`_)
IPV6_TO_IPV4_RELAY = "192.88.99.0/24"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_192_168 = "192.168.0.0/16"
#: Inter-network communications testing
#: (`RFC 2544 <https://tools.ietf.org/html/rfc2544>`_)
BENCHMARK_TESTS = "198.18.0.0/15"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_2 = "198.51.100.0/24"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_3 = "203.0.113.0/24"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "224.0.0.0/4"
#: Link local multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_LOCAL = "224.0.0.0/24"
#: Forwardable multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_INTERNETWORK = "224.0.1.0/24"
#: Former Class E address space. Reserved for future use
#: (`RFC 1700 <https://tools.ietf.org/html/rfc1700>`_)
RESERVED = "240.0.0.0/4"
#: Broadcast messages to the current network
#: (only valid as destination address)
#: (`RFC 919 <https://tools.ietf.org/html/rfc919>`_)
BROADCAST = "255.255.255.255"
def validate_ip(s):
    """Check whether *s* is a valid dotted-quad ip address.

    A string qualifies when it consists of one to four octets (0-255)
    separated by periods (.).

    >>> validate_ip('127.0.0.1')
    True
    >>> validate_ip('127.0')
    True
    >>> validate_ip('127.0.0.256')
    False
    >>> validate_ip(LOCALHOST)
    True
    >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer

    :param s: String to validate as a dotted-quad ip address.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
    :raises: TypeError
    """
    # A non-string argument makes the regex raise TypeError, as documented.
    if not _DOTTED_QUAD_RE.match(s):
        return False
    # The regex only limits digit count; each octet must still fit 0-255.
    return all(int(octet) <= 255 for octet in s.split('.'))
# end validate_ip
def validate_cidr(s):
    """Check whether *s* is a valid CIDR-notation ip address.

    A string qualifies when it is a valid IPv4 address in dotted-quad
    format followed by a forward slash (/) and a bit mask length (0-32).

    >>> validate_cidr('127.0.0.1/32')
    True
    >>> validate_cidr('127.0/8')
    True
    >>> validate_cidr('127.0.0.256/32')
    False
    >>> validate_cidr('127.0.0.0')
    False
    >>> validate_cidr(LOOPBACK)
    True
    >>> validate_cidr('127.0.0.1/33')
    False
    >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer

    :param s: String to validate as a CIDR notation ip address.
    :type s: str
    :returns: ``True`` if a valid CIDR address, ``False`` otherwise.
    :raises: TypeError
    """
    if not _CIDR_RE.match(s):
        return False
    addr, prefix = s.split('/')
    # The regex caps the prefix at two digits, so only the 33-99 range
    # still needs rejecting here.
    return validate_ip(addr) and int(prefix) <= 32
# end validate_cidr
def validate_netmask(s):
    """Check whether the dotted-quad address *s* is a valid netmask.

    >>> validate_netmask('0.0.0.0')
    True
    >>> validate_netmask('128.0.0.0')
    True
    >>> validate_netmask('255.0.0.0')
    True
    >>> validate_netmask('255.255.255.255')
    True
    >>> validate_netmask(BROADCAST)
    True
    >>> validate_netmask('128.0.0.1')
    False
    >>> validate_netmask('1.255.255.0')
    False
    >>> validate_netmask('0.255.255.0')
    False

    :param s: String to validate as a dotted-quad notation netmask.
    :type s: str
    :returns: ``True`` if a valid netmask, ``False`` otherwise.
    :raises: TypeError
    """
    if not validate_ip(s):
        return False
    # Binary form with the '0b' prefix stripped, left-padded to 32 bits.
    bits = bin(ip2network(s))[2:].zfill(32)
    # A valid mask is a run of 1s followed only by 0s, which holds exactly
    # when no '0' is ever immediately followed by a '1'.
    return '01' not in bits
# end validate_netmask
def validate_subnet(s):
    """Validate a dotted-quad ip address including a netmask.

    The string is considered a valid dotted-quad address with netmask if it
    consists of one to four octets (0-255) separated by periods (.) followed
    by a forward slash (/) and a subnet bitmask which is expressed in
    dotted-quad format.

    >>> validate_subnet('127.0.0.1/255.255.255.255')
    True
    >>> validate_subnet('127.0/255.0.0.0')
    True
    >>> validate_subnet('127.0/255')
    True
    >>> validate_subnet('127.0.0.256/255.255.255.255')
    False
    >>> validate_subnet('127.0.0.1/255.255.255.256')
    False
    >>> validate_subnet('127.0.0.0')
    False
    >>> validate_subnet(None)
    Traceback (most recent call last):
    ...
    TypeError: expected string or unicode

    :param s: String to validate as a dotted-quad ip address with netmask.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address with netmask,
        ``False`` otherwise.
    :raises: TypeError
    """
    if not isinstance(s, basestring):
        raise TypeError("expected string or unicode")
    if '/' not in s:
        return False
    # Split on the FIRST slash only (maxsplit=1). The previous maxsplit of
    # 2 made inputs such as '1.2.3.4/255.0.0.0/x' raise ValueError from the
    # two-name unpack; now any extra slashes land in the mask part and
    # simply fail netmask validation, honouring the True/False contract.
    start, mask = s.split('/', 1)
    return validate_ip(start) and validate_netmask(mask)
# end validate_subnet
# end ip2long
def ip2network(ip):
    """Convert a dotted-quad ip to its base network number.

    This differs from :func:`ip2long` in that partial addresses are treated
    as all network instead of network plus host (eg. '127.1' expands to
    '127.1.0.0').

    :param ip: dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or `None` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    parts = ip.split('.')
    netw = 0
    # Pad missing quads on the right with zeros so the supplied quads
    # become the high-order bytes of the network number.
    for i in range(4):
        quad = parts[i] if i < len(parts) else 0
        netw = (netw << 8) | int(quad)
    return netw
# end ip2network
def long2ip(l):
    """Convert a network byte order 32-bit integer to a dotted quad ip
    address.

    >>> long2ip(2130706433)
    '127.0.0.1'
    >>> long2ip(MIN_IP)
    '0.0.0.0'
    >>> long2ip(MAX_IP)
    '255.255.255.255'
    >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
    >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive

    :param l: Network byte order 32-bit integer.
    :type l: int
    :returns: Dotted-quad ip address (eg. '127.0.0.1').
    :raises: TypeError
    """
    if l < MIN_IP or l > MAX_IP:
        # TypeError (not ValueError) is kept for backwards compatibility.
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    return '%d.%d.%d.%d' % (
        (l >> 24) & 255, (l >> 16) & 255, (l >> 8) & 255, l & 255)
# end long2ip
def ip2hex(addr):
    """Convert a dotted-quad ip address to a hex encoded number.

    >>> ip2hex('0.0.0.1')
    '00000001'
    >>> ip2hex('127.0.0.1')
    '7f000001'
    >>> ip2hex('127.255.255.255')
    '7fffffff'
    >>> ip2hex('128.0.0.1')
    '80000001'
    >>> ip2hex('128.1')
    '80000001'
    >>> ip2hex('255.255.255.255')
    'ffffffff'

    :param addr: Dotted-quad ip address.
    :type addr: str
    :returns: Numeric ip address as a hex-encoded string or ``None`` if
        invalid.
    """
    # ip2long already rejects malformed addresses by returning None.
    numeric = ip2long(addr)
    return None if numeric is None else "%08x" % numeric
# end ip2hex
def hex2ip(hex_str):
    """Convert a hex encoded integer to a dotted-quad ip address.

    >>> hex2ip('00000001')
    '0.0.0.1'
    >>> hex2ip('7f000001')
    '127.0.0.1'
    >>> hex2ip('7fffffff')
    '127.255.255.255'
    >>> hex2ip('80000001')
    '128.0.0.1'
    >>> hex2ip('ffffffff')
    '255.255.255.255'

    :param hex_str: Numeric ip address as a hex-encoded string.
    :type hex_str: str
    :returns: Dotted-quad ip address or ``None`` if invalid.
    """
    try:
        numeric = int(hex_str, 16)
    except ValueError:
        # not parseable as hexadecimal
        return None
    return long2ip(numeric)
# end hex2ip
def cidr2block(cidr):
    """Convert a CIDR notation ip address into a tuple containing the
    network block start and end addresses.

    >>> cidr2block('127.0.0.1/32')
    ('127.0.0.1', '127.0.0.1')
    >>> cidr2block('127/8')
    ('127.0.0.0', '127.255.255.255')
    >>> cidr2block('127.0.1/16')
    ('127.0.0.0', '127.0.255.255')
    >>> cidr2block('127.1/24')
    ('127.1.0.0', '127.1.0.255')
    >>> cidr2block('127.0.0.3/29')
    ('127.0.0.0', '127.0.0.7')
    >>> cidr2block('127/0')
    ('0.0.0.0', '255.255.255.255')

    :param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
    :type cidr: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_cidr(cidr):
        return None
    addr, bits = cidr.split('/')
    # partial addresses expand to their base network number
    return _block_from_ip_and_prefix(ip2network(addr), int(bits))
# end cidr2block
def netmask2prefix(mask):
    """Convert a dotted-quad netmask into a CIDR prefix.

    >>> netmask2prefix('255.0.0.0')
    8
    >>> netmask2prefix('255.128.0.0')
    9
    >>> netmask2prefix('255.255.255.254')
    31
    >>> netmask2prefix('255.255.255.255')
    32
    >>> netmask2prefix('0.0.0.0')
    0
    >>> netmask2prefix('127.0.0.1')
    0

    :param mask: Netmask in dotted-quad notation.
    :type mask: str
    :returns: CIDR prefix corresponding to netmask or `0` if invalid.
    """
    if not validate_netmask(mask):
        return 0
    # The prefix length is the number of set bits in the mask.
    return bin(ip2network(mask)).count('1')
# end netmask2prefix
def subnet2block(subnet):
    """Convert a dotted-quad ip address including a netmask into a tuple
    containing the network block start and end addresses.

    >>> subnet2block('127.0.0.1/255.255.255.255')
    ('127.0.0.1', '127.0.0.1')
    >>> subnet2block('127/255')
    ('127.0.0.0', '127.255.255.255')
    >>> subnet2block('127.0.1/255.255')
    ('127.0.0.0', '127.0.255.255')
    >>> subnet2block('127.1/255.255.255.0')
    ('127.1.0.0', '127.1.0.255')
    >>> subnet2block('127.0.0.3/255.255.255.248')
    ('127.0.0.0', '127.0.0.7')
    >>> subnet2block('127/0')
    ('0.0.0.0', '255.255.255.255')

    :param subnet: dotted-quad ip address with netmask
        (eg. '127.0.0.1/255.0.0.0').
    :type subnet: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_subnet(subnet):
        return None
    addr, mask = subnet.split('/')
    # reuse the CIDR machinery by converting the netmask to a prefix first
    return _block_from_ip_and_prefix(ip2network(addr), netmask2prefix(mask))
# end subnet2block
def _block_from_ip_and_prefix(ip, prefix):
    """Build a (start, end) dotted-quad tuple for the block that contains
    *ip* given the prefix length.

    :param ip: Ip address in block
    :type ip: long
    :param prefix: Prefix size for block
    :type prefix: int
    :returns: Tuple of block (start, end)
    """
    host_bits = 32 - prefix
    # clearing the host bits yields the first address of the block
    start = (ip >> host_bits) << host_bits
    # setting every host bit yields the last address of the block
    end = start | ((1 << host_bits) - 1)
    return (long2ip(start), long2ip(end))
# end _block_from_ip_and_prefix
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv4.py
|
ip2network
|
python
|
def ip2network(ip):
if not validate_ip(ip):
return None
quads = ip.split('.')
netw = 0
for i in range(4):
netw = (netw << 8) | int(len(quads) > i and quads[i] or 0)
return netw
|
Convert a dotted-quad ip to base network number.
This differs from :func:`ip2long` in that partial addresses as treated as
all network instead of network plus host (eg. '127.1' expands to
'127.1.0.0')
:param ip: dotted-quad ip address (eg. ‘127.0.0.1’).
:type ip: str
:returns: Network byte order 32-bit integer or `None` if ip is invalid.
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv4.py#L393-L410
|
[
"def validate_ip(s):\n \"\"\"Validate a dotted-quad ip address.\n\n The string is considered a valid dotted-quad address if it consists of\n one to four octets (0-255) seperated by periods (.).\n\n\n >>> validate_ip('127.0.0.1')\n True\n >>> validate_ip('127.0')\n True\n >>> validate_ip('127.0.0.256')\n False\n >>> validate_ip(LOCALHOST)\n True\n >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n TypeError: expected string or buffer\n\n\n :param s: String to validate as a dotted-quad ip address.\n :type s: str\n :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.\n :raises: TypeError\n \"\"\"\n if _DOTTED_QUAD_RE.match(s):\n quads = s.split('.')\n for q in quads:\n if int(q) > 255:\n return False\n return True\n return False\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
# sniff for python2.x / python3k compatibility "fixes'
try:
basestring = basestring
except NameError:
# 'basestring' is undefined, must be python3k
basestring = str
try:
bin = bin
except NameError:
# builtin bin function doesn't exist
    def bin(x):
        """Pure-Python fallback for the builtin ``bin``.

        From http://code.activestate.com/recipes/219300/#c7
        """
        if x < 0:
            # negative values: format the magnitude and prepend the sign
            return '-' + bin(-x)
        out = []
        if x == 0:
            out.append('0')
        while x > 0:
            # collect bits least-significant first
            out.append('01'[x & 1])
            x >>= 1
            pass
        try:
            # reversed() exists on Python 2.4+
            return '0b' + ''.join(reversed(out))
        except NameError:
            # very old interpreters: reverse the list in place instead
            out.reverse()
            return '0b' + ''.join(out)
    # end bin
# end compatibility "fixes'
__all__ = (
'cidr2block',
'hex2ip',
'ip2hex',
'ip2long',
'ip2network',
'long2ip',
'netmask2prefix',
'subnet2block',
'validate_cidr',
'validate_ip',
'validate_netmask',
'validate_subnet',
'BENCHMARK_TESTS',
'BROADCAST',
'CURRENT_NETWORK',
'DUAL_STACK_LITE',
'IETF_PROTOCOL_RESERVED',
'IPV6_TO_IPV4_RELAY',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_INTERNETWORK',
'MULTICAST_LOCAL',
'PRIVATE_NETWORK_10',
'PRIVATE_NETWORK_172_16',
'PRIVATE_NETWORK_192_168',
'RESERVED',
'SHARED_ADDRESS_SPACE',
'TEST_NET_1',
'TEST_NET_2',
'TEST_NET_3',
)
#: Regex for validating an IPv4 address
_DOTTED_QUAD_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}/\d{1,2}$')
#: Maximum IPv4 integer
MAX_IP = 0xffffffff
#: Minimum IPv4 integer
MIN_IP = 0x0
#: Broadcast messages to the current network (only valid as source address)
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
CURRENT_NETWORK = "0.0.0.0/8"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_10 = "10.0.0.0/8"
#: Carrier-grade NAT private network
#: (`RFC 6598 <https://tools.ietf.org/html/rfc6598>`_)
SHARED_ADDRESS_SPACE = "100.64.0.0/10"
#: Loopback addresses on the local host
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOOPBACK = "127.0.0.0/8"
#: Common `localhost` address
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOCALHOST = "127.0.0.1"
#: Autoconfiguration when no IP address available
#: (`RFC 3972 <https://tools.ietf.org/html/rfc3972>`_)
LINK_LOCAL = "169.254.0.0/16"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_172_16 = "172.16.0.0/12"
#: IETF protocol assignments reserved block
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
IETF_PROTOCOL_RESERVED = "192.0.0.0/24"
#: Dual-Stack Lite link address
#: (`RFC 6333 <https://tools.ietf.org/html/rfc6333>`_)
DUAL_STACK_LITE = "192.0.0.0/29"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_1 = "192.0.2.0/24"
#: 6to4 anycast relay
#: (`RFC 3068 <https://tools.ietf.org/html/rfc3068>`_)
IPV6_TO_IPV4_RELAY = "192.88.99.0/24"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_192_168 = "192.168.0.0/16"
#: Inter-network communications testing
#: (`RFC 2544 <https://tools.ietf.org/html/rfc2544>`_)
BENCHMARK_TESTS = "198.18.0.0/15"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_2 = "198.51.100.0/24"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_3 = "203.0.113.0/24"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "224.0.0.0/4"
#: Link local multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_LOCAL = "224.0.0.0/24"
#: Forwardable multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_INTERNETWORK = "224.0.1.0/24"
#: Former Class E address space. Reserved for future use
#: (`RFC 1700 <https://tools.ietf.org/html/rfc1700>`_)
RESERVED = "240.0.0.0/4"
#: Broadcast messages to the current network
#: (only valid as destination address)
#: (`RFC 919 <https://tools.ietf.org/html/rfc919>`_)
BROADCAST = "255.255.255.255"
def validate_ip(s):
    """Validate a dotted-quad ip address.

    The string is considered a valid dotted-quad address if it consists of
    one to four octets (0-255) separated by periods (.).

    >>> validate_ip('127.0.0.1')
    True
    >>> validate_ip('127.0')
    True
    >>> validate_ip('127.0.0.256')
    False
    >>> validate_ip(LOCALHOST)
    True
    >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer

    :param s: String to validate as a dotted-quad ip address.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
    :raises: TypeError
    """
    # The regex only checks the shape (1-4 dot-separated runs of 1-3
    # digits); each octet still has to fit in a byte.
    if not _DOTTED_QUAD_RE.match(s):
        return False
    return all(int(octet) <= 255 for octet in s.split('.'))
# end validate_ip
def validate_cidr(s):
    """Validate a CIDR notation ip address.

    The string is considered a valid CIDR address if it consists of a valid
    IPv4 address in dotted-quad format followed by a forward slash (/) and
    a bit mask length (1-32).

    >>> validate_cidr('127.0.0.1/32')
    True
    >>> validate_cidr('127.0/8')
    True
    >>> validate_cidr('127.0.0.256/32')
    False
    >>> validate_cidr('127.0.0.0')
    False
    >>> validate_cidr(LOOPBACK)
    True
    >>> validate_cidr('127.0.0.1/33')
    False
    >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer

    :param s: String to validate as a CIDR notation ip address.
    :type s: str
    :returns: ``True`` if a valid CIDR address, ``False`` otherwise.
    :raises: TypeError
    """
    # Shape check first (ip/prefix), then semantic checks on both halves.
    if not _CIDR_RE.match(s):
        return False
    addr, prefix = s.split('/')
    # the regex guarantees prefix is 1-2 digits, so int() cannot fail
    return validate_ip(addr) and int(prefix) <= 32
# end validate_cidr
def validate_netmask(s):
    """Validate that a dotted-quad ip address is a valid netmask.

    >>> validate_netmask('0.0.0.0')
    True
    >>> validate_netmask('128.0.0.0')
    True
    >>> validate_netmask('255.0.0.0')
    True
    >>> validate_netmask('255.255.255.255')
    True
    >>> validate_netmask(BROADCAST)
    True
    >>> validate_netmask('128.0.0.1')
    False
    >>> validate_netmask('1.255.255.0')
    False
    >>> validate_netmask('0.255.255.0')
    False

    :param s: String to validate as a dotted-quad notation netmask.
    :type s: str
    :returns: ``True`` if a valid netmask, ``False`` otherwise.
    :raises: TypeError
    """
    if not validate_ip(s):
        return False
    # Binary form of the mask, left-padded with zeros to 32 bits.
    bits = bin(ip2network(s))[2:].zfill(32)
    # A netmask is a run of 1s followed only by 0s, which means a '1' may
    # never appear after a '0' anywhere in the bit string.
    return '01' not in bits
# end validate_netmask
def validate_subnet(s):
    """Validate a dotted-quad ip address including a netmask.

    The string is considered a valid dotted-quad address with netmask if it
    consists of one to four octets (0-255) separated by periods (.) followed
    by a forward slash (/) and a subnet bitmask which is expressed in
    dotted-quad format.

    >>> validate_subnet('127.0.0.1/255.255.255.255')
    True
    >>> validate_subnet('127.0/255.0.0.0')
    True
    >>> validate_subnet('127.0/255')
    True
    >>> validate_subnet('127.0.0.256/255.255.255.255')
    False
    >>> validate_subnet('127.0.0.1/255.255.255.256')
    False
    >>> validate_subnet('127.0.0.0')
    False
    >>> validate_subnet(None)
    Traceback (most recent call last):
    ...
    TypeError: expected string or unicode

    :param s: String to validate as a dotted-quad ip address with netmask.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address with netmask,
        ``False`` otherwise.
    :raises: TypeError
    """
    if not isinstance(s, basestring):
        raise TypeError("expected string or unicode")
    if '/' not in s:
        return False
    # Split on the first slash only. The previous split('/', 2) produced
    # three fields for inputs with two slashes ('a/b/c') and blew up with
    # ValueError on tuple unpacking; with maxsplit=1 the surplus text ends
    # up in the mask half and simply fails validation.
    start, mask = s.split('/', 1)
    return validate_ip(start) and validate_netmask(mask)
# end validate_subnet
def ip2long(ip):
    """Convert a dotted-quad ip address to a network byte order 32-bit
    integer.

    >>> ip2long('127.0.0.1')
    2130706433
    >>> ip2long('127.1')
    2130706433
    >>> ip2long('127')
    2130706432
    >>> ip2long('127.0.0.256') is None
    True

    :param ip: Dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    parts = [int(q) for q in ip.split('.')]
    if len(parts) == 1:
        # bare network quad: the remaining three octets are zero
        parts += [0, 0, 0]
    elif len(parts) < 4:
        # partial form: the last quad is the host address, zero-fill the
        # middle so the first quad stays the network part
        parts = parts[:-1] + [0] * (4 - len(parts)) + parts[-1:]
    value = 0
    for octet in parts:
        value = (value << 8) | octet
    return value
# end ip2long
# end ip2network
def long2ip(l):
    """Convert a network byte order 32-bit integer to a dotted quad ip
    address.

    >>> long2ip(2130706433)
    '127.0.0.1'
    >>> long2ip(MIN_IP)
    '0.0.0.0'
    >>> long2ip(MAX_IP)
    '255.255.255.255'
    >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
    >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive

    :param l: Network byte order 32-bit integer.
    :type l: int
    :returns: Dotted-quad ip address (eg. '127.0.0.1').
    :raises: TypeError
    """
    if not MIN_IP <= l <= MAX_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    # peel off one octet per 8-bit shift, most significant first
    octets = [(l >> shift) & 255 for shift in (24, 16, 8, 0)]
    return '.'.join(str(o) for o in octets)
# end long2ip
def ip2hex(addr):
    """Convert a dotted-quad ip address to a hex encoded number.

    >>> ip2hex('0.0.0.1')
    '00000001'
    >>> ip2hex('127.0.0.1')
    '7f000001'
    >>> ip2hex('127.255.255.255')
    '7fffffff'
    >>> ip2hex('128.0.0.1')
    '80000001'
    >>> ip2hex('128.1')
    '80000001'
    >>> ip2hex('255.255.255.255')
    'ffffffff'

    :param addr: Dotted-quad ip address.
    :type addr: str
    :returns: Numeric ip address as a hex-encoded string or ``None`` if
        invalid.
    """
    numeric = ip2long(addr)
    if numeric is None:
        # propagate invalid-address signal from ip2long
        return None
    return "%08x" % numeric
# end ip2hex
def hex2ip(hex_str):
    """Convert a hex encoded integer to a dotted-quad ip address.

    >>> hex2ip('00000001')
    '0.0.0.1'
    >>> hex2ip('7f000001')
    '127.0.0.1'
    >>> hex2ip('7fffffff')
    '127.255.255.255'
    >>> hex2ip('80000001')
    '128.0.0.1'
    >>> hex2ip('ffffffff')
    '255.255.255.255'

    :param hex_str: Numeric ip address as a hex-encoded string.
    :type hex_str: str
    :returns: Dotted-quad ip address or ``None`` if invalid.
    """
    try:
        value = int(hex_str, 16)
    except ValueError:
        # not parseable as hexadecimal
        return None
    return long2ip(value)
# end hex2ip
def cidr2block(cidr):
    """Convert a CIDR notation ip address into a tuple containing the network
    block start and end addresses.

    >>> cidr2block('127.0.0.1/32')
    ('127.0.0.1', '127.0.0.1')
    >>> cidr2block('127/8')
    ('127.0.0.0', '127.255.255.255')
    >>> cidr2block('127.0.1/16')
    ('127.0.0.0', '127.0.255.255')
    >>> cidr2block('127.1/24')
    ('127.1.0.0', '127.1.0.255')
    >>> cidr2block('127.0.0.3/29')
    ('127.0.0.0', '127.0.0.7')
    >>> cidr2block('127/0')
    ('0.0.0.0', '255.255.255.255')

    :param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
    :type cidr: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_cidr(cidr):
        return None
    base, bits = cidr.split('/')
    # partial addresses are treated as all-network, hence ip2network
    return _block_from_ip_and_prefix(ip2network(base), int(bits))
# end cidr2block
def netmask2prefix(mask):
    """Convert a dotted-quad netmask into a CIDR prefix.

    >>> netmask2prefix('255.0.0.0')
    8
    >>> netmask2prefix('255.128.0.0')
    9
    >>> netmask2prefix('255.255.255.254')
    31
    >>> netmask2prefix('255.255.255.255')
    32
    >>> netmask2prefix('0.0.0.0')
    0
    >>> netmask2prefix('127.0.0.1')
    0

    :param mask: Netmask in dotted-quad notation.
    :type mask: str
    :returns: CIDR prefix corresponding to netmask or `0` if invalid.
    """
    if not validate_netmask(mask):
        return 0
    # for a valid mask the prefix length equals the number of 1 bits
    return bin(ip2network(mask)).count('1')
# end netmask2prefix
def subnet2block(subnet):
    """Convert a dotted-quad ip address including a netmask into a tuple
    containing the network block start and end addresses.

    >>> subnet2block('127.0.0.1/255.255.255.255')
    ('127.0.0.1', '127.0.0.1')
    >>> subnet2block('127/255')
    ('127.0.0.0', '127.255.255.255')
    >>> subnet2block('127.0.1/255.255')
    ('127.0.0.0', '127.0.255.255')
    >>> subnet2block('127.1/255.255.255.0')
    ('127.1.0.0', '127.1.0.255')
    >>> subnet2block('127.0.0.3/255.255.255.248')
    ('127.0.0.0', '127.0.0.7')
    >>> subnet2block('127/0')
    ('0.0.0.0', '255.255.255.255')

    :param subnet: dotted-quad ip address with netmask
        (eg. '127.0.0.1/255.0.0.0').
    :type subnet: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_subnet(subnet):
        return None
    base, mask = subnet.split('/')
    # reduce the dotted-quad mask to a prefix length, then reuse the
    # common block computation
    return _block_from_ip_and_prefix(ip2network(base), netmask2prefix(mask))
# end subnet2block
def _block_from_ip_and_prefix(ip, prefix):
    """Create a tuple of (start, end) dotted-quad addresses from the given
    ip address and prefix length.

    :param ip: Ip address in block
    :type ip: long
    :param prefix: Prefix size for block
    :type prefix: int
    :returns: Tuple of block (start, end)
    """
    host_bits = 32 - prefix
    # clear every host bit to get the first address of the block
    start = (ip >> host_bits) << host_bits
    # set every host bit to get the last address of the block
    end = start | ((1 << host_bits) - 1)
    return (long2ip(start), long2ip(end))
# end _block_from_ip_and_prefix
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv4.py
|
long2ip
|
python
|
def long2ip(l):
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
return '%d.%d.%d.%d' % (
l >> 24 & 255, l >> 16 & 255, l >> 8 & 255, l & 255)
|
Convert a network byte order 32-bit integer to a dotted quad ip
address.
>>> long2ip(2130706433)
'127.0.0.1'
>>> long2ip(MIN_IP)
'0.0.0.0'
>>> long2ip(MAX_IP)
'255.255.255.255'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
:param l: Network byte order 32-bit integer.
:type l: int
:returns: Dotted-quad ip address (eg. '127.0.0.1').
:raises: TypeError
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv4.py#L414-L452
| null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
# sniff for python2.x / python3k compatibility "fixes'
try:
    # Python 2 has 'basestring'; re-export it as a module-level alias.
    basestring = basestring
except NameError:
    # 'basestring' is undefined, must be python3k
    basestring = str
try:
    # Python >= 2.6 ships a builtin bin(); alias it when present.
    bin = bin
except NameError:
    # builtin bin function doesn't exist
    def bin(x):
        """
        From http://code.activestate.com/recipes/219300/#c7
        """
        if x < 0:
            return '-' + bin(-x)
        out = []
        if x == 0:
            out.append('0')
        while x > 0:
            out.append('01'[x & 1])
            x >>= 1
            pass
        try:
            # reversed() appeared in Python 2.4; fall through on older
            return '0b' + ''.join(reversed(out))
        except NameError:
            out.reverse()
            return '0b' + ''.join(out)
    # end bin
# end compatibility "fixes'
__all__ = (
'cidr2block',
'hex2ip',
'ip2hex',
'ip2long',
'ip2network',
'long2ip',
'netmask2prefix',
'subnet2block',
'validate_cidr',
'validate_ip',
'validate_netmask',
'validate_subnet',
'BENCHMARK_TESTS',
'BROADCAST',
'CURRENT_NETWORK',
'DUAL_STACK_LITE',
'IETF_PROTOCOL_RESERVED',
'IPV6_TO_IPV4_RELAY',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_INTERNETWORK',
'MULTICAST_LOCAL',
'PRIVATE_NETWORK_10',
'PRIVATE_NETWORK_172_16',
'PRIVATE_NETWORK_192_168',
'RESERVED',
'SHARED_ADDRESS_SPACE',
'TEST_NET_1',
'TEST_NET_2',
'TEST_NET_3',
)
#: Regex for validating an IPv4 address
_DOTTED_QUAD_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}/\d{1,2}$')
#: Maximum IPv4 integer
MAX_IP = 0xffffffff
#: Minimum IPv4 integer
MIN_IP = 0x0
#: Broadcast messages to the current network (only valid as source address)
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
CURRENT_NETWORK = "0.0.0.0/8"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_10 = "10.0.0.0/8"
#: Carrier-grade NAT private network
#: (`RFC 6598 <https://tools.ietf.org/html/rfc6598>`_)
SHARED_ADDRESS_SPACE = "100.64.0.0/10"
#: Loopback addresses on the local host
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOOPBACK = "127.0.0.0/8"
#: Common `localhost` address
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOCALHOST = "127.0.0.1"
#: Autoconfiguration when no IP address available
#: (`RFC 3972 <https://tools.ietf.org/html/rfc3972>`_)
LINK_LOCAL = "169.254.0.0/16"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_172_16 = "172.16.0.0/12"
#: IETF protocol assignments reserved block
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
IETF_PROTOCOL_RESERVED = "192.0.0.0/24"
#: Dual-Stack Lite link address
#: (`RFC 6333 <https://tools.ietf.org/html/rfc6333>`_)
DUAL_STACK_LITE = "192.0.0.0/29"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_1 = "192.0.2.0/24"
#: 6to4 anycast relay
#: (`RFC 3068 <https://tools.ietf.org/html/rfc3068>`_)
IPV6_TO_IPV4_RELAY = "192.88.99.0/24"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_192_168 = "192.168.0.0/16"
#: Inter-network communications testing
#: (`RFC 2544 <https://tools.ietf.org/html/rfc2544>`_)
BENCHMARK_TESTS = "198.18.0.0/15"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_2 = "198.51.100.0/24"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_3 = "203.0.113.0/24"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "224.0.0.0/4"
#: Link local multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_LOCAL = "224.0.0.0/24"
#: Forwardable multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_INTERNETWORK = "224.0.1.0/24"
#: Former Class E address space. Reserved for future use
#: (`RFC 1700 <https://tools.ietf.org/html/rfc1700>`_)
RESERVED = "240.0.0.0/4"
#: Broadcast messages to the current network
#: (only valid as destination address)
#: (`RFC 919 <https://tools.ietf.org/html/rfc919>`_)
BROADCAST = "255.255.255.255"
def validate_ip(s):
    """Validate a dotted-quad ip address.
    The string is considered a valid dotted-quad address if it consists of
    one to four octets (0-255) separated by periods (.).
    >>> validate_ip('127.0.0.1')
    True
    >>> validate_ip('127.0')
    True
    >>> validate_ip('127.0.0.256')
    False
    >>> validate_ip(LOCALHOST)
    True
    >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer
    :param s: String to validate as a dotted-quad ip address.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
    :raises: TypeError
    """
    if _DOTTED_QUAD_RE.match(s):
        quads = s.split('.')
        for q in quads:
            # the regex only bounds the digit count; enforce 0-255 here
            if int(q) > 255:
                return False
        return True
    return False
# end validate_ip
def validate_cidr(s):
    """Validate a CIDR notation ip address.
    The string is considered a valid CIDR address if it consists of a valid
    IPv4 address in dotted-quad format followed by a forward slash (/) and
    a bit mask length (1-32).
    >>> validate_cidr('127.0.0.1/32')
    True
    >>> validate_cidr('127.0/8')
    True
    >>> validate_cidr('127.0.0.256/32')
    False
    >>> validate_cidr('127.0.0.0')
    False
    >>> validate_cidr(LOOPBACK)
    True
    >>> validate_cidr('127.0.0.1/33')
    False
    >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer
    :param s: String to validate as a CIDR notation ip address.
    :type s: str
    :returns: ``True`` if a valid CIDR address, ``False`` otherwise.
    :raises: TypeError
    """
    if _CIDR_RE.match(s):
        ip, mask = s.split('/')
        if validate_ip(ip):
            # an IPv4 prefix cannot be longer than 32 bits
            if int(mask) > 32:
                return False
        else:
            return False
        return True
    return False
# end validate_cidr
def validate_netmask(s):
    """Validate that a dotted-quad ip address is a valid netmask.
    >>> validate_netmask('0.0.0.0')
    True
    >>> validate_netmask('128.0.0.0')
    True
    >>> validate_netmask('255.0.0.0')
    True
    >>> validate_netmask('255.255.255.255')
    True
    >>> validate_netmask(BROADCAST)
    True
    >>> validate_netmask('128.0.0.1')
    False
    >>> validate_netmask('1.255.255.0')
    False
    >>> validate_netmask('0.255.255.0')
    False
    :param s: String to validate as a dotted-quad notation netmask.
    :type s: str
    :returns: ``True`` if a valid netmask, ``False`` otherwise.
    :raises: TypeError
    """
    if validate_ip(s):
        # Convert to binary string, strip '0b' prefix, 0 pad to 32 bits
        mask = bin(ip2network(s))[2:].zfill(32)
        # all left most bits must be 1, all right most must be 0
        # (seen0 records whether a '0' bit has been encountered yet; a '1'
        # after any '0' makes the mask non-contiguous and thus invalid)
        seen0 = False
        for c in mask:
            if '1' == c:
                if seen0:
                    return False
            else:
                seen0 = True
        return True
    else:
        return False
# end validate_netmask
def validate_subnet(s):
    """Validate a dotted-quad ip address including a netmask.
    The string is considered a valid dotted-quad address with netmask if it
    consists of one to four octets (0-255) separated by periods (.) followed
    by a forward slash (/) and a subnet bitmask which is expressed in
    dotted-quad format.
    >>> validate_subnet('127.0.0.1/255.255.255.255')
    True
    >>> validate_subnet('127.0/255.0.0.0')
    True
    >>> validate_subnet('127.0/255')
    True
    >>> validate_subnet('127.0.0.256/255.255.255.255')
    False
    >>> validate_subnet('127.0.0.1/255.255.255.256')
    False
    >>> validate_subnet('127.0.0.0')
    False
    >>> validate_subnet(None)
    Traceback (most recent call last):
    ...
    TypeError: expected string or unicode
    :param s: String to validate as a dotted-quad ip address with netmask.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address with netmask,
        ``False`` otherwise.
    :raises: TypeError
    """
    if isinstance(s, basestring):
        if '/' in s:
            # NOTE(review): maxsplit=2 yields three fields for inputs with
            # two slashes ('a/b/c'), so this unpack raises ValueError
            # instead of returning False -- maxsplit=1 looks intended.
            start, mask = s.split('/', 2)
            return validate_ip(start) and validate_netmask(mask)
        else:
            return False
    raise TypeError("expected string or unicode")
# end validate_subnet
def ip2long(ip):
    """Convert a dotted-quad ip address to a network byte order 32-bit
    integer.
    >>> ip2long('127.0.0.1')
    2130706433
    >>> ip2long('127.1')
    2130706433
    >>> ip2long('127')
    2130706432
    >>> ip2long('127.0.0.256') is None
    True
    :param ip: Dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    quads = ip.split('.')
    if len(quads) == 1:
        # only a network quad
        quads = quads + [0, 0, 0]
    elif len(quads) < 4:
        # partial form, last supplied quad is host address, rest is network
        host = quads[-1:]
        quads = quads[:-1] + [0, ] * (4 - len(quads)) + host
    lngip = 0
    # fold each octet into the accumulator, most significant first
    # (int() accepts both the string quads and the int zero-padding)
    for q in quads:
        lngip = (lngip << 8) | int(q)
    return lngip
# end ip2long
def ip2network(ip):
    """Convert a dotted-quad ip to base network number.
    This differs from :func:`ip2long` in that partial addresses are treated as
    all network instead of network plus host (eg. '127.1' expands to
    '127.1.0.0')
    :param ip: dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or `None` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    quads = ip.split('.')
    netw = 0
    # fold each supplied quad into the accumulator; missing quads count as
    # zero, so partial addresses pad on the right (all-network semantics)
    for i in range(4):
        netw = (netw << 8) | int(len(quads) > i and quads[i] or 0)
    return netw
# end ip2network
# end long2ip
def ip2hex(addr):
    """Convert a dotted-quad ip address to a hex encoded number.
    >>> ip2hex('0.0.0.1')
    '00000001'
    >>> ip2hex('127.0.0.1')
    '7f000001'
    >>> ip2hex('127.255.255.255')
    '7fffffff'
    >>> ip2hex('128.0.0.1')
    '80000001'
    >>> ip2hex('128.1')
    '80000001'
    >>> ip2hex('255.255.255.255')
    'ffffffff'
    :param addr: Dotted-quad ip address.
    :type addr: str
    :returns: Numeric ip address as a hex-encoded string or ``None`` if
        invalid.
    """
    netip = ip2long(addr)
    if netip is None:
        # ip2long signals an invalid address with None
        return None
    return "%08x" % netip
# end ip2hex
def hex2ip(hex_str):
    """Convert a hex encoded integer to a dotted-quad ip address.
    >>> hex2ip('00000001')
    '0.0.0.1'
    >>> hex2ip('7f000001')
    '127.0.0.1'
    >>> hex2ip('7fffffff')
    '127.255.255.255'
    >>> hex2ip('80000001')
    '128.0.0.1'
    >>> hex2ip('ffffffff')
    '255.255.255.255'
    :param hex_str: Numeric ip address as a hex-encoded string.
    :type hex_str: str
    :returns: Dotted-quad ip address or ``None`` if invalid.
    """
    try:
        netip = int(hex_str, 16)
    except ValueError:
        # not parseable as hexadecimal
        return None
    return long2ip(netip)
# end hex2ip
def cidr2block(cidr):
    """Convert a CIDR notation ip address into a tuple containing the network
    block start and end addresses.
    >>> cidr2block('127.0.0.1/32')
    ('127.0.0.1', '127.0.0.1')
    >>> cidr2block('127/8')
    ('127.0.0.0', '127.255.255.255')
    >>> cidr2block('127.0.1/16')
    ('127.0.0.0', '127.0.255.255')
    >>> cidr2block('127.1/24')
    ('127.1.0.0', '127.1.0.255')
    >>> cidr2block('127.0.0.3/29')
    ('127.0.0.0', '127.0.0.7')
    >>> cidr2block('127/0')
    ('0.0.0.0', '255.255.255.255')
    :param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
    :type cidr: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_cidr(cidr):
        return None
    ip, prefix = cidr.split('/')
    prefix = int(prefix)
    # convert dotted-quad ip to base network number
    network = ip2network(ip)
    return _block_from_ip_and_prefix(network, prefix)
# end cidr2block
def netmask2prefix(mask):
    """Convert a dotted-quad netmask into a CIDR prefix.
    >>> netmask2prefix('255.0.0.0')
    8
    >>> netmask2prefix('255.128.0.0')
    9
    >>> netmask2prefix('255.255.255.254')
    31
    >>> netmask2prefix('255.255.255.255')
    32
    >>> netmask2prefix('0.0.0.0')
    0
    >>> netmask2prefix('127.0.0.1')
    0
    :param mask: Netmask in dotted-quad notation.
    :type mask: str
    :returns: CIDR prefix corresponding to netmask or `0` if invalid.
    """
    if validate_netmask(mask):
        # for a valid mask the prefix length equals the count of 1 bits
        return bin(ip2network(mask)).count('1')
    return 0
# end netmask2prefix
def subnet2block(subnet):
    """Convert a dotted-quad ip address including a netmask into a tuple
    containing the network block start and end addresses.
    >>> subnet2block('127.0.0.1/255.255.255.255')
    ('127.0.0.1', '127.0.0.1')
    >>> subnet2block('127/255')
    ('127.0.0.0', '127.255.255.255')
    >>> subnet2block('127.0.1/255.255')
    ('127.0.0.0', '127.0.255.255')
    >>> subnet2block('127.1/255.255.255.0')
    ('127.1.0.0', '127.1.0.255')
    >>> subnet2block('127.0.0.3/255.255.255.248')
    ('127.0.0.0', '127.0.0.7')
    >>> subnet2block('127/0')
    ('0.0.0.0', '255.255.255.255')
    :param subnet: dotted-quad ip address with netmask
        (eg. '127.0.0.1/255.0.0.0').
    :type subnet: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_subnet(subnet):
        return None
    ip, netmask = subnet.split('/')
    # reduce the dotted-quad mask to a prefix length
    prefix = netmask2prefix(netmask)
    # convert dotted-quad ip to base network number
    network = ip2network(ip)
    return _block_from_ip_and_prefix(network, prefix)
# end subnet2block
def _block_from_ip_and_prefix(ip, prefix):
    """Create a tuple of (start, end) dotted-quad addresses from the given
    ip address and prefix length.
    :param ip: Ip address in block
    :type ip: long
    :param prefix: Prefix size for block
    :type prefix: int
    :returns: Tuple of block (start, end)
    """
    # keep left most prefix bits of ip
    shift = 32 - prefix
    block_start = ip >> shift << shift
    # expand right most 32 - prefix bits to 1
    mask = (1 << shift) - 1
    block_end = block_start | mask
    return (long2ip(block_start), long2ip(block_end))
# end _block_from_ip_and_prefix
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv4.py
|
cidr2block
|
python
|
def cidr2block(cidr):
if not validate_cidr(cidr):
return None
ip, prefix = cidr.split('/')
prefix = int(prefix)
# convert dotted-quad ip to base network number
network = ip2network(ip)
return _block_from_ip_and_prefix(network, prefix)
|
Convert a CIDR notation ip address into a tuple containing the network
block start and end addresses.
>>> cidr2block('127.0.0.1/32')
('127.0.0.1', '127.0.0.1')
>>> cidr2block('127/8')
('127.0.0.0', '127.255.255.255')
>>> cidr2block('127.0.1/16')
('127.0.0.0', '127.0.255.255')
>>> cidr2block('127.1/24')
('127.1.0.0', '127.1.0.255')
>>> cidr2block('127.0.0.3/29')
('127.0.0.0', '127.0.0.7')
>>> cidr2block('127/0')
('0.0.0.0', '255.255.255.255')
:param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
:type cidr: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv4.py#L514-L547
|
[
"def validate_cidr(s):\n \"\"\"Validate a CIDR notation ip address.\n\n The string is considered a valid CIDR address if it consists of a valid\n IPv4 address in dotted-quad format followed by a forward slash (/) and\n a bit mask length (1-32).\n\n\n >>> validate_cidr('127.0.0.1/32')\n True\n >>> validate_cidr('127.0/8')\n True\n >>> validate_cidr('127.0.0.256/32')\n False\n >>> validate_cidr('127.0.0.0')\n False\n >>> validate_cidr(LOOPBACK)\n True\n >>> validate_cidr('127.0.0.1/33')\n False\n >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n TypeError: expected string or buffer\n\n\n :param s: String to validate as a CIDR notation ip address.\n :type s: str\n :returns: ``True`` if a valid CIDR address, ``False`` otherwise.\n :raises: TypeError\n \"\"\"\n if _CIDR_RE.match(s):\n ip, mask = s.split('/')\n if validate_ip(ip):\n if int(mask) > 32:\n return False\n else:\n return False\n return True\n return False\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
# sniff for python2.x / python3k compatibility "fixes'
try:
    # Python 2 has 'basestring'; re-export it as a module-level alias.
    basestring = basestring
except NameError:
    # 'basestring' is undefined, must be python3k
    basestring = str
try:
    # Python >= 2.6 ships a builtin bin(); alias it when present.
    bin = bin
except NameError:
    # builtin bin function doesn't exist
    def bin(x):
        """
        From http://code.activestate.com/recipes/219300/#c7
        """
        if x < 0:
            return '-' + bin(-x)
        out = []
        if x == 0:
            out.append('0')
        while x > 0:
            out.append('01'[x & 1])
            x >>= 1
            pass
        try:
            # reversed() appeared in Python 2.4; fall through on older
            return '0b' + ''.join(reversed(out))
        except NameError:
            out.reverse()
            return '0b' + ''.join(out)
    # end bin
# end compatibility "fixes'
__all__ = (
'cidr2block',
'hex2ip',
'ip2hex',
'ip2long',
'ip2network',
'long2ip',
'netmask2prefix',
'subnet2block',
'validate_cidr',
'validate_ip',
'validate_netmask',
'validate_subnet',
'BENCHMARK_TESTS',
'BROADCAST',
'CURRENT_NETWORK',
'DUAL_STACK_LITE',
'IETF_PROTOCOL_RESERVED',
'IPV6_TO_IPV4_RELAY',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_INTERNETWORK',
'MULTICAST_LOCAL',
'PRIVATE_NETWORK_10',
'PRIVATE_NETWORK_172_16',
'PRIVATE_NETWORK_192_168',
'RESERVED',
'SHARED_ADDRESS_SPACE',
'TEST_NET_1',
'TEST_NET_2',
'TEST_NET_3',
)
#: Regex for validating an IPv4 address
_DOTTED_QUAD_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}/\d{1,2}$')
#: Maximum IPv4 integer
MAX_IP = 0xffffffff
#: Minimum IPv4 integer
MIN_IP = 0x0
#: Broadcast messages to the current network (only valid as source address)
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
CURRENT_NETWORK = "0.0.0.0/8"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_10 = "10.0.0.0/8"
#: Carrier-grade NAT private network
#: (`RFC 6598 <https://tools.ietf.org/html/rfc6598>`_)
SHARED_ADDRESS_SPACE = "100.64.0.0/10"
#: Loopback addresses on the local host
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOOPBACK = "127.0.0.0/8"
#: Common `localhost` address
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOCALHOST = "127.0.0.1"
#: Autoconfiguration when no IP address available
#: (`RFC 3972 <https://tools.ietf.org/html/rfc3972>`_)
LINK_LOCAL = "169.254.0.0/16"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_172_16 = "172.16.0.0/12"
#: IETF protocol assignments reserved block
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
IETF_PROTOCOL_RESERVED = "192.0.0.0/24"
#: Dual-Stack Lite link address
#: (`RFC 6333 <https://tools.ietf.org/html/rfc6333>`_)
DUAL_STACK_LITE = "192.0.0.0/29"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_1 = "192.0.2.0/24"
#: 6to4 anycast relay
#: (`RFC 3068 <https://tools.ietf.org/html/rfc3068>`_)
IPV6_TO_IPV4_RELAY = "192.88.99.0/24"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_192_168 = "192.168.0.0/16"
#: Inter-network communications testing
#: (`RFC 2544 <https://tools.ietf.org/html/rfc2544>`_)
BENCHMARK_TESTS = "198.18.0.0/15"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_2 = "198.51.100.0/24"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_3 = "203.0.113.0/24"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "224.0.0.0/4"
#: Link local multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_LOCAL = "224.0.0.0/24"
#: Forwardable multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_INTERNETWORK = "224.0.1.0/24"
#: Former Class E address space. Reserved for future use
#: (`RFC 1700 <https://tools.ietf.org/html/rfc1700>`_)
RESERVED = "240.0.0.0/4"
#: Broadcast messages to the current network
#: (only valid as destination address)
#: (`RFC 919 <https://tools.ietf.org/html/rfc919>`_)
BROADCAST = "255.255.255.255"
def validate_ip(s):
    """Validate a dotted-quad ip address.

    The string is considered a valid dotted-quad address if it consists of
    one to four octets (0-255) separated by periods (.).

    >>> validate_ip('127.0.0.1')
    True
    >>> validate_ip('127.0')
    True
    >>> validate_ip('127.0.0.256')
    False
    >>> validate_ip(LOCALHOST)
    True
    >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer

    :param s: String to validate as a dotted-quad ip address.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
    :raises: TypeError
    """
    # The regex only constrains the shape (1-4 numeric groups); each
    # octet's numeric range still has to be checked separately.
    if _DOTTED_QUAD_RE.match(s) is None:
        return False
    return all(int(octet) <= 255 for octet in s.split('.'))
# end validate_ip
def validate_cidr(s):
    """Validate a CIDR notation ip address.

    The string is considered a valid CIDR address if it consists of a valid
    IPv4 address in dotted-quad format followed by a forward slash (/) and
    a bit mask length (1-32).

    >>> validate_cidr('127.0.0.1/32')
    True
    >>> validate_cidr('127.0/8')
    True
    >>> validate_cidr('127.0.0.256/32')
    False
    >>> validate_cidr('127.0.0.0')
    False
    >>> validate_cidr(LOOPBACK)
    True
    >>> validate_cidr('127.0.0.1/33')
    False
    >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer

    :param s: String to validate as a CIDR notation ip address.
    :type s: str
    :returns: ``True`` if a valid CIDR address, ``False`` otherwise.
    :raises: TypeError
    """
    if _CIDR_RE.match(s) is None:
        return False
    addr, prefix = s.split('/')
    # the regex guarantees prefix is 1-2 digits, so int() cannot fail
    return validate_ip(addr) and int(prefix) <= 32
# end validate_cidr
def validate_netmask(s):
    """Validate that a dotted-quad ip address is a valid netmask.

    >>> validate_netmask('0.0.0.0')
    True
    >>> validate_netmask('128.0.0.0')
    True
    >>> validate_netmask('255.0.0.0')
    True
    >>> validate_netmask('255.255.255.255')
    True
    >>> validate_netmask(BROADCAST)
    True
    >>> validate_netmask('128.0.0.1')
    False
    >>> validate_netmask('1.255.255.0')
    False
    >>> validate_netmask('0.255.255.0')
    False

    :param s: String to validate as a dotted-quad notation netmask.
    :type s: str
    :returns: ``True`` if a valid netmask, ``False`` otherwise.
    :raises: TypeError
    """
    if not validate_ip(s):
        return False
    # Convert to a binary string, strip the '0b' prefix, zero-pad to 32
    # bits. A valid netmask is a run of 1-bits followed by a run of
    # 0-bits, so any '01' transition (a 1 after a 0) makes it invalid.
    bits = bin(ip2network(s))[2:].zfill(32)
    return '01' not in bits
# end validate_netmask
def validate_subnet(s):
    """Validate a dotted-quad ip address including a netmask.

    The string is considered a valid dotted-quad address with netmask if it
    consists of one to four octets (0-255) separated by periods (.) followed
    by a forward slash (/) and a subnet bitmask which is expressed in
    dotted-quad format.

    >>> validate_subnet('127.0.0.1/255.255.255.255')
    True
    >>> validate_subnet('127.0/255.0.0.0')
    True
    >>> validate_subnet('127.0/255')
    True
    >>> validate_subnet('127.0.0.256/255.255.255.255')
    False
    >>> validate_subnet('127.0.0.1/255.255.255.256')
    False
    >>> validate_subnet('127.0.0.0')
    False
    >>> validate_subnet(None)
    Traceback (most recent call last):
    ...
    TypeError: expected string or unicode

    :param s: String to validate as a dotted-quad ip address with netmask.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address with netmask,
        ``False`` otherwise.
    :raises: TypeError
    """
    if isinstance(s, basestring):
        if '/' in s:
            # maxsplit=1 (was 2): with maxsplit=2 an input containing two
            # slashes (eg. '127.0/255/1') split into three parts and the
            # two-name unpacking raised ValueError instead of returning
            # False. With maxsplit=1 the extra slash stays in the mask
            # portion and simply fails netmask validation.
            start, mask = s.split('/', 1)
            return validate_ip(start) and validate_netmask(mask)
        else:
            return False
    raise TypeError("expected string or unicode")
# end validate_subnet
def ip2long(ip):
    """Convert a dotted-quad ip address to a network byte order 32-bit
    integer.

    >>> ip2long('127.0.0.1')
    2130706433
    >>> ip2long('127.1')
    2130706433
    >>> ip2long('127')
    2130706432
    >>> ip2long('127.0.0.256') is None
    True

    :param ip: Dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    octets = ip.split('.')
    if len(octets) == 1:
        # a bare network number: the single quad is the network part
        octets += ['0', '0', '0']
    elif len(octets) < 4:
        # partial form: the last quad is the host part, zero-fill between
        octets = octets[:-1] + ['0'] * (4 - len(octets)) + octets[-1:]
    value = 0
    for octet in octets:
        value = (value << 8) | int(octet)
    return value
# end ip2long
def ip2network(ip):
    """Convert a dotted-quad ip to base network number.

    This differs from :func:`ip2long` in that partial addresses are treated
    as all network instead of network plus host (eg. '127.1' expands to
    '127.1.0.0').

    :param ip: dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or `None` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    octets = ip.split('.')
    # right-pad missing quads with zeros so '127.1' becomes 127.1.0.0
    octets += ['0'] * (4 - len(octets))
    network = 0
    for octet in octets:
        network = (network << 8) | int(octet)
    return network
# end ip2network
def long2ip(l):
    """Convert a network byte order 32-bit integer to a dotted quad ip
    address.

    >>> long2ip(2130706433)
    '127.0.0.1'
    >>> long2ip(MIN_IP)
    '0.0.0.0'
    >>> long2ip(MAX_IP)
    '255.255.255.255'
    >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
    >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    :param l: Network byte order 32-bit integer.
    :type l: int
    :returns: Dotted-quad ip address (eg. '127.0.0.1').
    :raises: TypeError
    """
    # comparing a non-int (eg. None) raises TypeError, matching the doctest
    if l < MIN_IP or l > MAX_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    # peel off each octet, most significant first
    return '.'.join(str((l >> shift) & 255) for shift in (24, 16, 8, 0))
# end long2ip
def ip2hex(addr):
    """Convert a dotted-quad ip address to a hex encoded number.

    >>> ip2hex('0.0.0.1')
    '00000001'
    >>> ip2hex('127.0.0.1')
    '7f000001'
    >>> ip2hex('127.255.255.255')
    '7fffffff'
    >>> ip2hex('128.0.0.1')
    '80000001'
    >>> ip2hex('128.1')
    '80000001'
    >>> ip2hex('255.255.255.255')
    'ffffffff'

    :param addr: Dotted-quad ip address.
    :type addr: str
    :returns: Numeric ip address as a hex-encoded string or ``None`` if
        invalid.
    """
    value = ip2long(addr)
    if value is None:
        return None
    # zero-padded to 8 hex digits (32 bits)
    return '{0:08x}'.format(value)
# end ip2hex
def hex2ip(hex_str):
    """Convert a hex encoded integer to a dotted-quad ip address.

    >>> hex2ip('00000001')
    '0.0.0.1'
    >>> hex2ip('7f000001')
    '127.0.0.1'
    >>> hex2ip('7fffffff')
    '127.255.255.255'
    >>> hex2ip('80000001')
    '128.0.0.1'
    >>> hex2ip('ffffffff')
    '255.255.255.255'

    :param hex_str: Numeric ip address as a hex-encoded string.
    :type hex_str: str
    :returns: Dotted-quad ip address or ``None`` if invalid.
    """
    # long2ip raises TypeError (not ValueError) for out-of-range values,
    # so only a malformed hex string is converted into a None return
    try:
        return long2ip(int(hex_str, 16))
    except ValueError:
        return None
# end hex2ip
# end cidr2block
def netmask2prefix(mask):
    """Convert a dotted-quad netmask into a CIDR prefix.

    >>> netmask2prefix('255.0.0.0')
    8
    >>> netmask2prefix('255.128.0.0')
    9
    >>> netmask2prefix('255.255.255.254')
    31
    >>> netmask2prefix('255.255.255.255')
    32
    >>> netmask2prefix('0.0.0.0')
    0
    >>> netmask2prefix('127.0.0.1')
    0

    :param mask: Netmask in dotted-quad notation.
    :type mask: str
    :returns: CIDR prefix corresponding to netmask or `0` if invalid.
    """
    if not validate_netmask(mask):
        return 0
    # the prefix length is simply the count of 1-bits in a valid mask
    return bin(ip2network(mask)).count('1')
# end netmask2prefix
def subnet2block(subnet):
    """Convert a dotted-quad ip address including a netmask into a tuple
    containing the network block start and end addresses.

    >>> subnet2block('127.0.0.1/255.255.255.255')
    ('127.0.0.1', '127.0.0.1')
    >>> subnet2block('127/255')
    ('127.0.0.0', '127.255.255.255')
    >>> subnet2block('127.0.1/255.255')
    ('127.0.0.0', '127.0.255.255')
    >>> subnet2block('127.1/255.255.255.0')
    ('127.1.0.0', '127.1.0.255')
    >>> subnet2block('127.0.0.3/255.255.255.248')
    ('127.0.0.0', '127.0.0.7')
    >>> subnet2block('127/0')
    ('0.0.0.0', '255.255.255.255')

    :param subnet: dotted-quad ip address with netmask
        (eg. '127.0.0.1/255.0.0.0').
    :type subnet: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_subnet(subnet):
        return None
    addr, mask = subnet.split('/')
    # reduce the mask to a prefix length and the address to its base
    # network number before computing the block bounds
    return _block_from_ip_and_prefix(ip2network(addr), netmask2prefix(mask))
# end subnet2block
def _block_from_ip_and_prefix(ip, prefix):
    """Create a tuple of (start, end) dotted-quad addresses from the given
    ip address and prefix length.

    :param ip: Ip address in block
    :type ip: long
    :param prefix: Prefix size for block
    :type prefix: int
    :returns: Tuple of block (start, end)
    """
    host_bits = 32 - prefix
    # clear the host bits to get the first address in the block
    start = (ip >> host_bits) << host_bits
    # set all host bits to get the last address in the block
    end = start | ((1 << host_bits) - 1)
    return (long2ip(start), long2ip(end))
# end _block_from_ip_and_prefix
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv4.py
|
subnet2block
|
python
|
def subnet2block(subnet):
if not validate_subnet(subnet):
return None
ip, netmask = subnet.split('/')
prefix = netmask2prefix(netmask)
# convert dotted-quad ip to base network number
network = ip2network(ip)
return _block_from_ip_and_prefix(network, prefix)
|
Convert a dotted-quad ip address including a netmask into a tuple
containing the network block start and end addresses.
>>> subnet2block('127.0.0.1/255.255.255.255')
('127.0.0.1', '127.0.0.1')
>>> subnet2block('127/255')
('127.0.0.0', '127.255.255.255')
>>> subnet2block('127.0.1/255.255')
('127.0.0.0', '127.0.255.255')
>>> subnet2block('127.1/255.255.255.0')
('127.1.0.0', '127.1.0.255')
>>> subnet2block('127.0.0.3/255.255.255.248')
('127.0.0.0', '127.0.0.7')
>>> subnet2block('127/0')
('0.0.0.0', '255.255.255.255')
:param subnet: dotted-quad ip address with netmask
(eg. '127.0.0.1/255.0.0.0').
:type subnet: str
:returns: Tuple of block (start, end) or ``None`` if invalid.
:raises: TypeError
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv4.py#L579-L613
|
[
"def validate_subnet(s):\n \"\"\"Validate a dotted-quad ip address including a netmask.\n\n The string is considered a valid dotted-quad address with netmask if it\n consists of one to four octets (0-255) seperated by periods (.) followed\n by a forward slash (/) and a subnet bitmask which is expressed in\n dotted-quad format.\n\n\n >>> validate_subnet('127.0.0.1/255.255.255.255')\n True\n >>> validate_subnet('127.0/255.0.0.0')\n True\n >>> validate_subnet('127.0/255')\n True\n >>> validate_subnet('127.0.0.256/255.255.255.255')\n False\n >>> validate_subnet('127.0.0.1/255.255.255.256')\n False\n >>> validate_subnet('127.0.0.0')\n False\n >>> validate_subnet(None)\n Traceback (most recent call last):\n ...\n TypeError: expected string or unicode\n\n\n :param s: String to validate as a dotted-quad ip address with netmask.\n :type s: str\n :returns: ``True`` if a valid dotted-quad ip address with netmask,\n ``False`` otherwise.\n :raises: TypeError\n \"\"\"\n if isinstance(s, basestring):\n if '/' in s:\n start, mask = s.split('/', 2)\n return validate_ip(start) and validate_netmask(mask)\n else:\n return False\n raise TypeError(\"expected string or unicode\")\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
# sniff for python2.x / python3k compatibility "fixes'
try:
basestring = basestring
except NameError:
# 'basestring' is undefined, must be python3k
basestring = str
try:
bin = bin
except NameError:
# builtin bin function doesn't exist
def bin(x):
"""
From http://code.activestate.com/recipes/219300/#c7
"""
if x < 0:
return '-' + bin(-x)
out = []
if x == 0:
out.append('0')
while x > 0:
out.append('01'[x & 1])
x >>= 1
pass
try:
return '0b' + ''.join(reversed(out))
except NameError:
out.reverse()
return '0b' + ''.join(out)
# end bin
# end compatibility "fixes'
__all__ = (
'cidr2block',
'hex2ip',
'ip2hex',
'ip2long',
'ip2network',
'long2ip',
'netmask2prefix',
'subnet2block',
'validate_cidr',
'validate_ip',
'validate_netmask',
'validate_subnet',
'BENCHMARK_TESTS',
'BROADCAST',
'CURRENT_NETWORK',
'DUAL_STACK_LITE',
'IETF_PROTOCOL_RESERVED',
'IPV6_TO_IPV4_RELAY',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_INTERNETWORK',
'MULTICAST_LOCAL',
'PRIVATE_NETWORK_10',
'PRIVATE_NETWORK_172_16',
'PRIVATE_NETWORK_192_168',
'RESERVED',
'SHARED_ADDRESS_SPACE',
'TEST_NET_1',
'TEST_NET_2',
'TEST_NET_3',
)
#: Regex for validating an IPv4 address
_DOTTED_QUAD_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}/\d{1,2}$')
#: Maximum IPv4 integer
MAX_IP = 0xffffffff
#: Minimum IPv4 integer
MIN_IP = 0x0
#: Broadcast messages to the current network (only valid as source address)
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
CURRENT_NETWORK = "0.0.0.0/8"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_10 = "10.0.0.0/8"
#: Carrier-grade NAT private network
#: (`RFC 6598 <https://tools.ietf.org/html/rfc6598>`_)
SHARED_ADDRESS_SPACE = "100.64.0.0/10"
#: Loopback addresses on the local host
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOOPBACK = "127.0.0.0/8"
#: Common `localhost` address
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOCALHOST = "127.0.0.1"
#: Autoconfiguration when no IP address available
#: (`RFC 3972 <https://tools.ietf.org/html/rfc3972>`_)
LINK_LOCAL = "169.254.0.0/16"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_172_16 = "172.16.0.0/12"
#: IETF protocol assignments reserved block
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
IETF_PROTOCOL_RESERVED = "192.0.0.0/24"
#: Dual-Stack Lite link address
#: (`RFC 6333 <https://tools.ietf.org/html/rfc6333>`_)
DUAL_STACK_LITE = "192.0.0.0/29"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_1 = "192.0.2.0/24"
#: 6to4 anycast relay
#: (`RFC 3068 <https://tools.ietf.org/html/rfc3068>`_)
IPV6_TO_IPV4_RELAY = "192.88.99.0/24"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_192_168 = "192.168.0.0/16"
#: Inter-network communications testing
#: (`RFC 2544 <https://tools.ietf.org/html/rfc2544>`_)
BENCHMARK_TESTS = "198.18.0.0/15"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_2 = "198.51.100.0/24"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_3 = "203.0.113.0/24"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "224.0.0.0/4"
#: Link local multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_LOCAL = "224.0.0.0/24"
#: Forwardable multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_INTERNETWORK = "224.0.1.0/24"
#: Former Class E address space. Reserved for future use
#: (`RFC 1700 <https://tools.ietf.org/html/rfc1700>`_)
RESERVED = "240.0.0.0/4"
#: Broadcast messages to the current network
#: (only valid as destination address)
#: (`RFC 919 <https://tools.ietf.org/html/rfc919>`_)
BROADCAST = "255.255.255.255"
def validate_ip(s):
"""Validate a dotted-quad ip address.
The string is considered a valid dotted-quad address if it consists of
one to four octets (0-255) seperated by periods (.).
>>> validate_ip('127.0.0.1')
True
>>> validate_ip('127.0')
True
>>> validate_ip('127.0.0.256')
False
>>> validate_ip(LOCALHOST)
True
>>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a dotted-quad ip address.
:type s: str
:returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
:raises: TypeError
"""
if _DOTTED_QUAD_RE.match(s):
quads = s.split('.')
for q in quads:
if int(q) > 255:
return False
return True
return False
# end validate_ip
def validate_cidr(s):
"""Validate a CIDR notation ip address.
The string is considered a valid CIDR address if it consists of a valid
IPv4 address in dotted-quad format followed by a forward slash (/) and
a bit mask length (1-32).
>>> validate_cidr('127.0.0.1/32')
True
>>> validate_cidr('127.0/8')
True
>>> validate_cidr('127.0.0.256/32')
False
>>> validate_cidr('127.0.0.0')
False
>>> validate_cidr(LOOPBACK)
True
>>> validate_cidr('127.0.0.1/33')
False
>>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected string or buffer
:param s: String to validate as a CIDR notation ip address.
:type s: str
:returns: ``True`` if a valid CIDR address, ``False`` otherwise.
:raises: TypeError
"""
if _CIDR_RE.match(s):
ip, mask = s.split('/')
if validate_ip(ip):
if int(mask) > 32:
return False
else:
return False
return True
return False
# end validate_cidr
def validate_netmask(s):
"""Validate that a dotted-quad ip address is a valid netmask.
>>> validate_netmask('0.0.0.0')
True
>>> validate_netmask('128.0.0.0')
True
>>> validate_netmask('255.0.0.0')
True
>>> validate_netmask('255.255.255.255')
True
>>> validate_netmask(BROADCAST)
True
>>> validate_netmask('128.0.0.1')
False
>>> validate_netmask('1.255.255.0')
False
>>> validate_netmask('0.255.255.0')
False
:param s: String to validate as a dotted-quad notation netmask.
:type s: str
:returns: ``True`` if a valid netmask, ``False`` otherwise.
:raises: TypeError
"""
if validate_ip(s):
# Convert to binary string, strip '0b' prefix, 0 pad to 32 bits
mask = bin(ip2network(s))[2:].zfill(32)
# all left most bits must be 1, all right most must be 0
seen0 = False
for c in mask:
if '1' == c:
if seen0:
return False
else:
seen0 = True
return True
else:
return False
# end validate_netmask
def validate_subnet(s):
    """Validate a dotted-quad ip address including a netmask.

    The string is considered a valid dotted-quad address with netmask if it
    consists of one to four octets (0-255) separated by periods (.) followed
    by a forward slash (/) and a subnet bitmask which is expressed in
    dotted-quad format.

    >>> validate_subnet('127.0.0.1/255.255.255.255')
    True
    >>> validate_subnet('127.0/255.0.0.0')
    True
    >>> validate_subnet('127.0/255')
    True
    >>> validate_subnet('127.0.0.256/255.255.255.255')
    False
    >>> validate_subnet('127.0.0.1/255.255.255.256')
    False
    >>> validate_subnet('127.0.0.0')
    False
    >>> validate_subnet(None)
    Traceback (most recent call last):
    ...
    TypeError: expected string or unicode

    :param s: String to validate as a dotted-quad ip address with netmask.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address with netmask,
        ``False`` otherwise.
    :raises: TypeError
    """
    if isinstance(s, basestring):
        if '/' in s:
            # maxsplit=1 (was 2): with maxsplit=2 an input containing two
            # slashes (eg. '127.0/255/1') split into three parts and the
            # two-name unpacking raised ValueError instead of returning
            # False. With maxsplit=1 the extra slash stays in the mask
            # portion and simply fails netmask validation.
            start, mask = s.split('/', 1)
            return validate_ip(start) and validate_netmask(mask)
        else:
            return False
    raise TypeError("expected string or unicode")
# end validate_subnet
def ip2long(ip):
"""Convert a dotted-quad ip address to a network byte order 32-bit
integer.
>>> ip2long('127.0.0.1')
2130706433
>>> ip2long('127.1')
2130706433
>>> ip2long('127')
2130706432
>>> ip2long('127.0.0.256') is None
True
:param ip: Dotted-quad ip address (eg. '127.0.0.1').
:type ip: str
:returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
"""
if not validate_ip(ip):
return None
quads = ip.split('.')
if len(quads) == 1:
# only a network quad
quads = quads + [0, 0, 0]
elif len(quads) < 4:
# partial form, last supplied quad is host address, rest is network
host = quads[-1:]
quads = quads[:-1] + [0, ] * (4 - len(quads)) + host
lngip = 0
for q in quads:
lngip = (lngip << 8) | int(q)
return lngip
# end ip2long
def ip2network(ip):
"""Convert a dotted-quad ip to base network number.
This differs from :func:`ip2long` in that partial addresses as treated as
all network instead of network plus host (eg. '127.1' expands to
'127.1.0.0')
:param ip: dotted-quad ip address (eg. ‘127.0.0.1’).
:type ip: str
:returns: Network byte order 32-bit integer or `None` if ip is invalid.
"""
if not validate_ip(ip):
return None
quads = ip.split('.')
netw = 0
for i in range(4):
netw = (netw << 8) | int(len(quads) > i and quads[i] or 0)
return netw
# end ip2network
def long2ip(l):
"""Convert a network byte order 32-bit integer to a dotted quad ip
address.
>>> long2ip(2130706433)
'127.0.0.1'
>>> long2ip(MIN_IP)
'0.0.0.0'
>>> long2ip(MAX_IP)
'255.255.255.255'
>>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
>>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(374297346592387463875) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
>>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: expected int between 0 and 4294967295 inclusive
:param l: Network byte order 32-bit integer.
:type l: int
:returns: Dotted-quad ip address (eg. '127.0.0.1').
:raises: TypeError
"""
if MAX_IP < l or l < MIN_IP:
raise TypeError(
"expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
return '%d.%d.%d.%d' % (
l >> 24 & 255, l >> 16 & 255, l >> 8 & 255, l & 255)
# end long2ip
def ip2hex(addr):
"""Convert a dotted-quad ip address to a hex encoded number.
>>> ip2hex('0.0.0.1')
'00000001'
>>> ip2hex('127.0.0.1')
'7f000001'
>>> ip2hex('127.255.255.255')
'7fffffff'
>>> ip2hex('128.0.0.1')
'80000001'
>>> ip2hex('128.1')
'80000001'
>>> ip2hex('255.255.255.255')
'ffffffff'
:param addr: Dotted-quad ip address.
:type addr: str
:returns: Numeric ip address as a hex-encoded string or ``None`` if
invalid.
"""
netip = ip2long(addr)
if netip is None:
return None
return "%08x" % netip
# end ip2hex
def hex2ip(hex_str):
"""Convert a hex encoded integer to a dotted-quad ip address.
>>> hex2ip('00000001')
'0.0.0.1'
>>> hex2ip('7f000001')
'127.0.0.1'
>>> hex2ip('7fffffff')
'127.255.255.255'
>>> hex2ip('80000001')
'128.0.0.1'
>>> hex2ip('ffffffff')
'255.255.255.255'
:param hex_str: Numeric ip address as a hex-encoded string.
:type hex_str: str
:returns: Dotted-quad ip address or ``None`` if invalid.
"""
try:
netip = int(hex_str, 16)
except ValueError:
return None
return long2ip(netip)
# end hex2ip
def cidr2block(cidr):
    """Convert a CIDR notation ip address into a tuple containing the network
    block start and end addresses.

    >>> cidr2block('127.0.0.1/32')
    ('127.0.0.1', '127.0.0.1')
    >>> cidr2block('127/8')
    ('127.0.0.0', '127.255.255.255')
    >>> cidr2block('127.0.1/16')
    ('127.0.0.0', '127.0.255.255')
    >>> cidr2block('127.1/24')
    ('127.1.0.0', '127.1.0.255')
    >>> cidr2block('127.0.0.3/29')
    ('127.0.0.0', '127.0.0.7')
    >>> cidr2block('127/0')
    ('0.0.0.0', '255.255.255.255')

    :param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
    :type cidr: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_cidr(cidr):
        return None
    addr, prefix = cidr.split('/')
    # reduce the address to its base network number for the block start
    return _block_from_ip_and_prefix(ip2network(addr), int(prefix))
# end cidr2block
def netmask2prefix(mask):
"""Convert a dotted-quad netmask into a CIDR prefix.
>>> netmask2prefix('255.0.0.0')
8
>>> netmask2prefix('255.128.0.0')
9
>>> netmask2prefix('255.255.255.254')
31
>>> netmask2prefix('255.255.255.255')
32
>>> netmask2prefix('0.0.0.0')
0
>>> netmask2prefix('127.0.0.1')
0
:param mask: Netmask in dotted-quad notation.
:type mask: str
:returns: CIDR prefix corresponding to netmask or `0` if invalid.
"""
if validate_netmask(mask):
return bin(ip2network(mask)).count('1')
return 0
# end netmask2prefix
# end subnet2block
def _block_from_ip_and_prefix(ip, prefix):
"""Create a tuple of (start, end) dotted-quad addresses from the given
ip address and prefix length.
:param ip: Ip address in block
:type ip: long
:param prefix: Prefix size for block
:type prefix: int
:returns: Tuple of block (start, end)
"""
# keep left most prefix bits of ip
shift = 32 - prefix
block_start = ip >> shift << shift
# expand right most 32 - prefix bits to 1
mask = (1 << shift) - 1
block_end = block_start | mask
return (long2ip(block_start), long2ip(block_end))
# end _block_from_ip_and_prefix
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/ipv4.py
|
_block_from_ip_and_prefix
|
python
|
def _block_from_ip_and_prefix(ip, prefix):
# keep left most prefix bits of ip
shift = 32 - prefix
block_start = ip >> shift << shift
# expand right most 32 - prefix bits to 1
mask = (1 << shift) - 1
block_end = block_start | mask
return (long2ip(block_start), long2ip(block_end))
|
Create a tuple of (start, end) dotted-quad addresses from the given
ip address and prefix length.
:param ip: Ip address in block
:type ip: long
:param prefix: Prefix size for block
:type prefix: int
:returns: Tuple of block (start, end)
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/ipv4.py#L617-L634
| null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
# sniff for python2.x / python3k compatibility "fixes'
try:
basestring = basestring
except NameError:
# 'basestring' is undefined, must be python3k
basestring = str
try:
bin = bin
except NameError:
# builtin bin function doesn't exist
def bin(x):
"""
From http://code.activestate.com/recipes/219300/#c7
"""
if x < 0:
return '-' + bin(-x)
out = []
if x == 0:
out.append('0')
while x > 0:
out.append('01'[x & 1])
x >>= 1
pass
try:
return '0b' + ''.join(reversed(out))
except NameError:
out.reverse()
return '0b' + ''.join(out)
# end bin
# end compatibility "fixes'
__all__ = (
'cidr2block',
'hex2ip',
'ip2hex',
'ip2long',
'ip2network',
'long2ip',
'netmask2prefix',
'subnet2block',
'validate_cidr',
'validate_ip',
'validate_netmask',
'validate_subnet',
'BENCHMARK_TESTS',
'BROADCAST',
'CURRENT_NETWORK',
'DUAL_STACK_LITE',
'IETF_PROTOCOL_RESERVED',
'IPV6_TO_IPV4_RELAY',
'LINK_LOCAL',
'LOCALHOST',
'LOOPBACK',
'MAX_IP',
'MIN_IP',
'MULTICAST',
'MULTICAST_INTERNETWORK',
'MULTICAST_LOCAL',
'PRIVATE_NETWORK_10',
'PRIVATE_NETWORK_172_16',
'PRIVATE_NETWORK_192_168',
'RESERVED',
'SHARED_ADDRESS_SPACE',
'TEST_NET_1',
'TEST_NET_2',
'TEST_NET_3',
)
#: Regex for validating an IPv4 address
_DOTTED_QUAD_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}$')
#: Regex for validating a CIDR network
_CIDR_RE = re.compile(r'^(\d{1,3}\.){0,3}\d{1,3}/\d{1,2}$')
#: Maximum IPv4 integer
MAX_IP = 0xffffffff
#: Minimum IPv4 integer
MIN_IP = 0x0
#: Broadcast messages to the current network (only valid as source address)
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
CURRENT_NETWORK = "0.0.0.0/8"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_10 = "10.0.0.0/8"
#: Carrier-grade NAT private network
#: (`RFC 6598 <https://tools.ietf.org/html/rfc6598>`_)
SHARED_ADDRESS_SPACE = "100.64.0.0/10"
#: Loopback addresses on the local host
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOOPBACK = "127.0.0.0/8"
#: Common `localhost` address
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
LOCALHOST = "127.0.0.1"
#: Autoconfiguration when no IP address available
#: (`RFC 3972 <https://tools.ietf.org/html/rfc3972>`_)
LINK_LOCAL = "169.254.0.0/16"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_172_16 = "172.16.0.0/12"
#: IETF protocol assignments reserved block
#: (`RFC 5735 <https://tools.ietf.org/html/rfc5735>`_)
IETF_PROTOCOL_RESERVED = "192.0.0.0/24"
#: Dual-Stack Lite link address
#: (`RFC 6333 <https://tools.ietf.org/html/rfc6333>`_)
DUAL_STACK_LITE = "192.0.0.0/29"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_1 = "192.0.2.0/24"
#: 6to4 anycast relay
#: (`RFC 3068 <https://tools.ietf.org/html/rfc3068>`_)
IPV6_TO_IPV4_RELAY = "192.88.99.0/24"
#: Private network
#: (`RFC 1918 <https://tools.ietf.org/html/rfc1918>`_)
PRIVATE_NETWORK_192_168 = "192.168.0.0/16"
#: Inter-network communications testing
#: (`RFC 2544 <https://tools.ietf.org/html/rfc2544>`_)
BENCHMARK_TESTS = "198.18.0.0/15"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_2 = "198.51.100.0/24"
#: Documentation and example network
#: (`RFC 5737 <https://tools.ietf.org/html/rfc5737>`_)
TEST_NET_3 = "203.0.113.0/24"
#: Multicast reserved block
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST = "224.0.0.0/4"
#: Link local multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_LOCAL = "224.0.0.0/24"
#: Forwardable multicast
#: (`RFC 5771 <https://tools.ietf.org/html/rfc5771>`_)
MULTICAST_INTERNETWORK = "224.0.1.0/24"
#: Former Class E address space. Reserved for future use
#: (`RFC 1700 <https://tools.ietf.org/html/rfc1700>`_)
RESERVED = "240.0.0.0/4"
#: Broadcast messages to the current network
#: (only valid as destination address)
#: (`RFC 919 <https://tools.ietf.org/html/rfc919>`_)
BROADCAST = "255.255.255.255"
def validate_ip(s):
    """Check whether *s* is a valid dotted-quad ip address.

    A valid address is one to four octets (0-255) separated by
    periods (.).

    >>> validate_ip('127.0.0.1')
    True
    >>> validate_ip('127.0')
    True
    >>> validate_ip('127.0.0.256')
    False
    >>> validate_ip(LOCALHOST)
    True
    >>> validate_ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer

    :param s: String to validate as a dotted-quad ip address.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address, ``False`` otherwise.
    :raises: TypeError
    """
    if not _DOTTED_QUAD_RE.match(s):
        return False
    # The regex only bounds the digit count per octet, so the numeric
    # range of each octet still has to be checked explicitly.
    return all(int(octet) <= 255 for octet in s.split('.'))
# end validate_ip
def validate_cidr(s):
    """Check whether *s* is a valid CIDR notation ip address.

    A valid value is a dotted-quad IPv4 address followed by a forward
    slash (/) and a bit mask length no greater than 32.

    >>> validate_cidr('127.0.0.1/32')
    True
    >>> validate_cidr('127.0/8')
    True
    >>> validate_cidr('127.0.0.256/32')
    False
    >>> validate_cidr('127.0.0.0')
    False
    >>> validate_cidr(LOOPBACK)
    True
    >>> validate_cidr('127.0.0.1/33')
    False
    >>> validate_cidr(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected string or buffer

    :param s: String to validate as a CIDR notation ip address.
    :type s: str
    :returns: ``True`` if a valid CIDR address, ``False`` otherwise.
    :raises: TypeError
    """
    if not _CIDR_RE.match(s):
        return False
    addr, prefix = s.split('/')
    # The regex guarantees exactly one slash; range-check both halves.
    return validate_ip(addr) and int(prefix) <= 32
# end validate_cidr
def validate_netmask(s):
    """Check whether dotted-quad address *s* is a valid netmask.

    >>> validate_netmask('0.0.0.0')
    True
    >>> validate_netmask('128.0.0.0')
    True
    >>> validate_netmask('255.0.0.0')
    True
    >>> validate_netmask('255.255.255.255')
    True
    >>> validate_netmask(BROADCAST)
    True
    >>> validate_netmask('128.0.0.1')
    False
    >>> validate_netmask('1.255.255.0')
    False
    >>> validate_netmask('0.255.255.0')
    False

    :param s: String to validate as a dotted-quad notation netmask.
    :type s: str
    :returns: ``True`` if a valid netmask, ``False`` otherwise.
    :raises: TypeError
    """
    if not validate_ip(s):
        return False
    # 32-bit binary form of the address, zero padded on the left.
    bits = bin(ip2network(s))[2:].zfill(32)
    # A netmask is a (possibly empty) run of 1-bits followed only by
    # 0-bits, so it is invalid exactly when a '1' appears after a '0'.
    return '01' not in bits
# end validate_netmask
def validate_subnet(s):
    """Validate a dotted-quad ip address including a netmask.

    The string is considered a valid dotted-quad address with netmask if it
    consists of one to four octets (0-255) seperated by periods (.) followed
    by a forward slash (/) and a subnet bitmask which is expressed in
    dotted-quad format.

    >>> validate_subnet('127.0.0.1/255.255.255.255')
    True
    >>> validate_subnet('127.0/255.0.0.0')
    True
    >>> validate_subnet('127.0/255')
    True
    >>> validate_subnet('127.0.0.256/255.255.255.255')
    False
    >>> validate_subnet('127.0.0.1/255.255.255.256')
    False
    >>> validate_subnet('127.0.0.0')
    False
    >>> validate_subnet('127.0.0.0/255.0.0.0/junk')
    False
    >>> validate_subnet(None)
    Traceback (most recent call last):
    ...
    TypeError: expected string or unicode

    :param s: String to validate as a dotted-quad ip address with netmask.
    :type s: str
    :returns: ``True`` if a valid dotted-quad ip address with netmask,
        ``False`` otherwise.
    :raises: TypeError
    """
    if isinstance(s, basestring):
        if '/' in s:
            # maxsplit=1: everything after the first slash is the netmask.
            # The previous maxsplit of 2 made inputs containing two or
            # more slashes (e.g. '1.2.3.4/255.0.0.0/x') raise ValueError
            # from the tuple unpacking instead of returning False.
            start, mask = s.split('/', 1)
            return validate_ip(start) and validate_netmask(mask)
        else:
            return False
    raise TypeError("expected string or unicode")
# end validate_subnet
def ip2long(ip):
    """Convert a dotted-quad ip address to a network byte order 32-bit
    integer.

    >>> ip2long('127.0.0.1')
    2130706433
    >>> ip2long('127.1')
    2130706433
    >>> ip2long('127')
    2130706432
    >>> ip2long('127.0.0.256') is None
    True

    :param ip: Dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or ``None`` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    parts = ip.split('.')
    if len(parts) == 1:
        # a bare network quad: pad the host portion with zeros
        parts += ['0', '0', '0']
    elif len(parts) < 4:
        # partial form: the final quad is the host address, the rest is
        # network, so zero-pad between them
        parts = parts[:-1] + ['0'] * (4 - len(parts)) + parts[-1:]
    # Fold the four octets into one integer, most significant first.
    value = 0
    for part in parts:
        value = (value << 8) | int(part)
    return value
# end ip2long
def ip2network(ip):
    """Convert a dotted-quad ip to its base network number.

    This differs from :func:`ip2long` in that partial addresses are
    treated as all network instead of network plus host (eg. '127.1'
    expands to '127.1.0.0').

    :param ip: dotted-quad ip address (eg. '127.0.0.1').
    :type ip: str
    :returns: Network byte order 32-bit integer or `None` if ip is invalid.
    """
    if not validate_ip(ip):
        return None
    parts = ip.split('.')
    # Missing trailing quads are treated as zero (network padding).
    value = 0
    for idx in range(4):
        octet = int(parts[idx]) if idx < len(parts) else 0
        value = (value << 8) | octet
    return value
# end ip2network
def long2ip(l):
    """Convert a network byte order 32-bit integer to a dotted quad ip
    address.

    >>> long2ip(2130706433)
    '127.0.0.1'
    >>> long2ip(MIN_IP)
    '0.0.0.0'
    >>> long2ip(MAX_IP)
    '255.255.255.255'
    >>> long2ip(None) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: unsupported operand type(s) for >>: 'NoneType' and 'int'
    >>> long2ip(-1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive
    >>> long2ip(MAX_IP + 1) #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    TypeError: expected int between 0 and 4294967295 inclusive

    :param l: Network byte order 32-bit integer.
    :type l: int
    :returns: Dotted-quad ip address (eg. '127.0.0.1').
    :raises: TypeError
    """
    if MAX_IP < l or l < MIN_IP:
        raise TypeError(
            "expected int between %d and %d inclusive" % (MIN_IP, MAX_IP))
    # Peel off one octet per byte position, most significant first.
    return '.'.join(str((l >> shift) & 255) for shift in (24, 16, 8, 0))
# end long2ip
def ip2hex(addr):
    """Convert a dotted-quad ip address to a hex encoded number.

    >>> ip2hex('0.0.0.1')
    '00000001'
    >>> ip2hex('127.0.0.1')
    '7f000001'
    >>> ip2hex('127.255.255.255')
    '7fffffff'
    >>> ip2hex('128.0.0.1')
    '80000001'
    >>> ip2hex('128.1')
    '80000001'
    >>> ip2hex('255.255.255.255')
    'ffffffff'

    :param addr: Dotted-quad ip address.
    :type addr: str
    :returns: Numeric ip address as a hex-encoded string or ``None`` if
        invalid.
    """
    numeric = ip2long(addr)
    # Zero-pad to the full 8 hex digits of a 32-bit value.
    return None if numeric is None else "%08x" % numeric
# end ip2hex
def hex2ip(hex_str):
    """Convert a hex encoded integer to a dotted-quad ip address.

    >>> hex2ip('00000001')
    '0.0.0.1'
    >>> hex2ip('7f000001')
    '127.0.0.1'
    >>> hex2ip('7fffffff')
    '127.255.255.255'
    >>> hex2ip('80000001')
    '128.0.0.1'
    >>> hex2ip('ffffffff')
    '255.255.255.255'
    >>> hex2ip('1ffffffff') is None
    True

    :param hex_str: Numeric ip address as a hex-encoded string.
    :type hex_str: str
    :returns: Dotted-quad ip address or ``None`` if invalid.
    """
    try:
        netip = int(hex_str, 16)
    except ValueError:
        # not parseable as hexadecimal -> invalid
        return None
    if netip < MIN_IP or netip > MAX_IP:
        # Outside the unsigned 32-bit range (e.g. '1ffffffff' or '-f').
        # Previously this leaked a TypeError out of long2ip instead of
        # honouring the documented "``None`` if invalid" contract.
        return None
    return long2ip(netip)
# end hex2ip
def cidr2block(cidr):
    """Convert a CIDR notation ip address into a tuple containing the
    network block start and end addresses.

    >>> cidr2block('127.0.0.1/32')
    ('127.0.0.1', '127.0.0.1')
    >>> cidr2block('127/8')
    ('127.0.0.0', '127.255.255.255')
    >>> cidr2block('127.0.1/16')
    ('127.0.0.0', '127.0.255.255')
    >>> cidr2block('127.1/24')
    ('127.1.0.0', '127.1.0.255')
    >>> cidr2block('127.0.0.3/29')
    ('127.0.0.0', '127.0.0.7')
    >>> cidr2block('127/0')
    ('0.0.0.0', '255.255.255.255')

    :param cidr: CIDR notation ip address (eg. '127.0.0.1/8').
    :type cidr: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_cidr(cidr):
        return None
    addr, prefix = cidr.split('/')
    # Reduce the dotted-quad part to its base network number first.
    return _block_from_ip_and_prefix(ip2network(addr), int(prefix))
# end cidr2block
def netmask2prefix(mask):
    """Convert a dotted-quad netmask into a CIDR prefix.

    >>> netmask2prefix('255.0.0.0')
    8
    >>> netmask2prefix('255.128.0.0')
    9
    >>> netmask2prefix('255.255.255.254')
    31
    >>> netmask2prefix('255.255.255.255')
    32
    >>> netmask2prefix('0.0.0.0')
    0
    >>> netmask2prefix('127.0.0.1')
    0

    :param mask: Netmask in dotted-quad notation.
    :type mask: str
    :returns: CIDR prefix corresponding to netmask or `0` if invalid.
    """
    if not validate_netmask(mask):
        return 0
    # A valid netmask's prefix length is simply its count of 1-bits.
    return bin(ip2network(mask)).count('1')
# end netmask2prefix
def subnet2block(subnet):
    """Convert a dotted-quad ip address including a netmask into a tuple
    containing the network block start and end addresses.

    >>> subnet2block('127.0.0.1/255.255.255.255')
    ('127.0.0.1', '127.0.0.1')
    >>> subnet2block('127/255')
    ('127.0.0.0', '127.255.255.255')
    >>> subnet2block('127.0.1/255.255')
    ('127.0.0.0', '127.0.255.255')
    >>> subnet2block('127.1/255.255.255.0')
    ('127.1.0.0', '127.1.0.255')
    >>> subnet2block('127.0.0.3/255.255.255.248')
    ('127.0.0.0', '127.0.0.7')
    >>> subnet2block('127/0')
    ('0.0.0.0', '255.255.255.255')

    :param subnet: dotted-quad ip address with netmask
        (eg. '127.0.0.1/255.0.0.0').
    :type subnet: str
    :returns: Tuple of block (start, end) or ``None`` if invalid.
    :raises: TypeError
    """
    if not validate_subnet(subnet):
        return None
    addr, netmask = subnet.split('/')
    # Translate the netmask to a CIDR prefix, then reuse the shared
    # block computation on the base network number.
    return _block_from_ip_and_prefix(ip2network(addr), netmask2prefix(netmask))
# end subnet2block
# end _block_from_ip_and_prefix
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/__init__.py
|
_address2long
|
python
|
def _address2long(address):
parsed = ipv4.ip2long(address)
if parsed is None:
parsed = ipv6.ip2long(address)
return parsed
|
Convert an address string to a long.
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/__init__.py#L58-L65
|
[
"def ip2long(ip):\n \"\"\"Convert a dotted-quad ip address to a network byte order 32-bit\n integer.\n\n\n >>> ip2long('127.0.0.1')\n 2130706433\n >>> ip2long('127.1')\n 2130706433\n >>> ip2long('127')\n 2130706432\n >>> ip2long('127.0.0.256') is None\n True\n\n\n :param ip: Dotted-quad ip address (eg. '127.0.0.1').\n :type ip: str\n :returns: Network byte order 32-bit integer or ``None`` if ip is invalid.\n \"\"\"\n if not validate_ip(ip):\n return None\n quads = ip.split('.')\n if len(quads) == 1:\n # only a network quad\n quads = quads + [0, 0, 0]\n elif len(quads) < 4:\n # partial form, last supplied quad is host address, rest is network\n host = quads[-1:]\n quads = quads[:-1] + [0, ] * (4 - len(quads)) + host\n\n lngip = 0\n for q in quads:\n lngip = (lngip << 8) | int(q)\n return lngip\n",
"def ip2long(ip):\n \"\"\"Convert a hexidecimal IPv6 address to a network byte order 128-bit\n integer.\n\n\n >>> ip2long('::') == 0\n True\n >>> ip2long('::1') == 1\n True\n >>> expect = 0x20010db885a3000000008a2e03707334\n >>> ip2long('2001:db8:85a3::8a2e:370:7334') == expect\n True\n >>> ip2long('2001:db8:85a3:0:0:8a2e:370:7334') == expect\n True\n >>> ip2long('2001:0db8:85a3:0000:0000:8a2e:0370:7334') == expect\n True\n >>> expect = 0x20010db8000000000001000000000001\n >>> ip2long('2001:db8::1:0:0:1') == expect\n True\n >>> expect = 281473902969472\n >>> ip2long('::ffff:192.0.2.128') == expect\n True\n >>> expect = 0xffffffffffffffffffffffffffffffff\n >>> ip2long('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') == expect\n True\n >>> ip2long('ff::ff::ff') == None\n True\n >>> expect = 21932261930451111902915077091070067066\n >>> ip2long('1080:0:0:0:8:800:200C:417A') == expect\n True\n\n\n :param ip: Hexidecimal IPv6 address\n :type ip: str\n :returns: Network byte order 128-bit integer or ``None`` if ip is invalid.\n \"\"\"\n if not validate_ip(ip):\n return None\n\n if '.' in ip:\n # convert IPv4 suffix to hex\n chunks = ip.split(':')\n v4_int = ipv4.ip2long(chunks.pop())\n if v4_int is None:\n return None\n chunks.append('%x' % ((v4_int >> 16) & 0xffff))\n chunks.append('%x' % (v4_int & 0xffff))\n ip = ':'.join(chunks)\n\n halves = ip.split('::')\n hextets = halves[0].split(':')\n if len(halves) == 2:\n h2 = halves[1].split(':')\n for z in range(8 - (len(hextets) + len(h2))):\n hextets.append('0')\n for h in h2:\n hextets.append(h)\n # end if\n\n lngip = 0\n for h in hextets:\n if '' == h:\n h = '0'\n lngip = (lngip << 16) | int(h, 16)\n return lngip\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2014, Bryan Davis and iptools contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# sniff for python2.x / python3k compatibility "fixes'
try:
    # py2: basestring exists; py3: NameError falls through to str
    basestring = basestring
except NameError:
    # 'basestring' is undefined, must be python3k
    basestring = str
try:
    # builtin next() exists on python >= 2.6
    next = next
except NameError:
    # builtin next function doesn't exist
    def next(iterable):
        return iterable.next()
try:
    # The abc classes live in collections.abc on python 3; the aliases
    # under plain `collections` were removed in 3.10. The previous
    # `import Sequence` always raised ImportError, silently degrading
    # Sequence to `object` and losing the abc mixin methods on IpRange.
    from collections.abc import Sequence
except ImportError:
    try:
        # python 2.6 - 3.9 fallback location
        from collections import Sequence
    except ImportError:
        # python <2.6 doesn't have abc classes to extend
        Sequence = object
# end compatibility "fixes'
from . import ipv4
from . import ipv6
# Package version string, exposed for setup tooling and runtime checks.
__version__ = '0.7.0'
# Public API: only the two range classes are exported via `import *`.
__all__ = (
    'IpRange',
    'IpRangeList',
)
# end _addess2long
class IpRange (Sequence):
    """
    Range of ip addresses.
    Converts a CIDR notation address, ip address and subnet, tuple of ip
    addresses or start and end addresses into a smart object which can perform
    ``in`` and ``not in`` tests and iterate all of the addresses in the range.
    >>> r = IpRange('127.0.0.1', '127.255.255.255')
    >>> '127.127.127.127' in r
    True
    >>> '10.0.0.1' in r
    False
    >>> 2130706433 in r
    True
    >>> # IPv4 mapped IPv6 addresses are valid in an IPv4 block
    >>> '::ffff:127.127.127.127' in r
    True
    >>> # but only if they are actually in the block :)
    >>> '::ffff:192.0.2.128' in r
    False
    >>> '::ffff:c000:0280' in r
    False
    >>> r = IpRange('127/24')
    >>> print(r)
    ('127.0.0.0', '127.0.0.255')
    >>> r = IpRange('127/30')
    >>> for ip in r:
    ...     print(ip)
    127.0.0.0
    127.0.0.1
    127.0.0.2
    127.0.0.3
    >>> print(IpRange('127.0.0.255', '127.0.0.0'))
    ('127.0.0.0', '127.0.0.255')
    >>> r = IpRange('127/255.255.255.0')
    >>> print(r)
    ('127.0.0.0', '127.0.0.255')
    >>> r = IpRange('::ffff:0000:0000', '::ffff:ffff:ffff')
    >>> '::ffff:192.0.2.128' in r
    True
    >>> '::ffff:c000:0280' in r
    True
    >>> 281473902969472 in r
    True
    >>> '192.168.2.128' in r
    False
    >>> 2130706433 in r
    False
    >>> r = IpRange('::ffff:ffff:0000/120')
    >>> for ip in r:
    ...     print(ip) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    ::ffff:ffff:0 ... ::ffff:ffff:6d ... ::ffff:ffff:ff
    :param start: Ip address in dotted quad format, CIDR notation, subnet
        format or ``(start, end)`` tuple of ip addresses in dotted quad format.
    :type start: str or tuple
    :param end: Ip address in dotted quad format or ``None``.
    :type end: str
    """
    def __init__(self, start, end=None):
        # When only `start` is given, normalize the accepted shorthand
        # forms (IpRange copy, (start, end) tuple, IPv4/IPv6 CIDR,
        # ip/netmask subnet, single address) into a start/end pair.
        if end is None:
            if isinstance(start, IpRange):
                # copy constructor
                start, end = start[0], start[-1]
            elif isinstance(start, tuple):
                # occurs when IpRangeList calls via map to pass start and end
                start, end = start
            elif ipv4.validate_cidr(start):
                # CIDR notation range
                start, end = ipv4.cidr2block(start)
            elif ipv6.validate_cidr(start):
                # CIDR notation range
                start, end = ipv6.cidr2block(start)
            elif ipv4.validate_subnet(start):
                # Netmask notation range
                start, end = ipv4.subnet2block(start)
            else:
                # degenerate range
                end = start
        start = _address2long(start)
        end = _address2long(end)
        # Store bounds in ascending order so reversed input still forms
        # a valid range (see the '127.0.0.255', '127.0.0.0' doctest).
        self.startIp = min(start, end)
        self.endIp = max(start, end)
        self._len = self.endIp - self.startIp + 1
        # Any endpoint beyond the 32-bit maximum forces IPv6 formatting.
        self._ipver = ipv4
        if self.endIp > ipv4.MAX_IP:
            self._ipver = ipv6
    # end __init__
    def __repr__(self):
        """
        >>> repr(IpRange('127.0.0.1'))
        "IpRange('127.0.0.1', '127.0.0.1')"
        >>> repr(IpRange('10/8'))
        "IpRange('10.0.0.0', '10.255.255.255')"
        >>> repr(IpRange('127.0.0.255', '127.0.0.0'))
        "IpRange('127.0.0.0', '127.0.0.255')"
        """
        return "IpRange(%r, %r)" % (
            self._ipver.long2ip(self.startIp),
            self._ipver.long2ip(self.endIp))
    # end __repr__
    def __str__(self):
        """
        >>> str(IpRange('127.0.0.1'))
        "('127.0.0.1', '127.0.0.1')"
        >>> str(IpRange('10/8'))
        "('10.0.0.0', '10.255.255.255')"
        >>> str(IpRange('127.0.0.255', '127.0.0.0'))
        "('127.0.0.0', '127.0.0.255')"
        """
        # Renders as the repr of a (start, end) tuple of dotted quads.
        return (
            self._ipver.long2ip(self.startIp),
            self._ipver.long2ip(self.endIp)).__repr__()
    # end __str__
    def __eq__(self, other):
        """
        >>> IpRange('127.0.0.1') == IpRange('127.0.0.1')
        True
        >>> IpRange('127.0.0.1') == IpRange('127.0.0.2')
        False
        >>> IpRange('10/8') == IpRange('10', '10.255.255.255')
        True
        """
        return isinstance(other, IpRange) and \
            self.startIp == other.startIp and \
            self.endIp == other.endIp
    # end __eq__
    def __len__(self):
        """
        Return the length of the range.
        >>> len(IpRange('127.0.0.1'))
        1
        >>> len(IpRange('127/31'))
        2
        >>> len(IpRange('127/22'))
        1024
        >>> IpRange('fe80::/10').__len__() == 2**118
        True
        """
        # _len was precomputed in __init__; note it can exceed what a
        # C-long len() would allow for large IPv6 blocks, hence the
        # direct __len__() call in the doctest above.
        return self._len
    # end __len__
    def __hash__(self):
        """
        >>> a = IpRange('127.0.0.0/8')
        >>> b = IpRange('127.0.0.0', '127.255.255.255')
        >>> a.__hash__() == b.__hash__()
        True
        >>> c = IpRange('10/8')
        >>> a.__hash__() == c.__hash__()
        False
        >>> b.__hash__() == c.__hash__()
        False
        """
        # Hash agrees with __eq__: both are driven by the bounds pair.
        return hash((self.startIp, self.endIp))
    # end __hash__
    def _cast(self, item):
        # Normalize an address-like value (string or integer) to an
        # integer comparable against this range's bounds; raises
        # TypeError for anything else.
        if isinstance(item, basestring):
            item = _address2long(item)
        if type(item) not in (type(1), type(ipv4.MAX_IP), type(ipv6.MAX_IP)):
            raise TypeError(
                "expected ip address, 32-bit integer or 128-bit integer")
        if ipv4 == self._ipver and item > ipv4.MAX_IP:
            # casting an ipv6 in an ipv4 range
            # downcast to ipv4 iff address is in the IPv4 mapped block
            if item in _IPV6_MAPPED_IPV4:
                item = item & ipv4.MAX_IP
            # end if
        return item
    # end _cast
    def index(self, item):
        """
        Return the 0-based position of `item` in this IpRange.
        >>> r = IpRange('127.0.0.1', '127.255.255.255')
        >>> r.index('127.0.0.1')
        0
        >>> r.index('127.255.255.255')
        16777214
        >>> r.index('10.0.0.1')
        Traceback (most recent call last):
        ...
        ValueError: 10.0.0.1 is not in range
        :param item: Dotted-quad ip address.
        :type item: str
        :returns: Index of ip address in range
        """
        item = self._cast(item)
        # Position is just the numeric distance from the range start.
        offset = item - self.startIp
        if offset >= 0 and offset < self._len:
            return offset
        raise ValueError('%s is not in range' % self._ipver.long2ip(item))
    # end index
    def count(self, item):
        # A range holds each address at most once, so count is 0 or 1.
        return int(item in self)
    # end count
    def __contains__(self, item):
        """
        Implements membership test operators ``in`` and ``not in`` for the
        address range.
        >>> r = IpRange('127.0.0.1', '127.255.255.255')
        >>> '127.127.127.127' in r
        True
        >>> '10.0.0.1' in r
        False
        >>> 2130706433 in r
        True
        >>> 'invalid' in r
        Traceback (most recent call last):
        ...
        TypeError: expected ip address, 32-bit integer or 128-bit integer
        :param item: Dotted-quad ip address.
        :type item: str
        :returns: ``True`` if address is in range, ``False`` otherwise.
        """
        item = self._cast(item)
        return self.startIp <= item <= self.endIp
    # end __contains__
    def __getitem__(self, index):
        """
        >>> r = IpRange('127.0.0.1', '127.255.255.255')
        >>> r[0]
        '127.0.0.1'
        >>> r[16777214]
        '127.255.255.255'
        >>> r[-1]
        '127.255.255.255'
        >>> r[len(r)]
        Traceback (most recent call last):
        ...
        IndexError: index out of range
        >>> r[:]
        IpRange('127.0.0.1', '127.255.255.255')
        >>> r[1:]
        IpRange('127.0.0.2', '127.255.255.255')
        >>> r[-2:]
        IpRange('127.255.255.254', '127.255.255.255')
        >>> r[0:2]
        IpRange('127.0.0.1', '127.0.0.2')
        >>> r[0:-1]
        IpRange('127.0.0.1', '127.255.255.254')
        >>> r[:-2]
        IpRange('127.0.0.1', '127.255.255.253')
        >>> r[::2]
        Traceback (most recent call last):
        ...
        ValueError: slice step not supported
        """
        if isinstance(index, slice):
            if index.step not in (None, 1):
                # TODO: return an IpRangeList
                raise ValueError('slice step not supported')
            # Normalize negative/omitted bounds against the range length,
            # then return a sub-range rather than a materialized list.
            start = index.start or 0
            if start < 0:
                start = max(0, start + self._len)
            if start >= self._len:
                raise IndexError('start index out of range')
            stop = index.stop or self._len
            if stop < 0:
                stop = max(start, stop + self._len)
            if stop > self._len:
                raise IndexError('stop index out of range')
            return IpRange(
                self._ipver.long2ip(self.startIp + start),
                self._ipver.long2ip(self.startIp + stop - 1))
        else:
            # Integer index: support negative indexing like a list.
            if index < 0:
                index = self._len + index
            if index < 0 or index >= self._len:
                raise IndexError('index out of range')
            return self._ipver.long2ip(self.startIp + index)
    # end __getitem__
    def __iter__(self):
        """
        Return an iterator over ip addresses in the range.
        >>> iter = IpRange('127/31').__iter__()
        >>> next(iter)
        '127.0.0.0'
        >>> next(iter)
        '127.0.0.1'
        >>> next(iter)
        Traceback (most recent call last):
        ...
        StopIteration
        """
        # Lazily yields dotted-quad strings; never materializes the range.
        i = self.startIp
        while i <= self.endIp:
            yield self._ipver.long2ip(i)
            i += 1
    # end __iter__
# end class IpRange
# Pre-built range of the IPv4-mapped IPv6 block (presumably ::ffff:0:0/96
# -- confirm against ipv6.IPV4_MAPPED); used by IpRange._cast to decide
# when an IPv6 value may be downcast into an IPv4 range.
_IPV6_MAPPED_IPV4 = IpRange(ipv6.IPV4_MAPPED)
class IpRangeList (object):
    r"""
    List of IpRange objects.
    Converts a list of ip address and/or CIDR addresses into a list of IpRange
    objects. This list can perform ``in`` and ``not in`` tests and iterate all
    of the addresses in the range.
    :param \*args: List of ip addresses or CIDR notation and/or
        ``(start, end)`` tuples of ip addresses.
    :type \*args: list of str and/or tuple
    """
    def __init__(self, *args):
        # Each argument is handed to the IpRange constructor, which
        # accepts strings, CIDR/subnet notation, tuples and IpRanges.
        self.ips = tuple(map(IpRange, args))
    # end __init__
    def __repr__(self):
        """
        >>> repr(IpRangeList('127.0.0.1', '10/8', '192.168/16'))
        ... #doctest: +NORMALIZE_WHITESPACE
        "IpRangeList(IpRange('127.0.0.1', '127.0.0.1'),
        IpRange('10.0.0.0', '10.255.255.255'),
        IpRange('192.168.0.0', '192.168.255.255'))"
        >>> repr(
        ...     IpRangeList(IpRange('127.0.0.1', '127.0.0.1'),
        ...     IpRange('10.0.0.0', '10.255.255.255'),
        ...     IpRange('192.168.0.0', '192.168.255.255')))
        ... #doctest: +NORMALIZE_WHITESPACE
        "IpRangeList(IpRange('127.0.0.1', '127.0.0.1'),
        IpRange('10.0.0.0', '10.255.255.255'),
        IpRange('192.168.0.0', '192.168.255.255'))"
        """
        return "IpRangeList%r" % (self.ips,)
    # end __repr__
    def __str__(self):
        """
        >>> str(IpRangeList('127.0.0.1', '10/8', '192.168/16'))
        ... #doctest: +NORMALIZE_WHITESPACE
        "(('127.0.0.1', '127.0.0.1'),
        ('10.0.0.0', '10.255.255.255'),
        ('192.168.0.0', '192.168.255.255'))"
        """
        return "(%s)" % ", ".join(str(i) for i in self.ips)
    # end __str__
    def __contains__(self, item):
        """
        Implements membership test operators ``in`` and ``not in`` for the
        address ranges contained in the list.
        >>> r = IpRangeList('127.0.0.1', '10/8', '192.168/16')
        >>> '127.0.0.1' in r
        True
        >>> '10.0.0.1' in r
        True
        >>> 2130706433 in r
        True
        >>> 'invalid' in r
        Traceback (most recent call last):
        ...
        TypeError: expected ip address, 32-bit integer or 128-bit integer
        :param item: Dotted-quad ip address.
        :type item: str
        :returns: ``True`` if address is in list, ``False`` otherwise.
        """
        if isinstance(item, basestring):
            item = _address2long(item)
        if type(item) not in (type(1), type(ipv4.MAX_IP), type(ipv6.MAX_IP)):
            raise TypeError(
                "expected ip address, 32-bit integer or 128-bit integer")
        # Linear scan; membership in any contained range suffices.
        for r in self.ips:
            if item in r:
                return True
        return False
    # end __contains__
    def __iter__(self):
        """
        Return an iterator over all ip addresses in the list.
        >>> iter = IpRangeList('127.0.0.1').__iter__()
        >>> next(iter)
        '127.0.0.1'
        >>> next(iter)
        Traceback (most recent call last):
        ...
        StopIteration
        >>> iter = IpRangeList('127.0.0.1', '10/31').__iter__()
        >>> next(iter)
        '127.0.0.1'
        >>> next(iter)
        '10.0.0.0'
        >>> next(iter)
        '10.0.0.1'
        >>> next(iter)
        Traceback (most recent call last):
        ...
        StopIteration
        """
        # Ranges are walked in insertion order; overlapping ranges will
        # yield duplicate addresses.
        for r in self.ips:
            for ip in r:
                yield ip
    # end __iter__
    def __len__(self):
        """
        Return the length of all ranges in the list.
        >>> len(IpRangeList('127.0.0.1'))
        1
        >>> len(IpRangeList('127.0.0.1', '10/31'))
        3
        >>> len(IpRangeList('1/24'))
        256
        >>> len(IpRangeList('192.168.0.0/22'))
        1024
        >>> IpRangeList('fe80::/10').__len__() == 2**118
        True
        """
        # Direct __len__() calls sidestep len()'s C-long limit for huge
        # IPv6 ranges, matching IpRange.__len__.
        return sum(r.__len__() for r in self.ips)
    # end __len__
    def __hash__(self):
        """
        Return correct hash for IpRangeList object
        >>> a = IpRange('127.0.0.0/8')
        >>> b = IpRange('127.0.0.0', '127.255.255.255')
        >>> IpRangeList(a, b).__hash__() == IpRangeList(a, b).__hash__()
        True
        >>> IpRangeList(a, b).__hash__() == IpRangeList(b, a).__hash__()
        True
        >>> c = IpRange('10.0.0.0/8')
        >>> IpRangeList(a, c).__hash__() == IpRangeList(c, a).__hash__()
        False
        """
        return hash(self.ips)
    # end __hash__
    def __eq__(self, other):
        """
        >>> a = IpRange('127.0.0.0/8')
        >>> b = IpRange('127.0.0.0', '127.255.255.255')
        >>> IpRangeList(a, b) == IpRangeList(a, b)
        True
        >>> IpRangeList(a, b) == IpRangeList(b, a)
        True
        >>> c = IpRange('10.0.0.0/8')
        >>> IpRangeList(a, c) == IpRangeList(c, a)
        False
        """
        # NOTE(review): equality is defined purely via hash equality, so a
        # hash collision -- or any non-IpRangeList object with a matching
        # hash -- would compare equal. Confirm this is intentional.
        return hash(self) == hash(other)
    # end __eq__
# end class IpRangeList
# vim: set sw=4 ts=4 sts=4 et :
|
bd808/python-iptools
|
iptools/__init__.py
|
IpRange.index
|
python
|
def index(self, item):
item = self._cast(item)
offset = item - self.startIp
if offset >= 0 and offset < self._len:
return offset
raise ValueError('%s is not in range' % self._ipver.long2ip(item))
|
Return the 0-based position of `item` in this IpRange.
>>> r = IpRange('127.0.0.1', '127.255.255.255')
>>> r.index('127.0.0.1')
0
>>> r.index('127.255.255.255')
16777214
>>> r.index('10.0.0.1')
Traceback (most recent call last):
...
ValueError: 10.0.0.1 is not in range
:param item: Dotted-quad ip address.
:type item: str
:returns: Index of ip address in range
|
train
|
https://github.com/bd808/python-iptools/blob/5d3fae0056297540355bb7c6c112703cfaa4b6ce/iptools/__init__.py#L259-L283
|
[
"def _cast(self, item):\n if isinstance(item, basestring):\n item = _address2long(item)\n if type(item) not in (type(1), type(ipv4.MAX_IP), type(ipv6.MAX_IP)):\n raise TypeError(\n \"expected ip address, 32-bit integer or 128-bit integer\")\n\n if ipv4 == self._ipver and item > ipv4.MAX_IP:\n # casting an ipv6 in an ipv4 range\n # downcast to ipv4 iff address is in the IPv4 mapped block\n if item in _IPV6_MAPPED_IPV4:\n item = item & ipv4.MAX_IP\n # end if\n\n return item\n"
] |
class IpRange (Sequence):
"""
Range of ip addresses.
Converts a CIDR notation address, ip address and subnet, tuple of ip
addresses or start and end addresses into a smart object which can perform
``in`` and ``not in`` tests and iterate all of the addresses in the range.
>>> r = IpRange('127.0.0.1', '127.255.255.255')
>>> '127.127.127.127' in r
True
>>> '10.0.0.1' in r
False
>>> 2130706433 in r
True
>>> # IPv4 mapped IPv6 addresses are valid in an IPv4 block
>>> '::ffff:127.127.127.127' in r
True
>>> # but only if they are actually in the block :)
>>> '::ffff:192.0.2.128' in r
False
>>> '::ffff:c000:0280' in r
False
>>> r = IpRange('127/24')
>>> print(r)
('127.0.0.0', '127.0.0.255')
>>> r = IpRange('127/30')
>>> for ip in r:
... print(ip)
127.0.0.0
127.0.0.1
127.0.0.2
127.0.0.3
>>> print(IpRange('127.0.0.255', '127.0.0.0'))
('127.0.0.0', '127.0.0.255')
>>> r = IpRange('127/255.255.255.0')
>>> print(r)
('127.0.0.0', '127.0.0.255')
>>> r = IpRange('::ffff:0000:0000', '::ffff:ffff:ffff')
>>> '::ffff:192.0.2.128' in r
True
>>> '::ffff:c000:0280' in r
True
>>> 281473902969472 in r
True
>>> '192.168.2.128' in r
False
>>> 2130706433 in r
False
>>> r = IpRange('::ffff:ffff:0000/120')
>>> for ip in r:
... print(ip) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
::ffff:ffff:0 ... ::ffff:ffff:6d ... ::ffff:ffff:ff
:param start: Ip address in dotted quad format, CIDR notation, subnet
format or ``(start, end)`` tuple of ip addresses in dotted quad format.
:type start: str or tuple
:param end: Ip address in dotted quad format or ``None``.
:type end: str
"""
def __init__(self, start, end=None):
if end is None:
if isinstance(start, IpRange):
# copy constructor
start, end = start[0], start[-1]
elif isinstance(start, tuple):
# occurs when IpRangeList calls via map to pass start and end
start, end = start
elif ipv4.validate_cidr(start):
# CIDR notation range
start, end = ipv4.cidr2block(start)
elif ipv6.validate_cidr(start):
# CIDR notation range
start, end = ipv6.cidr2block(start)
elif ipv4.validate_subnet(start):
# Netmask notation range
start, end = ipv4.subnet2block(start)
else:
# degenerate range
end = start
start = _address2long(start)
end = _address2long(end)
self.startIp = min(start, end)
self.endIp = max(start, end)
self._len = self.endIp - self.startIp + 1
self._ipver = ipv4
if self.endIp > ipv4.MAX_IP:
self._ipver = ipv6
# end __init__
def __repr__(self):
"""
>>> repr(IpRange('127.0.0.1'))
"IpRange('127.0.0.1', '127.0.0.1')"
>>> repr(IpRange('10/8'))
"IpRange('10.0.0.0', '10.255.255.255')"
>>> repr(IpRange('127.0.0.255', '127.0.0.0'))
"IpRange('127.0.0.0', '127.0.0.255')"
"""
return "IpRange(%r, %r)" % (
self._ipver.long2ip(self.startIp),
self._ipver.long2ip(self.endIp))
# end __repr__
def __str__(self):
"""
>>> str(IpRange('127.0.0.1'))
"('127.0.0.1', '127.0.0.1')"
>>> str(IpRange('10/8'))
"('10.0.0.0', '10.255.255.255')"
>>> str(IpRange('127.0.0.255', '127.0.0.0'))
"('127.0.0.0', '127.0.0.255')"
"""
return (
self._ipver.long2ip(self.startIp),
self._ipver.long2ip(self.endIp)).__repr__()
# end __str__
def __eq__(self, other):
"""
>>> IpRange('127.0.0.1') == IpRange('127.0.0.1')
True
>>> IpRange('127.0.0.1') == IpRange('127.0.0.2')
False
>>> IpRange('10/8') == IpRange('10', '10.255.255.255')
True
"""
return isinstance(other, IpRange) and \
self.startIp == other.startIp and \
self.endIp == other.endIp
# end __eq__
def __len__(self):
"""
Return the length of the range.
>>> len(IpRange('127.0.0.1'))
1
>>> len(IpRange('127/31'))
2
>>> len(IpRange('127/22'))
1024
>>> IpRange('fe80::/10').__len__() == 2**118
True
"""
return self._len
# end __len__
def __hash__(self):
"""
>>> a = IpRange('127.0.0.0/8')
>>> b = IpRange('127.0.0.0', '127.255.255.255')
>>> a.__hash__() == b.__hash__()
True
>>> c = IpRange('10/8')
>>> a.__hash__() == c.__hash__()
False
>>> b.__hash__() == c.__hash__()
False
"""
return hash((self.startIp, self.endIp))
# end __hash__
def _cast(self, item):
if isinstance(item, basestring):
item = _address2long(item)
if type(item) not in (type(1), type(ipv4.MAX_IP), type(ipv6.MAX_IP)):
raise TypeError(
"expected ip address, 32-bit integer or 128-bit integer")
if ipv4 == self._ipver and item > ipv4.MAX_IP:
# casting an ipv6 in an ipv4 range
# downcast to ipv4 iff address is in the IPv4 mapped block
if item in _IPV6_MAPPED_IPV4:
item = item & ipv4.MAX_IP
# end if
return item
# end _cast
# end index
def count(self, item):
return int(item in self)
# end count
def __contains__(self, item):
"""
Implements membership test operators ``in`` and ``not in`` for the
address range.
>>> r = IpRange('127.0.0.1', '127.255.255.255')
>>> '127.127.127.127' in r
True
>>> '10.0.0.1' in r
False
>>> 2130706433 in r
True
>>> 'invalid' in r
Traceback (most recent call last):
...
TypeError: expected ip address, 32-bit integer or 128-bit integer
:param item: Dotted-quad ip address.
:type item: str
:returns: ``True`` if address is in range, ``False`` otherwise.
"""
item = self._cast(item)
return self.startIp <= item <= self.endIp
# end __contains__
def __getitem__(self, index):
"""
>>> r = IpRange('127.0.0.1', '127.255.255.255')
>>> r[0]
'127.0.0.1'
>>> r[16777214]
'127.255.255.255'
>>> r[-1]
'127.255.255.255'
>>> r[len(r)]
Traceback (most recent call last):
...
IndexError: index out of range
>>> r[:]
IpRange('127.0.0.1', '127.255.255.255')
>>> r[1:]
IpRange('127.0.0.2', '127.255.255.255')
>>> r[-2:]
IpRange('127.255.255.254', '127.255.255.255')
>>> r[0:2]
IpRange('127.0.0.1', '127.0.0.2')
>>> r[0:-1]
IpRange('127.0.0.1', '127.255.255.254')
>>> r[:-2]
IpRange('127.0.0.1', '127.255.255.253')
>>> r[::2]
Traceback (most recent call last):
...
ValueError: slice step not supported
"""
if isinstance(index, slice):
if index.step not in (None, 1):
# TODO: return an IpRangeList
raise ValueError('slice step not supported')
start = index.start or 0
if start < 0:
start = max(0, start + self._len)
if start >= self._len:
raise IndexError('start index out of range')
stop = index.stop or self._len
if stop < 0:
stop = max(start, stop + self._len)
if stop > self._len:
raise IndexError('stop index out of range')
return IpRange(
self._ipver.long2ip(self.startIp + start),
self._ipver.long2ip(self.startIp + stop - 1))
else:
if index < 0:
index = self._len + index
if index < 0 or index >= self._len:
raise IndexError('index out of range')
return self._ipver.long2ip(self.startIp + index)
# end __getitem__
def __iter__(self):
"""
Return an iterator over ip addresses in the range.
>>> iter = IpRange('127/31').__iter__()
>>> next(iter)
'127.0.0.0'
>>> next(iter)
'127.0.0.1'
>>> next(iter)
Traceback (most recent call last):
...
StopIteration
"""
i = self.startIp
while i <= self.endIp:
yield self._ipver.long2ip(i)
i += 1
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.get
|
python
|
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
|
A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L62-L68
| null |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.post
|
python
|
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
|
A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L70-L78
| null |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.get_parse
|
python
|
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
|
Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L85-L89
|
[
"def _request_parse(self, method, *args):\n r = method(*args)\n r.raise_for_status()\n return r.json()\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.post_parse
|
python
|
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
|
Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L91-L95
|
[
"def _request_parse(self, method, *args):\n r = method(*args)\n r.raise_for_status()\n return r.json()\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.categorization
|
python
|
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
|
Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L108-L121
|
[
"def _get_categorization(self, domain, labels):\n uri = urljoin(self._uris['categorization'], domain)\n params = {'showLabels': True} if labels else {}\n return self.get_parse(uri, params)\n",
"def _post_categorization(self, domains, labels):\n params = {'showLabels': True} if labels else {}\n return self.post_parse(self._uris['categorization'], params,\n json.dumps(domains)\n )\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.cooccurrences
|
python
|
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
|
Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L123-L129
|
[
"def get_parse(self, uri, params={}):\n '''Convenience method to call get() on an arbitrary URI and parse the response\n into a JSON object. Raises an error on non-200 response status.\n '''\n return self._request_parse(self.get, uri, params)\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.related
|
python
|
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
|
Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L131-L137
|
[
"def get_parse(self, uri, params={}):\n '''Convenience method to call get() on an arbitrary URI and parse the response\n into a JSON object. Raises an error on non-200 response status.\n '''\n return self._request_parse(self.get, uri, params)\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.security
|
python
|
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
|
Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L139-L145
|
[
"def get_parse(self, uri, params={}):\n '''Convenience method to call get() on an arbitrary URI and parse the response\n into a JSON object. Raises an error on non-200 response status.\n '''\n return self._request_parse(self.get, uri, params)\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.rr_history
|
python
|
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
|
Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L155-L172
|
[
"def _domain_rr_history(self, domain, query_type):\n uri = self._uris[\"domain_rr_history\"].format(query_type, domain)\n return self.get_parse(uri)\n",
"def _ip_rr_history(self, ip, query_type):\n uri = self._uris[\"ip_rr_history\"].format(query_type, ip)\n return self.get_parse(uri)\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.domain_whois
|
python
|
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
|
Gets whois information for a domain
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L187-L191
|
[
"def get_parse(self, uri, params={}):\n '''Convenience method to call get() on an arbitrary URI and parse the response\n into a JSON object. Raises an error on non-200 response status.\n '''\n return self._request_parse(self.get, uri, params)\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.domain_whois_history
|
python
|
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
|
Gets whois history for a domain
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L193-L202
|
[
"def get_parse(self, uri, params={}):\n '''Convenience method to call get() on an arbitrary URI and parse the response\n into a JSON object. Raises an error on non-200 response status.\n '''\n return self._request_parse(self.get, uri, params)\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.ns_whois
|
python
|
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
|
Gets the domains that have been registered with a nameserver or
nameservers
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L204-L215
|
[
"def get_parse(self, uri, params={}):\n '''Convenience method to call get() on an arbitrary URI and parse the response\n into a JSON object. Raises an error on non-200 response status.\n '''\n return self._request_parse(self.get, uri, params)\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.search
|
python
|
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
|
Searches for domains that match a given pattern
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L231-L253
| null |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.samples
|
python
|
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
|
Return an object representing the samples identified by the input domain, IP, or URL
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L255-L261
|
[
"def get_parse(self, uri, params={}):\n '''Convenience method to call get() on an arbitrary URI and parse the response\n into a JSON object. Raises an error on non-200 response status.\n '''\n return self._request_parse(self.get, uri, params)\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.sample
|
python
|
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
|
Return an object representing the sample identified by the input hash, or an empty object if that sample is not found
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L263-L269
|
[
"def get_parse(self, uri, params={}):\n '''Convenience method to call get() on an arbitrary URI and parse the response\n into a JSON object. Raises an error on non-200 response status.\n '''\n return self._request_parse(self.get, uri, params)\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.as_for_ip
|
python
|
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
|
Gets the AS information for a given IP address.
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L298-L306
|
[
"def get_parse(self, uri, params={}):\n '''Convenience method to call get() on an arbitrary URI and parse the response\n into a JSON object. Raises an error on non-200 response status.\n '''\n return self._request_parse(self.get, uri, params)\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.prefixes_for_asn
|
python
|
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
|
Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L308-L314
|
[
"def get_parse(self, uri, params={}):\n '''Convenience method to call get() on an arbitrary URI and parse the response\n into a JSON object. Raises an error on non-200 response status.\n '''\n return self._request_parse(self.get, uri, params)\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
opendns/pyinvestigate
|
investigate/investigate.py
|
Investigate.timeline
|
python
|
def timeline(self, uri):
'''Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
'''
uri = self._uris["timeline"].format(uri)
resp_json = self.get_parse(uri)
return resp_json
|
Get the domain tagging timeline for a given uri.
Could be a domain, ip, or url.
For details, see https://docs.umbrella.com/investigate-api/docs/timeline
|
train
|
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L316-L324
|
[
"def get_parse(self, uri, params={}):\n '''Convenience method to call get() on an arbitrary URI and parse the response\n into a JSON object. Raises an error on non-200 response status.\n '''\n return self._request_parse(self.get, uri, params)\n"
] |
class Investigate(object):
BASE_URL = 'https://investigate.api.umbrella.com/'
SUPPORTED_DNS_TYPES = [
"A",
"NS",
"MX",
"TXT",
"CNAME",
]
DEFAULT_LIMIT = None
DEFAULT_OFFSET = None
DEFAULT_SORT = None
IP_PATTERN = re.compile(r'(\d{1,3}\.){3}\d{1,3}')
DOMAIN_ERR = ValueError("domains must be a string or a list of strings")
IP_ERR = ValueError("invalid IP address")
UNSUPPORTED_DNS_QUERY = ValueError("supported query types are: {}"
.format(SUPPORTED_DNS_TYPES)
)
SEARCH_ERR = ValueError("Start argument must be a datetime or a timedelta")
def __init__(self, api_key, proxies={}):
self.api_key = api_key
self.proxies = proxies
self._uris = {
"categorization": "domains/categorization/",
"cooccurrences": "recommendations/name/{}.json",
"domain_rr_history": "dnsdb/name/{}/{}.json",
"ip_rr_history": "dnsdb/ip/{}/{}.json",
"latest_domains": "ips/{}/latest_domains",
"related": "links/name/{}.json",
"security": "security/name/{}.json",
"whois_email": "whois/emails/{}",
"whois_ns": "whois/nameservers/{}",
"whois_domain": "whois/{}",
"whois_domain_history": "whois/{}/history",
"search": "search/{}",
"samples": "samples/{}",
"sample": "sample/{}",
"sample_artifacts": "sample/{}/artifacts",
"sample_connections": "sample/{}/connections",
"sample_samples": "sample/{}/samples",
"as_for_ip": "bgp_routes/ip/{}/as_for_ip.json",
"prefixes_for_asn": "bgp_routes/asn/{}/prefixes_for_asn.json",
"timeline": "timeline/{}"
}
self._auth_header = {"Authorization": "Bearer " + self.api_key}
self._session = requests.Session()
def get(self, uri, params={}):
'''A generic method to make GET requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.get(urljoin(Investigate.BASE_URL, uri),
params=params, headers=self._auth_header, proxies=self.proxies
)
def post(self, uri, params={}, data={}):
'''A generic method to make POST requests to the OpenDNS Investigate API
on the given URI.
'''
return self._session.post(
urljoin(Investigate.BASE_URL, uri),
params=params, data=data, headers=self._auth_header,
proxies=self.proxies
)
def _request_parse(self, method, *args):
r = method(*args)
r.raise_for_status()
return r.json()
def get_parse(self, uri, params={}):
'''Convenience method to call get() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.get, uri, params)
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data)
def _get_categorization(self, domain, labels):
uri = urljoin(self._uris['categorization'], domain)
params = {'showLabels': True} if labels else {}
return self.get_parse(uri, params)
def _post_categorization(self, domains, labels):
params = {'showLabels': True} if labels else {}
return self.post_parse(self._uris['categorization'], params,
json.dumps(domains)
)
def categorization(self, domains, labels=False):
'''Get the domain status and categorization of a domain or list of domains.
'domains' can be either a single domain, or a list of domains.
Setting 'labels' to True will give back categorizations in human-readable
form.
For more detail, see https://investigate.umbrella.com/docs/api#categorization
'''
if type(domains) is str:
return self._get_categorization(domains, labels)
elif type(domains) is list:
return self._post_categorization(domains, labels)
else:
raise Investigate.DOMAIN_ERR
def cooccurrences(self, domain):
'''Get the cooccurrences of the given domain.
For details, see https://investigate.umbrella.com/docs/api#co-occurrences
'''
uri = self._uris["cooccurrences"].format(domain)
return self.get_parse(uri)
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri)
def security(self, domain):
'''Get the Security Information for the given domain.
For details, see https://investigate.umbrella.com/docs/api#securityInfo
'''
uri = self._uris["security"].format(domain)
return self.get_parse(uri)
def _domain_rr_history(self, domain, query_type):
uri = self._uris["domain_rr_history"].format(query_type, domain)
return self.get_parse(uri)
def _ip_rr_history(self, ip, query_type):
uri = self._uris["ip_rr_history"].format(query_type, ip)
return self.get_parse(uri)
def rr_history(self, query, query_type="A"):
'''Get the RR (Resource Record) History of the given domain or IP.
The default query type is for 'A' records, but the following query types
are supported:
A, NS, MX, TXT, CNAME
For details, see https://investigate.umbrella.com/docs/api#dnsrr_domain
'''
if query_type not in Investigate.SUPPORTED_DNS_TYPES:
raise Investigate.UNSUPPORTED_DNS_QUERY
# if this is an IP address, query the IP
if Investigate.IP_PATTERN.match(query):
return self._ip_rr_history(query, query_type)
# otherwise, query the domain
return self._domain_rr_history(query, query_type)
def latest_domains(self, ip):
'''Gets the latest known malicious domains associated with the given
IP address, if any. Returns the list of malicious domains.
'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["latest_domains"].format(ip)
resp_json = self.get_parse(uri)
# parse out the domain names
return [ val for d in resp_json for key, val in iteritems(d) if key == 'name' ]
def domain_whois(self, domain):
'''Gets whois information for a domain'''
uri = self._uris["whois_domain"].format(domain)
resp_json = self.get_parse(uri)
return resp_json
def domain_whois_history(self, domain, limit=None):
'''Gets whois history for a domain'''
params = dict()
if limit is not None:
params['limit'] = limit
uri = self._uris["whois_domain_history"].format(domain)
resp_json = self.get_parse(uri, params)
return resp_json
def ns_whois(self, nameservers, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a nameserver or
nameservers'''
if not isinstance(nameservers, list):
uri = self._uris["whois_ns"].format(nameservers)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_ns"].format('')
params = {'emailList' : ','.join(nameservers), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def email_whois(self, emails, limit=DEFAULT_LIMIT, offset=DEFAULT_OFFSET, sort_field=DEFAULT_SORT):
'''Gets the domains that have been registered with a given email
address
'''
if not isinstance(emails, list):
uri = self._uris["whois_email"].format(emails)
params = {'limit': limit, 'offset': offset, 'sortField': sort_field}
else:
uri = self._uris["whois_email"].format('')
params = {'emailList' : ','.join(emails), 'limit': limit, 'offset': offset, 'sortField': sort_field}
resp_json = self.get_parse(uri, params=params)
return resp_json
def search(self, pattern, start=None, limit=None, include_category=None):
'''Searches for domains that match a given pattern'''
params = dict()
if start is None:
start = datetime.timedelta(days=30)
if isinstance(start, datetime.timedelta):
params['start'] = int(time.mktime((datetime.datetime.utcnow() - start).timetuple()) * 1000)
elif isinstance(start, datetime.datetime):
params['start'] = int(time.mktime(start.timetuple()) * 1000)
else:
raise Investigate.SEARCH_ERR
if limit is not None and isinstance(limit, int):
params['limit'] = limit
if include_category is not None and isinstance(include_category, bool):
params['includeCategory'] = str(include_category).lower()
uri = self._uris['search'].format(quote_plus(pattern))
return self.get_parse(uri, params)
def samples(self, anystring, limit=None, offset=None, sortby=None):
'''Return an object representing the samples identified by the input domain, IP, or URL'''
uri = self._uris['samples'].format(anystring)
params = {'limit': limit, 'offset': offset, 'sortby': sortby}
return self.get_parse(uri, params)
def sample(self, hash, limit=None, offset=None):
'''Return an object representing the sample identified by the input hash, or an empty object if that sample is not found'''
uri = self._uris['sample'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_artifacts(self, hash, limit=None, offset=None):
'''
Return an object representing artifacts associated with an input hash
NOTE: Only available to Threat Grid customers
'''
uri = self._uris['sample_artifacts'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_connections(self, hash, limit=None, offset=None):
'''Return an object representing network connections associated with an input hash'''
uri = self._uris['sample_connections'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def sample_samples(self, hash, limit=None, offset=None):
'''Return an object representing samples associated with an input hash'''
uri = self._uris['sample_samples'].format(hash)
params = {'limit': limit, 'offset': offset}
return self.get_parse(uri, params)
def as_for_ip(self, ip):
'''Gets the AS information for a given IP address.'''
if not Investigate.IP_PATTERN.match(ip):
raise Investigate.IP_ERR
uri = self._uris["as_for_ip"].format(ip)
resp_json = self.get_parse(uri)
return resp_json
def prefixes_for_asn(self, asn):
'''Gets the AS information for a given ASN. Return the CIDR and geolocation associated with the AS.'''
uri = self._uris["prefixes_for_asn"].format(asn)
resp_json = self.get_parse(uri)
return resp_json
|
greyli/flask-avatars
|
flask_avatars/__init__.py
|
_Avatars.gravatar
|
python
|
def gravatar(hash, size=100, rating='g', default='identicon', include_extension=False, force_default=False):
if include_extension:
hash += '.jpg'
default = default or current_app.config['AVATARS_GRAVATAR_DEFAULT']
query_string = urlencode({'s': int(size), 'r': rating, 'd': default})
if force_default:
query_string += '&q=y'
return 'https://gravatar.com/avatar/' + hash + '?' + query_string
|
Pass email hash, return Gravatar URL. You can get email hash like this::
import hashlib
avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
Visit https://en.gravatar.com/site/implement/images/ for more information.
:param hash: The email hash used to generate avatar URL.
:param size: The size of the avatar, default to 100 pixel.
:param rating: The rating of the avatar, default to ``g``
:param default: The type of default avatar, default to ``identicon``.
:param include_extension: Append a '.jpg' extension at the end of URL, default to ``False``.
:param force_default: Force to use default avatar, default to ``False``.
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L27-L50
| null |
class _Avatars(object):
@staticmethod
@staticmethod
def robohash(text, size=200):
"""Pass text, return Robohash-style avatar (robot).
Visit https://robohash.org/ for more information.
:param text: The text used to generate avatar.
:param size: The size of the avatar, default to 200 pixel.
"""
return 'https://robohash.org/{text}?size={size}x{size}'.format(text=text, size=size)
@staticmethod
def social_media(username, platform='twitter', size='medium'):
"""Return avatar URL at social media.
Visit https://avatars.io for more information.
:param username: The username of the social media.
:param platform: One of facebook, instagram, twitter, gravatar.
:param size: The size of avatar, one of small, medium and large.
"""
return 'https://avatars.io/{platform}/{username}/{size}'.format(
platform=platform, username=username, size=size)
@staticmethod
def default(size='m'):
"""Return built-in default avatar.
:param size: The size of avatar, one of s, m, l.
:return: Default avatar URL
"""
return url_for('avatars.static', filename='default/default_{size}.jpg'.format(size=size))
@staticmethod
def jcrop_css(css_url=None):
"""Load jcrop css file.
:param css_url: The custom CSS URL.
"""
if css_url is None:
if current_app.config['AVATARS_SERVE_LOCAL']:
css_url = url_for('avatars.static', filename='jcrop/css/jquery.Jcrop.min.css')
else:
css_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'
return Markup('<link rel="stylesheet" href="%s">' % css_url)
@staticmethod
def jcrop_js(js_url=None, with_jquery=True):
"""Load jcrop Javascript file.
:param js_url: The custom JavaScript URL.
:param with_jquery: Include jQuery or not, default to ``True``.
"""
serve_local = current_app.config['AVATARS_SERVE_LOCAL']
if js_url is None:
if serve_local:
js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js')
else:
js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js'
if with_jquery:
if serve_local:
jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js')
else:
jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>'
else:
jquery = ''
return Markup('''%s\n<script src="%s"></script>
''' % (jquery, js_url))
@staticmethod
def crop_box(endpoint=None, filename=None):
"""Create a crop box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
"""
crop_size = current_app.config['AVATARS_CROP_BASE_WIDTH']
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('<img src="%s" id="crop-box" style="max-width: %dpx; display: block;">' % (url, crop_size))
@staticmethod
def preview_box(endpoint=None, filename=None):
"""Create a preview box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
"""
preview_size = current_app.config['AVATARS_CROP_PREVIEW_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('''
<div id="preview-box">
<div class="preview-box" style="width: %dpx; height: %dpx; overflow: hidden;">
<img src="%s" class="jcrop-preview" alt="Preview"/>
</div>
</div>''' % (preview_size, preview_size, url))
@staticmethod
def init_jcrop(min_size=None):
"""Initialize jcrop.
:param min_size: The minimal size of crop area.
"""
init_x = current_app.config['AVATARS_CROP_INIT_POS'][0]
init_y = current_app.config['AVATARS_CROP_INIT_POS'][1]
init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if current_app.config['AVATARS_CROP_MIN_SIZE']:
min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2]
min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size)
else:
min_size_js = ''
return Markup('''
<script type="text/javascript">
jQuery(function ($) {
// Create variables (in this scope) to hold the API and image size
var jcrop_api,
boundx,
boundy,
// Grab some information about the preview pane
$preview = $('#preview-box'),
$pcnt = $('#preview-box .preview-box'),
$pimg = $('#preview-box .preview-box img'),
xsize = $pcnt.width(),
ysize = $pcnt.height();
$('#crop-box').Jcrop({
onChange: updatePreview,
onSelect: updateCoords,
setSelect: [%s, %s, %s, %s],
aspectRatio: 1
}, function () {
// Use the API to get the real image size
var bounds = this.getBounds();
boundx = bounds[0];
boundy = bounds[1];
// Store the API in the jcrop_api variable
jcrop_api = this;
%s
jcrop_api.focus();
// Move the preview into the jcrop container for css positioning
$preview.appendTo(jcrop_api.ui.holder);
});
function updatePreview(c) {
if (parseInt(c.w) > 0) {
var rx = xsize / c.w;
var ry = ysize / c.h;
$pimg.css({
width: Math.round(rx * boundx) + 'px',
height: Math.round(ry * boundy) + 'px',
marginLeft: '-' + Math.round(rx * c.x) + 'px',
marginTop: '-' + Math.round(ry * c.y) + 'px'
});
}
}
});
function updateCoords(c) {
$('#x').val(c.x);
$('#y').val(c.y);
$('#w').val(c.w);
$('#h').val(c.h);
}
</script>
''' % (init_x, init_y, init_size, init_size, min_size_js))
|
greyli/flask-avatars
|
flask_avatars/__init__.py
|
_Avatars.social_media
|
python
|
def social_media(username, platform='twitter', size='medium'):
return 'https://avatars.io/{platform}/{username}/{size}'.format(
platform=platform, username=username, size=size)
|
Return avatar URL at social media.
Visit https://avatars.io for more information.
:param username: The username of the social media.
:param platform: One of facebook, instagram, twitter, gravatar.
:param size: The size of avatar, one of small, medium and large.
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L63-L72
| null |
class _Avatars(object):
@staticmethod
def gravatar(hash, size=100, rating='g', default='identicon', include_extension=False, force_default=False):
"""Pass email hash, return Gravatar URL. You can get email hash like this::
import hashlib
avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
Visit https://en.gravatar.com/site/implement/images/ for more information.
:param hash: The email hash used to generate avatar URL.
:param size: The size of the avatar, default to 100 pixel.
:param rating: The rating of the avatar, default to ``g``
:param default: The type of default avatar, default to ``identicon``.
:param include_extension: Append a '.jpg' extension at the end of URL, default to ``False``.
:param force_default: Force to use default avatar, default to ``False``.
"""
if include_extension:
hash += '.jpg'
default = default or current_app.config['AVATARS_GRAVATAR_DEFAULT']
query_string = urlencode({'s': int(size), 'r': rating, 'd': default})
if force_default:
query_string += '&q=y'
return 'https://gravatar.com/avatar/' + hash + '?' + query_string
@staticmethod
def robohash(text, size=200):
"""Pass text, return Robohash-style avatar (robot).
Visit https://robohash.org/ for more information.
:param text: The text used to generate avatar.
:param size: The size of the avatar, default to 200 pixel.
"""
return 'https://robohash.org/{text}?size={size}x{size}'.format(text=text, size=size)
@staticmethod
@staticmethod
def default(size='m'):
"""Return built-in default avatar.
:param size: The size of avatar, one of s, m, l.
:return: Default avatar URL
"""
return url_for('avatars.static', filename='default/default_{size}.jpg'.format(size=size))
@staticmethod
def jcrop_css(css_url=None):
"""Load jcrop css file.
:param css_url: The custom CSS URL.
"""
if css_url is None:
if current_app.config['AVATARS_SERVE_LOCAL']:
css_url = url_for('avatars.static', filename='jcrop/css/jquery.Jcrop.min.css')
else:
css_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'
return Markup('<link rel="stylesheet" href="%s">' % css_url)
@staticmethod
def jcrop_js(js_url=None, with_jquery=True):
"""Load jcrop Javascript file.
:param js_url: The custom JavaScript URL.
:param with_jquery: Include jQuery or not, default to ``True``.
"""
serve_local = current_app.config['AVATARS_SERVE_LOCAL']
if js_url is None:
if serve_local:
js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js')
else:
js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js'
if with_jquery:
if serve_local:
jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js')
else:
jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>'
else:
jquery = ''
return Markup('''%s\n<script src="%s"></script>
''' % (jquery, js_url))
@staticmethod
def crop_box(endpoint=None, filename=None):
"""Create a crop box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
"""
crop_size = current_app.config['AVATARS_CROP_BASE_WIDTH']
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('<img src="%s" id="crop-box" style="max-width: %dpx; display: block;">' % (url, crop_size))
@staticmethod
def preview_box(endpoint=None, filename=None):
"""Create a preview box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
"""
preview_size = current_app.config['AVATARS_CROP_PREVIEW_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('''
<div id="preview-box">
<div class="preview-box" style="width: %dpx; height: %dpx; overflow: hidden;">
<img src="%s" class="jcrop-preview" alt="Preview"/>
</div>
</div>''' % (preview_size, preview_size, url))
@staticmethod
def init_jcrop(min_size=None):
"""Initialize jcrop.
:param min_size: The minimal size of crop area.
"""
init_x = current_app.config['AVATARS_CROP_INIT_POS'][0]
init_y = current_app.config['AVATARS_CROP_INIT_POS'][1]
init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if current_app.config['AVATARS_CROP_MIN_SIZE']:
min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2]
min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size)
else:
min_size_js = ''
return Markup('''
<script type="text/javascript">
jQuery(function ($) {
// Create variables (in this scope) to hold the API and image size
var jcrop_api,
boundx,
boundy,
// Grab some information about the preview pane
$preview = $('#preview-box'),
$pcnt = $('#preview-box .preview-box'),
$pimg = $('#preview-box .preview-box img'),
xsize = $pcnt.width(),
ysize = $pcnt.height();
$('#crop-box').Jcrop({
onChange: updatePreview,
onSelect: updateCoords,
setSelect: [%s, %s, %s, %s],
aspectRatio: 1
}, function () {
// Use the API to get the real image size
var bounds = this.getBounds();
boundx = bounds[0];
boundy = bounds[1];
// Store the API in the jcrop_api variable
jcrop_api = this;
%s
jcrop_api.focus();
// Move the preview into the jcrop container for css positioning
$preview.appendTo(jcrop_api.ui.holder);
});
function updatePreview(c) {
if (parseInt(c.w) > 0) {
var rx = xsize / c.w;
var ry = ysize / c.h;
$pimg.css({
width: Math.round(rx * boundx) + 'px',
height: Math.round(ry * boundy) + 'px',
marginLeft: '-' + Math.round(rx * c.x) + 'px',
marginTop: '-' + Math.round(ry * c.y) + 'px'
});
}
}
});
function updateCoords(c) {
$('#x').val(c.x);
$('#y').val(c.y);
$('#w').val(c.w);
$('#h').val(c.h);
}
</script>
''' % (init_x, init_y, init_size, init_size, min_size_js))
|
greyli/flask-avatars
|
flask_avatars/__init__.py
|
_Avatars.jcrop_css
|
python
|
def jcrop_css(css_url=None):
if css_url is None:
if current_app.config['AVATARS_SERVE_LOCAL']:
css_url = url_for('avatars.static', filename='jcrop/css/jquery.Jcrop.min.css')
else:
css_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'
return Markup('<link rel="stylesheet" href="%s">' % css_url)
|
Load jcrop css file.
:param css_url: The custom CSS URL.
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L84-L94
| null |
class _Avatars(object):
@staticmethod
def gravatar(hash, size=100, rating='g', default='identicon', include_extension=False, force_default=False):
"""Pass email hash, return Gravatar URL. You can get email hash like this::
import hashlib
avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
Visit https://en.gravatar.com/site/implement/images/ for more information.
:param hash: The email hash used to generate avatar URL.
:param size: The size of the avatar, default to 100 pixel.
:param rating: The rating of the avatar, default to ``g``
:param default: The type of default avatar, default to ``identicon``.
:param include_extension: Append a '.jpg' extension at the end of URL, default to ``False``.
:param force_default: Force to use default avatar, default to ``False``.
"""
if include_extension:
hash += '.jpg'
default = default or current_app.config['AVATARS_GRAVATAR_DEFAULT']
query_string = urlencode({'s': int(size), 'r': rating, 'd': default})
if force_default:
query_string += '&q=y'
return 'https://gravatar.com/avatar/' + hash + '?' + query_string
@staticmethod
def robohash(text, size=200):
"""Pass text, return Robohash-style avatar (robot).
Visit https://robohash.org/ for more information.
:param text: The text used to generate avatar.
:param size: The size of the avatar, default to 200 pixel.
"""
return 'https://robohash.org/{text}?size={size}x{size}'.format(text=text, size=size)
@staticmethod
def social_media(username, platform='twitter', size='medium'):
"""Return avatar URL at social media.
Visit https://avatars.io for more information.
:param username: The username of the social media.
:param platform: One of facebook, instagram, twitter, gravatar.
:param size: The size of avatar, one of small, medium and large.
"""
return 'https://avatars.io/{platform}/{username}/{size}'.format(
platform=platform, username=username, size=size)
@staticmethod
def default(size='m'):
"""Return built-in default avatar.
:param size: The size of avatar, one of s, m, l.
:return: Default avatar URL
"""
return url_for('avatars.static', filename='default/default_{size}.jpg'.format(size=size))
@staticmethod
@staticmethod
def jcrop_js(js_url=None, with_jquery=True):
"""Load jcrop Javascript file.
:param js_url: The custom JavaScript URL.
:param with_jquery: Include jQuery or not, default to ``True``.
"""
serve_local = current_app.config['AVATARS_SERVE_LOCAL']
if js_url is None:
if serve_local:
js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js')
else:
js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js'
if with_jquery:
if serve_local:
jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js')
else:
jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>'
else:
jquery = ''
return Markup('''%s\n<script src="%s"></script>
''' % (jquery, js_url))
@staticmethod
def crop_box(endpoint=None, filename=None):
"""Create a crop box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
"""
crop_size = current_app.config['AVATARS_CROP_BASE_WIDTH']
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('<img src="%s" id="crop-box" style="max-width: %dpx; display: block;">' % (url, crop_size))
@staticmethod
def preview_box(endpoint=None, filename=None):
"""Create a preview box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
"""
preview_size = current_app.config['AVATARS_CROP_PREVIEW_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('''
<div id="preview-box">
<div class="preview-box" style="width: %dpx; height: %dpx; overflow: hidden;">
<img src="%s" class="jcrop-preview" alt="Preview"/>
</div>
</div>''' % (preview_size, preview_size, url))
@staticmethod
def init_jcrop(min_size=None):
"""Initialize jcrop.
:param min_size: The minimal size of crop area.
"""
init_x = current_app.config['AVATARS_CROP_INIT_POS'][0]
init_y = current_app.config['AVATARS_CROP_INIT_POS'][1]
init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if current_app.config['AVATARS_CROP_MIN_SIZE']:
min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2]
min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size)
else:
min_size_js = ''
return Markup('''
<script type="text/javascript">
jQuery(function ($) {
// Create variables (in this scope) to hold the API and image size
var jcrop_api,
boundx,
boundy,
// Grab some information about the preview pane
$preview = $('#preview-box'),
$pcnt = $('#preview-box .preview-box'),
$pimg = $('#preview-box .preview-box img'),
xsize = $pcnt.width(),
ysize = $pcnt.height();
$('#crop-box').Jcrop({
onChange: updatePreview,
onSelect: updateCoords,
setSelect: [%s, %s, %s, %s],
aspectRatio: 1
}, function () {
// Use the API to get the real image size
var bounds = this.getBounds();
boundx = bounds[0];
boundy = bounds[1];
// Store the API in the jcrop_api variable
jcrop_api = this;
%s
jcrop_api.focus();
// Move the preview into the jcrop container for css positioning
$preview.appendTo(jcrop_api.ui.holder);
});
function updatePreview(c) {
if (parseInt(c.w) > 0) {
var rx = xsize / c.w;
var ry = ysize / c.h;
$pimg.css({
width: Math.round(rx * boundx) + 'px',
height: Math.round(ry * boundy) + 'px',
marginLeft: '-' + Math.round(rx * c.x) + 'px',
marginTop: '-' + Math.round(ry * c.y) + 'px'
});
}
}
});
function updateCoords(c) {
$('#x').val(c.x);
$('#y').val(c.y);
$('#w').val(c.w);
$('#h').val(c.h);
}
</script>
''' % (init_x, init_y, init_size, init_size, min_size_js))
|
greyli/flask-avatars
|
flask_avatars/__init__.py
|
_Avatars.jcrop_js
|
python
|
def jcrop_js(js_url=None, with_jquery=True):
serve_local = current_app.config['AVATARS_SERVE_LOCAL']
if js_url is None:
if serve_local:
js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js')
else:
js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js'
if with_jquery:
if serve_local:
jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js')
else:
jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>'
else:
jquery = ''
return Markup('''%s\n<script src="%s"></script>
''' % (jquery, js_url))
|
Load jcrop Javascript file.
:param js_url: The custom JavaScript URL.
:param with_jquery: Include jQuery or not, default to ``True``.
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L97-L119
| null |
class _Avatars(object):
@staticmethod
def gravatar(hash, size=100, rating='g', default='identicon', include_extension=False, force_default=False):
"""Pass email hash, return Gravatar URL. You can get email hash like this::
import hashlib
avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
Visit https://en.gravatar.com/site/implement/images/ for more information.
:param hash: The email hash used to generate avatar URL.
:param size: The size of the avatar, default to 100 pixel.
:param rating: The rating of the avatar, default to ``g``
:param default: The type of default avatar, default to ``identicon``.
:param include_extension: Append a '.jpg' extension at the end of URL, default to ``False``.
:param force_default: Force to use default avatar, default to ``False``.
"""
if include_extension:
hash += '.jpg'
default = default or current_app.config['AVATARS_GRAVATAR_DEFAULT']
query_string = urlencode({'s': int(size), 'r': rating, 'd': default})
if force_default:
query_string += '&q=y'
return 'https://gravatar.com/avatar/' + hash + '?' + query_string
@staticmethod
def robohash(text, size=200):
"""Pass text, return Robohash-style avatar (robot).
Visit https://robohash.org/ for more information.
:param text: The text used to generate avatar.
:param size: The size of the avatar, default to 200 pixel.
"""
return 'https://robohash.org/{text}?size={size}x{size}'.format(text=text, size=size)
@staticmethod
def social_media(username, platform='twitter', size='medium'):
"""Return avatar URL at social media.
Visit https://avatars.io for more information.
:param username: The username of the social media.
:param platform: One of facebook, instagram, twitter, gravatar.
:param size: The size of avatar, one of small, medium and large.
"""
return 'https://avatars.io/{platform}/{username}/{size}'.format(
platform=platform, username=username, size=size)
@staticmethod
def default(size='m'):
"""Return built-in default avatar.
:param size: The size of avatar, one of s, m, l.
:return: Default avatar URL
"""
return url_for('avatars.static', filename='default/default_{size}.jpg'.format(size=size))
@staticmethod
def jcrop_css(css_url=None):
"""Load jcrop css file.
:param css_url: The custom CSS URL.
"""
if css_url is None:
if current_app.config['AVATARS_SERVE_LOCAL']:
css_url = url_for('avatars.static', filename='jcrop/css/jquery.Jcrop.min.css')
else:
css_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'
return Markup('<link rel="stylesheet" href="%s">' % css_url)
@staticmethod
@staticmethod
def crop_box(endpoint=None, filename=None):
"""Create a crop box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
"""
crop_size = current_app.config['AVATARS_CROP_BASE_WIDTH']
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('<img src="%s" id="crop-box" style="max-width: %dpx; display: block;">' % (url, crop_size))
@staticmethod
def preview_box(endpoint=None, filename=None):
"""Create a preview box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
"""
preview_size = current_app.config['AVATARS_CROP_PREVIEW_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('''
<div id="preview-box">
<div class="preview-box" style="width: %dpx; height: %dpx; overflow: hidden;">
<img src="%s" class="jcrop-preview" alt="Preview"/>
</div>
</div>''' % (preview_size, preview_size, url))
@staticmethod
def init_jcrop(min_size=None):
"""Initialize jcrop.
:param min_size: The minimal size of crop area.
"""
init_x = current_app.config['AVATARS_CROP_INIT_POS'][0]
init_y = current_app.config['AVATARS_CROP_INIT_POS'][1]
init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if current_app.config['AVATARS_CROP_MIN_SIZE']:
min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2]
min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size)
else:
min_size_js = ''
return Markup('''
<script type="text/javascript">
jQuery(function ($) {
// Create variables (in this scope) to hold the API and image size
var jcrop_api,
boundx,
boundy,
// Grab some information about the preview pane
$preview = $('#preview-box'),
$pcnt = $('#preview-box .preview-box'),
$pimg = $('#preview-box .preview-box img'),
xsize = $pcnt.width(),
ysize = $pcnt.height();
$('#crop-box').Jcrop({
onChange: updatePreview,
onSelect: updateCoords,
setSelect: [%s, %s, %s, %s],
aspectRatio: 1
}, function () {
// Use the API to get the real image size
var bounds = this.getBounds();
boundx = bounds[0];
boundy = bounds[1];
// Store the API in the jcrop_api variable
jcrop_api = this;
%s
jcrop_api.focus();
// Move the preview into the jcrop container for css positioning
$preview.appendTo(jcrop_api.ui.holder);
});
function updatePreview(c) {
if (parseInt(c.w) > 0) {
var rx = xsize / c.w;
var ry = ysize / c.h;
$pimg.css({
width: Math.round(rx * boundx) + 'px',
height: Math.round(ry * boundy) + 'px',
marginLeft: '-' + Math.round(rx * c.x) + 'px',
marginTop: '-' + Math.round(ry * c.y) + 'px'
});
}
}
});
function updateCoords(c) {
$('#x').val(c.x);
$('#y').val(c.y);
$('#w').val(c.w);
$('#h').val(c.h);
}
</script>
''' % (init_x, init_y, init_size, init_size, min_size_js))
|
greyli/flask-avatars
|
flask_avatars/__init__.py
|
_Avatars.crop_box
|
python
|
def crop_box(endpoint=None, filename=None):
crop_size = current_app.config['AVATARS_CROP_BASE_WIDTH']
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('<img src="%s" id="crop-box" style="max-width: %dpx; display: block;">' % (url, crop_size))
|
Create a crop box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L122-L134
| null |
class _Avatars(object):
@staticmethod
def gravatar(hash, size=100, rating='g', default='identicon', include_extension=False, force_default=False):
"""Pass email hash, return Gravatar URL. You can get email hash like this::
import hashlib
avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
Visit https://en.gravatar.com/site/implement/images/ for more information.
:param hash: The email hash used to generate avatar URL.
:param size: The size of the avatar, default to 100 pixel.
:param rating: The rating of the avatar, default to ``g``
:param default: The type of default avatar, default to ``identicon``.
:param include_extension: Append a '.jpg' extension at the end of URL, default to ``False``.
:param force_default: Force to use default avatar, default to ``False``.
"""
if include_extension:
hash += '.jpg'
default = default or current_app.config['AVATARS_GRAVATAR_DEFAULT']
query_string = urlencode({'s': int(size), 'r': rating, 'd': default})
if force_default:
query_string += '&q=y'
return 'https://gravatar.com/avatar/' + hash + '?' + query_string
@staticmethod
def robohash(text, size=200):
"""Pass text, return Robohash-style avatar (robot).
Visit https://robohash.org/ for more information.
:param text: The text used to generate avatar.
:param size: The size of the avatar, default to 200 pixel.
"""
return 'https://robohash.org/{text}?size={size}x{size}'.format(text=text, size=size)
@staticmethod
def social_media(username, platform='twitter', size='medium'):
"""Return avatar URL at social media.
Visit https://avatars.io for more information.
:param username: The username of the social media.
:param platform: One of facebook, instagram, twitter, gravatar.
:param size: The size of avatar, one of small, medium and large.
"""
return 'https://avatars.io/{platform}/{username}/{size}'.format(
platform=platform, username=username, size=size)
@staticmethod
def default(size='m'):
"""Return built-in default avatar.
:param size: The size of avatar, one of s, m, l.
:return: Default avatar URL
"""
return url_for('avatars.static', filename='default/default_{size}.jpg'.format(size=size))
@staticmethod
def jcrop_css(css_url=None):
"""Load jcrop css file.
:param css_url: The custom CSS URL.
"""
if css_url is None:
if current_app.config['AVATARS_SERVE_LOCAL']:
css_url = url_for('avatars.static', filename='jcrop/css/jquery.Jcrop.min.css')
else:
css_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'
return Markup('<link rel="stylesheet" href="%s">' % css_url)
@staticmethod
def jcrop_js(js_url=None, with_jquery=True):
"""Load jcrop Javascript file.
:param js_url: The custom JavaScript URL.
:param with_jquery: Include jQuery or not, default to ``True``.
"""
serve_local = current_app.config['AVATARS_SERVE_LOCAL']
if js_url is None:
if serve_local:
js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js')
else:
js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js'
if with_jquery:
if serve_local:
jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js')
else:
jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>'
else:
jquery = ''
return Markup('''%s\n<script src="%s"></script>
''' % (jquery, js_url))
@staticmethod
@staticmethod
def preview_box(endpoint=None, filename=None):
"""Create a preview box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
"""
preview_size = current_app.config['AVATARS_CROP_PREVIEW_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('''
<div id="preview-box">
<div class="preview-box" style="width: %dpx; height: %dpx; overflow: hidden;">
<img src="%s" class="jcrop-preview" alt="Preview"/>
</div>
</div>''' % (preview_size, preview_size, url))
@staticmethod
def init_jcrop(min_size=None):
"""Initialize jcrop.
:param min_size: The minimal size of crop area.
"""
init_x = current_app.config['AVATARS_CROP_INIT_POS'][0]
init_y = current_app.config['AVATARS_CROP_INIT_POS'][1]
init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if current_app.config['AVATARS_CROP_MIN_SIZE']:
min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2]
min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size)
else:
min_size_js = ''
return Markup('''
<script type="text/javascript">
jQuery(function ($) {
// Create variables (in this scope) to hold the API and image size
var jcrop_api,
boundx,
boundy,
// Grab some information about the preview pane
$preview = $('#preview-box'),
$pcnt = $('#preview-box .preview-box'),
$pimg = $('#preview-box .preview-box img'),
xsize = $pcnt.width(),
ysize = $pcnt.height();
$('#crop-box').Jcrop({
onChange: updatePreview,
onSelect: updateCoords,
setSelect: [%s, %s, %s, %s],
aspectRatio: 1
}, function () {
// Use the API to get the real image size
var bounds = this.getBounds();
boundx = bounds[0];
boundy = bounds[1];
// Store the API in the jcrop_api variable
jcrop_api = this;
%s
jcrop_api.focus();
// Move the preview into the jcrop container for css positioning
$preview.appendTo(jcrop_api.ui.holder);
});
function updatePreview(c) {
if (parseInt(c.w) > 0) {
var rx = xsize / c.w;
var ry = ysize / c.h;
$pimg.css({
width: Math.round(rx * boundx) + 'px',
height: Math.round(ry * boundy) + 'px',
marginLeft: '-' + Math.round(rx * c.x) + 'px',
marginTop: '-' + Math.round(ry * c.y) + 'px'
});
}
}
});
function updateCoords(c) {
$('#x').val(c.x);
$('#y').val(c.y);
$('#w').val(c.w);
$('#h').val(c.h);
}
</script>
''' % (init_x, init_y, init_size, init_size, min_size_js))
|
greyli/flask-avatars
|
flask_avatars/__init__.py
|
_Avatars.preview_box
|
python
|
def preview_box(endpoint=None, filename=None):
preview_size = current_app.config['AVATARS_CROP_PREVIEW_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('''
<div id="preview-box">
<div class="preview-box" style="width: %dpx; height: %dpx; overflow: hidden;">
<img src="%s" class="jcrop-preview" alt="Preview"/>
</div>
</div>''' % (preview_size, preview_size, url))
|
Create a preview box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L137-L154
| null |
class _Avatars(object):
@staticmethod
def gravatar(hash, size=100, rating='g', default='identicon', include_extension=False, force_default=False):
"""Pass email hash, return Gravatar URL. You can get email hash like this::
import hashlib
avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
Visit https://en.gravatar.com/site/implement/images/ for more information.
:param hash: The email hash used to generate avatar URL.
:param size: The size of the avatar, default to 100 pixel.
:param rating: The rating of the avatar, default to ``g``
:param default: The type of default avatar, default to ``identicon``.
:param include_extension: Append a '.jpg' extension at the end of URL, default to ``False``.
:param force_default: Force to use default avatar, default to ``False``.
"""
if include_extension:
hash += '.jpg'
default = default or current_app.config['AVATARS_GRAVATAR_DEFAULT']
query_string = urlencode({'s': int(size), 'r': rating, 'd': default})
if force_default:
query_string += '&q=y'
return 'https://gravatar.com/avatar/' + hash + '?' + query_string
@staticmethod
def robohash(text, size=200):
"""Pass text, return Robohash-style avatar (robot).
Visit https://robohash.org/ for more information.
:param text: The text used to generate avatar.
:param size: The size of the avatar, default to 200 pixel.
"""
return 'https://robohash.org/{text}?size={size}x{size}'.format(text=text, size=size)
@staticmethod
def social_media(username, platform='twitter', size='medium'):
"""Return avatar URL at social media.
Visit https://avatars.io for more information.
:param username: The username of the social media.
:param platform: One of facebook, instagram, twitter, gravatar.
:param size: The size of avatar, one of small, medium and large.
"""
return 'https://avatars.io/{platform}/{username}/{size}'.format(
platform=platform, username=username, size=size)
@staticmethod
def default(size='m'):
"""Return built-in default avatar.
:param size: The size of avatar, one of s, m, l.
:return: Default avatar URL
"""
return url_for('avatars.static', filename='default/default_{size}.jpg'.format(size=size))
@staticmethod
def jcrop_css(css_url=None):
"""Load jcrop css file.
:param css_url: The custom CSS URL.
"""
if css_url is None:
if current_app.config['AVATARS_SERVE_LOCAL']:
css_url = url_for('avatars.static', filename='jcrop/css/jquery.Jcrop.min.css')
else:
css_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'
return Markup('<link rel="stylesheet" href="%s">' % css_url)
@staticmethod
def jcrop_js(js_url=None, with_jquery=True):
"""Load jcrop Javascript file.
:param js_url: The custom JavaScript URL.
:param with_jquery: Include jQuery or not, default to ``True``.
"""
serve_local = current_app.config['AVATARS_SERVE_LOCAL']
if js_url is None:
if serve_local:
js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js')
else:
js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js'
if with_jquery:
if serve_local:
jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js')
else:
jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>'
else:
jquery = ''
return Markup('''%s\n<script src="%s"></script>
''' % (jquery, js_url))
@staticmethod
def crop_box(endpoint=None, filename=None):
"""Create a crop box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
"""
crop_size = current_app.config['AVATARS_CROP_BASE_WIDTH']
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('<img src="%s" id="crop-box" style="max-width: %dpx; display: block;">' % (url, crop_size))
@staticmethod
@staticmethod
def init_jcrop(min_size=None):
"""Initialize jcrop.
:param min_size: The minimal size of crop area.
"""
init_x = current_app.config['AVATARS_CROP_INIT_POS'][0]
init_y = current_app.config['AVATARS_CROP_INIT_POS'][1]
init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if current_app.config['AVATARS_CROP_MIN_SIZE']:
min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2]
min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size)
else:
min_size_js = ''
return Markup('''
<script type="text/javascript">
jQuery(function ($) {
// Create variables (in this scope) to hold the API and image size
var jcrop_api,
boundx,
boundy,
// Grab some information about the preview pane
$preview = $('#preview-box'),
$pcnt = $('#preview-box .preview-box'),
$pimg = $('#preview-box .preview-box img'),
xsize = $pcnt.width(),
ysize = $pcnt.height();
$('#crop-box').Jcrop({
onChange: updatePreview,
onSelect: updateCoords,
setSelect: [%s, %s, %s, %s],
aspectRatio: 1
}, function () {
// Use the API to get the real image size
var bounds = this.getBounds();
boundx = bounds[0];
boundy = bounds[1];
// Store the API in the jcrop_api variable
jcrop_api = this;
%s
jcrop_api.focus();
// Move the preview into the jcrop container for css positioning
$preview.appendTo(jcrop_api.ui.holder);
});
function updatePreview(c) {
if (parseInt(c.w) > 0) {
var rx = xsize / c.w;
var ry = ysize / c.h;
$pimg.css({
width: Math.round(rx * boundx) + 'px',
height: Math.round(ry * boundy) + 'px',
marginLeft: '-' + Math.round(rx * c.x) + 'px',
marginTop: '-' + Math.round(ry * c.y) + 'px'
});
}
}
});
function updateCoords(c) {
$('#x').val(c.x);
$('#y').val(c.y);
$('#w').val(c.w);
$('#h').val(c.h);
}
</script>
''' % (init_x, init_y, init_size, init_size, min_size_js))
|
greyli/flask-avatars
|
flask_avatars/__init__.py
|
_Avatars.init_jcrop
|
python
|
def init_jcrop(min_size=None):
init_x = current_app.config['AVATARS_CROP_INIT_POS'][0]
init_y = current_app.config['AVATARS_CROP_INIT_POS'][1]
init_size = current_app.config['AVATARS_CROP_INIT_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if current_app.config['AVATARS_CROP_MIN_SIZE']:
min_size = min_size or current_app.config['AVATARS_SIZE_TUPLE'][2]
min_size_js = 'jcrop_api.setOptions({minSize: [%d, %d]});' % (min_size, min_size)
else:
min_size_js = ''
return Markup('''
<script type="text/javascript">
jQuery(function ($) {
// Create variables (in this scope) to hold the API and image size
var jcrop_api,
boundx,
boundy,
// Grab some information about the preview pane
$preview = $('#preview-box'),
$pcnt = $('#preview-box .preview-box'),
$pimg = $('#preview-box .preview-box img'),
xsize = $pcnt.width(),
ysize = $pcnt.height();
$('#crop-box').Jcrop({
onChange: updatePreview,
onSelect: updateCoords,
setSelect: [%s, %s, %s, %s],
aspectRatio: 1
}, function () {
// Use the API to get the real image size
var bounds = this.getBounds();
boundx = bounds[0];
boundy = bounds[1];
// Store the API in the jcrop_api variable
jcrop_api = this;
%s
jcrop_api.focus();
// Move the preview into the jcrop container for css positioning
$preview.appendTo(jcrop_api.ui.holder);
});
function updatePreview(c) {
if (parseInt(c.w) > 0) {
var rx = xsize / c.w;
var ry = ysize / c.h;
$pimg.css({
width: Math.round(rx * boundx) + 'px',
height: Math.round(ry * boundy) + 'px',
marginLeft: '-' + Math.round(rx * c.x) + 'px',
marginTop: '-' + Math.round(ry * c.y) + 'px'
});
}
}
});
function updateCoords(c) {
$('#x').val(c.x);
$('#y').val(c.y);
$('#w').val(c.w);
$('#h').val(c.h);
}
</script>
''' % (init_x, init_y, init_size, init_size, min_size_js))
|
Initialize jcrop.
:param min_size: The minimal size of crop area.
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L157-L226
| null |
class _Avatars(object):
@staticmethod
def gravatar(hash, size=100, rating='g', default='identicon', include_extension=False, force_default=False):
"""Pass email hash, return Gravatar URL. You can get email hash like this::
import hashlib
avatar_hash = hashlib.md5(email.lower().encode('utf-8')).hexdigest()
Visit https://en.gravatar.com/site/implement/images/ for more information.
:param hash: The email hash used to generate avatar URL.
:param size: The size of the avatar, default to 100 pixel.
:param rating: The rating of the avatar, default to ``g``
:param default: The type of default avatar, default to ``identicon``.
:param include_extension: Append a '.jpg' extension at the end of URL, default to ``False``.
:param force_default: Force to use default avatar, default to ``False``.
"""
if include_extension:
hash += '.jpg'
default = default or current_app.config['AVATARS_GRAVATAR_DEFAULT']
query_string = urlencode({'s': int(size), 'r': rating, 'd': default})
if force_default:
query_string += '&q=y'
return 'https://gravatar.com/avatar/' + hash + '?' + query_string
@staticmethod
def robohash(text, size=200):
"""Pass text, return Robohash-style avatar (robot).
Visit https://robohash.org/ for more information.
:param text: The text used to generate avatar.
:param size: The size of the avatar, default to 200 pixel.
"""
return 'https://robohash.org/{text}?size={size}x{size}'.format(text=text, size=size)
@staticmethod
def social_media(username, platform='twitter', size='medium'):
"""Return avatar URL at social media.
Visit https://avatars.io for more information.
:param username: The username of the social media.
:param platform: One of facebook, instagram, twitter, gravatar.
:param size: The size of avatar, one of small, medium and large.
"""
return 'https://avatars.io/{platform}/{username}/{size}'.format(
platform=platform, username=username, size=size)
@staticmethod
def default(size='m'):
"""Return built-in default avatar.
:param size: The size of avatar, one of s, m, l.
:return: Default avatar URL
"""
return url_for('avatars.static', filename='default/default_{size}.jpg'.format(size=size))
@staticmethod
def jcrop_css(css_url=None):
"""Load jcrop css file.
:param css_url: The custom CSS URL.
"""
if css_url is None:
if current_app.config['AVATARS_SERVE_LOCAL']:
css_url = url_for('avatars.static', filename='jcrop/css/jquery.Jcrop.min.css')
else:
css_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/css/jquery.Jcrop.min.css'
return Markup('<link rel="stylesheet" href="%s">' % css_url)
@staticmethod
def jcrop_js(js_url=None, with_jquery=True):
"""Load jcrop Javascript file.
:param js_url: The custom JavaScript URL.
:param with_jquery: Include jQuery or not, default to ``True``.
"""
serve_local = current_app.config['AVATARS_SERVE_LOCAL']
if js_url is None:
if serve_local:
js_url = url_for('avatars.static', filename='jcrop/js/jquery.Jcrop.min.js')
else:
js_url = 'https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.Jcrop.min.js'
if with_jquery:
if serve_local:
jquery = '<script src="%s"></script>' % url_for('avatars.static', filename='jcrop/js/jquery.min.js')
else:
jquery = '<script src="https://cdn.jsdelivr.net/npm/jcrop-0.9.12@0.9.12/js/jquery.min.js"></script>'
else:
jquery = ''
return Markup('''%s\n<script src="%s"></script>
''' % (jquery, js_url))
@staticmethod
def crop_box(endpoint=None, filename=None):
"""Create a crop box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
"""
crop_size = current_app.config['AVATARS_CROP_BASE_WIDTH']
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('<img src="%s" id="crop-box" style="max-width: %dpx; display: block;">' % (url, crop_size))
@staticmethod
def preview_box(endpoint=None, filename=None):
"""Create a preview box.
:param endpoint: The endpoint of view function that serve avatar image file.
:param filename: The filename of the image that need to be crop.
"""
preview_size = current_app.config['AVATARS_CROP_PREVIEW_SIZE'] or current_app.config['AVATARS_SIZE_TUPLE'][2]
if endpoint is None or filename is None:
url = url_for('avatars.static', filename='default/default_l.jpg')
else:
url = url_for(endpoint, filename=filename)
return Markup('''
<div id="preview-box">
<div class="preview-box" style="width: %dpx; height: %dpx; overflow: hidden;">
<img src="%s" class="jcrop-preview" alt="Preview"/>
</div>
</div>''' % (preview_size, preview_size, url))
@staticmethod
|
greyli/flask-avatars
|
flask_avatars/__init__.py
|
Avatars.resize_avatar
|
python
|
def resize_avatar(self, img, base_width):
w_percent = (base_width / float(img.size[0]))
h_size = int((float(img.size[1]) * float(w_percent)))
img = img.resize((base_width, h_size), PIL.Image.ANTIALIAS)
return img
|
Resize an avatar.
:param img: The image that needs to be resize.
:param base_width: The width of output image.
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L278-L287
| null |
class Avatars(object):
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['avatars'] = _Avatars
app.context_processor(self.context_processor)
blueprint = Blueprint('avatars', __name__,
static_folder='static',
static_url_path='/avatars' + app.static_url_path)
app.register_blueprint(blueprint)
self.root_path = blueprint.root_path
# TODO: custom file extension support
# settings
app.config.setdefault('AVATARS_GRAVATAR_DEFAULT', 'identicon')
app.config.setdefault('AVATARS_SERVE_LOCAL', False)
app.config.setdefault('AVATARS_SAVE_PATH', None)
app.config.setdefault('AVATARS_SIZE_TUPLE', (30, 60, 150))
# Identicon
app.config.setdefault('AVATARS_IDENTICON_COLS', 7)
app.config.setdefault('AVATARS_IDENTICON_ROWS', 7)
app.config.setdefault('AVATARS_IDENTICON_BG', None)
# Jcrop
app.config.setdefault('AVATARS_CROP_BASE_WIDTH', 500)
app.config.setdefault('AVATARS_CROP_INIT_POS', (0, 0))
app.config.setdefault('AVATARS_CROP_INIT_SIZE', None)
app.config.setdefault('AVATARS_CROP_PREVIEW_SIZE', None)
app.config.setdefault('AVATARS_CROP_MIN_SIZE', None)
# @blueprint.route('/%s/<path:filename>/<size>' % app.config['AVATARS_STATIC_PREFIX'])
# def static(filename_m):
# path = current_app.config['AVATARS_SAVE_PATH']
# filename = '%s_%s.png' % (filename, size)
# return send_from_directory(path, filename)
@staticmethod
def context_processor():
return {
'avatars': current_app.extensions['avatars']
}
def save_avatar(self, image):
"""Save an avatar as raw image, return new filename.
:param image: The image that needs to be saved.
"""
path = current_app.config['AVATARS_SAVE_PATH']
filename = uuid4().hex + '_raw.png'
image.save(os.path.join(path, filename))
return filename
def crop_avatar(self, filename, x, y, w, h):
"""Crop avatar with given size, return a list of file name: [filename_s, filename_m, filename_l].
:param filename: The raw image's filename.
:param x: The x-pos to start crop.
:param y: The y-pos to start crop.
:param w: The crop width.
:param h: The crop height.
"""
x = int(x)
y = int(y)
w = int(w)
h = int(h)
sizes = current_app.config['AVATARS_SIZE_TUPLE']
if not filename:
path = os.path.join(self.root_path, 'static/default/default_l.jpg')
else:
path = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename)
print(path)
raw_img = Image.open(path)
base_width = current_app.config['AVATARS_CROP_BASE_WIDTH']
if raw_img.size[0] >= base_width:
raw_img = self.resize_avatar(raw_img, base_width=base_width)
cropped_img = raw_img.crop((x, y, x + w, y + h))
filename = uuid4().hex
avatar_s = self.resize_avatar(cropped_img, base_width=sizes[0])
avatar_m = self.resize_avatar(cropped_img, base_width=sizes[1])
avatar_l = self.resize_avatar(cropped_img, base_width=sizes[2])
filename_s = filename + '_s.png'
filename_m = filename + '_m.png'
filename_l = filename + '_l.png'
path_s = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_s)
path_m = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_m)
path_l = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_l)
avatar_s.save(path_s, optimize=True, quality=85)
avatar_m.save(path_m, optimize=True, quality=85)
avatar_l.save(path_l, optimize=True, quality=85)
return [filename_s, filename_m, filename_l]
@staticmethod
def gravatar(*args, **kwargs):
return _Avatars.gravatar(*args, **kwargs)
@staticmethod
def robohash(*args, **kwargs):
return _Avatars.robohash(*args, **kwargs)
@staticmethod
def social_media(*args, **kwargs):
return _Avatars.social_media(*args, **kwargs)
@staticmethod
def default(*args, **kwargs):
return _Avatars.default(*args, **kwargs)
|
greyli/flask-avatars
|
flask_avatars/__init__.py
|
Avatars.save_avatar
|
python
|
def save_avatar(self, image):
path = current_app.config['AVATARS_SAVE_PATH']
filename = uuid4().hex + '_raw.png'
image.save(os.path.join(path, filename))
return filename
|
Save an avatar as raw image, return new filename.
:param image: The image that needs to be saved.
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L289-L297
| null |
class Avatars(object):
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['avatars'] = _Avatars
app.context_processor(self.context_processor)
blueprint = Blueprint('avatars', __name__,
static_folder='static',
static_url_path='/avatars' + app.static_url_path)
app.register_blueprint(blueprint)
self.root_path = blueprint.root_path
# TODO: custom file extension support
# settings
app.config.setdefault('AVATARS_GRAVATAR_DEFAULT', 'identicon')
app.config.setdefault('AVATARS_SERVE_LOCAL', False)
app.config.setdefault('AVATARS_SAVE_PATH', None)
app.config.setdefault('AVATARS_SIZE_TUPLE', (30, 60, 150))
# Identicon
app.config.setdefault('AVATARS_IDENTICON_COLS', 7)
app.config.setdefault('AVATARS_IDENTICON_ROWS', 7)
app.config.setdefault('AVATARS_IDENTICON_BG', None)
# Jcrop
app.config.setdefault('AVATARS_CROP_BASE_WIDTH', 500)
app.config.setdefault('AVATARS_CROP_INIT_POS', (0, 0))
app.config.setdefault('AVATARS_CROP_INIT_SIZE', None)
app.config.setdefault('AVATARS_CROP_PREVIEW_SIZE', None)
app.config.setdefault('AVATARS_CROP_MIN_SIZE', None)
# @blueprint.route('/%s/<path:filename>/<size>' % app.config['AVATARS_STATIC_PREFIX'])
# def static(filename_m):
# path = current_app.config['AVATARS_SAVE_PATH']
# filename = '%s_%s.png' % (filename, size)
# return send_from_directory(path, filename)
@staticmethod
def context_processor():
return {
'avatars': current_app.extensions['avatars']
}
def resize_avatar(self, img, base_width):
"""Resize an avatar.
:param img: The image that needs to be resize.
:param base_width: The width of output image.
"""
w_percent = (base_width / float(img.size[0]))
h_size = int((float(img.size[1]) * float(w_percent)))
img = img.resize((base_width, h_size), PIL.Image.ANTIALIAS)
return img
def crop_avatar(self, filename, x, y, w, h):
"""Crop avatar with given size, return a list of file name: [filename_s, filename_m, filename_l].
:param filename: The raw image's filename.
:param x: The x-pos to start crop.
:param y: The y-pos to start crop.
:param w: The crop width.
:param h: The crop height.
"""
x = int(x)
y = int(y)
w = int(w)
h = int(h)
sizes = current_app.config['AVATARS_SIZE_TUPLE']
if not filename:
path = os.path.join(self.root_path, 'static/default/default_l.jpg')
else:
path = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename)
print(path)
raw_img = Image.open(path)
base_width = current_app.config['AVATARS_CROP_BASE_WIDTH']
if raw_img.size[0] >= base_width:
raw_img = self.resize_avatar(raw_img, base_width=base_width)
cropped_img = raw_img.crop((x, y, x + w, y + h))
filename = uuid4().hex
avatar_s = self.resize_avatar(cropped_img, base_width=sizes[0])
avatar_m = self.resize_avatar(cropped_img, base_width=sizes[1])
avatar_l = self.resize_avatar(cropped_img, base_width=sizes[2])
filename_s = filename + '_s.png'
filename_m = filename + '_m.png'
filename_l = filename + '_l.png'
path_s = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_s)
path_m = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_m)
path_l = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_l)
avatar_s.save(path_s, optimize=True, quality=85)
avatar_m.save(path_m, optimize=True, quality=85)
avatar_l.save(path_l, optimize=True, quality=85)
return [filename_s, filename_m, filename_l]
@staticmethod
def gravatar(*args, **kwargs):
return _Avatars.gravatar(*args, **kwargs)
@staticmethod
def robohash(*args, **kwargs):
return _Avatars.robohash(*args, **kwargs)
@staticmethod
def social_media(*args, **kwargs):
return _Avatars.social_media(*args, **kwargs)
@staticmethod
def default(*args, **kwargs):
return _Avatars.default(*args, **kwargs)
|
greyli/flask-avatars
|
flask_avatars/__init__.py
|
Avatars.crop_avatar
|
python
|
def crop_avatar(self, filename, x, y, w, h):
x = int(x)
y = int(y)
w = int(w)
h = int(h)
sizes = current_app.config['AVATARS_SIZE_TUPLE']
if not filename:
path = os.path.join(self.root_path, 'static/default/default_l.jpg')
else:
path = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename)
print(path)
raw_img = Image.open(path)
base_width = current_app.config['AVATARS_CROP_BASE_WIDTH']
if raw_img.size[0] >= base_width:
raw_img = self.resize_avatar(raw_img, base_width=base_width)
cropped_img = raw_img.crop((x, y, x + w, y + h))
filename = uuid4().hex
avatar_s = self.resize_avatar(cropped_img, base_width=sizes[0])
avatar_m = self.resize_avatar(cropped_img, base_width=sizes[1])
avatar_l = self.resize_avatar(cropped_img, base_width=sizes[2])
filename_s = filename + '_s.png'
filename_m = filename + '_m.png'
filename_l = filename + '_l.png'
path_s = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_s)
path_m = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_m)
path_l = os.path.join(current_app.config['AVATARS_SAVE_PATH'], filename_l)
avatar_s.save(path_s, optimize=True, quality=85)
avatar_m.save(path_m, optimize=True, quality=85)
avatar_l.save(path_l, optimize=True, quality=85)
return [filename_s, filename_m, filename_l]
|
Crop avatar with given size, return a list of file name: [filename_s, filename_m, filename_l].
:param filename: The raw image's filename.
:param x: The x-pos to start crop.
:param y: The y-pos to start crop.
:param w: The crop width.
:param h: The crop height.
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/__init__.py#L299-L349
|
[
"def resize_avatar(self, img, base_width):\n \"\"\"Resize an avatar.\n\n :param img: The image that needs to be resize.\n :param base_width: The width of output image.\n \"\"\"\n w_percent = (base_width / float(img.size[0]))\n h_size = int((float(img.size[1]) * float(w_percent)))\n img = img.resize((base_width, h_size), PIL.Image.ANTIALIAS)\n return img\n"
] |
class Avatars(object):
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['avatars'] = _Avatars
app.context_processor(self.context_processor)
blueprint = Blueprint('avatars', __name__,
static_folder='static',
static_url_path='/avatars' + app.static_url_path)
app.register_blueprint(blueprint)
self.root_path = blueprint.root_path
# TODO: custom file extension support
# settings
app.config.setdefault('AVATARS_GRAVATAR_DEFAULT', 'identicon')
app.config.setdefault('AVATARS_SERVE_LOCAL', False)
app.config.setdefault('AVATARS_SAVE_PATH', None)
app.config.setdefault('AVATARS_SIZE_TUPLE', (30, 60, 150))
# Identicon
app.config.setdefault('AVATARS_IDENTICON_COLS', 7)
app.config.setdefault('AVATARS_IDENTICON_ROWS', 7)
app.config.setdefault('AVATARS_IDENTICON_BG', None)
# Jcrop
app.config.setdefault('AVATARS_CROP_BASE_WIDTH', 500)
app.config.setdefault('AVATARS_CROP_INIT_POS', (0, 0))
app.config.setdefault('AVATARS_CROP_INIT_SIZE', None)
app.config.setdefault('AVATARS_CROP_PREVIEW_SIZE', None)
app.config.setdefault('AVATARS_CROP_MIN_SIZE', None)
# @blueprint.route('/%s/<path:filename>/<size>' % app.config['AVATARS_STATIC_PREFIX'])
# def static(filename_m):
# path = current_app.config['AVATARS_SAVE_PATH']
# filename = '%s_%s.png' % (filename, size)
# return send_from_directory(path, filename)
@staticmethod
def context_processor():
return {
'avatars': current_app.extensions['avatars']
}
def resize_avatar(self, img, base_width):
"""Resize an avatar.
:param img: The image that needs to be resize.
:param base_width: The width of output image.
"""
w_percent = (base_width / float(img.size[0]))
h_size = int((float(img.size[1]) * float(w_percent)))
img = img.resize((base_width, h_size), PIL.Image.ANTIALIAS)
return img
def save_avatar(self, image):
"""Save an avatar as raw image, return new filename.
:param image: The image that needs to be saved.
"""
path = current_app.config['AVATARS_SAVE_PATH']
filename = uuid4().hex + '_raw.png'
image.save(os.path.join(path, filename))
return filename
@staticmethod
def gravatar(*args, **kwargs):
return _Avatars.gravatar(*args, **kwargs)
@staticmethod
def robohash(*args, **kwargs):
return _Avatars.robohash(*args, **kwargs)
@staticmethod
def social_media(*args, **kwargs):
return _Avatars.social_media(*args, **kwargs)
@staticmethod
def default(*args, **kwargs):
return _Avatars.default(*args, **kwargs)
|
greyli/flask-avatars
|
flask_avatars/identicon.py
|
Identicon.get_image
|
python
|
def get_image(self, string, width, height, pad=0):
hex_digest_byte_list = self._string_to_byte_list(string)
matrix = self._create_matrix(hex_digest_byte_list)
return self._create_image(matrix, width, height, pad)
|
Byte representation of a PNG image
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L72-L78
|
[
"def _string_to_byte_list(self, data):\n \"\"\"\n Creates a hex digest of the input string given to create the image,\n if it's not already hexadecimal\n\n Returns:\n Length 16 list of rgb value range integers\n (each representing a byte of the hex digest)\n \"\"\"\n bytes_length = 16\n\n m = self.digest()\n m.update(str.encode(data))\n hex_digest = m.hexdigest()\n\n return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length)\n for num in range(bytes_length))\n",
"def _create_image(self, matrix, width, height, pad):\n \"\"\"\n Generates a PNG byte list\n \"\"\"\n\n image = Image.new(\"RGB\", (width + (pad * 2),\n height + (pad * 2)), self.bg_colour)\n image_draw = ImageDraw.Draw(image)\n\n # Calculate the block width and height.\n block_width = float(width) / self.cols\n block_height = float(height) / self.rows\n\n # Loop through blocks in matrix, draw rectangles.\n for row, cols in enumerate(matrix):\n for col, cell in enumerate(cols):\n if cell:\n image_draw.rectangle((\n pad + col * block_width, # x1\n pad + row * block_height, # y1\n pad + (col + 1) * block_width - 1, # x2\n pad + (row + 1) * block_height - 1 # y2\n ), fill=self.fg_colour)\n\n stream = BytesIO()\n image.save(stream, format=\"png\", optimize=True)\n # return the image byte data\n return stream.getvalue()\n",
"def _create_matrix(self, byte_list):\n \"\"\"\n This matrix decides which blocks should be filled fg/bg colour\n True for fg_colour\n False for bg_colour\n\n hash_bytes - array of hash bytes values. RGB range values in each slot\n\n Returns:\n List representation of the matrix\n [[True, True, True, True],\n [False, True, True, False],\n [True, True, True, True],\n [False, False, False, False]]\n \"\"\"\n\n # Number of rows * cols halfed and rounded\n # in order to fill opposite side\n cells = int(self.rows * self.cols / 2 + self.cols % 2)\n\n matrix = [[False] * self.cols for num in range(self.rows)]\n\n for cell_number in range(cells):\n\n # If the bit with index corresponding to this cell is 1\n # mark that cell as fg_colour\n # Skip byte 1, that's used in determining fg_colour\n if self._bit_is_one(cell_number, byte_list[1:]):\n # Find cell coordinates in matrix.\n x_row = cell_number % self.rows\n y_col = int(cell_number / self.cols)\n # Set coord True and its opposite side\n matrix[x_row][self.cols - y_col - 1] = True\n matrix[x_row][y_col] = True\n return matrix\n"
] |
class Identicon(object):
def __init__(self, rows=None, cols=None, bg_color=None):
"""Generate identicon image.
:param rows: The row of pixels in avatar.
:param columns: The column of pixels in avatar.
:param bg_color: Backgroud color, pass RGB tuple, for example: (125, 125, 125).
Set it to ``None`` to use random color.
"""
self.rows = rows or current_app.config['AVATARS_IDENTICON_ROWS']
self.cols = cols or current_app.config['AVATARS_IDENTICON_COLS']
self.bg_colour = bg_color or current_app.config['AVATARS_IDENTICON_BG']
self._generate_colours()
m = hashlib.md5()
m.update(b"hello world")
entropy = len(m.hexdigest()) / 2 * 8
if self.rows > 15 or self.cols > 15:
raise ValueError("Rows and columns must be valued 15 or under")
self.digest = hashlib.md5
self.digest_entropy = entropy
def _generate_colours(self):
colours_ok = False
while colours_ok is False:
self.fg_colour = self._get_pastel_colour()
if self.bg_colour is None:
self.bg_colour = self._get_pastel_colour(lighten=80)
# Get the luminance for each colour
fg_lum = self._luminance(self.fg_colour) + 0.05
bg_lum = self._luminance(self.bg_colour) + 0.05
# Check the difference in luminance
# meets the 1.25 threshold
result = (fg_lum / bg_lum) \
if (fg_lum / bg_lum) else (bg_lum / fg_lum)
if result > 1.20:
colours_ok = True
else:
colours_ok = True
def save(self, image_byte_array=None, save_location=None):
if image_byte_array and save_location:
with open(save_location, 'wb') as f:
return f.write(image_byte_array)
else:
raise ValueError('image_byte_array and path must be provided')
def _get_pastel_colour(self, lighten=127):
"""
Create a pastel colour hex colour string
"""
def r():
return random.randint(0, 128) + lighten
return r(), r(), r() # return rgb values as a tuple
def _luminance(self, rgb):
"""
Determine the liminanace of an RGB colour
"""
a = []
for v in rgb:
v = v / float(255)
if v < 0.03928:
result = v / 12.92
else:
result = math.pow(((v + 0.055) / 1.055), 2.4)
a.append(result)
return a[0] * 0.2126 + a[1] * 0.7152 + a[2] * 0.0722
def _string_to_byte_list(self, data):
"""
Creates a hex digest of the input string given to create the image,
if it's not already hexadecimal
Returns:
Length 16 list of rgb value range integers
(each representing a byte of the hex digest)
"""
bytes_length = 16
m = self.digest()
m.update(str.encode(data))
hex_digest = m.hexdigest()
return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length)
for num in range(bytes_length))
def _bit_is_one(self, n, hash_bytes):
"""
Check if the n (index) of hash_bytes is 1 or 0.
"""
scale = 16 # hexadecimal
if not hash_bytes[int(n / (scale / 2))] >> int(
(scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1:
return False
return True
def _create_image(self, matrix, width, height, pad):
"""
Generates a PNG byte list
"""
image = Image.new("RGB", (width + (pad * 2),
height + (pad * 2)), self.bg_colour)
image_draw = ImageDraw.Draw(image)
# Calculate the block width and height.
block_width = float(width) / self.cols
block_height = float(height) / self.rows
# Loop through blocks in matrix, draw rectangles.
for row, cols in enumerate(matrix):
for col, cell in enumerate(cols):
if cell:
image_draw.rectangle((
pad + col * block_width, # x1
pad + row * block_height, # y1
pad + (col + 1) * block_width - 1, # x2
pad + (row + 1) * block_height - 1 # y2
), fill=self.fg_colour)
stream = BytesIO()
image.save(stream, format="png", optimize=True)
# return the image byte data
return stream.getvalue()
def _create_matrix(self, byte_list):
"""
This matrix decides which blocks should be filled fg/bg colour
True for fg_colour
False for bg_colour
hash_bytes - array of hash bytes values. RGB range values in each slot
Returns:
List representation of the matrix
[[True, True, True, True],
[False, True, True, False],
[True, True, True, True],
[False, False, False, False]]
"""
# Number of rows * cols halfed and rounded
# in order to fill opposite side
cells = int(self.rows * self.cols / 2 + self.cols % 2)
matrix = [[False] * self.cols for num in range(self.rows)]
for cell_number in range(cells):
# If the bit with index corresponding to this cell is 1
# mark that cell as fg_colour
# Skip byte 1, that's used in determining fg_colour
if self._bit_is_one(cell_number, byte_list[1:]):
# Find cell coordinates in matrix.
x_row = cell_number % self.rows
y_col = int(cell_number / self.cols)
# Set coord True and its opposite side
matrix[x_row][self.cols - y_col - 1] = True
matrix[x_row][y_col] = True
return matrix
def generate(self, text):
"""Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l].
:param text: The text used to generate image.
"""
sizes = current_app.config['AVATARS_SIZE_TUPLE']
path = current_app.config['AVATARS_SAVE_PATH']
suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'}
for size in sizes:
image_byte_array = self.get_image(
string=str(text),
width=int(size),
height=int(size),
pad=int(size * 0.1))
self.save(image_byte_array, save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size])))
return [text + '_s.png', text + '_m.png', text + '_l.png']
|
greyli/flask-avatars
|
flask_avatars/identicon.py
|
Identicon._get_pastel_colour
|
python
|
def _get_pastel_colour(self, lighten=127):
def r():
return random.randint(0, 128) + lighten
return r(), r(), r()
|
Create a pastel colour hex colour string
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L87-L93
| null |
class Identicon(object):
def __init__(self, rows=None, cols=None, bg_color=None):
"""Generate identicon image.
:param rows: The row of pixels in avatar.
:param columns: The column of pixels in avatar.
:param bg_color: Backgroud color, pass RGB tuple, for example: (125, 125, 125).
Set it to ``None`` to use random color.
"""
self.rows = rows or current_app.config['AVATARS_IDENTICON_ROWS']
self.cols = cols or current_app.config['AVATARS_IDENTICON_COLS']
self.bg_colour = bg_color or current_app.config['AVATARS_IDENTICON_BG']
self._generate_colours()
m = hashlib.md5()
m.update(b"hello world")
entropy = len(m.hexdigest()) / 2 * 8
if self.rows > 15 or self.cols > 15:
raise ValueError("Rows and columns must be valued 15 or under")
self.digest = hashlib.md5
self.digest_entropy = entropy
def _generate_colours(self):
colours_ok = False
while colours_ok is False:
self.fg_colour = self._get_pastel_colour()
if self.bg_colour is None:
self.bg_colour = self._get_pastel_colour(lighten=80)
# Get the luminance for each colour
fg_lum = self._luminance(self.fg_colour) + 0.05
bg_lum = self._luminance(self.bg_colour) + 0.05
# Check the difference in luminance
# meets the 1.25 threshold
result = (fg_lum / bg_lum) \
if (fg_lum / bg_lum) else (bg_lum / fg_lum)
if result > 1.20:
colours_ok = True
else:
colours_ok = True
def get_image(self, string, width, height, pad=0):
"""
Byte representation of a PNG image
"""
hex_digest_byte_list = self._string_to_byte_list(string)
matrix = self._create_matrix(hex_digest_byte_list)
return self._create_image(matrix, width, height, pad)
def save(self, image_byte_array=None, save_location=None):
if image_byte_array and save_location:
with open(save_location, 'wb') as f:
return f.write(image_byte_array)
else:
raise ValueError('image_byte_array and path must be provided')
# return rgb values as a tuple
def _luminance(self, rgb):
"""
Determine the liminanace of an RGB colour
"""
a = []
for v in rgb:
v = v / float(255)
if v < 0.03928:
result = v / 12.92
else:
result = math.pow(((v + 0.055) / 1.055), 2.4)
a.append(result)
return a[0] * 0.2126 + a[1] * 0.7152 + a[2] * 0.0722
def _string_to_byte_list(self, data):
"""
Creates a hex digest of the input string given to create the image,
if it's not already hexadecimal
Returns:
Length 16 list of rgb value range integers
(each representing a byte of the hex digest)
"""
bytes_length = 16
m = self.digest()
m.update(str.encode(data))
hex_digest = m.hexdigest()
return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length)
for num in range(bytes_length))
def _bit_is_one(self, n, hash_bytes):
"""
Check if the n (index) of hash_bytes is 1 or 0.
"""
scale = 16 # hexadecimal
if not hash_bytes[int(n / (scale / 2))] >> int(
(scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1:
return False
return True
def _create_image(self, matrix, width, height, pad):
"""
Generates a PNG byte list
"""
image = Image.new("RGB", (width + (pad * 2),
height + (pad * 2)), self.bg_colour)
image_draw = ImageDraw.Draw(image)
# Calculate the block width and height.
block_width = float(width) / self.cols
block_height = float(height) / self.rows
# Loop through blocks in matrix, draw rectangles.
for row, cols in enumerate(matrix):
for col, cell in enumerate(cols):
if cell:
image_draw.rectangle((
pad + col * block_width, # x1
pad + row * block_height, # y1
pad + (col + 1) * block_width - 1, # x2
pad + (row + 1) * block_height - 1 # y2
), fill=self.fg_colour)
stream = BytesIO()
image.save(stream, format="png", optimize=True)
# return the image byte data
return stream.getvalue()
def _create_matrix(self, byte_list):
"""
This matrix decides which blocks should be filled fg/bg colour
True for fg_colour
False for bg_colour
hash_bytes - array of hash bytes values. RGB range values in each slot
Returns:
List representation of the matrix
[[True, True, True, True],
[False, True, True, False],
[True, True, True, True],
[False, False, False, False]]
"""
# Number of rows * cols halfed and rounded
# in order to fill opposite side
cells = int(self.rows * self.cols / 2 + self.cols % 2)
matrix = [[False] * self.cols for num in range(self.rows)]
for cell_number in range(cells):
# If the bit with index corresponding to this cell is 1
# mark that cell as fg_colour
# Skip byte 1, that's used in determining fg_colour
if self._bit_is_one(cell_number, byte_list[1:]):
# Find cell coordinates in matrix.
x_row = cell_number % self.rows
y_col = int(cell_number / self.cols)
# Set coord True and its opposite side
matrix[x_row][self.cols - y_col - 1] = True
matrix[x_row][y_col] = True
return matrix
def generate(self, text):
"""Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l].
:param text: The text used to generate image.
"""
sizes = current_app.config['AVATARS_SIZE_TUPLE']
path = current_app.config['AVATARS_SAVE_PATH']
suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'}
for size in sizes:
image_byte_array = self.get_image(
string=str(text),
width=int(size),
height=int(size),
pad=int(size * 0.1))
self.save(image_byte_array, save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size])))
return [text + '_s.png', text + '_m.png', text + '_l.png']
|
greyli/flask-avatars
|
flask_avatars/identicon.py
|
Identicon._luminance
|
python
|
def _luminance(self, rgb):
a = []
for v in rgb:
v = v / float(255)
if v < 0.03928:
result = v / 12.92
else:
result = math.pow(((v + 0.055) / 1.055), 2.4)
a.append(result)
return a[0] * 0.2126 + a[1] * 0.7152 + a[2] * 0.0722
|
Determine the liminanace of an RGB colour
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L95-L108
| null |
class Identicon(object):
def __init__(self, rows=None, cols=None, bg_color=None):
"""Generate identicon image.
:param rows: The row of pixels in avatar.
:param columns: The column of pixels in avatar.
:param bg_color: Backgroud color, pass RGB tuple, for example: (125, 125, 125).
Set it to ``None`` to use random color.
"""
self.rows = rows or current_app.config['AVATARS_IDENTICON_ROWS']
self.cols = cols or current_app.config['AVATARS_IDENTICON_COLS']
self.bg_colour = bg_color or current_app.config['AVATARS_IDENTICON_BG']
self._generate_colours()
m = hashlib.md5()
m.update(b"hello world")
entropy = len(m.hexdigest()) / 2 * 8
if self.rows > 15 or self.cols > 15:
raise ValueError("Rows and columns must be valued 15 or under")
self.digest = hashlib.md5
self.digest_entropy = entropy
def _generate_colours(self):
colours_ok = False
while colours_ok is False:
self.fg_colour = self._get_pastel_colour()
if self.bg_colour is None:
self.bg_colour = self._get_pastel_colour(lighten=80)
# Get the luminance for each colour
fg_lum = self._luminance(self.fg_colour) + 0.05
bg_lum = self._luminance(self.bg_colour) + 0.05
# Check the difference in luminance
# meets the 1.25 threshold
result = (fg_lum / bg_lum) \
if (fg_lum / bg_lum) else (bg_lum / fg_lum)
if result > 1.20:
colours_ok = True
else:
colours_ok = True
def get_image(self, string, width, height, pad=0):
"""
Byte representation of a PNG image
"""
hex_digest_byte_list = self._string_to_byte_list(string)
matrix = self._create_matrix(hex_digest_byte_list)
return self._create_image(matrix, width, height, pad)
def save(self, image_byte_array=None, save_location=None):
if image_byte_array and save_location:
with open(save_location, 'wb') as f:
return f.write(image_byte_array)
else:
raise ValueError('image_byte_array and path must be provided')
def _get_pastel_colour(self, lighten=127):
"""
Create a pastel colour hex colour string
"""
def r():
return random.randint(0, 128) + lighten
return r(), r(), r() # return rgb values as a tuple
def _string_to_byte_list(self, data):
"""
Creates a hex digest of the input string given to create the image,
if it's not already hexadecimal
Returns:
Length 16 list of rgb value range integers
(each representing a byte of the hex digest)
"""
bytes_length = 16
m = self.digest()
m.update(str.encode(data))
hex_digest = m.hexdigest()
return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length)
for num in range(bytes_length))
def _bit_is_one(self, n, hash_bytes):
"""
Check if the n (index) of hash_bytes is 1 or 0.
"""
scale = 16 # hexadecimal
if not hash_bytes[int(n / (scale / 2))] >> int(
(scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1:
return False
return True
def _create_image(self, matrix, width, height, pad):
"""
Generates a PNG byte list
"""
image = Image.new("RGB", (width + (pad * 2),
height + (pad * 2)), self.bg_colour)
image_draw = ImageDraw.Draw(image)
# Calculate the block width and height.
block_width = float(width) / self.cols
block_height = float(height) / self.rows
# Loop through blocks in matrix, draw rectangles.
for row, cols in enumerate(matrix):
for col, cell in enumerate(cols):
if cell:
image_draw.rectangle((
pad + col * block_width, # x1
pad + row * block_height, # y1
pad + (col + 1) * block_width - 1, # x2
pad + (row + 1) * block_height - 1 # y2
), fill=self.fg_colour)
stream = BytesIO()
image.save(stream, format="png", optimize=True)
# return the image byte data
return stream.getvalue()
def _create_matrix(self, byte_list):
"""
This matrix decides which blocks should be filled fg/bg colour
True for fg_colour
False for bg_colour
hash_bytes - array of hash bytes values. RGB range values in each slot
Returns:
List representation of the matrix
[[True, True, True, True],
[False, True, True, False],
[True, True, True, True],
[False, False, False, False]]
"""
# Number of rows * cols halfed and rounded
# in order to fill opposite side
cells = int(self.rows * self.cols / 2 + self.cols % 2)
matrix = [[False] * self.cols for num in range(self.rows)]
for cell_number in range(cells):
# If the bit with index corresponding to this cell is 1
# mark that cell as fg_colour
# Skip byte 1, that's used in determining fg_colour
if self._bit_is_one(cell_number, byte_list[1:]):
# Find cell coordinates in matrix.
x_row = cell_number % self.rows
y_col = int(cell_number / self.cols)
# Set coord True and its opposite side
matrix[x_row][self.cols - y_col - 1] = True
matrix[x_row][y_col] = True
return matrix
def generate(self, text):
"""Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l].
:param text: The text used to generate image.
"""
sizes = current_app.config['AVATARS_SIZE_TUPLE']
path = current_app.config['AVATARS_SAVE_PATH']
suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'}
for size in sizes:
image_byte_array = self.get_image(
string=str(text),
width=int(size),
height=int(size),
pad=int(size * 0.1))
self.save(image_byte_array, save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size])))
return [text + '_s.png', text + '_m.png', text + '_l.png']
|
greyli/flask-avatars
|
flask_avatars/identicon.py
|
Identicon._string_to_byte_list
|
python
|
def _string_to_byte_list(self, data):
bytes_length = 16
m = self.digest()
m.update(str.encode(data))
hex_digest = m.hexdigest()
return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length)
for num in range(bytes_length))
|
Creates a hex digest of the input string given to create the image,
if it's not already hexadecimal
Returns:
Length 16 list of rgb value range integers
(each representing a byte of the hex digest)
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L110-L126
| null |
class Identicon(object):
def __init__(self, rows=None, cols=None, bg_color=None):
"""Generate identicon image.
:param rows: The row of pixels in avatar.
:param columns: The column of pixels in avatar.
:param bg_color: Backgroud color, pass RGB tuple, for example: (125, 125, 125).
Set it to ``None`` to use random color.
"""
self.rows = rows or current_app.config['AVATARS_IDENTICON_ROWS']
self.cols = cols or current_app.config['AVATARS_IDENTICON_COLS']
self.bg_colour = bg_color or current_app.config['AVATARS_IDENTICON_BG']
self._generate_colours()
m = hashlib.md5()
m.update(b"hello world")
entropy = len(m.hexdigest()) / 2 * 8
if self.rows > 15 or self.cols > 15:
raise ValueError("Rows and columns must be valued 15 or under")
self.digest = hashlib.md5
self.digest_entropy = entropy
def _generate_colours(self):
colours_ok = False
while colours_ok is False:
self.fg_colour = self._get_pastel_colour()
if self.bg_colour is None:
self.bg_colour = self._get_pastel_colour(lighten=80)
# Get the luminance for each colour
fg_lum = self._luminance(self.fg_colour) + 0.05
bg_lum = self._luminance(self.bg_colour) + 0.05
# Check the difference in luminance
# meets the 1.25 threshold
result = (fg_lum / bg_lum) \
if (fg_lum / bg_lum) else (bg_lum / fg_lum)
if result > 1.20:
colours_ok = True
else:
colours_ok = True
def get_image(self, string, width, height, pad=0):
"""
Byte representation of a PNG image
"""
hex_digest_byte_list = self._string_to_byte_list(string)
matrix = self._create_matrix(hex_digest_byte_list)
return self._create_image(matrix, width, height, pad)
def save(self, image_byte_array=None, save_location=None):
if image_byte_array and save_location:
with open(save_location, 'wb') as f:
return f.write(image_byte_array)
else:
raise ValueError('image_byte_array and path must be provided')
def _get_pastel_colour(self, lighten=127):
"""
Create a pastel colour hex colour string
"""
def r():
return random.randint(0, 128) + lighten
return r(), r(), r() # return rgb values as a tuple
def _luminance(self, rgb):
"""
Determine the liminanace of an RGB colour
"""
a = []
for v in rgb:
v = v / float(255)
if v < 0.03928:
result = v / 12.92
else:
result = math.pow(((v + 0.055) / 1.055), 2.4)
a.append(result)
return a[0] * 0.2126 + a[1] * 0.7152 + a[2] * 0.0722
def _bit_is_one(self, n, hash_bytes):
"""
Check if the n (index) of hash_bytes is 1 or 0.
"""
scale = 16 # hexadecimal
if not hash_bytes[int(n / (scale / 2))] >> int(
(scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1:
return False
return True
def _create_image(self, matrix, width, height, pad):
"""
Generates a PNG byte list
"""
image = Image.new("RGB", (width + (pad * 2),
height + (pad * 2)), self.bg_colour)
image_draw = ImageDraw.Draw(image)
# Calculate the block width and height.
block_width = float(width) / self.cols
block_height = float(height) / self.rows
# Loop through blocks in matrix, draw rectangles.
for row, cols in enumerate(matrix):
for col, cell in enumerate(cols):
if cell:
image_draw.rectangle((
pad + col * block_width, # x1
pad + row * block_height, # y1
pad + (col + 1) * block_width - 1, # x2
pad + (row + 1) * block_height - 1 # y2
), fill=self.fg_colour)
stream = BytesIO()
image.save(stream, format="png", optimize=True)
# return the image byte data
return stream.getvalue()
def _create_matrix(self, byte_list):
"""
This matrix decides which blocks should be filled fg/bg colour
True for fg_colour
False for bg_colour
hash_bytes - array of hash bytes values. RGB range values in each slot
Returns:
List representation of the matrix
[[True, True, True, True],
[False, True, True, False],
[True, True, True, True],
[False, False, False, False]]
"""
# Number of rows * cols halfed and rounded
# in order to fill opposite side
cells = int(self.rows * self.cols / 2 + self.cols % 2)
matrix = [[False] * self.cols for num in range(self.rows)]
for cell_number in range(cells):
# If the bit with index corresponding to this cell is 1
# mark that cell as fg_colour
# Skip byte 1, that's used in determining fg_colour
if self._bit_is_one(cell_number, byte_list[1:]):
# Find cell coordinates in matrix.
x_row = cell_number % self.rows
y_col = int(cell_number / self.cols)
# Set coord True and its opposite side
matrix[x_row][self.cols - y_col - 1] = True
matrix[x_row][y_col] = True
return matrix
def generate(self, text):
"""Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l].
:param text: The text used to generate image.
"""
sizes = current_app.config['AVATARS_SIZE_TUPLE']
path = current_app.config['AVATARS_SAVE_PATH']
suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'}
for size in sizes:
image_byte_array = self.get_image(
string=str(text),
width=int(size),
height=int(size),
pad=int(size * 0.1))
self.save(image_byte_array, save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size])))
return [text + '_s.png', text + '_m.png', text + '_l.png']
|
greyli/flask-avatars
|
flask_avatars/identicon.py
|
Identicon._bit_is_one
|
python
|
def _bit_is_one(self, n, hash_bytes):
scale = 16 # hexadecimal
if not hash_bytes[int(n / (scale / 2))] >> int(
(scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1:
return False
return True
|
Check if the n (index) of hash_bytes is 1 or 0.
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L128-L138
| null |
class Identicon(object):
def __init__(self, rows=None, cols=None, bg_color=None):
"""Generate identicon image.
:param rows: The row of pixels in avatar.
:param columns: The column of pixels in avatar.
:param bg_color: Backgroud color, pass RGB tuple, for example: (125, 125, 125).
Set it to ``None`` to use random color.
"""
self.rows = rows or current_app.config['AVATARS_IDENTICON_ROWS']
self.cols = cols or current_app.config['AVATARS_IDENTICON_COLS']
self.bg_colour = bg_color or current_app.config['AVATARS_IDENTICON_BG']
self._generate_colours()
m = hashlib.md5()
m.update(b"hello world")
entropy = len(m.hexdigest()) / 2 * 8
if self.rows > 15 or self.cols > 15:
raise ValueError("Rows and columns must be valued 15 or under")
self.digest = hashlib.md5
self.digest_entropy = entropy
def _generate_colours(self):
colours_ok = False
while colours_ok is False:
self.fg_colour = self._get_pastel_colour()
if self.bg_colour is None:
self.bg_colour = self._get_pastel_colour(lighten=80)
# Get the luminance for each colour
fg_lum = self._luminance(self.fg_colour) + 0.05
bg_lum = self._luminance(self.bg_colour) + 0.05
# Check the difference in luminance
# meets the 1.25 threshold
result = (fg_lum / bg_lum) \
if (fg_lum / bg_lum) else (bg_lum / fg_lum)
if result > 1.20:
colours_ok = True
else:
colours_ok = True
def get_image(self, string, width, height, pad=0):
"""
Byte representation of a PNG image
"""
hex_digest_byte_list = self._string_to_byte_list(string)
matrix = self._create_matrix(hex_digest_byte_list)
return self._create_image(matrix, width, height, pad)
def save(self, image_byte_array=None, save_location=None):
if image_byte_array and save_location:
with open(save_location, 'wb') as f:
return f.write(image_byte_array)
else:
raise ValueError('image_byte_array and path must be provided')
def _get_pastel_colour(self, lighten=127):
"""
Create a pastel colour hex colour string
"""
def r():
return random.randint(0, 128) + lighten
return r(), r(), r() # return rgb values as a tuple
def _luminance(self, rgb):
"""
Determine the liminanace of an RGB colour
"""
a = []
for v in rgb:
v = v / float(255)
if v < 0.03928:
result = v / 12.92
else:
result = math.pow(((v + 0.055) / 1.055), 2.4)
a.append(result)
return a[0] * 0.2126 + a[1] * 0.7152 + a[2] * 0.0722
def _string_to_byte_list(self, data):
"""
Creates a hex digest of the input string given to create the image,
if it's not already hexadecimal
Returns:
Length 16 list of rgb value range integers
(each representing a byte of the hex digest)
"""
bytes_length = 16
m = self.digest()
m.update(str.encode(data))
hex_digest = m.hexdigest()
return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length)
for num in range(bytes_length))
def _create_image(self, matrix, width, height, pad):
"""
Generates a PNG byte list
"""
image = Image.new("RGB", (width + (pad * 2),
height + (pad * 2)), self.bg_colour)
image_draw = ImageDraw.Draw(image)
# Calculate the block width and height.
block_width = float(width) / self.cols
block_height = float(height) / self.rows
# Loop through blocks in matrix, draw rectangles.
for row, cols in enumerate(matrix):
for col, cell in enumerate(cols):
if cell:
image_draw.rectangle((
pad + col * block_width, # x1
pad + row * block_height, # y1
pad + (col + 1) * block_width - 1, # x2
pad + (row + 1) * block_height - 1 # y2
), fill=self.fg_colour)
stream = BytesIO()
image.save(stream, format="png", optimize=True)
# return the image byte data
return stream.getvalue()
def _create_matrix(self, byte_list):
"""
This matrix decides which blocks should be filled fg/bg colour
True for fg_colour
False for bg_colour
hash_bytes - array of hash bytes values. RGB range values in each slot
Returns:
List representation of the matrix
[[True, True, True, True],
[False, True, True, False],
[True, True, True, True],
[False, False, False, False]]
"""
# Number of rows * cols halfed and rounded
# in order to fill opposite side
cells = int(self.rows * self.cols / 2 + self.cols % 2)
matrix = [[False] * self.cols for num in range(self.rows)]
for cell_number in range(cells):
# If the bit with index corresponding to this cell is 1
# mark that cell as fg_colour
# Skip byte 1, that's used in determining fg_colour
if self._bit_is_one(cell_number, byte_list[1:]):
# Find cell coordinates in matrix.
x_row = cell_number % self.rows
y_col = int(cell_number / self.cols)
# Set coord True and its opposite side
matrix[x_row][self.cols - y_col - 1] = True
matrix[x_row][y_col] = True
return matrix
def generate(self, text):
"""Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l].
:param text: The text used to generate image.
"""
sizes = current_app.config['AVATARS_SIZE_TUPLE']
path = current_app.config['AVATARS_SAVE_PATH']
suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'}
for size in sizes:
image_byte_array = self.get_image(
string=str(text),
width=int(size),
height=int(size),
pad=int(size * 0.1))
self.save(image_byte_array, save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size])))
return [text + '_s.png', text + '_m.png', text + '_l.png']
|
greyli/flask-avatars
|
flask_avatars/identicon.py
|
Identicon._create_image
|
python
|
def _create_image(self, matrix, width, height, pad):
image = Image.new("RGB", (width + (pad * 2),
height + (pad * 2)), self.bg_colour)
image_draw = ImageDraw.Draw(image)
# Calculate the block width and height.
block_width = float(width) / self.cols
block_height = float(height) / self.rows
# Loop through blocks in matrix, draw rectangles.
for row, cols in enumerate(matrix):
for col, cell in enumerate(cols):
if cell:
image_draw.rectangle((
pad + col * block_width, # x1
pad + row * block_height, # y1
pad + (col + 1) * block_width - 1, # x2
pad + (row + 1) * block_height - 1 # y2
), fill=self.fg_colour)
stream = BytesIO()
image.save(stream, format="png", optimize=True)
# return the image byte data
return stream.getvalue()
|
Generates a PNG byte list
|
train
|
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L140-L167
| null |
class Identicon(object):
def __init__(self, rows=None, cols=None, bg_color=None):
"""Generate identicon image.
:param rows: The row of pixels in avatar.
:param columns: The column of pixels in avatar.
:param bg_color: Backgroud color, pass RGB tuple, for example: (125, 125, 125).
Set it to ``None`` to use random color.
"""
self.rows = rows or current_app.config['AVATARS_IDENTICON_ROWS']
self.cols = cols or current_app.config['AVATARS_IDENTICON_COLS']
self.bg_colour = bg_color or current_app.config['AVATARS_IDENTICON_BG']
self._generate_colours()
m = hashlib.md5()
m.update(b"hello world")
entropy = len(m.hexdigest()) / 2 * 8
if self.rows > 15 or self.cols > 15:
raise ValueError("Rows and columns must be valued 15 or under")
self.digest = hashlib.md5
self.digest_entropy = entropy
def _generate_colours(self):
colours_ok = False
while colours_ok is False:
self.fg_colour = self._get_pastel_colour()
if self.bg_colour is None:
self.bg_colour = self._get_pastel_colour(lighten=80)
# Get the luminance for each colour
fg_lum = self._luminance(self.fg_colour) + 0.05
bg_lum = self._luminance(self.bg_colour) + 0.05
# Check the difference in luminance
# meets the 1.25 threshold
result = (fg_lum / bg_lum) \
if (fg_lum / bg_lum) else (bg_lum / fg_lum)
if result > 1.20:
colours_ok = True
else:
colours_ok = True
def get_image(self, string, width, height, pad=0):
"""
Byte representation of a PNG image
"""
hex_digest_byte_list = self._string_to_byte_list(string)
matrix = self._create_matrix(hex_digest_byte_list)
return self._create_image(matrix, width, height, pad)
def save(self, image_byte_array=None, save_location=None):
if image_byte_array and save_location:
with open(save_location, 'wb') as f:
return f.write(image_byte_array)
else:
raise ValueError('image_byte_array and path must be provided')
def _get_pastel_colour(self, lighten=127):
"""
Create a pastel colour hex colour string
"""
def r():
return random.randint(0, 128) + lighten
return r(), r(), r() # return rgb values as a tuple
def _luminance(self, rgb):
"""
Determine the liminanace of an RGB colour
"""
a = []
for v in rgb:
v = v / float(255)
if v < 0.03928:
result = v / 12.92
else:
result = math.pow(((v + 0.055) / 1.055), 2.4)
a.append(result)
return a[0] * 0.2126 + a[1] * 0.7152 + a[2] * 0.0722
def _string_to_byte_list(self, data):
"""
Creates a hex digest of the input string given to create the image,
if it's not already hexadecimal
Returns:
Length 16 list of rgb value range integers
(each representing a byte of the hex digest)
"""
bytes_length = 16
m = self.digest()
m.update(str.encode(data))
hex_digest = m.hexdigest()
return list(int(hex_digest[num * 2:num * 2 + 2], bytes_length)
for num in range(bytes_length))
def _bit_is_one(self, n, hash_bytes):
"""
Check if the n (index) of hash_bytes is 1 or 0.
"""
scale = 16 # hexadecimal
if not hash_bytes[int(n / (scale / 2))] >> int(
(scale / 2) - ((n % (scale / 2)) + 1)) & 1 == 1:
return False
return True
def _create_matrix(self, byte_list):
"""
This matrix decides which blocks should be filled fg/bg colour
True for fg_colour
False for bg_colour
hash_bytes - array of hash bytes values. RGB range values in each slot
Returns:
List representation of the matrix
[[True, True, True, True],
[False, True, True, False],
[True, True, True, True],
[False, False, False, False]]
"""
# Number of rows * cols halfed and rounded
# in order to fill opposite side
cells = int(self.rows * self.cols / 2 + self.cols % 2)
matrix = [[False] * self.cols for num in range(self.rows)]
for cell_number in range(cells):
# If the bit with index corresponding to this cell is 1
# mark that cell as fg_colour
# Skip byte 1, that's used in determining fg_colour
if self._bit_is_one(cell_number, byte_list[1:]):
# Find cell coordinates in matrix.
x_row = cell_number % self.rows
y_col = int(cell_number / self.cols)
# Set coord True and its opposite side
matrix[x_row][self.cols - y_col - 1] = True
matrix[x_row][y_col] = True
return matrix
def generate(self, text):
"""Generate and save avatars, return a list of file name: [filename_s, filename_m, filename_l].
:param text: The text used to generate image.
"""
sizes = current_app.config['AVATARS_SIZE_TUPLE']
path = current_app.config['AVATARS_SAVE_PATH']
suffix = {sizes[0]: 's', sizes[1]: 'm', sizes[2]: 'l'}
for size in sizes:
image_byte_array = self.get_image(
string=str(text),
width=int(size),
height=int(size),
pad=int(size * 0.1))
self.save(image_byte_array, save_location=os.path.join(path, '%s_%s.png' % (text, suffix[size])))
return [text + '_s.png', text + '_m.png', text + '_l.png']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.