repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
crowdresearch/daemo | crowdsourcing/crypto.py | 3 | 1192 | import base64
from Crypto import Random
from Crypto.Cipher import AES
from django.conf import settings
from hashids import Hashids
def to_hash(pk):
    """Encode a primary key into an opaque hash string of at least 12 chars.

    Uses the project SECRET_KEY as the hashids salt, so hashes are stable
    per deployment and reversible via :func:`to_pk`.
    """
    return Hashids(salt=settings.SECRET_KEY, min_length=12).encode(pk)
def to_pk(hash_string):
    """Decode a hash produced by :func:`to_hash` back into a primary key.

    :param hash_string: opaque hash previously produced with the same salt.
    :return: the integer primary key, or ``None`` when the hash does not
        decode (wrong salt, tampered or malformed input).
    """
    id_hash = Hashids(salt=settings.SECRET_KEY, min_length=12)
    pk = id_hash.decode(hash_string)
    # Hashids.decode returns an empty tuple for invalid input; rely on
    # truthiness instead of the non-idiomatic `if len(pk):`.
    return pk[0] if pk else None
class AESUtil(object):
    """Helper for AES-CBC encryption/decryption of base64-wrapped payloads.

    NOTE(review): CBC mode provides confidentiality only — there is no MAC,
    so ciphertexts are malleable. Consider an authenticated mode (e.g. GCM)
    if integrity matters.
    """

    def __init__(self, key):
        # `key` is expected to be base64-encoded; store the raw key bytes.
        self.key = base64.b64decode(key)

    @staticmethod
    def _pad(data):
        # PKCS#7-style padding: append `length` copies of chr(length) so the
        # payload is a whole number of AES blocks (always adds >= 1 byte,
        # which is what makes unpadding in decrypt() unambiguous).
        # NOTE(review): the chr() concatenation assumes `data` is a str
        # (Python 2 style); this would raise on py3 bytes — confirm callers.
        length = AES.block_size - (len(data) % AES.block_size)
        data += chr(length) * length
        return data

    def encrypt(self, data):
        """Encrypt `data`, returning base64(iv + ciphertext)."""
        # Fresh random IV per message; it is prepended to the ciphertext so
        # decrypt() can recover it without external state.
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return base64.b64encode(iv + cipher.encrypt(self._pad(data)))

    def decrypt(self, data):
        """Decrypt base64(iv + ciphertext) produced by encrypt(), strip padding."""
        decoded_data = base64.b64decode(data)
        # First block is the IV written by encrypt().
        iv = decoded_data[:AES.block_size]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        decrypted = cipher.decrypt(decoded_data[AES.block_size:])
        # Last byte encodes the pad length; slice exactly that many bytes off.
        return decrypted[:-ord(decrypted[len(decrypted) - 1:])]
| mit | fb9da6bb03476531c94938b63c267b90 | 27.380952 | 69 | 0.637584 | 3.283747 | false | false | false | false |
crowdresearch/daemo | crowdsourcing/validators/utils.py | 4 | 4377 | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from rest_framework.exceptions import ValidationError
class EqualityValidator(object):
    """Serializer validator ensuring that two named fields carry equal values."""

    message = _('The fields {field_names} must be equal.')
    missing_message = _('This field is required.')

    def __init__(self, fields, message=None):
        self.fields = fields
        self.serializer_field = None
        self.message = message or self.message
        self.instance = None
        self.initial_data = None
        self.validate_non_fields = False

    def set_context(self, serializer):
        """
        This hook is called by the serializer instance,
        prior to the validation call being made.
        """
        self.instance = getattr(serializer, 'instance', None)
        self.initial_data = getattr(serializer, 'initial_data', None)
        self.validate_non_fields = getattr(serializer, 'validate_non_fields', False)

    def __call__(self, *args, **kwargs):
        # Only active when the serializer opts in via `validate_non_fields`.
        if not self.validate_non_fields:
            return
        first, second = self.fields[0], self.fields[1]
        data = self.initial_data
        # Both fields must be present before they can be compared.
        if first not in data or second not in data:
            raise ValidationError("Both fields are required.")
        if data.get(first, 'Password1') != data.get(second, 'Password2'):
            raise ValidationError(
                self.message.format(field_names=', '.join(self.fields)))
class LengthValidator(object):
    """Serializer validator enforcing a minimum length on a single field."""

    message = _('Field {field_name} must be at least {length} characters long.')
    missing_message = _('Field {field_name} is required.')

    def __init__(self, field, length, message=None):
        self.field = field
        self.length = length
        self.serializer_field = None
        self.message = message or self.message
        self.initial_data = None
        self.validate_non_fields = False

    def set_context(self, serializer):
        self.initial_data = getattr(serializer, 'initial_data', None)
        self.validate_non_fields = getattr(serializer, 'validate_non_fields', False)

    def __call__(self, *args, **kwargs):
        # Only active when the serializer opts in via `validate_non_fields`.
        if not self.validate_non_fields:
            return
        # The field must exist before its length can be checked.
        if self.field not in self.initial_data:
            raise ValidationError(self.missing_message.format(field_name=self.field))
        value = self.initial_data[self.field]
        if len(value) < self.length:
            raise ValidationError(
                self.message.format(field_name=self.field, length=self.length))
class InequalityValidator(object):
    """Serializer validator enforcing a strict ordering between a field and a value.

    ``operator`` selects the check: ``'gt'`` requires the field to be strictly
    greater than ``value``; ``'lt'`` requires it to be strictly less.
    """

    message = _('Field {field_name} must be {field_operator} than {field_value}.')

    def __init__(self, field, value, operator, message=None):
        self.field = field
        self.value = value
        # BUG FIX: the `operator` argument used to be ignored and hard-coded
        # to 'gt', so callers requesting 'lt' silently got the 'gt' check.
        self.operator = operator
        self.message = message or self.message
        self.initial_data = None

    def set_context(self, serializer):
        self.initial_data = getattr(serializer, 'initial_data', None)

    def __call__(self, *args, **kwargs):
        if self.initial_data[self.field] >= self.value and self.operator == 'lt':
            raise ValidationError(self.message.format(field_name=self.field,
                                                      field_operator='less', field_value=self.value))
        elif self.initial_data[self.field] <= self.value and self.operator == 'gt':
            raise ValidationError(self.message.format(field_name=self.field,
                                                      field_operator='greater', field_value=self.value))
class ConditionallyRequiredValidator(object):
    """Serializer validator: when ``field`` equals ``value``, ``field2`` must be present."""

    message = _('Field {field2_name} is required because {field_name} is {type_value}.')

    def __init__(self, field, value, field2, message=None):
        self.field = field
        self.value = value
        self.field2 = field2
        self.message = message or self.message
        self.initial_data = None

    def set_context(self, serializer):
        self.initial_data = getattr(serializer, 'initial_data', None)

    def __call__(self, *args, **kwargs):
        condition_met = self.initial_data[self.field] == self.value
        if condition_met and self.field2 not in self.initial_data:
            raise ValidationError(
                self.message.format(field_name=self.field, field2_name=self.field2,
                                    type_value=self.value))
| mit | a52fd5a24840e76ce67642d40323ba41 | 43.212121 | 106 | 0.614576 | 4.184512 | false | false | false | false |
jrkerns/pylinac | docs/source/code_snippets/trs398_class.py | 1 | 1182 | """A script to calculate TRS-398 dose using pylinac classes and following the TRS-398 photon form"""
from pylinac.calibration import trs398
# --- Measurement & setup inputs for the TRS-398 photon worksheet ------------
ENERGY = 6  # nominal beam energy in MV
TEMP = 22.1  # chamber temperature — presumably degrees Celsius; confirm units
PRESS = trs398.mmHg2kPa(755.0)  # pressure, converted from mmHg to kPa
CHAMBER = '30013'  # PTW chamber model
K_ELEC = 1.000  # electrometer correction factor
ND_w = 5.443  # Gy/nC — absorbed-dose-to-water calibration coefficient
MU = 200  # monitor units delivered for the reference readings
CLINICAL_PDD = 66.5  # clinical percent depth dose used for the zmax conversion
# Build the TRS-398 photon calculation from the raw readings above.
trs398_6x = trs398.TRS398Photon(
    unit='TrueBeam1',
    setup='SSD',
    chamber=CHAMBER,
    temp=TEMP, press=PRESS,
    n_dw=ND_w,
    clinical_pdd_zref=CLINICAL_PDD,
    tpr2010=(38.2/66.6),  # beam-quality ratio from two readings — TODO confirm source values
    energy=ENERGY,
    fff=False,
    k_elec=K_ELEC,
    voltage_reference=-300, voltage_reduced=-150,  # volts; reduced is half of reference
    m_reference=(25.65, 25.66, 25.65),  # raw readings at reference voltage
    m_opposite=(25.64, 25.65, 25.65),  # raw readings at opposite polarity
    m_reduced=(25.64, 25.63, 25.63),  # raw readings at reduced voltage
    mu=MU, tissue_correction=1.0
)
# Done! Dose per MU at zmax:
print(trs398_6x.dose_mu_zmax)
# examine other computed correction factors
print(trs398_6x.kq)
print(trs398_6x.k_s)
print(trs398_6x.k_tp)
# change readings if you adjust machine output
trs398_6x.m_reference_adjusted = (25.44, 25.44, 25.43)
# print the dose value recomputed from the adjusted readings
print(trs398_6x.dose_mu_zmax_adjusted)
# generate a PDF for record-keeping
trs398_6x.publish_pdf('TB1 6MV TRS-398.pdf', notes=['My notes', 'I used Pylinac to do this; so easy!'], open_file=False)
| mit | c38ba97644ebae5338b3a92622d2ea10 | 24.695652 | 120 | 0.678511 | 2.299611 | false | false | false | false |
graphql-python/graphql-core | src/graphql/validation/rules/unique_enum_value_names.py | 1 | 2135 | from collections import defaultdict
from typing import Any, Dict
from ...error import GraphQLError
from ...language import SKIP, EnumTypeDefinitionNode, NameNode, VisitorAction
from ...type import is_enum_type
from . import SDLValidationContext, SDLValidationRule
__all__ = ["UniqueEnumValueNamesRule"]
class UniqueEnumValueNamesRule(SDLValidationRule):
    """Unique enum value names

    A GraphQL enum type is only valid if all its values are uniquely named.
    """

    def __init__(self, context: SDLValidationContext):
        super().__init__(context)
        schema = context.schema
        self.existing_type_map = schema.type_map if schema else {}
        self.known_value_names: Dict[str, Dict[str, NameNode]] = defaultdict(dict)

    def check_value_uniqueness(
        self, node: EnumTypeDefinitionNode, *_args: Any
    ) -> VisitorAction:
        type_name = node.name.value
        # Names already seen for this enum across all definitions/extensions.
        seen_names = self.known_value_names[type_name]
        existing_type = self.existing_type_map.get(type_name)
        extends_schema_enum = is_enum_type(existing_type)
        for value_def in node.values or []:
            value_name = value_def.name.value
            if extends_schema_enum and value_name in existing_type.values:
                # Extension clashes with a value on the pre-existing enum type.
                self.report_error(
                    GraphQLError(
                        f"Enum value '{type_name}.{value_name}'"
                        " already exists in the schema."
                        " It cannot also be defined in this type extension.",
                        value_def.name,
                    )
                )
            elif value_name in seen_names:
                # Duplicate within the SDL document itself.
                self.report_error(
                    GraphQLError(
                        f"Enum value '{type_name}.{value_name}'"
                        " can only be defined once.",
                        [seen_names[value_name], value_def.name],
                    )
                )
            else:
                seen_names[value_name] = value_def.name
        return SKIP

    enter_enum_type_definition = check_value_uniqueness
    enter_enum_type_extension = check_value_uniqueness
| mit | 4fd4d8c14b24e5ab3214c26c5e1011ba | 35.186441 | 82 | 0.581265 | 4.393004 | false | false | false | false |
graphql-python/graphql-core | tests/validation/test_variables_in_allowed_position.py | 1 | 10191 | from functools import partial
from graphql.validation import VariablesInAllowedPositionRule
from .harness import assert_validation_errors
assert_errors = partial(assert_validation_errors, VariablesInAllowedPositionRule)
assert_valid = partial(assert_errors, errors=[])
def describe_validate_variables_are_in_allowed_positions():
    """Tests for VariablesInAllowedPositionRule.

    A variable may only be used where its declared type (including any
    default value) is compatible with the input type expected at that
    position; the error cases assert both the message and the source
    locations of the declaration and the offending usage.
    """

    # --- Valid usages: declared variable type matches the expected type ---

    def boolean_to_boolean():
        assert_valid(
            """
              query Query($booleanArg: Boolean)
              {
                complicatedArgs {
                  booleanArgField(booleanArg: $booleanArg)
                }
              }
            """
        )

    def boolean_to_boolean_in_fragment():
        # The fragment may appear before or after the operation declaring
        # the variable — both orders must validate.
        assert_valid(
            """
              fragment booleanArgFrag on ComplicatedArgs {
                booleanArgField(booleanArg: $booleanArg)
              }
              query Query($booleanArg: Boolean)
              {
                complicatedArgs {
                  ...booleanArgFrag
                }
              }
            """
        )
        assert_valid(
            """
              query Query($booleanArg: Boolean)
              {
                complicatedArgs {
                  ...booleanArgFrag
                }
              }
              fragment booleanArgFrag on ComplicatedArgs {
                booleanArgField(booleanArg: $booleanArg)
              }
            """
        )

    def non_null_boolean_to_boolean():
        # Non-null is a valid subtype of its nullable counterpart.
        assert_valid(
            """
              query Query($nonNullBooleanArg: Boolean!)
              {
                complicatedArgs {
                  booleanArgField(booleanArg: $nonNullBooleanArg)
                }
              }
            """
        )

    def non_null_boolean_to_boolean_within_fragment():
        assert_valid(
            """
              fragment booleanArgFrag on ComplicatedArgs {
                booleanArgField(booleanArg: $nonNullBooleanArg)
              }
              query Query($nonNullBooleanArg: Boolean!)
              {
                complicatedArgs {
                  ...booleanArgFrag
                }
              }
            """
        )

    def array_of_string_to_array_of_string():
        assert_valid(
            """
              query Query($stringListVar: [String])
              {
                complicatedArgs {
                  stringListArgField(stringListArg: $stringListVar)
                }
              }
            """
        )

    def array_of_non_null_string_to_array_of_string():
        assert_valid(
            """
              query Query($stringListVar: [String!])
              {
                complicatedArgs {
                  stringListArgField(stringListArg: $stringListVar)
                }
              }
            """
        )

    def string_to_array_of_string_in_item_position():
        # A String variable can be used as an item of a [String] literal.
        assert_valid(
            """
              query Query($stringVar: String)
              {
                complicatedArgs {
                  stringListArgField(stringListArg: [$stringVar])
                }
              }
            """
        )

    def non_null_string_to_array_of_string_in_item_position():
        assert_valid(
            """
              query Query($stringVar: String!)
              {
                complicatedArgs {
                  stringListArgField(stringListArg: [$stringVar])
                }
              }
            """
        )

    def complex_input_to_complex_input():
        assert_valid(
            """
              query Query($complexVar: ComplexInput)
              {
                complicatedArgs {
                  complexArgField(complexArg: $complexVar)
                }
              }
            """
        )

    def complex_input_to_complex_input_in_field_position():
        assert_valid(
            """
              query Query($boolVar: Boolean = false)
              {
                complicatedArgs {
                  complexArgField(complexArg: {requiredArg: $boolVar})
                }
              }
            """
        )

    def non_null_boolean_to_non_null_boolean_in_directive():
        assert_valid(
            """
              query Query($boolVar: Boolean!)
              {
                dog @include(if: $boolVar)
              }
            """
        )

    # --- Invalid usages: declared type is not a subtype of the expected one ---

    def int_to_non_null_int():
        assert_errors(
            """
            query Query($intArg: Int) {
              complicatedArgs {
                nonNullIntArgField(nonNullIntArg: $intArg)
              }
            }
            """,
            [
                {
                    "message": "Variable '$intArg' of type 'Int'"
                    " used in position expecting type 'Int!'.",
                    "locations": [(2, 25), (4, 51)],
                }
            ],
        )

    def int_to_non_null_int_within_fragment():
        assert_errors(
            """
            fragment nonNullIntArgFieldFrag on ComplicatedArgs {
              nonNullIntArgField(nonNullIntArg: $intArg)
            }
            query Query($intArg: Int) {
              complicatedArgs {
                ...nonNullIntArgFieldFrag
              }
            }
            """,
            [
                {
                    "message": "Variable '$intArg' of type 'Int'"
                    " used in position expecting type 'Int!'.",
                    "locations": [(6, 25), (3, 49)],
                }
            ],
        )

    def int_to_non_null_int_within_nested_fragment():
        # The usage is reached only through a chain of fragment spreads.
        assert_errors(
            """
            fragment outerFrag on ComplicatedArgs {
              ...nonNullIntArgFieldFrag
            }
            fragment nonNullIntArgFieldFrag on ComplicatedArgs {
              nonNullIntArgField(nonNullIntArg: $intArg)
            }
            query Query($intArg: Int) {
              complicatedArgs {
                ...outerFrag
              }
            }
            """,
            [
                {
                    "message": "Variable '$intArg' of type 'Int'"
                    " used in position expecting type 'Int!'.",
                    "locations": [(10, 25), (7, 49)],
                }
            ],
        )

    def string_to_boolean():
        assert_errors(
            """
            query Query($stringVar: String) {
              complicatedArgs {
                booleanArgField(booleanArg: $stringVar)
              }
            }
            """,
            [
                {
                    "message": "Variable '$stringVar' of type 'String'"
                    " used in position expecting type 'Boolean'.",
                    "locations": [(2, 25), (4, 45)],
                }
            ],
        )

    def string_to_array_of_string():
        assert_errors(
            """
            query Query($stringVar: String) {
              complicatedArgs {
                stringListArgField(stringListArg: $stringVar)
              }
            }
            """,
            [
                {
                    "message": "Variable '$stringVar' of type 'String'"
                    " used in position expecting type '[String]'.",
                    "locations": [(2, 25), (4, 51)],
                }
            ],
        )

    def boolean_to_non_null_boolean_in_directive():
        assert_errors(
            """
            query Query($boolVar: Boolean) {
              dog @include(if: $boolVar)
            }
            """,
            [
                {
                    "message": "Variable '$boolVar' of type 'Boolean'"
                    " used in position expecting type 'Boolean!'.",
                    "locations": [(2, 25), (3, 32)],
                }
            ],
        )

    def string_to_non_null_boolean_in_directive():
        assert_errors(
            """
            query Query($stringVar: String) {
              dog @include(if: $stringVar)
            }
            """,
            [
                {
                    "message": "Variable '$stringVar' of type 'String'"
                    " used in position expecting type 'Boolean!'.",
                    "locations": [(2, 25), (3, 32)],
                }
            ],
        )

    def array_of_string_to_array_of_non_null_string():
        assert_errors(
            """
            query Query($stringListVar: [String])
            {
              complicatedArgs {
                stringListNonNullArgField(stringListNonNullArg: $stringListVar)
              }
            }
            """,
            [
                {
                    "message": "Variable '$stringListVar' of type '[String]'"
                    " used in position expecting type '[String!]'.",
                    "locations": [(2, 25), (5, 65)],
                }
            ],
        )

    def describe_allows_optional_nullable_variables_with_default_values():
        # A nullable variable with a non-null default may be used in a
        # non-null position; an explicit null default must not.

        def int_to_non_null_int_fails_when_var_provides_null_default_value():
            assert_errors(
                """
                query Query($intVar: Int = null) {
                  complicatedArgs {
                    nonNullIntArgField(nonNullIntArg: $intVar)
                  }
                }
                """,
                [
                    {
                        "message": "Variable '$intVar' of type 'Int'"
                        " used in position expecting type 'Int!'.",
                        "locations": [(2, 29), (4, 55)],
                    }
                ],
            )

        def int_to_non_null_int_when_var_provides_non_null_default_value():
            assert_valid(
                """
                query Query($intVar: Int = 1) {
                  complicatedArgs {
                    nonNullIntArgField(nonNullIntArg: $intVar)
                  }
                }
                """
            )

        def int_to_non_null_int_when_optional_arg_provides_default_value():
            assert_valid(
                """
                query Query($intVar: Int) {
                  complicatedArgs {
                    nonNullFieldWithDefault(nonNullIntArg: $intVar)
                  }
                }
                """
            )

        def bool_to_non_null_bool_in_directive_with_default_value_with_option():
            assert_valid(
                """
                query Query($boolVar: Boolean = false) {
                  dog @include(if: $boolVar)
                }
                """
            )
| mit | f2588eb2edcaff5b77a3dac4693f58a6 | 26.844262 | 81 | 0.422628 | 4.899519 | false | false | false | false |
graphql-python/graphql-core | src/graphql/utilities/build_client_schema.py | 1 | 17051 | from itertools import chain
from typing import Callable, Collection, Dict, List, Union, cast
from ..language import DirectiveLocation, parse_value
from ..pyutils import Undefined, inspect
from ..type import (
GraphQLArgument,
GraphQLDirective,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLField,
GraphQLInputField,
GraphQLInputObjectType,
GraphQLInterfaceType,
GraphQLList,
GraphQLNamedType,
GraphQLNonNull,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
GraphQLType,
GraphQLUnionType,
TypeKind,
assert_interface_type,
assert_nullable_type,
assert_object_type,
introspection_types,
is_input_type,
is_output_type,
specified_scalar_types,
)
from .get_introspection_query import (
IntrospectionDirective,
IntrospectionEnumType,
IntrospectionField,
IntrospectionInputObjectType,
IntrospectionInputValue,
IntrospectionInterfaceType,
IntrospectionObjectType,
IntrospectionQuery,
IntrospectionScalarType,
IntrospectionType,
IntrospectionTypeRef,
IntrospectionUnionType,
)
from .value_from_ast import value_from_ast
__all__ = ["build_client_schema"]
def build_client_schema(
    introspection: IntrospectionQuery, assume_valid: bool = False
) -> GraphQLSchema:
    """Build a GraphQLSchema for use by client tools.

    Given the result of a client running the introspection query, creates and returns
    a GraphQLSchema instance which can be then used with all GraphQL-core 3 tools,
    but cannot be used to execute a query, as introspection does not represent the
    "resolver", "parse" or "serialize" functions or any other server-internal
    mechanisms.

    This function expects a complete introspection result. Don't forget to check the
    "errors" field of a server response before calling this function.

    :param introspection: the ``data`` attribute of an introspection response.
    :param assume_valid: when True, schema validation is skipped
        (passed through to :class:`GraphQLSchema`).
    :raises TypeError: on an invalid or incomplete introspection result.
    """
    # Reject anything that is not a dict carrying a "__schema" dict up front.
    if not isinstance(introspection, dict) or not isinstance(
        introspection.get("__schema"), dict
    ):
        raise TypeError(
            "Invalid or incomplete introspection result. Ensure that you"
            " are passing the 'data' attribute of an introspection response"
            f" and no 'errors' were returned alongside: {inspect(introspection)}."
        )

    # Get the schema from the introspection result.
    schema_introspection = introspection["__schema"]

    # Given a type reference in introspection, return the GraphQLType instance,
    # preferring cached instances before building new instances.
    # Recurses through LIST/NON_NULL wrappers down to a named type.
    def get_type(type_ref: IntrospectionTypeRef) -> GraphQLType:
        kind = type_ref.get("kind")
        if kind == TypeKind.LIST.name:
            item_ref = type_ref.get("ofType")
            if not item_ref:
                raise TypeError("Decorated type deeper than introspection query.")
            item_ref = cast(IntrospectionTypeRef, item_ref)
            return GraphQLList(get_type(item_ref))
        if kind == TypeKind.NON_NULL.name:
            nullable_ref = type_ref.get("ofType")
            if not nullable_ref:
                raise TypeError("Decorated type deeper than introspection query.")
            nullable_ref = cast(IntrospectionTypeRef, nullable_ref)
            nullable_type = get_type(nullable_ref)
            return GraphQLNonNull(assert_nullable_type(nullable_type))
        type_ref = cast(IntrospectionType, type_ref)
        return get_named_type(type_ref)

    # Resolve a named type reference against the type_map built below.
    def get_named_type(type_ref: IntrospectionType) -> GraphQLNamedType:
        type_name = type_ref.get("name")
        if not type_name:
            raise TypeError(f"Unknown type reference: {inspect(type_ref)}.")
        type_ = type_map.get(type_name)
        if not type_:
            raise TypeError(
                f"Invalid or incomplete schema, unknown type: {type_name}."
                " Ensure that a full introspection query is used in order"
                " to build a client schema."
            )
        return type_

    def get_object_type(type_ref: IntrospectionObjectType) -> GraphQLObjectType:
        return assert_object_type(get_type(type_ref))

    def get_interface_type(
        type_ref: IntrospectionInterfaceType,
    ) -> GraphQLInterfaceType:
        return assert_interface_type(get_type(type_ref))

    # Given a type's introspection result, construct the correct GraphQLType instance.
    def build_type(type_: IntrospectionType) -> GraphQLNamedType:
        if type_ and "name" in type_ and "kind" in type_:
            builder = type_builders.get(type_["kind"])
            if builder:  # pragma: no cover else
                return builder(type_)
        raise TypeError(
            "Invalid or incomplete introspection result."
            " Ensure that a full introspection query is used in order"
            f" to build a client schema: {inspect(type_)}."
        )

    # Build a scalar type, reusing the reserved (specified) scalar instances.
    def build_scalar_def(
        scalar_introspection: IntrospectionScalarType,
    ) -> GraphQLScalarType:
        name = scalar_introspection["name"]
        try:
            return cast(GraphQLScalarType, GraphQLScalarType.reserved_types[name])
        except KeyError:
            return GraphQLScalarType(
                name=name,
                description=scalar_introspection.get("description"),
                specified_by_url=scalar_introspection.get("specifiedByURL"),
            )

    # Resolve the list of interfaces implemented by an object/interface type.
    def build_implementations_list(
        implementing_introspection: Union[
            IntrospectionObjectType, IntrospectionInterfaceType
        ],
    ) -> List[GraphQLInterfaceType]:
        maybe_interfaces = implementing_introspection.get("interfaces")
        if maybe_interfaces is None:
            # Temporary workaround until GraphQL ecosystem will fully support
            # 'interfaces' on interface types
            if implementing_introspection["kind"] == TypeKind.INTERFACE.name:
                return []
            raise TypeError(
                "Introspection result missing interfaces:"
                f" {inspect(implementing_introspection)}."
            )
        interfaces = cast(Collection[IntrospectionInterfaceType], maybe_interfaces)
        return [get_interface_type(interface) for interface in interfaces]

    # Build an object type, reusing reserved instances; fields/interfaces are
    # supplied lazily (thunks) so forward references resolve correctly.
    def build_object_def(
        object_introspection: IntrospectionObjectType,
    ) -> GraphQLObjectType:
        name = object_introspection["name"]
        try:
            return cast(GraphQLObjectType, GraphQLObjectType.reserved_types[name])
        except KeyError:
            return GraphQLObjectType(
                name=name,
                description=object_introspection.get("description"),
                interfaces=lambda: build_implementations_list(object_introspection),
                fields=lambda: build_field_def_map(object_introspection),
            )

    def build_interface_def(
        interface_introspection: IntrospectionInterfaceType,
    ) -> GraphQLInterfaceType:
        return GraphQLInterfaceType(
            name=interface_introspection["name"],
            description=interface_introspection.get("description"),
            interfaces=lambda: build_implementations_list(interface_introspection),
            fields=lambda: build_field_def_map(interface_introspection),
        )

    def build_union_def(
        union_introspection: IntrospectionUnionType,
    ) -> GraphQLUnionType:
        maybe_possible_types = union_introspection.get("possibleTypes")
        if maybe_possible_types is None:
            raise TypeError(
                "Introspection result missing possibleTypes:"
                f" {inspect(union_introspection)}."
            )
        possible_types = cast(Collection[IntrospectionObjectType], maybe_possible_types)
        return GraphQLUnionType(
            name=union_introspection["name"],
            description=union_introspection.get("description"),
            types=lambda: [get_object_type(type_) for type_ in possible_types],
        )

    def build_enum_def(enum_introspection: IntrospectionEnumType) -> GraphQLEnumType:
        if enum_introspection.get("enumValues") is None:
            raise TypeError(
                "Introspection result missing enumValues:"
                f" {inspect(enum_introspection)}."
            )
        name = enum_introspection["name"]
        try:
            return cast(GraphQLEnumType, GraphQLEnumType.reserved_types[name])
        except KeyError:
            return GraphQLEnumType(
                name=name,
                description=enum_introspection.get("description"),
                values={
                    value_introspect["name"]: GraphQLEnumValue(
                        value=value_introspect["name"],
                        description=value_introspect.get("description"),
                        deprecation_reason=value_introspect.get("deprecationReason"),
                    )
                    for value_introspect in enum_introspection["enumValues"]
                },
            )

    def build_input_object_def(
        input_object_introspection: IntrospectionInputObjectType,
    ) -> GraphQLInputObjectType:
        if input_object_introspection.get("inputFields") is None:
            raise TypeError(
                "Introspection result missing inputFields:"
                f" {inspect(input_object_introspection)}."
            )
        return GraphQLInputObjectType(
            name=input_object_introspection["name"],
            description=input_object_introspection.get("description"),
            fields=lambda: build_input_value_def_map(
                input_object_introspection["inputFields"]
            ),
        )

    # Dispatch table: introspection "kind" string -> builder function.
    type_builders: Dict[str, Callable[[IntrospectionType], GraphQLNamedType]] = {
        TypeKind.SCALAR.name: build_scalar_def,  # type: ignore
        TypeKind.OBJECT.name: build_object_def,  # type: ignore
        TypeKind.INTERFACE.name: build_interface_def,  # type: ignore
        TypeKind.UNION.name: build_union_def,  # type: ignore
        TypeKind.ENUM.name: build_enum_def,  # type: ignore
        TypeKind.INPUT_OBJECT.name: build_input_object_def,  # type: ignore
    }

    def build_field_def_map(
        type_introspection: Union[IntrospectionObjectType, IntrospectionInterfaceType],
    ) -> Dict[str, GraphQLField]:
        if type_introspection.get("fields") is None:
            raise TypeError(
                f"Introspection result missing fields: {type_introspection}."
            )
        return {
            field_introspection["name"]: build_field(field_introspection)
            for field_introspection in type_introspection["fields"]
        }

    def build_field(field_introspection: IntrospectionField) -> GraphQLField:
        type_introspection = cast(IntrospectionType, field_introspection["type"])
        type_ = get_type(type_introspection)
        if not is_output_type(type_):
            raise TypeError(
                "Introspection must provide output type for fields,"
                f" but received: {inspect(type_)}."
            )
        args_introspection = field_introspection.get("args")
        if args_introspection is None:
            raise TypeError(
                "Introspection result missing field args:"
                f" {inspect(field_introspection)}."
            )
        return GraphQLField(
            type_,
            args=build_argument_def_map(args_introspection),
            description=field_introspection.get("description"),
            deprecation_reason=field_introspection.get("deprecationReason"),
        )

    def build_argument_def_map(
        argument_value_introspections: Collection[IntrospectionInputValue],
    ) -> Dict[str, GraphQLArgument]:
        return {
            argument_introspection["name"]: build_argument(argument_introspection)
            for argument_introspection in argument_value_introspections
        }

    def build_argument(
        argument_introspection: IntrospectionInputValue,
    ) -> GraphQLArgument:
        type_introspection = cast(IntrospectionType, argument_introspection["type"])
        type_ = get_type(type_introspection)
        if not is_input_type(type_):
            raise TypeError(
                "Introspection must provide input type for arguments,"
                f" but received: {inspect(type_)}."
            )
        # Introspection serializes default values as GraphQL literal strings;
        # None means "no default" (Undefined), not a null default.
        default_value_introspection = argument_introspection.get("defaultValue")
        default_value = (
            Undefined
            if default_value_introspection is None
            else value_from_ast(parse_value(default_value_introspection), type_)
        )
        return GraphQLArgument(
            type_,
            default_value=default_value,
            description=argument_introspection.get("description"),
            deprecation_reason=argument_introspection.get("deprecationReason"),
        )

    def build_input_value_def_map(
        input_value_introspections: Collection[IntrospectionInputValue],
    ) -> Dict[str, GraphQLInputField]:
        return {
            input_value_introspection["name"]: build_input_value(
                input_value_introspection
            )
            for input_value_introspection in input_value_introspections
        }

    def build_input_value(
        input_value_introspection: IntrospectionInputValue,
    ) -> GraphQLInputField:
        type_introspection = cast(IntrospectionType, input_value_introspection["type"])
        type_ = get_type(type_introspection)
        if not is_input_type(type_):
            raise TypeError(
                "Introspection must provide input type for input fields,"
                f" but received: {inspect(type_)}."
            )
        # Same default-value convention as build_argument above.
        default_value_introspection = input_value_introspection.get("defaultValue")
        default_value = (
            Undefined
            if default_value_introspection is None
            else value_from_ast(parse_value(default_value_introspection), type_)
        )
        return GraphQLInputField(
            type_,
            default_value=default_value,
            description=input_value_introspection.get("description"),
            deprecation_reason=input_value_introspection.get("deprecationReason"),
        )

    def build_directive(
        directive_introspection: IntrospectionDirective,
    ) -> GraphQLDirective:
        if directive_introspection.get("args") is None:
            raise TypeError(
                "Introspection result missing directive args:"
                f" {inspect(directive_introspection)}."
            )
        if directive_introspection.get("locations") is None:
            raise TypeError(
                "Introspection result missing directive locations:"
                f" {inspect(directive_introspection)}."
            )
        return GraphQLDirective(
            name=directive_introspection["name"],
            description=directive_introspection.get("description"),
            is_repeatable=directive_introspection.get("isRepeatable", False),
            locations=list(
                cast(
                    Collection[DirectiveLocation],
                    directive_introspection.get("locations"),
                )
            ),
            args=build_argument_def_map(directive_introspection["args"]),
        )

    # Iterate through all types, getting the type definition for each.
    type_map: Dict[str, GraphQLNamedType] = {
        type_introspection["name"]: build_type(type_introspection)
        for type_introspection in schema_introspection["types"]
    }

    # Include standard types only if they are used.
    for std_type_name, std_type in chain(
        specified_scalar_types.items(), introspection_types.items()
    ):
        if std_type_name in type_map:
            type_map[std_type_name] = std_type

    # Get the root Query, Mutation, and Subscription types.
    query_type_ref = schema_introspection.get("queryType")
    query_type = None if query_type_ref is None else get_object_type(query_type_ref)
    mutation_type_ref = schema_introspection.get("mutationType")
    mutation_type = (
        None if mutation_type_ref is None else get_object_type(mutation_type_ref)
    )
    subscription_type_ref = schema_introspection.get("subscriptionType")
    subscription_type = (
        None
        if subscription_type_ref is None
        else get_object_type(subscription_type_ref)
    )

    # Get the directives supported by Introspection, assuming empty-set if directives
    # were not queried for.
    directive_introspections = schema_introspection.get("directives")
    directives = (
        [
            build_directive(directive_introspection)
            for directive_introspection in directive_introspections
        ]
        if directive_introspections
        else []
    )

    # Then produce and return a Schema with these types.
    return GraphQLSchema(
        query=query_type,
        mutation=mutation_type,
        subscription=subscription_type,
        types=list(type_map.values()),
        directives=directives,
        description=schema_introspection.get("description"),
        assume_valid=assume_valid,
    )
| mit | 01e2da3376c456d93c6a264a9063d7a8 | 39.025822 | 88 | 0.635505 | 4.617113 | false | false | false | false |
graphql-python/graphql-core | src/graphql/validation/validation_context.py | 1 | 8712 | from typing import Any, Callable, Dict, List, NamedTuple, Optional, Set, Union, cast
from ..error import GraphQLError
from ..language import (
DocumentNode,
FragmentDefinitionNode,
FragmentSpreadNode,
OperationDefinitionNode,
SelectionSetNode,
VariableNode,
Visitor,
VisitorAction,
visit,
)
from ..type import (
GraphQLArgument,
GraphQLCompositeType,
GraphQLDirective,
GraphQLEnumValue,
GraphQLField,
GraphQLInputType,
GraphQLOutputType,
GraphQLSchema,
)
from ..utilities import TypeInfo, TypeInfoVisitor
# TypeAlias is only available in typing from Python 3.10 onwards;
# fall back to typing_extensions on older interpreters.
try:
    from typing import TypeAlias
except ImportError:  # Python < 3.10
    from typing_extensions import TypeAlias

__all__ = [
    "ASTValidationContext",
    "SDLValidationContext",
    "ValidationContext",
    "VariableUsage",
    "VariableUsageVisitor",
]

# Definition nodes that own a selection set (used when walking spreads).
NodeWithSelectionSet: TypeAlias = Union[OperationDefinitionNode, FragmentDefinitionNode]
class VariableUsage(NamedTuple):
    """A single usage of a variable inside a GraphQL document.

    ``type`` and ``default_value`` are the input type and default value
    reported by TypeInfo at the position where the variable appears.
    """

    node: VariableNode
    type: Optional[GraphQLInputType]
    default_value: Any
class VariableUsageVisitor(Visitor):
    """Visitor adding all variable usages to a given list."""

    usages: List[VariableUsage]

    def __init__(self, type_info: TypeInfo):
        super().__init__()
        self._type_info = type_info
        self.usages = []
        # Bound method cached to avoid an attribute lookup per usage.
        self._append_usage = self.usages.append

    def enter_variable_definition(self, *_args: Any) -> VisitorAction:
        # Variables inside their own definitions are declarations, not usages.
        return self.SKIP

    def enter_variable(self, node: VariableNode, *_args: Any) -> VisitorAction:
        info = self._type_info
        self._append_usage(
            VariableUsage(node, info.get_input_type(), info.get_default_value())
        )
        return None
class ASTValidationContext:
    """Utility class providing a context for validation of an AST.

    An instance of this class is passed as the context attribute to all Validators,
    allowing access to commonly useful contextual information from within a validation
    rule.
    """

    document: DocumentNode

    # Lazily built caches, populated on first access by the getters below.
    _fragments: Optional[Dict[str, FragmentDefinitionNode]]
    _fragment_spreads: Dict[SelectionSetNode, List[FragmentSpreadNode]]
    _recursively_referenced_fragments: Dict[
        OperationDefinitionNode, List[FragmentDefinitionNode]
    ]

    def __init__(
        self, ast: DocumentNode, on_error: Callable[[GraphQLError], None]
    ) -> None:
        self.document = ast
        # Replace the no-op on_error method below with the supplied callback.
        self.on_error = on_error  # type: ignore
        self._fragments = None
        self._fragment_spreads = {}
        self._recursively_referenced_fragments = {}

    def on_error(self, error: GraphQLError) -> None:
        # Default no-op; overwritten per instance in __init__.
        pass

    def report_error(self, error: GraphQLError) -> None:
        self.on_error(error)

    def get_fragment(self, name: str) -> Optional[FragmentDefinitionNode]:
        """Return the fragment definition with the given name, if present."""
        fragments = self._fragments
        if fragments is None:
            # Build the name -> definition index once, on first request.
            fragments = {
                statement.name.value: statement
                for statement in self.document.definitions
                if isinstance(statement, FragmentDefinitionNode)
            }
            self._fragments = fragments
        return fragments.get(name)

    def get_fragment_spreads(self, node: SelectionSetNode) -> List[FragmentSpreadNode]:
        """Return all fragment spreads nested under the given selection set (cached)."""
        spreads = self._fragment_spreads.get(node)
        if spreads is None:
            spreads = []
            append_spread = spreads.append
            # Iterative depth-first walk over nested selection sets; bound
            # methods are cached locally for speed in the hot loop.
            sets_to_visit = [node]
            append_set = sets_to_visit.append
            pop_set = sets_to_visit.pop
            while sets_to_visit:
                visited_set = pop_set()
                for selection in visited_set.selections:
                    if isinstance(selection, FragmentSpreadNode):
                        append_spread(selection)
                    else:
                        set_to_visit = cast(
                            NodeWithSelectionSet, selection
                        ).selection_set
                        if set_to_visit:
                            append_set(set_to_visit)
            self._fragment_spreads[node] = spreads
        return spreads

    def get_recursively_referenced_fragments(
        self, operation: OperationDefinitionNode
    ) -> List[FragmentDefinitionNode]:
        """Return all fragments transitively reachable from the operation (cached)."""
        fragments = self._recursively_referenced_fragments.get(operation)
        if fragments is None:
            fragments = []
            append_fragment = fragments.append
            collected_names: Set[str] = set()
            add_name = collected_names.add
            nodes_to_visit = [operation.selection_set]
            append_node = nodes_to_visit.append
            pop_node = nodes_to_visit.pop
            get_fragment = self.get_fragment
            get_fragment_spreads = self.get_fragment_spreads
            while nodes_to_visit:
                visited_node = pop_node()
                for spread in get_fragment_spreads(visited_node):
                    frag_name = spread.name.value
                    # Each fragment name is visited at most once, so cyclic
                    # fragment references cannot cause an infinite loop.
                    if frag_name not in collected_names:
                        add_name(frag_name)
                        fragment = get_fragment(frag_name)
                        if fragment:
                            append_fragment(fragment)
                            append_node(fragment.selection_set)
            self._recursively_referenced_fragments[operation] = fragments
        return fragments
class SDLValidationContext(ASTValidationContext):
    """Utility class providing a context for validation of an SDL AST.

    An instance of this class is passed as the context attribute to all Validators,
    allowing access to commonly useful contextual information from within a validation
    rule.
    """

    # The schema being extended, or None when validating a standalone SDL document.
    schema: Optional[GraphQLSchema]

    def __init__(
        self,
        ast: DocumentNode,
        schema: Optional[GraphQLSchema],
        on_error: Callable[[GraphQLError], None],
    ) -> None:
        super().__init__(ast, on_error)
        self.schema = schema
class ValidationContext(ASTValidationContext):
    """Utility class providing a context for validation using a GraphQL schema.

    An instance of this class is passed as the context attribute to all Validators,
    allowing access to commonly useful contextual information from within a validation
    rule.
    """

    schema: GraphQLSchema

    _type_info: TypeInfo
    # lazily populated caches of variable usages, keyed by the visited node
    _variable_usages: Dict[NodeWithSelectionSet, List[VariableUsage]]
    _recursive_variable_usages: Dict[OperationDefinitionNode, List[VariableUsage]]

    def __init__(
        self,
        schema: GraphQLSchema,
        ast: DocumentNode,
        type_info: TypeInfo,
        on_error: Callable[[GraphQLError], None],
    ) -> None:
        super().__init__(ast, on_error)
        self.schema = schema
        self._type_info = type_info
        self._variable_usages = {}
        self._recursive_variable_usages = {}

    def get_variable_usages(self, node: NodeWithSelectionSet) -> List[VariableUsage]:
        """Collect all variable usages directly inside the given node (cached)."""
        usages = self._variable_usages.get(node)
        if usages is None:
            usage_visitor = VariableUsageVisitor(self._type_info)
            visit(node, TypeInfoVisitor(self._type_info, usage_visitor))
            usages = usage_visitor.usages
            self._variable_usages[node] = usages
        return usages

    def get_recursive_variable_usages(
        self, operation: OperationDefinitionNode
    ) -> List[VariableUsage]:
        """Collect all variable usages in the operation and its fragments (cached)."""
        usages = self._recursive_variable_usages.get(operation)
        if usages is None:
            get_variable_usages = self.get_variable_usages
            # Copy before extending: get_variable_usages() returns the very list
            # object that is stored in the _variable_usages cache, so extending it
            # in place would corrupt that cache entry for the operation node.
            usages = list(get_variable_usages(operation))
            for fragment in self.get_recursively_referenced_fragments(operation):
                usages.extend(get_variable_usages(fragment))
            self._recursive_variable_usages[operation] = usages
        return usages

    # The following methods delegate to the shared TypeInfo instance and reflect
    # its state at the current position of the traversal.

    def get_type(self) -> Optional[GraphQLOutputType]:
        return self._type_info.get_type()

    def get_parent_type(self) -> Optional[GraphQLCompositeType]:
        return self._type_info.get_parent_type()

    def get_input_type(self) -> Optional[GraphQLInputType]:
        return self._type_info.get_input_type()

    def get_parent_input_type(self) -> Optional[GraphQLInputType]:
        return self._type_info.get_parent_input_type()

    def get_field_def(self) -> Optional[GraphQLField]:
        return self._type_info.get_field_def()

    def get_directive(self) -> Optional[GraphQLDirective]:
        return self._type_info.get_directive()

    def get_argument(self) -> Optional[GraphQLArgument]:
        return self._type_info.get_argument()

    def get_enum_value(self) -> Optional[GraphQLEnumValue]:
        return self._type_info.get_enum_value()
| mit | 6746f3b7556404997cfbafd803703208 | 32.898833 | 88 | 0.631772 | 4.274779 | false | false | false | false |
graphql-python/graphql-core | tests/utilities/test_type_comparators.py | 1 | 3962 | from graphql.type import (
GraphQLField,
GraphQLFloat,
GraphQLInt,
GraphQLInterfaceType,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLOutputType,
GraphQLSchema,
GraphQLString,
GraphQLUnionType,
)
from graphql.utilities import is_equal_type, is_type_sub_type_of
def describe_type_comparators():
    # pytest-describe suite: the nested function names are the test descriptions,
    # covering is_equal_type() and is_type_sub_type_of().

    def describe_is_equal_type():
        def same_references_are_equal():
            assert is_equal_type(GraphQLString, GraphQLString) is True

        def int_and_float_are_not_equal():
            assert is_equal_type(GraphQLInt, GraphQLFloat) is False

        def lists_of_same_type_are_equal():
            assert (
                is_equal_type(GraphQLList(GraphQLInt), GraphQLList(GraphQLInt)) is True
            )

        def lists_is_not_equal_to_item():
            assert is_equal_type(GraphQLList(GraphQLInt), GraphQLInt) is False

        def nonnull_of_same_type_are_equal():
            assert (
                is_equal_type(GraphQLNonNull(GraphQLInt), GraphQLNonNull(GraphQLInt))
                is True
            )

        def nonnull_is_not_equal_to_nullable():
            assert is_equal_type(GraphQLNonNull(GraphQLInt), GraphQLInt) is False

    def describe_is_type_sub_type_of():
        def _test_schema(field_type: GraphQLOutputType = GraphQLString):
            # minimal schema whose Query.field has the given type, so that the
            # types involved are registered with a schema instance
            return GraphQLSchema(
                query=GraphQLObjectType("Query", {"field": GraphQLField(field_type)})
            )

        def same_reference_is_subtype():
            assert (
                is_type_sub_type_of(_test_schema(), GraphQLString, GraphQLString)
                is True
            )

        def int_is_not_subtype_of_float():
            assert (
                is_type_sub_type_of(_test_schema(), GraphQLInt, GraphQLFloat) is False
            )

        def non_null_is_subtype_of_nullable():
            assert (
                is_type_sub_type_of(
                    _test_schema(), GraphQLNonNull(GraphQLInt), GraphQLInt
                )
                is True
            )

        def nullable_is_not_subtype_of_non_null():
            assert (
                is_type_sub_type_of(
                    _test_schema(), GraphQLInt, GraphQLNonNull(GraphQLInt)
                )
                is False
            )

        def item_is_not_subtype_of_list():
            assert not is_type_sub_type_of(
                _test_schema(), GraphQLInt, GraphQLList(GraphQLInt)
            )

        def list_is_not_subtype_of_item():
            assert not is_type_sub_type_of(
                _test_schema(), GraphQLList(GraphQLInt), GraphQLInt
            )

        def member_is_subtype_of_union():
            member = GraphQLObjectType("Object", {"field": GraphQLField(GraphQLString)})
            union = GraphQLUnionType("Union", [member])
            schema = _test_schema(union)
            assert is_type_sub_type_of(schema, member, union)

        def implementing_object_is_subtype_of_interface():
            iface = GraphQLInterfaceType(
                "Interface", {"field": GraphQLField(GraphQLString)}
            )
            impl = GraphQLObjectType(
                "Object",
                {"field": GraphQLField(GraphQLString)},
                [iface],
            )
            schema = _test_schema(impl)
            assert is_type_sub_type_of(schema, impl, iface)

        def implementing_interface_is_subtype_of_interface():
            # interface-implements-interface chains also count as subtyping
            iface = GraphQLInterfaceType(
                "Interface", {"field": GraphQLField(GraphQLString)}
            )
            iface2 = GraphQLInterfaceType(
                "Interface2", {"field": GraphQLField(GraphQLString)}, [iface]
            )
            impl = GraphQLObjectType(
                "Object",
                {"field": GraphQLField(GraphQLString)},
                [iface2, iface],
            )
            schema = _test_schema(impl)
            assert is_type_sub_type_of(schema, iface2, iface)
| mit | faf41ef019b6f19dbc50442da3f18e58 | 33.155172 | 88 | 0.552246 | 3.907298 | false | true | false | false |
graphql-python/graphql-core | src/graphql/validation/rules/known_directives.py | 1 | 4458 | from typing import Any, Dict, List, Optional, Tuple, Union, cast
from ...error import GraphQLError
from ...language import (
DirectiveDefinitionNode,
DirectiveLocation,
DirectiveNode,
Node,
OperationDefinitionNode,
)
from ...type import specified_directives
from . import ASTValidationRule, SDLValidationContext, ValidationContext
__all__ = ["KnownDirectivesRule"]
class KnownDirectivesRule(ASTValidationRule):
    """Known directives

    A GraphQL document is only valid if all ``@directives`` are known by the schema and
    legally positioned.

    See https://spec.graphql.org/draft/#sec-Directives-Are-Defined
    """

    context: Union[ValidationContext, SDLValidationContext]

    def __init__(self, context: Union[ValidationContext, SDLValidationContext]):
        super().__init__(context)
        schema = context.schema
        known_directives = (
            schema.directives if schema else cast(List, specified_directives)
        )
        # start with the directives the schema (or the spec defaults) provides
        locations_map: Dict[str, Tuple[DirectiveLocation, ...]] = {
            directive.name: directive.locations for directive in known_directives
        }
        # directive definitions in the document itself may add or override entries
        for definition in context.document.definitions:
            if isinstance(definition, DirectiveDefinitionNode):
                locations_map[definition.name.value] = tuple(
                    DirectiveLocation[location.value]
                    for location in definition.locations
                )
        self.locations_map = locations_map

    def enter_directive(
        self,
        node: DirectiveNode,
        _key: Any,
        _parent: Any,
        _path: Any,
        ancestors: List[Node],
    ) -> None:
        name = node.name.value
        locations = self.locations_map.get(name)
        if not locations:
            self.report_error(GraphQLError(f"Unknown directive '@{name}'.", node))
            return
        candidate_location = get_directive_location_for_ast_path(ancestors)
        if candidate_location and candidate_location not in locations:
            self.report_error(
                GraphQLError(
                    f"Directive '@{name}'"
                    f" may not be used on {candidate_location.value}.",
                    node,
                )
            )
# Maps an operation kind ("query"/"mutation"/"subscription") to the directive
# location that applies when a directive appears on that operation definition.
_operation_location = {
    "query": DirectiveLocation.QUERY,
    "mutation": DirectiveLocation.MUTATION,
    "subscription": DirectiveLocation.SUBSCRIPTION,
}

# Maps an AST node kind to the directive location that applies when a directive
# appears on a node of that kind. Operation definitions and input value
# definitions are context dependent and handled separately (see the function
# below in this module).
_directive_location = {
    "field": DirectiveLocation.FIELD,
    "fragment_spread": DirectiveLocation.FRAGMENT_SPREAD,
    "inline_fragment": DirectiveLocation.INLINE_FRAGMENT,
    "fragment_definition": DirectiveLocation.FRAGMENT_DEFINITION,
    "variable_definition": DirectiveLocation.VARIABLE_DEFINITION,
    "schema_definition": DirectiveLocation.SCHEMA,
    "schema_extension": DirectiveLocation.SCHEMA,
    "scalar_type_definition": DirectiveLocation.SCALAR,
    "scalar_type_extension": DirectiveLocation.SCALAR,
    "object_type_definition": DirectiveLocation.OBJECT,
    "object_type_extension": DirectiveLocation.OBJECT,
    "field_definition": DirectiveLocation.FIELD_DEFINITION,
    "interface_type_definition": DirectiveLocation.INTERFACE,
    "interface_type_extension": DirectiveLocation.INTERFACE,
    "union_type_definition": DirectiveLocation.UNION,
    "union_type_extension": DirectiveLocation.UNION,
    "enum_type_definition": DirectiveLocation.ENUM,
    "enum_type_extension": DirectiveLocation.ENUM,
    "enum_value_definition": DirectiveLocation.ENUM_VALUE,
    "input_object_type_definition": DirectiveLocation.INPUT_OBJECT,
    "input_object_type_extension": DirectiveLocation.INPUT_OBJECT,
}
def get_directive_location_for_ast_path(
    ancestors: List[Node],
) -> Optional[DirectiveLocation]:
    """Determine the directive location implied by the last node of the path.

    Returns None when the node kind has no associated directive location.
    """
    applied_to = ancestors[-1]
    if not isinstance(applied_to, Node):  # pragma: no cover
        raise TypeError("Unexpected error in directive.")
    kind = applied_to.kind
    if kind == "operation_definition":
        operation = cast(OperationDefinitionNode, applied_to).operation
        return _operation_location[operation.value]
    if kind == "input_value_definition":
        # an input value is either a field of an input object type or an argument
        # definition, depending on the kind of its grandparent node
        parent_node = ancestors[-3]
        if parent_node.kind == "input_object_type_definition":
            return DirectiveLocation.INPUT_FIELD_DEFINITION
        return DirectiveLocation.ARGUMENT_DEFINITION
    return _directive_location.get(kind)
| mit | 5a0546b084f1b4ee80b1e15cf683646f | 36.15 | 87 | 0.666891 | 4.213611 | false | false | false | false |
graphql-python/graphql-core | src/graphql/language/ast.py | 1 | 20944 | from __future__ import annotations # Python < 3.10
from copy import copy, deepcopy
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
from ..pyutils import camel_to_snake
from .source import Source
from .token_kind import TokenKind
try:
from typing import TypeAlias
except ImportError: # Python < 3.10
from typing_extensions import TypeAlias
# Public names exported by this module: the complete AST node hierarchy plus
# the lexer/location helpers and the visitor key map.
__all__ = [
    "Location",
    "Token",
    "Node",
    "NameNode",
    "DocumentNode",
    "DefinitionNode",
    "ExecutableDefinitionNode",
    "OperationDefinitionNode",
    "VariableDefinitionNode",
    "SelectionSetNode",
    "SelectionNode",
    "FieldNode",
    "ArgumentNode",
    "ConstArgumentNode",
    "FragmentSpreadNode",
    "InlineFragmentNode",
    "FragmentDefinitionNode",
    "ValueNode",
    "ConstValueNode",
    "VariableNode",
    "IntValueNode",
    "FloatValueNode",
    "StringValueNode",
    "BooleanValueNode",
    "NullValueNode",
    "EnumValueNode",
    "ListValueNode",
    "ConstListValueNode",
    "ObjectValueNode",
    "ConstObjectValueNode",
    "ObjectFieldNode",
    "ConstObjectFieldNode",
    "DirectiveNode",
    "ConstDirectiveNode",
    "TypeNode",
    "NamedTypeNode",
    "ListTypeNode",
    "NonNullTypeNode",
    "TypeSystemDefinitionNode",
    "SchemaDefinitionNode",
    "OperationType",
    "OperationTypeDefinitionNode",
    "TypeDefinitionNode",
    "ScalarTypeDefinitionNode",
    "ObjectTypeDefinitionNode",
    "FieldDefinitionNode",
    "InputValueDefinitionNode",
    "InterfaceTypeDefinitionNode",
    "UnionTypeDefinitionNode",
    "EnumTypeDefinitionNode",
    "EnumValueDefinitionNode",
    "InputObjectTypeDefinitionNode",
    "DirectiveDefinitionNode",
    "SchemaExtensionNode",
    "TypeExtensionNode",
    "TypeSystemExtensionNode",
    "ScalarTypeExtensionNode",
    "ObjectTypeExtensionNode",
    "InterfaceTypeExtensionNode",
    "UnionTypeExtensionNode",
    "EnumTypeExtensionNode",
    "InputObjectTypeExtensionNode",
    "QUERY_DOCUMENT_KEYS",
]
class Token:
    """AST Token

    Represents a range of characters represented by a lexical token within a Source.
    """

    __slots__ = "kind", "start", "end", "line", "column", "prev", "next", "value"

    kind: TokenKind  # the kind of token
    start: int  # the character offset at which this Node begins
    end: int  # the character offset at which this Node ends
    line: int  # the 1-indexed line number on which this Token appears
    column: int  # the 1-indexed column number at which this Token begins
    # for non-punctuation tokens, represents the interpreted value of the token:
    value: Optional[str]
    # Tokens exist as nodes in a double-linked-list amongst all tokens including
    # ignored tokens. <SOF> is always the first node and <EOF> the last.
    prev: Optional[Token]
    next: Optional[Token]

    def __init__(
        self,
        kind: TokenKind,
        start: int,
        end: int,
        line: int,
        column: int,
        value: Optional[str] = None,
    ) -> None:
        self.kind = kind
        self.start, self.end = start, end
        self.line, self.column = line, column
        self.value = value
        # the prev/next links are wired up later when the token stream is built
        self.prev = self.next = None

    def __str__(self) -> str:
        return self.desc

    def __repr__(self) -> str:
        """Print a simplified form when appearing in repr() or inspect()."""
        return f"<Token {self.desc} {self.line}:{self.column}>"

    def __inspect__(self) -> str:
        return repr(self)

    def __eq__(self, other: Any) -> bool:
        # Tokens compare equal field by field (links excluded); comparing
        # against a plain string matches it against the token description.
        if isinstance(other, Token):
            return (
                self.kind == other.kind
                and self.start == other.start
                and self.end == other.end
                and self.line == other.line
                and self.column == other.column
                and self.value == other.value
            )
        elif isinstance(other, str):
            return other == self.desc
        return False

    def __hash__(self) -> int:
        # hash over the same fields that __eq__ uses (links excluded there too)
        return hash(
            (self.kind, self.start, self.end, self.line, self.column, self.value)
        )

    def __copy__(self) -> Token:
        """Create a shallow copy of the token"""
        # Note: only ``prev`` is carried over; ``next`` stays None on the copy
        # (set by __init__), so the copy does not re-enter the original stream.
        token = self.__class__(
            self.kind,
            self.start,
            self.end,
            self.line,
            self.column,
            self.value,
        )
        token.prev = self.prev
        return token

    def __deepcopy__(self, memo: Dict) -> Token:
        """Allow only shallow copies to avoid recursion."""
        return copy(self)

    def __getstate__(self) -> Dict[str, Any]:
        """Remove the links when pickling.

        Keeping the links would make pickling a schema too expensive.
        """
        return {
            key: getattr(self, key)
            for key in self.__slots__
            if key not in {"prev", "next"}
        }

    def __setstate__(self, state: Dict[str, Any]) -> None:
        """Reset the links when un-pickling."""
        for key, value in state.items():
            setattr(self, key, value)
        self.prev = self.next = None

    @property
    def desc(self) -> str:
        """A helper property to describe a token as a string for debugging"""
        kind, value = self.kind.value, self.value
        return f"{kind} {value!r}" if value else kind
class Location:
    """AST Location

    Contains a range of UTF-8 character offsets and token references that identify the
    region of the source from which the AST derived.
    """

    __slots__ = (
        "start",
        "end",
        "start_token",
        "end_token",
        "source",
    )

    start: int  # character offset at which this Node begins
    end: int  # character offset at which this Node ends
    start_token: Token  # Token at which this Node begins
    end_token: Token  # Token at which this Node ends.
    source: Source  # Source document the AST represents

    def __init__(self, start_token: Token, end_token: Token, source: Source) -> None:
        self.start_token = start_token
        self.end_token = end_token
        self.source = source
        # the numeric range is derived from the boundary tokens
        self.start = start_token.start
        self.end = end_token.end

    def __str__(self) -> str:
        return f"{self.start}:{self.end}"

    def __repr__(self) -> str:
        """Print a simplified form when appearing in repr() or inspect()."""
        return f"<Location {self.start}:{self.end}>"

    def __inspect__(self) -> str:
        return repr(self)

    def __eq__(self, other: Any) -> bool:
        # a Location also compares equal to a two-element (start, end) sequence
        if isinstance(other, Location):
            return (self.start, self.end) == (other.start, other.end)
        if isinstance(other, (list, tuple)) and len(other) == 2:
            return (self.start, self.end) == tuple(other)
        return False

    def __ne__(self, other: Any) -> bool:
        return not self == other

    def __hash__(self) -> int:
        return hash((self.start, self.end))
class OperationType(Enum):
    """The three kinds of operations a GraphQL document can contain."""

    QUERY = "query"
    MUTATION = "mutation"
    SUBSCRIPTION = "subscription"
# Default map from node kinds to their node attributes (internal)
# For each node kind (snake_case), lists the names of the attributes that hold
# child nodes; the visitor machinery walks these attributes in the given order.
QUERY_DOCUMENT_KEYS: Dict[str, Tuple[str, ...]] = {
    "name": (),
    "document": ("definitions",),
    "operation_definition": (
        "name",
        "variable_definitions",
        "directives",
        "selection_set",
    ),
    "variable_definition": ("variable", "type", "default_value", "directives"),
    "variable": ("name",),
    "selection_set": ("selections",),
    "field": ("alias", "name", "arguments", "directives", "selection_set"),
    "argument": ("name", "value"),
    "fragment_spread": ("name", "directives"),
    "inline_fragment": ("type_condition", "directives", "selection_set"),
    "fragment_definition": (
        # Note: fragment variable definitions are deprecated and will be removed in v3.3
        "name",
        "variable_definitions",
        "type_condition",
        "directives",
        "selection_set",
    ),
    "list_value": ("values",),
    "object_value": ("fields",),
    "object_field": ("name", "value"),
    "directive": ("name", "arguments"),
    "named_type": ("name",),
    "list_type": ("type",),
    "non_null_type": ("type",),
    "schema_definition": ("description", "directives", "operation_types"),
    "operation_type_definition": ("type",),
    "scalar_type_definition": ("description", "name", "directives"),
    "object_type_definition": (
        "description",
        "name",
        "interfaces",
        "directives",
        "fields",
    ),
    "field_definition": ("description", "name", "arguments", "type", "directives"),
    "input_value_definition": (
        "description",
        "name",
        "type",
        "default_value",
        "directives",
    ),
    "interface_type_definition": (
        "description",
        "name",
        "interfaces",
        "directives",
        "fields",
    ),
    "union_type_definition": ("description", "name", "directives", "types"),
    "enum_type_definition": ("description", "name", "directives", "values"),
    "enum_value_definition": ("description", "name", "directives"),
    "input_object_type_definition": ("description", "name", "directives", "fields"),
    "directive_definition": ("description", "name", "arguments", "locations"),
    "schema_extension": ("directives", "operation_types"),
    "scalar_type_extension": ("name", "directives"),
    "object_type_extension": ("name", "interfaces", "directives", "fields"),
    "interface_type_extension": ("name", "interfaces", "directives", "fields"),
    "union_type_extension": ("name", "directives", "types"),
    "enum_type_extension": ("name", "directives", "values"),
    "input_object_type_extension": ("name", "directives", "fields"),
}
# Base AST Node


class Node:
    """AST nodes"""

    # allow custom attributes and weak references (not used internally)
    __slots__ = "__dict__", "__weakref__", "loc", "_hash"

    loc: Optional[Location]

    kind: str = "ast"  # the kind of the node as a snake_case string
    keys: Tuple[str, ...] = ("loc",)  # the names of the attributes of this node

    def __init__(self, **kwargs: Any) -> None:
        """Initialize the node with the given keyword arguments."""
        for key in self.keys:
            value = kwargs.get(key)
            if isinstance(value, list):
                # child collections are normalized to immutable tuples
                value = tuple(value)
            setattr(self, key, value)

    def __repr__(self) -> str:
        """Get a simple representation of the node."""
        name, loc = self.__class__.__name__, getattr(self, "loc", None)
        return f"{name} at {loc}" if loc else name

    def __eq__(self, other: Any) -> bool:
        """Test whether two nodes are equal (recursively)."""
        return (
            isinstance(other, Node)
            and self.__class__ == other.__class__
            and all(getattr(self, key) == getattr(other, key) for key in self.keys)
        )

    def __hash__(self) -> int:
        """Get a cached hash value for the node."""
        # Caching the hash values improves the performance of AST validators
        hashed = getattr(self, "_hash", None)
        if hashed is None:
            self._hash = id(self)  # avoid recursion
            hashed = hash(tuple(getattr(self, key) for key in self.keys))
            self._hash = hashed
        return hashed

    def __setattr__(self, key: str, value: Any) -> None:
        # reset cached hash value if attributes are changed
        if hasattr(self, "_hash") and key in self.keys:
            del self._hash
        super().__setattr__(key, value)

    def __copy__(self) -> Node:
        """Create a shallow copy of the node."""
        return self.__class__(**{key: getattr(self, key) for key in self.keys})

    def __deepcopy__(self, memo: Dict) -> Node:
        """Create a deep copy of the node"""
        # noinspection PyArgumentList
        return self.__class__(
            **{key: deepcopy(getattr(self, key), memo) for key in self.keys}
        )

    def __init_subclass__(cls) -> None:
        # Derives ``kind`` (snake_case class name without the Const prefix and
        # Node suffix) and ``keys`` (inherited keys plus this class's __slots__)
        # automatically for every subclass.
        super().__init_subclass__()
        name = cls.__name__
        try:
            name = name.removeprefix("Const").removesuffix("Node")
        except AttributeError:  # pragma: no cover (Python < 3.9)
            if name.startswith("Const"):
                name = name[5:]
            if name.endswith("Node"):
                name = name[:-4]
        cls.kind = camel_to_snake(name)
        keys: List[str] = []
        for base in cls.__bases__:
            # noinspection PyUnresolvedReferences
            keys.extend(base.keys)  # type: ignore
        keys.extend(cls.__slots__)
        cls.keys = tuple(keys)

    def to_dict(self, locations: bool = False) -> Dict:
        """Convert the node (and its children) into a plain Python dict."""
        from ..utilities import ast_to_dict

        return ast_to_dict(self, locations)
# Name


class NameNode(Node):
    __slots__ = ("value",)

    value: str


# Document


class DocumentNode(Node):
    """The root node of a parsed GraphQL document."""

    __slots__ = ("definitions",)

    definitions: Tuple[DefinitionNode, ...]


class DefinitionNode(Node):
    __slots__ = ()


class ExecutableDefinitionNode(DefinitionNode):
    """Base class for operation and fragment definitions."""

    __slots__ = "name", "directives", "variable_definitions", "selection_set"

    name: Optional[NameNode]
    directives: Tuple[DirectiveNode, ...]
    variable_definitions: Tuple[VariableDefinitionNode, ...]
    selection_set: SelectionSetNode


class OperationDefinitionNode(ExecutableDefinitionNode):
    __slots__ = ("operation",)

    operation: OperationType


class VariableDefinitionNode(Node):
    __slots__ = "variable", "type", "default_value", "directives"

    variable: VariableNode
    type: TypeNode
    default_value: Optional[ConstValueNode]
    directives: Tuple[ConstDirectiveNode, ...]


class SelectionSetNode(Node):
    __slots__ = ("selections",)

    selections: Tuple[SelectionNode, ...]


class SelectionNode(Node):
    __slots__ = ("directives",)

    directives: Tuple[DirectiveNode, ...]


class FieldNode(SelectionNode):
    __slots__ = "alias", "name", "arguments", "selection_set"

    alias: Optional[NameNode]
    name: NameNode
    arguments: Tuple[ArgumentNode, ...]
    selection_set: Optional[SelectionSetNode]


class ArgumentNode(Node):
    __slots__ = "name", "value"

    name: NameNode
    value: ValueNode


class ConstArgumentNode(ArgumentNode):
    # same shape as ArgumentNode, but the value must be a constant
    value: ConstValueNode


# Fragments


class FragmentSpreadNode(SelectionNode):
    __slots__ = ("name",)

    name: NameNode


class InlineFragmentNode(SelectionNode):
    __slots__ = "type_condition", "selection_set"

    type_condition: NamedTypeNode
    selection_set: SelectionSetNode


class FragmentDefinitionNode(ExecutableDefinitionNode):
    __slots__ = ("type_condition",)

    name: NameNode
    type_condition: NamedTypeNode


# Values


class ValueNode(Node):
    __slots__ = ()


class VariableNode(ValueNode):
    __slots__ = ("name",)

    name: NameNode


class IntValueNode(ValueNode):
    __slots__ = ("value",)

    value: str


class FloatValueNode(ValueNode):
    __slots__ = ("value",)

    value: str


class StringValueNode(ValueNode):
    __slots__ = "value", "block"

    value: str
    block: Optional[bool]


class BooleanValueNode(ValueNode):
    __slots__ = ("value",)

    value: bool


class NullValueNode(ValueNode):
    __slots__ = ()


class EnumValueNode(ValueNode):
    __slots__ = ("value",)

    value: str


class ListValueNode(ValueNode):
    __slots__ = ("values",)

    values: Tuple[ValueNode, ...]


class ConstListValueNode(ListValueNode):
    values: Tuple[ConstValueNode, ...]


class ObjectValueNode(ValueNode):
    __slots__ = ("fields",)

    fields: Tuple[ObjectFieldNode, ...]


class ConstObjectValueNode(ObjectValueNode):
    fields: Tuple[ConstObjectFieldNode, ...]


class ObjectFieldNode(Node):
    __slots__ = "name", "value"

    name: NameNode
    value: ValueNode


class ConstObjectFieldNode(ObjectFieldNode):
    value: ConstValueNode


# Value nodes that are guaranteed to contain no variables and may therefore
# appear in constant positions (e.g. default values).
ConstValueNode: TypeAlias = Union[
    IntValueNode,
    FloatValueNode,
    StringValueNode,
    BooleanValueNode,
    NullValueNode,
    EnumValueNode,
    ConstListValueNode,
    ConstObjectValueNode,
]
# Directives


class DirectiveNode(Node):
    __slots__ = "name", "arguments"

    name: NameNode
    arguments: Tuple[ArgumentNode, ...]


class ConstDirectiveNode(DirectiveNode):
    # a directive whose arguments are all constant values
    arguments: Tuple[ConstArgumentNode, ...]


# Type Reference


class TypeNode(Node):
    __slots__ = ()


class NamedTypeNode(TypeNode):
    __slots__ = ("name",)

    name: NameNode


class ListTypeNode(TypeNode):
    __slots__ = ("type",)

    type: TypeNode


class NonNullTypeNode(TypeNode):
    __slots__ = ("type",)

    # the wrapped type may be named or a list, but not itself non-null
    type: Union[NamedTypeNode, ListTypeNode]


# Type System Definition


class TypeSystemDefinitionNode(DefinitionNode):
    __slots__ = ()


class SchemaDefinitionNode(TypeSystemDefinitionNode):
    __slots__ = "description", "directives", "operation_types"

    description: Optional[StringValueNode]
    directives: Tuple[ConstDirectiveNode, ...]
    operation_types: Tuple[OperationTypeDefinitionNode, ...]


class OperationTypeDefinitionNode(Node):
    __slots__ = "operation", "type"

    operation: OperationType
    type: NamedTypeNode


# Type Definition


class TypeDefinitionNode(TypeSystemDefinitionNode):
    """Base class for all type definitions (scalar, object, interface, ...)."""

    __slots__ = "description", "name", "directives"

    description: Optional[StringValueNode]
    name: NameNode
    directives: Tuple[DirectiveNode, ...]


class ScalarTypeDefinitionNode(TypeDefinitionNode):
    __slots__ = ()

    directives: Tuple[ConstDirectiveNode, ...]


class ObjectTypeDefinitionNode(TypeDefinitionNode):
    __slots__ = "interfaces", "fields"

    interfaces: Tuple[NamedTypeNode, ...]
    directives: Tuple[ConstDirectiveNode, ...]
    fields: Tuple[FieldDefinitionNode, ...]


class FieldDefinitionNode(DefinitionNode):
    __slots__ = "description", "name", "directives", "arguments", "type"

    description: Optional[StringValueNode]
    name: NameNode
    directives: Tuple[ConstDirectiveNode, ...]
    arguments: Tuple[InputValueDefinitionNode, ...]
    type: TypeNode


class InputValueDefinitionNode(DefinitionNode):
    __slots__ = "description", "name", "directives", "type", "default_value"

    description: Optional[StringValueNode]
    name: NameNode
    directives: Tuple[ConstDirectiveNode, ...]
    type: TypeNode
    default_value: Optional[ConstValueNode]


class InterfaceTypeDefinitionNode(TypeDefinitionNode):
    __slots__ = "fields", "interfaces"

    fields: Tuple[FieldDefinitionNode, ...]
    directives: Tuple[ConstDirectiveNode, ...]
    interfaces: Tuple[NamedTypeNode, ...]


class UnionTypeDefinitionNode(TypeDefinitionNode):
    __slots__ = ("types",)

    directives: Tuple[ConstDirectiveNode, ...]
    types: Tuple[NamedTypeNode, ...]


class EnumTypeDefinitionNode(TypeDefinitionNode):
    __slots__ = ("values",)

    directives: Tuple[ConstDirectiveNode, ...]
    values: Tuple[EnumValueDefinitionNode, ...]


class EnumValueDefinitionNode(DefinitionNode):
    __slots__ = "description", "name", "directives"

    description: Optional[StringValueNode]
    name: NameNode
    directives: Tuple[ConstDirectiveNode, ...]


class InputObjectTypeDefinitionNode(TypeDefinitionNode):
    __slots__ = ("fields",)

    directives: Tuple[ConstDirectiveNode, ...]
    fields: Tuple[InputValueDefinitionNode, ...]


# Directive Definitions


class DirectiveDefinitionNode(TypeSystemDefinitionNode):
    __slots__ = "description", "name", "arguments", "repeatable", "locations"

    description: Optional[StringValueNode]
    name: NameNode
    arguments: Tuple[InputValueDefinitionNode, ...]
    repeatable: bool
    locations: Tuple[NameNode, ...]
# Type System Extensions


class SchemaExtensionNode(Node):
    __slots__ = "directives", "operation_types"

    directives: Tuple[ConstDirectiveNode, ...]
    operation_types: Tuple[OperationTypeDefinitionNode, ...]


# Type Extensions


class TypeExtensionNode(TypeSystemDefinitionNode):
    """Base class for all type extensions."""

    __slots__ = "name", "directives"

    name: NameNode
    directives: Tuple[ConstDirectiveNode, ...]


# Any node that extends the type system (the schema itself or one of its types).
TypeSystemExtensionNode: TypeAlias = Union[SchemaExtensionNode, TypeExtensionNode]


class ScalarTypeExtensionNode(TypeExtensionNode):
    __slots__ = ()


class ObjectTypeExtensionNode(TypeExtensionNode):
    __slots__ = "interfaces", "fields"

    interfaces: Tuple[NamedTypeNode, ...]
    fields: Tuple[FieldDefinitionNode, ...]


class InterfaceTypeExtensionNode(TypeExtensionNode):
    __slots__ = "interfaces", "fields"

    interfaces: Tuple[NamedTypeNode, ...]
    fields: Tuple[FieldDefinitionNode, ...]


class UnionTypeExtensionNode(TypeExtensionNode):
    __slots__ = ("types",)

    types: Tuple[NamedTypeNode, ...]


class EnumTypeExtensionNode(TypeExtensionNode):
    __slots__ = ("values",)

    values: Tuple[EnumValueDefinitionNode, ...]


class InputObjectTypeExtensionNode(TypeExtensionNode):
    __slots__ = ("fields",)

    fields: Tuple[InputValueDefinitionNode, ...]
| mit | 0a20baae720c395c5c2050be5b1d6d8b | 24.666667 | 88 | 0.624141 | 4.047932 | false | false | false | false |
graphql-python/graphql-core | tests/star_wars_schema.py | 1 | 7871 | """Star Wars GraphQL schema
This is designed to be an end-to-end test, demonstrating the full GraphQL stack.
We will create a GraphQL schema that describes the major characters in the original
Star Wars trilogy.
NOTE: This may contain spoilers for the original Star Wars trilogy.
Using our shorthand to describe type systems, the type system for our Star Wars example
is::
enum Episode { NEW_HOPE, EMPIRE, JEDI }
interface Character {
id: String!
name: String
friends: [Character]
appearsIn: [Episode]
}
type Human implements Character {
id: String!
name: String
friends: [Character]
appearsIn: [Episode]
homePlanet: String
}
type Droid implements Character {
id: String!
name: String
friends: [Character]
appearsIn: [Episode]
primaryFunction: String
}
type Query {
hero(episode: Episode): Character
human(id: String!): Human
droid(id: String!): Droid
}
"""
from graphql.type import (
GraphQLArgument,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLField,
GraphQLInterfaceType,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLSchema,
GraphQLString,
)
from tests.star_wars_data import (
get_droid,
get_friends,
get_hero,
get_human,
get_secret_backstory,
)
__all__ = ["star_wars_schema"]
# We begin by setting up our schema.
# The original trilogy consists of three movies.
#
# This implements the following type system shorthand:
# enum Episode { NEW_HOPE, EMPIRE, JEDI }
episode_enum = GraphQLEnumType(
    "Episode",
    {
        # The internal values (4, 5, 6) are the saga's episode numbers.
        "NEW_HOPE": GraphQLEnumValue(4, description="Released in 1977."),
        "EMPIRE": GraphQLEnumValue(5, description="Released in 1980."),
        "JEDI": GraphQLEnumValue(6, description="Released in 1983."),
    },
    description="One of the films in the Star Wars Trilogy",
)
# Characters in the Star Wars trilogy are either humans or droids.
#
# This implements the following type system shorthand:
# interface Character {
# id: String!
# name: String
# friends: [Character]
# appearsIn: [Episode]
# secretBackstory: String
# Forward variable annotations: the resolve_type lambda below refers to
# human_type and droid_type, which are assigned further down in this module.
human_type: GraphQLObjectType
droid_type: GraphQLObjectType
character_interface: GraphQLInterfaceType = GraphQLInterfaceType(
    "Character",
    # The field map is a lambda (thunk) so the interface can reference itself
    # in the "friends" field before its own definition is complete.
    lambda: {
        "id": GraphQLField(
            GraphQLNonNull(GraphQLString), description="The id of the character."
        ),
        "name": GraphQLField(GraphQLString, description="The name of the character."),
        "friends": GraphQLField(
            GraphQLList(character_interface),
            description="The friends of the character,"
            " or an empty list if they have none.",
        ),
        "appearsIn": GraphQLField(
            GraphQLList(episode_enum), description="Which movies they appear in."
        ),
        "secretBackstory": GraphQLField(
            GraphQLString, description="All secrets about their past."
        ),
    },
    # Map the data object's "type" tag to the concrete GraphQL type name.
    resolve_type=lambda character, _info, _type: {
        "Human": human_type.name,
        "Droid": droid_type.name,
    }[character.type],
    description="A character in the Star Wars Trilogy",
)
# We define our human type, which implements the character interface.
#
# This implements the following type system shorthand:
# type Human : Character {
# id: String!
# name: String
# friends: [Character]
# appearsIn: [Episode]
# secretBackstory: String
# }
human_type = GraphQLObjectType(
    "Human",
    # Field map is a thunk so it can lazily reference character_interface.
    lambda: {
        "id": GraphQLField(
            GraphQLNonNull(GraphQLString), description="The id of the human."
        ),
        "name": GraphQLField(GraphQLString, description="The name of the human."),
        "friends": GraphQLField(
            GraphQLList(character_interface),
            description="The friends of the human,"
            " or an empty list if they have none.",
            resolve=lambda human, _info: get_friends(human),
        ),
        "appearsIn": GraphQLField(
            GraphQLList(episode_enum), description="Which movies they appear in."
        ),
        "homePlanet": GraphQLField(
            GraphQLString,
            description="The home planet of the human, or null if unknown.",
        ),
        "secretBackstory": GraphQLField(
            GraphQLString,
            resolve=lambda human, _info: get_secret_backstory(human),
            description="Where are they from and how they came to be who they are.",
        ),
    },
    interfaces=[character_interface],
    description="A humanoid creature in the Star Wars universe.",
)
# The other type of character in Star Wars is a droid.
#
# This implements the following type system shorthand:
# type Droid : Character {
# id: String!
# name: String
# friends: [Character]
# appearsIn: [Episode]
# secretBackstory: String
# primaryFunction: String
# }
droid_type = GraphQLObjectType(
    "Droid",
    # Field map is a thunk so it can lazily reference character_interface.
    lambda: {
        "id": GraphQLField(
            GraphQLNonNull(GraphQLString), description="The id of the droid."
        ),
        "name": GraphQLField(GraphQLString, description="The name of the droid."),
        "friends": GraphQLField(
            GraphQLList(character_interface),
            description="The friends of the droid,"
            " or an empty list if they have none.",
            resolve=lambda droid, _info: get_friends(droid),
        ),
        "appearsIn": GraphQLField(
            GraphQLList(episode_enum), description="Which movies they appear in."
        ),
        "secretBackstory": GraphQLField(
            GraphQLString,
            resolve=lambda droid, _info: get_secret_backstory(droid),
            description="Construction date and the name of the designer.",
        ),
        "primaryFunction": GraphQLField(
            GraphQLString, description="The primary function of the droid."
        ),
    },
    interfaces=[character_interface],
    description="A mechanical creature in the Star Wars universe.",
)
# This is the type that will be the root of our query, and the
# entry point into our schema. It gives us the ability to fetch
# objects by their IDs, as well as to fetch the undisputed hero
# of the Star Wars trilogy, R2-D2, directly.
#
# This implements the following type system shorthand:
# type Query {
# hero(episode: Episode): Character
# human(id: String!): Human
# droid(id: String!): Droid
# }
# noinspection PyShadowingBuiltins
query_type = GraphQLObjectType(
    "Query",
    lambda: {
        "hero": GraphQLField(
            character_interface,
            args={
                "episode": GraphQLArgument(
                    episode_enum,
                    description=(
                        "If omitted, returns the hero of the whole saga."
                        " If provided, returns the hero of that particular episode."
                    ),
                )
            },
            # The argument is optional; get_hero(None) handles the saga default.
            resolve=lambda _source, _info, episode=None: get_hero(episode),
        ),
        "human": GraphQLField(
            human_type,
            args={
                "id": GraphQLArgument(
                    GraphQLNonNull(GraphQLString), description="id of the human"
                )
            },
            # 'id' shadows the builtin on purpose: it must match the GraphQL
            # argument name (see the noinspection comment above).
            resolve=lambda _source, _info, id: get_human(id),
        ),
        "droid": GraphQLField(
            droid_type,
            args={
                "id": GraphQLArgument(
                    GraphQLNonNull(GraphQLString), description="id of the droid"
                )
            },
            resolve=lambda _source, _info, id: get_droid(id),
        ),
    },
)
# Finally, we construct our schema (whose starting query type is the query
# type we defined above) and export it.
# The executable schema exported by this module (see __all__ above).
star_wars_schema = GraphQLSchema(query_type, types=[human_type, droid_type])
| mit | f1243ce4a3d3cf0cc74bfa098ec208e3 | 29.389961 | 87 | 0.614788 | 3.923729 | false | false | false | false |
graphql-python/graphql-core | tests/validation/test_possible_fragment_spreads.py | 1 | 10022 | from functools import partial
from graphql.utilities import build_schema
from graphql.validation import PossibleFragmentSpreadsRule
from .harness import assert_validation_errors
test_schema = build_schema(
"""
interface Being {
name: String
}
interface Pet implements Being {
name: String
}
type Dog implements Being & Pet {
name: String
barkVolume: Int
}
type Cat implements Being & Pet {
name: String
meowVolume: Int
}
union CatOrDog = Cat | Dog
interface Intelligent {
iq: Int
}
type Human implements Being & Intelligent {
name: String
pets: [Pet]
iq: Int
}
type Alien implements Being & Intelligent {
name: String
iq: Int
}
union DogOrHuman = Dog | Human
union HumanOrAlien = Human | Alien
type Query {
catOrDog: CatOrDog
dogOrHuman: DogOrHuman
humanOrAlien: HumanOrAlien
}
"""
)
# Pre-bind the rule under test and the schema, so each spec below only
# supplies the query (and, for assert_errors, the expected error list).
assert_errors = partial(
    assert_validation_errors, PossibleFragmentSpreadsRule, schema=test_schema
)
# assert_valid is assert_errors with an empty expected-error list.
assert_valid = partial(assert_errors, errors=[])
def describe_validate_possible_fragment_spreads():
    """Validate: possible fragment spreads.

    pytest-describe specs checking that a fragment is only spread into a
    position whose type can ever be of the fragment's type condition.
    """
    def of_the_same_object():
        assert_valid(
            """
            fragment objectWithinObject on Dog { ...dogFragment }
            fragment dogFragment on Dog { barkVolume }
            """
        )
    def of_the_same_object_inline_fragment():
        assert_valid(
            """
            fragment objectWithinObjectAnon on Dog { ... on Dog { barkVolume } }
            """
        )
    def object_into_implemented_interface():
        assert_valid(
            """
            fragment objectWithinInterface on Pet { ...dogFragment }
            fragment dogFragment on Dog { barkVolume }
            """
        )
    def object_into_containing_union():
        assert_valid(
            """
            fragment objectWithinUnion on CatOrDog { ...dogFragment }
            fragment dogFragment on Dog { barkVolume }
            """
        )
    def union_into_contained_object():
        assert_valid(
            """
            fragment unionWithinObject on Dog { ...catOrDogFragment }
            fragment catOrDogFragment on CatOrDog { __typename }
            """
        )
    def union_into_overlapping_interface():
        assert_valid(
            """
            fragment unionWithinInterface on Pet { ...catOrDogFragment }
            fragment catOrDogFragment on CatOrDog { __typename }
            """
        )
    def union_into_overlapping_union():
        assert_valid(
            """
            fragment unionWithinUnion on DogOrHuman { ...catOrDogFragment }
            fragment catOrDogFragment on CatOrDog { __typename }
            """
        )
    def interface_into_implemented_object():
        assert_valid(
            """
            fragment interfaceWithinObject on Dog { ...petFragment }
            fragment petFragment on Pet { name }
            """
        )
    def interface_into_overlapping_interface():
        assert_valid(
            """
            fragment interfaceWithinInterface on Pet { ...beingFragment }
            fragment beingFragment on Being { name }
            """
        )
    def interface_into_overlapping_interface_in_inline_fragment():
        assert_valid(
            """
            fragment interfaceWithinInterface on Pet { ... on Being { name } }
            """
        )
    def interface_into_overlapping_union():
        assert_valid(
            """
            fragment interfaceWithinUnion on CatOrDog { ...petFragment }
            fragment petFragment on Pet { name }
            """
        )
    def ignores_incorrect_type_caught_by_fragments_on_composite_types():
        assert_valid(
            """
            fragment petFragment on Pet { ...badInADifferentWay }
            fragment badInADifferentWay on String { name }
            """
        )
    def ignores_unknown_fragments_caught_by_known_fragment_names():
        assert_valid(
            """
            fragment petFragment on Pet { ...UnknownFragment }
            """
        )
    def different_object_into_object():
        assert_errors(
            """
            fragment invalidObjectWithinObject on Cat { ...dogFragment }
            fragment dogFragment on Dog { barkVolume }
            """,
            [
                {
                    "message": "Fragment 'dogFragment' cannot be spread here"
                    " as objects of type 'Cat' can never be of type 'Dog'.",
                    "locations": [(2, 57)],
                },
            ],
        )
    def different_object_into_object_in_inline_fragment():
        assert_errors(
            """
            fragment invalidObjectWithinObjectAnon on Cat {
              ... on Dog { barkVolume }
            }
            """,
            [
                {
                    "message": "Fragment cannot be spread here"
                    " as objects of type 'Cat' can never be of type 'Dog'.",
                    "locations": [(3, 15)],
                },
            ],
        )
    def object_into_not_implementing_interface():
        assert_errors(
            """
            fragment invalidObjectWithinInterface on Pet { ...humanFragment }
            fragment humanFragment on Human { pets { name } }
            """,
            [
                {
                    "message": "Fragment 'humanFragment' cannot be spread here"
                    " as objects of type 'Pet' can never be of type 'Human'.",
                    "locations": [(2, 60)],
                },
            ],
        )
    def object_into_not_containing_union():
        assert_errors(
            """
            fragment invalidObjectWithinUnion on CatOrDog { ...humanFragment }
            fragment humanFragment on Human { pets { name } }
            """,
            [
                {
                    "message": "Fragment 'humanFragment' cannot be spread here"
                    " as objects of type 'CatOrDog' can never be of type 'Human'.",
                    "locations": [(2, 61)],
                },
            ],
        )
    def union_into_not_contained_object():
        assert_errors(
            """
            fragment invalidUnionWithinObject on Human { ...catOrDogFragment }
            fragment catOrDogFragment on CatOrDog { __typename }
            """,
            [
                {
                    "message": "Fragment 'catOrDogFragment' cannot be spread here"
                    " as objects of type 'Human' can never be of type 'CatOrDog'.",
                    "locations": [(2, 58)],
                },
            ],
        )
    def union_into_non_overlapping_interface():
        assert_errors(
            """
            fragment invalidUnionWithinInterface on Pet { ...humanOrAlienFragment }
            fragment humanOrAlienFragment on HumanOrAlien { __typename }
            """,
            [
                {
                    "message": "Fragment 'humanOrAlienFragment' cannot be spread here"
                    " as objects of type 'Pet' can never be of type 'HumanOrAlien'.",
                    "locations": [(2, 59)],
                },
            ],
        )
    def union_into_non_overlapping_union():
        assert_errors(
            """
            fragment invalidUnionWithinUnion on CatOrDog { ...humanOrAlienFragment }
            fragment humanOrAlienFragment on HumanOrAlien { __typename }
            """,
            [
                {
                    "message": "Fragment 'humanOrAlienFragment'"
                    " cannot be spread here as objects of type 'CatOrDog'"
                    " can never be of type 'HumanOrAlien'.",
                    "locations": [(2, 60)],
                },
            ],
        )
    def interface_into_non_implementing_object():
        assert_errors(
            """
            fragment invalidInterfaceWithinObject on Cat { ...intelligentFragment }
            fragment intelligentFragment on Intelligent { iq }
            """,
            [
                {
                    "message": "Fragment 'intelligentFragment' cannot be spread here"
                    " as objects of type 'Cat' can never be of type 'Intelligent'.",
                    "locations": [(2, 60)],
                },
            ],
        )
    def interface_into_non_overlapping_interface():
        assert_errors(
            """
            fragment invalidInterfaceWithinInterface on Pet {
              ...intelligentFragment
            }
            fragment intelligentFragment on Intelligent { iq }
            """,
            [
                {
                    "message": "Fragment 'intelligentFragment' cannot be spread here"
                    " as objects of type 'Pet' can never be of type 'Intelligent'.",
                    "locations": [(3, 15)],
                },
            ],
        )
    def interface_into_non_overlapping_interface_in_inline_fragment():
        assert_errors(
            """
            fragment invalidInterfaceWithinInterfaceAnon on Pet {
              ...on Intelligent { iq }
            }
            """,
            [
                {
                    "message": "Fragment cannot be spread here as objects"
                    " of type 'Pet' can never be of type 'Intelligent'.",
                    "locations": [(3, 15)],
                },
            ],
        )
    def interface_into_non_overlapping_union():
        assert_errors(
            """
            fragment invalidInterfaceWithinUnion on HumanOrAlien { ...petFragment }
            fragment petFragment on Pet { name }
            """,
            [
                {
                    "message": "Fragment 'petFragment' cannot be spread here"
                    " as objects of type 'HumanOrAlien' can never be of type 'Pet'.",
                    "locations": [(2, 68)],
                },
            ],
        )
| mit | 6018b18db6e40d808d39eb3103dd1289 | 28.827381 | 86 | 0.496109 | 4.707374 | false | false | false | false |
graphql-python/graphql-core | src/graphql/utilities/get_introspection_query.py | 1 | 7946 | from textwrap import dedent
from typing import Any, Dict, List, Optional, Union
from ..language import DirectiveLocation
try:
from typing import Literal, TypedDict
except ImportError: # Python < 3.8
from typing_extensions import Literal, TypedDict # type: ignore
try:
from typing import TypeAlias
except ImportError: # Python < 3.10
from typing_extensions import TypeAlias
__all__ = [
"get_introspection_query",
"IntrospectionDirective",
"IntrospectionEnumType",
"IntrospectionField",
"IntrospectionInputObjectType",
"IntrospectionInputValue",
"IntrospectionInterfaceType",
"IntrospectionListType",
"IntrospectionNonNullType",
"IntrospectionObjectType",
"IntrospectionQuery",
"IntrospectionScalarType",
"IntrospectionSchema",
"IntrospectionType",
"IntrospectionTypeRef",
"IntrospectionUnionType",
]
def get_introspection_query(
    descriptions: bool = True,
    specified_by_url: bool = False,
    directive_is_repeatable: bool = False,
    schema_description: bool = False,
    input_value_deprecation: bool = False,
) -> str:
    """Get a query for introspection.

    Optionally, you can exclude descriptions, include specification URLs,
    include repeatability of directives, and specify whether to include
    the schema description as well.

    Args:
        descriptions: include ``description`` fields on types, fields,
            enum values and directives.
        specified_by_url: include the ``specifiedByURL`` field on types.
        directive_is_repeatable: include the ``isRepeatable`` field
            on directives.
        schema_description: include the schema's own description
            (only emitted when ``descriptions`` is also true).
        input_value_deprecation: request deprecated input values and their
            ``isDeprecated``/``deprecationReason`` fields.

    Returns:
        The introspection query as a dedented, formatted string.
    """
    maybe_description = "description" if descriptions else ""
    maybe_specified_by_url = "specifiedByURL" if specified_by_url else ""
    maybe_directive_is_repeatable = "isRepeatable" if directive_is_repeatable else ""
    maybe_schema_description = maybe_description if schema_description else ""
    def input_deprecation(string: str) -> str:
        # Emit the given snippet only when deprecated input values are requested.
        return string if input_value_deprecation else ""
    return dedent(
        f"""
        query IntrospectionQuery {{
          __schema {{
            {maybe_schema_description}
            queryType {{ name }}
            mutationType {{ name }}
            subscriptionType {{ name }}
            types {{
              ...FullType
            }}
            directives {{
              name
              {maybe_description}
              {maybe_directive_is_repeatable}
              locations
              args{input_deprecation("(includeDeprecated: true)")} {{
                ...InputValue
              }}
            }}
          }}
        }}

        fragment FullType on __Type {{
          kind
          name
          {maybe_description}
          {maybe_specified_by_url}
          fields(includeDeprecated: true) {{
            name
            {maybe_description}
            args{input_deprecation("(includeDeprecated: true)")} {{
              ...InputValue
            }}
            type {{
              ...TypeRef
            }}
            isDeprecated
            deprecationReason
          }}
          inputFields{input_deprecation("(includeDeprecated: true)")} {{
            ...InputValue
          }}
          interfaces {{
            ...TypeRef
          }}
          enumValues(includeDeprecated: true) {{
            name
            {maybe_description}
            isDeprecated
            deprecationReason
          }}
          possibleTypes {{
            ...TypeRef
          }}
        }}

        fragment InputValue on __InputValue {{
          name
          {maybe_description}
          type {{ ...TypeRef }}
          defaultValue
          {input_deprecation("isDeprecated")}
          {input_deprecation("deprecationReason")}
        }}

        fragment TypeRef on __Type {{
          kind
          name
          ofType {{
            kind
            name
            ofType {{
              kind
              name
              ofType {{
                kind
                name
                ofType {{
                  kind
                  name
                  ofType {{
                    kind
                    name
                    ofType {{
                      kind
                      name
                      ofType {{
                        kind
                        name
                      }}
                    }}
                  }}
                }}
              }}
            }}
          }}
        }}
        """
    )
# Unfortunately, the following type definitions are a bit simplistic
# because of current restrictions in the typing system (mypy):
# - no recursion, see https://github.com/python/mypy/issues/731
# - no generic typed dicts, see https://github.com/python/mypy/issues/3863
# simplified IntrospectionNamedType used to avoid cycles
SimpleIntrospectionType: TypeAlias = Dict[str, Any]
class MaybeWithDescription(TypedDict, total=False):
    """Mixin: optional ``description`` key (total=False makes it omittable)."""
    description: Optional[str]
class WithName(MaybeWithDescription):
    """Introspection result with a required name and optional description."""
    name: str
class MaybeWithSpecifiedByUrl(TypedDict, total=False):
    """Mixin: optional ``specifiedByURL`` key."""
    specifiedByURL: Optional[str]
class WithDeprecated(TypedDict):
    """Mixin: required deprecation information keys."""
    isDeprecated: bool
    deprecationReason: Optional[str]
class MaybeWithDeprecated(TypedDict, total=False):
    """Mixin: optional deprecation information keys."""
    isDeprecated: bool
    deprecationReason: Optional[str]
class IntrospectionInputValue(WithName, MaybeWithDeprecated):
    """Introspected input value (used for directive/field args and input fields)."""
    type: SimpleIntrospectionType  # should be IntrospectionInputType
    defaultValue: Optional[str]
class IntrospectionField(WithName, WithDeprecated):
    """Introspected output field with its arguments."""
    args: List[IntrospectionInputValue]
    type: SimpleIntrospectionType  # should be IntrospectionOutputType
class IntrospectionEnumValue(WithName, WithDeprecated):
    """Introspected enum value (only inherited keys)."""
    pass
class MaybeWithIsRepeatable(TypedDict, total=False):
    """Mixin: optional ``isRepeatable`` key."""
    isRepeatable: bool
class IntrospectionDirective(WithName, MaybeWithIsRepeatable):
    """Introspected directive with its locations and arguments."""
    locations: List[DirectiveLocation]
    args: List[IntrospectionInputValue]
class IntrospectionScalarType(WithName, MaybeWithSpecifiedByUrl):
    """Introspected scalar type."""
    kind: Literal["scalar"]
class IntrospectionInterfaceType(WithName):
    """Introspected interface type."""
    kind: Literal["interface"]
    fields: List[IntrospectionField]
    interfaces: List[SimpleIntrospectionType]  # should be InterfaceType
    possibleTypes: List[SimpleIntrospectionType]  # should be NamedType
class IntrospectionObjectType(WithName):
    """Introspected object type."""
    kind: Literal["object"]
    fields: List[IntrospectionField]
    interfaces: List[SimpleIntrospectionType]  # should be InterfaceType
class IntrospectionUnionType(WithName):
    """Introspected union type."""
    kind: Literal["union"]
    possibleTypes: List[SimpleIntrospectionType]  # should be NamedType
class IntrospectionEnumType(WithName):
    """Introspected enum type."""
    kind: Literal["enum"]
    enumValues: List[IntrospectionEnumValue]
class IntrospectionInputObjectType(WithName):
    """Introspected input object type."""
    kind: Literal["input_object"]
    inputFields: List[IntrospectionInputValue]
# Union of all named introspected types.
IntrospectionType: TypeAlias = Union[
    IntrospectionScalarType,
    IntrospectionObjectType,
    IntrospectionInterfaceType,
    IntrospectionUnionType,
    IntrospectionEnumType,
    IntrospectionInputObjectType,
]

# Named types that may appear in output positions.
IntrospectionOutputType: TypeAlias = Union[
    IntrospectionScalarType,
    IntrospectionObjectType,
    IntrospectionInterfaceType,
    IntrospectionUnionType,
    IntrospectionEnumType,
]

# Named types that may appear in input positions.
IntrospectionInputType: TypeAlias = Union[
    IntrospectionScalarType, IntrospectionEnumType, IntrospectionInputObjectType
]
class IntrospectionListType(TypedDict):
    """Introspected list type wrapper."""
    kind: Literal["list"]
    ofType: SimpleIntrospectionType  # should be IntrospectionType
class IntrospectionNonNullType(TypedDict):
    """Introspected non-null type wrapper."""
    kind: Literal["non_null"]
    ofType: SimpleIntrospectionType  # should be IntrospectionType
# A type reference: a named type or a list/non-null wrapper around one.
IntrospectionTypeRef: TypeAlias = Union[
    IntrospectionType, IntrospectionListType, IntrospectionNonNullType
]
class IntrospectionSchema(MaybeWithDescription):
    """Introspection result for the schema itself."""
    queryType: IntrospectionObjectType
    mutationType: Optional[IntrospectionObjectType]
    subscriptionType: Optional[IntrospectionObjectType]
    types: List[IntrospectionType]
    directives: List[IntrospectionDirective]
class IntrospectionQuery(TypedDict):
    """The root typed dictionary for schema introspections."""
    # NOTE(review): a double-underscore name is mangled inside a class body;
    # verify that consumers look up the literal "__schema" key. TODO confirm
    __schema: IntrospectionSchema
| mit | 8acb12b20b0ae690563875dbb4713330 | 25.844595 | 85 | 0.638686 | 4.792521 | false | false | false | false |
graphql-python/graphql-core | src/graphql/validation/rules/overlapping_fields_can_be_merged.py | 1 | 28442 | from itertools import chain
from typing import Any, Dict, List, Optional, Tuple, Union, cast
from ...error import GraphQLError
from ...language import (
FieldNode,
FragmentDefinitionNode,
FragmentSpreadNode,
InlineFragmentNode,
ObjectFieldNode,
ObjectValueNode,
SelectionSetNode,
print_ast,
)
from ...type import (
GraphQLCompositeType,
GraphQLField,
GraphQLNamedType,
GraphQLOutputType,
get_named_type,
is_interface_type,
is_leaf_type,
is_list_type,
is_non_null_type,
is_object_type,
)
from ...utilities import type_from_ast
from ...utilities.sort_value_node import sort_value_node
from . import ValidationContext, ValidationRule
try:
from typing import TypeAlias
except ImportError: # Python < 3.10
from typing_extensions import TypeAlias
MYPY = False
__all__ = ["OverlappingFieldsCanBeMergedRule"]
def reason_message(reason: "ConflictReasonMessage") -> str:
    """Render a conflict reason as a human-readable message.

    A reason is either a plain string, or a list of ``(response_name,
    sub_reason)`` pairs describing conflicting subfields, which are
    flattened recursively and joined with " and ".
    """
    if not isinstance(reason, list):
        return reason
    parts = [
        f"subfields '{name}' conflict because {reason_message(sub_reason)}"
        for name, sub_reason in reason
    ]
    return " and ".join(parts)
class OverlappingFieldsCanBeMergedRule(ValidationRule):
    """Overlapping fields can be merged

    A selection set is only valid if all fields (including spreading any fragments)
    either correspond to distinct response names or can be merged without ambiguity.

    See https://spec.graphql.org/draft/#sec-Field-Selection-Merging
    """
    def __init__(self, context: ValidationContext):
        super().__init__(context)
        # A memoization for when two fragments are compared "between" each other for
        # conflicts. Two fragments may be compared many times, so memoizing this can
        # dramatically improve the performance of this validator.
        self.compared_fragment_pairs = PairSet()
        # A cache for the "field map" and list of fragment names found in any given
        # selection set. Selection sets may be asked for this information multiple
        # times, so this improves the performance of this validator.
        self.cached_fields_and_fragment_names: Dict = {}
    def enter_selection_set(self, selection_set: SelectionSetNode, *_args: Any) -> None:
        """Report a GraphQLError for every merge conflict in the selection set."""
        conflicts = find_conflicts_within_selection_set(
            self.context,
            self.cached_fields_and_fragment_names,
            self.compared_fragment_pairs,
            self.context.get_parent_type(),
            selection_set,
        )
        # Each conflict names the clashing response key and both field groups.
        for (reason_name, reason), fields1, fields2 in conflicts:
            reason_msg = reason_message(reason)
            self.report_error(
                GraphQLError(
                    f"Fields '{reason_name}' conflict because {reason_msg}."
                    " Use different aliases on the fields to fetch both"
                    " if this was intentional.",
                    fields1 + fields2,
                )
            )
# A conflict: the reason plus the two groups of conflicting field nodes.
Conflict: TypeAlias = Tuple["ConflictReason", List[FieldNode], List[FieldNode]]
# Field name and reason.
ConflictReason: TypeAlias = Tuple[str, "ConflictReasonMessage"]
# Reason is a string, or a nested list of conflicts.
if MYPY:  # recursive types not fully supported yet (/python/mypy/issues/731)
    ConflictReasonMessage: TypeAlias = Union[str, List]
else:
    ConflictReasonMessage: TypeAlias = Union[str, List[ConflictReason]]
# Tuple defining a field node in a context.
NodeAndDef: TypeAlias = Tuple[GraphQLCompositeType, FieldNode, Optional[GraphQLField]]
# Maps a response name to all (parent type, node, definition) entries providing it.
NodeAndDefCollection: TypeAlias = Dict[str, List[NodeAndDef]]
# Algorithm:
#
# Conflicts occur when two fields exist in a query which will produce the same
# response name, but represent differing values, thus creating a conflict.
# The algorithm below finds all conflicts via making a series of comparisons
# between fields. In order to compare as few fields as possible, this makes
# a series of comparisons "within" sets of fields and "between" sets of fields.
#
# Given any selection set, a collection produces both a set of fields by
# also including all inline fragments, as well as a list of fragments
# referenced by fragment spreads.
#
# A) Each selection set represented in the document first compares "within" its
# collected set of fields, finding any conflicts between every pair of
# overlapping fields.
#   Note: This is the *only time* that the fields "within" a set are compared
# to each other. After this only fields "between" sets are compared.
#
# B) Also, if any fragment is referenced in a selection set, then a
# comparison is made "between" the original set of fields and the
# referenced fragment.
#
# C) Also, if multiple fragments are referenced, then comparisons
# are made "between" each referenced fragment.
#
# D) When comparing "between" a set of fields and a referenced fragment, first
# a comparison is made between each field in the original set of fields and
#   each field in the referenced set of fields.
#
# E) Also, if any fragment is referenced in the referenced selection set,
# then a comparison is made "between" the original set of fields and the
# referenced fragment (recursively referring to step D).
#
# F) When comparing "between" two fragments, first a comparison is made between
#   each field in the first referenced set of fields and each field in the
# second referenced set of fields.
#
# G) Also, any fragments referenced by the first must be compared to the
# second, and any fragments referenced by the second must be compared to the
# first (recursively referring to step F).
#
# H) When comparing two fields, if both have selection sets, then a comparison
# is made "between" both selection sets, first comparing the set of fields in
# the first selection set with the set of fields in the second.
#
# I) Also, if any fragment is referenced in either selection set, then a
# comparison is made "between" the other set of fields and the
# referenced fragment.
#
# J) Also, if two fragments are referenced in both selection sets, then a
# comparison is made "between" the two fragments.
def find_conflicts_within_selection_set(
    context: ValidationContext,
    cached_fields_and_fragment_names: Dict,
    compared_fragment_pairs: "PairSet",
    parent_type: Optional[GraphQLNamedType],
    selection_set: SelectionSetNode,
) -> List[Conflict]:
    """Find conflicts within selection set.

    Find all conflicts found "within" a selection set, including those found via
    spreading in fragments.

    Called when visiting each SelectionSet in the GraphQL Document.

    Returns the list of conflicts found (possibly empty).
    """
    conflicts: List[Conflict] = []
    field_map, fragment_names = get_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, parent_type, selection_set
    )
    # (A) Find all conflicts "within" the fields of this selection set.
    # Note: this is the *only place* `collect_conflicts_within` is called.
    collect_conflicts_within(
        context,
        conflicts,
        cached_fields_and_fragment_names,
        compared_fragment_pairs,
        field_map,
    )
    if fragment_names:
        # (B) Then collect conflicts between these fields and those represented by each
        # spread fragment name found.
        for i, fragment_name in enumerate(fragment_names):
            collect_conflicts_between_fields_and_fragment(
                context,
                conflicts,
                cached_fields_and_fragment_names,
                compared_fragment_pairs,
                False,
                field_map,
                fragment_name,
            )
            # (C) Then compare this fragment with all other fragments found in this
            # selection set to collect conflicts within fragments spread together.
            # This compares each item in the list of fragment names to every other
            # item in that same list (except for itself).
            for other_fragment_name in fragment_names[i + 1 :]:
                collect_conflicts_between_fragments(
                    context,
                    conflicts,
                    cached_fields_and_fragment_names,
                    compared_fragment_pairs,
                    False,
                    fragment_name,
                    other_fragment_name,
                )
    return conflicts
def collect_conflicts_between_fields_and_fragment(
    context: ValidationContext,
    conflicts: List[Conflict],
    cached_fields_and_fragment_names: Dict,
    compared_fragment_pairs: "PairSet",
    are_mutually_exclusive: bool,
    field_map: NodeAndDefCollection,
    fragment_name: str,
) -> None:
    """Collect conflicts between fields and fragment.

    Collect all conflicts found between a set of fields and a fragment reference,
    including via spreading in any nested fragments. Found conflicts are appended
    to ``conflicts``.
    """
    fragment = context.get_fragment(fragment_name)
    if not fragment:
        # Unknown fragment name: nothing to compare against.
        return None
    field_map2, referenced_fragment_names = get_referenced_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, fragment
    )
    # Do not compare a fragment's fieldMap to itself.
    if field_map is field_map2:
        return
    # (D) First collect any conflicts between the provided collection of fields and the
    # collection of fields represented by the given fragment.
    collect_conflicts_between(
        context,
        conflicts,
        cached_fields_and_fragment_names,
        compared_fragment_pairs,
        are_mutually_exclusive,
        field_map,
        field_map2,
    )
    # (E) Then collect any conflicts between the provided collection of fields and any
    # fragment names found in the given fragment.
    for referenced_fragment_name in referenced_fragment_names:
        # Memoize so two fragments are not compared for conflicts more than once.
        if compared_fragment_pairs.has(
            referenced_fragment_name, fragment_name, are_mutually_exclusive
        ):
            continue
        compared_fragment_pairs.add(
            referenced_fragment_name, fragment_name, are_mutually_exclusive
        )
        collect_conflicts_between_fields_and_fragment(
            context,
            conflicts,
            cached_fields_and_fragment_names,
            compared_fragment_pairs,
            are_mutually_exclusive,
            field_map,
            referenced_fragment_name,
        )
def collect_conflicts_between_fragments(
    context: ValidationContext,
    conflicts: List[Conflict],
    cached_fields_and_fragment_names: Dict,
    compared_fragment_pairs: "PairSet",
    are_mutually_exclusive: bool,
    fragment_name1: str,
    fragment_name2: str,
) -> None:
    """Collect conflicts between fragments.

    Collect all conflicts found between two fragments, including via spreading in
    any nested fragments. Found conflicts are appended to ``conflicts``.
    """
    # No need to compare a fragment to itself.
    if fragment_name1 == fragment_name2:
        return
    # Memoize so two fragments are not compared for conflicts more than once.
    if compared_fragment_pairs.has(
        fragment_name1, fragment_name2, are_mutually_exclusive
    ):
        return
    compared_fragment_pairs.add(fragment_name1, fragment_name2, are_mutually_exclusive)
    fragment1 = context.get_fragment(fragment_name1)
    fragment2 = context.get_fragment(fragment_name2)
    if not fragment1 or not fragment2:
        # Nothing to compare if either fragment is undefined.
        # (Bare return for consistency with the early exits above.)
        return
    field_map1, referenced_fragment_names1 = get_referenced_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, fragment1
    )
    field_map2, referenced_fragment_names2 = get_referenced_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, fragment2
    )
    # (F) First, collect all conflicts between these two collections of fields
    # (not including any nested fragments)
    collect_conflicts_between(
        context,
        conflicts,
        cached_fields_and_fragment_names,
        compared_fragment_pairs,
        are_mutually_exclusive,
        field_map1,
        field_map2,
    )
    # (G) Then collect conflicts between the first fragment and any nested fragments
    # spread in the second fragment.
    for referenced_fragment_name2 in referenced_fragment_names2:
        collect_conflicts_between_fragments(
            context,
            conflicts,
            cached_fields_and_fragment_names,
            compared_fragment_pairs,
            are_mutually_exclusive,
            fragment_name1,
            referenced_fragment_name2,
        )
    # (G) Then collect conflicts between the second fragment and any nested fragments
    # spread in the first fragment.
    for referenced_fragment_name1 in referenced_fragment_names1:
        collect_conflicts_between_fragments(
            context,
            conflicts,
            cached_fields_and_fragment_names,
            compared_fragment_pairs,
            are_mutually_exclusive,
            referenced_fragment_name1,
            fragment_name2,
        )
def find_conflicts_between_sub_selection_sets(
    context: ValidationContext,
    cached_fields_and_fragment_names: Dict,
    compared_fragment_pairs: "PairSet",
    are_mutually_exclusive: bool,
    parent_type1: Optional[GraphQLNamedType],
    selection_set1: SelectionSetNode,
    parent_type2: Optional[GraphQLNamedType],
    selection_set2: SelectionSetNode,
) -> List[Conflict]:
    """Find conflicts between sub selection sets.

    Find all conflicts found between two selection sets, including those found via
    spreading in fragments. Called when determining if conflicts exist between the
    sub-fields of two overlapping fields.

    Returns the list of conflicts found (possibly empty).
    """
    conflicts: List[Conflict] = []
    field_map1, fragment_names1 = get_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, parent_type1, selection_set1
    )
    field_map2, fragment_names2 = get_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, parent_type2, selection_set2
    )
    # (H) First, collect all conflicts between these two collections of fields.
    collect_conflicts_between(
        context,
        conflicts,
        cached_fields_and_fragment_names,
        compared_fragment_pairs,
        are_mutually_exclusive,
        field_map1,
        field_map2,
    )
    # (I) Then collect conflicts between the first collection of fields and those
    # referenced by each fragment name associated with the second.
    if fragment_names2:
        for fragment_name2 in fragment_names2:
            collect_conflicts_between_fields_and_fragment(
                context,
                conflicts,
                cached_fields_and_fragment_names,
                compared_fragment_pairs,
                are_mutually_exclusive,
                field_map1,
                fragment_name2,
            )
    # (I) Then collect conflicts between the second collection of fields and those
    # referenced by each fragment name associated with the first.
    if fragment_names1:
        for fragment_name1 in fragment_names1:
            collect_conflicts_between_fields_and_fragment(
                context,
                conflicts,
                cached_fields_and_fragment_names,
                compared_fragment_pairs,
                are_mutually_exclusive,
                field_map2,
                fragment_name1,
            )
    # (J) Also collect conflicts between any fragment names by the first and fragment
    # names by the second. This compares each item in the first set of names to each
    # item in the second set of names.
    for fragment_name1 in fragment_names1:
        for fragment_name2 in fragment_names2:
            collect_conflicts_between_fragments(
                context,
                conflicts,
                cached_fields_and_fragment_names,
                compared_fragment_pairs,
                are_mutually_exclusive,
                fragment_name1,
                fragment_name2,
            )
    return conflicts
def collect_conflicts_within(
    context: ValidationContext,
    conflicts: List[Conflict],
    cached_fields_and_fragment_names: Dict,
    compared_fragment_pairs: "PairSet",
    field_map: NodeAndDefCollection,
) -> None:
    """Collect all Conflicts "within" one collection of fields."""
    # A field map is keyed by response name; the value under each key lists
    # every field providing that response name.  A response name claimed by
    # more than one field is a potential conflict, so each such pair of
    # fields must be compared.
    for response_name, same_name_fields in field_map.items():
        # A lone field can never conflict with itself; only response names
        # with two or more fields need a pairwise comparison.
        if len(same_name_fields) < 2:
            continue
        for index, first_field in enumerate(same_name_fields):
            for second_field in same_name_fields[index + 1 :]:
                found = find_conflict(
                    context,
                    cached_fields_and_fragment_names,
                    compared_fragment_pairs,
                    # within one collection is never mutually exclusive
                    False,
                    response_name,
                    first_field,
                    second_field,
                )
                if found:
                    conflicts.append(found)
def collect_conflicts_between(
    context: ValidationContext,
    conflicts: List[Conflict],
    cached_fields_and_fragment_names: Dict,
    compared_fragment_pairs: "PairSet",
    parent_fields_are_mutually_exclusive: bool,
    field_map1: NodeAndDefCollection,
    field_map2: NodeAndDefCollection,
) -> None:
    """Collect all Conflicts between two collections of fields.

    Unlike :func:`~.collect_conflicts_within`, this compares fields across two
    different collections.  It assumes :func:`~.collect_conflicts_within` has
    already been run on each collection individually, which holds because this
    validator traverses every individual selection set.
    """
    # Only response names present in *both* field maps can conflict; for each
    # shared response name, every field from the first map must be compared
    # against every field from the second.
    for response_name, first_fields in field_map1.items():
        second_fields = field_map2.get(response_name)
        if not second_fields:
            continue
        for field_a in first_fields:
            for field_b in second_fields:
                found = find_conflict(
                    context,
                    cached_fields_and_fragment_names,
                    compared_fragment_pairs,
                    parent_fields_are_mutually_exclusive,
                    response_name,
                    field_a,
                    field_b,
                )
                if found:
                    conflicts.append(found)
def find_conflict(
    context: ValidationContext,
    cached_fields_and_fragment_names: Dict,
    compared_fragment_pairs: "PairSet",
    parent_fields_are_mutually_exclusive: bool,
    response_name: str,
    field1: NodeAndDef,
    field2: NodeAndDef,
) -> Optional[Conflict]:
    """Find conflict.

    Determines if there is a conflict between two particular fields, including comparing
    their sub-fields.

    Returns a ``Conflict`` triple ``(reason, nodes1, nodes2)`` where ``reason``
    pairs the response name with an explanation, or ``None`` if the two fields
    can safely be merged.
    """
    # Each NodeAndDef is a (parent type, AST field node, field definition) triple.
    parent_type1, node1, def1 = field1
    parent_type2, node2, def2 = field2
    # If it is known that two fields could not possibly apply at the same time, due to
    # the parent types, then it is safe to permit them to diverge in aliased field or
    # arguments used as they will not present any ambiguity by differing. It is known
    # that two parent types could never overlap if they are different Object types.
    # Interface or Union types might overlap - if not in the current state of the
    # schema, then perhaps in some future version, thus may not safely diverge.
    are_mutually_exclusive = parent_fields_are_mutually_exclusive or (
        parent_type1 != parent_type2
        and is_object_type(parent_type1)
        and is_object_type(parent_type2)
    )
    # The return type for each field.
    # def1/def2 may be None for unknown fields, hence the `and` guards.
    type1 = cast(Optional[GraphQLOutputType], def1 and def1.type)
    type2 = cast(Optional[GraphQLOutputType], def2 and def2.type)
    if not are_mutually_exclusive:
        # Two aliases must refer to the same field.
        name1 = node1.name.value
        name2 = node2.name.value
        if name1 != name2:
            return (
                (response_name, f"'{name1}' and '{name2}' are different fields"),
                [node1],
                [node2],
            )
        # Two field calls must have the same arguments.
        if stringify_arguments(node1) != stringify_arguments(node2):
            return (response_name, "they have differing arguments"), [node1], [node2]
    # Return types must be compatible even for mutually exclusive parents.
    if type1 and type2 and do_types_conflict(type1, type2):
        return (
            (response_name, f"they return conflicting types '{type1}' and '{type2}'"),
            [node1],
            [node2],
        )
    # Collect and compare sub-fields. Use the same "visited fragment names" list for
    # both collections so fields in a fragment reference are never compared to
    # themselves.
    selection_set1 = node1.selection_set
    selection_set2 = node2.selection_set
    if selection_set1 and selection_set2:
        conflicts = find_conflicts_between_sub_selection_sets(
            context,
            cached_fields_and_fragment_names,
            compared_fragment_pairs,
            are_mutually_exclusive,
            get_named_type(type1),
            selection_set1,
            get_named_type(type2),
            selection_set2,
        )
        # Fold any sub-field conflicts into a single conflict for this field.
        return subfield_conflicts(conflicts, response_name, node1, node2)
    return None  # no conflict
def stringify_arguments(field_node: FieldNode) -> str:
    """Return a canonical string representation of a field node's arguments.

    The arguments are wrapped in a synthetic input object value, sorted into a
    stable order, and printed, so two field nodes with the same arguments
    (regardless of argument order) stringify identically.
    """
    input_object_with_args = ObjectValueNode(
        fields=tuple(
            ObjectFieldNode(name=arg_node.name, value=arg_node.value)
            for arg_node in field_node.arguments
        )
    )
    return print_ast(sort_value_node(input_object_with_args))
def do_types_conflict(type1: GraphQLOutputType, type2: GraphQLOutputType) -> bool:
    """Check whether two types conflict

    Two types conflict if both types could not apply to a value simultaneously.
    Composite types are ignored as their individual field types will be compared
    later recursively. However List and Non-Null types must match.
    """
    # List wrappers must appear on both sides or neither; when both sides are
    # lists, compare the wrapped types recursively.
    type1_is_list = is_list_type(type1)
    type2_is_list = is_list_type(type2)
    if type1_is_list or type2_is_list:
        if type1_is_list and type2_is_list:
            return do_types_conflict(type1.of_type, type2.of_type)
        return True
    # The same symmetry requirement applies to Non-Null wrappers.
    type1_is_non_null = is_non_null_type(type1)
    type2_is_non_null = is_non_null_type(type2)
    if type1_is_non_null or type2_is_non_null:
        if type1_is_non_null and type2_is_non_null:
            return do_types_conflict(type1.of_type, type2.of_type)
        return True
    # Two leaf (scalar/enum) types conflict unless they are the same type object.
    if is_leaf_type(type1) or is_leaf_type(type2):
        return type1 is not type2
    # Composite types never conflict here; their fields are compared recursively.
    return False
def get_fields_and_fragment_names(
    context: ValidationContext,
    cached_fields_and_fragment_names: Dict,
    parent_type: Optional[GraphQLNamedType],
    selection_set: SelectionSetNode,
) -> Tuple[NodeAndDefCollection, List[str]]:
    """Get fields and referenced fragment names

    Given a selection set, return the collection of fields (a mapping of response name
    to field nodes and definitions) as well as a list of fragment names referenced via
    fragment spreads.  Results are memoized per selection set.
    """
    cached = cached_fields_and_fragment_names.get(selection_set)
    if cached is not None:
        return cached
    collected_fields: NodeAndDefCollection = {}
    # Dict keys are used as an insertion-ordered set of fragment names.
    collected_fragment_names: Dict[str, bool] = {}
    collect_fields_and_fragment_names(
        context, parent_type, selection_set, collected_fields, collected_fragment_names
    )
    result = (collected_fields, list(collected_fragment_names))
    cached_fields_and_fragment_names[selection_set] = result
    return result
def get_referenced_fields_and_fragment_names(
    context: ValidationContext,
    cached_fields_and_fragment_names: Dict,
    fragment: FragmentDefinitionNode,
) -> Tuple[NodeAndDefCollection, List[str]]:
    """Get referenced fields and nested fragment names

    Given a reference to a fragment, return the represented collection of fields as
    well as a list of nested fragment names referenced via fragment spreads.
    """
    selection_set = fragment.selection_set
    # Short-circuit building a type from the node if a cached result exists.
    previously_computed = cached_fields_and_fragment_names.get(selection_set)
    if previously_computed is not None:
        return previously_computed
    fragment_type = type_from_ast(context.schema, fragment.type_condition)
    return get_fields_and_fragment_names(
        context, cached_fields_and_fragment_names, fragment_type, selection_set
    )
def collect_fields_and_fragment_names(
    context: ValidationContext,
    parent_type: Optional[GraphQLNamedType],
    selection_set: SelectionSetNode,
    node_and_defs: NodeAndDefCollection,
    fragment_names: Dict[str, bool],
) -> None:
    """Walk a selection set, accumulating fields and spread fragment names.

    Fields are appended to ``node_and_defs`` keyed by response name, fragment
    spread names are recorded as keys of ``fragment_names``, and inline
    fragments are descended into recursively.
    """
    for selection in selection_set.selections:
        if isinstance(selection, FieldNode):
            field_name = selection.name.value
            # The field definition is only resolvable on object and
            # interface parent types.
            if is_object_type(parent_type) or is_interface_type(parent_type):
                field_def = parent_type.fields.get(field_name)
            else:
                field_def = None
            alias = selection.alias
            response_name = alias.value if alias else field_name
            node_and_defs.setdefault(response_name, []).append(
                cast(NodeAndDef, (parent_type, selection, field_def))
            )
        elif isinstance(selection, FragmentSpreadNode):
            fragment_names[selection.name.value] = True
        elif isinstance(selection, InlineFragmentNode):  # pragma: no cover else
            type_condition = selection.type_condition
            # An inline fragment without a type condition keeps the parent type.
            if type_condition:
                inline_fragment_type = type_from_ast(context.schema, type_condition)
            else:
                inline_fragment_type = parent_type
            collect_fields_and_fragment_names(
                context,
                inline_fragment_type,
                selection.selection_set,
                node_and_defs,
                fragment_names,
            )
def subfield_conflicts(
    conflicts: List[Conflict], response_name: str, node1: FieldNode, node2: FieldNode
) -> Optional[Conflict]:
    """Merge sub-field conflicts into a single conflict for the parent field.

    Given the Conflicts found between the sub-fields of two fields, produce one
    Conflict for the parent response name, accumulating all involved field
    nodes from both sides.  Returns ``None`` when there are no conflicts.
    """
    if not conflicts:
        return None  # no conflict
    reasons = [conflict[0] for conflict in conflicts]
    nodes1: List[FieldNode] = [node1]
    nodes2: List[FieldNode] = [node2]
    for conflict in conflicts:
        nodes1.extend(conflict[1])
        nodes2.extend(conflict[2])
    return (response_name, reasons), nodes1, nodes2
class PairSet:
    """Pair set

    A way to keep track of pairs of things when the ordering of the pair
    doesn't matter.  Each pair may additionally be recorded as having been
    compared under "mutually exclusive" conditions.
    """

    __slots__ = ("_data",)

    _data: Dict[str, Dict[str, bool]]

    def __init__(self) -> None:
        self._data = {}

    def has(self, a: str, b: str, are_mutually_exclusive: bool) -> bool:
        first, second = sorted((a, b))
        stored = self._data.get(first, {}).get(second)
        if stored is None:
            return False
        # A pair added without exclusivity covers the exclusive case as well,
        # but not vice versa: when the caller asks without exclusivity, the
        # entry must also have been added without it.
        return True if are_mutually_exclusive else not stored

    def add(self, a: str, b: str, are_mutually_exclusive: bool) -> None:
        first, second = sorted((a, b))
        self._data.setdefault(first, {})[second] = are_mutually_exclusive
| mit | 2bf67a9c54aca985f92e476920a226ba | 36.227749 | 88 | 0.649216 | 4.276349 | false | false | false | false |
graphql-python/graphql-core | src/graphql/utilities/value_from_ast_untyped.py | 1 | 3116 | from math import nan
from typing import Any, Callable, Dict, Optional, Union
from ..language import (
BooleanValueNode,
EnumValueNode,
FloatValueNode,
IntValueNode,
ListValueNode,
NullValueNode,
ObjectValueNode,
StringValueNode,
ValueNode,
VariableNode,
)
from ..pyutils import Undefined, inspect
__all__ = ["value_from_ast_untyped"]
def value_from_ast_untyped(
    value_node: ValueNode, variables: Optional[Dict[str, Any]] = None
) -> Any:
    """Produce a Python value given a GraphQL Value AST.

    Unlike :func:`~graphql.utilities.value_from_ast`, no type is provided.
    The resulting Python value will reflect the provided GraphQL value AST.

    =================== ============== ================
    GraphQL Value       JSON Value     Python Value
    =================== ============== ================
    Input Object        Object         dict
    List                Array          list
    Boolean             Boolean        bool
    String / Enum       String         str
    Int / Float         Number         int / float
    Null                null           None
    =================== ============== ================
    """
    handler = _value_from_kind_functions.get(value_node.kind)
    if handler is None:
        # Not reachable. All possible value nodes have been considered.
        raise TypeError(  # pragma: no cover
            f"Unexpected value node: {inspect(value_node)}."
        )
    return handler(value_node, variables)
def value_from_null(_value_node: NullValueNode, _variables: Any) -> Any:
    """Convert a null value node to ``None``; both arguments are ignored."""
    return None
def value_from_int(value_node: IntValueNode, _variables: Any) -> Any:
    """Convert an int value node to a Python int (``nan`` when unparsable)."""
    raw_value = value_node.value
    try:
        return int(raw_value)
    except ValueError:
        return nan
def value_from_float(value_node: FloatValueNode, _variables: Any) -> Any:
    """Convert a float value node to a Python float (``nan`` when unparsable)."""
    raw_value = value_node.value
    try:
        return float(raw_value)
    except ValueError:
        return nan
def value_from_string(
    value_node: Union[BooleanValueNode, EnumValueNode, StringValueNode], _variables: Any
) -> Any:
    """Return the native value carried by a boolean, enum or string value node."""
    return value_node.value
def value_from_list(
    value_node: ListValueNode, variables: Optional[Dict[str, Any]]
) -> Any:
    """Convert a list value node to a Python list, converting each item in turn."""
    return [value_from_ast_untyped(node, variables) for node in value_node.values]
def value_from_object(
    value_node: ObjectValueNode, variables: Optional[Dict[str, Any]]
) -> Any:
    """Convert an object value node to a dict keyed by field name."""
    return {
        field.name.value: value_from_ast_untyped(field.value, variables)
        for field in value_node.fields
    }
def value_from_variable(
    value_node: VariableNode, variables: Optional[Dict[str, Any]]
) -> Any:
    """Resolve a variable node against the given variable values.

    Returns ``Undefined`` when no variable values were provided at all, or
    when the named variable is missing from them.
    """
    if not variables:
        return Undefined
    return variables.get(value_node.name.value, Undefined)
# Dispatch table mapping AST value node kinds to their converter functions;
# ``value_from_ast_untyped`` looks up the handler for ``value_node.kind`` here.
_value_from_kind_functions: Dict[str, Callable] = {
    "null_value": value_from_null,
    "int_value": value_from_int,
    "float_value": value_from_float,
    "string_value": value_from_string,
    "enum_value": value_from_string,
    "boolean_value": value_from_string,
    "list_value": value_from_list,
    "object_value": value_from_object,
    "variable": value_from_variable,
}
| mit | 3cf1fea2e474e10f0efb864387e3caf3 | 27.327273 | 88 | 0.613607 | 3.82801 | false | false | false | false |
graphql-python/graphql-core | tests/utilities/test_print_schema.py | 1 | 26814 | from typing import Any, Dict, cast
from graphql.language import DirectiveLocation
from graphql.type import (
GraphQLArgument,
GraphQLBoolean,
GraphQLDirective,
GraphQLEnumType,
GraphQLField,
GraphQLFloat,
GraphQLInputField,
GraphQLInputObjectType,
GraphQLInt,
GraphQLInterfaceType,
GraphQLList,
GraphQLNonNull,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
GraphQLString,
GraphQLUnionType,
)
from graphql.utilities import (
build_schema,
print_introspection_schema,
print_schema,
print_value,
)
from ..utils import dedent
def expect_printed_schema(schema: GraphQLSchema) -> str:
    """Print the schema, checking that the output round-trips via build_schema."""
    schema_text = print_schema(schema)
    # keep print_schema and build_schema in sync
    assert print_schema(build_schema(schema_text)) == schema_text
    return schema_text
def build_single_field_schema(field: GraphQLField) -> GraphQLSchema:
    """Build a schema whose Query type has *field* as its only field."""
    query = GraphQLObjectType(name="Query", fields={"singleField": field})
    return GraphQLSchema(query=query)
def describe_type_system_printer():
    def prints_string_field():
        # A plain (nullable) String field prints without any wrapper notation.
        schema = build_single_field_schema(GraphQLField(GraphQLString))
        assert expect_printed_schema(schema) == dedent(
            """
            type Query {
              singleField: String
            }
            """
        )
def prints_list_of_string_field():
schema = build_single_field_schema(GraphQLField(GraphQLList(GraphQLString)))
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: [String]
}
"""
)
def prints_non_null_string_field():
schema = build_single_field_schema(GraphQLField(GraphQLNonNull(GraphQLString)))
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: String!
}
"""
)
def prints_non_null_list_of_string_field():
schema = build_single_field_schema(
GraphQLField(GraphQLNonNull(GraphQLList(GraphQLString)))
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: [String]!
}
"""
)
def prints_list_of_non_null_string_field():
schema = build_single_field_schema(
GraphQLField((GraphQLList(GraphQLNonNull(GraphQLString))))
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: [String!]
}
"""
)
def prints_non_null_list_of_non_null_string_field():
schema = build_single_field_schema(
GraphQLField(GraphQLNonNull(GraphQLList(GraphQLNonNull(GraphQLString))))
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField: [String!]!
}
"""
)
def prints_object_field():
foo_type = GraphQLObjectType(
name="Foo", fields={"str": GraphQLField(GraphQLString)}
)
schema = GraphQLSchema(types=[foo_type])
assert expect_printed_schema(schema) == dedent(
"""
type Foo {
str: String
}
"""
)
def prints_string_field_with_int_arg():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString, args={"argOne": GraphQLArgument(GraphQLInt)}
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int): String
}
"""
)
def prints_string_field_with_int_arg_with_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={"argOne": GraphQLArgument(GraphQLInt, default_value=2)},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int = 2): String
}
"""
)
def prints_string_field_with_string_arg_with_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(
GraphQLString, default_value="tes\t de\fault"
)
},
)
)
assert expect_printed_schema(schema) == dedent(
r"""
type Query {
singleField(argOne: String = "tes\t de\fault"): String
}
"""
)
def prints_string_field_with_int_arg_with_default_null():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={"argOne": GraphQLArgument(GraphQLInt, default_value=None)},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int = null): String
}
"""
)
def prints_string_field_with_non_null_int_arg():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={"argOne": GraphQLArgument(GraphQLNonNull(GraphQLInt))},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int!): String
}
"""
)
def prints_string_field_with_multiple_args():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(GraphQLInt),
"argTwo": GraphQLArgument(GraphQLString),
},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int, argTwo: String): String
}
"""
)
def prints_string_field_with_multiple_args_first_is_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(GraphQLInt, default_value=1),
"argTwo": GraphQLArgument(GraphQLString),
"argThree": GraphQLArgument(GraphQLBoolean),
},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int = 1, argTwo: String, argThree: Boolean): String
}
"""
)
def prints_string_field_with_multiple_args_second_is_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(GraphQLInt),
"argTwo": GraphQLArgument(GraphQLString, default_value="foo"),
"argThree": GraphQLArgument(GraphQLBoolean),
},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int, argTwo: String = "foo", argThree: Boolean): String
}
""" # noqa: E501
)
def prints_string_field_with_multiple_args_last_is_default():
schema = build_single_field_schema(
GraphQLField(
type_=GraphQLString,
args={
"argOne": GraphQLArgument(GraphQLInt),
"argTwo": GraphQLArgument(GraphQLString),
"argThree": GraphQLArgument(GraphQLBoolean, default_value=False),
},
)
)
assert expect_printed_schema(schema) == dedent(
"""
type Query {
singleField(argOne: Int, argTwo: String, argThree: Boolean = false): String
}
""" # noqa: E501
)
def prints_schema_with_description():
schema = GraphQLSchema(
description="Schema description.", query=GraphQLObjectType("Query", {})
)
assert expect_printed_schema(schema) == dedent(
'''
"""Schema description."""
schema {
query: Query
}
type Query
'''
)
def omits_schema_of_common_names():
schema = GraphQLSchema(
query=GraphQLObjectType("Query", {}),
mutation=GraphQLObjectType("Mutation", {}),
subscription=GraphQLObjectType("Subscription", {}),
)
assert expect_printed_schema(schema) == dedent(
"""
type Query
type Mutation
type Subscription
"""
)
def prints_custom_query_root_types():
schema = GraphQLSchema(query=GraphQLObjectType("CustomType", {}))
assert expect_printed_schema(schema) == dedent(
"""
schema {
query: CustomType
}
type CustomType
"""
)
def prints_custom_mutation_root_types():
schema = GraphQLSchema(mutation=GraphQLObjectType("CustomType", {}))
assert expect_printed_schema(schema) == dedent(
"""
schema {
mutation: CustomType
}
type CustomType
"""
)
def prints_custom_subscription_root_types():
schema = GraphQLSchema(subscription=GraphQLObjectType("CustomType", {}))
assert expect_printed_schema(schema) == dedent(
"""
schema {
subscription: CustomType
}
type CustomType
"""
)
def prints_interface():
foo_type = GraphQLInterfaceType(
name="Foo", fields={"str": GraphQLField(GraphQLString)}
)
bar_type = GraphQLObjectType(
name="Bar",
fields={"str": GraphQLField(GraphQLString)},
interfaces=[foo_type],
)
schema = GraphQLSchema(types=[bar_type])
assert expect_printed_schema(schema) == dedent(
"""
type Bar implements Foo {
str: String
}
interface Foo {
str: String
}
"""
)
def prints_multiple_interfaces():
foo_type = GraphQLInterfaceType(
name="Foo", fields={"str": GraphQLField(GraphQLString)}
)
baz_type = GraphQLInterfaceType(
name="Baz", fields={"int": GraphQLField(GraphQLInt)}
)
bar_type = GraphQLObjectType(
name="Bar",
fields={
"str": GraphQLField(GraphQLString),
"int": GraphQLField(GraphQLInt),
},
interfaces=[foo_type, baz_type],
)
schema = GraphQLSchema(types=[bar_type])
assert expect_printed_schema(schema) == dedent(
"""
type Bar implements Foo & Baz {
str: String
int: Int
}
interface Foo {
str: String
}
interface Baz {
int: Int
}
"""
)
def prints_hierarchical_interface():
foo_type = GraphQLInterfaceType(
name="Foo", fields={"str": GraphQLField(GraphQLString)}
)
baz_type = GraphQLInterfaceType(
name="Baz",
interfaces=[foo_type],
fields={
"int": GraphQLField(GraphQLInt),
"str": GraphQLField(GraphQLString),
},
)
bar_type = GraphQLObjectType(
name="Bar",
fields={
"str": GraphQLField(GraphQLString),
"int": GraphQLField(GraphQLInt),
},
interfaces=[foo_type, baz_type],
)
query = GraphQLObjectType(name="Query", fields={"bar": GraphQLField(bar_type)})
schema = GraphQLSchema(query, types=[bar_type])
assert expect_printed_schema(schema) == dedent(
"""
type Bar implements Foo & Baz {
str: String
int: Int
}
interface Foo {
str: String
}
interface Baz implements Foo {
int: Int
str: String
}
type Query {
bar: Bar
}
"""
)
def prints_unions():
foo_type = GraphQLObjectType(
name="Foo", fields={"bool": GraphQLField(GraphQLBoolean)}
)
bar_type = GraphQLObjectType(
name="Bar", fields={"str": GraphQLField(GraphQLString)}
)
single_union = GraphQLUnionType(name="SingleUnion", types=[foo_type])
multiple_union = GraphQLUnionType(
name="MultipleUnion", types=[foo_type, bar_type]
)
schema = GraphQLSchema(types=[single_union, multiple_union])
assert expect_printed_schema(schema) == dedent(
"""
union SingleUnion = Foo
type Foo {
bool: Boolean
}
union MultipleUnion = Foo | Bar
type Bar {
str: String
}
"""
)
def prints_input_type():
input_type = GraphQLInputObjectType(
name="InputType", fields={"int": GraphQLInputField(GraphQLInt)}
)
schema = GraphQLSchema(types=[input_type])
assert expect_printed_schema(schema) == dedent(
"""
input InputType {
int: Int
}
"""
)
def prints_custom_scalar():
odd_type = GraphQLScalarType(name="Odd")
schema = GraphQLSchema(types=[odd_type])
assert expect_printed_schema(schema) == dedent(
"""
scalar Odd
"""
)
def prints_custom_scalar_with_specified_by_url():
foo_type = GraphQLScalarType(
name="Foo", specified_by_url="https://example.com/foo_spec"
)
schema = GraphQLSchema(types=[foo_type])
assert expect_printed_schema(schema) == dedent(
"""
scalar Foo @specifiedBy(url: "https://example.com/foo_spec")
"""
)
def prints_enum():
rgb_type = GraphQLEnumType(
name="RGB", values=dict.fromkeys(("RED", "GREEN", "BLUE"))
)
schema = GraphQLSchema(types=[rgb_type])
assert expect_printed_schema(schema) == dedent(
"""
enum RGB {
RED
GREEN
BLUE
}
"""
)
def prints_empty_types():
schema = GraphQLSchema(
types=[
GraphQLEnumType("SomeEnum", cast(Dict[str, Any], {})),
GraphQLInputObjectType("SomeInputObject", {}),
GraphQLInterfaceType("SomeInterface", {}),
GraphQLObjectType("SomeObject", {}),
GraphQLUnionType("SomeUnion", []),
]
)
assert expect_printed_schema(schema) == dedent(
"""
enum SomeEnum
input SomeInputObject
interface SomeInterface
type SomeObject
union SomeUnion
"""
)
def prints_custom_directives():
simple_directive = GraphQLDirective(
"simpleDirective", [DirectiveLocation.FIELD]
)
complex_directive = GraphQLDirective(
"complexDirective",
[DirectiveLocation.FIELD, DirectiveLocation.QUERY],
description="Complex Directive",
args={
"stringArg": GraphQLArgument(GraphQLString),
"intArg": GraphQLArgument(GraphQLInt, default_value=-1),
},
is_repeatable=True,
)
schema = GraphQLSchema(directives=[simple_directive, complex_directive])
assert expect_printed_schema(schema) == dedent(
'''
directive @simpleDirective on FIELD
"""Complex Directive"""
directive @complexDirective(stringArg: String, intArg: Int = -1) repeatable on FIELD | QUERY
''' # noqa: E501
)
def prints_an_empty_description():
schema = build_single_field_schema(GraphQLField(GraphQLString, description=""))
assert expect_printed_schema(schema) == dedent(
'''
type Query {
""""""
singleField: String
}
'''
)
def prints_a_description_with_only_whitespace():
schema = build_single_field_schema(GraphQLField(GraphQLString, description=" "))
assert expect_printed_schema(schema) == dedent(
"""
type Query {
" "
singleField: String
}
"""
)
def one_line_prints_a_short_description():
schema = build_single_field_schema(
GraphQLField(GraphQLString, description="This field is awesome")
)
assert expect_printed_schema(schema) == dedent(
'''
type Query {
"""This field is awesome"""
singleField: String
}
'''
)
def prints_introspection_schema():
schema = GraphQLSchema()
output = print_introspection_schema(schema)
assert output == dedent(
'''
"""
Directs the executor to include this field or fragment only when the `if` argument is true.
"""
directive @include(
"""Included when true."""
if: Boolean!
) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
"""
Directs the executor to skip this field or fragment when the `if` argument is true.
"""
directive @skip(
"""Skipped when true."""
if: Boolean!
) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT
"""Marks an element of a GraphQL schema as no longer supported."""
directive @deprecated(
"""
Explains why this element was deprecated, usually also including a suggestion for how to access supported similar data. Formatted using the Markdown syntax, as specified by [CommonMark](https://commonmark.org/).
"""
reason: String = "No longer supported"
) on FIELD_DEFINITION | ARGUMENT_DEFINITION | INPUT_FIELD_DEFINITION | ENUM_VALUE
"""Exposes a URL that specifies the behaviour of this scalar."""
directive @specifiedBy(
"""The URL that specifies the behaviour of this scalar."""
url: String!
) on SCALAR
"""
A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all available types and directives on the server, as well as the entry points for query, mutation, and subscription operations.
"""
type __Schema {
description: String
"""A list of all types supported by this server."""
types: [__Type!]!
"""The type that query operations will be rooted at."""
queryType: __Type!
"""
If this server supports mutation, the type that mutation operations will be rooted at.
"""
mutationType: __Type
"""
If this server supports subscription, the type that subscription operations will be rooted at.
"""
subscriptionType: __Type
"""A list of all directives supported by this server."""
directives: [__Directive!]!
}
"""
The fundamental unit of any GraphQL Schema is the type. There are many kinds of types in GraphQL as represented by the `__TypeKind` enum.
Depending on the kind of a type, certain fields describe information about that type. Scalar types provide no information beyond a name, description and optional `specifiedByURL`, while Enum types provide their values. Object and Interface types provide the fields they describe. Abstract types, Union and Interface, provide the Object types possible at runtime. List and NonNull types compose other types.
"""
type __Type {
kind: __TypeKind!
name: String
description: String
specifiedByURL: String
fields(includeDeprecated: Boolean = false): [__Field!]
interfaces: [__Type!]
possibleTypes: [__Type!]
enumValues(includeDeprecated: Boolean = false): [__EnumValue!]
inputFields(includeDeprecated: Boolean = false): [__InputValue!]
ofType: __Type
}
"""An enum describing what kind of type a given `__Type` is."""
enum __TypeKind {
"""Indicates this type is a scalar."""
SCALAR
"""
Indicates this type is an object. `fields` and `interfaces` are valid fields.
"""
OBJECT
"""
Indicates this type is an interface. `fields`, `interfaces`, and `possibleTypes` are valid fields.
"""
INTERFACE
"""Indicates this type is a union. `possibleTypes` is a valid field."""
UNION
"""Indicates this type is an enum. `enumValues` is a valid field."""
ENUM
"""
Indicates this type is an input object. `inputFields` is a valid field.
"""
INPUT_OBJECT
"""Indicates this type is a list. `ofType` is a valid field."""
LIST
"""Indicates this type is a non-null. `ofType` is a valid field."""
NON_NULL
}
"""
Object and Interface types are described by a list of Fields, each of which has a name, potentially a list of arguments, and a return type.
"""
type __Field {
name: String!
description: String
args(includeDeprecated: Boolean = false): [__InputValue!]!
type: __Type!
isDeprecated: Boolean!
deprecationReason: String
}
"""
Arguments provided to Fields or Directives and the input fields of an InputObject are represented as Input Values which describe their type and optionally a default value.
"""
type __InputValue {
name: String!
description: String
type: __Type!
"""
A GraphQL-formatted string representing the default value for this input value.
"""
defaultValue: String
isDeprecated: Boolean!
deprecationReason: String
}
"""
One possible value for a given Enum. Enum values are unique values, not a placeholder for a string or numeric value. However an Enum value is returned in a JSON response as a string.
"""
type __EnumValue {
name: String!
description: String
isDeprecated: Boolean!
deprecationReason: String
}
"""
A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document.
In some cases, you need to provide options to alter GraphQL's execution behavior in ways field arguments will not suffice, such as conditionally including or skipping a field. Directives provide this by describing additional information to the executor.
"""
type __Directive {
name: String!
description: String
isRepeatable: Boolean!
locations: [__DirectiveLocation!]!
args(includeDeprecated: Boolean = false): [__InputValue!]!
}
"""
A Directive can be adjacent to many parts of the GraphQL language, a __DirectiveLocation describes one such possible adjacencies.
"""
enum __DirectiveLocation {
"""Location adjacent to a query operation."""
QUERY
"""Location adjacent to a mutation operation."""
MUTATION
"""Location adjacent to a subscription operation."""
SUBSCRIPTION
"""Location adjacent to a field."""
FIELD
"""Location adjacent to a fragment definition."""
FRAGMENT_DEFINITION
"""Location adjacent to a fragment spread."""
FRAGMENT_SPREAD
"""Location adjacent to an inline fragment."""
INLINE_FRAGMENT
"""Location adjacent to a variable definition."""
VARIABLE_DEFINITION
"""Location adjacent to a schema definition."""
SCHEMA
"""Location adjacent to a scalar definition."""
SCALAR
"""Location adjacent to an object type definition."""
OBJECT
"""Location adjacent to a field definition."""
FIELD_DEFINITION
"""Location adjacent to an argument definition."""
ARGUMENT_DEFINITION
"""Location adjacent to an interface definition."""
INTERFACE
"""Location adjacent to a union definition."""
UNION
"""Location adjacent to an enum definition."""
ENUM
"""Location adjacent to an enum value definition."""
ENUM_VALUE
"""Location adjacent to an input object type definition."""
INPUT_OBJECT
"""Location adjacent to an input object field definition."""
INPUT_FIELD_DEFINITION
}
''' # noqa: E501
)
def describe_print_value():
    def print_value_convenience_function():
        # print_value renders a Python value as a GraphQL literal for the
        # given type, quoting strings but not numbers.
        assert print_value(1.5, GraphQLFloat) == "1.5"
| mit | c4c83e4ea532e9b5656ad4a990b9c0f0 | 29.75 | 418 | 0.518684 | 4.864659 | false | false | false | false |
graphql-python/graphql-core | tests/error/test_located_error.py | 1 | 1237 | from typing import Any, cast
from graphql.error import GraphQLError, located_error
def describe_located_error():
    """Tests for located_error, which wraps raw exceptions as GraphQLErrors."""
    def throws_without_an_original_error():
        # A non-exception "error" value is wrapped into a TypeError.
        e = located_error([], [], []).original_error  # type: ignore
        assert isinstance(e, TypeError)
        assert str(e) == "Unexpected error value: []"
    def passes_graphql_error_through():
        # A GraphQLError that already carries a path is returned unchanged.
        path = ["path", 3, "to", "field"]
        e = GraphQLError("msg", None, None, None, cast(Any, path))
        assert located_error(e, [], []) == e
    def passes_graphql_error_ish_through():
        e = GraphQLError("I am a located GraphQL error")
        e.path = []
        assert located_error(e, [], []) is e
    def does_not_pass_through_elasticsearch_like_errors():
        # A plain Exception with a `path` attribute must still be wrapped —
        # only genuine GraphQLError instances pass through.
        e = Exception("I am from elasticsearch")
        cast(Any, e).path = "/something/feed/_search"
        assert located_error(e, [], []) is not e
    def handles_lazy_error_messages():
        # `message` objects that stringify lazily must be supported.
        class LazyString:
            def __str__(self) -> str:
                return "lazy"
        class LazyError(Exception):
            def __init__(self):
                self.message = LazyString()
                super().__init__()
        assert str(located_error(LazyError())) == "lazy"
| mit | 5984c4f50de502cfaf9d214a1f1ce286 | 32.432432 | 68 | 0.569927 | 3.93949 | false | false | false | false |
graphql-python/graphql-core | tests/test_docs.py | 1 | 13482 | """Test all code snippets in the documentation"""
from pathlib import Path
from typing import Any, Dict, List
from .utils import dedent
# TypeAlias is in the stdlib only from Python 3.10 on; fall back to the
# typing_extensions backport on older interpreters.
try:
    from typing import TypeAlias
except ImportError:  # Python < 3.10
    from typing_extensions import TypeAlias
# Namespace dict passed to exec() when running documentation snippets.
Scope: TypeAlias = Dict[str, Any]
def get_snippets(source, indent=4):
    """Get all code snippets from a given documentation source file.

    A snippet is a maximal run of lines indented by at least *indent* spaces
    (blank lines inside a snippet are kept). The indent prefix is stripped
    and each snippet is returned right-stripped with a trailing newline.
    """
    if not source.endswith(".rst"):  # pragma: no cover
        source += ".rst"
    source_path = Path(__file__).parents[1] / "docs" / source
    # Use a context manager so the file handle is closed deterministically
    # (previously the open file was left for the garbage collector).
    with open(source_path, encoding="utf-8") as source_file:
        lines = source_file.readlines()
    snippets: List[str] = []
    snippet: List[str] = []
    snippet_start = " " * indent
    for line in lines:
        if not line.rstrip() and snippet:
            # Blank line inside a snippet: keep it verbatim.
            snippet.append(line)
        elif line.startswith(snippet_start):
            snippet.append(line[indent:])
        else:
            # Non-indented line terminates the current snippet, if any.
            if snippet:
                snippets.append("".join(snippet).rstrip() + "\n")
            snippet = []
    if snippet:
        snippets.append("".join(snippet).rstrip() + "\n")
    return snippets
def expected_result(snippets):
    """Pop the next snippet and normalize it as an expected ExecutionResult."""
    head = snippets.pop(0)
    assert head.startswith("ExecutionResult(")
    # Collapse all whitespace runs to single spaces and tighten "( ".
    collapsed = " ".join(head.split()).replace("( ", "(")
    return collapsed + "\n"
def expected_errors(snippets):
    """Pop the next snippet and normalize it as an expected error list."""
    head = snippets.pop(0)
    assert head.startswith("[GraphQLError(")
    # Collapse whitespace, tighten "( " and join split string literals.
    collapsed = " ".join(head.split())
    return collapsed.replace("( ", "(").replace('" "', "")
def describe_introduction():
    """Verify the code snippets of the getting-started page in docs/intro.rst."""
    def getting_started(capsys):
        intro = get_snippets("intro")
        # The page starts with the two install commands.
        pip_install = intro.pop(0)
        assert "pip install" in pip_install and "graphql-core" in pip_install
        poetry_install = intro.pop(0)
        assert "poetry install" in poetry_install
        # Then the schema-building snippet, executed in a fresh namespace.
        create_schema = intro.pop(0)
        assert "schema = GraphQLSchema(" in create_schema
        scope: Scope = {}
        exec(create_schema, scope)
        schema = scope.get("schema")
        schema_class = scope.get("GraphQLSchema")
        assert schema and schema_class and isinstance(schema, schema_class)
        # Finally the query snippet, whose printed output must match the
        # documented result exactly.
        query = intro.pop(0)
        assert "graphql_sync" in query
        exec(query, scope)
        out, err = capsys.readouterr()
        assert out.startswith("ExecutionResult")
        assert not err
        expected_out = intro.pop(0)
        assert out == expected_out
def describe_usage():
    """Verify the code snippets of all pages under docs/usage/.

    Snippets are executed with exec() in shared or fresh namespaces, in the
    exact order they appear on each page, so the pop(0) sequencing below
    mirrors the documentation layout.
    """
    sdl = get_snippets("usage/schema")[0]
    resolvers = get_snippets("usage/resolvers")[0]
    # docs/usage/schema.rst: build the Star Wars schema programmatically.
    def building_a_type_schema():
        schema = get_snippets("usage/schema")
        assert schema.pop(0) == sdl
        assert "enum Episode { NEWHOPE, EMPIRE, JEDI }" in sdl
        import_blocks = schema.pop(0)
        assert "from graphql import" in import_blocks
        assert "GraphQLObjectType" in import_blocks
        scope: Scope = {}
        exec(import_blocks, scope)
        assert "GraphQLObjectType" in scope
        # Three alternative ways of defining the same enum are shown; each
        # must produce EMPIRE == 5.
        build_enum = schema.pop(0)
        assert "episode_enum = " in build_enum
        exec(build_enum, scope)
        assert scope["episode_enum"].values["EMPIRE"].value == 5
        scope2 = scope.copy()
        build_enum2 = schema.pop(0)
        assert "episode_enum = " in build_enum2
        exec(build_enum2, scope2)
        assert scope["episode_enum"].values["EMPIRE"].value == 5
        scope3 = scope.copy()
        build_enum3 = schema.pop(0)
        assert "episode_enum = " in build_enum3
        exec(build_enum3, scope3)
        assert scope["episode_enum"].values["EMPIRE"].value == 5
        build_character = schema.pop(0)
        assert "character_interface = " in build_character
        exec(resolvers, scope)
        exec(build_character, scope)
        assert "character_interface" in scope
        build_human_and_droid = schema.pop(0)
        assert "human_type = " in build_human_and_droid
        assert "droid_type = " in build_human_and_droid
        exec(build_human_and_droid, scope)
        assert "human_type" in scope
        assert "droid_type" in scope
        build_query_type = schema.pop(0)
        assert "query_type = " in build_query_type
        exec(build_query_type, scope)
        assert "query_type" in scope
        define_schema = schema.pop(0)
        assert "schema = " in define_schema
        exec(define_schema, scope)
    # docs/usage/resolvers.rst: the resolver functions really resolve data.
    def implementing_resolvers():
        assert "luke = dict(" in resolvers
        assert "def get_human(" in resolvers
        scope: Scope = {}
        exec(resolvers, scope)
        get_human = scope["get_human"]
        human = get_human(None, None, "1000")
        assert human["name"] == "Luke Skywalker"
    # docs/usage/queries.rst: each query snippet prints the documented result.
    def executing_queries(capsys):
        scope: Scope = {}
        exec(resolvers, scope)
        schema = "\n".join(get_snippets("usage/schema")[1:])
        exec(schema, scope)
        queries = get_snippets("usage/queries")
        async_query = queries.pop(0)
        assert "asyncio" in async_query and "graphql_sync" not in async_query
        assert "asyncio.run" in async_query
        from asyncio import run  # noqa: F401
        exec(async_query, scope)
        out, err = capsys.readouterr()
        assert not err
        assert "R2-D2" in out
        assert out == expected_result(queries)
        sync_query = queries.pop(0)
        assert "graphql_sync" in sync_query and "asyncio" not in sync_query
        exec(sync_query, scope)
        out, err = capsys.readouterr()
        assert not err
        assert "Luke" in out
        assert out == expected_result(queries)
        # An invalid field produces a validation error in the result.
        bad_query = queries.pop(0)
        assert "homePlace" in bad_query
        exec(bad_query, scope)
        out, err = capsys.readouterr()
        assert not err
        assert "Cannot query" in out
        assert out == expected_result(queries)
        typename_query = queries.pop(0)
        assert "__typename" in typename_query
        exec(typename_query, scope)
        out, err = capsys.readouterr()
        assert not err
        assert "__typename" in out and "Human" in out
        assert out == expected_result(queries)
        # A resolver raising an error shows up in the "errors" entry.
        backstory_query = queries.pop(0)
        assert "secretBackstory" in backstory_query
        exec(backstory_query, scope)
        out, err = capsys.readouterr()
        assert not err
        assert "errors" in out and "secretBackstory" in out
        assert out == expected_result(queries)
    # docs/usage/sdl.rst: build the schema from SDL and attach resolvers.
    def using_the_sdl(capsys):
        use_sdl = get_snippets("usage/sdl")
        build_schema = use_sdl.pop(0)
        build_schema_sdl = dedent(
            build_schema.partition('build_schema("""\n')[2].partition('""")')[0]
        )
        # The SDL embedded on this page must match the schema page's SDL.
        assert build_schema_sdl == sdl.rstrip()
        scope: Scope = {}
        exec(build_schema, scope)
        schema = scope["schema"]
        assert list(schema.query_type.fields) == ["hero", "human", "droid"]
        exec(resolvers, scope)
        assert schema.query_type.fields["hero"].resolve is None
        attach_functions = use_sdl.pop(0)
        exec(attach_functions, scope)
        assert schema.query_type.fields["hero"].resolve is scope["get_hero"]
        define_enum_values = use_sdl.pop(0)
        define_episode_enum = get_snippets("usage/schema")[3]
        define_episode_enum = define_episode_enum.partition("episode_enum =")[0]
        assert "class EpisodeEnum" in define_episode_enum
        exec(define_episode_enum, scope)
        exec(define_enum_values, scope)
        assert schema.get_type("Episode").values["EMPIRE"].value == 5
        query = use_sdl.pop(0)
        assert "graphql_sync" in query and "print(result)" in query
        exec(query, scope)
        out, err = capsys.readouterr()
        assert not err
        assert "Luke" in out and "appearsIn" in out and "EMPIRE" in out
        assert out == expected_result(use_sdl)
    # docs/usage/methods.rst: resolvers defined as methods on a root object.
    def using_resolver_methods(capsys):
        scope: Scope = {}
        exec(resolvers, scope)
        build_schema = get_snippets("usage/sdl")[0]
        exec(build_schema, scope)
        methods = get_snippets("usage/methods")
        root_class = methods.pop(0)
        assert root_class.startswith("class Root:")
        assert "def human(self, info, id):" in root_class
        exec(root_class, scope)
        assert "Root" in scope
        query = methods.pop(0)
        assert "graphql_sync" in query and "Root()" in query
        exec(query, scope)
        out, err = capsys.readouterr()
        assert not err
        assert "R2-D2" in out and "primaryFunction" in out and "Astromech" in out
        assert out == expected_result(methods)
    # docs/usage/introspection.rst: introspection query and client schema.
    def using_introspection(capsys):
        introspect = get_snippets("usage/introspection")
        get_query = introspect.pop(0)
        assert "import get_introspection_query" in get_query
        assert "descriptions=True" in get_query
        scope: Scope = {}
        exec(get_query, scope)
        query = scope["query"]
        assert query.lstrip().startswith("query IntrospectionQuery")
        assert "description" in query
        get_query = introspect.pop(0)
        assert "descriptions=False" in get_query
        scope2 = scope.copy()
        exec(get_query, scope2)
        query = scope2["query"]
        assert query.lstrip().startswith("query IntrospectionQuery")
        assert "description" not in query
        exec(resolvers, scope)
        create_schema = "\n".join(get_snippets("usage/schema")[1:])
        exec(create_schema, scope)
        get_result = introspect.pop(0)
        assert "result = graphql_sync(" in get_result
        exec(get_result, scope)
        query_result = scope["introspection_query_result"]
        assert query_result.errors is None
        result = str(query_result.data)
        result = "".join(result.split())
        expected_result = introspect.pop(0)
        # NOTE(review): duplicate of the normalization two lines above —
        # harmless, kept as-is.
        result = "".join(result.split())
        # Only the first lines of the (huge) result are documented.
        expected_result = "\n".join(expected_result.splitlines()[:7])
        expected_result = "".join(expected_result.split())
        assert result.startswith(expected_result)
        build_schema = introspect.pop(0)
        assert "schema = build_client_schema(" in build_schema
        scope = {"introspection_query_result": query_result}
        exec(build_schema, scope)
        schema = scope["client_schema"]
        assert list(schema.query_type.fields) == ["hero", "human", "droid"]
        print_schema = introspect.pop(0)
        scope = {"client_schema": schema}
        assert "print_schema(" in print_schema
        exec(print_schema, scope)
        out, err = capsys.readouterr()
        assert not err
        assert "enum Episode {" in out
        assert "id: String!" in out
        assert "interface Character {" in out
        assert "type Droid implements Character {" in out
        assert "type Human implements Character {" in out
        assert '"""A character in the Star Wars Trilogy"""' in out
        assert '"""A humanoid creature in the Star Wars universe."""' in out
    # docs/usage/parser.rst: parsing queries into AST documents.
    def parsing_graphql():
        parser = get_snippets("usage/parser")
        parse_document = parser.pop(0)
        assert "document = parse(" in parse_document
        scope: Scope = {}
        exec(parse_document, scope)
        document = scope["document"]
        name = document.definitions[0].fields[0].name
        assert name.value == "me"
        assert str(name.loc) == "24:26"
        parse_document2 = parser.pop(0)
        assert "document = parse(" in parse_document2
        assert "..., no_location=True" in parse_document2
        # The docs only show the changed argument; splice it into the first
        # snippet to get a runnable version.
        parse_document = parse_document.replace('""")', '""", no_location=True)')
        scope.clear()
        exec(parse_document, scope)
        document = scope["document"]
        name = document.definitions[0].fields[0].name
        assert name.value == "me"
        assert name.loc is None
        create_document = parser.pop(0)
        assert "document = DocumentNode(" in create_document
        assert "FieldDefinitionNode(" in create_document
        assert "name=NameNode(value='me')," in create_document
        scope = {}
        exec(create_document, scope)
        # Manually built AST equals the parsed (location-free) AST.
        assert scope["document"] == document
    # docs/usage/extension.rst: extending an existing schema.
    def extending_a_schema(capsys):
        scope: Scope = {}
        exec(resolvers, scope)
        create_schema = "\n".join(get_snippets("usage/schema")[1:])
        exec(create_schema, scope)
        extension = get_snippets("usage/extension")
        extend_schema = extension.pop(0)
        assert "extend_schema(" in extend_schema
        exec(extend_schema, scope)
        schema = scope["schema"]
        human_type = schema.get_type("Human")
        assert "lastName" in human_type.fields
        attach_resolver = extension.pop(0)
        exec(attach_resolver, scope)
        assert human_type.fields["lastName"].resolve is scope["get_last_name"]
        query = extension.pop(0)
        assert "graphql_sync(" in query
        exec(query, scope)
        out, err = capsys.readouterr()
        assert not err
        assert "lastName" in out and "Skywalker" in out
        assert out == expected_result(extension)
    # docs/usage/validator.rst: validating queries against a schema.
    def validating_queries():
        scope: Scope = {}
        exec(resolvers, scope)
        create_schema = "\n".join(get_snippets("usage/schema")[1:])
        exec(create_schema, scope)
        validator = get_snippets("usage/validator")
        validate = validator.pop(0)
        assert "errors = validate(" in validate
        exec(validate, scope)
        errors = str(scope["errors"])
        assert errors == expected_errors(validator)
| mit | 93e5e21bd9d97d606e41a708f4ff7aa7 | 36.45 | 81 | 0.606512 | 3.906694 | false | false | false | false |
graphql-python/graphql-core | tests/language/test_print_string.py | 1 | 2437 | from graphql.language.print_string import print_string
def describe_print_string():
    """Tests for print_string, which renders text as a GraphQL string literal."""
    def prints_a_simple_string():
        assert print_string("hello world") == '"hello world"'
    def escapes_quotes():
        assert print_string('"hello world"') == '"\\"hello world\\""'
    def escapes_backslashes():
        assert print_string("escape: \\") == '"escape: \\\\"'
    def escapes_well_known_control_chars():
        # Control characters with short escape forms use them.
        assert print_string("\b\f\n\r\t") == '"\\b\\f\\n\\r\\t"'
    def escapes_zero_byte():
        assert print_string("\x00") == '"\\u0000"'
    def does_not_escape_space():
        assert print_string(" ") == '" "'
    def does_not_escape_non_ascii_character():
        assert print_string("\u21BB") == '"\u21BB"'
    def does_not_escape_supplementary_character():
        # Characters outside the BMP are emitted verbatim as well.
        assert print_string("\U0001f600") == '"\U0001f600"'
    def escapes_all_control_chars():
        # Exhaustive check of U+0000..U+009F: C0/C1 controls are escaped as
        # \uXXXX (or their short forms), printable ASCII stays untouched.
        assert print_string(
            "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F"
            "\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F"
            "\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2A\x2B\x2C\x2D\x2E\x2F"
            "\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x3A\x3B\x3C\x3D\x3E\x3F"
            "\x40\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F"
            "\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x5B\x5C\x5D\x5E\x5F"
            "\x60\x61\x62\x63\x64\x65\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F"
            "\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79\x7A\x7B\x7C\x7D\x7E\x7F"
            "\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8A\x8B\x8C\x8D\x8E\x8F"
            "\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9A\x9B\x9C\x9D\x9E\x9F"
        ) == (
            '"\\u0000\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007'
            "\\b\\t\\n\\u000B\\f\\r\\u000E\\u000F"
            "\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\u0016\\u0017"
            "\\u0018\\u0019\\u001A\\u001B\\u001C\\u001D\\u001E\\u001F"
            " !\\\"#$%&'()*+,-./0123456789:;<=>?"
            "@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_"
            "`abcdefghijklmnopqrstuvwxyz{|}~\\u007F"
            "\\u0080\\u0081\\u0082\\u0083\\u0084\\u0085\\u0086\\u0087"
            "\\u0088\\u0089\\u008A\\u008B\\u008C\\u008D\\u008E\\u008F"
            "\\u0090\\u0091\\u0092\\u0093\\u0094\\u0095\\u0096\\u0097"
            '\\u0098\\u0099\\u009A\\u009B\\u009C\\u009D\\u009E\\u009F"'
        )
| mit | 83b25ff5f3745a90623e146e575a0dfe | 44.981132 | 78 | 0.571194 | 2.211434 | false | false | false | false |
graphql-python/graphql-core | tests/validation/test_single_field_subscriptions.py | 1 | 8228 | from functools import partial
from graphql.utilities import build_schema
from graphql.validation import SingleFieldSubscriptionsRule
from .harness import assert_validation_errors
schema = build_schema(
"""
type Message {
body: String
sender: String
}
type SubscriptionRoot {
importantEmails: [String]
notImportantEmails: [String]
moreImportantEmails: [String]
spamEmails: [String]
deletedEmails: [String]
newMessage: Message
}
type QueryRoot {
dummy: String
}
schema {
query: QueryRoot
subscription: SubscriptionRoot
}
"""
)
assert_errors = partial(
assert_validation_errors, SingleFieldSubscriptionsRule, schema=schema
)
assert_valid = partial(assert_errors, errors=[])
def describe_validate_subscriptions_with_single_field():
    """Tests for the SingleFieldSubscriptionsRule validation rule.

    The rule requires each subscription operation to select exactly one
    top-level field (counting through fragments) and forbids introspection
    fields at the top level.
    """
    def valid_subscription():
        assert_valid(
            """
            subscription ImportantEmails {
              importantEmails
            }
            """
        )
    def valid_subscription_with_fragment():
        assert_valid(
            """
            subscription sub {
              ...newMessageFields
            }
            fragment newMessageFields on SubscriptionRoot {
              newMessage {
                body
                sender
              }
            }
            """
        )
    def valid_subscription_with_fragment_and_field():
        # Field and fragment selecting the *same* root field count as one.
        assert_valid(
            """
            subscription sub {
              newMessage {
                body
              }
              ...newMessageFields
            }
            fragment newMessageFields on SubscriptionRoot {
              newMessage {
                body
                sender
              }
            }
            """
        )
    def fails_with_more_than_one_root_field():
        assert_errors(
            """
            subscription ImportantEmails {
              importantEmails
              notImportantEmails
            }
            """,
            [
                {
                    "message": "Subscription 'ImportantEmails'"
                    " must select only one top level field.",
                    "locations": [(4, 15)],
                }
            ],
        )
    def fails_with_more_than_one_root_field_including_introspection():
        assert_errors(
            """
            subscription ImportantEmails {
              importantEmails
              __typename
            }
            """,
            [
                {
                    "message": "Subscription 'ImportantEmails'"
                    " must select only one top level field.",
                    "locations": [(4, 15)],
                },
                {
                    "message": "Subscription 'ImportantEmails'"
                    " must not select an introspection top level field.",
                    "locations": [(4, 15)],
                },
            ],
        )
    def fails_with_more_than_one_root_field_including_aliased_introspection():
        # Aliasing __typename inside a fragment does not hide it from the rule.
        assert_errors(
            """
            subscription ImportantEmails {
              importantEmails
              ...Introspection
            }
            fragment Introspection on SubscriptionRoot {
              typename: __typename
            }
            """,
            [
                {
                    "message": "Subscription 'ImportantEmails'"
                    " must select only one top level field.",
                    "locations": [(7, 15)],
                },
                {
                    "message": "Subscription 'ImportantEmails'"
                    " must not select an introspection top level field.",
                    "locations": [(7, 15)],
                },
            ],
        )
    def fails_with_many_more_than_one_root_field():
        assert_errors(
            """
            subscription ImportantEmails {
              importantEmails
              notImportantEmails
              spamEmails
            }
            """,
            [
                {
                    "message": "Subscription 'ImportantEmails'"
                    " must select only one top level field.",
                    "locations": [(4, 15), (5, 15)],
                }
            ],
        )
    def fails_with_more_than_one_root_field_via_fragments():
        # Extra root fields reached through (nested) fragments are reported.
        assert_errors(
            """
            subscription ImportantEmails {
              importantEmails
              ... {
                more: moreImportantEmails
              }
              ...NotImportantEmails
            }
            fragment NotImportantEmails on SubscriptionRoot {
              notImportantEmails
              deleted: deletedEmails
              ...SpamEmails
            }
            fragment SpamEmails on SubscriptionRoot {
              spamEmails
            }
            """,
            [
                {
                    "message": "Subscription 'ImportantEmails'"
                    " must select only one top level field.",
                    "locations": [(5, 17), (10, 15), (11, 15), (15, 15)],
                },
            ],
        )
    def does_not_infinite_loop_on_recursive_fragments():
        assert_errors(
            """
            subscription NoInfiniteLoop {
              ...A
            }
            fragment A on SubscriptionRoot {
              ...A
            }
            """,
            [],
        )
    def fails_with_more_than_one_root_field_via_fragments_anonymous():
        assert_errors(
            """
            subscription {
              importantEmails
              ... {
                more: moreImportantEmails
                ...NotImportantEmails
              }
              ...NotImportantEmails
            }
            fragment NotImportantEmails on SubscriptionRoot {
              notImportantEmails
              deleted: deletedEmails
              ... {
                ... {
                  archivedEmails
                }
              }
              ...SpamEmails
            }
            fragment SpamEmails on SubscriptionRoot {
              spamEmails
              ...NonExistentFragment
            }
            """,
            [
                {
                    "message": "Anonymous Subscription"
                    " must select only one top level field.",
                    "locations": [(5, 17), (11, 15), (12, 15), (15, 19), (21, 15)],
                },
            ],
        )
    def fails_with_more_than_one_root_field_in_anonymous_subscriptions():
        assert_errors(
            """
            subscription {
              importantEmails
              notImportantEmails
            }
            """,
            [
                {
                    "message": "Anonymous Subscription"
                    " must select only one top level field.",
                    "locations": [(4, 15)],
                }
            ],
        )
    def fails_with_introspection_field():
        assert_errors(
            """
            subscription ImportantEmails {
              __typename
            }
            """,
            [
                {
                    "message": "Subscription 'ImportantEmails' must not"
                    " select an introspection top level field.",
                    "locations": [(3, 15)],
                }
            ],
        )
    def fails_with_introspection_field_in_anonymous_subscription():
        assert_errors(
            """
            subscription {
              __typename
            }
            """,
            [
                {
                    "message": "Anonymous Subscription must not"
                    " select an introspection top level field.",
                    "locations": [(3, 15)],
                }
            ],
        )
    def skips_if_not_subscription_type():
        # Without a subscription root type the rule has nothing to check.
        empty_schema = build_schema(
            """
            type Query {
              dummy: String
            }
            """
        )
        assert_errors(
            """
            subscription {
              __typename
            }
            """,
            [],
            schema=empty_schema,
        )
| mit | 5a9b1355388cd02f0f49e5b1bf751637 | 25.456592 | 83 | 0.421609 | 5.481679 | false | false | false | false |
graphql-python/graphql-core | tests/execution/test_execution_result.py | 1 | 4559 | from pytest import raises
from graphql.error import GraphQLError
from graphql.execution import ExecutionResult
def describe_execution_result():
    """Tests for the ExecutionResult container (repr, formatting, equality)."""
    # Shared fixtures used by all the nested tests below.
    data = {"foo": "Some data"}
    error = GraphQLError("Some error")
    errors = [error]
    extensions = {"bar": "Some extension"}
    def initializes_properly():
        # Attributes are stored by reference; extensions default to None.
        res = ExecutionResult(data, errors)
        assert res.data is data
        assert res.errors is errors
        assert res.extensions is None
        res = ExecutionResult(data, errors, extensions)
        assert res.data is data
        assert res.errors is errors
        assert res.extensions is extensions
    def prints_a_representation():
        assert repr(ExecutionResult(data, errors)) == (
            "ExecutionResult(data={'foo': 'Some data'},"
            " errors=[GraphQLError('Some error')])"
        )
        assert repr(ExecutionResult(data, errors, extensions)) == (
            "ExecutionResult(data={'foo': 'Some data'},"
            " errors=[GraphQLError('Some error')],"
            " extensions={'bar': 'Some extension'})"
        )
    def formats_properly():
        # `formatted` omits "errors"/"extensions" entries when they are None.
        res = ExecutionResult(data, None)
        assert res.formatted == {"data": data}
        res = ExecutionResult(data, errors)
        assert res.formatted == {
            "data": data,
            "errors": [{"message": "Some error"}],
        }
        res = ExecutionResult(data, None, extensions)
        assert res.formatted == {
            "data": data,
            "extensions": extensions,
        }
        res = ExecutionResult(data, errors, extensions)
        assert res.formatted == {
            "data": data,
            "errors": [{"message": "Some error"}],
            "extensions": extensions,
        }
    def compares_to_dict():
        res = ExecutionResult(data, errors)
        assert res == {"data": data, "errors": errors}
        assert res == {"data": data, "errors": errors, "extensions": None}
        assert res != {"data": data, "errors": None}
        assert res != {"data": None, "errors": errors}
        assert res != {"data": data, "errors": errors, "extensions": extensions}
        res = ExecutionResult(data, errors, extensions)
        assert res == {"data": data, "errors": errors}
        assert res == {"data": data, "errors": errors, "extensions": extensions}
        assert res != {"data": data, "errors": None}
        assert res != {"data": None, "errors": errors}
        assert res != {"data": data, "errors": errors, "extensions": None}
    def compares_to_tuple():
        # Two- and three-tuples compare against (data, errors[, extensions]).
        res = ExecutionResult(data, errors)
        assert res == (data, errors)
        assert res == (data, errors, None)
        assert res != (data, None)
        assert res != (None, errors)
        assert res != (data, errors, extensions)
        res = ExecutionResult(data, errors, extensions)
        assert res == (data, errors)
        assert res == (data, errors, extensions)
        assert res != (data, None)
        assert res != (None, errors)
        assert res != (data, errors, None)
    def does_not_compare_to_list():
        res = ExecutionResult(data, errors)
        assert res != [data, errors]
        res = ExecutionResult(data, errors, extensions)
        assert res != [data, errors, extensions]
    def compares_to_another_execution_result():
        res1 = ExecutionResult(data, errors)
        res2 = ExecutionResult(data, errors)
        assert res1 == res2
        res2 = ExecutionResult({"foo": "other data"}, errors)
        assert res1 != res2
        res2 = ExecutionResult(data, [GraphQLError("Another error")])
        assert res1 != res2
        res2 = ExecutionResult(data, errors, extensions)
        assert res1 != res2
        res1 = ExecutionResult(data, errors, extensions)
        res2 = ExecutionResult(data, errors, extensions)
        assert res1 == res2
        res2 = ExecutionResult({"foo": "other data"}, errors, extensions)
        assert res1 != res2
        res2 = ExecutionResult(data, [GraphQLError("Another error")], extensions)
        assert res1 != res2
        res2 = ExecutionResult(data, errors, {"bar": "Another extension"})
        assert res1 != res2
    def unpacks_as_two_tuple():
        # Iteration yields exactly (data, errors), even when extensions exist.
        res = ExecutionResult(data, errors)
        res_data, res_errors = res  # type: ignore
        assert res_data == data  # type: ignore
        assert res_errors == errors  # type: ignore
        with raises(ValueError):
            res = ExecutionResult(data, errors, extensions)
            _res_data, _res_errors, _res_extensions = res  # type: ignore
| mit | 6de819b02ed7725184e44b27edc20047 | 38.301724 | 81 | 0.586093 | 4.24093 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_aws/management/commands/import_ami_catalog.py | 2 | 3497 | import argparse
from csv import DictReader
from django.core.management.base import BaseCommand, CommandError
from ... import models
class Command(BaseCommand):
    """Synchronize the database catalog of Amazon images (AMIs) with a CSV file.

    The CSV file must provide ``name``, ``backend_id`` and ``region`` columns.
    Images are created, updated and deleted so that the database ends up
    matching the catalog; regions are never created and must already exist.
    """

    help = "Import catalog of Amazon images."

    def add_arguments(self, parser):
        parser.add_argument(
            'file',
            type=argparse.FileType('r'),
            metavar='FILE',
            help='AMI catalog file.',
        )
        parser.add_argument(
            '-y',
            '--yes',
            action='store_true',
            dest='yes',
            default=False,
            help='The answer to any question which would be asked will be yes.',
        )

    def handle(self, *args, **options):
        # argparse.FileType has already opened the file, so use the file
        # object directly; calling open() on it (as this command previously
        # did) raises a TypeError. The context manager closes it afterwards.
        with options['file'] as csvfile:
            data = list(DictReader(csvfile))

        # Every region referenced by the catalog must already exist.
        csv_regions = {image['region'] for image in data}
        nc_regions = {region.name: region.id for region in models.Region.objects.all()}
        new_regions = csv_regions - set(nc_regions.keys())
        if new_regions:
            raise CommandError(
                '%s regions are missing in the database.' % ', '.join(new_regions)
            )

        csv_images = {image['backend_id']: image for image in data}
        csv_ids = set(csv_images.keys())
        nc_images = {image.backend_id: image for image in models.Image.objects.all()}
        nc_ids = set(nc_images.keys())

        # AMIs in the catalog but not yet in the database.
        new_ids = csv_ids - nc_ids
        if new_ids:
            new_ids_list = ', '.join(sorted(new_ids))
            self.stdout.write(
                'The following AMIs would be created: {}.'.format(new_ids_list)
            )

        # AMIs present in both whose name or region differs.
        common_ids = nc_ids & csv_ids
        updated_ids = set()
        for image_id in common_ids:
            csv_image = csv_images[image_id]
            nc_image = nc_images[image_id]
            csv_region = nc_regions.get(csv_image['region'])
            if nc_image.name != csv_image['name'] or nc_image.region_id != csv_region:
                updated_ids.add(image_id)
        if updated_ids:
            updated_ids_list = ', '.join(sorted(updated_ids))
            self.stdout.write(
                'The following AMIs would be updated: {}'.format(updated_ids_list)
            )

        # AMIs in the database that disappeared from the catalog.
        stale_ids = nc_ids - csv_ids
        if stale_ids:
            stale_ids_list = ', '.join(sorted(stale_ids))
            self.stdout.write(
                'The following AMIs would be deleted: {}'.format(stale_ids_list)
            )

        if not new_ids and not stale_ids and not updated_ids:
            self.stdout.write('There are no changes to apply.')
            return

        # Ask for confirmation unless --yes was passed.
        if not options['yes']:
            confirm = input('Enter [y] to continue: ')
            if confirm.strip().lower() != 'y':
                self.stdout.write('Changes are not applied.')
                return

        for image_id in new_ids:
            csv_image = csv_images[image_id]
            models.Image.objects.create(
                name=csv_image['name'],
                backend_id=csv_image['backend_id'],
                region_id=nc_regions.get(csv_image['region']),
            )
        for image_id in updated_ids:
            csv_image = csv_images[image_id]
            models.Image.objects.filter(backend_id=image_id).update(
                name=csv_image['name'], region_id=nc_regions.get(csv_image['region'])
            )
        models.Image.objects.filter(backend_id__in=stale_ids).delete()
        self.stdout.write('All changes are applied.')
| mit | 11bf85790c7d7b675bb98a75acf554e4 | 33.97 | 87 | 0.544181 | 4.001144 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_mastermind/invoices/migrations/0056_fill_quantity.py | 1 | 2792 | from calendar import monthrange
from decimal import ROUND_UP, Decimal
from django.db import migrations, models
class Units:
    # Billing unit identifiers; values mirror the choices stored on
    # InvoiceItem.unit at the time of this migration.
    PER_MONTH = 'month'  # billed as a fraction of the calendar month
    PER_HALF_MONTH = 'half_month'  # billed in half-month steps (1st-15th / 16th-end)
    PER_DAY = 'day'  # billed per started day
    PER_HOUR = 'hour'  # billed per started hour
    QUANTITY = 'quantity'  # quantity is supplied directly, not derived from dates
def quantize_price(value):
    """Round a Decimal price up to two decimal places (whole cents)."""
    cents = Decimal('0.01')
    return value.quantize(cents, rounding=ROUND_UP)
def get_full_hours(start, end):
    """Return the number of started hours between *start* and *end*.

    Any partial hour counts as a whole one (rounded up).
    """
    seconds_per_hour = 60 * 60
    hours, remainder = divmod((end - start).total_seconds(), seconds_per_hour)
    return int(hours) + (1 if remainder > 0 else 0)
def get_full_days(start, end):
    """Return the number of started 24-hour periods between *start* and *end*.

    Any partial day counts as a whole one (rounded up).
    """
    seconds_per_day = 24 * 60 * 60
    days, remainder = divmod((end - start).total_seconds(), seconds_per_day)
    return int(days) + (1 if remainder > 0 else 0)
def get_quantity(item):
    """Derive the billed quantity of an invoice item from its billing unit.

    Hour/day units count started hours/days; half-month units count halves
    of the calendar month (1st-15th and 16th-end), pro-rated for partial
    coverage; everything else is treated as per-month and pro-rated by day.
    """
    start, end, unit = item.start, item.end, item.unit
    month_days = monthrange(start.year, start.month)[1]
    if unit == Units.PER_HOUR:
        return get_full_hours(start, end)
    if unit == Units.PER_DAY:
        return get_full_days(start, end)
    if unit == Units.PER_HALF_MONTH:
        covers_first_half = start.day == 1 and end.day == 15
        covers_second_half = start.day == 16 and end.day == month_days
        if covers_first_half or covers_second_half:
            return 1
        if start.day == 1 and end.day == month_days:
            return 2
        # NOTE(review): month_days / 2 is float division here, matching the
        # historical behavior of this migration.
        if start.day == 1 and end.day > 15:
            return quantize_price(1 + (end.day - 15) / Decimal(month_days / 2))
        if start.day < 16 and end.day == month_days:
            return quantize_price(1 + (16 - start.day) / Decimal(month_days / 2))
        return quantize_price(
            (end.day - start.day + 1) / Decimal(month_days / 2.0)
        )
    # Default: per-month billing, pro-rated by the number of used days.
    if start.day == 1 and end.day == month_days:
        return 1
    used_days = (end - start).days + 1
    return quantize_price(Decimal(used_days) / month_days)
def fill_quantity(apps, schema_editor):
    """Backfill InvoiceItem.quantity for time-based units (data migration)."""
    InvoiceItem = apps.get_model('invoices', 'InvoiceItem')
    for invoice_item in InvoiceItem.objects.all():
        # Quantity-based items carry their quantity already, and any value
        # that was filled in previously is left untouched.
        if invoice_item.unit == Units.QUANTITY or invoice_item.quantity:
            continue
        invoice_item.quantity = get_quantity(invoice_item)
        invoice_item.save(update_fields=['quantity'])
class Migration(migrations.Migration):
    dependencies = [
        ('invoices', '0055_invoice_backend_id'),
    ]
    operations = [
        # Widen the quantity field so fractional (pro-rated) quantities fit.
        migrations.AlterField(
            model_name='invoiceitem',
            name='quantity',
            field=models.DecimalField(decimal_places=7, default=0, max_digits=22),
        ),
        # Data migration is one-way; reversing leaves the values in place.
        migrations.RunPython(fill_quantity, reverse_code=migrations.RunPython.noop),
    ]
| mit | b1021f86de1cabb4a84c5322ddfe9f2b | 29.347826 | 86 | 0.60351 | 3.485643 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_mastermind/marketplace_openstack/migrations/0010_split_invoice_items.py | 2 | 8094 | import decimal
from collections import defaultdict
from datetime import timedelta
from django.db import migrations
from django.utils import timezone
# Offering type of the legacy package-based tenant offerings being split.
TENANT_TYPE = 'Packages.Template'
# Offering component type identifiers.
RAM_TYPE = 'ram'
CORES_TYPE = 'cores'
STORAGE_TYPE = 'storage'
# Divisors converting stored limit values to billed units
# (RAM and storage limits are stored in MB, billed in GB).
component_factors = {STORAGE_TYPE: 1024, RAM_TYPE: 1024}
def get_full_days(start, end):
    """Number of started 24h periods between *start* and *end*, rounded up."""
    day_seconds = 24 * 60 * 60
    elapsed = (end - start).total_seconds()
    days, leftover = divmod(elapsed, day_seconds)
    if leftover > 0:
        days += 1
    return int(days)
def quantize_price(value):
    """Round a Decimal price up to cents (two decimal places)."""
    cents = decimal.Decimal('0.01')
    return value.quantize(cents, rounding=decimal.ROUND_UP)
def get_resource_name(resource):
    """Human-readable resource label: name plus offering (and plan, if any)."""
    if not resource.plan:
        return '%s (%s)' % (resource.name, resource.offering.name)
    return '%s (%s / %s)' % (
        resource.name,
        resource.offering.name,
        resource.plan.name,
    )
def get_invoice_item_name(source, component_type):
    """Build the invoice item name for one plan component of a resource.

    Known component types get a fixed suffix; volume-type components
    ('gigabytes_<type>') get '<type> storage'; anything else uses the bare
    resource label.
    """
    base = get_resource_name(source)
    fixed_suffixes = {
        CORES_TYPE: 'CPU',
        RAM_TYPE: 'RAM',
        STORAGE_TYPE: 'storage',
    }
    if component_type in fixed_suffixes:
        return f'{base} / {fixed_suffixes[component_type]}'
    if component_type.startswith('gigabytes_'):
        volume_type = component_type.replace('gigabytes_', '')
        return f'{base} / {volume_type} storage'
    return base
def get_component_details(resource, plan_component):
    """Collect a flat details dict describing a resource/plan-component pair.

    Plan fields fall back to empty strings when the resource has no plan,
    as does the service provider UUID when the customer is not a provider.
    """
    offering = resource.offering
    customer = offering.customer
    provider = getattr(customer, 'serviceprovider', None)
    plan = resource.plan
    component = plan_component.component
    details = {
        'resource_name': resource.name,
        'resource_uuid': resource.uuid.hex,
        'plan_name': plan.name if plan else '',
        'plan_uuid': plan.uuid.hex if plan else '',
        'offering_type': offering.type,
        'offering_name': offering.name,
        'offering_uuid': offering.uuid.hex,
        'service_provider_name': customer.name,
        'service_provider_uuid': provider.uuid.hex if provider else '',
        'plan_component_id': plan_component.id,
        'offering_component_type': component.type,
        'offering_component_name': component.name,
    }
    return details
def collect_limit_periods(resource_invoice_items):
    """Group per-limit usage periods collected from invoice item details.

    Returns a mapping of limit name to a list of
    {'start', 'end', 'quantity'} dicts, one per invoice item, with the
    stored limit value converted to billed units via component_factors.
    """
    periods_by_limit = defaultdict(list)
    for invoice_item in resource_invoice_items:
        limits = invoice_item.details['limits']
        for limit_name, limit_value in limits.items():
            divisor = component_factors.get(limit_name, 1)
            # NOTE(review): limit_value / divisor is true division, so the
            # Decimal is built from a float — kept to match prior behavior.
            period = {
                'start': invoice_item.start,
                'end': invoice_item.end,
                'quantity': decimal.Decimal(limit_value / divisor),
            }
            periods_by_limit[limit_name].append(period)
    return periods_by_limit
def merge_consecutive_periods(resource_limit_periods):
    """Merge adjacent usage periods that carry the same quantity.

    Periods are sorted by end time; a period is folded into the previous one
    when its quantity is equal and it starts exactly one second after the
    previous period ends. Period dicts are mutated in place when extended.
    """
    output = {}
    for limit_name, limit_periods in resource_limit_periods.items():
        limit_periods = list(sorted(limit_periods, key=lambda period: period['end']))
        if len(limit_periods) == 1:
            output[limit_name] = limit_periods
            continue
        prev_value = limit_periods[0]['quantity']
        prev_end = limit_periods[0]['end']
        merged_limit_periods = [limit_periods[0]]
        for limit_period in limit_periods[1:]:
            if limit_period['quantity'] == prev_value and limit_period[
                'start'
            ] - prev_end == timedelta(seconds=1):
                # Extend period ie merge consecutive items
                merged_limit_periods[-1]['end'] = limit_period['end']
            else:
                merged_limit_periods.append(limit_period)
                prev_value = limit_period['quantity']
            # Advance prev_end in both branches; previously it was only
            # updated in the non-merge branch, so runs of three or more
            # consecutive periods were not fully merged.
            prev_end = limit_period['end']
        output[limit_name] = merged_limit_periods
    return output
def serialize_resource_limit_period(period):
    """Render a merged usage period as a JSON-friendly dict with day totals."""
    start = period['start']
    end = period['end']
    quantity = period['quantity']
    billing_periods = get_full_days(start, end)
    return {
        'start': start.isoformat(),
        'end': end.isoformat(),
        'quantity': str(quantity),
        'billing_periods': billing_periods,
        'total': str(quantity * billing_periods),
    }
def create_invoice_items_for_components(
    InvoiceItem, invoice, resource, resource_invoice_items
):
    """Create per-component invoice items from aggregated limit-based items.

    Limits from the old items are split into per-limit usage periods, merged
    when consecutive, and one new InvoiceItem is created per plan component
    that has a non-zero aggregated quantity.

    :param InvoiceItem: historical InvoiceItem model class (migration-safe).
    :param invoice: invoice the new items are attached to.
    :param resource: marketplace resource the items belong to.
    :param resource_invoice_items: old invoice items carrying a 'limits' dict.
    :return: list of newly created InvoiceItem instances.
    """
    resource_limit_periods = collect_limit_periods(resource_invoice_items)
    resource_limit_periods = merge_consecutive_periods(resource_limit_periods)
    new_invoice_items = []
    for plan_component in resource.plan.components.all():
        offering_component = plan_component.component
        component_type = offering_component.type
        periods = resource_limit_periods.get(component_type)
        if not periods:
            # print() is used instead of logging because this runs inside a
            # one-off data migration.
            print(
                f'Skipping plan component {component_type} of '
                f'resource {resource.id} because resource_limit_periods list is empty.'
            )
            continue
        # Quantity is accumulated as value * full-days for each merged period.
        quantity = sum(
            period['quantity'] * get_full_days(period['start'], period['end'])
            for period in periods
        )
        if not quantity:
            print(
                f'Skipping plan component {component_type} of '
                f'resource {resource.id} because aggregated quantity is zero.'
            )
            continue
        details = get_component_details(resource, plan_component)
        details['resource_limit_periods'] = list(
            map(serialize_resource_limit_period, resource_limit_periods[component_type])
        )
        new_invoice_item = InvoiceItem.objects.create(
            name=get_invoice_item_name(resource, component_type),
            unit_price=plan_component.price,
            article_code=offering_component.article_code,
            measured_unit=f'{offering_component.measured_unit}*day',
            resource=resource,
            project=resource.project,
            unit='quantity',
            quantity=quantity,
            invoice=invoice,
            # The item spans the whole window covered by the merged periods.
            start=min(period['start'] for period in periods),
            end=max(period['end'] for period in periods),
            details=details,
        )
        new_invoice_items.append(new_invoice_item)
    return new_invoice_items
def format_items_list(items):
    """Join item primary keys into a comma-separated string for log output."""
    return ', '.join(str(entry.id) for entry in items)
def process_invoices(apps, schema_editor):
    """Migration entry point: replace limit-based invoice items with
    per-component items for OpenStack tenant resources in the current
    month's invoices.
    """
    Invoice = apps.get_model('invoices', 'Invoice')
    InvoiceItem = apps.get_model('invoices', 'InvoiceItem')
    Resource = apps.get_model('marketplace', 'Resource')
    today = timezone.now()
    # Only the current billing period is rewritten; past invoices are kept.
    for invoice in Invoice.objects.filter(year=today.year, month=today.month):
        invoice_items = InvoiceItem.objects.filter(
            invoice=invoice,
            resource__offering__type=TENANT_TYPE,
            details__has_key='limits',
        )
        if not invoice_items.exists():
            continue
        resource_ids = invoice_items.values_list('resource_id', flat=True)
        for resource_id in resource_ids:
            resource = Resource.objects.get(id=resource_id)
            # Cache old_invoice_items so that they are not reevaluated later
            old_invoice_items = list(invoice_items.filter(resource_id=resource_id))
            new_invoice_items = create_invoice_items_for_components(
                InvoiceItem, invoice, resource, old_invoice_items
            )
            if new_invoice_items:
                print(
                    f"Replacing resource items {format_items_list(old_invoice_items)} "
                    f"with component-based items {format_items_list(new_invoice_items)}"
                )
            # Old aggregated items are removed even when no replacement was
            # created (zero quantity / no matching component).
            for old_invoice_item in old_invoice_items:
                old_invoice_item.delete()
class Migration(migrations.Migration):
    # Data migration: replaces aggregated limit-based invoice items for
    # OpenStack tenants with per-component invoice items (see process_invoices).
    dependencies = [
        ('marketplace_openstack', '0009_fill_project'),
        ('invoices', '0052_delete_servicedowntime'),
    ]
    operations = [migrations.RunPython(process_invoices, atomic=True)]
| mit | c8c5ba419e3f46230bf27cb012c96190 | 36.472222 | 88 | 0.618483 | 4.096154 | false | false | false | false |
getavalon/core | avalon/vendor/requests/packages/chardet/charsetprober.py | 292 | 5110 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import logging
import re
from .enums import ProbingState
class CharSetProber(object):
    """Base class for all charset probers: state handling plus byte filters."""

    SHORTCUT_THRESHOLD = 0.95

    def __init__(self, lang_filter=None):
        self._state = None
        self.lang_filter = lang_filter
        self.logger = logging.getLogger(__name__)

    def reset(self):
        self._state = ProbingState.DETECTING

    @property
    def charset_name(self):
        return None

    def feed(self, buf):
        pass

    @property
    def state(self):
        return self._state

    def get_confidence(self):
        return 0.0

    @staticmethod
    def filter_high_byte_only(buf):
        """Collapse every run of ASCII bytes in ``buf`` into a single space."""
        return re.sub(b'([\x00-\x7F])+', b' ', buf)

    @staticmethod
    def filter_international_words(buf):
        """
        Keep only words containing at least one international character.

        Bytes fall into three classes: English alphabet [a-zA-Z],
        international [\x80-\xFF] and markers (everything else). The input
        is treated as marker-delimited words; words with no international
        character are dropped, and a trailing marker on a kept word is
        normalized to a single space so markers do not skew the analysis.
        """
        filtered = bytearray()
        # Each match is a word with at least one international byte,
        # optionally followed by a single marker byte.
        words = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?',
                           buf)
        for word in words:
            filtered.extend(word[:-1])
            marker = word[-1:]
            # Normalize a trailing marker byte to a space; keep alphabetic
            # or high bytes as-is.
            if not marker.isalpha() and marker < b'\x80':
                marker = b' '
            filtered.extend(marker)
        return filtered

    @staticmethod
    def filter_with_english_letters(buf):
        """
        Return a copy of ``buf`` keeping sequences of English letters and
        high bytes that are outside <...> tags (plus such characters
        immediately before '>').

        Intended for scripts mixing English letters with extended ASCII;
        currently only used by ``Latin1Prober``.
        """
        filtered = bytearray()
        inside_tag = False
        kept_from = 0
        for index in range(len(buf)):
            # Slice (not index) so we get bytes instead of an int on Python 3.
            current = buf[index:index + 1]
            # Track whether we are inside an HTML-like tag.
            if current == b'>':
                inside_tag = False
            elif current == b'<':
                inside_tag = True
            # A low, non-alphabetic byte terminates the current stretch.
            if current < b'\x80' and not current.isalpha():
                if index > kept_from and not inside_tag:
                    # Keep the stretch since the last delimiter, then emit a
                    # space to separate it from the next one.
                    filtered.extend(buf[kept_from:index])
                    filtered.extend(b' ')
                kept_from = index + 1
        # Keep the trailing stretch unless it is inside an unclosed tag.
        if not inside_tag:
            filtered.extend(buf[kept_from:])
        return filtered
| mit | 4818830e630907428f275d46470d3421 | 34.241379 | 80 | 0.608806 | 4.506173 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_mastermind/booking/calendar.py | 2 | 4800 | from django.utils.functional import cached_property
from waldur_mastermind.booking.utils import TimePeriod, get_offering_bookings
from waldur_mastermind.google.backend import GoogleCalendar
class SyncBookingsError(Exception):
    """Raised when synchronising bookings with the external calendar fails."""
class SyncBookings:
    """Synchronizes Waldur bookings of an offering with a Google Calendar."""

    def __init__(self, offering):
        self.offering = offering
        # Google OAuth tokens are stored on the offering's service provider.
        self.credentials = offering.customer.serviceprovider.googlecredentials
        self.backend = GoogleCalendar(tokens=self.credentials)
    @cached_property
    def calendar_id(self):
        """Backend ID of the offering calendar; creates the calendar if missing."""
        calendar = self.offering.googlecalendar
        if not calendar.backend_id:
            self.create_calendar()
            calendar.refresh_from_db()
            # This code is usually called through an executor that creates the
            # calendar automatically; if creation fails, the executor records
            # the error status on the calendar.
        return calendar.backend_id
    def get_bookings(self):
        """Diff Waldur bookings against Google Calendar events.

        :return: tuple ``(need_to_add, need_to_delete, need_to_update)`` where
            the first and last elements are lists of Waldur bookings and
            ``need_to_delete`` is a set of Google event IDs.
        """
        def get_date(event_date):
            # Google events carry either an all-day 'date' or a 'dateTime'.
            date = event_date.get('date', None)
            if not date:
                return event_date.get('dateTime', None)
            return date
        waldur_bookings = get_offering_bookings(self.offering)
        google_bookings = []
        for event in self.backend.get_events(calendar_id=self.calendar_id):
            # Events without usable start or end timestamps are skipped.
            start = event.get('start')
            if start:
                start = get_date(start)
            else:
                continue
            end = event.get('end')
            if end:
                end = get_date(end)
            else:
                continue
            attendees = []
            if event.get('attendees'):
                for attendee in event.get('attendees'):
                    attendees.append(
                        {
                            'displayName': attendee['displayName'],
                            'email': attendee['email'],
                        }
                    )
            google_bookings.append(
                TimePeriod(
                    start,
                    end,
                    event['id'],
                    location=event.get('location'),
                    attendees=attendees,
                )
            )
        # Events that exist in Google but have no matching Waldur booking
        # must be removed from the calendar.
        need_to_delete = {b.id for b in google_bookings} - {
            b.id for b in waldur_bookings
        }
        need_to_update = []
        need_to_add = []
        for booking in waldur_bookings:
            google_booking = list(filter(lambda x: x.id == booking.id, google_bookings))
            if len(google_booking):
                google_booking = google_booking[0]
                # NOTE(review): the start/end comparison assumes both sides
                # serialize dates identically — confirm TimePeriod normalizes
                # the formats, otherwise equal bookings look changed.
                if (
                    booking.start != google_booking.start
                    or booking.end != google_booking.end
                    or booking.location != google_booking.location
                    or booking.attendees != google_booking.attendees
                ):
                    need_to_update.append(booking)
            else:
                need_to_add.append(booking)
        return need_to_add, need_to_delete, need_to_update
    def sync_events(self):
        """Apply the computed diff to Google: create, delete and update events."""
        need_to_add, need_to_delete, need_to_update = self.get_bookings()
        for booking in need_to_add:
            self.backend.create_event(
                summary=self.offering.name,
                event_id=booking.id,
                start=booking.start,
                end=booking.end,
                calendar_id=self.calendar_id,
                location=booking.location,
                attendees=booking.attendees,
            )
        for booking_id in need_to_delete:
            self.backend.delete_event(
                calendar_id=self.calendar_id, event_id=booking_id,
            )
        for booking in need_to_update:
            self.backend.update_event(
                summary=self.offering.name,
                event_id=booking.id,
                start=booking.start,
                end=booking.end,
                calendar_id=self.calendar_id,
                location=booking.location,
                attendees=booking.attendees,
            )
    def update_calendar_name(self):
        """Rename the backend calendar to match the offering name."""
        self.backend.update_calendar(self.calendar_id, summary=self.offering.name)
    def share_calendar(self):
        """Make the calendar public on the backend and mirror the flag locally."""
        self.backend.share_calendar(self.calendar_id)
        self.offering.googlecalendar.public = True
        self.offering.googlecalendar.save()
    def unshare_calendar(self):
        """Make the calendar private on the backend and mirror the flag locally."""
        self.backend.unshare_calendar(self.calendar_id)
        self.offering.googlecalendar.public = False
        self.offering.googlecalendar.save()
    def create_calendar(self):
        """Create the backend calendar and store its ID on the local model."""
        calendar = self.offering.googlecalendar
        backend_id = self.backend.create_calendar(calendar_name=calendar.offering.name)
        calendar.backend_id = backend_id
        calendar.save()
| mit | cd498ff0bed771f155d0278d42aabaf1 | 32.802817 | 99 | 0.548542 | 4.289544 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_core/core/monkeypatch.py | 2 | 3181 | """
TODO: drop patch when django-fsm package is updated.
If model with FSM state field has other fields that access their field value
via a property or a virtual Field, then creation of instances will fail.
There is pending patch in upstream project:
https://github.com/kmmbvnr/django-fsm/pull/171
"""
__all__ = ['monkey_patch_fields']
def subfield_get(self, obj, type=None):
    """
    Verbatim copy from:
    https://github.com/django/django/blob/1.9.13/django/db/models/fields/subclassing.py#L38
    """
    # Descriptor protocol: class-level access (obj is None) returns the
    # descriptor itself; instance access returns the raw value stored in the
    # instance __dict__. `type` intentionally shadows the builtin to match
    # the upstream __get__ signature — do not "fix" it, this must stay a
    # verbatim copy.
    if obj is None:
        return self
    return obj.__dict__[self.field.name]
def get_field_name(self):
    """Property getter exposing the wrapped field's name on the descriptor."""
    field = self.field
    return field.name
def patch_field_descriptor(cls):
    """Install the patched ``__get__`` and a ``field_name`` property on ``cls``."""
    setattr(cls, '__get__', subfield_get)
    setattr(cls, 'field_name', property(get_field_name))
def patch_fsm_field_mixin(cls):
    """Replace ``change_state`` on the given django-fsm mixin class.

    The patched version reports the human-readable state name (from field
    choices) in ``TransitionNotAllowed`` messages and moves the instance
    into the transition's declared exception state when the wrapped method
    raises.
    """
    from django_fsm import TransitionNotAllowed, pre_transition, post_transition
    def change_state(self, instance, method, *args, **kwargs):
        # Metadata attached to the method by django-fsm's @transition decorator.
        meta = method._django_fsm
        method_name = method.__name__
        current_state = self.get_state(instance)
        # Resolve the display name of the current state from field choices;
        # fall back to the raw value when it is not among the choices.
        try:
            current_state_name = list(
                filter(lambda x: x[0] == current_state, meta.field.choices)
            )[0][1]
        except Exception:
            current_state_name = current_state
        if not meta.has_transition(current_state):
            raise TransitionNotAllowed(
                "Can't switch from state '{0}' using method '{1}'".format(
                    current_state_name, method_name
                ),
                object=instance,
                method=method,
            )
        if not meta.conditions_met(instance, current_state):
            raise TransitionNotAllowed(
                "Transition conditions have not been met for method '{0}'".format(
                    method_name
                ),
                object=instance,
                method=method,
            )
        next_state = meta.next_state(current_state)
        signal_kwargs = {
            'sender': instance.__class__,
            'instance': instance,
            'name': method_name,
            'source': current_state,
            'target': next_state,
        }
        pre_transition.send(**signal_kwargs)
        try:
            result = method(instance, *args, **kwargs)
            if next_state is not None:
                self.set_proxy(instance, next_state)
                self.set_state(instance, next_state)
        except Exception as exc:
            # On failure, move the instance into the declared exception state
            # (if any) and still emit post_transition before re-raising.
            exception_state = meta.exception_state(current_state)
            if exception_state:
                self.set_proxy(instance, exception_state)
                self.set_state(instance, exception_state)
                signal_kwargs['target'] = exception_state
                signal_kwargs['exception'] = exc
                post_transition.send(**signal_kwargs)
            raise
        else:
            post_transition.send(**signal_kwargs)
        return result
    cls.change_state = change_state
def monkey_patch_fields():
    """Install both django-fsm patches (field descriptor and FSM field mixin)."""
    from django_fsm import FSMFieldDescriptor, FSMFieldMixin
    patch_field_descriptor(FSMFieldDescriptor)
    patch_fsm_field_mixin(FSMFieldMixin)
| mit | 9f33932a4eacfe1b17debeb07a92262a | 30.186275 | 91 | 0.58818 | 4.19657 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_vmware/backend.py | 2 | 42600 | import logging
import ssl
from urllib.parse import urlencode
import pyVim.connect
import pyVim.task
from django.utils import timezone
from django.utils.functional import cached_property
from pyVmomi import vim
from waldur_core.structure.backend import ServiceBackend, log_backend_action
from waldur_core.structure.exceptions import ServiceBackendError
from waldur_core.structure.utils import update_pulled_fields
from waldur_mastermind.common.utils import parse_datetime
from waldur_vmware.client import VMwareClient
from waldur_vmware.exceptions import VMwareError
from waldur_vmware.utils import is_basic_mode
from . import models, signals
logger = logging.getLogger(__name__)
class VMwareBackendError(ServiceBackendError):
    """Raised when the VMware REST or SOAP API reports a failure."""
class VMwareBackend(ServiceBackend):
    def __init__(self, settings):
        """
        Initialize the backend with service settings (URL and credentials).

        :type settings: :class:`waldur_core.structure.models.ServiceSettings`
        """
        self.settings = settings
@cached_property
def host(self):
return (
self.settings.backend_url.split('https://')[-1]
.split('http://')[-1]
.strip('/')
)
@cached_property
def client(self):
"""
Construct VMware REST API client using credentials specified in the service settings.
"""
client = VMwareClient(self.host, verify_ssl=False)
client.login(self.settings.username, self.settings.password)
return client
    @cached_property
    def soap_client(self):
        """
        Construct VMware SOAP API client using credentials specified in the service settings.
        """
        # NOTE(review): certificate verification is disabled (CERT_NONE), so
        # the connection is open to man-in-the-middle attacks — confirm this
        # is an accepted tradeoff for this deployment.
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        context.verify_mode = ssl.CERT_NONE
        return pyVim.connect.SmartConnect(
            host=self.host,
            user=self.settings.username,
            pwd=self.settings.password,
            port=443,
            sslContext=context,
        )
def ping(self, raise_exception=False):
"""
Check if backend is ok.
"""
try:
self.client.list_vms()
except VMwareError as e:
if raise_exception:
raise VMwareBackendError(e)
return False
else:
return True
    def pull_service_properties(self):
        """Sync folders, templates, clusters, networks and datastores from vCenter."""
        self.pull_folders()
        self.pull_templates()
        self.pull_clusters()
        self.pull_networks()
        self.pull_datastores()
    def pull_templates(self):
        """
        Pull VMware templates for virtual machine provisioning from content library
        using VMware REST API to the local database.
        """
        try:
            backend_templates = self.client.list_all_templates()
        except VMwareError as e:
            raise VMwareBackendError(e)
        if is_basic_mode():
            # If basic mode is enabled, we should filter out templates which have more than 1 NIC
            backend_templates = [
                template
                for template in backend_templates
                if len(template['template']['nics']) == 1
            ]
        backend_templates_map = {
            item['library_item']['id']: item for item in backend_templates
        }
        frontend_templates_map = {
            p.backend_id: p
            for p in models.Template.objects.filter(settings=self.settings)
        }
        # Classify templates: stale (local only), new (backend only), common.
        stale_ids = set(frontend_templates_map.keys()) - set(
            backend_templates_map.keys()
        )
        new_ids = set(backend_templates_map.keys()) - set(frontend_templates_map.keys())
        common_ids = set(backend_templates_map.keys()) & set(
            frontend_templates_map.keys()
        )
        # Create local records for templates that exist only on the backend.
        for library_item_id in new_ids:
            template = self._backend_template_to_template(
                backend_templates_map[library_item_id]
            )
            template.save()
        # Refresh mutable fields for templates present on both sides.
        for library_item_id in common_ids:
            backend_template = self._backend_template_to_template(
                backend_templates_map[library_item_id]
            )
            frontend_template = frontend_templates_map[library_item_id]
            fields = (
                'cores',
                'cores_per_socket',
                'ram',
                'disk',
                'guest_os',
                'modified',
                'description',
            )
            update_pulled_fields(frontend_template, backend_template, fields)
        # Drop local records whose backend counterpart disappeared.
        models.Template.objects.filter(
            settings=self.settings, backend_id__in=stale_ids
        ).delete()
    def _backend_template_to_template(self, backend_template):
        """Map a REST API template spec to an unsaved local Template model.

        RAM is reported in MiB by the API; total disk is converted to MiB by
        ``_get_total_disk``.
        """
        library_item = backend_template['library_item']
        template = backend_template['template']
        total_disk = self._get_total_disk(template['disks'])
        return models.Template(
            settings=self.settings,
            backend_id=library_item['id'],
            name=library_item['name'],
            description=library_item['description'],
            created=parse_datetime(library_item['creation_time']),
            modified=parse_datetime(library_item['last_modified_time']),
            cores=template['cpu']['count'],
            cores_per_socket=template['cpu']['cores_per_socket'],
            ram=template['memory']['size_MiB'],
            disk=total_disk,
            guest_os=template['guest_OS'],
        )
def _get_total_disk(self, backend_disks):
# Convert disk size from bytes to MiB
return sum([disk['value']['capacity'] / 1024 / 1024 for disk in backend_disks])
    @log_backend_action()
    def pull_virtual_machine(self, vm, update_fields=None):
        """
        Pull virtual machine from REST API and update its information in local database.

        :param vm: Virtual machine database object.
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :param update_fields: iterable of fields to be updated
        """
        import_time = timezone.now()
        imported_vm = self.import_virtual_machine(vm.backend_id, save=False)
        vm.refresh_from_db()
        # Skip the update when the local object was modified after the pull
        # started, so concurrent local changes are not overwritten.
        if vm.modified < import_time:
            if not update_fields:
                update_fields = models.VirtualMachine.get_backend_fields()
            update_pulled_fields(vm, imported_vm, update_fields)
    def import_virtual_machine(self, backend_id, project=None, save=True):
        """
        Import virtual machine by its ID.

        :param backend_id: Virtual machine identifier
        :type backend_id: str
        :param save: Save object in the database
        :type save: bool
        :param project: Optional project the virtual machine belongs to
        :rtype: :class:`waldur_vmware.models.VirtualMachine`
        """
        try:
            backend_vm = self.client.get_vm(backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)
        tools_installed = self.get_vm_tools_installed(backend_id)
        tools_state = self.get_vm_tools_state(backend_id)
        vm = self._backend_vm_to_vm(
            backend_vm, tools_installed, tools_state, backend_id
        )
        vm.service_settings = self.settings
        vm.project = project
        if save:
            vm.save()
        return vm
    def _backend_vm_to_vm(self, backend_vm, tools_installed, tools_state, backend_id):
        """
        Build database model object for virtual machine from REST API spec.

        :param backend_vm: virtual machine specification
        :type backend_vm: dict
        :param tools_installed: whether VMware tools installed or not
        :type tools_installed: bool
        :param tools_state: Status of VMware Tools.
        :type tools_state: str
        :param backend_id: Virtual machine identifier
        :type backend_id: str
        :rtype: :class:`waldur_vmware.models.VirtualMachine`
        """
        # RAM is reported in MiB; total disk is converted to MiB by
        # _get_total_disk. The VM is assumed to be in sync, hence state OK.
        return models.VirtualMachine(
            backend_id=backend_id,
            name=backend_vm['name'],
            state=models.VirtualMachine.States.OK,
            runtime_state=backend_vm['power_state'],
            cores=backend_vm['cpu']['count'],
            cores_per_socket=backend_vm['cpu']['cores_per_socket'],
            ram=backend_vm['memory']['size_MiB'],
            disk=self._get_total_disk(backend_vm['disks']),
            tools_installed=tools_installed,
            tools_state=tools_state,
        )
    def pull_clusters(self):
        """Sync vCenter clusters into local Cluster records (create/rename/delete)."""
        try:
            backend_clusters = self.client.list_clusters()
        except VMwareError as e:
            raise VMwareBackendError(e)
        backend_clusters_map = {item['cluster']: item for item in backend_clusters}
        frontend_clusters_map = {
            p.backend_id: p
            for p in models.Cluster.objects.filter(settings=self.settings)
        }
        stale_ids = set(frontend_clusters_map.keys()) - set(backend_clusters_map.keys())
        new_ids = set(backend_clusters_map.keys()) - set(frontend_clusters_map.keys())
        common_ids = set(backend_clusters_map.keys()) & set(
            frontend_clusters_map.keys()
        )
        # Rename clusters whose name changed on the backend.
        for item_id in common_ids:
            backend_item = backend_clusters_map[item_id]
            frontend_item = frontend_clusters_map[item_id]
            if frontend_item.name != backend_item['name']:
                frontend_item.name = backend_item['name']
                frontend_item.save(update_fields=['name'])
        # Create records for clusters seen only on the backend.
        for item_id in new_ids:
            item = backend_clusters_map[item_id]
            models.Cluster.objects.create(
                settings=self.settings, backend_id=item_id, name=item['name'],
            )
        # Remove records whose backend counterpart disappeared.
        models.Cluster.objects.filter(
            settings=self.settings, backend_id__in=stale_ids
        ).delete()
    def pull_networks(self):
        """Sync vCenter networks into local Network records (create/rename/delete)."""
        try:
            backend_networks = self.client.list_networks()
        except VMwareError as e:
            raise VMwareBackendError(e)
        backend_networks_map = {item['network']: item for item in backend_networks}
        frontend_networks_map = {
            p.backend_id: p
            for p in models.Network.objects.filter(settings=self.settings)
        }
        stale_ids = set(frontend_networks_map.keys()) - set(backend_networks_map.keys())
        new_ids = set(backend_networks_map.keys()) - set(frontend_networks_map.keys())
        common_ids = set(frontend_networks_map.keys()) & set(
            backend_networks_map.keys()
        )
        # Rename networks whose name changed on the backend.
        for item_id in common_ids:
            backend_item = backend_networks_map[item_id]
            frontend_item = frontend_networks_map[item_id]
            if frontend_item.name != backend_item['name']:
                frontend_item.name = backend_item['name']
                frontend_item.save(update_fields=['name'])
        # Create records for networks seen only on the backend.
        for item_id in new_ids:
            item = backend_networks_map[item_id]
            models.Network.objects.create(
                settings=self.settings,
                backend_id=item_id,
                name=item['name'],
                type=item['type'],
            )
        # Remove records whose backend counterpart disappeared.
        models.Network.objects.filter(
            settings=self.settings, backend_id__in=stale_ids
        ).delete()
    def pull_datastores(self):
        """Sync vCenter datastores into local Datastore records (create/update/delete)."""
        try:
            backend_datastores = self.client.list_datastores()
        except VMwareError as e:
            raise VMwareBackendError(e)
        backend_datastores_map = {
            item['datastore']: item for item in backend_datastores
        }
        frontend_datastores_map = {
            p.backend_id: p
            for p in models.Datastore.objects.filter(settings=self.settings)
        }
        stale_ids = set(frontend_datastores_map.keys()) - set(
            backend_datastores_map.keys()
        )
        new_ids = set(backend_datastores_map.keys()) - set(
            frontend_datastores_map.keys()
        )
        common_ids = set(backend_datastores_map.keys()) & set(
            frontend_datastores_map.keys()
        )
        # Create records for datastores seen only on the backend.
        for item_id in new_ids:
            datastore = self._backend_datastore_to_datastore(
                backend_datastores_map[item_id]
            )
            datastore.save()
        # Refresh name and capacity figures for datastores on both sides.
        for item_id in common_ids:
            backend_datastore = self._backend_datastore_to_datastore(
                backend_datastores_map[item_id]
            )
            frontend_datastore = frontend_datastores_map[item_id]
            fields = ('name', 'capacity', 'free_space')
            update_pulled_fields(frontend_datastore, backend_datastore, fields)
        # Remove records whose backend counterpart disappeared.
        models.Datastore.objects.filter(
            settings=self.settings, backend_id__in=stale_ids
        ).delete()
    def _backend_datastore_to_datastore(self, backend_datastore):
        """Map a REST API datastore spec to an unsaved local Datastore model.

        Capacity and free space arrive in bytes and are converted to MB;
        missing values are passed through unchanged.
        """
        capacity = backend_datastore.get('capacity')
        # Convert from bytes to MB
        if capacity:
            capacity /= 1024 * 1024
        free_space = backend_datastore.get('free_space')
        # Convert from bytes to MB
        if free_space:
            free_space /= 1024 * 1024
        return models.Datastore(
            settings=self.settings,
            backend_id=backend_datastore['datastore'],
            name=backend_datastore['name'],
            type=backend_datastore['type'],
            capacity=capacity,
            free_space=free_space,
        )
def get_vm_folders(self):
try:
return self.client.list_folders(folder_type='VIRTUAL_MACHINE')
except VMwareError as e:
raise VMwareBackendError(e)
def get_default_vm_folder(self):
"""
Currently VM folder is required for VM provisioning either from template or from scratch.
Therefore when folder is not specified for VM, we should use first available folder.
Please note that it is assumed that there's only one datacenter in this case.
:return: Virtual machine folder identifier.
:rtype: str
"""
return self.get_vm_folders()[0]['folder']
def get_default_resource_pool(self):
"""
Currently resource pool is required for VM provisioning from scratch if cluster is not specified.
Therefore we should use first available resource pool.
Please note that it is assumed that there's only one datacenter in this case.
:return: Resource pool identifier.
:rtype: str
"""
try:
return self.client.list_resource_pools()[0]['resource_pool']
except VMwareError as e:
raise VMwareBackendError(e)
def get_default_datastore(self):
"""
Currently datastore is required for VM provisioning either from template or from scratch.
Therefore when datastore is not specified for VM, we should use first available datastore.
Please note that it is assumed that there's only one datacenter in this case.
:return: Datastore identifier.
:rtype: str
"""
try:
return self.client.list_datastores()[0]['datastore']
except VMwareError as e:
raise VMwareBackendError(e)
    def pull_folders(self):
        """Sync vCenter VM folders into local Folder records (create/rename/delete)."""
        backend_folders = self.get_vm_folders()
        backend_folders_map = {item['folder']: item for item in backend_folders}
        frontend_folders_map = {
            p.backend_id: p
            for p in models.Folder.objects.filter(settings=self.settings)
        }
        stale_ids = set(frontend_folders_map.keys()) - set(backend_folders_map.keys())
        new_ids = set(backend_folders_map.keys()) - set(frontend_folders_map.keys())
        common_ids = set(backend_folders_map.keys()) & set(frontend_folders_map.keys())
        # Rename folders whose name changed on the backend.
        for item_id in common_ids:
            backend_item = backend_folders_map[item_id]
            frontend_item = frontend_folders_map[item_id]
            if frontend_item.name != backend_item['name']:
                frontend_item.name = backend_item['name']
                frontend_item.save(update_fields=['name'])
        # Create records for folders seen only on the backend.
        for item_id in new_ids:
            item = backend_folders_map[item_id]
            models.Folder.objects.create(
                settings=self.settings, backend_id=item_id, name=item['name'],
            )
        # Remove records whose backend counterpart disappeared.
        models.Folder.objects.filter(
            settings=self.settings, backend_id__in=stale_ids
        ).delete()
    def create_virtual_machine(self, vm):
        """
        Creates a virtual machine.

        Provisioning goes either through a content-library template or from
        scratch; afterwards the backend state (power state, disks) is mirrored
        into the local database and the ``vm_created`` signal is emitted.

        :param vm: Virtual machine to be created
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        """
        if vm.template:
            backend_id = self.create_virtual_machine_from_template(vm)
        else:
            backend_id = self.create_virtual_machine_from_scratch(vm)
        try:
            backend_vm = self.client.get_vm(backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)
        vm.backend_id = backend_id
        vm.runtime_state = backend_vm['power_state']
        vm.save(update_fields=['backend_id', 'runtime_state'])
        # Mirror the disks created by the backend into local Disk records.
        for disk in backend_vm['disks']:
            disk = self._backend_disk_to_disk(disk['value'], disk['key'])
            disk.vm = vm
            disk.service_settings = vm.service_settings
            disk.project = vm.project
            disk.save()
        # If virtual machine is not deployed from template, it does not have any networks.
        # Therefore we should create network interfaces manually according to VM spec.
        if not vm.template:
            for network in vm.networks.all():
                try:
                    self.client.create_nic(vm.backend_id, network.backend_id)
                except VMwareError as e:
                    raise VMwareBackendError(e)
        signals.vm_created.send(self.__class__, vm=vm)
        return vm
    def _get_vm_placement(self, vm):
        """Build the ``placement`` spec (folder + cluster/resource pool) for a VM.

        Falls back to the first available folder/resource pool when the VM
        does not specify them, logging a warning in each case.
        """
        placement = {}
        if vm.folder:
            placement['folder'] = vm.folder.backend_id
        else:
            logger.warning(
                'Folder is not specified for VM with ID: %s. '
                'Trying to assign default folder.',
                vm.id,
            )
            placement['folder'] = self.get_default_vm_folder()
        if vm.cluster:
            placement['cluster'] = vm.cluster.backend_id
        else:
            logger.warning(
                'Cluster is not specified for VM with ID: %s. '
                'Trying to assign default resource pool.',
                vm.id,
            )
            placement['resource_pool'] = self.get_default_resource_pool()
        return placement
def _get_template_nics(self, template):
"""
Fetch list of NIC IDs assigned to virtual machine template.
:param template: Virtual machine template.
:type template: :class:`waldur_vmware.models.Template`
:rtype: list[str]
"""
try:
backend_template = self.client.get_template_library_item(
template.backend_id
)
except VMwareError as e:
raise VMwareBackendError(e)
else:
return [nic['key'] for nic in backend_template['nics']]
def _get_vm_nics(self, vm):
"""
Serialize map of Ethernet network adapters for virtual machine template deployment.
:param vm: Virtual machine to be created.
:type vm: :class:`waldur_vmware.models.VirtualMachine`
:return: list[dict]
"""
nics = self._get_template_nics(vm.template)
networks = list(vm.networks.all())
if is_basic_mode():
if len(networks) != 1:
logger.warning(
'Skipping network assignment because VM does not have '
'exactly one network in basic mode. VM ID: %s',
vm.id,
)
return
elif len(nics) != 1:
logger.warning(
'Skipping network assignment because related template does '
'not have exactly one NIC in basic mode. VM ID: %s',
vm.id,
)
if len(networks) != len(nics):
logger.warning(
'It is not safe to update network assignment when '
'number of interfaces and networks do not match. VM ID: %s',
vm.id,
)
return [
{'key': nic, 'value': {'network': network.backend_id}}
for (nic, network) in zip(nics, networks)
]
    def create_virtual_machine_from_template(self, vm):
        """Deploy a VM from its content-library template and return its backend ID.

        Hardware (CPU/RAM) is customized from the VM model; disk/home storage
        and NIC-to-network mapping are included only when available.
        """
        spec = {
            'name': vm.name,
            'description': vm.description,
            'hardware_customization': {
                'cpu_update': {
                    'num_cpus': vm.cores,
                    'num_cores_per_socket': vm.cores_per_socket,
                },
                'memory_update': {'memory': vm.ram,},
            },
            'placement': self._get_vm_placement(vm),
        }
        if vm.datastore:
            spec['disk_storage'] = {'datastore': vm.datastore.backend_id}
            spec['vm_home_storage'] = {'datastore': vm.datastore.backend_id}
        nics = self._get_vm_nics(vm)
        if nics:
            spec['hardware_customization']['nics'] = nics
        try:
            return self.client.deploy_vm_from_template(vm.template.backend_id, spec)
        except VMwareError as e:
            raise VMwareBackendError(e)
    def create_virtual_machine_from_scratch(self, vm):
        """Create an empty VM (no template) and return its backend ID.

        CPU and memory hot-add are enabled so resources can be changed while
        the VM is running; a datastore is required, so the default one is
        used when the VM does not specify it.
        """
        spec = {
            'name': vm.name,
            'guest_OS': vm.guest_os,
            'cpu': {
                'count': vm.cores,
                'cores_per_socket': vm.cores_per_socket,
                'hot_add_enabled': True,
                'hot_remove_enabled': True,
            },
            'memory': {'size_MiB': vm.ram, 'hot_add_enabled': True,},
            'placement': self._get_vm_placement(vm),
        }
        if vm.datastore:
            spec['placement']['datastore'] = vm.datastore.backend_id
        else:
            spec['placement']['datastore'] = self.get_default_datastore()
        try:
            return self.client.create_vm(spec)
        except VMwareError as e:
            raise VMwareBackendError(e)
    def delete_virtual_machine(self, vm):
        """
        Deletes a virtual machine.

        :param vm: Virtual machine to be deleted
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :raises VMwareBackendError: if the VMware REST API call fails.
        """
        try:
            self.client.delete_vm(vm.backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)

    def start_virtual_machine(self, vm):
        """
        Powers on a powered-off or suspended virtual machine.

        :param vm: Virtual machine to be started
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :raises VMwareBackendError: if the VMware REST API call fails.
        """
        try:
            self.client.start_vm(vm.backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)

    def stop_virtual_machine(self, vm):
        """
        Powers off a powered-on or suspended virtual machine.

        :param vm: Virtual machine to be stopped
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :raises VMwareBackendError: if the VMware REST API call fails.
        """
        try:
            self.client.stop_vm(vm.backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)

    def reset_virtual_machine(self, vm):
        """
        Resets a powered-on virtual machine.

        :param vm: Virtual machine.
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :raises VMwareBackendError: if the VMware REST API call fails.
        """
        try:
            self.client.reset_vm(vm.backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)

    def suspend_virtual_machine(self, vm):
        """
        Suspends a powered-on virtual machine.

        :param vm: Virtual machine.
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :raises VMwareBackendError: if the VMware REST API call fails.
        """
        try:
            self.client.suspend_vm(vm.backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)

    def shutdown_guest(self, vm):
        """
        Issues a request to the guest operating system asking
        it to perform a clean shutdown of all services.

        Unlike :meth:`stop_virtual_machine`, this goes through the guest OS
        instead of cutting power at the hypervisor level.

        :param vm: Virtual machine.
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :raises VMwareBackendError: if the VMware REST API call fails.
        """
        try:
            self.client.shutdown_guest(vm.backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)

    def reboot_guest(self, vm):
        """
        Issues a request to the guest operating system asking it to perform a reboot.

        :param vm: Virtual machine.
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :raises VMwareBackendError: if the VMware REST API call fails.
        """
        try:
            self.client.reboot_guest(vm.backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)
    def is_virtual_machine_shutted_down(self, vm):
        """
        Return True if the guest OS reports the NOT_RUNNING power state.

        :param vm: Virtual machine.
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :rtype: bool
        """
        try:
            guest_power = self.client.get_guest_power(vm.backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)
        else:
            return (
                guest_power['state']
                == models.VirtualMachine.GuestPowerStates.NOT_RUNNING
            )

    def is_virtual_machine_tools_running(self, vm):
        """
        Check VMware tools status and update cache only if its running.
        If VMware tools are not running, state is not updated.
        It is needed in order to skip extra database updates.
        Otherwise VMware tools state in database would be updated
        from RUNNING to NOT RUNNING twice when optimistic update is used.
        """
        tools_state = self.get_vm_tools_state(vm.backend_id)
        result = tools_state == models.VirtualMachine.ToolsStates.RUNNING
        if result:
            # Persist only the tools_state column to avoid racing with
            # concurrent updates of other VM fields.
            vm.tools_state = tools_state
            vm.save(update_fields=['tools_state'])
            self.pull_virtual_machine_runtime_state(vm)
        return result

    def pull_virtual_machine_runtime_state(self, vm):
        """
        Fetch the power state from the backend and store it locally
        when it differs from the cached value.

        :param vm: Virtual machine.
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        """
        try:
            backend_vm = self.client.get_vm(vm.backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)
        else:
            backend_power_state = backend_vm['power_state']
            if backend_power_state != vm.runtime_state:
                vm.runtime_state = backend_power_state
                vm.save(update_fields=['runtime_state'])

    def is_virtual_machine_tools_not_running(self, vm):
        """
        Return True when VMware tools are reported NOT_RUNNING and cache
        that state locally (mirror of is_virtual_machine_tools_running).
        """
        tools_state = self.get_vm_tools_state(vm.backend_id)
        result = tools_state == models.VirtualMachine.ToolsStates.NOT_RUNNING
        if result:
            vm.tools_state = tools_state
            vm.save(update_fields=['tools_state'])
        return result

    def update_virtual_machine(self, vm):
        """
        Updates CPU and RAM of virtual machine.

        Emits the ``vm_updated`` signal after both updates succeed.
        """
        self.update_cpu(vm)
        self.update_memory(vm)
        signals.vm_updated.send(self.__class__, vm=vm)
    def update_cpu(self, vm):
        """
        Updates CPU of virtual machine.

        The backend is only called when the desired CPU configuration
        differs from the current one, to avoid no-op reconfigurations.

        :param vm: Virtual machine.
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :raises VMwareBackendError: if the VMware REST API call fails.
        """
        try:
            cpu_spec = self.client.get_cpu(vm.backend_id)
            if (
                cpu_spec['cores_per_socket'] != vm.cores_per_socket
                or cpu_spec['count'] != vm.cores
            ):
                self.client.update_cpu(
                    vm.backend_id,
                    {'cores_per_socket': vm.cores_per_socket, 'count': vm.cores,},
                )
        except VMwareError as e:
            raise VMwareBackendError(e)

    def update_memory(self, vm):
        """
        Updates RAM of virtual machine.

        The backend is only called when the desired RAM size differs
        from the current one.

        :param vm: Virtual machine.
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :raises VMwareBackendError: if the VMware REST API call fails.
        """
        try:
            memory_spec = self.client.get_memory(vm.backend_id)
            if memory_spec['size_MiB'] != vm.ram:
                self.client.update_memory(vm.backend_id, {'size_MiB': vm.ram})
        except VMwareError as e:
            raise VMwareBackendError(e)
    def create_port(self, port):
        """
        Creates an Ethernet port for given VM and network.

        On success, the backend identifier is stored on the local model.

        :param port: Port to be created
        :type port: :class:`waldur_vmware.models.Port`
        :return: The same port instance with ``backend_id`` populated.
        """
        try:
            backend_id = self.client.create_nic(
                port.vm.backend_id, port.network.backend_id
            )
        except VMwareError as e:
            raise VMwareBackendError(e)
        else:
            port.backend_id = backend_id
            port.save(update_fields=['backend_id'])
            return port

    def delete_port(self, port):
        """
        Deletes an Ethernet port.

        :param port: Port to be deleted.
        :type port: :class:`waldur_vmware.models.Port`
        :raises VMwareBackendError: if the VMware REST API call fails.
        """
        try:
            self.client.delete_nic(port.vm.backend_id, port.backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)

    @log_backend_action()
    def pull_port(self, port, update_fields=None):
        """
        Pull Ethernet port from REST API and update its information in local database.

        :param port: Port to be updated.
        :type port: :class:`waldur_vmware.models.Port`
        :param update_fields: iterable of fields to be updated
        :return: None
        """
        import_time = timezone.now()
        imported_port = self.import_port(
            port.vm.backend_id, port.backend_id, save=False
        )

        port.refresh_from_db()
        # Skip the update if the local row was modified after the import
        # started — a concurrent writer has fresher data.
        if port.modified < import_time:
            if not update_fields:
                update_fields = models.Port.get_backend_fields()
            update_pulled_fields(port, imported_port, update_fields)
    def import_port(
        self,
        backend_vm_id,
        backend_port_id,
        save=True,
        service_settings=None,
        project=None,
    ):
        """
        Import Ethernet port by its ID.

        :param backend_vm_id: Virtual machine identifier
        :type backend_vm_id: str
        :param backend_port_id: Ethernet port identifier
        :type backend_port_id: str
        :param save: Save object in the database
        :type save: bool
        :param service_settings: Optional service settings model object
        :param project: Optional project model object
        :rtype: :class:`waldur_vmware.models.Port`
        """
        try:
            backend_port = self.client.get_nic(backend_vm_id, backend_port_id)
            # The REST payload does not carry its own identifier; inject it
            # so that _backend_port_to_port can use a single dict.
            backend_port['nic'] = backend_port_id
        except VMwareError as e:
            raise VMwareBackendError(e)

        port = self._backend_port_to_port(backend_port)
        port.service_settings = service_settings
        port.project = project
        if save:
            port.save()

        return port

    def _backend_port_to_port(self, backend_port):
        """
        Build database model object for Ethernet port from REST API spec.

        The returned object is not saved to the database.

        :param backend_port: Ethernet port specification
        :type backend_port: dict
        :rtype: :class:`waldur_vmware.models.Port`
        """
        return models.Port(
            backend_id=backend_port['nic'],
            name=backend_port['label'],
            # MAC address is optional
            mac_address=backend_port.get('mac_address'),
            state=models.Port.States.OK,
            runtime_state=backend_port['state'],
        )
def pull_vm_ports(self, vm):
try:
backend_ports = self.client.list_nics(vm.backend_id)
except VMwareError as e:
raise VMwareBackendError(e)
backend_ports_map = {item['nic']: item for item in backend_ports}
frontend_ports_map = {
p.backend_id: p for p in models.Port.objects.filter(vm=vm)
}
networks_map = {
p.backend_id: p
for p in models.Network.objects.filter(settings=vm.service_settings)
}
stale_ids = set(frontend_ports_map.keys()) - set(backend_ports_map.keys())
new_ids = set(backend_ports_map.keys()) - set(frontend_ports_map.keys())
common_ids = set(backend_ports_map.keys()) & set(frontend_ports_map.keys())
for item_id in new_ids:
backend_port = backend_ports_map[item_id]
port = self._backend_port_to_port(backend_port)
port.service_settings = vm.service_settings
port.port = vm.port
network_id = backend_port['backing']['network']
port.network = networks_map.get(network_id)
port.vm = vm
port.save()
for item_id in common_ids:
backend_port = self._backend_port_to_port(backend_ports_map[item_id])
frontend_port = frontend_ports_map[item_id]
fields = ('mac_address', 'runtime_state')
update_pulled_fields(frontend_port, backend_port, fields)
models.Port.objects.filter(vm=vm, backend_id__in=stale_ids).delete()
    def create_disk(self, disk):
        """
        Creates a virtual disk.

        On success the backend identifier is stored locally and the
        ``vm_updated`` signal is emitted for the owning VM.

        :param disk: Virtual disk to be created
        :type disk: :class:`waldur_vmware.models.Disk`
        :return: The same disk instance with ``backend_id`` populated.
        """
        spec = {
            'new_vmdk': {
                # Convert from mebibytes to bytes because VMDK is specified in bytes
                'capacity': 1024
                * 1024
                * disk.size,
            }
        }

        try:
            backend_id = self.client.create_disk(disk.vm.backend_id, spec)
        except VMwareError as e:
            raise VMwareBackendError(e)
        else:
            disk.backend_id = backend_id
            disk.save(update_fields=['backend_id'])
            signals.vm_updated.send(self.__class__, vm=disk.vm)
            return disk

    def delete_disk(self, disk, delete_vmdk=True):
        """
        Deletes a virtual disk.

        :param disk: Virtual disk to be deleted
        :type disk: :class:`waldur_vmware.models.Disk`
        :param delete_vmdk: Delete backing VMDK file.
        """
        # Fetch the SOAP disk object *before* detaching it via REST,
        # because the backing file name is needed for VMDK removal below.
        backend_disk = self.get_backend_disk(disk)

        try:
            self.client.delete_disk(disk.vm.backend_id, disk.backend_id)
        except VMwareError as e:
            raise VMwareBackendError(e)

        if delete_vmdk:
            # REST API only detaches the disk; the backing VMDK file must be
            # removed through the SOAP virtual disk manager.
            vdm = self.soap_client.content.virtualDiskManager
            task = vdm.DeleteVirtualDisk(
                name=backend_disk.backing.fileName,
                datacenter=self.get_disk_datacenter(backend_disk),
            )
            try:
                pyVim.task.WaitForTask(task)
            except Exception:
                logger.exception('Unable to delete VMware disk. Disk ID: %s.', disk.id)
                raise VMwareBackendError('Unknown error.')
        signals.vm_updated.send(self.__class__, vm=disk.vm)
    def extend_disk(self, disk):
        """
        Increase disk capacity.

        Performed via the SOAP API (ReconfigVM_Task) because the REST API
        used elsewhere in this backend does not expose disk resizing.

        :param disk: Virtual disk to be extended.
        :type disk: :class:`waldur_vmware.models.Disk`
        :raises VMwareBackendError: if the reconfiguration task fails.
        """
        backend_vm = self.get_backend_vm(disk.vm)
        backend_disk = self.get_backend_disk(disk)

        virtual_disk_spec = vim.vm.device.VirtualDeviceSpec()
        virtual_disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
        virtual_disk_spec.device = backend_disk
        # disk.size is stored in MiB; SOAP expects both KB and bytes.
        virtual_disk_spec.device.capacityInKB = disk.size * 1024
        virtual_disk_spec.device.capacityInBytes = disk.size * 1024 * 1024

        spec = vim.vm.ConfigSpec()
        spec.deviceChange = [virtual_disk_spec]
        task = backend_vm.ReconfigVM_Task(spec=spec)
        try:
            pyVim.task.WaitForTask(task)
        except Exception:
            logger.exception('Unable to extend VMware disk. Disk ID: %s.', disk.id)
            raise VMwareBackendError('Unknown error.')
        signals.vm_updated.send(self.__class__, vm=disk.vm)
    def get_object(self, vim_type, vim_id):
        """
        Get object by type and ID from SOAP client.

        Returns None when no object with the given managed object ID exists.
        Note: this performs a linear scan over all objects of the given type.
        """
        content = self.soap_client.content
        try:
            items = [
                item
                for item in content.viewManager.CreateContainerView(
                    content.rootFolder, [vim_type], recursive=True
                ).view
            ]
        except Exception:
            logger.exception(
                'Unable to get VMware object. Type: %s, ID: %s.', vim_type, vim_id
            )
            raise VMwareBackendError('Unknown error.')
        for item in items:
            if item._moId == vim_id:
                return item

    def get_backend_vm(self, vm):
        """
        Get virtual machine object from SOAP client.

        :param vm: Virtual machine.
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :rtype: :class:`pyVmomi.VmomiSupport.vim.VirtualMachine`
        """
        return self._get_backend_vm(vm.backend_id)

    def get_vm_tools_state(self, backend_id):
        """
        Get running status of VMware Tools.

        Returns None for tools states other than the three mapped below.

        :param backend_id: Virtual machine identifier.
        :type backend_id: str
        :rtype: str
        """
        backend_vm = self._get_backend_vm(backend_id)
        backend_tools_state = backend_vm.guest.toolsRunningStatus
        if backend_tools_state == 'guestToolsExecutingScripts':
            return models.VirtualMachine.ToolsStates.STARTING
        elif backend_tools_state == 'guestToolsNotRunning':
            return models.VirtualMachine.ToolsStates.NOT_RUNNING
        elif backend_tools_state == 'guestToolsRunning':
            return models.VirtualMachine.ToolsStates.RUNNING

    def get_vm_tools_installed(self, backend_id):
        """
        Check if VMware Tools are installed.

        :param backend_id: Virtual machine identifier.
        :type backend_id: str
        :rtype: bool
        """
        backend_vm = self._get_backend_vm(backend_id)
        return backend_vm.config.tools.toolsInstallType != 'guestToolsTypeUnknown'

    def _get_backend_vm(self, backend_id):
        # Thin wrapper used by the public getters above.
        return self.get_object(vim.VirtualMachine, backend_id)
    def get_backend_disk(self, disk):
        """
        Get virtual disk object from SOAP client.

        Returns None when no matching device is found on the VM.

        :param disk: Virtual disk.
        :type disk: :class:`waldur_vmware.models.Disk`
        :rtype: :class:`pyVmomi.VmomiSupport.vim.vm.device.VirtualDisk`
        """
        backend_vm = self.get_backend_vm(disk.vm)
        for device in backend_vm.config.hardware.device:
            # Device keys are integers in SOAP while backend_id is stored
            # as a string, hence the str() conversion before comparison.
            if (
                isinstance(device, vim.VirtualDisk)
                and str(device.key) == disk.backend_id
            ):
                return device

    def get_disk_datacenter(self, backend_disk):
        """
        Find the datacenter where virtual disk is located.

        Walks the parent chain of the disk's datastore until a Datacenter
        node is reached (or returns None if the chain ends first).

        :param backend_disk: Virtual disk object returned by SOAP API.
        :type backend_disk: :class:`pyVmomi.VmomiSupport.vim.vm.device.VirtualDisk`
        :return: VMware datacenter where disk is located.
        :rtype: :class:`pyVmomi.VmomiSupport.vim.Datacenter`
        """
        parent = backend_disk.backing.datastore.parent
        while parent and not isinstance(parent, vim.Datacenter):
            parent = parent.parent
        return parent
    @log_backend_action()
    def pull_disk(self, disk, update_fields=None):
        """
        Pull virtual disk from REST API and update its information in local database.

        :param disk: Virtual disk database object.
        :type disk: :class:`waldur_vmware.models.Disk`
        :param update_fields: iterable of fields to be updated
        :return: None
        """
        import_time = timezone.now()
        imported_disk = self.import_disk(
            disk.vm.backend_id, disk.backend_id, save=False
        )

        disk.refresh_from_db()
        # Skip the update if the local row was modified after the import
        # started — a concurrent writer has fresher data.
        if disk.modified < import_time:
            if not update_fields:
                update_fields = models.Disk.get_backend_fields()
            update_pulled_fields(disk, imported_disk, update_fields)

    def import_disk(
        self, backend_vm_id, backend_disk_id, save=True, project=None,
    ):
        """
        Import virtual disk by its ID.

        :param backend_vm_id: Virtual machine identifier
        :type backend_vm_id: str
        :param backend_disk_id: Virtual disk identifier
        :type backend_disk_id: str
        :param save: Save object in the database
        :type save: bool
        :param project: Project model object
        :rtype: :class:`waldur_vmware.models.Disk`
        """
        try:
            backend_disk = self.client.get_disk(backend_vm_id, backend_disk_id)
        except VMwareError as e:
            raise VMwareBackendError(e)

        disk = self._backend_disk_to_disk(backend_disk, backend_disk_id)
        disk.service_settings = self.settings
        disk.project = project
        if save:
            disk.save()

        return disk
def _backend_disk_to_disk(self, backend_disk, backend_disk_id):
"""
Build database model object for virtual disk from REST API spec.
:param backend_disk: virtual disk specification
:type backend_disk: dict
:param backend_disk_id: Virtual disk identifier
:type backend_disk_id: str
:rtype: :class:`waldur_vmware.models.Disk`
"""
return models.Disk(
backend_id=backend_disk_id,
name=backend_disk['label'],
# Convert disk size from bytes to MiB
size=backend_disk['capacity'] / 1024 / 1024,
state=models.Disk.States.OK,
)
    def get_console_url(self, vm):
        """
        Generates a virtual machine's remote console URL (VMRC)

        A clone session ticket is acquired via SOAP and embedded into
        the ``vmrc://`` URL understood by the VMware Remote Console client.

        :param vm: Virtual machine.
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :rtype: str
        """
        ticket = self.soap_client.content.sessionManager.AcquireCloneTicket()
        return 'vmrc://clone:{ticket}@{host}/?moid={vm}'.format(
            ticket=ticket, host=self.host, vm=vm.backend_id
        )

    def get_web_console_url(self, vm):
        """
        Generates a virtual machine's web console URL (WMKS)

        A one-time MKS ticket is acquired from the VM and turned into a
        websocket URL for the browser-based console.

        :param vm: Virtual machine.
        :type vm: :class:`waldur_vmware.models.VirtualMachine`
        :rtype: str
        """
        backend_vm = self.get_backend_vm(vm)
        ticket = backend_vm.AcquireMksTicket()
        params = {
            'host': ticket.host,
            'port': ticket.port,
            'ticket': ticket.ticket,
            'cfgFile': ticket.cfgFile,
            'thumbprint': ticket.sslThumbprint,
            'vmId': vm.backend_id,
            'encoding': 'UTF-8',
        }
        return 'wss://{host}/ui/webconsole/authd?{params}'.format(
            host=ticket.host, params=urlencode(params)
        )
| mit | 68c34d03de223ab92666e263e3711e7d | 33.975369 | 105 | 0.579319 | 4.190027 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_mastermind/marketplace_script/tasks.py | 2 | 1169 | from celery import shared_task
from django.conf import settings
from waldur_mastermind.marketplace import models
from waldur_mastermind.marketplace_script import PLUGIN_NAME, serializers, utils
@shared_task(name='waldur_marketplace_script.pull_resources')
def pull_resources():
    """Schedule a pull task for every script-based marketplace resource
    whose offering defines a 'pull' script."""
    for resource in models.Resource.objects.filter(
        offering__type=PLUGIN_NAME,
        offering__plugin_options__has_key='pull',
        state__in=[models.Resource.States.OK, models.Resource.States.ERRED],
    ):
        pull_resource.delay(resource.id)
@shared_task
def pull_resource(resource_id):
    """
    Execute the offering's 'pull' script for one marketplace resource.

    Serialized resource attributes are exposed to the script as upper-cased
    environment variables, optionally overridden by the offering's
    ``environ`` plugin option.

    :param resource_id: primary key of the marketplace resource.
    """
    resource = models.Resource.objects.get(id=resource_id)
    options = resource.offering.plugin_options
    serializer = serializers.ResourceSerializer(instance=resource)
    # Fix: iterate over (key, value) pairs explicitly. Iterating the dict
    # directly yields only keys, so ``for key, value in serializer.data``
    # would try to unpack each key string and fail.
    environment = {
        key.upper(): str(value) for key, value in serializer.data.items()
    }
    if isinstance(options.get('environ'), dict):
        environment.update(options['environ'])
    language = options['language']
    image = settings.WALDUR_MARKETPLACE_SCRIPT['DOCKER_IMAGES'].get(language)
    utils.execute_script(
        image=image, command=language, src=options['pull'], environment=environment
    )
| mit | db268e7c0167ce9ae11281ffa5fa0989 | 35.53125 | 83 | 0.731394 | 3.807818 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_core/core/management/commands/print_commands.py | 1 | 1159 | from argparse import ArgumentParser
from django.core.management import get_commands, load_command_class
from django.core.management.base import BaseCommand
# Meta-commands that document the CLI itself; excluded from the generated
# guide to avoid self-referential output.
BLACK_LIST = [
    'print_commands',
    'print_settings',
    'print_features',
    'print_schema',
    'export_api_docs',
    'print_events',
    'print_templates',
]
class Command(BaseCommand):
    """Print a Markdown-formatted CLI guide for Waldur management commands."""

    def handle(self, *args, **options):
        # Collect (name, command instance) pairs for Waldur-specific
        # commands only, skipping the meta-commands in BLACK_LIST.
        commands = []
        for name, path in get_commands().items():
            if 'waldur' not in path or name in BLACK_LIST:
                continue
            command = load_command_class(path, name)
            commands.append((name, command))
        print('# CLI guide', end='\n\n')
        # One Markdown section per command, sorted alphabetically by name.
        for name, command in sorted(commands, key=lambda x: x[0]):
            # add_help=False keeps argparse from injecting -h/--help into
            # the printed usage block.
            parser = ArgumentParser(prog=f'waldur {name}', add_help=False)
            command.add_arguments(parser)
            print('##', name)
            print()
            # NOTE(review): the replace() below appears intended to collapse
            # indentation in help text — verify the literal spacing against
            # the original file, as whitespace may have been mangled here.
            print(command.help.strip().replace(' ', ' '))
            print()
            if parser._actions:
                print('```bash')
                parser.print_help()
                print('```')
                print()
| mit | 783962b0b8c8fe0f371fefc9b3897462 | 30.324324 | 74 | 0.553063 | 4.184116 | false | false | false | false |
getavalon/core | avalon/vendor/requests/packages/urllib3/util/request.py | 189 | 3705 | from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b, integer_types
from ..exceptions import UnrewindableBodyError
# Default value sent when the caller requests generic encoding support.
ACCEPT_ENCODING = 'gzip,deflate'

# Sentinel recorded when body.tell() raised, so rewind_body() can tell
# "position unknown" apart from "no position requested" (None).
_FAILEDTELL = object()
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
                 basic_auth=None, proxy_basic_auth=None, disable_cache=None):
    """
    Shortcuts for generating request headers.

    :param keep_alive:
        If ``True``, adds 'connection: keep-alive' header.

    :param accept_encoding:
        Can be a boolean, list, or string.
        ``True`` translates to 'gzip,deflate'.
        List will get joined by comma.
        String will be used as provided.

    :param user_agent:
        String representing the user-agent you want, such as
        "python-urllib3/0.6"

    :param basic_auth:
        Colon-separated username:password string for 'authorization: basic ...'
        auth header.

    :param proxy_basic_auth:
        Colon-separated username:password string for 'proxy-authorization: basic ...'
        auth header.

    :param disable_cache:
        If ``True``, adds 'cache-control: no-cache' header.

    Example::

        >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
        {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
        >>> make_headers(accept_encoding=True)
        {'accept-encoding': 'gzip,deflate'}
    """
    headers = {}

    if accept_encoding:
        # Normalize the three accepted forms into a single header string.
        if isinstance(accept_encoding, str):
            headers['accept-encoding'] = accept_encoding
        elif isinstance(accept_encoding, list):
            headers['accept-encoding'] = ','.join(accept_encoding)
        else:
            headers['accept-encoding'] = ACCEPT_ENCODING

    if user_agent:
        headers['user-agent'] = user_agent

    if keep_alive:
        headers['connection'] = 'keep-alive'

    if basic_auth:
        encoded = b64encode(b(basic_auth)).decode('utf-8')
        headers['authorization'] = 'Basic ' + encoded

    if proxy_basic_auth:
        encoded = b64encode(b(proxy_basic_auth)).decode('utf-8')
        headers['proxy-authorization'] = 'Basic ' + encoded

    if disable_cache:
        headers['cache-control'] = 'no-cache'

    return headers
def set_file_position(body, pos):
    """
    If a position is provided, move file to that point.
    Otherwise, we'll attempt to record a position for future use.
    """
    if pos is not None:
        rewind_body(body, pos)
        return pos

    tell = getattr(body, 'tell', None)
    if tell is None:
        # Body does not support tell(); nothing to record.
        return None
    try:
        return tell()
    except (IOError, OSError):
        # Remember that tell() failed so a later rewind attempt can raise
        # a meaningful error instead of silently doing nothing.
        return _FAILEDTELL
def rewind_body(body, body_pos):
    """
    Attempt to rewind body to a certain position.
    Primarily used for request redirects and retries.

    Raises ``UnrewindableBodyError`` when the body cannot seek or when the
    original position could not be recorded (``_FAILEDTELL`` sentinel).

    :param body:
        File-like object that supports seek.

    :param int pos:
        Position to seek to in file.
    """
    body_seek = getattr(body, 'seek', None)
    if body_seek is not None and isinstance(body_pos, integer_types):
        try:
            body_seek(body_pos)
        except (IOError, OSError):
            raise UnrewindableBodyError("An error occurred when rewinding request "
                                        "body for redirect/retry.")
    elif body_pos is _FAILEDTELL:
        raise UnrewindableBodyError("Unable to record file position for rewinding "
                                    "request body during a redirect/retry.")
    else:
        raise ValueError("body_pos must be of type integer, "
                         "instead it was %s." % type(body_pos))
| mit | 61aaa10ce3b5da58817a2bf6e86cf837 | 30.398305 | 85 | 0.607287 | 4.04918 | false | false | false | false |
getavalon/core | avalon/vendor/requests/packages/chardet/constants.py | 2996 | 1335 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Module-level debug flag (0 disables debug output).
_debug = 0

# Detection outcome codes.
eDetecting = 0
eFoundIt = 1
eNotMe = 2

# Coding state machine states.
eStart = 0
eError = 1
eItsMe = 2

# Confidence threshold — presumably used to short-circuit detection once a
# prober is confident enough; confirm against the detector implementation.
SHORTCUT_THRESHOLD = 0.95
| mit | 62207a1c313c12d640f08ce86194decc | 33.230769 | 69 | 0.698876 | 4.224684 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_openstack/openstack/urls.py | 2 | 1139 | from . import views
def register_in(router):
    """Attach every OpenStack view set to the given DRF router.

    Registration order is preserved from the original hand-written list.
    """
    routes = (
        (r'openstack-images', views.ImageViewSet, 'openstack-image'),
        (r'openstack-flavors', views.FlavorViewSet, 'openstack-flavor'),
        (r'openstack-volume-types', views.VolumeTypeViewSet, 'openstack-volume-type'),
        (r'openstack-tenants', views.TenantViewSet, 'openstack-tenant'),
        (r'openstack-security-groups', views.SecurityGroupViewSet, 'openstack-sgp'),
        (r'openstack-ports', views.PortViewSet, 'openstack-port'),
        (r'openstack-floating-ips', views.FloatingIPViewSet, 'openstack-fip'),
        (r'openstack-routers', views.RouterViewSet, 'openstack-router'),
        (r'openstack-networks', views.NetworkViewSet, 'openstack-network'),
        (r'openstack-subnets', views.SubNetViewSet, 'openstack-subnet'),
    )
    for prefix, viewset, basename in routes:
        router.register(prefix, viewset, basename=basename)
| mit | abe4218c086dfb933d03728f95690b5d | 32.5 | 88 | 0.681299 | 4.156934 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_mastermind/invoices/migrations/0033_downtime_offering_and_resource.py | 2 | 1087 | # Generated by Django 2.2.10 on 2020-03-19 13:51
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional offering and resource links to ServiceDowntime,
    restricted to billable offerings/resources."""

    dependencies = [
        ('marketplace', '0013_increase_limit_range'),
        ('invoices', '0032_genericinvoiceitem_name'),
    ]

    operations = [
        migrations.AddField(
            model_name='servicedowntime',
            name='offering',
            field=models.ForeignKey(
                blank=True,
                limit_choices_to={'billable': True},
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to='marketplace.Offering',
            ),
        ),
        migrations.AddField(
            model_name='servicedowntime',
            name='resource',
            field=models.ForeignKey(
                blank=True,
                limit_choices_to={'offering__billable': True},
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to='marketplace.Resource',
            ),
        ),
    ]
| mit | 84fbb25fbc5ce6d71085ccb6b006608d | 28.378378 | 62 | 0.532659 | 4.548117 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_auth_social/migrations/0001_initial.py | 2 | 1285 | # Generated by Django 2.2.24 on 2021-08-09 12:51
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: OAuthToken storing per-user, per-provider
    access/refresh token pairs (one row per user+provider)."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='OAuthToken',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name='ID',
                    ),
                ),
                ('provider', models.CharField(max_length=32)),
                ('access_token', models.TextField()),
                ('refresh_token', models.TextField()),
                (
                    'user',
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='auth_profile',
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={'unique_together': {('user', 'provider')},},
        ),
    ]
| mit | 3679217862011af2068e08643330d3b8 | 28.883721 | 68 | 0.451362 | 5.266393 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_keycloak_rancher/tasks.py | 1 | 1148 | import logging
from celery import shared_task
from django.conf import settings
from waldur_keycloak.models import ProjectGroup
from waldur_rancher.enums import ClusterRoles
from waldur_rancher.models import Cluster
logger = logging.getLogger(__name__)
@shared_task(name='waldur_keycloak_rancher.sync_groups')
def sync_groups():
    """Grant Rancher cluster-member roles to Keycloak project groups."""
    if not settings.WALDUR_KEYCLOAK['ENABLED']:
        logger.debug('Skipping Keycloak synchronization because plugin is disabled.')
        return

    for project_group in ProjectGroup.objects.all():
        project = project_group.project
        group_principal = f'keycloakoidc_group://{project.name}'
        for cluster in Cluster.objects.filter(project=project):
            backend = cluster.get_backend()
            try:
                backend.get_or_create_cluster_group_role(
                    group_principal,
                    cluster.backend_id,
                    ClusterRoles.cluster_member,
                )
            except Exception:
                # Best-effort: a failure for one cluster must not block
                # synchronization of the remaining ones.
                logger.warning(
                    'Unable to create cluster group for project %s and cluster %s',
                    project,
                    cluster,
                )
| mit | 44ee67eac4eb2655403e15d16423442b | 32.764706 | 85 | 0.614983 | 4.432432 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_openstack/openstack_tenant/migrations/0006_error_traceback.py | 2 | 1221 | # Generated by Django 2.2.13 on 2020-10-07 11:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an ``error_traceback`` text field to every tenant-level
    resource model."""

    dependencies = [
        ('openstack_tenant', '0005_internalip_allowed_address_pairs'),
    ]

    # The same blank-able text field is added to each resource model;
    # a fresh field instance is created per model by the comprehension.
    operations = [
        migrations.AddField(
            model_name=model_name,
            name='error_traceback',
            field=models.TextField(blank=True),
        )
        for model_name in (
            'backup',
            'backupschedule',
            'instance',
            'snapshot',
            'snapshotschedule',
            'volume',
        )
    ]
| mit | 215153db030c5df01b287e9385eae423 | 27.395349 | 70 | 0.552007 | 4.660305 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_mastermind/notifications/migrations/0001_initial.py | 2 | 2134 | # Generated by Django 2.2.13 on 2020-10-21 21:47
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
from django.conf import settings
from django.db import migrations, models
import waldur_core.core.fields
import waldur_core.core.validators
class Migration(migrations.Migration):
    """Initial migration: Notification model with subject/body, the query
    used to select recipients, resolved email list, and optional author."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Notification',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name='ID',
                    ),
                ),
                ('uuid', waldur_core.core.fields.UUIDField()),
                (
                    'created',
                    model_utils.fields.AutoCreatedField(
                        default=django.utils.timezone.now, editable=False
                    ),
                ),
                (
                    'subject',
                    models.CharField(
                        max_length=1000,
                        validators=[waldur_core.core.validators.validate_name],
                    ),
                ),
                (
                    'body',
                    models.TextField(
                        validators=[waldur_core.core.validators.validate_name]
                    ),
                ),
                ('query', django.contrib.postgres.fields.jsonb.JSONField()),
                ('emails', django.contrib.postgres.fields.jsonb.JSONField()),
                (
                    'author',
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={'abstract': False,},
        ),
    ]
| mit | 15ec61dc91198adc9e7adfc92f9f1bf0 | 30.382353 | 79 | 0.450328 | 5.295285 | false | false | false | false |
opennode/waldur-mastermind | src/waldur_core/quotas/tests/unittests/test_models.py | 2 | 3811 | import random
from django.test import TestCase
from waldur_core.quotas import exceptions
from waldur_core.quotas.tests.models import GrandparentModel
class QuotaModelMixinTest(TestCase):
    """Tests for quota defaults, usage validation and quota aggregation."""

    def test_default_quota_is_unlimited(self):
        instance = GrandparentModel.objects.create()
        # -1 is the sentinel value for "no limit".
        self.assertEqual(instance.quotas.get(name='regular_quota').limit, -1)

    def test_quota_with_default_limit(self):
        instance = GrandparentModel.objects.create()
        self.assertEqual(
            instance.quotas.get(name='quota_with_default_limit').limit, 100
        )

    def test_add_usage_validates_with_unlimited_quota(self):
        instance = GrandparentModel.objects.create()
        try:
            instance.add_quota_usage('regular_quota', 10, validate=True)
        except exceptions.QuotaValidationError:
            self.fail(
                'add_quota_usage should not raise exception if quota is unlimited'
            )

    def test_add_usage_skips_validation_with_limited_quota_but_negative_delta(self):
        instance = GrandparentModel.objects.create()
        try:
            # Decreasing usage can never breach a limit, so it must pass.
            instance.add_quota_usage('quota_with_default_limit', -10, validate=True)
        except exceptions.QuotaValidationError:
            self.fail('add_quota_usage should not raise exception if delta is negative')

    def test_add_usage_fails_if_quota_is_over_limit(self):
        instance = GrandparentModel.objects.create()
        self.assertRaises(
            exceptions.QuotaValidationError,
            instance.add_quota_usage,
            quota_name='quota_with_default_limit',
            usage_delta=200,
            validate=True,
        )

    def test_quotas_sum_calculation_if_all_values_are_positive(self):
        # we have 3 memberships:
        instances = [GrandparentModel.objects.create() for _ in range(3)]
        # each membership has non zero quotas:
        for instance in instances:
            for quota_name in instance.get_quotas_names():
                limit = random.choice([10, 20, 30, 40])  # noqa: S311
                instance.set_quota_limit(quota_name, limit)
                instance.set_quota_usage(quota_name, limit / 2)
        owners = instances[:2]

        sum_of_quotas = GrandparentModel.get_sum_of_quotas_as_dict(owners)

        # Expected result: limits and usages summed independently over the
        # selected owners; usages are reported under '<name>_usage' keys.
        expected_sum_of_quotas = {}
        for quota_name in GrandparentModel.get_quotas_names():
            expected_sum_of_quotas[quota_name] = sum(
                owner.quotas.get(name=quota_name).limit for owner in owners
            )
            expected_sum_of_quotas[quota_name + '_usage'] = sum(
                owner.quotas.get(name=quota_name).usage for owner in owners
            )

        self.assertEqual(expected_sum_of_quotas, sum_of_quotas)

    def test_quotas_sum_calculation_if_some_limit_is_negative(self):
        # A single unlimited (-1) quota makes the aggregate unlimited.
        instances = [GrandparentModel.objects.create() for _ in range(3)]
        instances[0].set_quota_limit('regular_quota', -1)
        instances[1].set_quota_limit('regular_quota', 10)
        instances[2].set_quota_limit('regular_quota', 30)

        sum_of_quotas = GrandparentModel.get_sum_of_quotas_as_dict(
            instances, quota_names=['regular_quota'], fields=['limit']
        )

        self.assertEqual({'regular_quota': -1}, sum_of_quotas)

    def test_quotas_sum_calculation_if_all_limits_are_negative(self):
        instances = [GrandparentModel.objects.create() for _ in range(3)]
        instances[0].set_quota_limit('regular_quota', -1)
        instances[1].set_quota_limit('regular_quota', -1)
        instances[2].set_quota_limit('regular_quota', -1)

        sum_of_quotas = GrandparentModel.get_sum_of_quotas_as_dict(
            instances, quota_names=['regular_quota'], fields=['limit']
        )

        self.assertEqual({'regular_quota': -1}, sum_of_quotas)
| mit | ca20b787c2032daf7a34bd5441154826 | 40.879121 | 88 | 0.640252 | 3.78827 | false | true | false | false |
opennode/waldur-mastermind | src/waldur_openstack/openstack_tenant/migrations/0020_create_or_update_security_group_rules.py | 2 | 2671 | from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.db import migrations
def create_or_update_security_group_rules(apps, schema_editor):
    """Copy security group rules from openstack resources to openstack_tenant properties.

    For every openstack SecurityGroupRule resource with a backend_id, find the
    matching openstack_tenant SecurityGroup property (via the tenant's service
    settings) and create/update the corresponding SecurityGroupRule property.
    Rows that cannot be matched are skipped; ambiguous matches are only printed.
    """
    # Historical models: never import models directly inside a migration.
    SecurityGroupRuleResource = apps.get_model('openstack', 'SecurityGroupRule')
    SecurityGroupProperty = apps.get_model('openstack_tenant', 'SecurityGroup')
    SecurityGroupRuleProperty = apps.get_model('openstack_tenant', 'SecurityGroupRule')
    ServiceSettings = apps.get_model('structure', 'ServiceSettings')
    Tenant = apps.get_model('openstack', 'Tenant')
    ContentType = apps.get_model('contenttypes', 'ContentType')
    content_type = ContentType.objects.get_for_model(Tenant)
    # Rules without a backend_id were never synced to the backend; skip them.
    for rule_resource in SecurityGroupRuleResource.objects.exclude(backend_id=''):
        try:
            # Service settings of type 'OpenStackTenant' point at the rule's tenant.
            service_settings = ServiceSettings.objects.get(
                type='OpenStackTenant',
                content_type=content_type,
                object_id=rule_resource.security_group.tenant_id,
            )
            security_group = SecurityGroupProperty.objects.get(
                settings=service_settings,
                backend_id=rule_resource.security_group.backend_id,
            )
            # The remote group is optional; a missing counterpart is tolerated.
            remote_group = None
            if rule_resource.remote_group and rule_resource.remote_group.backend_id:
                try:
                    remote_group = SecurityGroupProperty.objects.get(
                        settings=service_settings,
                        backend_id=rule_resource.remote_group.backend_id,
                    )
                except ObjectDoesNotExist:
                    pass
            # Idempotent upsert keyed on (security_group, backend_id).
            SecurityGroupRuleProperty.objects.update_or_create(
                security_group=security_group,
                backend_id=rule_resource.backend_id,
                defaults=dict(
                    ethertype=rule_resource.ethertype,
                    direction=rule_resource.direction,
                    protocol=rule_resource.protocol,
                    from_port=rule_resource.from_port,
                    to_port=rule_resource.to_port,
                    cidr=rule_resource.cidr,
                    description=rule_resource.description,
                    remote_group=remote_group,
                ),
            )
        except ObjectDoesNotExist:
            # No matching settings/group on the tenant side: best-effort, skip.
            continue
        except MultipleObjectsReturned:
            # NOTE(review): ambiguous data is only printed, not migrated — deliberate
            # best-effort behavior; kept as-is.
            print(rule_resource)
class Migration(migrations.Migration):
    dependencies = [
        (
            'openstack_tenant',
            '0019_enforce_uniqueness_constraint_on_security_group_rule',
        ),
    ]
    # Data migration only; no reverse_code is given, so it cannot be unapplied.
    operations = [
        migrations.RunPython(create_or_update_security_group_rules),
    ]
| mit | 1377e7b8e1f6308c826924b99090fe59 | 40.092308 | 87 | 0.60277 | 4.795332 | false | false | false | false |
jorisroovers/gitlint | gitlint-core/gitlint/tests/config/test_config.py | 1 | 13939 | from unittest.mock import patch
from gitlint import rules
from gitlint.config import LintConfig, LintConfigError, LintConfigGenerator, GITLINT_CONFIG_TEMPLATE_SRC_PATH
from gitlint import options
from gitlint.tests.base import BaseTestCase
class LintConfigTests(BaseTestCase):
    def test_set_rule_option(self):
        """set_rule_option() overrides a rule option's default value."""
        config = LintConfig()
        # assert default title line-length
        self.assertEqual(config.get_rule_option("title-max-length", "line-length"), 72)
        # change line length and assert it is set
        config.set_rule_option("title-max-length", "line-length", 60)
        self.assertEqual(config.get_rule_option("title-max-length", "line-length"), 60)
    def test_set_rule_option_negative(self):
        """set_rule_option() raises LintConfigError for unknown rules/options and bad values."""
        config = LintConfig()
        # non-existing rule
        expected_error_msg = "No such rule 'föobar'"
        with self.assertRaisesMessage(LintConfigError, expected_error_msg):
            config.set_rule_option("föobar", "lïne-length", 60)
        # non-existing option
        expected_error_msg = "Rule 'title-max-length' has no option 'föobar'"
        with self.assertRaisesMessage(LintConfigError, expected_error_msg):
            config.set_rule_option("title-max-length", "föobar", 60)
        # invalid option value
        expected_error_msg = (
            "'föo' is not a valid value for option 'title-max-length.line-length'. "
            "Option 'line-length' must be a positive integer (current value: 'föo')."
        )
        with self.assertRaisesMessage(LintConfigError, expected_error_msg):
            config.set_rule_option("title-max-length", "line-length", "föo")
def test_set_general_option(self):
config = LintConfig()
# Check that default general options are correct
self.assertTrue(config.ignore_merge_commits)
self.assertTrue(config.ignore_fixup_commits)
self.assertTrue(config.ignore_fixup_amend_commits)
self.assertTrue(config.ignore_squash_commits)
self.assertTrue(config.ignore_revert_commits)
self.assertFalse(config.ignore_stdin)
self.assertFalse(config.staged)
self.assertFalse(config.fail_without_commits)
self.assertFalse(config.regex_style_search)
self.assertFalse(config.debug)
self.assertEqual(config.verbosity, 3)
active_rule_classes = tuple(type(rule) for rule in config.rules)
self.assertTupleEqual(active_rule_classes, config.default_rule_classes)
# ignore - set by string
config.set_general_option("ignore", "title-trailing-whitespace, B2")
self.assertEqual(config.ignore, ["title-trailing-whitespace", "B2"])
# ignore - set by list
config.set_general_option("ignore", ["T1", "B3"])
self.assertEqual(config.ignore, ["T1", "B3"])
# verbosity
config.set_general_option("verbosity", 1)
self.assertEqual(config.verbosity, 1)
# ignore_merge_commit
config.set_general_option("ignore-merge-commits", "false")
self.assertFalse(config.ignore_merge_commits)
# ignore_fixup_commit
config.set_general_option("ignore-fixup-commits", "false")
self.assertFalse(config.ignore_fixup_commits)
# ignore_fixup_amend_commit
config.set_general_option("ignore-fixup-amend-commits", "false")
self.assertFalse(config.ignore_fixup_amend_commits)
# ignore_squash_commit
config.set_general_option("ignore-squash-commits", "false")
self.assertFalse(config.ignore_squash_commits)
# ignore_revert_commit
config.set_general_option("ignore-revert-commits", "false")
self.assertFalse(config.ignore_revert_commits)
# debug
config.set_general_option("debug", "true")
self.assertTrue(config.debug)
# ignore-stdin
config.set_general_option("ignore-stdin", "true")
self.assertTrue(config.debug)
# staged
config.set_general_option("staged", "true")
self.assertTrue(config.staged)
# fail-without-commits
config.set_general_option("fail-without-commits", "true")
self.assertTrue(config.fail_without_commits)
# regex-style-search
config.set_general_option("regex-style-search", "true")
self.assertTrue(config.regex_style_search)
# target
config.set_general_option("target", self.SAMPLES_DIR)
self.assertEqual(config.target, self.SAMPLES_DIR)
# extra_path has its own test: test_extra_path and test_extra_path_negative
# contrib has its own tests: test_contrib and test_contrib_negative
    def test_contrib(self):
        """The 'contrib' general option enables contrib rules (by name or id) and
        resetting/emptying it removes previously enabled ones."""
        config = LintConfig()
        contrib_rules = ["contrib-title-conventional-commits", "CC1"]
        config.set_general_option("contrib", ",".join(contrib_rules))
        self.assertEqual(config.contrib, contrib_rules)
        # Check contrib-title-conventional-commits contrib rule
        actual_rule = config.rules.find_rule("contrib-title-conventional-commits")
        self.assertTrue(actual_rule.is_contrib)
        self.assertEqual(str(type(actual_rule)), "<class 'conventional_commit.ConventionalCommit'>")
        self.assertEqual(actual_rule.id, "CT1")
        self.assertEqual(actual_rule.name, "contrib-title-conventional-commits")
        self.assertEqual(actual_rule.target, rules.CommitMessageTitle)
        expected_rule_option = options.ListOption(
            "types",
            ["fix", "feat", "chore", "docs", "style", "refactor", "perf", "test", "revert", "ci", "build"],
            "Comma separated list of allowed commit types.",
        )
        self.assertListEqual(actual_rule.options_spec, [expected_rule_option])
        self.assertDictEqual(actual_rule.options, {"types": expected_rule_option})
        # Check contrib-body-requires-signed-off-by contrib rule
        actual_rule = config.rules.find_rule("contrib-body-requires-signed-off-by")
        self.assertTrue(actual_rule.is_contrib)
        self.assertEqual(str(type(actual_rule)), "<class 'signedoff_by.SignedOffBy'>")
        self.assertEqual(actual_rule.id, "CC1")
        self.assertEqual(actual_rule.name, "contrib-body-requires-signed-off-by")
        # reset value (this is a different code path)
        config.set_general_option("contrib", "contrib-body-requires-signed-off-by")
        self.assertEqual(actual_rule, config.rules.find_rule("contrib-body-requires-signed-off-by"))
        self.assertIsNone(config.rules.find_rule("contrib-title-conventional-commits"))
        # empty value
        config.set_general_option("contrib", "")
        self.assertListEqual(config.contrib, [])
    def test_contrib_negative(self):
        """Unknown contrib rules and rule-loading errors surface as LintConfigError."""
        config = LintConfig()
        # non-existent contrib rule
        with self.assertRaisesMessage(LintConfigError, "No contrib rule with id or name 'föo' found."):
            config.contrib = "contrib-title-conventional-commits,föo"
        # UserRuleError, RuleOptionError should be re-raised as LintConfigErrors
        side_effects = [rules.UserRuleError("üser-rule"), options.RuleOptionError("rüle-option")]
        for side_effect in side_effects:
            with patch("gitlint.config.rule_finder.find_rule_classes", side_effect=side_effect):
                with self.assertRaisesMessage(LintConfigError, str(side_effect)):
                    config.contrib = "contrib-title-conventional-commits"
    def test_extra_path(self):
        """The 'extra-path' option loads user-defined rules from a directory; changing
        the path afterwards unloads them again."""
        config = LintConfig()
        config.set_general_option("extra-path", self.get_user_rules_path())
        self.assertEqual(config.extra_path, self.get_user_rules_path())
        actual_rule = config.rules.find_rule("UC1")
        self.assertTrue(actual_rule.is_user_defined)
        self.assertEqual(str(type(actual_rule)), "<class 'my_commit_rules.MyUserCommitRule'>")
        self.assertEqual(actual_rule.id, "UC1")
        self.assertEqual(actual_rule.name, "my-üser-commit-rule")
        self.assertEqual(actual_rule.target, None)
        expected_rule_option = options.IntOption("violation-count", 1, "Number of violåtions to return")
        self.assertListEqual(actual_rule.options_spec, [expected_rule_option])
        self.assertDictEqual(actual_rule.options, {"violation-count": expected_rule_option})
        # reset value (this is a different code path)
        config.set_general_option("extra-path", self.SAMPLES_DIR)
        self.assertEqual(config.extra_path, self.SAMPLES_DIR)
        self.assertIsNone(config.rules.find_rule("UC1"))
    def test_extra_path_negative(self):
        """Invalid extra-path values and broken user-rule classes raise LintConfigError."""
        config = LintConfig()
        regex = "Option extra-path must be either an existing directory or file (current value: 'föo/bar')"
        # incorrect extra_path
        with self.assertRaisesMessage(LintConfigError, regex):
            config.extra_path = "föo/bar"
        # extra path contains classes with errors
        with self.assertRaisesMessage(
            LintConfigError, "User-defined rule class 'MyUserLineRule' must have a 'validate' method"
        ):
            config.extra_path = self.get_sample_path("user_rules/incorrect_linerule")
    def test_set_general_option_negative(self):
        """Setting unknown/invalid general option values raises LintConfigError."""
        config = LintConfig()
        # Note that we shouldn't test whether we can set unicode because python just doesn't allow unicode attributes
        with self.assertRaisesMessage(LintConfigError, "'foo' is not a valid gitlint option"):
            config.set_general_option("foo", "bår")
        # try setting _config_path, this is a real attribute of LintConfig, but the code should prevent it from
        # being set
        with self.assertRaisesMessage(LintConfigError, "'_config_path' is not a valid gitlint option"):
            config.set_general_option("_config_path", "bår")
        # invalid verbosity
        incorrect_values = [-1, "föo"]
        for value in incorrect_values:
            expected_msg = f"Option 'verbosity' must be a positive integer (current value: '{value}')"
            with self.assertRaisesMessage(LintConfigError, expected_msg):
                config.verbosity = value
        incorrect_values = [4]
        for value in incorrect_values:
            with self.assertRaisesMessage(LintConfigError, "Option 'verbosity' must be set between 0 and 3"):
                config.verbosity = value
        # invalid ignore_xxx_commits
        ignore_attributes = [
            "ignore_merge_commits",
            "ignore_fixup_commits",
            "ignore_fixup_amend_commits",
            "ignore_squash_commits",
            "ignore_revert_commits",
        ]
        incorrect_values = [-1, 4, "föo"]
        for attribute in ignore_attributes:
            for value in incorrect_values:
                option_name = attribute.replace("_", "-")
                with self.assertRaisesMessage(
                    LintConfigError, f"Option '{option_name}' must be either 'true' or 'false'"
                ):
                    setattr(config, attribute, value)
        # invalid ignore -> not here because ignore is a ListOption which converts everything to a string before
        # splitting which means it will accept just about everything
        # invalid boolean options
        for attribute in ["debug", "staged", "ignore_stdin", "fail_without_commits", "regex_style_search"]:
            option_name = attribute.replace("_", "-")
            with self.assertRaisesMessage(LintConfigError, f"Option '{option_name}' must be either 'true' or 'false'"):
                setattr(config, attribute, "föobar")
        # extra-path has its own negative test
        # invalid target
        with self.assertRaisesMessage(
            LintConfigError, "Option target must be an existing directory (current value: 'föo/bar')"
        ):
            config.target = "föo/bar"
def test_ignore_independent_from_rules(self):
# Test that the lintconfig rules are not modified when setting config.ignore
# This was different in the past, this test is mostly here to catch regressions
config = LintConfig()
original_rules = config.rules
config.ignore = ["T1", "T2"]
self.assertEqual(config.ignore, ["T1", "T2"])
self.assertSequenceEqual(config.rules, original_rules)
    def test_config_equality(self):
        """LintConfig equality depends on a fixed set of attributes; anything else is ignored."""
        self.assertEqual(LintConfig(), LintConfig())
        self.assertNotEqual(LintConfig(), LintConfigGenerator())
        # Ensure LintConfig are not equal if they differ on their attributes
        attrs = [
            ("verbosity", 1),
            ("rules", []),
            ("ignore_stdin", True),
            ("fail_without_commits", True),
            ("regex_style_search", True),
            ("debug", True),
            ("ignore", ["T1"]),
            ("staged", True),
            ("_config_path", self.get_sample_path()),
            ("ignore_merge_commits", False),
            ("ignore_fixup_commits", False),
            ("ignore_fixup_amend_commits", False),
            ("ignore_squash_commits", False),
            ("ignore_revert_commits", False),
            ("extra_path", self.get_sample_path("user_rules")),
            ("target", self.get_sample_path()),
            ("contrib", ["CC1"]),
        ]
        for attr, val in attrs:
            config = LintConfig()
            setattr(config, attr, val)
            self.assertNotEqual(LintConfig(), config)
        # Other attributes don't matter
        config1 = LintConfig()
        config2 = LintConfig()
        config1.foo = "bår"
        self.assertEqual(config1, config2)
        config2.foo = "dūr"
        self.assertEqual(config1, config2)
class LintConfigGeneratorTests(BaseTestCase):
    @staticmethod
    @patch("gitlint.config.shutil.copyfile")
    def test_install_commit_msg_hook_negative(copy):
        # generate_config() should simply copy the packaged template to the target path.
        # shutil.copyfile is patched, so no file is actually written.
        LintConfigGenerator.generate_config("föo/bar/test")
        copy.assert_called_with(GITLINT_CONFIG_TEMPLATE_SRC_PATH, "föo/bar/test")
| mit | 3f06da3b5e1f82db14d8ab03b9c5790a | 43.025316 | 119 | 0.644192 | 3.973722 | false | true | false | false |
jorisroovers/gitlint | gitlint-core/gitlint/rules.py | 1 | 17284 | # pylint: disable=inconsistent-return-statements
import copy
import logging
import re
from gitlint.options import IntOption, BoolOption, StrOption, ListOption, RegexOption
from gitlint.exception import GitlintError
from gitlint.deprecation import Deprecation
class Rule:
    """Base class for all gitlint rules.

    Subclasses declare ``options_spec``; instances get a deep copy of each
    declared option, optionally overridden by the ``opts`` dict passed to
    the constructor.
    """

    options_spec = []
    id = None
    name = None
    target = None
    _log = None
    _log_deprecated_regex_style_search = None

    def __init__(self, opts=None):
        opts = opts or {}
        self.options = {}
        for spec in self.options_spec:
            option = copy.deepcopy(spec)
            self.options[spec.name] = option
            override = opts.get(spec.name)
            if override is not None:
                option.set(override)

    @property
    def log(self):
        # Lazily create a module logger on first access.
        if not self._log:
            logging.basicConfig()
            self._log = logging.getLogger(__name__)
        return self._log

    def __eq__(self, other):
        return all(getattr(self, attr) == getattr(other, attr) for attr in ("id", "name", "options", "target"))

    def __str__(self):
        return f"{self.id} {self.name}"  # pragma: no cover
# Rule taxonomy: every concrete gitlint rule subclasses one of the three Rule
# flavors below; LineRules additionally declare a LineRuleTarget.
class ConfigurationRule(Rule):
    """Class representing rules that can dynamically change the configuration of gitlint during runtime."""
    pass
class CommitRule(Rule):
    """Class representing rules that act on an entire commit at once"""
    pass
class LineRule(Rule):
    """Class representing rules that act on a line by line basis"""
    pass
class LineRuleTarget:
    """Base class for LineRule targets. A LineRuleTarget specifies where a given rule will be applied
    (e.g. commit message title, commit message body).
    Each LineRule MUST have a target specified."""
    pass
class CommitMessageTitle(LineRuleTarget):
    """Target class used for rules that apply to a commit message title"""
    pass
class CommitMessageBody(LineRuleTarget):
    """Target class used for rules that apply to a commit message body"""
    pass
class RuleViolation:
    """A single broken-rule occurrence: which rule was broken, the violation
    message, the offending content and (optionally) the line number."""

    def __init__(self, rule_id, message, content=None, line_nr=None):
        self.rule_id = rule_id
        self.line_nr = line_nr
        self.message = message
        self.content = content

    def __eq__(self, other):
        return (
            self.rule_id == other.rule_id
            and self.message == other.message
            and self.content == other.content
            and self.line_nr == other.line_nr
        )

    def __str__(self):
        return f'{self.line_nr}: {self.rule_id} {self.message}: "{self.content}"'
class UserRuleError(GitlintError):
    """Error used to indicate that an error occurred while trying to load a user rule"""
    # Raised during rule discovery; gitlint re-raises it as a config error upstream.
    pass
class MaxLineLength(LineRule):
    """Flags any line longer than the configurable ``line-length`` option."""

    name = "max-line-length"
    id = "R1"
    options_spec = [IntOption("line-length", 80, "Max line length")]
    violation_message = "Line exceeds max length ({0}>{1})"

    def validate(self, line, _commit):
        limit = self.options["line-length"].value
        if len(line) <= limit:
            return None
        return [RuleViolation(self.id, self.violation_message.format(len(line), limit), line)]
class TrailingWhiteSpace(LineRule):
    """Flags lines that end in any whitespace character."""

    name = "trailing-whitespace"
    id = "R2"
    violation_message = "Line has trailing whitespace"
    pattern = re.compile(r"\s$", re.UNICODE)

    def validate(self, line, _commit):
        if not self.pattern.search(line):
            return None
        return [RuleViolation(self.id, self.violation_message, line)]
class HardTab(LineRule):
    """Flags lines containing a literal tab character."""

    name = "hard-tab"
    id = "R3"
    violation_message = "Line contains hard tab characters (\\t)"

    def validate(self, line, _commit):
        if "\t" not in line:
            return None
        return [RuleViolation(self.id, self.violation_message, line)]
class LineMustNotContainWord(LineRule):
    """Flags lines containing any configured word as a whole word.

    NOTE: a configured word appearing inside another word is not a violation,
    e.g. 'WIPING' does not trigger on the word 'WIP'.
    """

    name = "line-must-not-contain"
    id = "R5"
    options_spec = [ListOption("words", [], "Comma separated list of words that should not be found")]
    violation_message = "Line contains {0}"

    def validate(self, line, _commit):
        lowered_line = line.lower()
        found = []
        for word in self.options["words"].value:
            pattern = re.compile(rf"\b{word.lower()}\b", re.IGNORECASE | re.UNICODE)
            if pattern.search(lowered_line):
                found.append(RuleViolation(self.id, self.violation_message.format(word), line))
        return found or None
class LeadingWhiteSpace(LineRule):
    """Flags lines that start with any whitespace character."""

    name = "leading-whitespace"
    id = "R6"
    violation_message = "Line has leading whitespace"
    # Compile once at class level, consistent with TrailingWhiteSpace, instead of
    # recompiling the regex on every validate() call.
    pattern = re.compile(r"^\s", re.UNICODE)

    def validate(self, line, _commit):
        if self.pattern.search(line):
            return [RuleViolation(self.id, self.violation_message, line)]
# Title-specific specializations: same logic as the generic R-rules, retargeted
# at the commit message title with adjusted ids/messages/defaults.
class TitleMaxLength(MaxLineLength):
    name = "title-max-length"
    id = "T1"
    target = CommitMessageTitle
    options_spec = [IntOption("line-length", 72, "Max line length")]
    violation_message = "Title exceeds max length ({0}>{1})"
class TitleTrailingWhitespace(TrailingWhiteSpace):
    name = "title-trailing-whitespace"
    id = "T2"
    target = CommitMessageTitle
    violation_message = "Title has trailing whitespace"
class TitleTrailingPunctuation(LineRule):
    """Flags titles ending in one of the punctuation marks ?:!.,;"""

    name = "title-trailing-punctuation"
    id = "T3"
    target = CommitMessageTitle

    def validate(self, title, _commit):
        for mark in "?:!.,;":
            if title.endswith(mark):
                return [RuleViolation(self.id, f"Title has trailing punctuation ({mark})", title)]
        return None
class TitleHardTab(HardTab):
    name = "title-hard-tab"
    id = "T4"
    target = CommitMessageTitle
    violation_message = "Title contains hard tab characters (\\t)"
class TitleMustNotContainWord(LineMustNotContainWord):
    # Defaults to flagging "WIP" in titles; word list is configurable.
    name = "title-must-not-contain-word"
    id = "T5"
    target = CommitMessageTitle
    options_spec = [ListOption("words", ["WIP"], "Must not contain word")]
    violation_message = "Title contains the word '{0}' (case-insensitive)"
class TitleLeadingWhitespace(LeadingWhiteSpace):
    name = "title-leading-whitespace"
    id = "T6"
    target = CommitMessageTitle
    violation_message = "Title has leading whitespace"
class TitleRegexMatches(LineRule):
    """Flags titles that do not match the configured ``regex`` option.

    A no-op when no regex is configured.
    """

    name = "title-match-regex"
    id = "T7"
    target = CommitMessageTitle
    options_spec = [RegexOption("regex", None, "Regex the title should match")]

    def validate(self, title, _commit):
        regex = self.options["regex"].value
        # If no regex is specified, immediately return
        if not regex:
            return None
        if regex.search(title):
            return None
        return [RuleViolation(self.id, f"Title does not match regex ({regex.pattern})", title)]
class TitleMinLength(LineRule):
    """Flags titles shorter than the configurable ``min-length`` option."""

    name = "title-min-length"
    id = "T8"
    target = CommitMessageTitle
    options_spec = [IntOption("min-length", 5, "Minimum required title length")]

    def validate(self, title, _commit):
        required = self.options["min-length"].value
        length = len(title)
        if length >= required:
            return None
        return [RuleViolation(self.id, f"Title is too short ({length}<{required})", title, 1)]
# Body-specific specializations: generic R-rules retargeted at the commit body.
class BodyMaxLineLength(MaxLineLength):
    name = "body-max-line-length"
    id = "B1"
    target = CommitMessageBody
class BodyTrailingWhitespace(TrailingWhiteSpace):
    name = "body-trailing-whitespace"
    id = "B2"
    target = CommitMessageBody
class BodyHardTab(HardTab):
    name = "body-hard-tab"
    id = "B3"
    target = CommitMessageBody
class BodyFirstLineEmpty(CommitRule):
    """Requires the second line of the commit message (first body line) to be empty."""

    name = "body-first-line-empty"
    id = "B4"

    def validate(self, commit):
        body = commit.message.body
        if not body:
            return None
        first_line = body[0]
        if first_line == "":
            return None
        return [RuleViolation(self.id, "Second line is not empty", first_line, 2)]
class BodyMinLength(CommitRule):
    """Flags non-empty bodies whose total character count is below ``min-length``.

    A completely empty body is not flagged here (that is BodyMissing's job).
    """

    name = "body-min-length"
    id = "B5"
    options_spec = [IntOption("min-length", 20, "Minimum body length")]

    def validate(self, commit):
        required = self.options["min-length"].value
        flattened = "".join(line for line in commit.message.body if line is not None)
        length = len(flattened)
        if not 0 < length < required:
            return None
        return [RuleViolation(self.id, f"Body message is too short ({length}<{required})", flattened, 3)]
class BodyMissing(CommitRule):
    """Flags commits whose message has no (non-whitespace) body at all."""

    name = "body-is-missing"
    id = "B6"
    options_spec = [BoolOption("ignore-merge-commits", True, "Ignore merge commits")]

    def validate(self, commit):
        # Merge commits may legitimately have no body; skip them when configured to.
        if self.options["ignore-merge-commits"].value and commit.is_merge_commit:
            return None
        body = commit.message.body
        if len(body) >= 2 and "".join(body).strip():
            return None
        return [RuleViolation(self.id, "Body message is missing", None, 3)]
class BodyChangedFileMention(CommitRule):
    """Requires the body to mention any of the configured files that the commit changes."""

    name = "body-changed-file-mention"
    id = "B7"
    options_spec = [ListOption("files", [], "Files that need to be mentioned")]

    def validate(self, commit):
        body_text = " ".join(commit.message.body)
        violation_line_nr = len(commit.message.body) + 1
        found = []
        for watched_file in self.options["files"].value:
            # Only files that are both watched AND changed need a mention in the body.
            if watched_file not in commit.changed_files:
                continue
            if watched_file in body_text:
                continue
            message = f"Body does not mention changed file '{watched_file}'"
            found.append(RuleViolation(self.id, message, None, violation_line_nr))
        return found or None
class BodyRegexMatches(CommitRule):
    """Flags commits whose body does not match the configured ``regex`` option.

    A no-op when no regex is configured.
    """

    name = "body-match-regex"
    id = "B8"
    options_spec = [RegexOption("regex", None, "Regex the body should match")]

    def validate(self, commit):
        regex = self.options["regex"].value
        # If no regex is specified, immediately return
        if not regex:
            return None
        # We intentionally ignore the first line in the body as that's the empty line after the title,
        # which most users are not going to expect to be part of the body when matching a regex.
        # If this causes contention, we can always introduce an option to change the behavior in a backward-
        # compatible way.
        body_lines = commit.message.body[1:]
        # Similarly, the last line is often empty, this has to do with how git returns commit messages
        # User's won't expect this, so prune it off by default
        if body_lines and body_lines[-1] == "":
            body_lines = body_lines[:-1]
        if regex.search("\n".join(body_lines)):
            return None
        violation_msg = f"Body does not match regex ({regex.pattern})"
        return [RuleViolation(self.id, violation_msg, None, len(commit.message.body) + 1)]
class AuthorValidEmail(CommitRule):
    """Flags commits whose author email does not match the configured regex."""
    name = "author-valid-email"
    id = "M1"
    DEFAULT_AUTHOR_VALID_EMAIL_REGEX = r"^[^@ ]+@[^@ ]+\.[^@ ]+"
    options_spec = [
        RegexOption("regex", DEFAULT_AUTHOR_VALID_EMAIL_REGEX, "Regex that author email address should match")
    ]
    def validate(self, commit):
        # If no regex is specified, immediately return
        if not self.options["regex"].value:
            return
        # We're replacing regex match with search semantics, see https://github.com/jorisroovers/gitlint/issues/254
        # In case the user is using the default regex, we can silently change to using search
        # If not, it depends on config (handled by Deprecation class)
        if self.DEFAULT_AUTHOR_VALID_EMAIL_REGEX == self.options["regex"].value.pattern:
            regex_method = self.options["regex"].value.search
        else:
            regex_method = Deprecation.get_regex_method(self, self.options["regex"])
        # An absent author email is not flagged by this rule.
        if commit.author_email and not regex_method(commit.author_email):
            return [RuleViolation(self.id, "Author email for commit is invalid", commit.author_email)]
class IgnoreByTitle(ConfigurationRule):
    """Ignores (a subset of) rules for commits whose title matches the configured regex."""
    name = "ignore-by-title"
    id = "I1"
    options_spec = [
        RegexOption("regex", None, "Regex matching the titles of commits this rule should apply to"),
        StrOption("ignore", "all", "Comma-separated list of rules to ignore"),
    ]
    def apply(self, config, commit):
        # If no regex is specified, immediately return
        if not self.options["regex"].value:
            return
        # We're replacing regex match with search semantics, see https://github.com/jorisroovers/gitlint/issues/254
        regex_method = Deprecation.get_regex_method(self, self.options["regex"])
        if regex_method(commit.message.title):
            # Mutates the passed-in config: the 'ignore' option takes effect for this commit.
            config.ignore = self.options["ignore"].value
            message = (
                f"Commit title '{commit.message.title}' matches the regex "
                f"'{self.options['regex'].value.pattern}', ignoring rules: {self.options['ignore'].value}"
            )
            self.log.debug("Ignoring commit because of rule '%s': %s", self.id, message)
class IgnoreByBody(ConfigurationRule):
    """Ignores (a subset of) rules for commits where any body line matches the configured regex."""
    name = "ignore-by-body"
    id = "I2"
    options_spec = [
        RegexOption("regex", None, "Regex matching lines of the body of commits this rule should apply to"),
        StrOption("ignore", "all", "Comma-separated list of rules to ignore"),
    ]
    def apply(self, config, commit):
        # If no regex is specified, immediately return
        if not self.options["regex"].value:
            return
        # We're replacing regex match with search semantics, see https://github.com/jorisroovers/gitlint/issues/254
        regex_method = Deprecation.get_regex_method(self, self.options["regex"])
        for line in commit.message.body:
            if regex_method(line):
                # Mutates the passed-in config: the 'ignore' option takes effect for this commit.
                config.ignore = self.options["ignore"].value
                message = (
                    f"Commit message line '{line}' matches the regex '{self.options['regex'].value.pattern}',"
                    f" ignoring rules: {self.options['ignore'].value}"
                )
                self.log.debug("Ignoring commit because of rule '%s': %s", self.id, message)
                # No need to check other lines if we found a match
                return
class IgnoreBodyLines(ConfigurationRule):
    """Removes body lines matching the configured regex before other rules run.

    Rewrites both ``commit.message.body`` and ``commit.message.full``.
    """

    name = "ignore-body-lines"
    id = "I3"
    options_spec = [RegexOption("regex", None, "Regex matching lines of the body that should be ignored")]

    def apply(self, _, commit):
        # A no-op when no regex is configured.
        if not self.options["regex"].value:
            return
        # We're replacing regex match with search semantics, see https://github.com/jorisroovers/gitlint/issues/254
        regex_method = Deprecation.get_regex_method(self, self.options["regex"])
        pattern = self.options["regex"].value.pattern
        kept_lines = []
        for line in commit.message.body:
            if regex_method(line):
                self.log.debug("Ignoring line '%s' because it matches '%s'", line, pattern)
            else:
                kept_lines.append(line)
        commit.message.body = kept_lines
        commit.message.full = "\n".join([commit.message.title, *kept_lines])
class IgnoreByAuthorName(ConfigurationRule):
    """Ignores (a subset of) rules for commits whose author name matches the configured regex."""
    name = "ignore-by-author-name"
    id = "I4"
    options_spec = [
        RegexOption("regex", None, "Regex matching the author name of commits this rule should apply to"),
        StrOption("ignore", "all", "Comma-separated list of rules to ignore"),
    ]
    def apply(self, config, commit):
        # If no regex is specified, immediately return
        if not self.options["regex"].value:
            return
        regex_method = Deprecation.get_regex_method(self, self.options["regex"])
        if regex_method(commit.author_name):
            # Mutates the passed-in config: the 'ignore' option takes effect for this commit.
            config.ignore = self.options["ignore"].value
            message = (
                f"Commit Author Name '{commit.author_name}' matches the regex "
                f"'{self.options['regex'].value.pattern}', ignoring rules: {self.options['ignore'].value}"
            )
            self.log.debug("Ignoring commit because of rule '%s': %s", self.id, message)
            # No need to check other lines if we found a match
            return
| mit | 304405d9f05d519deb6812a8bf10af59 | 34.345603 | 117 | 0.638567 | 3.924614 | false | false | false | false |
conan-io/conan | conans/test/integration/graph_lock/graph_lock_ci_test.py | 1 | 53402 | import json
import os
import textwrap
import unittest
from parameterized import parameterized
import pytest
from conans.model.graph_lock import LOCKFILE
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient, TestServer
from conans.util.env_reader import get_env
from conans.util.files import load
conanfile = textwrap.dedent("""
from conans import ConanFile, load
import os
class Pkg(ConanFile):
{requires}
exports_sources = "myfile.txt"
keep_imports = True
def imports(self):
self.copy("myfile.txt", folder=True)
def package(self):
self.copy("*myfile.txt")
def package_info(self):
self.output.info("SELF FILE: %s"
% load(os.path.join(self.package_folder, "myfile.txt")))
for d in os.listdir(self.package_folder):
p = os.path.join(self.package_folder, d, "myfile.txt")
if os.path.isfile(p):
self.output.info("DEP FILE %s: %s" % (d, load(p)))
""")
class GraphLockCITest(unittest.TestCase):
@parameterized.expand([("recipe_revision_mode",), ("package_revision_mode",)])
@pytest.mark.skipif(not get_env("TESTING_REVISIONS_ENABLED", False), reason="Only revisions")
def test_revisions(self, package_id_mode):
test_server = TestServer(users={"user": "mypass"})
client = TestClient(servers={"default": test_server},
users={"default": [("user", "mypass")]})
client.run("config set general.default_package_id_mode=%s" % package_id_mode)
client.save({"conanfile.py": conanfile.format(requires=""),
"myfile.txt": "HelloA"})
client.run("create . PkgA/0.1@user/channel")
client.save({"conanfile.py": conanfile.format(
requires='requires = "PkgA/0.1@user/channel"'),
"myfile.txt": "HelloB"})
client.run("create . PkgB/0.1@user/channel")
client.save({"conanfile.py": conanfile.format(
requires='requires = "PkgB/0.1@user/channel"'),
"myfile.txt": "HelloC"})
client.run("create . PkgC/0.1@user/channel")
client.save({"conanfile.py": conanfile.format(
requires='requires = "PkgC/0.1@user/channel"'),
"myfile.txt": "HelloD"})
client.run("create . PkgD/0.1@user/channel")
self.assertIn("PkgD/0.1@user/channel: SELF FILE: HelloD", client.out)
self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgA: HelloA", client.out)
self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgB: HelloB", client.out)
self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgC: HelloC", client.out)
client.run("upload * --all --confirm")
client.run("lock create --reference=PkgD/0.1@user/channel --lockfile-out=conan.lock")
initial_lock_file = client.load(LOCKFILE)
# Do a change in B, this will be a new revision
clientb = TestClient(cache_folder=client.cache_folder, servers={"default": test_server})
clientb.save({"conanfile.py": conanfile.format(requires='requires="PkgA/0.1@user/channel"'),
"myfile.txt": "ByeB World!!"})
clientb.run("create . PkgB/0.1@user/channel")
# Go back to main orchestrator
client.run("lock create --reference=PkgD/0.1@user/channel --lockfile-out=conan.lock")
client.run("lock build-order conan.lock --json=build_order.json")
master_lockfile = client.load("conan.lock")
build_order = client.load("build_order.json")
to_build = json.loads(build_order)
lock_fileaux = master_lockfile
while to_build:
for ref, _, _, _ in to_build[0]:
client_aux = TestClient(cache_folder=client.cache_folder,
servers={"default": test_server})
client_aux.save({LOCKFILE: lock_fileaux})
client_aux.run("install %s --build=%s --lockfile=conan.lock"
" --lockfile-out=conan.lock" % (ref, ref))
lock_fileaux = load(os.path.join(client_aux.current_folder, LOCKFILE))
client.save({"new_lock/%s" % LOCKFILE: lock_fileaux})
client.run("lock update conan.lock new_lock/conan.lock")
client.run("lock build-order conan.lock --json=bo.json")
lock_fileaux = client.load(LOCKFILE)
to_build = json.loads(client.load("bo.json"))
new_lockfile = client.load(LOCKFILE)
client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
self.assertIn("PkgC/0.1@user/channel: DEP FILE PkgB: ByeB World!!", client.out)
self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgB: ByeB World!!", client.out)
client.run("upload * --all --confirm")
client.save({LOCKFILE: initial_lock_file})
client.run("remove * -f")
client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
self.assertIn("PkgC/0.1@user/channel: DEP FILE PkgB: HelloB", client.out)
self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgB: HelloB", client.out)
client.save({LOCKFILE: new_lockfile})
client.run("remove * -f")
client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
self.assertIn("PkgC/0.1@user/channel: DEP FILE PkgB: ByeB World!!", client.out)
self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgB: ByeB World!!", client.out)
    @parameterized.expand([(False,), (True,)])
    def test_version_ranges(self, partial_lock):
        """Lock a chain PkgD->PkgC->PkgB->PkgA resolved through version
        ranges, release PkgB/0.2, rebuild the affected packages in build-order
        and verify that the initial and updated lockfiles each reproduce their
        respective binaries.

        :param partial_lock: when True, PkgB/0.2 is first captured and built
            against its own partial ``buildb.lock`` (which pins PkgA/0.1),
            and that partial lock is then fed into the product lockfile.
        """
        client = TestClient()
        client.run("config set general.default_package_id_mode=full_package_mode")
        files = {
            "pkga/conanfile.py": conanfile.format(requires=""),
            "pkga/myfile.txt": "HelloA",
            "pkgb/conanfile.py": conanfile.format(requires='requires="PkgA/[*]@user/channel"'),
            "pkgb/myfile.txt": "HelloB",
            "pkgc/conanfile.py": conanfile.format(requires='requires="PkgB/[*]@user/channel"'),
            "pkgc/myfile.txt": "HelloC",
            "pkgd/conanfile.py": conanfile.format(requires='requires="PkgC/[*]@user/channel"'),
            "pkgd/myfile.txt": "HelloD",
        }
        client.save(files)
        client.run("create pkga PkgA/0.1@user/channel")
        client.run("create pkgb PkgB/0.1@user/channel")
        client.run("create pkgc PkgC/0.1@user/channel")
        client.run("create pkgd PkgD/0.1@user/channel")
        self.assertIn("PkgD/0.1@user/channel: SELF FILE: HelloD", client.out)
        self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgA: HelloA", client.out)
        self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgB: HelloB", client.out)
        self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgC: HelloC", client.out)
        # Snapshot of the graph before any change, used at the end to prove
        # the old lockfile still reproduces the old binaries
        client.run("lock create --reference=PkgD/0.1@user/channel --lockfile-out=conan.lock")
        initial_lockfile = client.load("conan.lock")
        # Do a change in B
        client.save({"pkgb/myfile.txt": "ByeB World!!"})
        if not partial_lock:
            client.run("export pkgb PkgB/0.2@user/channel")
            # Go back to main orchestrator
            client.run("lock create --reference=PkgD/0.1@user/channel --lockfile-out=productd.lock")
            # Now it is locked, PkgA can change
            client.save({"pkga/myfile.txt": "ByeA World!!"})
            client.run("create pkga PkgA/0.2@user/channel")
        else:
            # Capture a partial lockfile for PkgB/0.2 only (pins PkgA/0.1)
            client.run("lock create pkgb/conanfile.py --name=PkgB --version=0.2 --user=user "
                       "--channel=channel --lockfile-out=buildb.lock")
            self.assertIn("PkgA/0.1", client.out)
            self.assertNotIn("PkgA/0.2", client.out)
            # Now it is locked, PkgA can change
            client.save({"pkga/myfile.txt": "ByeA World!!"})
            client.run("create pkga PkgA/0.2@user/channel")
            # Package can be created with previous lock, keep PkgA/0.1
            client.run("create pkgb PkgB/0.2@user/channel --lockfile=buildb.lock "
                       "--lockfile-out=buildb.lock")
            self.assertIn("PkgA/0.1", client.out)
            self.assertNotIn("PkgA/0.2", client.out)
            self.assertIn("PkgB/0.2@user/channel: DEP FILE PkgA: HelloA", client.out)
            self.assertNotIn("ByeA", client.out)
            buildblock = client.load("buildb.lock")
            # Go back to main orchestrator, buildb.lock can be used to lock PkgA/0.1 too
            client.save({"buildb.lock": buildblock})
            client.run("lock create --reference=PkgD/0.1@user/channel --lockfile=buildb.lock "
                       "--lockfile-out=productd.lock")
            self.assertIn("PkgA/0.1", client.out)
            self.assertNotIn("PkgA/0.2", client.out)
        # Iterate the build-order levels: build each reference against the
        # product lockfile and merge the resulting lock back into the master
        client.run("lock build-order productd.lock --json=build_order.json")
        productd_lockfile = client.load("productd.lock")
        build_order = client.load("build_order.json")
        to_build = json.loads(build_order)
        lock_fileaux = productd_lockfile
        while to_build:
            for ref, _, _, _ in to_build[0]:
                client_aux = TestClient(cache_folder=client.cache_folder)
                client_aux.save({"productd.lock": lock_fileaux})
                client_aux.run("install %s --build=%s --lockfile=productd.lock "
                               "--lockfile-out=productd.lock" % (ref, ref))
                lock_fileaux = client_aux.load("productd.lock")
                client.save({"new_lock/productd.lock": lock_fileaux})
                client.run("lock update productd.lock new_lock/productd.lock")
            # Refresh the remaining build-order after this level was built
            client.run("lock build-order productd.lock --json=bo.json")
            lock_fileaux = client.load("productd.lock")
            to_build = json.loads(client.load("bo.json"))
        # Make sure built packages are marked as modified
        productd_lockfile = client.load("productd.lock")
        productd_lockfile_json = json.loads(productd_lockfile)
        nodes = productd_lockfile_json["graph_lock"]["nodes"]
        # Node ids differ between the two parameterizations because the
        # partial flow starts the graph from pkgb's conanfile
        pkgb = nodes["0"] if partial_lock else nodes["3"]
        pkgc = nodes["4"] if partial_lock else nodes["2"]
        pkgd = nodes["3"] if partial_lock else nodes["1"]
        self.assertIn("PkgB/0.2", pkgb["ref"])
        self.assertTrue(pkgb["modified"])
        self.assertIn("PkgC/0.1", pkgc["ref"])
        self.assertTrue(pkgc["modified"])
        self.assertIn("PkgD/0.1", pkgd["ref"])
        self.assertTrue(pkgd["modified"])
        new_lockfile = client.load("productd.lock")
        # New lockfile reproduces the rebuilt graph (new PkgB, locked PkgA/0.1)
        client.run("install PkgD/0.1@user/channel --lockfile=productd.lock")
        self.assertIn("HelloA", client.out)
        self.assertNotIn("ByeA", client.out)
        self.assertIn("PkgC/0.1@user/channel: DEP FILE PkgB: ByeB World!!", client.out)
        self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgB: ByeB World!!", client.out)
        client.save({LOCKFILE: initial_lockfile})
        # NOTE(review): these two assertions run against the PREVIOUS install's
        # output (client.save does not reset client.out) — presumably they were
        # intended after the install below; confirm before relying on them
        self.assertIn("HelloA", client.out)
        self.assertNotIn("ByeA", client.out)
        client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
        self.assertIn("PkgC/0.1@user/channel: DEP FILE PkgB: HelloB", client.out)
        self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgB: HelloB", client.out)
        client.save({LOCKFILE: new_lockfile})
        # NOTE(review): same stale-output pattern as above — confirm intent
        self.assertIn("HelloA", client.out)
        self.assertNotIn("ByeA", client.out)
        client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
        self.assertIn("PkgC/0.1@user/channel: DEP FILE PkgB: ByeB World!!", client.out)
        self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgB: ByeB World!!", client.out)
        # Not locked will retrieve newer versions
        client.run("install PkgD/0.1@user/channel", assert_error=True)
        self.assertIn("PkgA/0.2@user/channel:5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9 - Cache",
                      client.out)
        self.assertIn("PkgB/0.2@user/channel:11b376c6e7a22ec390c215a8584ef9237a6da32f - Missing",
                      client.out)
def test_version_ranges_diamond(self):
client = TestClient()
client.run("config set general.default_package_id_mode=full_package_mode")
client.save({"conanfile.py": conanfile.format(requires=""),
"myfile.txt": "HelloA"})
client.run("create . PkgA/0.1@user/channel")
client.save({"conanfile.py": conanfile.format(requires='requires="PkgA/[*]@user/channel"'),
"myfile.txt": "HelloB"})
client.run("create . PkgB/0.1@user/channel")
client.save({"conanfile.py": conanfile.format(requires='requires="PkgA/[*]@user/channel"'),
"myfile.txt": "HelloC"})
client.run("create . PkgC/0.1@user/channel")
client.save({"conanfile.py": conanfile.format(requires='requires="PkgB/[*]@user/channel",'
' "PkgC/[*]@user/channel"'),
"myfile.txt": "HelloD"})
client.run("create . PkgD/0.1@user/channel")
self.assertIn("PkgD/0.1@user/channel: SELF FILE: HelloD", client.out)
self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgA: HelloA", client.out)
self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgB: HelloB", client.out)
self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgC: HelloC", client.out)
client.run("lock create --reference=PkgD/0.1@user/channel --lockfile-out=conan.lock")
lock_file = client.load(LOCKFILE)
initial_lock_file = lock_file
# Do a change in A
clientb = TestClient(cache_folder=client.cache_folder)
clientb.run("config set general.default_package_id_mode=full_package_mode")
clientb.save({"conanfile.py": conanfile.format(requires=''),
"myfile.txt": "ByeA World!!"})
clientb.run("create . PkgA/0.2@user/channel")
client.run("lock create --reference=PkgD/0.1@user/channel --lockfile-out=conan.lock")
client.run("lock build-order conan.lock --json=build_order.json")
master_lockfile = client.load("conan.lock")
json_file = os.path.join(client.current_folder, "build_order.json")
to_build = json.loads(load(json_file))
lock_fileaux = master_lockfile
while to_build:
ref, _, _, _ = to_build[0].pop(0)
client_aux = TestClient(cache_folder=client.cache_folder)
client_aux.run("config set general.default_package_id_mode=full_package_mode")
client_aux.save({LOCKFILE: lock_fileaux})
client_aux.run("install %s --build=%s --lockfile=conan.lock "
"--lockfile-out=conan.lock" % (ref, ref))
lock_fileaux = load(os.path.join(client_aux.current_folder, "conan.lock"))
client.save({"new_lock/conan.lock": lock_fileaux})
client.run("lock update conan.lock new_lock/conan.lock")
client.run("lock build-order conan.lock")
lock_fileaux = client.load("conan.lock")
output = str(client.out).splitlines()[-1]
to_build = eval(output)
new_lockfile = client.load(LOCKFILE)
client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
self.assertIn("PkgB/0.1@user/channel: DEP FILE PkgA: ByeA World!!", client.out)
self.assertIn("PkgC/0.1@user/channel: DEP FILE PkgA: ByeA World!!", client.out)
self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgA: ByeA World!!", client.out)
client.save({LOCKFILE: initial_lock_file})
client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
self.assertIn("PkgB/0.1@user/channel: DEP FILE PkgA: HelloA", client.out)
self.assertIn("PkgC/0.1@user/channel: DEP FILE PkgA: HelloA", client.out)
self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgA: HelloA", client.out)
client.save({LOCKFILE: new_lockfile})
client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
self.assertIn("PkgB/0.1@user/channel: DEP FILE PkgA: ByeA World!!", client.out)
self.assertIn("PkgC/0.1@user/channel: DEP FILE PkgA: ByeA World!!", client.out)
self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgA: ByeA World!!", client.out)
    def test_override(self):
        """Verify that requirement overrides declared downstream (PkgE, PkgC)
        are honored and captured in the lockfile, and that building the whole
        graph in build-order with that lockfile yields the overridden
        (resolved) versions in every package's output."""
        client = TestClient()
        client.run("config set general.default_package_id_mode=full_package_mode")
        # The original (unresolved) graph shows the requirements how they are
        # specified in the package recipes. The graph contains conflicts:
        # There are three different versions of PkgA and two different versions
        # of PkgB.
        #
        # The overridden (resolved) graph shows the requirements after
        # conflicts have been resolved by overriding required package versions
        # in package PkgE. This graph contains only one version per package.
        #
        # Original (unresolved) graph:     :  Overridden (resolved) graph:
        # ============================     :  ============================
        #                                  :
        # PkgA/0.1   PkgA/0.2   PkgA/0.3   :                       PkgA/0.3
        #     |          |       |\____    :     __________________/|
        #     |          |       |     \   :    /                   |
        # PkgB/0.1       |   PkgB/0.3   |  :   PkgB/0.1             |
        #     |\____     |  _________/  |  :       |\_______  ______/|
        #     |     \    | /            |  :       |        \/       |
        #     |      PkgC/0.2           |  :       |     PkgC/0.2    |
        #     |          |              |  :       |        |        |
        #     |          |              |  :       |        |        |
        # PkgD/0.1       |              |  :   PkgD/0.1     |        |
        #     |  ___/____/______________/  :       |  ______/_______/
        #     | /                          :       | /
        # PkgE/0.1                         :   PkgE/0.1
        # PkgA/0.1
        client.save({
            "conanfile.py": conanfile.format(requires=""),
            "myfile.txt": "This is PkgA/0.1!",
        })
        client.run("export . PkgA/0.1@user/channel")
        # PkgA/0.2
        client.save({
            "conanfile.py": conanfile.format(requires=""),
            "myfile.txt": "This is PkgA/0.2!",
        })
        client.run("export . PkgA/0.2@user/channel")
        # PkgA/0.3
        client.save({
            "conanfile.py": conanfile.format(requires=""),
            "myfile.txt": "This is PkgA/0.3!",
        })
        client.run("export . PkgA/0.3@user/channel")
        # PkgB/0.1
        client.save({
            "conanfile.py": conanfile.format(requires='requires="PkgA/0.1@user/channel"'),
            "myfile.txt": "This is PkgB/0.1!",
        })
        client.run("export . PkgB/0.1@user/channel")
        # PkgB/0.3
        client.save({
            "conanfile.py": conanfile.format(requires='requires="PkgA/0.3@user/channel"'),
            "myfile.txt": "This is PkgB/0.3!",
        })
        client.run("export . PkgB/0.3@user/channel")
        # PkgC/0.2 overrides PkgA upward to 0.2 for its PkgB/0.3 requirement
        client.save({
            "conanfile.py": conanfile.format(requires=textwrap.dedent("""
                # This comment line is required to yield the correct indentation
                def requirements(self):
                    self.requires("PkgA/0.2@user/channel", override=True)
                    self.requires("PkgB/0.3@user/channel", override=False)
                """)),
            "myfile.txt": "This is PkgC/0.2!",
        })
        client.run("export . PkgC/0.2@user/channel")
        # PkgD/0.1
        client.save({
            "conanfile.py": conanfile.format(requires='requires="PkgB/0.1@user/channel"'),
            "myfile.txt": "This is PkgD/0.1!",
        })
        client.run("export . PkgD/0.1@user/channel")
        # PkgE/0.1: the final consumer resolves every conflict by overriding
        client.save({
            "conanfile.py": conanfile.format(requires=textwrap.dedent("""
                # This comment line is required to yield the correct indentation
                def requirements(self):
                    self.requires("PkgA/0.3@user/channel", override=True)
                    self.requires("PkgB/0.1@user/channel", override=True)
                    self.requires("PkgC/0.2@user/channel", override=False)
                    self.requires("PkgD/0.1@user/channel", override=False)
                """)),
            "myfile.txt": "This is PkgE/0.1!",
        })
        client.run("export . PkgE/0.1@user/channel")
        client.run("lock create --reference=PkgE/0.1@user/channel --lockfile-out=master.lock")
        # Build every reference in build-order, merging each result into the
        # master lockfile, until the build-order is empty
        while True:
            client.run("lock build-order master.lock --json=build_order.json")
            json_file = os.path.join(client.current_folder, "build_order.json")
            to_build = json.loads(load(json_file))
            if not to_build:
                break
            ref, _, _, _ = to_build[0].pop(0)
            client.run("lock create --reference=%s --lockfile=master.lock "
                       "--lockfile-out=derived.lock" % ref)
            client.run("install %s --build=%s --lockfile=derived.lock "
                       "--lockfile-out=update.lock" % (ref, ref))
            client.run("lock update master.lock update.lock")
        client.run("install PkgE/0.1@user/channel --lockfile=master.lock")
        # Compare only the SELF/DEP FILE lines, order-independent
        filtered_output = [
            line for line in str(client.out).splitlines()
            if any(pattern in line for pattern in ["SELF FILE", "DEP FILE"])
        ]
        expected_output = [
            "PkgA/0.3@user/channel: SELF FILE: This is PkgA/0.3!",
            "PkgB/0.1@user/channel: DEP FILE PkgA: This is PkgA/0.3!",
            "PkgB/0.1@user/channel: SELF FILE: This is PkgB/0.1!",
            "PkgC/0.2@user/channel: DEP FILE PkgA: This is PkgA/0.3!",
            "PkgC/0.2@user/channel: DEP FILE PkgB: This is PkgB/0.1!",
            "PkgC/0.2@user/channel: SELF FILE: This is PkgC/0.2!",
            "PkgD/0.1@user/channel: DEP FILE PkgA: This is PkgA/0.3!",
            "PkgD/0.1@user/channel: DEP FILE PkgB: This is PkgB/0.1!",
            "PkgD/0.1@user/channel: SELF FILE: This is PkgD/0.1!",
            "PkgE/0.1@user/channel: DEP FILE PkgA: This is PkgA/0.3!",
            "PkgE/0.1@user/channel: DEP FILE PkgB: This is PkgB/0.1!",
            "PkgE/0.1@user/channel: DEP FILE PkgC: This is PkgC/0.2!",
            "PkgE/0.1@user/channel: DEP FILE PkgD: This is PkgD/0.1!",
            "PkgE/0.1@user/channel: SELF FILE: This is PkgE/0.1!",
        ]
        self.assertListEqual(
            sorted(filtered_output),
            sorted(expected_output),
            msg = "Original client output:\n%s" % client.out,
        )
def test_options(self):
conanfile = textwrap.dedent("""
from conans import ConanFile
class Pkg(ConanFile):
{requires}
options = {{"myoption": [1, 2, 3, 4, 5]}}
default_options = {{"myoption": 1}}
def build(self):
self.output.info("BUILDING WITH OPTION: %s!!" % self.options.myoption)
def package_info(self):
self.output.info("PACKAGE_INFO OPTION: %s!!" % self.options.myoption)
""")
client = TestClient()
client.save({"conanfile.py": conanfile.format(requires="")})
client.run("export . PkgA/0.1@user/channel")
client.save({"conanfile.py": conanfile.format(requires='requires="PkgA/0.1@user/channel"')})
client.run("export . PkgB/0.1@user/channel")
client.save({"conanfile.py": conanfile.format(requires='requires="PkgB/0.1@user/channel"')})
client.run("export . PkgC/0.1@user/channel")
conanfiled = conanfile.format(requires='requires="PkgC/0.1@user/channel"')
conanfiled = conanfiled.replace('default_options = {"myoption": 1}',
'default_options = {"myoption": 2, "PkgC:myoption": 3,'
'"PkgB:myoption": 4, "PkgA:myoption": 5}')
client.save({"conanfile.py": conanfiled})
client.run("export . PkgD/0.1@user/channel")
client.run("profile new myprofile")
# To make sure we can provide a profile as input
client.run("lock create --reference=PkgD/0.1@user/channel -pr=myprofile "
"--lockfile-out=conan.lock")
lock_file = client.load(LOCKFILE)
client2 = TestClient(cache_folder=client.cache_folder)
client2.save({"conanfile.py": conanfile.format(requires=""), LOCKFILE: lock_file})
client2.run("create . PkgA/0.1@user/channel --lockfile=conan.lock")
self.assertIn("PkgA/0.1@user/channel: BUILDING WITH OPTION: 5!!", client2.out)
self.assertIn("PkgA/0.1@user/channel: PACKAGE_INFO OPTION: 5!!", client2.out)
client2.save({"conanfile.py": conanfile.format(
requires='requires="PkgA/0.1@user/channel"')})
client2.run("create . PkgB/0.1@user/channel --lockfile=conan.lock")
self.assertIn("PkgB/0.1@user/channel: PACKAGE_INFO OPTION: 4!!", client2.out)
self.assertIn("PkgB/0.1@user/channel: BUILDING WITH OPTION: 4!!", client2.out)
self.assertIn("PkgA/0.1@user/channel: PACKAGE_INFO OPTION: 5!!", client2.out)
client2.save({"conanfile.py": conanfile.format(
requires='requires="PkgB/0.1@user/channel"')})
client2.run("create . PkgC/0.1@user/channel --lockfile=conan.lock")
self.assertIn("PkgC/0.1@user/channel: PACKAGE_INFO OPTION: 3!!", client2.out)
self.assertIn("PkgC/0.1@user/channel: BUILDING WITH OPTION: 3!!", client2.out)
self.assertIn("PkgB/0.1@user/channel: PACKAGE_INFO OPTION: 4!!", client2.out)
self.assertIn("PkgA/0.1@user/channel: PACKAGE_INFO OPTION: 5!!", client2.out)
client2.save({"conanfile.py": conanfiled})
client2.run("create . PkgD/0.1@user/channel --lockfile=conan.lock")
self.assertIn("PkgD/0.1@user/channel: PACKAGE_INFO OPTION: 2!!", client2.out)
self.assertIn("PkgD/0.1@user/channel: BUILDING WITH OPTION: 2!!", client2.out)
self.assertIn("PkgC/0.1@user/channel: PACKAGE_INFO OPTION: 3!!", client2.out)
self.assertIn("PkgB/0.1@user/channel: PACKAGE_INFO OPTION: 4!!", client2.out)
self.assertIn("PkgA/0.1@user/channel: PACKAGE_INFO OPTION: 5!!", client2.out)
@pytest.mark.skipif(not get_env("TESTING_REVISIONS_ENABLED", False), reason="Only revisions")
def test_package_revisions_unkown_id_update(self):
# https://github.com/conan-io/conan/issues/7588
client = TestClient()
client.run("config set general.default_package_id_mode=package_revision_mode")
files = {
"pkga/conanfile.py": conanfile.format(requires=""),
"pkga/myfile.txt": "HelloA",
"pkgb/conanfile.py": conanfile.format(requires='requires="PkgA/[*]@user/channel"'),
"pkgb/myfile.txt": "HelloB",
"pkgc/conanfile.py": conanfile.format(requires='requires="PkgB/[*]@user/channel"'),
"pkgc/myfile.txt": "HelloC",
"pkgd/conanfile.py": conanfile.format(requires='requires="PkgC/[*]@user/channel"'),
"pkgd/myfile.txt": "HelloD",
}
client.save(files)
client.run("export pkga PkgA/0.1@user/channel")
client.run("export pkgb PkgB/0.1@user/channel")
client.run("export pkgc PkgC/0.1@user/channel")
client.run("export pkgd PkgD/0.1@user/channel")
client.run("lock create --reference=PkgD/0.1@user/channel --lockfile-out=conan.lock")
lockfile = json.loads(client.load("conan.lock"))
nodes = lockfile["graph_lock"]["nodes"]
self.assertEqual(nodes["3"]["ref"], "PkgB/0.1@user/channel#fa97c46bf83849a5db4564327b3cfada")
self.assertEqual(nodes["3"]["package_id"], "Package_ID_unknown")
client.run("install PkgA/0.1@user/channel --build=PkgA/0.1@user/channel "
"--lockfile=conan.lock --lockfile-out=conan_out.lock")
client.run("lock update conan.lock conan_out.lock")
client.run("install PkgB/0.1@user/channel --build=PkgB/0.1@user/channel "
"--lockfile=conan.lock --lockfile-out=conan_out.lock")
lockfile = json.loads(client.load("conan_out.lock"))
nodes = lockfile["graph_lock"]["nodes"]
self.assertEqual(nodes["3"]["ref"], "PkgB/0.1@user/channel#fa97c46bf83849a5db4564327b3cfada")
self.assertEqual(nodes["3"]["package_id"], "6e9742c2106791c1c777da8ccfb12a1408385d8d")
self.assertEqual(nodes["3"]["prev"], "f971905c142e0de728f32a7237553622")
client.run("lock update conan.lock conan_out.lock")
lockfile = json.loads(client.load("conan.lock"))
nodes = lockfile["graph_lock"]["nodes"]
self.assertEqual(nodes["3"]["ref"], "PkgB/0.1@user/channel#fa97c46bf83849a5db4564327b3cfada")
self.assertEqual(nodes["3"]["package_id"], "6e9742c2106791c1c777da8ccfb12a1408385d8d")
self.assertEqual(nodes["3"]["prev"], "f971905c142e0de728f32a7237553622")
class CIPythonRequiresTest(unittest.TestCase):
    """CI flows when every package in the PkgD->PkgC->PkgB->PkgA chain shares
    a python_requires ("pyreq") resolved through a version range: changing
    the python_requires must drive rebuilds the same way a regular
    requirement change does."""
    # Template for the python_requires package; "{}" is the message that
    # msg() will print from every consumer's package_info()
    python_req = textwrap.dedent("""
        from conans import ConanFile
        def msg(conanfile):
            conanfile.output.info("{}")
        class Pkg(ConanFile):
            pass
        """)
    # Template for the consumer packages; all depend on pyreq via a range
    consumer = textwrap.dedent("""
        from conans import ConanFile, load
        import os
        class Pkg(ConanFile):
            {requires}
            python_requires = "pyreq/[*]@user/channel"
            def package_info(self):
                self.python_requires["pyreq"].module.msg(self)
        """)

    def setUp(self):
        # Build the full chain against pyreq/0.1 ("HelloPyWorld") and capture
        # the graph lockfile in "conan.lock"; tests start from this state
        client = TestClient()
        client.run("config set general.default_package_id_mode=full_package_mode")
        client.save({"conanfile.py": self.python_req.format("HelloPyWorld")})
        client.run("export . pyreq/0.1@user/channel")
        client.save({"conanfile.py": self.consumer.format(requires="")})
        client.run("create . PkgA/0.1@user/channel")
        client.save(
            {"conanfile.py": self.consumer.format(requires='requires="PkgA/0.1@user/channel"')})
        client.run("create . PkgB/0.1@user/channel")
        client.save(
            {"conanfile.py": self.consumer.format(requires='requires="PkgB/[~0]@user/channel"')})
        client.run("create . PkgC/0.1@user/channel")
        client.save(
            {"conanfile.py": self.consumer.format(requires='requires="PkgC/0.1@user/channel"')})
        client.run("create . PkgD/0.1@user/channel")
        for pkg in ("PkgA", "PkgB", "PkgC", "PkgD"):
            self.assertIn("{}/0.1@user/channel: HelloPyWorld".format(pkg), client.out)
        client.run("lock create --reference=PkgD/0.1@user/channel --lockfile-out=conan.lock")
        self.client = client

    def test_version_ranges(self):
        """Release pyreq/0.2 ("ByePyWorld"), rebuild everything in build-order
        with the refreshed lockfile, and verify that the old and new lockfiles
        reproduce the old and new python_requires behavior respectively."""
        client = self.client
        initial_lockfile = client.load("conan.lock")
        # Do a change in python_require
        client.save({"conanfile.py": self.python_req.format("ByePyWorld")})
        client.run("export . pyreq/0.2@user/channel")
        # Go back to main orchestrator
        client.run("lock create --reference=PkgD/0.1@user/channel --lockfile-out=conan.lock")
        client.run("lock build-order conan.lock --json=build_order.json")
        master_lockfile = client.load("conan.lock")
        json_file = client.load("build_order.json")
        to_build = json.loads(json_file)
        lock_fileaux = master_lockfile
        while to_build:
            for ref, _, _, _ in to_build[0]:
                client_aux = TestClient(cache_folder=client.cache_folder)
                client_aux.save({"conan.lock": lock_fileaux})
                client_aux.run("install %s --build=%s --lockfile=conan.lock "
                               "--lockfile-out=conan.lock" % (ref, ref))
                lock_fileaux = client_aux.load("conan.lock")
                client.save({"new_lock/conan.lock": lock_fileaux})
                client.run("lock update conan.lock new_lock/conan.lock")
            # Refresh the remaining build-order after this level is built
            client.run("lock build-order conan.lock --json=bo.json")
            lock_fileaux = client.load("conan.lock")
            to_build = json.loads(client.load("bo.json"))
        new_lockfile = client.load("conan.lock")
        client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
        for pkg in ("PkgA", "PkgB", "PkgC", "PkgD"):
            self.assertIn("{}/0.1@user/channel: ByePyWorld".format(pkg), client.out)
        # Old lockfile still reproduces pyreq/0.1 behavior
        client.save({"conan.lock": initial_lockfile})
        client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
        for pkg in ("PkgA", "PkgB", "PkgC", "PkgD"):
            self.assertIn("{}/0.1@user/channel: HelloPyWorld".format(pkg), client.out)
        client.save({"conan.lock": new_lockfile})
        client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
        for pkg in ("PkgA", "PkgB", "PkgC", "PkgD"):
            self.assertIn("{}/0.1@user/channel: ByePyWorld".format(pkg), client.out)

    def test_version_ranges_partial_unused(self):
        """A partial lock for PkgB/1.0 does not satisfy the PkgB/[~0] range
        required by PkgC, so the resulting product lockfile must not contain
        PkgB/1.0 and its build-order must be empty."""
        client = self.client
        consumer = self.consumer
        # Do a change in B
        client.save({"conanfile.py": consumer.format(requires='requires="PkgA/0.1@user/channel"')})
        client.run("lock create conanfile.py --name=PkgB --version=1.0 --user=user "
                   "--channel=channel --lockfile-out=buildb.lock")
        # Do a change in python_require
        client.save({"conanfile.py": self.python_req.format("ByePyWorld")})
        client.run("export . pyreq/0.2@user/channel")
        # create the package with the previous version of python_require
        client.save({"conanfile.py": consumer.format(requires='requires="PkgA/0.1@user/channel"')})
        # It is a new version, it will not be used in the product build!
        client.run("create . PkgB/1.0@user/channel --lockfile=buildb.lock")
        self.assertIn("pyreq/0.1", client.out)
        self.assertNotIn("pyreq/0.2", client.out)
        # Go back to main orchestrator
        # This should fail, as PkgB/1.0 is not involved in the new resolution
        client.run("lock create --reference=PkgD/0.1@user/channel "
                   "--lockfile=buildb.lock --lockfile-out=error.lock")
        # User can perfectly go and check the resulting lockfile and check if PkgB/0.1 is there
        # We can probably help automate this with a "conan lock find" subcommand
        error_lock = client.load("error.lock")
        self.assertNotIn("PkgB/1.0@user/channel", error_lock)
        # The setUp lockfile is untouched: nothing needs rebuilding
        client.run("lock build-order conan.lock --json=build_order.json")
        json_file = client.load("build_order.json")
        to_build = json.loads(json_file)
        self.assertEqual(to_build, [])
        client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
        for pkg in ("PkgA", "PkgB", "PkgC", "PkgD"):
            self.assertIn("{}/0.1@user/channel: HelloPyWorld".format(pkg), client.out)
        # Unlocked resolution picks pyreq/0.2, whose binaries don't exist yet
        client.run("install PkgD/0.1@user/channel", assert_error=True)
        self.assertIn("ERROR: Missing prebuilt package", client.out)
        client.run("install PkgD/0.1@user/channel --build=missing")
        for pkg in ("PkgA", "PkgB", "PkgC", "PkgD"):
            self.assertIn("{}/0.1@user/channel: ByePyWorld".format(pkg), client.out)

    def test_version_ranges_partial(self):
        """PkgB/0.2 is built against pyreq/0.1 using a partial lockfile; the
        product lockfile must then rebuild only PkgC and PkgD (which pick up
        pyreq/0.2), while PkgB keeps its locked pyreq/0.1 behavior."""
        client = self.client
        consumer = self.consumer
        # Do a change in B
        client.save({"conanfile.py": consumer.format(requires='requires="PkgA/0.1@user/channel"')})
        client.run("lock create conanfile.py --name=PkgB --version=0.2 --user=user "
                   "--channel=channel --lockfile-out=buildb.lock")
        # Do a change in python_require
        client.save({"conanfile.py": self.python_req.format("ByePyWorld")})
        client.run("export . pyreq/0.2@user/channel")
        # create the package with the previous version of python_require
        client.save({"conanfile.py": consumer.format(requires='requires="PkgA/0.1@user/channel"')})
        # It is a new version, it will not be used in the product build!
        client.run("create . PkgB/0.2@user/channel --lockfile=buildb.lock")
        self.assertIn("pyreq/0.1", client.out)
        self.assertNotIn("pyreq/0.2", client.out)
        # Go back to main orchestrator
        client.run("lock create --reference=PkgD/0.1@user/channel "
                   "--lockfile=buildb.lock --lockfile-out=conan.lock")
        client.run("lock build-order conan.lock --json=build_order.json")
        json_file = client.load("build_order.json")
        to_build = json.loads(json_file)
        # Only PkgC and PkgD need rebuilding; exact revision hashes depend on
        # whether revisions are enabled in the cache config
        if client.cache.config.revisions_enabled:
            build_order = [[['PkgC/0.1@user/channel#9e5471ca39a16a120b25ee5690539c71',
                             'bca7337f8d2fde6cdc9dd17cdc56bc0b0a0e352d', 'host', '4']],
                           [['PkgD/0.1@user/channel#068fd3ce2a88181dff0b44de344a93a4',
                             '63a3463d4dd4cc8d7bca7a9fe5140abe582f349a', 'host', '3']]]
        else:
            build_order = [[['PkgC/0.1@user/channel',
                             'bca7337f8d2fde6cdc9dd17cdc56bc0b0a0e352d', 'host', '4']],
                           [['PkgD/0.1@user/channel',
                             '63a3463d4dd4cc8d7bca7a9fe5140abe582f349a', 'host', '3']]]
        self.assertEqual(to_build, build_order)
        client.run("install PkgD/0.1@user/channel --lockfile=conan.lock --build=missing")
        self.assertIn("PkgA/0.1@user/channel: HelloPyWorld", client.out)
        self.assertIn("PkgB/0.2@user/channel: HelloPyWorld", client.out)
        self.assertIn("PkgC/0.1@user/channel: ByePyWorld", client.out)
        self.assertIn("PkgD/0.1@user/channel: ByePyWorld", client.out)
        # Unlocked: everything resolves to pyreq/0.2 and must be rebuilt
        client.run("install PkgD/0.1@user/channel", assert_error=True)
        self.assertIn("ERROR: Missing prebuilt package", client.out)
        client.run("install PkgD/0.1@user/channel --build=missing")
        self.assertIn("PkgA/0.1@user/channel: ByePyWorld", client.out)
        self.assertIn("PkgB/0.2@user/channel: ByePyWorld", client.out)
        self.assertIn("PkgC/0.1@user/channel: ByePyWorld", client.out)
        self.assertIn("PkgD/0.1@user/channel: ByePyWorld", client.out)
        client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
        self.assertIn("PkgA/0.1@user/channel: HelloPyWorld", client.out)
        self.assertIn("PkgB/0.2@user/channel: HelloPyWorld", client.out)
        self.assertIn("PkgC/0.1@user/channel: ByePyWorld", client.out)
        self.assertIn("PkgD/0.1@user/channel: ByePyWorld", client.out)
class CIBuildRequiresTest(unittest.TestCase):
    """CI flow with a profile-injected build_requires ("br") resolved through
    a version range: the captured lockfile must pin br/0.1 for every rebuild
    even after br/0.2 has been released."""

    def test_version_ranges(self):
        """Lock the graph with ``--build`` so build_requires are included,
        release br/0.2, and verify every build-order rebuild still uses the
        locked br/0.1; an unlocked build picks br/0.2."""
        client = TestClient()
        client.run("config set general.default_package_id_mode=full_package_mode")
        # The build_requires is injected by the profile, via a version range
        myprofile = textwrap.dedent("""
            [build_requires]
            br/[>=0.1]@user/channel
            """)
        files = {
            "myprofile": myprofile,
            "br/conanfile.py": GenConanfile(),
            "pkga/conanfile.py": conanfile.format(requires=""),
            "pkga/myfile.txt": "HelloA",
            "pkgb/conanfile.py": conanfile.format(requires='requires="PkgA/[*]@user/channel"'),
            "pkgb/myfile.txt": "HelloB",
            "pkgc/conanfile.py": conanfile.format(requires='requires="PkgB/[*]@user/channel"'),
            "pkgc/myfile.txt": "HelloC",
            "pkgd/conanfile.py": conanfile.format(requires='requires="PkgC/[*]@user/channel"'),
            "pkgd/myfile.txt": "HelloD",
        }
        client.save(files)
        client.run("create br br/0.1@user/channel")
        client.run("create pkga PkgA/0.1@user/channel -pr=myprofile")
        client.run("create pkgb PkgB/0.1@user/channel -pr=myprofile")
        client.run("create pkgc PkgC/0.1@user/channel -pr=myprofile")
        client.run("create pkgd PkgD/0.1@user/channel -pr=myprofile")
        self.assertIn("PkgD/0.1@user/channel: SELF FILE: HelloD", client.out)
        self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgA: HelloA", client.out)
        self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgB: HelloB", client.out)
        self.assertIn("PkgD/0.1@user/channel: DEP FILE PkgC: HelloC", client.out)
        # Go back to main orchestrator
        # --build is needed so the lockfile also captures build_requires
        client.run("lock create --reference=PkgD/0.1@user/channel --build -pr=myprofile "
                   " --lockfile-out=conan.lock")
        # Do a change in br
        client.run("create br br/0.2@user/channel")
        client.run("lock build-order conan.lock --json=build_order.json")
        self.assertIn("br/0.1", client.out)
        self.assertNotIn("br/0.2", client.out)
        master_lockfile = client.load("conan.lock")
        json_file = client.load("build_order.json")
        to_build = json.loads(json_file)
        # Exact revision hashes depend on whether revisions are enabled; note
        # br is first in the order, as everything build-requires it
        if client.cache.config.revisions_enabled:
            build_order = [[['br/0.1@user/channel#f3367e0e7d170aa12abccb175fee5f97',
                             '5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9', 'host', '5']],
                           [['PkgA/0.1@user/channel#189390ce059842ce984e0502c52cf736',
                             '5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9', 'host', '4']],
                           [['PkgB/0.1@user/channel#fa97c46bf83849a5db4564327b3cfada',
                             '096f747d204735584fa0115bcbd7482d424094bc', 'host', '3']],
                           [['PkgC/0.1@user/channel#c6f95948619d28d9d96b0ae86c46a482',
                             'f6d5dbb6f309dbf8519278bae8d07d3b739b3dec', 'host', '2']],
                           [['PkgD/0.1@user/channel#fce78c934bc0de73eeb05eb4060fc2b7',
                             'de4467a3fa6ef01b09b7464e85553fb4be2d2096', 'host', '1']]]
        else:
            build_order = [[['br/0.1@user/channel',
                             '5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9', 'host', '5']],
                           [['PkgA/0.1@user/channel',
                             '5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9', 'host', '4']],
                           [['PkgB/0.1@user/channel',
                             '096f747d204735584fa0115bcbd7482d424094bc', 'host', '3']],
                           [['PkgC/0.1@user/channel',
                             'f6d5dbb6f309dbf8519278bae8d07d3b739b3dec', 'host', '2']],
                           [['PkgD/0.1@user/channel',
                             'de4467a3fa6ef01b09b7464e85553fb4be2d2096', 'host', '1']]]
        self.assertEqual(to_build, build_order)
        lock_fileaux = master_lockfile
        while to_build:
            for ref, _, _, _ in to_build[0]:
                client_aux = TestClient(cache_folder=client.cache_folder)
                client_aux.save({LOCKFILE: lock_fileaux})
                client_aux.run("install %s --build=%s --lockfile=conan.lock "
                               "--lockfile-out=conan.lock" % (ref, ref))
                # Every rebuild must keep using the locked br/0.1
                self.assertIn("br/0.1", client_aux.out)
                self.assertNotIn("br/0.2", client_aux.out)
                lock_fileaux = client_aux.load(LOCKFILE)
                client.save({"new_lock/%s" % LOCKFILE: lock_fileaux})
                client.run("lock update conan.lock new_lock/conan.lock")
            client.run("lock build-order conan.lock --json=build_order.json")
            lock_fileaux = client.load(LOCKFILE)
            to_build = json.loads(client.load("build_order.json"))
        client.run("install PkgD/0.1@user/channel --lockfile=conan.lock")
        # No build require at all
        self.assertNotIn("br/0.", client.out)
        # Unlocked --build resolves the range to the newest br/0.2
        client.run("install PkgD/0.1@user/channel --build -pr=myprofile")
        self.assertIn("br/0.2", client.out)
        self.assertNotIn("br/0.1", client.out)
class CIBuildRequiresTwoProfilesTest(unittest.TestCase):
    """CI-style incremental rebuild driven by a lockfile, using the
    two-profile model: the ``br`` build-require is injected through the
    host profile but built with a separate build profile (-pr:b), so it
    is compiled for Windows while the regular graph targets Linux.
    """

    def test_version_ranges(self):
        client = TestClient()
        # full_package_mode: a change in any dependency's package-id invalidates consumers
        client.run("config set general.default_package_id_mode=full_package_mode")
        myprofile_host = textwrap.dedent("""
            [settings]
            os=Linux
            [build_requires]
            br/[>=0.1]
            """)
        myprofile_build = textwrap.dedent("""
            [settings]
            os=Windows
            """)
        # Recipe template: packages a marker file with its name+os and echoes
        # both its own file and every imported dependency file at package_info time
        conanfile_os = textwrap.dedent("""
            from conans import ConanFile, load
            from conans.tools import save
            import os
            class Pkg(ConanFile):
                settings = "os"
                {requires}
                keep_imports = True
                def imports(self):
                    self.copy("myfile.txt", folder=True)
                def package(self):
                    save(os.path.join(self.package_folder, "myfile.txt"),
                         "%s %s" % (self.name, self.settings.os))
                    self.copy("*myfile.txt")
                def package_info(self):
                    self.output.info("MYOS=%s!!!" % self.settings.os)
                    self.output.info("SELF FILE: %s"
                                     % load(os.path.join(self.package_folder, "myfile.txt")))
                    for d in os.listdir(self.package_folder):
                        p = os.path.join(self.package_folder, d, "myfile.txt")
                        if os.path.isfile(p):
                            self.output.info("DEP FILE %s: %s" % (d, load(p)))
            """)
        files = {
            "profile_host": myprofile_host,
            "profile_build": myprofile_build,
            "br/conanfile.py": conanfile_os.format(requires=""),
            "pkga/conanfile.py": conanfile_os.format(requires=""),
            "pkgb/conanfile.py": conanfile_os.format(requires='requires="PkgA/[*]"'),
            "pkgc/conanfile.py": conanfile_os.format(requires='requires="PkgB/[*]"'),
            "pkgd/conanfile.py": conanfile_os.format(requires='requires="PkgC/[*]"'),
        }
        client.save(files)
        # Note the creating of BR is in the BUILD profile
        client.run("create br br/0.1@ --build-require -pr:h=profile_host -pr:b=profile_build")
        assert "br/0.1: SELF FILE: br Linux" not in client.out
        client.run("create pkga PkgA/0.1@ -pr:h=profile_host -pr:b=profile_build")
        client.run("create pkgb PkgB/0.1@ -pr:h=profile_host -pr:b=profile_build")
        client.run("create pkgc PkgC/0.1@ -pr:h=profile_host -pr:b=profile_build")
        client.run("create pkgd PkgD/0.1@ -pr:h=profile_host -pr:b=profile_build")
        # Host packages (and their deps) were all built for Linux
        self.assertIn("PkgD/0.1: SELF FILE: PkgD Linux", client.out)
        self.assertIn("PkgD/0.1: DEP FILE PkgA: PkgA Linux", client.out)
        self.assertIn("PkgD/0.1: DEP FILE PkgB: PkgB Linux", client.out)
        self.assertIn("PkgD/0.1: DEP FILE PkgC: PkgC Linux", client.out)

        # Go back to main orchestrator
        client.run("lock create --reference=PkgD/0.1@ --build -pr:h=profile_host -pr:b=profile_build"
                   " --lockfile-out=conan.lock")
        # Do a change in br
        client.run("create br br/0.2@ ")

        client.run("lock build-order conan.lock --json=build_order.json")
        # The lockfile must keep br/0.1 pinned even though br/0.2 now exists
        self.assertIn("br/0.1", client.out)
        self.assertNotIn("br/0.2", client.out)
        master_lockfile = client.load("conan.lock")

        json_file = client.load("build_order.json")
        to_build = json.loads(json_file)
        if client.cache.config.revisions_enabled:
            build_order = [[['br/0.1@#583b8302673adce66f12f2bec01fe9c3',
                             '3475bd55b91ae904ac96fde0f106a136ab951a5e', 'build', '5']],
                           [['PkgA/0.1@#583b8302673adce66f12f2bec01fe9c3',
                             'cb054d0b3e1ca595dc66bc2339d40f1f8f04ab31', 'host', '4']],
                           [['PkgB/0.1@#4b1da86739946fe16a9545d1f6bc9022',
                             '4a87f1e30266a1c1c685c0904cfb137a3dba11c7', 'host', '3']],
                           [['PkgC/0.1@#3e1048668b2a795f6742d04971f11a7d',
                             '50ad117314ca51a58e427a26f264e27e79b94cd4', 'host', '2']],
                           [['PkgD/0.1@#e6cc0ca095ca32bba1a6dff0af6f4eb3',
                             'e66cc39a683367fdd17218bdbab7d6e95c0414e1', 'host', '1']]]
        else:
            build_order = [[['br/0.1@', '3475bd55b91ae904ac96fde0f106a136ab951a5e', 'build', '5']],
                           [['PkgA/0.1@', 'cb054d0b3e1ca595dc66bc2339d40f1f8f04ab31', 'host', '4']],
                           [['PkgB/0.1@', '4a87f1e30266a1c1c685c0904cfb137a3dba11c7', 'host', '3']],
                           [['PkgC/0.1@', '50ad117314ca51a58e427a26f264e27e79b94cd4', 'host', '2']],
                           [['PkgD/0.1@', 'e66cc39a683367fdd17218bdbab7d6e95c0414e1', 'host', '1']]]
        self.assertEqual(to_build, build_order)

        # Rebuild level by level, each ref in a fresh client sharing the cache,
        # then merge the partial lockfiles back and recompute the remaining order
        lock_fileaux = master_lockfile
        while to_build:
            for ref, _, build, _ in to_build[0]:
                client_aux = TestClient(cache_folder=client.cache_folder)
                client_aux.save({LOCKFILE: lock_fileaux})
                is_build_require = "--build-require" if build == "build" else ""
                client_aux.run("install %s --build=%s --lockfile=conan.lock "
                               "--lockfile-out=conan.lock %s" % (ref, ref, is_build_require))
                # The build-require is compiled with the build profile => Windows
                assert "br/0.1: SELF FILE: br Windows" in client_aux.out
                self.assertIn("br/0.1", client_aux.out)
                self.assertNotIn("br/0.2", client_aux.out)
                lock_fileaux = client_aux.load(LOCKFILE)

            client.save({"new_lock/%s" % LOCKFILE: lock_fileaux})
            client.run("lock update conan.lock new_lock/conan.lock")
            client.run("lock build-order conan.lock --json=build_order.json")
            lock_fileaux = client.load(LOCKFILE)
            to_build = json.loads(client.load("build_order.json"))

        client.run("install PkgD/0.1@ --lockfile=conan.lock")
        # No build require at all
        self.assertNotIn("br/0.", client.out)
        client.run("install PkgD/0.1@ --build -pr:h=profile_host -pr:b=profile_build")
        # Without the lockfile the version range resolves to the newer br/0.2
        self.assertIn("br/0.2", client.out)
        self.assertNotIn("br/0.1", client.out)
class CIPrivateRequiresTest(unittest.TestCase):
    """Build-order computation when the same 'private' requirement appears
    twice in the graph with conflicting option values, so two distinct
    binaries of it must be buildable from the lockfile
    (https://github.com/conan-io/conan/issues/7985)."""

    def test(self):
        # https://github.com/conan-io/conan/issues/7985
        client = TestClient()
        files = {
            "private/conanfile.py": GenConanfile().with_option("myoption", [True, False]),
            # pkga forces myoption=True on its private require
            "pkga/conanfile.py": textwrap.dedent("""
                from conans import ConanFile
                class PkgA(ConanFile):
                    requires = ("private/0.1", "private"),
                    def configure(self):
                        self.options["private"].myoption = True
                """),
            # pkgb forces myoption=False, producing a second 'private' binary
            "pkgb/conanfile.py": textwrap.dedent("""
                from conans import ConanFile
                class PkgB(ConanFile):
                    requires = "pkga/0.1", ("private/0.1", "private"),
                    def configure(self):
                        self.options["private"].myoption = False
                """),
            "pkgc/conanfile.py": GenConanfile().with_require("pkgb/0.1")
        }
        client.save(files)
        client.run("export private private/0.1@")
        client.run("export pkga pkga/0.1@")
        client.run("export pkgb pkgb/0.1@")
        client.run("lock create pkgc/conanfile.py --name=pkgc --version=0.1 --build "
                   "--lockfile-out=conan.lock")
        client.run("lock build-order conan.lock --json=build_order.json")
        json_file = client.load("build_order.json")
        to_build = json.loads(json_file)
        # The first level must contain BOTH package-ids of private/0.1
        if client.cache.config.revisions_enabled:
            build_order = [[['private/0.1@#e31c7a656abb86256b08af0e64d37d42',
                             'd2560ba1787c188a1d7fabeb5f8e012ac53301bb', 'host', '3'],
                            ['private/0.1@#e31c7a656abb86256b08af0e64d37d42',
                             '5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9', 'host', '4']],
                           [['pkga/0.1@#edf085c091f9f4adfdb623bda9415a79',
                             '5b0fc4382d9c849ae3ef02a57b62b26ad5137990', 'host', '2']],
                           [['pkgb/0.1@#9b0edf8f61a88f92e05919b406d74089',
                             'd7d6ac48b43e368b0a5ff79015acea49b758ffdf', 'host', '1']]]
        else:
            build_order = [[['private/0.1@',
                             'd2560ba1787c188a1d7fabeb5f8e012ac53301bb', 'host', '3'],
                            ['private/0.1@',
                             '5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9', 'host', '4']],
                           [['pkga/0.1@',
                             '5b0fc4382d9c849ae3ef02a57b62b26ad5137990', 'host', '2']],
                           [['pkgb/0.1@',
                             'd7d6ac48b43e368b0a5ff79015acea49b758ffdf', 'host', '1']]]
        self.assertEqual(to_build, build_order)
        # Each private binary is addressed by its lockfile node-id, since the
        # reference alone is ambiguous (same ref, two package-ids)
        for ref, pid, _, node_id in build_order[0]:
            client.run("install %s --build=%s --lockfile=conan.lock --lockfile-out=conan.lock "
                       "--lockfile-node-id=%s" % (ref, ref, node_id))
            self.assertIn('private/0.1:{} - Build'.format(pid), client.out)
| mit | 18d50d8a2b02aed1f3894c333eb85a3a | 50.947471 | 101 | 0.577544 | 3.285468 | false | true | false | false |
conan-io/conan | conans/client/generators/visualstudiolegacy.py | 2 | 1518 | from conans.model import Generator
class VisualStudioLegacyGenerator(Generator):
    """Generator producing a legacy Visual Studio (VS 2005/2008) ``.vsprops``
    property sheet with the aggregated dependency build information.

    Note: the paths injected into XML attribute values must have their quotes
    escaped as ``&quot;`` entities; this text had been corrupted by HTML-entity
    decoding (leaving invalid ``""%s";"`` literals) and is restored here.
    """
    template = '''<?xml version="1.0" encoding="Windows-1252"?>
<VisualStudioPropertySheet
    ProjectType="Visual C++"
    Version="8.00"
    Name="conanbuildinfo"
    >
    <Tool
        Name="VCCLCompilerTool"
        AdditionalOptions="{compiler_flags}"
        AdditionalIncludeDirectories="{include_dirs}"
        PreprocessorDefinitions="{definitions}"
    />
    <Tool
        Name="VCLinkerTool"
        AdditionalOptions="{linker_flags}"
        AdditionalDependencies="{libs}"
        AdditionalLibraryDirectories="{lib_dirs}"
    />
</VisualStudioPropertySheet>'''

    @property
    def filename(self):
        # Fixed output name consumed by the Visual Studio project
        return 'conanbuildinfo.vsprops'

    @property
    def content(self):
        """Return the property sheet text with all fields substituted."""
        fields = {
            # Paths are wrapped in &quot;...&quot; (XML-escaped quotes), joined
            # with ';' and normalized to forward slashes.
            'include_dirs': "".join("&quot;%s&quot;;" % p for p in self._deps_build_info.include_paths).replace("\\", "/"),
            'lib_dirs': "".join("&quot;%s&quot;;" % p for p in self._deps_build_info.lib_paths).replace("\\", "/"),
            # Ensure every library name carries the .lib extension
            'libs': "".join(['%s.lib ' % lib if not lib.endswith(".lib")
                             else '%s ' % lib for lib in self._deps_build_info.libs]),
            'definitions': "".join("%s;" % d for d in self._deps_build_info.defines),
            'compiler_flags': " ".join(self._deps_build_info.cxxflags + self._deps_build_info.cflags),
            'linker_flags': " ".join(self._deps_build_info.sharedlinkflags),
        }
        return self.template.format(**fields)
| mit | 64e3eff28558c3f70afe8a8f851c470d | 36.95 | 123 | 0.596838 | 3.973822 | false | false | false | false |
conan-io/conan | conans/test/integration/conanfile/generators_list_test.py | 1 | 1976 | import textwrap
import unittest
from conans.test.utils.tools import TestClient
class ConanfileRepeatedGeneratorsTestCase(unittest.TestCase):
    """A generator listed more than once must be instantiated only once."""

    def _assert_cmake_created_once(self, client):
        # The install log must mention the cmake generator exactly once
        self.assertEqual(str(client.out).count("Generator cmake created"), 1)

    def test_conanfile_txt(self):
        consumer_txt = textwrap.dedent("""
            [generators]
            cmake
            CMakeDeps
            cmake
            """)
        client = TestClient()
        client.save({'conanfile.txt': consumer_txt})
        client.run("install conanfile.txt")
        self._assert_cmake_created_once(client)

    def test_conanfile_py(self):
        consumer_py = textwrap.dedent("""
            from conans import ConanFile
            class Recipe(ConanFile):
                settings = "build_type"
                generators = "cmake", "CMakeDeps", "cmake"
            """)
        client = TestClient()
        client.save({'conanfile.py': consumer_py})
        client.run("install conanfile.py")
        self._assert_cmake_created_once(client)

    def test_python_requires_inheritance(self):
        # The python_requires base contributes "cmake", "CMakeDeps"; the
        # consumer declares them again and concatenates both in init()
        base_recipe = textwrap.dedent("""
            from conans import ConanFile
            class Recipe(ConanFile):
                pass
            class BaseConan(object):
                generators = "cmake", "CMakeDeps"
            """)
        consumer = textwrap.dedent("""
            from conans import ConanFile
            class Recipe(ConanFile):
                settings = "build_type"
                python_requires = "base/1.0"
                python_requires_extend = "base.BaseConan"
                generators = "cmake", "CMakeDeps"
                def init(self):
                    base = self.python_requires["base"].module.BaseConan
                    self.generators = base.generators + self.generators
            """)
        client = TestClient()
        client.save({'pyreq.py': base_recipe, 'conanfile.py': consumer})
        client.run("export pyreq.py base/1.0@")
        client.run("install conanfile.py")
        self._assert_cmake_created_once(client)
| mit | 96586e484fa151a69f0eda7f6d089004 | 29.875 | 72 | 0.555162 | 4.142558 | false | true | false | false |
conan-io/conan | conans/model/conan_file.py | 1 | 18746 | import os
import platform
from contextlib import contextmanager
from pathlib import Path
import six
from six import string_types
from conans.client import tools
from conans.client.output import ScopedOutput
from conans.client.subsystems import command_env_wrapper
from conans.client.tools.env import environment_append, no_op, pythonpath
from conans.client.tools.oss import OSInfo
from conans.errors import ConanException, ConanInvalidConfiguration
from conans.model.build_info import DepsCppInfo
from conans.model.conf import Conf
from conans.model.dependencies import ConanFileDependencies
from conans.model.env_info import DepsEnvInfo
from conans.model.layout import Folders, Infos
from conans.model.new_build_info import from_old_cppinfo
from conans.model.options import Options, OptionsValues, PackageOptions
from conans.model.requires import Requirements
from conans.model.user_info import DepsUserInfo
from conans.paths import RUN_LOG_NAME
from conans.util.conan_v2_mode import conan_v2_error
def create_options(conanfile):
    """Build the Options object for *conanfile* from its declared
    'options' and 'default_options' class attributes.

    Accepts dict (preferred), list/tuple or multiline string defaults;
    the latter two trigger a Conan v2 deprecation error.
    """
    try:
        result = Options(PackageOptions(getattr(conanfile, "options", None)))
        declared_defaults = getattr(conanfile, "default_options", None)
        if declared_defaults:
            if isinstance(declared_defaults, dict):
                default_values = OptionsValues(declared_defaults)
            elif isinstance(declared_defaults, (list, tuple)):
                conan_v2_error("Declare 'default_options' as a dictionary")
                default_values = OptionsValues(declared_defaults)
            elif isinstance(declared_defaults, six.string_types):
                conan_v2_error("Declare 'default_options' as a dictionary")
                default_values = OptionsValues.loads(declared_defaults)
            else:
                raise ConanException("Please define your default_options as list, "
                                     "multiline string or dictionary")
            result.values = default_values
        return result
    except Exception as e:
        raise ConanException("Error while initializing options. %s" % str(e))
def create_requirements(conanfile):
    """Build the Requirements object from the recipe's 'requires' attribute,
    which may be absent, empty, a single reference, or a list/tuple."""
    try:
        declared = getattr(conanfile, "requires", None)
        if not declared:
            # Missing or empty declaration: no requirements
            return Requirements()
        if isinstance(declared, (tuple, list)):
            return Requirements(*declared)
        return Requirements(declared, )
    except Exception as e:
        raise ConanException("Error while initializing requirements. %s" % str(e))
def create_settings(conanfile, settings):
    """Constrain the profile *settings* to those declared by the recipe.

    A single string declaration is normalized to a one-element list before
    applying the constraint.
    """
    try:
        declared = getattr(conanfile, "settings", None)
        if isinstance(declared, str):
            declared = [declared]
        settings.constraint(declared or {})
        return settings
    except Exception as e:
        raise ConanInvalidConfiguration("The recipe %s is constraining settings. %s" % (
            conanfile.display_name, str(e)))
@contextmanager
def _env_and_python(conanfile):
    """Apply the conanfile environment plus the legacy PYTHONPATH entries."""
    # FIXME Conan 2.0, Remove old ways of reusing python code
    with environment_append(conanfile.env), pythonpath(conanfile):
        yield
def get_env_context_manager(conanfile, without_python=False):
    """Return the context manager that applies the conanfile environment,
    or a no-op when the recipe disabled 'apply_env'."""
    if not conanfile.apply_env:
        return no_op()
    return environment_append(conanfile.env) if without_python else _env_and_python(conanfile)
class ConanFile(object):
    """ The base class for all package recipes
    """

    name = None
    version = None  # Any str, can be "1.1" or whatever
    url = None  # The URL where this File is located, as github, to collaborate in package
    # The license of the PACKAGE, just a shortcut, does not replace or
    # change the actual license of the source code
    license = None
    author = None  # Main maintainer/responsible for the package, any format
    description = None
    topics = None
    homepage = None
    build_policy = None
    upload_policy = None
    short_paths = False
    apply_env = True  # Apply environment variables from requires deps_env_info and profiles
    exports = None
    exports_sources = None
    generators = ["txt"]
    revision_mode = "hash"

    # Vars to control the build steps (build(), package())
    should_configure = True
    should_build = True
    should_install = True
    should_test = True
    in_local_cache = True
    develop = False

    # Defaulting the reference fields
    default_channel = None
    default_user = None

    # Settings and Options
    settings = None
    options = None
    default_options = None

    provides = None
    deprecated = None

    # Folders
    folders = None
    patterns = None

    # Run in windows bash
    win_bash = None
    win_bash_run = None  # For run scope

    tested_reference_str = None

    def __init__(self, output, runner, display_name="", user=None, channel=None):
        # an output stream (writeln, info, warn error)
        self.output = ScopedOutput(display_name, output)
        self.display_name = display_name
        # something that can run commands, as os.sytem
        self._conan_runner = runner
        self._conan_user = user
        self._conan_channel = channel
        self.compatible_packages = []
        self._conan_using_build_profile = False
        self._conan_requester = None
        from conan.tools.env import Environment
        self.buildenv_info = Environment()
        self.runenv_info = Environment()
        # At the moment only for build_requires, others will be ignored
        self.conf_info = Conf()
        self._conan_buildenv = None  # The profile buildenv, will be assigned initialize()
        self._conan_runenv = None
        self._conan_node = None  # access to container Node object, to access info, context, deps...
        self._conan_new_cpp_info = None   # Will be calculated lazy in the getter
        self._conan_dependencies = None

        self.env_scripts = {}  # Accumulate the env scripts generated in order

        # layout() method related variables:
        self.folders = Folders()
        self.cpp = Infos()
        # Default package layout mirroring the classic cache package structure
        self.cpp.package.includedirs = ["include"]
        self.cpp.package.libdirs = ["lib"]
        self.cpp.package.bindirs = ["bin"]
        self.cpp.package.resdirs = []
        self.cpp.package.builddirs = [""]
        self.cpp.package.frameworkdirs = []

    @property
    def context(self):
        # 'host' or 'build' context of this node in the dependency graph
        return self._conan_node.context

    @property
    def dependencies(self):
        # Caching it, this object is requested many times
        if self._conan_dependencies is None:
            self._conan_dependencies = ConanFileDependencies.from_node(self._conan_node)
        return self._conan_dependencies

    @property
    def ref(self):
        # Recipe reference (name/version@user/channel)
        return self._conan_node.ref

    @property
    def pref(self):
        # Package reference (recipe ref + package_id)
        return self._conan_node.pref

    @property
    def buildenv(self):
        # Lazy computation of the package buildenv based on the profile one
        from conan.tools.env import Environment
        if not isinstance(self._conan_buildenv, Environment):
            # TODO: missing user/channel
            ref_str = "{}/{}".format(self.name, self.version)
            self._conan_buildenv = self._conan_buildenv.get_profile_env(ref_str)
        return self._conan_buildenv

    @property
    def runenv(self):
        # Lazy computation of the package runenv based on the profile one
        from conan.tools.env import Environment
        if not isinstance(self._conan_runenv, Environment):
            # TODO: missing user/channel
            ref_str = "{}/{}".format(self.name, self.version)
            self._conan_runenv = self._conan_runenv.get_profile_env(ref_str)
        return self._conan_runenv

    def initialize(self, settings, env, buildenv=None, runenv=None):
        """Late initialization done by the loader once profile settings and
        environment are known: resolves options, requirements and settings."""
        self._conan_buildenv = buildenv
        self._conan_runenv = runenv
        if isinstance(self.generators, str):
            self.generators = [self.generators]
        # User defined options
        self.options = create_options(self)
        self.requires = create_requirements(self)
        self.settings = create_settings(self, settings)

        conan_v2_error("Setting 'cppstd' is deprecated in favor of 'compiler.cppstd',"
                       " please update your recipe.", 'cppstd' in self.settings.fields)

        # needed variables to pack the project
        self.cpp_info = None  # Will be initialized at processing time
        self._conan_dep_cpp_info = None  # Will be initialized at processing time
        self.deps_cpp_info = DepsCppInfo()

        # environment variables declared in the package_info
        self.env_info = None  # Will be initialized at processing time
        self.deps_env_info = DepsEnvInfo()

        # user declared variables
        self.user_info = None
        # Keys are the package names (only 'host' if different contexts)
        self.deps_user_info = DepsUserInfo()

        # user specified env variables
        self._conan_env_values = env.copy()  # user specified -e

        if self.description is not None and not isinstance(self.description, six.string_types):
            raise ConanException("Recipe 'description' must be a string.")

        if not hasattr(self, "virtualbuildenv"):  # Allow the user to override it with True or False
            self.virtualbuildenv = True
        if not hasattr(self, "virtualrunenv"):  # Allow the user to override it with True or False
            self.virtualrunenv = True

    @property
    def new_cpp_info(self):
        # Lazy conversion of the legacy cpp_info into the new model
        if not self._conan_new_cpp_info:
            self._conan_new_cpp_info = from_old_cppinfo(self.cpp_info)
            # The new_cpp_info will be already absolute paths if layout() is defined
            if self.package_folder is not None:  # to not crash when editable and layout()
                self._conan_new_cpp_info.set_relative_base_folder(self.package_folder)
        return self._conan_new_cpp_info

    @property
    def source_folder(self):
        return self.folders.source_folder

    @property
    def source_path(self) -> Path:
        assert self.source_folder is not None, "`source_folder` is `None`"
        return Path(self.source_folder)

    @property
    def export_sources_folder(self):
        """points to the base source folder when calling source() and to the cache export sources
        folder while calling the exports_sources() method. Prepared in case we want to introduce a
        'no_copy_export_sources' and point to the right location always."""
        return self.folders.base_export_sources

    @property
    def export_sources_path(self) -> Path:
        assert self.export_sources_folder is not None, "`export_sources_folder` is `None`"
        return Path(self.export_sources_folder)

    @property
    def export_folder(self):
        return self.folders.base_export

    @property
    def export_path(self) -> Path:
        assert self.export_folder is not None, "`export_folder` is `None`"
        return Path(self.export_folder)

    @property
    def build_folder(self):
        return self.folders.build_folder

    @property
    def build_path(self) -> Path:
        assert self.build_folder is not None, "`build_folder` is `None`"
        return Path(self.build_folder)

    @property
    def package_folder(self):
        return self.folders.base_package

    @property
    def package_path(self) -> Path:
        assert self.package_folder is not None, "`package_folder` is `None`"
        return Path(self.package_folder)

    @property
    def install_folder(self):
        # FIXME: Remove in 2.0, no self.install_folder
        return self.folders.base_install

    @property
    def generators_folder(self):
        # FIXME: Remove in 2.0, no self.install_folder
        return self.folders.generators_folder if self.folders.generators else self.install_folder

    @property
    def generators_path(self) -> Path:
        assert self.generators_folder is not None, "`generators_folder` is `None`"
        return Path(self.generators_folder)

    @property
    def imports_folder(self):
        return self.folders.imports_folder

    @property
    def env(self):
        """Apply the self.deps_env_info into a copy of self._conan_env_values (will prioritize the
        self._conan_env_values, user specified from profiles or -e first, then inherited)"""
        # Cannot be lazy cached, because it's called in configure node, and we still don't have
        # the deps_env_info objects available
        tmp_env_values = self._conan_env_values.copy()
        tmp_env_values.update(self.deps_env_info)
        ret, multiple = tmp_env_values.env_dicts(self.name, self.version, self._conan_user,
                                                 self._conan_channel)
        ret.update(multiple)
        return ret

    @property
    def channel(self):
        # Resolution order: explicit channel, CONAN_CHANNEL env var (deprecated), default_channel
        if not self._conan_channel:
            _env_channel = os.getenv("CONAN_CHANNEL")
            conan_v2_error("Environment variable 'CONAN_CHANNEL' is deprecated", _env_channel)
            self._conan_channel = _env_channel or self.default_channel
            if not self._conan_channel:
                raise ConanException("channel not defined, but self.channel is used in conanfile")
        return self._conan_channel

    @property
    def user(self):
        # Resolution order: explicit user, CONAN_USERNAME env var (deprecated), default_user
        if not self._conan_user:
            _env_username = os.getenv("CONAN_USERNAME")
            conan_v2_error("Environment variable 'CONAN_USERNAME' is deprecated", _env_username)
            self._conan_user = _env_username or self.default_user
            if not self._conan_user:
                raise ConanException("user not defined, but self.user is used in conanfile")
        return self._conan_user

    def collect_libs(self, folder=None):
        conan_v2_error("'self.collect_libs' is deprecated, use 'tools.collect_libs(self)' instead")
        return tools.collect_libs(self, folder=folder)

    @property
    def build_policy_missing(self):
        return self.build_policy == "missing"

    @property
    def build_policy_always(self):
        return self.build_policy == "always"

    def source(self):
        """Retrieve the source code; meant to be overridden by recipes."""
        pass

    def system_requirements(self):
        """ this method can be overwritten to implement logic for system package
        managers, as apt-get

        You can define self.global_system_requirements = True, if you want the installation
        to be for all packages (not depending on settings/options/requirements)
        """

    def config_options(self):
        """ modify options, probably conditioned to some settings. This call is executed
        before config_settings. E.g.
        if self.settings.os == "Windows":
            del self.options.shared  # shared/static not supported in win
        """

    def configure(self):
        """ modify settings, probably conditioned to some options. This call is executed
        after config_options. E.g.
        if self.options.header_only:
            self.settings.clear()
        This is also the place for conditional requirements
        """

    def build(self):
        """ build your project calling the desired build tools as done in the command line.
        E.g. self.run("cmake --build .") Or use the provided build helpers. E.g. cmake.build()
        """
        self.output.warn("This conanfile has no build step")

    def package(self):
        """ package the needed files from source and build folders.
        E.g. self.copy("*.h", src="src/includes", dst="includes")
        """
        self.output.warn("This conanfile has no package step")

    def package_info(self):
        """ define cpp_build_info, flags, etc
        """

    def run(self, command, output=True, cwd=None, win_bash=False, subsystem=None, msys_mingw=True,
            ignore_errors=False, run_environment=False, with_login=True, env="", scope="build"):
        """Run *command* wrapping it with the generated environment launchers.

        Raises ConanException on a non-zero exit code unless ignore_errors=True;
        returns the command's return code.
        """
        # NOTE: "self.win_bash" is the new parameter "win_bash" for Conan 2.0
        if env == "":  # This default allows not breaking for users with ``env=None`` indicating
            # they don't want any env-file applied
            env = "conanbuild" if scope == "build" else "conanrun"

        def _run(cmd, _env):
            # FIXME: run in windows bash is not using output
            if platform.system() == "Windows":
                if win_bash:
                    return tools.run_in_windows_bash(self, bashcmd=cmd, cwd=cwd, subsystem=subsystem,
                                                     msys_mingw=msys_mingw, with_login=with_login)
            envfiles_folder = self.generators_folder or os.getcwd()
            # Normalize env to a list of env-file names
            _env = [_env] if _env and isinstance(_env, str) else (_env or [])
            assert isinstance(_env, list)
            wrapped_cmd = command_env_wrapper(self, cmd, _env, envfiles_folder=envfiles_folder,
                                              scope=scope)
            return self._conan_runner(wrapped_cmd, output, os.path.abspath(RUN_LOG_NAME), cwd)

        if run_environment:
            # When using_build_profile the required environment is already applied through
            # 'conanfile.env' in the contextmanager 'get_env_context_manager'
            with tools.run_environment(self) if not self._conan_using_build_profile else no_op():
                if OSInfo().is_macos and isinstance(command, string_types):
                    # Security policy on macOS clears this variable when executing /bin/sh. To
                    # keep its value, set it again inside the shell when running the command.
                    command = 'DYLD_LIBRARY_PATH="%s" DYLD_FRAMEWORK_PATH="%s" %s' % \
                              (os.environ.get('DYLD_LIBRARY_PATH', ''),
                               os.environ.get("DYLD_FRAMEWORK_PATH", ''),
                               command)
                retcode = _run(command, env)
        else:
            retcode = _run(command, env)

        if not ignore_errors and retcode != 0:
            raise ConanException("Error %d while executing %s" % (retcode, command))

        return retcode

    def package_id(self):
        """ modify the binary info, typically to narrow values
        e.g.: self.info.settings.compiler = "Any" => All compilers will generate same ID
        """

    def test(self):
        """ test the generated executable.
        E.g.  self.run("./example")
        """
        raise ConanException("You need to create a method 'test' in your test/conanfile.py")

    def __repr__(self):
        return self.display_name
| mit | 4b4701655da69b09659e1c22e0e6c0fb | 38.135699 | 101 | 0.636562 | 4.075217 | false | false | false | false |
choderalab/yank | Yank/cli.py | 1 | 2974 | #!/usr/local/bin/env python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
YANK command-line interface (cli)
"""
# =============================================================================================
# MODULE IMPORTS
# =============================================================================================
from . import version # This file is created upon install
from docopt import docopt
from . import commands
import inspect
# =============================================================================================
# COMMAND-LINE INTERFACE
# =============================================================================================
usage = """
YANK
Usage:
yank [-h | --help] [-c | --cite] COMMAND [<ARGS>]...
Commands:
help Get specific help for the command given in ARGS
selftest Run selftests
platforms List available OpenMM platforms.
script Set up and run free energy calculations from a YAML script.
status Get the current status
analyze Analyze data OR extract trajectory from a NetCDF file in a common format.
cleanup Clean up (delete) run files.
Options:
-h --help Display this message and quit
-c, --cite Print relevant citations
See 'yank help COMMAND' for more information on a specific command.
"""
# TODO: Add optional arguments that we can use to override sys.argv for testing purposes.
def main(argv=None):
    """CLI entry point: parse the global options, then dispatch to a subcommand.

    Returns True on failure (usage was printed), False on success.
    """
    # options_first=True makes <ARGS> swallow subcommand flags (- and --) untouched
    args = docopt(usage, version=version.version, argv=argv, options_first=True)

    handled = False  # becomes True once some command dispatches successfully

    # Available commands are the <command>.py modules in the ./commands folder
    available_commands = [name for name, _ in inspect.getmembers(commands, inspect.ismodule)]

    # Handle simple arguments.
    if args['--cite']:
        handled = commands.cite.dispatch(args)

    # Handle commands.
    command = args['COMMAND']
    if command in available_commands:
        # Re-parse against the subcommand's own usage; docopt exits if invalid
        command_usage = getattr(commands, command).usage
        command_args = docopt(command_usage, version=version.version, argv=argv)
        handled = getattr(commands, command).dispatch(command_args)

    # If unsuccessful, print usage and exit with an error.
    if not handled:
        print(usage)
        return True

    # Indicate success.
    return False
| mit | c23240ecb4c89484f9721ccfdfc5a97d | 35.716049 | 115 | 0.51883 | 5.06644 | false | false | false | false |
choderalab/yank | docs/sphinxext/notebook_sphinxext.py | 1 | 5913 | import os
import shutil
import glob
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from nbconvert.exporters import html, python
from runipy.notebook_runner import NotebookRunner
HERE = os.path.dirname(os.path.abspath(__file__))
IPYTHON_NOTEBOOK_DIR = "%s/../../examples" % HERE
class NotebookDirective(Directive):
    """Insert an evaluated notebook into a document

    This uses runipy and nbconvert to transform a path to an unevaluated notebook
    into html suitable for embedding in a Sphinx document.
    """
    required_arguments = 1
    optional_arguments = 1
    option_spec = {'skip_exceptions' : directives.flag}

    def run(self):
        """Copy, evaluate and render the referenced notebook; return the
        raw-HTML node (or [] if evaluation failed)."""
        # check if raw html is supported
        if not self.state.document.settings.raw_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)

        # get path to notebook
        # NOTE(review): source_dir is computed but never used below — confirm before removing
        source_dir = os.path.dirname(
            os.path.abspath(self.state.document.current_source))
        nb_basename = os.path.basename(self.arguments[0])
        rst_file = self.state_machine.document.attributes['source']
        rst_dir = os.path.abspath(os.path.dirname(rst_file))
        # Notebooks are always resolved inside the shared examples directory
        nb_abs_path = os.path.join(IPYTHON_NOTEBOOK_DIR, nb_basename)

        # Move files around.
        rel_dir = os.path.relpath(rst_dir, setup.confdir)
        # NOTE(review): rel_path is unused — confirm before removing
        rel_path = os.path.join(rel_dir, nb_basename)
        dest_dir = os.path.join(setup.app.builder.outdir, rel_dir)
        dest_path = os.path.join(dest_dir, nb_basename)

        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)

        # Copy unevaluated script
        try:
            shutil.copyfile(nb_abs_path, dest_path)
        except IOError:
            raise RuntimeError("Unable to copy notebook to build destination. %s -> %s" % (nb_abs_path, dest_path))

        dest_path_eval = dest_path.replace('.ipynb', '_evaluated.ipynb')
        dest_path_script = dest_path.replace('.ipynb', '.py')
        rel_path_eval = nb_basename.replace('.ipynb', '_evaluated.ipynb')
        rel_path_script = nb_basename.replace('.ipynb', '.py')

        # Create python script vesion
        # NOTE(review): unevaluated_text is computed but never used — confirm before removing
        unevaluated_text = nb_to_html(nb_abs_path)
        script_text = nb_to_python(nb_abs_path)
        f = open(dest_path_script, 'wb')
        f.write(script_text.encode('utf8'))
        f.close()

        skip_exceptions = 'skip_exceptions' in self.options

        try:
            evaluated_text = evaluate_notebook(nb_abs_path, dest_path_eval,
                                               skip_exceptions=skip_exceptions)
        except:
            # bail
            # NOTE(review): bare except silently hides any evaluation failure
            return []

        # Create link to notebook and script files
        link_rst = "(" + \
                   formatted_link(nb_basename) + "; " + \
                   formatted_link(rel_path_eval) + "; " + \
                   formatted_link(rel_path_script) + \
                   ")"

        self.state_machine.insert_input([link_rst], rst_file)

        # create notebook node
        attributes = {'format': 'html', 'source': 'nb_path'}
        nb_node = notebook_node('', evaluated_text, **attributes)
        (nb_node.source, nb_node.line) = \
            self.state_machine.get_source_and_line(self.lineno)

        # add dependency
        self.state.document.settings.record_dependencies.add(nb_abs_path)

        # clean up png files left behind by notebooks.
        # NOTE(review): fits_files and h5_files are collected but never removed — confirm intent
        png_files = glob.glob("*.png")
        fits_files = glob.glob("*.fits")
        h5_files = glob.glob("*.h5")
        for file in png_files:
            os.remove(file)

        return [nb_node]
class notebook_node(nodes.raw):
    # Raw-HTML docutils node holding the rendered notebook body.
    pass
def nb_to_python(nb_path):
    """Convert the notebook at *nb_path* into a Python script string."""
    script, _resources = python.PythonExporter().from_filename(nb_path)
    return script
def nb_to_html(nb_path):
    """Render the notebook at *nb_path* to HTML suitable for embedding."""
    exporter = html.HTMLExporter(template_file='full')
    output, resources = exporter.from_filename(nb_path)
    header = output.split('<head>', 1)[1].split('</head>',1)[0]
    body = output.split('<body>', 1)[1].split('</body>',1)[0]

    # Scope the notebook CSS so it cannot leak into the page (http://imgur.com/eR9bMRH)
    header = header.replace('<style', '<style scoped="scoped"')
    header = header.replace('body {\n overflow: visible;\n padding: 8px;\n}\n', '')

    # Drop style rules that conflict with the sphinx theme.
    filter_strings = [
        'navbar',
        'body{',
        'alert{',
        'uneditable-input{',
        'collapse{',
    ] + ['h%s{' % (i+1) for i in range(6)]
    kept_lines = [line for line in header.split('\n')
                  if not any(s in line for s in filter_strings)]
    header = '\n'.join(kept_lines)

    # concatenate raw html lines
    return '\n'.join(['<div class="ipynotebook">', header, body, '</div>'])
def evaluate_notebook(nb_path, dest_path=None, skip_exceptions=False):
    """Execute a notebook and return its rendered HTML.

    Parameters
    ----------
    nb_path : str
        Path to the source notebook.
    dest_path : str, optional
        Where to save the evaluated notebook. When omitted, a temporary file
        ('temp_evaluated.ipynb') is written and removed after rendering.
    skip_exceptions : bool, optional
        Passed through to the runner; when True, cells that raise are skipped.

    Returns
    -------
    str
        HTML rendering of the evaluated notebook.
    """
    # Create evaluated version and save it to the dest path.
    # Always use --pylab so figures appear inline
    # perhaps this is questionable?
    nb_runner = NotebookRunner(nb_path, pylab=True)
    nb_runner.run_notebook(skip_exceptions=skip_exceptions)
    if dest_path is None:
        dest_path = 'temp_evaluated.ipynb'
    nb_runner.save_notebook(dest_path)
    ret = nb_to_html(dest_path)
    # Bug fix: string values must be compared with '==', not identity ('is');
    # the old identity check only worked through CPython string interning.
    if dest_path == 'temp_evaluated.ipynb':
        os.remove(dest_path)
    return ret
def formatted_link(path):
    """Return a reST anonymous hyperlink to *path*, labeled with its basename."""
    label = os.path.basename(path)
    return "`%s <%s>`__" % (label, path)
def visit_notebook_node(self, node):
    # Render the notebook node by delegating to the writer's raw-HTML visitor.
    self.visit_raw(node)
def depart_notebook_node(self, node):
    # Close out the node exactly as a raw node would be closed.
    self.depart_raw(node)
def setup(app):
    """Sphinx extension entry point: register the notebook node and directive."""
    # Stash the Sphinx app/config on the function object so module-level
    # helpers can reach them without a global.
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir
    app.add_node(notebook_node,
                 html=(visit_notebook_node, depart_notebook_node))
    app.add_directive('notebook', NotebookDirective)
| mit | 840ac0e20be25f939823acda11ed2f3c | 31.668508 | 115 | 0.616607 | 3.590164 | false | false | false | false |
choderalab/yank | Yank/commands/help.py | 2 | 1644 | #!/usr/local/bin/env python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
Print YANK help.
"""
# =============================================================================================
# MODULE IMPORTS
# =============================================================================================
# =============================================================================================
# COMMAND-LINE INTERFACE
# =============================================================================================
usage = """
YANK help
Usage:
yank help [COMMAND]
Description:
Get COMMAND's usage usage and arguments
Required Arguments:
COMMAND Name of the command you want more information about
"""
# =============================================================================================
# COMMAND DISPATCH
# =============================================================================================
def dispatch(args):
    """Print usage information for a YANK subcommand.

    Parameters
    ----------
    args : dict
        Parsed docopt arguments; the optional 'COMMAND' entry names the
        subcommand to describe.

    Returns
    -------
    bool
        True once the relevant usage text has been printed.
    """
    try:
        command = args['COMMAND']
    except (KeyError, TypeError):
        # No COMMAND was passed (e.g. when invoked bare from the nosetests).
        # Narrowed from a bare `except:` which also swallowed KeyboardInterrupt.
        command = None
    # Handle the null case or itself
    if command is None or command == 'help':
        from .. import cli
        print(cli.usage)
        return True
    else:
        from .. import commands
        command_usage = getattr(commands, command).usage
        print(command_usage)
        return True
    # (Removed an unreachable trailing `return False`: both branches above return.)
| mit | 237481ee77403ac52ad485ce671704ba | 28.357143 | 95 | 0.332117 | 6.447059 | false | false | false | false |
choderalab/yank | Yank/analyze.py | 1 | 51715 | #!/usr/local/bin/env python
# ==============================================================================
# MODULE DOCSTRING
# ==============================================================================
"""
Analyze
=======
YANK Specific analysis tools for YANK simulations from the :class:`yank.yank.AlchemicalPhase` classes
Extends classes from the MultiStateAnalyzer package to include the
"""
# =============================================================================================
# MODULE IMPORTS
# =============================================================================================
import os
import abc
import copy
import yaml
import mdtraj
import pickle
import logging
from functools import wraps
import mpiplus
import numpy as np
import simtk.unit as units
import openmmtools as mmtools
from pymbar import timeseries
from . import version
from . import utils
from .experiment import ExperimentBuilder
logger = logging.getLogger(__name__)
# =============================================================================================
# MODULE PARAMETERS
# =============================================================================================
# Extend registry to support standard_state_correction
yank_registry = mmtools.multistate.default_observables_registry
yank_registry.register_phase_observable('standard_state_correction')
kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
# =============================================================================================
# MODULE CLASSES
# =============================================================================================
class YankPhaseAnalyzer(mmtools.multistate.PhaseAnalyzer):
    """Base class for YANK phase analyzers.

    Thin specialization of :class:`mmtools.multistate.PhaseAnalyzer` that wires
    in YANK's observables registry (which adds ``standard_state_correction``).
    """
    def __init__(self, *args, registry=yank_registry, **kwargs):
        super().__init__(*args, registry=registry, **kwargs)

    # Abstract methods
    @abc.abstractmethod
    def analyze_phase(self, *args, **kwargs):
        """Run the full automatic analysis of this phase.

        Subclasses bundle the individual analysis calls here so callers that do
        not want to invoke each method themselves get a single entry point.

        Returns
        -------
        dict
            Dictionary of analysis results.
        """
        raise NotImplementedError()
class YankMultiStateSamplerAnalyzer(mmtools.multistate.MultiStateSamplerAnalyzer, YankPhaseAnalyzer):
    # Multi-state analyzer augmented with YANK's standard-state-correction observable.
    def get_standard_state_correction(self):
        """
        Compute the standard state correction free energy associated with the Phase.

        The value is cached in ``self._computed_observables`` after the first
        call. When ``self.unbias_restraint`` is set and radially symmetric
        restraint data is available, the correction is recomputed from the
        restraint; otherwise the value stored in the reporter's metadata is used.

        Returns
        -------
        standard_state_correction : float
            Free energy contribution from the standard_state_correction (in kT).
        """
        # Return the cached value if it has already been computed or read.
        if self._computed_observables['standard_state_correction'] is not None:
            return self._computed_observables['standard_state_correction']
        # Determine if we need to recompute the standard state correction.
        compute_ssc = self.unbias_restraint
        try:
            restraint_force, _, _ = self._get_radially_symmetric_restraint_data()
        except (mmtools.forces.NoForceFoundError, TypeError):
            # No (suitable) restraint found: fall back to the stored value below.
            compute_ssc = False
        if compute_ssc:
            thermodynamic_state = self._get_end_thermodynamic_states()[0]
            restraint_energy_cutoff, restraint_distance_cutoff = self._get_restraint_cutoffs()
            # TODO: Compute average box volume here to feed to max_volume?
            ssc = restraint_force.compute_standard_state_correction(
                thermodynamic_state, square_well=True, radius_cutoff=restraint_distance_cutoff,
                energy_cutoff=restraint_energy_cutoff, max_volume='system')
            # Update observable.
            self._computed_observables['standard_state_correction'] = ssc
            logger.debug('Computed a new standard state correction of {} kT'.format(ssc))
        # Reads the SSC from the reporter if compute_ssc is False.
        if self._computed_observables['standard_state_correction'] is None:
            ssc = self._reporter.read_dict('metadata')['standard_state_correction']
            self._computed_observables['standard_state_correction'] = ssc
        return self._computed_observables['standard_state_correction']
    def analyze_phase(self, show_mixing=False, cutoff=0.05):
        """
        Auto-analysis function for the phase.

        Uses the cached ``self._equilibration_data``, optionally prints mixing
        statistics, then reports the free energy and enthalpy differences
        between the two reference states together with the standard state
        correction.

        Parameters
        ----------
        show_mixing : bool, optional. Default: False
            Toggle to show mixing statistics or not. This can be a slow step so
            is disabled for speed by default.
        cutoff : float, optional. Default: 0.05
            Threshold below which % of mixing from one state to another is not
            shown (rendered as whitespace in the table).

        Returns
        -------
        data : dict
            Delta F, Delta H (with errors), and the Standard State Free Energy
            correction, in units of kT (dimensionless).
        """
        number_equilibrated, g_t, n_effective_max = self._equilibration_data
        if show_mixing:
            self.show_mixing_statistics(cutoff=cutoff, number_equilibrated=number_equilibrated)
        data = {}
        # Accumulate free energy differences
        Deltaf_ij, dDeltaf_ij = self.get_free_energy()
        DeltaH_ij, dDeltaH_ij = self.get_enthalpy()
        # Only the difference between the two reference states is reported.
        data['free_energy_diff'] = Deltaf_ij[self.reference_states[0], self.reference_states[1]]
        data['free_energy_diff_error'] = dDeltaf_ij[self.reference_states[0], self.reference_states[1]]
        data['enthalpy_diff'] = DeltaH_ij[self.reference_states[0], self.reference_states[1]]
        data['enthalpy_diff_error'] = dDeltaH_ij[self.reference_states[0], self.reference_states[1]]
        data['free_energy_diff_standard_state_correction'] = self.get_standard_state_correction()
        return data
class YankReplicaExchangeAnalyzer(mmtools.multistate.ReplicaExchangeAnalyzer, YankMultiStateSamplerAnalyzer):
    # Replica-exchange specialization; all behavior comes from the two parents.
    pass
class YankParallelTemperingAnalyzer(mmtools.multistate.ParallelTemperingAnalyzer, YankMultiStateSamplerAnalyzer):
    # Parallel-tempering specialization; all behavior comes from the two parents.
    pass
def _copyout(wrap):
"""Copy output of function. Small helper to avoid ending each function with the copy call"""
@wraps(wrap)
def make_copy(*args, **kwargs):
return copy.deepcopy(wrap(*args, **kwargs))
return make_copy
class ExperimentAnalyzer(object):
"""
Semi-automated YANK Experiment analysis with serializable data.
This class is designed to replace the older ``analyze_directory`` functions by providing a common analysis data
interface which other classes and methods can draw on. This is designed to semi-automate the combination of
multi-phase data
Each of the main methods fetches the data from each phase and returns them as a dictionary to the user. The total
dump of data to serialized YAML files can also be done.
Each function documents what its output data structure and entries surrounded by curly braces (``{ }``) indicate
variables which change per experiment, often the data.
Output dictionary is of the form:
.. code-block:: python
yank_version: {YANK Version}
phase_names: {Name of each phase, depends on simulation type}
general: {See :func:`get_general_simulation_data`}
equilibration: {See :func:`get_equilibration_data`}
mixing: {See :func:`get_mixing_data`}
free_energy: {See :func:`get_experiment_free_energy_data`}
Parameters
----------
store_directory : string
Location where the analysis.yaml file is and where the NetCDF files are
**analyzer_kwargs
Keyword arguments to pass to the analyzer class. Quantities can be
passed as strings.
Attributes
----------
use_full_trajectory : bool. Analyze with subsampled or complete trajectory
nphases : int. Number of phases detected
phases_names : list of phase names. Used as keys on all attributes below
signs : dict of str. Sign assigned to each phase
analyzers : dict of YankPhaseAnalyzer
iterations : dict of int. Number of maximum iterations in each phase
u_ns : dict of np.ndarray. Timeseries of each phase
nequils : dict of int. Number of equilibrium iterations in each phase
g_ts : dict of int. Subsample rate past nequils in each phase
Neff_maxs : dict of int. Number of effective samples in each phase
Examples
--------
Start with an experiment (Running from the :class:`yank.experiment.ExperimentBuilder` example)
>>> import textwrap
>>> import openmmtools as mmtools
>>> import yank.utils
    >>> from yank.experiment import ExperimentBuilder
>>> setup_dir = yank.utils.get_data_filename(os.path.join('..', 'examples',
... 'p-xylene-implicit', 'input'))
>>> pxylene_path = os.path.join(setup_dir, 'p-xylene.mol2')
>>> lysozyme_path = os.path.join(setup_dir, '181L-pdbfixer.pdb')
>>> with mmtools.utils.temporary_directory() as tmp_dir:
... yaml_content = '''
... ---
... options:
... default_number_of_iterations: 1
... output_dir: {}
... molecules:
... T4lysozyme:
... filepath: {}
... p-xylene:
... filepath: {}
... antechamber:
... charge_method: bcc
... solvents:
... vacuum:
... nonbonded_method: NoCutoff
... systems:
... my_system:
... receptor: T4lysozyme
... ligand: p-xylene
... solvent: vacuum
... leap:
... parameters: [leaprc.gaff, leaprc.ff14SB]
... protocols:
... absolute-binding:
... complex:
... alchemical_path:
... lambda_electrostatics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
... lambda_sterics: [1.0, 0.9, 0.8, 0.6, 0.4, 0.2, 0.0]
... solvent:
... alchemical_path:
... lambda_electrostatics: [1.0, 0.8, 0.6, 0.3, 0.0]
... lambda_sterics: [1.0, 0.8, 0.6, 0.3, 0.0]
... experiments:
... system: my_system
... protocol: absolute-binding
... '''.format(tmp_dir, lysozyme_path, pxylene_path)
>>> yaml_builder = ExperimentBuilder(textwrap.dedent(yaml_content))
>>> yaml_builder.run_experiments()
Now analyze the experiment
>>> import os
>>> exp_analyzer = ExperimentAnalyzer(os.path.join(tmp_dir, 'experiment'))
>>> analysis_data = exp_analyzer.auto_analyze()
"""
def __init__(self, store_directory, **analyzer_kwargs):
# Convert analyzer string quantities into variables.
for key, value in analyzer_kwargs.items():
try:
quantity = utils.quantity_from_string(value)
except:
pass
else:
analyzer_kwargs[key] = quantity
# Read in data
analysis_script_path = os.path.join(store_directory, 'analysis.yaml')
if not os.path.isfile(analysis_script_path):
err_msg = 'Cannot find analysis.yaml script in {}'.format(store_directory)
raise RuntimeError(err_msg)
with open(analysis_script_path, 'r') as f:
analysis = yaml.load(f)
phases_names = []
signs = {}
analyzers = {}
for phase, sign in analysis:
phases_names.append(phase)
signs[phase] = sign
storage_path = os.path.join(store_directory, phase + '.nc')
analyzers[phase] = get_analyzer(storage_path, **analyzer_kwargs)
self.phase_names = phases_names
self.signs = signs
self.analyzers = analyzers
self.nphases = len(phases_names)
# Additional data
self.use_full_trajectory = False
if 'use_full_trajectory' in analyzer_kwargs:
self.use_full_trajectory = bool(analyzer_kwargs['use_full_trajectory'])
# Assign flags for other sections along with their global variables
# General Data
self._general_run = False
self.iterations = {}
# Equilibration
self._equilibration_run = False
self.u_ns = {}
self.nequils = {}
self.g_ts = {}
self.Neff_maxs = {}
self._n_discarded = 0
# Mixing Run (state)
self._mixing_run = False
# Replica mixing
self._replica_mixing_run = False
self._free_energy_run = False
self._serialized_data = {'yank_version': version.version, 'phase_names': self.phase_names}
def __del__(self):
# Explicitly close storage
for phase, analyzer in self.analyzers.items():
if analyzer is not None:
del analyzer
    @_copyout
    def get_general_simulation_data(self):
        """
        General purpose simulation data on number of iterations, number of
        states, number of replicas, and number of atoms per phase.

        Results are computed once and cached in ``self._serialized_data['general']``;
        subsequent calls return (a deep copy of) the cached payload.

        Returns
        -------
        general_data : dict
            Per-phase dict with keys ``iterations``, ``nstates``, ``natoms``,
            ``nreplicas``.
        """
        if not self._general_run:
            general_serial = {}
            for phase_name in self.phase_names:
                serial = {}
                analyzer = self.analyzers[phase_name]
                try:
                    positions = analyzer.reporter.read_sampler_states(0)[0].positions
                    natoms, _ = positions.shape
                except AttributeError:  # Trap unloaded checkpoint file
                    # Placeholder string so the report still renders.
                    natoms = 'No Cpt.'
                # Energies array is indexed [iteration, replica, state].
                energies, _, _, = analyzer.reporter.read_energies()
                iterations, nreplicas, nstates = energies.shape
                serial['iterations'] = iterations
                serial['nstates'] = nstates
                serial['natoms'] = natoms
                serial['nreplicas'] = nreplicas
                general_serial[phase_name] = serial
            # Cache iteration counts; other sections (e.g. equilibration) use them.
            self.iterations = {phase_name: general_serial[phase_name]['iterations'] for phase_name in self.phase_names}
            self._serialized_data['general'] = general_serial
            self._general_run = True
        return self._serialized_data['general']
    @_copyout
    def get_equilibration_data(self, discard_from_start=1):
        """
        Compute per-phase equilibration statistics: the number of iterations to
        discard as equilibration, the subsample (decorrelation) rate, and the
        number of effective samples, plus the per-candidate-origin series used
        to pick them.

        Results are cached; the analysis is redone only when it has not run yet
        or when ``discard_from_start`` differs from the previous call.

        Parameters
        ----------
        discard_from_start : int, optional. Default: 1
            Number of initial iterations to always discard (index 0 is the
            minimized structure, not an equilibrium sample).

        Returns
        -------
        equilibration_data : dict
            Per-phase dict with discarded/effective/equilibration sample counts,
            subsample rate, the considered-iteration arrays, and sample-count
            breakdowns (absolute and as fractions of the total iterations).
        """
        if not self._equilibration_run or discard_from_start != self._n_discarded:
            eq_serial = {}
            for i, phase_name in enumerate(self.phase_names):
                serial = {}
                analyzer = self.analyzers[phase_name]
                # Data crunching to get timeseries
                # TODO: Figure out how not to discard the first sample
                # Sample at index 0 is actually the minimized structure and NOT from the equilibrium distribution
                # This throws off all of the equilibrium data
                t0 = discard_from_start
                self._n_discarded = t0
                series = analyzer.get_effective_energy_timeseries()
                # Update discard_from_start to match t0 if present
                try:
                    # 'iteration' is not used afterwards; len() also raises early
                    # (and is trapped below) if the series is not sized.
                    iteration = len(series)
                    data = analyzer.reporter.read_online_analysis_data(None, 't0')
                    t0 = max(t0, int(data['t0'][0]))
                    logger.debug('t0 found; using initial t0 = {} instead of 1'.format(t0))
                    self._n_discarded = t0
                except Exception as e:
                    # No t0 found
                    logger.debug('Could not find t0: {}'.format(e))
                    pass
                if series.size <= t0:
                    # Trap case where user has dropped their whole set.
                    # Rare, but happens, often with debugging
                    t0 = 0
                    logger.warning("Alert: analyzed timeseries has the same or fewer number of values as "
                                   "discard_from_start! The whole series has been preserved to ensure there is "
                                   "*something* to analyze.")
                    self._n_discarded = 0
                # NOTE(review): only index 0 is dropped from the stored series (not
                # the full t0 prefix), and the timeseries is recomputed rather than
                # reusing `series` -- confirm both are intended.
                self.u_ns[phase_name] = analyzer.get_effective_energy_timeseries()[1:]
                # Timeseries statistics
                i_t, g_i, n_effective_i = mmtools.multistate.get_equilibration_data_per_sample(self.u_ns[phase_name])
                n_effective_max = n_effective_i.max()
                i_max = n_effective_i.argmax()
                # Offset the chosen origin by the discarded prefix.
                n_equilibration = i_t[i_max] + t0
                g_t = g_i[i_max]
                self.Neff_maxs[phase_name] = n_effective_max
                self.nequils[phase_name] = n_equilibration
                self.g_ts[phase_name] = g_t
                serial['discarded_from_start'] = int(t0)
                serial['effective_samples'] = float(self.Neff_maxs[phase_name])
                serial['equilibration_samples'] = int(self.nequils[phase_name])
                serial['subsample_rate'] = float(self.g_ts[phase_name])
                serial['iterations_considered'] = i_t
                serial['subsample_rate_by_iterations_considered'] = g_i
                serial['effective_samples_by_iterations_considered'] = n_effective_i
                # Determine total number of iterations
                n_iter = self.iterations[phase_name]
                eq = self.nequils[phase_name] + self._n_discarded  # Make sure we include the discarded
                decor = int(np.floor(self.Neff_maxs[phase_name]))
                cor = n_iter - eq - decor
                # Fractions of the total: [decorrelated, correlated, equilibration].
                dat = np.array([decor, cor, eq]) / float(n_iter)
                serial['count_total_equilibration_samples'] = int(eq)
                serial['count_decorrelated_samples'] = int(decor)
                serial['count_correlated_samples'] = int(cor)
                serial['percent_total_equilibration_samples'] = float(dat[2])
                serial['percent_decorrelated_samples'] = float(dat[0])
                serial['percent_correlated_samples'] = float(dat[1])
                eq_serial[phase_name] = serial
            self._serialized_data['equilibration'] = eq_serial
            # Set flag
            self._equilibration_run = True
        return self._serialized_data['equilibration']
@_copyout
def get_mixing_data(self):
"""
Get state diffusion mixing arrays
Output is of the form:
.. code-block:: python
{for phase_name in phase_names}
transitions : {[nstates, nstates] np.ndarray of float}
eigenvalues : {[nstates] np.ndarray of float}
stat_inefficiency : {float}
Returns
-------
mixing_data : dict
Dictionary of mixing data
"""
if not self._mixing_run:
mixing_serial = {}
# Plot a diffusing mixing map for each phase.
for phase_name in self.phase_names:
serial = {}
# Generate mixing statistics.
analyzer = self.analyzers[phase_name]
mixing_statistics = analyzer.generate_mixing_statistics(
number_equilibrated=self.nequils[phase_name])
transition_matrix, eigenvalues, statistical_inefficiency = mixing_statistics
serial['transitions'] = transition_matrix
serial['eigenvalues'] = eigenvalues
serial['stat_inefficiency'] = statistical_inefficiency
mixing_serial[phase_name] = serial
self._serialized_data['mixing'] = mixing_serial
self._mixing_run = True
return self._serialized_data['mixing']
    @_copyout
    def get_experiment_free_energy_data(self):
        """
        Combine the per-phase analyses into the experiment free energy and
        enthalpy, broken down by phase and totaled.

        Each phase's :func:`analyze_phase` output is combined with the phase's
        sign and standard state correction. Unitless (kT) and unit-bearing
        totals are accumulated separately because each phase can have its own
        kT; errors are accumulated in quadrature.

        Returns
        -------
        free_energy_data : dict
            Per-phase entries (sign, kT, diffs, errors, standard state
            correction) plus experiment-level ``free_energy_diff[_error][_unit]``
            and ``enthalpy_diff[_error][_unit]`` entries.

        Raises
        ------
        RuntimeError
            If the equilibration analysis has not been run yet.
        """
        # TODO: Possibly rename this function to not confuse with "experimental" free energy?
        if not self._free_energy_run:
            if not self._equilibration_run:
                raise RuntimeError("Cannot run free energy without first running the equilibration. Please run the "
                                   "corresponding function/cell first!")
            fe_serial = dict()
            data = dict()
            for phase_name in self.phase_names:
                analyzer = self.analyzers[phase_name]
                data[phase_name] = analyzer.analyze_phase()
            # Compute free energy and enthalpy
            delta_f = 0.0
            delta_f_err = 0.0
            delta_h = 0.0
            delta_h_err = 0.0
            # Not assigning units to be more general to whatever kt is later
            delta_f_unit = 0.0
            delta_f_err_unit = 0.0
            delta_h_unit = 0.0
            delta_h_err_unit = 0.0
            for phase_name in self.phase_names:
                serial = {}
                kt = self.analyzers[phase_name].kT
                serial['kT'] = kt
                if not isinstance(delta_f_unit, units.Quantity):
                    # Assign units to the float if not assigned
                    held_unit = kt.unit
                    delta_f_unit *= held_unit
                    delta_f_err_unit *= held_unit ** 2  # Errors are held in square until the end
                    delta_h_unit *= held_unit
                    delta_h_err_unit *= held_unit ** 2
                serial['kT'] = kt
                sign = self.signs[phase_name]
                serial['sign'] = sign
                phase_delta_f = data[phase_name]['free_energy_diff']
                phase_delta_f_ssc = data[phase_name]['free_energy_diff_standard_state_correction']
                phase_delta_f_err = data[phase_name]['free_energy_diff_error']
                serial['free_energy_diff'] = phase_delta_f
                serial['free_energy_diff_error'] = phase_delta_f_err
                serial['free_energy_diff_standard_state_correction'] = data[phase_name][
                    'free_energy_diff_standard_state_correction'
                ]
                # Signed phase contribution (SSC folded in) is subtracted from the total.
                phase_exp_delta_f = sign * (phase_delta_f + phase_delta_f_ssc)
                delta_f -= phase_exp_delta_f
                delta_f_err += phase_delta_f_err**2
                delta_f_unit -= phase_exp_delta_f * kt
                delta_f_err_unit += (phase_delta_f_err * kt)**2
                phase_delta_h = data[phase_name]['enthalpy_diff']
                phase_delta_h_err = data[phase_name]['enthalpy_diff_error']
                serial['enthalpy_diff'] = phase_delta_h
                # NOTE(review): the free-energy branch stores the raw per-phase error,
                # but this stores np.sqrt of it -- confirm the sqrt is intended here.
                serial['enthalpy_diff_error'] = np.sqrt(phase_delta_h_err)
                phase_exp_delta_h = sign * (phase_delta_h + phase_delta_f_ssc)
                delta_h -= phase_exp_delta_h
                delta_h_err += phase_delta_h_err ** 2
                delta_h_unit -= phase_exp_delta_h * kt
                delta_h_err_unit += (phase_delta_h_err * kt) ** 2
                fe_serial[phase_name] = serial
            delta_f_err = np.sqrt(delta_f_err)  # np.sqrt is fine here since these dont have units
            delta_h_err = np.sqrt(delta_h_err)
            # Use ** 0.5 instead of np.sqrt since the function strips units, see github issue pandegroup/openmm#2106
            delta_f_err_unit = delta_f_err_unit ** 0.5
            delta_h_err_unit = delta_h_err_unit ** 0.5
            fe_serial['free_energy_diff'] = delta_f
            fe_serial['enthalpy_diff'] = delta_h
            fe_serial['free_energy_diff_error'] = delta_f_err
            fe_serial['enthalpy_diff_error'] = delta_h_err
            fe_serial['free_energy_diff_unit'] = delta_f_unit
            fe_serial['enthalpy_diff_unit'] = delta_h_unit
            fe_serial['free_energy_diff_error_unit'] = delta_f_err_unit
            fe_serial['enthalpy_diff_error_unit'] = delta_h_err_unit
            self._serialized_data['free_energy'] = fe_serial
            self._free_energy_run = True
        return self._serialized_data['free_energy']
@_copyout
def auto_analyze(self):
"""
Run the analysis
Output is of the form:
.. code-block:: python
yank_version: {YANK Version}
phase_names: {Name of each phase, depends on simulation type}
general:
{for phase_name in phase_names}
iterations : {int}
natoms : {int}
nreplicas : {int}
nstates : {int}
equilibration:
{for phase_name in phase_names}
discarded_from_start : {int}
effective_samples : {float}
subsample_rate : {float}
iterations_considered : {1D np.ndarray of int}
subsample_rate_by_iterations_considered : {1D np.ndarray of float}
effective_samples_by_iterations_considered : {1D np.ndarray of float}
count_total_equilibration_samples : {int}
count_decorrelated_samples : {int}
count_correlated_samples : {int}
percent_total_equilibration_samples : {float}
percent_decorrelated_samples : {float}
percent_correlated_samples : {float}
mixing:
{for phase_name in phase_names}
transitions : {[nstates, nstates] np.ndarray of float}
eigenvalues : {[nstates] np.ndarray of float}
stat_inefficiency : {float}
free_energy:
{for phase_name in phase_names}
sign : {str of either '+' or '-'}
kT : {units.quantity}
free_energy_diff : {float (has units of kT)}
free_energy_diff_error : {float (has units of kT)}
free_energy_diff_standard_state_correction : {float (has units of kT)}
enthalpy_diff : {float (has units of kT)}
enthalpy_diff_error : {float (has units of kT)}
free_energy_diff : {float (has units of kT)}
free_energy_diff_error : {float (has units of kT)}
free_energy_diff_unit : {units.quantity compatible with energy/mole. Corrected for different phase kT}
free_energy_diff_error_unit : {units.quantity compatible with energy/mole. Corrected for different phase kT}
enthalpy_diff : {float (has units of kT)}
enthalpy_diff_error : {float (has units of kT)}
enthalpy_diff_unit : {units.quantity compatible with energy/mole. Corrected for different phase kT}
enthalpy_diff_error_unit : {units.quantity compatible with energy/mole. Corrected for different phase kT}
Returns
-------
serialized_data : dict
Dictionary of all the auto-analysis calls organized by section headers.
See each of the functions to see each of the sub-dictionary structures
See Also
--------
get_general_simulation_data
get_equilibration_data
get_mixing_data
get_experiment_free_energy_data
"""
_ = self.get_general_simulation_data()
_ = self.get_equilibration_data()
_ = self.get_mixing_data()
_ = self.get_experiment_free_energy_data()
return self._serialized_data
def dump_serial_data(self, path):
"""
Dump the serialized data to YAML file
Parameters
----------
path : str
File name to dump the data to
"""
true_path, ext = os.path.splitext(path)
if not ext: # empty string check
ext = '.yaml'
true_path += ext
with open(true_path, 'w') as f:
f.write(yaml.dump(self._serialized_data))
@staticmethod
def report_version():
print("Rendered with YANK Version {}".format(version.version))
# =============================================================================================
# MODULE FUNCTIONS
# =============================================================================================
def get_analyzer(file_base_path, **analyzer_kwargs):
    """
    Utility function to convert storage file to a Reporter and Analyzer by reading the data on file

    For now this is mostly placeholder functions since there is only the implemented :class:`ReplicaExchangeAnalyzer`,
    but creates the API for the user to work with.

    Parameters
    ----------
    file_base_path : string
        Complete path to the storage file with filename and extension.
    **analyzer_kwargs
        Keyword arguments to pass to the analyzer.

    Returns
    -------
    analyzer : instance of implemented :class:`Yank*Analyzer`
        Analyzer for the specific phase.
    """
    # Eventually extend this to get more reporters, but for now simple placeholder
    reporter = mmtools.multistate.MultiStateReporter(file_base_path, open_mode='r')
    # Eventually change this to auto-detect simulation from reporter:
    # NOTE: 'if True' is a deliberate scaffold; the else branch stays unreachable
    # until reporter-type detection is implemented.
    if True:
        analyzer = YankReplicaExchangeAnalyzer(reporter, **analyzer_kwargs)
    else:
        raise RuntimeError("Cannot automatically determine analyzer for Reporter: {}".format(reporter))
    return analyzer
def analyze_directory(source_directory, **analyzer_kwargs):
    """
    Analyze the store files in a directory and print/return the free energies.

    Preserves YANK's legacy auto-analysis entry point: builds an
    :class:`ExperimentAnalyzer`, runs the full automatic analysis, and prints a
    formatted report.

    Parameters
    ----------
    source_directory : string
        The location of the simulation storage files.
    **analyzer_kwargs
        Keyword arguments to pass to the analyzer.

    Returns
    -------
    analysis_data : dict
        Dictionary containing all the automatic analysis data.
    """
    experiment_analyzer = ExperimentAnalyzer(source_directory, **analyzer_kwargs)
    analysis_data = experiment_analyzer.auto_analyze()
    print_analysis_data(analysis_data)
    # Drop the analyzer explicitly so its NetCDF reporters close now.
    del experiment_analyzer
    return analysis_data
@mpiplus.on_single_node(0)
def print_analysis_data(analysis_data, header=None):
    """
    Pretty-print the payload produced by :func:`ExperimentAnalyzer.auto_analyze`.

    Parameters
    ----------
    analysis_data : dict
        Output from :func:`ExperimentAnalyzer.auto_analyze`
    header : str, Optional
        Optional string printed before the formatted report; handy when this is
        called several times and the outputs need separating.
    """
    if header is not None:
        print(header)
    wanted_keys = ('free_energy_diff', 'enthalpy_diff',
                   'free_energy_diff_error', 'enthalpy_diff_error',
                   'free_energy_diff_unit', 'enthalpy_diff_unit',
                   'free_energy_diff_error_unit', 'enthalpy_diff_error_unit')
    try:
        fe_data = analysis_data['free_energy']
        (delta_f, delta_h,
         delta_f_err, delta_h_err,
         delta_f_unit, delta_h_unit,
         delta_f_err_unit, delta_h_err_unit) = (fe_data[key] for key in wanted_keys)
    except (KeyError, TypeError):
        # Trap error in formatted data
        print("Error in reading analysis data! It may be the analysis threw an error, please see below for what "
              "was received instead:\n\n{}\n\n".format(analysis_data))
        return
    # Attempt to guess type of calculation
    calculation_type = ''
    for phase in analysis_data['phase_names']:
        if 'complex' in phase:
            calculation_type = ' of binding'
        elif 'solvent1' in phase:
            calculation_type = ' of solvation'
    print('Free energy{:<13}: {:9.3f} +- {:.3f} kT ({:.3f} +- {:.3f} kcal/mol)'.format(
        calculation_type, delta_f, delta_f_err, delta_f_unit / units.kilocalories_per_mole,
        delta_f_err_unit / units.kilocalories_per_mole))
    for phase in analysis_data['phase_names']:
        phase_delta_f = fe_data[phase]['free_energy_diff']
        phase_delta_f_err = fe_data[phase]['free_energy_diff_error']
        phase_delta_f_ssc = fe_data[phase]['free_energy_diff_standard_state_correction']
        print('DeltaG {:<17}: {:9.3f} +- {:.3f} kT'.format(phase, phase_delta_f, phase_delta_f_err))
        if phase_delta_f_ssc != 0.0:
            print('DeltaG {:<17}: {:18.3f} kT'.format('standard state correction', phase_delta_f_ssc))
        print('')
    print('Enthalpy{:<16}: {:9.3f} +- {:.3f} kT ({:.3f} +- {:.3f} kcal/mol)'.format(
        calculation_type, delta_h, delta_h_err, delta_h_unit / units.kilocalories_per_mole,
        delta_h_err_unit / units.kilocalories_per_mole))
class MultiExperimentAnalyzer(object):
    """
    Automatic analysis driver for every YANK experiment declared in a YAML file.

    The YAML script is expanded into its individual (possibly combinatorial)
    experiments, and each one is run through :func:`ExperimentAnalyzer.auto_analyze`,
    producing one result dictionary per experiment.

    Parameters
    ----------
    script : str or dict
        Full path to the YAML file which made the YANK experiments, OR the
        already-loaded YAML content of said script.
    builder_kwargs
        Additional keyword arguments forwarded to the :class:`ExperimentBuilder`
        constructor. The experiments are never set up or built here; only their
        output directory structure is queried.

    See Also
    --------
    ExperimentAnalyzer.auto_analyze
    """
    def __init__(self, script, **builder_kwargs):
        self.script = script
        self.builder = ExperimentBuilder(script=script, **builder_kwargs)
        self.paths = self.builder.get_experiment_directories()

    def run_all_analysis(self, serialize_data=True, serial_data_path=None, **analyzer_kwargs):
        """
        Run the automatic analysis on every experiment, optionally pickling the results.

        Parameters
        ----------
        serialize_data : bool, Default: True
            Whether the combined results should be written to disk as a pickle.
        serial_data_path : str, Optional
            Name of the serial data file. When omitted, it defaults to
            ``{YAML file name}_analysis.pkl``, or ``./analysis.pkl`` when the
            script was provided as in-memory YAML content.
        analyzer_kwargs
            Additional keywords fed to the :class:`YankMultiStateSamplerAnalyzer`
            for each phase of each experiment.

        Returns
        -------
        serial_output : dict
            Mapping ``{experiment_name: ExperimentAnalyzer.auto_analyze() output}``.
            Experiments whose analysis raised an exception map to the string
            form of that exception instead.

        See Also
        --------
        ExperimentAnalyzer.auto_analyze
        """
        if serial_data_path is None:
            suffix = 'analysis.pkl'
            try:
                script_is_file = os.path.isfile(self.script)
            except TypeError:
                # The script was given as in-memory YAML content (e.g. a dict),
                # which os.path.isfile cannot handle.
                script_is_file = False
            if script_is_file:
                base, _ = os.path.splitext(self.script)
                serial_data_path = base + '_' + suffix
            else:
                serial_data_path = os.path.join('.', suffix)
        # Experiments are independent, so fan the analyses out over MPI
        # when it is available.
        analysis_results = mpiplus.distribute(self._run_analysis,
                                              self.paths,
                                              **analyzer_kwargs)
        output = {}
        for path, result in zip(self.paths, analysis_results):
            # Each experiment is keyed by the last component of its directory.
            name = os.path.split(path)[-1]
            if not name:
                # Corner case: a single non-combinatorial experiment can live
                # directly in the output root, leaving an empty name.
                name = 'experiment'
            # Per-experiment exceptions were trapped by _run_analysis;
            # stringify them so the payload stays serializable.
            output[name] = str(result) if isinstance(result, Exception) else result
        if serialize_data:
            self._serialize(serial_data_path, output)
        return output

    @staticmethod
    def _serialize(serial_path, payload):
        """
        Pickle ``payload`` to ``serial_path``. Subclass hook for other formats.

        Parameters
        ----------
        serial_path : str
            Path of the serial file.
        payload : object
            Object to serialize (pickle).
        """
        with open(serial_path, 'wb') as f:
            pickle.dump(payload, f)

    def _run_analysis(self, path, **analyzer_kwargs):
        """
        Analyze one experiment, trapping any exception so that a single failing
        experiment does not abort the analysis of the others.

        Parameters
        ----------
        path : str
            Location of the YANK experiment output data.
        analyzer_kwargs
            Additional keywords fed to the :class:`YankMultiStateSamplerAnalyzer`
            for each phase of the experiment.

        Returns
        -------
        payload : dict or Exception
            Automatic-analysis output, or the exception that was raised.
        """
        try:
            # Keeping the error trap here lets _run_specific_analysis be
            # subclassed without re-implementing it.
            return self._run_specific_analysis(path, **analyzer_kwargs)
        except Exception as e:
            return e

    @staticmethod
    def _run_specific_analysis(path, **analyzer_kwargs):
        """Run a single auto-analysis; subclass hook."""
        return ExperimentAnalyzer(path, **analyzer_kwargs).auto_analyze()
# ==========================================
# HELPER FUNCTIONS FOR TRAJECTORY EXTRACTION
# ==========================================
def extract_u_n(ncfile):
    r"""
    Extract timeseries of u_n = - log q(X_n) from store file

    where q(X_n) = \pi_{k=1}^K u_{s_{nk}}(x_{nk})

    with X_n = [x_{n1}, ..., x_{nK}] is the current collection of replica configurations
    s_{nk} is the current state of replica k at iteration n
    u_k(x) is the kth reduced potential

    TODO: Figure out a way to remove this function

    Parameters
    ----------
    ncfile : netCDF4.Dataset
        Open NetCDF file to analyze

    Returns
    -------
    u_n : numpy array of numpy.float64
        u_n[n] is -log q(X_n)
    """
    # NOTE: the docstring is now a raw string; the previous version contained
    # the invalid escape sequence "\p" (deprecated since Python 3.6).
    # Get current dimensions.
    niterations = ncfile.variables['energies'].shape[0]
    nstates = ncfile.variables['energies'].shape[1]
    # Extract energies. energies[n, k, l] is the reduced potential of replica k
    # evaluated at state l during iteration n.
    logger.info("Reading energies...")
    energies = ncfile.variables['energies']
    logger.info("Done.")
    # Deconvolute replicas: u_kln[k, l, n] holds the energies re-indexed by the
    # state each replica occupied at iteration n.
    # (A previous version also built an unused u_kln_replica copy of the raw
    # energies here; that dead work has been removed.)
    logger.info("Deconvoluting replicas...")
    u_kln = np.zeros([nstates, nstates, niterations], np.float64)
    for iteration in range(niterations):
        state_indices = ncfile.variables['states'][iteration, :]
        u_kln[state_indices, :, iteration] = energies[iteration, :, :]
    logger.info("Done.")
    # Compute total negative log probability over all iterations by summing the
    # diagonal (each state's own reduced potential) per iteration.
    u_n = np.zeros([niterations], np.float64)
    for iteration in range(niterations):
        u_n[iteration] = np.sum(np.diagonal(u_kln[:, :, iteration]))
    return u_n
# ==============================================================================
# Extract trajectory from NetCDF4 file
# ==============================================================================
def extract_trajectory(nc_path, nc_checkpoint_file=None, state_index=None, replica_index=None,
                       start_frame=0, end_frame=-1, skip_frame=1, keep_solvent=True,
                       discard_equilibration=False, image_molecules=False):
    """Extract phase trajectory from the NetCDF4 file.

    Parameters
    ----------
    nc_path : str
        Path to the primary nc_file storing the analysis options
    nc_checkpoint_file : str or None, Optional
        File name of the checkpoint file housing the main trajectory
        Used if the checkpoint file is differently named from the default one chosen by the nc_path file.
        Default: None
    state_index : int, optional
        The index of the alchemical state for which to extract the trajectory.
        One and only one between state_index and replica_index must be not None
        (default is None).
    replica_index : int, optional
        The index of the replica for which to extract the trajectory. One and
        only one between state_index and replica_index must be not None (default
        is None).
    start_frame : int, optional
        Index of the first frame to include in the trajectory (default is 0).
    end_frame : int, optional
        Index of the last frame to include in the trajectory. If negative, will
        count from the end (default is -1).
    skip_frame : int, optional
        Extract one frame every skip_frame (default is 1).
    keep_solvent : bool, optional
        If False, solvent molecules are ignored (default is True).
    discard_equilibration : bool, optional
        If True, initial equilibration frames are discarded (see the method
        pymbar.timeseries.detectEquilibration() for details, default is False).
    image_molecules : bool, optional
        If True and the system is periodic, apply periodic boundary conditions
        to the molecule positions of the extracted trajectory (default is False).

    Returns
    -------
    trajectory: mdtraj.Trajectory
        The trajectory extracted from the netcdf file.
    """
    # Check correct input
    if (state_index is None) == (replica_index is None):
        raise ValueError('One and only one between "state_index" and '
                         '"replica_index" must be specified.')
    if not os.path.isfile(nc_path):
        raise ValueError('Cannot find file {}'.format(nc_path))
    # Import simulation data
    reporter = None
    try:
        reporter = mmtools.multistate.MultiStateReporter(nc_path, open_mode='r', checkpoint_storage=nc_checkpoint_file)
        metadata = reporter.read_dict('metadata')
        reference_system = mmtools.utils.deserialize(metadata['reference_state']).system
        topography = mmtools.utils.deserialize(metadata['topography'])
        topology = topography.topology
        # Determine if system is periodic
        is_periodic = reference_system.usesPeriodicBoundaryConditions()
        logger.info('Detected periodic boundary conditions: {}'.format(is_periodic))
        # Get dimensions
        # Assume full iteration until proven otherwise
        last_checkpoint = True
        # BUGFIX: full_iteration is read below (equilibration discard) even when
        # keep_solvent is True, so it needs a default here as well; previously it
        # was only assigned inside the "if not keep_solvent" branch, raising
        # NameError for keep_solvent=True + discard_equilibration=True.
        full_iteration = True
        trajectory_storage = reporter._storage_checkpoint
        if not keep_solvent:
            # If tracked solute particles, use any last iteration, set with this logic test
            full_iteration = len(reporter.analysis_particle_indices) == 0
            if not full_iteration:
                trajectory_storage = reporter._storage_analysis
                topology = topology.subset(reporter.analysis_particle_indices)
        n_iterations = reporter.read_last_iteration(last_checkpoint=last_checkpoint)
        n_frames = trajectory_storage.variables['positions'].shape[0]
        n_atoms = trajectory_storage.variables['positions'].shape[2]
        logger.info('Number of frames: {}, atoms: {}'.format(n_frames, n_atoms))
        # Determine frames to extract.
        # Convert negative indices to last indices.
        if start_frame < 0:
            start_frame = n_frames + start_frame
        if end_frame < 0:
            end_frame = n_frames + end_frame + 1
        frame_indices = range(start_frame, end_frame, skip_frame)
        if len(frame_indices) == 0:
            raise ValueError('No frames selected')
        logger.info('Extracting frames from {} to {} every {}'.format(
            start_frame, end_frame, skip_frame))
        # Discard equilibration samples
        if discard_equilibration:
            u_n = extract_u_n(reporter._storage_analysis)
            # Discard frame 0 with minimized energy which throws off automatic equilibration detection.
            n_equil_iterations, g, n_eff = timeseries.detectEquilibration(u_n[1:])
            n_equil_iterations += 1
            logger.info(("Discarding initial {} equilibration samples (leaving {} "
                         "effectively uncorrelated samples)...").format(n_equil_iterations, n_eff))
            # Find first frame post-equilibration.
            # NOTE(review): if no checkpoint iteration follows n_equil_iterations,
            # n_equil_frames may be left unset/None here — confirm upstream intent.
            if not full_iteration:
                for iteration in range(n_equil_iterations, n_iterations):
                    n_equil_frames = reporter._calculate_checkpoint_iteration(iteration)
                    if n_equil_frames is not None:
                        break
            else:
                n_equil_frames = n_equil_iterations
            frame_indices = frame_indices[n_equil_frames:-1]
        # Determine the number of frames that the trajectory will have.
        if state_index is None:
            n_trajectory_frames = len(frame_indices)
        else:
            # With SAMS, an iteration can have 0 or more replicas in a given state.
            # Deconvolute state indices.
            state_indices = [None for _ in frame_indices]
            for i, iteration in enumerate(frame_indices):
                replica_indices = reporter._storage_analysis.variables['states'][iteration, :]
                state_indices[i] = np.where(replica_indices == state_index)[0]
            n_trajectory_frames = sum(len(x) for x in state_indices)
        # Initialize positions and box vectors arrays.
        # MDTraj Cython code expects float32 positions.
        positions = np.zeros((n_trajectory_frames, n_atoms, 3), dtype=np.float32)
        if is_periodic:
            box_vectors = np.zeros((n_trajectory_frames, 3, 3), dtype=np.float32)
        # Extract state positions and box vectors.
        if state_index is not None:
            logger.info('Extracting positions of state {}...'.format(state_index))
            # Extract state positions and box vectors.
            frame_idx = 0
            for i, iteration in enumerate(frame_indices):
                for replica_index in state_indices[i]:
                    positions[frame_idx, :, :] = trajectory_storage.variables['positions'][iteration, replica_index, :, :].astype(np.float32)
                    if is_periodic:
                        box_vectors[frame_idx, :, :] = trajectory_storage.variables['box_vectors'][iteration, replica_index, :, :].astype(np.float32)
                    frame_idx += 1
        else:  # Extract replica positions and box vectors
            logger.info('Extracting positions of replica {}...'.format(replica_index))
            for i, iteration in enumerate(frame_indices):
                positions[i, :, :] = trajectory_storage.variables['positions'][iteration, replica_index, :, :].astype(np.float32)
                if is_periodic:
                    box_vectors[i, :, :] = trajectory_storage.variables['box_vectors'][iteration, replica_index, :, :].astype(np.float32)
    finally:
        if reporter is not None:
            reporter.close()
    # Create trajectory object
    logger.info('Creating trajectory object...')
    trajectory = mdtraj.Trajectory(positions, topology)
    if is_periodic:
        trajectory.unitcell_vectors = box_vectors
    # Force periodic boundary conditions to molecules positions
    if image_molecules and is_periodic:
        logger.info('Applying periodic boundary conditions to molecules positions...')
        # Use the receptor as an anchor molecule.
        anchor_atom_indices = set(topography.receptor_atoms)
        if len(anchor_atom_indices) == 0:  # Hydration free energy.
            anchor_atom_indices = set(topography.solute_atoms)
        anchor_molecules = [{a for a in topology.atoms if a.index in anchor_atom_indices}]
        trajectory.image_molecules(inplace=True, anchor_molecules=anchor_molecules)
    elif image_molecules:
        logger.warning('The molecules will not be imaged because the system is non-periodic.')
    return trajectory
| mit | e08ebfe427c742a10b4d5b815b11d2bb | 42.024126 | 149 | 0.588978 | 4.213035 | false | false | false | false |
aleju/imgaug | checks/check_directed_edge_detect.py | 2 | 1111 | from __future__ import print_function, division
from itertools import cycle
import numpy as np
from skimage import data
import cv2
from imgaug import augmenters as iaa
# Half-size in pixels of the green square marker drawn on the circle.
POINT_SIZE = 5
# Rotation increment per displayed frame, in degrees.
DEG_PER_STEP = 1
# Delay passed to cv2.waitKey() per frame, in milliseconds.
TIME_PER_STEP = 10
def main():
    """Visualize DirectedEdgeDetect with its direction rotating around the image."""
    image = data.astronaut()
    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", image)
    cv2.waitKey(TIME_PER_STEP)

    height, width = image.shape[0], image.shape[1]
    cx = width // 2
    cy = height // 2
    radius = int(min(height, width) / 3)

    # Endless loop over the full circle in DEG_PER_STEP increments.
    for deg in cycle(np.arange(0, 360, DEG_PER_STEP)):
        # Marker position on the circle; the -90 offset makes deg=0 point upwards.
        rad = np.deg2rad(deg - 90)
        px = int(cx + radius * np.cos(rad))
        py = int(cy + radius * np.sin(rad))

        # DirectedEdgeDetect expects direction as a fraction of a full turn.
        aug = iaa.DirectedEdgeDetect(alpha=1.0, direction=deg / 360)
        image_aug = aug.augment_image(image)
        # Paint the green marker square at the current angle.
        image_aug[py-POINT_SIZE:py+POINT_SIZE+1, px-POINT_SIZE:px+POINT_SIZE+1, :] = \
            np.array([0, 255, 0])
        cv2.imshow("aug", image_aug)
        cv2.waitKey(TIME_PER_STEP)
# Run the interactive visualization only when executed as a script.
if __name__ == "__main__":
    main()
| mit | a193d846cdb7a942e739f737cc01a339 | 24.837209 | 103 | 0.612961 | 2.900783 | false | false | false | false |
aleju/imgaug | imgaug/external/opensimplex.py | 3 | 79631 | """
This is a copy of the OpenSimplex library,
based on commit d861cb290531ad15825f21dc4cc35c5d4f407259 from 20.07.2017.
"""
# Based on: https://gist.github.com/KdotJPG/b1270127455a94ac5d19
import sys
from ctypes import c_long
from math import floor as _floor
# On Python 2, math.floor returns a float; wrap it so that ``floor`` always
# yields an int, matching Python 3's math.floor behavior.
if sys.version_info[0] >= 3:
    floor = _floor
else:
    def floor(num):
        return int(_floor(num))
# Skew/unskew factors mapping between input space and the simplectic grid.
STRETCH_CONSTANT_2D = -0.211324865405187 # (1/Math.sqrt(2+1)-1)/2
SQUISH_CONSTANT_2D = 0.366025403784439 # (Math.sqrt(2+1)-1)/2
STRETCH_CONSTANT_3D = -1.0 / 6 # (1/Math.sqrt(3+1)-1)/3
SQUISH_CONSTANT_3D = 1.0 / 3 # (Math.sqrt(3+1)-1)/3
STRETCH_CONSTANT_4D = -0.138196601125011 # (1/Math.sqrt(4+1)-1)/4
SQUISH_CONSTANT_4D = 0.309016994374947 # (Math.sqrt(4+1)-1)/4
# Divisors applied to the summed gradient contributions in noise2d/3d/4d.
NORM_CONSTANT_2D = 47
NORM_CONSTANT_3D = 103
NORM_CONSTANT_4D = 30
DEFAULT_SEED = 0
# Gradients for 2D. They approximate the directions to the
# vertices of an octagon from the center.
# Stored flat as (gx, gy) pairs; indexed in steps of 2.
GRADIENTS_2D = (
     5,  2,    2,  5,
    -5,  2,   -2,  5,
     5, -2,    2, -5,
    -5, -2,   -2, -5,
)
# Gradients for 3D. They approximate the directions to the
# vertices of a rhombicuboctahedron from the center, skewed so
# that the triangular and square facets can be inscribed inside
# circles of the same radius.
# Stored flat as (gx, gy, gz) triples; indexed in steps of 3.
GRADIENTS_3D = (
    -11,  4,  4,     -4,  11,  4,    -4,  4,  11,
     11,  4,  4,      4,  11,  4,     4,  4,  11,
    -11, -4,  4,     -4, -11,  4,    -4, -4,  11,
     11, -4,  4,      4, -11,  4,     4, -4,  11,
    -11,  4, -4,     -4,  11, -4,    -4,  4, -11,
     11,  4, -4,      4,  11, -4,     4,  4, -11,
    -11, -4, -4,     -4, -11, -4,    -4, -4, -11,
     11, -4, -4,      4, -11, -4,     4, -4, -11,
)
# Gradients for 4D. They approximate the directions to the
# vertices of a disprismatotesseractihexadecachoron from the center,
# skewed so that the tetrahedral and cubic facets can be inscribed inside
# spheres of the same radius.
# Stored flat as (gx, gy, gz, gw) quadruples; indexed in steps of 4.
GRADIENTS_4D = (
     3,  1,  1,  1,      1,  3,  1,  1,      1,  1,  3,  1,      1,  1,  1,  3,
    -3,  1,  1,  1,     -1,  3,  1,  1,     -1,  1,  3,  1,     -1,  1,  1,  3,
     3, -1,  1,  1,      1, -3,  1,  1,      1, -1,  3,  1,      1, -1,  1,  3,
    -3, -1,  1,  1,     -1, -3,  1,  1,     -1, -1,  3,  1,     -1, -1,  1,  3,
     3,  1, -1,  1,      1,  3, -1,  1,      1,  1, -3,  1,      1,  1, -1,  3,
    -3,  1, -1,  1,     -1,  3, -1,  1,     -1,  1, -3,  1,     -1,  1, -1,  3,
     3, -1, -1,  1,      1, -3, -1,  1,      1, -1, -3,  1,      1, -1, -1,  3,
    -3, -1, -1,  1,     -1, -3, -1,  1,     -1, -1, -3,  1,     -1, -1, -1,  3,
     3,  1,  1, -1,      1,  3,  1, -1,      1,  1,  3, -1,      1,  1,  1, -3,
    -3,  1,  1, -1,     -1,  3,  1, -1,     -1,  1,  3, -1,     -1,  1,  1, -3,
     3, -1,  1, -1,      1, -3,  1, -1,      1, -1,  3, -1,      1, -1,  1, -3,
    -3, -1,  1, -1,     -1, -3,  1, -1,     -1, -1,  3, -1,     -1, -1,  1, -3,
     3,  1, -1, -1,      1,  3, -1, -1,      1,  1, -3, -1,      1,  1, -1, -3,
    -3,  1, -1, -1,     -1,  3, -1, -1,     -1,  1, -3, -1,     -1,  1, -1, -3,
     3, -1, -1, -1,      1, -3, -1, -1,      1, -1, -3, -1,      1, -1, -1, -3,
    -3, -1, -1, -1,     -1, -3, -1, -1,     -1, -1, -3, -1,     -1, -1, -1, -3,
)
def overflow(x):
    """Reduce ``x`` to a signed 64-bit integer, emulating C ``int64`` overflow.

    Python ints have arbitrary precision, so the multiply-add seed scrambling
    used by the noise functions would never wrap around on its own. The
    previous implementation relied on ``ctypes.c_long``, whose width is
    platform-dependent (32 bit on Windows, 64 bit on most Unix LP64 systems);
    explicit masking guarantees 64-bit wrap-around semantics everywhere and
    is identical to ``c_long(x).value`` on 64-bit-long platforms.
    """
    return ((x + 0x8000000000000000) & 0xFFFFFFFFFFFFFFFF) - 0x8000000000000000
class OpenSimplex(object):
"""
OpenSimplex n-dimensional gradient noise functions.
"""
def __init__(self, seed=DEFAULT_SEED):
"""
Initiate the class and generate permutation arrays from a seed number.
"""
# Initializes the class using a permutation array generated from a 64-bit seed.
# Generates a proper permutation (i.e. doesn't merely perform N
# successive pair swaps on a base array)
perm = self._perm = [0] * 256 # Have to zero fill so we can properly loop over it later
perm_grad_index_3D = self._perm_grad_index_3D = [0] * 256
source = [i for i in range(0, 256)]
seed = overflow(seed * 6364136223846793005 + 1442695040888963407)
seed = overflow(seed * 6364136223846793005 + 1442695040888963407)
seed = overflow(seed * 6364136223846793005 + 1442695040888963407)
for i in range(255, -1, -1):
seed = overflow(seed * 6364136223846793005 + 1442695040888963407)
r = int((seed + 31) % (i + 1))
if r < 0:
r += i + 1
perm[i] = source[r]
perm_grad_index_3D[i] = int((perm[i] % (len(GRADIENTS_3D) / 3)) * 3)
source[r] = source[i]
def _extrapolate2d(self, xsb, ysb, dx, dy):
perm = self._perm
index = perm[(perm[xsb & 0xFF] + ysb) & 0xFF] & 0x0E
g1, g2 = GRADIENTS_2D[index:index + 2]
return g1 * dx + g2 * dy
def _extrapolate3d(self, xsb, ysb, zsb, dx, dy, dz):
perm = self._perm
index = self._perm_grad_index_3D[
(perm[(perm[xsb & 0xFF] + ysb) & 0xFF] + zsb) & 0xFF
]
g1, g2, g3 = GRADIENTS_3D[index:index + 3]
return g1 * dx + g2 * dy + g3 * dz
def _extrapolate4d(self, xsb, ysb, zsb, wsb, dx, dy, dz, dw):
perm = self._perm
index = perm[(
perm[(
perm[(perm[xsb & 0xFF] + ysb) & 0xFF] + zsb
) & 0xFF] + wsb
) & 0xFF] & 0xFC
g1, g2, g3, g4 = GRADIENTS_4D[index:index + 4]
return g1 * dx + g2 * dy + g3 * dz + g4 * dw
    def noise2d(self, x, y):
        """
        Generate 2D OpenSimplex noise from X,Y coordinates.

        The result is the sum of the attenuated gradient contributions of the
        nearby lattice vertices, divided by NORM_CONSTANT_2D.
        """
        # Place input coordinates onto grid.
        stretch_offset = (x + y) * STRETCH_CONSTANT_2D
        xs = x + stretch_offset
        ys = y + stretch_offset
        # Floor to get grid coordinates of rhombus (stretched square) super-cell origin.
        xsb = floor(xs)
        ysb = floor(ys)
        # Skew out to get actual coordinates of rhombus origin. We'll need these later.
        squish_offset = (xsb + ysb) * SQUISH_CONSTANT_2D
        xb = xsb + squish_offset
        yb = ysb + squish_offset
        # Compute grid coordinates relative to rhombus origin.
        xins = xs - xsb
        yins = ys - ysb
        # Sum those together to get a value that determines which region we're in.
        in_sum = xins + yins
        # Positions relative to origin point.
        dx0 = x - xb
        dy0 = y - yb
        value = 0
        # Contribution (1,0)
        dx1 = dx0 - 1 - SQUISH_CONSTANT_2D
        dy1 = dy0 - 0 - SQUISH_CONSTANT_2D
        attn1 = 2 - dx1 * dx1 - dy1 * dy1
        extrapolate = self._extrapolate2d
        if attn1 > 0:
            attn1 *= attn1
            value += attn1 * attn1 * extrapolate(xsb + 1, ysb + 0, dx1, dy1)
        # Contribution (0,1)
        dx2 = dx0 - 0 - SQUISH_CONSTANT_2D
        dy2 = dy0 - 1 - SQUISH_CONSTANT_2D
        attn2 = 2 - dx2 * dx2 - dy2 * dy2
        if attn2 > 0:
            attn2 *= attn2
            value += attn2 * attn2 * extrapolate(xsb + 0, ysb + 1, dx2, dy2)
        # Pick the fourth ("extra") vertex depending on which triangle of the
        # rhombus the input point falls in.
        if in_sum <= 1: # We're inside the triangle (2-Simplex) at (0,0)
            zins = 1 - in_sum
            if zins > xins or zins > yins: # (0,0) is one of the closest two triangular vertices
                if xins > yins:
                    xsv_ext = xsb + 1
                    ysv_ext = ysb - 1
                    dx_ext = dx0 - 1
                    dy_ext = dy0 + 1
                else:
                    xsv_ext = xsb - 1
                    ysv_ext = ysb + 1
                    dx_ext = dx0 + 1
                    dy_ext = dy0 - 1
            else: # (1,0) and (0,1) are the closest two vertices.
                xsv_ext = xsb + 1
                ysv_ext = ysb + 1
                dx_ext = dx0 - 1 - 2 * SQUISH_CONSTANT_2D
                dy_ext = dy0 - 1 - 2 * SQUISH_CONSTANT_2D
        else: # We're inside the triangle (2-Simplex) at (1,1)
            zins = 2 - in_sum
            if zins < xins or zins < yins: # (0,0) is one of the closest two triangular vertices
                if xins > yins:
                    xsv_ext = xsb + 2
                    ysv_ext = ysb + 0
                    dx_ext = dx0 - 2 - 2 * SQUISH_CONSTANT_2D
                    dy_ext = dy0 + 0 - 2 * SQUISH_CONSTANT_2D
                else:
                    xsv_ext = xsb + 0
                    ysv_ext = ysb + 2
                    dx_ext = dx0 + 0 - 2 * SQUISH_CONSTANT_2D
                    dy_ext = dy0 - 2 - 2 * SQUISH_CONSTANT_2D
            else: # (1,0) and (0,1) are the closest two vertices.
                dx_ext = dx0
                dy_ext = dy0
                xsv_ext = xsb
                ysv_ext = ysb
            # Re-base the origin contribution onto the (1,1) vertex.
            xsb += 1
            ysb += 1
            dx0 = dx0 - 1 - 2 * SQUISH_CONSTANT_2D
            dy0 = dy0 - 1 - 2 * SQUISH_CONSTANT_2D
        # Contribution (0,0) or (1,1)
        attn0 = 2 - dx0 * dx0 - dy0 * dy0
        if attn0 > 0:
            attn0 *= attn0
            value += attn0 * attn0 * extrapolate(xsb, ysb, dx0, dy0)
        # Extra Vertex
        attn_ext = 2 - dx_ext * dx_ext - dy_ext * dy_ext
        if attn_ext > 0:
            attn_ext *= attn_ext
            value += attn_ext * attn_ext * extrapolate(xsv_ext, ysv_ext, dx_ext, dy_ext)
        return value / NORM_CONSTANT_2D
    def noise3d(self, x, y, z):
        """
        Generate 3D OpenSimplex noise from X,Y,Z coordinates.

        The result is the sum of the attenuated gradient contributions of the
        nearby lattice vertices, divided by NORM_CONSTANT_3D.
        """
        # Place input coordinates on simplectic honeycomb.
        stretch_offset = (x + y + z) * STRETCH_CONSTANT_3D
        xs = x + stretch_offset
        ys = y + stretch_offset
        zs = z + stretch_offset
        # Floor to get simplectic honeycomb coordinates of rhombohedron (stretched cube) super-cell origin.
        xsb = floor(xs)
        ysb = floor(ys)
        zsb = floor(zs)
        # Skew out to get actual coordinates of rhombohedron origin. We'll need these later.
        squish_offset = (xsb + ysb + zsb) * SQUISH_CONSTANT_3D
        xb = xsb + squish_offset
        yb = ysb + squish_offset
        zb = zsb + squish_offset
        # Compute simplectic honeycomb coordinates relative to rhombohedral origin.
        xins = xs - xsb
        yins = ys - ysb
        zins = zs - zsb
        # Sum those together to get a value that determines which region we're in.
        in_sum = xins + yins + zins
        # Positions relative to origin point.
        dx0 = x - xb
        dy0 = y - yb
        dz0 = z - zb
        value = 0
        extrapolate = self._extrapolate3d
        if in_sum <= 1: # We're inside the tetrahedron (3-Simplex) at (0,0,0)
            # Determine which two of (0,0,1), (0,1,0), (1,0,0) are closest.
            # a_point/b_point encode candidate vertices as bitmasks over the
            # x (0x01), y (0x02) and z (0x04) axes.
            a_point = 0x01
            a_score = xins
            b_point = 0x02
            b_score = yins
            if a_score >= b_score and zins > b_score:
                b_score = zins
                b_point = 0x04
            elif a_score < b_score and zins > a_score:
                a_score = zins
                a_point = 0x04
            # Now we determine the two lattice points not part of the tetrahedron that may contribute.
            # This depends on the closest two tetrahedral vertices, including (0,0,0)
            wins = 1 - in_sum
            if wins > a_score or wins > b_score: # (0,0,0) is one of the closest two tetrahedral vertices.
                c = b_point if (b_score > a_score) else a_point # Our other closest vertex is the closest out of a and b.
                if (c & 0x01) == 0:
                    xsv_ext0 = xsb - 1
                    xsv_ext1 = xsb
                    dx_ext0 = dx0 + 1
                    dx_ext1 = dx0
                else:
                    xsv_ext0 = xsv_ext1 = xsb + 1
                    dx_ext0 = dx_ext1 = dx0 - 1
                if (c & 0x02) == 0:
                    ysv_ext0 = ysv_ext1 = ysb
                    dy_ext0 = dy_ext1 = dy0
                    if (c & 0x01) == 0:
                        ysv_ext1 -= 1
                        dy_ext1 += 1
                    else:
                        ysv_ext0 -= 1
                        dy_ext0 += 1
                else:
                    ysv_ext0 = ysv_ext1 = ysb + 1
                    dy_ext0 = dy_ext1 = dy0 - 1
                if (c & 0x04) == 0:
                    zsv_ext0 = zsb
                    zsv_ext1 = zsb - 1
                    dz_ext0 = dz0
                    dz_ext1 = dz0 + 1
                else:
                    zsv_ext0 = zsv_ext1 = zsb + 1
                    dz_ext0 = dz_ext1 = dz0 - 1
            else: # (0,0,0) is not one of the closest two tetrahedral vertices.
                c = (a_point | b_point) # Our two extra vertices are determined by the closest two.
                if (c & 0x01) == 0:
                    xsv_ext0 = xsb
                    xsv_ext1 = xsb - 1
                    dx_ext0 = dx0 - 2 * SQUISH_CONSTANT_3D
                    dx_ext1 = dx0 + 1 - SQUISH_CONSTANT_3D
                else:
                    xsv_ext0 = xsv_ext1 = xsb + 1
                    dx_ext0 = dx0 - 1 - 2 * SQUISH_CONSTANT_3D
                    dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_3D
                if (c & 0x02) == 0:
                    ysv_ext0 = ysb
                    ysv_ext1 = ysb - 1
                    dy_ext0 = dy0 - 2 * SQUISH_CONSTANT_3D
                    dy_ext1 = dy0 + 1 - SQUISH_CONSTANT_3D
                else:
                    ysv_ext0 = ysv_ext1 = ysb + 1
                    dy_ext0 = dy0 - 1 - 2 * SQUISH_CONSTANT_3D
                    dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_3D
                if (c & 0x04) == 0:
                    zsv_ext0 = zsb
                    zsv_ext1 = zsb - 1
                    dz_ext0 = dz0 - 2 * SQUISH_CONSTANT_3D
                    dz_ext1 = dz0 + 1 - SQUISH_CONSTANT_3D
                else:
                    zsv_ext0 = zsv_ext1 = zsb + 1
                    dz_ext0 = dz0 - 1 - 2 * SQUISH_CONSTANT_3D
                    dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_3D
            # Contribution (0,0,0)
            attn0 = 2 - dx0 * dx0 - dy0 * dy0 - dz0 * dz0
            if attn0 > 0:
                attn0 *= attn0
                value += attn0 * attn0 * extrapolate(xsb + 0, ysb + 0, zsb + 0, dx0, dy0, dz0)
            # Contribution (1,0,0)
            dx1 = dx0 - 1 - SQUISH_CONSTANT_3D
            dy1 = dy0 - 0 - SQUISH_CONSTANT_3D
            dz1 = dz0 - 0 - SQUISH_CONSTANT_3D
            attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1
            if attn1 > 0:
                attn1 *= attn1
                value += attn1 * attn1 * extrapolate(xsb + 1, ysb + 0, zsb + 0, dx1, dy1, dz1)
            # Contribution (0,1,0)
            dx2 = dx0 - 0 - SQUISH_CONSTANT_3D
            dy2 = dy0 - 1 - SQUISH_CONSTANT_3D
            dz2 = dz1
            attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2
            if attn2 > 0:
                attn2 *= attn2
                value += attn2 * attn2 * extrapolate(xsb + 0, ysb + 1, zsb + 0, dx2, dy2, dz2)
            # Contribution (0,0,1)
            dx3 = dx2
            dy3 = dy1
            dz3 = dz0 - 1 - SQUISH_CONSTANT_3D
            attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3
            if attn3 > 0:
                attn3 *= attn3
                value += attn3 * attn3 * extrapolate(xsb + 0, ysb + 0, zsb + 1, dx3, dy3, dz3)
        elif in_sum >= 2: # We're inside the tetrahedron (3-Simplex) at (1,1,1)
            # Determine which two tetrahedral vertices are the closest, out of (1,1,0), (1,0,1), (0,1,1) but not (1,1,1).
            a_point = 0x06
            a_score = xins
            b_point = 0x05
            b_score = yins
            if a_score <= b_score and zins < b_score:
                b_score = zins
                b_point = 0x03
            elif a_score > b_score and zins < a_score:
                a_score = zins
                a_point = 0x03
            # Now we determine the two lattice points not part of the tetrahedron that may contribute.
            # This depends on the closest two tetrahedral vertices, including (1,1,1)
            wins = 3 - in_sum
            if wins < a_score or wins < b_score: # (1,1,1) is one of the closest two tetrahedral vertices.
                c = b_point if (b_score < a_score) else a_point # Our other closest vertex is the closest out of a and b.
                if (c & 0x01) != 0:
                    xsv_ext0 = xsb + 2
                    xsv_ext1 = xsb + 1
                    dx_ext0 = dx0 - 2 - 3 * SQUISH_CONSTANT_3D
                    dx_ext1 = dx0 - 1 - 3 * SQUISH_CONSTANT_3D
                else:
                    xsv_ext0 = xsv_ext1 = xsb
                    dx_ext0 = dx_ext1 = dx0 - 3 * SQUISH_CONSTANT_3D
                if (c & 0x02) != 0:
                    ysv_ext0 = ysv_ext1 = ysb + 1
                    dy_ext0 = dy_ext1 = dy0 - 1 - 3 * SQUISH_CONSTANT_3D
                    if (c & 0x01) != 0:
                        ysv_ext1 += 1
                        dy_ext1 -= 1
                    else:
                        ysv_ext0 += 1
                        dy_ext0 -= 1
                else:
                    ysv_ext0 = ysv_ext1 = ysb
                    dy_ext0 = dy_ext1 = dy0 - 3 * SQUISH_CONSTANT_3D
                if (c & 0x04) != 0:
                    zsv_ext0 = zsb + 1
                    zsv_ext1 = zsb + 2
                    dz_ext0 = dz0 - 1 - 3 * SQUISH_CONSTANT_3D
                    dz_ext1 = dz0 - 2 - 3 * SQUISH_CONSTANT_3D
                else:
                    zsv_ext0 = zsv_ext1 = zsb
                    dz_ext0 = dz_ext1 = dz0 - 3 * SQUISH_CONSTANT_3D
            else: # (1,1,1) is not one of the closest two tetrahedral vertices.
                c = (a_point & b_point) # Our two extra vertices are determined by the closest two.
                if (c & 0x01) != 0:
                    xsv_ext0 = xsb + 1
                    xsv_ext1 = xsb + 2
                    dx_ext0 = dx0 - 1 - SQUISH_CONSTANT_3D
                    dx_ext1 = dx0 - 2 - 2 * SQUISH_CONSTANT_3D
                else:
                    xsv_ext0 = xsv_ext1 = xsb
                    dx_ext0 = dx0 - SQUISH_CONSTANT_3D
                    dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_3D
                if (c & 0x02) != 0:
                    ysv_ext0 = ysb + 1
                    ysv_ext1 = ysb + 2
                    dy_ext0 = dy0 - 1 - SQUISH_CONSTANT_3D
                    dy_ext1 = dy0 - 2 - 2 * SQUISH_CONSTANT_3D
                else:
                    ysv_ext0 = ysv_ext1 = ysb
                    dy_ext0 = dy0 - SQUISH_CONSTANT_3D
                    dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_3D
                if (c & 0x04) != 0:
                    zsv_ext0 = zsb + 1
                    zsv_ext1 = zsb + 2
                    dz_ext0 = dz0 - 1 - SQUISH_CONSTANT_3D
                    dz_ext1 = dz0 - 2 - 2 * SQUISH_CONSTANT_3D
                else:
                    zsv_ext0 = zsv_ext1 = zsb
                    dz_ext0 = dz0 - SQUISH_CONSTANT_3D
                    dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_3D
            # Contribution (1,1,0)
            dx3 = dx0 - 1 - 2 * SQUISH_CONSTANT_3D
            dy3 = dy0 - 1 - 2 * SQUISH_CONSTANT_3D
            dz3 = dz0 - 0 - 2 * SQUISH_CONSTANT_3D
            attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3
            if attn3 > 0:
                attn3 *= attn3
                value += attn3 * attn3 * extrapolate(xsb + 1, ysb + 1, zsb + 0, dx3, dy3, dz3)
            # Contribution (1,0,1)
            dx2 = dx3
            dy2 = dy0 - 0 - 2 * SQUISH_CONSTANT_3D
            dz2 = dz0 - 1 - 2 * SQUISH_CONSTANT_3D
            attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2
            if attn2 > 0:
                attn2 *= attn2
                value += attn2 * attn2 * extrapolate(xsb + 1, ysb + 0, zsb + 1, dx2, dy2, dz2)
            # Contribution (0,1,1)
            dx1 = dx0 - 0 - 2 * SQUISH_CONSTANT_3D
            dy1 = dy3
            dz1 = dz2
            attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1
            if attn1 > 0:
                attn1 *= attn1
                value += attn1 * attn1 * extrapolate(xsb + 0, ysb + 1, zsb + 1, dx1, dy1, dz1)
            # Contribution (1,1,1)
            dx0 = dx0 - 1 - 3 * SQUISH_CONSTANT_3D
            dy0 = dy0 - 1 - 3 * SQUISH_CONSTANT_3D
            dz0 = dz0 - 1 - 3 * SQUISH_CONSTANT_3D
            attn0 = 2 - dx0 * dx0 - dy0 * dy0 - dz0 * dz0
            if attn0 > 0:
                attn0 *= attn0
                value += attn0 * attn0 * extrapolate(xsb + 1, ysb + 1, zsb + 1, dx0, dy0, dz0)
        else: # We're inside the octahedron (Rectified 3-Simplex) in between.
            # Decide between point (0,0,1) and (1,1,0) as closest
            p1 = xins + yins
            if p1 > 1:
                a_score = p1 - 1
                a_point = 0x03
                a_is_further_side = True
            else:
                a_score = 1 - p1
                a_point = 0x04
                a_is_further_side = False
            # Decide between point (0,1,0) and (1,0,1) as closest
            p2 = xins + zins
            if p2 > 1:
                b_score = p2 - 1
                b_point = 0x05
                b_is_further_side = True
            else:
                b_score = 1 - p2
                b_point = 0x02
                b_is_further_side = False
            # The closest out of the two (1,0,0) and (0,1,1) will replace the furthest out of the two decided above, if closer.
            p3 = yins + zins
            if p3 > 1:
                score = p3 - 1
                if a_score <= b_score and a_score < score:
                    a_point = 0x06
                    a_is_further_side = True
                elif a_score > b_score and b_score < score:
                    b_point = 0x06
                    b_is_further_side = True
            else:
                score = 1 - p3
                if a_score <= b_score and a_score < score:
                    a_point = 0x01
                    a_is_further_side = False
                elif a_score > b_score and b_score < score:
                    b_point = 0x01
                    b_is_further_side = False
            # Where each of the two closest points are determines how the extra two vertices are calculated.
            if a_is_further_side == b_is_further_side:
                if a_is_further_side: # Both closest points on (1,1,1) side
                    # One of the two extra points is (1,1,1)
                    dx_ext0 = dx0 - 1 - 3 * SQUISH_CONSTANT_3D
                    dy_ext0 = dy0 - 1 - 3 * SQUISH_CONSTANT_3D
                    dz_ext0 = dz0 - 1 - 3 * SQUISH_CONSTANT_3D
                    xsv_ext0 = xsb + 1
                    ysv_ext0 = ysb + 1
                    zsv_ext0 = zsb + 1
                    # Other extra point is based on the shared axis.
                    c = (a_point & b_point)
                    if (c & 0x01) != 0:
                        dx_ext1 = dx0 - 2 - 2 * SQUISH_CONSTANT_3D
                        dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_3D
                        dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_3D
                        xsv_ext1 = xsb + 2
                        ysv_ext1 = ysb
                        zsv_ext1 = zsb
                    elif (c & 0x02) != 0:
                        dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_3D
                        dy_ext1 = dy0 - 2 - 2 * SQUISH_CONSTANT_3D
                        dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_3D
                        xsv_ext1 = xsb
                        ysv_ext1 = ysb + 2
                        zsv_ext1 = zsb
                    else:
                        dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_3D
                        dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_3D
                        dz_ext1 = dz0 - 2 - 2 * SQUISH_CONSTANT_3D
                        xsv_ext1 = xsb
                        ysv_ext1 = ysb
                        zsv_ext1 = zsb + 2
                else:  # Both closest points on (0,0,0) side
                    # One of the two extra points is (0,0,0)
                    dx_ext0 = dx0
                    dy_ext0 = dy0
                    dz_ext0 = dz0
                    xsv_ext0 = xsb
                    ysv_ext0 = ysb
                    zsv_ext0 = zsb
                    # Other extra point is based on the omitted axis.
                    c = (a_point | b_point)
                    if (c & 0x01) == 0:
                        dx_ext1 = dx0 + 1 - SQUISH_CONSTANT_3D
                        dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_3D
                        dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_3D
                        xsv_ext1 = xsb - 1
                        ysv_ext1 = ysb + 1
                        zsv_ext1 = zsb + 1
                    elif (c & 0x02) == 0:
                        dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_3D
                        dy_ext1 = dy0 + 1 - SQUISH_CONSTANT_3D
                        dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_3D
                        xsv_ext1 = xsb + 1
                        ysv_ext1 = ysb - 1
                        zsv_ext1 = zsb + 1
                    else:
                        dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_3D
                        dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_3D
                        dz_ext1 = dz0 + 1 - SQUISH_CONSTANT_3D
                        xsv_ext1 = xsb + 1
                        ysv_ext1 = ysb + 1
                        zsv_ext1 = zsb - 1
            else: # One point on (0,0,0) side, one point on (1,1,1) side
                if a_is_further_side:
                    c1 = a_point
                    c2 = b_point
                else:
                    c1 = b_point
                    c2 = a_point
                # One contribution is a _permutation of (1,1,-1)
                if (c1 & 0x01) == 0:
                    dx_ext0 = dx0 + 1 - SQUISH_CONSTANT_3D
                    dy_ext0 = dy0 - 1 - SQUISH_CONSTANT_3D
                    dz_ext0 = dz0 - 1 - SQUISH_CONSTANT_3D
                    xsv_ext0 = xsb - 1
                    ysv_ext0 = ysb + 1
                    zsv_ext0 = zsb + 1
                elif (c1 & 0x02) == 0:
                    dx_ext0 = dx0 - 1 - SQUISH_CONSTANT_3D
                    dy_ext0 = dy0 + 1 - SQUISH_CONSTANT_3D
                    dz_ext0 = dz0 - 1 - SQUISH_CONSTANT_3D
                    xsv_ext0 = xsb + 1
                    ysv_ext0 = ysb - 1
                    zsv_ext0 = zsb + 1
                else:
                    dx_ext0 = dx0 - 1 - SQUISH_CONSTANT_3D
                    dy_ext0 = dy0 - 1 - SQUISH_CONSTANT_3D
                    dz_ext0 = dz0 + 1 - SQUISH_CONSTANT_3D
                    xsv_ext0 = xsb + 1
                    ysv_ext0 = ysb + 1
                    zsv_ext0 = zsb - 1
                # One contribution is a _permutation of (0,0,2)
                dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_3D
                dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_3D
                dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_3D
                xsv_ext1 = xsb
                ysv_ext1 = ysb
                zsv_ext1 = zsb
                if (c2 & 0x01) != 0:
                    dx_ext1 -= 2
                    xsv_ext1 += 2
                elif (c2 & 0x02) != 0:
                    dy_ext1 -= 2
                    ysv_ext1 += 2
                else:
                    dz_ext1 -= 2
                    zsv_ext1 += 2
            # Contribution (1,0,0)
            dx1 = dx0 - 1 - SQUISH_CONSTANT_3D
            dy1 = dy0 - 0 - SQUISH_CONSTANT_3D
            dz1 = dz0 - 0 - SQUISH_CONSTANT_3D
            attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1
            if attn1 > 0:
                attn1 *= attn1
                value += attn1 * attn1 * extrapolate(xsb + 1, ysb + 0, zsb + 0, dx1, dy1, dz1)
            # Contribution (0,1,0)
            dx2 = dx0 - 0 - SQUISH_CONSTANT_3D
            dy2 = dy0 - 1 - SQUISH_CONSTANT_3D
            dz2 = dz1
            attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2
            if attn2 > 0:
                attn2 *= attn2
                value += attn2 * attn2 * extrapolate(xsb + 0, ysb + 1, zsb + 0, dx2, dy2, dz2)
            # Contribution (0,0,1)
            dx3 = dx2
            dy3 = dy1
            dz3 = dz0 - 1 - SQUISH_CONSTANT_3D
            attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3
            if attn3 > 0:
                attn3 *= attn3
                value += attn3 * attn3 * extrapolate(xsb + 0, ysb + 0, zsb + 1, dx3, dy3, dz3)
            # Contribution (1,1,0)
            dx4 = dx0 - 1 - 2 * SQUISH_CONSTANT_3D
            dy4 = dy0 - 1 - 2 * SQUISH_CONSTANT_3D
            dz4 = dz0 - 0 - 2 * SQUISH_CONSTANT_3D
            attn4 = 2 - dx4 * dx4 - dy4 * dy4 - dz4 * dz4
            if attn4 > 0:
                attn4 *= attn4
                value += attn4 * attn4 * extrapolate(xsb + 1, ysb + 1, zsb + 0, dx4, dy4, dz4)
            # Contribution (1,0,1)
            dx5 = dx4
            dy5 = dy0 - 0 - 2 * SQUISH_CONSTANT_3D
            dz5 = dz0 - 1 - 2 * SQUISH_CONSTANT_3D
            attn5 = 2 - dx5 * dx5 - dy5 * dy5 - dz5 * dz5
            if attn5 > 0:
                attn5 *= attn5
                value += attn5 * attn5 * extrapolate(xsb + 1, ysb + 0, zsb + 1, dx5, dy5, dz5)
            # Contribution (0,1,1)
            dx6 = dx0 - 0 - 2 * SQUISH_CONSTANT_3D
            dy6 = dy4
            dz6 = dz5
            attn6 = 2 - dx6 * dx6 - dy6 * dy6 - dz6 * dz6
            if attn6 > 0:
                attn6 *= attn6
                value += attn6 * attn6 * extrapolate(xsb + 0, ysb + 1, zsb + 1, dx6, dy6, dz6)
        # First extra vertex
        attn_ext0 = 2 - dx_ext0 * dx_ext0 - dy_ext0 * dy_ext0 - dz_ext0 * dz_ext0
        if attn_ext0 > 0:
            attn_ext0 *= attn_ext0
            value += attn_ext0 * attn_ext0 * extrapolate(xsv_ext0, ysv_ext0, zsv_ext0, dx_ext0, dy_ext0, dz_ext0)
        # Second extra vertex
        attn_ext1 = 2 - dx_ext1 * dx_ext1 - dy_ext1 * dy_ext1 - dz_ext1 * dz_ext1
        if attn_ext1 > 0:
            attn_ext1 *= attn_ext1
            value += attn_ext1 * attn_ext1 * extrapolate(xsv_ext1, ysv_ext1, zsv_ext1, dx_ext1, dy_ext1, dz_ext1)
        return value / NORM_CONSTANT_3D
def noise4d(self, x: float, y: float, z: float, w: float) -> float:
    """
    Generate 4D OpenSimplex noise from X,Y,Z,W coordinates.

    The input point is skewed onto the simplectic honeycomb, the
    containing region (pentachoron or dispentachoron) is identified,
    and the attenuated gradient contributions of its vertices are
    summed.  The result is normalized by NORM_CONSTANT_4D and lies
    roughly in [-1, 1].
    """
    # Place input coordinates on simplectic honeycomb.
    stretch_offset = (x + y + z + w) * STRETCH_CONSTANT_4D
    xs = x + stretch_offset
    ys = y + stretch_offset
    zs = z + stretch_offset
    ws = w + stretch_offset
    # Floor to get simplectic honeycomb coordinates of rhombo-hypercube super-cell origin.
    xsb = floor(xs)
    ysb = floor(ys)
    zsb = floor(zs)
    wsb = floor(ws)
    # Skew out to get actual coordinates of stretched rhombo-hypercube origin. We'll need these later.
    squish_offset = (xsb + ysb + zsb + wsb) * SQUISH_CONSTANT_4D
    xb = xsb + squish_offset
    yb = ysb + squish_offset
    zb = zsb + squish_offset
    wb = wsb + squish_offset
    # Compute simplectic honeycomb coordinates relative to rhombo-hypercube origin.
    xins = xs - xsb
    yins = ys - ysb
    zins = zs - zsb
    wins = ws - wsb
    # Sum those together to get a value that determines which region we're in.
    in_sum = xins + yins + zins + wins
    # Positions relative to origin po.
    dx0 = x - xb
    dy0 = y - yb
    dz0 = z - zb
    dw0 = w - wb
    value = 0
    extrapolate = self._extrapolate4d
    if in_sum <= 1:  # We're inside the pentachoron (4-Simplex) at (0,0,0,0)
        # Determine which two of (0,0,0,1), (0,0,1,0), (0,1,0,0), (1,0,0,0) are closest.
        a_po = 0x01
        a_score = xins
        b_po = 0x02
        b_score = yins
        if a_score >= b_score and zins > b_score:
            b_score = zins
            b_po = 0x04
        elif a_score < b_score and zins > a_score:
            a_score = zins
            a_po = 0x04
        if a_score >= b_score and wins > b_score:
            b_score = wins
            b_po = 0x08
        elif a_score < b_score and wins > a_score:
            a_score = wins
            a_po = 0x08
        # Now we determine the three lattice pos not part of the pentachoron that may contribute.
        # This depends on the closest two pentachoron vertices, including (0,0,0,0)
        uins = 1 - in_sum
        if uins > a_score or uins > b_score:  # (0,0,0,0) is one of the closest two pentachoron vertices.
            c = b_po if (b_score > a_score) else a_po  # Our other closest vertex is the closest out of a and b.
            if (c & 0x01) == 0:
                xsv_ext0 = xsb - 1
                xsv_ext1 = xsv_ext2 = xsb
                dx_ext0 = dx0 + 1
                dx_ext1 = dx_ext2 = dx0
            else:
                xsv_ext0 = xsv_ext1 = xsv_ext2 = xsb + 1
                dx_ext0 = dx_ext1 = dx_ext2 = dx0 - 1
            if (c & 0x02) == 0:
                ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb
                dy_ext0 = dy_ext1 = dy_ext2 = dy0
                if (c & 0x01) == 0x01:
                    ysv_ext0 -= 1
                    dy_ext0 += 1
                else:
                    ysv_ext1 -= 1
                    dy_ext1 += 1
            else:
                ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb + 1
                dy_ext0 = dy_ext1 = dy_ext2 = dy0 - 1
            if (c & 0x04) == 0:
                zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb
                dz_ext0 = dz_ext1 = dz_ext2 = dz0
                if (c & 0x03) != 0:
                    if (c & 0x03) == 0x03:
                        zsv_ext0 -= 1
                        dz_ext0 += 1
                    else:
                        zsv_ext1 -= 1
                        dz_ext1 += 1
                else:
                    zsv_ext2 -= 1
                    dz_ext2 += 1
            else:
                zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb + 1
                dz_ext0 = dz_ext1 = dz_ext2 = dz0 - 1
            if (c & 0x08) == 0:
                wsv_ext0 = wsv_ext1 = wsb
                wsv_ext2 = wsb - 1
                dw_ext0 = dw_ext1 = dw0
                dw_ext2 = dw0 + 1
            else:
                wsv_ext0 = wsv_ext1 = wsv_ext2 = wsb + 1
                dw_ext0 = dw_ext1 = dw_ext2 = dw0 - 1
        else:  # (0,0,0,0) is not one of the closest two pentachoron vertices.
            c = (a_po | b_po)  # Our three extra vertices are determined by the closest two.
            if (c & 0x01) == 0:
                xsv_ext0 = xsv_ext2 = xsb
                xsv_ext1 = xsb - 1
                dx_ext0 = dx0 - 2 * SQUISH_CONSTANT_4D
                dx_ext1 = dx0 + 1 - SQUISH_CONSTANT_4D
                dx_ext2 = dx0 - SQUISH_CONSTANT_4D
            else:
                xsv_ext0 = xsv_ext1 = xsv_ext2 = xsb + 1
                dx_ext0 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
                dx_ext1 = dx_ext2 = dx0 - 1 - SQUISH_CONSTANT_4D
            if (c & 0x02) == 0:
                ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb
                dy_ext0 = dy0 - 2 * SQUISH_CONSTANT_4D
                dy_ext1 = dy_ext2 = dy0 - SQUISH_CONSTANT_4D
                if (c & 0x01) == 0x01:
                    ysv_ext1 -= 1
                    dy_ext1 += 1
                else:
                    ysv_ext2 -= 1
                    dy_ext2 += 1
            else:
                ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb + 1
                dy_ext0 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
                dy_ext1 = dy_ext2 = dy0 - 1 - SQUISH_CONSTANT_4D
            if (c & 0x04) == 0:
                zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb
                dz_ext0 = dz0 - 2 * SQUISH_CONSTANT_4D
                dz_ext1 = dz_ext2 = dz0 - SQUISH_CONSTANT_4D
                if (c & 0x03) == 0x03:
                    zsv_ext1 -= 1
                    dz_ext1 += 1
                else:
                    zsv_ext2 -= 1
                    dz_ext2 += 1
            else:
                zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb + 1
                dz_ext0 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
                dz_ext1 = dz_ext2 = dz0 - 1 - SQUISH_CONSTANT_4D
            if (c & 0x08) == 0:
                wsv_ext0 = wsv_ext1 = wsb
                wsv_ext2 = wsb - 1
                dw_ext0 = dw0 - 2 * SQUISH_CONSTANT_4D
                dw_ext1 = dw0 - SQUISH_CONSTANT_4D
                dw_ext2 = dw0 + 1 - SQUISH_CONSTANT_4D
            else:
                wsv_ext0 = wsv_ext1 = wsv_ext2 = wsb + 1
                dw_ext0 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
                dw_ext1 = dw_ext2 = dw0 - 1 - SQUISH_CONSTANT_4D
        # Contribution (0,0,0,0)
        attn0 = 2 - dx0 * dx0 - dy0 * dy0 - dz0 * dz0 - dw0 * dw0
        if attn0 > 0:
            attn0 *= attn0
            value += attn0 * attn0 * extrapolate(xsb + 0, ysb + 0, zsb + 0, wsb + 0, dx0, dy0, dz0, dw0)
        # Contribution (1,0,0,0)
        dx1 = dx0 - 1 - SQUISH_CONSTANT_4D
        dy1 = dy0 - 0 - SQUISH_CONSTANT_4D
        dz1 = dz0 - 0 - SQUISH_CONSTANT_4D
        dw1 = dw0 - 0 - SQUISH_CONSTANT_4D
        attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 - dw1 * dw1
        if attn1 > 0:
            attn1 *= attn1
            value += attn1 * attn1 * extrapolate(xsb + 1, ysb + 0, zsb + 0, wsb + 0, dx1, dy1, dz1, dw1)
        # Contribution (0,1,0,0)
        dx2 = dx0 - 0 - SQUISH_CONSTANT_4D
        dy2 = dy0 - 1 - SQUISH_CONSTANT_4D
        dz2 = dz1
        dw2 = dw1
        attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 - dw2 * dw2
        if attn2 > 0:
            attn2 *= attn2
            value += attn2 * attn2 * extrapolate(xsb + 0, ysb + 1, zsb + 0, wsb + 0, dx2, dy2, dz2, dw2)
        # Contribution (0,0,1,0)
        dx3 = dx2
        dy3 = dy1
        dz3 = dz0 - 1 - SQUISH_CONSTANT_4D
        dw3 = dw1
        attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 - dw3 * dw3
        if attn3 > 0:
            attn3 *= attn3
            value += attn3 * attn3 * extrapolate(xsb + 0, ysb + 0, zsb + 1, wsb + 0, dx3, dy3, dz3, dw3)
        # Contribution (0,0,0,1)
        dx4 = dx2
        dy4 = dy1
        dz4 = dz1
        dw4 = dw0 - 1 - SQUISH_CONSTANT_4D
        attn4 = 2 - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 - dw4 * dw4
        if attn4 > 0:
            attn4 *= attn4
            value += attn4 * attn4 * extrapolate(xsb + 0, ysb + 0, zsb + 0, wsb + 1, dx4, dy4, dz4, dw4)
    elif in_sum >= 3:  # We're inside the pentachoron (4-Simplex) at (1,1,1,1)
        # Determine which two of (1,1,1,0), (1,1,0,1), (1,0,1,1), (0,1,1,1) are closest.
        a_po = 0x0E
        a_score = xins
        b_po = 0x0D
        b_score = yins
        if a_score <= b_score and zins < b_score:
            b_score = zins
            b_po = 0x0B
        elif a_score > b_score and zins < a_score:
            a_score = zins
            a_po = 0x0B
        if a_score <= b_score and wins < b_score:
            b_score = wins
            b_po = 0x07
        elif a_score > b_score and wins < a_score:
            a_score = wins
            a_po = 0x07
        # Now we determine the three lattice pos not part of the pentachoron that may contribute.
        # This depends on the closest two pentachoron vertices, including (0,0,0,0)
        uins = 4 - in_sum
        if uins < a_score or uins < b_score:  # (1,1,1,1) is one of the closest two pentachoron vertices.
            c = b_po if (b_score < a_score) else a_po  # Our other closest vertex is the closest out of a and b.
            if (c & 0x01) != 0:
                xsv_ext0 = xsb + 2
                xsv_ext1 = xsv_ext2 = xsb + 1
                dx_ext0 = dx0 - 2 - 4 * SQUISH_CONSTANT_4D
                dx_ext1 = dx_ext2 = dx0 - 1 - 4 * SQUISH_CONSTANT_4D
            else:
                xsv_ext0 = xsv_ext1 = xsv_ext2 = xsb
                dx_ext0 = dx_ext1 = dx_ext2 = dx0 - 4 * SQUISH_CONSTANT_4D
            if (c & 0x02) != 0:
                ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb + 1
                dy_ext0 = dy_ext1 = dy_ext2 = dy0 - 1 - 4 * SQUISH_CONSTANT_4D
                if (c & 0x01) != 0:
                    ysv_ext1 += 1
                    dy_ext1 -= 1
                else:
                    ysv_ext0 += 1
                    dy_ext0 -= 1
            else:
                ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb
                dy_ext0 = dy_ext1 = dy_ext2 = dy0 - 4 * SQUISH_CONSTANT_4D
            if (c & 0x04) != 0:
                zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb + 1
                dz_ext0 = dz_ext1 = dz_ext2 = dz0 - 1 - 4 * SQUISH_CONSTANT_4D
                if (c & 0x03) != 0x03:
                    if (c & 0x03) == 0:
                        zsv_ext0 += 1
                        dz_ext0 -= 1
                    else:
                        zsv_ext1 += 1
                        dz_ext1 -= 1
                else:
                    zsv_ext2 += 1
                    dz_ext2 -= 1
            else:
                zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb
                dz_ext0 = dz_ext1 = dz_ext2 = dz0 - 4 * SQUISH_CONSTANT_4D
            if (c & 0x08) != 0:
                wsv_ext0 = wsv_ext1 = wsb + 1
                wsv_ext2 = wsb + 2
                dw_ext0 = dw_ext1 = dw0 - 1 - 4 * SQUISH_CONSTANT_4D
                dw_ext2 = dw0 - 2 - 4 * SQUISH_CONSTANT_4D
            else:
                wsv_ext0 = wsv_ext1 = wsv_ext2 = wsb
                dw_ext0 = dw_ext1 = dw_ext2 = dw0 - 4 * SQUISH_CONSTANT_4D
        else:  # (1,1,1,1) is not one of the closest two pentachoron vertices.
            c = (a_po & b_po)  # Our three extra vertices are determined by the closest two.
            if (c & 0x01) != 0:
                xsv_ext0 = xsv_ext2 = xsb + 1
                xsv_ext1 = xsb + 2
                dx_ext0 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
                dx_ext1 = dx0 - 2 - 3 * SQUISH_CONSTANT_4D
                dx_ext2 = dx0 - 1 - 3 * SQUISH_CONSTANT_4D
            else:
                xsv_ext0 = xsv_ext1 = xsv_ext2 = xsb
                dx_ext0 = dx0 - 2 * SQUISH_CONSTANT_4D
                dx_ext1 = dx_ext2 = dx0 - 3 * SQUISH_CONSTANT_4D
            if (c & 0x02) != 0:
                ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb + 1
                dy_ext0 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
                dy_ext1 = dy_ext2 = dy0 - 1 - 3 * SQUISH_CONSTANT_4D
                if (c & 0x01) != 0:
                    ysv_ext2 += 1
                    dy_ext2 -= 1
                else:
                    ysv_ext1 += 1
                    dy_ext1 -= 1
            else:
                ysv_ext0 = ysv_ext1 = ysv_ext2 = ysb
                dy_ext0 = dy0 - 2 * SQUISH_CONSTANT_4D
                dy_ext1 = dy_ext2 = dy0 - 3 * SQUISH_CONSTANT_4D
            if (c & 0x04) != 0:
                zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb + 1
                dz_ext0 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
                dz_ext1 = dz_ext2 = dz0 - 1 - 3 * SQUISH_CONSTANT_4D
                if (c & 0x03) != 0:
                    zsv_ext2 += 1
                    dz_ext2 -= 1
                else:
                    zsv_ext1 += 1
                    dz_ext1 -= 1
            else:
                zsv_ext0 = zsv_ext1 = zsv_ext2 = zsb
                dz_ext0 = dz0 - 2 * SQUISH_CONSTANT_4D
                dz_ext1 = dz_ext2 = dz0 - 3 * SQUISH_CONSTANT_4D
            if (c & 0x08) != 0:
                wsv_ext0 = wsv_ext1 = wsb + 1
                wsv_ext2 = wsb + 2
                dw_ext0 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
                dw_ext1 = dw0 - 1 - 3 * SQUISH_CONSTANT_4D
                dw_ext2 = dw0 - 2 - 3 * SQUISH_CONSTANT_4D
            else:
                wsv_ext0 = wsv_ext1 = wsv_ext2 = wsb
                dw_ext0 = dw0 - 2 * SQUISH_CONSTANT_4D
                dw_ext1 = dw_ext2 = dw0 - 3 * SQUISH_CONSTANT_4D
        # Contribution (1,1,1,0)
        dx4 = dx0 - 1 - 3 * SQUISH_CONSTANT_4D
        dy4 = dy0 - 1 - 3 * SQUISH_CONSTANT_4D
        dz4 = dz0 - 1 - 3 * SQUISH_CONSTANT_4D
        dw4 = dw0 - 3 * SQUISH_CONSTANT_4D
        attn4 = 2 - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 - dw4 * dw4
        if attn4 > 0:
            attn4 *= attn4
            value += attn4 * attn4 * extrapolate(xsb + 1, ysb + 1, zsb + 1, wsb + 0, dx4, dy4, dz4, dw4)
        # Contribution (1,1,0,1)
        dx3 = dx4
        dy3 = dy4
        dz3 = dz0 - 3 * SQUISH_CONSTANT_4D
        dw3 = dw0 - 1 - 3 * SQUISH_CONSTANT_4D
        attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 - dw3 * dw3
        if attn3 > 0:
            attn3 *= attn3
            value += attn3 * attn3 * extrapolate(xsb + 1, ysb + 1, zsb + 0, wsb + 1, dx3, dy3, dz3, dw3)
        # Contribution (1,0,1,1)
        dx2 = dx4
        dy2 = dy0 - 3 * SQUISH_CONSTANT_4D
        dz2 = dz4
        dw2 = dw3
        attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 - dw2 * dw2
        if attn2 > 0:
            attn2 *= attn2
            value += attn2 * attn2 * extrapolate(xsb + 1, ysb + 0, zsb + 1, wsb + 1, dx2, dy2, dz2, dw2)
        # Contribution (0,1,1,1)
        dx1 = dx0 - 3 * SQUISH_CONSTANT_4D
        dz1 = dz4
        dy1 = dy4
        dw1 = dw3
        attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 - dw1 * dw1
        if attn1 > 0:
            attn1 *= attn1
            value += attn1 * attn1 * extrapolate(xsb + 0, ysb + 1, zsb + 1, wsb + 1, dx1, dy1, dz1, dw1)
        # Contribution (1,1,1,1)
        dx0 = dx0 - 1 - 4 * SQUISH_CONSTANT_4D
        dy0 = dy0 - 1 - 4 * SQUISH_CONSTANT_4D
        dz0 = dz0 - 1 - 4 * SQUISH_CONSTANT_4D
        dw0 = dw0 - 1 - 4 * SQUISH_CONSTANT_4D
        attn0 = 2 - dx0 * dx0 - dy0 * dy0 - dz0 * dz0 - dw0 * dw0
        if attn0 > 0:
            attn0 *= attn0
            value += attn0 * attn0 * extrapolate(xsb + 1, ysb + 1, zsb + 1, wsb + 1, dx0, dy0, dz0, dw0)
    elif in_sum <= 2:  # We're inside the first dispentachoron (Rectified 4-Simplex)
        a_is_bigger_side = True
        b_is_bigger_side = True
        # Decide between (1,1,0,0) and (0,0,1,1)
        if xins + yins > zins + wins:
            a_score = xins + yins
            a_po = 0x03
        else:
            a_score = zins + wins
            a_po = 0x0C
        # Decide between (1,0,1,0) and (0,1,0,1)
        if xins + zins > yins + wins:
            b_score = xins + zins
            b_po = 0x05
        else:
            b_score = yins + wins
            b_po = 0x0A
        # Closer between (1,0,0,1) and (0,1,1,0) will replace the further of a and b, if closer.
        if xins + wins > yins + zins:
            score = xins + wins
            if a_score >= b_score and score > b_score:
                b_score = score
                b_po = 0x09
            elif a_score < b_score and score > a_score:
                a_score = score
                a_po = 0x09
        else:
            score = yins + zins
            if a_score >= b_score and score > b_score:
                b_score = score
                b_po = 0x06
            elif a_score < b_score and score > a_score:
                a_score = score
                a_po = 0x06
        # Decide if (1,0,0,0) is closer.
        p1 = 2 - in_sum + xins
        if a_score >= b_score and p1 > b_score:
            b_score = p1
            b_po = 0x01
            b_is_bigger_side = False
        elif a_score < b_score and p1 > a_score:
            a_score = p1
            a_po = 0x01
            a_is_bigger_side = False
        # Decide if (0,1,0,0) is closer.
        p2 = 2 - in_sum + yins
        if a_score >= b_score and p2 > b_score:
            b_score = p2
            b_po = 0x02
            b_is_bigger_side = False
        elif a_score < b_score and p2 > a_score:
            a_score = p2
            a_po = 0x02
            a_is_bigger_side = False
        # Decide if (0,0,1,0) is closer.
        p3 = 2 - in_sum + zins
        if a_score >= b_score and p3 > b_score:
            b_score = p3
            b_po = 0x04
            b_is_bigger_side = False
        elif a_score < b_score and p3 > a_score:
            a_score = p3
            a_po = 0x04
            a_is_bigger_side = False
        # Decide if (0,0,0,1) is closer.
        p4 = 2 - in_sum + wins
        if a_score >= b_score and p4 > b_score:
            b_po = 0x08
            b_is_bigger_side = False
        elif a_score < b_score and p4 > a_score:
            a_po = 0x08
            a_is_bigger_side = False
        # Where each of the two closest pos are determines how the extra three vertices are calculated.
        if a_is_bigger_side == b_is_bigger_side:
            if a_is_bigger_side:  # Both closest pos on the bigger side
                c1 = (a_po | b_po)
                c2 = (a_po & b_po)
                if (c1 & 0x01) == 0:
                    xsv_ext0 = xsb
                    xsv_ext1 = xsb - 1
                    dx_ext0 = dx0 - 3 * SQUISH_CONSTANT_4D
                    dx_ext1 = dx0 + 1 - 2 * SQUISH_CONSTANT_4D
                else:
                    xsv_ext0 = xsv_ext1 = xsb + 1
                    dx_ext0 = dx0 - 1 - 3 * SQUISH_CONSTANT_4D
                    dx_ext1 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
                if (c1 & 0x02) == 0:
                    ysv_ext0 = ysb
                    ysv_ext1 = ysb - 1
                    dy_ext0 = dy0 - 3 * SQUISH_CONSTANT_4D
                    dy_ext1 = dy0 + 1 - 2 * SQUISH_CONSTANT_4D
                else:
                    ysv_ext0 = ysv_ext1 = ysb + 1
                    dy_ext0 = dy0 - 1 - 3 * SQUISH_CONSTANT_4D
                    dy_ext1 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
                if (c1 & 0x04) == 0:
                    zsv_ext0 = zsb
                    zsv_ext1 = zsb - 1
                    dz_ext0 = dz0 - 3 * SQUISH_CONSTANT_4D
                    dz_ext1 = dz0 + 1 - 2 * SQUISH_CONSTANT_4D
                else:
                    zsv_ext0 = zsv_ext1 = zsb + 1
                    dz_ext0 = dz0 - 1 - 3 * SQUISH_CONSTANT_4D
                    dz_ext1 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
                if (c1 & 0x08) == 0:
                    wsv_ext0 = wsb
                    wsv_ext1 = wsb - 1
                    dw_ext0 = dw0 - 3 * SQUISH_CONSTANT_4D
                    dw_ext1 = dw0 + 1 - 2 * SQUISH_CONSTANT_4D
                else:
                    wsv_ext0 = wsv_ext1 = wsb + 1
                    dw_ext0 = dw0 - 1 - 3 * SQUISH_CONSTANT_4D
                    dw_ext1 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
                # One combination is a _permutation of (0,0,0,2) based on c2
                xsv_ext2 = xsb
                ysv_ext2 = ysb
                zsv_ext2 = zsb
                wsv_ext2 = wsb
                dx_ext2 = dx0 - 2 * SQUISH_CONSTANT_4D
                dy_ext2 = dy0 - 2 * SQUISH_CONSTANT_4D
                dz_ext2 = dz0 - 2 * SQUISH_CONSTANT_4D
                dw_ext2 = dw0 - 2 * SQUISH_CONSTANT_4D
                if (c2 & 0x01) != 0:
                    xsv_ext2 += 2
                    dx_ext2 -= 2
                elif (c2 & 0x02) != 0:
                    ysv_ext2 += 2
                    dy_ext2 -= 2
                elif (c2 & 0x04) != 0:
                    zsv_ext2 += 2
                    dz_ext2 -= 2
                else:
                    wsv_ext2 += 2
                    dw_ext2 -= 2
            else:  # Both closest pos on the smaller side
                # One of the two extra pos is (0,0,0,0)
                xsv_ext2 = xsb
                ysv_ext2 = ysb
                zsv_ext2 = zsb
                wsv_ext2 = wsb
                dx_ext2 = dx0
                dy_ext2 = dy0
                dz_ext2 = dz0
                dw_ext2 = dw0
                # Other two pos are based on the omitted axes.
                c = (a_po | b_po)
                if (c & 0x01) == 0:
                    xsv_ext0 = xsb - 1
                    xsv_ext1 = xsb
                    dx_ext0 = dx0 + 1 - SQUISH_CONSTANT_4D
                    dx_ext1 = dx0 - SQUISH_CONSTANT_4D
                else:
                    xsv_ext0 = xsv_ext1 = xsb + 1
                    dx_ext0 = dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_4D
                if (c & 0x02) == 0:
                    ysv_ext0 = ysv_ext1 = ysb
                    dy_ext0 = dy_ext1 = dy0 - SQUISH_CONSTANT_4D
                    if (c & 0x01) == 0x01:
                        ysv_ext0 -= 1
                        dy_ext0 += 1
                    else:
                        ysv_ext1 -= 1
                        dy_ext1 += 1
                else:
                    ysv_ext0 = ysv_ext1 = ysb + 1
                    dy_ext0 = dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_4D
                if (c & 0x04) == 0:
                    zsv_ext0 = zsv_ext1 = zsb
                    dz_ext0 = dz_ext1 = dz0 - SQUISH_CONSTANT_4D
                    if (c & 0x03) == 0x03:
                        zsv_ext0 -= 1
                        dz_ext0 += 1
                    else:
                        zsv_ext1 -= 1
                        dz_ext1 += 1
                else:
                    zsv_ext0 = zsv_ext1 = zsb + 1
                    dz_ext0 = dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_4D
                if (c & 0x08) == 0:
                    wsv_ext0 = wsb
                    wsv_ext1 = wsb - 1
                    dw_ext0 = dw0 - SQUISH_CONSTANT_4D
                    dw_ext1 = dw0 + 1 - SQUISH_CONSTANT_4D
                else:
                    wsv_ext0 = wsv_ext1 = wsb + 1
                    dw_ext0 = dw_ext1 = dw0 - 1 - SQUISH_CONSTANT_4D
        else:  # One po on each "side"
            if a_is_bigger_side:
                c1 = a_po
                c2 = b_po
            else:
                c1 = b_po
                c2 = a_po
            # Two contributions are the bigger-sided po with each 0 replaced with -1.
            if (c1 & 0x01) == 0:
                xsv_ext0 = xsb - 1
                xsv_ext1 = xsb
                dx_ext0 = dx0 + 1 - SQUISH_CONSTANT_4D
                dx_ext1 = dx0 - SQUISH_CONSTANT_4D
            else:
                xsv_ext0 = xsv_ext1 = xsb + 1
                dx_ext0 = dx_ext1 = dx0 - 1 - SQUISH_CONSTANT_4D
            if (c1 & 0x02) == 0:
                ysv_ext0 = ysv_ext1 = ysb
                dy_ext0 = dy_ext1 = dy0 - SQUISH_CONSTANT_4D
                if (c1 & 0x01) == 0x01:
                    ysv_ext0 -= 1
                    dy_ext0 += 1
                else:
                    ysv_ext1 -= 1
                    dy_ext1 += 1
            else:
                ysv_ext0 = ysv_ext1 = ysb + 1
                dy_ext0 = dy_ext1 = dy0 - 1 - SQUISH_CONSTANT_4D
            if (c1 & 0x04) == 0:
                zsv_ext0 = zsv_ext1 = zsb
                dz_ext0 = dz_ext1 = dz0 - SQUISH_CONSTANT_4D
                if (c1 & 0x03) == 0x03:
                    zsv_ext0 -= 1
                    dz_ext0 += 1
                else:
                    zsv_ext1 -= 1
                    dz_ext1 += 1
            else:
                zsv_ext0 = zsv_ext1 = zsb + 1
                dz_ext0 = dz_ext1 = dz0 - 1 - SQUISH_CONSTANT_4D
            if (c1 & 0x08) == 0:
                wsv_ext0 = wsb
                wsv_ext1 = wsb - 1
                dw_ext0 = dw0 - SQUISH_CONSTANT_4D
                dw_ext1 = dw0 + 1 - SQUISH_CONSTANT_4D
            else:
                wsv_ext0 = wsv_ext1 = wsb + 1
                dw_ext0 = dw_ext1 = dw0 - 1 - SQUISH_CONSTANT_4D
            # One contribution is a _permutation of (0,0,0,2) based on the smaller-sided po
            xsv_ext2 = xsb
            ysv_ext2 = ysb
            zsv_ext2 = zsb
            wsv_ext2 = wsb
            dx_ext2 = dx0 - 2 * SQUISH_CONSTANT_4D
            dy_ext2 = dy0 - 2 * SQUISH_CONSTANT_4D
            dz_ext2 = dz0 - 2 * SQUISH_CONSTANT_4D
            dw_ext2 = dw0 - 2 * SQUISH_CONSTANT_4D
            if (c2 & 0x01) != 0:
                xsv_ext2 += 2
                dx_ext2 -= 2
            elif (c2 & 0x02) != 0:
                ysv_ext2 += 2
                dy_ext2 -= 2
            elif (c2 & 0x04) != 0:
                zsv_ext2 += 2
                dz_ext2 -= 2
            else:
                wsv_ext2 += 2
                dw_ext2 -= 2
        # Contribution (1,0,0,0)
        dx1 = dx0 - 1 - SQUISH_CONSTANT_4D
        dy1 = dy0 - 0 - SQUISH_CONSTANT_4D
        dz1 = dz0 - 0 - SQUISH_CONSTANT_4D
        dw1 = dw0 - 0 - SQUISH_CONSTANT_4D
        attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 - dw1 * dw1
        if attn1 > 0:
            attn1 *= attn1
            value += attn1 * attn1 * extrapolate(xsb + 1, ysb + 0, zsb + 0, wsb + 0, dx1, dy1, dz1, dw1)
        # Contribution (0,1,0,0)
        dx2 = dx0 - 0 - SQUISH_CONSTANT_4D
        dy2 = dy0 - 1 - SQUISH_CONSTANT_4D
        dz2 = dz1
        dw2 = dw1
        attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 - dw2 * dw2
        if attn2 > 0:
            attn2 *= attn2
            value += attn2 * attn2 * extrapolate(xsb + 0, ysb + 1, zsb + 0, wsb + 0, dx2, dy2, dz2, dw2)
        # Contribution (0,0,1,0)
        dx3 = dx2
        dy3 = dy1
        dz3 = dz0 - 1 - SQUISH_CONSTANT_4D
        dw3 = dw1
        attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 - dw3 * dw3
        if attn3 > 0:
            attn3 *= attn3
            value += attn3 * attn3 * extrapolate(xsb + 0, ysb + 0, zsb + 1, wsb + 0, dx3, dy3, dz3, dw3)
        # Contribution (0,0,0,1)
        dx4 = dx2
        dy4 = dy1
        dz4 = dz1
        dw4 = dw0 - 1 - SQUISH_CONSTANT_4D
        attn4 = 2 - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 - dw4 * dw4
        if attn4 > 0:
            attn4 *= attn4
            value += attn4 * attn4 * extrapolate(xsb + 0, ysb + 0, zsb + 0, wsb + 1, dx4, dy4, dz4, dw4)
        # Contribution (1,1,0,0)
        dx5 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
        dy5 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
        dz5 = dz0 - 0 - 2 * SQUISH_CONSTANT_4D
        dw5 = dw0 - 0 - 2 * SQUISH_CONSTANT_4D
        attn5 = 2 - dx5 * dx5 - dy5 * dy5 - dz5 * dz5 - dw5 * dw5
        if attn5 > 0:
            attn5 *= attn5
            value += attn5 * attn5 * extrapolate(xsb + 1, ysb + 1, zsb + 0, wsb + 0, dx5, dy5, dz5, dw5)
        # Contribution (1,0,1,0)
        dx6 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
        dy6 = dy0 - 0 - 2 * SQUISH_CONSTANT_4D
        dz6 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
        dw6 = dw0 - 0 - 2 * SQUISH_CONSTANT_4D
        attn6 = 2 - dx6 * dx6 - dy6 * dy6 - dz6 * dz6 - dw6 * dw6
        if attn6 > 0:
            attn6 *= attn6
            value += attn6 * attn6 * extrapolate(xsb + 1, ysb + 0, zsb + 1, wsb + 0, dx6, dy6, dz6, dw6)
        # Contribution (1,0,0,1)
        dx7 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
        dy7 = dy0 - 0 - 2 * SQUISH_CONSTANT_4D
        dz7 = dz0 - 0 - 2 * SQUISH_CONSTANT_4D
        dw7 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
        attn7 = 2 - dx7 * dx7 - dy7 * dy7 - dz7 * dz7 - dw7 * dw7
        if attn7 > 0:
            attn7 *= attn7
            value += attn7 * attn7 * extrapolate(xsb + 1, ysb + 0, zsb + 0, wsb + 1, dx7, dy7, dz7, dw7)
        # Contribution (0,1,1,0)
        dx8 = dx0 - 0 - 2 * SQUISH_CONSTANT_4D
        dy8 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
        dz8 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
        dw8 = dw0 - 0 - 2 * SQUISH_CONSTANT_4D
        attn8 = 2 - dx8 * dx8 - dy8 * dy8 - dz8 * dz8 - dw8 * dw8
        if attn8 > 0:
            attn8 *= attn8
            value += attn8 * attn8 * extrapolate(xsb + 0, ysb + 1, zsb + 1, wsb + 0, dx8, dy8, dz8, dw8)
        # Contribution (0,1,0,1)
        dx9 = dx0 - 0 - 2 * SQUISH_CONSTANT_4D
        dy9 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
        dz9 = dz0 - 0 - 2 * SQUISH_CONSTANT_4D
        dw9 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
        attn9 = 2 - dx9 * dx9 - dy9 * dy9 - dz9 * dz9 - dw9 * dw9
        if attn9 > 0:
            attn9 *= attn9
            value += attn9 * attn9 * extrapolate(xsb + 0, ysb + 1, zsb + 0, wsb + 1, dx9, dy9, dz9, dw9)
        # Contribution (0,0,1,1)
        dx10 = dx0 - 0 - 2 * SQUISH_CONSTANT_4D
        dy10 = dy0 - 0 - 2 * SQUISH_CONSTANT_4D
        dz10 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
        dw10 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
        attn10 = 2 - dx10 * dx10 - dy10 * dy10 - dz10 * dz10 - dw10 * dw10
        if attn10 > 0:
            attn10 *= attn10
            value += attn10 * attn10 * extrapolate(xsb + 0, ysb + 0, zsb + 1, wsb + 1, dx10, dy10, dz10, dw10)
    else:  # We're inside the second dispentachoron (Rectified 4-Simplex)
        a_is_bigger_side = True
        b_is_bigger_side = True
        # Decide between (0,0,1,1) and (1,1,0,0)
        if xins + yins < zins + wins:
            a_score = xins + yins
            a_po = 0x0C
        else:
            a_score = zins + wins
            a_po = 0x03
        # Decide between (0,1,0,1) and (1,0,1,0)
        if xins + zins < yins + wins:
            b_score = xins + zins
            b_po = 0x0A
        else:
            b_score = yins + wins
            b_po = 0x05
        # Closer between (0,1,1,0) and (1,0,0,1) will replace the further of a and b, if closer.
        if xins + wins < yins + zins:
            score = xins + wins
            if a_score <= b_score and score < b_score:
                b_score = score
                b_po = 0x06
            elif a_score > b_score and score < a_score:
                a_score = score
                a_po = 0x06
        else:
            score = yins + zins
            if a_score <= b_score and score < b_score:
                b_score = score
                b_po = 0x09
            elif a_score > b_score and score < a_score:
                a_score = score
                a_po = 0x09
        # Decide if (0,1,1,1) is closer.
        p1 = 3 - in_sum + xins
        if a_score <= b_score and p1 < b_score:
            b_score = p1
            b_po = 0x0E
            b_is_bigger_side = False
        elif a_score > b_score and p1 < a_score:
            a_score = p1
            a_po = 0x0E
            a_is_bigger_side = False
        # Decide if (1,0,1,1) is closer.
        p2 = 3 - in_sum + yins
        if a_score <= b_score and p2 < b_score:
            b_score = p2
            b_po = 0x0D
            b_is_bigger_side = False
        elif a_score > b_score and p2 < a_score:
            a_score = p2
            a_po = 0x0D
            a_is_bigger_side = False
        # Decide if (1,1,0,1) is closer.
        p3 = 3 - in_sum + zins
        if a_score <= b_score and p3 < b_score:
            b_score = p3
            b_po = 0x0B
            b_is_bigger_side = False
        elif a_score > b_score and p3 < a_score:
            a_score = p3
            a_po = 0x0B
            a_is_bigger_side = False
        # Decide if (1,1,1,0) is closer.
        p4 = 3 - in_sum + wins
        if a_score <= b_score and p4 < b_score:
            b_po = 0x07
            b_is_bigger_side = False
        elif a_score > b_score and p4 < a_score:
            a_po = 0x07
            a_is_bigger_side = False
        # Where each of the two closest pos are determines how the extra three vertices are calculated.
        if a_is_bigger_side == b_is_bigger_side:
            if a_is_bigger_side:  # Both closest pos on the bigger side
                c1 = (a_po & b_po)
                c2 = (a_po | b_po)
                # Two contributions are _permutations of (0,0,0,1) and (0,0,0,2) based on c1
                xsv_ext0 = xsv_ext1 = xsb
                ysv_ext0 = ysv_ext1 = ysb
                zsv_ext0 = zsv_ext1 = zsb
                wsv_ext0 = wsv_ext1 = wsb
                dx_ext0 = dx0 - SQUISH_CONSTANT_4D
                dy_ext0 = dy0 - SQUISH_CONSTANT_4D
                dz_ext0 = dz0 - SQUISH_CONSTANT_4D
                dw_ext0 = dw0 - SQUISH_CONSTANT_4D
                dx_ext1 = dx0 - 2 * SQUISH_CONSTANT_4D
                dy_ext1 = dy0 - 2 * SQUISH_CONSTANT_4D
                dz_ext1 = dz0 - 2 * SQUISH_CONSTANT_4D
                dw_ext1 = dw0 - 2 * SQUISH_CONSTANT_4D
                if (c1 & 0x01) != 0:
                    xsv_ext0 += 1
                    dx_ext0 -= 1
                    xsv_ext1 += 2
                    dx_ext1 -= 2
                elif (c1 & 0x02) != 0:
                    ysv_ext0 += 1
                    dy_ext0 -= 1
                    ysv_ext1 += 2
                    dy_ext1 -= 2
                elif (c1 & 0x04) != 0:
                    zsv_ext0 += 1
                    dz_ext0 -= 1
                    zsv_ext1 += 2
                    dz_ext1 -= 2
                else:
                    wsv_ext0 += 1
                    dw_ext0 -= 1
                    wsv_ext1 += 2
                    dw_ext1 -= 2
                # One contribution is a _permutation of (1,1,1,-1) based on c2
                xsv_ext2 = xsb + 1
                ysv_ext2 = ysb + 1
                zsv_ext2 = zsb + 1
                wsv_ext2 = wsb + 1
                dx_ext2 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
                dy_ext2 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
                dz_ext2 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
                dw_ext2 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
                if (c2 & 0x01) == 0:
                    xsv_ext2 -= 2
                    dx_ext2 += 2
                elif (c2 & 0x02) == 0:
                    ysv_ext2 -= 2
                    dy_ext2 += 2
                elif (c2 & 0x04) == 0:
                    zsv_ext2 -= 2
                    dz_ext2 += 2
                else:
                    wsv_ext2 -= 2
                    dw_ext2 += 2
            else:  # Both closest pos on the smaller side
                # One of the two extra pos is (1,1,1,1)
                xsv_ext2 = xsb + 1
                ysv_ext2 = ysb + 1
                zsv_ext2 = zsb + 1
                wsv_ext2 = wsb + 1
                dx_ext2 = dx0 - 1 - 4 * SQUISH_CONSTANT_4D
                dy_ext2 = dy0 - 1 - 4 * SQUISH_CONSTANT_4D
                dz_ext2 = dz0 - 1 - 4 * SQUISH_CONSTANT_4D
                dw_ext2 = dw0 - 1 - 4 * SQUISH_CONSTANT_4D
                # Other two pos are based on the shared axes.
                c = (a_po & b_po)
                if (c & 0x01) != 0:
                    xsv_ext0 = xsb + 2
                    xsv_ext1 = xsb + 1
                    dx_ext0 = dx0 - 2 - 3 * SQUISH_CONSTANT_4D
                    dx_ext1 = dx0 - 1 - 3 * SQUISH_CONSTANT_4D
                else:
                    xsv_ext0 = xsv_ext1 = xsb
                    dx_ext0 = dx_ext1 = dx0 - 3 * SQUISH_CONSTANT_4D
                if (c & 0x02) != 0:
                    ysv_ext0 = ysv_ext1 = ysb + 1
                    dy_ext0 = dy_ext1 = dy0 - 1 - 3 * SQUISH_CONSTANT_4D
                    if (c & 0x01) == 0:
                        ysv_ext0 += 1
                        dy_ext0 -= 1
                    else:
                        ysv_ext1 += 1
                        dy_ext1 -= 1
                else:
                    ysv_ext0 = ysv_ext1 = ysb
                    dy_ext0 = dy_ext1 = dy0 - 3 * SQUISH_CONSTANT_4D
                if (c & 0x04) != 0:
                    zsv_ext0 = zsv_ext1 = zsb + 1
                    dz_ext0 = dz_ext1 = dz0 - 1 - 3 * SQUISH_CONSTANT_4D
                    if (c & 0x03) == 0:
                        zsv_ext0 += 1
                        dz_ext0 -= 1
                    else:
                        zsv_ext1 += 1
                        dz_ext1 -= 1
                else:
                    zsv_ext0 = zsv_ext1 = zsb
                    dz_ext0 = dz_ext1 = dz0 - 3 * SQUISH_CONSTANT_4D
                if (c & 0x08) != 0:
                    wsv_ext0 = wsb + 1
                    wsv_ext1 = wsb + 2
                    dw_ext0 = dw0 - 1 - 3 * SQUISH_CONSTANT_4D
                    dw_ext1 = dw0 - 2 - 3 * SQUISH_CONSTANT_4D
                else:
                    wsv_ext0 = wsv_ext1 = wsb
                    dw_ext0 = dw_ext1 = dw0 - 3 * SQUISH_CONSTANT_4D
        else:  # One po on each "side"
            if a_is_bigger_side:
                c1 = a_po
                c2 = b_po
            else:
                c1 = b_po
                c2 = a_po
            # Two contributions are the bigger-sided po with each 1 replaced with 2.
            if (c1 & 0x01) != 0:
                xsv_ext0 = xsb + 2
                xsv_ext1 = xsb + 1
                dx_ext0 = dx0 - 2 - 3 * SQUISH_CONSTANT_4D
                dx_ext1 = dx0 - 1 - 3 * SQUISH_CONSTANT_4D
            else:
                xsv_ext0 = xsv_ext1 = xsb
                dx_ext0 = dx_ext1 = dx0 - 3 * SQUISH_CONSTANT_4D
            if (c1 & 0x02) != 0:
                ysv_ext0 = ysv_ext1 = ysb + 1
                dy_ext0 = dy_ext1 = dy0 - 1 - 3 * SQUISH_CONSTANT_4D
                if (c1 & 0x01) == 0:
                    ysv_ext0 += 1
                    dy_ext0 -= 1
                else:
                    ysv_ext1 += 1
                    dy_ext1 -= 1
            else:
                ysv_ext0 = ysv_ext1 = ysb
                dy_ext0 = dy_ext1 = dy0 - 3 * SQUISH_CONSTANT_4D
            if (c1 & 0x04) != 0:
                zsv_ext0 = zsv_ext1 = zsb + 1
                dz_ext0 = dz_ext1 = dz0 - 1 - 3 * SQUISH_CONSTANT_4D
                if (c1 & 0x03) == 0:
                    zsv_ext0 += 1
                    dz_ext0 -= 1
                else:
                    zsv_ext1 += 1
                    dz_ext1 -= 1
            else:
                zsv_ext0 = zsv_ext1 = zsb
                dz_ext0 = dz_ext1 = dz0 - 3 * SQUISH_CONSTANT_4D
            if (c1 & 0x08) != 0:
                wsv_ext0 = wsb + 1
                wsv_ext1 = wsb + 2
                dw_ext0 = dw0 - 1 - 3 * SQUISH_CONSTANT_4D
                dw_ext1 = dw0 - 2 - 3 * SQUISH_CONSTANT_4D
            else:
                wsv_ext0 = wsv_ext1 = wsb
                dw_ext0 = dw_ext1 = dw0 - 3 * SQUISH_CONSTANT_4D
            # One contribution is a _permutation of (1,1,1,-1) based on the smaller-sided po
            xsv_ext2 = xsb + 1
            ysv_ext2 = ysb + 1
            zsv_ext2 = zsb + 1
            wsv_ext2 = wsb + 1
            dx_ext2 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
            dy_ext2 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
            dz_ext2 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
            dw_ext2 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
            if (c2 & 0x01) == 0:
                xsv_ext2 -= 2
                dx_ext2 += 2
            elif (c2 & 0x02) == 0:
                ysv_ext2 -= 2
                dy_ext2 += 2
            elif (c2 & 0x04) == 0:
                zsv_ext2 -= 2
                dz_ext2 += 2
            else:
                wsv_ext2 -= 2
                dw_ext2 += 2
        # Contribution (1,1,1,0)
        dx4 = dx0 - 1 - 3 * SQUISH_CONSTANT_4D
        dy4 = dy0 - 1 - 3 * SQUISH_CONSTANT_4D
        dz4 = dz0 - 1 - 3 * SQUISH_CONSTANT_4D
        dw4 = dw0 - 3 * SQUISH_CONSTANT_4D
        attn4 = 2 - dx4 * dx4 - dy4 * dy4 - dz4 * dz4 - dw4 * dw4
        if attn4 > 0:
            attn4 *= attn4
            value += attn4 * attn4 * extrapolate(xsb + 1, ysb + 1, zsb + 1, wsb + 0, dx4, dy4, dz4, dw4)
        # Contribution (1,1,0,1)
        dx3 = dx4
        dy3 = dy4
        dz3 = dz0 - 3 * SQUISH_CONSTANT_4D
        dw3 = dw0 - 1 - 3 * SQUISH_CONSTANT_4D
        attn3 = 2 - dx3 * dx3 - dy3 * dy3 - dz3 * dz3 - dw3 * dw3
        if attn3 > 0:
            attn3 *= attn3
            value += attn3 * attn3 * extrapolate(xsb + 1, ysb + 1, zsb + 0, wsb + 1, dx3, dy3, dz3, dw3)
        # Contribution (1,0,1,1)
        dx2 = dx4
        dy2 = dy0 - 3 * SQUISH_CONSTANT_4D
        dz2 = dz4
        dw2 = dw3
        attn2 = 2 - dx2 * dx2 - dy2 * dy2 - dz2 * dz2 - dw2 * dw2
        if attn2 > 0:
            attn2 *= attn2
            value += attn2 * attn2 * extrapolate(xsb + 1, ysb + 0, zsb + 1, wsb + 1, dx2, dy2, dz2, dw2)
        # Contribution (0,1,1,1)
        dx1 = dx0 - 3 * SQUISH_CONSTANT_4D
        dz1 = dz4
        dy1 = dy4
        dw1 = dw3
        attn1 = 2 - dx1 * dx1 - dy1 * dy1 - dz1 * dz1 - dw1 * dw1
        if attn1 > 0:
            attn1 *= attn1
            value += attn1 * attn1 * extrapolate(xsb + 0, ysb + 1, zsb + 1, wsb + 1, dx1, dy1, dz1, dw1)
        # Contribution (1,1,0,0)
        dx5 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
        dy5 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
        dz5 = dz0 - 0 - 2 * SQUISH_CONSTANT_4D
        dw5 = dw0 - 0 - 2 * SQUISH_CONSTANT_4D
        attn5 = 2 - dx5 * dx5 - dy5 * dy5 - dz5 * dz5 - dw5 * dw5
        if attn5 > 0:
            attn5 *= attn5
            value += attn5 * attn5 * extrapolate(xsb + 1, ysb + 1, zsb + 0, wsb + 0, dx5, dy5, dz5, dw5)
        # Contribution (1,0,1,0)
        dx6 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
        dy6 = dy0 - 0 - 2 * SQUISH_CONSTANT_4D
        dz6 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
        dw6 = dw0 - 0 - 2 * SQUISH_CONSTANT_4D
        attn6 = 2 - dx6 * dx6 - dy6 * dy6 - dz6 * dz6 - dw6 * dw6
        if attn6 > 0:
            attn6 *= attn6
            value += attn6 * attn6 * extrapolate(xsb + 1, ysb + 0, zsb + 1, wsb + 0, dx6, dy6, dz6, dw6)
        # Contribution (1,0,0,1)
        dx7 = dx0 - 1 - 2 * SQUISH_CONSTANT_4D
        dy7 = dy0 - 0 - 2 * SQUISH_CONSTANT_4D
        dz7 = dz0 - 0 - 2 * SQUISH_CONSTANT_4D
        dw7 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
        attn7 = 2 - dx7 * dx7 - dy7 * dy7 - dz7 * dz7 - dw7 * dw7
        if attn7 > 0:
            attn7 *= attn7
            value += attn7 * attn7 * extrapolate(xsb + 1, ysb + 0, zsb + 0, wsb + 1, dx7, dy7, dz7, dw7)
        # Contribution (0,1,1,0)
        dx8 = dx0 - 0 - 2 * SQUISH_CONSTANT_4D
        dy8 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
        dz8 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
        dw8 = dw0 - 0 - 2 * SQUISH_CONSTANT_4D
        attn8 = 2 - dx8 * dx8 - dy8 * dy8 - dz8 * dz8 - dw8 * dw8
        if attn8 > 0:
            attn8 *= attn8
            value += attn8 * attn8 * extrapolate(xsb + 0, ysb + 1, zsb + 1, wsb + 0, dx8, dy8, dz8, dw8)
        # Contribution (0,1,0,1)
        dx9 = dx0 - 0 - 2 * SQUISH_CONSTANT_4D
        dy9 = dy0 - 1 - 2 * SQUISH_CONSTANT_4D
        dz9 = dz0 - 0 - 2 * SQUISH_CONSTANT_4D
        dw9 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
        attn9 = 2 - dx9 * dx9 - dy9 * dy9 - dz9 * dz9 - dw9 * dw9
        if attn9 > 0:
            attn9 *= attn9
            value += attn9 * attn9 * extrapolate(xsb + 0, ysb + 1, zsb + 0, wsb + 1, dx9, dy9, dz9, dw9)
        # Contribution (0,0,1,1)
        dx10 = dx0 - 0 - 2 * SQUISH_CONSTANT_4D
        dy10 = dy0 - 0 - 2 * SQUISH_CONSTANT_4D
        dz10 = dz0 - 1 - 2 * SQUISH_CONSTANT_4D
        dw10 = dw0 - 1 - 2 * SQUISH_CONSTANT_4D
        attn10 = 2 - dx10 * dx10 - dy10 * dy10 - dz10 * dz10 - dw10 * dw10
        if attn10 > 0:
            attn10 *= attn10
            value += attn10 * attn10 * extrapolate(xsb + 0, ysb + 0, zsb + 1, wsb + 1, dx10, dy10, dz10, dw10)
    # First extra vertex
    attn_ext0 = 2 - dx_ext0 * dx_ext0 - dy_ext0 * dy_ext0 - dz_ext0 * dz_ext0 - dw_ext0 * dw_ext0
    if attn_ext0 > 0:
        attn_ext0 *= attn_ext0
        value += attn_ext0 * attn_ext0 * extrapolate(xsv_ext0, ysv_ext0, zsv_ext0, wsv_ext0, dx_ext0, dy_ext0, dz_ext0, dw_ext0)
    # Second extra vertex
    attn_ext1 = 2 - dx_ext1 * dx_ext1 - dy_ext1 * dy_ext1 - dz_ext1 * dz_ext1 - dw_ext1 * dw_ext1
    if attn_ext1 > 0:
        attn_ext1 *= attn_ext1
        value += attn_ext1 * attn_ext1 * extrapolate(xsv_ext1, ysv_ext1, zsv_ext1, wsv_ext1, dx_ext1, dy_ext1, dz_ext1, dw_ext1)
    # Third extra vertex
    attn_ext2 = 2 - dx_ext2 * dx_ext2 - dy_ext2 * dy_ext2 - dz_ext2 * dz_ext2 - dw_ext2 * dw_ext2
    if attn_ext2 > 0:
        attn_ext2 *= attn_ext2
        value += attn_ext2 * attn_ext2 * extrapolate(xsv_ext2, ysv_ext2, zsv_ext2, wsv_ext2, dx_ext2, dy_ext2, dz_ext2, dw_ext2)
    return value / NORM_CONSTANT_4D
| mit | 9b8d99bbd163dc159aa1585d40127aeb | 40.17425 | 132 | 0.408107 | 3.089706 | false | false | false | false |
aliyun/aliyun-oss-python-sdk | examples/sign_v2.py | 1 | 3178 | # -*- coding: utf-8 -*-
import os
import oss2
import requests
import datetime
import time
import hashlib
import hmac
# The code below demonstrates how to sign requests with the OSS V2 signature algorithm.
# First, initialize the AccessKeyId, AccessKeySecret and Endpoint.
# You can set access_key_id etc. through environment variables, or replace
# '<Your AccessKeyId>' etc. with your real credentials directly.
#
# Taking Hangzhou (East China 1) as an example, the endpoint should be
# http://oss-cn-hangzhou.aliyuncs.com
# https://oss-cn-hangzhou.aliyuncs.com
# HTTP and HTTPS requests are handled the same way.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<Your AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<Your AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<Your Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<Your Endpoint>')
if not endpoint.startswith('http://') and not endpoint.startswith('https://'):
    endpoint = 'http://' + endpoint
# Verify that access_key_id and the other parameters have all been initialized sensibly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'Please set variable: ' + param
# Create an AuthV2 object so that requests are signed with the V2 algorithm.
# oss2.make_auth can also be used; it defaults to the V1 algorithm.
auth = oss2.AuthV2(access_key_id, access_key_secret)
# auth = oss2.make_auth(access_key_id, access_key_secret, oss2.AUTH_VERSION_2)
# Create a Bucket object, through which all bucket- and object-related operations are performed.
bucket = oss2.Bucket(auth, endpoint, bucket_name)
content = b'Never give up. - Jack Ma'
# Upload an object.
bucket.put_object('motto.txt', content)
# Download an object.
result = bucket.get_object('motto.txt')
assert result.read() == content
# Generate a signed URL that expires after 60 seconds.
url = bucket.sign_url('GET', 'motto.txt', 60)
print(url)
# Manually construct a request that uses the V2 signature.
key = 'object-from-post.txt'
boundary = 'arbitraryboundaryvalue'
headers = {'Content-Type': 'multipart/form-data; boundary=' + boundary}
encoded_policy = oss2.utils.b64encode_as_string(oss2.to_bytes('{ "expiration": "%s","conditions": [["starts-with", "$key", ""]]}'
                                                              % oss2.date_to_iso8601(datetime.datetime.utcfromtimestamp(int(time.time()) + 60))))
digest = hmac.new(oss2.to_bytes(access_key_secret), oss2.to_bytes(encoded_policy), hashlib.sha256).digest()
signature = oss2.utils.b64encode_as_string(digest)
form_fields = {
'x-oss-signature-version': 'OSS2',
'x-oss-signature': signature,
'x-oss-access-key-id': access_key_id,
'policy': encoded_policy,
'key': key,
}
# 对象的内容
content = 'file content for post object request'
body = ''
for k, v in form_fields.items():
body += '--%s\r\nContent-Disposition: form-data; name="%s"\r\n\r\n%s\r\n' % (boundary, k, v)
body += '--%s\r\nContent-Disposition: form-data; name="file"; filename="%s"\r\n\r\n%s\r\n' % (boundary, key, content)
body += '--%s\r\nContent-Disposition: form-data; name="submit"\r\n\r\nUpload to OSS\r\n--%s--\r\n' % (boundary, boundary)
p = oss2.urlparse(endpoint)
requests.post('%s://%s.%s' % (p.scheme, bucket_name, p.netloc), data=body, headers=headers)
| mit | b0493bb4e87ed73bf95385d45ed84abc | 29.813187 | 129 | 0.702211 | 2.294599 | false | false | false | false |
aleju/imgaug | test/test_multicore.py | 2 | 39854 | from __future__ import print_function, division, absolute_import
import time
import multiprocessing
import pickle
from collections import defaultdict
import warnings
import sys
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import numpy as np
import six.moves as sm
import imgaug as ia
import imgaug.multicore as multicore
import imgaug.random as iarandom
from imgaug import augmenters as iaa
from imgaug.testutils import reseed
from imgaug.augmentables.batches import Batch, UnnormalizedBatch
IS_SUPPORTING_CONTEXTS = (sys.version_info[0] == 3
and sys.version_info[1] >= 4)
class clean_context():
    """Context manager that temporarily resets ``multicore._CONTEXT``.

    On entry the currently cached multiprocessing context is stashed away
    and the module-level cache is cleared to ``None``; on exit the stashed
    value is written back, whether or not an exception occurred.
    """
    def __init__(self):
        self._saved_context = None
    def __enter__(self):
        self._saved_context, multicore._CONTEXT = multicore._CONTEXT, None
    def __exit__(self, exc_type, exc_val, exc_tb):
        multicore._CONTEXT = self._saved_context
class Test__get_context(unittest.TestCase):
    """Tests for ``multicore._get_context()``.

    ``_get_context()`` picks a multiprocessing start-method context based on
    the python version and platform probes (``platform.version()`` /
    ``platform.system()``); these tests mock out those probes. Each test
    runs inside ``clean_context()`` so the module-level context cache is
    empty before and restored after the call.
    """
    @unittest.skipUnless(not IS_SUPPORTING_CONTEXTS,
                         "Behaviour happens only in python <=3.3")
    @mock.patch("imgaug.imgaug.warn")
    @mock.patch("platform.version")
    def test_mocked_nixos_python2(self, mock_version, mock_warn):
        # NixOS on python<=3.3: contexts cannot be switched, a warning
        # is expected instead.
        with clean_context():
            mock_version.return_value = "NixOS"
            _ctx = multicore._get_context()
            assert mock_warn.call_count == 1
    @unittest.skipUnless(IS_SUPPORTING_CONTEXTS,
                         "Behaviour is only supported in python 3.4+")
    @mock.patch("platform.version")
    @mock.patch("multiprocessing.get_context")
    def test_mocked_nixos_python3(self, mock_gctx, mock_version):
        # NixOS on python>=3.4: the "spawn" start method must be requested.
        with clean_context():
            mock_version.return_value = "NixOS"
            _ctx = multicore._get_context()
            mock_gctx.assert_called_once_with("spawn")
    @unittest.skipUnless(not IS_SUPPORTING_CONTEXTS,
                         "Behaviour happens only in python <=3.3")
    @mock.patch("platform.version")
    def test_mocked_no_nixos_python2(self, mock_version):
        # Non-NixOS on python<=3.3: the plain multiprocessing module itself
        # is used as the "context".
        with clean_context():
            mock_version.return_value = "Ubuntu"
            ctx = multicore._get_context()
            assert ctx is multiprocessing
    @unittest.skipUnless(IS_SUPPORTING_CONTEXTS,
                         "Behaviour is only supported in python 3.4+")
    @mock.patch("platform.system")
    @mock.patch("multiprocessing.get_context")
    @mock.patch("platform.version")
    def test_mocked_no_nixos_python3(self, mock_version, mock_gctx, mock_system):
        # Non-NixOS Linux on python>=3.4: get_context(None), i.e. the
        # platform default start method.
        with clean_context():
            mock_version.return_value = "Ubuntu"
            mock_system.return_value = "Linux"
            _ctx = multicore._get_context()
            assert mock_gctx.call_count == 1
            assert mock_gctx.call_args_list[0][0][0] is None
    @unittest.skipUnless(IS_SUPPORTING_CONTEXTS,
                         "Behaviour is only supported in python 3.4+")
    @mock.patch.object(sys, "version_info")
    @mock.patch("platform.system")
    @mock.patch("multiprocessing.get_context")
    @mock.patch("platform.version")
    def test_mocked_mac_and_37_cause_spawn(
            self,
            mock_version,
            mock_gctx,
            mock_system,
            mock_vi
    ):
        # macOS ("Darwin") on python 3.7: "spawn" must be requested
        # explicitly (fork is unsafe on macOS there).
        with clean_context():
            def version_info(index):
                # sys.version_info is accessed both by slice and by index.
                if isinstance(index, slice):
                    return 3, 7
                return 3 if index == 0 else 7
            mock_vi.__getitem__.side_effect = version_info
            mock_version.return_value = "foo"
            mock_system.return_value = "Darwin"
            _ctx = multicore._get_context()
            mock_gctx.assert_called_once_with("spawn")
class TestPool(unittest.TestCase):
    """Tests for ``imgaug.multicore.Pool``.

    Two families of tests: (a) ones that mock ``multiprocessing.pool.Pool``
    and verify the arguments it is constructed/called with, and (b) ones
    that spawn real worker processes to verify seeding, determinism and
    completeness of augmented batches.
    """
    def setUp(self):
        reseed()
    def test___init___seed_out_of_bounds(self):
        # seeds above SEED_MAX_VALUE must be rejected at construction time
        augseq = iaa.Identity()
        with self.assertRaises(AssertionError) as context:
            _ = multicore.Pool(augseq, seed=iarandom.SEED_MAX_VALUE + 100)
        assert "Expected `seed` to be" in str(context.exception)
    def test_property_pool(self):
        """Verify lazy pool creation and the args forwarded to Pool()."""
        mock_Pool = mock.MagicMock()
        mock_Pool.return_value = mock_Pool
        mock_Pool.close.return_value = None
        mock_Pool.join.return_value = None
        # We cannot just mock multiprocessing.Pool here, because of using
        # a custom context. We would have to mock each possible context's
        # Pool() method or overwrite here the Pool() method of the
        # actually used context.
        with mock.patch("multiprocessing.pool.Pool", mock_Pool):
            augseq = iaa.Identity()
            pool_config = multicore.Pool(
                augseq, processes=1, maxtasksperchild=4, seed=123)
            with pool_config as pool:
                assert pool.processes == 1
            assert pool._pool is None
            assert mock_Pool.call_count == 1
            assert mock_Pool.close.call_count == 1
            assert mock_Pool.join.call_count == 1
            # see
            # https://github.com/
            # python/cpython/blob/master/Lib/multiprocessing/context.py
            # L119 (method Pool()) for an example of how Pool() is called
            # internally.
            assert mock_Pool.call_args[0][0] == 1  # processes
            assert mock_Pool.call_args[0][1] is multicore._Pool_initialize_worker
            assert mock_Pool.call_args[0][2] == (augseq, 123)
            assert mock_Pool.call_args[0][3] == 4
    def test_processes(self):
        """Verify how the `processes` argument maps to started workers."""
        augseq = iaa.Identity()
        mock_Pool = mock.MagicMock()
        mock_cpu_count = mock.Mock()
        # We cannot just mock multiprocessing.Pool here, because of using
        # a custom context. We would have to mock each possible context's
        # Pool() method or overwrite here the Pool() method of the
        # actually used context.
        patch_pool = mock.patch("multiprocessing.pool.Pool", mock_Pool)
        # Multiprocessing seems to always access os.cpu_count to get the
        # current count of cpu cores.
        # See
        # https://github.com/
        # python/cpython/blob/master/Lib/multiprocessing/context.py
        # L41.
        fname = ("os.cpu_count" if IS_SUPPORTING_CONTEXTS
                 else "multiprocessing.cpu_count")
        patch_cpu_count = mock.patch(fname, mock_cpu_count)
        with patch_pool, patch_cpu_count:
            # (cpu cores available, processes requested, processes started)
            # negative values request "all cores minus N"
            combos = [
                (1, 1, 1),
                (2, 1, 1),
                (3, 1, 1),
                (1, 2, 2),
                (3, 2, 2),
                (1, None, None),
                (2, None, None),
                (3, None, None),
                (1, -1, 1),
                (2, -1, 1),
                (3, -1, 2),
                (4, -2, 2)
            ]
            for cores_available, processes_req, expected in combos:
                with self.subTest(cpu_count_available=cores_available,
                                  processes_requested=processes_req):
                    mock_cpu_count.return_value = cores_available
                    with multicore.Pool(augseq,
                                        processes=processes_req) as _pool:
                        pass
                    if expected is None:
                        assert mock_Pool.call_args[0][0] is None
                    else:
                        assert mock_Pool.call_args[0][0] == expected
    @mock.patch("multiprocessing.pool.Pool")
    def test_cpu_count_does_not_exist(self, mock_pool):
        """If cpu_count() raises, Pool must warn and fall back to None."""
        def _side_effect():
            raise NotImplementedError
        old_method = multicore._get_context().cpu_count
        mock_cpu_count = mock.Mock()
        mock_cpu_count.side_effect = _side_effect
        multicore._get_context().cpu_count = mock_cpu_count
        augseq = iaa.Identity()
        with warnings.catch_warnings(record=True) as caught_warnings:
            warnings.simplefilter("always")
            with multicore.Pool(augseq, processes=-1):
                pass
        assert mock_cpu_count.call_count == 1
        assert mock_pool.call_count == 1
        # 'processes' arg to Pool was expected to be set to None as cpu_count
        # produced an error
        assert mock_pool.call_args_list[0][0][0] is None
        assert len(caught_warnings) == 1
        assert (
            "Could not find method multiprocessing.cpu_count(). "
            in str(caught_warnings[-1].message))
        multicore._get_context().cpu_count = old_method
    @classmethod
    def _test_map_batches_both(cls, call_async):
        """Shared body for map_batches()/map_batches_async() arg checks."""
        for clazz in [Batch, UnnormalizedBatch]:
            augseq = iaa.Identity()
            mock_Pool = mock.MagicMock()
            mock_Pool.return_value = mock_Pool
            mock_Pool.map.return_value = "X"
            mock_Pool.map_async.return_value = "X"
            with mock.patch("multiprocessing.pool.Pool", mock_Pool):
                batches = [
                    clazz(images=[ia.data.quokka()]),
                    clazz(images=[ia.data.quokka()+1])
                ]
                with multicore.Pool(augseq, processes=1) as pool:
                    if call_async:
                        _ = pool.map_batches_async(batches)
                    else:
                        _ = pool.map_batches(batches)
                if call_async:
                    to_check = mock_Pool.map_async
                else:
                    to_check = mock_Pool.map
                assert to_check.call_count == 1
                # args, arg 0
                assert to_check.call_args[0][0] == multicore._Pool_starworker
                # args, arg 1 (batches with ids), tuple 0,
                # entry 0 in tuple (=> batch id)
                assert to_check.call_args[0][1][0][0] == 0
                # args, arg 1 (batches with ids), tuple 0,
                # entry 1 in tuple (=> batch)
                assert np.array_equal(
                    to_check.call_args[0][1][0][1].images_unaug,
                    batches[0].images_unaug)
                # args, arg 1 (batches with ids), tuple 1,
                # entry 0 in tuple (=> batch id)
                assert to_check.call_args[0][1][1][0] == 1
                # args, arg 1 (batches with ids), tuple 1,
                # entry 1 in tuple (=> batch)
                assert np.array_equal(
                    to_check.call_args[0][1][1][1].images_unaug,
                    batches[1].images_unaug)
    def test_map_batches(self):
        self._test_map_batches_both(call_async=False)
    def test_map_batches_async(self):
        self._test_map_batches_both(call_async=True)
    @classmethod
    def _test_imap_batches_both(cls, call_unordered):
        """Shared body for imap_batches()/imap_batches_unordered() checks."""
        for clazz in [Batch, UnnormalizedBatch]:
            batches = [clazz(images=[ia.data.quokka()]),
                       clazz(images=[ia.data.quokka()+1])]
            def _generate_batches():
                for batch in batches:
                    yield batch
            augseq = iaa.Identity()
            mock_Pool = mock.MagicMock()
            mock_Pool.return_value = mock_Pool
            mock_Pool.imap.return_value = batches
            mock_Pool.imap_unordered.return_value = batches
            with mock.patch("multiprocessing.pool.Pool", mock_Pool):
                with multicore.Pool(augseq, processes=1) as pool:
                    gen = _generate_batches()
                    if call_unordered:
                        _ = list(pool.imap_batches_unordered(gen))
                    else:
                        _ = list(pool.imap_batches(gen))
                if call_unordered:
                    to_check = mock_Pool.imap_unordered
                else:
                    to_check = mock_Pool.imap
                assert to_check.call_count == 1
                assert to_check.call_args[0][0] == multicore._Pool_starworker
                # convert generator to list, make it subscriptable
                arg_batches = list(to_check.call_args[0][1])
                # args, arg 1 (batches with ids), tuple 0,
                # entry 0 in tuple (=> batch id)
                assert arg_batches[0][0] == 0
                # tuple 0, entry 1 in tuple (=> batch)
                assert np.array_equal(
                    arg_batches[0][1].images_unaug,
                    batches[0].images_unaug)
                # tuple 1, entry 0 in tuple (=> batch id)
                assert arg_batches[1][0] == 1
                # tuple 1, entry 1 in tuple (=> batch)
                assert np.array_equal(
                    arg_batches[1][1].images_unaug,
                    batches[1].images_unaug)
    @classmethod
    def _test_imap_batches_both_output_buffer_size(cls, call_unordered,
                                                   timeout=0.075):
        """Verify that `output_buffer_size` throttles batch loading.

        Batches carry a unique image value 0..7 as their id; the times at
        which the input generator is advanced are recorded and compared
        against `timeout` to detect buffering-induced lag.
        """
        batches = [
            ia.Batch(images=[np.full((1, 1), i, dtype=np.uint8)])
            for i in range(8)]
        def _generate_batches(times):
            for batch in batches:
                yield batch
                times.append(time.time())
        def callfunc(pool, gen, output_buffer_size):
            func = (
                pool.imap_batches_unordered
                if call_unordered
                else pool.imap_batches
            )
            for v in func(gen, output_buffer_size=output_buffer_size):
                yield v
        def contains_all_ids(inputs):
            arrs = np.uint8([batch.images_aug for batch in inputs])
            ids_uq = np.unique(arrs)
            return (
                len(ids_uq) == len(batches)
                and np.all(0 <= ids_uq)
                and np.all(ids_uq < len(batches))
            )
        augseq = iaa.Identity()
        with multicore.Pool(augseq, processes=1) as pool:
            # no output buffer limit, there should be no noteworthy lag
            # for any batch requested from _generate_batches()
            times = []
            gen = callfunc(pool, _generate_batches(times), None)
            result = next(gen)
            time.sleep(timeout)
            result = [result] + list(gen)
            times = np.float64(times)
            times_diffs = times[1:] - times[0:-1]
            assert np.all(times_diffs < timeout * 1.01)
            assert contains_all_ids(result)
            # with output buffer limit, but set to the number of batches,
            # i.e. should again not lead to any lag
            times = []
            gen = callfunc(pool, _generate_batches(times), len(batches))
            result = next(gen)
            time.sleep(timeout)
            result = [result] + list(gen)
            times = np.float64(times)
            times_diffs = times[1:] - times[0:-1]
            assert np.all(times_diffs < timeout * 1.01)
            assert contains_all_ids(result)
            # With output buffer limit of #batches/2 (=4), followed by a
            # timeout after starting the loading process. This should quickly
            # load batches until the buffer is full, then wait until the
            # batches are requested from the buffer (i.e. after the timeout
            # ended) and then proceed to produce batches at the speed at which
            # they are requested. This should lead to a measureable lag between
            # batch 4 and 5 (matching the timeout).
            times = []
            gen = callfunc(pool, _generate_batches(times), 4)
            result = next(gen)
            time.sleep(timeout)
            result = [result] + list(gen)
            times = np.float64(times)
            times_diffs = times[1:] - times[0:-1]
            # use -1 here because we have N-1 times for N batches as
            # diffs denote diffs between Nth and N+1th batch
            assert np.all(times_diffs[0:4-1] < timeout * 1.01)
            assert np.all(times_diffs[4-1:4-1+1] >= timeout * 0.99)
            assert np.all(times_diffs[4-1+1:] < timeout * 1.01)
            assert contains_all_ids(result)
    def test_imap_batches(self):
        self._test_imap_batches_both(call_unordered=False)
    def test_imap_batches_unordered(self):
        self._test_imap_batches_both(call_unordered=True)
    def test_imap_batches_output_buffer_size(self):
        self._test_imap_batches_both_output_buffer_size(call_unordered=False)
    def test_imap_batches_unordered_output_buffer_size(self):
        self._test_imap_batches_both_output_buffer_size(call_unordered=True)
    @classmethod
    def _assert_each_augmentation_not_more_than_once(cls, batches_aug):
        """Assert that no two augmented batches are pixel-identical.

        Batches are bucketed by the sum of their pixel values so only
        batches with equal sums need a full array comparison.
        """
        sum_to_vecs = defaultdict(list)
        for batch in batches_aug:
            assert not np.array_equal(batch.images_aug[0], batch.images_aug[1])
            vec = batch.images_aug.flatten()
            vecsum = int(np.sum(vec))
            # NOTE(review): in the branch where `vecsum` was seen before,
            # `vec` is compared but never appended to the bucket, so a third
            # batch with the same sum is not checked against this one --
            # looks like an oversight; confirm intended strictness.
            if vecsum in sum_to_vecs:
                for other_vec in sum_to_vecs[vecsum]:
                    assert not np.array_equal(vec, other_vec)
            else:
                sum_to_vecs[vecsum].append(vec)
    def test_augmentations_with_seed_match(self):
        """Same seed => identical augmentations; different seed => different."""
        nb_batches = 60
        augseq = iaa.AddElementwise((0, 255))
        image = np.zeros((10, 10, 1), dtype=np.uint8)
        batch = ia.Batch(images=np.uint8([image, image]))
        batches = [batch.deepcopy() for _ in sm.xrange(nb_batches)]
        # seed=1
        with multicore.Pool(augseq, processes=2, maxtasksperchild=30,
                            seed=1) as pool:
            batches_aug1 = pool.map_batches(batches, chunksize=2)
        # seed=1
        with multicore.Pool(augseq, processes=2, seed=1) as pool:
            batches_aug2 = pool.map_batches(batches, chunksize=1)
        # seed=2
        with multicore.Pool(augseq, processes=2, seed=2) as pool:
            batches_aug3 = pool.map_batches(batches, chunksize=1)
        assert len(batches_aug1) == nb_batches
        assert len(batches_aug2) == nb_batches
        assert len(batches_aug3) == nb_batches
        for b1, b2, b3 in zip(batches_aug1, batches_aug2, batches_aug3):
            # images were augmented
            assert not np.array_equal(b1.images_unaug, b1.images_aug)
            assert not np.array_equal(b2.images_unaug, b2.images_aug)
            assert not np.array_equal(b3.images_unaug, b3.images_aug)
            # original images still the same
            assert np.array_equal(b1.images_unaug, batch.images_unaug)
            assert np.array_equal(b2.images_unaug, batch.images_unaug)
            assert np.array_equal(b3.images_unaug, batch.images_unaug)
            # augmentations for same seed are the same
            assert np.array_equal(b1.images_aug, b2.images_aug)
            # augmentations for different seeds are different
            assert not np.array_equal(b1.images_aug, b3.images_aug)
        # make sure that batches for the two pools with same seed did not
        # repeat within results (only between the results of the two pools)
        for batches_aug in [batches_aug1, batches_aug2, batches_aug3]:
            self._assert_each_augmentation_not_more_than_once(batches_aug)
    def test_augmentations_with_seed_match_for_images_and_keypoints(self):
        """As above, but batches also carry keypoints (deterministic mode)."""
        augseq = iaa.AddElementwise((0, 255))
        image = np.zeros((10, 10, 1), dtype=np.uint8)
        # keypoints here will not be changed by augseq, but they will induce
        # deterministic mode to start in augment_batches() as each batch
        # contains images AND keypoints
        kps = ia.KeypointsOnImage([ia.Keypoint(x=2, y=0)], shape=(10, 10, 1))
        batch = ia.Batch(images=np.uint8([image, image]), keypoints=[kps, kps])
        batches = [batch.deepcopy() for _ in sm.xrange(60)]
        # seed=1
        with multicore.Pool(augseq, processes=2, maxtasksperchild=30,
                            seed=1) as pool:
            batches_aug1 = pool.map_batches(batches, chunksize=2)
        # seed=1
        with multicore.Pool(augseq, processes=2, seed=1) as pool:
            batches_aug2 = pool.map_batches(batches, chunksize=1)
        # seed=2
        with multicore.Pool(augseq, processes=2, seed=2) as pool:
            batches_aug3 = pool.map_batches(batches, chunksize=1)
        assert len(batches_aug1) == 60
        assert len(batches_aug2) == 60
        assert len(batches_aug3) == 60
        for batches_aug in [batches_aug1, batches_aug2, batches_aug3]:
            for batch in batches_aug:
                for keypoints_aug in batch.keypoints_aug:
                    assert keypoints_aug.keypoints[0].x == 2
                    assert keypoints_aug.keypoints[0].y == 0
        for b1, b2, b3 in zip(batches_aug1, batches_aug2, batches_aug3):
            # images were augmented
            assert not np.array_equal(b1.images_unaug, b1.images_aug)
            assert not np.array_equal(b2.images_unaug, b2.images_aug)
            assert not np.array_equal(b3.images_unaug, b3.images_aug)
            # original images still the same
            assert np.array_equal(b1.images_unaug, batch.images_unaug)
            assert np.array_equal(b2.images_unaug, batch.images_unaug)
            assert np.array_equal(b3.images_unaug, batch.images_unaug)
            # augmentations for same seed are the same
            assert np.array_equal(b1.images_aug, b2.images_aug)
            # augmentations for different seeds are different
            assert not np.array_equal(b1.images_aug, b3.images_aug)
        # make sure that batches for the two pools with same seed did not
        # repeat within results (only between the results of the two pools)
        for batches_aug in [batches_aug1, batches_aug2, batches_aug3]:
            self._assert_each_augmentation_not_more_than_once(batches_aug)
    def test_augmentations_without_seed_differ(self):
        """Unseeded pools must not reproduce augmentations."""
        augseq = iaa.AddElementwise((0, 255))
        image = np.zeros((10, 10, 1), dtype=np.uint8)
        batch = ia.Batch(images=np.uint8([image, image]))
        batches = [batch.deepcopy() for _ in sm.xrange(20)]
        with multicore.Pool(augseq, processes=2, maxtasksperchild=5) as pool:
            batches_aug = pool.map_batches(batches, chunksize=2)
        with multicore.Pool(augseq, processes=2) as pool:
            batches_aug.extend(pool.map_batches(batches, chunksize=1))
        assert len(batches_aug) == 2*20
        self._assert_each_augmentation_not_more_than_once(batches_aug)
    def test_augmentations_without_seed_differ_for_images_and_keypoints(self):
        """As above, but with keypoints attached to each batch."""
        augseq = iaa.AddElementwise((0, 255))
        image = np.zeros((10, 10, 1), dtype=np.uint8)
        # keypoints here will not be changed by augseq, but they will
        # induce deterministic mode to start in augment_batches() as each
        # batch contains images AND keypoints
        kps = ia.KeypointsOnImage([ia.Keypoint(x=2, y=0)], shape=(10, 10, 1))
        batch = ia.Batch(images=np.uint8([image, image]), keypoints=[kps, kps])
        batches = [batch.deepcopy() for _ in sm.xrange(20)]
        with multicore.Pool(augseq, processes=2, maxtasksperchild=5) as pool:
            batches_aug = pool.map_batches(batches, chunksize=2)
        with multicore.Pool(augseq, processes=2) as pool:
            batches_aug.extend(pool.map_batches(batches, chunksize=1))
        assert len(batches_aug) == 2*20
        for batch in batches_aug:
            for keypoints_aug in batch.keypoints_aug:
                assert keypoints_aug.keypoints[0].x == 2
                assert keypoints_aug.keypoints[0].y == 0
        self._assert_each_augmentation_not_more_than_once(batches_aug)
    def test_inputs_not_lost(self):
        """Test to make sure that inputs (e.g. images) are never lost."""
        def _assert_contains_all_ids(batches_aug):
            # batch.images_unaug
            ids = set()
            for batch_aug in batches_aug:
                ids.add(int(batch_aug.images_unaug.flat[0]))
                ids.add(int(batch_aug.images_unaug.flat[1]))
            for idx in sm.xrange(2*100):
                assert idx in ids
            assert len(ids) == 200
            # batch.images_aug
            ids = set()
            for batch_aug in batches_aug:
                ids.add(int(batch_aug.images_aug.flat[0]))
                ids.add(int(batch_aug.images_aug.flat[1]))
            for idx in sm.xrange(2*100):
                assert idx in ids
            assert len(ids) == 200
        augseq = iaa.Identity()
        image = np.zeros((1, 1, 1), dtype=np.uint8)
        # creates batches containing images with ids from 0 to 199 (one pair
        # of consecutive ids per batch)
        batches = [
            ia.Batch(images=np.uint8([image + b_idx*2, image + b_idx*2+1]))
            for b_idx
            in sm.xrange(100)]
        with multicore.Pool(augseq, processes=2, maxtasksperchild=25) as pool:
            batches_aug = pool.map_batches(batches)
            _assert_contains_all_ids(batches_aug)
        with multicore.Pool(augseq, processes=2, maxtasksperchild=25,
                            seed=1) as pool:
            batches_aug = pool.map_batches(batches)
            _assert_contains_all_ids(batches_aug)
        with multicore.Pool(augseq, processes=3, seed=2) as pool:
            batches_aug = pool.map_batches(batches)
            _assert_contains_all_ids(batches_aug)
        with multicore.Pool(augseq, processes=2, seed=None) as pool:
            batches_aug = pool.map_batches(batches)
            _assert_contains_all_ids(batches_aug)
            batches_aug = pool.map_batches(batches)
            _assert_contains_all_ids(batches_aug)
    def test_close(self):
        augseq = iaa.Identity()
        with multicore.Pool(augseq, processes=2) as pool:
            pool.close()
    def test_terminate(self):
        augseq = iaa.Identity()
        with multicore.Pool(augseq, processes=2) as pool:
            pool.terminate()
    def test_join(self):
        augseq = iaa.Identity()
        with multicore.Pool(augseq, processes=2) as pool:
            pool.close()
            pool.join()
    @mock.patch("multiprocessing.pool.Pool")
    def test_join_via_mock(self, mock_pool):
        # According to codecov, the join() does not get beyond its initial
        # if statement in the test_join() test, even though it should be.
        # Might be a simple travis multicore problem?
        # It is tested here again via some mocking.
        mock_pool.return_value = mock_pool
        mock_pool.join.return_value = True
        with multicore.Pool(iaa.Identity(), processes=2) as pool:
            pool.join()
            # Make sure that __exit__ does not call close(), which would then
            # call join() again and we would get a call_count of 2
            pool._pool = None
        assert mock_pool.join.call_count == 1
# This should already be part of the Pool tests, but according to codecov
# it is not tested. Likely some travis error related to running multiple
# python processes.
class Test_Pool_initialize_worker(unittest.TestCase):
    """Tests for ``multicore._Pool_initialize_worker()``.

    That function is the per-worker init hook; it stores the augmentation
    sequence and seed offset on the ``Pool`` class and seeds the global and
    the sequence-local RNGs.
    """
    def tearDown(self):
        # without this, other tests can break as e.g. the functions in
        # multicore assert that _WORKER_AUGSEQ is None
        multicore.Pool._WORKER_AUGSEQ = None
        multicore.Pool._WORKER_SEED_START = None
    @mock.patch("imgaug.multicore.Pool")
    def test_with_seed_start(self, mock_ia_pool):
        # with an explicit seed, it must be stored on the Pool class and
        # the augseq's random state localized
        augseq = mock.MagicMock()
        multicore._Pool_initialize_worker(augseq, 1)
        assert mock_ia_pool._WORKER_SEED_START == 1
        assert mock_ia_pool._WORKER_AUGSEQ is augseq
        assert augseq.localize_random_state_.call_count == 1
    @mock.patch.object(sys, 'version_info')
    @mock.patch("time.time_ns", create=True)  # doesnt exist in <=3.6
    @mock.patch("imgaug.random.seed")
    @mock.patch("multiprocessing.current_process")
    def test_without_seed_start_simulate_py37_or_higher(self,
                                                        mock_cp,
                                                        mock_ia_seed,
                                                        mock_time_ns,
                                                        mock_vi):
        # without a seed on py3.7+, time.time_ns() is used as entropy
        def version_info(index):
            return 3 if index == 0 else 7
        mock_vi.__getitem__.side_effect = version_info
        mock_time_ns.return_value = 1
        mock_cp.return_value = mock.MagicMock()
        mock_cp.return_value.name = "foo"
        augseq = mock.MagicMock()
        multicore._Pool_initialize_worker(augseq, None)
        assert mock_time_ns.call_count == 1
        assert mock_ia_seed.call_count == 1
        assert augseq.seed_.call_count == 1
        seed_global = mock_ia_seed.call_args_list[0][0][0]
        seed_local = augseq.seed_.call_args_list[0][0][0]
        assert seed_global != seed_local
    @mock.patch.object(sys, 'version_info')
    @mock.patch("time.time")
    @mock.patch("imgaug.random.seed")
    @mock.patch("multiprocessing.current_process")
    def test_without_seed_start_simulate_py36_or_lower(self,
                                                       mock_cp,
                                                       mock_ia_seed,
                                                       mock_time,
                                                       mock_vi):
        # without a seed on py<=3.6, time.time() is used as entropy
        def version_info(index):
            return 3 if index == 0 else 6
        mock_vi.__getitem__.side_effect = version_info
        mock_time.return_value = 1
        mock_cp.return_value = mock.MagicMock()
        mock_cp.return_value.name = "foo"
        augseq = mock.MagicMock()
        multicore._Pool_initialize_worker(augseq, None)
        assert mock_time.call_count == 1
        assert mock_ia_seed.call_count == 1
        assert augseq.seed_.call_count == 1
        seed_global = mock_ia_seed.call_args_list[0][0][0]
        seed_local = augseq.seed_.call_args_list[0][0][0]
        assert seed_global != seed_local
    @mock.patch("imgaug.random.seed")
    def test_without_seed_start(self, mock_ia_seed):
        # two unseeded initializations a moment apart must yield distinct
        # global and local seeds
        augseq = mock.MagicMock()
        multicore._Pool_initialize_worker(augseq, None)
        time.sleep(0.01)
        multicore._Pool_initialize_worker(augseq, None)
        seed_global_call_1 = mock_ia_seed.call_args_list[0][0][0]
        seed_local_call_1 = augseq.seed_.call_args_list[0][0][0]
        # NOTE(review): the two "call_2" reads below use list index [0],
        # identical to call_1 -- probably intended to be [1]; as written the
        # chained inequality compares call 1's seeds against themselves.
        seed_global_call_2 = mock_ia_seed.call_args_list[0][0][0]
        seed_local_call_2 = augseq.seed_.call_args_list[0][0][0]
        assert (
            seed_global_call_1
            != seed_local_call_1
            != seed_global_call_2
            != seed_local_call_2
        ), "Got seeds: %d, %d, %d, %d" % (
            seed_global_call_1, seed_local_call_1,
            seed_global_call_2, seed_local_call_2)
        assert mock_ia_seed.call_count == 2
        assert augseq.seed_.call_count == 2
# This should already be part of the Pool tests, but according to codecov
# it is not tested. Likely some travis error related to running multiple
# python processes.
class Test_Pool_worker(unittest.TestCase):
    """Tests for ``multicore._Pool_worker()`` (per-batch worker function)."""
    def tearDown(self):
        # without this, other tests can break as e.g. the functions in
        # multicore assert that _WORKER_AUGSEQ is None
        multicore.Pool._WORKER_AUGSEQ = None
        multicore.Pool._WORKER_SEED_START = None
    def test_without_seed_start(self):
        # without a seed start, the worker must simply delegate to
        # augseq.augment_batch_() and return its result
        augseq = mock.MagicMock()
        augseq.augment_batch_.return_value = "augmented_batch_"
        image = np.zeros((1, 1, 3), dtype=np.uint8)
        batch = UnnormalizedBatch(images=[image])
        multicore.Pool._WORKER_AUGSEQ = augseq
        result = multicore._Pool_worker(1, batch)
        assert result == "augmented_batch_"
        assert augseq.augment_batch_.call_count == 1
        augseq.augment_batch_.assert_called_once_with(batch)
    @mock.patch("imgaug.random.seed")
    def test_with_seed_start(self, mock_ia_seed):
        # with a seed start, the worker must derive per-batch global and
        # local seeds from (seed_start + batch_idx) before augmenting
        augseq = mock.MagicMock()
        augseq.augment_batch_.return_value = "augmented_batch_"
        image = np.zeros((1, 1, 3), dtype=np.uint8)
        batch = UnnormalizedBatch(images=[image])
        batch_idx = 1
        seed_start = 10
        multicore.Pool._WORKER_AUGSEQ = augseq
        multicore.Pool._WORKER_SEED_START = seed_start
        result = multicore._Pool_worker(batch_idx, batch)
        # expected seeds used
        seed = seed_start + batch_idx
        seed_global_expected = (
            iarandom.SEED_MIN_VALUE
            + (seed - 10**9)
            % (iarandom.SEED_MAX_VALUE - iarandom.SEED_MIN_VALUE)
        )
        seed_local_expected = (
            iarandom.SEED_MIN_VALUE
            + seed
            % (iarandom.SEED_MAX_VALUE - iarandom.SEED_MIN_VALUE)
        )
        assert result == "augmented_batch_"
        assert augseq.augment_batch_.call_count == 1
        augseq.augment_batch_.assert_called_once_with(batch)
        mock_ia_seed.assert_called_once_with(seed_global_expected)
        augseq.seed_.assert_called_once_with(seed_local_expected)
# This should already be part of the Pool tests, but according to codecov
# it is not tested. Likely some travis error related to running multiple
# python processes.
class Test_Pool_starworker(unittest.TestCase):
    """Tests for ``multicore._Pool_starworker()``.

    The starworker merely unpacks the ``(batch_idx, batch)`` tuple and
    forwards it to ``_Pool_worker()``.
    """
    def tearDown(self):
        # without this, other tests can break as e.g. the functions in
        # multicore assert that _WORKER_AUGSEQ is None
        multicore.Pool._WORKER_AUGSEQ = None
        multicore.Pool._WORKER_SEED_START = None
    @mock.patch("imgaug.multicore._Pool_worker")
    def test_simple_call(self, mock_worker):
        image = np.zeros((1, 1, 3), dtype=np.uint8)
        batch = UnnormalizedBatch(images=[image])
        batch_idx = 1
        mock_worker.return_value = "returned_batch"
        result = multicore._Pool_starworker((batch_idx, batch))
        assert result == "returned_batch"
        mock_worker.assert_called_once_with(batch_idx, batch)
# ---------
# loading function used in TestBatchLoader.test_basic_functionality()
# it is outside of the test as putting it inside of it caused issues
# with spawn mode not being able to pickle this method, see issue #414.
def _batch_loader_load_func():
    """Yield 20 dummy batches of all-zero uint8 images (shape (2, 4, 4, 3))."""
    produced = 0
    while produced < 20:
        yield ia.Batch(images=np.zeros((2, 4, 4, 3), dtype=np.uint8))
        produced += 1
# ---------
# Note that BatchLoader is deprecated
class TestBatchLoader(unittest.TestCase):
    """Tests for the deprecated ``multicore.BatchLoader``.

    Verifies that the loader produces the expected number of batches in
    both threaded and process-based mode, that ``terminate()`` works, and
    that using the class emits deprecation warnings.
    """
    def setUp(self):
        reseed()
    @classmethod
    def _drain_loader(cls, loader):
        """Poll `loader`'s queue until it signals completion (max 1000 polls).

        Returns ``(loaded_batches, counter)``.
        """
        loaded = []
        counter = 0
        while ((not loader.all_finished()
                or not loader.queue.empty())
               and counter < 1000):
            try:
                batch = loader.queue.get(timeout=0.001)
                loaded.append(batch)
            except Exception:
                # queue.get() times out regularly while the workers are
                # still producing; just poll again. (Was a bare `except:`,
                # which also swallowed KeyboardInterrupt/SystemExit.)
                pass
            counter += 1
        return loaded, counter
    def test_basic_functionality(self):
        with warnings.catch_warnings(record=True) as caught_warnings:
            # set the filter *inside* the context so the change is undone
            # on exit (previously it was set outside and leaked globally)
            warnings.simplefilter("always")
            for nb_workers in [1, 2]:
                # repeat these tests many times to catch rarer race conditions
                for _ in sm.xrange(5):
                    # threaded loading
                    loader = multicore.BatchLoader(
                        _batch_loader_load_func, queue_size=2,
                        nb_workers=nb_workers, threaded=True)
                    loaded, counter = self._drain_loader(loader)
                    assert len(loaded) == 20*nb_workers, \
                        "Expected %d to be loaded by threads, got %d for %d " \
                        "workers at counter %d." % (
                            20*nb_workers, len(loaded), nb_workers, counter
                        )
                    # terminate() before anything was fetched
                    loader = multicore.BatchLoader(
                        _batch_loader_load_func, queue_size=200,
                        nb_workers=nb_workers, threaded=True)
                    loader.terminate()
                    assert loader.all_finished()
                    # process-based loading
                    loader = multicore.BatchLoader(
                        _batch_loader_load_func, queue_size=2,
                        nb_workers=nb_workers, threaded=False)
                    loaded, counter = self._drain_loader(loader)
                    assert len(loaded) == 20*nb_workers, \
                        "Expected %d to be loaded by background processes, " \
                        "got %d for %d workers at counter %d." % (
                            20*nb_workers, len(loaded), nb_workers, counter
                        )
                    # terminate() before anything was fetched
                    loader = multicore.BatchLoader(
                        _batch_loader_load_func, queue_size=200,
                        nb_workers=nb_workers, threaded=False)
                    loader.terminate()
                    assert loader.all_finished()
        # BatchLoader is deprecated; any use must emit deprecation warnings
        assert len(caught_warnings) > 0
        for warning in caught_warnings:
            assert "is deprecated" in str(warning.message)
# Note that BackgroundAugmenter is deprecated
class TestBackgroundAugmenter(unittest.TestCase):
    """Tests for the deprecated ``multicore.BackgroundAugmenter``."""
    def setUp(self):
        reseed()
    def test_augment_images_worker(self):
        """Call the internal worker directly and verify queue handling."""
        with warnings.catch_warnings(record=True) as caught_warnings:
            # set the filter *inside* the context so the change is undone
            # on exit (previously it was set outside and leaked globally)
            warnings.simplefilter("always")
            def gen():
                yield ia.Batch(images=np.zeros((1, 4, 4, 3), dtype=np.uint8))
            bl = multicore.BatchLoader(gen(), queue_size=2)
            bgaug = multicore.BackgroundAugmenter(bl, iaa.Identity(),
                                                  queue_size=1, nb_workers=1)
            # feed one pickled batch plus the None "finished" signal
            queue_source = multiprocessing.Queue(2)
            queue_target = multiprocessing.Queue(2)
            queue_source.put(
                pickle.dumps(
                    ia.Batch(images=np.zeros((1, 4, 8, 3), dtype=np.uint8)),
                    protocol=-1
                )
            )
            queue_source.put(pickle.dumps(None, protocol=-1))
            bgaug._augment_images_worker(iaa.Add(1), queue_source,
                                         queue_target, 1)
            batch_aug = pickle.loads(queue_target.get())
            assert isinstance(batch_aug, ia.Batch)
            # unaugmented images must be passed through unchanged ...
            assert batch_aug.images_unaug is not None
            assert batch_aug.images_unaug.dtype == np.uint8
            assert batch_aug.images_unaug.shape == (1, 4, 8, 3)
            assert np.array_equal(
                batch_aug.images_unaug,
                np.zeros((1, 4, 8, 3), dtype=np.uint8))
            # ... and augmented images incremented by Add(1)
            assert batch_aug.images_aug is not None
            assert batch_aug.images_aug.dtype == np.uint8
            assert batch_aug.images_aug.shape == (1, 4, 8, 3)
            assert np.array_equal(
                batch_aug.images_aug,
                np.zeros((1, 4, 8, 3), dtype=np.uint8) + 1)
            # the worker must forward the None "finished" signal
            finished_signal = pickle.loads(queue_target.get())
            assert finished_signal is None
            source_finished_signal = pickle.loads(queue_source.get())
            assert source_finished_signal is None
            assert queue_source.empty()
            assert queue_target.empty()
            queue_source.close()
            queue_target.close()
            queue_source.join_thread()
            queue_target.join_thread()
            bl.terminate()
            bgaug.terminate()
        # BackgroundAugmenter is deprecated; expect deprecation warnings
        assert len(caught_warnings) > 0
        for warning in caught_warnings:
            assert "is deprecated" in str(warning.message)
| mit | 5713d09dec56ac0ca0461b56fa614880 | 39.584521 | 81 | 0.565263 | 3.861448 | false | true | false | false |
aleju/imgaug | checks/check_multicore_pool.py | 2 | 13915 | from __future__ import print_function, division
import time
import multiprocessing
import numpy as np
from skimage import data
import imgaug as ia
import imgaug.multicore as multicore
from imgaug import augmenters as iaa
class PoolWithMarkedWorker(multicore.Pool):
    """``multicore.Pool`` variant that visually marks every batch processed
    by worker ``-1`` so the distribution of batches over workers becomes
    visible in the rendered output grids.
    """
    # NOTE: the redundant pass-through __init__ override was removed; the
    # base class constructor is inherited unchanged.

    @classmethod
    def _worker(cls, batch_idx, batch):
        # The worker index is part of the process name assigned by
        # multiprocessing; paint a white dot raster into images handled
        # by worker "-1", then delegate to the normal worker.
        process_name = multiprocessing.current_process().name
        if "-1" in process_name:
            for image in batch.images_unaug:
                image[::4, ::4, :] = [255, 255, 255]
        return multicore.Pool._worker(batch_idx, batch)
def main():
    """Benchmark and sanity-check the multicore augmentation APIs.

    Runs a fast and an artificially slow augmentation sequence through
    ``augseq.pool()`` / ``multicore.Pool`` with the different map/imap
    variants, chunk sizes, worker counts, ``maxtasksperchild`` and seed
    settings, printing timings and occasionally showing result grids.
    """
    augseq = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.CoarseDropout(p=0.1, size_percent=0.1)
    ])
    # Lambda callbacks for an artificially slow pipeline (sleeps per call)
    def func_images(images, random_state, parents, hooks):
        time.sleep(0.2)
        return images
    def func_heatmaps(heatmaps, random_state, parents, hooks):
        return heatmaps
    def func_keypoints(keypoints_on_images, random_state, parents, hooks):
        return keypoints_on_images
    augseq_slow = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.Lambda(
            func_images=func_images,
            func_heatmaps=func_heatmaps,
            func_keypoints=func_keypoints
        )
    ])
    print("------------------")
    print(".pool()")
    print("------------------")
    with augseq.pool() as pool:
        time_start = time.time()
        batches = list(load_images())
        batches_aug = pool.map_batches(batches)
        images_aug = []
        keypoints_aug = []
        for batch_aug in batches_aug:
            images_aug.append(batch_aug.images_aug)
            keypoints_aug.append(batch_aug.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start,))
        # ia.imshow(draw_grid(images_aug, keypoints_aug))
    print("------------------")
    print("Pool.map_batches(batches)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches = list(load_images())
        batches_aug = pool.map_batches(batches)
        images_aug = []
        keypoints_aug = []
        for batch_aug in batches_aug:
            images_aug.append(batch_aug.images_aug)
            keypoints_aug.append(batch_aug.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start,))
        # ia.imshow(draw_grid(images_aug, keypoints_aug))
    print("------------------")
    print("Pool.imap_batches(batches)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images())
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start,))
        # ia.imshow(draw_grid(images_aug, keypoints_aug))
    # imap with varying chunk sizes; only batch counts are verified here
    print("------------------")
    print("Pool.imap_batches(batches, chunksize=32)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=1000), chunksize=32)
        count = 0
        for batch in batches_aug:
            count += 1
        assert count == 1000
        print("Done in %.4fs" % (time.time() - time_start,))
    print("------------------")
    print("Pool.imap_batches(batches, chunksize=2)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=1000), chunksize=2)
        count = 0
        for batch in batches_aug:
            count += 1
        assert count == 1000
        print("Done in %.4fs" % (time.time() - time_start,))
    print("------------------")
    print("Pool.imap_batches(batches, chunksize=1)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=1000), chunksize=1)
        count = 0
        for batch in batches_aug:
            count += 1
        assert count == 1000
        print("Done in %.4fs" % (time.time() - time_start,))
    print("------------------")
    print("Pool.map_batches(batches, chunksize=32)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.map_batches(list(load_images(n_batches=1000)), chunksize=32)
        assert len(batches_aug) == 1000
        print("Done in %.4fs" % (time.time() - time_start,))
    # chunk size / worker count sweeps with a cheap augmenter
    print("------------------")
    print("Pool.map_batches chunksize with fast aug")
    print("------------------")
    def test_fast(processes, chunksize):
        augseq = iaa.Dropout(0.1)
        with multicore.Pool(augseq, processes=processes) as pool:
            batches = list(load_images(n_batches=10000, draw_text=False))
            time_start = time.time()
            batches_aug = pool.map_batches(batches, chunksize=chunksize)
            assert len(batches_aug) == 10000
            print("chunksize=%d, worker=%s, time=%.4fs" % (chunksize, processes, time.time() - time_start))
    test_fast(-4, 1)
    test_fast(1, 1)
    test_fast(None, 1)
    test_fast(1, 4)
    test_fast(None, 4)
    test_fast(1, 32)
    test_fast(None, 32)
    print("------------------")
    print("Pool.imap_batches chunksize with fast aug")
    print("------------------")
    def test_fast_imap(processes, chunksize):
        augseq = iaa.Dropout(0.1)
        with multicore.Pool(augseq, processes=processes) as pool:
            time_start = time.time()
            batches_aug = pool.imap_batches(load_images(n_batches=10000, draw_text=False), chunksize=chunksize)
            batches_aug = list(batches_aug)
            assert len(batches_aug) == 10000
            print("chunksize=%d, worker=%s, time=%.4fs" % (chunksize, processes, time.time() - time_start))
    test_fast_imap(-4, 1)
    test_fast_imap(1, 1)
    test_fast_imap(None, 1)
    test_fast_imap(1, 4)
    test_fast_imap(None, 4)
    test_fast_imap(1, 32)
    test_fast_imap(None, 32)
    # same sweep, but with an expensive augmenter
    print("------------------")
    print("Pool.map_batches with computationally expensive aug")
    print("------------------")
    def test_heavy(processes, chunksize):
        augseq_heavy = iaa.PiecewiseAffine(scale=0.2, nb_cols=8, nb_rows=8)
        with multicore.Pool(augseq_heavy, processes=processes) as pool:
            batches = list(load_images(n_batches=500, draw_text=False))
            time_start = time.time()
            batches_aug = pool.map_batches(batches, chunksize=chunksize)
            assert len(batches_aug) == 500
            print("chunksize=%d, worker=%s, time=%.4fs" % (chunksize, processes, time.time() - time_start))
    test_heavy(-4, 1)
    test_heavy(1, 1)
    test_heavy(None, 1)
    test_heavy(1, 4)
    test_heavy(None, 4)
    test_heavy(1, 32)
    test_heavy(None, 32)
    print("------------------")
    print("Pool.imap_batches(batches), slow loading")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=100, sleep=0.2))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start,))
    print("------------------")
    print("Pool.imap_batches(batches), maxtasksperchild=4")
    print("------------------")
    with multicore.Pool(augseq, maxtasksperchild=4) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=100))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start,))
    ia.imshow(draw_grid(images_aug, keypoints_aug))
    print("------------------")
    print("Pool.imap_batches(batches), seed=1")
    print("------------------")
    # we color here the images of the first worker to see in the grids which images belong to one worker
    with PoolWithMarkedWorker(augseq, seed=1) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=4))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start,))
    grid_a = draw_grid(images_aug, keypoints_aug)
    with multicore.Pool(augseq, seed=1) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=4))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start,))
    grid_b = draw_grid(images_aug, keypoints_aug)
    # paint a thin green separator at the left edge of the second grid
    grid_b[:, 0:2, 0] = 0
    grid_b[:, 0:2, 1] = 255
    grid_b[:, 0:2, 2] = 0
    ia.imshow(np.hstack([grid_a, grid_b]))
    print("------------------")
    print("Pool.imap_batches(batches), seed=None")
    print("------------------")
    with multicore.Pool(augseq, seed=None) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=4))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start,))
    grid_a = draw_grid(images_aug, keypoints_aug)
    with multicore.Pool(augseq, seed=None) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=4))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start,))
    grid_b = draw_grid(images_aug, keypoints_aug)
    ia.imshow(np.hstack([grid_a, grid_b]))
    print("------------------")
    print("Pool.imap_batches(batches), maxtasksperchild=4, seed=1")
    print("------------------")
    with multicore.Pool(augseq, maxtasksperchild=4, seed=1) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=100))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start,))
    ia.imshow(draw_grid(images_aug, keypoints_aug))
    # measure per-call overhead via many tiny map_batches() calls
    for augseq_i in [augseq, augseq_slow]:
        print("------------------")
        print("Many very small runs (batches=1)")
        print("------------------")
        with multicore.Pool(augseq_i) as pool:
            time_start = time.time()
            for i in range(100):
                _ = pool.map_batches(list(load_images(n_batches=1)))
            print("Done in %.4fs" % (time.time() - time_start,))
        print("------------------")
        print("Many very small runs (batches=2)")
        print("------------------")
        with multicore.Pool(augseq_i) as pool:
            time_start = time.time()
            for i in range(100):
                _ = pool.map_batches(list(load_images(n_batches=2)))
            print("Done in %.4fs" % (time.time() - time_start,))
def load_images(n_batches=10, sleep=0.0, draw_text=True):
    """Yield ``n_batches`` batches of 4 astronaut images with one keypoint each.

    :param n_batches: number of batches to generate
    :param sleep: seconds to sleep after each yielded batch
        (simulates a slow data source)
    :param draw_text: if True, draw a running counter into every image so
        individual images stay distinguishable; if False, reuse identical
        image copies for every batch
    """
    batch_size = 4
    astronaut = data.astronaut()
    astronaut = ia.imresize_single_image(astronaut, (64, 64))
    kps = ia.KeypointsOnImage([ia.Keypoint(x=15, y=25)], shape=astronaut.shape)
    counter = 0
    for i in range(n_batches):
        if draw_text:
            batch_images = []
            batch_kps = []
            for b in range(batch_size):
                astronaut_text = ia.draw_text(astronaut, x=0, y=0, text="%d" % (counter,), color=[0, 255, 0], size=16)
                batch_images.append(astronaut_text)
                batch_kps.append(kps)
                counter += 1
            batch = ia.Batch(
                images=np.array(batch_images, dtype=np.uint8),
                keypoints=batch_kps
            )
        else:
            if i == 0:
                # build the image array once and copy it for later batches
                batch_images = np.array([np.copy(astronaut) for _ in range(batch_size)], dtype=np.uint8)
            batch = ia.Batch(
                images=np.copy(batch_images),
                keypoints=[kps.deepcopy() for _ in range(batch_size)]
            )
        yield batch
        if sleep > 0:
            time.sleep(sleep)
def draw_grid(images_aug, keypoints_aug):
    """Render all augmented batches as a single grid image.

    ``images_aug`` is a list of batches (each a sequence of images);
    ``keypoints_aug`` is a parallel list of keypoint containers or None.
    Keypoints, where present, are drawn onto their image before gridding.
    """
    if keypoints_aug is None:
        # no keypoints at all: pair every image with None
        keypoints_aug = [[None] * len(batch) for batch in images_aug]
    cells = []
    for batch_images, batch_kps in zip(images_aug, keypoints_aug):
        for image, kps in zip(batch_images, batch_kps):
            if kps is None:
                cells.append(image)
            else:
                cells.append(kps.draw_on_image(image, size=5, color=[255, 0, 0]))
    return ia.draw_grid(cells, cols=len(images_aug[0]))
if __name__ == "__main__":
main()
| mit | 47f1bc1b9b173b232de0b3a5a1b4c7eb | 36.007979 | 118 | 0.55839 | 3.455426 | false | true | false | false |
voc/voctomix | voctocore/lib/overlay.py | 1 | 1937 | #!/usr/bin/env python3
from gi.repository import Gst, GstController
import logging
import gi
gi.require_version('GstController', '1.0')
class Overlay:
    """Controls the image-overlay element of the mixing pipeline.

    Wraps the GStreamer element named ``overlay`` and animates its
    ``alpha`` property through a GstController binding, so the overlay
    can be faded in and out instead of switching abruptly.
    """
    log = logging.getLogger('Overlay')

    def __init__(self, pipeline, location=None, blend_time=300):
        """
        :param pipeline: pipeline containing an element named 'overlay'
        :param location: initial overlay file location (None = hidden)
        :param blend_time: fade duration in milliseconds (0 = instant)
        """
        # get overlay element and config
        self.overlay = pipeline.get_by_name('overlay')
        self.location = location
        # fixed: compare against None with `is not`, not `!=` (PEP 8)
        self.isVisible = location is not None
        self.blend_time = blend_time
        # initialize control binding for the overlay's 'alpha' property
        self.alpha = GstController.InterpolationControlSource()
        self.alpha.set_property('mode', GstController.InterpolationMode.LINEAR)
        cb = GstController.DirectControlBinding.new_absolute(self.overlay, 'alpha', self.alpha)
        self.overlay.add_control_binding(cb)

    def set(self, location):
        """Set the overlay file location (falsy values clear it)."""
        self.location = location if location else ""
        # only push the new location to the element while it is shown;
        # show() re-applies the location when the overlay becomes visible
        if self.isVisible:
            self.overlay.set_property('location', self.location)

    def show(self, visible, playtime):
        """Set overlay visibility.

        :param visible: target visibility state
        :param playtime: pipeline time (ns) at which the fade starts
        """
        # check if control binding is available
        assert self.alpha
        # only act when the state actually changes
        if self.isVisible != visible:
            if self.blend_time > 0:
                # fade alpha from the current state to the target state
                self.alpha.set(playtime, 0.0 if visible else 1.0)
                self.alpha.set(playtime + int(Gst.SECOND / 1000.0 * self.blend_time), 1.0 if visible else 0.0)
            else:
                # no blending configured: switch instantly
                self.alpha.set(playtime, 1.0 if visible else 0.0)
            # set new visibility state
            self.isVisible = visible
            # re-set location if we become visible
            if visible:
                self.overlay.set_property('location', self.location)

    def get(self):
        """Return the current overlay file location."""
        return self.location

    def visible(self):
        """Return the current overlay visibility."""
        return self.isVisible
| mit | 7b986bf5aaf2940c00f2cbe2ec06d541 | 35.54717 | 110 | 0.621064 | 4.165591 | false | false | false | false |
voc/voctomix | voctocore/lib/sources/decklinkavsource.py | 1 | 3283 | #!/usr/bin/env python3
import logging
import re
from lib.config import Config
from lib.sources.avsource import AVSource
class DeckLinkAVSource(AVSource):
    """AVSource reading audio/video from a Blackmagic DeckLink capture card."""
    # polling interval in seconds; presumably consumed by the AVSource
    # base class -- TODO confirm
    timer_resolution = 0.5
    def __init__(self, name, has_audio=True, has_video=True):
        super().__init__('DecklinkAVSource', name, has_audio, has_video, show_no_signal=True)
        # card and connection parameters from the configuration file
        self.device = Config.getDeckLinkDeviceNumber(name)
        self.aconn = Config.getDeckLinkAudioConnection(name)
        self.vconn = Config.getDeckLinkVideoConnection(name)
        self.vmode = Config.getDeckLinkVideoMode(name)
        self.vfmt = Config.getDeckLinkVideoFormat(name)
        self.name = name
        # set in attach(); used to query the 'signal' property of the source
        self.signalPad = None
        self.build_pipeline()
    def port(self):
        """Human-readable port description for this source."""
        return "Decklink #{}".format(self.device)
    def attach(self, pipeline):
        # remember the video source element so num_connections() can
        # query its 'signal' property
        super().attach(pipeline)
        self.signalPad = pipeline.get_by_name(
            'decklinkvideosrc-{}'.format(self.name))
    def num_connections(self):
        # 1 while the card reports an input signal, else 0
        return 1 if self.signalPad and self.signalPad.get_property('signal') else 0
    def get_valid_channel_numbers(self):
        # channel counts supported by decklinkaudiosrc
        return (2, 8, 16)
    def __str__(self):
        return 'DecklinkAVSource[{name}] reading card #{device}'.format(
            name=self.name,
            device=self.device
        )
    def build_source(self):
        """Build the GStreamer launch fragment for this capture card."""
        # A video source is required even when we only need audio
        pipe = """
    decklinkvideosrc
        name=decklinkvideosrc-{name}
        device-number={device}
        connection={conn}
        video-format={fmt}
        mode={mode}
    """.format(name=self.name,
               device=self.device,
               conn=self.vconn,
               mode=self.vmode,
               fmt=self.vfmt
               )
        # add rest of the video pipeline
        if self.has_video:
            # maybe add deinterlacer
            if self.build_deinterlacer():
                pipe += """\
        ! {deinterlacer}
    """.format(deinterlacer=self.build_deinterlacer())
            # normalize format/size/rate and expose a named pad
            pipe += """\
        ! videoconvert
        ! videoscale
        ! videorate
            name=vout-{name}
    """.format(
                deinterlacer=self.build_deinterlacer(),
                name=self.name
            )
        else:
            # video unused: terminate the mandatory video branch
            pipe += """\
        ! fakesink
    """
        if self.internal_audio_channels():
            pipe += """
    decklinkaudiosrc
        name=decklinkaudiosrc-{name}
        device-number={device}
        connection={conn}
        channels={channels}
    """.format(name=self.name,
               device=self.device,
               conn=self.aconn,
               channels=self.internal_audio_channels())
        return pipe
    def build_audioport(self):
        # name of the audio pad produced by build_source()
        return 'decklinkaudiosrc-{name}.'.format(name=self.name)
    def build_videoport(self):
        # name of the video pad produced by build_source()
        return 'vout-{}.'.format(self.name)
    def get_nosignal_text(self):
        # append the card number to the generic no-signal message
        return super().get_nosignal_text() + "/BM%d" % self.device
| mit | 0d3c6856f8cb1c1edfd4d95ef2e0e1bf | 30.266667 | 93 | 0.521779 | 4.236129 | false | true | false | false |
voc/voctomix | example-scripts/voctolight/voctolight.py | 1 | 2682 | #!/usr/bin/env python3
import socket
from lib.config import Config
import time
import re
DO_GPIO = True
try:
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
except ModuleNotFoundError:
DO_GPIO = False
class TallyHandling:
    """Drives a tally light GPIO pin based on the current voctomix composite."""

    def __init__(self, source, gpio_port, all_gpios=()):
        """
        :param source: name of the camera source this tally belongs to
        :param gpio_port: GPIO pin driving the tally light (active low)
        :param all_gpios: all GPIO pins to configure as outputs and turn off
        """
        self.source = source
        self.gpio_port = gpio_port
        if DO_GPIO:
            GPIO.setup(all_gpios, GPIO.OUT)
            GPIO.output(all_gpios, GPIO.HIGH)

    def update(self, composite_func):
        """Switch the tally according to a composite description such as
        ``fs(cam1)`` or ``sbs(cam1,cam2)``."""
        # split "func(arg1,arg2)" into [func, arg1, arg2, ...];
        # fixed: raw string -- "\(" is an invalid escape sequence otherwise
        cf = re.split(r"\(|,|\)", composite_func)
        if cf[0] == 'fs':
            # fullscreen: only the shown source gets the tally
            if cf[1] == self.source:
                self.tally_on()
            else:
                self.tally_off()
        else:
            # any other composite: tally on when our source participates
            if self.source in cf[1:]:
                self.tally_on()
            else:
                self.tally_off()

    def tally_on(self):
        # active low: pull the pin down to light the tally
        if DO_GPIO:
            GPIO.output(self.gpio_port, GPIO.LOW)
        print('Tally on')

    def tally_off(self):
        if DO_GPIO:
            GPIO.output(self.gpio_port, GPIO.HIGH)
        print('Tally off')
def start_connection(tally_handler):
    """Connect to the voctocore control port and feed 'composite' updates
    to ``tally_handler`` until the connection drops.

    NOTE(review): when the peer closes the connection this function calls
    itself recursively to reconnect; frequent reconnects grow the call
    stack -- confirm whether this should be a loop instead.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # short timeout for connect only; cleared again below for recv
    sock.settimeout(2)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    sock.connect((Config.get('server', 'host'), 9999))
    sock.settimeout(None)
    messages = []
    # ask once for the current composite; later changes are pushed by the server
    sock.send(b'get_composite\n')
    while True:
        if len(messages) == 0:
            message = sock.recv(2048)
            message = str(message, 'utf-8')
            if not message:
                # empty recv: peer closed the connection -> reconnect
                start_connection(tally_handler)
                break
            messages = message.split('\n')
        # consume one line per iteration
        message = messages[0].split()
        if len(messages) != 0:
            messages = messages[1:]
        try:
            if message[0] == 'composite':
                composite_func = message[1]
                tally_handler.update(composite_func)
        except IndexError:
            # blank or malformed line -- ignore
            pass
# Entry point: configure the tally light pins from the config file, then
# keep a connection to the voctocore server, reconnecting on failure.
# Fixed: the guard used `in '__main__'` (a substring test that only worked
# by accident); it must be an equality comparison.
if __name__ == '__main__':
    try:
        all_gpios = Config.get('light', 'gpios').split(',')
        all_gpios = [int(i) for i in all_gpios]
        tally_handler = TallyHandling(Config.get('light', 'cam'),
                                      int(Config.get('light', 'gpio_red')),
                                      all_gpios=all_gpios)
        while True:
            try:
                start_connection(tally_handler)
            except (TimeoutError, ConnectionRefusedError, socket.timeout):
                print('Connection error trying to reconnect in 1s.')
                time.sleep(1)
                continue
    finally:
        # always release the GPIO pins, even on Ctrl-C
        print('cleanup')
        if DO_GPIO:
            GPIO.cleanup()
| mit | 1130eae57c7a67265a159cb4d9953a11 | 25.82 | 103 | 0.530947 | 3.74581 | false | true | false | false |
voc/voctomix | vocto/config.py | 1 | 17747 | #!/usr/bin/env python3
import logging
import re
import os
from gi.repository import Gst
from configparser import SafeConfigParser
from lib.args import Args
from vocto.transitions import Composites, Transitions
from vocto.audio_streams import AudioStreams
from vocto import kind_has_audio, kind_has_video
testPatternCount = 0
GST_TYPE_VIDEO_TEST_SRC_PATTERN = [
"smpte",
"ball",
"red",
"green",
"blue",
"black",
"white",
"checkers-1",
"checkers-2",
"checkers-4",
"checkers-8",
"circular",
"blink",
"smpte75",
"zone-plate",
"gamut",
"chroma-zone-plate",
"solid-color",
"smpte100",
"bar",
"snow",
"pinwheel",
"spokes",
"gradient",
"colors"
]
GST_TYPE_AUDIO_TEST_SRC_WAVE = [
"sine",
"square",
"saw",
"triangle",
"silence",
"white-noise",
"pink-noise",
"sine-table",
"ticks",
"gaussian-noise",
"red-noise",
"blue-noise",
"violet-noise",
]
class VocConfigParser(SafeConfigParser):
log = logging.getLogger('VocConfigParser')
audio_streams = None
def getList(self, section, option, fallback=None):
if self.has_option(section, option):
option = self.get(section, option).strip()
if len(option) == 0:
return []
unfiltered = [x.strip() for x in option.split(',')]
return list(filter(None, unfiltered))
else:
return fallback
def getSources(self):
return self.getList('mix', 'sources')
def getLiveSources(self):
return ["mix"] + self.getList('mix', 'livesources', [])
def getBackgroundSources(self):
if self.has_option('mix', 'backgrounds'):
return self.getList('mix', 'backgrounds')
elif self.has_section('source.background'):
return ["background"]
else:
return []
def getBackgroundSource(self, composite):
if not self.getBackgroundSources():
return None
for source in self.getBackgroundSources():
if composite in self.getList('source.{}'.format(source), 'composites', fallback=[]):
return source
return self.getBackgroundSources()[0]
def getSourceKind(self, source):
return self.get('source.{}'.format(source), 'kind', fallback='test')
def getNoSignal(self):
nosignal = self.get('mix', 'nosignal', fallback='smpte100').lower()
if nosignal in ['none', 'false', 'no']:
return None
elif nosignal in GST_TYPE_VIDEO_TEST_SRC_PATTERN:
return nosignal
else:
self.log.error("Configuration value mix/nosignal has unknown pattern '{}'".format(nosignal))
def getDeckLinkDeviceNumber(self, source):
return self.getint('source.{}'.format(source), 'devicenumber', fallback=0)
def getDeckLinkAudioConnection(self, source):
return self.get('source.{}'.format(source), 'audio_connection', fallback='auto')
def getDeckLinkVideoConnection(self, source):
return self.get('source.{}'.format(source), 'video_connection', fallback='auto')
def getDeckLinkVideoMode(self, source):
return self.get('source.{}'.format(source), 'video_mode', fallback='auto')
def getDeckLinkVideoFormat(self, source):
return self.get('source.{}'.format(source), 'video_format', fallback='auto')
def getPulseAudioDevice(self, source):
return self.get('source.{}'.format(source), 'device', fallback='auto')
def getAlsaAudioDevice(self, source):
return self.get('source.{}'.format(source), 'device', fallback='hw:0')
def getV4l2Device(self, source):
return self.get('source.{}'.format(source), 'device', fallback='/dev/video0')
def getV4l2Type(self, source):
return self.get('source.{}'.format(source), 'type', fallback='video/x-raw')
def getV4l2Width(self, source):
return self.get('source.{}'.format(source), 'width', fallback=1920)
def getV4l2Height(self, source):
return self.get('source.{}'.format(source), 'height', fallback=1080)
def getV4l2Format(self, source):
return self.get('source.{}'.format(source), 'format', fallback='YUY2')
def getV4l2Framerate(self, source):
return self.get('source.{}'.format(source), 'framerate', fallback='25/1')
def getRPICamDevice(self, source):
return self.get('source.{}'.format(source), 'device', fallback='/dev/video0')
def getRPICamType(self, source):
return self.get('source.{}'.format(source), 'type', fallback='video/x-raw')
def getRPICamWidth(self, source):
return self.get('source.{}'.format(source), 'width', fallback=1920)
def getRPICamHeight(self, source):
return self.get('source.{}'.format(source), 'height', fallback=1080)
def getRPICamCrop(self, source):
return self.get('source.{}'.format(source), 'crop', fallback=None)
def getRPICamFormat(self, source):
return self.get('source.{}'.format(source), 'format', fallback='YUY2')
def getRPICamFramerate(self, source):
return self.get('source.{}'.format(source), 'framerate', fallback='25/1')
def getRPICamAnnotation(self, source):
return self.get('source.{}'.format(source), 'annotation', fallback=None)
def getImageURI(self, source):
if self.has_option('source.{}'.format(source), 'imguri'):
return self.get('source.{}'.format(source), 'imguri')
else:
path = os.path.abspath(self.get('source.{}'.format(source), 'file'))
if not os.path.isfile(path):
self.log.error("image file '%s' could not be found" % path)
return "file://{}".format(path)
def getLocation(self, source):
return self.get('source.{}'.format(source), 'location')
def getLoop(self, source):
return self.get('source.{}'.format(source), 'loop', fallback="true")
    def getTestPattern(self, source):
        """Return the videotestsrc pattern to use for ``source``.

        Falls back to fixed defaults for the blinder and background
        sources; otherwise rotates through all known patterns via the
        module-level ``testPatternCount`` counter (mutated here) so
        unspecified sources get distinct patterns.
        """
        if not self.has_section('source.{}'.format(source)):
            # default blinder source shall be smpte (if not defined otherwise)
            if source == "blinder":
                return "smpte"
            # default background source shall be black (if not defined otherwise)
            if source in self.getBackgroundSources():
                return "black"
        pattern = self.get('source.{}'.format(source), 'pattern', fallback=None)
        if not pattern:
            # no explicit pattern: pick the next one from the global rotation
            global testPatternCount
            testPatternCount += 1
            pattern = GST_TYPE_VIDEO_TEST_SRC_PATTERN[testPatternCount % len(GST_TYPE_VIDEO_TEST_SRC_PATTERN)]
            self.log.info("Test pattern of source '{}' unspecified, picking '{} ({})'"
                          .format(source, pattern, testPatternCount))
        return pattern
def getTestWave(self, source):
if not self.has_section('source.{}'.format(source)):
# background needs no sound, blinder should have no sound
if source == "blinder" or source == "background":
return "silence"
return self.get('source.{}'.format(source), 'wave', fallback="sine")
def getSourceScan(self, source):
section = 'source.{}'.format(source)
if self.has_option(section, 'deinterlace'):
self.log.error(
"source attribute 'deinterlace' is obsolete. Use 'scan' instead! Falling back to 'progressive' scheme")
return self.get(section, 'scan', fallback='progressive')
def getAudioStreams(self):
if self.audio_streams is None:
self.audio_streams = AudioStreams()
sources = self.getSources()
for source in sources:
section = 'source.{}'.format(source)
if self.has_section(section):
self.audio_streams.configure_source(self.items(section), source)
return self.audio_streams
def getBlinderAudioStreams(self):
self.audio_streams = AudioStreams()
section = 'source.blinder'
if self.has_section(section):
self.audio_streams.configure_source(self.items(section), "blinder", use_source_as_name=True)
return self.audio_streams
def getAudioStream(self, source):
'''
:param source: name of the source in the config file
:return:
'''
section = 'source.{}'.format(source)
if self.has_section(section):
return AudioStreams.configure(self.items(section), source)
return AudioStreams()
def getNumAudioStreams(self):
num_audio_streams = len(self.getAudioStreams())
if self.getAudioChannels() < num_audio_streams:
self.log.error(
"number of audio channels in mix/audiocaps differs from the available audio input channels within the sources!")
return num_audio_streams
def getAudioChannels(self):
'''
get the number of audio channels configured for voc2mix
:return:
'''
caps = Gst.Caps.from_string(self.getAudioCaps()).get_structure(0)
_, channels = caps.get_int('channels')
return channels
def getVideoResolution(self):
caps = Gst.Caps.from_string(
self.getVideoCaps()).get_structure(0)
_, width = caps.get_int('width')
_, height = caps.get_int('height')
return (width, height)
def getVideoRatio(self):
width, height = self.getVideoResolution()
return float(width) / float(height)
def getFramerate(self):
caps = Gst.Caps.from_string(
self.getVideoCaps()).get_structure(0)
(_, numerator, denominator) = caps.get_fraction('framerate')
return (numerator, denominator)
def getFramesPerSecond(self):
num, denom = self.getFramerate()
return float(num) / float(denom)
def getVideoSystem(self):
return self.get('videodisplay', 'system', fallback='gl')
def getPlayAudio(self):
return self.getboolean('audio', 'play', fallback=False)
def getVolumeControl(self):
# Check if there is a fixed audio source configured.
# If so, we will remove the volume sliders entirely
# instead of setting them up.
return (self.getboolean('audio', 'volumecontrol', fallback=True)
or self.getboolean('audio', 'forcevolumecontrol', fallback=False))
def getBlinderEnabled(self):
return self.getboolean('blinder', 'enabled', fallback=False)
def isBlinderDefault(self):
return not self.has_option('blinder', 'videos')
def getBlinderSources(self):
if self.getBlinderEnabled():
if self.isBlinderDefault():
return ["blinder"]
else:
return self.getList('blinder', 'videos')
else:
return []
def getBlinderVolume(self):
return self.getfloat('source.blinder', 'volume', fallback=1.0)
def getMirrorsEnabled(self):
return self.getboolean('mirrors', 'enabled', fallback=False)
def getMirrorsSources(self):
if self.getMirrorsEnabled():
if self.has_option('mirrors', 'sources'):
return self.getList('mirrors', 'sources')
else:
return self.getSources()
else:
return []
def getOutputBuffers(self, channel):
return self.getint('output-buffers', channel, fallback=500)
def splitOptions(line):
if len(line) == 0:
return None
quote = False
options = [""]
for char in line:
if char == ',' and not quote:
options.append("")
else:
if char == '"':
quote = not quote
options[-1] += char
return options
def get_audio_encoder(self, section):
return self.get(section, 'audioencoder') # => move to audio_codec class
def get_sink_audio_channels(self, section):
return self.getint(section, 'audio_channels')
def get_sink_audio_map(self, section):
return self.get(section, 'audio_map')
def getVideoCodec(self, section):
if self.has_option(section, 'videocodec'):
codec = self.get(section, 'videocodec').split(',', 1)
if len(codec) > 1:
codec, options = self.get(section, 'videocodec').split(',', 1)
return codec, VocConfigParser.splitOptions(options) if options else None
else:
return codec[0], None
return "jpeg", ["quality=90"]
def getVideoEncoder(self, section):
if self.has_option(section, 'videoencoder'):
return self.get(section, 'videoencoder')
return None
def getVideoDecoder(self, section):
if self.has_option(section, 'videodecoder'):
return self.get(section, 'videodecoder')
return None
def getDenoise(self, section):
if self.has_option(section, 'denoise'):
if self.getboolean(section, 'denoise'):
return 1
return 0
def getScaleMethod(self, section):
if self.has_option(section, 'scale-method'):
return self.getint(section, 'scale-method')
return 0
def getDeinterlace(self, section):
return self.getboolean(section, 'deinterlace', fallback=False)
def getAudioCaps(self, section='mix'):
if self.has_option(section, 'audiocaps'):
return self.get(section, 'audiocaps')
elif self.has_option('mix', 'audiocaps'):
return self.get('mix', 'audiocaps')
else:
return "audio/x-raw,format=S16LE,channels=2,layout=interleaved,rate=48000"
def getVideoCaps(self, section='mix'):
if self.has_option(section, 'videocaps'):
return self.get(section, 'videocaps')
elif self.has_option('mix', 'videocaps'):
return self.get('mix', 'videocaps')
else:
return "video/x-raw,format=I420,width=1920,height=1080,framerate=25/1,pixel-aspect-ratio=1/1"
def getPreviewSize(self):
width = self.getint('previews', 'width') if self.has_option(
'previews', 'width') else 320
height = self.getint('previews', 'height') if self.has_option(
'previews', 'height') else int(width * 9 / 16)
return (width, height)
def getLocalPlayoutEnabled(self):
return self.getboolean('localplayout', 'enabled', fallback=False)
def getLocalPlayoutAudioEnabled(self):
return self.getboolean('localplayout', 'audioenabled', fallback=True)
def getRecordingEnabled(self):
return self.getboolean('localplayout', 'record', fallback=True)
def getPreviewsEnabled(self):
return self.getboolean('previews', 'enabled', fallback=False)
def getAVRawOutputEnabled(self):
return self.getboolean('avrawoutput', 'enabled', fallback=True)
def getLocalUIEnabled(self):
return self.getboolean('localui', 'enabled', fallback=False)
def getLocalUIVideoSystem(self):
return self.get('localui', 'system', fallback="autovideosink")
def getLivePreviews(self):
if self.getBlinderEnabled():
singleval = self.get('previews', 'live').lower()
if singleval in ["true", "yes"]:
return ["mix"]
if singleval == "all":
return self.getLiveSources()
previews = self.getList('previews', 'live')
result = []
for preview in previews:
if preview not in self.getLiveSources():
self.log.error(
"source '{}' configured in 'preview/live' must be listed in 'mix/livesources'!".format(preview))
else:
result.append(preview)
return result
else:
self.log.warning("configuration attribute 'preview/live' is set but blinder is not in use!")
return []
    def getComposites(self):
        # Parse the [composites] section into Composites objects for the
        # configured video resolution (delegates to Composites.configure).
        return Composites.configure(self, self.items('composites'), self.getVideoResolution())
    def getTargetComposites(self):
        # Filter the configured composites down to target composites
        # (delegates to Composites.targets).
        return Composites.targets(self, self.getComposites())
    def getTransitions(self, composites):
        # Build Transitions from the [transitions] section for the given
        # composites, timed at the configured frame rate.
        return Transitions.configure(self, self.items('transitions'),
                                     composites,
                                     fps=self.getFramesPerSecond())
def getPreviewNameOverlay(self):
return self.getboolean('previews', 'nameoverlay', fallback=True)
def hasSource(self, source):
return self.has_section('source.{}'.format(source))
def hasOverlay(self):
return self.has_section('overlay')
def getOverlayAutoOff(self):
return self.getboolean('overlay', 'auto-off', fallback=True)
def getOverlayUserAutoOff(self):
return self.getboolean('overlay', 'user-auto-off', fallback=False)
def getVideoSources(self, internal=False):
def source_has_video(source):
return kind_has_video(self.getSourceKind(source))
sources = self.getSources()
if internal:
sources += ["mix"]
if self.getBlinderEnabled():
sources += ['blinder', 'mix-blinded']
return list(filter(source_has_video, sources))
def getAudioSources(self, internal=False):
def source_has_audio(source):
return kind_has_audio(self.getSourceKind(source))
sources = self.getSources()
if internal:
sources += ['mix']
if self.getBlinderEnabled():
sources += ['blinder', 'mix-blinded']
return list(filter(source_has_audio, sources))
| mit | 575d86455506a276deb63977156f7f45 | 34.636546 | 128 | 0.606919 | 3.984508 | false | false | false | false |
simpeg/simpeg | tutorials/03-gravity/plot_inv_1b_gravity_anomaly_irls.py | 1 | 14070 | """
Sparse Norm Inversion of Gravity Anomaly Data
=============================================
Here we invert gravity anomaly data to recover a density contrast model. We formulate the inverse problem as an iteratively
re-weighted least-squares (IRLS) optimization problem. For this tutorial, we
focus on the following:
- Defining the survey from xyz formatted data
- Generating a mesh based on survey geometry
- Including surface topography
- Defining the inverse problem (data misfit, regularization, optimization)
- Specifying directives for the inversion
- Setting sparse and blocky norms
- Plotting the recovered model and data misfit
Although we consider gravity anomaly data in this tutorial, the same approach
can be used to invert gradiometry and other types of geophysical data.
"""
#########################################################################
# Import modules
# --------------
#
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import tarfile
from discretize import TensorMesh
from SimPEG.utils import plot2Ddata, surface2ind_topo, model_builder
from SimPEG.potential_fields import gravity
from SimPEG import (
maps,
data,
data_misfit,
inverse_problem,
regularization,
optimization,
directives,
inversion,
utils,
)
# sphinx_gallery_thumbnail_number = 3
#############################################
# Define File Names
# -----------------
#
# File paths for assets we are loading. To set up the inversion, we require
# topography and field observations. The true model defined on the whole mesh
# is loaded to compare with the inversion result. These files are stored as a
# tar-file on our google cloud bucket:
# "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz"
#

# storage bucket where we have the data
data_source = "https://storage.googleapis.com/simpeg/doc-assets/gravity.tar.gz"

# download the data
downloaded_data = utils.download(data_source, overwrite=True)

# unzip the tarfile
tar = tarfile.open(downloaded_data, "r")
tar.extractall()
tar.close()

# path to the directory containing our data
dir_path = downloaded_data.split(".")[0] + os.path.sep

# files to work with
topo_filename = dir_path + "gravity_topo.txt"
data_filename = dir_path + "gravity_data.obs"
# NOTE(review): model_filename is defined but never read below -- the true
# model is reconstructed analytically in the "Recreate True Model" section.
model_filename = dir_path + "true_model.txt"

#############################################
# Load Data and Plot
# ------------------
#
# Here we load and plot synthetic gravity anomaly data. Topography is generally
# defined as an (N, 3) array. Gravity data is generally defined with 4 columns:
# x, y, z and data.
#

# Load topography
xyz_topo = np.loadtxt(str(topo_filename))

# Load field data
dobs = np.loadtxt(str(data_filename))

# Define receiver locations and observed data
receiver_locations = dobs[:, 0:3]
dobs = dobs[:, -1]  # last column holds the gz anomaly values

# Plot
mpl.rcParams.update({"font.size": 12})
fig = plt.figure(figsize=(7, 5))

ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.85])
plot2Ddata(receiver_locations, dobs, ax=ax1, contourOpts={"cmap": "bwr"})
ax1.set_title("Gravity Anomaly")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("y (m)")

# Symmetric color scale centered on zero so positive/negative anomalies match.
ax2 = fig.add_axes([0.8, 0.1, 0.03, 0.85])
norm = mpl.colors.Normalize(vmin=-np.max(np.abs(dobs)), vmax=np.max(np.abs(dobs)))
cbar = mpl.colorbar.ColorbarBase(
    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.bwr, format="%.1e"
)
cbar.set_label("$mgal$", rotation=270, labelpad=15, size=12)

plt.show()

#############################################
# Assign Uncertainties
# --------------------
#
# Inversion with SimPEG requires that we define standard deviation on our data.
# This represents our estimate of the noise in our data. For gravity inversion,
# a constant floor value is generally applied to all data. For this tutorial,
# the standard deviation on each datum will be 1% of the maximum observed
# gravity anomaly value.
#

maximum_anomaly = np.max(np.abs(dobs))

uncertainties = 0.01 * maximum_anomaly * np.ones(np.shape(dobs))

#############################################
# Defining the Survey
# -------------------
#
# Here, we define survey that will be used for this tutorial. Gravity
# surveys are simple to create. The user only needs an (N, 3) array to define
# the xyz locations of the observation locations. From this, the user can
# define the receivers and the source field.
#

# Define the receivers. The data consist of vertical gravity anomaly measurements.
# The set of receivers must be defined as a list.
receiver_list = gravity.receivers.Point(receiver_locations, components="gz")

receiver_list = [receiver_list]

# Define the source field
source_field = gravity.sources.SourceField(receiver_list=receiver_list)

# Define the survey
survey = gravity.survey.Survey(source_field)

#############################################
# Defining the Data
# -----------------
#
# Here is where we define the data that are inverted. The data are defined by
# the survey, the observation values and the standard deviation.
#

data_object = data.Data(survey, dobs=dobs, standard_deviation=uncertainties)
#############################################
# Defining a Tensor Mesh
# ----------------------
#
# Here, we create the tensor mesh that will be used to invert gravity anomaly
# data. If desired, we could define an OcTree mesh.
#

# 5 m core cells with 5 padding cells expanding by a factor of 1.3 on each side.
dh = 5.0
hx = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
hy = [(dh, 5, -1.3), (dh, 40), (dh, 5, 1.3)]
hz = [(dh, 5, -1.3), (dh, 15)]
mesh = TensorMesh([hx, hy, hz], "CCN")

########################################################
# Starting/Reference Model and Mapping on Tensor Mesh
# ---------------------------------------------------
#
# Here, we create starting and/or reference models for the inversion as
# well as the mapping from the model space to the active cells. Starting and
# reference models can be a constant background value or contain a-priori
# structures.
#

# Find the indices of the active cells in forward model (ones below surface)
ind_active = surface2ind_topo(mesh, xyz_topo)

# Define mapping from model to active cells
nC = int(ind_active.sum())
model_map = maps.IdentityMap(nP=nC)  # model consists of a value for each active cell

# Define and plot starting model
starting_model = np.zeros(nC)

##############################################
# Define the Physics
# ------------------
#
# Here, we define the physics of the gravity problem by using the simulation
# class.
#

simulation = gravity.simulation.Simulation3DIntegral(
    survey=survey, mesh=mesh, rhoMap=model_map, ind_active=ind_active
)

#######################################################################
# Define the Inverse Problem
# --------------------------
#
# The inverse problem is defined by 3 things:
#
# 1) Data Misfit: a measure of how well our recovered model explains the field data
# 2) Regularization: constraints placed on the recovered model and a priori information
# 3) Optimization: the numerical approach used to solve the inverse problem
#

# Define the data misfit. Here the data misfit is the L2 norm of the weighted
# residual between the observed data and the data predicted for a given model.
# Within the data misfit, the residual between predicted and observed data are
# normalized by the data's standard deviation.
dmis = data_misfit.L2DataMisfit(data=data_object, simulation=simulation)
dmis.W = utils.sdiag(1 / uncertainties)

# Define the regularization (model objective function).
reg = regularization.Sparse(mesh, active_cells=ind_active, mapping=model_map)

# [p, qx, qy, qz] norms -- presumably a sparsity-promoting 0-norm on model
# values and smooth 2-norms on the spatial gradients; confirm against the
# SimPEG Sparse regularization documentation.
reg.norms = [0, 2, 2, 2]

# Define how the optimization problem is solved. Here we will use a projected
# Gauss-Newton approach that employs the conjugate gradient solver.
opt = optimization.ProjectedGNCG(
    maxIter=100, lower=-1.0, upper=1.0, maxIterLS=20, maxIterCG=10, tolCG=1e-3
)

# Here we define the inverse problem that is to be solved
inv_prob = inverse_problem.BaseInvProblem(dmis, reg, opt)

#######################################################################
# Define Inversion Directives
# ---------------------------
#
# Here we define any directiveas that are carried out during the inversion. This
# includes the cooling schedule for the trade-off parameter (beta), stopping
# criteria for the inversion and saving inversion results at each iteration.
#

# Defining a starting value for the trade-off parameter (beta) between the data
# misfit and the regularization.
starting_beta = directives.BetaEstimate_ByEig(beta0_ratio=1e0)

# Defines the directives for the IRLS regularization. This includes setting
# the cooling schedule for the trade-off parameter.
update_IRLS = directives.Update_IRLS(
    f_min_change=1e-4,
    max_irls_iterations=30,
    coolEpsFact=1.5,
    beta_tol=1e-2,
)

# Defining the fractional decrease in beta and the number of Gauss-Newton solves
# for each beta value.
# NOTE(review): Update_IRLS above also manages beta cooling -- verify against
# the SimPEG directives documentation that combining it with BetaSchedule is
# intended here.
beta_schedule = directives.BetaSchedule(coolingFactor=5, coolingRate=1)

# Options for outputting recovered models and predicted data for each beta.
save_iteration = directives.SaveOutputEveryIteration(save_txt=False)

# Updating the preconditionner if it is model dependent.
update_jacobi = directives.UpdatePreconditioner()

# Add sensitivity weights
sensitivity_weights = directives.UpdateSensitivityWeights(everyIter=False)

# The directives are defined as a list.
directives_list = [
    update_IRLS,
    sensitivity_weights,
    starting_beta,
    beta_schedule,
    save_iteration,
    update_jacobi,
]

#####################################################################
# Running the Inversion
# ---------------------
#
# To define the inversion object, we need to define the inversion problem and
# the set of directives. We can then run the inversion.
#

# Here we combine the inverse problem and the set of directives
inv = inversion.BaseInversion(inv_prob, directives_list)

# Run inversion
recovered_model = inv.run(starting_model)
############################################################
# Recreate True Model
# -------------------
#

# Define density contrast values for each unit in g/cc
background_density = 0.0
block_density = -0.2
sphere_density = 0.2

# Define model. Models in SimPEG are vector arrays.
true_model = background_density * np.ones(nC)

# You could find the indicies of specific cells within the model and change their
# value to add structures.
ind_block = (
    (mesh.gridCC[ind_active, 0] > -50.0)
    & (mesh.gridCC[ind_active, 0] < -20.0)
    & (mesh.gridCC[ind_active, 1] > -15.0)
    & (mesh.gridCC[ind_active, 1] < 15.0)
    & (mesh.gridCC[ind_active, 2] > -50.0)
    & (mesh.gridCC[ind_active, 2] < -30.0)
)
true_model[ind_block] = block_density

# You can also use SimPEG utilities to add structures to the model more concisely
ind_sphere = model_builder.getIndicesSphere(np.r_[35.0, 0.0, -40.0], 15.0, mesh.gridCC)
ind_sphere = ind_sphere[ind_active]  # restrict the sphere mask to active cells
true_model[ind_sphere] = sphere_density

############################################################
# Plotting True Model and Recovered Model
# ---------------------------------------
#

# Plot True Model
fig = plt.figure(figsize=(9, 4))
# Map active-cell values back onto the full mesh; inactive cells become NaN.
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)

ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.8])
mesh.plot_slice(
    plotting_map * true_model,
    normal="Y",
    ax=ax1,
    ind=int(mesh.shape_cells[1] / 2),
    grid=True,
    clim=(np.min(true_model), np.max(true_model)),
    pcolor_opts={"cmap": "viridis"},
)
ax1.set_title("Model slice at y = 0 m")

ax2 = fig.add_axes([0.85, 0.1, 0.05, 0.8])
norm = mpl.colors.Normalize(vmin=np.min(true_model), vmax=np.max(true_model))
cbar = mpl.colorbar.ColorbarBase(
    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis, format="%.1e"
)
cbar.set_label("$g/cm^3$", rotation=270, labelpad=15, size=12)

plt.show()

# Plot Recovered Model
fig = plt.figure(figsize=(9, 4))
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)

ax1 = fig.add_axes([0.1, 0.1, 0.73, 0.8])
mesh.plot_slice(
    plotting_map * recovered_model,
    normal="Y",
    ax=ax1,
    ind=int(mesh.shape_cells[1] / 2),
    grid=True,
    clim=(np.min(recovered_model), np.max(recovered_model)),
    pcolor_opts={"cmap": "viridis"},
)
ax1.set_title("Model slice at y = 0 m")

ax2 = fig.add_axes([0.85, 0.1, 0.05, 0.8])
norm = mpl.colors.Normalize(vmin=np.min(recovered_model), vmax=np.max(recovered_model))
cbar = mpl.colorbar.ColorbarBase(
    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.viridis
)
cbar.set_label("$g/cm^3$", rotation=270, labelpad=15, size=12)

plt.show()

###################################################################
# Plotting Predicted Data and Normalized Misfit
# ---------------------------------------------
#

# Predicted data with final recovered model
# SimPEG uses right handed coordinate where Z is positive upward.
# This causes gravity signals look "inconsistent" with density values in visualization.
dpred = inv_prob.dpred

# Observed data | Predicted data | Normalized data misfit
data_array = np.c_[dobs, dpred, (dobs - dpred) / uncertainties]

fig = plt.figure(figsize=(17, 4))
plot_title = ["Observed", "Predicted", "Normalized Misfit"]
plot_units = ["mgal", "mgal", ""]

# Pre-allocate per-panel handles for the three side-by-side plots.
ax1 = 3 * [None]
ax2 = 3 * [None]
norm = 3 * [None]
cbar = 3 * [None]
cplot = 3 * [None]
v_lim = [np.max(np.abs(dobs)), np.max(np.abs(dobs)), np.max(np.abs(data_array[:, 2]))]

for ii in range(0, 3):
    ax1[ii] = fig.add_axes([0.33 * ii + 0.03, 0.11, 0.23, 0.84])
    cplot[ii] = plot2Ddata(
        receiver_list[0].locations,
        data_array[:, ii],
        ax=ax1[ii],
        ncontour=30,
        clim=(-v_lim[ii], v_lim[ii]),
        contourOpts={"cmap": "bwr"},
    )
    ax1[ii].set_title(plot_title[ii])
    ax1[ii].set_xlabel("x (m)")
    ax1[ii].set_ylabel("y (m)")

    ax2[ii] = fig.add_axes([0.33 * ii + 0.25, 0.11, 0.01, 0.85])
    norm[ii] = mpl.colors.Normalize(vmin=-v_lim[ii], vmax=v_lim[ii])
    cbar[ii] = mpl.colorbar.ColorbarBase(
        ax2[ii], norm=norm[ii], orientation="vertical", cmap=mpl.cm.bwr
    )
    cbar[ii].set_label(plot_units[ii], rotation=270, labelpad=15, size=12)

plt.show()
| mit | eb3600d9bdd97455546cad7f7438d87f | 30.904762 | 123 | 0.650888 | 3.350798 | false | false | false | false |
simpeg/simpeg | SimPEG/fields.py | 1 | 13551 | import numpy as np
from .simulation import BaseSimulation, BaseTimeSimulation
from .utils import mkvc, validate_type
class Fields:
    """Fancy Field Storage.

    .. code:: python

        fields = Fields(
            simulation=simulation, knownFields={"phi": "CC"}
        )
        fields[:, 'phi'] = phi
        print(fields[src0, 'phi'])
    """

    # Class-level defaults; instances may override them via ``__init__``.
    _dtype = float
    _knownFields = {}
    _aliasFields = {}

    def __init__(
        self, simulation, knownFields=None, aliasFields=None, dtype=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.simulation = simulation
        if knownFields is not None:
            knownFields = validate_type("knownFields", knownFields, dict, cast=False)
            self._knownFields = knownFields
        if aliasFields is not None:
            aliasFields = validate_type("aliasFields", aliasFields, dict, cast=False)
            self._aliasFields = aliasFields
        if dtype is not None:
            self._dtype = dtype

        # A field name must be either known or aliased, never both.
        if any(key in self.aliasFields for key in self.knownFields):
            raise KeyError(
                "Aliased fields and Known Fields have overlapping definitions."
            )
        self._fields = {}
        self.startup()

    @property
    def simulation(self):
        """The simulation object that created these fields

        Returns
        -------
        SimPEG.simulation.BaseSimulation
        """
        return self._simulation

    @simulation.setter
    def simulation(self, value):
        self._simulation = validate_type(
            "simulation", value, BaseSimulation, cast=False
        )

    @property
    def knownFields(self):
        """The known fields of this object.

        The dictionary representing the known fields and their locations on the simulation
        mesh. The keys are the names of the fields, and the values are the location on
        the mesh.

        >>> fields.knownFields
        {'e': 'E', 'phi': 'CC'}

        Would represent that the `e` field and `phi` fields are known, and they are
        located on the mesh edges and cell centers, respectively.

        Returns
        -------
        dict
            They keys are the field names and the values are the field locations.
        """
        return self._knownFields

    @property
    def aliasFields(self):
        """The aliased fields of this object.

        The dictionary representing the aliased fields that can be accessed on this
        object. The keys are the names of the fields, and the values are a list of the
        known field, the aliased field's location on the mesh, and a function that goes
        from the known field to the aliased field.

        >>> fields.aliasFields
        {'b': ['e', 'F', '_e']}

        Returns
        -------
        dict of {str: list}
            They keys are the field names and the values are list consiting of the
            field's alias, it's location on the mesh, and the function (or the name of
            it) to create it from the aliased field.
        """
        return self._aliasFields

    @property
    def dtype(self):
        """The data type of the storage matrix

        Returns
        -------
        dtype or dict of {str : dtype}
        """
        return self._dtype

    # NOTE: a second, redundant definition of the ``knownFields`` property used
    # to live here. It silently shadowed the documented definition above (both
    # returned ``self._knownFields``) and has been removed.

    @property
    def mesh(self):
        """Shortcut to ``self.simulation.mesh``."""
        return self.simulation.mesh

    @property
    def survey(self):
        """Shortcut to ``self.simulation.survey``."""
        return self.simulation.survey

    def startup(self):
        """Hook called at the end of ``__init__``; subclasses may override."""
        pass

    @property
    def approxSize(self):
        """The approximate cost to storing all of the known fields (string in MB)."""
        sz = 0.0
        for f in self.knownFields:
            loc = self.knownFields[f]
            # 8 bytes per entry (double precision) converted to MB.
            sz += np.array(self._storageShape(loc)).prod() * 8.0 / (1024 ** 2)
        return "{0:e} MB".format(sz)

    def _storageShape(self, loc):
        # Rows: number of cells/nodes/faces/edges for the field location code;
        # columns: one per field of the survey.
        n_fields = self.survey._n_fields

        nP = {
            "CC": self.mesh.nC,
            "N": self.mesh.nN,
            "F": self.mesh.nF,
            "E": self.mesh.nE,
        }[loc]

        return (nP, n_fields)

    def _initStore(self, name):
        """Lazily allocate (and cache) the storage array for known field *name*."""
        if name in self._fields:
            return self._fields[name]

        assert name in self.knownFields, "field name is not known."

        loc = self.knownFields[name]

        # dtype may be a single type or a per-field-name mapping.
        if isinstance(self.dtype, dict):
            dtype = self.dtype[name]
        else:
            dtype = self.dtype

        # field = zarr.create(self._storageShape(loc), dtype=dtype)
        field = np.zeros(self._storageShape(loc), dtype=dtype)

        self._fields[name] = field

        return field

    def _srcIndex(self, srcTestList):
        # Slices pass through untouched; anything else is resolved to source
        # indices by the survey.
        if type(srcTestList) is slice:
            ind = srcTestList
        else:
            ind = self.survey.get_source_indices(srcTestList)
        return ind

    def _nameIndex(self, name, accessType):
        """Validate a field name for 'get'/'set' access; None means all fields."""
        if type(name) is slice:
            assert name == slice(
                None, None, None
            ), "Fancy field name slicing is not supported... yet."
            name = None

        if name is None:
            return
        if accessType == "set" and name not in self.knownFields:
            if name in self.aliasFields:
                raise KeyError(
                    "Invalid field name ({0!s}) for setter, you can't "
                    "set an aliased property".format(name)
                )
            else:
                raise KeyError("Invalid field name ({0!s}) for setter".format(name))

        elif accessType == "get" and (
            name not in self.knownFields and name not in self.aliasFields
        ):
            raise KeyError("Invalid field name ({0!s}) for getter".format(name))
        return name

    def _index_name_srclist_from_key(self, key, accessType):
        # Keys take the form [src] or [src, fieldName].
        if not isinstance(key, tuple):
            key = (key,)
        if len(key) == 1:
            key += (None,)

        assert len(key) == 2, "must be [Src, fieldName]"

        srcTestList, name = key
        name = self._nameIndex(name, accessType)
        ind = self._srcIndex(srcTestList)
        if isinstance(srcTestList, slice):
            srcTestList = self.survey.source_list[srcTestList]
        return ind, name, srcTestList

    def __setitem__(self, key, value):
        ind, name, src_list = self._index_name_srclist_from_key(key, "set")
        if name is None:
            # Setting all fields at once requires a {name: value} dict.
            assert isinstance(
                value, dict
            ), "New fields must be a dictionary, if field is not specified."
            newFields = value
        elif name in self.knownFields:
            newFields = {name: value}
        else:
            raise Exception("Unknown setter")

        for name in newFields:
            field = self._initStore(name)
            self._setField(field, newFields[name], name, ind)

    def __getitem__(self, key):
        ind, name, src_list = self._index_name_srclist_from_key(key, "get")
        if name is None:
            # Return every stored field for the selected sources.
            out = {}
            for name in self._fields:
                out[name] = self._getField(name, ind, src_list)
            return out
        return self._getField(name, ind, src_list)

    def _setField(self, field, val, name, ind):
        if isinstance(val, np.ndarray) and (
            field.shape[0] == field.size or val.ndim == 1
        ):
            # Promote 1-D data to a column vector so broadcasting is explicit.
            val = mkvc(val, 2)
        field[:, ind] = val

    def _getField(self, name, ind, src_list):
        # ind will always be an list, thus the output will always
        # be (len(fields), n_inds)
        if name in self._fields:
            out = self._fields[name][:, ind]
        else:
            # Aliased fields: derive from the stored (aliased) field via func.
            alias, loc, func = self.aliasFields[name]

            if isinstance(func, str):
                assert hasattr(self, func), (
                    "The alias field function is a string, but it does not "
                    "exist in the Fields class."
                )
                func = getattr(self, func)
            if not isinstance(src_list, list):
                src_list = [src_list]
            out = func(self._fields[alias][:, ind], src_list)

        # if out.shape[0] == out.size or out.ndim == 1:
        #     out = mkvc(out, 2)
        return out

    def __contains__(self, other):
        # Aliased names count as contained once their backing field is stored.
        if other in self.aliasFields:
            other = self.aliasFields[other][0]
        return self._fields.__contains__(other)
class TimeFields(Fields):
    """Fancy Field Storage for time domain problems

    .. code:: python

        fields = TimeFields(simulation=simulation, knownFields={'phi':'CC'})
        fields[:,'phi', timeInd] = phi
        print(fields[src0,'phi'])
    """

    @property
    def simulation(self):
        """The simulation object that created these fields

        Returns
        -------
        SimPEG.simulation.BaseTimeSimulation
        """
        return self._simulation

    @simulation.setter
    def simulation(self, value):
        # Narrower check than the base class: time fields require a time simulation.
        self._simulation = validate_type(
            "simulation", value, BaseTimeSimulation, cast=False
        )

    def _storageShape(self, loc):
        # (n mesh locations, n sources, n time steps + 1 for the initial state)
        nP = {
            "CC": self.mesh.nC,
            "N": self.mesh.nN,
            "F": self.mesh.nF,
            "E": self.mesh.nE,
        }[loc]
        nSrc = self.survey.nSrc
        nT = self.simulation.nT + 1
        return (nP, nSrc, nT)

    def _index_name_srclist_from_key(self, key, accessType):
        # Keys take the form [src], [src, fieldName] or [src, fieldName, times];
        # a missing time index defaults to "all times".
        if not isinstance(key, tuple):
            key = (key,)
        if len(key) == 1:
            key += (None,)
        if len(key) == 2:
            key += (slice(None, None, None),)

        assert len(key) == 3, "must be [Src, fieldName, times]"

        srcTestList, name, timeInd = key
        name = self._nameIndex(name, accessType)
        srcInd = self._srcIndex(srcTestList)
        if isinstance(srcTestList, slice):
            srcTestList = self.survey.source_list[srcTestList]
        return (srcInd, timeInd), name, srcTestList

    def _correctShape(self, name, ind, deflate=False):
        # Shape of the (nP, nSrc, nT) selection described by *ind* for *name*;
        # with deflate=True singleton axes are dropped (but at least 2-D).
        srcInd, timeInd = ind
        if name in self.knownFields:
            loc = self.knownFields[name]
        else:
            loc = self.aliasFields[name][1]
        nP, total_nSrc, total_nT = self._storageShape(loc)
        # Boolean-mask trick: count how many sources/times the index selects.
        nSrc = np.ones(total_nSrc, dtype=bool)[srcInd].sum()
        nT = np.ones(total_nT, dtype=bool)[timeInd].sum()
        shape = nP, nSrc, nT
        if deflate:
            shape = tuple([s for s in shape if s > 1])
            if len(shape) == 1:
                shape = shape + (1,)
        return shape

    def _setField(self, field, val, name, ind):
        srcInd, timeInd = ind
        shape = self._correctShape(name, ind)
        if isinstance(val, np.ndarray) and val.size == 1:
            val = val[0]  # unwrap one-element arrays to a scalar
        if np.isscalar(val):
            field[:, srcInd, timeInd] = val
            return
        if val.size != np.array(shape).prod():
            raise ValueError("Incorrect size for data.")
        # Fortran order matches the column-major layout used throughout.
        correctShape = field[:, srcInd, timeInd].shape
        field[:, srcInd, timeInd] = val.reshape(correctShape, order="F")

    def _getField(self, name, ind, src_list):
        srcInd, timeInd = ind

        if name in self._fields:
            out = self._fields[name][:, srcInd, timeInd]
        else:
            # Aliased fields: derive from the stored field via func.
            alias, loc, func = self.aliasFields[name]

            if isinstance(func, str):
                assert hasattr(self, func), (
                    "The alias field function is a string, but it does "
                    "not exist in the Fields class."
                )
                func = getattr(self, func)
            pointerFields = self._fields[alias][:, srcInd, timeInd]
            pointerShape = self._correctShape(alias, ind)
            pointerFields = pointerFields.reshape(pointerShape, order="F")

            # First try to return the function as three arguments (without timeInd)
            if timeInd == slice(None, None, None):
                try:
                    # assume it will take care of integrating over all times
                    return func(pointerFields, srcInd)
                except TypeError:
                    pass

            timeII = np.arange(self.simulation.nT + 1)[timeInd]
            if not isinstance(src_list, list):
                src_list = [src_list]

            if timeII.size == 1:
                # Single time step: deflate singleton axes before calling func.
                pointerShapeDeflated = self._correctShape(alias, ind, deflate=True)
                pointerFields = pointerFields.reshape(pointerShapeDeflated, order="F")
                out = func(pointerFields, src_list, timeII)
            else:  # loop over the time steps
                nT = pointerShape[2]
                out = list(range(nT))
                for i, TIND_i in enumerate(timeII):
                    fieldI = pointerFields[:, :, i]
                    if fieldI.shape[0] == fieldI.size:
                        fieldI = mkvc(fieldI, 2)
                    out[i] = func(fieldI, src_list, TIND_i)
                    # Pad results back up to 3-D so they concatenate on axis 2.
                    if out[i].ndim == 1:
                        out[i] = out[i][:, np.newaxis, np.newaxis]
                    elif out[i].ndim == 2:
                        out[i] = out[i][:, :, np.newaxis]
                out = np.concatenate(out, axis=2)

        shape = self._correctShape(name, ind, deflate=True)
        return out.reshape(shape, order="F")
| mit | bfd53309a8eca4207f60ed9b5f141930 | 31.890777 | 90 | 0.546676 | 4.1593 | false | false | false | false |
santoshphilip/eppy | eppy/idfreader.py | 1 | 10391 | # Copyright (c) 2012, 2022 Santosh Philip
# Copyright (c) 2021 Dimitris Mantas
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""use epbunch"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from itertools import chain
from eppy.EPlusInterfaceFunctions import readidf
import eppy.bunchhelpers as bunchhelpers
from eppy.EPlusInterfaceFunctions.structures import CaseInsensitiveDict
from eppy.bunch_subclass import EpBunch
# from eppy.bunch_subclass import fieldnames, fieldvalues
import eppy.iddgaps as iddgaps
import eppy.function_helpers as fh
from eppy.idf_msequence import Idf_MSequence
import eppy.ext_field_functions as extff
class NoIDDFieldsError(Exception):
    """Exception for a shortage of IDD fields (defined here, not raised within this module)."""
def iddversiontuple(afile):
    """Return the IDD version from the first line as a tuple, e.g. ``(8, 9, 0)``.

    `afile` may be a file path or an already-open binary file handle. Returns
    ``(0,)`` when the first line is empty. A file opened here from a path is
    now closed again (the original leaked the handle); a caller-supplied
    handle is left open, with its position advanced past the first line.
    """

    def versiontuple(vers):
        """Convert a dotted version string like '8.9.0' to (8, 9, 0)."""
        return tuple([int(num) for num in vers.split(".")])

    opened_here = False
    try:
        fhandle = open(afile, "rb")
        opened_here = True
    except TypeError:
        # afile is already a file-like object.
        fhandle = afile
    try:
        line1 = fhandle.readline()
    finally:
        if opened_here:
            fhandle.close()
    try:
        line1 = line1.decode("ISO-8859-2")
    except AttributeError:
        pass  # already text
    line = line1.strip()
    if line1 == "":
        return (0,)
    vers = line.split()[-1]
    return versiontuple(vers)
def makeabunch(commdct, obj, obj_i, debugidd=True, block=None):
    """make a bunch from the object

    Builds an EpBunch for IDF object *obj* using the IDD comment data in
    ``commdct[obj_i]``. With ``debugidd=True``, an IDF object that has more
    fields than its IDD definition triggers an in-place extension of *block*
    and *commdct* (via extff.increaseIDDfields) followed by a re-conversion of
    the extra field values.
    """
    objidd = commdct[obj_i]
    objfields = [comm.get("field") for comm in commdct[obj_i]]
    if debugidd:
        if len(obj) > len(objfields):
            # there are not enough fields in the IDD to match the IDF
            # -- increase the number of fields in the IDD (in block and commdct)
            # -- start
            n = len(obj) - len(objfields)
            key_txt = obj[0]
            objfields = extff.increaseIDDfields(block, commdct, obj_i, key_txt, n)
            # -- increase the number of fields in the IDD (in block and commdct)
            # -- end
            #
            # -- convertfields for added fields - start
            key_i = obj_i
            key_comm = commdct[obj_i]
            try:
                inblock = block[obj_i]
            except TypeError as e:
                # block is None -> convertfields falls back to its default
                inblock = None
            obj = convertfields(key_comm, obj, inblock)
            # -- convertfields for added fields - end
    objfields[0] = ["key"]
    objfields = [field[0] for field in objfields]
    obj_fields = [bunchhelpers.makefieldname(field) for field in objfields]
    bobj = EpBunch(obj, obj_fields, objidd)
    return bobj
def makebunches(data, commdct):
    """Build a case-insensitive dict mapping each IDF key to its EpBunch list."""
    bunchdt = CaseInsensitiveDict()
    ddtt, dtls = data.dt, data.dtls
    for obj_i, key in enumerate(dtls):
        key = key.upper()
        bunchdt[key] = [makeabunch(commdct, obj, obj_i) for obj in ddtt[key]]
    return bunchdt
def makebunches_alter(data, commdct, theidf, block=None):
    """Build bunches wrapped in Idf_MSequence so edits sync back to *data*."""
    bunchdt = CaseInsensitiveDict()
    dt = data.dt
    for obj_i, key in enumerate(data.dtls):
        key = key.upper()
        objs = dt[key]
        bobjs = [makeabunch(commdct, obj, obj_i, block=block) for obj in objs]
        bunchdt[key] = Idf_MSequence(bobjs, objs, theidf)
    return bunchdt
class ConvInIDD(object):
    """Converters from raw IDF string values to int / float / as-is.

    ``no_type`` uses the IDD variable name: names starting with 'N' are
    numeric and are parsed as floats when possible.
    """

    def no_type(self, x, avar):
        """Convert *x* to float when *avar* marks it numeric ('N...')."""
        if not avar.startswith("N"):
            return x  # starts with A -> not a number
        try:
            return float(x)
        except ValueError:
            return x  # e.g. x == 'autosize'

    def integer(self, x, y):
        """Convert *x* to int, returning it unchanged on failure."""
        try:
            return int(x)
        except ValueError:
            return x

    def real(self, x, y):
        """Convert *x* to float, returning it unchanged on failure."""
        try:
            return float(x)
        except ValueError:
            return x

    def conv_dict(self):
        """Map IDD type names to the matching converter methods."""
        return {"integer": self.integer, "real": self.real, "no_type": self.no_type}
def convertafield(field_comm, field_val, field_iddname):
    """Convert one field value using the 'type' recorded in its IDD comment."""
    convinidd = ConvInIDD()
    typ = field_comm.get("type", [None])[0]
    converter = convinidd.conv_dict().get(typ, convinidd.no_type)
    return converter(field_val, field_iddname)
def convertfields(key_comm, obj, inblock=None):
    """Convert every field of one IDF object in place (and return it).

    The first item of *obj* is the object key and is never converted. When
    *inblock* (the IDD variable names, e.g. 'N1'/'A1') is missing, a dummy
    list is used so ``no_type`` leaves values untouched.

    Fix: the original instantiated an unused local ``ConvInIDD`` here;
    ``convertafield`` already creates its own.
    """
    if not inblock:
        inblock = ["does not start with N"] * len(obj)
    for i, (f_comm, f_val, f_iddname) in enumerate(zip(key_comm, obj, inblock)):
        if i == 0:
            # inblock[0] is the iddobject key. No conversion here
            continue
        obj[i] = convertafield(f_comm, f_val, f_iddname)
    return obj
def convertallfields(data, commdct, block=None):
    """Convert the field values of every object in *data* in place.

    Iterates each object type and delegates per-object conversion to
    ``convertfields``.
    """
    for key in list(data.dt.keys()):
        objs = data.dt[key]
        # key_i / key_comm / inblock are invariant per key -- hoisted out of
        # the per-object loop (dtls.index is a linear scan on every call).
        key_i = data.dtls.index(key)
        key_comm = commdct[key_i]
        try:
            inblock = block[key_i]
        except TypeError:
            # block is None
            inblock = None
        for i, obj in enumerate(objs):
            objs[i] = convertfields(key_comm, obj, inblock)
def addfunctions(dtls, bunchdt):
    """Attach geometry helper functions to every detailed-surface bunch."""
    detailed_keys = [
        "BuildingSurface:Detailed",
        "Wall:Detailed",
        "RoofCeiling:Detailed",
        "Floor:Detailed",
        "FenestrationSurface:Detailed",
        "Shading:Site:Detailed",
        "Shading:Building:Detailed",
        "Shading:Zone:Detailed",
    ]
    for sname in detailed_keys:
        key = sname.upper()
        if key not in bunchdt:
            continue
        for surface in bunchdt[key]:
            # One dict per surface so each bunch owns its function table.
            func_dict = {
                "area": fh.area,
                "height": fh.height,  # not working correctly
                "width": fh.width,  # not working correctly
                "azimuth": fh.azimuth,
                "tilt": fh.tilt,
                "coords": fh.getcoords,  # needed for debugging
            }
            try:
                surface.__functions.update(func_dict)
            except KeyError:
                surface.__functions = func_dict
def addfunctions2new(abunch, key):
    """Attach geometry helper functions to a newly created bunch/munch object.

    Parameters
    ----------
    abunch : bunch/munch object
        The freshly created idf object.
    key : str
        Upper-cased idd key of the object; only detailed surface keys get
        the helper functions.

    Returns
    -------
    bunch/munch object
        The same object, possibly augmented with a ``__functions`` dict.
    """
    snames = [
        "BuildingSurface:Detailed",
        "Wall:Detailed",
        "RoofCeiling:Detailed",
        "Floor:Detailed",
        "FenestrationSurface:Detailed",
        "Shading:Site:Detailed",
        "Shading:Building:Detailed",
        "Shading:Zone:Detailed",
    ]
    snames = [sname.upper() for sname in snames]
    if key in snames:
        func_dict = {
            "area": fh.area,
            "height": fh.height,  # not working correctly
            "width": fh.width,  # not working correctly
            "azimuth": fh.azimuth,
            "tilt": fh.tilt,
            "coords": fh.getcoords,  # needed for debugging
        }
        try:
            abunch.__functions.update(func_dict)
        except KeyError:
            abunch.__functions = func_dict
    return abunch
def idfreader(fname, iddfile, conv=True):
    """Read an idf file and return its objects as bunches.

    Parameters
    ----------
    fname : str or file-like
        The idf file to read.
    iddfile : str or file-like
        The idd (data dictionary) file describing the idf format.
    conv : bool, optional
        When True (default), convert field values to float/int based on
        the idd field types.

    Returns
    -------
    tuple
        ``(bunchdt, data, commdct, idd_index)``.
    """
    data, commdct, idd_index = readidf.readdatacommdct(fname, iddfile=iddfile)
    if conv:
        convertallfields(data, commdct)
    # fill gaps in idd
    dtls = data.dtls
    nofirstfields = iddgaps.missingkeys_standard(
        commdct, dtls, skiplist=["TABLE:MULTIVARIABLELOOKUP"]
    )
    iddgaps.missingkeys_nonstandard(None, commdct, dtls, nofirstfields)
    bunchdt = makebunches(data, commdct)
    return bunchdt, data, commdct, idd_index
def idfreader1(fname, iddfile, theidf, conv=True, commdct=None, block=None):
    """Read an idf file and return its objects as bunches (IDF-aware variant).

    Parameters
    ----------
    fname : str or file-like
        The idf file to read.
    iddfile : str or file-like
        The idd (data dictionary) file describing the idf format.
    theidf : IDF
        The IDF instance the bunches will belong to.
    conv : bool, optional
        When True (default), convert field values to float/int based on
        the idd field types.
    commdct : list, optional
        Previously parsed field comments; reused when provided.
    block : list, optional
        Previously parsed idd field-name blocks; reused when provided.

    Returns
    -------
    tuple
        ``(bunchdt, block, data, commdct, idd_index, versiontuple)``.
    """
    versiontuple = iddversiontuple(iddfile)
    block, data, commdct, idd_index = readidf.readdatacommdct1(
        fname, iddfile=iddfile, commdct=commdct, block=block
    )
    if conv:
        convertallfields(data, commdct, block)
    # fill gaps in idd
    dtls = data.dtls
    # Older idd versions (< 8) mis-describe this table object; skip it there.
    if versiontuple < (8,):
        skiplist = ["TABLE:MULTIVARIABLELOOKUP"]
    else:
        skiplist = None
    nofirstfields = iddgaps.missingkeys_standard(commdct, dtls, skiplist=skiplist)
    iddgaps.missingkeys_nonstandard(block, commdct, dtls, nofirstfields)
    bunchdt = makebunches_alter(data, commdct, theidf, block)
    return bunchdt, block, data, commdct, idd_index, versiontuple
# complete -- remove this junk below
# working code - working on it now.
# N3, A4, M8, A5
#
# N3, A4, M8, A5
# N4, A6, M9, A7
# N5, A8, M10, A9
# N6, A10, M11, A11
# ref = idf1.newidfobject("Refrigeration:WalkIn".upper())
# lastvars = ["N3", "A4", "M8", "A5"]
# lastvars = [u'A18',
# u'N29',
# u'N30',
# u'N31',
# u'N32',
# u'N33',
# u'A19',
# u'N34',
# u'N35',
# u'N36',
# u'A20',
# u'A21']
# alpha_lastvars = [i[0] for i in lastvars]
# int_lastvars = [int(i[1:]) for i in lastvars]
#
#
#
# n = 2
#
# lst = []
# for alpha, start in zip(alpha_lastvars, int_lastvars):
# step = alpha_lastvars.count(alpha)
# rng = range(start +1, start + 1 + n * step, step)
# lst.append(["{}{}".format(alpha, item) for item in rng])
#
# from itertools import chain
# c = list(chain(*zip(*lst)))
#
#
| mit | 6aa6d4127023761d7e42701bc0064983 | 30.679878 | 82 | 0.587335 | 3.386897 | false | false | false | false |
simpeg/simpeg | tutorials/06-ip/plot_fwd_2_dcip2d.py | 1 | 16333 | # -*- coding: utf-8 -*-
"""
2.5D Forward Simulation of a DCIP Line
======================================
Here we use the module *SimPEG.electromagnetics.static.resistivity* to predict
DC resistivity data and the module *SimPEG.electromagnetics.static.induced_polarization*
to predict IP data for a dipole-dipole survey. In this tutorial, we focus on
the following:
- How to define the survey
- How to define the problem
- How to predict DC resistivity data for a synthetic resistivity model
- How to predict IP data for a synthetic chargeability model
- How to include surface topography
- The units of the models and resulting data
This tutorial is split into two parts. First we create a resistivity model and
predict DC resistivity data as measured voltages. Next we create a chargeability
model and a background conductivity model to compute IP data defined as
secondary potentials. We show how DC and IP in units of Volts can be plotted on
pseudo-sections as apparent conductivities and apparent chargeabilities.
"""
#########################################################################
# Import modules
# --------------
#
from discretize import TreeMesh
from discretize.utils import mkvc, refine_tree_xyz
from SimPEG.utils import model_builder, surface2ind_topo
from SimPEG.utils.io_utils.io_utils_electromagnetics import write_dcip2d_ubc
from SimPEG import maps, data
from SimPEG.electromagnetics.static import resistivity as dc
from SimPEG.electromagnetics.static import induced_polarization as ip
from SimPEG.electromagnetics.static.utils.static_utils import (
generate_dcip_sources_line,
plot_pseudosection,
apparent_resistivity_from_voltage,
)
import os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
try:
    from pymatsolver import Pardiso as Solver
except ImportError:
    # Fall back to SimPEG's (slower) LU solver when pymatsolver is absent.
    from SimPEG import SolverLU as Solver
# Global plot formatting for the tutorial figures.
mpl.rcParams.update({"font.size": 16})
# Set True to write topography and noisy synthetic data files at the end.
write_output = False
# sphinx_gallery_thumbnail_number = 5
###############################################################
# Defining Topography
# -------------------
#
# Here we define surface topography as an (N, 3) numpy array. Topography could
# also be loaded from a file. In our case, our survey takes place within a set
# of valleys that run North-South.
#
x_topo, y_topo = np.meshgrid(
    np.linspace(-3000, 3000, 601), np.linspace(-3000, 3000, 101)
)
# Sinusoidal topography in x (800 m period); no y dependence, so the
# valleys run North-South.
z_topo = 40.0 * np.sin(2 * np.pi * x_topo / 800) - 40.0
x_topo, y_topo, z_topo = mkvc(x_topo), mkvc(y_topo), mkvc(z_topo)
topo_xyz = np.c_[x_topo, y_topo, z_topo]
# Create 2D topography. Since our 3D topography only changes in the x direction,
# it is easy to define the 2D topography projected along the survey line. For
# arbitrary topography and for an arbitrary survey orientation, the user must
# define the 2D topography along the survey line.
topo_2d = np.unique(topo_xyz[:, [0, 2]], axis=0)
#####################################################################
# Create Dipole-Dipole Survey
# ---------------------------
#
# Here we define a single EW survey line that uses a dipole-dipole configuration.
# For the source, we must define the AB electrode locations. For the receivers
# we must define the MN electrode locations. Instead of creating the survey
# from scratch (see 1D example), we will use the *generate_dcip_sources_line* utility.
#
# Define survey line parameters
survey_type = "dipole-dipole"
dimension_type = "2D"
dc_data_type = "volt"
end_locations = np.r_[-400.0, 400.0]
station_separation = 40.0
num_rx_per_src = 10
# Generate source list for DC survey line
source_list = generate_dcip_sources_line(
    survey_type,
    dc_data_type,
    dimension_type,
    end_locations,
    topo_xyz,
    num_rx_per_src,
    station_separation,
)
# Define survey
dc_survey = dc.survey.Survey(source_list)
###############################################################
# Create OcTree Mesh
# ------------------
#
# Here, we create the OcTree mesh that will be used to predict both DC
# resistivity and IP data.
#
dh = 4  # base cell width
dom_width_x = 3200.0  # domain width x
dom_width_z = 2400.0  # domain width z
nbcx = 2 ** int(np.round(np.log(dom_width_x / dh) / np.log(2.0)))  # num. base cells x
nbcz = 2 ** int(np.round(np.log(dom_width_z / dh) / np.log(2.0)))  # num. base cells z
# Define the base mesh
hx = [(dh, nbcx)]
hz = [(dh, nbcz)]
mesh = TreeMesh([hx, hz], x0="CN")
# Mesh refinement based on topography
mesh = refine_tree_xyz(
    mesh,
    topo_xyz[:, [0, 2]],
    octree_levels=[0, 0, 4, 4],
    method="surface",
    finalize=False,
)
# Mesh refinement near transmitters and receivers. First we need to obtain the
# set of unique electrode locations.
electrode_locations = np.c_[
    dc_survey.locations_a,
    dc_survey.locations_b,
    dc_survey.locations_m,
    dc_survey.locations_n,
]
unique_locations = np.unique(
    np.reshape(electrode_locations, (4 * dc_survey.nD, 2)), axis=0
)
mesh = refine_tree_xyz(
    mesh, unique_locations, octree_levels=[4, 4], method="radial", finalize=False
)
# Refine core mesh region
xp, zp = np.meshgrid([-600.0, 600.0], [-400.0, 0.0])
xyz = np.c_[mkvc(xp), mkvc(zp)]
mesh = refine_tree_xyz(
    mesh, xyz, octree_levels=[0, 0, 2, 8], method="box", finalize=False
)
mesh.finalize()
###############################################################
# Create Conductivity Model and Mapping for OcTree Mesh
# -----------------------------------------------------
#
# Here we define the conductivity model that will be used to predict DC
# resistivity data. The model consists of a conductive sphere and a
# resistive sphere within a moderately conductive background. Note that
# you can carry through this work flow with a resistivity model if desired.
#
# Define conductivity model in S/m (or resistivity model in Ohm m)
air_conductivity = 1e-8
background_conductivity = 1e-2
conductor_conductivity = 1e-1
resistor_conductivity = 1e-3
# Find active cells in forward modeling (cells below surface)
ind_active = surface2ind_topo(mesh, topo_xyz[:, [0, 2]])
# Define mapping from model to active cells
nC = int(ind_active.sum())
conductivity_map = maps.InjectActiveCells(mesh, ind_active, air_conductivity)
# Define model
conductivity_model = background_conductivity * np.ones(nC)
ind_conductor = model_builder.getIndicesSphere(np.r_[-120.0, -160.0], 60.0, mesh.gridCC)
ind_conductor = ind_conductor[ind_active]
conductivity_model[ind_conductor] = conductor_conductivity
ind_resistor = model_builder.getIndicesSphere(np.r_[120.0, -100.0], 60.0, mesh.gridCC)
ind_resistor = ind_resistor[ind_active]
conductivity_model[ind_resistor] = resistor_conductivity
# Plot Conductivity Model
fig = plt.figure(figsize=(9, 4))
plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
norm = LogNorm(vmin=1e-3, vmax=1e-1)
ax1 = fig.add_axes([0.14, 0.17, 0.68, 0.7])
mesh.plot_image(
    plotting_map * conductivity_model, ax=ax1, grid=False, pcolor_opts={"norm": norm}
)
ax1.set_xlim(-600, 600)
ax1.set_ylim(-600, 0)
ax1.set_title("Conductivity Model")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("z (m)")
ax2 = fig.add_axes([0.84, 0.17, 0.03, 0.7])
cbar = mpl.colorbar.ColorbarBase(ax2, norm=norm, orientation="vertical")
cbar.set_label(r"$\sigma$ (S/m)", rotation=270, labelpad=15, size=12)
plt.show()
###############################################################
# Project Survey to Discretized Topography
# ----------------------------------------
#
# It is important that electrodes are not modeled as being in the air. Even if the
# electrodes are properly located along surface topography, they may lie above
# the discretized topography. This step is carried out to ensure all electrodes
# lie on the discretized surface.
#
dc_survey.drape_electrodes_on_topography(mesh, ind_active, option="top")
#######################################################################
# Predict DC Resistivity Data
# ---------------------------
#
# Here we predict DC resistivity data. If the keyword argument *sigmaMap* is
# defined, the simulation will expect a conductivity model. If the keyword
# argument *rhoMap* is defined, the simulation will expect a resistivity model.
#
dc_simulation = dc.Simulation2DNodal(
    mesh, survey=dc_survey, sigmaMap=conductivity_map, solver=Solver
)
# Predict the data by running the simulation. The data are the raw voltage in
# units of volts.
dpred_dc = dc_simulation.dpred(conductivity_model)
#######################################################################
# Plotting DC Data in Pseudo-Section
# ----------------------------------
#
# Here, we demonstrate how to plot 2D DC data in pseudo-section.
# First, we plot the voltages in pseudo-section as a scatter plot. This
# allows us to visualize the pseudo-sensitivity locations for our survey.
# Next, we plot the apparent conductivities in pseudo-section as a filled
# contour plot.
#
# Plot voltages pseudo-section
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
plot_pseudosection(
    dc_survey,
    dpred_dc,
    "scatter",
    ax=ax1,
    scale="log",
    cbar_label="V/A",
    scatter_opts={"cmap": mpl.cm.viridis},
)
ax1.set_title("Normalized Voltages")
plt.show()
# Get apparent conductivities from volts and survey geometry
apparent_conductivities = 1 / apparent_resistivity_from_voltage(dc_survey, dpred_dc)
# Plot apparent conductivity pseudo-section
fig = plt.figure(figsize=(12, 5))
ax1 = fig.add_axes([0.1, 0.15, 0.75, 0.78])
plot_pseudosection(
    dc_survey,
    apparent_conductivities,
    "contourf",
    ax=ax1,
    scale="log",
    cbar_label="S/m",
    mask_topography=True,
    contourf_opts={"levels": 20, "cmap": mpl.cm.viridis},
)
ax1.set_title("Apparent Conductivity")
plt.show()
#######################################################################
# Define IP Survey
# ----------------
#
# The geometry of the survey was defined earlier. We will define the IP
# data as apparent chargeability in V/V.
#
# Generate source list for IP survey line
ip_data_type = "apparent_chargeability"
source_list = generate_dcip_sources_line(
    survey_type,
    ip_data_type,
    dimension_type,
    end_locations,
    topo_xyz,
    num_rx_per_src,
    station_separation,
)
# Define survey
ip_survey = ip.survey.Survey(source_list, survey_type=survey_type)
# Drape over discrete topography
ip_survey.drape_electrodes_on_topography(mesh, ind_active, option="top")
###############################################################
# Create Chargeability Model and Mapping for OcTree Mesh
# ------------------------------------------------------
#
# Here we define the chargeability model that will be used to predict IP data.
# Here we assume that the conductive sphere is also chargeable but the resistive
# sphere is not. Here, the chargeability is defined in units of V/V.
#
# Define chargeability model as intrinsic chargeability (V/V).
air_chargeability = 0.0
background_chargeability = 1e-6
sphere_chargeability = 1e-1
# Find active cells in forward modeling (cells below surface)
ind_active = surface2ind_topo(mesh, topo_xyz[:, [0, 2]])
# Define mapping from model to active cells
nC = int(ind_active.sum())
chargeability_map = maps.InjectActiveCells(mesh, ind_active, air_chargeability)
# Define chargeability model
chargeability_model = background_chargeability * np.ones(nC)
ind_chargeable = model_builder.getIndicesSphere(
    np.r_[-120.0, -160.0], 60.0, mesh.gridCC
)
ind_chargeable = ind_chargeable[ind_active]
chargeability_model[ind_chargeable] = sphere_chargeability
# Plot Chargeability Model
fig = plt.figure(figsize=(9, 4))
ax1 = fig.add_axes([0.14, 0.17, 0.68, 0.7])
mesh.plot_image(
    plotting_map * chargeability_model,
    ax=ax1,
    grid=False,
    clim=(background_chargeability, sphere_chargeability),
    pcolor_opts={"cmap": mpl.cm.plasma},
)
ax1.set_xlim(-600, 600)
ax1.set_ylim(-600, 0)
ax1.set_title("Intrinsic Chargeability")
ax1.set_xlabel("x (m)")
ax1.set_ylabel("z (m)")
ax2 = fig.add_axes([0.84, 0.17, 0.03, 0.7])
norm = mpl.colors.Normalize(vmin=background_chargeability, vmax=sphere_chargeability)
cbar = mpl.colorbar.ColorbarBase(
    ax2, norm=norm, orientation="vertical", cmap=mpl.cm.plasma
)
cbar.set_label("Intrinsic Chargeability (V/V)", rotation=270, labelpad=15, size=12)
plt.show()
#######################################################################
# Predict IP Data
# ---------------
#
# Here we use a chargeability model and a background conductivity/resistivity
# model to predict IP data.
#
# We use the keyword argument *sigma* to define the background conductivity on
# the mesh. We could use the keyword argument *rho* to accomplish the same thing
# using a background resistivity model.
simulation_ip = ip.Simulation2DNodal(
    mesh,
    survey=ip_survey,
    etaMap=chargeability_map,
    sigma=conductivity_map * conductivity_model,
    solver=Solver,
)
# Run the forward simulation to predict IP data. Since the IP data type was
# set to 'apparent_chargeability', the predicted data are already apparent
# chargeabilities in V/V (IP voltage normalized by the DC voltage).
dpred_ip = simulation_ip.dpred(chargeability_model)
###############################################
# Plot 2D IP Data in Pseudosection
# --------------------------------
#
# Here we plot the apparent conductivities (from the DC data) and the
# apparent chargeabilities (the predicted IP data, in V/V) in pseudo-section.
fig = plt.figure(figsize=(12, 11))
# Plot apparent conductivity
ax1 = fig.add_axes([0.1, 0.58, 0.7, 0.35])
cax1 = fig.add_axes([0.82, 0.58, 0.025, 0.35])
plot_pseudosection(
    dc_survey,
    apparent_conductivities,
    "contourf",
    ax=ax1,
    cax=cax1,
    scale="log",
    cbar_label="S/m",
    mask_topography=True,
    contourf_opts={"levels": 20, "cmap": mpl.cm.viridis},
)
ax1.set_title("Apparent Conductivity")
# Plot apparent chargeability
ax2 = fig.add_axes([0.1, 0.08, 0.7, 0.35])
cax2 = fig.add_axes([0.82, 0.08, 0.025, 0.35])
plot_pseudosection(
    ip_survey,
    dpred_ip,
    "contourf",
    ax=ax2,
    cax=cax2,
    scale="linear",
    cbar_label="V/V",
    mask_topography=True,
    contourf_opts={"levels": 20, "cmap": mpl.cm.plasma},
)
ax2.set_title("Apparent Chargeability (V/V)")
plt.show()
#######################################################################
# Write Outputs (Optional)
# ------------------------
#
if write_output:
    dir_path = os.path.dirname(__file__).split(os.path.sep)
    dir_path.extend(["outputs"])
    dir_path = os.path.sep.join(dir_path) + os.path.sep
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
    # Write topography
    fname = dir_path + "topo_xyz.txt"
    np.savetxt(fname, topo_xyz, fmt="%.4e")
    # Add random noise scaled to 5% of each DC datum
    # (note: np.random.rand draws uniform, not Gaussian, noise)
    np.random.seed(225)
    std = 0.05 * np.abs(dpred_dc)
    dc_noise = std * np.random.rand(len(dpred_dc))
    dobs = dpred_dc + dc_noise
    # Create a survey with the original electrode locations
    # and not the shifted ones
    # Generate source list for DC survey line
    source_list = generate_dcip_sources_line(
        survey_type,
        dc_data_type,
        dimension_type,
        end_locations,
        topo_xyz,
        num_rx_per_src,
        station_separation,
    )
    dc_survey_original = dc.survey.Survey(source_list)
    # Write out data at their original electrode locations (not shifted)
    data_obj = data.Data(dc_survey_original, dobs=dobs, standard_deviation=std)
    fname = dir_path + "dc_data.obs"
    write_dcip2d_ubc(fname, data_obj, "volt", "dobs")
    # Add random noise with amplitude 5e-3 V/V to the IP data
    # (note: np.random.rand draws uniform, not Gaussian, noise)
    std = 5e-3 * np.ones_like(dpred_ip)
    ip_noise = std * np.random.rand(len(dpred_ip))
    dobs = dpred_ip + ip_noise
    # Create a survey with the original electrode locations
    # and not the shifted ones
    # Generate source list for IP survey line
    source_list = generate_dcip_sources_line(
        survey_type,
        ip_data_type,
        dimension_type,
        end_locations,
        topo_xyz,
        num_rx_per_src,
        station_separation,
    )
    ip_survey_original = dc.survey.Survey(source_list)
    # Write out data at their original electrode locations (not shifted)
    data_obj = data.Data(ip_survey_original, dobs=dobs, standard_deviation=std)
    fname = dir_path + "ip_data.obs"
    write_dcip2d_ubc(fname, data_obj, "apparent_chargeability", "dobs")
| mit | bb3db0e6311f9f079dc58fe614c0bbb1 | 30.349328 | 88 | 0.65536 | 3.224679 | false | false | false | false |
simpeg/simpeg | SimPEG/electromagnetics/frequency_domain/receivers.py | 1 | 12700 | from ... import survey
from ...utils import validate_string, validate_type, validate_direction
import warnings
from discretize.utils import Zero
class BaseRx(survey.BaseRx):
    """Base FDEM receivers class.

    Parameters
    ----------
    locations : (n_loc, n_dim) numpy.ndarray
        Receiver locations.
    orientation : {'z', 'x', 'y'} or numpy.ndarray
        Receiver orientation.
    component : {'real', 'imag', 'both', 'complex'}
        Component of the receiver; i.e. 'real' or 'imag'. The options 'both' and
        'complex' are only available for the 1D layered simulations.
    data_type : {'field', 'ppm'}
        Data type observed by the receiver, either field, or ppm secondary
        of the total field.
    use_source_receiver_offset : bool, optional
        Whether to interpret the receiver locations as defining the source and receiver
        offset.
    """
    def __init__(
        self,
        locations,
        orientation="z",
        component="real",
        data_type="field",
        use_source_receiver_offset=False,
        **kwargs,
    ):
        # 'projComp' is deprecated; 'orientation' now drives the projection.
        proj = kwargs.pop("projComp", None)
        if proj is not None:
            warnings.warn(
                "'projComp' overrides the 'orientation' property which automatically"
                " handles the projection from the mesh the receivers!!! "
                "'projComp' is deprecated and will be removed in SimPEG 0.19.0."
            )
            self.projComp = proj
        self.orientation = orientation
        self.component = component
        self.data_type = data_type
        self.use_source_receiver_offset = use_source_receiver_offset
        super().__init__(locations, **kwargs)
    @property
    def orientation(self):
        """Orientation of the receiver.

        Returns
        -------
        numpy.ndarray
        """
        return self._orientation
    @orientation.setter
    def orientation(self, var):
        # Validates strings ('x', 'y', 'z') or unit vectors into a 3-vector.
        self._orientation = validate_direction("orientation", var, dim=3)
    @property
    def component(self):
        """Data component; i.e. real or imaginary.

        Returns
        -------
        str : {'real', 'imag', 'both', 'complex'}
            Component of the receiver; i.e. 'real' or 'imag'. The options 'both' and
            'complex' are only available for the 1D layered simulations.
        """
        return self._component
    @component.setter
    def component(self, val):
        # Accepts common aliases (e.g. 'in-phase', 'quadrature') and
        # normalizes them to the first entry of each tuple.
        self._component = validate_string(
            "component",
            val,
            (
                ("real", "re", "in-phase", "in phase"),
                (
                    "imag",
                    "imaginary",
                    "im",
                    "out-of-phase",
                    "out of phase",
                    "quadrature",
                ),
                "both",
                "complex",
            ),
        )
    @property
    def data_type(self):
        """The type of data for this receiver.

        The data type is either a field measurement or a part per million (ppm) measurement
        of the primary field.

        Returns
        -------
        str : {'field', 'ppm'}

        Notes
        -----
        This is currently only implemented for the 1D layered simulations.
        """
        return self._data_type
    @data_type.setter
    def data_type(self, val):
        self._data_type = validate_string(
            "data_type", val, string_list=("field", "ppm")
        )
    @property
    def use_source_receiver_offset(self):
        """Use source-receiver offset.

        Whether to interpret the location as a source-receiver offset.

        Returns
        -------
        bool

        Notes
        -----
        This is currently only implemented for the 1D layered code.
        """
        return self._use_source_receiver_offset
    @use_source_receiver_offset.setter
    def use_source_receiver_offset(self, val):
        self._use_source_receiver_offset = validate_type(
            "use_source_receiver_offset", val, bool
        )
    def getP(self, mesh, projected_grid):
        """Get projection matrix from mesh to receivers

        Parameters
        ----------
        mesh : discretize.BaseMesh
            A discretize mesh
        projected_grid : str
            Define what part of the mesh (i.e. edges, faces, centers, nodes) to
            project from. Must be one of::

                'E', 'edges_'           -> field defined on edges
                'F', 'faces_'           -> field defined on faces
                'CCV', 'cell_centers_'  -> vector field defined on cell centers

        Returns
        -------
        scipy.sparse.csr_matrix
            P, the interpolation matrix
        """
        if (mesh, projected_grid) in self._Ps:
            return self._Ps[(mesh, projected_grid)]
        # Combine the x/y/z component interpolation matrices, weighted by
        # the orientation vector; components with zero weight are skipped.
        P = Zero()
        for strength, comp in zip(self.orientation, ["x", "y", "z"]):
            if strength != 0.0:
                P = P + strength * mesh.get_interpolation_matrix(
                    self.locations, projected_grid + comp
                )
        if self.storeProjections:
            self._Ps[(mesh, projected_grid)] = P
        return P
    def eval(self, src, mesh, f):
        """Project fields from the mesh to the receiver(s).

        Parameters
        ----------
        src : SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc
            A frequency-domain EM source
        mesh : discretize.base.BaseMesh
            The mesh on which the discrete set of equations is solved
        f : SimPEG.electromagnetic.frequency_domain.fields.FieldsFDEM
            The solution for the fields defined on the mesh

        Returns
        -------
        numpy.ndarray
            Fields projected to the receiver(s)
        """
        projected_grid = f._GLoc(self.projField)
        P = self.getP(mesh, projected_grid)
        f_part_complex = f[src, self.projField]
        f_part = getattr(f_part_complex, self.component)  # real or imag component
        return P * f_part
    def evalDeriv(self, src, mesh, f, du_dm_v=None, v=None, adjoint=False):
        """Derivative of the projected fields with respect to the model, times a vector.

        Parameters
        ----------
        src : SimPEG.electromagnetics.frequency_domain.sources.BaseFDEMSrc
            A frequency-domain EM source
        mesh : discretize.base.BaseMesh
            The mesh on which the discrete set of equations is solved
        f : SimPEG.electromagnetic.frequency_domain.fields.FieldsFDEM
            The solution for the fields defined on the mesh
        du_dm_v : numpy.ndarray
            The derivative of the fields on the mesh with respect to the model,
            times a vector.
        v : numpy.ndarray, optional
            The vector which being multiplied
        adjoint : bool
            If ``True``, return the ajoint

        Returns
        -------
        numpy.ndarray
            The derivative times a vector at the receiver(s)
        """
        df_dmFun = getattr(f, "_{0}Deriv".format(self.projField), None)
        assert v is not None, "v must be provided to compute the deriv or adjoint"
        projected_grid = f._GLoc(self.projField)
        P = self.getP(mesh, projected_grid)
        if not adjoint:
            assert (
                du_dm_v is not None
            ), "du_dm_v must be provided to evaluate the receiver deriv"
            df_dm_v = df_dmFun(src, du_dm_v, v, adjoint=False)
            Pv_complex = P * df_dm_v
            Pv = getattr(Pv_complex, self.component)
            return Pv
        elif adjoint:
            PTv_real = P.T * v
            # Adjoint of extracting a real component from a complex field:
            # taking the imaginary part corresponds to multiplying by -1j.
            if self.component == "imag":
                PTv = -1j * PTv_real
            elif self.component == "real":
                PTv = PTv_real.astype(complex)
            else:
                raise NotImplementedError("must be real or imag")
            df_duT, df_dmT = df_dmFun(src, None, PTv, adjoint=True)
            # if self.component == "imag":  # conjugate
            #     df_duT *= -1
            #     df_dmT *= -1
            return df_duT, df_dmT
    @property
    def nD(self):
        """Number of data associated with the receiver.

        Returns
        -------
        int
            Two data per location when ``component`` is ``'both'``
            (real and imaginary parts), otherwise one per location.
        """
        if self.component == "both":
            return int(self.locations.shape[0] * 2)
        else:
            return self.locations.shape[0]
class PointElectricField(BaseRx):
    """Point receiver measuring the FDEM electric field.

    Parameters
    ----------
    locations : (n_loc, n_dim) numpy.ndarray
        Locations of the point receiver(s).
    orientation : {'x', 'y', 'z'}
        Orientation of the measured field component.
    component : {'real', 'imag'}
        Whether the real or the imaginary part of the field is measured.
    """
    def __init__(self, locations, orientation="x", component="real", **kwargs):
        # Project the electric field solution onto the receiver locations.
        self.projField = "e"
        super().__init__(
            locations, orientation=orientation, component=component, **kwargs
        )
class PointMagneticFluxDensity(BaseRx):
    """Point receiver measuring the total FDEM magnetic flux density.

    Parameters
    ----------
    locations : (n_loc, n_dim) numpy.ndarray
        Locations of the point receiver(s).
    orientation : {'x', 'y', 'z'}
        Orientation of the measured field component.
    component : {'real', 'imag'}
        Whether the real or the imaginary part of the field is measured.
    """
    def __init__(self, locations, orientation="x", component="real", **kwargs):
        # Project the total magnetic flux density solution onto the receivers.
        self.projField = "b"
        super().__init__(
            locations, orientation=orientation, component=component, **kwargs
        )
class PointMagneticFluxDensitySecondary(BaseRx):
    """Point receiver measuring the secondary FDEM magnetic flux density.

    Parameters
    ----------
    locations : (n_loc, n_dim) numpy.ndarray
        Locations of the point receiver(s).
    orientation : {'x', 'y', 'z'}
        Orientation of the measured field component.
    component : {'real', 'imag'}
        Whether the real or the imaginary part of the field is measured.
    """
    def __init__(self, locations, orientation="x", component="real", **kwargs):
        # Project the secondary magnetic flux density solution onto the receivers.
        self.projField = "bSecondary"
        super().__init__(
            locations, orientation=orientation, component=component, **kwargs
        )
class PointMagneticField(BaseRx):
    """Point receiver measuring the total FDEM magnetic field.

    Parameters
    ----------
    locations : (n_loc, n_dim) numpy.ndarray
        Locations of the point receiver(s).
    orientation : {'x', 'y', 'z'}
        Orientation of the measured field component.
    component : {'real', 'imag', 'both', 'complex'}
        Which part of the complex field is measured. 'both' and 'complex'
        are supported only by the 1D layered simulations.
    data_type : {'field', 'ppm'}
        Whether the datum is the field itself or the secondary field in
        parts per million of the total field.
    use_source_receiver_offset : bool, optional
        When True, the receiver locations are interpreted as offsets from
        the source.

    Notes
    -----
    `data_type`, `use_source_receiver_offset`, and the options of `'both'` and
    `'complex'` for component are only implemented for the `Simulation1DLayered`.
    """
    def __init__(self, locations, orientation="x", component="real", **kwargs):
        # Project the total magnetic field solution onto the receivers.
        self.projField = "h"
        super().__init__(
            locations, orientation=orientation, component=component, **kwargs
        )
class PointMagneticFieldSecondary(BaseRx):
    """Measure FDEM secondary magnetic field at a point.

    Parameters
    ----------
    locations : (n_loc, n_dim) numpy.ndarray
        Receiver locations.
    orientation : {'x', 'y', 'z'}
        Receiver orientation
    component : {'real', 'imag', 'both', 'complex'}
        Component of the receiver; i.e. 'real' or 'imag'. The options 'both' and
        'complex' are only available for the 1D layered simulations.
    data_type : {'field', 'ppm'}
        Data type observed by the receiver, either field, or ppm secondary
        of the total field.
    use_source_receiver_offset : bool, optional
        Whether to interpret the receiver locations as defining the source and receiver
        offset.

    Notes
    -----
    `data_type`, `use_source_receiver_offset`, and the options of `'both'` and
    `'complex'` for component are only implemented for the `Simulation1DLayered`.
    """
    def __init__(self, locations, orientation="x", component="real", **kwargs):
        self.projField = "hSecondary"
        super().__init__(
            locations, orientation=orientation, component=component, **kwargs
        )
class PointCurrentDensity(BaseRx):
    """Point receiver measuring the FDEM current density.

    Parameters
    ----------
    locations : (n_loc, n_dim) numpy.ndarray
        Locations of the point receiver(s).
    orientation : {'x', 'y', 'z'}
        Orientation of the measured field component.
    component : {'real', 'imag'}
        Whether the real or the imaginary part of the field is measured.
    """
    def __init__(self, locations, orientation="x", component="real", **kwargs):
        # Project the current density solution onto the receiver locations.
        self.projField = "j"
        super().__init__(
            locations, orientation=orientation, component=component, **kwargs
        )
| mit | 1a14dc7bfd7bbaeaa46d1b2268cfd785 | 30.75 | 91 | 0.570945 | 4.220671 | false | false | false | false |
simpeg/simpeg | SimPEG/_EM/Static/SP/ProblemSP.py | 1 | 5508 | from SimPEG import Problem, Utils, Maps, Mesh
from SimPEG.EM.Base import BaseEMProblem
from SimPEG.EM.Static.DC.FieldsDC import FieldsDC, Fields3DCellCentered
from SimPEG.EM.Static.DC import Survey, BaseDCProblem, Simulation3DCellCentered
from SimPEG.Utils import sdiag
import numpy as np
import scipy.sparse as sp
from SimPEG.Utils import Zero
from SimPEG.EM.Static.DC import getxBCyBC_CC
from SimPEG import Props
class BaseSPProblem(BaseDCProblem):
    """Base class for streaming/self potential (SP) problems.

    Extends the DC resistivity problem with invertible hydraulic head,
    streaming current source and streaming current density properties.
    """
    h, hMap, hDeriv = Props.Invertible("Hydraulic Head (m)")
    q, qMap, qDeriv = Props.Invertible("Streaming current source (A/m^3)")
    jsx, jsxMap, jsxDeriv = Props.Invertible(
        "Streaming current density in x-direction (A/m^2)"
    )
    jsy, jsyMap, jsyDeriv = Props.Invertible(
        "Streaming current density in y-direction (A/m^2)"
    )
    jsz, jszMap, jszDeriv = Props.Invertible(
        "Streaming current density in z-direction (A/m^2)"
    )
    sigma = Props.PhysicalProperty("Electrical conductivity (S/m)")
    rho = Props.PhysicalProperty("Electrical resistivity (Ohm m)")
    Props.Reciprocal(sigma, rho)
    modelType = None
    surveyPair = Survey
    fieldsPair = FieldsDC
    @property
    def deleteTheseOnModelUpdate(self):
        # Nothing is cached against the model here, so nothing to delete.
        toDelete = []
        return toDelete
    def evalq(self, Qv, vel):
        """Evaluate the streaming current source from Qv and the velocity field."""
        MfQviI = self.mesh.get_face_inner_product(1.0 / Qv, invert_matrix=True)
        Mf = self.mesh.get_face_inner_product()
        return self.Div * (Mf * (MfQviI * vel))
class Problem_CC(BaseSPProblem, Simulation3DCellCentered):
    """Cell-centered SP problem: potentials at cell centers, J on faces."""
    _solutionType = "phiSolution"
    _formulation = "HJ"  # CC potentials means J is on faces
    fieldsPair = Fields3DCellCentered
    modelType = None
    bc_type = "Mixed"
    # NOTE(review): subclasses (e.g. Problem_CC_Jstore) read
    # ``self.coordinate_system`` but its declaration below is commented
    # out -- confirm it is set elsewhere before relying on it.
    # coordinate_system = StringChoice(
    #     "Type of coordinate system we are regularizing in",
    #     choices=["cartesian", "spherical"],
    #     default="cartesian",
    # )
    def __init__(self, mesh, **kwargs):
        BaseSPProblem.__init__(self, mesh, **kwargs)
        self.setBC()
    def getADeriv(self, u, v, adjoint=False):
        # We assume conductivity is known
        return Zero()
    def getRHSDeriv(self, src, v, adjoint=False):
        """
        Derivative of the right hand side with respect to the model
        """
        return src.evalDeriv(self, v=v, adjoint=adjoint)
class Problem_CC_Jstore(Problem_CC):
    """Cell-centered SP problem that computes and stores the sensitivity G."""
    # Cached spherical-transform Jacobian (see the ``S`` property).
    _S = None
    @property
    def G(self):
        """Sensitivity matrix, computed once and cached in ``_G``."""
        if getattr(self, "_G", None) is None:
            A = self.getA()
            self.Ainv = self.solver(A, **self.solver_opts)
            src = self.survey.source_list[0]
            rx = src.receiver_list[0]
            P = rx.getP(self.mesh, "CC").toarray()
            src = self.survey.source_list[0]
            self._G = (self.Ainv * P.T).T * src.evalDeriv(
                self, v=Utils.sdiag(np.ones_like(self.model))
            )
            # The factorization is only needed to build G; free it now.
            self.Ainv.clean()
            del self.Ainv
        return self._G
    def getJ(self, m, f=None):
        # NOTE(review): ``coordinate_system`` is commented out in
        # Problem_CC; confirm it is set before these branches are hit.
        if self.coordinate_system == "cartesian":
            return self.G
        else:
            self.model = m
            return self.G * self.S
    def Jvec(self, m, v, f=None):
        self.model = m
        if self.coordinate_system == "cartesian":
            return self.G.dot(v)
        else:
            # Chain rule through the spherical-to-Cartesian transform.
            return np.dot(self.G, self.S.dot(v))
    def Jtvec(self, m, v, f=None):
        self.model = m
        if self.coordinate_system == "cartesian":
            return self.G.T.dot(v)
        else:
            return self.S.T * (self.G.T.dot(v))
    @Utils.count
    def fields(self, m):
        self.model = m
        if self.coordinate_system == "spherical":
            # Convert (amplitude, theta, phi) model to Cartesian components.
            m = Utils.mat_utils.atp2xyz(m)
        return self.G.dot(m)
    @property
    def S(self):
        """
        Derivatives for the spherical transformation
        """
        if getattr(self, "_S", None) is None:
            if self.verbose:
                print("Compute S")
            if self.model is None:
                raise Exception("Requires a model")
            # Assume it is vector model in spherical coordinates
            nC = int(self.model.shape[0] / 3)
            # a = amplitude, t = theta, p = phi (stacked thirds of the model).
            a = self.model[:nC]
            t = self.model[nC : 2 * nC]
            p = self.model[2 * nC :]
            # Rows of the Jacobian of the (a, t, p) -> (x, y, z) transform.
            Sx = sp.hstack(
                [
                    sp.diags(np.cos(t) * np.cos(p), 0),
                    sp.diags(-a * np.sin(t) * np.cos(p), 0),
                    sp.diags(-a * np.cos(t) * np.sin(p), 0),
                ]
            )
            Sy = sp.hstack(
                [
                    sp.diags(np.cos(t) * np.sin(p), 0),
                    sp.diags(-a * np.sin(t) * np.sin(p), 0),
                    sp.diags(a * np.cos(t) * np.cos(p), 0),
                ]
            )
            Sz = sp.hstack(
                [
                    sp.diags(np.sin(t), 0),
                    sp.diags(a * np.cos(t), 0),
                    sp.csr_matrix((nC, nC)),
                ]
            )
            self._S = sp.vstack([Sx, Sy, Sz])
        return self._S
    @property
    def deleteTheseOnModelUpdate(self):
        # The spherical Jacobian depends on the model, so drop it on update.
        toDelete = super().deleteTheseOnModelUpdate
        if self._S is not None:
            toDelete = toDelete + ["_S"]
        return toDelete
class SurveySP_store(Survey):
    """SP survey whose predicted data come directly from the attached problem."""

    @Utils.count
    @Utils.requires("prob")
    def dpred(self, m=None, f=None):
        """Predicted data for model ``m`` (delegates to ``self.prob.fields``)."""
        return self.prob.fields(m)
| mit | 645ab8e77dbf761bd100fe6144ecc941 | 26.818182 | 79 | 0.544481 | 3.404203 | false | false | false | false |
simpeg/simpeg | tutorials/08-tdem/plot_fwd_2_tem_cyl.py | 1 | 8158 | """
3D Forward Simulation for Transient Response on a Cylindrical Mesh
==================================================================
Here we use the module *SimPEG.electromagnetics.time_domain* to simulate the
transient response for borehole survey using a cylindrical mesh and a
radially symmetric conductivity. For this tutorial, we focus on the following:
- How to define the transmitters and receivers
- How to define the transmitter waveform for a step-off
- How to define the time-stepping
- How to define the survey
- How to solve TDEM problems on a cylindrical mesh
- The units of the conductivity/resistivity model and resulting data
Please note that we have used a coarse mesh larger time-stepping to shorten the
time of the simulation. Proper discretization in space and time is required to
simulate the fields at each time channel with sufficient accuracy.
"""
#########################################################################
# Import Modules
# --------------
#
from discretize import CylindricalMesh
from discretize.utils import mkvc
from SimPEG import maps
import SimPEG.electromagnetics.time_domain as tdem
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
try:
from pymatsolver import Pardiso as Solver
except ImportError:
from SimPEG import SolverLU as Solver
write_file = False

# sphinx_gallery_thumbnail_number = 2


#################################################################
# Defining the Waveform
# ---------------------
#
# Under *SimPEG.electromagnetic.time_domain.sources*
# there are a multitude of waveforms that can be defined (VTEM, Ramp-off etc...).
# Here we simulate the response due to a step off waveform where the off-time
# begins at t=0. Other waveforms are discussed in the OcTree simulation example.
#

waveform = tdem.sources.StepOffWaveform(off_time=0.0)


#####################################################################
# Create Airborne Survey
# ----------------------
#
# Here we define the survey used in our simulation. For time domain
# simulations, we must define the geometry of the source and its waveform. For
# the receivers, we define their geometry, the type of field they measure and the time
# channels at which they measure the field. For this example,
# the survey consists of a borehole survey with a coincident loop geometry.
#

# Observation times for response (time channels)
time_channels = np.logspace(-4, -2, 11)

# Defining transmitter locations: 26 stations down the borehole axis
time_channels_comment = None  # (no-op placeholder removed)
xtx, ytx, ztx = np.meshgrid([0], [0], np.linspace(0, -500, 26) - 2.5)
source_locations = np.c_[mkvc(xtx), mkvc(ytx), mkvc(ztx)]
ntx = np.size(xtx)

# Define receiver locations (coincident with the transmitters)
xrx, yrx, zrx = np.meshgrid([0], [0], np.linspace(0, -500, 26) - 2.5)
receiver_locations = np.c_[mkvc(xrx), mkvc(yrx), mkvc(zrx)]

source_list = []  # Create empty list to store sources

# Each unique location defines a new transmitter
for ii in range(ntx):

    # Define receivers at each location.
    dbzdt_receiver = tdem.receivers.PointMagneticFluxTimeDerivative(
        receiver_locations[ii, :], time_channels, "z"
    )
    receivers_list = [
        dbzdt_receiver
    ]  # Make a list containing all receivers even if just one

    # Must define the transmitter properties and associated receivers
    source_list.append(
        tdem.sources.CircularLoop(
            receivers_list,
            location=source_locations[ii],
            waveform=waveform,
            radius=10.0,
        )
    )

survey = tdem.Survey(source_list)
###############################################################
# Create Cylindrical Mesh
# -----------------------
#
# Here we create the cylindrical mesh that will be used for this tutorial
# example. We chose to design a coarser mesh to decrease the run time.
# When designing a mesh to solve practical time domain problems:
#
#     - Your smallest cell size should be 10%-20% the size of your smallest diffusion distance
#     - The thickness of your padding needs to be 2-3 times bigger than your largest diffusion distance
#     - The diffusion distance is ~1260*np.sqrt(rho*t)
#
#

hr = [(5.0, 40), (5.0, 15, 1.5)]
hz = [(5.0, 15, -1.5), (5.0, 300), (5.0, 15, 1.5)]
mesh = CylindricalMesh([hr, 1, hz], x0="00C")

###############################################################
# Create Conductivity/Resistivity Model and Mapping
# -------------------------------------------------
#
# Here, we create the model that will be used to predict time domain
# data and the mapping from the model to the mesh. The model
# consists of several layers. For this example, we will have only flat topography.
#

# Conductivity in S/m (or resistivity in Ohm m)
air_conductivity = 1e-8
background_conductivity = 1e-1
layer_conductivity_1 = 1e0
layer_conductivity_2 = 1e-2

# Find cells that are active in the forward modeling (cells below surface)
ind_active = mesh.cell_centers[:, 2] < 0

# Define mapping from model to active cells
model_map = maps.InjectActiveCells(mesh, ind_active, air_conductivity)

# Define the model: background with two conductive/resistive layers
model = background_conductivity * np.ones(ind_active.sum())
ind = (mesh.cell_centers[ind_active, 2] > -200.0) & (
    mesh.cell_centers[ind_active, 2] < -0
)
model[ind] = layer_conductivity_1
ind = (mesh.cell_centers[ind_active, 2] > -400.0) & (
    mesh.cell_centers[ind_active, 2] < -200
)
model[ind] = layer_conductivity_2

# Plot Conductivity Model
mpl.rcParams.update({"font.size": 14})
fig = plt.figure(figsize=(5, 6))

plotting_map = maps.InjectActiveCells(mesh, ind_active, np.nan)
log_model = np.log10(model)  # plot log-conductivity for dynamic range

ax1 = fig.add_axes([0.20, 0.1, 0.54, 0.85])
mesh.plot_image(
    plotting_map * log_model,
    ax=ax1,
    grid=False,
    clim=(np.log10(layer_conductivity_2), np.log10(layer_conductivity_1)),
)
ax1.set_title("Conductivity Model")

ax2 = fig.add_axes([0.76, 0.1, 0.05, 0.85])
norm = mpl.colors.Normalize(
    vmin=np.log10(layer_conductivity_2), vmax=np.log10(layer_conductivity_1)
)
cbar = mpl.colorbar.ColorbarBase(
    ax2, norm=norm, orientation="vertical", format="$10^{%.1f}$"
)
cbar.set_label("Conductivity [S/m]", rotation=270, labelpad=15, size=12)
######################################################
# Define the Time-Stepping
# ------------------------
#
# Time steps start small right after the step-off (when fields change
# fastest) and grow progressively coarser; the first step should be small
# relative to the earliest time channel (1e-4 s here).
#

time_steps = [(5e-06, 20), (0.0001, 20), (0.001, 21)]


######################################################
# Define the Simulation
# ---------------------
#
# Here we define the formulation for solving Maxwell's equations. Since we are
# measuring the time-derivative of the magnetic flux density and working with
# a conductivity model, the EB formulation is the most natural. We must also
# remember to define the mapping for the conductivity model. Use *rhoMap* instead
# of *sigmaMap* if you defined a resistivity model.
#

simulation = tdem.simulation.Simulation3DMagneticFluxDensity(
    mesh, survey=survey, sigmaMap=model_map, solver=Solver
)

# Set the time-stepping for the simulation
simulation.time_steps = time_steps

###########################################################
# Predict Data and Plot
# ---------------------
#

# Data are organized by transmitter, then by
# receiver then by observation time. dBdt data are in T/s.
dpred = simulation.dpred(model)

# Reshape the flat data vector to (sources, time channels) for plotting
dpred = np.reshape(dpred, (ntx, len(time_channels)))

# TDEM Profile: response versus depth, one curve per time channel
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_axes([0.15, 0.15, 0.8, 0.75])
for ii in range(0, len(time_channels)):
    ax1.semilogx(
        -dpred[:, ii], receiver_locations[:, -1], "k", lw=2
    )  # -ve sign to plot -dBz/dt
ax1.set_xlabel("-dBz/dt [T/s]")
ax1.set_ylabel("Elevation [m]")
ax1.set_title("Airborne TDEM Profile")

# Response for all time channels at the first and last sounding
fig = plt.figure(figsize=(5, 5))
ax1 = fig.add_axes([0.15, 0.15, 0.8, 0.75])
ax1.loglog(time_channels, -dpred[0, :], "b", lw=2)
ax1.loglog(time_channels, -dpred[-1, :], "r", lw=2)
ax1.set_xlim((np.min(time_channels), np.max(time_channels)))
ax1.set_xlabel("time [s]")
ax1.set_ylabel("-dBz/dt [T/s]")
ax1.set_title("Decay Curve")
ax1.legend(["First Sounding", "Last Sounding"], loc="upper right")
willmcgugan/rich | examples/log.py | 1 | 1943 | """
A simulation of Rich console logging.
"""
import time
from rich.console import Console
from rich.style import Style
from rich.theme import Theme
from rich.highlighter import RegexHighlighter
class RequestHighlighter(RegexHighlighter):
    """Highlighter for HTTP request log lines.

    Capture-group names map to theme styles under the ``req.`` prefix
    (protocol, method, path, result, stats, filename).
    """

    base_style = "req."
    highlights = [
        r"^(?P<protocol>\w+) (?P<method>\w+) (?P<path>\S+) (?P<result>\w+) (?P<stats>\[.+\])$",
        r"\/(?P<filename>\w+\..{3,4})",
    ]
# Styles for each capture group produced by RequestHighlighter above.
theme = Theme(
    {
        "req.protocol": Style.parse("dim bold green"),
        "req.method": Style.parse("bold cyan"),
        "req.path": Style.parse("magenta"),
        "req.filename": Style.parse("bright_magenta"),
        "req.result": Style.parse("yellow"),
        "req.stats": Style.parse("dim"),
    }
)
console = Console(theme=theme)

console.log("Server starting...")
console.log("Serving on http://127.0.0.1:8000")

# Pause to mimic real server start-up before requests arrive.
time.sleep(1)

request_highlighter = RequestHighlighter()

console.log(
    request_highlighter("HTTP GET /foo/bar/baz/egg.html 200 [0.57, 127.0.0.1:59076]"),
)

console.log(
    request_highlighter(
        "HTTP GET /foo/bar/baz/background.jpg 200 [0.57, 127.0.0.1:59076]"
    ),
)
time.sleep(1)
def test_locals():
    """Demonstrate ``log_locals``: local variables are captured and rendered."""
    # These locals are unused on purpose; log_locals=True displays them.
    foo = (1, 2, 3)
    movies = ["Deadpool", "Rise of the Skywalker"]
    console = Console()

    console.log(
        "[b]JSON[/b] RPC [i]batch[/i]",
        [
            {"jsonrpc": "2.0", "method": "sum", "params": [1, 2, 4], "id": "1"},
            {"jsonrpc": "2.0", "method": "notify_hello", "params": [7]},
            {"jsonrpc": "2.0", "method": "subtract", "params": [42, 23], "id": "2"},
            {"foo": "boo"},
            {
                "jsonrpc": "2.0",
                "method": "foo.get",
                "params": {"name": "myself", "enable": False, "grommits": None},
                "id": "5",
            },
            {"jsonrpc": "2.0", "method": "get_data", "id": "9"},
        ],
        log_locals=True,
    )


test_locals()
| mit | 6254b2fcfa434fff75ee01e27eb9b601 | 24.233766 | 95 | 0.530623 | 3.074367 | false | false | false | false |
willmcgugan/rich | rich/screen.py | 1 | 1579 | from typing import Optional, TYPE_CHECKING
from .segment import Segment
from .style import StyleType
from ._loop import loop_last
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
RenderResult,
RenderableType,
Group,
)
class Screen:
    """A renderable that fills the terminal screen and crops excess.

    Args:
        renderable (RenderableType): Child renderable.
        style (StyleType, optional): Optional background style. Defaults to None.
    """

    renderable: "RenderableType"

    def __init__(
        self,
        *renderables: "RenderableType",
        style: Optional[StyleType] = None,
        application_mode: bool = False,
    ) -> None:
        from rich.console import Group

        self.renderable = Group(*renderables)
        self.style = style
        self.application_mode = application_mode

    def __rich_console__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "RenderResult":
        width, height = options.size
        background = console.get_style(self.style) if self.style else None
        screen_options = options.update(width=width, height=height)
        rendered = console.render_lines(
            self.renderable or "", screen_options, style=background, pad=True
        )
        # Crop/pad the rendered lines to exactly fill the screen.
        rendered = Segment.set_shape(rendered, width, height, style=background)
        # In application mode the terminal needs an explicit CR after the LF.
        separator = Segment("\n\r") if self.application_mode else Segment.line()
        first = True
        for line in rendered:
            if not first:
                yield separator
            yield from line
            first = False
| mit | ea219563ca6797cdcad0b208dd407479 | 28.240741 | 81 | 0.621279 | 4.048718 | false | false | false | false |
willmcgugan/rich | rich/_windows.py | 1 | 2076 | import sys
from dataclasses import dataclass
@dataclass
class WindowsConsoleFeatures:
    """Windows features available."""

    vt: bool = False
    """The console supports VT codes."""
    truecolor: bool = False
    """The console supports truecolor."""
# Attempt to load the Windows console API via ctypes; on any other platform
# (or if the DLL cannot be loaded) fall back to a no-features stub.
try:
    import ctypes
    from ctypes import wintypes
    from ctypes import LibraryLoader

    if sys.platform == "win32":
        windll = LibraryLoader(ctypes.WinDLL)
    else:
        windll = None
        raise ImportError("Not windows")
except (AttributeError, ImportError, ValueError):

    # Fallback if we can't load the Windows DLL
    def get_windows_console_features() -> WindowsConsoleFeatures:
        """Return default (all-off) console features on non-Windows platforms."""
        features = WindowsConsoleFeatures()
        return features


else:
    STDOUT = -11
    ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4

    _GetConsoleMode = windll.kernel32.GetConsoleMode
    _GetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.LPDWORD]
    _GetConsoleMode.restype = wintypes.BOOL

    _GetStdHandle = windll.kernel32.GetStdHandle
    _GetStdHandle.argtypes = [
        wintypes.DWORD,
    ]
    _GetStdHandle.restype = wintypes.HANDLE

    def get_windows_console_features() -> WindowsConsoleFeatures:
        """Get windows console features.

        Returns:
            WindowsConsoleFeatures: An instance of WindowsConsoleFeatures.
        """
        handle = _GetStdHandle(STDOUT)
        console_mode = wintypes.DWORD()
        # GetConsoleMode returns 0 (failure) when stdout is not a console.
        result = _GetConsoleMode(handle, console_mode)
        vt = bool(result and console_mode.value & ENABLE_VIRTUAL_TERMINAL_PROCESSING)
        truecolor = False
        if vt:
            # Truecolor support arrived in Windows 10 build 15063.
            win_version = sys.getwindowsversion()
            truecolor = win_version.major > 10 or (
                win_version.major == 10 and win_version.build >= 15063
            )
        features = WindowsConsoleFeatures(vt=vt, truecolor=truecolor)
        return features
if __name__ == "__main__":
    import platform

    # Manual check: print the detected platform and console feature flags.
    features = get_windows_console_features()
    from rich import print

    print(f'platform="{platform.system()}"')
    print(repr(features))
| mit | 0cc3c7e489d304262b208bbfa1f3d99d | 27.054054 | 85 | 0.658478 | 4.325 | false | false | false | false |
willmcgugan/rich | rich/palette.py | 1 | 3288 | from math import sqrt
from functools import lru_cache
from typing import Sequence, Tuple, TYPE_CHECKING
from .color_triplet import ColorTriplet
if TYPE_CHECKING:
from rich.table import Table
class Palette:
"""A palette of available colors."""
def __init__(self, colors: Sequence[Tuple[int, int, int]]):
self._colors = colors
def __getitem__(self, number: int) -> ColorTriplet:
return ColorTriplet(*self._colors[number])
def __rich__(self) -> "Table":
from rich.color import Color
from rich.style import Style
from rich.text import Text
from rich.table import Table
table = Table(
"index",
"RGB",
"Color",
title="Palette",
caption=f"{len(self._colors)} colors",
highlight=True,
caption_justify="right",
)
for index, color in enumerate(self._colors):
table.add_row(
str(index),
repr(color),
Text(" " * 16, style=Style(bgcolor=Color.from_rgb(*color))),
)
return table
# This is somewhat inefficient and needs caching
@lru_cache(maxsize=1024)
def match(self, color: Tuple[int, int, int]) -> int:
"""Find a color from a palette that most closely matches a given color.
Args:
color (Tuple[int, int, int]): RGB components in range 0 > 255.
Returns:
int: Index of closes matching color.
"""
red1, green1, blue1 = color
_sqrt = sqrt
get_color = self._colors.__getitem__
def get_color_distance(index: int) -> float:
"""Get the distance to a color."""
red2, green2, blue2 = get_color(index)
red_mean = (red1 + red2) // 2
red = red1 - red2
green = green1 - green2
blue = blue1 - blue2
return _sqrt(
(((512 + red_mean) * red * red) >> 8)
+ 4 * green * green
+ (((767 - red_mean) * blue * blue) >> 8)
)
min_index = min(range(len(self._colors)), key=get_color_distance)
return min_index
if __name__ == "__main__":  # pragma: no cover
    import colorsys
    from typing import Iterable
    from rich.color import Color
    from rich.console import Console, ConsoleOptions
    from rich.segment import Segment
    from rich.style import Style

    class ColorBox:
        """Renders a hue/lightness gradient that fills the terminal."""

        def __rich_console__(
            self, console: Console, options: ConsoleOptions
        ) -> Iterable[Segment]:
            height = console.size.height - 3
            for y in range(0, height):
                for x in range(options.max_width):
                    h = x / options.max_width
                    l = y / (height + 1)
                    # "▄" shows two colors per cell: background is the upper
                    # half, foreground (the glyph) is the lower half.
                    r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
                    r2, g2, b2 = colorsys.hls_to_rgb(h, l + (1 / height / 2), 1.0)
                    bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
                    color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
                    yield Segment("▄", Style(color=color, bgcolor=bgcolor))
                yield Segment.line()

    console = Console()
    console.print(ColorBox())
| mit | 22a3e4bade659d644f7c9a3b1a228790 | 31.86 | 82 | 0.532562 | 4.002436 | false | false | false | false |
willmcgugan/rich | rich/table.py | 1 | 35052 | from dataclasses import dataclass, field, replace
from typing import (
Dict,
TYPE_CHECKING,
Iterable,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
Union,
)
from . import box, errors
from ._loop import loop_first_last, loop_last
from ._pick import pick_bool
from ._ratio import ratio_distribute, ratio_reduce
from .jupyter import JupyterMixin
from .measure import Measurement
from .padding import Padding, PaddingDimensions
from .protocol import is_renderable
from .segment import Segment
from .style import Style, StyleType
from .text import Text, TextType
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
JustifyMethod,
OverflowMethod,
RenderableType,
RenderResult,
)
@dataclass
class Column:
    """Defines a column in a table."""

    header: "RenderableType" = ""
    """RenderableType: Renderable for the header (typically a string)"""

    footer: "RenderableType" = ""
    """RenderableType: Renderable for the footer (typically a string)"""

    header_style: StyleType = ""
    """StyleType: The style of the header."""

    footer_style: StyleType = ""
    """StyleType: The style of the footer."""

    style: StyleType = ""
    """StyleType: The style of the column."""

    justify: "JustifyMethod" = "left"
    """str: How to justify text within the column ("left", "center", "right", or "full")"""

    overflow: "OverflowMethod" = "ellipsis"
    """str: Overflow method."""

    width: Optional[int] = None
    """Optional[int]: Width of the column, or ``None`` (default) to auto calculate width."""

    min_width: Optional[int] = None
    """Optional[int]: Minimum width of column, or ``None`` for no minimum. Defaults to None."""

    max_width: Optional[int] = None
    """Optional[int]: Maximum width of column, or ``None`` for no maximum. Defaults to None."""

    ratio: Optional[int] = None
    """Optional[int]: Ratio to use when calculating column width, or ``None`` (default) to adapt to column contents."""

    no_wrap: bool = False
    """bool: Prevent wrapping of text within the column. Defaults to ``False``."""

    _index: int = 0
    """Index of column."""

    _cells: List["RenderableType"] = field(default_factory=list)

    def copy(self) -> "Column":
        """Return a copy of this Column with an empty cell list."""
        return replace(self, _cells=[])

    @property
    def cells(self) -> Iterable["RenderableType"]:
        """Get all cells in the column, not including header."""
        yield from self._cells

    @property
    def flexible(self) -> bool:
        """Check if this column is flexible (i.e. has a ``ratio`` set)."""
        return self.ratio is not None
@dataclass
class Row:
    """Information regarding a row."""

    style: Optional[StyleType] = None
    """Style to apply to row."""

    end_section: bool = False
    """Indicated end of section, which will force a line beneath the row."""


class _Cell(NamedTuple):
    """A single cell in a table."""

    style: StyleType
    """Style to apply to cell."""
    renderable: "RenderableType"
    """Cell renderable."""
class Table(JupyterMixin):
"""A console renderable to draw a table.
Args:
*headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance.
title (Union[str, Text], optional): The title of the table rendered at the top. Defaults to None.
caption (Union[str, Text], optional): The table caption rendered below. Defaults to None.
width (int, optional): The width in characters of the table, or ``None`` to automatically fit. Defaults to None.
min_width (Optional[int], optional): The minimum width of the table, or ``None`` for no minimum. Defaults to None.
box (box.Box, optional): One of the constants in box.py used to draw the edges (see :ref:`appendix_box`), or ``None`` for no box lines. Defaults to box.HEAVY_HEAD.
safe_box (Optional[bool], optional): Disable box characters that don't display on windows legacy terminal with *raster* fonts. Defaults to True.
padding (PaddingDimensions, optional): Padding for cells (top, right, bottom, left). Defaults to (0, 1).
collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to False.
pad_edge (bool, optional): Enable padding of edge cells. Defaults to True.
expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.
show_header (bool, optional): Show a header row. Defaults to True.
show_footer (bool, optional): Show a footer row. Defaults to False.
show_edge (bool, optional): Draw a box around the outside of the table. Defaults to True.
show_lines (bool, optional): Draw lines between every row. Defaults to False.
leading (bool, optional): Number of blank lines between rows (precludes ``show_lines``). Defaults to 0.
style (Union[str, Style], optional): Default style for the table. Defaults to "none".
row_styles (List[Union, str], optional): Optional list of row styles, if more than one style is given then the styles will alternate. Defaults to None.
header_style (Union[str, Style], optional): Style of the header. Defaults to "table.header".
footer_style (Union[str, Style], optional): Style of the footer. Defaults to "table.footer".
border_style (Union[str, Style], optional): Style of the border. Defaults to None.
title_style (Union[str, Style], optional): Style of the title. Defaults to None.
caption_style (Union[str, Style], optional): Style of the caption. Defaults to None.
title_justify (str, optional): Justify method for title. Defaults to "center".
caption_justify (str, optional): Justify method for caption. Defaults to "center".
highlight (bool, optional): Highlight cell contents (if str). Defaults to False.
"""
columns: List[Column]
rows: List[Row]
    def __init__(
        self,
        *headers: Union[Column, str],
        title: Optional[TextType] = None,
        caption: Optional[TextType] = None,
        width: Optional[int] = None,
        min_width: Optional[int] = None,
        box: Optional[box.Box] = box.HEAVY_HEAD,
        safe_box: Optional[bool] = None,
        padding: PaddingDimensions = (0, 1),
        collapse_padding: bool = False,
        pad_edge: bool = True,
        expand: bool = False,
        show_header: bool = True,
        show_footer: bool = False,
        show_edge: bool = True,
        show_lines: bool = False,
        leading: int = 0,
        style: StyleType = "none",
        row_styles: Optional[Iterable[StyleType]] = None,
        header_style: Optional[StyleType] = "table.header",
        footer_style: Optional[StyleType] = "table.footer",
        border_style: Optional[StyleType] = None,
        title_style: Optional[StyleType] = None,
        caption_style: Optional[StyleType] = None,
        title_justify: "JustifyMethod" = "center",
        caption_justify: "JustifyMethod" = "center",
        highlight: bool = False,
    ) -> None:
        """Initialize a Table; see the class docstring for argument details."""
        self.columns: List[Column] = []
        self.rows: List[Row] = []
        self.title = title
        self.caption = caption
        self.width = width
        self.min_width = min_width
        self.box = box
        self.safe_box = safe_box
        self._padding = Padding.unpack(padding)
        self.pad_edge = pad_edge
        self._expand = expand
        self.show_header = show_header
        self.show_footer = show_footer
        self.show_edge = show_edge
        self.show_lines = show_lines
        self.leading = leading
        self.collapse_padding = collapse_padding
        self.style = style
        self.header_style = header_style or ""
        self.footer_style = footer_style or ""
        self.border_style = border_style
        self.title_style = title_style
        self.caption_style = caption_style
        self.title_justify: "JustifyMethod" = title_justify
        self.caption_justify: "JustifyMethod" = caption_justify
        self.highlight = highlight
        self.row_styles: Sequence[StyleType] = list(row_styles or [])
        # Strings become new Column instances; Column objects are re-indexed
        # to their position in this table.
        append_column = self.columns.append
        for header in headers:
            if isinstance(header, str):
                self.add_column(header=header)
            else:
                header._index = len(self.columns)
                append_column(header)
    @classmethod
    def grid(
        cls,
        *headers: Union[Column, str],
        padding: PaddingDimensions = 0,
        collapse_padding: bool = True,
        pad_edge: bool = False,
        expand: bool = False,
    ) -> "Table":
        """Get a table with no lines, headers, or footer.

        Args:
            *headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance.
            padding (PaddingDimensions, optional): Get padding around cells. Defaults to 0.
            collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to True.
            pad_edge (bool, optional): Enable padding around edges of table. Defaults to False.
            expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.

        Returns:
            Table: A table instance.
        """
        return cls(
            *headers,
            box=None,
            padding=padding,
            collapse_padding=collapse_padding,
            show_header=False,
            show_footer=False,
            show_edge=False,
            pad_edge=pad_edge,
            expand=expand,
        )

    @property
    def expand(self) -> bool:
        """Setting a non-None self.width implies expand."""
        return self._expand or self.width is not None

    @expand.setter
    def expand(self, expand: bool) -> None:
        """Set expand."""
        self._expand = expand
@property
def _extra_width(self) -> int:
"""Get extra width to add to cell content."""
width = 0
if self.box and self.show_edge:
width += 2
if self.box:
width += len(self.columns) - 1
return width
    @property
    def row_count(self) -> int:
        """Get the current number of rows in the table."""
        return len(self.rows)
def get_row_style(self, console: "Console", index: int) -> StyleType:
"""Get the current row style."""
style = Style.null()
if self.row_styles:
style += console.get_style(self.row_styles[index % len(self.row_styles)])
row_style = self.rows[index].style
if row_style is not None:
style += console.get_style(row_style)
return style
    def __rich_measure__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> Measurement:
        """Measure the minimum and maximum width the table can occupy."""
        max_width = options.max_width
        if self.width is not None:
            max_width = self.width
        if max_width < 0:
            return Measurement(0, 0)

        extra_width = self._extra_width
        # First distribute the available content width across the columns.
        max_width = sum(
            self._calculate_column_widths(
                console, options.update_width(max_width - extra_width)
            )
        )
        _measure_column = self._measure_column

        measurements = [
            _measure_column(console, options.update_width(max_width), column)
            for column in self.columns
        ]
        minimum_width = (
            sum(measurement.minimum for measurement in measurements) + extra_width
        )
        # An explicit table width overrides the measured maximum.
        maximum_width = (
            sum(measurement.maximum for measurement in measurements) + extra_width
            if (self.width is None)
            else self.width
        )
        measurement = Measurement(minimum_width, maximum_width)
        measurement = measurement.clamp(self.min_width)
        return measurement

    @property
    def padding(self) -> Tuple[int, int, int, int]:
        """Get cell padding."""
        return self._padding

    @padding.setter
    def padding(self, padding: PaddingDimensions) -> "Table":
        """Set cell padding."""
        # NOTE(review): a property setter's return value is discarded by
        # Python attribute assignment, so "return self" here has no effect.
        self._padding = Padding.unpack(padding)
        return self
def add_column(
self,
header: "RenderableType" = "",
footer: "RenderableType" = "",
*,
header_style: Optional[StyleType] = None,
footer_style: Optional[StyleType] = None,
style: Optional[StyleType] = None,
justify: "JustifyMethod" = "left",
overflow: "OverflowMethod" = "ellipsis",
width: Optional[int] = None,
min_width: Optional[int] = None,
max_width: Optional[int] = None,
ratio: Optional[int] = None,
no_wrap: bool = False,
) -> None:
"""Add a column to the table.
Args:
header (RenderableType, optional): Text or renderable for the header.
Defaults to "".
footer (RenderableType, optional): Text or renderable for the footer.
Defaults to "".
header_style (Union[str, Style], optional): Style for the header, or None for default. Defaults to None.
footer_style (Union[str, Style], optional): Style for the footer, or None for default. Defaults to None.
style (Union[str, Style], optional): Style for the column cells, or None for default. Defaults to None.
justify (JustifyMethod, optional): Alignment for cells. Defaults to "left".
overflow (OverflowMethod): Overflow method: "crop", "fold", "ellipsis". Defaults to "ellipsis".
width (int, optional): Desired width of column in characters, or None to fit to contents. Defaults to None.
min_width (Optional[int], optional): Minimum width of column, or ``None`` for no minimum. Defaults to None.
max_width (Optional[int], optional): Maximum width of column, or ``None`` for no maximum. Defaults to None.
ratio (int, optional): Flexible ratio for the column (requires ``Table.expand`` or ``Table.width``). Defaults to None.
no_wrap (bool, optional): Set to ``True`` to disable wrapping of this column.
"""
column = Column(
_index=len(self.columns),
header=header,
footer=footer,
header_style=header_style or "",
footer_style=footer_style or "",
style=style or "",
justify=justify,
overflow=overflow,
width=width,
min_width=min_width,
max_width=max_width,
ratio=ratio,
no_wrap=no_wrap,
)
self.columns.append(column)
def add_row(
self,
*renderables: Optional["RenderableType"],
style: Optional[StyleType] = None,
end_section: bool = False,
) -> None:
"""Add a row of renderables.
Args:
*renderables (None or renderable): Each cell in a row must be a renderable object (including str),
or ``None`` for a blank cell.
style (StyleType, optional): An optional style to apply to the entire row. Defaults to None.
end_section (bool, optional): End a section and draw a line. Defaults to False.
Raises:
errors.NotRenderableError: If you add something that can't be rendered.
"""
def add_cell(column: Column, renderable: "RenderableType") -> None:
column._cells.append(renderable)
cell_renderables: List[Optional["RenderableType"]] = list(renderables)
columns = self.columns
if len(cell_renderables) < len(columns):
cell_renderables = [
*cell_renderables,
*[None] * (len(columns) - len(cell_renderables)),
]
for index, renderable in enumerate(cell_renderables):
if index == len(columns):
column = Column(_index=index)
for _ in self.rows:
add_cell(column, Text(""))
self.columns.append(column)
else:
column = columns[index]
if renderable is None:
add_cell(column, "")
elif is_renderable(renderable):
add_cell(column, renderable)
else:
raise errors.NotRenderableError(
f"unable to render {type(renderable).__name__}; a string or other renderable object is required"
)
self.rows.append(Row(style=style, end_section=end_section))
    def __rich_console__(
        self, console: "Console", options: "ConsoleOptions"
    ) -> "RenderResult":
        """Render the table: title, body, then caption."""
        if not self.columns:
            yield Segment("\n")
            return

        max_width = options.max_width
        if self.width is not None:
            max_width = self.width

        extra_width = self._extra_width
        # Column widths exclude the space taken by borders and edges.
        widths = self._calculate_column_widths(
            console, options.update_width(max_width - extra_width)
        )
        table_width = sum(widths) + extra_width

        render_options = options.update(
            width=table_width, highlight=self.highlight, height=None
        )

        def render_annotation(
            text: TextType, style: StyleType, justify: "JustifyMethod" = "center"
        ) -> "RenderResult":
            # Render the title/caption line, converting a plain str to Text.
            render_text = (
                console.render_str(text, style=style, highlight=False)
                if isinstance(text, str)
                else text
            )
            return console.render(
                render_text, options=render_options.update(justify=justify)
            )

        if self.title:
            yield from render_annotation(
                self.title,
                style=Style.pick_first(self.title_style, "table.title"),
                justify=self.title_justify,
            )
        yield from self._render(console, render_options, widths)
        if self.caption:
            yield from render_annotation(
                self.caption,
                style=Style.pick_first(self.caption_style, "table.caption"),
                justify=self.caption_justify,
            )
    def _calculate_column_widths(
        self, console: "Console", options: "ConsoleOptions"
    ) -> List[int]:
        """Calculate the widths of each column, including padding, not including borders.

        Starts from each column's ideal (maximum) width, then adjusts:
        flexible columns absorb spare width when ``expand`` is set, over-wide
        tables are collapsed toward ``max_width``, and under-wide tables are
        padded out to ``expand``/``min_width`` targets.
        """
        max_width = options.max_width
        columns = self.columns
        # Measure each column's (minimum, maximum) content width
        width_ranges = [
            self._measure_column(console, options, column) for column in columns
        ]
        # Begin with every column at its ideal width
        widths = [_range.maximum or 1 for _range in width_ranges]
        get_padding_width = self._get_padding_width
        extra_width = self._extra_width
        if self.expand:
            # Distribute spare width among flexible (ratio) columns
            ratios = [col.ratio or 0 for col in columns if col.flexible]
            if any(ratios):
                fixed_widths = [
                    0 if column.flexible else _range.maximum
                    for _range, column in zip(width_ranges, columns)
                ]
                # Minimum width each flexible column must retain
                flex_minimum = [
                    (column.width or 1) + get_padding_width(column._index)
                    for column in columns
                    if column.flexible
                ]
                flexible_width = max_width - sum(fixed_widths)
                flex_widths = ratio_distribute(flexible_width, ratios, flex_minimum)
                iter_flex_widths = iter(flex_widths)
                for index, column in enumerate(columns):
                    if column.flexible:
                        widths[index] = fixed_widths[index] + next(iter_flex_widths)
        table_width = sum(widths)
        if table_width > max_width:
            # Too wide: shrink wrapable columns first
            widths = self._collapse_widths(
                widths,
                [(column.width is None and not column.no_wrap) for column in columns],
                max_width,
            )
            table_width = sum(widths)
            # last resort, reduce columns evenly
            if table_width > max_width:
                excess_width = table_width - max_width
                widths = ratio_reduce(excess_width, [1] * len(widths), widths, widths)
                table_width = sum(widths)
            # Re-measure with the reduced widths so content wraps within them
            width_ranges = [
                self._measure_column(console, options.update_width(width), column)
                for width, column in zip(widths, columns)
            ]
            widths = [_range.maximum or 0 for _range in width_ranges]
        if (table_width < max_width and self.expand) or (
            self.min_width is not None and table_width < (self.min_width - extra_width)
        ):
            # Too narrow: pad columns out proportionally to reach the target
            _max_width = (
                max_width
                if self.min_width is None
                else min(self.min_width - extra_width, max_width)
            )
            pad_widths = ratio_distribute(_max_width - table_width, widths)
            widths = [_width + pad for _width, pad in zip(widths, pad_widths)]
        return widths
    @classmethod
    def _collapse_widths(
        cls, widths: List[int], wrapable: List[bool], max_width: int
    ) -> List[int]:
        """Reduce widths so that the total is under max_width.

        Repeatedly shrinks the widest wrapable column(s) toward the next
        widest, until the table fits or no further reduction is possible.

        Args:
            widths (List[int]): List of widths.
            wrapable (List[bool]): List of booleans that indicate if a column may shrink.
            max_width (int): Maximum width to reduce to.

        Returns:
            List[int]: A new list of widths.
        """
        total_width = sum(widths)
        excess_width = total_width - max_width
        if any(wrapable):
            while total_width and excess_width > 0:
                # Widest column that is allowed to shrink
                max_column = max(
                    width for width, allow_wrap in zip(widths, wrapable) if allow_wrap
                )
                # Next-widest wrapable column (0 when there is only one candidate)
                second_max_column = max(
                    width if allow_wrap and width != max_column else 0
                    for width, allow_wrap in zip(widths, wrapable)
                )
                column_difference = max_column - second_max_column
                # Only columns currently at the maximum width are reduced this pass
                ratios = [
                    (1 if (width == max_column and allow_wrap) else 0)
                    for width, allow_wrap in zip(widths, wrapable)
                ]
                # Stop when nothing can shrink or the widest columns are tied
                if not any(ratios) or not column_difference:
                    break
                # Shrink by at most the gap to the next-widest column per pass
                max_reduce = [min(excess_width, column_difference)] * len(widths)
                widths = ratio_reduce(excess_width, ratios, max_reduce, widths)
                total_width = sum(widths)
                excess_width = total_width - max_width
        return widths
    def _get_cells(
        self, console: "Console", column_index: int, column: Column
    ) -> Iterable[_Cell]:
        """Get all the cells with padding and optional header.

        Yields one ``_Cell`` per row of the column, in order: header (if
        shown), body cells, then footer (if shown), each wrapped in Padding
        when the table has any padding configured.
        """
        # Hoist frequently-read attributes into locals
        collapse_padding = self.collapse_padding
        pad_edge = self.pad_edge
        padding = self.padding
        any_padding = any(padding)
        first_column = column_index == 0
        last_column = column_index == len(self.columns) - 1
        # Padding only varies by (first_row, last_row), so cache the 4 combos
        _padding_cache: Dict[Tuple[bool, bool], Tuple[int, int, int, int]] = {}
        def get_padding(first_row: bool, last_row: bool) -> Tuple[int, int, int, int]:
            # Compute effective (top, right, bottom, left) padding for a cell
            cached = _padding_cache.get((first_row, last_row))
            if cached:
                return cached
            top, right, bottom, left = padding
            if collapse_padding:
                # Interior cells share padding with their neighbors
                if not first_column:
                    left = max(0, left - right)
                if not last_row:
                    # NOTE(review): this uses top - bottom, unlike the
                    # left - right form above — confirm the asymmetry is
                    # intended before changing it.
                    bottom = max(0, top - bottom)
            if not pad_edge:
                # Strip padding on the outer edges of the table
                if first_column:
                    left = 0
                if last_column:
                    right = 0
                if first_row:
                    top = 0
                if last_row:
                    bottom = 0
            _padding = (top, right, bottom, left)
            _padding_cache[(first_row, last_row)] = _padding
            return _padding
        # Collect (style, renderable) pairs: header, body cells, footer
        raw_cells: List[Tuple[StyleType, "RenderableType"]] = []
        _append = raw_cells.append
        get_style = console.get_style
        if self.show_header:
            header_style = get_style(self.header_style or "") + get_style(
                column.header_style
            )
            _append((header_style, column.header))
        cell_style = get_style(column.style or "")
        for cell in column.cells:
            _append((cell_style, cell))
        if self.show_footer:
            footer_style = get_style(self.footer_style or "") + get_style(
                column.footer_style
            )
            _append((footer_style, column.footer))
        if any_padding:
            _Padding = Padding
            for first, last, (style, renderable) in loop_first_last(raw_cells):
                yield _Cell(style, _Padding(renderable, get_padding(first, last)))
        else:
            for (style, renderable) in raw_cells:
                yield _Cell(style, renderable)
def _get_padding_width(self, column_index: int) -> int:
"""Get extra width from padding."""
_, pad_right, _, pad_left = self.padding
if self.collapse_padding:
if column_index > 0:
pad_left = max(0, pad_left - pad_right)
return pad_left + pad_right
    def _measure_column(
        self,
        console: "Console",
        options: "ConsoleOptions",
        column: Column,
    ) -> Measurement:
        """Get the minimum and maximum width of the column.

        The result includes the column's padding and is clamped both to the
        available render width and to the column's own min_width/max_width.
        """
        max_width = options.max_width
        # No room at all: report a zero-width measurement
        if max_width < 1:
            return Measurement(0, 0)
        padding_width = self._get_padding_width(column._index)
        if column.width is not None:
            # Fixed width column
            return Measurement(
                column.width + padding_width, column.width + padding_width
            ).with_maximum(max_width)
        # Flexible column, we need to measure contents
        min_widths: List[int] = []
        max_widths: List[int] = []
        append_min = min_widths.append
        append_max = max_widths.append
        get_render_width = Measurement.get
        for cell in self._get_cells(console, column._index, column):
            _min, _max = get_render_width(console, options, cell.renderable)
            append_min(_min)
            append_max(_max)
        # The column must fit its widest cell; empty columns get sane defaults
        measurement = Measurement(
            max(min_widths) if min_widths else 1,
            max(max_widths) if max_widths else max_width,
        ).with_maximum(max_width)
        # Apply per-column constraints (padding counts toward the limits)
        measurement = measurement.clamp(
            None if column.min_width is None else column.min_width + padding_width,
            None if column.max_width is None else column.max_width + padding_width,
        )
        return measurement
    def _render(
        self, console: "Console", options: "ConsoleOptions", widths: List[int]
    ) -> "RenderResult":
        """Render the table body as a stream of Segments.

        Yields the optional top edge, each row (header, body cells, footer)
        with box borders and column dividers, separator lines between rows,
        and the optional bottom edge.

        :param console: console being rendered to.
        :param options: render options for the table as a whole.
        :param widths: pre-computed total width of each column.
        """
        table_style = console.get_style(self.style or "")
        border_style = table_style + console.get_style(self.border_style or "")
        # Transpose column-major cells into rows of cells
        _column_cells = (
            self._get_cells(console, column_index, column)
            for column_index, column in enumerate(self.columns)
        )
        row_cells: List[Tuple[_Cell, ...]] = list(zip(*_column_cells))
        # Substitute a terminal-safe box when required
        _box = (
            self.box.substitute(
                options, safe=pick_bool(self.safe_box, console.safe_box)
            )
            if self.box
            else None
        )
        # _box = self.box
        new_line = Segment.line()
        # Hoist attributes into locals for the render loop
        columns = self.columns
        show_header = self.show_header
        show_footer = self.show_footer
        show_edge = self.show_edge
        show_lines = self.show_lines
        leading = self.leading
        _Segment = Segment
        if _box:
            # Pre-build (left, right, divider) border segments for
            # header rows [0], footer rows [1], and body rows [2]
            box_segments = [
                (
                    _Segment(_box.head_left, border_style),
                    _Segment(_box.head_right, border_style),
                    _Segment(_box.head_vertical, border_style),
                ),
                (
                    _Segment(_box.foot_left, border_style),
                    _Segment(_box.foot_right, border_style),
                    _Segment(_box.foot_vertical, border_style),
                ),
                (
                    _Segment(_box.mid_left, border_style),
                    _Segment(_box.mid_right, border_style),
                    _Segment(_box.mid_vertical, border_style),
                ),
            ]
            if show_edge:
                yield _Segment(_box.get_top(widths), border_style)
                yield new_line
        else:
            box_segments = []
        get_row_style = self.get_row_style
        get_style = console.get_style
        for index, (first, last, row_cell) in enumerate(loop_first_last(row_cells)):
            header_row = first and show_header
            footer_row = last and show_footer
            # Header/footer rows have no backing Row object
            row = (
                self.rows[index - show_header]
                if (not header_row and not footer_row)
                else None
            )
            max_height = 1
            cells: List[List[List[Segment]]] = []
            if header_row or footer_row:
                row_style = Style.null()
            else:
                row_style = get_style(
                    get_row_style(console, index - 1 if show_header else index)
                )
            # Render every cell of the row to lines; track the tallest cell
            for width, cell, column in zip(widths, row_cell, columns):
                render_options = options.update(
                    width=width,
                    justify=column.justify,
                    no_wrap=column.no_wrap,
                    overflow=column.overflow,
                    height=None,
                )
                cell_style = table_style + row_style + get_style(cell.style)
                lines = console.render_lines(
                    cell.renderable, render_options, style=cell_style
                )
                max_height = max(max_height, len(lines))
                cells.append(lines)
            # Pad every cell to the same width x height rectangle
            cells[:] = [
                _Segment.set_shape(
                    _cell, width, max_height, style=table_style + row_style
                )
                for width, _cell in zip(widths, cells)
            ]
            if _box:
                # Separator above the footer row
                if last and show_footer:
                    yield _Segment(
                        _box.get_row(widths, "foot", edge=show_edge), border_style
                    )
                    yield new_line
                left, right, _divider = box_segments[0 if first else (2 if last else 1)]
                # If the column divider is whitespace also style it with the row background
                divider = (
                    _divider
                    if _divider.text.strip()
                    else _Segment(
                        _divider.text, row_style.background_style + _divider.style
                    )
                )
                for line_no in range(max_height):
                    if show_edge:
                        yield left
                    for last_cell, rendered_cell in loop_last(cells):
                        yield from rendered_cell[line_no]
                        if not last_cell:
                            yield divider
                    if show_edge:
                        yield right
                    yield new_line
            else:
                # No box: emit cell lines with no borders or dividers
                for line_no in range(max_height):
                    for rendered_cell in cells:
                        yield from rendered_cell[line_no]
                    yield new_line
            # Separator below the header row
            if _box and first and show_header:
                yield _Segment(
                    _box.get_row(widths, "head", edge=show_edge), border_style
                )
                yield new_line
            end_section = row and row.end_section
            # Optional separators between body rows (lines, leading, sections)
            if _box and (show_lines or leading or end_section):
                if (
                    not last
                    and not (show_footer and index >= len(row_cells) - 2)
                    and not (show_header and header_row)
                ):
                    if leading:
                        yield _Segment(
                            _box.get_row(widths, "mid", edge=show_edge) * leading,
                            border_style,
                        )
                    else:
                        yield _Segment(
                            _box.get_row(widths, "row", edge=show_edge), border_style
                        )
                    yield new_line
        if _box and show_edge:
            yield _Segment(_box.get_bottom(widths), border_style)
            yield new_line
if __name__ == "__main__":  # pragma: no cover
    # Demo / smoke test: build an example table and render it under several
    # option combinations when this module is executed directly.
    from rich.console import Console
    from rich.highlighter import ReprHighlighter
    from rich.table import Table as Table
    from ._timer import timer
    with timer("Table render"):
        table = Table(
            title="Star Wars Movies",
            caption="Rich example table",
            caption_justify="right",
        )
        table.add_column(
            "Released", header_style="bright_cyan", style="cyan", no_wrap=True
        )
        table.add_column("Title", style="magenta")
        table.add_column("Box Office", justify="right", style="green")
        table.add_row(
            "Dec 20, 2019",
            "Star Wars: The Rise of Skywalker",
            "$952,110,690",
        )
        table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
        table.add_row(
            "Dec 15, 2017",
            "Star Wars Ep. V111: The Last Jedi",
            "$1,332,539,889",
            style="on black",
            end_section=True,
        )
        table.add_row(
            "Dec 16, 2016",
            "Rogue One: A Star Wars Story",
            "$1,332,439,889",
        )
        def header(text: str) -> None:
            # Print a ruled section header between the demo renders
            console.print()
            console.rule(highlight(text))
            console.print()
        console = Console()
        highlight = ReprHighlighter()
        header("Example Table")
        console.print(table, justify="center")
        # Expand the table to the full console width
        table.expand = True
        header("expand=True")
        console.print(table)
        # Fixed-width rendering
        table.width = 50
        header("width=50")
        console.print(table, justify="center")
        # Alternating row styles
        table.width = None
        table.expand = False
        table.row_styles = ["dim", "none"]
        header("row_styles=['dim', 'none']")
        console.print(table, justify="center")
        # Blank leading lines between rows
        table.width = None
        table.expand = False
        table.row_styles = ["dim", "none"]
        table.leading = 1
        header("leading=1, row_styles=['dim', 'none']")
        console.print(table, justify="center")
        # Ruled lines between rows
        table.width = None
        table.expand = False
        table.row_styles = ["dim", "none"]
        table.show_lines = True
        table.leading = 0
        header("show_lines=True, row_styles=['dim', 'none']")
        console.print(table, justify="center")
| mit | e4e86d7471968b74e28943061793f7b6 | 36.935065 | 171 | 0.553863 | 4.225169 | false | false | false | false |
danforthcenter/plantcv | plantcv/plantcv/transform/nonuniform_illumination.py | 1 | 1443 | # Correct for nonuniform illumination
import os
import cv2
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import rgb2gray
from plantcv.plantcv import gaussian_blur
from plantcv.plantcv.transform import rescale
from plantcv.plantcv._debug import _debug
def nonuniform_illumination(img, ksize):
    """Correct for non uniform illumination i.e. spotlight correction.

    Estimates the background illumination with a morphological opening followed
    by a heavy Gaussian blur, subtracts that estimate from the image (re-centered
    on its mean brightness), and rescales the result to the full 8-bit range.

    Inputs:
    img   = RGB or grayscale image data
    ksize = kernel size (in pixels) used for both the morphological opening
            and the Gaussian blur that estimate the background illumination

    Returns:
    corrected_img = rescaled corrected image

    :param img: numpy.ndarray
    :param ksize: int
    :return corrected_img: numpy.ndarray
    """
    # Work on a single channel; collapse RGB input to grayscale
    if len(np.shape(img)) == 3:
        img = rgb2gray(img)
    # Fill foreground objects
    kernel = np.ones((ksize, ksize), np.uint8)
    opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    # Store debug mode (suppress debug output from intermediate steps)
    debug = params.debug
    params.debug = None
    # Heavily blurred image acts like a background image
    blurred_img = gaussian_blur(opening, ksize=(ksize, ksize))
    img_mean = np.mean(blurred_img)
    # Subtract the illumination estimate, re-centering on the mean brightness
    corrected_img = img - blurred_img + img_mean
    corrected_img = rescale(gray_img=corrected_img, min_value=0, max_value=255)
    # Reset debug mode
    params.debug = debug
    _debug(visual=corrected_img, filename=os.path.join(params.debug_outdir, str(params.device) + '_correct_illumination.png'))
    return corrected_img
| mit | 916e9e099bf28835096ad46864362a22 | 28.44898 | 126 | 0.702703 | 3.452153 | false | false | false | false |
danforthcenter/plantcv | plantcv/plantcv/analyze_thermal_values.py | 1 | 3672 | # Analyze signal data in Thermal image
import os
import numpy as np
from plantcv.plantcv import deprecation_warning, params
from plantcv.plantcv import outputs
from plantcv.plantcv._debug import _debug
from plantcv.plantcv.visualize import histogram
from plotnine import labs
def analyze_thermal_values(thermal_array, mask, histplot=None, label="default"):
    """Analyze the thermal values of the masked pixels.

    Records the maximum, minimum, mean, and median temperature plus the
    histogram of temperatures within the mask as observations, and returns
    a histogram plot.

    Inputs:
    thermal_array = numpy array of thermal values
    mask          = Binary mask made from selected contours
    histplot      = deprecated; a histogram is always created
    label         = optional label parameter, modifies the variable name of observations recorded

    Returns:
    analysis_image = histogram figure

    :param thermal_array: numpy.ndarray
    :param mask: numpy.ndarray
    :param histplot: bool
    :param label: str
    :return analysis_image: ggplot
    """
    if histplot is not None:
        deprecation_warning("'histplot' will be deprecated in a future version of PlantCV. "
                            "This function creates a histogram by default.")
    # Store debug mode
    debug = params.debug
    # apply plant shaped mask to image and calculate statistics based on the masked image
    masked_thermal = thermal_array[np.where(mask > 0)]
    maxtemp = np.amax(masked_thermal)
    mintemp = np.amin(masked_thermal)
    avgtemp = np.average(masked_thermal)
    mediantemp = np.median(masked_thermal)
    # call the histogram function (debug suppressed for the intermediate step)
    params.debug = None
    hist_fig, hist_data = histogram(thermal_array, mask=mask, hist_data=True)
    bin_labels, hist_percent = hist_data['pixel intensity'].tolist(), hist_data['proportion of pixels (%)'].tolist()
    # Store data into outputs class
    outputs.add_observation(sample=label, variable='max_temp', trait='maximum temperature',
                            method='plantcv.plantcv.analyze_thermal_values', scale='degrees', datatype=float,
                            value=maxtemp, label='degrees')
    outputs.add_observation(sample=label, variable='min_temp', trait='minimum temperature',
                            method='plantcv.plantcv.analyze_thermal_values', scale='degrees', datatype=float,
                            value=mintemp, label='degrees')
    outputs.add_observation(sample=label, variable='mean_temp', trait='mean temperature',
                            method='plantcv.plantcv.analyze_thermal_values', scale='degrees', datatype=float,
                            value=avgtemp, label='degrees')
    outputs.add_observation(sample=label, variable='median_temp', trait='median temperature',
                            method='plantcv.plantcv.analyze_thermal_values', scale='degrees', datatype=float,
                            value=mediantemp, label='degrees')
    outputs.add_observation(sample=label, variable='thermal_frequencies', trait='thermal frequencies',
                            method='plantcv.plantcv.analyze_thermal_values', scale='frequency', datatype=list,
                            value=hist_percent, label=bin_labels)
    # Restore user debug setting
    params.debug = debug
    # change column names of "hist_data"
    hist_fig = hist_fig + labs(x="Temperature C", y="Proportion of pixels (%)")
    # Print or plot histogram
    _debug(visual=hist_fig, filename=os.path.join(params.debug_outdir, str(params.device) + "_therm_histogram.png"))
    analysis_image = hist_fig
    # Store images
    outputs.images.append(analysis_image)
    return analysis_image
| mit | 8415a0d11d83f851b1988be479d8dc7b | 44.9 | 116 | 0.668301 | 4.196571 | false | false | false | false |
danforthcenter/plantcv | plantcv/plantcv/cluster_contour_mask.py | 1 | 2561 | # cluster objects and split into masks
def cluster_contour_mask(rgb_img, clusters_i, contours, hierarchies):
    """Outputs masks for the grouped clusters. Since there can be a variable number of
    clusters/masks the output is a list of arrays.

    NOTE: Work in progress. The implementation below is commented out while a
    larger restructuring of the contour/hierarchy interface is discussed (see
    the comment inside), so calling this function currently does nothing and
    returns None rather than the masks described here.

    Inputs:
    rgb_img       = RGB image data
    clusters_i    = clusters, output from cluster_contours function
    contours      = contours, contours from cluster_contours function
    hierarchies   = hierarchies, hierarchies from cluster_contours function

    Returns:
    output_masks  = resulting masks
    masked_images = masked_images

    :param rgb_img: numpy.ndarray
    :param clusters_i: numpy array
    :param contours: numpy array
    :param hierarchies: numpy array
    :return output_masks: list of resulting masks
    :return masked_images: list of masked images
    """
    # WORK IN PROGRESS
    # CURRENTLY DISCUSSING THE POSSIBILITY OF A LARGE RESTRUCTURING WHERE WE HIDE CONTOURS AND HIERARCHIES FROM
    # USERS AND USE MASKS AND find_objects() INSIDE FUNCTIONS TO SIMPLIFY INPUTS AND OUTPUTS
    # params.device += 1
    #
    # output_masks = []
    # masked_images = []
    #
    # for y, x in enumerate(clusters_i):
    #     iy, ix, iz = np.shape(rgb_img)
    #     mask = np.zeros((iy, ix, 3), dtype=np.uint8)
    #     masked_img = np.copy(rgb_img)
    #     for a in x:
    #         if hierarchies[0][a][3] > -1:
    #             cv2.drawContours(mask, contours, a, (0, 0, 0), -1, lineType=8, hierarchy=hierarchies)
    #         else:
    #             cv2.drawContours(mask, contours, a, (255, 255, 255), -1, lineType=8, hierarchy=hierarchies)
    #
    #     mask_binary = mask[:, :, 0]
    #     output_masks.append(mask_binary)
    #
    #     if np.sum(mask_binary) == 0:
    #         pass
    #     else:
    #         retval, mask_binary = cv2.threshold(mask_binary, 254, 255, cv2.THRESH_BINARY)
    #         masked1 = apply_mask(masked_img, mask_binary, 'white')
    #         masked_images.append(masked1)
    #
    #         if params.debug == 'print':
    #             print_image(masked1,
    #                         os.path.join(params.debug_outdir, str(params.device) + '_clusters_' + str(y) + ".png"))
    #             print_image(mask_binary,
    #                         os.path.join(params.debug_outdir, str(params.device) + '_clusters_mask_' + str(y) + ".png"))
    #         elif params.debug == 'plot':
    #             plot_image(masked1)
    #             plot_image(mask_binary, cmap='gray')
    #
    # return output_masks, masked_images
| mit | 3bcb41c2206b2abe598cbc345f94cd3c | 39.015625 | 122 | 0.591566 | 3.446837 | false | false | false | false |
danforthcenter/plantcv | tests/parallel/test_process_results.py | 1 | 1903 | import pytest
import os
from plantcv.parallel import process_results
def test_process_results(parallel_test_data, tmpdir):
    """process_results should append to an existing JSON results file."""
    # Output JSON file inside a fresh temporary directory
    out_json = tmpdir.mkdir("sub").join("appended_results.json")
    # Running twice exercises the append-to-existing-file code path
    for _ in range(2):
        process_results(job_dir=parallel_test_data.parallel_results_dir, json_file=out_json)
    # The written file should match the expected appended results
    observed = parallel_test_data.load_json(json_file=out_json)
    assert observed == parallel_test_data.appended_results()
def test_process_results_new_output(parallel_test_data, tmpdir):
    """process_results should create a new JSON results file when none exists."""
    # Create a test tmp directory and results file
    result_file = tmpdir.mkdir("sub").join("new_result.json")
    process_results(job_dir=parallel_test_data.parallel_results_dir, json_file=result_file)
    # Assert output matches expected values
    # Fix: the observed results are what was just written to the file and the
    # expected values come from the test-data fixture (the two names were
    # swapped before; equality is symmetric so behavior is unchanged).
    results = parallel_test_data.load_json(json_file=result_file)
    expected = parallel_test_data.new_results()
    assert results == expected
def test_process_results_valid_json(parallel_test_data):
    """process_results should reject JSON that lacks the expected keys."""
    # The fixture file parses as JSON but is missing the required result keys,
    # so a RuntimeError is expected.
    with pytest.raises(RuntimeError):
        process_results(job_dir=parallel_test_data.parallel_results_dir,
                        json_file=parallel_test_data.valid_json_file)
def test_process_results_invalid_json(tmpdir):
    """process_results should reject a results file that is not JSON at all."""
    # Write a non-JSON file into a fresh job directory
    bad_file = tmpdir.mkdir("bad_results").join("invalid.txt")
    bad_file.write("Invalid")
    job_directory = os.path.split(str(bad_file))[0]
    with pytest.raises(RuntimeError):
        process_results(job_dir=job_directory, json_file=bad_file)
| mit | 038e59758c32ad2ea394314485b63b5e | 43.255814 | 118 | 0.729375 | 3.695146 | false | true | false | false |
danforthcenter/plantcv | plantcv/plantcv/morphology/find_branch_pts.py | 1 | 3836 | # Find branch points from skeleton image
import os
import cv2
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import dilate
from plantcv.plantcv import outputs
from plantcv.plantcv import find_objects
from plantcv.plantcv._debug import _debug
def find_branch_pts(skel_img, mask=None, label="default"):
    """Find branch points in a skeletonized image.

    The branching algorithm was inspired by Jean-Patrick Pommier: https://gist.github.com/jeanpat/5712699

    Branch points are located with a hit-or-miss transform using a bank of
    T-shaped and Y-shaped 3x3 kernels, recorded as observations, and drawn
    onto a debug image.

    Inputs:
    skel_img    = Skeletonized image
    mask        = (Optional) binary mask for debugging. If provided, debug image will be overlaid on the mask.
    label       = optional label parameter, modifies the variable name of observations recorded

    Returns:
    branch_pts_img = Image with just branch points, rest 0

    :param skel_img: numpy.ndarray
    :param mask: np.ndarray
    :param label: str
    :return branch_pts_img: numpy.ndarray
    """
    # In a kernel: 1 values line up with 255s, -1s line up with 0s, and 0s correspond to don't care
    # T like branch points
    t1 = np.array([[-1, 1, -1],
                   [1, 1, 1],
                   [-1, -1, -1]])
    t2 = np.array([[1, -1, 1],
                   [-1, 1, -1],
                   [1, -1, -1]])
    # The remaining T kernels are successive 90-degree rotations
    t3 = np.rot90(t1)
    t4 = np.rot90(t2)
    t5 = np.rot90(t3)
    t6 = np.rot90(t4)
    t7 = np.rot90(t5)
    t8 = np.rot90(t6)
    # Y like branch points
    y1 = np.array([[1, -1, 1],
                   [0, 1, 0],
                   [0, 1, 0]])
    y2 = np.array([[-1, 1, -1],
                   [1, 1, 0],
                   [-1, 0, 1]])
    # The remaining Y kernels are successive 90-degree rotations
    y3 = np.rot90(y1)
    y4 = np.rot90(y2)
    y5 = np.rot90(y3)
    y6 = np.rot90(y4)
    y7 = np.rot90(y5)
    y8 = np.rot90(y6)
    kernels = [t1, t2, t3, t4, t5, t6, t7, t8, y1, y2, y3, y4, y5, y6, y7, y8]
    branch_pts_img = np.zeros(skel_img.shape[:2], dtype=int)
    # Store branch points: OR together the hit-or-miss result of every kernel
    for kernel in kernels:
        branch_pts_img = np.logical_or(cv2.morphologyEx(skel_img, op=cv2.MORPH_HITMISS, kernel=kernel,
                                                        borderType=cv2.BORDER_CONSTANT, borderValue=0), branch_pts_img)
    # Switch type to uint8 rather than bool
    branch_pts_img = branch_pts_img.astype(np.uint8) * 255
    # Store debug (suppress debug output from intermediate steps)
    debug = params.debug
    params.debug = None
    # Make debugging image
    if mask is None:
        dilated_skel = dilate(skel_img, params.line_thickness, 1)
        branch_plot = cv2.cvtColor(dilated_skel, cv2.COLOR_GRAY2RGB)
    else:
        # Make debugging image on mask
        mask_copy = mask.copy()
        branch_plot = cv2.cvtColor(mask_copy, cv2.COLOR_GRAY2RGB)
        skel_obj, skel_hier = find_objects(skel_img, skel_img)
        cv2.drawContours(branch_plot, skel_obj, -1, (150, 150, 150), params.line_thickness, lineType=8,
                         hierarchy=skel_hier)
    branch_objects, _ = find_objects(branch_pts_img, branch_pts_img)
    # Initialize list of tip data points
    branch_list = []
    branch_labels = []
    # Record each branch point's coordinate and draw it on the debug image
    for i, branch in enumerate(branch_objects):
        x, y = branch.ravel()[:2]
        coord = (int(x), int(y))
        branch_list.append(coord)
        branch_labels.append(i)
        cv2.circle(branch_plot, (x, y), params.line_thickness, (255, 0, 255), -1)
    outputs.add_observation(sample=label, variable='branch_pts',
                            trait='list of branch-point coordinates identified from a skeleton',
                            method='plantcv.plantcv.morphology.find_branch_pts', scale='pixels', datatype=list,
                            value=branch_list, label=branch_labels)
    # Reset debug mode
    params.debug = debug
    _debug(visual=branch_plot, filename=os.path.join(params.debug_outdir, f"{params.device}_branch_pts.png"))
    return branch_pts_img
| mit | 6bdd9206b1a385b940476b1c92bef827 | 34.518519 | 119 | 0.593587 | 3.178128 | false | false | false | false |
danforthcenter/plantcv | plantcv/plantcv/plot_image.py | 1 | 1243 | # Plot image to screen
import cv2
import numpy
import matplotlib
from plantcv.plantcv import fatal_error, params
from matplotlib import pyplot as plt
def plot_image(img, cmap=None):
    """Display an image on screen with the appropriate backend.

    numpy arrays are shown with matplotlib (color images are assumed to be
    BGR, as stored by OpenCV, and converted to RGB for display); plotnine
    ggplot objects are printed; matplotlib Figures are rejected.

    :param img: numpy.ndarray
    :param cmap: str
    :return:
    """
    img_type = type(img)
    shape = numpy.shape(img)
    if img_type is numpy.ndarray:
        matplotlib.rcParams['figure.dpi'] = params.dpi
        ndim = len(shape)
        if ndim == 3:
            # OpenCV stores color images as BGR; convert to RGB before plotting
            plt.figure()
            plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            plt.show()
        elif ndim == 2:
            # Grayscale image: default to the gray colormap unless one is given
            plt.figure()
            plt.imshow(img, cmap="gray" if cmap is None else cmap)
            plt.show()
    elif img_type is matplotlib.figure.Figure:
        fatal_error("Error, matplotlib Figure not supported. Instead try running without plot_image.")
    elif str(img_type) == "<class 'plotnine.ggplot.ggplot'>":
        # plotnine ggplot objects render themselves when printed
        print(img)
| mit | 495a48364752a753da5940360b53ae23 | 27.906977 | 102 | 0.612228 | 3.789634 | false | false | false | false |
danforthcenter/plantcv | plantcv/learn/naive_bayes.py | 1 | 9072 | # Naive Bayes
import os
import cv2
import numpy as np
from scipy import stats
from matplotlib import pyplot as plt
def naive_bayes(imgdir, maskdir, outfile, mkplots=False):
    """Naive Bayes two-class training function.

    Reads paired RGB images and binary masks, samples plant (foreground) and
    background pixels, fits a Gaussian kernel density estimate per HSV channel
    for each class, and writes the resulting probability density functions to
    a tab-delimited text file.

    Inputs:
    imgdir  = Path to a directory of original 8-bit RGB images.
    maskdir = Path to a directory of binary mask images. Mask images must have the same name as their corresponding
              color images.
    outfile = Name of the output text file that will store the color channel probability density functions.
    mkplots = Make PDF plots (True or False).

    :param imgdir: str
    :param maskdir: str
    :param outfile: str
    :param mkplots: bool
    """
    # Initialize color channel ndarrays for plant (foreground) and background
    plant = {"hue": np.array([], dtype=np.uint8), "saturation": np.array([], dtype=np.uint8),
             "value": np.array([], dtype=np.uint8)}
    background = {"hue": np.array([], dtype=np.uint8), "saturation": np.array([], dtype=np.uint8),
                  "value": np.array([], dtype=np.uint8)}
    # Walk through the image directory
    print("Reading images...")
    for (dirpath, _, filenames) in os.walk(imgdir):
        for filename in filenames:
            # Is this an image type we can work with?
            # Bug fix: the old check compared filename[-3:] against "jpeg",
            # which can never match a 4-character extension, so .jpeg files
            # were silently skipped. Use a case-insensitive endswith instead.
            if filename.lower().endswith((".png", ".jpg", ".jpeg")):
                # Does the mask exist?
                if os.path.exists(os.path.join(maskdir, filename)):
                    # Read the image as BGR
                    img = cv2.imread(os.path.join(dirpath, filename), 1)
                    # Read the mask as grayscale
                    mask = cv2.imread(os.path.join(maskdir, filename), 0)
                    # Convert the image to HSV and split into component channels
                    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                    hue, saturation, value = cv2.split(hsv)
                    # Store channels in a dictionary
                    channels = {"hue": hue, "saturation": saturation, "value": value}
                    # Split channels into plant and non-plant signal
                    for channel in channels:
                        fg, bg = _split_plant_background_signal(channels[channel], mask)
                        # Randomly sample from the plant class (sample 10% of the pixels)
                        fg = fg[np.random.randint(0, len(fg) - 1, int(len(fg) / 10))]
                        # Randomly sample from the background class the same n as the plant class
                        bg = bg[np.random.randint(0, len(bg) - 1, len(fg))]
                        plant[channel] = np.append(plant[channel], fg)
                        background[channel] = np.append(background[channel], bg)
    # Calculate a probability density function for each channel using a Gaussian kernel density estimator
    # Create an output file for the PDFs
    with open(outfile, "w") as out:
        # Header row: class label, channel name, then one column per 8-bit intensity
        out.write("class\tchannel\t" + "\t".join(map(str, range(0, 256))) + "\n")
        for channel in plant:
            print("Calculating PDF for the " + channel + " channel...")
            plant_kde = stats.gaussian_kde(plant[channel])
            bg_kde = stats.gaussian_kde(background[channel])
            # Calculate p from the PDFs for each 8-bit intensity value and save to outfile
            plant_pdf = plant_kde(range(0, 256))
            out.write("plant\t" + channel + "\t" + "\t".join(map(str, plant_pdf)) + "\n")
            bg_pdf = bg_kde(range(0, 256))
            out.write("background\t" + channel + "\t" + "\t".join(map(str, bg_pdf)) + "\n")
            if mkplots:
                # If mkplots is True, make the PDF charts
                _plot_pdf(channel, os.path.dirname(outfile), plant=plant_pdf, background=bg_pdf)
def naive_bayes_multiclass(samples_file, outfile, mkplots=False):
    """Naive Bayes training function for two or more classes from sampled pixel RGB values.

    Parses the tab-delimited samples table, converts each class's sampled RGB
    values to HSV, fits a Gaussian kernel density estimate per channel per
    class, and writes the probability density functions to a text file.

    Inputs:
    samples_file = Input text file containing sampled pixel RGB values for each training class. The file should be a
                   tab-delimited table with one training class per column. The required first row must contain header
                   labels for each class. The row values for each class must be comma-delimited RGB values.
                   You must have at least 2 classes. See the file plantcv/tests/data/sampled_rgb_points.txt for
                   an example.
    outfile      = Name of the output text file that will store the color channel probability density functions.
    mkplots      = Make PDF plots (True or False).

    :param samples_file: str
    :param outfile: str
    :param mkplots: bool
    """
    # Initialize a dictionary to store sampled RGB pixel values for each input class
    sample_points = {}
    # Open the sampled points text file
    with open(samples_file, "r") as f:
        # Read the first line and use the column headers as class labels
        header = f.readline()
        header = header.rstrip("\n")
        class_list = header.split("\t")
        # Initialize a dictionary for the red, green, and blue channels for each class
        for cls in class_list:
            sample_points[cls] = {"red": [], "green": [], "blue": []}
        # Loop over the rest of the data in the input file
        for row in f:
            # Remove newlines and quotes
            row = row.rstrip("\n")
            row = row.replace('"', '')
            # If this is not a blank line, parse the data
            if len(row) > 0:
                # Split the row into a list of points per class
                points = row.split("\t")
                # For each point per class
                for i, point in enumerate(points):
                    if len(point) > 0:
                        # Split the point into red, green, and blue integer values
                        red, green, blue = map(int, point.split(","))
                        # Append each intensity value into the appropriate class list
                        sample_points[class_list[i]]["red"].append(red)
                        sample_points[class_list[i]]["green"].append(green)
                        sample_points[class_list[i]]["blue"].append(blue)
    # Initialize a dictionary to store probability density functions per color channel in HSV colorspace
    pdfs = {"hue": {}, "saturation": {}, "value": {}}
    # For each class
    for cls in class_list:
        # Create a blue, green, red-formatted image ndarray with the class RGB values
        bgr_img = cv2.merge((np.asarray(sample_points[cls]["blue"], dtype=np.uint8),
                             np.asarray(sample_points[cls]["green"], dtype=np.uint8),
                             np.asarray(sample_points[cls]["red"], dtype=np.uint8)))
        # Convert the BGR ndarray to an HSV ndarray
        hsv_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HSV)
        # Split the HSV ndarray into the component HSV channels
        hue, saturation, value = cv2.split(hsv_img)
        # Create an HSV channel dictionary that stores the channels as lists (horizontally stacked ndarrays)
        channels = {"hue": np.hstack(hue), "saturation": np.hstack(saturation), "value": np.hstack(value)}
        # For each channel
        for channel in channels:
            # Create a kernel density estimator for the channel values (Gaussian kernel)
            kde = stats.gaussian_kde(channels[channel])
            # Use the KDE to calculate a probability density function for the channel
            # Sample at each of the possible 8-bit values
            pdfs[channel][cls] = kde(range(0, 256))
    if mkplots:
        # If mkplots is True, generate a density curve plot per channel for each class
        for channel, cls in pdfs.items():
            _plot_pdf(channel, os.path.dirname(outfile), **cls)
    # Write the PDFs to a text file
    # Bug fix: the output file was previously opened without a context manager
    # and never closed (resource leak); use `with` like naive_bayes() does.
    with open(outfile, "w") as out:
        # Write the column labels
        out.write("class\tchannel\t" + "\t".join(map(str, range(0, 256))) + "\n")
        # For each channel
        for channel, cls in pdfs.items():
            # For each class
            for class_name, pdf in cls.items():
                # Each row is the PDF for the given class and color channel
                out.write(class_name + "\t" + channel + "\t" + "\t".join(map(str, pdf)) + "\n")
def _split_plant_background_signal(channel, mask):
"""Split a single-channel image by foreground and background using a mask
:param channel: ndarray
:param mask: ndarray
:return plant: ndarray
:return background: ndarray
"""
plant = channel[np.where(mask == 255)]
background = channel[np.where(mask == 0)]
return plant, background
def _plot_pdf(channel, outdir, **kwargs):
    """Plot the probability density functions of one or more classes for a channel.

    Each keyword argument maps a class name to its density values; all curves
    are drawn on a single figure and saved as an SVG file in outdir.

    :param channel: str
    :param outdir: str
    :param kwargs: dict
    """
    for class_label, density in kwargs.items():
        plt.plot(density, label=class_label)
    plt.legend(loc="best")
    fig_path = os.path.join(outdir, str(channel) + "_pdf.svg")
    plt.savefig(fig_path)
    plt.close()
| mit | 5be86ec76611b8fd13cb483f93788fb9 | 47.255319 | 117 | 0.597663 | 4.071813 | false | false | false | false |
danforthcenter/plantcv | plantcv/plantcv/image_fusion.py | 1 | 3547 | # Fuse two images
import os
import numpy as np
from skimage import img_as_ubyte
from plantcv.plantcv import fatal_error
from plantcv.plantcv import Spectral_data
from plantcv.plantcv import params
from plantcv.plantcv.hyperspectral.read_data import _make_pseudo_rgb
from plantcv.plantcv._debug import _debug
def image_fusion(img1, img2, wvs1, wvs2, array_type="multispectral"):
    """Fuse two images of the same size together to create a multispectral image

    The two images are stacked along the spectral (z) axis, the combined bands
    are reordered by wavelength, intensities are rescaled to 0-1, and the
    result is wrapped in a Spectral_data instance with a pseudo-RGB preview.

    img1: 1st image to be fused
    img2: 2nd image to be fused
    wvs1: list of wavelengths representing the bands in img1
    wvs2: list of wavelengths representing the bands in img2
    array_type: (optional) description of the fused array

    :param img1: numpy.ndarray
    :param img2: numpy.ndarray
    :param wvs1: list
    :param wvs2: list
    :param array_type: str
    :return fused_array: plantcv.Spectral_data
    """
    # If the image is 2D, expand to 3D to make stackable
    img1 = _expand_img_dims(img1)
    r1, c1, _ = img1.shape
    # If the image is 2D, expand to 3D to make stackable
    img2 = _expand_img_dims(img2)
    r2, c2, _ = img2.shape
    # Fatal error if images are not the same spatial dimensions
    if (r1, c1) != (r2, c2):
        fatal_error("Input images should have the same image size")
    # If the images are not the same data type, convert to 8-bit unsigned integer
    if img1.dtype != img2.dtype:
        img1 = img_as_ubyte(img1)
        img2 = img_as_ubyte(img2)
    # Concatenate the images on the depth/spectral (z) axis
    array_data = np.concatenate((img1, img2), axis=2)
    # sort all wavelengths
    wavelengths = np.array(wvs1 + wvs2)
    ind = np.argsort(wavelengths)
    wavelengths = wavelengths[ind]
    # Map each wavelength to its band index in the sorted stack.
    # NOTE(review): duplicate wavelengths would overwrite earlier entries here
    # and the dict would have fewer keys than bands — confirm inputs are unique.
    wavelength_dict = dict()
    for (idx, wv) in enumerate(wavelengths):
        wavelength_dict[wv] = float(idx)
    # sort array_data based on wavelengths
    array_data = array_data[:, :, ind]
    # Scale the array data to 0-1 by dividing by the maximum data type value
    # NOTE(review): np.iinfo assumes an integer dtype at this point; float
    # inputs of matching dtype would bypass img_as_ubyte above — confirm callers
    array_data = (array_data / np.iinfo(array_data.dtype).max).astype(np.float32)
    r, c, b = array_data.shape
    # Wrap everything in the project's Spectral_data container
    fused_array = Spectral_data(array_data=array_data,
                                max_wavelength=float(max(wavelengths)),
                                min_wavelength=float(min(wavelengths)),
                                max_value=float(np.amax(array_data)),
                                min_value=float(np.amin(array_data)),
                                d_type=array_data.dtype,
                                wavelength_dict=wavelength_dict,
                                samples=c, lines=r, interleave="NA",
                                wavelength_units="nm", array_type=array_type,
                                pseudo_rgb=None, filename="NA", default_bands=None)
    # Make pseudo-rgb image and replace it inside the class instance object
    pseudo_rgb = _make_pseudo_rgb(fused_array)
    fused_array.pseudo_rgb = pseudo_rgb
    _debug(visual=pseudo_rgb, filename=os.path.join(params.debug_outdir, str(params.device) + "_fused_pseudo_rgb.png"))
    return fused_array
def _expand_img_dims(img):
"""Expand 2D images to 3D
Inputs:
img - input image
Returns:
img - image with expanded dimensions
:params img: numpy.ndarray
:return img: numpy.ndarray
"""
# If the image is 2D, expand to 3D to make stackable
if len(img.shape) == 2:
return np.expand_dims(img, axis=2)
# Return copy of image to break the reference to the input image
return img.copy()
| mit | 403e7f7dcee90a667641a93d4227a007 | 34.47 | 119 | 0.634903 | 3.56841 | false | false | false | false |
danforthcenter/plantcv | plantcv/plantcv/hyperspectral/_avg_reflectance.py | 1 | 1205 | # Calculate masked average background reflectance
import numpy as np
def _avg_reflectance(spectral_data, mask):
"""Find average reflectance of masked hyperspectral data instance.
This is useful for calculating a target signature (n_band x 1 - column array) which is required in various GatorSense
hyperspectral tools (https://github.com/GatorSense/hsi_toolkit_py)
Inputs:
spectral_array = Hyperspectral data instance
mask = Target wavelength value
Returns:
idx = Index
:param spectral_data: __main__.Spectral_data
:param mask: numpy.ndarray
:return spectral_array: __main__.Spectral_data
"""
# Initialize list of average reflectance values
avg_r = []
# For each band in a hyperspectral datacube mask and take the average
for i in range(0, len(spectral_data.wavelength_dict)):
band = spectral_data.array_data[:, :, [i]]
band_reshape = np.transpose(np.transpose(band)[0])
masked_band = band_reshape[np.where(mask > 0)]
band_avg = np.average(masked_band)
avg_r.append(band_avg)
# Convert into array object rather than list
avg_r = np.asarray(avg_r)
return avg_r
| mit | 403e0c526f0f09d64fe4411bf47c7e0b | 32.472222 | 121 | 0.673029 | 3.899676 | false | false | false | false |
danforthcenter/plantcv | tests/plantcv/test_landmark_reference_pt_dist.py | 1 | 1217 | import pytest
import numpy as np
from plantcv.plantcv import landmark_reference_pt_dist, outputs
@pytest.mark.parametrize("points,centroid,bline", [
    # Case 1: a single landmark point
    [[(10, 1000)], (10, 10), (10, 10)],
    # Case 2: edge case with an empty list of landmark points
    [[], (0, 0), (0, 0)],
    # Case 3: a realistic set of relative (0-1 scaled) landmark coordinates
    [[(0.0139, 0.2569), (0.2361, 0.2917), (0.3542, 0.3819), (0.3542, 0.4167), (0.375, 0.4236), (0.7431, 0.3681),
      (0.8958, 0.3542), (0.9931, 0.3125), (0.1667, 0.5139), (0.4583, 0.8889), (0.4931, 0.5903), (0.3889, 0.5694),
      (0.4792, 0.4306), (0.2083, 0.5417), (0.3194, 0.5278), (0.3889, 0.375), (0.3681, 0.3472), (0.2361, 0.0139),
      (0.5417, 0.2292), (0.7708, 0.3472), (0.6458, 0.3472), (0.6389, 0.5208), (0.6458, 0.625)], (0.4685, 0.4945),
     (0.4685, 0.2569)]
    ])
def test_landmark_reference_pt_dist(points, centroid, bline):
    """Test for PlantCV."""
    # Clear previous outputs so observations do not accumulate across parametrized cases
    outputs.clear()
    landmark_reference_pt_dist(points_r=points, centroid_r=centroid, bline_r=bline)
    # Each run is expected to record 8 observations under the "default" label
    assert len(outputs.observations['default'].keys()) == 8
def test_landmark_reference_pt_dist_bad_centroid():
    """Test for PlantCV."""
    bad_centroid = ('a', 'b')
    dist = landmark_reference_pt_dist(points_r=[], centroid_r=bad_centroid, bline_r=(0, 0))
    # All returned values should be the "NA" placeholder
    assert np.array_equal(np.unique(dist), np.array(["NA"]))
| mit | 6b2bda21f9bb301fa0f1a4fed3708511 | 45.807692 | 113 | 0.604766 | 2.363107 | false | true | false | false |
danforthcenter/plantcv | plantcv/plantcv/transform/color_correction.py | 1 | 34122 | # Color Corrections Functions
import os
import cv2
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plantcv.plantcv.roi import circle
from plantcv.plantcv import fatal_error
from plantcv.plantcv._debug import _debug
def get_color_matrix(rgb_img, mask):
    """Calculate the average value of pixels in each color chip for each color channel.

    Inputs:
    rgb_img = RGB image with color chips visualized
    mask    = a gray-scale img with unique values for each segmented space, representing unique, discrete
              color chips.

    Outputs:
    headers      = a list of 4 headers corresponding to the 4 columns of color_matrix respectively
    color_matrix = an n_chips x 4 matrix containing the chip label and the average red, green, and
                   blue value for each color chip (scaled to 0-1).

    :param rgb_img: numpy.ndarray
    :param mask: numpy.ndarray
    :return headers: list
    :return color_matrix: numpy.ndarray
    """
    # Check for RGB input
    if len(np.shape(rgb_img)) != 3:
        fatal_error("Input rgb_img is not an RGB image.")
    # Check mask for gray-scale
    if len(np.shape(mask)) != 2:
        fatal_error("Input mask is not an gray-scale image.")
    img_dtype = rgb_img.dtype
    # Normalization value is the dtype maximum for unsigned integer images
    max_val = 1.0
    if img_dtype.kind == 'u':
        max_val = np.iinfo(img_dtype).max
    # Convert to float and normalize to work with values between 0-1
    scaled_img = rgb_img.astype(np.float64) / max_val
    # Chip labels are the unique nonzero mask values. Size the matrix by the
    # actual number of labels rather than len(unique) - 1, which under-allocated
    # and raised IndexError when the mask contained no background (0) pixels.
    chip_labels = [label for label in np.unique(mask) if label != 0]
    color_matrix = np.zeros((len(chip_labels), 4))
    # Column headers for the returned matrix
    headers = ["chip_number", "r_avg", "g_avg", "b_avg"]
    # Average each chip's pixels per channel; image channels are stored B, G, R
    for row, label in enumerate(chip_labels):
        chip = scaled_img[np.where(mask == label)]
        color_matrix[row][0] = label
        color_matrix[row][1] = np.mean(chip[:, 2])
        color_matrix[row][2] = np.mean(chip[:, 1])
        color_matrix[row][3] = np.mean(chip[:, 0])
    return headers, color_matrix
def get_matrix_m(target_matrix, source_matrix):
    """Calculate the Moore-Penrose inverse matrix used to derive the transformation matrix.

    Inputs:
    target_matrix = n x 4 matrix of chip numbers and average RGB values from the target image
    source_matrix = n x 4 matrix of chip numbers and average RGB values from the source image

    Outputs:
    matrix_a = concatenated matrix of source red, green, and blue values to the powers 1, 2, 3
    matrix_m = Moore-Penrose inverse of matrix_a
    matrix_b = matrix of linear, square, and cubic RGB values from the target image

    :param target_matrix: numpy.ndarray
    :param source_matrix: numpy.ndarray
    :return matrix_a: numpy.ndarray
    :return matrix_m: numpy.ndarray
    :return matrix_b: numpy.ndarray
    """
    if np.shape(target_matrix) == np.shape(source_matrix):
        # Equal chip counts: split the four columns of each matrix directly
        t_cc, t_r, t_g, t_b = np.split(target_matrix, 4, 1)
        s_cc, s_r, s_g, s_b = np.split(source_matrix, 4, 1)
    else:
        # Unequal chip counts: keep only chips present in both, matched by chip number
        combined_matrix = np.zeros((np.ma.size(source_matrix, 0), 7))
        row_count = 0
        for r in range(0, np.ma.size(target_matrix, 0)):
            for i in range(0, np.ma.size(source_matrix, 0)):
                if target_matrix[r][0] == source_matrix[i][0]:
                    combined_matrix[row_count][0:4] = target_matrix[r][0:4]
                    combined_matrix[row_count][4:7] = source_matrix[i][1:4]
                    row_count += 1
        t_cc, t_r, t_g, t_b, s_r, s_g, s_b = np.split(combined_matrix, 7, 1)
    # Linear, squared, and cubed channel values for both images
    t_powers = [t_r, t_g, t_b, np.square(t_r), np.square(t_g), np.square(t_b),
                np.power(t_r, 3), np.power(t_g, 3), np.power(t_b, 3)]
    s_powers = [s_r, s_g, s_b, np.square(s_r), np.square(s_g), np.square(s_b),
                np.power(s_r, 3), np.power(s_g, 3), np.power(s_b, 3)]
    # matrix_a columns: [r, g, b, r^2, g^2, b^2, r^3, g^3, b^3] of the source
    matrix_a = np.concatenate(s_powers, 1)
    # matrix_m: Moore-Penrose pseudoinverse of matrix_a, (A^T A)^-1 A^T
    matrix_m = np.linalg.solve(np.matmul(matrix_a.T, matrix_a), matrix_a.T)
    # matrix_b columns grouped per target channel: [r, r^2, r^3, g, g^2, g^3, b, b^2, b^3]
    matrix_b = np.concatenate((t_powers[0], t_powers[3], t_powers[6],
                               t_powers[1], t_powers[4], t_powers[7],
                               t_powers[2], t_powers[5], t_powers[8]), 1)
    return matrix_a, matrix_m, matrix_b
def calc_transformation_matrix(matrix_m, matrix_b):
    """Calculate the color transformation matrix.

    Inputs:
    matrix_m = Moore-Penrose inverse matrix
    matrix_b = matrix of linear, square, and cubic RGB values from the target image

    Outputs:
    1-t_det = "deviance", the measure of how greatly the source image deviates from the target
              image's color space. Two images of the same color space should have a deviance of ~0.
    transformation_matrix = a 9x9 matrix of linear, square, and cubic transformation coefficients

    :param matrix_m: numpy.ndarray
    :param matrix_b: numpy.ndarray
    :return 1-t_det: float
    :return transformation_matrix: numpy.ndarray
    """
    # Both inputs must be 2-D matrices
    if len(np.shape(matrix_b)) != 2 or len(np.shape(matrix_m)) != 2:
        fatal_error("matrix_m and matrix_b must be n x m matrices such that m,n != 1.")
    # matrix_b columns are [r, r^2, r^3, g, g^2, g^3, b, b^2, b^3]
    if np.shape(matrix_b)[1] != 9:
        fatal_error("matrix_b must have 9 columns.")
    # Shapes must be compatible for matrix multiplication
    if np.shape(matrix_m)[0] != np.shape(matrix_b)[1] or np.shape(matrix_m)[1] != np.shape(matrix_b)[0]:
        fatal_error("Cannot multiply matrices.")
    target_columns = np.split(matrix_b, 9, 1)
    # Project each target column through matrix_m, reordering columns so the
    # result is grouped [r, g, b, r^2, g^2, b^2, r^3, g^3, b^3]
    column_order = (0, 3, 6, 1, 4, 7, 2, 5, 8)
    products = [np.matmul(matrix_m, target_columns[idx]) for idx in column_order]
    transformation_matrix = np.concatenate(products, 1)
    # Deviance is 1 minus the determinant of the transformation matrix
    t_det = np.linalg.det(transformation_matrix)
    return 1 - t_det, transformation_matrix
def apply_transformation_matrix(source_img, target_img, transformation_matrix):
    """Apply the transformation matrix to the source_image.

    Each output channel is a third-degree polynomial of the source R, G, and B
    channels, using one coefficient column of transformation_matrix per term.

    Inputs:
    source_img = an RGB image to be corrected to the target color space
    target_img = an RGB image with the target color space (used only for the debug side-by-side view)
    transformation_matrix = a 9x9 matrix of transformation coefficients

    Outputs:
    corrected_img = an RGB image in correct color space

    :param source_img: numpy.ndarray
    :param target_img: numpy.ndarray
    :param transformation_matrix: numpy.ndarray
    :return corrected_img: numpy.ndarray
    """
    # check transformation_matrix for 9x9
    if np.shape(transformation_matrix) != (9, 9):
        fatal_error("transformation_matrix must be a 9x9 matrix of transformation coefficients.")
    # Check for RGB input
    if len(np.shape(source_img)) != 3:
        fatal_error("Source_img is not an RGB image.")
    # split transformation_matrix into one coefficient column per polynomial
    # term: linear, squared, and cubed coefficients for each output channel
    red, green, blue, red2, green2, blue2, red3, green3, blue3 = np.split(transformation_matrix, 9, 1)
    source_dtype = source_img.dtype
    # normalization value as max number if the type is unsigned int
    max_val = 1.0
    if source_dtype.kind == 'u':
        max_val = np.iinfo(source_dtype).max
    # convert img to float to avoid integer overflow, normalize between 0-1
    source_flt = source_img.astype(np.float64)/max_val
    # find linear, square, and cubic values of source_img color channels
    source_b, source_g, source_r = cv2.split(source_flt)
    source_b2 = np.square(source_b)
    source_b3 = np.power(source_b, 3)
    source_g2 = np.square(source_g)
    source_g3 = np.power(source_g, 3)
    source_r2 = np.square(source_r)
    source_r3 = np.power(source_r, 3)
    # apply the polynomial model to the source color channels; each output
    # channel mixes all nine source terms with its own coefficient column
    b = 0 + source_r * blue[0] + source_g * blue[1] + source_b * blue[2] + source_r2 * blue[3] + source_g2 * blue[
        4] + source_b2 * blue[5] + source_r3 * blue[6] + source_g3 * blue[7] + source_b3 * blue[8]
    g = 0 + source_r * green[0] + source_g * green[1] + source_b * green[2] + source_r2 * green[3] + source_g2 * green[
        4] + source_b2 * green[5] + source_r3 * green[6] + source_g3 * green[7] + source_b3 * green[8]
    r = 0 + source_r * red[0] + source_g * red[1] + source_b * red[2] + source_r2 * red[3] + source_g2 * red[
        4] + source_b2 * red[5] + source_r3 * red[6] + source_g3 * red[7] + source_b3 * red[8]
    # merge corrected color channels onto source_image
    bgr = [b, g, r]
    corrected_img = cv2.merge(bgr)
    # clip out-of-gamut values and return the image to the original range
    corrected_img = max_val*np.clip(corrected_img, 0, 1)
    # cast back to original dtype (if uint the value defaults to the closest smaller integer)
    corrected_img = corrected_img.astype(source_dtype)
    # For debugging, create a horizontal view of source_img, corrected_img, and target_img to the plotting device
    # plot horizontal comparison of source_img, corrected_img (with rounded elements) and target_img
    out_img = np.hstack([source_img, corrected_img, target_img])
    # Change range of visualization image to 0-255 and convert to uint8
    out_img = ((255.0/max_val)*out_img).astype(np.uint8)
    _debug(visual=out_img, filename=os.path.join(params.debug_outdir, str(params.device) + '_corrected.png'))
    # return corrected_img
    return corrected_img
def save_matrix(matrix, filename):
    """Serialize a matrix as a numpy.ndarray object and save it to a .npz file.

    Inputs:
    matrix   = a numpy matrix/array
    filename = name of file to which matrix will be saved. Must end in .npz

    :param matrix: numpy.ndarray
    :param filename: string ending in ".npz"
    """
    # Require the .npz suffix; the previous substring test (".npz" in filename)
    # incorrectly accepted names such as "matrix.npz.bak"
    if not filename.endswith(".npz"):
        fatal_error("File must be an .npz file.")
    # Autoincrement the device counter
    params.device += 1
    np.savez(filename, matrix)
def load_matrix(filename):
    """Deserialize a matrix saved with save_matrix from an .npz file.

    Inputs:
    filename = .npz file to which a numpy matrix/ndarray was saved

    Outputs:
    matrix = the stored array

    :param filename: string ending in ".npz"
    :return matrix: numpy.ndarray
    """
    matrix_file = np.load(filename, encoding="latin1")
    # np.savez stores a single unnamed array under the key "arr_0"
    matrix = matrix_file['arr_0']
    # The original code called np.asmatrix(matrix) and discarded the result, so
    # the function has always returned an ndarray; the dead call is removed and
    # the documented return type corrected.
    return matrix
def correct_color(target_img, target_mask, source_img, source_mask, output_directory):
    """Convert source_img to the preferred color space of target_img.

    Computes chip-average color matrices for both images, derives a 9x9
    transformation matrix, saves all intermediate matrices as .npz files in
    output_directory, and returns the color-corrected source image.

    Inputs:
    target_img       = an RGB image with color chips visualized (reference color space)
    target_mask      = a gray-scale image with color chips and background as unique values
    source_img       = an RGB image with color chips visualized (image to correct)
    source_mask      = a gray-scale image with color chips and background as unique values
    output_directory = a file path to which outputs will be saved

    Outputs:
    target_matrix         = chip-average RGB matrix of the target image (also saved to .npz)
    source_matrix         = chip-average RGB matrix of the source image (also saved to .npz)
    transformation_matrix = 9x9 transformation matrix (also saved to .npz)
    corrected_img         = the source_img converted to the correct color space

    :param target_img: numpy.ndarray
    :param target_mask: numpy.ndarray
    :param source_img: numpy.ndarray
    :param source_mask: numpy.ndarray
    :param output_directory: string
    :return target_matrix: numpy.matrix
    :return source_matrix: numpy.matrix
    :return transformation_matrix: numpy.matrix
    :return corrected_img: numpy.ndarray
    """
    # Create the output directory if it does not already exist
    if not os.path.exists(output_directory):
        os.mkdir(output_directory)
    # Average chip colors for both images, then persist the matrices
    _, target_matrix = get_color_matrix(target_img, target_mask)
    _, source_matrix = get_color_matrix(source_img, source_mask)
    save_matrix(target_matrix, os.path.join(output_directory, "target_matrix.npz"))
    save_matrix(source_matrix, os.path.join(output_directory, "source_matrix.npz"))
    # Derive the source-to-target transformation and persist it
    _, matrix_m, matrix_b = get_matrix_m(target_matrix=target_matrix, source_matrix=source_matrix)
    _, transformation_matrix = calc_transformation_matrix(matrix_m, matrix_b)
    save_matrix(transformation_matrix, os.path.join(output_directory, "transformation_matrix.npz"))
    # Apply the transformation to the source image
    corrected_img = apply_transformation_matrix(source_img, target_img, transformation_matrix)
    return target_matrix, source_matrix, transformation_matrix, corrected_img
def create_color_card_mask(rgb_img, radius, start_coord, spacing, nrows, ncols, exclude=None):
    """Create a labeled mask for color card chips

    Inputs:
    rgb_img     = Input RGB image data containing a color card.
    radius      = Radius of color masks.
    start_coord = Two-element tuple of the first chip mask starting x and y coordinate.
    spacing     = Two-element tuple of the horizontal and vertical spacing between chip masks.
    nrows       = Number of chip rows.
    ncols       = Number of chip columns.
    exclude     = Optional list of chips to exclude.

    Returns:
    mask        = Labeled mask of chips

    :param rgb_img: numpy.ndarray
    :param radius: int
    :param start_coord: tuple
    :param spacing: tuple
    :param nrows: int
    :param ncols: int
    :param exclude: list
    :return mask: numpy.ndarray
    """
    # Fix: the previous default exclude=[] was a shared mutable default, and
    # exclude.sort() mutated the caller's list in place. Sort a copy instead.
    excluded_chips = sorted(exclude, reverse=True) if exclude else []
    # Initialize chip list
    chips = []
    # Temporarily disable debugging output while building chip ROIs
    debug = params.debug
    params.debug = None
    # Loop over each color card row
    for i in range(0, nrows):
        # The upper left corner is the y starting coordinate + the chip offset * the vertical spacing between chips
        y = start_coord[1] + i * spacing[1]
        # Loop over each column
        for j in range(0, ncols):
            # The upper left corner is the x starting coordinate + the chip offset * the
            # horizontal spacing between chips
            x = start_coord[0] + j * spacing[0]
            # Create a chip ROI
            chips.append(circle(img=rgb_img, x=x, y=y, r=radius))
    # Restore debug parameter
    params.debug = debug
    # Remove excluded chips (largest index first so earlier indices stay valid)
    for chip_index in excluded_chips:
        del chips[chip_index]
    # Create mask
    mask = np.zeros(shape=np.shape(rgb_img)[:2], dtype=np.uint8)
    # Draw each chip on the mask with a unique label (multiples of 10)
    for i, chip in enumerate(chips, start=1):
        mask = cv2.drawContours(mask, chip[0], -1, (i * 10), -1)
    # Create a copy of the input image for plotting
    canvas = np.copy(rgb_img)
    # Draw chip ROIs on the canvas image
    for chip in chips:
        cv2.drawContours(canvas, chip[0], -1, (255, 255, 0), params.line_thickness)
    _debug(visual=canvas, filename=os.path.join(params.debug_outdir, str(params.device) + '_color_card_mask_rois.png'))
    _debug(visual=mask, filename=os.path.join(params.debug_outdir, str(params.device) + '_color_card_mask.png'))
    return mask
def quick_color_check(target_matrix, source_matrix, num_chips):
    """Quickly plot target matrix values against source matrix values to determine
    over saturated color chips or other issues.

    Inputs:
    source_matrix = an nrowsXncols matrix containing the avg red, green, and blue values for each color chip
                    of the source image
    target_matrix = an nrowsXncols matrix containing the avg red, green, and blue values for each color chip
                    of the target image
    num_chips     = number of color card chips included in the matrices (integer)

    :param source_matrix: numpy.ndarray
    :param target_matrix: numpy.ndarray
    :param num_chips: int
    """
    # Imports
    from plotnine import ggplot, geom_point, geom_smooth, theme_seaborn, facet_grid, geom_label, scale_x_continuous, \
        scale_y_continuous, scale_color_manual, aes
    import pandas as pd
    # Rescale matrices from 0-1 to 0-255
    target_vals = 255 * target_matrix
    source_vals = 255 * source_matrix
    # Pair source and target values per channel (blue, green, red order),
    # tagging each row with its channel color label
    per_channel = []
    for col, color_name in ((3, 'blue'), (2, 'green'), (1, 'red')):
        src = source_vals[:num_chips, col:col + 1]
        tgt = target_vals[:num_chips, col:col + 1]
        labels = np.array([[color_name]] * num_chips)
        per_channel.append(np.column_stack((src, tgt, labels)))
    all_color_data = np.vstack(per_channel)
    # Build the dataframe of source/target values with channel and chip labels
    dataset = pd.DataFrame({'source': all_color_data[:, 0], 'target': all_color_data[:, 1],
                            'color': all_color_data[:, 2]})
    chip = np.arange(0, num_chips).reshape((num_chips, 1))
    dataset['chip'] = np.vstack((chip, chip, chip))
    dataset = dataset.astype({'color': str, 'chip': str, 'target': float, 'source': float})
    # Scatter source vs target per channel with a linear fit and chip labels
    p1 = ggplot(dataset, aes(x='target', y='source', color='color', label='chip')) + \
        geom_point(show_legend=False, size=2) + \
        geom_smooth(method='lm', size=.5, show_legend=False) + \
        theme_seaborn() + facet_grid('.~color') + \
        geom_label(angle=15, size=7, nudge_y=-.25, nudge_x=.5, show_legend=False) + \
        scale_x_continuous(limits=(-5, 270)) + scale_y_continuous(limits=(-5, 275)) + \
        scale_color_manual(values=['blue', 'green', 'red'])
    _debug(visual=p1, filename=os.path.join(params.debug_outdir, 'color_quick_check.png'))
def find_color_card(rgb_img, threshold_type='adaptgauss', threshvalue=125, blurry=False, background='dark',
record_chip_size="median", label="default"):
"""Automatically detects a color card and output info to use in create_color_card_mask function
Algorithm written by Brandon Hurr. Updated and implemented into PlantCV by Haley Schuhl.
Inputs:
rgb_img = Input RGB image data containing a color card.
threshold_type = Threshold method, either 'normal', 'otsu', or 'adaptgauss', optional (default 'adaptgauss')
threshvalue = Thresholding value, optional (default 125)
blurry = Bool (default False) if True then image sharpening applied
background = Type of image background either 'dark' or 'light' (default 'dark'); if 'light' then histogram
expansion applied to better detect edges, but histogram expansion will be hindered if there
is a dark background
record_chip_size = Optional str for choosing chip size measurement to be recorded, either "median",
"mean", or None
label = optional label parameter, modifies the variable name of observations recorded (default 'default')
Returns:
df = Dataframe containing information about the filtered contours
start_coord = Two element tuple of starting coordinates, location of the top left pixel detected
spacing = Two element tuple of spacing between centers of chips
:param rgb_img: numpy.ndarray
:param threshold_type: str
:param threshvalue: int
:param blurry: bool
:param background: str
:param record_chip_size: str
:param label: str
:return df: pandas.core.frame.DataFrame
:return start_coord: tuple
:return spacing: tuple
"""
# Imports
import skimage
import pandas as pd
from scipy.spatial.distance import squareform, pdist
# Get image attributes
height, width, channels = rgb_img.shape
total_pix = float(height * width)
# Minimum and maximum square size based upon 12 MP image
min_area = 1000. / 12000000. * total_pix
max_area = 8000000. / 12000000. * total_pix
# Create gray image for further processing
gray_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
# Laplacian Fourier Transform detection of blurriness
blurfactor = cv2.Laplacian(gray_img, cv2.CV_64F).var()
# If image is blurry then try to deblur using kernel
if blurry:
# from https://www.packtpub.com/mapt/book/Application+Development/9781785283932/2/ch02lvl1sec22/Sharpening
kernel = np.array([[-1, -1, -1, -1, -1],
[-1, 2, 2, 2, -1],
[-1, 2, 8, 2, -1],
[-1, 2, 2, 2, -1],
[-1, -1, -1, -1, -1]]) / 8.0
# Store result back out for further processing
gray_img = cv2.filter2D(gray_img, -1, kernel)
# In darker samples, the expansion of the histogram hinders finding the squares due to problems with the otsu
# thresholding. If your image has a bright background then apply
if background == 'light':
clahe = cv2.createCLAHE(clipLimit=3.25, tileGridSize=(4, 4))
# apply CLAHE histogram expansion to find squares better with canny edge detection
gray_img = clahe.apply(gray_img)
elif background != 'dark':
fatal_error('Background parameter ' + str(background) + ' is not "light" or "dark"!')
# Thresholding
if threshold_type.upper() == "OTSU":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
elif threshold_type.upper() == "NORMAL":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, threshold = cv2.threshold(gaussian, threshvalue, 255, cv2.THRESH_BINARY)
elif threshold_type.upper() == "ADAPTGAUSS":
# Blur slightly so defects on card squares and background patterns are less likely to be picked up
gaussian = cv2.GaussianBlur(gray_img, (11, 11), 0)
threshold = cv2.adaptiveThreshold(gaussian, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 51, 2)
else:
fatal_error('Input threshold_type=' + str(threshold_type) + ' but should be "otsu", "normal", or "adaptgauss"!')
# Apply automatic Canny edge detection using the computed median
canny_edges = skimage.feature.canny(threshold)
canny_edges.dtype = 'uint8'
# Compute contours to find the squares of the card
contours, hierarchy = cv2.findContours(canny_edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
# Variable of which contour is which
mindex = []
# Variable to store moments
mu = []
# Variable to x,y coordinates in tuples
mc = []
# Variable to x coordinate as integer
mx = []
# Variable to y coordinate as integer
my = []
# Variable to store area
marea = []
# Variable to store whether something is a square (1) or not (0)
msquare = []
# Variable to store square approximation coordinates
msquarecoords = []
# Variable to store child hierarchy element
mchild = []
# Fitted rectangle height
mheight = []
# Fitted rectangle width
mwidth = []
# Ratio of height/width
mwhratio = []
# Extract moments from contour image
for x in range(0, len(contours)):
mu.append(cv2.moments(contours[x]))
marea.append(cv2.contourArea(contours[x]))
mchild.append(int(hierarchy[0][x][2]))
mindex.append(x)
# Cycle through moment data and compute location for each moment
for m in mu:
if m['m00'] != 0: # This is the area term for a moment
mc.append((int(m['m10'] / m['m00']), int(m['m01']) / m['m00']))
mx.append(int(m['m10'] / m['m00']))
my.append(int(m['m01'] / m['m00']))
else:
mc.append((0, 0))
mx.append((0))
my.append((0))
# Loop over our contours and extract data about them
for index, c in enumerate(contours):
# Area isn't 0, but greater than min-area and less than max-area
if marea[index] != 0 and min_area < marea[index] < max_area:
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.1 * peri, True)
center, wh, angle = cv2.minAreaRect(c) # Rotated rectangle
mwidth.append(wh[0])
mheight.append(wh[1])
# In different versions of OpenCV, width and height can be listed in a different order
# To normalize the ratio we sort them and take the ratio of the longest / shortest
wh_sorted = list(wh)
wh_sorted.sort()
mwhratio.append(wh_sorted[1] / wh_sorted[0])
msquare.append(len(approx))
# If the approx contour has 4 points then we can assume we have 4-sided objects
if len(approx) in (4, 5):
msquarecoords.append(approx)
else: # It's not square
msquarecoords.append(0)
else: # Contour has area of 0, not interesting
msquare.append(0)
msquarecoords.append(0)
mwidth.append(0)
mheight.append(0)
mwhratio.append(0)
# Make a pandas df from data for filtering out junk
all_contours = {'index': mindex, 'x': mx, 'y': my, 'width': mwidth, 'height': mheight, 'res_ratio': mwhratio,
'area': marea, 'square': msquare, 'child': mchild}
df = pd.DataFrame(all_contours)
# Add calculated blur factor to output
df['blurriness'] = blurfactor
# Filter df for attributes that would isolate squares of reasonable size
df = df[(df['area'] > min_area) & (df['area'] < max_area) & (df['child'] != -1) &
(df['square'].isin([4, 5])) & (df['res_ratio'] < 1.2) & (df['res_ratio'] > 0.85)]
# Filter nested squares from dataframe, was having issues with median being towards smaller nested squares
df = df[~(df['index'].isin(df['index'] + 1))]
# Count up squares that are within a given radius, more squares = more likelihood of them being the card
# Median width of square time 2.5 gives proximity radius for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 6
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Compute how similar in area the squares are. lots of similar values indicates card isolate area measurements
filtered_area = df['area']
# Create empty matrix for storing comparisons
sizecomp = np.zeros((len(filtered_area), len(filtered_area)))
# Double loop through all areas to compare to each other
for p in range(0, len(filtered_area)):
for o in range(0, len(filtered_area)):
big = max(filtered_area.iloc[p], filtered_area.iloc[o])
small = min(filtered_area.iloc[p], filtered_area.iloc[o])
pct = 100. * (small / big)
sizecomp[p][o] = pct
# How many comparisons given 90% square similarity
sizematrix = pd.DataFrame(sizecomp).apply(lambda sim: sim[sim >= 90].count() - 1, axis=1)
# Append sizeprox summary to dataframe
df = df.assign(sizeprox=sizematrix.values)
# Reorder dataframe for better printing
df = df[['index', 'x', 'y', 'width', 'height', 'res_ratio', 'area', 'square', 'child',
'blurriness', 'distprox', 'sizeprox']]
# Loosely filter for size and distance (relative size to median)
minsqwidth = median_sq_width_px * 0.80
maxsqwidth = median_sq_width_px * 1.2
df = df[(df['distprox'] >= 5) & (df['sizeprox'] >= 5) & (df['width'] > minsqwidth) &
(df['width'] < maxsqwidth)]
# Filter for proximity again to root out stragglers. Find and count up squares that are within given radius,
# more squares = more likelihood of them being the card. Median width of square time 2.5 gives proximity radius
# for searching for similar squares
median_sq_width_px = df["width"].median()
# Squares that are within 6 widths of the current square
pixeldist = median_sq_width_px * 5
# Computes euclidean distance matrix for the x and y contour centroids
distmatrix = pd.DataFrame(squareform(pdist(df[['x', 'y']])))
# Add up distances that are less than ones have distance less than pixeldist pixels
distmatrixflat = distmatrix.apply(lambda dist: dist[dist <= pixeldist].count() - 1, axis=1)
# Append distprox summary to dataframe
df = df.assign(distprox=distmatrixflat.values)
# Filter results for distance proximity to other squares
df = df[(df['distprox'] >= 4)]
# Remove all not numeric values use to_numeric with parameter, errors='coerce' - it replace non numeric to NaNs:
df['x'] = pd.to_numeric(df['x'], errors='coerce')
df['y'] = pd.to_numeric(df['y'], errors='coerce')
# Remove NaN
df = df.dropna()
if df['x'].min() is np.nan or df['y'].min() is np.nan:
fatal_error('No color card found under current parameters')
else:
# Extract the starting coordinate
start_coord = (df['x'].min(), df['y'].min())
# start_coord = (int(df['X'].min()), int(df['Y'].min()))
# Calculate the range
spacingx_short = (df['x'].max() - df['x'].min()) / 3
spacingy_short = (df['y'].max() - df['y'].min()) / 3
spacingx_long = (df['x'].max() - df['x'].min()) / 5
spacingy_long = (df['y'].max() - df['y'].min()) / 5
# Chip spacing since 4x6 card assumed
spacing_short = min(spacingx_short, spacingy_short)
spacing_long = max(spacingx_long, spacingy_long)
# Smaller spacing measurement might have a chip missing
spacing = int(max(spacing_short, spacing_long))
spacing = (spacing, spacing)
if record_chip_size is not None:
if record_chip_size.upper() == "MEDIAN":
chip_size = df.loc[:, "area"].median()
chip_height = df.loc[:, "height"].median()
chip_width = df.loc[:, "width"].median()
elif record_chip_size.upper() == "MEAN":
chip_size = df.loc[:, "area"].mean()
chip_height = df.loc[:, "height"].mean()
chip_width = df.loc[:, "width"].mean()
else:
print(str(record_chip_size) + " Is not a valid entry for record_chip_size." +
" Must be either 'mean', 'median', or None.")
chip_size = None
chip_height = None
chip_width = None
# Store into global measurements
outputs.add_observation(sample=label, variable='color_chip_size', trait='size of color card chips identified',
method='plantcv.plantcv.transform.find_color_card', scale='none',
datatype=float, value=chip_size, label=str(record_chip_size))
method = record_chip_size.lower()
outputs.add_observation(sample=label, variable=f'{method}_color_chip_height',
trait=f'{method} height of color card chips identified',
method='plantcv.plantcv.transform.find_color_card', scale='none',
datatype=float, value=chip_height, label=str(record_chip_size))
outputs.add_observation(sample=label, variable=f'{method}_color_chip_width',
trait=f'{method} size of color card chips identified',
method='plantcv.plantcv.transform.find_color_card', scale='none',
datatype=float, value=chip_width, label=str(record_chip_size))
return df, start_coord, spacing
| mit | cd84424f244a6797ba273c03fefccb1f | 43.028387 | 120 | 0.634019 | 3.572236 | false | false | false | false |
danforthcenter/plantcv | plantcv/plantcv/morphology/segment_path_length.py | 1 | 2068 | # Find geodesic lengths of skeleton segments
import os
import cv2
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plantcv.plantcv._debug import _debug
def segment_path_length(segmented_img, objects, label="default"):
    """Measure the geodesic (path) length of each skeleton segment.

    Inputs:
    segmented_img = Segmented image to plot lengths on
    objects       = List of contours
    label         = optional label parameter, modifies the variable name of observations recorded

    Returns:
    labeled_img = Segmented debugging image with lengths labeled

    :param segmented_img: numpy.ndarray
    :param objects: list
    :param label: str
    :return labeled_img: numpy.ndarray
    """
    labeled_img = segmented_img.copy()
    # cv2.arcLength traverses the contour perimeter, which walks a skeleton
    # path there and back, so halve it to get the geodesic length.
    path_lengths = [float(cv2.arcLength(cnt, False) / 2) for cnt in objects]
    # The first point of each contour is used as the anchor for its label.
    anchors = [(cnt[0][0][0], cnt[0][0][1]) for cnt in objects]
    segment_ids = list(range(len(path_lengths)))
    # Draw the measured length next to each segment
    for seg_id, (w, h) in enumerate(anchors):
        cv2.putText(img=labeled_img, text="{:.2f}".format(path_lengths[seg_id]), org=(w, h),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=params.text_size,
                    color=(150, 150, 150), thickness=params.text_thickness)
    outputs.add_observation(sample=label, variable='segment_path_length', trait='segment path length',
                            method='plantcv.plantcv.morphology.segment_path_length', scale='pixels', datatype=list,
                            value=path_lengths, label=segment_ids)
    _debug(visual=labeled_img, filename=os.path.join(params.debug_outdir, f"{params.device}_segment_path_lengths.png"))
    return labeled_img
| mit | 7113aef2820287c9f088bde3db03f2a9 | 37.296296 | 119 | 0.66441 | 3.739602 | false | false | false | false |
danforthcenter/plantcv | plantcv/plantcv/output_mask_ori_img.py | 1 | 1887 | # Find NIR image
import os
from plantcv.plantcv import print_image
from plantcv.plantcv import params
from plantcv.plantcv._debug import _debug
def output_mask(img, mask, filename, outdir=None, mask_only=False):
    """Write the original image and its binary mask to per-kind directories.

    Inputs:
    img       = original image, read in with plantcv function read_image
    mask      = binary mask image (single channel)
    filename  = vis image file name (output of plantcv read_image function)
    outdir    = output directory (defaults to the current working directory)
    mask_only = bool for printing only mask

    Returns:
    imgpath  = path to image (omitted when mask_only=True)
    maskpath = path to mask

    :param img: numpy.ndarray
    :param mask: numpy.ndarray
    :param filename: str
    :param outdir: str
    :param mask_only: bool
    :return imgpath: str
    :return maskpath: str
    """
    base_dir = outdir if outdir is not None else os.getcwd()
    saved_paths = []
    analysis_images = []
    # Save the original image unless mask_only=True
    if not mask_only:
        ori_dir = os.path.join(str(base_dir), "ori-images")
        os.makedirs(ori_dir, exist_ok=True)
        imgpath = os.path.join(str(ori_dir), str(filename))
        print_image(img, imgpath)
        analysis_images.append(['IMAGE', 'ori-img', imgpath])
        saved_paths.append(imgpath)
        # Print/plot original image
        _debug(visual=img, filename=os.path.join(params.debug_outdir, f"{params.device}_ori-img.png"))
    # The mask is always saved
    mask_dir = os.path.join(str(base_dir), "mask-images")
    os.makedirs(mask_dir, exist_ok=True)
    maskpath = os.path.join(str(mask_dir), str(filename))
    print_image(mask, maskpath)
    analysis_images.append(['IMAGE', 'mask', maskpath])
    saved_paths.append(maskpath)
    # Print/plot mask
    _debug(visual=mask, filename=os.path.join(params.debug_outdir, f"{params.device}_mask-img.png"))
    # The list of [type, name, path] records is returned as the last element
    saved_paths.append(analysis_images)
    return saved_paths
| mit | 3313d3d540e7b486e0d0ad0870d14dda | 29.934426 | 102 | 0.671436 | 3.621881 | false | false | false | false |
danforthcenter/plantcv | plantcv/plantcv/morphology/segment_combine.py | 1 | 2804 | # Plot segment ID numbers after combining segments
import os
import cv2
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv import fatal_error
from plantcv.plantcv import color_palette
from plantcv.plantcv._debug import _debug
def segment_combine(segment_list, objects, mask):
    """Combine user specified segments together.

    Inputs:
    segment_list = List of segment indices to get combined
    objects      = List of contours
    mask         = Binary mask for debugging image

    Returns:
    labeled_img = Segmented image
    objects     = Updated list of contours

    :param segment_list: list
    :param objects: list
    :param mask: numpy.ndarray
    :return labeled_img: numpy.ndarray
    :return objects: list
    """
    # Validate input: every entry (not just the first) must be an integer
    # segment ID, and the list must not be empty. NumPy integer scalars are
    # accepted as well as Python ints.
    if len(segment_list) == 0 or not all(isinstance(i, (int, np.integer)) for i in segment_list):
        fatal_error("segment_list must be a list of object ID's")
    all_objects = objects[:]
    # Process IDs from highest to lowest so that earlier pops do not shift
    # the indices of segments still to be removed.
    ids_desc = sorted(segment_list, reverse=True)
    # Seed the combined contour with the highest-ID segment and remove it
    # from the list of all objects.
    combined_object = objects[ids_desc[0]]
    all_objects.pop(ids_desc[0])
    for seg_id in ids_desc[1:]:
        # Concatenate the segment's points onto the combined contour
        combined_object = np.append(combined_object, objects[seg_id], 0)
        all_objects.pop(seg_id)
    # The combined object is appended at the end of the contour list
    all_objects.append(combined_object)
    labeled_img = mask.copy()
    labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_GRAY2RGB)
    # Color each segment a different color, use a previously saved scale if available
    rand_color = color_palette(num=len(all_objects), saved=True)
    # Plot all segment contours first so labels are drawn on top afterwards
    for i, contour in enumerate(all_objects):
        cv2.drawContours(labeled_img, contour, -1, rand_color[i], params.line_thickness, lineType=8)
    # Label each segment at its first contour point
    for i, contour in enumerate(all_objects):
        w = contour[0][0][0]
        h = contour[0][0][1]
        text = "ID:{}".format(i)
        cv2.putText(img=labeled_img, text=text, org=(w, h), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=params.text_size, color=rand_color[i], thickness=2)
    _debug(visual=labeled_img, filename=os.path.join(params.debug_outdir, f"{params.device}_combined_segment_ids.png"))
    return labeled_img, all_objects
| mit | c3d1f2b11124e1d28f42b0b341fcec90 | 35.415584 | 119 | 0.677247 | 3.641558 | false | false | false | false |
danforthcenter/plantcv | plantcv/plantcv/rgb2gray_cmyk.py | 2 | 1727 | # RGB -> CMYK -> Gray
import cv2
import os
from plantcv.plantcv._debug import _debug
from plantcv.plantcv import fatal_error
from plantcv.plantcv import params
import numpy as np
def rgb2gray_cmyk(rgb_img, channel):
    """Convert image from RGB colorspace to CMYK colorspace. Returns the specified subchannel as a gray image.

    Inputs:
    rgb_img   = RGB image data
    channel   = color subchannel (c = cyan, m = magenta, y = yellow, k=black)

    Returns:
    c | m | y | k = grayscale image from one CMYK color channel

    :param rgb_img: numpy.ndarray
    :param channel: str
    :return channel: numpy.ndarray
    """
    # The allowable channel inputs are c, m, y or k
    names = {"c": "cyan", "m": "magenta", "y": "yellow", "k": "black"}
    channel = channel.lower()
    if channel not in names:
        fatal_error("Channel " + str(channel) + " is not c, m, y or k!")
    # Scale pixel values to floats in [0, 1]
    bgr = rgb_img.astype(float)/255.
    # K (black) channel: 1 minus the per-pixel maximum over the color axis
    k = 1 - np.max(bgr, axis=2)
    # NOTE(review): the divisions below produce NaN (0/0) for pure black
    # pixels where k == 1 -- TODO confirm inputs cannot be fully black, or
    # guard the division.
    # First color channel: computed from index 2 of the input
    c = (1 - bgr[..., 2] - k) / (1 - k)
    # Second color channel: computed from index 1 of the input
    m = (1 - bgr[..., 1] - k) / (1 - k)
    # Third color channel: computed from index 0 of the input
    y = (1 - bgr[..., 0] - k) / (1 - k)
    # Stack the four channels and rescale to 8-bit
    cmyk = (np.dstack((c, m, y, k)) * 255).astype(np.uint8)
    # NOTE(review): the unpack order below swaps the first and third channels
    # relative to the (c, m, y, k) stacking above. This is self-consistent
    # only if the input is RGB-ordered, despite the local variable being
    # named `bgr` -- TODO confirm the channel order callers pass in.
    y, m, c, k = cv2.split(cmyk)
    # Channel dictionary for lookups by channel name
    channels = {"c": c, "m": m, "y": y, "k": k}
    # Save or display the grayscale image
    _debug(visual=channels[channel], filename=os.path.join(params.debug_outdir,
                                                           str(params.device) + "_cmyk_" + names[channel] + ".png"))
    return channels[channel]
| mit | 1a4c9998c3c87f85bc8825cbab8d1816 | 29.298246 | 116 | 0.583092 | 3.198148 | false | false | false | false |
chainer/chainercv | chainercv/evaluations/eval_instance_segmentation_coco.py | 3 | 12753 | import itertools
import numpy as np
import os
import six
from chainercv.evaluations.eval_detection_coco import _redirect_stdout
from chainercv.evaluations.eval_detection_coco import _summarize
try:
import pycocotools.coco
import pycocotools.cocoeval
import pycocotools.mask as mask_tools
_available = True
except ImportError:
_available = False
def eval_instance_segmentation_coco(
        pred_masks, pred_labels, pred_scores,
        gt_masks, gt_labels, gt_areas=None, gt_crowdeds=None):
    """Evaluate instance segmentations based on evaluation code of MS COCO.

    This function evaluates predicted instance segmentations obtained from
    a dataset by using average precision for each class.
    The code is based on the evaluation code used in MS COCO.

    Args:
        pred_masks (iterable of numpy.ndarray): See the table below.
        pred_labels (iterable of numpy.ndarray): See the table below.
        pred_scores (iterable of numpy.ndarray): See the table below.
        gt_masks (iterable of numpy.ndarray): See the table below.
        gt_labels (iterable of numpy.ndarray): See the table below.
        gt_areas (iterable of numpy.ndarray): See the table below. If
            :obj:`None`, some scores are not returned.
        gt_crowdeds (iterable of numpy.ndarray): See the table below.

    .. csv-table::
        :header: name, shape, dtype, format

        :obj:`pred_masks`, ":math:`[(R, H, W)]`", :obj:`bool`, --
        :obj:`pred_labels`, ":math:`[(R,)]`", :obj:`int32`, \
        ":math:`[0, \#fg\_class - 1]`"
        :obj:`pred_scores`, ":math:`[(R,)]`", :obj:`float32`, \
        --
        :obj:`gt_masks`, ":math:`[(R, H, W)]`", :obj:`bool`, --
        :obj:`gt_labels`, ":math:`[(R,)]`", :obj:`int32`, \
        ":math:`[0, \#fg\_class - 1]`"
        :obj:`gt_areas`, ":math:`[(R,)]`", \
        :obj:`float32`, --
        :obj:`gt_crowdeds`, ":math:`[(R,)]`", :obj:`bool`, --

    All inputs should have the same length. For more detailed explanation
    of the inputs, please refer to
    :class:`chainercv.datasets.COCOInstanceSegmentationDataset`.

    .. seealso::
        :class:`chainercv.datasets.COCOInstanceSegmentationDataset`.

    Returns:
        dict:

        The keys, value-types and the description of the values are listed
        below. The APs and ARs calculated with different iou
        thresholds, sizes of objects, and numbers of detections
        per image. For more details on the 12 patterns of evaluation metrics,
        please refer to COCO's official `evaluation page`_.

        .. csv-table::
            :header: key, type, description

            ap/iou=0.50:0.95/area=all/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_1]_
            ap/iou=0.50/area=all/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_1]_
            ap/iou=0.75/area=all/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_1]_
            ap/iou=0.50:0.95/area=small/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_1]_ [#coco_ins_eval_5]_
            ap/iou=0.50:0.95/area=medium/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_1]_ [#coco_ins_eval_5]_
            ap/iou=0.50:0.95/area=large/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_1]_ [#coco_ins_eval_5]_
            ar/iou=0.50:0.95/area=all/max_dets=1, *numpy.ndarray*, \
                [#coco_ins_eval_2]_
            ar/iou=0.50:0.95/area=all/max_dets=10, *numpy.ndarray*, \
                [#coco_ins_eval_2]_
            ar/iou=0.50:0.95/area=all/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_2]_
            ar/iou=0.50:0.95/area=small/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_2]_ [#coco_ins_eval_5]_
            ar/iou=0.50:0.95/area=medium/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_2]_ [#coco_ins_eval_5]_
            ar/iou=0.50:0.95/area=large/max_dets=100, *numpy.ndarray*, \
                [#coco_ins_eval_2]_ [#coco_ins_eval_5]_
            map/iou=0.50:0.95/area=all/max_dets=100, *float*, \
                [#coco_ins_eval_3]_
            map/iou=0.50/area=all/max_dets=100, *float*, \
                [#coco_ins_eval_3]_
            map/iou=0.75/area=all/max_dets=100, *float*, \
                [#coco_ins_eval_3]_
            map/iou=0.50:0.95/area=small/max_dets=100, *float*, \
                [#coco_ins_eval_3]_ [#coco_ins_eval_5]_
            map/iou=0.50:0.95/area=medium/max_dets=100, *float*, \
                [#coco_ins_eval_3]_ [#coco_ins_eval_5]_
            map/iou=0.50:0.95/area=large/max_dets=100, *float*, \
                [#coco_ins_eval_3]_ [#coco_ins_eval_5]_
            mar/iou=0.50:0.95/area=all/max_dets=1, *float*, \
                [#coco_ins_eval_4]_
            mar/iou=0.50:0.95/area=all/max_dets=10, *float*, \
                [#coco_ins_eval_4]_
            mar/iou=0.50:0.95/area=all/max_dets=100, *float*, \
                [#coco_ins_eval_4]_
            mar/iou=0.50:0.95/area=small/max_dets=100, *float*, \
                [#coco_ins_eval_4]_ [#coco_ins_eval_5]_
            mar/iou=0.50:0.95/area=medium/max_dets=100, *float*, \
                [#coco_ins_eval_4]_ [#coco_ins_eval_5]_
            mar/iou=0.50:0.95/area=large/max_dets=100, *float*, \
                [#coco_ins_eval_4]_ [#coco_ins_eval_5]_
            coco_eval, *pycocotools.cocoeval.COCOeval*, \
                result from :obj:`pycocotools`
            existent_labels, *list*, \
                used labels \

    .. [#coco_ins_eval_1] An array of average precisions. \
        The :math:`l`-th value corresponds to the average precision \
        for class :math:`l`. If class :math:`l` does not exist in \
        either :obj:`pred_labels` or :obj:`gt_labels`, the corresponding \
        value is set to :obj:`numpy.nan`.
    .. [#coco_ins_eval_2] An array of average recalls. \
        The :math:`l`-th value corresponds to the average recall \
        for class :math:`l`. If class :math:`l` does not exist in \
        either :obj:`pred_labels` or :obj:`gt_labels`, the corresponding \
        value is set to :obj:`numpy.nan`.
    .. [#coco_ins_eval_3] The average of average precisions over classes.
    .. [#coco_ins_eval_4] The average of average recalls over classes.
    .. [#coco_ins_eval_5] Skip if :obj:`gt_areas` is :obj:`None`.

    """
    if not _available:
        raise ValueError(
            'Please install pycocotools \n'
            'pip install -e \'git+https://github.com/cocodataset/coco.git'
            '#egg=pycocotools&subdirectory=PythonAPI\'')

    gt_coco = pycocotools.coco.COCO()
    pred_coco = pycocotools.coco.COCO()

    pred_masks = iter(pred_masks)
    pred_labels = iter(pred_labels)
    pred_scores = iter(pred_scores)
    gt_masks = iter(gt_masks)
    gt_labels = iter(gt_labels)
    # When areas are not supplied, feed dummy None values and skip the
    # area-dependent (small/medium/large) metrics in the output.
    if gt_areas is None:
        compute_area_dependent_metrics = False
        gt_areas = itertools.repeat(None)
    else:
        compute_area_dependent_metrics = True
        gt_areas = iter(gt_areas)
    gt_crowdeds = (iter(gt_crowdeds) if gt_crowdeds is not None
                   else itertools.repeat(None))

    images = []
    pred_annos = []
    gt_annos = []
    # Dict used as a set of all label ids seen in predictions or ground truth
    existent_labels = {}
    for i, (pred_mask, pred_label, pred_score, gt_mask, gt_label,
            gt_area, gt_crowded) in enumerate(six.moves.zip(
                pred_masks, pred_labels, pred_scores,
                gt_masks, gt_labels, gt_areas, gt_crowdeds)):
        size = pred_mask.shape[1:]
        # Per-image placeholders so zip below still pairs each instance
        if gt_area is None:
            gt_area = itertools.repeat(None)
        if gt_crowded is None:
            gt_crowded = itertools.repeat(None)
        # Starting ids from 1 is important when using COCO.
        img_id = i + 1

        for pred_msk, pred_lb, pred_sc in zip(
                pred_mask, pred_label, pred_score):
            pred_annos.append(
                _create_anno(pred_msk, pred_lb, pred_sc,
                             img_id=img_id, anno_id=len(pred_annos) + 1,
                             crw=0, ar=None))
            existent_labels[pred_lb] = True

        for gt_msk, gt_lb, gt_ar, gt_crw in zip(
                gt_mask, gt_label, gt_area, gt_crowded):
            gt_annos.append(
                _create_anno(gt_msk, gt_lb, None,
                             img_id=img_id, anno_id=len(gt_annos) + 1,
                             ar=gt_ar, crw=gt_crw))
            existent_labels[gt_lb] = True
        images.append({'id': img_id, 'height': size[0], 'width': size[1]})

    existent_labels = sorted(existent_labels.keys())
    pred_coco.dataset['categories'] = [{'id': i} for i in existent_labels]
    gt_coco.dataset['categories'] = [{'id': i} for i in existent_labels]

    pred_coco.dataset['annotations'] = pred_annos
    gt_coco.dataset['annotations'] = gt_annos
    pred_coco.dataset['images'] = images
    gt_coco.dataset['images'] = images

    # Silence pycocotools' prints during indexing and evaluation
    with _redirect_stdout(open(os.devnull, 'w')):
        pred_coco.createIndex()
        gt_coco.createIndex()
        coco_eval = pycocotools.cocoeval.COCOeval(gt_coco, pred_coco, 'segm')
        coco_eval.evaluate()
        coco_eval.accumulate()

    results = {'coco_eval': coco_eval}
    p = coco_eval.params
    common_kwargs = {
        'prec': coco_eval.eval['precision'],
        'rec': coco_eval.eval['recall'],
        'iou_threshs': p.iouThrs,
        'area_ranges': p.areaRngLbl,
        'max_detection_list': p.maxDets}
    all_kwargs = {
        'ap/iou=0.50:0.95/area=all/max_dets=100': {
            'ap': True, 'iou_thresh': None, 'area_range': 'all',
            'max_detection': 100},
        'ap/iou=0.50/area=all/max_dets=100': {
            'ap': True, 'iou_thresh': 0.5, 'area_range': 'all',
            'max_detection': 100},
        'ap/iou=0.75/area=all/max_dets=100': {
            'ap': True, 'iou_thresh': 0.75, 'area_range': 'all',
            'max_detection': 100},
        'ar/iou=0.50:0.95/area=all/max_dets=1': {
            'ap': False, 'iou_thresh': None, 'area_range': 'all',
            'max_detection': 1},
        'ar/iou=0.50:0.95/area=all/max_dets=10': {
            'ap': False, 'iou_thresh': None, 'area_range': 'all',
            'max_detection': 10},
        'ar/iou=0.50:0.95/area=all/max_dets=100': {
            'ap': False, 'iou_thresh': None, 'area_range': 'all',
            'max_detection': 100},
    }
    if compute_area_dependent_metrics:
        all_kwargs.update({
            'ap/iou=0.50:0.95/area=small/max_dets=100': {
                'ap': True, 'iou_thresh': None, 'area_range': 'small',
                'max_detection': 100},
            'ap/iou=0.50:0.95/area=medium/max_dets=100': {
                'ap': True, 'iou_thresh': None, 'area_range': 'medium',
                'max_detection': 100},
            'ap/iou=0.50:0.95/area=large/max_dets=100': {
                'ap': True, 'iou_thresh': None, 'area_range': 'large',
                'max_detection': 100},
            'ar/iou=0.50:0.95/area=small/max_dets=100': {
                'ap': False, 'iou_thresh': None, 'area_range': 'small',
                'max_detection': 100},
            'ar/iou=0.50:0.95/area=medium/max_dets=100': {
                'ap': False, 'iou_thresh': None, 'area_range': 'medium',
                'max_detection': 100},
            'ar/iou=0.50:0.95/area=large/max_dets=100': {
                'ap': False, 'iou_thresh': None, 'area_range': 'large',
                'max_detection': 100},
        })

    for key, kwargs in all_kwargs.items():
        kwargs.update(common_kwargs)
        metrics, mean_metric = _summarize(**kwargs)

        # pycocotools ignores classes that are not included in
        # either gt or prediction, but lies between 0 and
        # the maximum label id.
        # We set values for these classes to np.nan.
        results[key] = np.nan * np.ones(np.max(existent_labels) + 1)
        results[key][existent_labels] = metrics
        results['m' + key] = mean_metric

    results['existent_labels'] = existent_labels
    return results
def _create_anno(msk, lb, sc, img_id, anno_id, ar=None, crw=None):
    """Build a COCO-format annotation dict from a single instance mask.

    Args:
        msk (numpy.ndarray): A binary mask of shape (H, W).
        lb: The class label id.
        sc: The prediction score, or None for a ground-truth annotation
            (in which case no 'score' field is added).
        img_id (int): COCO image id (1-based).
        anno_id (int): COCO annotation id (1-based).
        ar: Area of the instance. If None, a dummy area computed from the
            mask is used; area-dependent scores are ignored afterwards.
        crw: Crowd flag. None is treated as False.

    Returns:
        dict: An annotation in the format expected by pycocotools.
    """
    if crw is None:
        crw = False
    # pycocotools' RLE encoder requires a Fortran-contiguous uint8 array
    msk = np.asfortranarray(msk.astype(np.uint8))
    rle = mask_tools.encode(msk)
    if ar is None:
        # We compute dummy area to pass to pycocotools.
        # Note that area dependent scores are ignored afterwards.
        ar = mask_tools.area(rle)
    anno = {
        'image_id': img_id, 'category_id': lb,
        'segmentation': rle,
        'area': ar,
        'id': anno_id,
        'iscrowd': crw}
    if sc is not None:
        anno.update({'score': sc})
    return anno
| mit | 2b9d6c75e41e79e57e5ca1bc798f6013 | 41.939394 | 77 | 0.555085 | 3.105186 | false | false | false | false |
chainer/chainercv | chainercv/extensions/evaluator/semantic_segmentation_evaluator.py | 3 | 4741 | import copy
import numpy as np
from chainer import reporter
import chainer.training.extensions
from chainercv.evaluations import eval_semantic_segmentation
from chainercv.utils import apply_to_iterator
class SemanticSegmentationEvaluator(chainer.training.extensions.Evaluator):
    """An extension that evaluates a semantic segmentation model.

    This extension iterates over an iterator, runs the model's
    :meth:`predict` on the images, and scores the predicted label maps
    with :func:`chainercv.evaluations.eval_semantic_segmentation`.

    The following values are reported:

    * :obj:`'miou'`: Mean of IoUs (mIoU).
    * :obj:`'pixel_accuracy'`: Pixel accuracy.
    * :obj:`'mean_class_accuracy'`: Mean of class accuracies.
    * :obj:`'iou/<label_names[l]>'` and
      :obj:`'class_accuracy/<label_names[l]>'`: Per-class IoU and class
      accuracy for class :obj:`label_names[l]`. Reported only when
      :obj:`label_names` is specified.

    If a class in :obj:`label_names` has no entry in the evaluation result
    (e.g. no pixel of that class appears in the ground truth),
    :obj:`numpy.nan` is reported for its per-class keys, and the means are
    computed excluding such values.

    .. seealso::
        :func:`chainercv.evaluations.eval_semantic_segmentation`.

    Args:
        iterator (chainer.Iterator): An iterator whose samples are
            :obj:`img, label` tuples, where :obj:`label` is a pixel-wise
            label map.
        target (chainer.Link): A semantic segmentation link with a
            :meth:`predict` method that takes a list of images and returns
            :obj:`labels`.
        label_names (iterable of strings): Names of the classes used for
            the per-class report keys.
        comm (~chainermn.communicators.CommunicatorBase): A ChainerMN
            communicator. When given, the root worker's iterator is
            scattered across workers and the results are gathered back to
            the root worker.

    """

    trigger = (1, 'epoch')
    default_name = 'validation'
    priority = chainer.training.PRIORITY_WRITER

    def __init__(self, iterator, target, label_names=None, comm=None):
        if iterator is None:
            iterator = {}
        super(SemanticSegmentationEvaluator, self).__init__(iterator, target)
        self.label_names = label_names
        self.comm = comm

    def evaluate(self):
        target = self._targets['main']
        comm = self.comm
        # Non-root workers only contribute predictions; the root worker
        # gathers everything and produces the report.
        if comm is not None and comm.rank != 0:
            apply_to_iterator(target.predict, None, comm=comm)
            return {}

        main_iter = self._iterators['main']
        it = main_iter if hasattr(main_iter, 'reset') else copy.copy(main_iter)
        if it is main_iter:
            it.reset()

        in_values, out_values, rest_values = apply_to_iterator(
            target.predict, it, comm=comm)
        # The input images are not needed for scoring; free them early.
        del in_values

        pred_labels, = out_values
        gt_labels, = rest_values

        result = eval_semantic_segmentation(pred_labels, gt_labels)

        report = {
            'miou': result['miou'],
            'pixel_accuracy': result['pixel_accuracy'],
            'mean_class_accuracy': result['mean_class_accuracy'],
        }

        if self.label_names is not None:
            for l, label_name in enumerate(self.label_names):
                iou_key = 'iou/{:s}'.format(label_name)
                acc_key = 'class_accuracy/{:s}'.format(label_name)
                try:
                    report[iou_key] = result['iou'][l]
                    report[acc_key] = result['class_accuracy'][l]
                except IndexError:
                    # Class index beyond the evaluated classes: report NaN
                    report[iou_key] = np.nan
                    report[acc_key] = np.nan

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
| mit | f37209ca1ec08979b6c64ad6a969ef80 | 38.840336 | 78 | 0.61949 | 4.031463 | false | false | false | false |
chainer/chainercv | chainercv/links/connection/conv_2d_bn_activ.py | 3 | 4690 | import chainer
from chainer.functions import relu
from chainer.links import BatchNormalization
from chainer.links import Convolution2D
try:
from chainermn.links import MultiNodeBatchNormalization
except ImportError:
pass
class Conv2DBNActiv(chainer.Chain):
    """Convolution2D --> Batch Normalization --> Activation

    A chain that applies a two-dimensional convolution, batch
    normalization, and an activation function in sequence. The arguments
    mirror those of :class:`chainer.links.Convolution2D` except for
    :obj:`activ` and :obj:`bn_kwargs`; note that :obj:`nobias` defaults to
    :obj:`True` here.

    As with :class:`chainer.links.Convolution2D`, :obj:`in_channels` may be
    omitted or set to :obj:`None`, in which case it is determined from the
    input at the first forward pass:

    >>> l = Conv2DBNActiv(5, 10, 3)
    >>> l = Conv2DBNActiv(10, 3)
    >>> l = Conv2DBNActiv(None, 10, 3)

    If :obj:`bn_kwargs` contains the key :obj:`comm` (a ChainerMN
    communicator), :class:`chainermn.links.MultiNodeBatchNormalization` is
    used for the batch normalization; otherwise
    :class:`chainer.links.BatchNormalization` is used.

    Args:
        in_channels (int or None): Number of channels of input arrays.
            Deferred when :obj:`None`.
        out_channels (int): Number of channels of output arrays.
        ksize (int or tuple of ints): Size of filters (a.k.a. kernels).
        stride (int or tuple of ints): Stride of filter applications.
        pad (int or tuple of ints): Spatial padding width for input arrays.
        dilate (int or tuple of ints): Dilation factor of filter
            applications.
        groups (int): The number of groups for grouped convolution.
        nobias (bool): If :obj:`True`, no bias term is used.
        initialW (callable): Initial weight value. If :obj:`None`, the
            default initializer is used.
        initial_bias (callable): Initial bias value. If :obj:`None`, the
            bias is set to 0.
        activ (callable): Activation function applied after batch
            normalization. Defaults to :func:`chainer.functions.relu`.
            If :obj:`None`, no activation is applied.
        bn_kwargs (dict): Keyword arguments passed when constructing the
            batch normalization link.

    """

    def __init__(self, in_channels, out_channels, ksize=None,
                 stride=1, pad=0, dilate=1, groups=1, nobias=True,
                 initialW=None, initial_bias=None, activ=relu, bn_kwargs={}):
        # Support the two-argument form Conv2DBNActiv(out_channels, ksize):
        # shift the positional arguments and defer in_channels.
        if ksize is None:
            out_channels, ksize, in_channels = in_channels, out_channels, None

        super(Conv2DBNActiv, self).__init__()
        self.activ = activ
        with self.init_scope():
            self.conv = Convolution2D(
                in_channels, out_channels, ksize, stride, pad,
                nobias, initialW, initial_bias, dilate=dilate, groups=groups)
            # Use the multi-node variant when a ChainerMN communicator is
            # supplied through bn_kwargs.
            if 'comm' in bn_kwargs:
                self.bn = MultiNodeBatchNormalization(
                    out_channels, **bn_kwargs)
            else:
                self.bn = BatchNormalization(out_channels, **bn_kwargs)

    def forward(self, x):
        """Apply convolution, batch normalization, then the activation."""
        h = self.bn(self.conv(x))
        return h if self.activ is None else self.activ(h)
| mit | 75e9ee9b72280cd045269ea1a6e9ae1f | 42.425926 | 79 | 0.625373 | 4.074718 | false | false | false | false |
chainer/chainercv | examples/classification/eval_imagenet.py | 2 | 4065 | import argparse
import numpy as np
import chainer
import chainer.functions as F
from chainer import iterators
from chainercv.datasets import directory_parsing_label_names
from chainercv.datasets import DirectoryParsingLabelDataset
from chainercv.links import FeaturePredictor
from chainercv.links import MobileNetV2
from chainercv.links import ResNet101
from chainercv.links import ResNet152
from chainercv.links import ResNet50
from chainercv.links import SEResNet101
from chainercv.links import SEResNet152
from chainercv.links import SEResNet50
from chainercv.links import SEResNeXt101
from chainercv.links import SEResNeXt50
from chainercv.links import VGG16
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
# Maps each CLI model name to its configuration tuple. Slot meanings:
#   0: link class
#   1: dict mapping dataset name -> pretrained weight spec
#   2: default batch size
#   3: default crop mode ('center' or '10')
#   4: default ResNet arch string, or None for non-ResNet models
models = {
    # model: (class, dataset -> pretrained_model, default batchsize,
    #         crop, resnet_arch)
    'vgg16': (VGG16, {}, 32, 'center', None),
    'resnet50': (ResNet50, {}, 32, 'center', 'fb'),
    'resnet101': (ResNet101, {}, 32, 'center', 'fb'),
    'resnet152': (ResNet152, {}, 32, 'center', 'fb'),
    'se-resnet50': (SEResNet50, {}, 32, 'center', None),
    'se-resnet101': (SEResNet101, {}, 32, 'center', None),
    'se-resnet152': (SEResNet152, {}, 32, 'center', None),
    'se-resnext50': (SEResNeXt50, {}, 32, 'center', None),
    'se-resnext101': (SEResNeXt101, {}, 32, 'center', None),
    'mobilenet_v2_1.0': (MobileNetV2, {}, 32, 'center', None),
    'mobilenet_v2_1.4': (MobileNetV2, {}, 32, 'center', None)
}
def setup(dataset, model, pretrained_model, batchsize, val, crop, resnet_arch):
    """Resolve CLI options into (dataset, eval function, model, batch size).

    NOTE(review): ``extractor``, ``eval_`` and ``default_batchsize`` are only
    bound inside the ``dataset == 'imagenet'`` branch but are used after it;
    any other dataset name would raise NameError. 'imagenet' is the only
    choice the CLI exposes, so this holds in practice — confirm if more
    datasets are ever added.
    """
    dataset_name = dataset
    if dataset_name == 'imagenet':
        dataset = DirectoryParsingLabelDataset(val)
        label_names = directory_parsing_label_names(val)
        def eval_(out_values, rest_values):
            # out_values/rest_values are 1-tuples of iterables of per-sample
            # predictions / ground-truth labels.
            pred_probs, = out_values
            gt_labels, = rest_values
            accuracy = F.accuracy(
                np.array(list(pred_probs)), np.array(list(gt_labels))).data
            print()
            print('Top 1 Error {}'.format(1. - accuracy))
        cls, pretrained_models, default_batchsize = models[model][:3]
        if pretrained_model is None:
            # Fall back to a dataset-keyed pretrained spec; defaults to the
            # dataset name itself (resolved downstream by the link class).
            pretrained_model = pretrained_models.get(dataset_name, dataset_name)
        if crop is None:
            crop = models[model][3]
        kwargs = {
            'n_class': len(label_names),
            'pretrained_model': pretrained_model,
        }
        if model in ['resnet50', 'resnet101', 'resnet152']:
            if resnet_arch is None:
                resnet_arch = models[model][4]
            kwargs.update({'arch': resnet_arch})
        extractor = cls(**kwargs)
    # Wrap the feature extractor with resize/crop preprocessing.
    model = FeaturePredictor(
        extractor, crop_size=224, scale_size=256, crop=crop)
    if batchsize is None:
        batchsize = default_batchsize
    return dataset, eval_, model, batchsize
def main():
    """CLI entry point: evaluate a pretrained convnet on ImageNet validation data."""
    parser = argparse.ArgumentParser(
        description='Evaluating convnet from ILSVRC2012 dataset')
    parser.add_argument('val', help='Path to root of the validation dataset')
    parser.add_argument('--model', choices=sorted(models.keys()))
    parser.add_argument('--pretrained-model')
    parser.add_argument('--dataset', choices=('imagenet',))
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int)
    parser.add_argument('--crop', choices=('center', '10'))
    parser.add_argument('--resnet-arch')
    args = parser.parse_args()

    eval_dataset, evaluate, predictor, batch_size = setup(
        args.dataset, args.model, args.pretrained_model, args.batchsize,
        args.val, args.crop, args.resnet_arch)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        predictor.to_gpu()

    data_iter = iterators.MultiprocessIterator(
        eval_dataset, batch_size, repeat=False, shuffle=False,
        n_processes=6, shared_mem=300000000)

    print('Model has been prepared. Evaluation starts.')
    in_values, out_values, rest_values = apply_to_iterator(
        predictor.predict, data_iter, hook=ProgressHook(len(eval_dataset)))
    # The input images are not needed for scoring; drop the reference early.
    del in_values

    evaluate(out_values, rest_values)


if __name__ == '__main__':
    main()
| mit | b1ae437c6249fe810d36df83a71d6003 | 34.043103 | 79 | 0.666913 | 3.50431 | false | false | false | false |
sixty-north/cosmic-ray | tests/resources/example_project/adam/adam_2.py | 1 | 1630 | """adam.adam_2
"""
# pylint: disable=C0111
import ctypes
import functools
import operator
def trigger_infinite_loop():
    """Mutation-testing target: certain mutations make this loop forever."""
    result = None
    # When `break` becomes `continue`, this should enter an infinite loop. This
    # helps us test timeouts.
    # Any object which isn't None passes the truth value testing so here
    # we use `while object()` instead of `while True` b/c the later becomes
    # `while False` when ReplaceTrueFalse is applied and we don't trigger an
    # infinite loop.
    while object():
        result = object()
        break
    # when `while object()` becomes `while not object()`
    # the code below will be triggered
    return result
def single_iteration():
    """Mutation-testing target: a for-loop whose body runs exactly once."""
    result = None
    iterable = [object()]
    # The loop variable is deliberately unused; it only exists so loop-related
    # mutation operators have something to mutate.
    for i in iterable:  # pylint: disable=W0612
        result = True
    return result
def handle_exception():
    """Mutation-testing target: an exception raised and caught locally."""
    result = None
    try:
        # Raised unconditionally; exception-related mutation operators can
        # change the raised/caught types.
        raise IOError
    except IOError:
        result = True
    return result
def decorator(func):
    """Mark *func* with a ``cosmic_ray`` attribute (read by decorated_func)."""
    func.cosmic_ray = True
    return func
@decorator
def decorated_func():
    """Mutation-testing target: reads the attribute set by ``decorator``."""
    result = None
    if decorated_func.cosmic_ray:
        result = True
    return result
def use_ctypes(size):
    """Mutation-testing target exercising ctypes buffers of *size* bytes.

    NOTE(review): the final memmove copies 10**6 bytes regardless of *size*;
    this assumes callers pass size >= 10**6 — confirm against the test suite.
    """
    array_type = ctypes.c_char * size
    chars_a = array_type(*(b"a" * size))
    chars_b = array_type(*(b"b" * size))
    # This odd construct ensures that, under number mutation to increase number
    # values, `size` varies by amounts big enough to trigger a segfault on the
    # subsequent memmove.
    size = functools.reduce(operator.mul, [10, 10, 10, 10, 10, 10])
    ctypes.memmove(chars_a, chars_b, size)
    return chars_a.value
| mit | e5c44ab41662333b2e49384f7aee069e | 21.328767 | 79 | 0.648466 | 3.890215 | false | false | false | false |
spesmilo/electrum | electrum/gui/qml/qeinvoice.py | 1 | 21812 | import threading
import asyncio
from urllib.parse import urlparse
from PyQt5.QtCore import pyqtProperty, pyqtSignal, pyqtSlot, QObject, Q_ENUMS
from electrum import bitcoin
from electrum import lnutil
from electrum.i18n import _
from electrum.invoices import Invoice
from electrum.invoices import (PR_UNPAID, PR_EXPIRED, PR_UNKNOWN, PR_PAID, PR_INFLIGHT,
PR_FAILED, PR_ROUTING, PR_UNCONFIRMED)
from electrum.lnaddr import LnInvoiceException
from electrum.logging import get_logger
from electrum.transaction import PartialTxOutput
from electrum.util import (parse_URI, InvalidBitcoinURI, InvoiceError,
maybe_extract_lightning_payment_identifier)
from electrum.lnurl import decode_lnurl, request_lnurl, callback_lnurl
from electrum.bitcoin import COIN
from .qetypes import QEAmount
from .qewallet import QEWallet
class QEInvoice(QObject):
    """Base QObject exposing an electrum invoice's state to QML.

    Provides the ``wallet``/``canSave``/``canPay``/``key``/``userinfo`` Qt
    properties plus the ``Type``/``Status`` enums registered via Q_ENUMS.

    NOTE(review): ``_userinfo`` has no class-level default here; subclasses
    are expected to define it before the ``userinfo`` getter is read.
    """
    # Invoice kind as seen by QML (values mirror this module's parsing logic).
    class Type:
        Invalid = -1
        OnchainInvoice = 0
        LightningInvoice = 1
        LightningAndOnchainInvoice = 2
        LNURLPayRequest = 3
    # Payment-request status constants re-exported for QML.
    class Status:
        Unpaid = PR_UNPAID
        Expired = PR_EXPIRED
        Unknown = PR_UNKNOWN
        Paid = PR_PAID
        Inflight = PR_INFLIGHT
        Failed = PR_FAILED
        Routing = PR_ROUTING
        Unconfirmed = PR_UNCONFIRMED
    Q_ENUMS(Type)
    Q_ENUMS(Status)
    _logger = get_logger(__name__)
    # Class-level defaults for the Qt-property backing fields.
    _wallet = None
    _canSave = False
    _canPay = False
    _key = None
    def __init__(self, parent=None):
        super().__init__(parent)
    walletChanged = pyqtSignal()
    @pyqtProperty(QEWallet, notify=walletChanged)
    def wallet(self):
        return self._wallet
    @wallet.setter
    def wallet(self, wallet: QEWallet):
        if self._wallet != wallet:
            self._wallet = wallet
            self.walletChanged.emit()
    canSaveChanged = pyqtSignal()
    @pyqtProperty(bool, notify=canSaveChanged)
    def canSave(self):
        return self._canSave
    @canSave.setter
    def canSave(self, canSave):
        if self._canSave != canSave:
            self._canSave = canSave
            self.canSaveChanged.emit()
    canPayChanged = pyqtSignal()
    @pyqtProperty(bool, notify=canPayChanged)
    def canPay(self):
        return self._canPay
    @canPay.setter
    def canPay(self, canPay):
        if self._canPay != canPay:
            self._canPay = canPay
            self.canPayChanged.emit()
    keyChanged = pyqtSignal()
    @pyqtProperty(str, notify=keyChanged)
    def key(self):
        return self._key
    @key.setter
    def key(self, key):
        if self._key != key:
            self._key = key
            self.keyChanged.emit()
    userinfoChanged = pyqtSignal()
    @pyqtProperty(str, notify=userinfoChanged)
    def userinfo(self):
        return self._userinfo
    @userinfo.setter
    def userinfo(self, userinfo):
        if self._userinfo != userinfo:
            self._userinfo = userinfo
            self.userinfoChanged.emit()
    def get_max_spendable_onchain(self):
        # Spendable on-chain balance; unconfirmed coins count unless the
        # 'confirmed_only' config flag is set.
        spendable = self._wallet.confirmedBalance.satsInt
        if not self._wallet.wallet.config.get('confirmed_only', False):
            spendable += self._wallet.unconfirmedBalance.satsInt
        return spendable
    def get_max_spendable_lightning(self):
        # Delegates to the lightning worker's sendable-capacity estimate.
        return self._wallet.wallet.lnworker.num_sats_can_send()
class QEInvoiceParser(QEInvoice):
    """Parses and validates user-supplied payment data for QML.

    Accepts plain addresses, BIP21 URIs, bolt11 lightning invoices and
    LNURL strings via the ``recipient`` property; the outcome is reported
    through the validation*/lnurl* signals and the invoice properties.
    """
    _logger = get_logger(__name__)
    _invoiceType = QEInvoice.Type.Invalid
    _recipient = ''
    _effectiveInvoice = None
    _amount = QEAmount()
    _userinfo = ''
    invoiceChanged = pyqtSignal()
    invoiceSaved = pyqtSignal([str], arguments=['key'])
    validationSuccess = pyqtSignal()
    validationWarning = pyqtSignal([str,str], arguments=['code', 'message'])
    validationError = pyqtSignal([str,str], arguments=['code', 'message'])
    invoiceCreateError = pyqtSignal([str,str], arguments=['code', 'message'])
    lnurlRetrieved = pyqtSignal()
    lnurlError = pyqtSignal([str,str], arguments=['code', 'message'])
    def __init__(self, parent=None):
        super().__init__(parent)
        self.clear()
    @pyqtProperty(int, notify=invoiceChanged)
    def invoiceType(self):
        return self._invoiceType
    # not a qt setter, don't let outside set state
    def setInvoiceType(self, invoiceType: QEInvoice.Type):
        self._invoiceType = invoiceType
    recipientChanged = pyqtSignal()
    @pyqtProperty(str, notify=recipientChanged)
    def recipient(self):
        return self._recipient
    @recipient.setter
    def recipient(self, recipient: str):
        # Deliberately not guarded by an equality check: re-assigning the same
        # string re-runs validation.
        #if self._recipient != recipient:
        self.canPay = False
        self._recipient = recipient
        self._lnurlData = None
        if recipient:
            self.validateRecipient(recipient)
        self.recipientChanged.emit()
    @pyqtProperty('QVariantMap', notify=lnurlRetrieved)
    def lnurlData(self):
        return self._lnurlData
    @pyqtProperty(str, notify=invoiceChanged)
    def message(self):
        return self._effectiveInvoice.message if self._effectiveInvoice else ''
    @pyqtProperty(QEAmount, notify=invoiceChanged)
    def amount(self):
        # store ref to QEAmount on instance, otherwise we get destroyed when going out of scope
        self._amount = QEAmount()
        if not self._effectiveInvoice:
            return self._amount
        self._amount = QEAmount(from_invoice=self._effectiveInvoice)
        return self._amount
    @amount.setter
    def amount(self, new_amount):
        self._logger.debug(f'set new amount {repr(new_amount)}')
        if self._effectiveInvoice:
            # '!' is electrum's sentinel for "send max".
            self._effectiveInvoice.amount_msat = '!' if new_amount.isMax else int(new_amount.satsInt * 1000)
        self.determine_can_pay()
        self.invoiceChanged.emit()
    @pyqtProperty('quint64', notify=invoiceChanged)
    def expiration(self):
        return self._effectiveInvoice.exp if self._effectiveInvoice else 0
    @pyqtProperty('quint64', notify=invoiceChanged)
    def time(self):
        return self._effectiveInvoice.time if self._effectiveInvoice else 0
    statusChanged = pyqtSignal()
    @pyqtProperty(int, notify=statusChanged)
    def status(self):
        if not self._effectiveInvoice:
            return PR_UNKNOWN
        return self._wallet.wallet.get_invoice_status(self._effectiveInvoice)
    @pyqtProperty(str, notify=statusChanged)
    def status_str(self):
        if not self._effectiveInvoice:
            return ''
        status = self._wallet.wallet.get_invoice_status(self._effectiveInvoice)
        return self._effectiveInvoice.get_status_str(status)
    # single address only, TODO: n outputs
    @pyqtProperty(str, notify=invoiceChanged)
    def address(self):
        return self._effectiveInvoice.get_address() if self._effectiveInvoice else ''
    @pyqtProperty('QVariantMap', notify=invoiceChanged)
    def lnprops(self):
        # Lightning-specific details for QML; empty map for non-LN invoices.
        if not self.invoiceType == QEInvoice.Type.LightningInvoice:
            return {}
        lnaddr = self._effectiveInvoice._lnaddr
        self._logger.debug(str(lnaddr))
        self._logger.debug(str(lnaddr.get_routing_info('t')))
        return {
            'pubkey': lnaddr.pubkey.serialize().hex(),
            'payment_hash': lnaddr.paymenthash.hex(),
            't': '', #lnaddr.get_routing_info('t')[0][0].hex(),
            'r': '' #lnaddr.get_routing_info('r')[0][0][0].hex()
        }
    @pyqtSlot()
    def clear(self):
        """Reset all parsing state back to 'no recipient'."""
        self.recipient = ''
        self.setInvoiceType(QEInvoice.Type.Invalid)
        self._bip21 = None
        self._lnurlData = None
        self.canSave = False
        self.canPay = False
        self.userinfo = ''
        self.invoiceChanged.emit()
    # don't parse the recipient string, but init qeinvoice from an invoice key
    # this should not emit validation signals
    @pyqtSlot(str)
    def initFromKey(self, key):
        self.clear()
        invoice = self._wallet.wallet.get_invoice(key)
        self._logger.debug(repr(invoice))
        if invoice:
            self.set_effective_invoice(invoice)
            self.key = key
    def set_effective_invoice(self, invoice: Invoice):
        """Adopt *invoice* as current and refresh derived state/signals."""
        self._effectiveInvoice = invoice
        if invoice.is_lightning():
            self.setInvoiceType(QEInvoice.Type.LightningInvoice)
        else:
            self.setInvoiceType(QEInvoice.Type.OnchainInvoice)
        self.canSave = True
        self.determine_can_pay()
        self.invoiceChanged.emit()
        self.statusChanged.emit()
    def determine_can_pay(self):
        """Recompute canPay/userinfo from invoice status, amount and balance."""
        self.canPay = False
        self.userinfo = ''
        if self.amount.isEmpty: # unspecified amount
            return
        if self.invoiceType == QEInvoice.Type.LightningInvoice:
            if self.status in [PR_UNPAID, PR_FAILED]:
                if self.get_max_spendable_lightning() >= self.amount.satsInt:
                    lnaddr = self._effectiveInvoice._lnaddr
                    if lnaddr.amount and self.amount.satsInt < lnaddr.amount * COIN:
                        self.userinfo = _('Cannot pay less than the amount specified in the invoice')
                    else:
                        self.canPay = True
                else:
                    self.userinfo = _('Insufficient balance')
            else:
                self.userinfo = {
                    PR_EXPIRED: _('Invoice is expired'),
                    PR_PAID: _('Invoice is already paid'),
                    PR_INFLIGHT: _('Invoice is already being paid'),
                    PR_ROUTING: _('Invoice is already being paid'),
                    PR_UNKNOWN: _('Invoice has unknown status'),
                }[self.status]
        elif self.invoiceType == QEInvoice.Type.OnchainInvoice:
            if self.status in [PR_UNPAID, PR_FAILED]:
                if self.amount.isMax and self.get_max_spendable_onchain() > 0:
                    # TODO: dust limit?
                    self.canPay = True
                elif self.get_max_spendable_onchain() >= self.amount.satsInt:
                    # TODO: dust limit?
                    self.canPay = True
                else:
                    self.userinfo = _('Insufficient balance')
            else:
                self.userinfo = {
                    PR_EXPIRED: _('Invoice is expired'),
                    PR_PAID: _('Invoice is already paid'),
                    PR_UNCONFIRMED: _('Invoice is already paid'),
                    PR_UNKNOWN: _('Invoice has unknown status'),
                }[self.status]
    def setValidOnchainInvoice(self, invoice: Invoice):
        self._logger.debug('setValidOnchainInvoice')
        if invoice.is_lightning():
            raise Exception('unexpected LN invoice')
        self.set_effective_invoice(invoice)
    def setValidLightningInvoice(self, invoice: Invoice):
        self._logger.debug('setValidLightningInvoice')
        if not invoice.is_lightning():
            raise Exception('unexpected Onchain invoice')
        self.set_effective_invoice(invoice)
    def setValidLNURLPayRequest(self):
        self._logger.debug('setValidLNURLPayRequest')
        self.setInvoiceType(QEInvoice.Type.LNURLPayRequest)
        self._effectiveInvoice = None
        self.invoiceChanged.emit()
    def create_onchain_invoice(self, outputs, message, payment_request, uri):
        return self._wallet.wallet.create_invoice(
            outputs=outputs,
            message=message,
            pr=payment_request,
            URI=uri
        )
    def validateRecipient(self, recipient):
        """Classify *recipient* (address / BIP21 / bolt11 / LNURL) and update state."""
        if not recipient:
            self.setInvoiceType(QEInvoice.Type.Invalid)
            return
        maybe_lightning_invoice = recipient
        def _payment_request_resolved(request):
            self._logger.debug('resolved payment request')
            outputs = request.get_outputs()
            invoice = self.create_onchain_invoice(outputs, None, request, None)
            self.setValidOnchainInvoice(invoice)
        try:
            self._bip21 = parse_URI(recipient, _payment_request_resolved)
            if self._bip21:
                if 'r' in self._bip21 or ('name' in self._bip21 and 'sig' in self._bip21): # TODO set flag in util?
                    # let callback handle state
                    return
                if ':' not in recipient:
                    # address only
                    # create bare invoice
                    outputs = [PartialTxOutput.from_address_and_value(self._bip21['address'], 0)]
                    invoice = self.create_onchain_invoice(outputs, None, None, None)
                    self._logger.debug(repr(invoice))
                    self.setValidOnchainInvoice(invoice)
                    self.validationSuccess.emit()
                    return
                else:
                    # fallback lightning invoice?
                    if 'lightning' in self._bip21:
                        maybe_lightning_invoice = self._bip21['lightning']
        except InvalidBitcoinURI as e:
            self._bip21 = None
            self._logger.debug(repr(e))
        lninvoice = None
        maybe_lightning_invoice = maybe_extract_lightning_payment_identifier(maybe_lightning_invoice)
        if maybe_lightning_invoice is not None:
            if maybe_lightning_invoice.startswith('lnurl'):
                self.resolve_lnurl(maybe_lightning_invoice)
                return
            try:
                lninvoice = Invoice.from_bech32(maybe_lightning_invoice)
            except InvoiceError as e:
                e2 = e.__cause__
                if isinstance(e2, LnInvoiceException):
                    self.validationError.emit('unknown', _("Error parsing Lightning invoice") + f":\n{e2}")
                    self.clear()
                    return
                if isinstance(e2, lnutil.IncompatibleOrInsaneFeatures):
                    self.validationError.emit('unknown', _("Invoice requires unknown or incompatible Lightning feature") + f":\n{e2!r}")
                    self.clear()
                    return
                self._logger.exception(repr(e))
        if not lninvoice and not self._bip21:
            self.validationError.emit('unknown',_('Unknown invoice'))
            self.clear()
            return
        if lninvoice:
            if not self._wallet.wallet.has_lightning():
                if not self._bip21:
                    # TODO: lightning onchain fallback in ln invoice
                    #self.validationError.emit('no_lightning',_('Detected valid Lightning invoice, but Lightning not enabled for wallet'))
                    self.setValidLightningInvoice(lninvoice)
                    self.validationSuccess.emit()
                    # self.clear()
                    return
                else:
                    self._logger.debug('flow with LN but not LN enabled AND having bip21 uri')
                    # NOTE(review): this passes an address *string* where
                    # setValidOnchainInvoice expects an Invoice object (it calls
                    # invoice.is_lightning()) — likely a bug; verify this path.
                    self.setValidOnchainInvoice(self._bip21['address'])
            else:
                self.setValidLightningInvoice(lninvoice)
                if not self._wallet.wallet.lnworker.channels:
                    self.validationWarning.emit('no_channels',_('Detected valid Lightning invoice, but there are no open channels'))
                else:
                    self.validationSuccess.emit()
        else:
            self._logger.debug('flow without LN but having bip21 uri')
            if 'amount' not in self._bip21:
                amount = 0
            else:
                amount = self._bip21['amount']
            outputs = [PartialTxOutput.from_address_and_value(self._bip21['address'], amount)]
            self._logger.debug(outputs)
            message = self._bip21['message'] if 'message' in self._bip21 else ''
            invoice = self.create_onchain_invoice(outputs, message, None, self._bip21)
            self._logger.debug(repr(invoice))
            self.setValidOnchainInvoice(invoice)
            self.validationSuccess.emit()
    def resolve_lnurl(self, lnurl):
        """Fetch LNURL metadata on a worker thread; results go to on_lnurl()."""
        self._logger.debug('resolve_lnurl')
        url = decode_lnurl(lnurl)
        self._logger.debug(f'{repr(url)}')
        def resolve_task():
            try:
                coro = request_lnurl(url)
                fut = asyncio.run_coroutine_threadsafe(coro, self._wallet.wallet.network.asyncio_loop)
                self.on_lnurl(fut.result())
            except Exception as e:
                self.validationError.emit('lnurl', repr(e))
        threading.Thread(target=resolve_task).start()
    def on_lnurl(self, lnurldata):
        self._logger.debug('on_lnurl')
        self._logger.debug(f'{repr(lnurldata)}')
        self._lnurlData = {
            'domain': urlparse(lnurldata.callback_url).netloc,
            'callback_url' : lnurldata.callback_url,
            'min_sendable_sat': lnurldata.min_sendable_sat,
            'max_sendable_sat': lnurldata.max_sendable_sat,
            'metadata_plaintext': lnurldata.metadata_plaintext,
            'comment_allowed': lnurldata.comment_allowed
        }
        self.setValidLNURLPayRequest()
        self.lnurlRetrieved.emit()
    @pyqtSlot('quint64')
    @pyqtSlot('quint64', str)
    def lnurlGetInvoice(self, amount, comment=None):
        """Request a bolt11 invoice from the LNURL callback (amount in sats)."""
        assert self._lnurlData
        if self._lnurlData['comment_allowed'] == 0:
            comment = None
        self._logger.debug(f'fetching callback url {self._lnurlData["callback_url"]}')
        def fetch_invoice_task():
            try:
                # LNURL callbacks take millisatoshis.
                params = { 'amount': amount * 1000 }
                if comment:
                    params['comment'] = comment
                coro = callback_lnurl(self._lnurlData['callback_url'], params)
                fut = asyncio.run_coroutine_threadsafe(coro, self._wallet.wallet.network.asyncio_loop)
                self.on_lnurl_invoice(fut.result())
            except Exception as e:
                self.lnurlError.emit('lnurl', repr(e))
        threading.Thread(target=fetch_invoice_task).start()
    def on_lnurl_invoice(self, invoice):
        self._logger.debug('on_lnurl_invoice')
        self._logger.debug(f'{repr(invoice)}')
        # Re-assigning recipient re-runs validation on the returned bolt11.
        invoice = invoice['pr']
        self.recipient = invoice
    @pyqtSlot()
    def save_invoice(self):
        """Persist the current invoice in the wallet (idempotent on key)."""
        self.canSave = False
        if not self._effectiveInvoice:
            return
        self.key = self._effectiveInvoice.get_id()
        if self._wallet.wallet.get_invoice(self.key):
            self._logger.info(f'invoice {self.key} already exists')
        else:
            self._wallet.wallet.save_invoice(self._effectiveInvoice)
            self._wallet.invoiceModel.addInvoice(self.key)
        self.invoiceSaved.emit(self.key)
class QEUserEnteredPayment(QEInvoice):
    """Holds a manually entered on-chain payment (address/amount/message)
    and validates it before it is saved as an invoice."""
    _logger = get_logger(__name__)
    _recipient = None
    _message = None
    _amount = QEAmount()
    validationError = pyqtSignal([str,str], arguments=['code','message'])
    invoiceCreateError = pyqtSignal([str,str], arguments=['code', 'message'])
    invoiceSaved = pyqtSignal()
    def __init__(self, parent=None):
        super().__init__(parent)
        self.clear()
    recipientChanged = pyqtSignal()
    @pyqtProperty(str, notify=recipientChanged)
    def recipient(self):
        return self._recipient
    @recipient.setter
    def recipient(self, recipient: str):
        if self._recipient != recipient:
            self._recipient = recipient
            self.validate()
            self.recipientChanged.emit()
    messageChanged = pyqtSignal()
    @pyqtProperty(str, notify=messageChanged)
    def message(self):
        return self._message
    @message.setter
    def message(self, message):
        if self._message != message:
            self._message = message
            self.messageChanged.emit()
    amountChanged = pyqtSignal()
    @pyqtProperty(QEAmount, notify=amountChanged)
    def amount(self):
        return self._amount
    @amount.setter
    def amount(self, amount):
        if self._amount != amount:
            self._amount = amount
            self.validate()
            self.amountChanged.emit()
    def validate(self):
        """Set canPay/canSave from recipient+amount; emits validationError on failure."""
        self.canPay = False
        self.canSave = False
        self._logger.debug('validate')
        if not self._recipient:
            self.validationError.emit('recipient', _('Recipient not specified.'))
            return
        if not bitcoin.is_address(self._recipient):
            self.validationError.emit('recipient', _('Invalid Bitcoin address'))
            return
        if self._amount.isEmpty:
            self.validationError.emit('amount', _('Invalid amount'))
            return
        if self._amount.isMax:
            # 'max' payments can be paid but not saved as a fixed-amount invoice.
            self.canPay = True
        else:
            self.canSave = True
            if self.get_max_spendable_onchain() >= self._amount.satsInt:
                self.canPay = True
    @pyqtSlot()
    def save_invoice(self):
        assert self.canSave
        assert not self._amount.isMax
        self._logger.debug('saving invoice to %s, amount=%s, message=%s' % (self._recipient, repr(self._amount), self._message))
        inv_amt = self._amount.satsInt
        try:
            outputs = [PartialTxOutput.from_address_and_value(self._recipient, inv_amt)]
            self._logger.debug(repr(outputs))
            invoice = self._wallet.wallet.create_invoice(outputs=outputs, message=self._message, pr=None, URI=None)
        except InvoiceError as e:
            self.invoiceCreateError.emit('fatal', _('Error creating payment') + ':\n' + str(e))
            return
        self.key = invoice.get_id()
        self._wallet.wallet.save_invoice(invoice)
        self.invoiceSaved.emit()
    @pyqtSlot()
    def clear(self):
        self._recipient = None
        self._amount = QEAmount()
        self._message = None
        self.canSave = False
        self.canPay = False
| mit | 875ed951a5fbff2cacffe6baa538841d | 34.757377 | 138 | 0.59669 | 4.177744 | false | false | false | false |
spesmilo/electrum | electrum/gui/kivy/main_window.py | 1 | 60400 | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from typing import TYPE_CHECKING, Optional, Union, Callable, Sequence
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet, InternalAddressCorruption, Abstract_Wallet
from electrum.plugin import run_hook
from electrum import util
from electrum.util import (profiler, InvalidPassword, send_exception_to_crash_reporter,
format_satoshis, format_satoshis_plain, format_fee_satoshis,
parse_max_spend)
from electrum.util import EventListener, event_listener
from electrum.invoices import PR_PAID, PR_FAILED, Invoice
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.interface import PREFERRED_NETWORK_PROTOCOL, ServerAddr
from electrum.logging import Logger
from electrum.bitcoin import COIN
from electrum.gui import messages
from .i18n import _
from .util import get_default_language
from . import KIVY_GUI_PATH
from kivy.app import App
from kivy.core.window import Window
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
from .uix.dialogs.password_dialog import OpenWalletDialog, ChangePasswordDialog, PincodeDialog, PasswordDialog
from .uix.dialogs.choice_dialog import ChoiceDialog
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
from .uix.dialogs.question import Question
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
# Placeholders for names that are imported lazily elsewhere (startup speed).
notification = app = ref = None
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
# Make the TabbedCarousel widget resolvable by name from kv files.
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
# NOTE: this import rebinds `Label` from kivy.uix.label to kivy.core.text.
from kivy.core.text import Label
Label.register(
    'Roboto',
    KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
    KIVY_GUI_PATH + '/data/fonts/Roboto.ttf',
    KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
    KIVY_GUI_PATH + '/data/fonts/Roboto-Bold.ttf',
)
from electrum.util import (NoDynamicFeeEstimates, NotEnoughFunds,
BITCOIN_BIP21_URI_SCHEME, LIGHTNING_URI_SCHEME,
UserFacingException)
from .uix.dialogs.lightning_open_channel import LightningOpenChannelDialog
from .uix.dialogs.lightning_channels import LightningChannelsDialog, SwapDialog
if TYPE_CHECKING:
from . import ElectrumGui
from electrum.simple_config import SimpleConfig
from electrum.plugin import Plugins
from electrum.paymentrequest import PaymentRequest
class ElectrumWindow(App, Logger, EventListener):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_forkpoint = NumericProperty(0)
lightning_gossip_num_peers = NumericProperty(0)
lightning_gossip_num_nodes = NumericProperty(0)
lightning_gossip_num_channels = NumericProperty(0)
lightning_gossip_num_queries = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
if not self._init_finished:
return
net_params = self.network.get_parameters()
net_params = net_params._replace(auto_connect=self.auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def set_auto_connect(self, b: bool):
# This method makes sure we persist x into the config even if self.auto_connect == b.
# Note: on_auto_connect() only gets called if the value of the self.auto_connect property *changes*.
self.electrum_config.set_key('auto_connect', b)
self.auto_connect = b
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
oneserver = BooleanProperty(False)
def on_oneserver(self, instance, x):
if not self._init_finished:
return
net_params = self.network.get_parameters()
net_params = net_params._replace(oneserver=self.oneserver)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def toggle_oneserver(self, x):
self.oneserver = not self.oneserver
proxy_str = StringProperty('')
def update_proxy_str(self, proxy: dict):
mode = proxy.get('mode')
host = proxy.get('host')
port = proxy.get('port')
self.proxy_str = (host + ':' + port) if mode else _('None')
def choose_server_dialog(self, popup):
protocol = PREFERRED_NETWORK_PROTOCOL
def cb2(server_str):
popup.ids.server_str.text = server_str
servers = self.network.get_servers()
server_choices = {}
for _host, d in sorted(servers.items()):
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
server_choices[server.net_addr_str()] = _host
ChoiceDialog(_('Choose a server'), server_choices, popup.ids.server_str.text, cb2).open()
def maybe_switch_to_server(self, server_str: str):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(server_str)
if not server: raise Exception("failed to parse")
except Exception as e:
self.show_error(_("Invalid server details: {}").format(repr(e)))
return
net_params = net_params._replace(server=server)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def choose_blockchain_dialog(self, dt):
chains = self.network.get_blockchains()
def cb(name):
with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
for chain_id, b in blockchain_items:
if name == b.get_name():
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
chain_objects = filter(lambda b: b is not None, chain_objects)
names = [b.get_name() for b in chain_objects]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_gossip = BooleanProperty(False)
def on_use_gossip(self, instance, x):
self.electrum_config.set_key('use_gossip', self.use_gossip, True)
if self.network:
if self.use_gossip:
self.network.start_gossip()
else:
self.network.run_from_another_thread(
self.network.stop_gossip())
enable_debug_logs = BooleanProperty(False)
def on_enable_debug_logs(self, instance, x):
self.electrum_config.set_key('gui_enable_debug_logs', self.enable_debug_logs, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
if self.wallet:
self.wallet.use_change = self.use_change
self.wallet.db.put('use_change', self.use_change)
self.wallet.save_db()
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
use_recoverable_channels = BooleanProperty(True)
def on_use_recoverable_channels(self, instance, x):
self.electrum_config.set_key('use_recoverable_channels', self.use_recoverable_channels, True)
def switch_to_send_screen(func):
# try until send_screen is available
def wrapper(self, *args):
f = lambda dt: (bool(func(self, *args) and False) if self.send_screen else bool(self.switch_to('send') or True)) if self.wallet else True
Clock.schedule_interval(f, 0.1)
return wrapper
@switch_to_send_screen
def set_URI(self, uri):
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
data = str(intent.getDataString())
scheme = str(intent.getScheme()).lower()
if scheme == BITCOIN_BIP21_URI_SCHEME or scheme == LIGHTNING_URI_SCHEME:
self.set_URI(data)
def on_language(self, instance, language):
self.logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
@event_listener
def on_event_on_quotes(self):
self.logger.info("on_quotes")
self._trigger_update_status()
self._trigger_update_history()
@event_listener
def on_event_on_history(self):
self.logger.info("on_history")
if self.wallet:
self.wallet.clear_coin_price_cache()
self._trigger_update_history()
@event_listener
def on_event_fee_histogram(self, *args):
self._trigger_update_history()
@event_listener
def on_event_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.get_request(key)
if req is None:
return
if self.receive_screen:
if status == PR_PAID:
self.receive_screen.update()
else:
self.receive_screen.update_item(key, req)
if self.request_popup and self.request_popup.key == key:
self.request_popup.update_status()
if status == PR_PAID:
self.show_info(_('Payment Received') + '\n' + key)
self._trigger_update_history()
@event_listener
def on_event_invoice_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.get_invoice(key)
if req is None:
return
if self.send_screen:
if status == PR_PAID:
self.send_screen.update()
else:
self.send_screen.update_item(key, req)
if self.invoice_popup and self.invoice_popup.key == key:
self.invoice_popup.update_status()
    @event_listener
    def on_event_payment_succeeded(self, wallet, key):
        """Lightning payment succeeded: notify the user and refresh history."""
        if wallet != self.wallet:
            return
        description = self.wallet.get_label_for_rhash(key)
        self.show_info(_('Payment succeeded') + '\n\n' + description)
        self._trigger_update_history()
    @event_listener
    def on_event_payment_failed(self, wallet, key, reason):
        """Lightning payment failed: show the failure reason to the user."""
        if wallet != self.wallet:
            return
        self.show_info(_('Payment failed') + '\n\n' + reason)
    def _get_bu(self):
        """Getter backing the base_unit AliasProperty (e.g. 'BTC', 'mBTC')."""
        return self.electrum_config.get_base_unit()
    def _set_bu(self, value):
        """Setter backing the base_unit AliasProperty; persists and redraws."""
        self.electrum_config.set_base_unit(value)
        self._trigger_update_status()
        self._trigger_update_history()
wallet_name = StringProperty(_('No Wallet'))
base_unit = AliasProperty(_get_bu, _set_bu)
fiat_unit = StringProperty('')
    def on_fiat_unit(self, a, b):
        """Kivy observer: fiat currency changed, redraw history with new fiat amounts."""
        self._trigger_update_history()
    def decimal_point(self):
        """Return the configured decimal point (sat-per-base-unit exponent)."""
        return self.electrum_config.get_decimal_point()
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / COIN
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
    def fiat_to_btc(self, fiat_amount):
        """Convert a fiat amount string to a base-unit amount string.

        Returns '' when the input is empty or no exchange rate is available.
        NOTE(review): unlike btc_to_fiat, this does not check fx.is_enabled()
        — presumably callers only invoke it when fiat display is on; confirm.
        """
        if not fiat_amount:
            return ''
        rate = self.fx.exchange_rate()
        if rate.is_nan():
            return ''
        satoshis = COIN * Decimal(fiat_amount) / Decimal(rate)
        return format_satoshis_plain(satoshis, decimal_point=self.decimal_point())
def get_amount(self, amount_str: str) -> Optional[int]:
if not amount_str:
return None
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
    # Backing store for the read-only 'orientation' alias; set in on_size().
    _orientation = OptionProperty('landscape',
                                 options=('landscape', 'portrait'))
    def _get_orientation(self):
        """Getter backing the orientation AliasProperty."""
        return self._orientation
    orientation = AliasProperty(_get_orientation,
                                None,
                                bind=('_orientation',))
    '''Current screen orientation, derived from the window size in on_size().
    One of 'landscape' or 'portrait'.
    :data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
    '''
    # Backing store for the read-only 'ui_mode' alias; set in on_size().
    _ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
    def _get_ui_mode(self):
        """Getter backing the ui_mode AliasProperty."""
        return self._ui_mode
    ui_mode = AliasProperty(_get_ui_mode,
                            None,
                            bind=('_ui_mode',))
    '''Kind of device the app is running on, derived from the screen size.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
    '''
_init_finished = False
    def __init__(self, **kwargs):
        """Build the main window.

        Expected kwargs: 'config' (SimpleConfig), 'network' (Network or None),
        'plugins' (Plugins), 'gui_object' (ElectrumGui). Wires configuration,
        network parameters, fiat exchange, and the throttled GUI update
        triggers; no wallet is loaded here.
        """
        # initialize variables
        self._clipboard = Clipboard
        self.info_bubble = None
        self.nfcscanner = None
        self.tabs = None
        self.is_exit = False
        self.wallet = None  # type: Optional[Abstract_Wallet]
        self.pause_time = 0
        self.asyncio_loop = util.get_asyncio_loop()
        self.password = None
        self._use_single_password = False
        self.resume_dialog = None
        self.gui_thread = threading.current_thread()
        App.__init__(self)#, **kwargs)
        Logger.__init__(self)
        self.electrum_config = config = kwargs.get('config', None)  # type: SimpleConfig
        self.language = config.get('language', get_default_language())
        self.network = network = kwargs.get('network', None)  # type: Network
        if self.network:
            # snapshot current network parameters for the status/server UI
            self.num_blocks = self.network.get_local_height()
            self.num_nodes = len(self.network.get_interfaces())
            net_params = self.network.get_parameters()
            self.server_host = net_params.server.host
            self.server_port = str(net_params.server.port)
            self.auto_connect = net_params.auto_connect
            self.oneserver = net_params.oneserver
            self.proxy_config = net_params.proxy if net_params.proxy else {}
            self.update_proxy_str(self.proxy_config)
        self.plugins = kwargs.get('plugins', None)  # type: Plugins
        self.gui_object = kwargs.get('gui_object', None)  # type: ElectrumGui
        self.daemon = self.gui_object.daemon
        self.fx = self.daemon.fx
        self.use_rbf = config.get('use_rbf', True)
        self.use_gossip = config.get('use_gossip', False)
        self.use_unconfirmed = not config.get('confirmed_only', False)
        self.enable_debug_logs = config.get('gui_enable_debug_logs', False)
        # create triggers so as to minimize updating a max of 2 times a sec
        self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
        self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
        self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
        self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
        self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
        # cached dialogs
        self._settings_dialog = None
        self._channels_dialog = None
        self._addresses_dialog = None
        self.set_fee_status()
        self.invoice_popup = None
        self.request_popup = None
        self._init_finished = True
    def on_pr(self, pr: 'PaymentRequest'):
        """Handle a BIP70 payment request; defers to the GUI thread via Clock."""
        Clock.schedule_once(lambda dt, pr=pr: self._on_pr(pr))
    def _on_pr(self, pr: 'PaymentRequest'):
        """Verify a BIP70 payment request and hand it to the send screen.

        Shows an error (and clears the send screen) for invalid, already-paid
        or expired requests.
        """
        if not self.wallet:
            self.show_error(_('No wallet loaded.'))
            return
        if pr.verify(self.wallet.contacts):
            invoice = Invoice.from_bip70_payreq(pr, height=0)
            if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
                self.show_error("invoice already paid")
                self.send_screen.do_clear()
            elif pr.has_expired():
                self.show_error(_('Payment request has expired'))
            else:
                self.switch_to('send')
                self.send_screen.set_request(pr)
        else:
            self.show_error("invoice error:" + pr.error)
            self.send_screen.do_clear()
    def on_qr(self, data: str):
        """Handle scanned QR code contents (same logic as pasted data)."""
        self.on_data_input(data)
def on_data_input(self, data: str) -> None:
"""on_qr / on_paste shared logic"""
data = data.strip()
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# try to decode as transaction
from electrum.transaction import tx_from_any
try:
tx = tx_from_any(data)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# try to decode as URI/address
self.set_URI(data)
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for name in ['send', 'history', 'receive']:
self.update_tab(name)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
    def show_request(self, key):
        """Open the popup dialog for the payment request identified by *key*."""
        from .uix.dialogs.request_dialog import RequestDialog
        self.request_popup = RequestDialog('Request', key)
        self.request_popup.open()
    def show_invoice(self, key):
        """Open the popup dialog for the outgoing invoice identified by *key*."""
        from .uix.dialogs.invoice_dialog import InvoiceDialog
        invoice = self.wallet.get_invoice(key)
        if not invoice:
            return
        # lightning invoices display the bolt11 string; on-chain ones the key
        data = invoice.lightning_invoice if invoice.is_lightning() else key
        self.invoice_popup = InvoiceDialog('Invoice', data, key)
        self.invoice_popup.open()
    def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None, help_text=None):
        """Show *data* as a QR code in a popup.

        If rendering fails (e.g. data too long for a QR code), the popup is
        dismissed and the text is copied to the clipboard instead.
        """
        from .uix.dialogs.qr_dialog import QRDialog
        def on_qr_failure():
            # 'popup' is bound below before this callback can fire
            popup.dismiss()
            msg = _('Failed to display QR code.')
            if text_for_clipboard:
                msg += '\n' + _('Text copied to clipboard.')
                self._clipboard.copy(text_for_clipboard)
            Clock.schedule_once(lambda dt: self.show_info(msg))
        popup = QRDialog(
            title, data, show_text,
            failure_cb=on_qr_failure,
            text_for_clipboard=text_for_clipboard,
            help_text=help_text)
        popup.open()
    def scan_qr(self, on_complete):
        """Scan a QR code with the camera and pass its text to *on_complete*.

        On Android this launches the native scanner activity and binds a
        one-shot result callback; elsewhere it falls back to the desktop
        scanner.
        """
        if platform != 'android':
            return self.scan_qr_non_android(on_complete)
        from jnius import autoclass, cast
        from android import activity
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
        Intent = autoclass('android.content.Intent')
        intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
        def on_qr_result(requestCode, resultCode, intent):
            try:
                if resultCode == -1:  # RESULT_OK:
                    # this doesn't work due to some bug in jnius:
                    # contents = intent.getStringExtra("text")
                    String = autoclass("java.lang.String")
                    contents = intent.getStringExtra(String("text"))
                    on_complete(contents)
            except Exception as e:  # exc would otherwise get lost
                send_exception_to_crash_reporter(e)
            finally:
                # one-shot: always unbind so future scans don't double-fire
                activity.unbind(on_activity_result=on_qr_result)
        activity.bind(on_activity_result=on_qr_result)
        PythonActivity.mActivity.startActivityForResult(intent, 0)
    def scan_qr_non_android(self, on_complete):
        """Desktop fallback QR scan using the configured video device."""
        from electrum import qrscanner
        try:
            video_dev = self.electrum_config.get_video_device()
            data = qrscanner.scan_barcode(video_dev)
            if data is not None:
                on_complete(data)
        except UserFacingException as e:
            self.show_error(e)
        except BaseException as e:
            self.logger.exception('camera error')
            self.show_error(repr(e))
    def do_share(self, data, title):
        """Open the Android share sheet with *data* as plain text (no-op elsewhere)."""
        if platform != 'android':
            return
        from jnius import autoclass, cast
        JS = autoclass('java.lang.String')
        Intent = autoclass('android.content.Intent')
        sendIntent = Intent()
        sendIntent.setAction(Intent.ACTION_SEND)
        sendIntent.setType("text/plain")
        sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
        it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
        currentActivity.startActivity(it)
    def build(self):
        """Kivy entry point: load and return the root widget tree from main.kv."""
        return Builder.load_file(KIVY_GUI_PATH + '/main.kv')
    def _pause(self):
        """Send the app to the background on Android (instead of exiting)."""
        if platform == 'android':
            # move activity to back
            from jnius import autoclass
            python_act = autoclass('org.kivy.android.PythonActivity')
            mActivity = python_act.mActivity
            mActivity.moveTaskToBack(True)
    def handle_crash_on_startup(func):
        """Decorator: if *func* raises during startup, show the crash reporter.

        When the reporter dialog is dismissed, the app is stopped.
        """
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except Exception as e:
                self.logger.exception('crash on startup')
                from .uix.dialogs.crash_reporter import CrashReporter
                # show the crash reporter, and when it's closed, shutdown the app
                cr = CrashReporter(self, exctype=type(e), value=e, tb=e.__traceback__)
                cr.on_dismiss = lambda: self.stop()
                Clock.schedule_once(lambda _, cr=cr: cr.open(), 0)
        return wrapper
    @handle_crash_on_startup
    def on_start(self):
        """Start point of the Kivy UI.

        Builds widgets, registers plugin hooks and network callbacks, binds
        the Android intent handler, and either shows the first-run network
        screen or loads the last-used wallet.
        """
        import time
        self.logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
        Window.bind(size=self.on_size, on_keyboard=self.on_keyboard)
        #Window.softinput_mode = 'below_target'
        self.on_size(Window, Window.size)
        self.init_ui()
        crash_reporter.ExceptionHook(self)
        # init plugins
        run_hook('init_kivy', self)
        # fiat currency
        self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
        # default tab
        self.switch_to('history')
        # bind intent for bitcoin: URI scheme
        if platform == 'android':
            from android import activity
            from jnius import autoclass
            PythonActivity = autoclass('org.kivy.android.PythonActivity')
            mactivity = PythonActivity.mActivity
            self.on_new_intent(mactivity.getIntent())
            activity.bind(on_new_intent=self.on_new_intent)
        self.register_callbacks()
        if self.network and self.electrum_config.get('auto_connect') is None:
            # first run: ask the user how to connect before loading a wallet
            self.popup_dialog("first_screen")
            # load_wallet_on_start will be called later, after initial network setup is completed
        else:
            # load wallet
            self.load_wallet_on_start()
        # URI passed in config
        uri = self.electrum_config.get('url')
        if uri:
            self.set_URI(uri)
    @event_listener
    def on_event_channel_db(self, num_nodes, num_channels, num_policies):
        """Gossip database stats updated (num_policies currently unused by the UI)."""
        self.lightning_gossip_num_nodes = num_nodes
        self.lightning_gossip_num_channels = num_channels
    @event_listener
    def on_event_gossip_peers(self, num_peers):
        """Number of connected gossip peers changed."""
        self.lightning_gossip_num_peers = num_peers
    @event_listener
    def on_event_unknown_channels(self, unknown):
        """Number of outstanding gossip channel queries changed."""
        self.lightning_gossip_num_queries = unknown
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
    def on_wizard_success(self, storage, db, password):
        """Install wizard produced a wallet: create it, start it, and load it.

        In single-password mode, also syncs the password across all wallets
        in the directory.
        """
        self.password = password
        if self.electrum_config.get('single_password'):
            self._use_single_password = self.daemon.update_password_for_directory(
                old_password=password, new_password=password)
        self.logger.info(f'use single password: {self._use_single_password}')
        wallet = Wallet(db, storage, config=self.electrum_config)
        wallet.start_network(self.daemon.network)
        self.daemon.add_wallet(wallet)
        self.load_wallet(wallet)
def on_wizard_aborted(self):
# wizard did not return a wallet; and there is no wallet open atm
if not self.wallet:
self.stop()
    def load_wallet_by_name(self, path):
        """Open the wallet at *path*, prompting for a password if needed.

        No-op for an empty path or if that wallet is already open. In
        single-password mode the cached password is reused without a prompt.
        """
        if not path:
            return
        if self.wallet and self.wallet.storage.path == path:
            return
        if self.password and self._use_single_password:
            storage = WalletStorage(path)
            # call check_password to decrypt
            storage.check_password(self.password)
            self.on_open_wallet(self.password, storage)
            return
        d = OpenWalletDialog(self, path, self.on_open_wallet)
        d.open()
    def load_wallet_on_start(self):
        """As part of app startup, try to load last wallet."""
        self.load_wallet_by_name(self.electrum_config.get_wallet_path(use_gui_last_wallet=True))
    def on_open_wallet(self, password, storage):
        """Password prompt succeeded: open the wallet, or run the wizard if new.

        Assumes *storage* is already decrypted by the caller.
        """
        if not storage.file_exists():
            wizard = InstallWizard(self.electrum_config, self.plugins)
            wizard.path = storage.path
            wizard.run('new')
        else:
            assert storage.is_past_initial_decryption()
            db = WalletDB(storage.read(), manual_upgrades=False)
            assert not db.requires_upgrade()
            self.on_wizard_success(storage, db, password)
    def on_stop(self):
        """Kivy shutdown hook: stop the open wallet before the app exits."""
        self.logger.info('on_stop')
        self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
    def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
        """Window key handler: double-press back (27) to exit; swallow settings keys.

        Returning True consumes the event; returning None lets the default
        handler run (which exits on the second back press).
        """
        if key == 27 and self.is_exit is False:
            self.is_exit = True
            self.show_info(_('Press again to exit'))
            return True
        # override settings button
        if key in (319, 282):  #f1/settings button on android
            #self.gui.main_gui.toggle_settings(self)
            return True
    def settings_dialog(self):
        """Open the settings dialog (created lazily, then cached and refreshed)."""
        from .uix.dialogs.settings import SettingsDialog
        if self._settings_dialog is None:
            self._settings_dialog = SettingsDialog(self)
        else:
            self._settings_dialog.update()
        self._settings_dialog.open()
    def lightning_open_channel_dialog(self):
        """Open the 'open channel' dialog; warn first-time users beforehand."""
        if not self.wallet.has_lightning():
            self.show_error(_('Lightning is not enabled for this wallet'))
            return
        if not self.wallet.lnworker.channels and not self.wallet.lnworker.channel_backups:
            # no channels yet: show the lightning warning before proceeding
            warning = _(messages.MSG_LIGHTNING_WARNING)
            d = Question(_('Do you want to create your first channel?') +
                         '\n\n' + warning, self.open_channel_dialog_with_warning)
            d.open()
        else:
            d = LightningOpenChannelDialog(self)
            d.open()
    def swap_dialog(self):
        """Open the submarine swap dialog."""
        d = SwapDialog(self, self.electrum_config)
        d.open()
def open_channel_dialog_with_warning(self, b):
if b:
d = LightningOpenChannelDialog(self)
d.open()
    def lightning_channels_dialog(self):
        """Open the lightning channels list dialog (created lazily, cached)."""
        if self._channels_dialog is None:
            self._channels_dialog = LightningChannelsDialog(self)
        self._channels_dialog.open()
    def delete_ln_gossip_dialog(self):
        """Ask for confirmation, then delete and re-download the gossip database."""
        def delete_gossip(b: bool):
            if not b:
                return
            if self.network:
                # fully stop gossip before unlinking the db file it holds open
                self.network.run_from_another_thread(
                    self.network.stop_gossip(full_shutdown=True))
                os.unlink(gossip_db_file)
                self.show_error(_("Local gossip database deleted."))
                self.network.start_gossip()
        if self.network is None or self.network.channel_db is None:
            return  # TODO show msg to user, or the button should be disabled instead
        gossip_db_file = self.network.channel_db.get_file_path(self.electrum_config)
        try:
            size_mb = os.path.getsize(gossip_db_file) / (1024**2)
        except OSError:
            self.logger.exception("Cannot get file size.")
            return
        d = Question(
            _('Do you want to delete the local gossip database?') + '\n' +
            '(' + _('file size') + f': {size_mb:.2f} MiB)\n' +
            _('It will be automatically re-downloaded after, unless you disable the gossip.'),
            delete_gossip)
        d.open()
    @event_listener
    def on_event_channel(self, wallet, chan):
        """A single channel changed: refresh the channels dialog (on GUI thread)."""
        if self._channels_dialog:
            Clock.schedule_once(lambda dt: self._channels_dialog.update())
    @event_listener
    def on_event_channels(self, wallet):
        """The channel list changed: refresh the channels dialog (on GUI thread)."""
        if self._channels_dialog:
            Clock.schedule_once(lambda dt: self._channels_dialog.update())
def is_wallet_creation_disabled(self):
return bool(self.electrum_config.get('single_password')) and self.password is None
    def wallets_dialog(self):
        """Open the wallet chooser for the current wallet directory."""
        from .uix.dialogs.wallets import WalletDialog
        dirname = os.path.dirname(self.electrum_config.get_wallet_path())
        d = WalletDialog(dirname, self.load_wallet_by_name, self.is_wallet_creation_disabled())
        d.open()
    def popup_dialog(self, name):
        """Open a popup by name.

        Dispatch order: known dialog names, the special 'status' screen
        (populated with the wallet's master public keys), '*_dialog' method
        names, and finally a kv file of the same name.
        """
        if name == 'settings':
            self.settings_dialog()
        elif name == 'wallets':
            self.wallets_dialog()
        elif name == 'status':
            popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
            master_public_keys_layout = popup.ids.master_public_keys
            # first master key is already shown by the kv file; add the rest
            for xpub in self.wallet.get_master_public_keys()[1:]:
                master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
                ref = RefLabel()
                ref.name = _('Master Public Key')
                ref.data = xpub
                master_public_keys_layout.add_widget(ref)
            popup.open()
        elif name == 'lightning_channels_dialog' and not self.wallet.can_have_lightning():
            self.show_error(_("Not available for this wallet.") + "\n\n" +
                            _("Lightning is currently restricted to HD wallets with p2wpkh addresses."))
        elif name.endswith("_dialog"):
            getattr(self, name)()
        else:
            popup = Builder.load_file(KIVY_GUI_PATH + f'/uix/ui_screens/{name}.kv')
            popup.open()
    @profiler
    def init_ui(self):
        ''' Initialize The Ux part of electrum. This function performs the basic
        tasks of setting up the ui.
        '''
        #from weakref import ref
        self.funds_error = False
        # setup UX
        self.screens = {}
        #setup lazy imports for mainscreen
        Factory.register('AnimatedPopup',
                         module='electrum.gui.kivy.uix.dialogs')
        Factory.register('QRCodeWidget',
                         module='electrum.gui.kivy.uix.qrcodewidget')
        # preload widgets. Remove this if you want to load the widgets on demand
        #Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
        #Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
        # load and focus the ui
        self.root.manager = self.root.ids['manager']
        # screens are created lazily on first tab switch
        self.history_screen = None
        self.send_screen = None
        self.receive_screen = None
        self.icon = os.path.dirname(KIVY_GUI_PATH) + "/icons/electrum.png"
        self.tabs = self.root.ids['tabs']
    def update_interfaces(self, dt):
        """Refresh the network-related properties shown in the UI."""
        net_params = self.network.get_parameters()
        self.num_nodes = len(self.network.get_interfaces())
        self.num_chains = len(self.network.get_blockchains())
        chain = self.network.blockchain()
        self.blockchain_forkpoint = chain.get_max_forkpoint()
        self.blockchain_name = chain.get_name()
        interface = self.network.interface
        if interface:
            self.server_host = interface.host
        else:
            # no interface yet: show the configured server as connecting
            self.server_host = str(net_params.server.host) + ' (connecting...)'
        self.proxy_config = net_params.proxy or {}
        self.update_proxy_str(self.proxy_config)
    @event_listener
    def on_event_network_updated(self):
        """Network parameters changed: refresh interfaces and status."""
        self._trigger_update_interfaces()
        self._trigger_update_status()
    @event_listener
    def on_event_wallet_updated(self, *args):
        """Wallet state changed: refresh tabs and status."""
        self._trigger_update_wallet()
        self._trigger_update_status()
    @event_listener
    def on_event_blockchain_updated(self, *args):
        # to update number of confirmations in history
        self._trigger_update_wallet()
    @event_listener
    def on_event_status(self, *args):
        """Daemon status changed: refresh the status bar."""
        self._trigger_update_status()
    @event_listener
    def on_event_new_transaction(self, *args):
        """New transaction seen: refresh wallet tabs."""
        self._trigger_update_wallet()
    @event_listener
    def on_event_verified(self, *args):
        """A transaction was SPV-verified: refresh wallet tabs."""
        self._trigger_update_wallet()
    @profiler
    def load_wallet(self, wallet: 'Abstract_Wallet'):
        """Make *wallet* the active wallet and refresh the whole UI.

        Stops any previously open wallet first, runs plugin hooks, checks for
        address-store corruption, and records this wallet as the last used.
        """
        if self.wallet:
            self.stop_wallet()
        self.wallet = wallet
        self.wallet_name = wallet.basename()
        self.update_wallet()
        # Once GUI has been initialized check if we want to announce something
        # since the callback has been called before the GUI was initialized
        if self.receive_screen:
            self.receive_screen.clear()
        self.update_tabs()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
            return
        self.use_change = self.wallet.use_change
        self.electrum_config.save_last_wallet(wallet)
        self.request_focus_for_main_view()
    def request_focus_for_main_view(self):
        """Give Android's main view keyboard focus so the OS back button works."""
        if platform != 'android':
            return
        # The main view of the activity might be not have focus
        # in which case e.g. the OS "back" button would not work.
        # see #6276 (specifically "method 2" and "method 3")
        from jnius import autoclass
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        activity = PythonActivity.mActivity
        activity.requestFocusForMainView()
    def update_status(self, *dt):
        """Refresh the balance / connection status shown in the header.

        While offline, syncing or lagging, the balance fields show a status
        string instead; otherwise they show the combined on-chain plus
        lightning balance in base unit and fiat.
        """
        if not self.wallet:
            return
        if self.network is None or not self.network.is_connected():
            status = _("Offline")
        elif self.network.is_connected():
            self.num_blocks = self.network.get_local_height()
            server_height = self.network.get_server_height()
            server_lag = self.num_blocks - server_height
            if not self.wallet.is_up_to_date() or server_height == 0:
                num_sent, num_answered = self.wallet.adb.get_history_sync_state_details()
                status = ("{} [size=18dp]({}/{})[/size]"
                          .format(_("Synchronizing..."), num_answered, num_sent))
            elif server_lag > 1:
                status = _("Server is lagging ({} blocks)").format(server_lag)
            else:
                status = ''
        else:
            status = _("Disconnected")
        if status:
            self.balance = status
            self.fiat_balance = status
        else:
            # confirmed, unconfirmed, unmatured on-chain + lightning balance
            c, u, x = self.wallet.get_balance()
            l = int(self.wallet.lnworker.get_balance()) if self.wallet.lnworker else 0
            balance_sat = c + u + x + l
            text = self.format_amount(balance_sat)
            self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
            self.fiat_balance = self.fx.format_amount(balance_sat) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
if not self.wallet:
return
if not self.wallet.is_up_to_date():
self._trigger_update_status()
    def get_max_amount(self):
        """Return the max spendable amount (after fees) as a base-unit string.

        Builds a dummy send-everything transaction to compute fees; returns
        '' when sending is not possible (no coins, fee estimates missing, or
        a plugin aborts the send).
        """
        from electrum.transaction import PartialTxOutput
        if run_hook('abort_send', self):
            return ''
        inputs = self.wallet.get_spendable_coins(None)
        if not inputs:
            return ''
        addr = None
        if self.send_screen:
            addr = str(self.send_screen.address)
        if not addr:
            # no destination typed yet: use a placeholder for fee estimation
            addr = self.wallet.dummy_address()
        outputs = [PartialTxOutput.from_address_and_value(addr, '!')]
        try:
            tx = self.wallet.make_unsigned_transaction(coins=inputs, outputs=outputs)
        except NoDynamicFeeEstimates as e:
            Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
            return ''
        except NotEnoughFunds:
            return ''
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
            return ''
        amount = tx.output_value()
        # subtract any plugin-imposed extra fee (e.g. trustedcoin)
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        return format_satoshis_plain(amount_after_all_fees, decimal_point=self.decimal_point())
    def format_amount(self, x, is_diff=False, whitespaces=False):
        """Format a satoshi amount per the user's config (no unit suffix)."""
        return self.electrum_config.format_amount(x, is_diff=is_diff, whitespaces=whitespaces)
    def format_amount_and_units(self, x) -> str:
        """Format a satoshi amount with the base-unit suffix; handles None and 'max'."""
        if x is None:
            return 'none'
        if parse_max_spend(x):
            return f'max({x})'
        # FIXME this is using format_satoshis_plain instead of config.format_amount
        # as we sometimes convert the returned string back to numbers,
        # via self.get_amount()... the need for converting back should be removed
        return format_satoshis_plain(x, decimal_point=self.decimal_point()) + ' ' + self.base_unit
def format_amount_and_units_with_fiat(self, x) -> str:
text = self.format_amount_and_units(x)
fiat = self.fx.format_amount_and_units(x) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000) + ' sat/byte'
    #@profiler
    def update_wallet(self, *dt):
        """Refresh status and, when the wallet is settled (or offline), all tabs."""
        self._trigger_update_status()
        if self.wallet and (self.wallet.is_up_to_date() or not self.network or not self.network.is_connected()):
            self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
self.logger.Error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
    def on_pause(self):
        """Kivy pause hook: record the time and disable NFC; True allows pausing."""
        self.pause_time = time.time()
        # pause nfc
        if self.nfcscanner:
            self.nfcscanner.nfc_disable()
        return True
    def on_resume(self):
        """Kivy resume hook: re-enable NFC and re-lock with the PIN if needed.

        The PIN dialog is shown only after more than 5 minutes in the
        background; failing it stops the app.
        """
        if self.nfcscanner:
            self.nfcscanner.nfc_enable()
        if self.resume_dialog is not None:
            # a lock dialog is already showing; don't stack another
            return
        now = time.time()
        if self.wallet and self.has_pin_code() and now - self.pause_time > 5*60:
            def on_success(x):
                self.resume_dialog = None
            d = PincodeDialog(
                self,
                check_password=self.check_pin_code,
                on_success=on_success,
                on_failure=self.stop)
            self.resume_dialog = d
            d.open()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
    def on_ref_label(self, label, *, show_text_with_qr: bool = True):
        """Tapped a RefLabel: show its data as a QR code (if it has any)."""
        if not label.data:
            return
        self.qr_dialog(label.name, label.data, show_text_with_qr)
    def scheduled_in_gui_thread(func):
        """Decorator to ensure that func runs in the GUI thread.

        Runs immediately when already on the GUI thread, otherwise schedules
        via Clock. Note: the return value is swallowed!
        """
        def wrapper(self: 'ElectrumWindow', *args, **kwargs):
            if threading.current_thread() == self.gui_thread:
                func(self, *args, **kwargs)
            else:
                Clock.schedule_once(lambda dt: func(self, *args, **kwargs))
        return wrapper
    def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
                   exit=False, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/error', duration=0,
                   modal=False):
        ''' Show an error Message Bubble.
        '''
        self.show_info_bubble(text=error, icon=icon, width=width,
                              pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
                              duration=duration, modal=modal)
    def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
                  exit=False, duration=0, modal=False):
        ''' Show an Info Message Bubble.
        '''
        # same bubble as show_error, with the 'important' icon instead
        self.show_error(error, icon=f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/important',
                        duration=duration, modal=modal, exit=exit, pos=pos,
                        arrow_pos=arrow_pos)
    @scheduled_in_gui_thread
    def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
                         arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
        '''Method to show an Information Bubble

        .. parameters::
            text: Message to be displayed
            pos: position for the bubble
            duration: duration the bubble remains on screen. 0 = click to hide
            width: width of the Bubble
            arrow_pos: arrow position for the bubble
            icon: icon source, or (with text == 'texture') a texture to show
                  full screen
            modal: dim and block the rest of the UI while showing
            exit: exit the app when the bubble is dismissed
        '''
        text = str(text)  # so that we also handle e.g. Exception
        info_bubble = self.info_bubble
        if not info_bubble:
            # single bubble instance, reused across calls
            info_bubble = self.info_bubble = Factory.InfoBubble()
        win = Window
        if info_bubble.parent:
            # remove a bubble that is still showing before reusing it
            win.remove_widget(info_bubble
                              if not info_bubble.modal else
                              info_bubble._modal_view)
        if not arrow_pos:
            info_bubble.show_arrow = False
        else:
            info_bubble.show_arrow = True
            info_bubble.arrow_pos = arrow_pos
        img = info_bubble.ids.img
        if text == 'texture':
            # icon holds a texture not a source image
            # display the texture in full screen
            text = ''
            img.texture = icon
            info_bubble.fs = True
            info_bubble.show_arrow = False
            img.allow_stretch = True
            info_bubble.dim_background = True
            info_bubble.background_image = f'atlas://{KIVY_GUI_PATH}/theming/atlas/light/card'
        else:
            info_bubble.fs = False
            info_bubble.icon = icon
            #if img.texture and img._coreimage:
            #    img.reload()
            img.allow_stretch = False
            info_bubble.dim_background = False
            info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
        info_bubble.message = text
        if not pos:
            pos = (win.center[0], win.center[1] - (info_bubble.height/2))
        info_bubble.show(pos, duration, width, modal=modal, exit=exit)
    def tx_dialog(self, tx):
        """Open the transaction details dialog for *tx*."""
        from .uix.dialogs.tx_dialog import TxDialog
        d = TxDialog(self, tx)
        d.open()
    def show_transaction(self, txid):
        """Look up *txid* in the wallet and show its details dialog."""
        tx = self.wallet.db.get_transaction(txid)
        if not tx and self.wallet.lnworker:
            # lightning-related txs may only be in the address-db
            tx = self.wallet.adb.get_transaction(txid)
        if tx:
            self.tx_dialog(tx)
        else:
            self.show_error(f'Transaction not found {txid}')
    def lightning_tx_dialog(self, tx):
        """Open the details dialog for a lightning payment."""
        from .uix.dialogs.lightning_tx_dialog import LightningTxDialog
        d = LightningTxDialog(self, tx)
        d.open()
    def sign_tx(self, *args):
        """Sign a transaction on a background thread (args as for _sign_tx)."""
        threading.Thread(target=self._sign_tx, args=args).start()
    def _sign_tx(self, tx, password, on_success, on_failure):
        """Worker: sign *tx*; invoke callbacks on the GUI thread via Clock."""
        try:
            self.wallet.sign_transaction(tx, password)
        except InvalidPassword:
            Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
            return
        # plugins (e.g. trustedcoin) may wrap the success callback
        on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
        Clock.schedule_once(lambda dt: on_success(tx))
    def _broadcast_thread(self, tx, on_complete):
        """Worker: broadcast *tx*; report (ok, msg) on the GUI thread.

        On success msg is the txid; on failure it is a user-facing error.
        """
        status = False
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except TxBroadcastError as e:
            msg = e.get_message_for_gui()
        except BestEffortRequestFailed as e:
            msg = repr(e)
        else:
            status, msg = True, tx.txid()
        Clock.schedule_once(lambda dt: on_complete(status, msg))
    def broadcast(self, tx):
        """Broadcast *tx* in the background and report the outcome to the user."""
        def on_complete(ok, msg):
            if ok:
                self.show_info(_('Payment sent.'))
                if self.send_screen:
                    self.send_screen.do_clear()
            else:
                msg = msg or ''
                self.show_error(msg)
        if self.network and self.network.is_connected():
            self.show_info(_('Sending'))
            threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
        else:
            self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
    def description_dialog(self, screen):
        """Open a text dialog to edit the description shown on *screen*."""
        from .uix.dialogs.label_dialog import LabelDialog
        text = screen.message
        def callback(text):
            screen.message = text
        d = LabelDialog(_('Enter description'), text, callback)
        d.open()
    def amount_dialog(self, screen, show_max):
        """Open the amount entry dialog and write the result back to *screen*.

        '!' from the dialog means "send max": the max spendable amount is
        computed and screen.is_max is set accordingly.
        """
        from .uix.dialogs.amount_dialog import AmountDialog
        amount = screen.amount
        if amount:
            # strip the unit suffix; the dialog edits the bare number
            amount, u = str(amount).split()
            assert u == self.base_unit
        def cb(amount):
            if amount == '!':
                screen.is_max = True
                max_amt = self.get_max_amount()
                screen.amount = (max_amt + ' ' + self.base_unit) if max_amt else ''
            else:
                screen.amount = amount
                screen.is_max = False
        popup = AmountDialog(show_max, amount, cb)
        popup.open()
    def addresses_dialog(self):
        """Open the addresses list dialog (created lazily, cached and refreshed)."""
        from .uix.dialogs.addresses import AddressesDialog
        if self._addresses_dialog is None:
            self._addresses_dialog = AddressesDialog(self)
        else:
            self._addresses_dialog.update()
        self._addresses_dialog.open()
    def fee_dialog(self):
        """Open the fee settings dialog; refresh the fee status on close."""
        from .uix.dialogs.fee_dialog import FeeDialog
        fee_dialog = FeeDialog(self, self.electrum_config, self.set_fee_status)
        fee_dialog.open()
def set_fee_status(self):
target, tooltip, dyn = self.electrum_config.get_fee_target()
self.fee_status = target
    @event_listener
    def on_event_fee(self, *arg):
        """Fee estimates changed: refresh the displayed fee target."""
        self.set_fee_status()
    def protected(self, msg, f, args):
        """Run f(*args, self.password) after user confirmation.

        If a PIN code is configured, the PIN dialog is shown first; otherwise
        a simple yes/no confirmation is used. Note the callback always
        receives the cached wallet password, not the PIN.
        """
        if self.electrum_config.get('pin_code'):
            msg += "\n" + _("Enter your PIN code to proceed")
            on_success = lambda pw: f(*args, self.password)
            d = PincodeDialog(
                self,
                message = msg,
                check_password=self.check_pin_code,
                on_success=on_success,
                on_failure=lambda: None)
            d.open()
        else:
            d = Question(
                msg,
                lambda b: f(*args, self.password) if b else None,
                yes_str=_("OK"),
                no_str=_("Cancel"),
                title=_("Confirm action"))
            d.open()
def delete_wallet(self):
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
    def _delete_wallet(self, b):
        """First confirmation passed: ask again behind the PIN/password gate."""
        if b:
            basename = self.wallet.basename()
            self.protected(_("Are you sure you want to delete wallet {}?").format(basename),
                           self.__delete_wallet, ())
    def __delete_wallet(self, pw):
        """Verify the password, delete the wallet file, and load the next wallet."""
        wallet_path = self.get_wallet_path()
        basename = os.path.basename(wallet_path)
        if self.wallet.has_password():
            try:
                self.wallet.check_password(pw)
            except InvalidPassword:
                self.show_error("Invalid password")
                return
        self.stop_wallet()
        os.unlink(wallet_path)
        self.show_error(_("Wallet removed: {}").format(basename))
        # fall back to whatever wallet the config now considers "last used"
        new_path = self.electrum_config.get_wallet_path(use_gui_last_wallet=True)
        self.load_wallet_by_name(new_path)
    def show_seed(self, label):
        """Reveal the wallet seed into *label* after PIN/password confirmation."""
        self.protected(_("Display your seed?"), self._show_seed, (label,))
    def _show_seed(self, label, password):
        """Write the decrypted seed (and passphrase, if any) into *label*.data."""
        if self.wallet.has_password() and password is None:
            return
        keystore = self.wallet.keystore
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
        label.data = seed
        if passphrase:
            label.data += '\n\n' + _('Passphrase') + ': ' + passphrase
def has_pin_code(self):
return bool(self.electrum_config.get('pin_code'))
def check_pin_code(self, pin):
if pin != self.electrum_config.get('pin_code'):
raise InvalidPassword
    def change_password(self, cb):
        """Open the change-password dialog.

        In single-password mode the new password is applied to every wallet
        in the directory; otherwise only to the open wallet.
        """
        def on_success(old_password, new_password):
            # called if old_password works on self.wallet
            self.password = new_password
            if self._use_single_password:
                self.daemon.update_password_for_directory(old_password=old_password, new_password=new_password)
                msg = _("Password updated successfully")
            else:
                self.wallet.update_password(old_password, new_password)
                msg = _("Password updated for {}").format(os.path.basename(self.wallet.storage.path))
            self.show_info(msg)
        on_failure = lambda: self.show_error(_("Password not updated"))
        d = ChangePasswordDialog(self, self.wallet, on_success, on_failure)
        d.open()
    def pin_code_dialog(self, cb):
        """Entry point for PIN management: change it, or (in single-password
        mode, when a PIN exists) let the user choose between change and reset.
        *cb* is invoked once the PIN has been updated.
        """
        if self._use_single_password and self.has_pin_code():
            def on_choice(choice):
                if choice == 0:
                    self.change_pin_code(cb)
                else:
                    self.reset_pin_code(cb)
            choices = {0:'Change PIN code', 1:'Reset PIN'}
            dialog = ChoiceDialog(
                _('PIN Code'), choices, 0,
                on_choice,
                keep_choice_order=True)
            dialog.open()
        else:
            self.change_pin_code(cb)
    def reset_pin_code(self, cb):
        """Disable the PIN after the wallet password has been verified."""
        on_success = lambda x: self._set_new_pin_code(None, cb)
        d = PasswordDialog(self,
            basename = self.wallet.basename(),
            check_password = self.wallet.check_password,
            on_success=on_success,
            on_failure=lambda: None,
            is_change=False,
            has_password=self.wallet.has_password())
        d.open()
    def _set_new_pin_code(self, new_pin, cb):
        """Persist *new_pin* (None disables the PIN), run *cb*, notify the user."""
        self.electrum_config.set_key('pin_code', new_pin)
        cb()
        self.show_info(_("PIN updated") if new_pin else _('PIN disabled'))
    def change_pin_code(self, cb):
        """Open the PIN change dialog; store the new PIN on success."""
        on_failure = lambda: self.show_error(_("PIN not updated"))
        on_success = lambda old_pin, new_pin: self._set_new_pin_code(new_pin, cb)
        d = PincodeDialog(
            self,
            check_password=self.check_pin_code,
            on_success=on_success,
            on_failure=on_failure,
            is_change=True,
            has_password = self.has_pin_code())
        d.open()
    def save_backup(self):
        """Save a wallet backup.

        On desktop, uses the configured backup directory. On Android,
        requests the STORAGE permission first and then writes into the
        platform backup directory.
        """
        if platform != 'android':
            backup_dir = self.electrum_config.get_backup_dir()
            if backup_dir:
                self._save_backup(backup_dir)
            else:
                self.show_error(_("Backup NOT saved. Backup directory not configured."))
            return
        from android.permissions import request_permissions, Permission
        def cb(permissions, grant_results: Sequence[bool]):
            # callback invoked by Android with the permission grant results
            if not grant_results or not grant_results[0]:
                self.show_error(_("Cannot save backup without STORAGE permission"))
                return
            try:
                backup_dir = util.android_backup_dir()
            except OSError as e:
                self.logger.exception("Cannot save backup")
                self.show_error(f"Cannot save backup: {e!r}")
                return
            # note: Clock.schedule_once is a hack so that we get called on a non-daemon thread
            # (needed for WalletDB.write)
            Clock.schedule_once(lambda dt: self._save_backup(backup_dir))
        request_permissions([Permission.WRITE_EXTERNAL_STORAGE], cb)
    def _save_backup(self, backup_dir):
        """Write the wallet backup into *backup_dir* and report the result."""
        try:
            new_path = self.wallet.save_backup(backup_dir)
        except Exception as e:
            self.logger.exception("Failed to save wallet backup")
            self.show_error("Failed to save wallet backup" + '\n' + str(e))
            return
        self.show_info(_("Backup saved:") + f"\n{new_path}")
    def export_private_keys(self, pk_label, addr):
        """After PIN/confirmation, decrypt the private key of *addr* into *pk_label*."""
        if self.is_watching_only():
            self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
            return
        def show_private_key(addr, pk_label, password):
            if self.wallet.has_password() and password is None:
                return
            if not self.wallet.can_export():
                return
            try:
                key = str(self.wallet.export_private_key(addr, password))
                pk_label.data = key
            except InvalidPassword:
                self.show_error("Invalid PIN")
                return
        self.protected(_("Decrypt your private key?"), show_private_key, (addr, pk_label))
    def import_channel_backup(self, encrypted):
        """Ask the user to confirm importing an (encrypted) lightning channel backup."""
        if not self.wallet.has_lightning():
            msg = _('Cannot import channel backup.')
            if self.wallet.can_have_lightning():
                msg += ' ' + _('Lightning is not enabled.')
            else:
                msg += ' ' + _('Lightning is not available for this wallet.')
            self.show_error(msg)
            return
        d = Question(_('Import Channel Backup?'), lambda b: self._import_channel_backup(b, encrypted))
        d.open()
    def _import_channel_backup(self, b, encrypted):
        """Import the channel backup if the user confirmed (*b* True), then show channels."""
        if not b:
            return
        try:
            self.wallet.lnworker.import_channel_backup(encrypted)
        except Exception as e:
            self.logger.exception("failed to import backup")
            self.show_error("failed to import backup" + '\n' + str(e))
            return
        self.lightning_channels_dialog()
def lightning_status(self):
if self.wallet.has_lightning():
if self.wallet.lnworker.has_deterministic_node_id():
status = _('Enabled')
else:
status = _('Enabled, non-recoverable channels')
else:
if self.wallet.can_have_lightning():
status = _('Not enabled')
else:
status = _("Not available for this wallet.")
return status
    def on_lightning_status(self, root):
        """Handle a tap on the lightning status row of *root* (a dialog).

        Shows warnings about non-recoverable channels, or offers to enable
        lightning if the wallet supports it.
        """
        if self.wallet.has_lightning():
            if self.wallet.lnworker.has_deterministic_node_id():
                pass
            else:
                # channels are not recoverable from seed; explain why
                if self.wallet.db.get('seed_type') == 'segwit':
                    msg = _("Your channels cannot be recovered from seed, because they were created with an old version of Electrum. "
                            "This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
                            "If you want this wallet to have recoverable channels, you must close your existing channels and restore this wallet from seed")
                else:
                    msg = _("Your channels cannot be recovered from seed. "
                            "This means that you must save a backup of your wallet everytime you create a new channel.\n\n"
                            "If you want to have recoverable channels, you must create a new wallet with an Electrum seed")
                self.show_info(msg)
        elif self.wallet.can_have_lightning():
            root.dismiss()
            if self.wallet.can_have_deterministic_lightning():
                msg = _(
                    "Lightning is not enabled because this wallet was created with an old version of Electrum. "
                    "Create lightning keys?")
            else:
                msg = _(
                    "Warning: this wallet type does not support channel recovery from seed. "
                    "You will need to backup your wallet everytime you create a new channel. "
                    "Create lightning keys?")
            d = Question(msg, self._enable_lightning, title=_('Enable Lightning?'))
            d.open()
    def _enable_lightning(self, b):
        """Initialize lightning keys if the user confirmed (*b* True)."""
        if not b:
            return
        self.wallet.init_lightning(password=self.password)
        self.show_info(_('Lightning keys have been initialized.'))
| mit | 53462c8a9ca56f83cd37bf1c4c9e81ba | 38.554682 | 156 | 0.596987 | 3.905088 | false | false | false | false |
spesmilo/electrum | electrum/dns_hacks.py | 3 | 4577 | # Copyright (C) 2020 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import sys
import socket
import concurrent
from concurrent import futures
import ipaddress
from typing import Optional
import dns
import dns.resolver
from .logging import get_logger
_logger = get_logger(__name__)
_dns_threads_executor = None # type: Optional[concurrent.futures.Executor]
def configure_dns_depending_on_proxy(is_proxy: bool) -> None:
# Store this somewhere so we can un-monkey-patch:
if not hasattr(socket, "_getaddrinfo"):
socket._getaddrinfo = socket.getaddrinfo
if is_proxy:
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
def getaddrinfo(host, port, *args, **kwargs):
if _is_force_system_dns_for_host(host):
return socket._getaddrinfo(host, port, *args, **kwargs)
return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (host, port))]
socket.getaddrinfo = getaddrinfo
else:
if sys.platform == 'win32':
# On Windows, socket.getaddrinfo takes a mutex, and might hold it for up to 10 seconds
# when dns-resolving. To speed it up drastically, we resolve dns ourselves, outside that lock.
# See https://github.com/spesmilo/electrum/issues/4421
try:
_prepare_windows_dns_hack()
except Exception as e:
_logger.exception('failed to apply windows dns hack.')
else:
socket.getaddrinfo = _fast_getaddrinfo
else:
socket.getaddrinfo = socket._getaddrinfo
def _prepare_windows_dns_hack():
# enable dns cache
resolver = dns.resolver.get_default_resolver()
if resolver.cache is None:
resolver.cache = dns.resolver.Cache()
# ensure overall timeout for requests is long enough
resolver.lifetime = max(resolver.lifetime or 1, 30.0)
# prepare threads
global _dns_threads_executor
if _dns_threads_executor is None:
_dns_threads_executor = concurrent.futures.ThreadPoolExecutor(max_workers=20,
thread_name_prefix='dns_resolver')
def _is_force_system_dns_for_host(host: str) -> bool:
return str(host) in ('localhost', 'localhost.',)
def _fast_getaddrinfo(host, *args, **kwargs):
def needs_dns_resolving(host):
try:
ipaddress.ip_address(host)
return False # already valid IP
except ValueError:
pass # not an IP
if _is_force_system_dns_for_host(host):
return False
return True
def resolve_with_dnspython(host):
addrs = []
expected_errors = (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer,
concurrent.futures.CancelledError, concurrent.futures.TimeoutError)
ipv6_fut = _dns_threads_executor.submit(dns.resolver.resolve, host, dns.rdatatype.AAAA)
ipv4_fut = _dns_threads_executor.submit(dns.resolver.resolve, host, dns.rdatatype.A)
# try IPv6
try:
answers = ipv6_fut.result()
addrs += [str(answer) for answer in answers]
except expected_errors as e:
pass
except BaseException as e:
_logger.info(f'dnspython failed to resolve dns (AAAA) for {repr(host)} with error: {repr(e)}')
# try IPv4
try:
answers = ipv4_fut.result()
addrs += [str(answer) for answer in answers]
except expected_errors as e:
# dns failed for some reason, e.g. dns.resolver.NXDOMAIN this is normal.
# Simply report back failure; except if we already have some results.
if not addrs:
raise socket.gaierror(11001, 'getaddrinfo failed') from e
except BaseException as e:
# Possibly internal error in dnspython :( see #4483 and #5638
_logger.info(f'dnspython failed to resolve dns (A) for {repr(host)} with error: {repr(e)}')
if addrs:
return addrs
# Fall back to original socket.getaddrinfo to resolve dns.
return [host]
addrs = [host]
if needs_dns_resolving(host):
addrs = resolve_with_dnspython(host)
list_of_list_of_socketinfos = [socket._getaddrinfo(addr, *args, **kwargs) for addr in addrs]
list_of_socketinfos = [item for lst in list_of_list_of_socketinfos for item in lst]
return list_of_socketinfos
| mit | 538ab0a6ca2853239217fd803dbe5394 | 39.149123 | 106 | 0.630544 | 3.959343 | false | false | false | false |
spesmilo/electrum | electrum/wallet.py | 1 | 152055 | # Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - Imported_Wallet: imported addresses or single keys, 0 or 1 keystore
# - Standard_Wallet: one HD keystore, P2PKH-like scripts
# - Multisig_Wallet: several HD keystores, M-of-N OP_CHECKMULTISIG scripts
import os
import sys
import random
import time
import json
import copy
import errno
import traceback
import operator
import math
from functools import partial
from collections import defaultdict
from numbers import Number
from decimal import Decimal
from typing import TYPE_CHECKING, List, Optional, Tuple, Union, NamedTuple, Sequence, Dict, Any, Set, Iterable
from abc import ABC, abstractmethod
import itertools
import threading
import enum
import asyncio
from aiorpcx import timeout_after, TaskTimeout, ignore_after, run_in_thread
from .i18n import _
from .bip32 import BIP32Node, convert_bip32_intpath_to_strpath, convert_bip32_path_to_list_of_uint32
from .crypto import sha256
from . import util
from .util import (NotEnoughFunds, UserCancelled, profiler, OldTaskGroup, ignore_exceptions,
format_satoshis, format_fee_satoshis, NoDynamicFeeEstimates,
WalletFileException, BitcoinException,
InvalidPassword, format_time, timestamp_to_datetime, Satoshis,
Fiat, bfh, bh2u, TxMinedInfo, quantize_feerate, create_bip21_uri, OrderedDictWithIndex, parse_max_spend)
from .simple_config import SimpleConfig, FEE_RATIO_HIGH_WARNING, FEERATE_WARNING_HIGH_FEE
from .bitcoin import COIN, TYPE_ADDRESS
from .bitcoin import is_address, address_to_script, is_minikey, relayfee, dust_threshold
from .crypto import sha256d
from . import keystore
from .keystore import (load_keystore, Hardware_KeyStore, KeyStore, KeyStoreWithMPK,
AddressIndexGeneric, CannotDerivePubkey)
from .util import multisig_type
from .storage import StorageEncryptionVersion, WalletStorage
from .wallet_db import WalletDB
from . import transaction, bitcoin, coinchooser, paymentrequest, ecc, bip32
from .transaction import (Transaction, TxInput, UnknownTxinType, TxOutput,
PartialTransaction, PartialTxInput, PartialTxOutput, TxOutpoint)
from .plugin import run_hook
from .address_synchronizer import (AddressSynchronizer, TX_HEIGHT_LOCAL,
TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_FUTURE)
from .invoices import Invoice
from .invoices import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED, PR_UNCONFIRMED
from .contacts import Contacts
from .interface import NetworkException
from .mnemonic import Mnemonic
from .logging import get_logger, Logger
from .lnworker import LNWallet
from .paymentrequest import PaymentRequest
from .util import read_json_file, write_json_file, UserFacingException, FileImportFailed
from .util import EventListener, event_listener
if TYPE_CHECKING:
from .network import Network
from .exchange_rate import FxThread
_logger = get_logger(__name__)
TX_STATUS = [
_('Unconfirmed'),
_('Unconfirmed parent'),
_('Not Verified'),
_('Local'),
]
class BumpFeeStrategy(enum.Enum):
    """Enumeration of strategies for funding a fee-bump (RBF) transaction."""
    COINCHOOSER = enum.auto()
    DECREASE_CHANGE = enum.auto()
    DECREASE_PAYMENT = enum.auto()
async def _append_utxos_to_inputs(*, inputs: List[PartialTxInput], network: 'Network',
                                  pubkey: str, txin_type: str, imax: int) -> None:
    """Find UTXOs controlled by *pubkey* (of script type *txin_type*) and
    append them (at most *imax* in total) to *inputs*, mutating it in place.

    Raises on unsupported txin_type, or if a server-returned UTXO does not
    match the expected scripthash.
    """
    if txin_type in ('p2pkh', 'p2wpkh', 'p2wpkh-p2sh'):
        address = bitcoin.pubkey_to_address(txin_type, pubkey)
        scripthash = bitcoin.address_to_scripthash(address)
    elif txin_type == 'p2pk':
        script = bitcoin.public_key_to_p2pk_script(pubkey)
        scripthash = bitcoin.script_to_scripthash(script)
    else:
        raise Exception(f'unexpected txin_type to sweep: {txin_type}')
    async def append_single_utxo(item):
        # fetch the full previous tx and verify its output matches our script
        prev_tx_raw = await network.get_transaction(item['tx_hash'])
        prev_tx = Transaction(prev_tx_raw)
        prev_txout = prev_tx.outputs()[item['tx_pos']]
        if scripthash != bitcoin.script_to_scripthash(prev_txout.scriptpubkey.hex()):
            raise Exception('scripthash mismatch when sweeping')
        prevout_str = item['tx_hash'] + ':%d' % item['tx_pos']
        prevout = TxOutpoint.from_str(prevout_str)
        txin = PartialTxInput(prevout=prevout)
        txin.utxo = prev_tx
        txin.block_height = int(item['height'])
        txin.script_type = txin_type
        txin.pubkeys = [bfh(pubkey)]
        txin.num_sig = 1
        if txin_type == 'p2wpkh-p2sh':
            txin.redeem_script = bfh(bitcoin.p2wpkh_nested_script(pubkey))
        inputs.append(txin)
    u = await network.listunspent_for_scripthash(scripthash)
    async with OldTaskGroup() as group:
        for item in u:
            if len(inputs) >= imax:
                break
            await group.spawn(append_single_utxo(item))
async def sweep_preparations(privkeys, network: 'Network', imax=100):
    """Collect spendable inputs for the given WIF/minikey private keys.

    Returns (inputs, keypairs) where keypairs maps pubkey hex ->
    (privkey, compressed). Raises UserFacingException if nothing is found.
    """
    async def find_utxos_for_privkey(txin_type, privkey, compressed):
        pubkey = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
        await _append_utxos_to_inputs(
            inputs=inputs,
            network=network,
            pubkey=pubkey,
            txin_type=txin_type,
            imax=imax)
        keypairs[pubkey] = privkey, compressed
    inputs = []  # type: List[PartialTxInput]
    keypairs = {}
    async with OldTaskGroup() as group:
        for sec in privkeys:
            txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
            await group.spawn(find_utxos_for_privkey(txin_type, privkey, compressed))
            # do other lookups to increase support coverage
            if is_minikey(sec):
                # minikeys don't have a compressed byte
                # we lookup both compressed and uncompressed pubkeys
                await group.spawn(find_utxos_for_privkey(txin_type, privkey, not compressed))
            elif txin_type == 'p2pkh':
                # WIF serialization does not distinguish p2pkh and p2pk
                # we also search for pay-to-pubkey outputs
                await group.spawn(find_utxos_for_privkey('p2pk', privkey, compressed))
    if not inputs:
        raise UserFacingException(_('No inputs found.'))
    return inputs, keypairs
async def sweep(
        privkeys,
        *,
        network: 'Network',
        config: 'SimpleConfig',
        to_address: str,
        fee: int = None,
        imax=100,
        locktime=None,
        tx_version=None) -> PartialTransaction:
    """Build and sign a transaction sweeping all funds of *privkeys* to *to_address*.

    If *fee* is None it is estimated from the transaction size. Raises if
    funds are insufficient to pay the fee or the remainder would be dust.
    """
    inputs, keypairs = await sweep_preparations(privkeys, network, imax)
    total = sum(txin.value_sats() for txin in inputs)
    if fee is None:
        # build a throwaway tx sending the full amount, just to estimate size/fee
        outputs = [PartialTxOutput(scriptpubkey=bfh(bitcoin.address_to_script(to_address)),
                                   value=total)]
        tx = PartialTransaction.from_io(inputs, outputs)
        fee = config.estimate_fee(tx.estimated_size())
    if total - fee < 0:
        raise Exception(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
    if total - fee < dust_threshold(network):
        raise Exception(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
    outputs = [PartialTxOutput(scriptpubkey=bfh(bitcoin.address_to_script(to_address)),
                               value=total - fee)]
    if locktime is None:
        locktime = get_locktime_for_new_transaction(network)
    tx = PartialTransaction.from_io(inputs, outputs, locktime=locktime, version=tx_version)
    rbf = bool(config.get('use_rbf', True))
    tx.set_rbf(rbf)
    tx.sign(keypairs)
    return tx
def get_locktime_for_new_transaction(network: 'Network') -> int:
    """Pick an nLockTime for a newly created transaction.

    Returns 0 when offline or when our chain tip is stale. Otherwise
    uses (roughly) the current height, to discourage fee sniping; with
    1-in-10 probability the value is pushed a bit further into the past
    for the privacy of slow-signing setups (offline/multisig/coinjoin).
    """
    if not network:
        # no network, or not up to date: locktime of zero
        return 0
    chain = network.blockchain()
    if chain.is_tip_stale():
        return 0
    height = chain.height()
    if random.randint(0, 9) == 0:
        height = max(0, height - random.randint(0, 99))
    return height
class CannotBumpFee(Exception):
    """Raised when a fee-bump transaction cannot be created."""
    def __str__(self):
        return _('Cannot bump fee') + ':\n\n' + Exception.__str__(self)
class CannotDoubleSpendTx(Exception):
    """Raised when a cancel (double-spend-to-self) transaction cannot be created."""
    def __str__(self):
        return _('Cannot cancel transaction') + ':\n\n' + Exception.__str__(self)
class CannotCPFP(Exception):
    """Raised when a child-pays-for-parent transaction cannot be created."""
    def __str__(self):
        return _('Cannot create child transaction') + ':\n\n' + Exception.__str__(self)
class InternalAddressCorruption(Exception):
    """Raised when wallet file corruption is detected in address data."""
    def __str__(self):
        return _("Wallet file corruption detected. "
                 "Please restore your wallet from seed, and compare the addresses in both files")
class ReceiveRequestHelp(NamedTuple):
    """Help/warning texts shown for the different ways of receiving a payment."""
    # help texts (warnings/errors):
    address_help: str
    URI_help: str
    ln_help: str
    # whether the texts correspond to an error (or just a warning):
    address_is_error: bool
    URI_is_error: bool
    ln_is_error: bool
    ln_swap_suggestion: Optional[Any] = None
    ln_rebalance_suggestion: Optional[Any] = None

    def can_swap(self) -> bool:
        """Whether a submarine swap suggestion is available."""
        return bool(self.ln_swap_suggestion)

    def can_rebalance(self) -> bool:
        """Whether a channel-rebalance suggestion is available."""
        return bool(self.ln_rebalance_suggestion)
class TxWalletDelta(NamedTuple):
    """Effect a transaction has on the wallet (see get_wallet_delta)."""
    is_relevant: bool  # "related to wallet?"
    is_any_input_ismine: bool
    is_all_input_ismine: bool
    delta: int  # net change in wallet balance, in satoshis
    fee: Optional[int]  # None if the fee cannot be determined
class TxWalletDetails(NamedTuple):
    """Wallet-centric view of a transaction, for display in the UI."""
    txid: Optional[str]
    status: str
    label: str
    can_broadcast: bool
    can_bump: bool
    can_cpfp: bool
    can_dscancel: bool  # whether user can double-spend to self
    can_save_as_local: bool
    amount: Optional[int]
    fee: Optional[int]
    tx_mined_status: TxMinedInfo
    mempool_depth_bytes: Optional[int]
    can_remove: bool  # whether user should be allowed to delete tx
    is_lightning_funding_tx: bool
class Abstract_Wallet(ABC, Logger, EventListener):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
LOGGING_SHORTCUT = 'w'
max_change_outputs = 3
gap_limit_for_change = 10
txin_type: str
wallet_type: str
lnworker: Optional['LNWallet']
    def __init__(self, db: WalletDB, storage: Optional[WalletStorage], *, config: SimpleConfig):
        """Initialize wallet state from *db*; *storage* may be None for in-memory wallets."""
        if not db.is_ready_to_be_used_by_wallet():
            raise Exception("storage not ready to be used by Abstract_Wallet")
        self.config = config
        assert self.config is not None, "config must not be None"
        self.db = db
        self.storage = storage
        # load addresses needs to be called before constructor for sanity checks
        db.load_addresses(self.wallet_type)
        self.keystore = None  # type: Optional[KeyStore]  # will be set by load_keystore
        Logger.__init__(self)
        self.network = None
        self.adb = AddressSynchronizer(db, config, name=self.diagnostic_name())
        for addr in self.get_addresses():
            self.adb.add_address(addr)
        self.lock = self.adb.lock
        self.transaction_lock = self.adb.transaction_lock
        self.taskgroup = OldTaskGroup()
        # saved fields
        self.use_change = db.get('use_change', True)
        self.multiple_change = db.get('multiple_change', False)
        self._labels = db.get_dict('labels')
        self._frozen_addresses = set(db.get('frozen_addresses', []))
        self._frozen_coins = db.get_dict('frozen_coins')  # type: Dict[str, bool]
        self.fiat_value = db.get_dict('fiat_value')
        self._receive_requests = db.get_dict('payment_requests')  # type: Dict[str, Invoice]
        self._invoices = db.get_dict('invoices')  # type: Dict[str, Invoice]
        self._reserved_addresses = set(db.get('reserved_addresses', []))
        self._freeze_lock = threading.RLock()  # for mutating/iterating frozen_{addresses,coins}
        self.load_keystore()
        self._init_lnworker()
        self._init_requests_rhash_index()
        self._prepare_onchain_invoice_paid_detection()
        self.calc_unused_change_addresses()
        # save wallet type the first time
        if self.db.get('wallet_type') is None:
            self.db.put('wallet_type', self.wallet_type)
        self.contacts = Contacts(self.db)
        self._coin_price_cache = {}
        # true when synchronized. this is stricter than adb.is_up_to_date():
        # to-be-generated (HD) addresses are also considered here (gap-limit-roll-forward)
        self._up_to_date = False
        self.test_addresses_sanity()
        self.register_callbacks()
    def _init_lnworker(self):
        # No lightning by default; subclasses/init_lightning set a real LNWallet.
        self.lnworker = None
    @ignore_exceptions  # don't kill outer taskgroup
    async def main_loop(self):
        """Run the wallet's background tasks until the taskgroup is cancelled."""
        self.logger.info("starting taskgroup.")
        try:
            async with self.taskgroup as group:
                await group.spawn(asyncio.Event().wait)  # run forever (until cancel)
                await group.spawn(self.do_synchronize_loop())
        except Exception as e:
            self.logger.exception("taskgroup died.")
        finally:
            self.logger.info("taskgroup stopped.")
    async def do_synchronize_loop(self):
        """Generates new deterministic addresses if needed (gap limit roll-forward),
        and sets up_to_date.
        """
        while True:
            # polling.
            # TODO if adb had "up_to_date_changed" asyncio.Event(), we could *also* trigger on that.
            # The polling would still be useful as often need to gen new addrs while adb.is_up_to_date() is False
            await asyncio.sleep(0.1)
            # note: we only generate new HD addresses if the existing ones
            # have history that are mined and SPV-verified.
            await run_in_thread(self.synchronize)
    def save_db(self):
        """Write the wallet db to disk storage (no-op for in-memory wallets)."""
        if self.storage:
            self.db.write(self.storage)
    def save_backup(self, backup_dir):
        """Write an encrypted backup copy of the wallet into *backup_dir*.

        Lightning channels are converted into channel backups (the backup
        file cannot operate the channels). Returns the new file path.
        """
        new_db = WalletDB(self.db.dump(), manual_upgrades=False)
        if self.lnworker:
            channel_backups = new_db.get_dict('imported_channel_backups')
            for chan_id, chan in self.lnworker.channels.items():
                channel_backups[chan_id.hex()] = self.lnworker.create_channel_backup(chan_id)
            # strip live channel state and lightning keys from the backup
            new_db.put('channels', None)
            new_db.put('lightning_privkey2', None)
        new_path = os.path.join(backup_dir, self.basename() + '.backup')
        new_storage = WalletStorage(new_path)
        new_storage._encryption_version = self.storage._encryption_version
        new_storage.pubkey = self.storage.pubkey
        new_db.set_modified(True)
        new_db.write(new_storage)
        return new_path
    def has_lightning(self) -> bool:
        """Whether lightning is enabled (an LNWallet exists) for this wallet."""
        return bool(self.lnworker)
    def can_have_lightning(self) -> bool:
        """Whether this wallet type supports enabling lightning."""
        # we want static_remotekey to be a wallet address
        return self.txin_type == 'p2wpkh'
    def can_have_deterministic_lightning(self) -> bool:
        """Whether lightning keys can be derived deterministically from the keystore."""
        if not self.can_have_lightning():
            return False
        if not self.keystore:
            return False
        return self.keystore.can_have_deterministic_lightning_xprv()
    def init_lightning(self, *, password) -> None:
        """Create and store lightning keys, then (re)start the LNWallet.

        Prefers a deterministic xprv derived from the keystore; otherwise
        generates a random one. Must only be called once.
        """
        assert self.can_have_lightning()
        assert self.db.get('lightning_xprv') is None
        assert self.db.get('lightning_privkey2') is None
        if self.can_have_deterministic_lightning():
            assert isinstance(self.keystore, keystore.BIP32_KeyStore)
            ln_xprv = self.keystore.get_lightning_xprv(password)
            self.db.put('lightning_xprv', ln_xprv)
        else:
            seed = os.urandom(32)
            node = BIP32Node.from_rootseed(seed, xtype='standard')
            ln_xprv = node.to_xprv()
            self.db.put('lightning_privkey2', ln_xprv)
        # restart networking so the new LNWallet gets hooked up
        if self.network:
            self.network.run_from_another_thread(self.stop())
        self.lnworker = LNWallet(self, ln_xprv)
        if self.network:
            self.start_network(self.network)
    async def stop(self):
        """Stop all networking and save DB to disk."""
        self.unregister_callbacks()
        try:
            # bounded shutdown: give networking at most 5 seconds
            async with ignore_after(5):
                if self.network:
                    if self.lnworker:
                        await self.lnworker.stop()
                        self.lnworker = None
                    await self.adb.stop()
                await self.taskgroup.cancel_remaining()
        finally:  # even if we get cancelled
            if any([ks.is_requesting_to_be_rewritten_to_wallet_file for ks in self.get_keystores()]):
                self.save_keystore()
            self.save_db()
    def is_up_to_date(self) -> bool:
        """Whether the wallet is fully synchronized (incl. gap-limit roll-forward)."""
        return self._up_to_date
    @event_listener
    async def on_event_adb_set_up_to_date(self, adb):
        """React to the address synchronizer's up-to-date flag changing.

        Recomputes our own (stricter) up-to-date flag: we also require that
        no new HD addresses needed to be generated.
        """
        if self.adb != adb:
            return
        num_new_addrs = await run_in_thread(self.synchronize)
        up_to_date = self.adb.is_up_to_date() and num_new_addrs == 0
        with self.lock:
            status_changed = self._up_to_date != up_to_date
            self._up_to_date = up_to_date
        if up_to_date:
            self.adb.reset_netrequest_counters()  # sync progress indicator
            self.save_db()
        # fire triggers
        util.trigger_callback('wallet_updated', self)
        util.trigger_callback('status')
        if status_changed:
            self.logger.info(f'set_up_to_date: {up_to_date}')
    @event_listener
    def on_event_adb_added_tx(self, adb, tx_hash):
        """Handle a new transaction added to the address synchronizer's db."""
        if self.adb != adb:
            return
        tx = self.db.get_transaction(tx_hash)
        if not tx:
            raise Exception(tx_hash)
        # only react to txs that touch our addresses
        is_mine = any([self.is_mine(out.address) for out in tx.outputs()])
        is_mine |= any([self.is_mine(self.adb.get_txin_address(txin)) for txin in tx.inputs()])
        if not is_mine:
            return
        if self.lnworker:
            self.lnworker.maybe_add_backup_from_tx(tx)
        self._update_invoices_and_reqs_touched_by_tx(tx_hash)
        util.trigger_callback('new_transaction', self, tx)
    @event_listener
    def on_event_adb_added_verified_tx(self, adb, tx_hash):
        # A transaction got SPV-verified: refresh invoice state and notify the GUI.
        if adb != self.adb:
            return
        self._update_invoices_and_reqs_touched_by_tx(tx_hash)
        tx_mined_status = self.adb.get_tx_height(tx_hash)
        util.trigger_callback('verified', self, tx_hash, tx_mined_status)
    @event_listener
    def on_event_adb_removed_verified_tx(self, adb, tx_hash):
        # A previously verified tx was un-verified (e.g. reorg): refresh invoice state.
        if adb != self.adb:
            return
        self._update_invoices_and_reqs_touched_by_tx(tx_hash)
    def clear_history(self):
        """Forget all transaction history and persist the cleared state."""
        self.adb.clear_history()
        self.save_db()
    def start_network(self, network):
        """Attach the wallet (and lightning, if present) to *network* and start tasks."""
        self.network = network
        if network:
            asyncio.run_coroutine_threadsafe(self.main_loop(), self.network.asyncio_loop)
            self.adb.start_network(network)
            if self.lnworker:
                self.lnworker.start_network(network)
                # only start gossiping when we already have channels
                if self.db.get('channels'):
                    self.network.start_gossip()
    @abstractmethod
    def load_keystore(self) -> None:
        """Load self.keystore from the wallet db (implemented by subclasses)."""
        pass
    def diagnostic_name(self):
        # Used as the logging shortcut suffix for this wallet.
        return self.basename()
    def __str__(self):
        # Human-readable wallet identity: the wallet file basename.
        return self.basename()
    def get_master_public_key(self):
        """Return the master public key, or None (overridden by deterministic wallets)."""
        return None
    def get_master_public_keys(self):
        """Return all master public keys; empty by default (overridden by subclasses)."""
        return []
    def basename(self) -> str:
        """Return the wallet file basename, or 'no name' for storage-less wallets."""
        return self.storage.basename() if self.storage else 'no name'
def test_addresses_sanity(self) -> None:
addrs = self.get_receiving_addresses()
if len(addrs) > 0:
addr = str(addrs[0])
if not bitcoin.is_address(addr):
neutered_addr = addr[:5] + '..' + addr[-2:]
raise WalletFileException(f'The addresses in this wallet are not bitcoin addresses.\n'
f'e.g. {neutered_addr} (length: {len(addr)})')
    def check_returned_address_for_corruption(func):
        # Decorator (applied at class-body level, hence no ``self`` param here):
        # after ``func`` returns an address, verify it against the wallet file
        # via check_address_for_corruption, then pass it through.
        def wrapper(self, *args, **kwargs):
            addr = func(self, *args, **kwargs)
            self.check_address_for_corruption(addr)
            return addr
        return wrapper
    def calc_unused_change_addresses(self) -> Sequence[str]:
        """Returns a list of change addresses to choose from, for usage in e.g. new transactions.

        The caller should give priority to earlier ones in the list.
        """
        with self.lock:
            # We want a list of unused change addresses.
            # As a performance optimisation, to avoid checking all addresses every time,
            # we maintain a list of "not old" addresses ("old" addresses have deeply confirmed history),
            # and only check those.
            if not hasattr(self, '_not_old_change_addresses'):
                self._not_old_change_addresses = self.get_change_addresses()
            self._not_old_change_addresses = [addr for addr in self._not_old_change_addresses
                                              if not self.adb.address_is_old(addr)]
            unused_addrs = [addr for addr in self._not_old_change_addresses
                            if not self.adb.is_used(addr) and not self.is_address_reserved(addr)]
            return unused_addrs
    def is_deterministic(self) -> bool:
        """Whether the keystore derives keys deterministically (HD)."""
        return self.keystore.is_deterministic()
def _set_label(self, key: str, value: Optional[str]) -> None:
with self.lock:
if value is None:
self._labels.pop(key, None)
else:
self._labels[key] = value
    def set_label(self, name: str, text: str = None) -> bool:
        """Set (or clear, if *text* is falsy) the label for *name*.

        Returns True iff the stored label actually changed. Newlines in
        *text* are flattened to spaces. Runs the 'set_label' plugin hook
        on change.
        """
        if not name:
            return False
        changed = False
        with self.lock:
            old_text = self._labels.get(name)
            if text:
                text = text.replace("\n", " ")
                if old_text != text:
                    self._labels[name] = text
                    changed = True
            else:
                if old_text is not None:
                    self._labels.pop(name)
                    changed = True
        if changed:
            run_hook('set_label', self, name, text)
        return changed
    def import_labels(self, path):
        """Import labels from a JSON file mapping key -> label text."""
        data = read_json_file(path)
        for key, value in data.items():
            self.set_label(key, value)
    def export_labels(self, path):
        """Export all labels to a JSON file at *path*."""
        write_json_file(path, self.get_all_labels())
def set_fiat_value(self, txid, ccy, text, fx, value_sat):
if not self.db.get_transaction(txid):
return
# since fx is inserting the thousands separator,
# and not util, also have fx remove it
text = fx.remove_thousands_separator(text)
def_fiat = self.default_fiat_value(txid, fx, value_sat)
formatted = fx.ccy_amount_str(def_fiat, commas=False)
def_fiat_rounded = Decimal(formatted)
reset = not text
if not reset:
try:
text_dec = Decimal(text)
text_dec_rounded = Decimal(fx.ccy_amount_str(text_dec, commas=False))
reset = text_dec_rounded == def_fiat_rounded
except:
# garbage. not resetting, but not saving either
return False
if reset:
d = self.fiat_value.get(ccy, {})
if d and txid in d:
d.pop(txid)
else:
# avoid saving empty dict
return True
else:
if ccy not in self.fiat_value:
self.fiat_value[ccy] = {}
self.fiat_value[ccy][txid] = text
return reset
def get_fiat_value(self, txid, ccy):
fiat_value = self.fiat_value.get(ccy, {}).get(txid)
try:
return Decimal(fiat_value)
except:
return
    def is_mine(self, address) -> bool:
        """Whether *address* belongs to this wallet (derivable index exists)."""
        if not address: return False
        return bool(self.get_address_index(address))
    def is_change(self, address) -> bool:
        """Whether *address* is one of our change addresses (branch index 1)."""
        if not self.is_mine(address):
            return False
        return self.get_address_index(address)[0] == 1
    @abstractmethod
    def get_addresses(self) -> Sequence[str]:
        """Return all wallet addresses (implemented by subclasses)."""
        pass
    @abstractmethod
    def get_address_index(self, address: str) -> Optional[AddressIndexGeneric]:
        """Return the wallet-internal index of *address*, or None if not ours."""
        pass
    @abstractmethod
    def get_address_path_str(self, address: str) -> Optional[str]:
        """Returns derivation path str such as "m/0/5" to address,
        or None if not applicable.
        """
        pass
    @abstractmethod
    def get_redeem_script(self, address: str) -> Optional[str]:
        """Return the redeem script for *address*, or None if not applicable."""
        pass
    @abstractmethod
    def get_witness_script(self, address: str) -> Optional[str]:
        """Return the witness script for *address*, or None if not applicable."""
        pass
    @abstractmethod
    def get_txin_type(self, address: str) -> str:
        """Return script type of wallet address."""
        pass
def export_private_key(self, address: str, password: Optional[str]) -> str:
if self.is_watching_only():
raise Exception(_("This is a watching-only wallet"))
if not is_address(address):
raise Exception(f"Invalid bitcoin address: {address}")
if not self.is_mine(address):
raise Exception(_('Address not in wallet.') + f' {address}')
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
txin_type = self.get_txin_type(address)
serialized_privkey = bitcoin.serialize_privkey(pk, compressed, txin_type)
return serialized_privkey
def export_private_key_for_path(self, path: Union[Sequence[int], str], password: Optional[str]) -> str:
raise Exception("this wallet is not deterministic")
    @abstractmethod
    def get_public_keys(self, address: str) -> Sequence[str]:
        """Return the public key(s) for *address*. Implemented by subclasses."""
        pass
def get_public_keys_with_deriv_info(self, address: str) -> Dict[bytes, Tuple[KeyStoreWithMPK, Sequence[int]]]:
"""Returns a map: pubkey -> (keystore, derivation_suffix)"""
return {}
def is_lightning_funding_tx(self, txid: Optional[str]) -> bool:
if not self.lnworker or txid is None:
return False
return any([chan.funding_outpoint.txid == txid
for chan in self.lnworker.channels.values()])
def is_swap_tx(self, tx: Transaction) -> bool:
return bool(self.lnworker.swap_manager.get_swap_by_tx(tx)) if self.lnworker else False
    def get_wallet_delta(self, tx: Transaction) -> TxWalletDelta:
        """Return the effect a transaction has on the wallet.
        This method must use self.is_mine, not self.adb.is_mine()
        """
        is_relevant = False  # "related to wallet?"
        num_input_ismine = 0
        # v_in: total input value (becomes None if any input value is unknown);
        # v_in_mine / v_out_mine: the portions that belong to this wallet
        v_in = v_in_mine = v_out = v_out_mine = 0
        with self.lock, self.transaction_lock:
            for txin in tx.inputs():
                addr = self.adb.get_txin_address(txin)
                value = self.adb.get_txin_value(txin, address=addr)
                if self.is_mine(addr):
                    num_input_ismine += 1
                    is_relevant = True
                    assert value is not None
                    v_in_mine += value
                if value is None:
                    v_in = None
                elif v_in is not None:
                    v_in += value
            for txout in tx.outputs():
                v_out += txout.value
                if self.is_mine(txout.address):
                    v_out_mine += txout.value
                    is_relevant = True
        delta = v_out_mine - v_in_mine
        if v_in is not None:
            fee = v_in - v_out
        else:
            fee = None
        # even with unknown input values, a PSBT may still know its own fee
        if fee is None and isinstance(tx, PartialTransaction):
            fee = tx.get_fee()
        return TxWalletDelta(
            is_relevant=is_relevant,
            is_any_input_ismine=num_input_ismine > 0,
            is_all_input_ismine=num_input_ismine == len(tx.inputs()),
            delta=delta,
            fee=fee,
        )
def get_tx_info(self, tx: Transaction) -> TxWalletDetails:
tx_wallet_delta = self.get_wallet_delta(tx)
is_relevant = tx_wallet_delta.is_relevant
is_any_input_ismine = tx_wallet_delta.is_any_input_ismine
is_swap = self.is_swap_tx(tx)
fee = tx_wallet_delta.fee
exp_n = None
can_broadcast = False
can_bump = False
can_cpfp = False
tx_hash = tx.txid() # note: txid can be None! e.g. when called from GUI tx dialog
is_lightning_funding_tx = self.is_lightning_funding_tx(tx_hash)
tx_we_already_have_in_db = self.adb.db.get_transaction(tx_hash)
can_save_as_local = (is_relevant and tx.txid() is not None
and (tx_we_already_have_in_db is None or not tx_we_already_have_in_db.is_complete()))
label = ''
tx_mined_status = self.adb.get_tx_height(tx_hash)
can_remove = ((tx_mined_status.height in [TX_HEIGHT_FUTURE, TX_HEIGHT_LOCAL])
# otherwise 'height' is unreliable (typically LOCAL):
and is_relevant
# don't offer during common signing flow, e.g. when watch-only wallet starts creating a tx:
and bool(tx_we_already_have_in_db))
can_dscancel = False
if tx.is_complete():
if tx_we_already_have_in_db:
label = self.get_label_for_txid(tx_hash)
if tx_mined_status.height > 0:
if tx_mined_status.conf:
status = _("{} confirmations").format(tx_mined_status.conf)
else:
status = _('Not verified')
elif tx_mined_status.height in (TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED):
status = _('Unconfirmed')
if fee is None:
fee = self.adb.get_tx_fee(tx_hash)
if fee and self.network and self.config.has_fee_mempool():
size = tx.estimated_size()
fee_per_byte = fee / size
exp_n = self.config.fee_to_depth(fee_per_byte)
can_bump = (is_any_input_ismine or is_swap) and not tx.is_final()
can_dscancel = (is_any_input_ismine and not tx.is_final()
and not all([self.is_mine(txout.address) for txout in tx.outputs()]))
try:
self.cpfp(tx, 0)
can_cpfp = True
except:
can_cpfp = False
else:
status = _('Local')
can_broadcast = self.network is not None
can_bump = (is_any_input_ismine or is_swap) and not tx.is_final()
else:
status = _("Signed")
can_broadcast = self.network is not None
else:
assert isinstance(tx, PartialTransaction)
s, r = tx.signature_count()
status = _("Unsigned") if s == 0 else _('Partially signed') + ' (%d/%d)'%(s,r)
if is_relevant:
if tx_wallet_delta.is_all_input_ismine:
assert fee is not None
amount = tx_wallet_delta.delta + fee
else:
amount = tx_wallet_delta.delta
else:
amount = None
if is_lightning_funding_tx:
can_bump = False # would change txid
return TxWalletDetails(
txid=tx_hash,
status=status,
label=label,
can_broadcast=can_broadcast,
can_bump=can_bump,
can_cpfp=can_cpfp,
can_dscancel=can_dscancel,
can_save_as_local=can_save_as_local,
amount=amount,
fee=fee,
tx_mined_status=tx_mined_status,
mempool_depth_bytes=exp_n,
can_remove=can_remove,
is_lightning_funding_tx=is_lightning_funding_tx,
)
def get_balance(self, **kwargs):
domain = self.get_addresses()
return self.adb.get_balance(domain, **kwargs)
def get_addr_balance(self, address):
return self.adb.get_balance([address])
def get_utxos(
self,
domain: Optional[Iterable[str]] = None,
**kwargs,
):
if domain is None:
domain = self.get_addresses()
return self.adb.get_utxos(domain=domain, **kwargs)
    def get_spendable_coins(
            self,
            domain: Optional[Iterable[str]] = None,
            *,
            nonlocal_only: bool = False,
    ) -> Sequence[PartialTxInput]:
        """Return UTXOs that may be spent: mature, not on frozen addresses,
        not frozen coins, and (per user config) only confirmed ones.
        """
        # user setting: restrict spending to confirmed coins
        confirmed_only = self.config.get('confirmed_only', False)
        with self._freeze_lock:
            frozen_addresses = self._frozen_addresses.copy()
        utxos = self.get_utxos(
            domain=domain,
            excluded_addresses=frozen_addresses,
            mature_only=True,
            confirmed_funding_only=confirmed_only,
            nonlocal_only=nonlocal_only,
        )
        # is_frozen_coin also covers implicitly frozen coins,
        # not just the manually frozen ones in _frozen_coins
        utxos = [utxo for utxo in utxos if not self.is_frozen_coin(utxo)]
        return utxos
    @abstractmethod
    def get_receiving_addresses(self, *, slice_start=None, slice_stop=None) -> Sequence[str]:
        """Return (a slice of) the receiving addresses. Implemented by subclasses."""
        pass
    @abstractmethod
    def get_change_addresses(self, *, slice_start=None, slice_stop=None) -> Sequence[str]:
        """Return (a slice of) the change addresses. Implemented by subclasses."""
        pass
def dummy_address(self):
# first receiving address
return self.get_receiving_addresses(slice_start=0, slice_stop=1)[0]
def get_frozen_balance(self):
with self._freeze_lock:
frozen_addresses = self._frozen_addresses.copy()
# note: for coins, use is_frozen_coin instead of _frozen_coins,
# as latter only contains *manually* frozen ones
frozen_coins = {utxo.prevout.to_str() for utxo in self.get_utxos()
if self.is_frozen_coin(utxo)}
if not frozen_coins: # shortcut
return self.adb.get_balance(frozen_addresses)
c1, u1, x1 = self.get_balance()
c2, u2, x2 = self.get_balance(
excluded_addresses=frozen_addresses,
excluded_coins=frozen_coins,
)
return c1-c2, u1-u2, x1-x2
def get_balances_for_piechart(self):
# return only positive values
# todo: add lightning frozen
c, u, x = self.get_balance()
fc, fu, fx = self.get_frozen_balance()
lightning = self.lnworker.get_balance() if self.has_lightning() else 0
f_lightning = self.lnworker.get_balance(frozen=True) if self.has_lightning() else 0
# subtract frozen funds
cc = c - fc
uu = u - fu
xx = x - fx
frozen = fc + fu + fx
return cc, uu, xx, frozen, lightning - f_lightning, f_lightning
def balance_at_timestamp(self, domain, target_timestamp):
# we assume that get_history returns items ordered by block height
# we also assume that block timestamps are monotonic (which is false...!)
h = self.adb.get_history(domain=domain)
balance = 0
for hist_item in h:
balance = hist_item.balance
if hist_item.tx_mined_status.timestamp is None or hist_item.tx_mined_status.timestamp > target_timestamp:
return balance - hist_item.delta
# return last balance
return balance
    def get_onchain_history(self, *, domain=None):
        """Yield one display-ready dict per on-chain history item in *domain*
        (defaults to all wallet addresses), in the order adb.get_history
        returns them.
        """
        if domain is None:
            domain = self.get_addresses()
        monotonic_timestamp = 0
        for hist_item in self.adb.get_history(domain=domain):
            # unconfirmed items (timestamp None) are treated as "far future"
            # so the monotonic timestamp never decreases
            monotonic_timestamp = max(monotonic_timestamp, (hist_item.tx_mined_status.timestamp or 999_999_999_999))
            yield {
                'txid': hist_item.txid,
                'fee_sat': hist_item.fee,
                'height': hist_item.tx_mined_status.height,
                'confirmations': hist_item.tx_mined_status.conf,
                'timestamp': hist_item.tx_mined_status.timestamp,
                'monotonic_timestamp': monotonic_timestamp,
                'incoming': True if hist_item.delta>0 else False,
                'bc_value': Satoshis(hist_item.delta),
                'bc_balance': Satoshis(hist_item.balance),
                'date': timestamp_to_datetime(hist_item.tx_mined_status.timestamp),
                'label': self.get_label_for_txid(hist_item.txid),
                'txpos_in_block': hist_item.tx_mined_status.txpos,
            }
    def create_invoice(self, *, outputs: List[PartialTxOutput], message, pr, URI) -> Invoice:
        """Create an (unsaved) Invoice for *outputs*.

        A BIP70 payment request *pr* takes precedence if given.  If any
        output uses a max-spend value ('!'), the total amount becomes '!'
        as well.  Timestamp and expiry are taken from *URI* when present.
        """
        height = self.adb.get_local_height()
        if pr:
            return Invoice.from_bip70_payreq(pr, height=height)
        amount_msat = 0
        for x in outputs:
            if parse_max_spend(x.value):
                amount_msat = '!'
                break
            else:
                assert isinstance(x.value, int), f"{x.value!r}"
                amount_msat += x.value * 1000
        timestamp = None
        exp = None
        if URI:
            timestamp = URI.get('time')
            exp = URI.get('exp')
        timestamp = timestamp or int(time.time())
        exp = exp or 0
        invoice = Invoice(
            amount_msat=amount_msat,
            message=message,
            time=timestamp,
            exp=exp,
            outputs=outputs,
            bip70=None,
            height=height,
            lightning_invoice=None,
        )
        return invoice
    def save_invoice(self, invoice: Invoice, *, write_to_disk: bool = True) -> None:
        """Store *invoice* and, for on-chain invoices, index its outputs so
        incoming transactions can be matched to it (paid-detection)."""
        key = invoice.get_id()
        if not invoice.is_lightning():
            if self.is_onchain_invoice_paid(invoice)[0]:
                _logger.info("saving invoice... but it is already paid!")
            with self.transaction_lock:
                for txout in invoice.get_outputs():
                    self._invoices_from_scriptpubkey_map[txout.scriptpubkey].add(key)
        self._invoices[key] = invoice
        if write_to_disk:
            self.save_db()
def clear_invoices(self):
self._invoices.clear()
self.save_db()
def clear_requests(self):
self._receive_requests.clear()
self._requests_addr_to_key.clear()
self.save_db()
def get_invoices(self):
out = list(self._invoices.values())
out.sort(key=lambda x:x.time)
return out
def get_unpaid_invoices(self):
invoices = self.get_invoices()
return [x for x in invoices if self.get_invoice_status(x) != PR_PAID]
def get_invoice(self, invoice_id):
return self._invoices.get(invoice_id)
def import_requests(self, path):
data = read_json_file(path)
for x in data:
try:
req = Invoice(**x)
except:
raise FileImportFailed(_("Invalid invoice format"))
self.add_payment_request(req, write_to_disk=False)
self.save_db()
def export_requests(self, path):
write_json_file(path, list(self._receive_requests.values()))
def import_invoices(self, path):
data = read_json_file(path)
for x in data:
try:
invoice = Invoice(**x)
except:
raise FileImportFailed(_("Invalid invoice format"))
self.save_invoice(invoice, write_to_disk=False)
self.save_db()
def export_invoices(self, path):
write_json_file(path, list(self._invoices.values()))
def get_relevant_invoices_for_tx(self, tx_hash) -> Sequence[Invoice]:
invoice_keys = self._invoices_from_txid_map.get(tx_hash, set())
invoices = [self.get_invoice(key) for key in invoice_keys]
invoices = [inv for inv in invoices if inv] # filter out None
for inv in invoices:
assert isinstance(inv, Invoice), f"unexpected type {type(inv)}"
return invoices
def _init_requests_rhash_index(self):
# self._requests_addr_to_key may contain addresses that can be reused
# this is checked in get_request_by_address
self._requests_addr_to_key = defaultdict(set) # type: Dict[str, Set[str]]
for req in self._receive_requests.values():
if addr := req.get_address():
self._requests_addr_to_key[addr].add(req.get_id())
def _prepare_onchain_invoice_paid_detection(self):
self._invoices_from_txid_map = defaultdict(set) # type: Dict[str, Set[str]]
self._invoices_from_scriptpubkey_map = defaultdict(set) # type: Dict[bytes, Set[str]]
self._update_onchain_invoice_paid_detection(self._invoices.keys())
    def _update_onchain_invoice_paid_detection(self, invoice_keys: Iterable[str]) -> None:
        """Refresh the txid / scriptpubkey indexes for the given invoice keys."""
        for invoice_key in invoice_keys:
            invoice = self._invoices.get(invoice_key)
            if not invoice:
                continue
            # pure-lightning invoice (no fallback address): no on-chain footprint
            if invoice.is_lightning() and not invoice.get_address():
                continue
            # already paid over lightning: no need to track on-chain
            if invoice.is_lightning() and self.lnworker and self.lnworker.get_invoice_status(invoice) == PR_PAID:
                continue
            is_paid, conf_needed, relevant_txs = self._is_onchain_invoice_paid(invoice)
            if is_paid:
                for txid in relevant_txs:
                    self._invoices_from_txid_map[txid].add(invoice_key)
            for txout in invoice.get_outputs():
                self._invoices_from_scriptpubkey_map[txout.scriptpubkey].add(invoice_key)
    def _is_onchain_invoice_paid(self, invoice: Invoice) -> Tuple[bool, Optional[int], Sequence[str]]:
        """Returns whether on-chain invoice/request is satisfied, num confs required txs have,
        and list of relevant TXIDs.
        """
        outputs = invoice.get_outputs()
        if not outputs:  # e.g. lightning-only
            return False, None, []
        invoice_amounts = defaultdict(int)  # type: Dict[bytes, int]  # scriptpubkey -> value_sats
        for txo in outputs:  # type: PartialTxOutput
            # max-spend ('!') outputs just need to exist; count them as 1 sat
            invoice_amounts[txo.scriptpubkey] += 1 if parse_max_spend(txo.value) else txo.value
        relevant_txs = set()
        is_paid = True
        conf_needed = None  # type: Optional[int]
        with self.lock, self.transaction_lock:
            for invoice_scriptpubkey, invoice_amt in invoice_amounts.items():
                scripthash = bitcoin.script_to_scripthash(invoice_scriptpubkey.hex())
                prevouts_and_values = self.db.get_prevouts_by_scripthash(scripthash)
                confs_and_values = []
                for prevout, v in prevouts_and_values:
                    relevant_txs.add(prevout.txid.hex())
                    tx_height = self.adb.get_tx_height(prevout.txid.hex())
                    if 0 < tx_height.height <= invoice.height:  # exclude txs older than invoice
                        continue
                    confs_and_values.append((tx_height.conf or 0, v))
                # check that there is at least one TXO, and that they pay enough.
                # note: "at least one TXO" check is needed for zero amount invoice (e.g. OP_RETURN)
                vsum = 0
                # consume best-confirmed payments first; conf_needed becomes the
                # confirmation count of the least-confirmed tx actually needed
                for conf, v in reversed(sorted(confs_and_values)):
                    vsum += v
                    if vsum >= invoice_amt:
                        conf_needed = min(conf_needed, conf) if conf_needed is not None else conf
                        break
                else:
                    is_paid = False
        return is_paid, conf_needed, list(relevant_txs)
def is_onchain_invoice_paid(self, invoice: Invoice) -> Tuple[bool, Optional[int]]:
is_paid, conf_needed, relevant_txs = self._is_onchain_invoice_paid(invoice)
return is_paid, conf_needed
    @profiler
    def get_full_history(self, fx=None, *, onchain_domain=None, include_lightning=True):
        """Merge on-chain, lightning-related on-chain, and pure-lightning
        history into a single OrderedDictWithIndex keyed by txid (or payment
        hash), sorted by timestamp, with a running msat-precision 'balance'
        and optional fiat fields filled in."""
        transactions_tmp = OrderedDictWithIndex()
        # add on-chain txns
        onchain_history = self.get_onchain_history(domain=onchain_domain)
        for tx_item in onchain_history:
            txid = tx_item['txid']
            transactions_tmp[txid] = tx_item
        # add lnworker onchain transactions
        lnworker_history = self.lnworker.get_onchain_history() if self.lnworker and include_lightning else {}
        for txid, item in lnworker_history.items():
            if txid in transactions_tmp:
                # enrich the existing on-chain item with lightning metadata
                tx_item = transactions_tmp[txid]
                tx_item['group_id'] = item.get('group_id')  # for swaps
                tx_item['label'] = item['label']
                tx_item['type'] = item['type']
                ln_value = Decimal(item['amount_msat']) / 1000  # for channel open/close tx
                tx_item['ln_value'] = Satoshis(ln_value)
                if channel_id := item.get('channel_id'):
                    tx_item['channel_id'] = channel_id
            else:
                if item['type'] == 'swap':
                    # swap items do not have all the fields. We can skip them
                    # because they will eventually be in onchain_history
                    # TODO: use attr.s objects instead of dicts
                    continue
                transactions_tmp[txid] = item
                ln_value = Decimal(item['amount_msat']) / 1000  # for channel open/close tx
                item['ln_value'] = Satoshis(ln_value)
        # add lightning_transactions
        lightning_history = self.lnworker.get_lightning_history() if self.lnworker and include_lightning else {}
        for tx_item in lightning_history.values():
            txid = tx_item.get('txid')
            ln_value = Decimal(tx_item['amount_msat']) / 1000
            tx_item['lightning'] = True
            tx_item['ln_value'] = Satoshis(ln_value)
            key = tx_item.get('txid') or tx_item['payment_hash']
            transactions_tmp[key] = tx_item
        # sort on-chain and LN stuff into new dict, by timestamp
        # (we rely on this being a *stable* sort)
        transactions = OrderedDictWithIndex()
        for k, v in sorted(list(transactions_tmp.items()),
                           key=lambda x: x[1].get('monotonic_timestamp') or x[1].get('timestamp') or float('inf')):
            transactions[k] = v
        now = time.time()
        balance = 0
        for item in transactions.values():
            # add on-chain and lightning values
            value = Decimal(0)
            if item.get('bc_value'):
                value += item['bc_value'].value
            if item.get('ln_value'):
                value += item.get('ln_value').value
            # note: 'value' and 'balance' has msat precision (as LN has msat precision)
            item['value'] = Satoshis(value)
            balance += value
            item['balance'] = Satoshis(balance)
            if fx and fx.is_enabled() and fx.get_history_config():
                txid = item.get('txid')
                if not item.get('lightning') and txid:
                    fiat_fields = self.get_tx_item_fiat(tx_hash=txid, amount_sat=value, fx=fx, tx_fee=item['fee_sat'])
                    item.update(fiat_fields)
                else:
                    # lightning item: use the historical rate at its timestamp
                    timestamp = item['timestamp'] or now
                    fiat_value = value / Decimal(bitcoin.COIN) * fx.timestamp_rate(timestamp)
                    item['fiat_value'] = Fiat(fiat_value, fx.ccy)
                    item['fiat_default'] = True
        return transactions
    @profiler
    def get_detailed_history(
            self,
            from_timestamp=None,
            to_timestamp=None,
            fx=None,
            show_addresses=False,
            from_height=None,
            to_height=None):
        """On-chain history with optional fiat values and capital gains,
        filtered either by a timestamp range or by a block-height range
        (the two kinds of filter cannot be combined)."""
        # History with capital gains, using utxo pricing
        # FIXME: Lightning capital gains would requires FIFO
        if (from_timestamp is not None or to_timestamp is not None) \
                and (from_height is not None or to_height is not None):
            raise Exception('timestamp and block height based filtering cannot be used together')
        show_fiat = fx and fx.is_enabled() and fx.get_history_config()
        out = []
        income = 0
        expenditures = 0
        capital_gains = Decimal(0)
        fiat_income = Decimal(0)
        fiat_expenditures = Decimal(0)
        now = time.time()
        for item in self.get_onchain_history():
            timestamp = item['timestamp']
            # unconfirmed items (timestamp None) count as "now" for filtering
            if from_timestamp and (timestamp or now) < from_timestamp:
                continue
            if to_timestamp and (timestamp or now) >= to_timestamp:
                continue
            height = item['height']
            if from_height is not None and from_height > height > 0:
                continue
            if to_height is not None and (height >= to_height or height <= 0):
                continue
            tx_hash = item['txid']
            tx = self.db.get_transaction(tx_hash)
            tx_fee = item['fee_sat']
            item['fee'] = Satoshis(tx_fee) if tx_fee is not None else None
            if show_addresses:
                item['inputs'] = list(map(lambda x: x.to_json(), tx.inputs()))
                item['outputs'] = list(map(lambda x: {'address': x.get_ui_address_str(), 'value': Satoshis(x.value)},
                                           tx.outputs()))
            # fixme: use in and out values
            value = item['bc_value'].value
            if value < 0:
                expenditures += -value
            else:
                income += value
            # fiat computations
            if show_fiat:
                fiat_fields = self.get_tx_item_fiat(tx_hash=tx_hash, amount_sat=value, fx=fx, tx_fee=tx_fee)
                fiat_value = fiat_fields['fiat_value'].value
                item.update(fiat_fields)
                if value < 0:
                    capital_gains += fiat_fields['capital_gain'].value
                    fiat_expenditures += -fiat_value
                else:
                    fiat_income += fiat_value
            out.append(item)
        # add summary
        if out:
            first_item = out[0]
            last_item = out[-1]
            if from_height or to_height:
                start_height = from_height
                end_height = to_height
            else:
                start_height = first_item['height'] - 1
                end_height = last_item['height']
            b = first_item['bc_balance'].value
            v = first_item['bc_value'].value
            # balance just before the first included item
            start_balance = None if b is None or v is None else b - v
            end_balance = last_item['bc_balance'].value
            if from_timestamp is not None and to_timestamp is not None:
                start_timestamp = from_timestamp
                end_timestamp = to_timestamp
            else:
                start_timestamp = first_item['timestamp']
                end_timestamp = last_item['timestamp']
            start_coins = self.get_utxos(
                block_height=start_height,
                confirmed_funding_only=True,
                confirmed_spending_only=True,
                nonlocal_only=True)
            end_coins = self.get_utxos(
                block_height=end_height,
                confirmed_funding_only=True,
                confirmed_spending_only=True,
                nonlocal_only=True)
            def summary_point(timestamp, height, balance, coins):
                # one endpoint (begin/end) of the summary: balance and prices
                date = timestamp_to_datetime(timestamp)
                out = {
                    'date': date,
                    'block_height': height,
                    'BTC_balance': Satoshis(balance),
                }
                if show_fiat:
                    ap = self.acquisition_price(coins, fx.timestamp_rate, fx.ccy)
                    lp = self.liquidation_price(coins, fx.timestamp_rate, timestamp)
                    out['acquisition_price'] = Fiat(ap, fx.ccy)
                    out['liquidation_price'] = Fiat(lp, fx.ccy)
                    out['unrealized_gains'] = Fiat(lp - ap, fx.ccy)
                    out['fiat_balance'] = Fiat(fx.historical_value(balance, date), fx.ccy)
                    out['BTC_fiat_price'] = Fiat(fx.historical_value(COIN, date), fx.ccy)
                return out
            summary_start = summary_point(start_timestamp, start_height, start_balance, start_coins)
            summary_end = summary_point(end_timestamp, end_height, end_balance, end_coins)
            flow = {
                'BTC_incoming': Satoshis(income),
                'BTC_outgoing': Satoshis(expenditures)
            }
            if show_fiat:
                flow['fiat_currency'] = fx.ccy
                flow['fiat_incoming'] = Fiat(fiat_income, fx.ccy)
                flow['fiat_outgoing'] = Fiat(fiat_expenditures, fx.ccy)
                flow['realized_capital_gains'] = Fiat(capital_gains, fx.ccy)
            summary = {
                'begin': summary_start,
                'end': summary_end,
                'flow': flow,
            }
        else:
            summary = {}
        return {
            'transactions': out,
            'summary': summary
        }
def acquisition_price(self, coins, price_func, ccy):
return Decimal(sum(self.coin_price(coin.prevout.txid.hex(), price_func, ccy, self.adb.get_txin_value(coin)) for coin in coins))
def liquidation_price(self, coins, price_func, timestamp):
p = price_func(timestamp)
return sum([coin.value_sats() for coin in coins]) * p / Decimal(COIN)
def default_fiat_value(self, tx_hash, fx, value_sat):
return value_sat / Decimal(COIN) * self.price_at_timestamp(tx_hash, fx.timestamp_rate)
    def get_tx_item_fiat(
            self,
            *,
            tx_hash: str,
            amount_sat: int,
            fx: 'FxThread',
            tx_fee: Optional[int],
    ) -> Dict[str, Any]:
        """Return fiat display fields for one history item: currency, rate,
        value, fee, and -- for outgoing amounts -- acquisition price and
        realized capital gain."""
        item = {}
        fiat_value = self.get_fiat_value(tx_hash, fx.ccy)
        # fiat_default: True when there is no user-entered override
        fiat_default = fiat_value is None
        fiat_rate = self.price_at_timestamp(tx_hash, fx.timestamp_rate)
        fiat_value = fiat_value if fiat_value is not None else self.default_fiat_value(tx_hash, fx, amount_sat)
        fiat_fee = tx_fee / Decimal(COIN) * fiat_rate if tx_fee is not None else None
        item['fiat_currency'] = fx.ccy
        item['fiat_rate'] = Fiat(fiat_rate, fx.ccy)
        item['fiat_value'] = Fiat(fiat_value, fx.ccy)
        item['fiat_fee'] = Fiat(fiat_fee, fx.ccy) if fiat_fee is not None else None
        item['fiat_default'] = fiat_default
        if amount_sat < 0:
            # spending: compare the average price paid (acquisition) with the
            # value now (liquidation) to get the realized capital gain
            acquisition_price = - amount_sat / Decimal(COIN) * self.average_price(tx_hash, fx.timestamp_rate, fx.ccy)
            liquidation_price = - fiat_value
            item['acquisition_price'] = Fiat(acquisition_price, fx.ccy)
            cg = liquidation_price - acquisition_price
            item['capital_gain'] = Fiat(cg, fx.ccy)
        return item
def _get_label(self, key: str) -> str:
# key is typically: address / txid / LN-payment-hash-hex
return self._labels.get(key) or ''
def get_label_for_address(self, addr: str) -> str:
label = self._labels.get(addr) or ''
if not label and (request := self.get_request_by_addr(addr)):
label = request.get_message()
return label
def get_label_for_txid(self, tx_hash: str) -> str:
return self._labels.get(tx_hash) or self._get_default_label_for_txid(tx_hash)
def _get_default_label_for_txid(self, tx_hash: str) -> str:
# note: we don't deserialize tx as the history calls us for every tx, and that would be slow
if not self.db.get_txi_addresses(tx_hash):
# no inputs are ismine -> likely incoming payment -> concat labels of output addresses
labels = []
for addr in self.db.get_txo_addresses(tx_hash):
label = self.get_label_for_address(addr)
if label:
labels.append(label)
return ', '.join(labels)
else:
# some inputs are ismine -> likely outgoing payment
labels = []
for invoice in self.get_relevant_invoices_for_tx(tx_hash):
if invoice.message:
labels.append(invoice.message)
return ', '.join(labels)
def _get_default_label_for_rhash(self, rhash: str) -> str:
req = self.get_request(rhash)
return req.get_message() if req else ''
def get_label_for_rhash(self, rhash: str) -> str:
return self._labels.get(rhash) or self._get_default_label_for_rhash(rhash)
def get_all_labels(self) -> Dict[str, str]:
with self.lock:
return copy.copy(self._labels)
    def get_tx_status(self, tx_hash, tx_mined_info: TxMinedInfo):
        """Return a (status_code, status_string) pair for GUI display.

        status_code: 0 unconfirmed, 1 unconfirmed-with-unconfirmed-parent,
        2 future/unknown/not-SPV-verified, 3 local, and 3+n for a mined tx
        with n (capped at 6) confirmations.
        """
        extra = []
        height = tx_mined_info.height
        conf = tx_mined_info.conf
        timestamp = tx_mined_info.timestamp
        if height == TX_HEIGHT_FUTURE:
            # 'conf' encodes the (negative) number of blocks until broadcast
            assert conf < 0, conf
            num_blocks_remainining = -conf
            return 2, f'in {num_blocks_remainining} blocks'
        if conf == 0:
            tx = self.db.get_transaction(tx_hash)
            if not tx:
                return 2, 'unknown'
            is_final = tx and tx.is_final()
            if not is_final:
                extra.append('rbf')
            fee = self.adb.get_tx_fee(tx_hash)
            if fee is not None:
                size = tx.estimated_size()
                fee_per_byte = fee / size
                extra.append(format_fee_satoshis(fee_per_byte) + ' sat/b')
            if fee is not None and height in (TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED) \
                    and self.config.has_fee_mempool():
                exp_n = self.config.fee_to_depth(fee_per_byte)
                if exp_n is not None:
                    extra.append(self.config.get_depth_mb_str(exp_n))
            if height == TX_HEIGHT_LOCAL:
                status = 3
            elif height == TX_HEIGHT_UNCONF_PARENT:
                status = 1
            elif height == TX_HEIGHT_UNCONFIRMED:
                status = 0
            else:
                status = 2  # not SPV verified
        else:
            status = 3 + min(conf, 6)
        # statuses >= 4 show the mined time instead of a canned string
        time_str = format_time(timestamp) if timestamp else _("unknown")
        status_str = TX_STATUS[status] if status < 4 else time_str
        if extra:
            status_str += ' [%s]'%(', '.join(extra))
        return status, status_str
    def relayfee(self):
        """Delegate to the module-level relayfee() for this wallet's network."""
        return relayfee(self.network)
    def dust_threshold(self):
        """Delegate to the module-level dust_threshold() for this network."""
        return dust_threshold(self.network)
    def get_unconfirmed_base_tx_for_batching(self) -> Optional[Transaction]:
        """Find an unconfirmed, RBF-enabled, outgoing tx of ours that a new
        payment could be merged into ("batching").

        Mempool transactions are preferred over local-only ones; returns
        None if there is no suitable candidate.
        """
        candidate = None
        domain = self.get_addresses()
        for hist_item in self.adb.get_history(domain):
            # tx should not be mined yet
            if hist_item.tx_mined_status.conf > 0: continue
            # conservative future proofing of code: only allow known unconfirmed types
            if hist_item.tx_mined_status.height not in (TX_HEIGHT_UNCONFIRMED,
                                                        TX_HEIGHT_UNCONF_PARENT,
                                                        TX_HEIGHT_LOCAL):
                continue
            # tx should be "outgoing" from wallet
            if hist_item.delta >= 0:
                continue
            tx = self.db.get_transaction(hist_item.txid)
            if not tx:
                continue
            # is_mine outputs should not be spent yet
            # to avoid cancelling our own dependent transactions
            txid = tx.txid()
            if any([self.is_mine(o.address) and self.db.get_spent_outpoint(txid, output_idx)
                    for output_idx, o in enumerate(tx.outputs())]):
                continue
            # all inputs should be is_mine
            if not all([self.is_mine(self.adb.get_txin_address(txin)) for txin in tx.inputs()]):
                continue
            # do not mutate LN funding txs, as that would change their txid
            if self.is_lightning_funding_tx(txid):
                continue
            # tx must have opted-in for RBF (even if local, for consistency)
            if tx.is_final():
                continue
            # prefer txns already in mempool (vs local)
            if hist_item.tx_mined_status.height == TX_HEIGHT_LOCAL:
                candidate = tx
                continue
            return tx
        return candidate
    def get_change_addresses_for_new_transaction(
            self, preferred_change_addr=None, *, allow_reusing_used_change_addrs: bool = True,
    ) -> List[str]:
        """Pick the change address(es) for a new transaction.

        Priority: explicitly preferred address(es), then unused change
        addresses, then (if allowed) one random recent change address.
        Returns at most max_change_outputs addresses (1 unless
        multiple_change is enabled).
        """
        change_addrs = []
        if preferred_change_addr:
            if isinstance(preferred_change_addr, (list, tuple)):
                change_addrs = list(preferred_change_addr)
            else:
                change_addrs = [preferred_change_addr]
        elif self.use_change:
            # Recalc and get unused change addresses
            addrs = self.calc_unused_change_addresses()
            # New change addresses are created only after a few
            # confirmations.
            if addrs:
                # if there are any unused, select all
                change_addrs = addrs
            else:
                # if there are none, take one randomly from the last few
                if not allow_reusing_used_change_addrs:
                    return []
                addrs = self.get_change_addresses(slice_start=-self.gap_limit_for_change)
                change_addrs = [random.choice(addrs)] if addrs else []
        for addr in change_addrs:
            assert is_address(addr), f"not valid bitcoin address: {addr}"
            # note that change addresses are not necessarily ismine
            # in which case this is a no-op
            self.check_address_for_corruption(addr)
        max_change = self.max_change_outputs if self.multiple_change else 1
        return change_addrs[:max_change]
def get_single_change_address_for_new_transaction(
self, preferred_change_addr=None, *, allow_reusing_used_change_addrs: bool = True,
) -> Optional[str]:
addrs = self.get_change_addresses_for_new_transaction(
preferred_change_addr=preferred_change_addr,
allow_reusing_used_change_addrs=allow_reusing_used_change_addrs,
)
if addrs:
return addrs[0]
return None
    @check_returned_address_for_corruption
    def get_new_sweep_address_for_channel(self) -> str:
        """Return an address for sweeping channel funds: an unused change
        address if available, else a random recent change address, else a
        receiving address (fallback for e.g. imported wallets)."""
        # Recalc and get unused change addresses
        addrs = self.calc_unused_change_addresses()
        if addrs:
            selected_addr = addrs[0]
        else:
            # if there are none, take one randomly from the last few
            addrs = self.get_change_addresses(slice_start=-self.gap_limit_for_change)
            if addrs:
                selected_addr = random.choice(addrs)
            else:  # fallback for e.g. imported wallets
                selected_addr = self.get_receiving_address()
        assert is_address(selected_addr), f"not valid bitcoin address: {selected_addr}"
        return selected_addr
def can_pay_onchain(self, outputs, coins=None):
fee = partial(self.config.estimate_fee, allow_fallback_to_static_rates=True) # to avoid NoDynamicFeeEstimates
try:
self.make_unsigned_transaction(
coins=coins,
outputs=outputs,
fee=fee)
except NotEnoughFunds:
return False
return True
    def make_unsigned_transaction(
            self, *,
            coins: Sequence[PartialTxInput],
            outputs: List[PartialTxOutput],
            fee=None,
            change_addr: str = None,
            is_sweep=False,
            rbf=False) -> PartialTransaction:
        """Build an unsigned tx spending (a subset of) *coins* to *outputs*.

        *fee* may be None (use the config estimator), a number (absolute
        fee), or a callable mapping tx size to fee.  Outputs whose value is
        a max-spend spec ('!') split the remaining balance by weight.
        Can raise NotEnoughFunds or NoDynamicFeeEstimates."""
        if not coins:  # any bitcoin tx must have at least 1 input by consensus
            raise NotEnoughFunds()
        if any([c.already_has_some_signatures() for c in coins]):
            raise Exception("Some inputs already contain signatures!")
        # prevent side-effect with '!'
        outputs = copy.deepcopy(outputs)
        # check outputs
        i_max = []
        i_max_sum = 0
        for i, o in enumerate(outputs):
            weight = parse_max_spend(o.value)
            if weight:
                i_max_sum += weight
                i_max.append((weight, i))
        if fee is None and self.config.fee_per_kb() is None:
            raise NoDynamicFeeEstimates()
        for item in coins:
            self.add_input_info(item)
        # Fee estimator
        if fee is None:
            fee_estimator = self.config.estimate_fee
        elif isinstance(fee, Number):
            fee_estimator = lambda size: fee
        elif callable(fee):
            fee_estimator = fee
        else:
            raise Exception(f'Invalid argument fee: {fee}')
        if len(i_max) == 0:
            # Let the coin chooser select the coins to spend
            coin_chooser = coinchooser.get_coin_chooser(self.config)
            # If there is an unconfirmed RBF tx, merge with it
            base_tx = self.get_unconfirmed_base_tx_for_batching()
            if self.config.get('batch_rbf', False) and base_tx:
                # make sure we don't try to spend change from the tx-to-be-replaced:
                coins = [c for c in coins if c.prevout.txid.hex() != base_tx.txid()]
                is_local = self.adb.get_tx_height(base_tx.txid()).height == TX_HEIGHT_LOCAL
                base_tx = PartialTransaction.from_tx(base_tx)
                base_tx.add_info_from_wallet(self)
                base_tx_fee = base_tx.get_fee()
                relayfeerate = Decimal(self.relayfee()) / 1000
                original_fee_estimator = fee_estimator
                def fee_estimator(size: Union[int, float, Decimal]) -> int:
                    # the replacement must pay at least the old fee plus the
                    # relay fee for its own size (unless the old tx is local-only)
                    size = Decimal(size)
                    lower_bound = base_tx_fee + round(size * relayfeerate)
                    lower_bound = lower_bound if not is_local else 0
                    return int(max(lower_bound, original_fee_estimator(size)))
                txi = base_tx.inputs()
                txo = list(filter(lambda o: not self.is_change(o.address), base_tx.outputs()))
                old_change_addrs = [o.address for o in base_tx.outputs() if self.is_change(o.address)]
            else:
                txi = []
                txo = []
                old_change_addrs = []
            # change address. if empty, coin_chooser will set it
            change_addrs = self.get_change_addresses_for_new_transaction(change_addr or old_change_addrs)
            tx = coin_chooser.make_tx(
                coins=coins,
                inputs=txi,
                outputs=list(outputs) + txo,
                change_addrs=change_addrs,
                fee_estimator_vb=fee_estimator,
                dust_threshold=self.dust_threshold())
        else:
            # "spend max" branch
            # note: This *will* spend inputs with negative effective value (if there are any).
            #       Given as the user is spending "max", and so might be abandoning the wallet,
            #       try to include all UTXOs, otherwise leftover might remain in the UTXO set
            #       forever. see #5433
            # note: Actually, it might be the case that not all UTXOs from the wallet are
            #       being spent if the user manually selected UTXOs.
            sendable = sum(map(lambda c: c.value_sats(), coins))
            for (_,i) in i_max:
                outputs[i].value = 0
            tx = PartialTransaction.from_io(list(coins), list(outputs))
            fee = fee_estimator(tx.estimated_size())
            amount = sendable - tx.output_value() - fee
            if amount < 0:
                raise NotEnoughFunds()
            distr_amount = 0
            for (weight, i) in i_max:
                val = int((amount/i_max_sum) * weight)
                outputs[i].value = val
                distr_amount += val
            # give any rounding remainder to the last max-spend output
            (x,i) = i_max[-1]
            outputs[i].value += (amount - distr_amount)
            tx = PartialTransaction.from_io(list(coins), list(outputs))
        # Timelock tx to current height.
        tx.locktime = get_locktime_for_new_transaction(self.network)
        tx.set_rbf(rbf)
        tx.add_info_from_wallet(self)
        run_hook('make_unsigned_transaction', self, tx)
        return tx
def mktx(self, *,
         outputs: List[PartialTxOutput],
         password=None, fee=None, change_addr=None,
         domain=None, rbf=False, nonlocal_only=False,
         tx_version=None, sign=True) -> PartialTransaction:
    """Convenience wrapper: select coins, build an unsigned tx, and
    optionally sign it with *password*."""
    spendable = self.get_spendable_coins(domain, nonlocal_only=nonlocal_only)
    tx = self.make_unsigned_transaction(
        coins=spendable,
        outputs=outputs,
        fee=fee,
        change_addr=change_addr,
        rbf=rbf,
    )
    if tx_version is not None:
        tx.version = tx_version
    if sign:
        self.sign_transaction(tx, password)
    return tx
def is_frozen_address(self, addr: str) -> bool:
    """Whether the user explicitly froze this address (membership in the frozen set)."""
    return addr in self._frozen_addresses
def is_frozen_coin(self, utxo: PartialTxInput) -> bool:
    """Whether this UTXO should be treated as frozen.

    The per-coin frozen flag is tri-state: True/False when the user
    explicitly set it, None otherwise. In the None case we fall back
    to the small-and-unconfirmed heuristic.
    """
    frozen = self._frozen_coins.get(utxo.prevout.to_str())
    if frozen is None:
        return self._is_coin_small_and_unconfirmed(utxo)
    return bool(frozen)
def _is_coin_small_and_unconfirmed(self, utxo: PartialTxInput) -> bool:
    """If true, the coin should not be spent.
    The idea here is that an attacker might send us a UTXO in a
    large low-fee unconfirmed tx that will ~never confirm. If we
    spend it as part of a tx ourselves, that too will not confirm
    (unless we use a high fee, but that might not be worth it for
    a small value UTXO).
    In particular, this test triggers for large "dusting transactions"
    that are used for advertising purposes by some entities.
    see #6960
    """
    # confirmed UTXOs are fine; check this first for performance:
    block_height = utxo.block_height
    assert block_height is not None
    if block_height > 0:
        return False
    # exempt large value UTXOs (threshold is in satoshis, default 5000)
    value_sats = utxo.value_sats()
    assert value_sats is not None
    threshold = self.config.get('unconf_utxo_freeze_threshold', 5_000)
    if value_sats >= threshold:
        return False
    # if funding tx has any is_mine input, then UTXO is fine
    funding_tx = self.db.get_transaction(utxo.prevout.txid.hex())
    if funding_tx is None:
        # we should typically have the funding tx available;
        # might not have it e.g. while not up_to_date
        return True
    if any(self.is_mine(self.adb.get_txin_address(txin))
           for txin in funding_tx.inputs()):
        return False
    return True
def set_frozen_state_of_addresses(self, addrs: Sequence[str], freeze: bool) -> bool:
    """Set frozen state of the addresses to FREEZE, True or False.

    Returns False (and does nothing) unless all addresses are is_mine.
    """
    if not all(self.is_mine(addr) for addr in addrs):
        return False
    addr_set = set(addrs)
    with self._freeze_lock:
        if freeze:
            self._frozen_addresses |= addr_set
        else:
            self._frozen_addresses -= addr_set
        self.db.put('frozen_addresses', list(self._frozen_addresses))
    util.trigger_callback('status')
    return True
def set_frozen_state_of_coins(self, utxos: Sequence[str], freeze: bool) -> None:
    """Set frozen state of the utxos to FREEZE, True or False."""
    # basic sanity check that input is not garbage: parse every
    # outpoint string first (raises before any state is mutated)
    for utxo in utxos:
        TxOutpoint.from_str(utxo)
    flag = bool(freeze)
    with self._freeze_lock:
        for utxo in utxos:
            self._frozen_coins[utxo] = flag
    util.trigger_callback('status')
def is_address_reserved(self, addr: str) -> bool:
    """Whether *addr* is marked reserved."""
    # note: atm 'reserved' status is only taken into consideration for 'change addresses'
    return addr in self._reserved_addresses
def set_reserved_state_of_address(self, addr: str, *, reserved: bool) -> None:
    """Mark or unmark *addr* as reserved; no-op for non-is_mine addresses."""
    if not self.is_mine(addr):
        return
    with self.lock:
        if reserved:
            self._reserved_addresses.add(addr)
        else:
            self._reserved_addresses.discard(addr)
        self.db.put('reserved_addresses', list(self._reserved_addresses))
def can_export(self):
    """Whether private keys can be exported (not watching-only and keystore supports it)."""
    return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def bump_fee(
        self,
        *,
        tx: Transaction,
        txid: str = None,
        new_fee_rate: Union[int, float, Decimal],
        coins: Sequence[PartialTxInput] = None,
        strategies: Sequence[BumpFeeStrategy] = None,
) -> PartialTransaction:
    """Increase the miner fee of 'tx'.
    'new_fee_rate' is the target min rate in sat/vbyte
    'coins' is a list of UTXOs we can choose from as potential new inputs to be added
    'strategies' are tried in order until one succeeds; defaults to
    [COINCHOOSER, DECREASE_CHANGE].

    Raises CannotBumpFee if the tx is final, input data cannot be
    fetched, the new rate is not higher than the old one, or all
    strategies fail.
    """
    txid = txid or tx.txid()
    assert txid
    assert tx.txid() in (None, txid)
    if not isinstance(tx, PartialTransaction):
        tx = PartialTransaction.from_tx(tx)
    assert isinstance(tx, PartialTransaction)
    tx.remove_signatures()
    if tx.is_final():
        raise CannotBumpFee(_('Transaction is final'))
    new_fee_rate = quantize_feerate(new_fee_rate)  # strip excess precision
    try:
        # note: this might download input utxos over network
        tx.add_info_from_wallet(self, ignore_network_issues=False)
    except NetworkException as e:
        raise CannotBumpFee(repr(e))
    old_tx_size = tx.estimated_size()
    old_fee = tx.get_fee()
    assert old_fee is not None
    old_fee_rate = old_fee / old_tx_size  # sat/vbyte
    if new_fee_rate <= old_fee_rate:
        raise CannotBumpFee(_("The new fee rate needs to be higher than the old fee rate."))
    if not strategies:
        strategies = [BumpFeeStrategy.COINCHOOSER, BumpFeeStrategy.DECREASE_CHANGE]
    tx_new = None
    exc = None
    # try the strategies in order; remember the last failure so it can
    # be re-raised if none of them produces a tx
    for strat in strategies:
        try:
            if strat == BumpFeeStrategy.COINCHOOSER:
                tx_new = self._bump_fee_through_coinchooser(
                    tx=tx,
                    txid=txid,
                    new_fee_rate=new_fee_rate,
                    coins=coins,
                )
            elif strat == BumpFeeStrategy.DECREASE_CHANGE:
                tx_new = self._bump_fee_through_decreasing_change(
                    tx=tx, new_fee_rate=new_fee_rate)
            elif strat == BumpFeeStrategy.DECREASE_PAYMENT:
                tx_new = self._bump_fee_through_decreasing_payment(
                    tx=tx, new_fee_rate=new_fee_rate)
            else:
                raise NotImplementedError(f"unexpected strategy: {strat}")
        except CannotBumpFee as e:
            exc = e
        else:
            strat_used = strat
            break
    if tx_new is None:
        assert exc
        raise exc  # all strategies failed, re-raise last exception
    # sanity check: the produced tx must actually reach the target rate
    # (+1 sat slack for rounding)
    target_min_fee = new_fee_rate * tx_new.estimated_size()
    actual_fee = tx_new.get_fee()
    if actual_fee + 1 < target_min_fee:
        raise CannotBumpFee(
            f"bump_fee fee target was not met (strategy: {strat_used}). "
            f"got {actual_fee}, expected >={target_min_fee}. "
            f"target rate was {new_fee_rate}")
    tx_new.locktime = get_locktime_for_new_transaction(self.network)
    tx_new.set_rbf(True)
    tx_new.add_info_from_wallet(self)
    return tx_new
def _bump_fee_through_coinchooser(
        self,
        *,
        tx: PartialTransaction,
        txid: str,
        new_fee_rate: Union[int, Decimal],
        coins: Sequence[PartialTxInput] = None,
) -> PartialTransaction:
    """Increase the miner fee of 'tx'.
    - keeps all inputs
    - keeps all not is_mine outputs,
    - allows adding new inputs

    Raises CannotBumpFee if it cannot decide which outputs to keep or
    there are not enough funds.
    """
    assert txid
    tx = copy.deepcopy(tx)
    tx.add_info_from_wallet(self)
    assert tx.get_fee() is not None
    old_inputs = list(tx.inputs())
    old_outputs = list(tx.outputs())
    # change address
    old_change_addrs = [o.address for o in old_outputs if self.is_change(o.address)]
    change_addrs = self.get_change_addresses_for_new_transaction(old_change_addrs)
    # which outputs to keep?
    if old_change_addrs:
        fixed_outputs = list(filter(lambda o: not self.is_change(o.address), old_outputs))
    else:
        if all(self.is_mine(o.address) for o in old_outputs):
            # all outputs are is_mine and none of them are change.
            # we bail out as it's unclear what the user would want!
            # the coinchooser bump fee method is probably not a good idea in this case
            raise CannotBumpFee(_('All outputs are non-change is_mine'))
        old_not_is_mine = list(filter(lambda o: not self.is_mine(o.address), old_outputs))
        if old_not_is_mine:
            fixed_outputs = old_not_is_mine
        else:
            fixed_outputs = old_outputs
    if not fixed_outputs:
        raise CannotBumpFee(_('Could not figure out which outputs to keep'))
    if coins is None:
        coins = self.get_spendable_coins(None)
    # make sure we don't try to spend output from the tx-to-be-replaced:
    coins = [c for c in coins if c.prevout.txid.hex() != txid]
    for item in coins:
        self.add_input_info(item)
    def fee_estimator(size):
        # new_fee_rate is sat/vbyte; estimate_fee_for_feerate takes sat/kvB
        return self.config.estimate_fee_for_feerate(fee_per_kb=new_fee_rate*1000, size=size)
    coin_chooser = coinchooser.get_coin_chooser(self.config)
    try:
        return coin_chooser.make_tx(
            coins=coins,
            inputs=old_inputs,
            outputs=fixed_outputs,
            change_addrs=change_addrs,
            fee_estimator_vb=fee_estimator,
            dust_threshold=self.dust_threshold())
    except NotEnoughFunds as e:
        raise CannotBumpFee(e)
def _bump_fee_through_decreasing_change(
        self,
        *,
        tx: PartialTransaction,
        new_fee_rate: Union[int, Decimal],
) -> PartialTransaction:
    """Increase the miner fee of 'tx'.
    - keeps all inputs
    - no new inputs are added
    - allows decreasing and removing outputs (change is decreased first)
    This is less "safe" than "coinchooser" method as it might end up decreasing
    e.g. a payment to a merchant; but e.g. if the user has sent "Max" previously,
    this is the only way to RBF.

    Raises CannotBumpFee if there are no candidate outputs, or the fee
    target cannot be met by shrinking/removing them.
    """
    tx = copy.deepcopy(tx)
    tx.add_info_from_wallet(self)
    assert tx.get_fee() is not None
    inputs = tx.inputs()
    outputs = tx._outputs  # note: we will mutate this directly
    # use own outputs
    s = list(filter(lambda o: self.is_mine(o.address), outputs))
    # ... unless there is none
    if not s:
        s = [out for out in outputs if self._is_rbf_allowed_to_touch_tx_output(out)]
        if not s:
            raise CannotBumpFee('No outputs at all??')
    # prioritize low value outputs, to get rid of dust
    s = sorted(s, key=lambda o: o.value)
    for o in s:
        # recompute target each pass: removing an output shrinks the tx
        target_fee = int(math.ceil(tx.estimated_size() * new_fee_rate))
        delta = target_fee - tx.get_fee()
        if delta <= 0:
            break
        i = outputs.index(o)
        if o.value - delta >= self.dust_threshold():
            # shrinking this output alone covers the remaining delta
            new_output_value = o.value - delta
            assert isinstance(new_output_value, int)
            outputs[i].value = new_output_value
            delta = 0
            break
        else:
            del outputs[i]
            # note: we mutated the outputs of tx, which will affect
            # tx.estimated_size() in the next iteration
    else:
        # recompute delta if there was no next iteration
        target_fee = int(math.ceil(tx.estimated_size() * new_fee_rate))
        delta = target_fee - tx.get_fee()
    if delta > 0:
        raise CannotBumpFee(_('Could not find suitable outputs'))
    return PartialTransaction.from_io(inputs, outputs)
def _bump_fee_through_decreasing_payment(
        self,
        *,
        tx: PartialTransaction,
        new_fee_rate: Union[int, Decimal],
) -> PartialTransaction:
    """Increase the miner fee of 'tx'.
    - keeps all inputs
    - no new inputs are added
    - decreases payment outputs (not change!). Each non-ismine output is decreased
      proportionally to their byte-size.

    Raises CannotBumpFee if no payment output exists or the target
    rate cannot be reached.
    """
    tx = copy.deepcopy(tx)
    tx.add_info_from_wallet(self)
    assert tx.get_fee() is not None
    inputs = tx.inputs()
    outputs = tx.outputs()
    # select non-ismine outputs
    s = [(idx, out) for (idx, out) in enumerate(outputs)
         if not self.is_mine(out.address)]
    # skip outputs that RBF is not allowed to touch (e.g. billing/swap outputs)
    s = [(idx, out) for (idx, out) in s if self._is_rbf_allowed_to_touch_tx_output(out)]
    if not s:
        raise CannotBumpFee("Cannot find payment output")
    del_out_idxs = set()
    tx_size = tx.estimated_size()
    cur_fee = tx.get_fee()
    # Main loop. Each iteration decreases value of all selected outputs.
    # The number of iterations is bounded by len(s) as only the final iteration
    # can *not remove* any output.
    for __ in range(len(s) + 1):
        target_fee = int(math.ceil(tx_size * new_fee_rate))
        delta_total = target_fee - cur_fee
        if delta_total <= 0:
            break
        # distribute the shortfall over surviving outputs, weighted by size
        out_size_total = sum(Transaction.estimated_output_size_for_script(out.scriptpubkey.hex())
                             for (idx, out) in s if idx not in del_out_idxs)
        for idx, out in s:
            out_size = Transaction.estimated_output_size_for_script(out.scriptpubkey.hex())
            delta = int(math.ceil(delta_total * out_size / out_size_total))
            if out.value - delta >= self.dust_threshold():
                new_output_value = out.value - delta
                assert isinstance(new_output_value, int)
                outputs[idx].value = new_output_value
                cur_fee += delta
            else:  # remove output
                tx_size -= out_size
                cur_fee += out.value
                del_out_idxs.add(idx)
    if delta_total > 0:
        raise CannotBumpFee(_('Could not find suitable outputs'))
    outputs = [out for (idx, out) in enumerate(outputs) if idx not in del_out_idxs]
    return PartialTransaction.from_io(inputs, outputs)
def _is_rbf_allowed_to_touch_tx_output(self, txout: TxOutput) -> bool:
    """Whether an RBF strategy may decrease or remove this output."""
    # 2fa fee outputs if present, should not be removed or have their value decreased
    if self.is_billing_address(txout.address):
        return False
    # submarine swap funding outputs must not be decreased
    swap_mgr = self.lnworker.swap_manager if self.lnworker else None
    if swap_mgr is not None and swap_mgr.is_lockup_address_for_a_swap(txout.address):
        return False
    return True
def cpfp(self, tx: Transaction, fee: int) -> Optional[PartialTransaction]:
    """Child-Pays-For-Parent: build a child tx spending an is_mine output
    of *tx* back to ourselves, paying *fee* sat in total.

    Raises CannotCPFP if no suitable output/coin is found or the
    remaining value would be dust.
    """
    txid = tx.txid()
    # find the first output of the parent that belongs to this wallet;
    # loop variable `i` is reused below (Python keeps it bound after break)
    for i, o in enumerate(tx.outputs()):
        address, value = o.address, o.value
        if self.is_mine(address):
            break
    else:
        raise CannotCPFP(_("Could not find suitable output"))
    coins = self.adb.get_addr_utxo(address)
    item = coins.get(TxOutpoint.from_str(txid+':%d'%i))
    if not item:
        raise CannotCPFP(_("Could not find coins for output"))
    inputs = [item]
    # prefer a fresh change address; fall back to an unused receiving
    # address, or (last resort) reuse the parent's own address
    out_address = (self.get_single_change_address_for_new_transaction(allow_reusing_used_change_addrs=False)
                   or self.get_unused_address()
                   or address)
    output_value = value - fee
    if output_value < self.dust_threshold():
        raise CannotCPFP(_("The output value remaining after fee is too low."))
    outputs = [PartialTxOutput.from_address_and_value(out_address, output_value)]
    locktime = get_locktime_for_new_transaction(self.network)
    tx_new = PartialTransaction.from_io(inputs, outputs, locktime=locktime)
    tx_new.set_rbf(True)
    tx_new.add_info_from_wallet(self)
    return tx_new
def dscancel(
        self, *, tx: Transaction, new_fee_rate: Union[int, float, Decimal]
) -> PartialTransaction:
    """Double-Spend-Cancel: cancel an unconfirmed tx by double-spending
    its inputs, paying ourselves.
    'new_fee_rate' is the target min rate in sat/vbyte

    Raises CannotDoubleSpendTx if the tx is final, the rate is not
    higher than the old one, no is_mine inputs exist, or the remaining
    value would be dust.
    """
    if not isinstance(tx, PartialTransaction):
        tx = PartialTransaction.from_tx(tx)
    assert isinstance(tx, PartialTransaction)
    tx.remove_signatures()
    if tx.is_final():
        raise CannotDoubleSpendTx(_('Transaction is final'))
    new_fee_rate = quantize_feerate(new_fee_rate)  # strip excess precision
    try:
        # note: this might download input utxos over network
        tx.add_info_from_wallet(self, ignore_network_issues=False)
    except NetworkException as e:
        raise CannotDoubleSpendTx(repr(e))
    old_tx_size = tx.estimated_size()
    old_fee = tx.get_fee()
    assert old_fee is not None
    old_fee_rate = old_fee / old_tx_size  # sat/vbyte
    if new_fee_rate <= old_fee_rate:
        raise CannotDoubleSpendTx(_("The new fee rate needs to be higher than the old fee rate."))
    # grab all ismine inputs
    inputs = [txin for txin in tx.inputs()
              if self.is_mine(self.adb.get_txin_address(txin))]
    if not inputs:
        # without any is_mine inputs we cannot double-spend the tx;
        # fail early with a clear message instead of a misleading
        # "output value too low" error from value == 0 below
        raise CannotDoubleSpendTx(_("Transaction has no inputs ismine."))
    value = sum([txin.value_sats() for txin in inputs])
    # figure out output address
    old_change_addrs = [o.address for o in tx.outputs() if self.is_mine(o.address)]
    out_address = (self.get_single_change_address_for_new_transaction(old_change_addrs)
                   or self.get_receiving_address())
    locktime = get_locktime_for_new_transaction(self.network)
    # build a draft paying the full value, only to estimate the new tx size
    outputs = [PartialTxOutput.from_address_and_value(out_address, value)]
    tx_new = PartialTransaction.from_io(inputs, outputs, locktime=locktime)
    new_tx_size = tx_new.estimated_size()
    new_fee = max(
        new_fee_rate * new_tx_size,
        old_fee + self.relayfee() * new_tx_size / Decimal(1000),  # BIP-125 rules 3 and 4
    )
    new_fee = int(math.ceil(new_fee))
    output_value = value - new_fee
    if output_value < self.dust_threshold():
        raise CannotDoubleSpendTx(_("The output value remaining after fee is too low."))
    outputs = [PartialTxOutput.from_address_and_value(out_address, output_value)]
    tx_new = PartialTransaction.from_io(inputs, outputs, locktime=locktime)
    tx_new.set_rbf(True)
    tx_new.add_info_from_wallet(self)
    return tx_new
@abstractmethod
def _add_input_sig_info(self, txin: PartialTxInput, address: str, *, only_der_suffix: bool) -> None:
    # Subclass hook called from add_input_info; presumably attaches
    # keystore-specific signing metadata to the txin — see subclasses.
    pass
def _add_txinout_derivation_info(self, txinout: Union[PartialTxInput, PartialTxOutput],
                                 address: str, *, only_der_suffix: bool) -> None:
    # default no-op
    pass  # implemented by subclasses
def _add_input_utxo_info(
        self,
        txin: PartialTxInput,
        *,
        address: str = None,
        ignore_network_issues: bool = True,
) -> None:
    """Populate txin.witness_utxo and txin.utxo from wallet/network data."""
    # - We prefer to include UTXO (full tx), even for segwit inputs (see #6198).
    # - For witness v0 inputs, we include *both* UTXO and WITNESS_UTXO. UTXO is a strict superset,
    #   so this is redundant, but it is (implied to be) "expected" from bip-0174 (see #8039).
    #   Regardless, this might improve compatibility with some other software.
    # - For witness v1, witness_utxo will be enough though (bip-0341 sighash fixes known prior issues).
    # - We cannot include UTXO if the prev tx is not signed yet (chain of unsigned txs).
    address = address or txin.address
    if txin.witness_utxo is None and txin.is_segwit() and address:
        received, spent = self.adb.get_addr_io(address)
        item = received.get(txin.prevout.to_str())
        if item:
            txin_value = item[1]
            txin.witness_utxo = TxOutput.from_address_and_value(address, txin_value)
    if txin.utxo is None:
        # may hit the network unless ignore_network_issues suppresses errors
        txin.utxo = self.get_input_tx(txin.prevout.txid.hex(), ignore_network_issues=ignore_network_issues)
def _learn_derivation_path_for_address_from_txinout(self, txinout: Union[PartialTxInput, PartialTxOutput],
                                                    address: str) -> bool:
    """Tries to learn the derivation path for an address (potentially beyond gap limit)
    using data available in given txin/txout.
    Returns whether the address was found to be is_mine.
    """
    # base class cannot learn anything; deterministic subclasses override
    return False  # implemented by subclasses
def add_input_info(
        self,
        txin: PartialTxInput,
        *,
        only_der_suffix: bool = False,
        ignore_network_issues: bool = True,
) -> None:
    """Fill in everything the wallet knows about *txin*: utxo data,
    script type, num_sig, redeem/witness scripts, signing info, height."""
    address = self.adb.get_txin_address(txin)
    # note: we add input utxos regardless of is_mine
    self._add_input_utxo_info(txin, ignore_network_issues=ignore_network_issues, address=address)
    is_mine = self.is_mine(address)
    if not is_mine:
        is_mine = self._learn_derivation_path_for_address_from_txinout(txin, address)
        if not is_mine:
            # not ours: the input may still belong to a submarine swap
            if self.lnworker:
                self.lnworker.swap_manager.add_txin_info(txin)
            return
    # set script_type first, as later checks might rely on it:
    txin.script_type = self.get_txin_type(address)
    txin.num_sig = self.m if isinstance(self, Multisig_Wallet) else 1
    if txin.redeem_script is None:
        try:
            redeem_script_hex = self.get_redeem_script(address)
            txin.redeem_script = bfh(redeem_script_hex) if redeem_script_hex else None
        except UnknownTxinType:
            pass
    if txin.witness_script is None:
        try:
            witness_script_hex = self.get_witness_script(address)
            txin.witness_script = bfh(witness_script_hex) if witness_script_hex else None
        except UnknownTxinType:
            pass
    self._add_input_sig_info(txin, address, only_der_suffix=only_der_suffix)
    txin.block_height = self.adb.get_tx_height(txin.prevout.txid.hex()).height
def can_sign(self, tx: Transaction) -> bool:
    """Whether this wallet could add at least one signature to *tx*."""
    if not isinstance(tx, PartialTransaction):
        return False
    if tx.is_complete():
        return False
    # add info to inputs if we can; otherwise we might return a false negative:
    tx.add_info_from_wallet(self)
    for txin in tx.inputs():
        # is_mine check avoids false positives: a keystore being able to
        # sign does not mean the input belongs to this wallet (e.g. our
        # p2pkh-like address pubkey appearing in someone else's multisig).
        if not self.is_mine(txin.address):
            continue
        if any(ks.can_sign_txin(txin) for ks in self.get_keystores()):
            return True
    return bool(self.is_swap_tx(tx))
def get_input_tx(self, tx_hash: str, *, ignore_network_issues=False) -> Optional[Transaction]:
    """Return the full previous transaction for *tx_hash*, from the wallet
    db if possible, otherwise from the network.

    Returns None (or raises NetworkException, if ignore_network_issues
    is False) when the tx cannot be obtained.
    """
    # First look up an input transaction in the wallet where it
    # will likely be. If co-signing a transaction it may not have
    # all the input txs, in which case we ask the network.
    tx = self.db.get_transaction(tx_hash)
    if not tx and self.network and self.network.has_internet_connection():
        try:
            raw_tx = self.network.run_from_another_thread(
                self.network.get_transaction(tx_hash, timeout=10))
        except NetworkException as e:
            _logger.info(f'got network error getting input txn. err: {repr(e)}. txid: {tx_hash}. '
                         f'if you are intentionally offline, consider using the --offline flag')
            if not ignore_network_issues:
                raise e
        else:
            # only deserialize on the success path
            tx = Transaction(raw_tx)
    if not tx and not ignore_network_issues:
        raise NetworkException('failed to get prev tx from network')
    return tx
def add_output_info(self, txout: PartialTxOutput, *, only_der_suffix: bool = False) -> None:
    """Fill in wallet-known metadata on an is_mine output: script type,
    change flag, num_sig, derivation info, redeem/witness scripts."""
    address = txout.address
    if not self.is_mine(address):
        # may still turn out to be ours beyond the gap limit
        is_mine = self._learn_derivation_path_for_address_from_txinout(txout, address)
        if not is_mine:
            return
    txout.script_type = self.get_txin_type(address)
    txout.is_mine = True
    txout.is_change = self.is_change(address)
    if isinstance(self, Multisig_Wallet):
        txout.num_sig = self.m
    self._add_txinout_derivation_info(txout, address, only_der_suffix=only_der_suffix)
    if txout.redeem_script is None:
        try:
            redeem_script_hex = self.get_redeem_script(address)
            txout.redeem_script = bfh(redeem_script_hex) if redeem_script_hex else None
        except UnknownTxinType:
            pass
    if txout.witness_script is None:
        try:
            witness_script_hex = self.get_witness_script(address)
            txout.witness_script = bfh(witness_script_hex) if witness_script_hex else None
        except UnknownTxinType:
            pass
def sign_transaction(self, tx: Transaction, password) -> Optional[PartialTransaction]:
    """Sign *tx* in place with all capable keystores; returns the tx,
    or None for watching-only wallets / non-partial transactions."""
    if self.is_watching_only():
        return
    if not isinstance(tx, PartialTransaction):
        return
    # note: swap signing does not require the password
    swap = self.lnworker.swap_manager.get_swap_by_tx(tx) if self.lnworker else None
    if swap:
        self.lnworker.swap_manager.sign_tx(tx, swap)
        return
    # add info to a temporary tx copy; including xpubs
    # and full derivation paths as hw keystores might want them
    tmp_tx = copy.deepcopy(tx)
    tmp_tx.add_info_from_wallet(self, include_xpubs=True)
    # sign. start with ready keystores.
    # note: ks.ready_to_sign() side-effect: we trigger pairings with potential hw devices.
    #       We only do this once, before the loop, however we could rescan after each iteration,
    #       to see if the user connected/disconnected devices in the meantime.
    for k in sorted(self.get_keystores(), key=lambda ks: ks.ready_to_sign(), reverse=True):
        try:
            if k.can_sign(tmp_tx):
                k.sign_transaction(tmp_tx, password)
        except UserCancelled:
            # user declined this keystore; try the remaining ones
            continue
    # remove sensitive info; then copy back details from temporary tx
    tmp_tx.remove_xpubs_and_bip32_paths()
    tx.combine_with_other_psbt(tmp_tx)
    tx.add_info_from_wallet(self, include_xpubs=False)
    return tx
def try_detecting_internal_addresses_corruption(self) -> None:
    # default no-op; subclasses may override with actual checks
    pass
def check_address_for_corruption(self, addr: str) -> None:
    # default no-op; subclasses may override with actual checks
    pass
def get_unused_addresses(self) -> Sequence[str]:
    """Receiving addresses that are neither used on-chain nor tied to a receive request."""
    candidates = self.get_receiving_addresses()
    return [addr for addr in candidates
            if not self.adb.is_used(addr) and not self.get_request_by_addr(addr)]
@check_returned_address_for_corruption
def get_unused_address(self) -> Optional[str]:
    """Get an unused receiving address, if there is one.
    Note: there might NOT be one available!
    """
    unused = self.get_unused_addresses()
    return unused[0] if unused else None
@check_returned_address_for_corruption
def get_receiving_address(self) -> str:
    """Get a receiving address. Guaranteed to always return an address."""
    unused_addr = self.get_unused_address()
    if unused_addr:
        return unused_addr
    domain = self.get_receiving_addresses()
    if not domain:
        raise Exception("no receiving addresses in wallet?!")
    # fall back: prefer an unused address without a request; failing
    # that, the last unused-but-requested one; failing that, the first.
    fallback = domain[0]
    for addr in domain:
        if self.adb.is_used(addr):
            continue
        if self.get_request_by_addr(addr) is None:
            return addr
        fallback = addr
    return fallback
def create_new_address(self, for_change: bool = False):
    # overridden by deterministic wallet subclasses
    raise Exception("this wallet cannot generate new addresses")
def import_address(self, address: str) -> str:
    # overridden by imported-address wallet subclasses
    raise Exception("this wallet cannot import addresses")
def import_addresses(self, addresses: List[str], *,
                     write_to_disk=True) -> Tuple[List[str], List[Tuple[str, str]]]:
    # overridden by imported-address wallet subclasses
    raise Exception("this wallet cannot import addresses")
def delete_address(self, address: str) -> None:
    # overridden by imported-address wallet subclasses
    raise Exception("this wallet cannot delete addresses")
def get_request_URI(self, req: Invoice) -> Optional[str]:
    """BIP21 payment URI for *req*, optionally embedding the lightning invoice."""
    with_lightning = bool(self.config.get('bip21_lightning', False))
    return req.get_bip21_URI(include_lightning=with_lightning)
def check_expired_status(self, r: Invoice, status):
    """Map PR_UNPAID to PR_EXPIRED if the invoice/request has expired."""
    if status == PR_UNPAID and r.has_expired():
        return PR_EXPIRED
    return status
def get_invoice_status(self, invoice: Invoice):
    """Returns status of (incoming) request or (outgoing) invoice."""
    # lightning invoices can be paid onchain, so only an LN status
    # other than PR_UNPAID is authoritative
    if invoice.is_lightning() and self.lnworker:
        ln_status = self.lnworker.get_invoice_status(invoice)
        if ln_status != PR_UNPAID:
            return self.check_expired_status(invoice, ln_status)
    paid, conf = self.is_onchain_invoice_paid(invoice)
    if not paid:
        onchain_status = PR_UNPAID
    elif conf == 0:
        onchain_status = PR_UNCONFIRMED
    else:
        assert conf >= 1, conf
        onchain_status = PR_PAID
    return self.check_expired_status(invoice, onchain_status)
def get_request_by_addr(self, addr: str) -> Optional[Invoice]:
    """Returns a relevant request for address, from an on-chain PoV.
    (One that has been paid on-chain or is pending)
    Called in get_label_for_address and update_invoices_and_reqs_touched_by_tx
    Returns None if the address can be reused (i.e. was paid by lightning or has expired)
    """
    keys = self._requests_addr_to_key.get(addr) or []
    candidates = [self._receive_requests.get(key) for key in keys]
    candidates = [req for req in candidates if req]  # drop missing entries
    # drop expired requests
    candidates = [req for req in candidates
                  if self.get_invoice_status(req) != PR_EXPIRED]
    # drop requests already settled over lightning
    if self.lnworker:
        candidates = [req for req in candidates
                      if not req.is_lightning()
                      or self.lnworker.get_invoice_status(req) == PR_UNPAID]
    if not candidates:
        return None
    # note: there typically should not be more than one relevant request for an address
    return candidates[0]
def get_request(self, request_id: str) -> Optional[Invoice]:
    # receive requests are keyed by Invoice.get_id()
    return self._receive_requests.get(request_id)
def get_formatted_request(self, request_id):
    """Export the request with this id as a dict, or None if unknown."""
    req = self.get_request(request_id)
    return self.export_request(req) if req else None
def export_request(self, x: Invoice) -> Dict[str, Any]:
    """Serialize an incoming payment request into a plain dict
    (for RPC/GUI consumption), including LN and/or on-chain fields."""
    key = x.get_id()
    status = self.get_invoice_status(x)
    status_str = x.get_status_str(status)
    is_lightning = x.is_lightning()
    address = x.get_address()
    d = {
        'is_lightning': is_lightning,
        'amount_BTC': format_satoshis(x.get_amount_sat()),
        'message': x.message,
        'timestamp': x.get_time(),
        'expiration': x.get_expiration_date(),
        'status': status,
        'status_str': status_str,
        'request_id': key,
        "tx_hashes": []
    }
    if is_lightning:
        d['rhash'] = x.rhash
        d['lightning_invoice'] = x.lightning_invoice
        d['amount_msat'] = x.get_amount_msat()
        if self.lnworker and status == PR_UNPAID:
            d['can_receive'] = self.lnworker.can_receive_invoice(x)
    if address:
        # NOTE(review): int(...) assumes get_amount_sat() is numeric for
        # on-chain requests — verify it cannot be None/"!" here
        d['amount_sat'] = int(x.get_amount_sat())
        d['address'] = address
        d['URI'] = self.get_request_URI(x)
        # if request was paid onchain, add relevant fields
        # note: addr is reused when getting paid on LN! so we check for that.
        is_paid, conf, tx_hashes = self._is_onchain_invoice_paid(x)
        if is_paid and (not self.lnworker or self.lnworker.get_invoice_status(x) != PR_PAID):
            if conf is not None:
                d['confirmations'] = conf
            d['tx_hashes'] = tx_hashes
    run_hook('wallet_export_request', d, key)
    return d
def export_invoice(self, x: Invoice) -> Dict[str, Any]:
    """Serialize an outgoing invoice into a plain dict
    (for RPC/GUI consumption), including LN and/or on-chain fields."""
    key = x.get_id()
    status = self.get_invoice_status(x)
    status_str = x.get_status_str(status)
    is_lightning = x.is_lightning()
    d = {
        'is_lightning': is_lightning,
        'amount_BTC': format_satoshis(x.get_amount_sat()),
        'message': x.message,
        'timestamp': x.time,
        'expiration': x.exp,
        'status': status,
        'status_str': status_str,
        'invoice_id': key,
    }
    if is_lightning:
        d['lightning_invoice'] = x.lightning_invoice
        d['amount_msat'] = x.get_amount_msat()
        if self.lnworker and status == PR_UNPAID:
            d['can_pay'] = self.lnworker.can_pay_invoice(x)
    else:
        # amount may be an int, "!" (max), or None
        amount_sat = x.get_amount_sat()
        assert isinstance(amount_sat, (int, str, type(None)))
        d['amount_sat'] = amount_sat
        d['outputs'] = [y.to_legacy_tuple() for y in x.get_outputs()]
        if x.bip70:
            d['bip70'] = x.bip70
    return d
def _update_invoices_and_reqs_touched_by_tx(self, tx_hash: str) -> None:
    """Refresh the status of any receive request / invoice whose address
    or scriptpubkey appears in the outputs of *tx_hash*."""
    # FIXME in some cases if tx2 replaces unconfirmed tx1 in the mempool, we are not called.
    #       For a given receive request, if tx1 touches it but tx2 does not, then
    #       we were called when tx1 was added, but we will not get called when tx2 replaces tx1.
    tx = self.db.get_transaction(tx_hash)
    if tx is None:
        return
    relevant_invoice_keys = set()
    with self.transaction_lock:
        for txo in tx.outputs():
            addr = txo.address
            if request := self.get_request_by_addr(addr):
                # notify listeners (GUI) about the request's new status
                status = self.get_invoice_status(request)
                util.trigger_callback('request_status', self, request.get_id(), status)
            for invoice_key in self._invoices_from_scriptpubkey_map.get(txo.scriptpubkey, set()):
                relevant_invoice_keys.add(invoice_key)
    self._update_onchain_invoice_paid_detection(relevant_invoice_keys)
def create_request(self, amount_sat: int, message: str, exp_delay: int, address: Optional[str]):
    """Create and store a new receive request.

    amount_sat: requested amount (0/None means no amount)
    exp_delay: expiry in seconds (0/None means no expiry)
    address: optional on-chain fallback/receive address
    Returns the request id.
    """
    # for receiving
    amount_sat = amount_sat or 0
    assert isinstance(amount_sat, int), f"{amount_sat!r}"
    message = message or ''
    address = address or None  # converts "" to None
    exp_delay = exp_delay or 0
    timestamp = int(time.time())
    fallback_address = address if self.config.get('bolt11_fallback', True) else None
    lightning = self.has_lightning()
    if lightning:
        lightning_invoice = self.lnworker.add_request(
            amount_sat=amount_sat,
            message=message,
            expiry=exp_delay,
            fallback_address=fallback_address,
        )
    else:
        lightning_invoice = None
    outputs = [ PartialTxOutput.from_address_and_value(address, amount_sat)] if address else []
    height = self.adb.get_local_height()
    req = Invoice(
        outputs=outputs,
        message=message,
        time=timestamp,
        amount_msat=amount_sat*1000,
        exp=exp_delay,
        height=height,
        bip70=None,
        lightning_invoice=lightning_invoice,
    )
    key = self.add_payment_request(req)
    return key
def add_payment_request(self, req: Invoice, *, write_to_disk: bool = True):
    """Register a receive request, index it by its on-chain address (if
    any), optionally persist, and return its id."""
    key = req.get_id()
    self._receive_requests[key] = req
    addr = req.get_address()
    if addr:
        self._requests_addr_to_key[addr].add(key)
    if write_to_disk:
        self.save_db()
    return key
def delete_request(self, request_id, *, write_to_disk: bool = True):
    """Delete a receive request (lightning or on-chain); no-op if unknown."""
    req = self.get_request(request_id)
    if req is None:
        return
    self._receive_requests.pop(request_id, None)
    addr = req.get_address()
    if addr:
        self._requests_addr_to_key[addr].discard(request_id)
    if req.is_lightning() and self.lnworker:
        self.lnworker.delete_payment_info(req.rhash)
    if write_to_disk:
        self.save_db()
def delete_invoice(self, invoice_id, *, write_to_disk: bool = True):
    """Delete an outgoing invoice (lightning or on-chain); no-op if unknown."""
    inv = self._invoices.pop(invoice_id, None)
    if inv is None:
        return
    if inv.is_lightning() and self.lnworker:
        self.lnworker.delete_payment_info(inv.rhash)
    if write_to_disk:
        self.save_db()
def get_sorted_requests(self) -> List[Invoice]:
    """All receive requests, sorted by creation time."""
    reqs = [self.get_request(key) for key in self._receive_requests.keys()]
    reqs = [req for req in reqs if req is not None]
    return sorted(reqs, key=lambda req: req.time)
def get_unpaid_requests(self):
    """Receive requests not yet paid, sorted by creation time."""
    pending = [req for req in self._receive_requests.values()
               if self.get_invoice_status(req) != PR_PAID]
    return sorted(pending, key=lambda req: req.time)
@abstractmethod
def get_fingerprint(self) -> str:
    """Returns a string that can be used to identify this wallet.
    Used e.g. by Labels plugin, and LN channel backups.
    Returns empty string "" for wallets that don't have an ID.
    """
    pass
def can_import_privkey(self):
    # default: no; subclasses that hold imported keys may override
    return False
def can_import_address(self):
    # default: no; subclasses that hold imported addresses may override
    return False
def can_delete_address(self):
    # default: no; subclasses that hold imported addresses may override
    return False
def has_password(self) -> bool:
    """True if either the keystore or the wallet file is encrypted."""
    return self.has_keystore_encryption() or self.has_storage_encryption()
def can_have_keystore_encryption(self):
    """Whether this wallet's keystore supports password protection."""
    return self.keystore and self.keystore.may_have_password()
def get_available_storage_encryption_version(self) -> StorageEncryptionVersion:
    """Returns the type of storage encryption offered to the user.
    A wallet file (storage) is either encrypted with this version
    or is stored in plaintext.
    """
    if isinstance(self.keystore, Hardware_KeyStore):
        return StorageEncryptionVersion.XPUB_PASSWORD
    return StorageEncryptionVersion.USER_PASSWORD
def has_keystore_encryption(self) -> bool:
    """Returns whether encryption is enabled for the keystore.
    If True, e.g. signing a transaction will require a password.
    """
    if not self.can_have_keystore_encryption():
        return False
    return bool(self.db.get('use_encryption', False))
def has_storage_encryption(self) -> bool:
    """Returns whether encryption is enabled for the wallet file on disk."""
    # in-memory wallets (no storage) are never encrypted
    return bool(self.storage) and self.storage.is_encrypted()
@classmethod
def may_have_password(cls):
    # wallets support passwords by default; subclasses may override
    return True
def check_password(self, password):
    """Verify *password* against keystore and/or storage encryption
    (the underlying checks are expected to raise on mismatch)."""
    if self.has_keystore_encryption():
        self.keystore.check_password(password)
    if self.has_storage_encryption():
        self.storage.check_password(password)
    def update_password(self, old_pw, new_pw, *, encrypt_storage: bool = True):
        """Change the wallet password from *old_pw* to *new_pw*.

        Verifies the old password, then re-encrypts the on-disk storage
        (plaintext if *encrypt_storage* is False) and the keystore(s),
        and finally persists the database.  Raises InvalidPassword if
        the wallet has a password but *old_pw* is None or wrong.
        """
        if old_pw is None and self.has_password():
            raise InvalidPassword()
        self.check_password(old_pw)
        if self.storage:
            if encrypt_storage:
                enc_version = self.get_available_storage_encryption_version()
            else:
                enc_version = StorageEncryptionVersion.PLAINTEXT
            self.storage.set_password(new_pw, enc_version)
            # make sure next storage.write() saves changes
            self.db.set_modified(True)

        # note: Encrypting storage with a hw device is currently only
        #       allowed for non-multisig wallets. Further,
        #       Hardware_KeyStore.may_have_password() == False.
        #       If these were not the case,
        #       extra care would need to be taken when encrypting keystores.
        self._update_password_for_keystore(old_pw, new_pw)
        encrypt_keystore = self.can_have_keystore_encryption()
        self.db.set_keystore_encryption(bool(new_pw) and encrypt_keystore)
        self.save_db()
    @abstractmethod
    def _update_password_for_keystore(self, old_pw: Optional[str], new_pw: Optional[str]) -> None:
        """Subclass hook: re-encrypt the keystore(s) from *old_pw* to *new_pw*."""
        pass
def sign_message(self, address: str, message: str, password) -> bytes:
index = self.get_address_index(address)
script_type = self.get_txin_type(address)
assert script_type != "address"
return self.keystore.sign_message(index, message, password, script_type=script_type)
def decrypt_message(self, pubkey: str, message, password) -> bytes:
addr = self.pubkeys_to_address([pubkey])
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
    @abstractmethod
    def pubkeys_to_address(self, pubkeys: Sequence[str]) -> Optional[str]:
        """Map the pubkey(s) of one output script to its address, or None if not derivable."""
        pass
def price_at_timestamp(self, txid, price_func):
"""Returns fiat price of bitcoin at the time tx got confirmed."""
timestamp = self.adb.get_tx_height(txid).timestamp
return price_func(timestamp if timestamp else time.time())
def average_price(self, txid, price_func, ccy) -> Decimal:
""" Average acquisition price of the inputs of a transaction """
input_value = 0
total_price = 0
txi_addresses = self.db.get_txi_addresses(txid)
if not txi_addresses:
return Decimal('NaN')
for addr in txi_addresses:
d = self.db.get_txi_addr(txid, addr)
for ser, v in d:
input_value += v
total_price += self.coin_price(ser.split(':')[0], price_func, ccy, v)
return total_price / (input_value/Decimal(COIN))
def clear_coin_price_cache(self):
self._coin_price_cache = {}
def coin_price(self, txid, price_func, ccy, txin_value) -> Decimal:
"""
Acquisition price of a coin.
This assumes that either all inputs are mine, or no input is mine.
"""
if txin_value is None:
return Decimal('NaN')
cache_key = "{}:{}:{}".format(str(txid), str(ccy), str(txin_value))
result = self._coin_price_cache.get(cache_key, None)
if result is not None:
return result
if self.db.get_txi_addresses(txid):
result = self.average_price(txid, price_func, ccy) * txin_value/Decimal(COIN)
self._coin_price_cache[cache_key] = result
return result
else:
fiat_value = self.get_fiat_value(txid, ccy)
if fiat_value is not None:
return fiat_value
else:
p = self.price_at_timestamp(txid, price_func)
return p * txin_value/Decimal(COIN)
def is_billing_address(self, addr):
# overridden for TrustedCoin wallets
return False
    @abstractmethod
    def is_watching_only(self) -> bool:
        """True if this wallet cannot sign (no private keys available)."""
        pass
def get_keystore(self) -> Optional[KeyStore]:
return self.keystore
def get_keystores(self) -> Sequence[KeyStore]:
return [self.keystore] if self.keystore else []
    @abstractmethod
    def save_keystore(self):
        """Subclass hook: serialize the keystore(s) into the wallet db."""
        pass
    @abstractmethod
    def has_seed(self) -> bool:
        """Whether a seed phrase is stored and can be shown to the user."""
        pass
    @abstractmethod
    def get_all_known_addresses_beyond_gap_limit(self) -> Set[str]:
        """Return our addresses that lie past a run of unused addresses of gap-limit length."""
        pass
    def create_transaction(self, outputs, *, fee=None, feerate=None, change_addr=None, domain_addr=None, domain_coins=None,
                           unsigned=False, rbf=None, password=None, locktime=None):
        """Build (and by default sign) a transaction paying *outputs*.

        Exactly one of *fee* (absolute) or *feerate* (sat/byte) may be given;
        passing both raises.  *domain_addr* / *domain_coins* restrict which
        of our coins may be spent.  Returns the (Partial)Transaction.
        """
        if fee is not None and feerate is not None:
            raise Exception("Cannot specify both 'fee' and 'feerate' at the same time!")
        coins = self.get_spendable_coins(domain_addr)
        if domain_coins is not None:
            coins = [coin for coin in coins if (coin.prevout.to_str() in domain_coins)]
        if feerate is not None:
            # convert sat/byte to sat/kb for the fee estimator
            fee_per_kb = 1000 * Decimal(feerate)
            fee_estimator = partial(SimpleConfig.estimate_fee_for_feerate, fee_per_kb)
        else:
            fee_estimator = fee
        tx = self.make_unsigned_transaction(
            coins=coins,
            outputs=outputs,
            fee=fee_estimator,
            change_addr=change_addr)
        if locktime is not None:
            tx.locktime = locktime
        if rbf is None:
            # default replace-by-fee behaviour comes from user config
            rbf = bool(self.config.get('use_rbf', True))
        tx.set_rbf(rbf)
        if not unsigned:
            self.sign_transaction(tx, password)
        return tx
    def get_warning_for_risk_of_burning_coins_as_fees(self, tx: 'PartialTransaction') -> Optional[str]:
        """Returns a warning message if there is risk of burning coins as fees if we sign.
        Note that if not all inputs are ismine, e.g. coinjoin, the risk is not just about fees.

        Note:
            - legacy sighash does not commit to any input amounts
            - BIP-0143 sighash only commits to the *corresponding* input amount
            - BIP-taproot sighash commits to *all* input amounts

        Returns None when signing is considered safe.
        """
        assert isinstance(tx, PartialTransaction)
        # if we have all full previous txs, we *know* all the input amounts -> fine
        if all([txin.utxo for txin in tx.inputs()]):
            return None
        # a single segwit input -> fine
        if len(tx.inputs()) == 1 and tx.inputs()[0].is_segwit() and tx.inputs()[0].witness_utxo:
            return None
        # coinjoin or similar
        if any([not self.is_mine(txin.address) for txin in tx.inputs()]):
            return (_("Warning") + ": "
                    + _("The input amounts could not be verified as the previous transactions are missing.\n"
                        "The amount of money being spent CANNOT be verified."))
        # some inputs are legacy
        if any([not txin.is_segwit() for txin in tx.inputs()]):
            return (_("Warning") + ": "
                    + _("The fee could not be verified. Signing non-segwit inputs is risky:\n"
                        "if this transaction was maliciously modified before you sign,\n"
                        "you might end up paying a higher mining fee than displayed."))
        # all inputs are segwit
        # https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2017-August/014843.html
        return (_("Warning") + ": "
                + _("If you received this transaction from an untrusted device, "
                    "do not accept to sign it more than once,\n"
                    "otherwise you could end up paying a different fee."))
    def get_tx_fee_warning(
            self, *,
            invoice_amt: int,
            tx_size: int,
            fee: int) -> Optional[Tuple[bool, str, str]]:
        """Sanity-check *fee* for a tx of *tx_size* bytes paying *invoice_amt* sats.

        Returns None if the fee looks reasonable, otherwise a tuple
        (allow_send, long_warning, short_warning).  allow_send is False
        only when the fee is below the server relay fee (tx would not
        propagate).
        """
        feerate = Decimal(fee) / tx_size  # sat/byte
        # treat a zero invoice amount as "fee is 100% of the payment"
        fee_ratio = Decimal(fee) / invoice_amt if invoice_amt else 1
        long_warning = None
        short_warning = None
        allow_send = True
        if feerate < self.relayfee() / 1000:
            long_warning = (
                    _("This transaction requires a higher fee, or it will not be propagated by your current server.") + " "
                    + _("Try to raise your transaction fee, or use a server with a lower relay fee."))
            short_warning = _("below relay fee") + "!"
            allow_send = False
        elif fee_ratio >= FEE_RATIO_HIGH_WARNING:
            long_warning = (
                    _('Warning') + ': ' + _("The fee for this transaction seems unusually high.")
                    + f' ({fee_ratio*100:.2f}% of amount)')
            short_warning = _("high fee ratio") + "!"
        elif feerate > FEERATE_WARNING_HIGH_FEE / 1000:
            long_warning = (
                    _('Warning') + ': ' + _("The fee for this transaction seems unusually high.")
                    + f' (feerate: {feerate:.2f} sat/byte)')
            short_warning = _("high fee rate") + "!"
        if long_warning is None:
            return None
        else:
            return allow_send, long_warning, short_warning
    def get_help_texts_for_receive_request(self, req: Invoice) -> ReceiveRequestHelp:
        """Build user-facing help/error texts for the three ways to pay *req*:
        plain address, BIP21 URI, and Lightning invoice.

        The *_is_error flags mark a payment path as unusable (as opposed
        to merely carrying an informational note).  For Lightning, may also
        return rebalance/swap suggestions when inbound capacity is missing.
        """
        key = req.get_id()
        addr = req.get_address() or ''
        amount_sat = req.get_amount_sat() or 0
        address_help = ''
        URI_help = ''
        ln_help = ''
        address_is_error = False
        URI_is_error = False
        ln_is_error = False
        ln_swap_suggestion = None
        ln_rebalance_suggestion = None
        lnaddr = req.lightning_invoice or ''
        URI = self.get_request_URI(req) or ''
        lightning_online = self.lnworker and self.lnworker.num_peers() > 0
        can_receive_lightning = self.lnworker and amount_sat <= self.lnworker.num_sats_can_receive()
        status = self.get_invoice_status(req)

        if status == PR_EXPIRED:
            address_help = URI_help = ln_help = _('This request has expired')

        # amounts below the dust threshold cannot be received on-chain
        is_amt_too_small_for_onchain = amount_sat and amount_sat < self.dust_threshold()
        if not addr:
            address_is_error = True
            address_help = _('This request cannot be paid on-chain')
            if is_amt_too_small_for_onchain:
                address_help = _('Amount too small to be received onchain')
        if not URI:
            URI_is_error = True
            URI_help = _('This request cannot be paid on-chain')
            if is_amt_too_small_for_onchain:
                URI_help = _('Amount too small to be received onchain')
        if not lnaddr:
            ln_is_error = True
            ln_help = _('This request does not have a Lightning invoice.')

        if status == PR_UNPAID:
            if self.adb.is_used(addr):
                address_help = URI_help = (_("This address has already been used. "
                                             "For better privacy, do not reuse it for new payments."))
            if lnaddr:
                if not lightning_online:
                    ln_is_error = True
                    ln_help = _('You must be online to receive Lightning payments.')
                elif not can_receive_lightning:
                    # not enough inbound capacity: suggest remedies
                    ln_is_error = True
                    ln_rebalance_suggestion = self.lnworker.suggest_rebalance_to_receive(amount_sat)
                    ln_swap_suggestion = self.lnworker.suggest_swap_to_receive(amount_sat)
                    ln_help = _('You do not have the capacity to receive this amount with Lightning.')
                    if bool(ln_rebalance_suggestion):
                        ln_help += '\n\n' + _('You may have that capacity if you rebalance your channels.')
                    elif bool(ln_swap_suggestion):
                        ln_help += '\n\n' + _('You may have that capacity if you swap some of your funds.')
        return ReceiveRequestHelp(
            address_help=address_help,
            URI_help=URI_help,
            ln_help=ln_help,
            address_is_error=address_is_error,
            URI_is_error=URI_is_error,
            ln_is_error=ln_is_error,
            ln_rebalance_suggestion=ln_rebalance_suggestion,
            ln_swap_suggestion=ln_swap_suggestion,
        )
def synchronize(self) -> int:
"""Returns the number of new addresses we generated."""
return 0
class Simple_Wallet(Abstract_Wallet):
    # wallet with a single keystore

    def is_watching_only(self):
        """Delegates to the keystore: no privkeys -> watch-only."""
        return self.keystore.is_watching_only()

    def _update_password_for_keystore(self, old_pw, new_pw):
        """Re-encrypt the single keystore and persist it to the db."""
        if self.keystore and self.keystore.may_have_password():
            self.keystore.update_password(old_pw, new_pw)
            self.save_keystore()

    def save_keystore(self):
        self.db.put('keystore', self.keystore.dump())

    @abstractmethod
    def get_public_key(self, address: str) -> Optional[str]:
        """Return the hex pubkey for one of our addresses (subclass hook)."""
        pass

    def get_public_keys(self, address: str) -> Sequence[str]:
        # single-keystore wallets have exactly one pubkey per address
        return [self.get_public_key(address)]

    def get_redeem_script(self, address: str) -> Optional[str]:
        """Return the redeemScript for *address*, or None where the script
        type has no redeemScript (p2pkh/p2wpkh/p2pk and raw addresses)."""
        txin_type = self.get_txin_type(address)
        if txin_type in ('p2pkh', 'p2wpkh', 'p2pk'):
            return None
        if txin_type == 'p2wpkh-p2sh':
            # nested segwit: redeemScript is the p2wpkh witness program
            pubkey = self.get_public_key(address)
            return bitcoin.p2wpkh_nested_script(pubkey)
        if txin_type == 'address':
            return None
        raise UnknownTxinType(f'unexpected txin_type {txin_type}')

    def get_witness_script(self, address: str) -> Optional[str]:
        # single-key script types have no separate witnessScript
        return None
class Imported_Wallet(Simple_Wallet):
    # wallet made of imported addresses (watch-only) and/or imported private keys

    wallet_type = 'imported'
    txin_type = 'address'

    def __init__(self, db, storage, *, config):
        Abstract_Wallet.__init__(self, db, storage, config=config)
        self.use_change = db.get('use_change', False)

    def is_watching_only(self):
        # no keystore means only addresses were imported, no keys
        return self.keystore is None

    def can_import_privkey(self):
        return bool(self.keystore)

    def load_keystore(self):
        self.keystore = load_keystore(self.db, 'keystore') if self.db.get('keystore') else None

    def save_keystore(self):
        self.db.put('keystore', self.keystore.dump())

    def can_import_address(self):
        # mixing imported addresses and imported privkeys is not allowed
        return self.is_watching_only()

    def can_delete_address(self):
        return True

    def has_seed(self):
        return False

    def is_deterministic(self):
        return False

    def is_change(self, address):
        return False

    def get_all_known_addresses_beyond_gap_limit(self) -> Set[str]:
        # no gap limit concept for imported wallets
        return set()

    def get_fingerprint(self):
        return ''

    def get_addresses(self):
        # note: overridden so that the history can be cleared
        return self.db.get_imported_addresses()

    def get_receiving_addresses(self, **kwargs):
        return self.get_addresses()

    def get_change_addresses(self, **kwargs):
        return self.get_addresses()

    def import_addresses(self, addresses: List[str], *,
                         write_to_disk=True) -> Tuple[List[str], List[Tuple[str, str]]]:
        """Import watch-only addresses.

        Returns (imported_addresses, [(bad_address, reason), ...]).
        """
        good_addr = []  # type: List[str]
        bad_addr = []  # type: List[Tuple[str, str]]
        for address in addresses:
            if not bitcoin.is_address(address):
                bad_addr.append((address, _('invalid address')))
                continue
            if self.db.has_imported_address(address):
                bad_addr.append((address, _('address already in wallet')))
                continue
            good_addr.append(address)
            self.db.add_imported_address(address, {})
            self.adb.add_address(address)
        if write_to_disk:
            self.save_db()
        return good_addr, bad_addr

    def import_address(self, address: str) -> str:
        """Import a single address; raises BitcoinException with the reason on failure."""
        good_addr, bad_addr = self.import_addresses([address])
        if good_addr and good_addr[0] == address:
            return address
        else:
            raise BitcoinException(str(bad_addr[0][1]))

    def delete_address(self, address: str) -> None:
        """Remove *address* and any transactions/labels/requests only it references.

        Also deletes the corresponding private key if no other imported
        address still uses it.
        """
        if not self.db.has_imported_address(address):
            return
        if len(self.get_addresses()) <= 1:
            raise UserFacingException("cannot delete last remaining address from wallet")
        transactions_to_remove = set()  # only referred to by this address
        transactions_new = set()  # txs that are not only referred to by address
        with self.lock:
            for addr in self.db.get_history():
                details = self.adb.get_address_history(addr)
                if addr == address:
                    for tx_hash, height in details:
                        transactions_to_remove.add(tx_hash)
                else:
                    for tx_hash, height in details:
                        transactions_new.add(tx_hash)
            # keep any tx that another address still references
            transactions_to_remove -= transactions_new
            self.db.remove_addr_history(address)
            for tx_hash in transactions_to_remove:
                self.adb._remove_transaction(tx_hash)
        self.set_label(address, None)
        if req := self.get_request_by_addr(address):
            self.delete_request(req.get_id())
        self.set_frozen_state_of_addresses([address], False)
        pubkey = self.get_public_key(address)
        self.db.remove_imported_address(address)
        if pubkey:
            # delete key iff no other address uses it (e.g. p2pkh and p2wpkh for same key)
            for txin_type in bitcoin.WIF_SCRIPT_TYPES.keys():
                try:
                    addr2 = bitcoin.pubkey_to_address(txin_type, pubkey)
                except NotImplementedError:
                    pass
                else:
                    if self.db.has_imported_address(addr2):
                        break
            else:
                self.keystore.delete_imported_key(pubkey)
                self.save_keystore()
        self.save_db()

    def get_change_addresses_for_new_transaction(self, *args, **kwargs) -> List[str]:
        # for an imported wallet, if all "change addresses" are already used,
        # it is probably better to send change back to the "from address", than to
        # send it to another random used address and link them together, hence
        # we force "allow_reusing_used_change_addrs=False"
        return super().get_change_addresses_for_new_transaction(
            *args,
            **{**kwargs, "allow_reusing_used_change_addrs": False},
        )

    def calc_unused_change_addresses(self) -> Sequence[str]:
        with self.lock:
            unused_addrs = [addr for addr in self.get_change_addresses()
                            if not self.adb.is_used(addr) and not self.is_address_reserved(addr)]
            return unused_addrs

    def is_mine(self, address) -> bool:
        if not address: return False
        return self.db.has_imported_address(address)

    def get_address_index(self, address) -> Optional[str]:
        # returns None if address is not mine
        # note: for imported wallets the "index" is simply the pubkey
        return self.get_public_key(address)

    def get_address_path_str(self, address):
        # no derivation paths for imported keys
        return None

    def get_public_key(self, address) -> Optional[str]:
        x = self.db.get_imported_address(address)
        return x.get('pubkey') if x else None

    def import_private_keys(self, keys: List[str], password: Optional[str], *,
                            write_to_disk=True) -> Tuple[List[str], List[Tuple[str, str]]]:
        """Import WIF private keys.

        Returns (imported_addresses, [(bad_key, reason), ...]).
        """
        good_addr = []  # type: List[str]
        bad_keys = []  # type: List[Tuple[str, str]]
        for key in keys:
            try:
                txin_type, pubkey = self.keystore.import_privkey(key, password)
            except Exception as e:
                bad_keys.append((key, _('invalid private key') + f': {e}'))
                continue
            if txin_type not in ('p2pkh', 'p2wpkh', 'p2wpkh-p2sh'):
                bad_keys.append((key, _('not implemented type') + f': {txin_type}'))
                continue
            addr = bitcoin.pubkey_to_address(txin_type, pubkey)
            good_addr.append(addr)
            self.db.add_imported_address(addr, {'type':txin_type, 'pubkey':pubkey})
            self.adb.add_address(addr)
        self.save_keystore()
        if write_to_disk:
            self.save_db()
        return good_addr, bad_keys

    def import_private_key(self, key: str, password: Optional[str]) -> str:
        """Import a single WIF key; raises BitcoinException with the reason on failure."""
        good_addr, bad_keys = self.import_private_keys([key], password=password)
        if good_addr:
            return good_addr[0]
        else:
            raise BitcoinException(str(bad_keys[0][1]))

    def get_txin_type(self, address):
        return self.db.get_imported_address(address).get('type', 'address')

    @profiler
    def try_detecting_internal_addresses_corruption(self):
        # we check only a random sample, for performance
        addresses = self.get_addresses()
        addresses = random.sample(addresses, min(len(addresses), 10))
        for addr_found in addresses:
            self.check_address_for_corruption(addr_found)

    def check_address_for_corruption(self, addr):
        # re-derive the address from its stored pubkey and compare
        if addr and self.is_mine(addr):
            pubkey = self.get_public_key(addr)
            if not pubkey:
                return
            txin_type = self.get_txin_type(addr)
            if txin_type == 'address':
                return
            if addr != bitcoin.pubkey_to_address(txin_type, pubkey):
                raise InternalAddressCorruption()

    def _add_input_sig_info(self, txin, address, *, only_der_suffix):
        if not self.is_mine(address):
            return
        if txin.script_type in ('unknown', 'address'):
            return
        elif txin.script_type in ('p2pkh', 'p2wpkh', 'p2wpkh-p2sh'):
            pubkey = self.get_public_key(address)
            if not pubkey:
                return
            txin.pubkeys = [bfh(pubkey)]
        else:
            raise Exception(f'Unexpected script type: {txin.script_type}. '
                            f'Imported wallets are not implemented to handle this.')

    def pubkeys_to_address(self, pubkeys):
        pubkey = pubkeys[0]
        # FIXME This is slow.
        # Ideally we would re-derive the address from the pubkey and the txin_type,
        # but we don't know the txin_type, and we only have an addr->txin_type map.
        # so instead a linear search of reverse-lookups is done...
        for addr in self.db.get_imported_addresses():
            if self.db.get_imported_address(addr)['pubkey'] == pubkey:
                return addr
        return None

    def decrypt_message(self, pubkey: str, message, password) -> bytes:
        # this is significantly faster than the implementation in the superclass
        return self.keystore.decrypt_message(pubkey, message, password)
class Deterministic_Wallet(Abstract_Wallet):
    """Wallet whose addresses are derived from a master key, with a gap limit."""

    def __init__(self, db, storage, *, config):
        # addresses learned from PSBT derivation info, not yet stored in db
        self._ephemeral_addr_to_addr_index = {}  # type: Dict[str, Sequence[int]]
        Abstract_Wallet.__init__(self, db, storage, config=config)
        self.gap_limit = db.get('gap_limit', 20)
        # generate addresses now. note that without libsecp this might block
        # for a few seconds!
        self.synchronize()

    def _init_lnworker(self):
        # lightning_privkey2 is not deterministic (legacy wallets, bip39)
        ln_xprv = self.db.get('lightning_xprv') or self.db.get('lightning_privkey2')
        # lnworker can only be initialized once receiving addresses are available
        # therefore we instantiate lnworker in DeterministicWallet
        self.lnworker = LNWallet(self, ln_xprv) if ln_xprv else None

    def has_seed(self):
        return self.keystore.has_seed()

    def get_addresses(self):
        # note: overridden so that the history can be cleared.
        # addresses are ordered based on derivation
        out = self.get_receiving_addresses()
        out += self.get_change_addresses()
        return out

    def get_receiving_addresses(self, *, slice_start=None, slice_stop=None):
        return self.db.get_receiving_addresses(slice_start=slice_start, slice_stop=slice_stop)

    def get_change_addresses(self, *, slice_start=None, slice_stop=None):
        return self.db.get_change_addresses(slice_start=slice_start, slice_stop=slice_stop)

    @profiler
    def try_detecting_internal_addresses_corruption(self):
        addresses_all = self.get_addresses()
        # sample 1: first few
        addresses_sample1 = addresses_all[:10]
        # sample2: a few more randomly selected
        addresses_rand = addresses_all[10:]
        addresses_sample2 = random.sample(addresses_rand, min(len(addresses_rand), 10))
        for addr_found in itertools.chain(addresses_sample1, addresses_sample2):
            self.check_address_for_corruption(addr_found)

    def check_address_for_corruption(self, addr):
        # re-derive the address from its index and compare with what is stored
        if addr and self.is_mine(addr):
            if addr != self.derive_address(*self.get_address_index(addr)):
                raise InternalAddressCorruption()

    def get_seed(self, password):
        return self.keystore.get_seed(password)

    def change_gap_limit(self, value):
        '''This method is not called in the code, it is kept for console use'''
        value = int(value)
        if value >= self.min_acceptable_gap():
            self.gap_limit = value
            self.db.put('gap_limit', self.gap_limit)
            self.save_db()
            return True
        else:
            return False

    def num_unused_trailing_addresses(self, addresses):
        """Count trailing addresses (end of the list) that have no history."""
        k = 0
        for addr in addresses[::-1]:
            if self.db.get_addr_history(addr):
                break
            k += 1
        return k

    def min_acceptable_gap(self) -> int:
        """Smallest gap limit that would still cover all used receiving addresses."""
        # fixme: this assumes wallet is synchronized
        n = 0
        nmax = 0
        addresses = self.get_receiving_addresses()
        k = self.num_unused_trailing_addresses(addresses)
        for addr in addresses[0:-k]:
            if self.adb.address_is_old(addr):
                n = 0
            else:
                n += 1
                nmax = max(nmax, n)
        return nmax + 1

    @abstractmethod
    def derive_pubkeys(self, c: int, i: int) -> Sequence[str]:
        """Derive the pubkey(s) for chain *c* (0=receive, 1=change) index *i*."""
        pass

    def derive_address(self, for_change: int, n: int) -> str:
        for_change = int(for_change)
        pubkeys = self.derive_pubkeys(for_change, n)
        return self.pubkeys_to_address(pubkeys)

    def export_private_key_for_path(self, path: Union[Sequence[int], str], password: Optional[str]) -> str:
        """Export the WIF privkey at a derivation *path* (list of ints or "m/..." string)."""
        if isinstance(path, str):
            path = convert_bip32_path_to_list_of_uint32(path)
        pk, compressed = self.keystore.get_private_key(path, password)
        txin_type = self.get_txin_type()  # assumes no mixed-scripts in wallet
        return bitcoin.serialize_privkey(pk, compressed, txin_type)

    def get_public_keys_with_deriv_info(self, address: str):
        """Map pubkey -> (keystore, derivation suffix) for *address*."""
        der_suffix = self.get_address_index(address)
        der_suffix = [int(x) for x in der_suffix]
        return {k.derive_pubkey(*der_suffix): (k, der_suffix)
                for k in self.get_keystores()}

    def _add_input_sig_info(self, txin, address, *, only_der_suffix):
        self._add_txinout_derivation_info(txin, address, only_der_suffix=only_der_suffix)

    def _add_txinout_derivation_info(self, txinout, address, *, only_der_suffix):
        # attach pubkeys and BIP32 paths so signers can locate their keys
        if not self.is_mine(address):
            return
        pubkey_deriv_info = self.get_public_keys_with_deriv_info(address)
        txinout.pubkeys = sorted([pk for pk in list(pubkey_deriv_info)])
        for pubkey in pubkey_deriv_info:
            ks, der_suffix = pubkey_deriv_info[pubkey]
            fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix,
                                                                                   only_der_suffix=only_der_suffix)
            txinout.bip32_paths[pubkey] = (fp_bytes, der_full)

    def create_new_address(self, for_change: bool = False):
        """Derive, store and start watching the next address on the given chain."""
        assert type(for_change) is bool
        with self.lock:
            n = self.db.num_change_addresses() if for_change else self.db.num_receiving_addresses()
            address = self.derive_address(int(for_change), n)
            self.db.add_change_address(address) if for_change else self.db.add_receiving_address(address)
            self.adb.add_address(address)
            if for_change:
                # note: if it's actually "old", it will get filtered later
                self._not_old_change_addresses.append(address)
            return address

    def synchronize_sequence(self, for_change: bool) -> int:
        """Generate addresses on one chain until the trailing gap-limit window is unused."""
        count = 0  # num new addresses we generated
        limit = self.gap_limit_for_change if for_change else self.gap_limit
        while True:
            num_addr = self.db.num_change_addresses() if for_change else self.db.num_receiving_addresses()
            if num_addr < limit:
                count += 1
                self.create_new_address(for_change)
                continue
            if for_change:
                last_few_addresses = self.get_change_addresses(slice_start=-limit)
            else:
                last_few_addresses = self.get_receiving_addresses(slice_start=-limit)
            if any(map(self.adb.address_is_old, last_few_addresses)):
                count += 1
                self.create_new_address(for_change)
            else:
                break
        return count

    def synchronize(self):
        count = 0
        with self.lock:
            count += self.synchronize_sequence(False)
            count += self.synchronize_sequence(True)
        return count

    def get_all_known_addresses_beyond_gap_limit(self):
        # note that we don't stop at first large gap
        found = set()

        def process_addresses(addrs, gap_limit):
            rolling_num_unused = 0
            for addr in addrs:
                if self.db.get_addr_history(addr):
                    rolling_num_unused = 0
                else:
                    if rolling_num_unused >= gap_limit:
                        found.add(addr)
                    rolling_num_unused += 1

        process_addresses(self.get_receiving_addresses(), self.gap_limit)
        process_addresses(self.get_change_addresses(), self.gap_limit_for_change)
        return found

    def get_address_index(self, address) -> Optional[Sequence[int]]:
        return self.db.get_address_index(address) or self._ephemeral_addr_to_addr_index.get(address)

    def get_address_path_str(self, address):
        intpath = self.get_address_index(address)
        if intpath is None:
            return None
        return convert_bip32_intpath_to_strpath(intpath)

    def _learn_derivation_path_for_address_from_txinout(self, txinout, address):
        """Try to learn that *address* is ours from PSBT derivation metadata.

        Returns True if we verified the address against the given path.
        """
        for ks in self.get_keystores():
            pubkey, der_suffix = ks.find_my_pubkey_in_txinout(txinout, only_der_suffix=True)
            if der_suffix is not None:
                # note: we already know the pubkey belongs to the keystore,
                #       but the script template might be different
                if len(der_suffix) != 2: continue
                try:
                    my_address = self.derive_address(*der_suffix)
                except CannotDerivePubkey:
                    my_address = None
                if my_address == address:
                    self._ephemeral_addr_to_addr_index[address] = list(der_suffix)
                    return True
        return False

    def get_master_public_keys(self):
        return [self.get_master_public_key()]

    def get_fingerprint(self):
        return self.get_master_public_key()

    def get_txin_type(self, address=None):
        # single script type per deterministic wallet
        return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
    """Deterministic Wallet with a single pubkey per address."""

    def __init__(self, db, storage, *, config):
        Deterministic_Wallet.__init__(self, db, storage, config=config)

    def get_public_key(self, address):
        """Return the hex pubkey derived for one of our addresses."""
        sequence = self.get_address_index(address)
        pubkeys = self.derive_pubkeys(*sequence)
        return pubkeys[0]

    def load_keystore(self):
        """Load the single keystore and determine the wallet's script type from its xpub."""
        self.keystore = load_keystore(self.db, 'keystore')
        try:
            xtype = bip32.xpub_type(self.keystore.xpub)
        except Exception:
            # fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Keystores without an xpub
            # (e.g. old-style seeds) legitimately end up here.
            xtype = 'standard'
        self.txin_type = 'p2pkh' if xtype == 'standard' else xtype

    def get_master_public_key(self):
        return self.keystore.get_master_public_key()

    def derive_pubkeys(self, c, i):
        """Derive the single pubkey for chain *c*, index *i* (as a one-element list)."""
        return [self.keystore.derive_pubkey(c, i).hex()]
class Standard_Wallet(Simple_Deterministic_Wallet):
    """The default single-keystore deterministic wallet type."""

    wallet_type = 'standard'

    def pubkeys_to_address(self, pubkeys):
        """Map the single pubkey of an output script to its address."""
        return bitcoin.pubkey_to_address(self.txin_type, pubkeys[0])
class Multisig_Wallet(Deterministic_Wallet):
    # generic m of n

    def __init__(self, db, storage, *, config):
        self.wallet_type = db.get('wallet_type')
        # e.g. "2of3" -> m=2, n=3
        self.m, self.n = multisig_type(self.wallet_type)
        Deterministic_Wallet.__init__(self, db, storage, config=config)

    def get_public_keys(self, address):
        return [pk.hex() for pk in self.get_public_keys_with_deriv_info(address)]

    def pubkeys_to_address(self, pubkeys):
        redeem_script = self.pubkeys_to_scriptcode(pubkeys)
        return bitcoin.redeem_script_to_address(self.txin_type, redeem_script)

    def pubkeys_to_scriptcode(self, pubkeys: Sequence[str]) -> str:
        # pubkeys are sorted so that cosigners derive the same script (BIP67-style)
        return transaction.multisig_script(sorted(pubkeys), self.m)

    def get_redeem_script(self, address):
        """Return the redeemScript for *address* depending on its script type."""
        txin_type = self.get_txin_type(address)
        pubkeys = self.get_public_keys(address)
        scriptcode = self.pubkeys_to_scriptcode(pubkeys)
        if txin_type == 'p2sh':
            return scriptcode
        elif txin_type == 'p2wsh-p2sh':
            return bitcoin.p2wsh_nested_script(scriptcode)
        elif txin_type == 'p2wsh':
            return None
        raise UnknownTxinType(f'unexpected txin_type {txin_type}')

    def get_witness_script(self, address):
        """Return the witnessScript for *address* depending on its script type."""
        txin_type = self.get_txin_type(address)
        pubkeys = self.get_public_keys(address)
        scriptcode = self.pubkeys_to_scriptcode(pubkeys)
        if txin_type == 'p2sh':
            return None
        elif txin_type in ('p2wsh-p2sh', 'p2wsh'):
            return scriptcode
        raise UnknownTxinType(f'unexpected txin_type {txin_type}')

    def derive_pubkeys(self, c, i):
        # one pubkey per cosigner keystore
        return [k.derive_pubkey(c, i).hex() for k in self.get_keystores()]

    def load_keystore(self):
        # keystores are stored under keys 'x1/', 'x2/', ... in the db
        self.keystores = {}
        for i in range(self.n):
            name = 'x%d/'%(i+1)
            self.keystores[name] = load_keystore(self.db, name)
        self.keystore = self.keystores['x1/']
        xtype = bip32.xpub_type(self.keystore.xpub)
        self.txin_type = 'p2sh' if xtype == 'standard' else xtype

    def save_keystore(self):
        for name, k in self.keystores.items():
            self.db.put(name, k.dump())

    def get_keystore(self):
        return self.keystores.get('x1/')

    def get_keystores(self):
        # NOTE: sorted lexicographically on the 'xN/' keys
        return [self.keystores[i] for i in sorted(self.keystores.keys())]

    def can_have_keystore_encryption(self):
        return any([k.may_have_password() for k in self.get_keystores()])

    def _update_password_for_keystore(self, old_pw, new_pw):
        for name, keystore in self.keystores.items():
            if keystore.may_have_password():
                keystore.update_password(old_pw, new_pw)
                self.db.put(name, keystore.dump())

    def check_password(self, password):
        # every password-capable cosigner keystore must accept the password
        for name, keystore in self.keystores.items():
            if keystore.may_have_password():
                keystore.check_password(password)
        if self.has_storage_encryption():
            self.storage.check_password(password)

    def get_available_storage_encryption_version(self):
        # multisig wallets are not offered hw device encryption
        return StorageEncryptionVersion.USER_PASSWORD

    def has_seed(self):
        return self.keystore.has_seed()

    def is_watching_only(self):
        return all([k.is_watching_only() for k in self.get_keystores()])

    def get_master_public_key(self):
        return self.keystore.get_master_public_key()

    def get_master_public_keys(self):
        return [k.get_master_public_key() for k in self.get_keystores()]

    def get_fingerprint(self):
        # order-independent: sorted concatenation of all cosigner mpks
        return ''.join(sorted(self.get_master_public_keys()))
# known wallet types; plugins may extend this list at runtime
wallet_types = ['standard', 'multisig', 'imported']


def register_wallet_type(category):
    """Register an additional wallet type (used by plugins).

    Idempotent: registering the same category twice is a no-op, so a
    plugin being reloaded does not grow the list with duplicates.
    """
    if category not in wallet_types:
        wallet_types.append(category)
# maps wallet_type -> wallet class; plugins may register additional entries
wallet_constructors = {
    'standard': Standard_Wallet,
    'old': Standard_Wallet,
    'xpub': Standard_Wallet,
    'imported': Imported_Wallet,
}


def register_constructor(wallet_type, constructor):
    """Register the wallet class used to instantiate *wallet_type* (used by plugins)."""
    wallet_constructors[wallet_type] = constructor
# former WalletFactory
class Wallet(object):
    """The main wallet "entry point".
    This class is actually a factory that will return a wallet of the correct
    type when passed a WalletStorage instance."""

    def __new__(cls, db: 'WalletDB', storage: Optional[WalletStorage], *, config: SimpleConfig):
        # fix: first parameter of __new__ renamed from the misleading
        # `self` to the conventional `cls` (it receives the class, not
        # an instance). The constructed object is NOT a Wallet instance,
        # so __init__ is never invoked on it.
        wallet_type = db.get('wallet_type')
        WalletClass = Wallet.wallet_class(wallet_type)
        wallet = WalletClass(db, storage, config=config)
        return wallet

    @staticmethod
    def wallet_class(wallet_type):
        """Return the wallet class for *wallet_type*; raises WalletFileException if unknown."""
        if multisig_type(wallet_type):
            return Multisig_Wallet
        if wallet_type in wallet_constructors:
            return wallet_constructors[wallet_type]
        raise WalletFileException("Unknown wallet type: " + str(wallet_type))
def create_new_wallet(*, path, config: SimpleConfig, passphrase=None, password=None,
                      encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
    """Create a new wallet.

    Generates a fresh seed, writes a 'standard' wallet to *path* and
    returns {'seed': ..., 'wallet': ..., 'msg': ...}.  Raises if a
    wallet file already exists at *path*.
    """
    storage = WalletStorage(path)
    if storage.file_exists():
        raise Exception("Remove the existing wallet first!")
    db = WalletDB('', manual_upgrades=False)
    seed = Mnemonic('en').make_seed(seed_type=seed_type)
    k = keystore.from_seed(seed, passphrase)
    db.put('keystore', k.dump())
    db.put('wallet_type', 'standard')
    if k.can_have_deterministic_lightning_xprv():
        # derive the lightning master key from the same seed
        db.put('lightning_xprv', k.get_lightning_xprv(None))
    if gap_limit is not None:
        db.put('gap_limit', gap_limit)
    wallet = Wallet(db, storage, config=config)
    # set password (and storage encryption) before the first write to disk
    wallet.update_password(old_pw=None, new_pw=password, encrypt_storage=encrypt_file)
    wallet.synchronize()
    msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
    wallet.save_db()
    return {'seed': seed, 'wallet': wallet, 'msg': msg}
def restore_wallet_from_text(text, *, path: Optional[str], config: SimpleConfig,
                             passphrase=None, password=None, encrypt_file=True,
                             gap_limit=None) -> dict:
    """Restore a wallet from text. Text can be a seed phrase, a master
    public key, a master private key, a list of bitcoin addresses
    or bitcoin private keys.

    If *path* is None the wallet is created in-memory only.  Returns a
    dict with keys 'wallet' and 'msg'.
    """
    if path is None:  # create wallet in-memory
        storage = None
    else:
        storage = WalletStorage(path)
        if storage.file_exists():
            raise Exception("Remove the existing wallet first!")
    db = WalletDB('', manual_upgrades=False)
    text = text.strip()
    # Dispatch on what kind of secret/material the text contains.
    if keystore.is_address_list(text):
        # Watch-only wallet from a whitespace-separated address list.
        wallet = Imported_Wallet(db, storage, config=config)
        addresses = text.split()
        good_inputs, bad_inputs = wallet.import_addresses(addresses, write_to_disk=False)
        # FIXME tell user about bad_inputs
        if not good_inputs:
            raise Exception("None of the given addresses can be imported")
    elif keystore.is_private_key_list(text, allow_spaces_inside_key=False):
        # Imported wallet from a list of WIF private keys.
        k = keystore.Imported_KeyStore({})
        db.put('keystore', k.dump())
        wallet = Imported_Wallet(db, storage, config=config)
        keys = keystore.get_private_keys(text, allow_spaces_inside_key=False)
        good_inputs, bad_inputs = wallet.import_private_keys(keys, None, write_to_disk=False)
        # FIXME tell user about bad_inputs
        if not good_inputs:
            raise Exception("None of the given privkeys can be imported")
    else:
        # Deterministic wallet from a master key (xpub/xprv) or a seed phrase.
        if keystore.is_master_key(text):
            k = keystore.from_master_key(text)
        elif keystore.is_seed(text):
            k = keystore.from_seed(text, passphrase)
            if k.can_have_deterministic_lightning_xprv():
                db.put('lightning_xprv', k.get_lightning_xprv(None))
        else:
            raise Exception("Seed or key not recognized")
        db.put('keystore', k.dump())
        db.put('wallet_type', 'standard')
    if gap_limit is not None:
        db.put('gap_limit', gap_limit)
    wallet = Wallet(db, storage, config=config)
    if storage:
        # The password/encryption must be applied before anything is persisted.
        assert not storage.file_exists(), "file was created too soon! plaintext keys might have been written to disk"
    wallet.update_password(old_pw=None, new_pw=password, encrypt_storage=encrypt_file)
    wallet.synchronize()
    msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
           "Start a daemon and use load_wallet to sync its history.")
    wallet.save_db()
    return {'wallet': wallet, 'msg': msg}
| mit | 6e4d65a2043a2e44d535dcad074fbd2c | 41.038983 | 150 | 0.586209 | 3.835027 | false | false | false | false |
spesmilo/electrum | electrum/plugins/keepkey/keepkey.py | 1 | 20302 | from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.keystore import Hardware_KeyStore
from electrum.plugin import Device, runs_in_hwd_thread
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
import usb1
from .client import KeepKeyClient
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
    """Keystore backed by a KeepKey hardware device."""
    hw_type = 'keepkey'
    device = 'KeepKey'

    plugin: 'KeepKeyPlugin'

    def decrypt_message(self, sequence, message, password):
        # The KeepKey protocol used here does not expose message decryption.
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    @runs_in_hwd_thread
    def sign_message(self, sequence, message, password, *, script_type=None):
        """Sign *message* with the key at derivation suffix *sequence* on-device."""
        client = self.get_client()
        path = self.get_derivation_prefix() + "/%d/%d" % sequence
        result = client.sign_message(
            self.plugin.get_coin_name(), client.expand_path(path), message)
        return result.signature

    @runs_in_hwd_thread
    def sign_transaction(self, tx, password):
        """Have the device sign *tx*; collects previous txs needed for legacy inputs."""
        if tx.is_complete():
            return
        # Map txid -> previous transaction, used by the device to verify amounts.
        prev_tx = {}
        for txin in tx.inputs():
            if txin.utxo is None and not txin.is_segwit():
                raise UserFacingException(_('Missing previous tx for legacy input.'))
            prev_tx[txin.prevout.txid.hex()] = txin.utxo
        self.plugin.sign_transaction(self, tx, prev_tx)
class KeepKeyPlugin(HW_PluginBase):
    """Plugin implementing KeepKey hardware-wallet support (HID and WebUSB)."""
    # Derived classes provide:
    #
    #  class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, ckd_public, types, HidTransport
    firmware_URL = 'https://www.keepkey.com'
    libraries_URL = 'https://github.com/keepkey/python-keepkey'
    minimum_firmware = (1, 0, 0)
    keystore_class = KeepKey_KeyStore
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    # maximum device label length accepted by the firmware
    MAX_LABEL_LEN = 32
    def __init__(self, parent, config, name):
        HW_PluginBase.__init__(self, parent, config, name)
        # keepkeylib is an optional dependency; degrade gracefully if missing.
        try:
            from . import client
            import keepkeylib
            import keepkeylib.ckd_public
            import keepkeylib.transport_hid
            import keepkeylib.transport_webusb
            self.client_class = client.KeepKeyClient
            self.ckd_public = keepkeylib.ckd_public
            self.types = keepkeylib.client.types
            self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
                               keepkeylib.transport_webusb.DEVICE_IDS)
            # only "register" hid device id:
            self.device_manager().register_devices(keepkeylib.transport_hid.DEVICE_IDS, plugin=self)
            # for webusb transport, use custom enumerate function:
            self.device_manager().register_enumerate_func(self.enumerate)
            self.libraries_available = True
        except ImportError:
            self.libraries_available = False
    @runs_in_hwd_thread
    def enumerate(self):
        """Enumerate KeepKey devices reachable over WebUSB as Device tuples."""
        from keepkeylib.transport_webusb import WebUsbTransport
        results = []
        for dev in WebUsbTransport.enumerate():
            path = self._dev_to_str(dev)
            results.append(Device(path=path,
                                  interface_number=-1,
                                  id_=path,
                                  product_key=(dev.getVendorID(), dev.getProductID()),
                                  usage_page=0,
                                  transport_ui_string=f"webusb:{path}"))
        return results
    @staticmethod
    def _dev_to_str(dev: "usb1.USBDevice") -> str:
        # Stable "bus:port:port..." identifier for a USB device.
        return ":".join(str(x) for x in ["%03i" % (dev.getBusNumber(),)] + dev.getPortNumberList())
    @runs_in_hwd_thread
    def hid_transport(self, pair):
        from keepkeylib.transport_hid import HidTransport
        return HidTransport(pair)
    @runs_in_hwd_thread
    def webusb_transport(self, device):
        # Re-enumerate and match by path; returns None if the device vanished.
        from keepkeylib.transport_webusb import WebUsbTransport
        for dev in WebUsbTransport.enumerate():
            if device.path == self._dev_to_str(dev):
                return WebUsbTransport(dev)
    @runs_in_hwd_thread
    def _try_hid(self, device):
        """Attempt an HID connection; returns a transport or None on failure."""
        self.logger.info("Trying to connect over USB...")
        if device.interface_number == 1:
            pair = [None, device.path]
        else:
            pair = [device.path, None]
        try:
            return self.hid_transport(pair)
        except BaseException as e:
            # see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
            # raise
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None
    @runs_in_hwd_thread
    def _try_webusb(self, device):
        """Attempt a WebUSB connection; returns a transport or None on failure."""
        self.logger.info("Trying to connect over WebUSB...")
        try:
            return self.webusb_transport(device)
        except BaseException as e:
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None
    @runs_in_hwd_thread
    def create_client(self, device, handler):
        """Open a transport to *device*, sanity-check it, and wrap it in a client.

        Returns None if the device cannot be reached, fails a ping, or runs
        firmware older than `minimum_firmware`.
        """
        # product_key[1] == 2 marks the WebUSB product id -- TODO confirm
        if device.product_key[1] == 2:
            transport = self._try_webusb(device)
        else:
            transport = self._try_hid(device)
        if not transport:
            self.logger.info("cannot connect to device")
            return
        self.logger.info(f"connected to device at {device.path}")
        client = self.client_class(transport, handler, self)
        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.logger.info(f"ping failed {e}")
            return None
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.logger.info(msg)
            if handler:
                handler.show_error(msg)
            else:
                raise UserFacingException(msg)
            return None
        return client
    @runs_in_hwd_thread
    def get_client(self, keystore, force_pair=True, *,
                   devices=None, allow_user_interaction=True) -> Optional['KeepKeyClient']:
        client = super().get_client(keystore, force_pair,
                                    devices=devices,
                                    allow_user_interaction=allow_user_interaction)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client
    def get_coin_name(self):
        # Coin name string as expected by the KeepKey firmware.
        return "Testnet" if constants.net.TESTNET else "Bitcoin"
    def initialize_device(self, device_id, wizard, handler):
        """Run the wizard flow to initialize a blank device (blocking)."""
        # Initialization method
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
        ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
            (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
            (TIM_PRIVKEY, _("Upload a master private key"))
        ]
        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, self.device)
            # Run the device I/O on a worker thread while the Qt loop spins.
            t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.daemon = True
            t.start()
            exit_code = wizard.loop.exec_()
            if exit_code != 0:
                # this method (initialize_device) was called with the expectation
                # of leaving the device in an initialized state when finishing.
                # signal that this is not the case:
                raise UserCancelled()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Thread target: run _initialize_device, report errors, and exit the wizard loop."""
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            self.logger.exception('')
            handler.show_error(repr(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)
    @runs_in_hwd_thread
    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Dispatch the chosen initialization method (TIM_*) to the device."""
        item, label, pin_protection, passphrase_protection = settings
        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if not client:
            raise Exception(_("The device was disconnected."))
        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                       pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)
    def _make_node_path(self, xpub, address_n):
        # Convert an xpub + derivation suffix to the protobuf HDNodePathType.
        bip32node = BIP32Node.from_xkey(xpub)
        node = self.types.HDNodeType(
            depth=bip32node.depth,
            fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
            child_num=int.from_bytes(bip32node.child_number, 'big'),
            chain_code=bip32node.chaincode,
            public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
        )
        return self.types.HDNodePathType(node=node, address_n=address_n)
    def setup_device(self, device_info, wizard, purpose):
        """Pair with the device, initializing it first if it is blank."""
        device_id = device_info.device.id_
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        wizard.run_task_without_blocking_gui(
            task=lambda: client.get_xpub("m", 'standard'))
        client.used()
        return client
    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Fetch the xpub at *derivation* from the device; rejects unsupported script types."""
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub
    def get_keepkey_input_script_type(self, electrum_txin_type: str):
        """Map an electrum txin type to the KeepKey SPEND* input script enum."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return self.types.SPENDWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return self.types.SPENDP2SHWITNESS
        if electrum_txin_type in ('p2pkh',):
            return self.types.SPENDADDRESS
        if electrum_txin_type in ('p2sh',):
            return self.types.SPENDMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
    def get_keepkey_output_script_type(self, electrum_txin_type: str):
        """Map an electrum txin type to the KeepKey PAYTO* output script enum."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return self.types.PAYTOWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return self.types.PAYTOP2SHWITNESS
        if electrum_txin_type in ('p2pkh',):
            return self.types.PAYTOADDRESS
        if electrum_txin_type in ('p2sh',):
            return self.types.PAYTOMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
    @runs_in_hwd_thread
    def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
        """Sign *tx* on-device and merge the resulting signatures into it.

        prev_tx maps txid -> previous Transaction; stored on self because
        the keepkeylib tx_api calls back into get_tx() during signing.
        """
        self.prev_tx = prev_tx
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
        outputs = self.tx_outputs(tx, keystore=keystore)
        signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
                                    lock_time=tx.locktime, version=tx.version)[0]
        # append SIGHASH_ALL byte to each DER signature
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)
    @runs_in_hwd_thread
    def show_address(self, wallet, address, keystore=None):
        """Display *address* on the device screen for out-of-band verification."""
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        client = self.get_client(keystore)
        if not client.atleast_version(1, 3):
            keystore.handler.show_error(_("Your device firmware is too old"))
            return
        deriv_suffix = wallet.get_address_index(address)
        derivation = keystore.get_derivation_prefix()
        address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
        address_n = client.expand_path(address_path)
        script_type = self.get_keepkey_input_script_type(wallet.txin_type)
        # prepare multisig, if available:
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) > 1:
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pairs = sorted(zip(pubkeys, xpubs))
            multisig = self._make_multisig(
                wallet.m,
                [(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
        else:
            multisig = None
        client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
    def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'KeepKey_KeyStore' = None):
        """Convert tx inputs to KeepKey TxInputType protobufs.

        With for_sig=True, derivation paths / multisig info are attached so
        the device can sign; otherwise only the outpoint data is filled in
        (used when serializing previous transactions).
        """
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin.is_coinbase_input():
                prev_hash = b"\x00"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    assert isinstance(tx, PartialTransaction)
                    assert isinstance(txin, PartialTxInput)
                    assert keystore
                    if len(txin.pubkeys) > 1:
                        xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
                        multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
                    else:
                        multisig = None
                    script_type = self.get_keepkey_input_script_type(txin.script_type)
                    txinputtype = self.types.TxInputType(
                        script_type=script_type,
                        multisig=multisig)
                    my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
                    if full_path:
                        txinputtype.address_n.extend(full_path)
                prev_hash = txin.prevout.txid
                prev_index = txin.prevout.out_idx
            if txin.value_sats() is not None:
                txinputtype.amount = txin.value_sats()
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index
            if txin.script_sig is not None:
                txinputtype.script_sig = txin.script_sig
            txinputtype.sequence = txin.nsequence
            inputs.append(txinputtype)
        return inputs
    def _make_multisig(self, m, xpubs):
        # Build a MultisigRedeemScriptType (m-of-n); None for the 1-key case.
        if len(xpubs) == 1:
            return None
        pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
        return self.types.MultisigRedeemScriptType(
            pubkeys=pubkeys,
            signatures=[b''] * len(pubkeys),
            m=m)
    def tx_outputs(self, tx: PartialTransaction, *, keystore: 'KeepKey_KeyStore'):
        """Convert tx outputs to KeepKey TxOutputType protobufs.

        At most one of our own outputs (on the change branch, if any output
        is) is sent by derivation path so the device can hide it from the
        user confirmation screen.
        """
        def create_output_by_derivation():
            script_type = self.get_keepkey_output_script_type(txout.script_type)
            if len(txout.pubkeys) > 1:
                xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
                multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
            else:
                multisig = None
            my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
            assert full_path
            txoutputtype = self.types.TxOutputType(
                multisig=multisig,
                amount=txout.value,
                address_n=full_path,
                script_type=script_type)
            return txoutputtype
        def create_output_by_address():
            txoutputtype = self.types.TxOutputType()
            txoutputtype.amount = txout.value
            if address:
                txoutputtype.script_type = self.types.PAYTOADDRESS
                txoutputtype.address = address
            else:
                # address-less output: OP_RETURN data carrier
                txoutputtype.script_type = self.types.PAYTOOPRETURN
                txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
            return txoutputtype
        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
        for txout in tx.outputs():
            address = txout.address
            use_create_by_derivation = False
            if txout.is_mine and not has_change:
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                if txout.is_change == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True
            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation()
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)
        return outputs
    def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
        """Serialize an electrum Transaction into a keepkeylib TransactionType."""
        t = self.types.TransactionType()
        if tx is None:
            # probably for segwit input and we don't need this prev txn
            return t
        tx.deserialize()
        t.version = tx.version
        t.lock_time = tx.locktime
        inputs = self.tx_inputs(tx)
        t.inputs.extend(inputs)
        for out in tx.outputs():
            o = t.bin_outputs.add()
            o.amount = out.value
            o.script_pubkey = out.scriptpubkey
        return t
    # This function is called from the TREZOR libraries (via tx_api)
    def get_tx(self, tx_hash):
        # Serve previous transactions cached by sign_transaction().
        tx = self.prev_tx[tx_hash]
        return self.electrum_tx_to_txtype(tx)
| mit | 82792b109cf479dbf5b959804b6b998f | 40.602459 | 122 | 0.591912 | 3.881093 | false | false | false | false |
spesmilo/electrum | electrum/synchronizer.py | 2 | 13220 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import hashlib
from typing import Dict, List, TYPE_CHECKING, Tuple, Set
from collections import defaultdict
import logging
from aiorpcx import run_in_thread, RPCError
from . import util
from .transaction import Transaction, PartialTransaction
from .util import bh2u, make_aiohttp_session, NetworkJobOnDefaultServer, random_shuffled_copy, OldTaskGroup
from .bitcoin import address_to_scripthash, is_address
from .logging import Logger
from .interface import GracefulDisconnect, NetworkTimeout
if TYPE_CHECKING:
from .network import Network
from .address_synchronizer import AddressSynchronizer
class SynchronizerFailure(Exception):
    """Raised when the synchronizer detects an unrecoverable inconsistency."""
def history_status(h):
    """Return the ElectrumX "status" of an address history.

    *h* is a list of (tx_hash, height) pairs.  The status is the sha256
    (hex-encoded) of the concatenated "tx_hash:height:" strings, or None
    for an empty history -- matching the server-side status computation of
    the blockchain.scripthash.subscribe protocol method.
    """
    if not h:
        return None
    # join instead of += (linear, not quadratic); .hexdigest() instead of
    # the deprecated bh2u(... .digest()) helper -- output is identical.
    status = ''.join(f'{tx_hash}:{height:d}:' for tx_hash, height in h)
    return hashlib.sha256(status.encode('ascii')).hexdigest()
class SynchronizerBase(NetworkJobOnDefaultServer):
    """Subscribe over the network to a set of addresses, and monitor their statuses.
    Every time a status changes, run a coroutine provided by the subclass.
    """
    def __init__(self, network: 'Network'):
        self.asyncio_loop = network.asyncio_loop
        NetworkJobOnDefaultServer.__init__(self, network)
    def _reset(self):
        """Clear all per-server state; called on (re)connect to a server."""
        super()._reset()
        self.requested_addrs = set()
        # scripthash -> address, to resolve server notifications back to addresses
        self.scripthash_to_address = {}
        self._processed_some_notifications = False  # so that we don't miss them
        # Queues
        self.add_queue = asyncio.Queue()
        self.status_queue = asyncio.Queue()
    async def _run_tasks(self, *, taskgroup):
        await super()._run_tasks(taskgroup=taskgroup)
        try:
            async with taskgroup as group:
                await group.spawn(self.send_subscriptions())
                await group.spawn(self.handle_status())
                await group.spawn(self.main())
        finally:
            # we are being cancelled now
            self.session.unsubscribe(self.status_queue)
    def add(self, addr):
        """Thread-safe entry point: start watching *addr*."""
        asyncio.run_coroutine_threadsafe(self._add_address(addr), self.asyncio_loop)
    async def _add_address(self, addr: str):
        # note: this method is async as add_queue.put_nowait is not thread-safe.
        if not is_address(addr): raise ValueError(f"invalid bitcoin address {addr}")
        if addr in self.requested_addrs: return
        self.requested_addrs.add(addr)
        self.add_queue.put_nowait(addr)
    async def _on_address_status(self, addr, status):
        """Handle the change of the status of an address."""
        raise NotImplementedError()  # implemented by subclasses
    async def send_subscriptions(self):
        """Drain add_queue, issuing one scripthash subscription per address."""
        async def subscribe_to_address(addr):
            h = address_to_scripthash(addr)
            self.scripthash_to_address[h] = addr
            self._requests_sent += 1
            try:
                async with self._network_request_semaphore:
                    await self.session.subscribe('blockchain.scripthash.subscribe', [h], self.status_queue)
            except RPCError as e:
                if e.message == 'history too large':  # no unique error code
                    raise GracefulDisconnect(e, log_level=logging.ERROR) from e
                raise
            self._requests_answered += 1
            self.requested_addrs.remove(addr)
        while True:
            addr = await self.add_queue.get()
            await self.taskgroup.spawn(subscribe_to_address, addr)
    async def handle_status(self):
        """Dispatch status notifications from the server to _on_address_status."""
        while True:
            h, status = await self.status_queue.get()
            addr = self.scripthash_to_address[h]
            await self.taskgroup.spawn(self._on_address_status, addr, status)
            self._processed_some_notifications = True
    async def main(self):
        raise NotImplementedError()  # implemented by subclasses
class Synchronizer(SynchronizerBase):
    '''The synchronizer keeps the wallet up-to-date with its set of
    addresses and their transactions.  It subscribes over the network
    to wallet addresses, gets the wallet to generate new addresses
    when necessary, requests the transaction history of any addresses
    we don't have the full history of, and requests binary transaction
    data of any transactions the wallet doesn't have.
    '''
    def __init__(self, adb: 'AddressSynchronizer'):
        self.adb = adb
        SynchronizerBase.__init__(self, adb.network)
    def _reset(self):
        """Clear per-server request bookkeeping (see base class)."""
        super()._reset()
        self.requested_tx = {}
        self.requested_histories = set()
        self._stale_histories = dict()  # type: Dict[str, asyncio.Task]
    def diagnostic_name(self):
        return self.adb.diagnostic_name()
    def is_up_to_date(self):
        # Up to date == no outstanding requests of any kind.
        return (not self.requested_addrs
                and not self.requested_histories
                and not self.requested_tx
                and not self._stale_histories)
    async def _on_address_status(self, addr, status):
        """Fetch and store the history for *addr* after a status notification."""
        history = self.adb.db.get_addr_history(addr)
        if history_status(history) == status:
            return
        # No point in requesting history twice for the same announced status.
        # However if we got announced a new status, we should request history again:
        if (addr, status) in self.requested_histories:
            return
        # request address history
        self.requested_histories.add((addr, status))
        self._stale_histories.pop(addr, asyncio.Future()).cancel()
        h = address_to_scripthash(addr)
        self._requests_sent += 1
        async with self._network_request_semaphore:
            result = await self.interface.get_history_for_scripthash(h)
        self._requests_answered += 1
        self.logger.info(f"receiving history {addr} {len(result)}")
        hist = list(map(lambda item: (item['tx_hash'], item['height']), result))
        # tx_fees
        tx_fees = [(item['tx_hash'], item.get('fee')) for item in result]
        tx_fees = dict(filter(lambda x:x[1] is not None, tx_fees))
        # Check that the status corresponds to what was announced
        if history_status(hist) != status:
            # could happen naturally if history changed between getting status and history (race)
            self.logger.info(f"error: status mismatch: {addr}. we'll wait a bit for status update.")
            # The server is supposed to send a new status notification, which will trigger a new
            # get_history. We shall wait a bit for this to happen, otherwise we disconnect.
            async def disconnect_if_still_stale():
                timeout = self.network.get_network_timeout_seconds(NetworkTimeout.Generic)
                await asyncio.sleep(timeout)
                raise SynchronizerFailure(f"timeout reached waiting for addr {addr}: history still stale")
            self._stale_histories[addr] = await self.taskgroup.spawn(disconnect_if_still_stale)
        else:
            self._stale_histories.pop(addr, asyncio.Future()).cancel()
            # Store received history
            self.adb.receive_history_callback(addr, hist, tx_fees)
            # Request transactions we don't have
            await self._request_missing_txs(hist)
        # Remove request; this allows up_to_date to be True
        self.requested_histories.discard((addr, status))
    async def _request_missing_txs(self, hist, *, allow_server_not_finding_tx=False):
        # "hist" is a list of [tx_hash, tx_height] lists
        transaction_hashes = []
        for tx_hash, tx_height in hist:
            if tx_hash in self.requested_tx:
                continue
            tx = self.adb.db.get_transaction(tx_hash)
            if tx and not isinstance(tx, PartialTransaction):
                continue  # already have complete tx
            transaction_hashes.append(tx_hash)
            self.requested_tx[tx_hash] = tx_height
        if not transaction_hashes: return
        async with OldTaskGroup() as group:
            for tx_hash in transaction_hashes:
                await group.spawn(self._get_transaction(tx_hash, allow_server_not_finding_tx=allow_server_not_finding_tx))
    async def _get_transaction(self, tx_hash, *, allow_server_not_finding_tx=False):
        """Download one raw transaction, verify its txid, and hand it to the wallet."""
        self._requests_sent += 1
        try:
            async with self._network_request_semaphore:
                raw_tx = await self.interface.get_transaction(tx_hash)
        except RPCError as e:
            # most likely, "No such mempool or blockchain transaction"
            if allow_server_not_finding_tx:
                self.requested_tx.pop(tx_hash)
                return
            else:
                raise
        finally:
            self._requests_answered += 1
        tx = Transaction(raw_tx)
        if tx_hash != tx.txid():
            # guard against a malicious/buggy server returning a different tx
            raise SynchronizerFailure(f"received tx does not match expected txid ({tx_hash} != {tx.txid()})")
        tx_height = self.requested_tx.pop(tx_hash)
        self.adb.receive_tx_callback(tx_hash, tx, tx_height)
        self.logger.info(f"received tx {tx_hash} height: {tx_height} bytes: {len(raw_tx)}")
    async def main(self):
        """Bootstrap subscriptions and keep the wallet's up_to_date flag current."""
        self.adb.set_up_to_date(False)
        # request missing txns, if any
        for addr in random_shuffled_copy(self.adb.db.get_history()):
            history = self.adb.db.get_addr_history(addr)
            # Old electrum servers returned ['*'] when all history for the address
            # was pruned. This no longer happens but may remain in old wallets.
            if history == ['*']: continue
            await self._request_missing_txs(history, allow_server_not_finding_tx=True)
        # add addresses to bootstrap
        for addr in random_shuffled_copy(self.adb.get_addresses()):
            await self._add_address(addr)
        # main loop
        while True:
            await asyncio.sleep(0.1)
            hist_done = self.is_up_to_date()
            spv_done = self.adb.verifier.is_up_to_date() if self.adb.verifier else True
            up_to_date = hist_done and spv_done
            # see if status changed
            if (up_to_date != self.adb.is_up_to_date()
                    or up_to_date and self._processed_some_notifications):
                self._processed_some_notifications = False
                self.adb.set_up_to_date(up_to_date)
class Notifier(SynchronizerBase):
    """Watch addresses. Every time the status of an address changes,
    an HTTP POST is sent to the corresponding URL.
    """
    def __init__(self, network):
        SynchronizerBase.__init__(self, network)
        # address -> list of webhook URLs to notify
        self.watched_addresses = defaultdict(list)  # type: Dict[str, List[str]]
        self._start_watching_queue = asyncio.Queue()  # type: asyncio.Queue[Tuple[str, str]]
    async def main(self):
        # resend existing subscriptions if we were restarted
        for addr in self.watched_addresses:
            await self._add_address(addr)
        # main loop
        while True:
            addr, url = await self._start_watching_queue.get()
            self.watched_addresses[addr].append(url)
            await self._add_address(addr)
    async def start_watching_addr(self, addr: str, url: str):
        """Queue (addr, url): POST to *url* whenever *addr*'s status changes."""
        await self._start_watching_queue.put((addr, url))
    async def stop_watching_addr(self, addr: str):
        """Stop notifying for *addr* (server-side subscription stays active)."""
        self.watched_addresses.pop(addr, None)
        # TODO blockchain.scripthash.unsubscribe
    async def _on_address_status(self, addr, status):
        """POST {'address', 'status'} to every URL registered for *addr*."""
        if addr not in self.watched_addresses:
            return
        self.logger.info(f'new status for addr {addr}')
        headers = {'content-type': 'application/json'}
        data = {'address': addr, 'status': status}
        for url in self.watched_addresses[addr]:
            try:
                async with make_aiohttp_session(proxy=self.network.proxy, headers=headers) as session:
                    async with session.post(url, json=data, headers=headers) as resp:
                        await resp.text()
            except Exception as e:
                # best-effort: a failing webhook must not break the notifier
                self.logger.info(repr(e))
            else:
                self.logger.info(f'Got Response for {addr}')
| mit | c00384fc13e7cc53c3ec959b0ee7355c | 42.774834 | 122 | 0.639637 | 4.050245 | false | false | false | false |
spesmilo/electrum | electrum/plugins/keepkey/qt.py | 1 | 23765 | from functools import partial
import threading
from PyQt5.QtCore import Qt, QEventLoop, pyqtSignal, QRegExp
from PyQt5.QtGui import QRegExpValidator
from PyQt5.QtWidgets import (QVBoxLayout, QLabel, QGridLayout, QPushButton,
QHBoxLayout, QButtonGroup, QGroupBox, QDialog,
QTextEdit, QLineEdit, QRadioButton, QCheckBox, QWidget,
QMessageBox, QFileDialog, QSlider, QTabWidget)
from electrum.gui.qt.util import (WindowModalDialog, WWLabel, Buttons, CancelButton,
OkButton, CloseButton)
from electrum.i18n import _
from electrum.plugin import hook
from electrum.util import bh2u
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from ..hw_wallet.plugin import only_hook_if_libraries_available
from .keepkey import KeepKeyPlugin, TIM_NEW, TIM_RECOVER, TIM_MNEMONIC
# User-facing help text shown in the KeepKey Qt wizard dialogs.
PASSPHRASE_HELP_SHORT =_(
    "Passphrases allow you to access new wallets, each "
    "hidden behind a particular case-sensitive passphrase.")
PASSPHRASE_HELP = PASSPHRASE_HELP_SHORT + " " + _(
    "You need to create a separate Electrum wallet for each passphrase "
    "you use as they each generate different addresses. Changing "
    "your passphrase does not lose other wallets, each is still "
    "accessible behind its own passphrase.")
RECOMMEND_PIN = _(
    "You should enable PIN protection. Your PIN is the only protection "
    "for your bitcoins if your device is lost or stolen.")
PASSPHRASE_NOT_PIN = _(
    "If you forget a passphrase you will be unable to access any "
    "bitcoins in the wallet behind it. A passphrase is not a PIN. "
    "Only change this if you are sure you understand it.")
# Instructions for the on-screen cipher used during seed recovery
# (shown above the character-entry widgets in CharacterDialog).
CHARACTER_RECOVERY = (
    "Use the recovery cipher shown on your device to input your seed words. "
    "The cipher changes with every keypress.\n"
    "After at most 4 letters the device will auto-complete a word.\n"
    "Press SPACE or the Accept Word button to accept the device's auto-"
    "completed word and advance to the next one.\n"
    "Press BACKSPACE to go back a character or word.\n"
    "Press ENTER or the Seed Entered button once the last word in your "
    "seed is auto-completed.")
class CharacterButton(QPushButton):
    """Push button that refuses key events, so Enter/Space reach the dialog."""

    def __init__(self, text=None):
        super().__init__(text)

    def keyPressEvent(self, event):
        # Decline the event; the parent dialog handles Enter and Space.
        event.setAccepted(False)
class CharacterDialog(WindowModalDialog):
    """Modal dialog for entering a seed via the KeepKey recovery cipher.

    The device driver repeatedly calls get_char(), which blocks on a local
    QEventLoop until the user produces a keypress; the result is left in
    self.data (None means the user cancelled).
    """
    def __init__(self, parent):
        super(CharacterDialog, self).__init__(parent)
        self.setWindowTitle(_("KeepKey Seed Recovery"))
        # Position within the current word (0..3) and word index (0-based).
        self.character_pos = 0
        self.word_pos = 0
        self.loop = QEventLoop()
        self.word_help = QLabel()
        self.char_buttons = []
        vbox = QVBoxLayout(self)
        vbox.addWidget(WWLabel(CHARACTER_RECOVERY))
        hbox = QHBoxLayout()
        hbox.addWidget(self.word_help)
        # One placeholder button per character slot of the current word.
        for i in range(4):
            char_button = CharacterButton('*')
            char_button.setMaximumWidth(36)
            self.char_buttons.append(char_button)
            hbox.addWidget(char_button)
        self.accept_button = CharacterButton(_("Accept Word"))
        self.accept_button.clicked.connect(partial(self.process_key, 32))
        self.rejected.connect(partial(self.loop.exit, 1))
        hbox.addWidget(self.accept_button)
        hbox.addStretch(1)
        vbox.addLayout(hbox)
        self.finished_button = QPushButton(_("Seed Entered"))
        self.cancel_button = QPushButton(_("Cancel"))
        self.finished_button.clicked.connect(partial(self.process_key,
                                                     Qt.Key_Return))
        self.cancel_button.clicked.connect(self.rejected)
        buttons = Buttons(self.finished_button, self.cancel_button)
        vbox.addSpacing(40)
        vbox.addLayout(buttons)
        self.refresh()
        self.show()
    def refresh(self):
        """Update labels and widget enablement for the current word/char position."""
        self.word_help.setText("Enter seed word %2d:" % (self.word_pos + 1))
        self.accept_button.setEnabled(self.character_pos >= 3)
        # "Seed Entered" only on the last word of a 12/18/24-word seed.
        self.finished_button.setEnabled((self.word_pos in (11, 17, 23)
                                         and self.character_pos >= 3))
        for n, button in enumerate(self.char_buttons):
            button.setEnabled(n == self.character_pos)
            if n == self.character_pos:
                button.setFocus()
    def is_valid_alpha_space(self, key):
        """Return True if *key* is an acceptable character at the current position."""
        # Auto-completion requires at least 3 characters
        if key == ord(' ') and self.character_pos >= 3:
            return True
        # Firmware aborts protocol if the 5th character is non-space
        if self.character_pos >= 4:
            return False
        return (key >= ord('a') and key <= ord('z')
                or (key >= ord('A') and key <= ord('Z')))
    def process_key(self, key):
        """Translate a keypress into self.data and wake the blocked get_char()."""
        self.data = None
        if key == Qt.Key_Return and self.finished_button.isEnabled():
            self.data = {'done': True}
        elif key == Qt.Key_Backspace and (self.word_pos or self.character_pos):
            self.data = {'delete': True}
        elif self.is_valid_alpha_space(key):
            self.data = {'character': chr(key).lower()}
        if self.data:
            self.loop.exit(0)
    def keyPressEvent(self, event):
        self.process_key(event.key())
        if not self.data:
            QDialog.keyPressEvent(self, event)
    def get_char(self, word_pos, character_pos):
        """Block until the user enters the character at (word_pos, character_pos)."""
        self.word_pos = word_pos
        self.character_pos = character_pos
        self.refresh()
        if self.loop.exec_():
            self.data = None  # User cancelled
class QtHandler(QtHandlerBase):
    """GUI-thread handler for KeepKey prompts.

    Device I/O runs on a worker thread; these signals marshal the dialog
    work onto the Qt thread while the worker blocks on self.done
    (clear/emit/wait here, set in the slot).
    """

    char_signal = pyqtSignal(object)
    pin_signal = pyqtSignal(object, object)
    close_char_dialog_signal = pyqtSignal()

    def __init__(self, win, pin_matrix_widget_class, device):
        super(QtHandler, self).__init__(win, device)
        self.char_signal.connect(self.update_character_dialog)
        self.pin_signal.connect(self.pin_dialog)
        self.close_char_dialog_signal.connect(self._close_char_dialog)
        self.pin_matrix_widget_class = pin_matrix_widget_class
        self.character_dialog = None

    def get_char(self, msg):
        # Worker thread: request one recovery character from the GUI and
        # block until the dialog has produced a response.
        self.done.clear()
        self.char_signal.emit(msg)
        self.done.wait()
        data = self.character_dialog.data
        if not data or 'done' in data:
            # Recovery finished or was cancelled: tear down the dialog
            # (via a signal, so it happens on the GUI thread).
            self.close_char_dialog_signal.emit()
        return data

    def _close_char_dialog(self):
        if self.character_dialog:
            self.character_dialog.accept()
            self.character_dialog = None

    def get_pin(self, msg, *, show_strength=True):
        # Worker thread: show the PIN matrix and block until answered.
        self.done.clear()
        self.pin_signal.emit(msg, show_strength)
        self.done.wait()
        return self.response

    def pin_dialog(self, msg, show_strength):
        # Needed e.g. when resetting a device
        self.clear_dialog()
        dialog = WindowModalDialog(self.top_level_window(), _("Enter PIN"))
        matrix = self.pin_matrix_widget_class(show_strength)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(msg))
        vbox.addWidget(matrix)
        vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
        self.response = str(matrix.get_value())
        self.done.set()

    def update_character_dialog(self, msg):
        # GUI thread: lazily create the recovery dialog, then request the
        # character position indicated by the device message.
        if not self.character_dialog:
            self.character_dialog = CharacterDialog(self.top_level_window())
        self.character_dialog.get_char(msg.word_pos, msg.character_pos)
        self.done.set()
class QtPlugin(QtPluginBase):
    # Derived classes must provide the following class-static variables:
    #   icon_file
    #   pin_matrix_widget_class

    @only_hook_if_libraries_available
    @hook
    def receive_menu(self, menu, addrs, wallet):
        """Add a 'Show on <device>' context-menu entry for a single address.

        One entry is added per matching hardware keystore in the wallet.
        """
        if len(addrs) != 1:
            return
        for keystore in wallet.get_keystores():
            if type(keystore) == self.keystore_class:
                # Bind the current keystore as a default argument: a plain
                # closure would late-bind the loop variable, making every
                # menu entry act on the *last* matching keystore.
                def show_address(keystore=keystore):
                    keystore.thread.add(partial(self.show_address, wallet, addrs[0], keystore))
                device_name = "{} ({})".format(self.device, keystore.label)
                menu.addAction(_("Show on {}").format(device_name), show_address)

    def show_settings_dialog(self, window, keystore):
        """Choose a device on the keystore thread, then open SettingsDialog."""
        def connect():
            device_id = self.choose_device(window, keystore)
            return device_id

        def show_dialog(device_id):
            if device_id:
                SettingsDialog(window, self, keystore, device_id).exec_()

        keystore.thread.add(connect, on_success=show_dialog)

    def request_trezor_init_settings(self, wizard, method, device):
        """Collect device-initialization settings from the user.

        Returns a tuple (item, label, pin, passphrase_enabled): for
        TIM_NEW/TIM_RECOVER, `item` is the checked seed-length radio id and
        `pin` a bool; otherwise `item` is the entered mnemonic/xprv text and
        `pin` the PIN string typed by the user.
        """
        vbox = QVBoxLayout()
        next_enabled = True

        # Device label entry.
        label = QLabel(_("Enter a label to name your device:"))
        name = QLineEdit()
        hl = QHBoxLayout()
        hl.addWidget(label)
        hl.addWidget(name)
        hl.addStretch(1)
        vbox.addLayout(hl)

        def clean_text(widget):
            # Normalize whitespace in a QTextEdit's contents.
            text = widget.toPlainText().strip()
            return ' '.join(text.split())

        if method in [TIM_NEW, TIM_RECOVER]:
            gb = QGroupBox()
            hbox1 = QHBoxLayout()
            gb.setLayout(hbox1)
            # KeepKey recovery doesn't need a word count
            if method == TIM_NEW:
                vbox.addWidget(gb)
            gb.setTitle(_("Select your seed length:"))
            bg = QButtonGroup()
            for i, count in enumerate([12, 18, 24]):
                rb = QRadioButton(gb)
                rb.setText(_("{} words").format(count))
                bg.addButton(rb)
                bg.setId(rb, i)
                hbox1.addWidget(rb)
                rb.setChecked(True)
            cb_pin = QCheckBox(_('Enable PIN protection'))
            cb_pin.setChecked(True)
        else:
            text = QTextEdit()
            text.setMaximumHeight(60)
            if method == TIM_MNEMONIC:
                msg = _("Enter your BIP39 mnemonic:")
            else:
                msg = _("Enter the master private key beginning with xprv:")

                # Gate the wizard's Next button on the text being a valid
                # xprv.
                def set_enabled():
                    from electrum.bip32 import is_xprv
                    wizard.next_button.setEnabled(is_xprv(clean_text(text)))
                text.textChanged.connect(set_enabled)
                next_enabled = False
            vbox.addWidget(QLabel(msg))
            vbox.addWidget(text)
            pin = QLineEdit()
            pin.setValidator(QRegExpValidator(QRegExp('[1-9]{0,9}')))
            pin.setMaximumWidth(100)
            hbox_pin = QHBoxLayout()
            hbox_pin.addWidget(QLabel(_("Enter your PIN (digits 1-9):")))
            hbox_pin.addWidget(pin)
            hbox_pin.addStretch(1)

        if method in [TIM_NEW, TIM_RECOVER]:
            vbox.addWidget(WWLabel(RECOMMEND_PIN))
            vbox.addWidget(cb_pin)
        else:
            vbox.addLayout(hbox_pin)

        # Passphrase option, common to all methods.
        passphrase_msg = WWLabel(PASSPHRASE_HELP_SHORT)
        passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
        passphrase_warning.setStyleSheet("color: red")
        cb_phrase = QCheckBox(_('Enable passphrases'))
        cb_phrase.setChecked(False)
        vbox.addWidget(passphrase_msg)
        vbox.addWidget(passphrase_warning)
        vbox.addWidget(cb_phrase)

        wizard.exec_layout(vbox, next_enabled=next_enabled)

        if method in [TIM_NEW, TIM_RECOVER]:
            item = bg.checkedId()
            pin = cb_pin.isChecked()
        else:
            item = ' '.join(str(clean_text(text)).split())
            pin = str(pin.text())

        return (item, name.text(), pin, cb_phrase.isChecked())
class Plugin(KeepKeyPlugin, QtPlugin):
    """KeepKey plugin wired up with its Qt-specific pieces."""

    icon_paired = "keepkey.png"
    icon_unpaired = "keepkey_unpaired.png"

    def create_handler(self, window):
        """Return a QtHandler bound to *window* for this device."""
        return QtHandler(window, self.pin_matrix_widget_class(), self.device)

    @classmethod
    def pin_matrix_widget_class(cls):
        # PEP 8: a classmethod's first parameter is `cls`, not `self`.
        # Imported lazily: keepkeylib's Qt module is only needed when a
        # PIN matrix is actually shown.
        from keepkeylib.qt.pinmatrix import PinMatrixWidget
        return PinMatrixWidget
class SettingsDialog(WindowModalDialog):
    '''This dialog doesn't require a device be paired with a wallet.
    We want users to be able to wipe a device even if they've forgotten
    their PIN.'''

    def __init__(self, window, plugin, keystore, device_id):
        # window: parent Electrum window; plugin: the device plugin
        # instance; keystore: hardware keystore (provides handler/thread);
        # device_id: id used to look the client up in the device manager.
        title = _("{} Settings").format(plugin.device)
        super(SettingsDialog, self).__init__(window, title)
        self.setMaximumWidth(540)

        devmgr = plugin.device_manager()
        config = devmgr.config
        # NOTE(review): 'handler' appears unused in this method.
        handler = keystore.handler
        thread = keystore.thread

        def invoke_client(method, *args, **kw_args):
            # Run a client method on the keystore thread and refresh the
            # displayed features on success.  method=None just re-reads
            # the features; unpair_after=True forgets the device afterwards.
            unpair_after = kw_args.pop('unpair_after', False)

            def task():
                client = devmgr.client_by_id(device_id)
                if not client:
                    raise RuntimeError("Device not connected")
                if method:
                    getattr(client, method)(*args, **kw_args)
                if unpair_after:
                    devmgr.unpair_id(device_id)
                return client.features

            thread.add(task, on_success=update)

        def update(features):
            # Refresh every label/button from freshly read device features.
            self.features = features
            set_label_enabled()
            bl_hash = bh2u(features.bootloader_hash)
            bl_hash = "\n".join([bl_hash[:32], bl_hash[32:]])
            noyes = [_("No"), _("Yes")]
            endis = [_("Enable Passphrases"), _("Disable Passphrases")]
            disen = [_("Disabled"), _("Enabled")]
            setchange = [_("Set a PIN"), _("Change PIN")]
            version = "%d.%d.%d" % (features.major_version,
                                    features.minor_version,
                                    features.patch_version)
            coins = ", ".join(coin.coin_name for coin in features.coins)

            device_label.setText(features.label)
            pin_set_label.setText(noyes[features.pin_protection])
            passphrases_label.setText(disen[features.passphrase_protection])
            bl_hash_label.setText(bl_hash)
            label_edit.setText(features.label)
            device_id_label.setText(features.device_id)
            initialized_label.setText(noyes[features.initialized])
            version_label.setText(version)
            coins_label.setText(coins)
            # The "clear PIN" controls only make sense when a PIN is set.
            clear_pin_button.setVisible(features.pin_protection)
            clear_pin_warning.setVisible(features.pin_protection)
            pin_button.setText(setchange[features.pin_protection])
            pin_msg.setVisible(not features.pin_protection)
            passphrase_button.setText(endis[features.passphrase_protection])
            language_label.setText(features.language)

        def set_label_enabled():
            # Only allow "Apply" when the edited label actually differs.
            label_apply.setEnabled(label_edit.text() != self.features.label)

        def rename():
            invoke_client('change_label', label_edit.text())

        def toggle_passphrase():
            title = _("Confirm Toggle Passphrase Protection")
            currently_enabled = self.features.passphrase_protection
            if currently_enabled:
                msg = _("After disabling passphrases, you can only pair this "
                        "Electrum wallet if it had an empty passphrase. "
                        "If its passphrase was not empty, you will need to "
                        "create a new wallet with the install wizard. You "
                        "can use this wallet again at any time by re-enabling "
                        "passphrases and entering its passphrase.")
            else:
                msg = _("Your current Electrum wallet can only be used with "
                        "an empty passphrase. You must create a separate "
                        "wallet with the install wizard for other passphrases "
                        "as each one generates a new set of addresses.")
            msg += "\n\n" + _("Are you sure you want to proceed?")
            if not self.question(msg, title=title):
                return
            # Disabling passphrases invalidates the pairing, so unpair then.
            invoke_client('toggle_passphrase', unpair_after=currently_enabled)

        def set_pin():
            invoke_client('set_pin', remove=False)

        def clear_pin():
            invoke_client('set_pin', remove=True)

        def wipe_device():
            # Extra confirmation if the open wallet still holds funds.
            wallet = window.wallet
            if wallet and sum(wallet.get_balance()):
                title = _("Confirm Device Wipe")
                msg = _("Are you SURE you want to wipe the device?\n"
                        "Your wallet still has bitcoins in it!")
                if not self.question(msg, title=title,
                                     icon=QMessageBox.Critical):
                    return
            invoke_client('wipe_device', unpair_after=True)

        def slider_moved():
            mins = timeout_slider.sliderPosition()
            timeout_minutes.setText(_("{:2d} minutes").format(mins))

        def slider_released():
            # Config stores the timeout in seconds; the slider is minutes.
            config.set_session_timeout(timeout_slider.sliderPosition() * 60)

        # Information tab
        info_tab = QWidget()
        info_layout = QVBoxLayout(info_tab)
        info_glayout = QGridLayout()
        info_glayout.setColumnStretch(2, 1)
        device_label = QLabel()
        pin_set_label = QLabel()
        passphrases_label = QLabel()
        version_label = QLabel()
        device_id_label = QLabel()
        bl_hash_label = QLabel()
        bl_hash_label.setWordWrap(True)
        coins_label = QLabel()
        coins_label.setWordWrap(True)
        language_label = QLabel()
        initialized_label = QLabel()
        rows = [
            (_("Device Label"), device_label),
            (_("PIN set"), pin_set_label),
            (_("Passphrases"), passphrases_label),
            (_("Firmware Version"), version_label),
            (_("Device ID"), device_id_label),
            (_("Bootloader Hash"), bl_hash_label),
            (_("Supported Coins"), coins_label),
            (_("Language"), language_label),
            (_("Initialized"), initialized_label),
        ]
        for row_num, (label, widget) in enumerate(rows):
            info_glayout.addWidget(QLabel(label), row_num, 0)
            info_glayout.addWidget(widget, row_num, 1)
        info_layout.addLayout(info_glayout)

        # Settings tab
        settings_tab = QWidget()
        settings_layout = QVBoxLayout(settings_tab)
        settings_glayout = QGridLayout()

        # Settings tab - Label
        label_msg = QLabel(_("Name this {}. If you have multiple devices "
                             "their labels help distinguish them.")
                           .format(plugin.device))
        label_msg.setWordWrap(True)
        label_label = QLabel(_("Device Label"))
        label_edit = QLineEdit()
        label_edit.setMinimumWidth(150)
        label_edit.setMaxLength(plugin.MAX_LABEL_LEN)
        label_apply = QPushButton(_("Apply"))
        label_apply.clicked.connect(rename)
        label_edit.textChanged.connect(set_label_enabled)
        settings_glayout.addWidget(label_label, 0, 0)
        settings_glayout.addWidget(label_edit, 0, 1, 1, 2)
        settings_glayout.addWidget(label_apply, 0, 3)
        settings_glayout.addWidget(label_msg, 1, 1, 1, -1)

        # Settings tab - PIN
        pin_label = QLabel(_("PIN Protection"))
        pin_button = QPushButton()
        pin_button.clicked.connect(set_pin)
        settings_glayout.addWidget(pin_label, 2, 0)
        settings_glayout.addWidget(pin_button, 2, 1)
        pin_msg = QLabel(_("PIN protection is strongly recommended. "
                           "A PIN is your only protection against someone "
                           "stealing your bitcoins if they obtain physical "
                           "access to your {}.").format(plugin.device))
        pin_msg.setWordWrap(True)
        pin_msg.setStyleSheet("color: red")
        settings_glayout.addWidget(pin_msg, 3, 1, 1, -1)

        # Settings tab - Session Timeout
        timeout_label = QLabel(_("Session Timeout"))
        timeout_minutes = QLabel()
        timeout_slider = QSlider(Qt.Horizontal)
        timeout_slider.setRange(1, 60)
        timeout_slider.setSingleStep(1)
        timeout_slider.setTickInterval(5)
        timeout_slider.setTickPosition(QSlider.TicksBelow)
        timeout_slider.setTracking(True)
        timeout_msg = QLabel(
            _("Clear the session after the specified period "
              "of inactivity. Once a session has timed out, "
              "your PIN and passphrase (if enabled) must be "
              "re-entered to use the device."))
        timeout_msg.setWordWrap(True)
        timeout_slider.setSliderPosition(config.get_session_timeout() // 60)
        slider_moved()
        timeout_slider.valueChanged.connect(slider_moved)
        timeout_slider.sliderReleased.connect(slider_released)
        settings_glayout.addWidget(timeout_label, 6, 0)
        settings_glayout.addWidget(timeout_slider, 6, 1, 1, 3)
        settings_glayout.addWidget(timeout_minutes, 6, 4)
        settings_glayout.addWidget(timeout_msg, 7, 1, 1, -1)
        settings_layout.addLayout(settings_glayout)
        settings_layout.addStretch(1)

        # Advanced tab
        advanced_tab = QWidget()
        advanced_layout = QVBoxLayout(advanced_tab)
        advanced_glayout = QGridLayout()

        # Advanced tab - clear PIN
        clear_pin_button = QPushButton(_("Disable PIN"))
        clear_pin_button.clicked.connect(clear_pin)
        clear_pin_warning = QLabel(
            _("If you disable your PIN, anyone with physical access to your "
              "{} device can spend your bitcoins.").format(plugin.device))
        clear_pin_warning.setWordWrap(True)
        clear_pin_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(clear_pin_button, 0, 2)
        advanced_glayout.addWidget(clear_pin_warning, 1, 0, 1, 5)

        # Advanced tab - toggle passphrase protection
        passphrase_button = QPushButton()
        passphrase_button.clicked.connect(toggle_passphrase)
        passphrase_msg = WWLabel(PASSPHRASE_HELP)
        passphrase_warning = WWLabel(PASSPHRASE_NOT_PIN)
        passphrase_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(passphrase_button, 3, 2)
        advanced_glayout.addWidget(passphrase_msg, 4, 0, 1, 5)
        advanced_glayout.addWidget(passphrase_warning, 5, 0, 1, 5)

        # Advanced tab - wipe device
        wipe_device_button = QPushButton(_("Wipe Device"))
        wipe_device_button.clicked.connect(wipe_device)
        wipe_device_msg = QLabel(
            _("Wipe the device, removing all data from it. The firmware "
              "is left unchanged."))
        wipe_device_msg.setWordWrap(True)
        wipe_device_warning = QLabel(
            _("Only wipe a device if you have the recovery seed written down "
              "and the device wallet(s) are empty, otherwise the bitcoins "
              "will be lost forever."))
        wipe_device_warning.setWordWrap(True)
        wipe_device_warning.setStyleSheet("color: red")
        advanced_glayout.addWidget(wipe_device_button, 6, 2)
        advanced_glayout.addWidget(wipe_device_msg, 7, 0, 1, 5)
        advanced_glayout.addWidget(wipe_device_warning, 8, 0, 1, 5)
        advanced_layout.addLayout(advanced_glayout)
        advanced_layout.addStretch(1)

        tabs = QTabWidget(self)
        tabs.addTab(info_tab, _("Information"))
        tabs.addTab(settings_tab, _("Settings"))
        tabs.addTab(advanced_tab, _("Advanced"))

        dialog_vbox = QVBoxLayout(self)
        dialog_vbox.addWidget(tabs)
        dialog_vbox.addLayout(Buttons(CloseButton(self)))

        # Update information
        invoke_client(None)
| mit | 45124adb215441b7ea95f3f7e30e2fe3 | 40.402439 | 95 | 0.597686 | 4.033435 | false | false | false | false |
spesmilo/electrum | electrum/plugins/jade/qt.py | 1 | 1301 | from functools import partial
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtWidgets import QLabel, QVBoxLayout
from electrum.i18n import _
from electrum.plugin import hook
from electrum.wallet import Standard_Wallet
from electrum.gui.qt.util import WindowModalDialog
from .jade import JadePlugin
from ..hw_wallet.qt import QtHandlerBase, QtPluginBase
from ..hw_wallet.plugin import only_hook_if_libraries_available
class Plugin(JadePlugin, QtPluginBase):
icon_unpaired = "jade_unpaired.png"
icon_paired = "jade.png"
def create_handler(self, window):
return Jade_Handler(window)
@only_hook_if_libraries_available
@hook
def receive_menu(self, menu, addrs, wallet):
if type(wallet) is not Standard_Wallet:
return
keystore = wallet.get_keystore()
if type(keystore) == self.keystore_class and len(addrs) == 1:
def show_address():
keystore.thread.add(partial(self.show_address, wallet, addrs[0]))
menu.addAction(_("Show on Jade"), show_address)
class Jade_Handler(QtHandlerBase):
    # Signals used to marshal device prompts onto the Qt GUI thread.
    setup_signal = pyqtSignal()
    auth_signal = pyqtSignal(object, object)

    # Window title used for status message dialogs.
    MESSAGE_DIALOG_TITLE = _("Jade Status")

    def __init__(self, win):
        super(Jade_Handler, self).__init__(win, 'Jade')
| mit | 65f0e273682da96e8660a84e737d837d | 30.731707 | 81 | 0.692544 | 3.574176 | false | false | false | false |
spesmilo/electrum | electrum/gui/qt/qrreader/qtmultimedia/crop_blur_effect.py | 4 | 2982 | #!/usr/bin/env python3
#
# Electron Cash - lightweight Bitcoin client
# Copyright (C) 2019 Axel Gembe <derago@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from PyQt5.QtWidgets import QGraphicsBlurEffect, QGraphicsEffect
from PyQt5.QtGui import QPainter, QTransform, QRegion
from PyQt5.QtCore import QObject, QRect, QPoint, Qt
class QrReaderCropBlurEffect(QGraphicsBlurEffect):
    """Blur (and optionally darken) everything outside a rectangular crop,
    then repaint the crop itself sharp from the unfiltered source pixmap."""

    CROP_OFFSET_ENABLED = False
    CROP_OFFSET = QPoint(5, 5)
    BLUR_DARKEN = 0.25
    BLUR_RADIUS = 8

    def __init__(self, parent: QObject, crop: QRect = None):
        super().__init__(parent)
        self.crop = crop
        self.setBlurRadius(self.BLUR_RADIUS)

    def setCrop(self, crop: QRect = None):
        self.crop = crop

    def draw(self, painter: QPainter):
        assert self.crop, 'crop must be set'

        # Partition the viewport into the crop rectangle and the rest.
        viewport_region = QRegion(painter.viewport())
        kept_region = QRegion(self.crop)
        outside_region = viewport_region.subtracted(kept_region)

        # Limit the blur effect's painting to the area outside the crop.
        painter.setClipRegion(outside_region)

        # Darken the blurred area: pre-fill with black, then paint the
        # blur over it at reduced opacity.
        if self.BLUR_DARKEN > 0.0:
            painter.fillRect(painter.viewport(), Qt.black)
            painter.setOpacity(1 - self.BLUR_DARKEN)

        # Render the blurred background.
        super().draw(painter)

        # Undo clipping and opacity before drawing the sharp crop area.
        painter.setClipping(False)
        painter.setOpacity(1.0)

        # Grab the unfiltered source pixmap in device coordinates.
        pixmap, offset = self.sourcePixmap(Qt.DeviceCoordinates, QGraphicsEffect.NoPad)
        painter.setWorldTransform(QTransform())

        # Copy the crop rectangle (optionally shifted) straight from the
        # source so it stays sharp.
        source_rect = self.crop
        if self.CROP_OFFSET_ENABLED:
            source_rect = source_rect.translated(self.CROP_OFFSET)
        painter.drawPixmap(self.crop.topLeft() + offset, pixmap, source_rect)
| mit | 61dd3c2bd40569e24845651223814b3d | 37.727273 | 87 | 0.705567 | 3.867704 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.