hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c2c346b926e965849966de02518eb85033da42d | 4,375 | py | Python | aoc/day22.py | martinhenstridge/adventofcode2021 | f9fa76fd91f13abab9307794e30461033a470eca | [
"MIT"
] | null | null | null | aoc/day22.py | martinhenstridge/adventofcode2021 | f9fa76fd91f13abab9307794e30461033a470eca | [
"MIT"
] | null | null | null | aoc/day22.py | martinhenstridge/adventofcode2021 | f9fa76fd91f13abab9307794e30461033a470eca | [
"MIT"
] | null | null | null | import re
from typing import NamedTuple
from . import util
class Cuboid(NamedTuple):
    """An axis-aligned box of unit cells with inclusive integer bounds."""

    xmin: int
    xmax: int
    ymin: int
    ymax: int
    zmin: int
    zmax: int

    @property
    def count(self):
        """Number of unit cells contained in this cuboid (bounds inclusive)."""
        total = 1
        for span in (
            self.xmax - self.xmin + 1,
            self.ymax - self.ymin + 1,
            self.zmax - self.zmin + 1,
        ):
            total *= span
        return total

    def within(self, lo, hi):
        """Return True if every coordinate of this cuboid lies in [lo, hi]."""
        return all(lo <= coord <= hi for coord in self)

    def overlaps(self, other):
        """Return True if this cuboid and `other` share at least one cell."""
        disjoint = (
            self.xmin > other.xmax or self.xmax < other.xmin
            or self.ymin > other.ymax or self.ymax < other.ymin
            or self.zmin > other.zmax or self.zmax < other.zmin
        )
        return not disjoint

    def split(self, other):
        """Split `other` into cuboids not overlapping `self`.

        Returns a set of cuboids that together cover exactly the cells of
        `other` lying outside `self`.
        """
        outside = set()
        inside = {other}
        # Slice along x, then y, then z; pieces still overlapping `self`
        # after one axis are carried forward and sliced along the next.
        for lo_idx, hi_idx in ((0, 1), (2, 3), (4, 5)):
            pending, inside = inside, set()
            for piece in pending:
                hit, missed = self._split_axis(piece, lo_idx, hi_idx)
                inside.update(hit)
                outside.update(missed)
        return outside

    def _split_axis(self, other, imin, imax):
        """Split `other` along a single axis of `self`.

        Returns (hitting, missing): pieces of `other` whose extent on this
        axis overlaps `self`, and pieces falling entirely outside it.
        """
        lo_field = self._fields[imin]
        hi_field = self._fields[imax]
        if self[imin] > other[imax] or self[imax] < other[imin]:
            # No overlap at all on this axis.
            return [], [other]
        if self[imin] <= other[imin] and self[imax] >= other[imax]:
            # `other` is fully subsumed by `self` on this axis.
            return [other], []
        if self[imin] <= other[imin]:
            # Only self's upper bound cuts through `other`.
            return (
                [other._replace(**{hi_field: self[imax]})],
                [other._replace(**{lo_field: self[imax] + 1})],
            )
        if self[imax] >= other[imax]:
            # Only self's lower bound cuts through `other`.
            return (
                [other._replace(**{lo_field: self[imin]})],
                [other._replace(**{hi_field: self[imin] - 1})],
            )
        # Both of self's bounds cut through `other`: the middle piece hits,
        # the two flanking pieces miss.
        return (
            [other._replace(**{lo_field: self[imin], hi_field: self[imax]})],
            [
                other._replace(**{hi_field: self[imin] - 1}),
                other._replace(**{lo_field: self[imax] + 1}),
            ],
        )
def get_reboot_steps(lines):
    """Parse reboot-step lines into a list of (turn_on, Cuboid) pairs.

    Lines not matching the ``on/off x=a..b,y=c..d,z=e..f`` format are
    silently skipped.
    """
    pattern = re.compile(
        r"(on|off)"
        r" x=(\-?\d+)\.\.(\-?\d+)"
        r",y=(\-?\d+)\.\.(\-?\d+)"
        r",z=(\-?\d+)\.\.(\-?\d+)"
    )
    steps = []
    for line in lines:
        match = pattern.fullmatch(line)
        if match is None:
            continue
        # Groups 2..7 are the six cuboid bounds, in Cuboid field order.
        bounds = [int(group) for group in match.groups()[1:]]
        steps.append((match[1] == "on", Cuboid(*bounds)))
    return steps
def reboot(steps):
    """Apply the reboot steps in order; return the number of cells left on.

    `cuboids` always holds a set of mutually disjoint "on" regions.  Each
    step first carves its own footprint out of every existing region, then
    (for an "on" step) adds itself back as one whole cuboid.
    """
    cuboids = set()
    for turnon, cuboid in steps:
        survivors = set()
        for existing in cuboids:
            if cuboid.overlaps(existing):
                # Keep only the parts of `existing` outside `cuboid`.
                survivors.update(cuboid.split(existing))
            else:
                survivors.add(existing)
        if turnon:
            survivors.add(cuboid)
        cuboids = survivors
    return sum(region.count for region in cuboids)
def run():
    """Solve day 22: lit-cell counts for the init region and the full space."""
    lines = util.get_input_lines("22.txt")
    steps = get_reboot_steps(lines)
    # Part 1 restricts the steps to the -50..50 initialization region.
    init_region = [step for step in steps if step[1].within(-50, 50)]
    return reboot(init_region), reboot(steps)
| 29.362416 | 102 | 0.511086 | import re
from typing import NamedTuple
from . import util
class Cuboid(NamedTuple):
    """An axis-aligned box of unit cells; all six bounds are inclusive."""

    xmin: int
    xmax: int
    ymin: int
    ymax: int
    zmin: int
    zmax: int

    @property
    def count(self):
        """Number of unit cells in this cuboid (bounds inclusive, hence +1)."""
        return (
            (1 + self.xmax - self.xmin)
            * (1 + self.ymax - self.ymin)
            * (1 + self.zmax - self.zmin)
        )

    def within(self, lo, hi):
        """Return True if every coordinate of this cuboid lies in [lo, hi]."""
        if self.xmin < lo or self.xmin > hi:
            return False
        if self.xmax < lo or self.xmax > hi:
            return False
        if self.ymin < lo or self.ymin > hi:
            return False
        if self.ymax < lo or self.ymax > hi:
            return False
        if self.zmin < lo or self.zmin > hi:
            return False
        if self.zmax < lo or self.zmax > hi:
            return False
        return True

    def overlaps(self, other):
        """Return True if this cuboid and `other` share at least one cell."""
        if self.xmin > other.xmax:
            return False
        if self.xmax < other.xmin:
            return False
        if self.ymin > other.ymax:
            return False
        if self.ymax < other.ymin:
            return False
        if self.zmin > other.zmax:
            return False
        if self.zmax < other.zmin:
            return False
        return True

    def split(self, other):
        """Split `other` into cuboids not overlapping `self`.

        Returns a set of cuboids covering exactly the cells of `other`
        that lie outside `self`.
        """
        missing = set()
        hitting = {other}
        # Slice along x, then y, then z; pieces still overlapping `self`
        # are carried forward to the next axis.
        for imin, imax in [(0, 1), (2, 3), (4, 5)]:
            candidates = hitting.copy()
            hitting = set()
            for candidate in candidates:
                _hitting, _missing = self._split_axis(candidate, imin, imax)
                hitting.update(_hitting)
                missing.update(_missing)
        return missing

    def _split_axis(self, other, imin, imax):
        """Split `other` along one axis of `self`; return (hitting, missing)."""
        if self[imin] > other[imax]:
            # Missing entirely on the right side.
            return [], [other]
        if self[imax] < other[imin]:
            # Missing entirely on the left side.
            return [], [other]
        if self[imin] <= other[imin] and self[imax] >= other[imax]:
            # `other` fully subsumed on this axis.
            return [other], []
        if self[imin] <= other[imin]:
            # self[imax] falls in the middle of `other`.
            hitting = [other._replace(**{self._fields[imax]: self[imax]})]
            missing = [other._replace(**{self._fields[imin]: self[imax] + 1})]
            return hitting, missing
        if self[imax] >= other[imax]:
            # self[imin] falls in the middle of `other`.
            hitting = [other._replace(**{self._fields[imin]: self[imin]})]
            missing = [other._replace(**{self._fields[imax]: self[imin] - 1})]
            return hitting, missing
        # Both self[imin] and self[imax] fall in the middle of `other`.
        hitting = [other._replace(**{self._fields[imin]: self[imin], self._fields[imax]: self[imax]})]
        missing = [
            other._replace(**{self._fields[imax]: self[imin] - 1}),
            other._replace(**{self._fields[imin]: self[imax] + 1}),
        ]
        return hitting, missing
def get_reboot_steps(lines):
    """Parse input lines into a list of (turnon, Cuboid) steps.

    Lines that do not match ``on/off x=a..b,y=c..d,z=e..f`` are silently
    ignored.
    """
    regex = (
        r"(on|off)"
        r" x=(\-?\d+)\.\.(\-?\d+)"
        r",y=(\-?\d+)\.\.(\-?\d+)"
        r",z=(\-?\d+)\.\.(\-?\d+)"
    )
    steps = []
    for line in lines:
        if match := re.fullmatch(regex, line):
            turnon = (match[1] == "on")
            # Groups 2..7 are the six bounds, in Cuboid field order.
            cuboid = Cuboid(
                int(match[2]),
                int(match[3]),
                int(match[4]),
                int(match[5]),
                int(match[6]),
                int(match[7]),
            )
            steps.append((turnon, cuboid))
    return steps
def reboot(steps):
    """Apply the reboot steps and return how many cells end up on.

    `cuboids` is always a set of mutually disjoint "on" regions: each new
    step carves its footprint out of every existing region before (for an
    "on" step) adding itself back as one whole cuboid.
    """
    cuboids = set()
    for turnon, cuboid in steps:
        updated = set()
        for existing in cuboids:
            if not cuboid.overlaps(existing):
                updated.add(existing)
            else:
                # Keep only the parts of `existing` outside `cuboid`.
                for new in cuboid.split(existing):
                    updated.add(new)
        if turnon:
            updated.add(cuboid)
        cuboids = updated
    return sum(c.count for c in cuboids)
def run():
    """Solve day 22: lit-cell counts for the init region and the full space."""
    inputlines = util.get_input_lines("22.txt")
    steps = get_reboot_steps(inputlines)
    # Part 1 restricts the steps to the -50..50 initialization region.
    init_count = reboot([s for s in steps if s[1].within(-50, 50)])
    full_count = reboot(steps)
    return init_count, full_count
| true | true |
1c2c3481f1407d2b1c0479de6b385d4aac674d9f | 3,749 | py | Python | SprityBird/spritybird/python3.5/lib/python3.5/site-packages/objc/_convenience_nsdecimal.py | MobileAnalytics/iPython-Framework | da0e598308c067cd5c5290a6364b3ffaf2d2418f | [
"MIT"
] | 4 | 2018-07-04T17:20:12.000Z | 2019-07-14T18:07:25.000Z | SprityBird/spritybird/python3.5/lib/python3.5/site-packages/objc/_convenience_nsdecimal.py | MobileAnalytics/iPython-Framework | da0e598308c067cd5c5290a6364b3ffaf2d2418f | [
"MIT"
] | null | null | null | SprityBird/spritybird/python3.5/lib/python3.5/site-packages/objc/_convenience_nsdecimal.py | MobileAnalytics/iPython-Framework | da0e598308c067cd5c5290a6364b3ffaf2d2418f | [
"MIT"
] | 1 | 2018-09-03T03:02:06.000Z | 2018-09-03T03:02:06.000Z | """
Support for NSDecimalNumber.
The actual class is defined in Foundation, but having the wrapper
here is much more convenient.
"""
__all__ = ()
from objc._convenience import addConvenienceForClass
from _objc import lookUpClass, NSDecimal
import sys
import operator
NSDecimalNumber = lookUpClass('NSDecimalNumber')
def decimal_new(cls, value=None):
    """``__new__`` replacement for NSDecimalNumber.

    Builds an instance from None (zero), NSDecimal, NSDecimalNumber,
    float, str, or an integer; any other value raises TypeError.
    """
    if value is None:
        return cls.numberWithInt_(0)
    if isinstance(value, NSDecimal):
        return cls.decimalNumberWithDecimal_(value)
    if isinstance(value, NSDecimalNumber):
        return cls.decimalNumberWithDecimal_(value.decimalValue())
    if isinstance(value, float):
        return cls.numberWithDouble_(value)
    if isinstance(value, str):
        return cls.decimalNumberWithDecimal_(NSDecimal(value))
    # The value is either an integer, or invalid (in which case the
    # bridge rejects it and we surface a TypeError).
    try:
        return cls.numberWithLongLong_(value)
    except ValueError:
        raise TypeError("Value is not a number")
# Attach the full Python numeric protocol to NSDecimalNumber.  Arithmetic
# round-trips through NSDecimal (which implements the actual decimal math)
# and wraps the result back into an NSDecimalNumber; comparisons delegate
# to the NSDecimal value directly.
addConvenienceForClass('NSDecimalNumber', (
    ('__new__', staticmethod(decimal_new)),
    ('__add__', lambda self, other: NSDecimalNumber(operator.add(NSDecimal(self), other))),
    ('__radd__', lambda self, other: NSDecimalNumber(operator.add(other, NSDecimal(self)))),
    ('__sub__', lambda self, other: NSDecimalNumber(operator.sub(NSDecimal(self), other))),
    ('__rsub__', lambda self, other: NSDecimalNumber(operator.sub(other, NSDecimal(self)))),
    ('__mul__', lambda self, other: NSDecimalNumber(operator.mul(NSDecimal(self), other))),
    ('__rmul__', lambda self, other: NSDecimalNumber(operator.mul(other, NSDecimal(self)))),
    ('__truediv__', lambda self, other: NSDecimalNumber(operator.truediv(NSDecimal(self), other))),
    ('__rtruediv__', lambda self, other: NSDecimalNumber(operator.truediv(other, NSDecimal(self)))),
    ('__floordiv__', lambda self, other: NSDecimalNumber(operator.floordiv(NSDecimal(self), other))),
    ('__rfloordiv__', lambda self, other: NSDecimalNumber(operator.floordiv(other, NSDecimal(self)))),
    ('__mod__', lambda self, other: NSDecimalNumber(operator.mod(NSDecimal(self), other))),
    ('__rmod__', lambda self, other: NSDecimalNumber(operator.mod(other, NSDecimal(self)))),
    ('__neg__', lambda self: NSDecimalNumber(operator.neg(NSDecimal(self)))),
    ('__pos__', lambda self: NSDecimalNumber(operator.pos(NSDecimal(self)))),
    ('__abs__', lambda self: NSDecimalNumber(abs(NSDecimal(self)))),
    ('__lt__', lambda self, other: (NSDecimal(self) < other)),
    ('__gt__', lambda self, other: (NSDecimal(self) > other)),
    ('__le__', lambda self, other: (NSDecimal(self) <= other)),
    ('__ge__', lambda self, other: (NSDecimal(self) >= other)),
    ('__eq__', lambda self, other: (NSDecimal(self) == other)),
    ('__ne__', lambda self, other: (NSDecimal(self) != other)),
))
if sys.version_info[0] == 2:  # pragma: no 3.x cover
    # Python 2 only: old-style division and three-way comparison.
    addConvenienceForClass('NSDecimalNumber', (
        ('__div__', lambda self, other: NSDecimalNumber(operator.div(NSDecimal(self), other))),
        ('__rdiv__', lambda self, other: NSDecimalNumber(operator.div(other, NSDecimal(self)))),
        # Fixed: cmp() must compare the NSDecimal value with `other`; the
        # previous code built NSDecimalNumber(NSDecimal(self), other) —
        # passing two arguments to the constructor — and cmp()'d that one
        # object, which is a TypeError at call time.
        ('__cmp__', lambda self, other: cmp(NSDecimal(self), other)),
    ))
else:  # pragma: no 2.x cover
    addConvenienceForClass('NSDecimalNumber', (
        ('__round__', lambda self, n=0: NSDecimalNumber(round(NSDecimal(self), n))),
    ))
| 49.986667 | 104 | 0.65004 | __all__ = ()
from objc._convenience import addConvenienceForClass
from _objc import lookUpClass, NSDecimal
import sys
import operator
NSDecimalNumber = lookUpClass('NSDecimalNumber')
def decimal_new(cls, value=None):
    """``__new__`` implementation for NSDecimalNumber.

    Accepts None (zero), NSDecimal, NSDecimalNumber, float, str, or an
    integer; any other value raises TypeError.
    """
    if value is None:
        return cls.numberWithInt_(0)
    else:
        if isinstance(value, NSDecimal):
            return cls.decimalNumberWithDecimal_(value)
        elif isinstance(value, NSDecimalNumber):
            return cls.decimalNumberWithDecimal_(value.decimalValue())
        elif isinstance(value, float):
            return cls.numberWithDouble_(value)
        elif isinstance(value, str):
            value = NSDecimal(value)
            return cls.decimalNumberWithDecimal_(value)
        else:
            # Either an integer, or an invalid value the bridge rejects.
            try:
                return cls.numberWithLongLong_(value)
            except ValueError:
                raise TypeError("Value is not a number")
# Attach the full Python numeric protocol to NSDecimalNumber.  Arithmetic
# is delegated to NSDecimal (the actual decimal math) and the result is
# wrapped back into an NSDecimalNumber; comparisons use NSDecimal directly.
addConvenienceForClass('NSDecimalNumber', (
    ('__new__', staticmethod(decimal_new)),
    ('__add__', lambda self, other: NSDecimalNumber(operator.add(NSDecimal(self), other))),
    ('__radd__', lambda self, other: NSDecimalNumber(operator.add(other, NSDecimal(self)))),
    ('__sub__', lambda self, other: NSDecimalNumber(operator.sub(NSDecimal(self), other))),
    ('__rsub__', lambda self, other: NSDecimalNumber(operator.sub(other, NSDecimal(self)))),
    ('__mul__', lambda self, other: NSDecimalNumber(operator.mul(NSDecimal(self), other))),
    ('__rmul__', lambda self, other: NSDecimalNumber(operator.mul(other, NSDecimal(self)))),
    ('__truediv__', lambda self, other: NSDecimalNumber(operator.truediv(NSDecimal(self), other))),
    ('__rtruediv__', lambda self, other: NSDecimalNumber(operator.truediv(other, NSDecimal(self)))),
    ('__floordiv__', lambda self, other: NSDecimalNumber(operator.floordiv(NSDecimal(self), other))),
    ('__rfloordiv__', lambda self, other: NSDecimalNumber(operator.floordiv(other, NSDecimal(self)))),
    ('__mod__', lambda self, other: NSDecimalNumber(operator.mod(NSDecimal(self), other))),
    ('__rmod__', lambda self, other: NSDecimalNumber(operator.mod(other, NSDecimal(self)))),
    ('__neg__', lambda self: NSDecimalNumber(operator.neg(NSDecimal(self)))),
    ('__pos__', lambda self: NSDecimalNumber(operator.pos(NSDecimal(self)))),
    ('__abs__', lambda self: NSDecimalNumber(abs(NSDecimal(self)))),
    ('__lt__', lambda self, other: (NSDecimal(self) < other)),
    ('__gt__', lambda self, other: (NSDecimal(self) > other)),
    ('__le__', lambda self, other: (NSDecimal(self) <= other)),
    ('__ge__', lambda self, other: (NSDecimal(self) >= other)),
    ('__eq__', lambda self, other: (NSDecimal(self) == other)),
    ('__ne__', lambda self, other: (NSDecimal(self) != other)),
))
if sys.version_info[0] == 2:
    # Python 2 only: old-style division and three-way comparison.
    addConvenienceForClass('NSDecimalNumber', (
        ('__div__', lambda self, other: NSDecimalNumber(operator.div(NSDecimal(self), other))),
        ('__rdiv__', lambda self, other: NSDecimalNumber(operator.div(other, NSDecimal(self)))),
        # Fixed: cmp() must compare the NSDecimal value with `other`; the
        # old code wrongly built NSDecimalNumber(NSDecimal(self), other)
        # and passed that single object to cmp().
        ('__cmp__', lambda self, other: cmp(NSDecimal(self), other)),
    ))
else:
    addConvenienceForClass('NSDecimalNumber', (
        ('__round__', lambda self, n=0 : NSDecimalNumber(round(NSDecimal(self), n))),
    ))
| true | true |
1c2c34ed42e078182cd2bd0a9d7af18b99592cbf | 7,302 | py | Python | google/cloud/aiplatform_v1/types/specialist_pool_service.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | 1 | 2022-03-30T05:23:29.000Z | 2022-03-30T05:23:29.000Z | google/cloud/aiplatform_v1/types/specialist_pool_service.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform_v1/types/specialist_pool_service.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1.types import operation
from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"CreateSpecialistPoolRequest",
"CreateSpecialistPoolOperationMetadata",
"GetSpecialistPoolRequest",
"ListSpecialistPoolsRequest",
"ListSpecialistPoolsResponse",
"DeleteSpecialistPoolRequest",
"UpdateSpecialistPoolRequest",
"UpdateSpecialistPoolOperationMetadata",
},
)
# NOTE: Generated proto-plus message wrappers.  The field numbers below are
# wire-format contract — never renumber; regenerate from the .proto source
# rather than editing by hand.
class CreateSpecialistPoolRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool].

    Attributes:
        parent (str):
            Required. The parent Project name for the new
            SpecialistPool. The form is
            ``projects/{project}/locations/{location}``.
        specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
            Required. The SpecialistPool to create.
    """

    parent = proto.Field(proto.STRING, number=1,)
    specialist_pool = proto.Field(
        proto.MESSAGE, number=2, message=gca_specialist_pool.SpecialistPool,
    )


class CreateSpecialistPoolOperationMetadata(proto.Message):
    r"""Runtime operation information for
    [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.CreateSpecialistPool].

    Attributes:
        generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
            The operation generic information.
    """

    generic_metadata = proto.Field(
        proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
    )


class GetSpecialistPoolRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.GetSpecialistPool].

    Attributes:
        name (str):
            Required. The name of the SpecialistPool resource. The form
            is
            ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``.
    """

    name = proto.Field(proto.STRING, number=1,)


class ListSpecialistPoolsRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].

    Attributes:
        parent (str):
            Required. The name of the SpecialistPool's parent resource.
            Format: ``projects/{project}/locations/{location}``
        page_size (int):
            The standard list page size.
        page_token (str):
            The standard list page token. Typically obtained by
            [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1.ListSpecialistPoolsResponse.next_page_token]
            of the previous
            [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools]
            call. Return first page if empty.
        read_mask (google.protobuf.field_mask_pb2.FieldMask):
            Mask specifying which fields to read. FieldMask represents
            a set of symbolic field paths.
    """

    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
    read_mask = proto.Field(proto.MESSAGE, number=4, message=field_mask_pb2.FieldMask,)


class ListSpecialistPoolsResponse(proto.Message):
    r"""Response message for
    [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1.SpecialistPoolService.ListSpecialistPools].

    Attributes:
        specialist_pools (Sequence[google.cloud.aiplatform_v1.types.SpecialistPool]):
            A list of SpecialistPools that matches the
            specified filter in the request.
        next_page_token (str):
            The standard List next-page token.
    """

    @property
    def raw_page(self):
        # Required by the paging iterator protocol: this message is its
        # own raw page.
        return self

    specialist_pools = proto.RepeatedField(
        proto.MESSAGE, number=1, message=gca_specialist_pool.SpecialistPool,
    )
    next_page_token = proto.Field(proto.STRING, number=2,)


class DeleteSpecialistPoolRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.DeleteSpecialistPool].

    Attributes:
        name (str):
            Required. The resource name of the SpecialistPool to delete.
            Format:
            ``projects/{project}/locations/{location}/specialistPools/{specialist_pool}``
        force (bool):
            If set to true, any specialist managers in
            this SpecialistPool will also be deleted.
            (Otherwise, the request will only work if the
            SpecialistPool has no specialist managers.)
    """

    name = proto.Field(proto.STRING, number=1,)
    force = proto.Field(proto.BOOL, number=2,)


class UpdateSpecialistPoolRequest(proto.Message):
    r"""Request message for
    [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool].

    Attributes:
        specialist_pool (google.cloud.aiplatform_v1.types.SpecialistPool):
            Required. The SpecialistPool which replaces
            the resource on the server.
        update_mask (google.protobuf.field_mask_pb2.FieldMask):
            Required. The update mask applies to the
            resource.
    """

    specialist_pool = proto.Field(
        proto.MESSAGE, number=1, message=gca_specialist_pool.SpecialistPool,
    )
    update_mask = proto.Field(
        proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
    )


class UpdateSpecialistPoolOperationMetadata(proto.Message):
    r"""Runtime operation metadata for
    [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1.SpecialistPoolService.UpdateSpecialistPool].

    Attributes:
        specialist_pool (str):
            Output only. The name of the SpecialistPool to which the
            specialists are being added. Format:
            ``projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}``
        generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata):
            The operation generic information.
    """

    specialist_pool = proto.Field(proto.STRING, number=1,)
    generic_metadata = proto.Field(
        proto.MESSAGE, number=2, message=operation.GenericOperationMetadata,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| 37.446154 | 129 | 0.71364 |
import proto
from google.cloud.aiplatform_v1.types import operation
from google.cloud.aiplatform_v1.types import specialist_pool as gca_specialist_pool
from google.protobuf import field_mask_pb2
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"CreateSpecialistPoolRequest",
"CreateSpecialistPoolOperationMetadata",
"GetSpecialistPoolRequest",
"ListSpecialistPoolsRequest",
"ListSpecialistPoolsResponse",
"DeleteSpecialistPoolRequest",
"UpdateSpecialistPoolRequest",
"UpdateSpecialistPoolOperationMetadata",
},
)
# NOTE: Generated proto-plus message wrappers.  Field numbers are
# wire-format contract — never renumber.
class CreateSpecialistPoolRequest(proto.Message):
    """Request to create a SpecialistPool under a parent location."""

    parent = proto.Field(proto.STRING, number=1,)
    specialist_pool = proto.Field(
        proto.MESSAGE, number=2, message=gca_specialist_pool.SpecialistPool,
    )


class CreateSpecialistPoolOperationMetadata(proto.Message):
    """Runtime metadata for the CreateSpecialistPool long-running operation."""

    generic_metadata = proto.Field(
        proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
    )


class GetSpecialistPoolRequest(proto.Message):
    """Request to fetch a single SpecialistPool by resource name."""

    name = proto.Field(proto.STRING, number=1,)


class ListSpecialistPoolsRequest(proto.Message):
    """Paged request for listing SpecialistPools under a parent location."""

    parent = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
    read_mask = proto.Field(proto.MESSAGE, number=4, message=field_mask_pb2.FieldMask,)


class ListSpecialistPoolsResponse(proto.Message):
    """One page of SpecialistPools plus the next-page token."""

    @property
    def raw_page(self):
        # Required by the paging iterator protocol.
        return self

    specialist_pools = proto.RepeatedField(
        proto.MESSAGE, number=1, message=gca_specialist_pool.SpecialistPool,
    )
    next_page_token = proto.Field(proto.STRING, number=2,)


class DeleteSpecialistPoolRequest(proto.Message):
    """Request to delete a SpecialistPool; `force` also removes managers."""

    name = proto.Field(proto.STRING, number=1,)
    force = proto.Field(proto.BOOL, number=2,)


class UpdateSpecialistPoolRequest(proto.Message):
    """Request to replace a SpecialistPool per the given update mask."""

    specialist_pool = proto.Field(
        proto.MESSAGE, number=1, message=gca_specialist_pool.SpecialistPool,
    )
    update_mask = proto.Field(
        proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
    )


class UpdateSpecialistPoolOperationMetadata(proto.Message):
    """Runtime metadata for the UpdateSpecialistPool long-running operation."""

    specialist_pool = proto.Field(proto.STRING, number=1,)
    generic_metadata = proto.Field(
        proto.MESSAGE, number=2, message=operation.GenericOperationMetadata,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| true | true |
1c2c35e8c03707058b2c6e6a8afcf60ccd099056 | 3,764 | py | Python | mgf_checker/reader.py | kusterlab/MasterSpectrum | 938688043690a7da1b68126a73e519d2a07ba6de | [
"MIT"
] | null | null | null | mgf_checker/reader.py | kusterlab/MasterSpectrum | 938688043690a7da1b68126a73e519d2a07ba6de | [
"MIT"
] | null | null | null | mgf_checker/reader.py | kusterlab/MasterSpectrum | 938688043690a7da1b68126a73e519d2a07ba6de | [
"MIT"
] | null | null | null |
from mgf_checker.msParserInfo import MS_parser_info
from abc import ABCMeta, abstractmethod
import csv
from mgf_checker.mzidInfo import MZID_info
class Reader(metaclass=ABCMeta):
    """Base class for line-oriented CSV result readers.

    Subclasses implement `parse` to interpret one CSV row and accumulate
    results into `self.data` (a dict keyed by scan/msms id).
    """

    def __init__(self, path):
        self.path = path  # path of the CSV file to read
        self.data = {}    # parsed results, filled by read_file()

    def read_file(self):
        """Read the CSV file at `self.path`, feeding each row to parse()."""
        with open(self.path) as csvfile:
            csvreader = csv.reader(csvfile, delimiter=',')
            for row in csvreader:
                self.parse(row)

    @abstractmethod
    def parse(self, row):
        """Consume one CSV row (a list of strings).

        Fixed: the abstract method was declared without the `row`
        parameter even though read_file() always calls self.parse(row)
        and every concrete subclass accepts it; the signature now matches
        the call site.
        """
class MZID_Comparison_result(Reader):
    """Reader for mzid comparison results.

    Every row is a rank-1 result; `expected` is the value calculated by
    mzid and `found` is the value from the mgf comparison.  Results are
    stored as self.data[scanid][rank] -> MZID_info.

    Fixes: removed the no-op __init__ override (it only called super)
    and deduplicated the MZID_info construction via dict.setdefault.
    """

    def parse(self, row):
        """Parse one CSV row; header rows (containing "scanid") are skipped."""
        if "scanid" in row:
            return
        scanid = int(row[0])
        rank = int(row[1])
        peak = float(row[2])
        position = int(row[3])
        frag = row[4]
        # Normalize the fragment series to a single upper-case letter.
        if 'b' in frag:
            frag = 'B'
        elif 'y' in frag:
            frag = 'Y'
        else:
            raise ValueError("strange frag")
        expected = float(row[5])
        found = float(row[6])
        charge = int(row[7])
        dPeaks_matched = int(row[8])
        ranks = self.data.setdefault(scanid, {})
        if rank in ranks:
            # Additional peak for an existing (scanid, rank) entry.
            ranks[rank].add(stFrag=frag, nPosition=position, dPeak=peak,
                            dExpected=expected, dFound=found, nCharge=charge,
                            dPeaks_matched=dPeaks_matched)
        else:
            ranks[rank] = MZID_info(nScanid=scanid, dPeak=peak,
                                    nPosition=position, stFrag=frag,
                                    dExpected=expected, dFound=found,
                                    nCharge=charge,
                                    dPeaks_matched=dPeaks_matched)
class MS_parser_result(Reader):
    """Reader for Mascot parser results; keeps results up to RANK_MAX ranks.

    Fixes: the per-call rank_max local is hoisted to a class attribute so
    the cutoff can be tuned per subclass/instance (default unchanged), the
    nested conditionals are flattened with guard returns, and the
    duplicated MS_parser_info construction is removed via dict.setdefault.
    """

    # Highest rank retained per msms id (rank 1 is the best match).
    RANK_MAX = 2

    def parse(self, row):
        """Parse one CSV row; header rows (containing "query") are skipped."""
        if "query" in row:
            return
        nQuery = int(row[0])
        dScore = float(row[1])
        if dScore <= 0:
            # Zero/negative scores are discarded, matching the original
            # "only if dScore > 0" behavior.
            return
        stPeptide = row[2]
        stVarModsStr = row[3]
        stReadableVarMods = row[4]
        nMsmsid = int(row[5])
        stProtein_match = row[6]
        stFilename = row[7]
        nRank = int(row[8])
        stSeriesUsedStr = row[9]
        if nRank > self.RANK_MAX:
            return
        ranks = self.data.setdefault(nMsmsid, {})
        if nRank not in ranks:
            # Keep only the first result seen for each (msmsid, rank).
            ranks[nRank] = MS_parser_info(
                nQuery=nQuery, dScore=dScore, stPeptide=stPeptide,
                stVarModsStr=stVarModsStr,
                stReadableVarMods=stReadableVarMods, nMsmsid=nMsmsid,
                stProtein_match=stProtein_match, stFilename=stFilename,
                nRank=nRank, stSeriesUsedStr=stSeriesUsedStr)
| 39.208333 | 197 | 0.535335 |
from mgf_checker.msParserInfo import MS_parser_info
from abc import ABCMeta, abstractmethod
import csv
from mgf_checker.mzidInfo import MZID_info
class Reader(metaclass=ABCMeta):
    """Base class for line-oriented CSV result readers."""

    def __init__(self, path):
        self.path = path  # path of the CSV file to read
        self.data = {}    # parsed results, filled by read_file()

    def read_file(self):
        """Read the CSV at self.path, feeding each row to parse()."""
        with open(self.path) as csvfile:
            csvreader = csv.reader(csvfile, delimiter=',')
            for row in csvreader:
                self.parse(row)

    @abstractmethod
    def parse(self):
        # NOTE(review): read_file() calls self.parse(row), but this abstract
        # signature takes no row argument — it likely should be
        # parse(self, row) to match the concrete subclasses; confirm before
        # changing.
        pass
class MZID_Comparison_result(Reader):
    """Reader for mzid comparison results (all rank-1 rows).

    `expected` is the value calculated by mzid; `found` is the value from
    the mgf comparison.  Results land in self.data[scanid][rank].
    """

    def __init__(self, path):
        # No extra state; present only to mirror the base constructor.
        super().__init__(path)

    def parse(self, row):
        """Parse one CSV row; the header row (containing "scanid") is skipped."""
        if "scanid" not in row:
            scanid = int(row[0])
            rank = int(row[1])
            peak = float(row[2])
            position = int(row[3])
            frag = row[4]
            # Normalize the fragment series to a single upper-case letter.
            if 'b' in frag:
                frag = 'B'
            elif 'y' in frag:
                frag = 'Y'
            else:
                raise ValueError("strange frag")
            expected = float(row[5])
            found = float(row[6])
            charge = int(row[7])
            dPeaks_matched = int(row[8])
            if scanid in self.data:
                if rank in self.data[scanid]:
                    # Additional peak for an existing (scanid, rank) entry.
                    self.data[scanid][rank].add(stFrag=frag, nPosition=position, dPeak=peak, dExpected=expected, dFound=found, nCharge=charge, dPeaks_matched=dPeaks_matched)
                else:
                    self.data[scanid][rank] = MZID_info(nScanid=scanid, dPeak=peak, nPosition=position, stFrag=frag, dExpected=expected, dFound=found, nCharge=charge, dPeaks_matched=dPeaks_matched)
            else:
                self.data[scanid] = {}
                self.data[scanid][rank] = MZID_info(nScanid=scanid, dPeak=peak, nPosition=position, stFrag=frag, dExpected=expected, dFound=found, nCharge=charge, dPeaks_matched=dPeaks_matched)
class MS_parser_result(Reader):
    """Reader for Mascot parser results; stores results beyond rank 1
    (up to rank_max, currently 2) in self.data[msmsid][rank]."""

    def parse(self, row):
        """Parse one CSV row; header rows (containing "query") are skipped."""
        rank_max = 2
        if "query" not in row:
            nQuery = int(row[0])
            dScore = float(row[1])
            # Only rows with a positive score are kept.
            if dScore > 0:
                stPeptide = row[2]
                stVarModsStr = row[3]
                stReadableVarMods = row[4]
                nMsmsid = int(row[5])
                stProtein_match = row[6]
                stFilename = row[7]
                nRank = int(row[8])
                stSeriesUsedStr = row[9]
                if nRank <= rank_max:
                    if nMsmsid in self.data:
                        if nRank in self.data[nMsmsid]:
                            # First result for each (msmsid, rank) wins.
                            pass
                        else:
                            self.data[nMsmsid][nRank] = MS_parser_info(nQuery=nQuery, dScore=dScore, stPeptide=stPeptide, stVarModsStr=stVarModsStr, stReadableVarMods=stReadableVarMods,
                                                                       nMsmsid=nMsmsid,
                                                                       stProtein_match=stProtein_match, stFilename=stFilename, nRank=nRank, stSeriesUsedStr=stSeriesUsedStr)
                    else:
                        self.data[nMsmsid] = {}
                        self.data[nMsmsid][nRank] = MS_parser_info(nQuery=nQuery, dScore=dScore, stPeptide=stPeptide, stVarModsStr=stVarModsStr, stReadableVarMods=stReadableVarMods,
                                                                   nMsmsid=nMsmsid,
                                                                   stProtein_match=stProtein_match, stFilename=stFilename, nRank=nRank, stSeriesUsedStr=stSeriesUsedStr)
| true | true |
1c2c36196484c2b0158ae77c67ec5b00f64a6446 | 10,704 | py | Python | contrib/database/schema_migration/versions/3f96c40b580a_created_event_schema.py | ctrlsys/sensys | 58221bca169c95c300ecc5526aa00590f0ce5c2f | [
"BSD-3-Clause-Open-MPI"
] | 14 | 2016-09-06T17:02:20.000Z | 2020-03-30T14:34:59.000Z | contrib/database/schema_migration/versions/3f96c40b580a_created_event_schema.py | ctrlsys/sensys | 58221bca169c95c300ecc5526aa00590f0ce5c2f | [
"BSD-3-Clause-Open-MPI"
] | 9 | 2017-04-25T20:45:11.000Z | 2017-07-20T14:58:03.000Z | contrib/database/schema_migration/versions/3f96c40b580a_created_event_schema.py | ctrlsys/sensys | 58221bca169c95c300ecc5526aa00590f0ce5c2f | [
"BSD-3-Clause-Open-MPI"
] | 5 | 2016-10-11T03:28:26.000Z | 2019-07-31T00:36:02.000Z | #
# Copyright (c) 2015 Intel Corporation. All rights reserved
#
"""Created 'event', 'event_data', and 'event_data_key' tables to store data from the EvGen framework.
Revision ID: 3f96c40b580a
Revises: 1d10ef20817b
Create Date: 2015-11-19 22:26:55.393954
"""
# revision identifiers, used by Alembic.
import textwrap
revision = '3f96c40b580a'
down_revision = '1d10ef20817b'
branch_labels = None
from alembic import op
import sqlalchemy as sa
def _postgresql_upgrade_ddl():
    """Create the two event related functions: add_event() and add_event_data()

    PostgreSQL-only part of the upgrade: installs the stored functions used
    to record events, and retrofits ``data_sample_raw`` with a SERIAL
    surrogate-key column (Alembic cannot add an autoincrement column to an
    existing table, so raw SQL is used).
    """
    # add_event(): insert one row into ``event`` and return the generated
    # event_id so callers can attach key/value rows to it afterwards.
    op.execute(textwrap.dedent("""
        CREATE FUNCTION add_event(
            p_time_stamp TIMESTAMP WITHOUT TIME ZONE,
            p_severity CHARACTER VARYING,
            p_type CHARACTER VARYING,
            p_version CHARACTER VARYING,
            p_vendor CHARACTER VARYING,
            p_description CHARACTER VARYING)
        RETURNS INTEGER AS
        $BODY$
            DECLARE
                v_event_id INTEGER := 0;
            BEGIN
                INSERT INTO event(
                    time_stamp,
                    severity,
                    type,
                    version,
                    vendor,
                    description)
                VALUES(
                    p_time_stamp,
                    p_severity,
                    p_type,
                    p_version,
                    p_vendor,
                    p_description)
                RETURNING event_id INTO v_event_id;

                RETURN v_event_id;
            END
        $BODY$
        LANGUAGE plpgsql VOLATILE
        COST 100;"""))

    # Adding a new column as primary key.  Alembic doesn't handle adding new
    # column to existing table as autoincrement, hence the raw ALTER TABLE.
    op.execute("ALTER TABLE data_sample_raw ADD COLUMN data_sample_id SERIAL;")

    # Update all existing records to have default values for the new columns
    # (data_sample_id from the sequence, app_value_type_id mirrors the old
    # data_type_id column).
    op.execute(textwrap.dedent("""
        UPDATE data_sample_raw
        SET data_sample_id = DEFAULT,
            app_value_type_id = data_type_id;"""))

    # add_event_data(): look up (or lazily create) the key name in
    # event_data_key, then insert the typed value row for the given event.
    op.execute(textwrap.dedent("""
        CREATE FUNCTION add_event_data(
            p_event_id INTEGER,
            p_key_name CHARACTER VARYING,
            p_app_value_type_id INTEGER,
            p_value_int BIGINT,
            p_value_real DOUBLE PRECISION,
            p_value_str CHARACTER VARYING,
            p_units CHARACTER VARYING)
        RETURNS VOID AS
        $BODY$
            DECLARE
                v_event_data_key_id INTEGER := 0;
            BEGIN
                SELECT event_data_key_id INTO v_event_data_key_id
                FROM event_data_key
                WHERE name = p_key_name;

                IF (v_event_data_key_id IS NULL) THEN
                    INSERT INTO event_data_key (name, app_value_type_id)
                    VALUES (p_key_name, p_app_value_type_id)
                    RETURNING event_data_key_id INTO v_event_data_key_id;
                END IF;

                INSERT INTO event_data(
                    event_id,
                    event_data_key_id,
                    value_int,
                    value_real,
                    value_str,
                    units)
                VALUES(
                    p_event_id,
                    v_event_data_key_id,
                    p_value_int,
                    p_value_real,
                    p_value_str,
                    p_units);
                RETURN;
            END
        $BODY$
        LANGUAGE plpgsql VOLATILE
        COST 100;"""))
def upgrade():
    """Replace the 'event' table with the new one, create 'event_data' and
    'event_data_key' tables. Also create function related to these tables.
    """
    # The legacy event/event_type pair is replaced wholesale; no data is
    # migrated from the old tables.
    op.drop_table('event')
    op.drop_table('event_type')
    # New event header table: one row per raised event.
    op.create_table('event',
                    sa.Column('event_id', sa.Integer(), nullable=False,
                              autoincrement=True),
                    sa.Column('time_stamp', sa.DateTime(), nullable=False,
                              server_default=sa.func.now()),
                    sa.Column('severity', sa.String(length=50), nullable=False,
                              server_default=''),
                    sa.Column('type', sa.String(length=50), nullable=False,
                              server_default=''),
                    sa.Column('vendor', sa.String(length=250), nullable=False,
                              server_default=''),
                    sa.Column('version', sa.String(length=250), nullable=False,
                              server_default=''),
                    sa.Column('description', sa.Text(), nullable=False,
                              server_default=''),
                    sa.PrimaryKeyConstraint('event_id', name=op.f('pk_event')))
    # Lookup table of payload key names, deduplicated via the unique
    # constraint (add_event_data() upserts into it).
    op.create_table('event_data_key',
                    sa.Column('event_data_key_id', sa.Integer(),
                              nullable=False, autoincrement=True),
                    sa.Column('name', sa.String(length=250), nullable=False),
                    sa.Column('app_value_type_id', sa.Integer(), nullable=False),
                    sa.PrimaryKeyConstraint('event_data_key_id',
                                            name=op.f('pk_event_data_key')),
                    sa.UniqueConstraint('name',
                                        name=op.f('uq_event_data_key_name')))
    # Key/value payload rows per event; the check constraint requires at
    # least one of the typed value columns to be populated.
    op.create_table('event_data',
                    sa.Column('event_data_id', sa.Integer(), nullable=False,
                              autoincrement=True),
                    sa.Column('event_id', sa.Integer(), nullable=False),
                    sa.Column('event_data_key_id', sa.Integer(),
                              nullable=False),
                    sa.Column('value_int', sa.BigInteger()),
                    sa.Column('value_real', sa.Float(precision=53)),
                    sa.Column('value_str', sa.String(length=500)),
                    sa.Column('units', sa.String(length=50), nullable=False,
                              server_default=''),
                    sa.ForeignKeyConstraint(['event_data_key_id'], [
                        'event_data_key.event_data_key_id'], name=op.f(
                        'fk_event_data_event_data_key_id_event_data_key')),
                    sa.ForeignKeyConstraint(['event_id'], ['event.event_id'],
                                            name=op.f(
                                                'fk_event_data_event_id_event')),
                    sa.PrimaryKeyConstraint('event_data_id',
                                            name=op.f('pk_event_data')),
                    sa.CheckConstraint('(value_int IS NOT NULL '
                                       ' OR value_real IS NOT NULL '
                                       ' OR value_str IS NOT NULL)',
                                       name=op.f(
                                           'ck_event_data_at_least_one_value')))
    # Extend the raw-sample table so samples can reference the triggering
    # event and carry their own value-type id.
    op.add_column('data_sample_raw',
                  sa.Column('app_value_type_id', sa.Integer(), nullable=True))
    op.add_column('data_sample_raw', sa.Column('event_id', sa.Integer()))
    db_dialect = op.get_context().dialect
    if 'postgresql' in db_dialect.name:
        _postgresql_upgrade_ddl()
    else:
        # Non-PostgreSQL backends get the tables above but not the stored
        # functions / SERIAL primary-key retrofit, so stop here.
        print("Views were not created. "
              "'%s' is not a supported database dialect." % db_dialect.name)
        return
    # data_sample_id was added as SERIAL by _postgresql_upgrade_ddl();
    # make it non-nullable and promote it to primary key.
    op.alter_column('data_sample_raw',
                    sa.Column('data_sample_id', sa.Integer(),
                              autoincrement=True, nullable=False))
    op.create_primary_key(op.f('pk_data_sample_raw'), 'data_sample_raw',
                          ['data_sample_id'])
    op.create_foreign_key(op.f('fk_data_sample_raw_event_id_event'),
                          'data_sample_raw', 'event', ['event_id'],
                          ['event_id'])
def _postgresql_downgrade_ddl():
    """Drop the two event related functions: add_event() and add_event_data()

    PostgreSQL identifies a function by its name *and* argument types, so
    the full parameter signatures must be repeated in the DROP statements.
    """
    op.execute(textwrap.dedent("""
        DROP FUNCTION add_event(
            TIMESTAMP WITHOUT TIME ZONE,
            CHARACTER VARYING,
            CHARACTER VARYING,
            CHARACTER VARYING,
            CHARACTER VARYING,
            CHARACTER VARYING);"""))
    op.execute(textwrap.dedent("""
        DROP FUNCTION add_event_data(
            INTEGER,
            CHARACTER VARYING,
            INTEGER,
            BIGINT,
            DOUBLE
            PRECISION,
            CHARACTER VARYING,
            CHARACTER VARYING);"""))
def downgrade():
    """Restore the 'event' and 'event_type' tables. Drop the 'event_data'
    and 'event_data_key' tables and the event related functions
    """
    # Constraints referencing the columns added in upgrade() must be removed
    # before the columns themselves can be dropped.
    op.drop_constraint(op.f('fk_data_sample_raw_event_id_event'),
                       'data_sample_raw', type_='foreignkey')
    op.drop_constraint(op.f('pk_data_sample_raw'), 'data_sample_raw',
                       type_='primary')
    db_dialect = op.get_context().dialect
    if 'postgresql' in db_dialect.name:
        _postgresql_downgrade_ddl()
    else:
        # Mirror of the upgrade() dialect guard: bail out on unsupported DBs.
        print("Views were not dropped. "
              "'%s' is not a supported database dialect." % db_dialect.name)
        return
    # Remove the columns grafted onto data_sample_raw during upgrade().
    op.drop_column('data_sample_raw', 'app_value_type_id')
    op.drop_column('data_sample_raw', 'event_id')
    op.drop_column('data_sample_raw', 'data_sample_id')
    # Drop child tables before the parent 'event' table.
    op.drop_table('event_data')
    op.drop_table('event_data_key')
    op.drop_table('event')
    # Re-create the legacy schema (structure only; old data is not restored).
    op.create_table('event_type',
                    sa.Column('event_type_id', sa.INTEGER(), nullable=False),
                    sa.Column('name', sa.VARCHAR(length=250),
                              autoincrement=False, nullable=True),
                    sa.PrimaryKeyConstraint('event_type_id',
                                            name=op.f('pk_event_type')))
    op.create_table('event',
                    sa.Column('node_id', sa.Integer(), nullable=False),
                    sa.Column('event_type_id', sa.Integer(), nullable=False),
                    sa.Column('time_stamp', sa.DateTime(), nullable=False),
                    sa.Column('description', sa.Text(), nullable=False),
                    sa.ForeignKeyConstraint(['event_type_id'],
                                            ['event_type.event_type_id'],
                                            name=op.f(
                                                'fk_event_event_type_id_event_type')),
                    sa.ForeignKeyConstraint(['node_id'], ['node.node_id'],
                                            name=op.f('fk_event_node_id_node')),
                    sa.PrimaryKeyConstraint('node_id', 'event_type_id',
                                            'time_stamp',
                                            name=op.f('pk_event')))
| 41.169231 | 101 | 0.526999 |
import textwrap
revision = '3f96c40b580a'
down_revision = '1d10ef20817b'
branch_labels = None
from alembic import op
import sqlalchemy as sa
def _postgresql_upgrade_ddl():
op.execute(textwrap.dedent("""
CREATE FUNCTION add_event(
p_time_stamp TIMESTAMP WITHOUT TIME ZONE,
p_severity CHARACTER VARYING,
p_type CHARACTER VARYING,
p_version CHARACTER VARYING,
p_vendor CHARACTER VARYING,
p_description CHARACTER VARYING)
RETURNS INTEGER AS
$BODY$
DECLARE
v_event_id INTEGER := 0;
BEGIN
INSERT INTO event(
time_stamp,
severity,
type,
version,
vendor,
description)
VALUES(
p_time_stamp,
p_severity,
p_type,
p_version,
p_vendor,
p_description)
RETURNING event_id INTO v_event_id;
RETURN v_event_id;
END
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;"""))
# column to existing table as autoincrement.
op.execute("ALTER TABLE data_sample_raw ADD COLUMN data_sample_id SERIAL;")
# Update all existing records to have default values for the new columns
op.execute(textwrap.dedent("""
UPDATE data_sample_raw
SET data_sample_id = DEFAULT,
app_value_type_id = data_type_id;"""))
op.execute(textwrap.dedent("""
CREATE FUNCTION add_event_data(
p_event_id INTEGER,
p_key_name CHARACTER VARYING,
p_app_value_type_id INTEGER,
p_value_int BIGINT,
p_value_real DOUBLE PRECISION,
p_value_str CHARACTER VARYING,
p_units CHARACTER VARYING)
RETURNS VOID AS
$BODY$
DECLARE
v_event_data_key_id INTEGER := 0;
BEGIN
SELECT event_data_key_id INTO v_event_data_key_id
FROM event_data_key
WHERE name = p_key_name;
IF (v_event_data_key_id IS NULL) THEN
INSERT INTO event_data_key (name, app_value_type_id)
VALUES (p_key_name, p_app_value_type_id)
RETURNING event_data_key_id INTO v_event_data_key_id;
END IF;
INSERT INTO event_data(
event_id,
event_data_key_id,
value_int,
value_real,
value_str,
units)
VALUES(
p_event_id,
v_event_data_key_id,
p_value_int,
p_value_real,
p_value_str,
p_units);
RETURN;
END
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;"""))
def upgrade():
op.drop_table('event')
op.drop_table('event_type')
op.create_table('event',
sa.Column('event_id', sa.Integer(), nullable=False,
autoincrement=True),
sa.Column('time_stamp', sa.DateTime(), nullable=False,
server_default=sa.func.now()),
sa.Column('severity', sa.String(length=50), nullable=False,
server_default=''),
sa.Column('type', sa.String(length=50), nullable=False,
server_default=''),
sa.Column('vendor', sa.String(length=250), nullable=False,
server_default=''),
sa.Column('version', sa.String(length=250), nullable=False,
server_default=''),
sa.Column('description', sa.Text(), nullable=False,
server_default=''),
sa.PrimaryKeyConstraint('event_id', name=op.f('pk_event')))
op.create_table('event_data_key',
sa.Column('event_data_key_id', sa.Integer(),
nullable=False, autoincrement=True),
sa.Column('name', sa.String(length=250), nullable=False),
sa.Column('app_value_type_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('event_data_key_id',
name=op.f('pk_event_data_key')),
sa.UniqueConstraint('name',
name=op.f('uq_event_data_key_name')))
op.create_table('event_data',
sa.Column('event_data_id', sa.Integer(), nullable=False,
autoincrement=True),
sa.Column('event_id', sa.Integer(), nullable=False),
sa.Column('event_data_key_id', sa.Integer(),
nullable=False),
sa.Column('value_int', sa.BigInteger()),
sa.Column('value_real', sa.Float(precision=53)),
sa.Column('value_str', sa.String(length=500)),
sa.Column('units', sa.String(length=50), nullable=False,
server_default=''),
sa.ForeignKeyConstraint(['event_data_key_id'], [
'event_data_key.event_data_key_id'], name=op.f(
'fk_event_data_event_data_key_id_event_data_key')),
sa.ForeignKeyConstraint(['event_id'], ['event.event_id'],
name=op.f(
'fk_event_data_event_id_event')),
sa.PrimaryKeyConstraint('event_data_id',
name=op.f('pk_event_data')),
sa.CheckConstraint('(value_int IS NOT NULL '
' OR value_real IS NOT NULL '
' OR value_str IS NOT NULL)',
name=op.f(
'ck_event_data_at_least_one_value')))
op.add_column('data_sample_raw',
sa.Column('app_value_type_id', sa.Integer(), nullable=True))
op.add_column('data_sample_raw', sa.Column('event_id', sa.Integer()))
db_dialect = op.get_context().dialect
if 'postgresql' in db_dialect.name:
_postgresql_upgrade_ddl()
else:
print("Views were not created. "
"'%s' is not a supported database dialect." % db_dialect.name)
return
op.alter_column('data_sample_raw',
sa.Column('data_sample_id', sa.Integer(),
autoincrement=True, nullable=False))
op.create_primary_key(op.f('pk_data_sample_raw'), 'data_sample_raw',
['data_sample_id'])
op.create_foreign_key(op.f('fk_data_sample_raw_event_id_event'),
'data_sample_raw', 'event', ['event_id'],
['event_id'])
def _postgresql_downgrade_ddl():
op.execute(textwrap.dedent("""
DROP FUNCTION add_event(
TIMESTAMP WITHOUT TIME ZONE,
CHARACTER VARYING,
CHARACTER VARYING,
CHARACTER VARYING,
CHARACTER VARYING,
CHARACTER VARYING);"""))
op.execute(textwrap.dedent("""
DROP FUNCTION add_event_data(
INTEGER,
CHARACTER VARYING,
INTEGER,
BIGINT,
DOUBLE
PRECISION,
CHARACTER VARYING,
CHARACTER VARYING);"""))
def downgrade():
op.drop_constraint(op.f('fk_data_sample_raw_event_id_event'),
'data_sample_raw', type_='foreignkey')
op.drop_constraint(op.f('pk_data_sample_raw'), 'data_sample_raw',
type_='primary')
db_dialect = op.get_context().dialect
if 'postgresql' in db_dialect.name:
_postgresql_downgrade_ddl()
else:
print("Views were not dropped. "
"'%s' is not a supported database dialect." % db_dialect.name)
return
op.drop_column('data_sample_raw', 'app_value_type_id')
op.drop_column('data_sample_raw', 'event_id')
op.drop_column('data_sample_raw', 'data_sample_id')
op.drop_table('event_data')
op.drop_table('event_data_key')
op.drop_table('event')
op.create_table('event_type',
sa.Column('event_type_id', sa.INTEGER(), nullable=False),
sa.Column('name', sa.VARCHAR(length=250),
autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('event_type_id',
name=op.f('pk_event_type')))
op.create_table('event',
sa.Column('node_id', sa.Integer(), nullable=False),
sa.Column('event_type_id', sa.Integer(), nullable=False),
sa.Column('time_stamp', sa.DateTime(), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(['event_type_id'],
['event_type.event_type_id'],
name=op.f(
'fk_event_event_type_id_event_type')),
sa.ForeignKeyConstraint(['node_id'], ['node.node_id'],
name=op.f('fk_event_node_id_node')),
sa.PrimaryKeyConstraint('node_id', 'event_type_id',
'time_stamp',
name=op.f('pk_event')))
| true | true |
1c2c36a370c83b79c722649dc78b5c5da8e1ef45 | 8,240 | py | Python | fairseq/models/__init__.py | tran-khoa/fairseq | 558366b3c6970a5dd85ad1909581d43e41fdce9f | [
"MIT"
] | null | null | null | fairseq/models/__init__.py | tran-khoa/fairseq | 558366b3c6970a5dd85ad1909581d43e41fdce9f | [
"MIT"
] | null | null | null | fairseq/models/__init__.py | tran-khoa/fairseq | 558366b3c6970a5dd85ad1909581d43e41fdce9f | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import argparse
import importlib
import os
from contextlib import ExitStack
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import merge_with_parent
from hydra.core.config_store import ConfigStore
from omegaconf import open_dict, OmegaConf
from .composite_encoder import CompositeEncoder
from .distributed_fairseq_model import DistributedFairseqModel
from .fairseq_decoder import FairseqDecoder
from .fairseq_encoder import FairseqEncoder
from .fairseq_incremental_decoder import FairseqIncrementalDecoder
from .fairseq_model import (
BaseFairseqModel,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqLanguageModel,
FairseqModel,
FairseqMultiModel,
)
# Global registries populated by the register_model /
# register_model_architecture decorators below (triggered by import_models()).
MODEL_REGISTRY = {}             # model name -> model class
MODEL_DATACLASS_REGISTRY = {}   # model name -> FairseqDataclass subclass
ARCH_MODEL_REGISTRY = {}        # arch name  -> model class
ARCH_MODEL_NAME_REGISTRY = {}   # arch name  -> model name
ARCH_MODEL_INV_REGISTRY = {}    # model name -> list of arch names
ARCH_CONFIG_REGISTRY = {}       # arch name  -> config-mutating arch function


__all__ = [
    "BaseFairseqModel",
    "CompositeEncoder",
    "DistributedFairseqModel",
    "FairseqDecoder",
    "FairseqEncoder",
    "FairseqEncoderDecoderModel",
    "FairseqEncoderModel",
    "FairseqIncrementalDecoder",
    "FairseqLanguageModel",
    "FairseqModel",
    "FairseqMultiModel",
]
def build_model(cfg: FairseqDataclass, task):
    """Instantiate a model for *task* from a (possibly nested) config.

    Resolves the model class through the legacy ``--arch`` registry or the
    dataclass-driven model registry, normalizes *cfg* accordingly, and
    delegates construction to the class's ``build_model`` factory.
    """
    model_type = getattr(cfg, "_name", None) or getattr(cfg, "arch", None)

    # Hydra may hand us a config nested one level deep under the model name.
    if not model_type and len(cfg) == 1:
        model_type = next(iter(cfg))
        if model_type not in MODEL_DATACLASS_REGISTRY:
            raise Exception(
                "Could not infer model type from directory. Please add _name field to indicate model type. "
                "Available models: "
                + str(MODEL_DATACLASS_REGISTRY.keys())
                + " Requested model type: "
                + model_type
            )
        cfg = cfg[model_type]

    model = None
    if model_type in ARCH_MODEL_REGISTRY:
        # case 1: legacy (argparse-era) models registered per architecture
        model = ARCH_MODEL_REGISTRY[model_type]
    elif model_type in MODEL_DATACLASS_REGISTRY:
        # case 2: config-driven models registered by name
        model = MODEL_REGISTRY[model_type]

    if model_type in MODEL_DATACLASS_REGISTRY:
        # Overlay the user config on the dataclass defaults; note that the
        # arch name and the model name may be one and the same here.
        dc = MODEL_DATACLASS_REGISTRY[model_type]
        cfg = (
            dc.from_namespace(cfg)
            if isinstance(cfg, argparse.Namespace)
            else merge_with_parent(dc(), cfg)
        )
    elif model_type in ARCH_CONFIG_REGISTRY:
        # Legacy path: run the base_architecture()-style mutator selected by
        # --arch; it fills in default parameters on cfg in place.  Hydra
        # models instead expose architectures via separate config files.
        ctx = open_dict(cfg) if OmegaConf.is_config(cfg) else ExitStack()
        with ctx:
            ARCH_CONFIG_REGISTRY[model_type](cfg)

    assert model is not None, (
        f"Could not infer model type from {cfg}. "
        "Available models: {}".format(MODEL_DATACLASS_REGISTRY.keys())
        + f" Requested model type: {model_type}"
    )
    return model.build_model(cfg, task)
def register_model(name, dataclass=None):
    """
    New model types can be added to fairseq with the :func:`register_model`
    function decorator.

    For example::

        @register_model('lstm')
        class LSTM(FairseqEncoderDecoderModel):
            (...)

    .. note:: All models must implement the :class:`BaseFairseqModel` interface.
        Typically you will extend :class:`FairseqEncoderDecoderModel` for
        sequence-to-sequence tasks or :class:`FairseqLanguageModel` for
        language modeling tasks.

    Args:
        name (str): the name of the model
        dataclass: optional :class:`FairseqDataclass` describing the model's
            config; when given it is also registered with Hydra's
            ConfigStore under the "model" group
    """

    def register_model_cls(cls):
        # Reject duplicate names and classes outside the model hierarchy
        # before mutating any registry.
        if name in MODEL_REGISTRY:
            raise ValueError("Cannot register duplicate model ({})".format(name))
        if not issubclass(cls, BaseFairseqModel):
            raise ValueError(
                "Model ({}: {}) must extend BaseFairseqModel".format(name, cls.__name__)
            )
        MODEL_REGISTRY[name] = cls
        if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
            raise ValueError(
                "Dataclass {} must extend FairseqDataclass".format(dataclass)
            )

        cls.__dataclass = dataclass
        if dataclass is not None:
            MODEL_DATACLASS_REGISTRY[name] = dataclass

            # Expose the dataclass defaults to Hydra so "model=<name>"
            # works from config files and the command line.
            cs = ConfigStore.instance()
            node = dataclass()
            node._name = name
            cs.store(name=name, group="model", node=node, provider="fairseq")

        # Every model also gets an architecture with the same name, so
        # --arch can always be the model name itself.
        @register_model_architecture(name, name)
        def noop(_):
            pass

        return cls

    return register_model_cls
def register_model_architecture(model_name, arch_name):
    """
    Decorator registering a named architecture for an existing model type.

    After registration the architecture can be selected on the command line
    with ``--arch``. The decorated function receives a
    :class:`omegaconf.DictConfig` and mutates it in place to apply the
    architecture's default parameters, e.g.::

        @register_model_architecture('lstm', 'lstm_luong_wmt_en_de')
        def lstm_luong_wmt_en_de(cfg):
            args.encoder_embed_dim = getattr(cfg.model, 'encoder_embed_dim', 1000)
            (...)

    Args:
        model_name (str): the name of the Model (Model must already be
            registered)
        arch_name (str): the name of the model architecture (``--arch``)
    """

    def _register_arch(fn):
        # Validate everything up front so a failure leaves the registries
        # untouched.
        if model_name not in MODEL_REGISTRY:
            raise ValueError(
                "Cannot register model architecture for unknown model type ({})".format(
                    model_name
                )
            )
        if arch_name in ARCH_MODEL_REGISTRY:
            raise ValueError(
                "Cannot register duplicate model architecture ({})".format(arch_name)
            )
        if not callable(fn):
            raise ValueError(
                "Model architecture must be callable ({})".format(arch_name)
            )

        # Record the arch -> model mappings, the reverse model -> archs
        # listing, and the defaults-mutating function used by build_model().
        ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
        ARCH_MODEL_NAME_REGISTRY[arch_name] = model_name
        ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name)
        ARCH_CONFIG_REGISTRY[arch_name] = fn
        return fn

    return _register_arch
def import_models(models_dir, namespace):
    """Import every model module found under *models_dir* into *namespace*.

    Importing a module runs its @register_model /
    @register_model_architecture decorators, which populate the registries
    above. For each registered model a sphinx-only ``<name>_parser`` is also
    exposed at module level.

    Args:
        models_dir (str): directory to scan for model modules/packages
        namespace (str): dotted package prefix to import them under
    """
    importlib.invalidate_caches()
    for file in os.listdir(models_dir):
        path = os.path.join(models_dir, file)
        if (
            not file.startswith("_")
            and not file.startswith(".")
            and (file.endswith(".py") or os.path.isdir(path))
        ):
            # Strip only the trailing ".py": the previous str.find() based
            # slice cut at the *first* ".py" occurrence, mangling module
            # names such as "my.pything.py".
            model_name = file[: -len(".py")] if file.endswith(".py") else file
            importlib.import_module(namespace + "." + model_name)

            # extra `model_parser` for sphinx
            if model_name in MODEL_REGISTRY:
                parser = argparse.ArgumentParser(add_help=False)
                group_archs = parser.add_argument_group("Named architectures")
                group_archs.add_argument(
                    "--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name]
                )
                group_args = parser.add_argument_group(
                    "Additional command-line arguments"
                )
                MODEL_REGISTRY[model_name].add_args(group_args)
                globals()[model_name + "_parser"] = parser
# Automatically import any Python files in the models/ directory so their
# registration decorators run at package import time.
models_dir = os.path.dirname(__file__)
import_models(models_dir, "fairseq.models")
| 34.915254 | 117 | 0.653641 |
import argparse
import importlib
import os
from contextlib import ExitStack
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import merge_with_parent
from hydra.core.config_store import ConfigStore
from omegaconf import open_dict, OmegaConf
from .composite_encoder import CompositeEncoder
from .distributed_fairseq_model import DistributedFairseqModel
from .fairseq_decoder import FairseqDecoder
from .fairseq_encoder import FairseqEncoder
from .fairseq_incremental_decoder import FairseqIncrementalDecoder
from .fairseq_model import (
BaseFairseqModel,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqLanguageModel,
FairseqModel,
FairseqMultiModel,
)
MODEL_REGISTRY = {}
MODEL_DATACLASS_REGISTRY = {}
ARCH_MODEL_REGISTRY = {}
ARCH_MODEL_NAME_REGISTRY = {}
ARCH_MODEL_INV_REGISTRY = {}
ARCH_CONFIG_REGISTRY = {}
__all__ = [
"BaseFairseqModel",
"CompositeEncoder",
"DistributedFairseqModel",
"FairseqDecoder",
"FairseqEncoder",
"FairseqEncoderDecoderModel",
"FairseqEncoderModel",
"FairseqIncrementalDecoder",
"FairseqLanguageModel",
"FairseqModel",
"FairseqMultiModel",
]
def build_model(cfg: FairseqDataclass, task):
model = None
model_type = getattr(cfg, "_name", None) or getattr(cfg, "arch", None)
if not model_type and len(cfg) == 1:
model_type = next(iter(cfg))
if model_type in MODEL_DATACLASS_REGISTRY:
cfg = cfg[model_type]
else:
raise Exception(
"Could not infer model type from directory. Please add _name field to indicate model type. "
"Available models: "
+ str(MODEL_DATACLASS_REGISTRY.keys())
+ " Requested model type: "
+ model_type
)
if model_type in ARCH_MODEL_REGISTRY:
model = ARCH_MODEL_REGISTRY[model_type]
elif model_type in MODEL_DATACLASS_REGISTRY:
model = MODEL_REGISTRY[model_type]
if model_type in MODEL_DATACLASS_REGISTRY:
dc = MODEL_DATACLASS_REGISTRY[model_type]
if isinstance(cfg, argparse.Namespace):
cfg = dc.from_namespace(cfg)
else:
cfg = merge_with_parent(dc(), cfg)
else:
if model_type in ARCH_CONFIG_REGISTRY:
with open_dict(cfg) if OmegaConf.is_config(cfg) else ExitStack():
ARCH_CONFIG_REGISTRY[model_type](cfg)
assert model is not None, (
f"Could not infer model type from {cfg}. "
"Available models: {}".format(MODEL_DATACLASS_REGISTRY.keys())
+ f" Requested model type: {model_type}"
)
return model.build_model(cfg, task)
def register_model(name, dataclass=None):
def register_model_cls(cls):
if name in MODEL_REGISTRY:
raise ValueError("Cannot register duplicate model ({})".format(name))
if not issubclass(cls, BaseFairseqModel):
raise ValueError(
"Model ({}: {}) must extend BaseFairseqModel".format(name, cls.__name__)
)
MODEL_REGISTRY[name] = cls
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if dataclass is not None:
MODEL_DATACLASS_REGISTRY[name] = dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group="model", node=node, provider="fairseq")
@register_model_architecture(name, name)
def noop(_):
pass
return cls
return register_model_cls
def register_model_architecture(model_name, arch_name):
def register_model_arch_fn(fn):
if model_name not in MODEL_REGISTRY:
raise ValueError(
"Cannot register model architecture for unknown model type ({})".format(
model_name
)
)
if arch_name in ARCH_MODEL_REGISTRY:
raise ValueError(
"Cannot register duplicate model architecture ({})".format(arch_name)
)
if not callable(fn):
raise ValueError(
"Model architecture must be callable ({})".format(arch_name)
)
ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
ARCH_MODEL_NAME_REGISTRY[arch_name] = model_name
ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name)
ARCH_CONFIG_REGISTRY[arch_name] = fn
return fn
return register_model_arch_fn
def import_models(models_dir, namespace):
importlib.invalidate_caches()
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
importlib.import_module(namespace + "." + model_name)
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group("Named architectures")
group_archs.add_argument(
"--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name]
)
group_args = parser.add_argument_group(
"Additional command-line arguments"
)
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + "_parser"] = parser
models_dir = os.path.dirname(__file__)
import_models(models_dir, "fairseq.models")
| true | true |
1c2c36bf2d6e93d5f3d560ba85669dcf9accf49b | 203 | py | Python | system/t04_mirror/__init__.py | kchida/aptly | 07165efc9d4bcd7018031787f27e70c2d8ecb8b9 | [
"MIT"
] | 16 | 2015-02-10T16:32:43.000Z | 2021-08-10T18:59:10.000Z | system/t04_mirror/__init__.py | kchida/aptly | 07165efc9d4bcd7018031787f27e70c2d8ecb8b9 | [
"MIT"
] | null | null | null | system/t04_mirror/__init__.py | kchida/aptly | 07165efc9d4bcd7018031787f27e70c2d8ecb8b9 | [
"MIT"
] | 8 | 2015-02-28T23:21:55.000Z | 2020-11-24T11:29:30.000Z | """
Testing mirror management
"""
from .create import *
from .show import *
from .list import *
from .update import *
from .drop import *
from .rename import *
from .edit import *
from .search import *
| 15.615385 | 25 | 0.704433 |
from .create import *
from .show import *
from .list import *
from .update import *
from .drop import *
from .rename import *
from .edit import *
from .search import *
| true | true |
1c2c380f19bf455f71912570bd1e4554ad7f517a | 18,542 | py | Python | dashboard/main.py | narek-davtyan/bigquery-bokeh-dashboard | e2f8ab7684ded7bad81ac8d7b5ffaa3eb954481f | [
"Apache-2.0"
] | null | null | null | dashboard/main.py | narek-davtyan/bigquery-bokeh-dashboard | e2f8ab7684ded7bad81ac8d7b5ffaa3eb954481f | [
"Apache-2.0"
] | null | null | null | dashboard/main.py | narek-davtyan/bigquery-bokeh-dashboard | e2f8ab7684ded7bad81ac8d7b5ffaa3eb954481f | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
# pandarallel adds DataFrame.parallel_map / parallel_apply (multi-core pandas)
from pandarallel import pandarallel
# Spin up the pandarallel worker pool
pandarallel.initialize()
# Load the survey export; only these six columns are used by the dashboard
min_list_of_columns_to_load = ['company', 'service', 'recommendation', 'easiness', 'rec_sc', 'eas_sc']
df_orig = pd.read_excel(r'CDD1.xlsx', names=min_list_of_columns_to_load)#.astype({'country':'category', 'company':'int16', 'service':'category', 'recommendation':'int8', 'question_one':'string', 'easiness':'int8', 'question_two':'string'})
# Normalize the service column to plain strings (the filter buttons key on it)
df_orig['service'] = df_orig['service'].parallel_map(str)
# Shared mutable state: plots, filter lock, active filters, data sources
general_dict = {}
def calculate_barycenter(df_temp, country_list):
# Create visual data points
df_tempo = df_temp[['recommendation', 'easiness', 'company']].groupby(['recommendation', 'easiness'],as_index=False).count().rename(columns={'company' : 'sum'}).astype({'sum': 'float32'})
df_tempy = pd.merge(df_temp, df_tempo, how='left', on=['recommendation', 'easiness'])
# Calculate size of circles
df_tempy.loc[~df_tempy['service'].isin(country_list), 'sum'] = 0.0
df_tempy.loc[df_tempy['sum'] > 25.0, 'sum'] = 25.0
df_tempy.eval('visual_sum = sum * 2.95', inplace=True)
# Create visual barycenter with edges
if len(df_temp) == 0 or len(country_list) == 0:
barycenter = np.array([0.0, 0.0])
else:
barycenter = df_temp[['recommendation', 'easiness']].astype({'recommendation':'float32', 'easiness':'float32'}).mean().to_numpy()
# Create barycenter dataframe
bary_numpy = df_temp[['recommendation', 'easiness']].astype({'recommendation':'float32', 'easiness':'float32'}).to_numpy()
row_bary = [barycenter[0], barycenter[1]]
row_empty = np.empty((1,bary_numpy.shape[1]))
row_empty.fill(np.nan)
bary_numpy = np.insert(bary_numpy, range(1, len(bary_numpy)+1, 1), row_bary, axis=0)
bary_numpy = np.insert(bary_numpy, range(2, len(bary_numpy), 2), row_empty, axis=0)
bary_data = pd.DataFrame(bary_numpy, columns=['recommendation', 'easiness'])
return df_tempy, barycenter, bary_data
# Filter lock: suppresses per-button callbacks while "Select All/None" runs
general_dict['filter_called'] = False
# Start with every service selected; full_filter_list never changes
general_dict['filter_list'] = df_orig.service.unique()
general_dict['full_filter_list'] = df_orig.service.unique()
# Rows matching the active service filter
filtered_df = df_orig.loc[df_orig['service'].isin(general_dict['filter_list'])]
# Initial data points, barycenter and barycenter edges for the scatter plot
df_points, barycenter, df_bary = calculate_barycenter(filtered_df[min_list_of_columns_to_load], general_dict['filter_list'])
###################################################################################
###################################################################################
from bokeh.models import ColumnDataSource, Callback, Toggle, BoxAnnotation, LabelSet, Label, HoverTool, DataTable, TableColumn, Image, TapTool, Tap, HBar, Plot
from bokeh.plotting import figure, curdoc
from bokeh.layouts import column, row, Spacer
###################################################################################
############################## Visual 3 - Data Table ##############################
# Create data table structure: company/service listing shown for the
# circle(s) currently selected in the points plot
data_columns = [
    TableColumn(field="company", title="Company"),
    TableColumn(field="service", title="Service"),
]
# Starts empty; repopulated by the circle tap callback
data_source = ColumnDataSource(pd.DataFrame(columns=['service', 'company']))
data_table = DataTable(source=data_source, columns=data_columns, width=400, height=550)
###################################################################################
###################################################################################
###################################################################################
############################## Visual 1 - Points Plot #############################
#---------------------------------------------------------------------------------#
#------------------------------- Static Background -------------------------------#
# Scatter canvas: recommendation on x, easiness on y, both 0-10; the 'tap'
# tool lets the user select a circle to fill the data table.
general_dict['points_plot'] = figure(x_range=(0, 10), y_range=(0, 10), plot_width=600, plot_height=600, match_aspect=True, tools=['tap'])
# Hide the real axis — fake axes are drawn through (7, 7) below
general_dict['points_plot'].axis.visible = False
# Hide the real grid
general_dict['points_plot'].xgrid.grid_line_color = None
general_dict['points_plot'].ygrid.grid_line_color = None
# One tick per integer score
general_dict['points_plot'].xaxis.ticker = list(range(11))
general_dict['points_plot'].yaxis.ticker = list(range(11))
# Color zones: green "love" quadrant (>=7, >=7), red elsewhere, neutral
# bands plus a rounded corner circle at the (7, 7) fake-axis origin
general_dict['points_plot'].circle(x=7.0, y=7.0, radius=1, fill_alpha=1, fill_color='#fbe5d6', radius_units='data', line_color=None, level='underlay')
ba1 = BoxAnnotation(bottom=7, top=10, left=0, right=7, fill_alpha=1, fill_color='#fbe5d6', level='underlay')
ba2 = BoxAnnotation(bottom=0, top=7, left=7, right=10, fill_alpha=1, fill_color='#fbe5d6', level='underlay')
ba3 = BoxAnnotation(bottom=0, top=7, left=0, right=7, fill_alpha=0.3, fill_color='#bf0603', level='underlay')
ba4 = BoxAnnotation(bottom=7, top=10, left=7, right=10, fill_alpha=0.3, fill_color='#538d22', level='underlay')
general_dict['points_plot'].add_layout(ba1)
general_dict['points_plot'].add_layout(ba2)
general_dict['points_plot'].add_layout(ba3)
general_dict['points_plot'].add_layout(ba4)
# Fake axis lines crossing at (7, 7), with short tick marks at each integer
general_dict['points_plot'].line(x=[0, 10], y=[7, 7], line_color='skyblue', level='underlay')
general_dict['points_plot'].line(x=[7, 7], y=[0, 10], line_color='forestgreen', level='underlay')
general_dict['points_plot'].segment(x0=list(range(11)), y0=list(np.array(range(7,8))-0.1)*11,
                                    x1=list(range(11)), y1=list(np.array(range(7,8))+0.1)*11,
                                    color='skyblue', line_width=2, level='underlay')
general_dict['points_plot'].segment(x0=list(np.array(range(7,8))-0.1)*11, y0=list(range(11)),
                                    x1=list(np.array(range(7,8))+0.1)*11, y1=list(range(11)),
                                    color='forestgreen', line_width=1, level='underlay')
# Numeric labels 0..10 rendered along each fake axis
source = ColumnDataSource(data=dict(height=list(range(11)),
                                    weight=list(np.array(range(7,8)))*11,
                                    names=list(range(11))))
labels = LabelSet(x='weight', y='height', text='names', level='glyph',
                  x_offset=8, y_offset=2, source=source, render_mode='canvas')
general_dict['points_plot'].add_layout(labels)
labels = LabelSet(x='height', y='weight', text='names', level='glyph',
                  x_offset=5, y_offset=-20, source=source, render_mode='canvas')
general_dict['points_plot'].add_layout(labels)
# Quadrant captions
citation = Label(x=8, y=8, text='Love', render_mode='css')
general_dict['points_plot'].add_layout(citation)
citation = Label(x=3, y=8, text='Frustration', render_mode='css')
general_dict['points_plot'].add_layout(citation)
citation = Label(x=3, y=3, text='Repulsion', render_mode='css')
general_dict['points_plot'].add_layout(citation)
citation = Label(x=8, y=3, text='Frustration', render_mode='css')
general_dict['points_plot'].add_layout(citation)
#----------------------------- ^ Static Background ^ -----------------------------#
#---------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------#
#------------------------------ Ineractive Triggers ------------------------------#
# Filter countries on button click
def callback_h(selected_state):
    """Rebuild the dashboard after a service-filter toggle changes.

    Triggered both by the per-service Toggle buttons and by the
    'Select All' / 'Select None' master buttons.  Recomputes the filtered
    dataframe, replaces the scatter renderer and barycenter overlay, and
    patches the emotion score markers in place.
    """
    # Ignore the per-button callbacks fired while 'Select All/None' is
    # programmatically flipping every toggle below.
    if general_dict['filter_called']:
        return None
    # Collect selected filters from the toggle buttons.
    selected_country_list = []
    if filter_button1.active:
        # 'Select None': clear every service toggle.
        general_dict['filter_called'] = True
        for button in buttons:
            button.active = False
        general_dict['filter_called'] = False
        filter_button1.active = False
    elif filter_button3.active:
        # 'Select All': enable every service toggle.
        general_dict['filter_called'] = True
        for button in buttons:
            button.active = True
            selected_country_list.append(button.name)
        general_dict['filter_called'] = False
        filter_button3.active = False
        # Nothing to redraw if everything was already selected.
        if len(selected_country_list) == len(general_dict['full_filter_list']):
            return None
    else:
        for button in buttons:
            if button.active:
                selected_country_list.append(button.name)
    # Store the new filter selection.
    general_dict['filter_list'] = selected_country_list
    # Recompute the filtered dataframe.
    filtered_df = df_orig.loc[df_orig['service'].isin(general_dict['filter_list'])]
    # Recompute data points, barycenter and its connecting edges.
    df_points, barycenter, df_bary = calculate_barycenter(filtered_df[min_list_of_columns_to_load], general_dict['filter_list'])
    # Create a fresh data source for the scatter plot.
    general_dict['data_points'] = ColumnDataSource(df_points)
    # Re-attach the circle-tap callback to the new source.
    general_dict['data_points'].selected.on_change('indices', callback)
    # Drop the stale scatter renderer before drawing the new one.
    general_dict['points_plot'].renderers.remove(general_dict['points_plot'].select(name='data_points')[0])
    # Plot the new data points.
    general_dict['points_plot'].circle('recommendation', 'easiness', name='data_points', size='visual_sum', source=general_dict['data_points'], selection_fill_alpha=0.2, selection_color="firebrick", line_width=1, nonselection_line_color="firebrick")
    # Remove the old barycenter point and its connecting edges, if present.
    if len(general_dict['points_plot'].select(name='bary')) > 0 and len(general_dict['points_plot'].select(name='barypoint')) > 0:
        general_dict['points_plot'].renderers.remove(general_dict['points_plot'].select(name='bary')[0])
        general_dict['points_plot'].renderers.remove(general_dict['points_plot'].select(name='barypoint')[0])
    # Plot the new barycenter and connecting edges.
    general_dict['points_plot'].line(x='recommendation', y='easiness', source=ColumnDataSource(df_bary), name='bary', line_width=1, level='overlay', color='#2a679d')
    general_dict['points_plot'].circle(x=barycenter[0], y=barycenter[1], color='firebrick', size=barycenter[0]+barycenter[1]+1, name='barypoint', level='overlay')
    # Recompute the emotion scores over the filtered rows.
    df_emotions = filtered_df[['rec_sc','eas_sc']]
    if len(df_emotions) > 0:
        rec_score = df_emotions['rec_sc'].mean() * 100
        easy_score = df_emotions['eas_sc'].mean() * 100
    else:
        # No rows left after filtering: show neutral scores.
        rec_score = 0.0
        easy_score = 0.0
    # Patch the score markers in place (no full redraw needed).
    general_dict['emotions_rec_score'].patch({ 'right' : [(0,rec_score)], 'left' : [(0,rec_score)] })
    general_dict['emotions_easy_score'].patch({ 'right' : [(0,easy_score)], 'left' : [(0,easy_score)] })
# Update data table on circle tap
def callback(attr, old, new):
    """Refresh the detail data table whenever scatter circles are selected."""
    recommendations, easinesses = ([],[])
    inds = general_dict['data_points'].selected.indices
    # NOTE(review): this branch is a no-op — an empty selection simply
    # produces an empty table below; confirm whether an early return was intended.
    if (len(inds) == 0):
        pass
    # Collect the coordinates of every selected circle.
    for i in range(0, len(inds)):
        recommendations.append(general_dict['data_points'].data['recommendation'][inds[i]])
        easinesses.append(general_dict['data_points'].data['easiness'][inds[i]])
    # Rows matching any selected coordinate pair within the active filters.
    current = df_points.loc[(df_points['recommendation'].isin(recommendations)) & (df_points['easiness'].isin(easinesses)) & (df_points['service'].isin(general_dict['filter_list']))]
    data_source.data = {
        'service' : current.service,
        'company' : current.company,
    }
#---------------------------- ^ Ineractive Triggers ^ ----------------------------#
#---------------------------------------------------------------------------------#
# Create data source for points plot
general_dict['data_points'] = ColumnDataSource(df_points)
# Attach circle tap callback to circles
general_dict['data_points'].selected.on_change('indices', callback)
# Plot data circles
general_dict['points_plot'].circle('recommendation', 'easiness', name='data_points', size='visual_sum', source=general_dict['data_points'], selection_fill_alpha=0.2, selection_color="firebrick", line_width=1, nonselection_line_color="firebrick")
# general_dict['points_plot'].circle('recommendation', 'easiness', name='data_points', size='visual_sum', alpha=0.4, source=general_dict['data_points'], selection_color="firebrick", selection_alpha=0.4, tags=['country','service'], line_width=1, nonselection_fill_alpha=0.2, nonselection_fill_color="blue", nonselection_line_color="firebrick", nonselection_line_alpha=1.0)
# Plot barycenter and connecting edges
general_dict['bary_points'] = ColumnDataSource(df_bary)
general_dict['points_plot'].line(x='recommendation', y='easiness', source=general_dict['bary_points'], name='bary', line_width=1, level='overlay', color='#2a679d')
general_dict['points_plot'].circle(x=barycenter[0], y=barycenter[1], color='firebrick', size=barycenter[0]+barycenter[1], name='barypoint', level='overlay')
###################################################################################
###################################################################################
###################################################################################
############################ Visual 2 - Buttons Columns ###########################
buttons = []
for country in df_orig.service.unique():
# Plot buttons
button = Toggle(label=country, button_type="primary", name=country, width=290)
button.active = True
button.on_click(callback_h)
buttons.append(button)
filter_button1 = Toggle(label='Select None', button_type="default", name='filter1', width_policy='fixed', width=290)
filter_button3 = Toggle(label='Select All', button_type="default", name='filter3', width_policy='fixed', width=290)
filter_button1.active = False
filter_button3.active = False
filter_button1.on_click(callback_h)
filter_button3.on_click(callback_h)
###################################################################################
###################################################################################
###################################################################################
############################# Visual 6 - Emotions Plot ############################
df_emotions = filtered_df[['rec_sc','eas_sc']]
rec_score = df_emotions['rec_sc'].mean() * 100
easy_score = df_emotions['eas_sc'].mean() * 100
general_dict['emotions_rec_score'] = ColumnDataSource(dict(right=[rec_score], left=[rec_score],))
general_dict['emotions_easy_score'] = ColumnDataSource(dict(right=[easy_score], left=[easy_score],))
general_dict['emotions_plot'] = Plot(
title=None, plot_width=600, plot_height=180, align='center',
min_border=0, toolbar_location=None, outline_line_color=None, output_backend="webgl")
general_dict['emotions_plot'].add_glyph(HBar(y=0.4, right=0, left=-100, height=0.2, fill_color="#931a25", line_width=0))
general_dict['emotions_plot'].add_glyph(HBar(y=0.0, right=0, left=-100, height=0.2, fill_color="#931a25", line_width=0))
general_dict['emotions_plot'].add_glyph(HBar(y=0.4, right=30, left=0, height=0.2, fill_color="#ffc93c", line_width=0))
general_dict['emotions_plot'].add_glyph(HBar(y=0.0, right=30, left=0, height=0.2, fill_color="#ffc93c", line_width=0))
general_dict['emotions_plot'].add_glyph(HBar(y=0.4, right=70, left=30, height=0.2, fill_color="#b3de69", line_width=0))
general_dict['emotions_plot'].add_glyph(HBar(y=0.0, right=70, left=30, height=0.2, fill_color="#b3de69", line_width=0))
general_dict['emotions_plot'].add_glyph(HBar(y=0.4, right=100, left=70, height=0.2, fill_color="#158467", line_width=0))
general_dict['emotions_plot'].add_glyph(HBar(y=0.0, right=100, left=70, height=0.2, fill_color="#158467", line_width=0))
general_dict['emotions_plot'].add_glyph(general_dict['emotions_rec_score'], HBar(y=0.4, right='right', left='left', height=0.2, fill_color="#1a1c20", line_width=4), name='rec_s')
general_dict['emotions_plot'].add_glyph(general_dict['emotions_easy_score'], HBar(y=0.0, right='right', left='left', height=0.2, fill_color="#1a1c20", line_width=4), name='easy_s')
# Create labels
citation = Label(x=-24, y=0.55, text='Recommendation', render_mode='css', text_color="#4c4c4c", text_font_style='bold')
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=-12, y=0.16, text='Easiness', render_mode='css', text_color="#4c4c4c", text_font_style='bold')
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=-82, y=-0.2, text='NEEDS IMPROVEMENT', render_mode='css', text_color="#931a25")
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=7, y=-0.2, text='GOOD', render_mode='css', text_color="#ffc93c")
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=40, y=-0.2, text='GREAT', render_mode='css', text_color="#b3de69")
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=68, y=-0.2, text='EXCELLENT', render_mode='css', text_color="#158467")
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=-103, y=0.16, text='-100', render_mode='css', text_color="#4c4c4c")
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=93, y=0.16, text='100', render_mode='css', text_color="#4c4c4c")
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=1.5, y=0.35, text='0', render_mode='css', text_color="#f4f4f4")
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=31.5, y=0.35, text='30', render_mode='css', text_color="#f4f4f4")
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=71.5, y=0.35, text='70', render_mode='css', text_color="#f4f4f4")
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=1.5, y=-0.05, text='0', render_mode='css', text_color="#f4f4f4")
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=31.5, y=-0.05, text='30', render_mode='css', text_color="#f4f4f4")
general_dict['emotions_plot'].add_layout(citation)
citation = Label(x=71.5, y=-0.05, text='70', render_mode='css', text_color="#f4f4f4")
general_dict['emotions_plot'].add_layout(citation)
###################################################################################
###################################################################################
# Connect all plots into one object and set layout
curdoc().add_root(row(general_dict['points_plot'], column(row(column(filter_button1, filter_button3, column(buttons)), data_table), Spacer(height=50), general_dict['emotions_plot'])))
| 54.216374 | 371 | 0.643081 | import pandas as pd
import numpy as np
from pandarallel import pandarallel
pandarallel.initialize()
min_list_of_columns_to_load = ['company', 'service', 'recommendation', 'easiness', 'rec_sc', 'eas_sc']
df_orig = pd.read_excel(r'CDD1.xlsx', names=min_list_of_columns_to_load)
df_orig['service'] = df_orig['service'].parallel_map(str)
general_dict = {}
def calculate_barycenter(df_temp, country_list):
    """Prepare scatter data, the mean point, and its spoke-line segments.

    Returns a tuple ``(df_tempy, barycenter, bary_data)``:
    - df_tempy: input rows joined with a per-(recommendation, easiness)
      duplicate count ('sum', zeroed outside *country_list*, capped at 25)
      and a derived 'visual_sum' glyph size;
    - barycenter: mean (recommendation, easiness) as a 2-element array
      ([0, 0] when either input is empty);
    - bary_data: alternating point/barycenter pairs separated by NaN rows
      so a single Bokeh line glyph draws disconnected segments.
    """
    # Count duplicate (recommendation, easiness) pairs.
    df_tempo = df_temp[['recommendation', 'easiness', 'company']].groupby(['recommendation', 'easiness'],as_index=False).count().rename(columns={'company' : 'sum'}).astype({'sum': 'float32'})
    # Attach the count to every original row.
    df_tempy = pd.merge(df_temp, df_tempo, how='left', on=['recommendation', 'easiness'])
    # Hide rows outside the selected services and cap the glyph size.
    df_tempy.loc[~df_tempy['service'].isin(country_list), 'sum'] = 0.0
    df_tempy.loc[df_tempy['sum'] > 25.0, 'sum'] = 25.0
    # Scale the count into a pixel size for the scatter circles.
    df_tempy.eval('visual_sum = sum * 2.95', inplace=True)
    if len(df_temp) == 0 or len(country_list) == 0:
        barycenter = np.array([0.0, 0.0])
    else:
        barycenter = df_temp[['recommendation', 'easiness']].astype({'recommendation':'float32', 'easiness':'float32'}).mean().to_numpy()
    bary_numpy = df_temp[['recommendation', 'easiness']].astype({'recommendation':'float32', 'easiness':'float32'}).to_numpy()
    row_bary = [barycenter[0], barycenter[1]]
    row_empty = np.empty((1,bary_numpy.shape[1]))
    row_empty.fill(np.nan)
    # Interleave the barycenter after every original point...
    bary_numpy = np.insert(bary_numpy, range(1, len(bary_numpy)+1, 1), row_bary, axis=0)
    # ...then NaN separator rows so the line breaks between pairs.
    bary_numpy = np.insert(bary_numpy, range(2, len(bary_numpy), 2), row_empty, axis=0)
    bary_data = pd.DataFrame(bary_numpy, columns=['recommendation', 'easiness'])
    return df_tempy, barycenter, bary_data
general_dict['filter_called'] = False
general_dict['filter_list'] = df_orig.service.unique()
general_dict['full_filter_list'] = df_orig.service.unique()
filtered_df = df_orig.loc[df_orig['service'].isin(general_dict['filter_list'])]
df_points, barycenter, df_bary = calculate_barycenter(filtered_df[min_list_of_columns_to_load], general_dict['filter_list'])
| true | true |
1c2c380fe9cc858758b68712b42ad3237a84b6f0 | 3,650 | py | Python | V2RaycSpider1225/src/BusinessLogicLayer/utils/sckey_steward.py | game-loader/V2RayCloudSpider | 21d37e9ce3af3fb1c77ec5394449d8373cad1816 | [
"MIT"
] | null | null | null | V2RaycSpider1225/src/BusinessLogicLayer/utils/sckey_steward.py | game-loader/V2RayCloudSpider | 21d37e9ce3af3fb1c77ec5394449d8373cad1816 | [
"MIT"
] | null | null | null | V2RaycSpider1225/src/BusinessLogicLayer/utils/sckey_steward.py | game-loader/V2RayCloudSpider | 21d37e9ce3af3fb1c77ec5394449d8373cad1816 | [
"MIT"
] | null | null | null | __all__ = ['api']
import base64
import hashlib
import os
from Crypto.Cipher import AES
class _SecretSteward(object):
"""
> 接收用户输入
- 校验数据 if 正确 -> 缓存数据 data:服务器敏感数据
- else -> 丢弃
> 接收用户确认信息
- 是否存储 data if YES -> 启动密钥管理模块
- else -> 丢弃缓存
> 密钥管理模块
- 接收用户输入 pem-password -> md5(md5($salt,$password)) ->sckey of AES ECB
- 输入校验模块
> 输入校验模块
- if run in win -> use easygui
- else -> Terminal/shell
"""
def __init__(self, sckey: str):
self.base_pem = '_sckeyStream.v2raycs'
if sckey.__len__() > 32:
sckey = sckey[:32]
self.sckey = self._complete_stream(sckey=sckey)
@staticmethod
def _complete_stream(sckey: str):
sckey = bytes(sckey, encoding='utf-8')
while sckey.__len__() % 16 != 0:
sckey += b'\0'
return sckey
def encrypt(self, message: str):
return str(
base64.encodebytes(AES.new(key=self.sckey, mode=AES.MODE_ECB).encrypt(self._complete_stream(message))),
encoding='utf-8'.replace('\n', ''))
def decrypt(self, cipher_text: str):
return str(AES.new(key=self.sckey, mode=AES.MODE_ECB).decrypt(
base64.decodebytes(bytes(cipher_text, encoding='utf-8'))).rstrip(
b'\0').decode('utf-8'))
def capture_stream(self, stream, path: str = None, ):
if not stream:
stream = self.base_pem
with open(path, 'w') as f:
f.write(stream)
def _change_format_of_sckey(weak_password: str) -> str:
if not isinstance(weak_password, bytes):
weak_password = bytes(weak_password, 'utf-8')
m = hashlib.md5()
m.update(weak_password)
m.update(m.digest())
m.update(bytes(str(base64.encodebytes(m.digest()), encoding='utf-8').replace('\n', ''), 'utf-8'))
return m.hexdigest()
def api(mode: str, password: str, message: str = None, cache_path: str = None):
    """Encrypt or decrypt sensitive data with a password-derived AES key.

    mode: 'encrypt' writes the ciphertext of ``message`` to ``cache_path``;
        'decrypt' returns the plaintext of ``message`` or, when message is
        empty, of the file at ``cache_path``.
    password: weak password, stretched via ``_change_format_of_sckey``.
    cache_path: ciphertext file; defaults to the steward's ``base_pem`` name.
    Returns the plaintext on successful decryption, otherwise None.
    """
    _sckey: str = _change_format_of_sckey(weak_password=password)
    ss = _SecretSteward(sckey=_sckey)
    if cache_path is None or cache_path == '':
        cache_path = ss.base_pem
    if mode == 'encrypt':
        if message is None or message == '':
            print(">>> Message is None. Please pass in the plaintext that needs to be encrypted.")
            return None
        cipher_text: str = ss.encrypt(message=message)
        ss.capture_stream(stream=cipher_text, path=cache_path)
        print(f">>> Data encryption succeeded! SCKEY file storage path:{cache_path}")
    elif mode == 'decrypt':
        # An explicit message takes precedence over the cached file.
        if message:
            return ss.decrypt(cipher_text=message)
        else:
            if not os.path.exists(cache_path):
                print(">>> Message is None. Please pass in the data to be decrypted.")
                print(f">>> The path of the file to be decrypted cannot be found({cache_path}).")
                return None
            with open(cache_path, 'r', encoding='utf-8') as f:
                message = f.read()
            try:
                result = ss.decrypt(message)
                print(">>> Decrypt success!")
                return result
            except UnicodeDecodeError:
                # A wrong key decrypts to bytes that fail UTF-8 decoding.
                print(">>> Password Error!Please try again.")
    else:
        print(f">>> Wrong parameter({mode})! Try this ---> |decrypt|encrypt|")
if __name__ == '__main__':
    # Ad-hoc demo: decrypt the previously written cipher file.
    message_ = "192.168.0.1$443$root$!(A-RAI.DM)"
    # api(mode='encrypt', password='V2RayCloudSpider', message=message_, cache_path="_Cipher.v2raycs")
    s = api(mode='decrypt', password='V2RayCloudSpider', cache_path='_Cipher.v2raycs')
| 33.486239 | 115 | 0.592055 | __all__ = ['api']
import base64
import hashlib
import os
from Crypto.Cipher import AES
class _SecretSteward(object):
def __init__(self, sckey: str):
self.base_pem = '_sckeyStream.v2raycs'
if sckey.__len__() > 32:
sckey = sckey[:32]
self.sckey = self._complete_stream(sckey=sckey)
@staticmethod
def _complete_stream(sckey: str):
sckey = bytes(sckey, encoding='utf-8')
while sckey.__len__() % 16 != 0:
sckey += b'\0'
return sckey
def encrypt(self, message: str):
return str(
base64.encodebytes(AES.new(key=self.sckey, mode=AES.MODE_ECB).encrypt(self._complete_stream(message))),
encoding='utf-8'.replace('\n', ''))
def decrypt(self, cipher_text: str):
return str(AES.new(key=self.sckey, mode=AES.MODE_ECB).decrypt(
base64.decodebytes(bytes(cipher_text, encoding='utf-8'))).rstrip(
b'\0').decode('utf-8'))
def capture_stream(self, stream, path: str = None, ):
if not stream:
stream = self.base_pem
with open(path, 'w') as f:
f.write(stream)
def _change_format_of_sckey(weak_password: str) -> str:
if not isinstance(weak_password, bytes):
weak_password = bytes(weak_password, 'utf-8')
m = hashlib.md5()
m.update(weak_password)
m.update(m.digest())
m.update(bytes(str(base64.encodebytes(m.digest()), encoding='utf-8').replace('\n', ''), 'utf-8'))
return m.hexdigest()
def api(mode: str, password: str, message: str = None, cache_path: str = None):
_sckey: str = _change_format_of_sckey(weak_password=password)
ss = _SecretSteward(sckey=_sckey)
if cache_path is None or cache_path == '':
cache_path = ss.base_pem
if mode == 'encrypt':
if message is None or message == '':
print(">>> Message is None. Please pass in the plaintext that needs to be encrypted.")
return None
cipher_text: str = ss.encrypt(message=message)
ss.capture_stream(stream=cipher_text, path=cache_path)
print(f">>> Data encryption succeeded! SCKEY file storage path:{cache_path}")
elif mode == 'decrypt':
if message:
return ss.decrypt(cipher_text=message)
else:
if not os.path.exists(cache_path):
print(">>> Message is None. Please pass in the data to be decrypted.")
print(f">>> The path of the file to be decrypted cannot be found({cache_path}).")
return None
with open(cache_path, 'r', encoding='utf-8') as f:
message = f.read()
try:
result = ss.decrypt(message)
print(">>> Decrypt success!")
return result
except UnicodeDecodeError:
print(">>> Password Error!Please try again.")
else:
print(f">>> Wrong parameter({mode})! Try this ---> |decrypt|encrypt|")
if __name__ == '__main__':
message_ = "192.168.0.1$443$root$!(A-RAI.DM)"
s = api(mode='decrypt', password='V2RayCloudSpider', cache_path='_Cipher.v2raycs')
| true | true |
1c2c38675ddeb50a2e1178f4d9b6889afd1dc419 | 375 | py | Python | csob/tests/resources/__init__.py | druids/csob-paymentgateway | 64e5e84ad4a0239f716855e12ee70364ab4414df | [
"MIT"
] | null | null | null | csob/tests/resources/__init__.py | druids/csob-paymentgateway | 64e5e84ad4a0239f716855e12ee70364ab4414df | [
"MIT"
] | 3 | 2019-03-13T10:28:35.000Z | 2019-04-04T10:56:59.000Z | csob/tests/resources/__init__.py | druids/csob-paymentgateway | 64e5e84ad4a0239f716855e12ee70364ab4414df | [
"MIT"
] | 1 | 2019-03-11T02:32:14.000Z | 2019-03-11T02:32:14.000Z | import os
import sys
PRIVATE_KEY_PATH = os.path.join(sys.prefix, 'csob_keys/rsa_test_A3746UdxZO.key')
GATEWAY_KEY_PATH = os.path.join(sys.prefix, 'csob_keys/mips_platebnibrana.csob.cz.pub')
def get_private_key():
    """Return the contents of the bundled test RSA private key file."""
    with open(PRIVATE_KEY_PATH, 'r') as key_file:
        return key_file.read()
def get_gateway_key():
with open(GATEWAY_KEY_PATH, 'r') as f:
return f.read()
| 23.4375 | 87 | 0.714667 | import os
import sys
PRIVATE_KEY_PATH = os.path.join(sys.prefix, 'csob_keys/rsa_test_A3746UdxZO.key')
GATEWAY_KEY_PATH = os.path.join(sys.prefix, 'csob_keys/mips_platebnibrana.csob.cz.pub')
def get_private_key():
with open(PRIVATE_KEY_PATH, 'r') as f:
return f.read()
def get_gateway_key():
with open(GATEWAY_KEY_PATH, 'r') as f:
return f.read()
| true | true |
1c2c3886367fd57d2bba97ab2435464496cd0f56 | 144 | py | Python | conda-4.3/run_test.py | jakirkham/anaconda-recipes | 74fb2280662a022f2d12a7744b4823cfa5e236df | [
"BSD-3-Clause"
] | 130 | 2015-07-28T03:41:21.000Z | 2022-03-16T03:07:41.000Z | conda-4.3/run_test.py | jakirkham/anaconda-recipes | 74fb2280662a022f2d12a7744b4823cfa5e236df | [
"BSD-3-Clause"
] | 119 | 2015-08-01T00:54:06.000Z | 2021-01-05T13:00:46.000Z | conda-4.3/run_test.py | jakirkham/anaconda-recipes | 74fb2280662a022f2d12a7744b4823cfa5e236df | [
"BSD-3-Clause"
] | 72 | 2015-07-29T02:35:56.000Z | 2022-02-26T14:31:15.000Z | import conda
print('conda.__version__: %s' % conda.__version__)
assert conda.__version__ == '4.3.25'
from conda.fetch import handle_proxy_407
| 20.571429 | 50 | 0.770833 | import conda
print('conda.__version__: %s' % conda.__version__)
assert conda.__version__ == '4.3.25'
from conda.fetch import handle_proxy_407
| true | true |
1c2c38900d31a5e1fbf65407352dee9fdcd9bf95 | 7,749 | py | Python | tests/test_gcloud.py | timgates42/pyramid_storage | a45e54beb52957e1e9d0177ad5a225815df827c1 | [
"BSD-3-Clause"
] | null | null | null | tests/test_gcloud.py | timgates42/pyramid_storage | a45e54beb52957e1e9d0177ad5a225815df827c1 | [
"BSD-3-Clause"
] | null | null | null | tests/test_gcloud.py | timgates42/pyramid_storage | a45e54beb52957e1e9d0177ad5a225815df827c1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from io import BytesIO
import mock
import pytest
from pyramid import compat
from pyramid import exceptions as pyramid_exceptions
class MockGCloudConnection(object):
    """Stand-in for the google-cloud client: every bucket reports no blobs."""

    def get_bucket(self, bucket_name):
        fake_bucket = mock.MagicMock()
        fake_bucket.get_blob.return_value = None
        return fake_bucket
def _get_mock_gcloud_connection(self):
    # Replacement target for GoogleCloudStorage.get_connection via
    # mock.patch; the extra ``self`` mirrors the bound-method signature.
    return MockGCloudConnection()
def _mock_open_name():
    """Return the dotted patch path of the builtin ``open`` for this Python."""
    return 'builtins.open' if compat.PY3 else '__builtin__.open'
def _mock_open(name='test', mode='wb', encoding="utf-8"):
obj = mock.Mock()
obj.__enter__ = mock.Mock()
obj.__enter__.return_value = BytesIO()
obj.__exit__ = mock.Mock()
return obj
def test_extension_allowed_if_any():
    """The wildcard 'any' extension policy accepts every extension."""
    from pyramid_storage import gcloud

    storage = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="any",
    )
    assert storage.extension_allowed(".jpg")
def test_extension_allowed_if_allowed_if_dotted():
    """A dotted extension is accepted when its bare form is in the allowed set."""
    from pyramid_storage import gcloud

    storage = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="images",
    )
    assert storage.extension_allowed(".jpg", ("jpg",))
def test_extension_not_allowed_if_allowed_if_dotted():
    """A *dotted* extension is rejected when absent from the allowed set.

    Previously this passed the undotted "jpg", making the test an exact
    duplicate of test_extension_not_allowed_if_allowed_if_not_dotted; it
    now exercises the dotted form its name promises.
    """
    from pyramid_storage import gcloud

    g = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="images"
    )
    assert not g.extension_allowed(".jpg", ("gif",))
def test_extension_not_allowed_if_allowed_if_not_dotted():
    """An undotted extension is rejected when absent from the allowed set."""
    from pyramid_storage import gcloud

    storage = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="images",
    )
    assert not storage.extension_allowed("jpg", ("gif",))
def test_file_allowed():
    """A file whose extension is in the configured set passes file_allowed().

    Local renamed from ``s`` to ``g`` for consistency with every sibling
    test in this module.
    """
    from pyramid_storage import gcloud

    fs = mock.Mock()
    fs.filename = "test.jpg"

    g = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="images"
    )
    assert g.file_allowed(fs)
def test_file_not_allowed():
    """A file whose extension is outside the configured set fails file_allowed()."""
    from pyramid_storage import gcloud

    upload = mock.Mock()
    upload.filename = "test.jpg"

    storage = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="documents",
    )
    assert not storage.file_allowed(upload)
def test_save_if_file_not_allowed():
    """save() raises FileNotAllowed for an extension outside the configured set."""
    from pyramid_storage import gcloud
    from pyramid_storage.exceptions import FileNotAllowed

    upload = mock.Mock()
    upload.filename = "test.jpg"

    storage = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="documents",
    )
    with pytest.raises(FileNotAllowed):
        storage.save(upload)
def test_save_if_file_allowed():
    """save() uploads an allowed file through Blob and returns its name."""
    from pyramid_storage import gcloud

    upload = mock.Mock()
    upload.filename = "test.jpg"

    storage = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="images")

    with mock.patch(
            'pyramid_storage.gcloud.GoogleCloudStorage.get_connection',
            _get_mock_gcloud_connection):
        with mock.patch('pyramid_storage.gcloud.Blob') as blob_cls:
            name = storage.save(upload)
            assert blob_cls.return_value.upload_from_file.called
    assert name == "test.jpg"
def test_save_file():
    """save_file() uploads a byte stream under the given name."""
    from pyramid_storage import gcloud

    storage = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="images")

    with mock.patch(
            'pyramid_storage.gcloud.GoogleCloudStorage.get_connection',
            _get_mock_gcloud_connection):
        with mock.patch('pyramid_storage.gcloud.Blob') as blob_cls:
            name = storage.save_file(BytesIO(), "test.jpg")
            assert blob_cls.return_value.upload_from_file.called
    assert name == "test.jpg"
def test_save_filename():
    """save_filename() uploads the named file and returns its key.

    The original started patches manually and stopped them in a trailing
    loop; a failing assertion would skip the loop and leak active patches
    into later tests.  Nesting ``with`` blocks guarantees clean-up on any
    exit path.
    """
    from pyramid_storage import gcloud

    g = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="images"
    )

    with mock.patch(_mock_open_name(), _mock_open), \
            mock.patch(
                'pyramid_storage.gcloud.GoogleCloudStorage.get_connection',
                _get_mock_gcloud_connection):
        with mock.patch('pyramid_storage.gcloud.Blob') as mocked_new_blob:
            name = g.save_filename("test.jpg", replace=True)
            assert name == "test.jpg"
            assert mocked_new_blob.return_value.upload_from_file.called
def test_save_if_randomize():
    """save(randomize=True) stores the file under a randomized name."""
    from pyramid_storage import gcloud

    upload = mock.Mock()
    upload.filename = "test.jpg"

    storage = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="images")

    with mock.patch(
            'pyramid_storage.gcloud.GoogleCloudStorage.get_connection',
            _get_mock_gcloud_connection):
        with mock.patch('pyramid_storage.gcloud.Blob') as blob_cls:
            name = storage.save(upload, randomize=True)
            assert blob_cls.return_value.upload_from_file.called
    assert name != "test.jpg"
def test_save_in_folder():
    """save(folder=...) prefixes the stored name with the folder path."""
    from pyramid_storage import gcloud

    upload = mock.MagicMock()
    upload.filename = "test.jpg"
    upload.file = BytesIO()

    storage = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="images")

    with mock.patch(
            'pyramid_storage.gcloud.GoogleCloudStorage.get_connection',
            _get_mock_gcloud_connection):
        with mock.patch('pyramid_storage.gcloud.Blob') as blob_cls:
            name = storage.save(upload, folder="my_folder")
            assert blob_cls.return_value.upload_from_file.called
    assert name == "my_folder/test.jpg"
def test_delete():
    """delete() completes without error against the mocked connection."""
    from pyramid_storage import gcloud

    storage = gcloud.GoogleCloudStorage(
        credentials="/secrets/credentials.json",
        bucket_name="my_bucket",
        extensions="images",
    )
    with mock.patch(
            'pyramid_storage.gcloud.GoogleCloudStorage.get_connection',
            _get_mock_gcloud_connection):
        storage.delete("test.jpg")
def test_from_settings_with_defaults():
    """from_settings() applies defaults and wires credentials through to the client."""
    from pyramid_storage import gcloud

    settings = {
        'storage.gcloud.credentials': '/secure/credentials.json',
        'storage.gcloud.bucket_name': 'Attachments',
    }
    storage = gcloud.GoogleCloudStorage.from_settings(settings, 'storage.')

    # Optional keys omitted from the settings fall back to defaults.
    assert storage.base_url == ''
    assert storage.bucket_name == 'Attachments'
    assert storage.acl == 'publicRead'
    assert {'jpg', 'txt', 'doc'}.intersection(storage.extensions)

    with mock.patch.object(gcloud, "Client") as client_cls:
        storage.get_connection()
        _, json_kwargs = client_cls.from_service_account_json.call_args_list[0]
        assert 'json_credentials_path' in json_kwargs
        assert json_kwargs["json_credentials_path"] == '/secure/credentials.json'

        storage.get_bucket()
        bucket_args, _ = (client_cls.from_service_account_json
                          .return_value.get_bucket.call_args_list[0])
        assert "Attachments" in bucket_args
def test_from_settings_if_base_path_missing():
    """Missing required settings raise a Pyramid ConfigurationError."""
    from pyramid_storage import gcloud

    with pytest.raises(pyramid_exceptions.ConfigurationError):
        gcloud.GoogleCloudStorage.from_settings({}, 'storage.')
| 27.874101 | 85 | 0.677507 |
from io import BytesIO
import mock
import pytest
from pyramid import compat
from pyramid import exceptions as pyramid_exceptions
class MockGCloudConnection(object):
def get_bucket(self, bucket_name):
bucket = mock.MagicMock()
bucket.get_blob.return_value = None
return bucket
def _get_mock_gcloud_connection(self):
return MockGCloudConnection()
def _mock_open_name():
if compat.PY3:
return 'builtins.open'
else:
return '__builtin__.open'
def _mock_open(name='test', mode='wb', encoding="utf-8"):
obj = mock.Mock()
obj.__enter__ = mock.Mock()
obj.__enter__.return_value = BytesIO()
obj.__exit__ = mock.Mock()
return obj
def test_extension_allowed_if_any():
from pyramid_storage import gcloud
g = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="any"
)
assert g.extension_allowed(".jpg")
def test_extension_allowed_if_allowed_if_dotted():
from pyramid_storage import gcloud
g = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="images"
)
assert g.extension_allowed(".jpg", ("jpg",))
def test_extension_not_allowed_if_allowed_if_dotted():
from pyramid_storage import gcloud
g = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="images"
)
assert not g.extension_allowed("jpg", ("gif",))
def test_extension_not_allowed_if_allowed_if_not_dotted():
from pyramid_storage import gcloud
g = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="images"
)
assert not g.extension_allowed("jpg", ("gif",))
def test_file_allowed():
from pyramid_storage import gcloud
fs = mock.Mock()
fs.filename = "test.jpg"
s = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="images"
)
assert s.file_allowed(fs)
def test_file_not_allowed():
from pyramid_storage import gcloud
fs = mock.Mock()
fs.filename = "test.jpg"
g = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="documents"
)
assert not g.file_allowed(fs)
def test_save_if_file_not_allowed():
from pyramid_storage import gcloud
from pyramid_storage.exceptions import FileNotAllowed
fs = mock.Mock()
fs.filename = "test.jpg"
g = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="documents"
)
with pytest.raises(FileNotAllowed):
g.save(fs)
def test_save_if_file_allowed():
from pyramid_storage import gcloud
fs = mock.Mock()
fs.filename = "test.jpg"
g = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="images")
with mock.patch(
'pyramid_storage.gcloud.GoogleCloudStorage.get_connection',
_get_mock_gcloud_connection):
with mock.patch('pyramid_storage.gcloud.Blob') as mocked_new_blob:
name = g.save(fs)
assert mocked_new_blob.return_value.upload_from_file.called
assert name == "test.jpg"
def test_save_file():
from pyramid_storage import gcloud
g = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="images"
)
with mock.patch(
'pyramid_storage.gcloud.GoogleCloudStorage.get_connection',
_get_mock_gcloud_connection):
with mock.patch('pyramid_storage.gcloud.Blob') as mocked_new_blob:
name = g.save_file(BytesIO(), "test.jpg")
assert mocked_new_blob.return_value.upload_from_file.called
assert name == "test.jpg"
def test_save_filename():
from pyramid_storage import gcloud
g = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="images"
)
patches = (
mock.patch(_mock_open_name(), _mock_open),
mock.patch(
'pyramid_storage.gcloud.GoogleCloudStorage.get_connection',
_get_mock_gcloud_connection
)
)
for patch in patches:
patch.start()
with mock.patch('pyramid_storage.gcloud.Blob') as mocked_new_blob:
name = g.save_filename("test.jpg", replace=True)
assert name == "test.jpg"
assert mocked_new_blob.return_value.upload_from_file.called
for patch in patches:
patch.stop()
def test_save_if_randomize():
from pyramid_storage import gcloud
fs = mock.Mock()
fs.filename = "test.jpg"
g = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="images")
with mock.patch(
'pyramid_storage.gcloud.GoogleCloudStorage.get_connection',
_get_mock_gcloud_connection):
with mock.patch('pyramid_storage.gcloud.Blob') as mocked_new_blob:
name = g.save(fs, randomize=True)
assert mocked_new_blob.return_value.upload_from_file.called
assert name != "test.jpg"
def test_save_in_folder():
from pyramid_storage import gcloud
fs = mock.MagicMock()
fs.filename = "test.jpg"
fs.file = BytesIO()
g = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="images")
with mock.patch(
'pyramid_storage.gcloud.GoogleCloudStorage.get_connection',
_get_mock_gcloud_connection):
with mock.patch('pyramid_storage.gcloud.Blob') as mocked_new_blob:
name = g.save(fs, folder="my_folder")
assert mocked_new_blob.return_value.upload_from_file.called
assert name == "my_folder/test.jpg"
def test_delete():
from pyramid_storage import gcloud
g = gcloud.GoogleCloudStorage(
credentials="/secrets/credentials.json",
bucket_name="my_bucket",
extensions="images"
)
with mock.patch(
'pyramid_storage.gcloud.GoogleCloudStorage.get_connection',
_get_mock_gcloud_connection):
g.delete("test.jpg")
def test_from_settings_with_defaults():
from pyramid_storage import gcloud
settings = {
'storage.gcloud.credentials': '/secure/credentials.json',
'storage.gcloud.bucket_name': 'Attachments',
}
inst = gcloud.GoogleCloudStorage.from_settings(settings, 'storage.')
assert inst.base_url == ''
assert inst.bucket_name == 'Attachments'
assert inst.acl == 'publicRead'
assert set(('jpg', 'txt', 'doc')).intersection(inst.extensions)
with mock.patch.object(gcloud, "Client") as gcloud_mocked:
inst.get_connection()
_, gcloud_options = gcloud_mocked.from_service_account_json.call_args_list[0]
assert 'json_credentials_path' in gcloud_options
assert gcloud_options["json_credentials_path"] == '/secure/credentials.json'
inst.get_bucket()
bucket_options, _ = gcloud_mocked.from_service_account_json \
.return_value.get_bucket.call_args_list[0]
assert "Attachments" in bucket_options
def test_from_settings_if_base_path_missing():
from pyramid_storage import gcloud
with pytest.raises(pyramid_exceptions.ConfigurationError):
gcloud.GoogleCloudStorage.from_settings({}, 'storage.')
| true | true |
1c2c38922ab51ffc005959091fad12ea82adc024 | 12,323 | py | Python | saharaclient/tests/unit/osc/v2/test_data_sources.py | openstack/python-saharaclient | 2f01b878a9e07bc712fae9c6c2c5f823bd986dd6 | [
"Apache-2.0"
] | 34 | 2015-01-26T21:39:46.000Z | 2021-01-16T17:30:25.000Z | saharaclient/tests/unit/osc/v2/test_data_sources.py | openstack/python-saharaclient | 2f01b878a9e07bc712fae9c6c2c5f823bd986dd6 | [
"Apache-2.0"
] | null | null | null | saharaclient/tests/unit/osc/v2/test_data_sources.py | openstack/python-saharaclient | 2f01b878a9e07bc712fae9c6c2c5f823bd986dd6 | [
"Apache-2.0"
] | 15 | 2015-03-13T23:24:59.000Z | 2017-06-22T12:15:46.000Z | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from osc_lib.tests import utils as osc_utils
from unittest import mock
from saharaclient.api import data_sources as api_ds
from saharaclient.osc.v1 import data_sources as osc_ds
from saharaclient.tests.unit.osc.v1 import test_data_sources as tds_v1
DS_INFO = {'id': 'id', 'name': 'source', 'type': 'swift',
'url': 'swift://container.sahara/object',
'description': 'Data Source for tests',
'is_public': True, 'is_protected': True}
class TestDataSources(tds_v1.TestDataSources):
def setUp(self):
super(TestDataSources, self).setUp()
self.app.api_version['data_processing'] = '2'
self.ds_mock = (
self.app.client_manager.data_processing.data_sources)
self.ds_mock.reset_mock()
class TestCreateDataSource(TestDataSources):
def setUp(self):
super(TestCreateDataSource, self).setUp()
self.ds_mock.create.return_value = api_ds.DataSources(
None, DS_INFO)
# Command to test
self.cmd = osc_ds.CreateDataSource(self.app, None)
def test_data_sources_create_no_options(self):
arglist = []
verifylist = []
self.assertRaises(osc_utils.ParserException, self.check_parser,
self.cmd, arglist, verifylist)
def test_data_sources_create_required_options(self):
arglist = ['source', '--type', 'swift', '--url',
'swift://container.sahara/object']
verifylist = [('name', 'source'), ('type', 'swift'),
('url', 'swift://container.sahara/object')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that data source was created with correct arguments
called_args = {'credential_pass': None, 'credential_user': None,
'data_source_type': 'swift', 'name': 'source',
'description': '',
'url': 'swift://container.sahara/object',
'is_public': False, 'is_protected': False,
's3_credentials': None}
self.ds_mock.create.assert_called_once_with(**called_args)
# Check that columns are correct
expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
'Name', 'Type', 'Url')
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = ('Data Source for tests', 'id', True, True, 'source',
'swift', 'swift://container.sahara/object')
self.assertEqual(expected_data, data)
def test_data_sources_create_all_options(self):
arglist = ['source', '--type', 'swift', '--url',
'swift://container.sahara/object', '--username', 'user',
'--password', 'pass', '--description',
'Data Source for tests', '--public', '--protected']
verifylist = [('name', 'source'), ('type', 'swift'),
('url', 'swift://container.sahara/object'),
('username', 'user'), ('password', 'pass'),
('description', 'Data Source for tests'),
('public', True), ('protected', True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that data source was created with correct arguments
called_args = {'credential_pass': 'pass', 'credential_user': 'user',
'data_source_type': 'swift', 'name': 'source',
'description': 'Data Source for tests',
'url': 'swift://container.sahara/object',
'is_protected': True, 'is_public': True,
's3_credentials': None}
self.ds_mock.create.assert_called_once_with(**called_args)
# Check that columns are correct
expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
'Name', 'Type', 'Url')
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = ('Data Source for tests', 'id', True, True, 'source',
'swift', 'swift://container.sahara/object')
self.assertEqual(expected_data, data)
class TestListDataSources(TestDataSources):
def setUp(self):
super(TestListDataSources, self).setUp()
self.ds_mock.list.return_value = [api_ds.DataSources(
None, DS_INFO)]
# Command to test
self.cmd = osc_ds.ListDataSources(self.app, None)
def test_data_sources_list_no_options(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that columns are correct
expected_columns = ['Name', 'Id', 'Type']
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = [('source', 'id', 'swift')]
self.assertEqual(expected_data, list(data))
def test_data_sources_list_long(self):
arglist = ['--long']
verifylist = [('long', True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that columns are correct
expected_columns = ['Name', 'Id', 'Type', 'Url', 'Description',
'Is public', 'Is protected']
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = [('source', 'id', 'swift',
'swift://container.sahara/object',
'Data Source for tests', True, True)]
self.assertEqual(expected_data, list(data))
class TestShowDataSource(TestDataSources):
def setUp(self):
super(TestShowDataSource, self).setUp()
self.ds_mock.find_unique.return_value = api_ds.DataSources(
None, DS_INFO)
# Command to test
self.cmd = osc_ds.ShowDataSource(self.app, None)
def test_data_sources_show(self):
arglist = ['source']
verifylist = [('data_source', 'source')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that correct arguments was passed
self.ds_mock.find_unique.assert_called_once_with(name='source')
# Check that columns are correct
expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
'Name', 'Type', 'Url')
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = ['Data Source for tests', 'id', True, True, 'source',
'swift', 'swift://container.sahara/object']
self.assertEqual(expected_data, list(data))
class TestDeleteDataSource(TestDataSources):
def setUp(self):
super(TestDeleteDataSource, self).setUp()
self.ds_mock.find_unique.return_value = api_ds.DataSources(
None, DS_INFO)
# Command to test
self.cmd = osc_ds.DeleteDataSource(self.app, None)
def test_data_sources_delete(self):
arglist = ['source']
verifylist = [('data_source', ['source'])]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
# Check that correct arguments was passed
self.ds_mock.delete.assert_called_once_with('id')
class TestUpdateDataSource(TestDataSources):
def setUp(self):
super(TestUpdateDataSource, self).setUp()
self.ds_mock.find_unique.return_value = api_ds.DataSources(
None, DS_INFO)
self.ds_mock.update.return_value = mock.Mock(
data_source=DS_INFO)
# Command to test
self.cmd = osc_ds.UpdateDataSource(self.app, None)
def test_data_sources_update_no_options(self):
arglist = []
verifylist = []
self.assertRaises(osc_utils.ParserException, self.check_parser,
self.cmd, arglist, verifylist)
def test_data_sources_update_nothing_updated(self):
arglist = ['source']
verifylist = [('data_source', 'source')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
self.ds_mock.update.assert_called_once_with('id', {})
def test_data_sources_update_required_options(self):
arglist = ['source']
verifylist = [('data_source', 'source')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that data source was created with correct arguments
self.ds_mock.update.assert_called_once_with('id', {})
# Check that columns are correct
expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
'Name', 'Type', 'Url')
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = ('Data Source for tests', 'id', True, True, 'source',
'swift', 'swift://container.sahara/object')
self.assertEqual(expected_data, data)
def test_data_sources_update_all_options(self):
arglist = ['source', '--name', 'source', '--type', 'swift', '--url',
'swift://container.sahara/object', '--username', 'user',
'--password', 'pass', '--description',
'Data Source for tests', '--public', '--protected']
verifylist = [('data_source', 'source'), ('name', 'source'),
('type', 'swift'),
('url', 'swift://container.sahara/object'),
('username', 'user'), ('password', 'pass'),
('description', 'Data Source for tests'),
('is_public', True), ('is_protected', True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
# Check that data source was created with correct arguments
self.ds_mock.update.assert_called_once_with(
'id', {'name': 'source', 'url': 'swift://container.sahara/object',
'is_protected': True,
'credentials': {'password': 'pass', 'user': 'user'},
'is_public': True, 'type': 'swift',
'description': 'Data Source for tests'})
# Check that columns are correct
expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
'Name', 'Type', 'Url')
self.assertEqual(expected_columns, columns)
# Check that data is correct
expected_data = ('Data Source for tests', 'id', True, True, 'source',
'swift', 'swift://container.sahara/object')
self.assertEqual(expected_data, data)
def test_data_sources_update_private_unprotected(self):
arglist = ['source', '--private', '--unprotected']
verifylist = [('data_source', 'source'), ('is_public', False),
('is_protected', False)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
# Check that data source was created with correct arguments
self.ds_mock.update.assert_called_once_with(
'id', {'is_public': False, 'is_protected': False})
| 39.751613 | 78 | 0.6031 |
from osc_lib.tests import utils as osc_utils
from unittest import mock
from saharaclient.api import data_sources as api_ds
from saharaclient.osc.v1 import data_sources as osc_ds
from saharaclient.tests.unit.osc.v1 import test_data_sources as tds_v1
DS_INFO = {'id': 'id', 'name': 'source', 'type': 'swift',
'url': 'swift://container.sahara/object',
'description': 'Data Source for tests',
'is_public': True, 'is_protected': True}
class TestDataSources(tds_v1.TestDataSources):
def setUp(self):
super(TestDataSources, self).setUp()
self.app.api_version['data_processing'] = '2'
self.ds_mock = (
self.app.client_manager.data_processing.data_sources)
self.ds_mock.reset_mock()
class TestCreateDataSource(TestDataSources):
def setUp(self):
super(TestCreateDataSource, self).setUp()
self.ds_mock.create.return_value = api_ds.DataSources(
None, DS_INFO)
self.cmd = osc_ds.CreateDataSource(self.app, None)
def test_data_sources_create_no_options(self):
arglist = []
verifylist = []
self.assertRaises(osc_utils.ParserException, self.check_parser,
self.cmd, arglist, verifylist)
def test_data_sources_create_required_options(self):
arglist = ['source', '--type', 'swift', '--url',
'swift://container.sahara/object']
verifylist = [('name', 'source'), ('type', 'swift'),
('url', 'swift://container.sahara/object')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
called_args = {'credential_pass': None, 'credential_user': None,
'data_source_type': 'swift', 'name': 'source',
'description': '',
'url': 'swift://container.sahara/object',
'is_public': False, 'is_protected': False,
's3_credentials': None}
self.ds_mock.create.assert_called_once_with(**called_args)
expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
'Name', 'Type', 'Url')
self.assertEqual(expected_columns, columns)
expected_data = ('Data Source for tests', 'id', True, True, 'source',
'swift', 'swift://container.sahara/object')
self.assertEqual(expected_data, data)
def test_data_sources_create_all_options(self):
arglist = ['source', '--type', 'swift', '--url',
'swift://container.sahara/object', '--username', 'user',
'--password', 'pass', '--description',
'Data Source for tests', '--public', '--protected']
verifylist = [('name', 'source'), ('type', 'swift'),
('url', 'swift://container.sahara/object'),
('username', 'user'), ('password', 'pass'),
('description', 'Data Source for tests'),
('public', True), ('protected', True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
called_args = {'credential_pass': 'pass', 'credential_user': 'user',
'data_source_type': 'swift', 'name': 'source',
'description': 'Data Source for tests',
'url': 'swift://container.sahara/object',
'is_protected': True, 'is_public': True,
's3_credentials': None}
self.ds_mock.create.assert_called_once_with(**called_args)
expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
'Name', 'Type', 'Url')
self.assertEqual(expected_columns, columns)
expected_data = ('Data Source for tests', 'id', True, True, 'source',
'swift', 'swift://container.sahara/object')
self.assertEqual(expected_data, data)
class TestListDataSources(TestDataSources):
def setUp(self):
super(TestListDataSources, self).setUp()
self.ds_mock.list.return_value = [api_ds.DataSources(
None, DS_INFO)]
self.cmd = osc_ds.ListDataSources(self.app, None)
def test_data_sources_list_no_options(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
expected_columns = ['Name', 'Id', 'Type']
self.assertEqual(expected_columns, columns)
expected_data = [('source', 'id', 'swift')]
self.assertEqual(expected_data, list(data))
def test_data_sources_list_long(self):
arglist = ['--long']
verifylist = [('long', True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
expected_columns = ['Name', 'Id', 'Type', 'Url', 'Description',
'Is public', 'Is protected']
self.assertEqual(expected_columns, columns)
expected_data = [('source', 'id', 'swift',
'swift://container.sahara/object',
'Data Source for tests', True, True)]
self.assertEqual(expected_data, list(data))
class TestShowDataSource(TestDataSources):
def setUp(self):
super(TestShowDataSource, self).setUp()
self.ds_mock.find_unique.return_value = api_ds.DataSources(
None, DS_INFO)
self.cmd = osc_ds.ShowDataSource(self.app, None)
def test_data_sources_show(self):
arglist = ['source']
verifylist = [('data_source', 'source')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.ds_mock.find_unique.assert_called_once_with(name='source')
expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
'Name', 'Type', 'Url')
self.assertEqual(expected_columns, columns)
expected_data = ['Data Source for tests', 'id', True, True, 'source',
'swift', 'swift://container.sahara/object']
self.assertEqual(expected_data, list(data))
class TestDeleteDataSource(TestDataSources):
def setUp(self):
super(TestDeleteDataSource, self).setUp()
self.ds_mock.find_unique.return_value = api_ds.DataSources(
None, DS_INFO)
self.cmd = osc_ds.DeleteDataSource(self.app, None)
def test_data_sources_delete(self):
arglist = ['source']
verifylist = [('data_source', ['source'])]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
self.ds_mock.delete.assert_called_once_with('id')
class TestUpdateDataSource(TestDataSources):
def setUp(self):
super(TestUpdateDataSource, self).setUp()
self.ds_mock.find_unique.return_value = api_ds.DataSources(
None, DS_INFO)
self.ds_mock.update.return_value = mock.Mock(
data_source=DS_INFO)
self.cmd = osc_ds.UpdateDataSource(self.app, None)
def test_data_sources_update_no_options(self):
arglist = []
verifylist = []
self.assertRaises(osc_utils.ParserException, self.check_parser,
self.cmd, arglist, verifylist)
def test_data_sources_update_nothing_updated(self):
arglist = ['source']
verifylist = [('data_source', 'source')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
self.ds_mock.update.assert_called_once_with('id', {})
def test_data_sources_update_required_options(self):
arglist = ['source']
verifylist = [('data_source', 'source')]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.ds_mock.update.assert_called_once_with('id', {})
expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
'Name', 'Type', 'Url')
self.assertEqual(expected_columns, columns)
expected_data = ('Data Source for tests', 'id', True, True, 'source',
'swift', 'swift://container.sahara/object')
self.assertEqual(expected_data, data)
def test_data_sources_update_all_options(self):
arglist = ['source', '--name', 'source', '--type', 'swift', '--url',
'swift://container.sahara/object', '--username', 'user',
'--password', 'pass', '--description',
'Data Source for tests', '--public', '--protected']
verifylist = [('data_source', 'source'), ('name', 'source'),
('type', 'swift'),
('url', 'swift://container.sahara/object'),
('username', 'user'), ('password', 'pass'),
('description', 'Data Source for tests'),
('is_public', True), ('is_protected', True)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.ds_mock.update.assert_called_once_with(
'id', {'name': 'source', 'url': 'swift://container.sahara/object',
'is_protected': True,
'credentials': {'password': 'pass', 'user': 'user'},
'is_public': True, 'type': 'swift',
'description': 'Data Source for tests'})
expected_columns = ('Description', 'Id', 'Is protected', 'Is public',
'Name', 'Type', 'Url')
self.assertEqual(expected_columns, columns)
expected_data = ('Data Source for tests', 'id', True, True, 'source',
'swift', 'swift://container.sahara/object')
self.assertEqual(expected_data, data)
def test_data_sources_update_private_unprotected(self):
arglist = ['source', '--private', '--unprotected']
verifylist = [('data_source', 'source'), ('is_public', False),
('is_protected', False)]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.cmd.take_action(parsed_args)
self.ds_mock.update.assert_called_once_with(
'id', {'is_public': False, 'is_protected': False})
| true | true |
1c2c38f1314c710ed50ebe34a3ec895a178c8f82 | 1,779 | py | Python | tests/serializers.py | jwhitlock/drf-json-api | a62802432c612c34079f3c3694129f37778e2577 | [
"MIT"
] | null | null | null | tests/serializers.py | jwhitlock/drf-json-api | a62802432c612c34079f3c3694129f37778e2577 | [
"MIT"
] | null | null | null | tests/serializers.py | jwhitlock/drf-json-api | a62802432c612c34079f3c3694129f37778e2577 | [
"MIT"
] | null | null | null | from rest_framework import relations, serializers
from tests import models
import rest_framework
class CommentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
fields = ("id", "url", "post", "body", )
model = models.Comment
class PersonSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
fields = ("id", "url", "name", )
model = models.Person
class PostSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
fields = ("id", "url", "title", "author", "comments", )
model = models.Post
class MaximalPersonSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
fields = ("id", "url", "name", "favorite_post", "liked_comments")
model = models.Person
class MinimalCommentSerializer(CommentSerializer):
class Meta(CommentSerializer.Meta):
fields = ("id", "url", "body", )
class MinimalPostSerializer(PostSerializer):
class Meta(PostSerializer.Meta):
fields = ("id", "url", "title", )
class NestedCommentSerializer(CommentSerializer):
post = MinimalPostSerializer()
class NestedPostSerializer(PostSerializer):
comments = MinimalCommentSerializer(many=True)
class PkCommentSerializer(CommentSerializer):
post = relations.PrimaryKeyRelatedField(queryset=models.Post.objects)
single_related_kwargs = {}
if rest_framework.__version__.split(".")[0] >= "3":
single_related_kwargs = {"allow_null": True}
class PkMaximalPersonSerializer(MaximalPersonSerializer):
favorite_post = relations.PrimaryKeyRelatedField(required=False, queryset=models.Post.objects, **single_related_kwargs)
liked_comments = relations.PrimaryKeyRelatedField(required=False, many=True, queryset=models.Comment.objects)
| 26.552239 | 123 | 0.728499 | from rest_framework import relations, serializers
from tests import models
import rest_framework
class CommentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
fields = ("id", "url", "post", "body", )
model = models.Comment
class PersonSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
fields = ("id", "url", "name", )
model = models.Person
class PostSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
fields = ("id", "url", "title", "author", "comments", )
model = models.Post
class MaximalPersonSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
fields = ("id", "url", "name", "favorite_post", "liked_comments")
model = models.Person
class MinimalCommentSerializer(CommentSerializer):
class Meta(CommentSerializer.Meta):
fields = ("id", "url", "body", )
class MinimalPostSerializer(PostSerializer):
class Meta(PostSerializer.Meta):
fields = ("id", "url", "title", )
class NestedCommentSerializer(CommentSerializer):
post = MinimalPostSerializer()
class NestedPostSerializer(PostSerializer):
comments = MinimalCommentSerializer(many=True)
class PkCommentSerializer(CommentSerializer):
post = relations.PrimaryKeyRelatedField(queryset=models.Post.objects)
single_related_kwargs = {}
if rest_framework.__version__.split(".")[0] >= "3":
single_related_kwargs = {"allow_null": True}
class PkMaximalPersonSerializer(MaximalPersonSerializer):
favorite_post = relations.PrimaryKeyRelatedField(required=False, queryset=models.Post.objects, **single_related_kwargs)
liked_comments = relations.PrimaryKeyRelatedField(required=False, many=True, queryset=models.Comment.objects)
| true | true |
1c2c38f7831d706904b45845d1bc5d3e68681c1d | 1,960 | py | Python | samples/openapi3/client/petstore/python-experimental/petstore_api/model/special_model_name.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 1 | 2022-02-23T20:33:43.000Z | 2022-02-23T20:33:43.000Z | samples/openapi3/client/petstore/python-experimental/petstore_api/model/special_model_name.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 7 | 2021-12-16T03:21:30.000Z | 2022-03-31T03:26:58.000Z | samples/openapi3/client/petstore/python-experimental/petstore_api/model/special_model_name.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 1 | 2022-02-19T21:56:04.000Z | 2022-02-19T21:56:04.000Z | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
Configuration,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class SpecialModelName(
DictSchema
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
model with an invalid class name for python
"""
a = StrSchema
def __new__(
cls,
*args: typing.Union[dict, frozendict, ],
a: typing.Union[a, Unset] = unset,
_configuration: typing.Optional[Configuration] = None,
**kwargs: typing.Type[Schema],
) -> 'SpecialModelName':
return super().__new__(
cls,
*args,
a=a,
_configuration=_configuration,
**kwargs,
)
| 21.075269 | 174 | 0.640306 |
import re
import sys
import typing
from frozendict import frozendict
import decimal
from datetime import date, datetime
from frozendict import frozendict
from petstore_api.schemas import (
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
Configuration,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class SpecialModelName(
DictSchema
):
a = StrSchema
def __new__(
cls,
*args: typing.Union[dict, frozendict, ],
a: typing.Union[a, Unset] = unset,
_configuration: typing.Optional[Configuration] = None,
**kwargs: typing.Type[Schema],
) -> 'SpecialModelName':
return super().__new__(
cls,
*args,
a=a,
_configuration=_configuration,
**kwargs,
)
| true | true |
1c2c3984c19ddc16ccb2625358b0d022aea89c00 | 1,270 | py | Python | tests/test_entity.py | juntosdesdecasa/mcthings_extra | dc4c91ac6920b861ff4454be506eed6e067f306b | [
"Apache-2.0"
] | 1 | 2020-05-12T06:33:36.000Z | 2020-05-12T06:33:36.000Z | tests/test_entity.py | Voxelers/mcthings_extra | dc4c91ac6920b861ff4454be506eed6e067f306b | [
"Apache-2.0"
] | 1 | 2020-04-24T12:25:24.000Z | 2020-04-24T23:09:30.000Z | tests/test_entity.py | Voxelers/mcthings_extra | dc4c91ac6920b861ff4454be506eed6e067f306b | [
"Apache-2.0"
] | 1 | 2020-05-15T14:02:51.000Z | 2020-05-15T14:02:51.000Z | #!/usr/bin/env python3
# Licensed under the terms of http://www.apache.org/licenses/LICENSE-2.0
# Author (©): Alvaro del Castillo
import sys
import mcpi.block
import mcpi.entity
from mcpi.vec3 import Vec3
from mcthings.decorators.light_decorator import LightDecorator
from mcthings.house import House
from mcthings.server import Server
from mcthings.world import World
from mcthings_extra.decorators.villager_decorator import VillagerDecorator
BUILDER_NAME = "ElasticExplorer"
MC_SEVER_HOST = "localhost"
MC_SEVER_PORT = 4711
def main():
try:
World.connect(Server(MC_SEVER_HOST, MC_SEVER_PORT))
World.server.postToChat("Spawning entities in Minecraft")
pos = World.server.entity.getTilePos(World.server.getPlayerEntityId(BUILDER_NAME))
# Let's create a Scene and populate it with Entities
house = House(Vec3(pos.x + 5, pos.y, pos.z))
house.height = 4
house.width = 10
house.length = 10
house.build()
house.add_decorator(VillagerDecorator)
house.add_decorator(LightDecorator)
house.decorate()
except mcpi.connection.RequestError:
print("Can't connect to Minecraft server " + MC_SEVER_HOST)
if __name__ == "__main__":
main()
sys.exit(0)
| 25.4 | 90 | 0.716535 |
import sys
import mcpi.block
import mcpi.entity
from mcpi.vec3 import Vec3
from mcthings.decorators.light_decorator import LightDecorator
from mcthings.house import House
from mcthings.server import Server
from mcthings.world import World
from mcthings_extra.decorators.villager_decorator import VillagerDecorator
BUILDER_NAME = "ElasticExplorer"
MC_SEVER_HOST = "localhost"
MC_SEVER_PORT = 4711
def main():
try:
World.connect(Server(MC_SEVER_HOST, MC_SEVER_PORT))
World.server.postToChat("Spawning entities in Minecraft")
pos = World.server.entity.getTilePos(World.server.getPlayerEntityId(BUILDER_NAME))
house = House(Vec3(pos.x + 5, pos.y, pos.z))
house.height = 4
house.width = 10
house.length = 10
house.build()
house.add_decorator(VillagerDecorator)
house.add_decorator(LightDecorator)
house.decorate()
except mcpi.connection.RequestError:
print("Can't connect to Minecraft server " + MC_SEVER_HOST)
if __name__ == "__main__":
main()
sys.exit(0)
| true | true |
1c2c398680d7f59fc0fd38aaa36a16bdc1bfeb3a | 13,112 | py | Python | scripts/models/verification_result.py | Druidos/cv | 90cdbf212d7cc8c5cbd2fbbcc770d18a89771037 | [
"Apache-2.0"
] | null | null | null | scripts/models/verification_result.py | Druidos/cv | 90cdbf212d7cc8c5cbd2fbbcc770d18a89771037 | [
"Apache-2.0"
] | null | null | null | scripts/models/verification_result.py | Druidos/cv | 90cdbf212d7cc8c5cbd2fbbcc770d18a89771037 | [
"Apache-2.0"
] | null | null | null | #
# CV is a framework for continuous verification.
#
# Copyright (c) 2018-2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import json
import os
import re
import shutil
import time
from xml.etree import ElementTree
from components import *
from components.mea import MEA
TAG_OPTIMIZE = "optimize"
def to_str(val) -> str:
    """Render *val* through the format protocol (same result as str() for plain values)."""
    return f"{val}"
class EntryPointDesc:
    """Description of a single entry point, loaded from its JSON file."""

    def __init__(self, file: str, identifier: str):
        """Read metadata (optimize flag, subsystem) from the given JSON file.

        The file is opened with errors='ignore' so undecodable bytes are
        dropped rather than raising.
        """
        self.file = file
        with open(file, errors='ignore') as fd:
            data = json.load(fd)
            metadata = data.get(TAG_METADATA, {})
            self.optimize = metadata.get(TAG_OPTIMIZE, False)
            self.subsystem = metadata.get(TAG_SUBSYSTEM, ".")
        self.id = identifier  # Path in the entrypoints directory (may contain subdirectories).
        self.short_name = re.sub(r"\W", "_", identifier)  # Sanitized for use in path concatenations.

    def __str__(self):
        return self.id
class VerificationTask:
    """A single verifier launch: entry point, rule, entrypoint function and CIL file."""

    def __init__(self, entry_desc: EntryPointDesc, rule, entrypoint, path_to_verifier, cil_file):
        self.entry_desc = entry_desc
        self.rule = rule
        self.entrypoint = entrypoint
        self.mode = self._mode_for_rule(rule)
        self.path_to_verifier = path_to_verifier
        self.cil_file = cil_file
        self.name = "_".join([entry_desc.id, rule, entrypoint])

    @staticmethod
    def _mode_for_rule(rule):
        """Map a rule identifier onto the corresponding verification mode."""
        if rule == RULE_COVERAGE:
            return COVERAGE
        if rule == RULE_MEMSAFETY:
            return MEMSAFETY
        if rule == RULE_RACES:
            return RACES
        if rule in DEADLOCK_SUB_PROPERTIES:
            return DEADLOCK
        return UNREACHABILITY

    def copy(self):
        """Return a new task of the same (sub)class with identical parameters."""
        cls = type(self)
        return cls(self.entry_desc, self.rule, self.entrypoint, self.path_to_verifier, self.cil_file)
class GlobalStatistics:
    """
    Class for collecting and printing global statistics: verdict counters,
    resource usage and error-trace counts across verifier launches.
    """
    def __init__(self):
        self.cpu = 0  # in seconds
        self.wall = 0  # of each verifier launch
        self.mem_average = 0  # in MB
        self.safes = 0
        self.unsafes = 0
        self.unknowns = 0
        self.et = 0  # error traces before filtering
        self.filtered = 0  # error traces after filtering
        self.relevant = 0

    def add_result(self, verification_result):
        """Accumulate one verification result into the counters."""
        self.cpu += verification_result.cpu
        self.wall += verification_result.wall
        self.et += verification_result.initial_traces
        self.filtered += verification_result.filtered_traces
        if verification_result.relevant:
            self.relevant += 1
        if verification_result.verdict == VERDICT_SAFE:
            self.safes += 1
        elif verification_result.verdict == VERDICT_UNSAFE:
            self.unsafes += 1
        else:
            self.unknowns += 1
        # Summed here; turned into an average later by sum_memory().
        self.mem_average += verification_result.mem

    def __add_overall(self, cpu, wall, et, filtered, relevant):
        self.cpu += cpu
        self.wall += wall
        self.et += et
        self.filtered += filtered
        self.relevant += relevant

    def sum(self, info):
        """Merge another GlobalStatistics instance into this one."""
        # 'relevant' is combined via max, not addition: info.relevant is
        # zeroed before __add_overall so it contributes 0 to the sum below.
        self.relevant = max(self.relevant, info.relevant)
        info.relevant = 0
        self.__add_overall(info.cpu, info.wall, info.et, info.filtered, info.relevant)
        # NOTE(review): mem_average is combined with max() here but summed in
        # add_result -- confirm this asymmetry is intended.
        self.mem_average = max(self.mem_average, info.mem_average)
        self.safes += info.safes
        self.unsafes += info.unsafes
        self.unknowns += info.unknowns

    def sum_memory(self):
        """Convert the accumulated memory sum into a per-launch average."""
        overall = self.safes + self.unknowns + self.unsafes
        if overall:
            self.mem_average = int(self.mem_average / overall)
        else:
            self.mem_average = 0

    def __str__(self):
        # round(x, -3) rounds to the nearest thousand for compact reporting.
        return ";".join([str(self.safes), str(self.unsafes), str(self.unknowns), str(self.relevant), str(self.et),
                         str(self.filtered), str(round(self.cpu, -3)), str(round(self.wall, -3)),
                         str(round(self.mem_average / 1000))])
class VerificationResults:
    """Outcome of a single verifier launch.

    Tracks verdict, termination reason, resource consumption, coverage
    numbers and error-trace counts before/after MEA filtering.  Instances
    serialize to a semicolon-separated line (__str__) and can be restored
    from one (parse_line).
    """

    def __init__(self, verification_task, config: dict):
        # A result may be created without a task (e.g. when re-reading a
        # previously serialized line via parse_line).
        if verification_task:
            self.id = verification_task.entry_desc.subsystem
            self.rule = verification_task.rule
            self.entrypoint = verification_task.entrypoint
        else:
            self.id = None
            self.rule = None
            self.entrypoint = None
        self.cpu = 0
        self.mem = 0
        self.wall = 0
        self.verdict = VERDICT_UNKNOWN
        self.termination_reason = ""
        self.relevant = False
        self.initial_traces = 0
        self.work_dir = None
        self.cov_lines = 0.0
        self.cov_funcs = 0.0
        self.filtered_traces = 0
        self.debug = config.get(TAG_DEBUG, False)
        self.config = config
        self.coverage_resources = dict()
        self.mea_resources = dict()
        self.resources = dict()

    def is_equal(self, verification_task: VerificationTask):
        """Return True if this result corresponds to the given task."""
        return self.id == verification_task.entry_desc.subsystem and \
               self.rule == verification_task.rule and \
               self.entrypoint == verification_task.entrypoint

    def get_name(self) -> str:
        # NOTE(review): entrypoint appears twice and self.id is unused here;
        # the first element was possibly meant to be str(self.id).  Confirm
        # before changing -- the name may already be used as a key elsewhere.
        return "_".join([str(self.entrypoint), str(self.rule), str(self.entrypoint)])

    def __parse_xml_node(self, columns):
        """Extract verdict and resource usage from BenchExec <column> nodes."""
        for column in columns:
            title = column.attrib['title']
            if title == 'status':
                self.verdict = column.attrib['value']
                if 'true' in self.verdict:
                    self.verdict = VERDICT_SAFE
                    self.termination_reason = TERMINATION_SUCCESS
                elif 'false' in self.verdict:
                    self.verdict = VERDICT_UNSAFE
                    self.termination_reason = TERMINATION_SUCCESS
                else:
                    # Any other status text becomes the termination reason and
                    # the verdict is downgraded to unknown.
                    self.termination_reason = self.verdict
                    self.verdict = VERDICT_UNKNOWN
            elif title == 'cputime':
                value = column.attrib['value']
                if str(value).endswith("s"):
                    value = value[:-1]  # strip trailing "s" (seconds) unit
                self.cpu = float(value)
            elif title == 'walltime':
                value = column.attrib['value']
                if str(value).endswith("s"):
                    value = value[:-1]
                self.wall = float(value)
            elif title == 'memUsage' or title == 'memory':
                value = column.attrib['value']
                if str(value).endswith("B"):
                    value = value[:-1]  # strip trailing "B" (bytes) unit
                self.mem = int(int(value) / 1000000)  # bytes -> MB
            elif title in ADDITIONAL_RESOURCES:
                value = column.attrib['value']
                if str(value).endswith("B"):
                    value = value[:-1]
                self.resources[title] = int(value)

    def parse_output_dir(self, launch_dir: str, install_dir: str, result_dir: str, parsed_columns=None):
        """Parse a finished launch directory.

        Reads the BenchExec XML result (or pre-parsed columns), scans the
        verifier log for refinement counts, counts witness files, and -- when
        exactly one error trace exists -- validates it via MEA without
        filtering, possibly adjusting the verdict.
        """
        # Process BenchExec xml output file.
        if parsed_columns:
            self.__parse_xml_node(parsed_columns)
        else:
            for file in glob.glob(os.path.join(launch_dir, 'benchmark*.xml')):
                tree = ElementTree.ElementTree()
                tree.parse(file)
                root = tree.getroot()
                self.__parse_xml_node(root.findall('./run/column'))

        # Process verifier log file.
        try:
            if parsed_columns:
                log_file = glob.glob(os.path.join(launch_dir, LOG_FILE))[0]
            else:
                log_file = glob.glob(os.path.join(launch_dir, 'benchmark*logfiles/*.log'))[0]
            with open(log_file, errors='ignore') as f_res:
                for line in f_res.readlines():
                    res = re.search(r'Number of refinements:(\s+)(\d+)', line)
                    if res:
                        if int(res.group(2)) > 1:
                            # More than one refinement: the verifier did real
                            # work, so the result is marked relevant.
                            self.relevant = True
            if not parsed_columns:
                shutil.move(log_file, "{}/{}".format(launch_dir, LOG_FILE))
        except IndexError:
            # No log file was produced for this launch; continue anyway.
            print("WARNING: log file was not found for entry point '{}'".format(self.entrypoint))
            pass

        error_traces = glob.glob("{}/*{}".format(launch_dir, GRAPHML_EXTENSION))
        self.initial_traces = len(error_traces)
        # Correctness (safe) proofs are dropped when the exporter is
        # configured not to include them.
        if self.verdict == VERDICT_SAFE and not \
                self.config.get(COMPONENT_EXPORTER, {}).get(TAG_ADD_VERIFIER_PROOFS, True):
            self.initial_traces = 0
        self.filtered_traces = self.initial_traces

        if not self.verdict == VERDICT_SAFE:
            self.relevant = True

        # If there is only one trace, filtering will not be performed and it will not be examined.
        if self.initial_traces == 1:
            # Trace should be checked if it is correct or not.
            start_time_cpu = time.process_time()
            start_wall_time = time.time()
            mea = MEA(self.config, error_traces, install_dir, self.rule, result_dir)
            is_exported, witness_type = mea.process_traces_without_filtering()
            if is_exported:
                # Trace is fine, just recheck final verdict.
                if witness_type == WITNESS_VIOLATION:
                    # Change global verdict to Unsafe, if there is at least one correct violation witness.
                    self.verdict = VERDICT_UNSAFE
            else:
                # Trace is bad, most likely verifier was killed during its printing, so just delete it.
                if self.verdict == VERDICT_UNSAFE and witness_type == WITNESS_VIOLATION:
                    # TODO: Add exception text to log.
                    self.verdict = VERDICT_UNKNOWN
                self.initial_traces = 0
                self.filtered_traces = 0
            self.mea_resources[TAG_CPU_TIME] = time.process_time() - start_time_cpu
            self.mea_resources[TAG_WALL_TIME] = time.time() - start_wall_time
            self.mea_resources[TAG_MEMORY_USAGE] = mea.memory

        # Remove auxiliary files (kept in debug mode for inspection).
        if not self.debug:
            for file in glob.glob(os.path.join(launch_dir, "benchmark*")):
                if os.path.isdir(file):
                    shutil.rmtree(file, ignore_errors=True)
                else:
                    os.remove(file)

    def filter_traces(self, launch_dir: str, install_dir: str, result_dir: str):
        """Filter the found witnesses via MEA (used for several traces)."""
        # Perform Multiple Error Analysis to filter found error traces (only for several traces).
        start_time_cpu = time.process_time()
        start_wall_time = time.time()
        traces = glob.glob("{}/witness*".format(launch_dir))
        mea = MEA(self.config, traces, install_dir, self.rule, result_dir)
        self.filtered_traces = len(mea.filter())
        if self.filtered_traces:
            self.verdict = VERDICT_UNSAFE
        self.mea_resources[TAG_CPU_TIME] = time.process_time() - start_time_cpu + mea.cpu_time
        self.mea_resources[TAG_WALL_TIME] = time.time() - start_wall_time
        self.mea_resources[TAG_MEMORY_USAGE] = mea.memory

    def parse_line(self, line: str):
        """Restore the fields from a semicolon-separated line (inverse of __str__)."""
        values = line.split(";")
        self.id = values[0]
        self.rule = values[1]
        self.entrypoint = values[2]
        self.verdict = values[3]
        self.termination_reason = values[4]
        self.cpu = int(values[5])
        self.wall = int(values[6])
        self.mem = int(values[7])
        self.relevant = values[8]
        self.initial_traces = int(values[9])
        self.filtered_traces = int(values[10])
        self.work_dir = values[11]
        self.cov_lines = float(values[12])
        self.cov_funcs = float(values[13])
        self.mea_resources[TAG_CPU_TIME] = float(values[14])

    def __str__(self):
        # Field order must stay in sync with parse_line above.
        return ";".join([to_str(self.id), to_str(self.rule), to_str(self.entrypoint), to_str(self.verdict),
                         to_str(self.termination_reason), to_str(self.cpu), to_str(self.wall), to_str(self.mem),
                         to_str(self.relevant), to_str(self.initial_traces), to_str(self.filtered_traces),
                         to_str(self.work_dir), to_str(self.cov_lines), to_str(self.cov_funcs),
                         to_str(self.mea_resources.get(TAG_CPU_TIME, 0.0))])

    def print_resources(self):
        """Return the additional resource values as a semicolon-separated string."""
        res = list()
        for resource in ADDITIONAL_RESOURCES:
            if resource == "error traces":
                value = self.filtered_traces
            else:
                value = self.resources.get(resource, 0)
            res.append(str(value))
        return ";".join(res)
| 40.594427 | 114 | 0.59701 |
import glob
import json
import os
import re
import shutil
import time
from xml.etree import ElementTree
from components import *
from components.mea import MEA
TAG_OPTIMIZE = "optimize"
def to_str(val) -> str:
return "{}".format(val)
class EntryPointDesc:
def __init__(self, file: str, identifier: str):
self.file = file
with open(file, errors='ignore') as fd:
data = json.load(fd)
metadata = data.get(TAG_METADATA, {})
self.optimize = metadata.get(TAG_OPTIMIZE, False)
self.subsystem = metadata.get(TAG_SUBSYSTEM, ".")
self.id = identifier
self.short_name = re.sub(r"\W", "_", identifier)
def __str__(self):
return self.id
class VerificationTask:
def __init__(self, entry_desc: EntryPointDesc, rule, entrypoint, path_to_verifier, cil_file):
self.entry_desc = entry_desc
self.rule = rule
self.entrypoint = entrypoint
if self.rule == RULE_COVERAGE:
self.mode = COVERAGE
elif self.rule == RULE_MEMSAFETY:
self.mode = MEMSAFETY
elif self.rule == RULE_RACES:
self.mode = RACES
elif self.rule in DEADLOCK_SUB_PROPERTIES:
self.mode = DEADLOCK
else:
self.mode = UNREACHABILITY
self.path_to_verifier = path_to_verifier
self.cil_file = cil_file
self.name = "_".join([self.entry_desc.id, self.rule, self.entrypoint])
def copy(self):
return type(self)(self.entry_desc, self.rule, self.entrypoint, self.path_to_verifier, self.cil_file)
class GlobalStatistics:
def __init__(self):
self.cpu = 0
self.wall = 0
self.mem_average = 0
self.safes = 0
self.unsafes = 0
self.unknowns = 0
self.et = 0
self.filtered = 0
self.relevant = 0
def add_result(self, verification_result):
self.cpu += verification_result.cpu
self.wall += verification_result.wall
self.et += verification_result.initial_traces
self.filtered += verification_result.filtered_traces
if verification_result.relevant:
self.relevant += 1
if verification_result.verdict == VERDICT_SAFE:
self.safes += 1
elif verification_result.verdict == VERDICT_UNSAFE:
self.unsafes += 1
else:
self.unknowns += 1
self.mem_average += verification_result.mem
def __add_overall(self, cpu, wall, et, filtered, relevant):
self.cpu += cpu
self.wall += wall
self.et += et
self.filtered += filtered
self.relevant += relevant
def sum(self, info):
self.relevant = max(self.relevant, info.relevant)
info.relevant = 0
self.__add_overall(info.cpu, info.wall, info.et, info.filtered, info.relevant)
self.mem_average = max(self.mem_average, info.mem_average)
self.safes += info.safes
self.unsafes += info.unsafes
self.unknowns += info.unknowns
def sum_memory(self):
overall = self.safes + self.unknowns + self.unsafes
if overall:
self.mem_average = int(self.mem_average / overall)
else:
self.mem_average = 0
def __str__(self):
return ";".join([str(self.safes), str(self.unsafes), str(self.unknowns), str(self.relevant), str(self.et),
str(self.filtered), str(round(self.cpu, -3)), str(round(self.wall, -3)),
str(round(self.mem_average / 1000))])
class VerificationResults:
def __init__(self, verification_task, config: dict):
if verification_task:
self.id = verification_task.entry_desc.subsystem
self.rule = verification_task.rule
self.entrypoint = verification_task.entrypoint
else:
self.id = None
self.rule = None
self.entrypoint = None
self.cpu = 0
self.mem = 0
self.wall = 0
self.verdict = VERDICT_UNKNOWN
self.termination_reason = ""
self.relevant = False
self.initial_traces = 0
self.work_dir = None
self.cov_lines = 0.0
self.cov_funcs = 0.0
self.filtered_traces = 0
self.debug = config.get(TAG_DEBUG, False)
self.config = config
self.coverage_resources = dict()
self.mea_resources = dict()
self.resources = dict()
def is_equal(self, verification_task: VerificationTask):
return self.id == verification_task.entry_desc.subsystem and \
self.rule == verification_task.rule and \
self.entrypoint == verification_task.entrypoint
def get_name(self) -> str:
return "_".join([str(self.entrypoint), str(self.rule), str(self.entrypoint)])
def __parse_xml_node(self, columns):
for column in columns:
title = column.attrib['title']
if title == 'status':
self.verdict = column.attrib['value']
if 'true' in self.verdict:
self.verdict = VERDICT_SAFE
self.termination_reason = TERMINATION_SUCCESS
elif 'false' in self.verdict:
self.verdict = VERDICT_UNSAFE
self.termination_reason = TERMINATION_SUCCESS
else:
self.termination_reason = self.verdict
self.verdict = VERDICT_UNKNOWN
elif title == 'cputime':
value = column.attrib['value']
if str(value).endswith("s"):
value = value[:-1]
self.cpu = float(value)
elif title == 'walltime':
value = column.attrib['value']
if str(value).endswith("s"):
value = value[:-1]
self.wall = float(value)
elif title == 'memUsage' or title == 'memory':
value = column.attrib['value']
if str(value).endswith("B"):
value = value[:-1]
self.mem = int(int(value) / 1000000)
elif title in ADDITIONAL_RESOURCES:
value = column.attrib['value']
if str(value).endswith("B"):
value = value[:-1]
self.resources[title] = int(value)
def parse_output_dir(self, launch_dir: str, install_dir: str, result_dir: str, parsed_columns=None):
if parsed_columns:
self.__parse_xml_node(parsed_columns)
else:
for file in glob.glob(os.path.join(launch_dir, 'benchmark*.xml')):
tree = ElementTree.ElementTree()
tree.parse(file)
root = tree.getroot()
self.__parse_xml_node(root.findall('./run/column'))
try:
if parsed_columns:
log_file = glob.glob(os.path.join(launch_dir, LOG_FILE))[0]
else:
log_file = glob.glob(os.path.join(launch_dir, 'benchmark*logfiles/*.log'))[0]
with open(log_file, errors='ignore') as f_res:
for line in f_res.readlines():
res = re.search(r'Number of refinements:(\s+)(\d+)', line)
if res:
if int(res.group(2)) > 1:
self.relevant = True
if not parsed_columns:
shutil.move(log_file, "{}/{}".format(launch_dir, LOG_FILE))
except IndexError:
print("WARNING: log file was not found for entry point '{}'".format(self.entrypoint))
pass
error_traces = glob.glob("{}/*{}".format(launch_dir, GRAPHML_EXTENSION))
self.initial_traces = len(error_traces)
if self.verdict == VERDICT_SAFE and not \
self.config.get(COMPONENT_EXPORTER, {}).get(TAG_ADD_VERIFIER_PROOFS, True):
self.initial_traces = 0
self.filtered_traces = self.initial_traces
if not self.verdict == VERDICT_SAFE:
self.relevant = True
if self.initial_traces == 1:
start_time_cpu = time.process_time()
start_wall_time = time.time()
mea = MEA(self.config, error_traces, install_dir, self.rule, result_dir)
is_exported, witness_type = mea.process_traces_without_filtering()
if is_exported:
if witness_type == WITNESS_VIOLATION:
self.verdict = VERDICT_UNSAFE
else:
if self.verdict == VERDICT_UNSAFE and witness_type == WITNESS_VIOLATION:
self.verdict = VERDICT_UNKNOWN
self.initial_traces = 0
self.filtered_traces = 0
self.mea_resources[TAG_CPU_TIME] = time.process_time() - start_time_cpu
self.mea_resources[TAG_WALL_TIME] = time.time() - start_wall_time
self.mea_resources[TAG_MEMORY_USAGE] = mea.memory
if not self.debug:
for file in glob.glob(os.path.join(launch_dir, "benchmark*")):
if os.path.isdir(file):
shutil.rmtree(file, ignore_errors=True)
else:
os.remove(file)
def filter_traces(self, launch_dir: str, install_dir: str, result_dir: str):
start_time_cpu = time.process_time()
start_wall_time = time.time()
traces = glob.glob("{}/witness*".format(launch_dir))
mea = MEA(self.config, traces, install_dir, self.rule, result_dir)
self.filtered_traces = len(mea.filter())
if self.filtered_traces:
self.verdict = VERDICT_UNSAFE
self.mea_resources[TAG_CPU_TIME] = time.process_time() - start_time_cpu + mea.cpu_time
self.mea_resources[TAG_WALL_TIME] = time.time() - start_wall_time
self.mea_resources[TAG_MEMORY_USAGE] = mea.memory
def parse_line(self, line: str):
values = line.split(";")
self.id = values[0]
self.rule = values[1]
self.entrypoint = values[2]
self.verdict = values[3]
self.termination_reason = values[4]
self.cpu = int(values[5])
self.wall = int(values[6])
self.mem = int(values[7])
self.relevant = values[8]
self.initial_traces = int(values[9])
self.filtered_traces = int(values[10])
self.work_dir = values[11]
self.cov_lines = float(values[12])
self.cov_funcs = float(values[13])
self.mea_resources[TAG_CPU_TIME] = float(values[14])
def __str__(self):
return ";".join([to_str(self.id), to_str(self.rule), to_str(self.entrypoint), to_str(self.verdict),
to_str(self.termination_reason), to_str(self.cpu), to_str(self.wall), to_str(self.mem),
to_str(self.relevant), to_str(self.initial_traces), to_str(self.filtered_traces),
to_str(self.work_dir), to_str(self.cov_lines), to_str(self.cov_funcs),
to_str(self.mea_resources.get(TAG_CPU_TIME, 0.0))])
def print_resources(self):
res = list()
for resource in ADDITIONAL_RESOURCES:
if resource == "error traces":
value = self.filtered_traces
else:
value = self.resources.get(resource, 0)
res.append(str(value))
return ";".join(res)
| true | true |
1c2c3b53bf84a7751b24ce63b436536f011c60b2 | 1,967 | py | Python | tests/test_formatter.py | Cottonwood-Technology/ConfigTree | ce7d92a4e536ba0104b92a9ce871819279f5b63a | [
"BSD-2-Clause"
] | null | null | null | tests/test_formatter.py | Cottonwood-Technology/ConfigTree | ce7d92a4e536ba0104b92a9ce871819279f5b63a | [
"BSD-2-Clause"
] | null | null | null | tests/test_formatter.py | Cottonwood-Technology/ConfigTree | ce7d92a4e536ba0104b92a9ce871819279f5b63a | [
"BSD-2-Clause"
] | null | null | null | from os import linesep
from configtree import formatter
from configtree.tree import Tree
# Shared fixture: dotted keys and quote-heavy values exercise escaping in
# both the JSON and the shell formatter, plus None/bool/list handling.
t = Tree(
    {
        "a.x": 1,
        "a.y": 'Testing "json"',
        "a.z": "Testing 'shell'",
        "list": ['Testing "json"', "Testing 'shell'"],
        "none": None,
        "bool": True,
    }
)
def test_json():
    """JSON output: flat dotted keys by default, nested objects with rare=True."""
    result = formatter.to_json(t, indent=4, sort=True)
    result = [line.rstrip() for line in result.split(linesep)]
    assert result == [
        "{",
        '    "a.x": 1,',
        '    "a.y": "Testing \\"json\\"",',
        '    "a.z": "Testing \'shell\'",',
        '    "bool": true,',
        '    "list": [',
        '        "Testing \\"json\\"",',
        "        \"Testing 'shell'\"",
        "    ],",
        '    "none": null',
        "}",
    ]
    # rare=True expands dotted keys into nested objects.
    result = formatter.to_json(t, indent=4, sort=True, rare=True)
    result = [line.rstrip() for line in result.split(linesep)]
    assert result == [
        "{",
        '    "a": {',
        '        "x": 1,',
        '        "y": "Testing \\"json\\"",',
        '        "z": "Testing \'shell\'"',
        "    },",
        '    "bool": true,',
        '    "list": [',
        '        "Testing \\"json\\"",',
        "        \"Testing 'shell'\"",
        "    ],",
        '    "none": null',
        "}",
    ]
def test_shell():
    """Shell output: prefixed, capitalized names; lists joined by seq_sep."""
    result = formatter.to_shell(
        t, prefix="local ", seq_sep=":", sort=True, capitalize=True
    )
    result = [line.rstrip() for line in result.split(linesep)]
    assert result == [
        "local A_X=1",
        "local A_Y='Testing \"json\"'",
        "local A_Z='Testing \\'shell\\''",
        "local BOOL=true",
        "local LIST='Testing \"json\":Testing \\'shell\\''",
        "local NONE=''",
    ]
    # A scalar value renders as a single assignment.
    result = formatter.to_shell(t["a.x"], prefix="local X=")
    assert result == "local X=1"
def test_map():
    """The formatter registry exposes the JSON and shell renderers."""
    expected = {"json": formatter.to_json, "shell": formatter.to_shell}
    for name, func in expected.items():
        assert formatter.map[name] == func
| 25.881579 | 67 | 0.449415 | from os import linesep
from configtree import formatter
from configtree.tree import Tree
t = Tree(
{
"a.x": 1,
"a.y": 'Testing "json"',
"a.z": "Testing 'shell'",
"list": ['Testing "json"', "Testing 'shell'"],
"none": None,
"bool": True,
}
)
def test_json():
result = formatter.to_json(t, indent=4, sort=True)
result = [line.rstrip() for line in result.split(linesep)]
assert result == [
"{",
' "a.x": 1,',
' "a.y": "Testing \\"json\\"",',
' "a.z": "Testing \'shell\'",',
' "bool": true,',
' "list": [',
' "Testing \\"json\\"",',
" \"Testing 'shell'\"",
" ],",
' "none": null',
"}",
]
result = formatter.to_json(t, indent=4, sort=True, rare=True)
result = [line.rstrip() for line in result.split(linesep)]
assert result == [
"{",
' "a": {',
' "x": 1,',
' "y": "Testing \\"json\\"",',
' "z": "Testing \'shell\'"',
" },",
' "bool": true,',
' "list": [',
' "Testing \\"json\\"",',
" \"Testing 'shell'\"",
" ],",
' "none": null',
"}",
]
def test_shell():
result = formatter.to_shell(
t, prefix="local ", seq_sep=":", sort=True, capitalize=True
)
result = [line.rstrip() for line in result.split(linesep)]
assert result == [
"local A_X=1",
"local A_Y='Testing \"json\"'",
"local A_Z='Testing \\'shell\\''",
"local BOOL=true",
"local LIST='Testing \"json\":Testing \\'shell\\''",
"local NONE=''",
]
result = formatter.to_shell(t["a.x"], prefix="local X=")
assert result == "local X=1"
def test_map():
assert formatter.map["json"] == formatter.to_json
assert formatter.map["shell"] == formatter.to_shell
| true | true |
1c2c3c44f55c81475ff3d43350cbad7efe9dd2a3 | 410 | py | Python | basic_web/app.py | HungThinhPhung/Miscellaneous | dee5c6551a8580510c3c4e54087d38feb995d4d4 | [
"MIT"
] | null | null | null | basic_web/app.py | HungThinhPhung/Miscellaneous | dee5c6551a8580510c3c4e54087d38feb995d4d4 | [
"MIT"
] | null | null | null | basic_web/app.py | HungThinhPhung/Miscellaneous | dee5c6551a8580510c3c4e54087d38feb995d4d4 | [
"MIT"
] | null | null | null | import logging
import os
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/", methods=['GET'])
def render():
    """Serve the landing page (templates/index.html)."""
    return render_template('index.html')
if __name__ == "__main__":
    # Port comes from the environment so the platform/container can override it.
    port = int(os.getenv('PORT', 8111))
    # Lazy %-args: let logging do the formatting only if the record is emitted.
    logging.warning("Starting app on port %d", port)
    app.config['JSON_SORT_KEYS'] = False
    # 0.0.0.0 exposes the dev server on all interfaces; debug=True is for
    # development use only.
    app.run(debug=True, port=port, host='0.0.0.0', threaded=True)
| 21.578947 | 65 | 0.67561 | import logging
import os
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/", methods=['GET'])
def render():
return render_template('index.html')
if __name__ == "__main__":
port = int(os.getenv('PORT', 8111))
logging.warning("Starting app on port %d" % port)
app.config['JSON_SORT_KEYS'] = False
app.run(debug=True, port=port, host='0.0.0.0', threaded=True)
| true | true |
1c2c3c7f288d963874d9bedd9f8d88d979674e3a | 20,112 | py | Python | bin/docker_detection_tester/modules/splunk_container.py | arjunkhunti-crest/security_content | 41e354485e5917d3366ef735a9c5b25a20d3b8cc | [
"Apache-2.0"
] | null | null | null | bin/docker_detection_tester/modules/splunk_container.py | arjunkhunti-crest/security_content | 41e354485e5917d3366ef735a9c5b25a20d3b8cc | [
"Apache-2.0"
] | null | null | null | bin/docker_detection_tester/modules/splunk_container.py | arjunkhunti-crest/security_content | 41e354485e5917d3366ef735a9c5b25a20d3b8cc | [
"Apache-2.0"
] | 1 | 2022-01-27T05:29:43.000Z | 2022-01-27T05:29:43.000Z | from collections import OrderedDict
import datetime
import docker
import docker.types
import docker.models
import docker.models.resource
import docker.models.containers
import os.path
import random
import requests
import shutil
from modules import splunk_sdk
from modules import testing_service
from modules import test_driver
import time
import timeit
from typing import Union
import threading
import wrapt_timeout_decorator
import sys
import traceback
SPLUNKBASE_URL = "https://splunkbase.splunk.com/app/%d/release/%s/download"
SPLUNK_START_ARGS = "--accept-license"
#Give ten minutes to start - this is probably enough time
MAX_CONTAINER_START_TIME_SECONDS = 60*20
class SplunkContainer:
def __init__(
self,
synchronization_object: test_driver.TestDriver,
full_docker_hub_path,
container_name: str,
apps: OrderedDict,
web_port_tuple: tuple[str, int],
management_port_tuple: tuple[str, int],
container_password: str,
files_to_copy_to_container: OrderedDict = OrderedDict(),
mounts: list[docker.types.Mount] = [],
splunkbase_username: Union[str, None] = None,
splunkbase_password: Union[str, None] = None,
splunk_ip: str = "127.0.0.1",
interactive_failure: bool = False,
interactive:bool = False
):
self.interactive_failure = interactive_failure
self.interactive = interactive
self.synchronization_object = synchronization_object
self.client = docker.client.from_env()
self.full_docker_hub_path = full_docker_hub_path
self.container_password = container_password
self.apps = apps
self.files_to_copy_to_container = files_to_copy_to_container
self.splunk_ip = splunk_ip
self.container_name = container_name
self.mounts = mounts
self.environment = self.make_environment(
apps, container_password, splunkbase_username, splunkbase_password
)
self.ports = self.make_ports(web_port_tuple, management_port_tuple)
self.web_port = web_port_tuple[1]
self.management_port = management_port_tuple[1]
self.container = self.make_container()
self.thread = threading.Thread(target=self.run_container, )
self.container_start_time = -1
self.test_start_time = -1
self.num_tests_completed = 0
def prepare_apps_path(
self,
apps: OrderedDict,
splunkbase_username: Union[str, None] = None,
splunkbase_password: Union[str, None] = None,
) -> tuple[str, bool]:
apps_to_install = []
#We don't require credentials unless we install at least one splunkbase app
require_credentials = False
#If the username and password are supplied, then we will use splunkbase...
#assuming that the app_name and app_number are supplied. Note that if a
#local_path is supplied, then it should override this option!
if splunkbase_username is not None and splunkbase_password is not None:
use_splunkbase = True
else:
use_splunkbase = False
for app_name, app_info in self.apps.items():
if use_splunkbase is True and 'local_path' not in app_info:
target = SPLUNKBASE_URL % (app_info["app_number"], app_info["app_version"])
apps_to_install.append(target)
#We will require credentials since we are installing at least one splunkbase app
require_credentials = True
#Some paths may have a local_path and an HTTP path defined. Default to the local_path first,
#mostly because we may have copied it before into the cache to speed up start time.
elif 'local_path' in app_info:
app_file_name = os.path.basename(app_info['local_path'])
app_file_container_path = os.path.join("/tmp/apps", app_file_name)
apps_to_install.append(app_file_container_path)
elif 'http_path' in app_info:
apps_to_install.append(app_info['http_path'])
else:
if use_splunkbase is True:
print("Error, the app %s: %s could not be installed from Splunkbase because "
"--splunkbase_username and.or --splunkbase_password were not provided."
"\n\tQuitting..."%(app_name,app_info), file=sys.stderr)
else:
print("Error, the app %s: %s has no http_path or local_path.\n\tQuitting..."%(app_name,app_info), file=sys.stderr)
sys.exit(1)
return ",".join(apps_to_install), require_credentials
    def make_environment(
        self,
        apps: OrderedDict,
        container_password: str,
        splunkbase_username: Union[str, None] = None,
        splunkbase_password: Union[str, None] = None,
    ) -> dict:
        """Build the environment-variable dict passed to the Splunk container.

        Splunkbase credentials are only included when prepare_apps_path
        reports that at least one app must be downloaded from Splunkbase.
        """
        env = {}
        env["SPLUNK_START_ARGS"] = SPLUNK_START_ARGS
        env["SPLUNK_PASSWORD"] = container_password
        splunk_apps_url, require_credentials = self.prepare_apps_path(
            apps, splunkbase_username, splunkbase_password
        )
        if require_credentials:
            env["SPLUNKBASE_USERNAME"] = splunkbase_username
            env["SPLUNKBASE_PASSWORD"] = splunkbase_password
        env["SPLUNK_APPS_URL"] = splunk_apps_url
        return env
def make_ports(self, *ports: tuple[str, int]) -> dict[str, int]:
port_dict = {}
for port in ports:
port_dict[port[0]] = port[1]
return port_dict
def __str__(self) -> str:
container_string = (
"Container Name: %s\n\t"
"Docker Hub Path: %s\n\t"
"Apps: %s\n\t"
"Ports: %s\n\t"
"Mounts: %s\n\t"
% (
self.container_name,
self.full_docker_hub_path,
self.environment["SPLUNK_APPS_URL"],
self.ports,
)
)
return container_string
    def make_container(self) -> docker.models.resource.Model:
        """Create (but do not start) the Splunk docker container.

        Any pre-existing container with the same name is removed first so
        the create call cannot fail on a name collision.
        """
        # First, make sure that the container has been removed if it already existed
        self.removeContainer()

        container = self.client.containers.create(
            self.full_docker_hub_path,
            ports=self.ports,
            environment=self.environment,
            name=self.container_name,
            mounts=self.mounts,
            detach=True,
        )

        return container
    def extract_tar_file_to_container(
        self, local_file_path: str, container_file_path: str, sleepTimeSeconds: int = 5
    ) -> bool:
        """Copy a local .tar archive into the container at container_file_path.

        Retries indefinitely with a 10-second sleep between attempts, since
        Splunk restarts several times while apps install and can interrupt
        the copy.  Returns True once the copy succeeds.

        NOTE(review): sleepTimeSeconds is accepted but never used -- the
        retry sleep is hard-coded to 10 seconds; confirm which is intended.
        """
        # Check to make sure that the file ends in .tar. If it doesn't raise an exception
        if os.path.splitext(local_file_path)[1] != ".tar":
            raise Exception(
                "Error - Failed copy of file [%s] to container [%s]. Only "
                "files ending in .tar can be copied to the container using this function."
                % (local_file_path, self.container_name)
            )
        successful_copy = False
        api_client = docker.APIClient()
        # need to use the low level client to put a file onto a container
        while not successful_copy:
            try:
                with open(local_file_path, "rb") as fileData:
                    # splunk will restart a few times will installation of apps takes place so it will reload its indexes...
                    api_client.put_archive(
                        container=self.container_name,
                        path=container_file_path,
                        data=fileData,
                    )
                    successful_copy = True
            except Exception as e:
                # Copy failed (container may be mid-restart); wait and retry.
                time.sleep(10)
                successful_copy = False
        return successful_copy
def stopContainer(self,timeout=10) -> bool:
try:
container = self.client.containers.get(self.container_name)
#Note that stopping does not remove any of the volumes or logs,
#so stopping can be useful if we want to debug any container failure
container.stop(timeout=10)
self.synchronization_object.containerFailure()
return True
except Exception as e:
# Container does not exist, or we could not get it. Throw and error
print("Error stopping docker container [%s]"%(self.container_name))
return False
    def removeContainer(
        self, removeVolumes: bool = True, forceRemove: bool = True
    ) -> bool:
        """Remove the container and, by default, its linked volumes.

        Returns True when the container is gone (including when it never
        existed); raises when an actual removal attempt fails.
        """
        try:
            container = self.client.containers.get(self.container_name)
        except Exception as e:
            # Container does not exist, no need to try and remove it
            return True
        try:
            # container was found, so now we try to remove it
            # v also removes volumes linked to the container
            container.remove(
                v=removeVolumes, force=forceRemove
            )  # remove it even if it is running. remove volumes as well
            # No need to print that the container has been removed, it is expected behavior
            return True
        except Exception as e:
            print("Could not remove Docker Container [%s]" % (
                self.container_name))
            raise (Exception(f"CONTAINER REMOVE ERROR: {str(e)}"))
def get_container_summary(self) -> str:
current_time = timeit.default_timer()
# Total time the container has been running
if self.container_start_time == -1:
total_time_string = "NOT STARTED"
else:
total_time_rounded = datetime.timedelta(
seconds=round(current_time - self.container_start_time))
total_time_string = str(total_time_rounded)
# Time that the container setup took
if self.test_start_time == -1 or self.container_start_time == -1:
setup_time_string = "NOT SET UP"
else:
setup_secounds_rounded = datetime.timedelta(
seconds=round(self.test_start_time - self.container_start_time))
setup_time_string = str(setup_secounds_rounded)
# Time that the tests have been running
if self.test_start_time == -1 or self.num_tests_completed == 0:
testing_time_string = "NO TESTS COMPLETED"
else:
testing_seconds_rounded = datetime.timedelta(
seconds=round(current_time - self.test_start_time))
# Get the approximate time per test. This is a clunky way to get rid of decimal
# seconds.... but it works
timedelta_per_test = testing_seconds_rounded/self.num_tests_completed
timedelta_per_test_rounded = timedelta_per_test - \
datetime.timedelta(
microseconds=timedelta_per_test.microseconds)
testing_time_string = "%s (%d tests @ %s per test)" % (
testing_seconds_rounded, self.num_tests_completed, timedelta_per_test_rounded)
summary_str = "Summary for %s\n\t"\
"Total Time : [%s]\n\t"\
"Container Start Time: [%s]\n\t"\
"Test Execution Time : [%s]\n" % (
self.container_name, total_time_string, setup_time_string, testing_time_string)
return summary_str
def wait_for_splunk_ready(
self,
seconds_between_attempts: int = 10,
) -> bool:
# The smarter version of this will try to hit one of the pages,
# probably the login page, and when that is available it means that
# splunk is fully started and ready to go. Until then, we just
# use a simple sleep
while True:
try:
service = splunk_sdk.client.connect(host=self.splunk_ip, port=self.management_port, username='admin', password=self.container_password)
if service.restart_required:
#The sleep below will wait
pass
else:
return True
except Exception as e:
# There is a good chance the server is restarting, so the SDK connection failed.
# Or, we tried to check restart_required while the server was restarting. In the
# calling function, we have a timeout, so it's okay if this function could get
# stuck in an infinite loop (the caller will generate a timeout error)
pass
time.sleep(seconds_between_attempts)
#@wrapt_timeout_decorator.timeout(MAX_CONTAINER_START_TIME_SECONDS, timeout_exception=RuntimeError)
def setup_container(self):
self.container.start()
# def shutdown_signal_handler(sig, frame):
# shutdown_client = docker.client.from_env()
# errorCount = 0
# print(f"Shutting down {self.container_name}...", file=sys.stderr)
# try:
# container = shutdown_client.containers.get(self.container_name)
# #Note that stopping does not remove any of the volumes or logs,
# #so stopping can be useful if we want to debug any container failure
# container.stop(timeout=10)
# print(f"{self.container_name} shut down successfully", file=sys.stderr)
# except Exception as e:
# print(f"Error trying to shut down {self.container_name}. It may have already shut down. Stop it youself with 'docker containter stop {self.container_name}", sys.stderr)
# #We must use os._exit(1) because sys.exit(1) actually generates an exception which can be caught! And then we don't Quit!
# import os
# os._exit(1)
# import signal
# signal.signal(signal.SIGINT, shutdown_signal_handler)
# By default, first copy the index file then the datamodel file
for file_description, file_dict in self.files_to_copy_to_container.items():
self.extract_tar_file_to_container(
file_dict["local_file_path"], file_dict["container_file_path"]
)
print("Finished copying files to [%s]" % (self.container_name))
self.wait_for_splunk_ready()
def successfully_finish_tests(self)->None:
try:
if self.num_tests_completed == 0:
print("Container [%s] did not find any tests and will not start.\n"\
"This does not mean there was an error!"%(self.container_name))
else:
print("Container [%s] has finished running [%d] detections, time to stop the container."
% (self.container_name, self.num_tests_completed))
# remove the container
self.removeContainer()
except Exception as e:
print(
"Error stopping or removing the container: [%s]" % (str(e)))
return None
    def run_container(self) -> None:
        """Worker entry point: start the container, then repeatedly pull
        detections off the shared queue and test them until it is empty.

        Runs on this container's dedicated thread.  Always returns None;
        results and errors are reported through
        self.synchronization_object (addResult / addError).
        """
        print("Starting the container [%s]" % (self.container_name))
        # Try to get something from the queue. Check this early on
        # before launching the container because it can save us a lot of time!
        detection_to_test = self.synchronization_object.getTest()
        if detection_to_test is None:
            return self.successfully_finish_tests()
        self.container_start_time = timeit.default_timer()
        container_start_time = timeit.default_timer()
        try:
            self.setup_container()
        except Exception as e:
            # Container failed to come up: stop it, report the elapsed
            # time, and end this worker without testing anything.
            print("There was an exception starting the container [%s]: [%s]. Shutting down container"%(self.container_name,str(e)),file=sys.stdout)
            self.stopContainer()
            elapsed_rounded = round(timeit.default_timer() - container_start_time)
            time_string = (datetime.timedelta(seconds=elapsed_rounded))
            print("Container [%s] FAILED in [%s]"%(self.container_name, time_string))
            return None
        # Give some info about how long the container took to start up
        elapsed_rounded = round(timeit.default_timer() - container_start_time)
        time_string = (datetime.timedelta(seconds=elapsed_rounded))
        print("Container [%s] took [%s] to start"%(self.container_name, time_string))
        # Wait until every container has started before any begins testing.
        self.synchronization_object.start_barrier.wait()
        # Sleep for a small random time so that containers drift apart and don't synchronize their testing
        time.sleep(random.randint(1, 30))
        self.test_start_time = timeit.default_timer()
        while detection_to_test is not None:
            # If any sibling container has failed, stop early rather than
            # keep testing against a broken run.
            if self.synchronization_object.checkContainerFailure():
                self.container.stop()
                print("Container [%s] successfully stopped early due to failure" % (self.container_name))
                return None
            current_test_start_time = timeit.default_timer()
            #time.sleep(random.randint(1, 30))
            # There is a detection to test
            print("Container [%s]--->[%s]" %
                  (self.container_name, detection_to_test))
            try:
                result = testing_service.test_detection_wrapper(
                    self.container_name,
                    self.splunk_ip,
                    self.container_password,
                    self.management_port,
                    detection_to_test,
                    self.synchronization_object.attack_data_root_folder,
                    wait_on_failure=self.interactive_failure,
                    wait_on_completion = self.interactive
                )
                self.synchronization_object.addResult(result, duration_string = datetime.timedelta(seconds=round(timeit.default_timer() - current_test_start_time)))
                # Remove the data from the test that we just ran. We MUST do this when running on CI because otherwise, we will download
                # a massive amount of data over the course of a long path and will run out of space on the relatively small CI runner drive
                shutil.rmtree(result["attack_data_directory"],ignore_errors=True)
            except Exception as e:
                print(
                    "Warning - uncaught error in detection test for [%s] - this should not happen: [%s]"
                    % (detection_to_test, str(e))
                )
                #traceback.print_exc()
                #import pdb
                #pdb.set_trace()
                # Fill in all the "Empty" fields with default values. Otherwise, we will not be able to
                # process the result correctly.
                self.synchronization_object.addError(
                    {"detection_file": detection_to_test,
                     "detection_error": str(e)}, duration_string = datetime.timedelta(seconds=round(timeit.default_timer() - current_test_start_time))
                )
            self.num_tests_completed += 1
            # Try to get something from the queue
            detection_to_test = self.synchronization_object.getTest()
        #We failed to get a test from the queue, so we must be done gracefully! Quit
        return self.successfully_finish_tests()
| 42.520085 | 187 | 0.606504 | from collections import OrderedDict
import datetime
import docker
import docker.types
import docker.models
import docker.models.resource
import docker.models.containers
import os.path
import random
import requests
import shutil
from modules import splunk_sdk
from modules import testing_service
from modules import test_driver
import time
import timeit
from typing import Union
import threading
import wrapt_timeout_decorator
import sys
import traceback
SPLUNKBASE_URL = "https://splunkbase.splunk.com/app/%d/release/%s/download"
SPLUNK_START_ARGS = "--accept-license"
MAX_CONTAINER_START_TIME_SECONDS = 60*20
class SplunkContainer:
def __init__(
self,
synchronization_object: test_driver.TestDriver,
full_docker_hub_path,
container_name: str,
apps: OrderedDict,
web_port_tuple: tuple[str, int],
management_port_tuple: tuple[str, int],
container_password: str,
files_to_copy_to_container: OrderedDict = OrderedDict(),
mounts: list[docker.types.Mount] = [],
splunkbase_username: Union[str, None] = None,
splunkbase_password: Union[str, None] = None,
splunk_ip: str = "127.0.0.1",
interactive_failure: bool = False,
interactive:bool = False
):
self.interactive_failure = interactive_failure
self.interactive = interactive
self.synchronization_object = synchronization_object
self.client = docker.client.from_env()
self.full_docker_hub_path = full_docker_hub_path
self.container_password = container_password
self.apps = apps
self.files_to_copy_to_container = files_to_copy_to_container
self.splunk_ip = splunk_ip
self.container_name = container_name
self.mounts = mounts
self.environment = self.make_environment(
apps, container_password, splunkbase_username, splunkbase_password
)
self.ports = self.make_ports(web_port_tuple, management_port_tuple)
self.web_port = web_port_tuple[1]
self.management_port = management_port_tuple[1]
self.container = self.make_container()
self.thread = threading.Thread(target=self.run_container, )
self.container_start_time = -1
self.test_start_time = -1
self.num_tests_completed = 0
def prepare_apps_path(
self,
apps: OrderedDict,
splunkbase_username: Union[str, None] = None,
splunkbase_password: Union[str, None] = None,
) -> tuple[str, bool]:
apps_to_install = []
require_credentials = False
#If the username and password are supplied, then we will use splunkbase...
#assuming that the app_name and app_number are supplied. Note that if a
#local_path is supplied, then it should override this option!
if splunkbase_username is not None and splunkbase_password is not None:
use_splunkbase = True
else:
use_splunkbase = False
for app_name, app_info in self.apps.items():
if use_splunkbase is True and 'local_path' not in app_info:
target = SPLUNKBASE_URL % (app_info["app_number"], app_info["app_version"])
apps_to_install.append(target)
#We will require credentials since we are installing at least one splunkbase app
require_credentials = True
#Some paths may have a local_path and an HTTP path defined. Default to the local_path first,
#mostly because we may have copied it before into the cache to speed up start time.
elif 'local_path' in app_info:
app_file_name = os.path.basename(app_info['local_path'])
app_file_container_path = os.path.join("/tmp/apps", app_file_name)
apps_to_install.append(app_file_container_path)
elif 'http_path' in app_info:
apps_to_install.append(app_info['http_path'])
else:
if use_splunkbase is True:
print("Error, the app %s: %s could not be installed from Splunkbase because "
"--splunkbase_username and.or --splunkbase_password were not provided."
"\n\tQuitting..."%(app_name,app_info), file=sys.stderr)
else:
print("Error, the app %s: %s has no http_path or local_path.\n\tQuitting..."%(app_name,app_info), file=sys.stderr)
sys.exit(1)
return ",".join(apps_to_install), require_credentials
def make_environment(
self,
apps: OrderedDict,
container_password: str,
splunkbase_username: Union[str, None] = None,
splunkbase_password: Union[str, None] = None,
) -> dict:
env = {}
env["SPLUNK_START_ARGS"] = SPLUNK_START_ARGS
env["SPLUNK_PASSWORD"] = container_password
splunk_apps_url, require_credentials = self.prepare_apps_path(
apps, splunkbase_username, splunkbase_password
)
if require_credentials:
env["SPLUNKBASE_USERNAME"] = splunkbase_username
env["SPLUNKBASE_PASSWORD"] = splunkbase_password
env["SPLUNK_APPS_URL"] = splunk_apps_url
return env
def make_ports(self, *ports: tuple[str, int]) -> dict[str, int]:
port_dict = {}
for port in ports:
port_dict[port[0]] = port[1]
return port_dict
def __str__(self) -> str:
container_string = (
"Container Name: %s\n\t"
"Docker Hub Path: %s\n\t"
"Apps: %s\n\t"
"Ports: %s\n\t"
"Mounts: %s\n\t"
% (
self.container_name,
self.full_docker_hub_path,
self.environment["SPLUNK_APPS_URL"],
self.ports,
)
)
return container_string
def make_container(self) -> docker.models.resource.Model:
# First, make sure that the container has been removed if it already existed
self.removeContainer()
container = self.client.containers.create(
self.full_docker_hub_path,
ports=self.ports,
environment=self.environment,
name=self.container_name,
mounts=self.mounts,
detach=True,
)
return container
def extract_tar_file_to_container(
self, local_file_path: str, container_file_path: str, sleepTimeSeconds: int = 5
) -> bool:
# Check to make sure that the file ends in .tar. If it doesn't raise an exception
if os.path.splitext(local_file_path)[1] != ".tar":
raise Exception(
"Error - Failed copy of file [%s] to container [%s]. Only "
"files ending in .tar can be copied to the container using this function."
% (local_file_path, self.container_name)
)
successful_copy = False
api_client = docker.APIClient()
while not successful_copy:
try:
with open(local_file_path, "rb") as fileData:
api_client.put_archive(
container=self.container_name,
path=container_file_path,
data=fileData,
)
successful_copy = True
except Exception as e:
time.sleep(10)
successful_copy = False
return successful_copy
def stopContainer(self,timeout=10) -> bool:
try:
container = self.client.containers.get(self.container_name)
container.stop(timeout=10)
self.synchronization_object.containerFailure()
return True
except Exception as e:
print("Error stopping docker container [%s]"%(self.container_name))
return False
def removeContainer(
self, removeVolumes: bool = True, forceRemove: bool = True
) -> bool:
try:
container = self.client.containers.get(self.container_name)
except Exception as e:
return True
try:
container.remove(
v=removeVolumes, force=forceRemove
)
return True
except Exception as e:
print("Could not remove Docker Container [%s]" % (
self.container_name))
raise (Exception(f"CONTAINER REMOVE ERROR: {str(e)}"))
def get_container_summary(self) -> str:
current_time = timeit.default_timer()
if self.container_start_time == -1:
total_time_string = "NOT STARTED"
else:
total_time_rounded = datetime.timedelta(
seconds=round(current_time - self.container_start_time))
total_time_string = str(total_time_rounded)
if self.test_start_time == -1 or self.container_start_time == -1:
setup_time_string = "NOT SET UP"
else:
setup_secounds_rounded = datetime.timedelta(
seconds=round(self.test_start_time - self.container_start_time))
setup_time_string = str(setup_secounds_rounded)
if self.test_start_time == -1 or self.num_tests_completed == 0:
testing_time_string = "NO TESTS COMPLETED"
else:
testing_seconds_rounded = datetime.timedelta(
seconds=round(current_time - self.test_start_time))
timedelta_per_test = testing_seconds_rounded/self.num_tests_completed
timedelta_per_test_rounded = timedelta_per_test - \
datetime.timedelta(
microseconds=timedelta_per_test.microseconds)
testing_time_string = "%s (%d tests @ %s per test)" % (
testing_seconds_rounded, self.num_tests_completed, timedelta_per_test_rounded)
summary_str = "Summary for %s\n\t"\
"Total Time : [%s]\n\t"\
"Container Start Time: [%s]\n\t"\
"Test Execution Time : [%s]\n" % (
self.container_name, total_time_string, setup_time_string, testing_time_string)
return summary_str
def wait_for_splunk_ready(
self,
seconds_between_attempts: int = 10,
) -> bool:
while True:
try:
service = splunk_sdk.client.connect(host=self.splunk_ip, port=self.management_port, username='admin', password=self.container_password)
if service.restart_required:
pass
else:
return True
except Exception as e:
# stuck in an infinite loop (the caller will generate a timeout error)
pass
time.sleep(seconds_between_attempts)
#@wrapt_timeout_decorator.timeout(MAX_CONTAINER_START_TIME_SECONDS, timeout_exception=RuntimeError)
def setup_container(self):
self.container.start()
# def shutdown_signal_handler(sig, frame):
# shutdown_client = docker.client.from_env()
# errorCount = 0
# print(f"Shutting down {self.container_name}...", file=sys.stderr)
# try:
# container = shutdown_client.containers.get(self.container_name)
# #Note that stopping does not remove any of the volumes or logs,
# #so stopping can be useful if we want to debug any container failure
# container.stop(timeout=10)
# print(f"{self.container_name} shut down successfully", file=sys.stderr)
# except Exception as e:
# print(f"Error trying to shut down {self.container_name}. It may have already shut down. Stop it youself with 'docker containter stop {self.container_name}", sys.stderr)
# signal.signal(signal.SIGINT, shutdown_signal_handler)
# By default, first copy the index file then the datamodel file
for file_description, file_dict in self.files_to_copy_to_container.items():
self.extract_tar_file_to_container(
file_dict["local_file_path"], file_dict["container_file_path"]
)
print("Finished copying files to [%s]" % (self.container_name))
self.wait_for_splunk_ready()
def successfully_finish_tests(self)->None:
try:
if self.num_tests_completed == 0:
print("Container [%s] did not find any tests and will not start.\n"\
"This does not mean there was an error!"%(self.container_name))
else:
print("Container [%s] has finished running [%d] detections, time to stop the container."
% (self.container_name, self.num_tests_completed))
# remove the container
self.removeContainer()
except Exception as e:
print(
"Error stopping or removing the container: [%s]" % (str(e)))
return None
def run_container(self) -> None:
print("Starting the container [%s]" % (self.container_name))
# Try to get something from the queue. Check this early on
# before launching the container because it can save us a lot of time!
detection_to_test = self.synchronization_object.getTest()
if detection_to_test is None:
return self.successfully_finish_tests()
self.container_start_time = timeit.default_timer()
container_start_time = timeit.default_timer()
try:
self.setup_container()
except Exception as e:
print("There was an exception starting the container [%s]: [%s]. Shutting down container"%(self.container_name,str(e)),file=sys.stdout)
self.stopContainer()
elapsed_rounded = round(timeit.default_timer() - container_start_time)
time_string = (datetime.timedelta(seconds=elapsed_rounded))
print("Container [%s] FAILED in [%s]"%(self.container_name, time_string))
return None
#GTive some info about how long the container took to start up
elapsed_rounded = round(timeit.default_timer() - container_start_time)
time_string = (datetime.timedelta(seconds=elapsed_rounded))
print("Container [%s] took [%s] to start"%(self.container_name, time_string))
self.synchronization_object.start_barrier.wait()
# Sleep for a small random time so that containers drift apart and don't synchronize their testing
time.sleep(random.randint(1, 30))
self.test_start_time = timeit.default_timer()
while detection_to_test is not None:
if self.synchronization_object.checkContainerFailure():
self.container.stop()
print("Container [%s] successfully stopped early due to failure" % (self.container_name))
return None
current_test_start_time = timeit.default_timer()
#time.sleep(random.randint(1, 30))
# There is a detection to test
print("Container [%s]--->[%s]" %
(self.container_name, detection_to_test))
try:
result = testing_service.test_detection_wrapper(
self.container_name,
self.splunk_ip,
self.container_password,
self.management_port,
detection_to_test,
self.synchronization_object.attack_data_root_folder,
wait_on_failure=self.interactive_failure,
wait_on_completion = self.interactive
)
self.synchronization_object.addResult(result, duration_string = datetime.timedelta(seconds=round(timeit.default_timer() - current_test_start_time)))
# Remove the data from the test that we just ran. We MUST do this when running on CI because otherwise, we will download
# a massive amount of data over the course of a long path and will run out of space on the relatively small CI runner drive
shutil.rmtree(result["attack_data_directory"],ignore_errors=True)
except Exception as e:
print(
"Warning - uncaught error in detection test for [%s] - this should not happen: [%s]"
% (detection_to_test, str(e))
)
#traceback.print_exc()
#import pdb
#pdb.set_trace()
# Fill in all the "Empty" fields with default values. Otherwise, we will not be able to
# process the result correctly.
self.synchronization_object.addError(
{"detection_file": detection_to_test,
"detection_error": str(e)}, duration_string = datetime.timedelta(seconds=round(timeit.default_timer() - current_test_start_time))
)
self.num_tests_completed += 1
# Try to get something from the queue
detection_to_test = self.synchronization_object.getTest()
#We failed to get a test from the queue, so we must be done gracefully! Quit
return self.successfully_finish_tests()
| true | true |
1c2c3cb5d27790c1e7ed31daba914ceae5626d89 | 61,187 | py | Python | Tools/gdb/libpython.py | deadsnakes/python3.3 | 4faaf44cd5478410ac3b977351c1965fa054b5e9 | [
"PSF-2.0"
] | 652 | 2015-07-26T00:00:17.000Z | 2022-02-24T18:30:04.000Z | Tools/gdb/libpython.py | deadsnakes/python3.3 | 4faaf44cd5478410ac3b977351c1965fa054b5e9 | [
"PSF-2.0"
] | 8 | 2015-09-07T03:38:19.000Z | 2021-05-23T03:18:51.000Z | Tools/gdb/libpython.py | isabella232/cpython-pt | 16d0393123bcc49009e3746471a603290804899a | [
"PSF-2.0"
] | 40 | 2015-07-24T19:45:08.000Z | 2021-11-01T14:54:56.000Z | #!/usr/bin/python
'''
From gdb 7 onwards, gdb's build can be configured --with-python, allowing gdb
to be extended with Python code e.g. for library-specific data visualizations,
such as for the C++ STL types. Documentation on this API can be seen at:
http://sourceware.org/gdb/current/onlinedocs/gdb/Python-API.html
This python module deals with the case when the process being debugged (the
"inferior process" in gdb parlance) is itself python, or more specifically,
linked against libpython. In this situation, almost every item of data is a
(PyObject*), and having the debugger merely print their addresses is not very
enlightening.
This module embeds knowledge about the implementation details of libpython so
that we can emit useful visualizations e.g. a string, a list, a dict, a frame
giving file/line information and the state of local variables
In particular, given a gdb.Value corresponding to a PyObject* in the inferior
process, we can generate a "proxy value" within the gdb process. For example,
given a PyObject* in the inferior process that is in fact a PyListObject*
holding three PyObject* that turn out to be PyBytesObject* instances, we can
generate a proxy value within the gdb process that is a list of bytes
instances:
[b"foo", b"bar", b"baz"]
Doing so can be expensive for complicated graphs of objects, and could take
some time, so we also have a "write_repr" method that writes a representation
of the data to a file-like object. This allows us to stop the traversal by
having the file-like object raise an exception if it gets too much data.
With both "proxyval" and "write_repr" we keep track of the set of all addresses
visited so far in the traversal, to avoid infinite recursion due to cycles in
the graph of object references.
We try to defer gdb.lookup_type() invocations for python types until as late as
possible: for a dynamically linked python binary, when the process starts in
the debugger, the libpython.so hasn't been dynamically loaded yet, so none of
the type names are known to the debugger
The module also extends gdb with some python-specific commands.
'''
# NOTE: some gdbs are linked with Python 3, so this file should be dual-syntax
# compatible (2.6+ and 3.0+). See #19308.
from __future__ import print_function, with_statement
import gdb
import os
import locale
import sys
if sys.version_info[0] >= 3:
unichr = chr
xrange = range
long = int
# Look up the gdb.Type for some standard types:
_type_char_ptr = gdb.lookup_type('char').pointer() # char*
_type_unsigned_char_ptr = gdb.lookup_type('unsigned char').pointer() # unsigned char*
_type_void_ptr = gdb.lookup_type('void').pointer() # void*
_type_unsigned_short_ptr = gdb.lookup_type('unsigned short').pointer()
_type_unsigned_int_ptr = gdb.lookup_type('unsigned int').pointer()
# value computed later, see PyUnicodeObjectPtr.proxy()
_is_pep393 = None
# Size in bytes of a void* as reported by gdb for the process being debugged.
SIZEOF_VOID_P = _type_void_ptr.sizeof
# Subset of the tp_flags bits (mirroring CPython's Include/object.h);
# used by PyObjectPtr.subclass_from_type to pick the right wrapper class.
Py_TPFLAGS_HEAPTYPE = (1 << 9)
Py_TPFLAGS_LONG_SUBCLASS = (1 << 24)
Py_TPFLAGS_LIST_SUBCLASS = (1 << 25)
Py_TPFLAGS_TUPLE_SUBCLASS = (1 << 26)
Py_TPFLAGS_BYTES_SUBCLASS = (1 << 27)
Py_TPFLAGS_UNICODE_SUBCLASS = (1 << 28)
Py_TPFLAGS_DICT_SUBCLASS = (1 << 29)
Py_TPFLAGS_BASE_EXC_SUBCLASS = (1 << 30)
Py_TPFLAGS_TYPE_SUBCLASS = (1 << 31)
# Cap on how many characters of a repr we will build for any single value.
MAX_OUTPUT_LEN=1024
hexdigits = "0123456789abcdef"
# Locale encoding used when writing text to gdb's output (Python 2 path).
ENCODING = locale.getpreferredencoding()
class NullPyObjectPtr(RuntimeError):
    """Raised when a field access is attempted through a NULL PyObject*."""
def safety_limit(val):
    """Clamp a count read from the inferior process to a sane maximum.

    Corrupt data in the debuggee could report absurd sizes (of
    iterations, of lists, ...); capping them at 1000 keeps the gdb
    process itself from being dragged down.
    """
    return min(val, 1000)
def safe_range(val):
    """As per range, but do not trust the untrusted length too much:
    cap it via safety_limit in case the inferior's data was corrupted."""
    return xrange(safety_limit(int(val)))
if sys.version_info[0] < 3:
    def write_unicode(file, text):
        # Python 2: encode unicode strings to the preferred locale
        # encoding with 'backslashreplace' so that file.write can never
        # raise UnicodeEncodeError; byte strings pass through untouched.
        if isinstance(text, unicode):
            text = text.encode(ENCODING, 'backslashreplace')
        file.write(text)
else:
    def write_unicode(file, text):
        # Python 3: text is already str; write it through unchanged.
        file.write(text)
try:
    os_fsencode = os.fsencode
except AttributeError:
    # Python 2 fallback: os.fsencode does not exist there, so emulate
    # the "surrogateescape" error handler by hand.
    def os_fsencode(filename):
        if not isinstance(filename, unicode):
            return filename
        encoding = sys.getfilesystemencoding()
        if encoding == 'mbcs':
            # mbcs doesn't support surrogateescape
            return filename.encode(encoding)
        encoded = []
        for char in filename:
            # surrogateescape error handler: a lone surrogate in
            # U+DC80..U+DCFF maps back to the raw byte it escaped.
            if 0xDC80 <= ord(char) <= 0xDCFF:
                byte = chr(ord(char) - 0xDC00)
            else:
                byte = char.encode(encoding)
            encoded.append(byte)
        return ''.join(encoded)
class StringTruncated(RuntimeError):
    """Raised by TruncatedStringIO.write when output exceeds maxlen."""
class TruncatedStringIO(object):
    '''Similar to io.StringIO, but bounds the output: once more than
    maxlen characters would be accumulated, it keeps only what fits and
    raises StringTruncated to abort the traversal.'''
    def __init__(self, maxlen=None):
        # Accumulated output; maxlen=None (or 0) means "unbounded".
        self._val = ''
        self.maxlen = maxlen
    def write(self, data):
        if self.maxlen:
            remaining = self.maxlen - len(self._val)
            if len(data) > remaining:
                # Keep only the prefix that fits, then signal truncation.
                self._val += data[0:remaining]
                raise StringTruncated()
        self._val += data
    def getvalue(self):
        return self._val
class PyObjectPtr(object):
"""
Class wrapping a gdb.Value that's a either a (PyObject*) within the
inferior process, or some subclass pointer e.g. (PyBytesObject*)
There will be a subclass for every refined PyObject type that we care
about.
Note that at every stage the underlying pointer could be NULL, point
to corrupt data, etc; this is the debugger, after all.
"""
_typename = 'PyObject'
def __init__(self, gdbval, cast_to=None):
if cast_to:
self._gdbval = gdbval.cast(cast_to)
else:
self._gdbval = gdbval
def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, this these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located insize the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
pyo_ptr = self._gdbval.cast(PyVarObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name]
def pyop_field(self, name):
'''
Get a PyObjectPtr for the given PyObject* field within this PyObject,
coping with some python 2 versus python 3 differences.
'''
return PyObjectPtr.from_pyobject_ptr(self.field(name))
def write_field_repr(self, name, out, visited):
'''
Extract the PyObject* field named "name", and write its representation
to file-like object "out"
'''
field_obj = self.pyop_field(name)
field_obj.write_repr(out, visited)
def get_truncated_repr(self, maxlen):
'''
Get a repr-like string for the data, but truncate it at "maxlen" bytes
(ending the object graph traversal as soon as you do)
'''
out = TruncatedStringIO(maxlen)
try:
self.write_repr(out, set())
except StringTruncated:
# Truncation occurred:
return out.getvalue() + '...(truncated)'
# No truncation occurred:
return out.getvalue()
def type(self):
return PyTypeObjectPtr(self.field('ob_type'))
def is_null(self):
return 0 == long(self._gdbval)
def is_optimized_out(self):
'''
Is the value of the underlying PyObject* visible to the debugger?
This can vary with the precise version of the compiler used to build
Python, and the precise version of gdb.
See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with
PyEval_EvalFrameEx's "f"
'''
return self._gdbval.is_optimized_out
def safe_tp_name(self):
try:
return self.type().field('tp_name').string()
except NullPyObjectPtr:
# NULL tp_name?
return 'unknown'
except RuntimeError:
# Can't even read the object at all?
return 'unknown'
def proxyval(self, visited):
'''
Scrape a value from the inferior process, and try to represent it
within the gdb process, whilst (hopefully) avoiding crashes when
the remote data is corrupt.
Derived classes will override this.
For example, a PyIntObject* with ob_ival 42 in the inferior process
should result in an int(42) in this process.
visited: a set of all gdb.Value pyobject pointers already visited
whilst generating this value (to guard against infinite recursion when
visiting object graphs with loops). Analogous to Py_ReprEnter and
Py_ReprLeave
'''
class FakeRepr(object):
"""
Class representing a non-descript PyObject* value in the inferior
process for when we don't have a custom scraper, intended to have
a sane repr().
"""
def __init__(self, tp_name, address):
self.tp_name = tp_name
self.address = address
def __repr__(self):
# For the NULL pointer, we have no way of knowing a type, so
# special-case it as per
# http://bugs.python.org/issue8032#msg100882
if self.address == 0:
return '0x0'
return '<%s at remote 0x%x>' % (self.tp_name, self.address)
return FakeRepr(self.safe_tp_name(),
long(self._gdbval))
def write_repr(self, out, visited):
'''
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
'''
# Default implementation: generate a proxy value and write its repr
# However, this could involve a lot of work for complicated objects,
# so for derived classes we specialize this
return out.write(repr(self.proxyval(visited)))
    @classmethod
    def subclass_from_type(cls, t):
        '''
        Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a
        (PyTypeObject*), determine the corresponding subclass of PyObjectPtr
        to use

        Ideally, we would look up the symbols for the global types, but that
        isn't working yet:
          (gdb) python print gdb.lookup_symbol('PyList_Type')[0].value
          Traceback (most recent call last):
            File "<string>", line 1, in <module>
          NotImplementedError: Symbol type not yet supported in Python scripts.
          Error while executing Python code.

        For now, we use tp_flags, after doing some string comparisons on the
        tp_name for some special-cases that don't seem to be visible through
        flags
        '''
        try:
            tp_name = t.field('tp_name').string()
            tp_flags = int(t.field('tp_flags'))
        except RuntimeError:
            # Handle any kind of error e.g. NULL ptrs by simply using the base
            # class
            return cls

        #print('tp_flags = 0x%08x' % tp_flags)
        #print('tp_name = %r' % tp_name)

        # Types whose identity is not discoverable via tp_flags; matched by
        # their tp_name string instead:
        name_map = {'bool': PyBoolObjectPtr,
                    'classobj': PyClassObjectPtr,
                    'NoneType': PyNoneStructPtr,
                    'frame': PyFrameObjectPtr,
                    'set' : PySetObjectPtr,
                    'frozenset' : PySetObjectPtr,
                    'builtin_function_or_method' : PyCFunctionObjectPtr,
                    }
        if tp_name in name_map:
            return name_map[tp_name]

        # Otherwise dispatch on the Py_TPFLAGS_*_SUBCLASS bits.  HEAPTYPE is
        # tested first so that user-defined classes get HeapTypeObjectPtr:
        if tp_flags & Py_TPFLAGS_HEAPTYPE:
            return HeapTypeObjectPtr

        if tp_flags & Py_TPFLAGS_LONG_SUBCLASS:
            return PyLongObjectPtr
        if tp_flags & Py_TPFLAGS_LIST_SUBCLASS:
            return PyListObjectPtr
        if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS:
            return PyTupleObjectPtr
        if tp_flags & Py_TPFLAGS_BYTES_SUBCLASS:
            return PyBytesObjectPtr
        if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS:
            return PyUnicodeObjectPtr
        if tp_flags & Py_TPFLAGS_DICT_SUBCLASS:
            return PyDictObjectPtr
        if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS:
            return PyBaseExceptionObjectPtr
        #if tp_flags & Py_TPFLAGS_TYPE_SUBCLASS:
        #    return PyTypeObjectPtr

        # Use the base class:
        return cls
    @classmethod
    def from_pyobject_ptr(cls, gdbval):
        '''
        Try to locate the appropriate derived class dynamically, and cast
        the pointer accordingly.

        Returns a PyObjectPtr (or subclass) wrapping "gdbval"; falls back to
        the base class if the type cannot be read.
        '''
        try:
            p = PyObjectPtr(gdbval)
            # Rebind cls to the subclass matching ob_type, then re-wrap with
            # the subclass's own gdb pointer type:
            cls = cls.subclass_from_type(p.type())
            return cls(gdbval, cast_to=cls.get_gdb_type())
        except RuntimeError:
            # Handle any kind of error e.g. NULL ptrs by simply using the base
            # class
            pass
        return cls(gdbval)
    @classmethod
    def get_gdb_type(cls):
        # The inferior's pointer type for this wrapper, e.g. (PyObject*)
        # for the base class, (PyDictObject*) for PyDictObjectPtr, etc.
        return gdb.lookup_type(cls._typename).pointer()
    def as_address(self):
        # The numeric address of the wrapped PyObject* in the inferior
        return long(self._gdbval)
class PyVarObjectPtr(PyObjectPtr):
    """Class wrapping a gdb.Value that's a PyVarObject* (i.e. has ob_size)."""
    _typename = 'PyVarObject'
class ProxyAlreadyVisited(object):
    """
    Stand-in proxy emitted when a cycle is detected while walking the
    inferior's object graph, so that repr() prints a placeholder such as
    '[...]' or '{...}' instead of recursing forever.

    Analogous to the values emitted by the users of Py_ReprEnter and
    Py_ReprLeave.
    """

    def __init__(self, rep):
        # The literal placeholder text to emit from __repr__
        self._rep = rep

    def __repr__(self):
        return self._rep
def _write_instance_repr(out, visited, name, pyop_attrdict, address):
    '''Shared code for use by all classes:
    write a representation to file-like object "out"

    "pyop_attrdict" is expected to be a PyDictObjectPtr (or something falsy);
    "address" is the instance's address in the inferior.
    '''
    out.write('<')
    out.write(name)

    # Write dictionary of instance attributes:
    if isinstance(pyop_attrdict, PyDictObjectPtr):
        out.write('(')
        first = True
        for pyop_arg, pyop_val in pyop_attrdict.iteritems():
            if not first:
                out.write(', ')
            first = False
            out.write(pyop_arg.proxyval(visited))
            out.write('=')
            pyop_val.write_repr(out, visited)
        out.write(')')
    out.write(' at remote 0x%x>' % address)
class InstanceProxy(object):
    """
    Local proxy for a class instance in the inferior process: holds the class
    name, the (already-proxied) attribute dict and the remote address, and
    renders them in repr().
    """

    def __init__(self, cl_name, attrdict, address):
        self.cl_name = cl_name
        self.attrdict = attrdict
        self.address = address

    def __repr__(self):
        if isinstance(self.attrdict, dict):
            # Use items() rather than iteritems(): dict.iteritems() does not
            # exist on Python 3, so gdb builds embedding Python 3 would raise
            # AttributeError here.  items() works on both Python 2 and 3.
            kwargs = ', '.join(["%s=%r" % (arg, val)
                                for arg, val in self.attrdict.items()])
            return '<%s(%s) at remote 0x%x>' % (self.cl_name,
                                                kwargs, self.address)
        else:
            return '<%s at remote 0x%x>' % (self.cl_name,
                                            self.address)
def _PyObject_VAR_SIZE(typeobj, nitems):
    # Transliteration of CPython's _PyObject_VAR_SIZE macro: size in bytes of
    # an instance of "typeobj" holding "nitems" items, rounded up to a
    # multiple of the pointer size.
    if _PyObject_VAR_SIZE._type_size_t is None:
        # Cache the gdb type lookup on first use
        _PyObject_VAR_SIZE._type_size_t = gdb.lookup_type('size_t')

    return ( ( typeobj.field('tp_basicsize') +
               nitems * typeobj.field('tp_itemsize') +
               (SIZEOF_VOID_P - 1)
             ) & ~(SIZEOF_VOID_P - 1)
           ).cast(_PyObject_VAR_SIZE._type_size_t)
# Lazily-initialized cache for the inferior's size_t type:
_PyObject_VAR_SIZE._type_size_t = None
class HeapTypeObjectPtr(PyObjectPtr):
    # Wraps an instance of a user-defined (heap-allocated) class
    _typename = 'PyObject'

    def get_attr_dict(self):
        '''
        Get the PyDictObject ptr representing the attribute dictionary
        (or None if there's a problem)
        '''
        try:
            typeobj = self.type()
            dictoffset = int_from_int(typeobj.field('tp_dictoffset'))
            if dictoffset != 0:
                if dictoffset < 0:
                    # Negative tp_dictoffset: the dict pointer lives at the
                    # end of the variable-sized object (cf. _PyObject_GetDictPtr)
                    type_PyVarObject_ptr = gdb.lookup_type('PyVarObject').pointer()
                    tsize = int_from_int(self._gdbval.cast(type_PyVarObject_ptr)['ob_size'])
                    if tsize < 0:
                        tsize = -tsize
                    size = _PyObject_VAR_SIZE(typeobj, tsize)
                    dictoffset += size
                    assert dictoffset > 0
                    assert dictoffset % SIZEOF_VOID_P == 0

                # Read the (PyObject*) stored at (char*)self + dictoffset:
                dictptr = self._gdbval.cast(_type_char_ptr) + dictoffset
                PyObjectPtrPtr = PyObjectPtr.get_gdb_type().pointer()
                dictptr = dictptr.cast(PyObjectPtrPtr)
                return PyObjectPtr.from_pyobject_ptr(dictptr.dereference())
        except RuntimeError:
            # Corrupt data somewhere; fail safe
            pass

        # Not found, or some kind of error:
        return None

    def proxyval(self, visited):
        '''
        Support for classes.

        Currently we just locate the dictionary using a transliteration to
        python of _PyObject_GetDictPtr, ignoring descriptors
        '''
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('<...>')
        visited.add(self.as_address())

        pyop_attr_dict = self.get_attr_dict()
        if pyop_attr_dict:
            attr_dict = pyop_attr_dict.proxyval(visited)
        else:
            attr_dict = {}
        tp_name = self.safe_tp_name()

        # Class:
        return InstanceProxy(tp_name, attr_dict, long(self._gdbval))

    def write_repr(self, out, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('<...>')
            return
        visited.add(self.as_address())

        pyop_attrdict = self.get_attr_dict()
        _write_instance_repr(out, visited,
                             self.safe_tp_name(), pyop_attrdict, self.as_address())
class ProxyException(Exception):
    """
    Local proxy for an exception instance in the inferior process: carries
    the remote type name plus the args tuple, and prints e.g. "KeyError('x',)".
    """

    def __init__(self, tp_name, args):
        self.tp_name = tp_name
        self.args = args

    def __repr__(self):
        return '{0}{1!r}'.format(self.tp_name, self.args)
class PyBaseExceptionObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyBaseExceptionObject* i.e. an exception
    within the process being debugged.
    """
    _typename = 'PyBaseExceptionObject'

    def proxyval(self, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('(...)')
        visited.add(self.as_address())
        # Proxy only the "args" tuple; other attributes are ignored
        arg_proxy = self.pyop_field('args').proxyval(visited)
        return ProxyException(self.safe_tp_name(),
                              arg_proxy)

    def write_repr(self, out, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('(...)')
            return
        visited.add(self.as_address())

        # Emit e.g. "ValueError('message',)"
        out.write(self.safe_tp_name())
        self.write_field_repr('args', out, visited)
class PyClassObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyClassObject* i.e. a <classobj>
    instance within the process being debugged.
    """
    # No specialized proxyval/write_repr: the base-class implementations apply.
    _typename = 'PyClassObject'
class BuiltInFunctionProxy(object):
    """Local proxy for a PyCFunctionObject whose m_self is NULL
    (an unbound built-in function), rendering e.g. '<built-in function len>'."""

    def __init__(self, ml_name):
        # Name taken from the PyMethodDef's ml_name slot
        self.ml_name = ml_name

    def __repr__(self):
        return "<built-in function {0}>".format(self.ml_name)
class BuiltInMethodProxy(object):
    """Local proxy for a PyCFunctionObject bound to an instance
    (m_self != NULL), rendering e.g.
    '<built-in method append of list object at remote 0x...>'."""

    def __init__(self, ml_name, pyop_m_self):
        self.ml_name = ml_name
        # PyObjectPtr for the bound instance (m_self)
        self.pyop_m_self = pyop_m_self

    def __repr__(self):
        owner = self.pyop_m_self
        return ('<built-in method {0} of {1} object at remote 0x{2:x}>'
                .format(self.ml_name,
                        owner.safe_tp_name(),
                        owner.as_address()))
class PyCFunctionObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyCFunctionObject*
    (see Include/methodobject.h and Objects/methodobject.c)
    """
    _typename = 'PyCFunctionObject'

    def proxyval(self, visited):
        m_ml = self.field('m_ml') # m_ml is a (PyMethodDef*)
        ml_name = m_ml['ml_name'].string()

        pyop_m_self = self.pyop_field('m_self')
        if pyop_m_self.is_null():
            # NULL m_self: a plain built-in function
            return BuiltInFunctionProxy(ml_name)
        else:
            # Non-NULL m_self: a method bound to that instance
            return BuiltInMethodProxy(ml_name, pyop_m_self)
class PyCodeObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyCodeObject* i.e. a <code> instance
    within the process being debugged.
    """
    _typename = 'PyCodeObject'

    def addr2line(self, addrq):
        '''
        Get the line number for a given bytecode offset

        Analogous to PyCode_Addr2Line; translated from pseudocode in
        Objects/lnotab_notes.txt
        '''
        co_lnotab = self.pyop_field('co_lnotab').proxyval(set())

        # Initialize lineno to co_firstlineno as per PyCode_Addr2Line
        # not 0, as lnotab_notes.txt has it:
        lineno = int_from_int(self.field('co_firstlineno'))

        addr = 0
        # co_lnotab is a string of (addr_delta, line_delta) byte pairs:
        for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
            addr += ord(addr_incr)
            if addr > addrq:
                return lineno
            lineno += ord(line_incr)
        return lineno
class PyDictObjectPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyDictObject* i.e. a dict instance
    within the process being debugged.
    """
    _typename = 'PyDictObject'

    def iteritems(self):
        '''
        Yields a sequence of (PyObjectPtr key, PyObjectPtr value) pairs,
        analogous to dict.iteritems()
        '''
        # NOTE(review): walks ma_keys/dk_entries directly; this matches the
        # split/combined dict layout with a dk_size field -- verify against
        # the dictobject.h revision this script targets.
        keys = self.field('ma_keys')
        values = self.field('ma_values')
        for i in safe_range(keys['dk_size']):
            ep = keys['dk_entries'].address + i
            if long(values):
                # Split table: values live in the separate ma_values array
                pyop_value = PyObjectPtr.from_pyobject_ptr(values[i])
            else:
                # Combined table: value is stored alongside the key
                pyop_value = PyObjectPtr.from_pyobject_ptr(ep['me_value'])
            if not pyop_value.is_null():
                pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
                yield (pyop_key, pyop_value)

    def proxyval(self, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('{...}')
        visited.add(self.as_address())

        result = {}
        for pyop_key, pyop_value in self.iteritems():
            proxy_key = pyop_key.proxyval(visited)
            proxy_value = pyop_value.proxyval(visited)
            result[proxy_key] = proxy_value
        return result

    def write_repr(self, out, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('{...}')
            return
        visited.add(self.as_address())

        out.write('{')
        first = True
        for pyop_key, pyop_value in self.iteritems():
            if not first:
                out.write(', ')
            first = False
            pyop_key.write_repr(out, visited)
            out.write(': ')
            pyop_value.write_repr(out, visited)
        out.write('}')
class PyListObjectPtr(PyObjectPtr):
    # Wraps a (PyListObject*) i.e. a list in the inferior
    _typename = 'PyListObject'

    def __getitem__(self, i):
        # Get the gdb.Value for the (PyObject*) with the given index:
        field_ob_item = self.field('ob_item')
        return field_ob_item[i]

    def proxyval(self, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('[...]')
        visited.add(self.as_address())

        result = [PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
                  for i in safe_range(int_from_int(self.field('ob_size')))]
        return result

    def write_repr(self, out, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('[...]')
            return
        visited.add(self.as_address())

        out.write('[')
        for i in safe_range(int_from_int(self.field('ob_size'))):
            if i > 0:
                out.write(', ')
            element = PyObjectPtr.from_pyobject_ptr(self[i])
            element.write_repr(out, visited)
        out.write(']')
class PyLongObjectPtr(PyObjectPtr):
    _typename = 'PyLongObject'

    def proxyval(self, visited):
        '''
        Python's Include/longobjrep.h has this declaration:
           struct _longobject {
               PyObject_VAR_HEAD
               digit ob_digit[1];
           };

        with this description:
            The absolute value of a number is equal to
                 SUM(for i=0 through abs(ob_size)-1) ob_digit[i] * 2**(SHIFT*i)
            Negative numbers are represented with ob_size < 0;
            zero is represented by ob_size == 0.

        where SHIFT can be either:
            #define PyLong_SHIFT        30
            #define PyLong_SHIFT        15
        '''
        ob_size = long(self.field('ob_size'))
        if ob_size == 0:
            return 0

        ob_digit = self.field('ob_digit')

        # Infer SHIFT from the inferior's digit size (2 bytes -> 15 bits,
        # otherwise 30 bits):
        if gdb.lookup_type('digit').sizeof == 2:
            SHIFT = 15
        else:
            SHIFT = 30

        digits = [long(ob_digit[i]) * 2**(SHIFT*i)
                  for i in safe_range(abs(ob_size))]
        result = sum(digits)
        if ob_size < 0:
            result = -result
        return result

    def write_repr(self, out, visited):
        # Write this out as a Python 3 int literal, i.e. without the "L" suffix
        proxy = self.proxyval(visited)
        out.write("%s" % proxy)
class PyBoolObjectPtr(PyLongObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyBoolObject* i.e. one of the two
    <bool> instances (Py_True/Py_False) within the process being debugged.
    """
    def proxyval(self, visited):
        # Delegate to the long implementation, then collapse the integer
        # value to a real bool.
        return bool(PyLongObjectPtr.proxyval(self, visited))
class PyNoneStructPtr(PyObjectPtr):
    """
    Class wrapping a gdb.Value that's a PyObject* pointing to the
    singleton (we hope) _Py_NoneStruct with ob_type PyNone_Type
    """
    _typename = 'PyObject'

    def proxyval(self, visited):
        # The local proxy for the inferior's None is simply None
        return None
class PyFrameObjectPtr(PyObjectPtr):
    # Wraps a (PyFrameObject*) i.e. a Python stack frame in the inferior
    _typename = 'PyFrameObject'

    def __init__(self, gdbval, cast_to=None):
        PyObjectPtr.__init__(self, gdbval, cast_to)

        # Eagerly cache commonly-used fields (skipped when the frame was
        # optimized out by the compiler):
        if not self.is_optimized_out():
            self.co = PyCodeObjectPtr.from_pyobject_ptr(self.field('f_code'))
            self.co_name = self.co.pyop_field('co_name')
            self.co_filename = self.co.pyop_field('co_filename')

            self.f_lineno = int_from_int(self.field('f_lineno'))
            self.f_lasti = int_from_int(self.field('f_lasti'))
            self.co_nlocals = int_from_int(self.co.field('co_nlocals'))
            self.co_varnames = PyTupleObjectPtr.from_pyobject_ptr(self.co.field('co_varnames'))

    def iter_locals(self):
        '''
        Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
        the local variables of this frame
        '''
        if self.is_optimized_out():
            return

        f_localsplus = self.field('f_localsplus')
        for i in safe_range(self.co_nlocals):
            pyop_value = PyObjectPtr.from_pyobject_ptr(f_localsplus[i])
            if not pyop_value.is_null():
                pyop_name = PyObjectPtr.from_pyobject_ptr(self.co_varnames[i])
                yield (pyop_name, pyop_value)

    def iter_globals(self):
        '''
        Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
        the global variables of this frame
        '''
        if self.is_optimized_out():
            return ()

        pyop_globals = self.pyop_field('f_globals')
        return pyop_globals.iteritems()

    def iter_builtins(self):
        '''
        Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
        the builtin variables
        '''
        if self.is_optimized_out():
            return ()

        pyop_builtins = self.pyop_field('f_builtins')
        return pyop_builtins.iteritems()

    def get_var_by_name(self, name):
        '''
        Look for the named local variable, returning a (PyObjectPtr, scope) pair
        where scope is a string 'local', 'global', 'builtin'

        If not found, return (None, None)
        '''
        # Search scopes in the same order the interpreter would:
        for pyop_name, pyop_value in self.iter_locals():
            if name == pyop_name.proxyval(set()):
                return pyop_value, 'local'
        for pyop_name, pyop_value in self.iter_globals():
            if name == pyop_name.proxyval(set()):
                return pyop_value, 'global'
        for pyop_name, pyop_value in self.iter_builtins():
            if name == pyop_name.proxyval(set()):
                return pyop_value, 'builtin'
        return None, None

    def filename(self):
        '''Get the path of the current Python source file, as a string'''
        if self.is_optimized_out():
            return '(frame information optimized out)'
        return self.co_filename.proxyval(set())

    def current_line_num(self):
        '''Get current line number as an integer (1-based)

        Translated from PyFrame_GetLineNumber and PyCode_Addr2Line

        See Objects/lnotab_notes.txt
        '''
        if self.is_optimized_out():
            return None
        f_trace = self.field('f_trace')
        if long(f_trace) != 0:
            # we have a non-NULL f_trace:
            return self.f_lineno
        else:
            #try:
            return self.co.addr2line(self.f_lasti)
            #except ValueError:
            #    return self.f_lineno

    def current_line(self):
        '''Get the text of the current source line as a string, with a trailing
        newline character'''
        if self.is_optimized_out():
            return '(frame information optimized out)'
        filename = self.filename()
        try:
            f = open(os_fsencode(filename), 'r')
        except IOError:
            # Source file unavailable on this machine
            return None
        with f:
            all_lines = f.readlines()
            # Convert from 1-based current_line_num to 0-based list offset:
            return all_lines[self.current_line_num()-1]

    def write_repr(self, out, visited):
        if self.is_optimized_out():
            out.write('(frame information optimized out)')
            return
        out.write('Frame 0x%x, for file %s, line %i, in %s ('
                  % (self.as_address(),
                     self.co_filename.proxyval(visited),
                     self.current_line_num(),
                     self.co_name.proxyval(visited)))
        first = True
        for pyop_name, pyop_value in self.iter_locals():
            if not first:
                out.write(', ')
            first = False

            out.write(pyop_name.proxyval(visited))
            out.write('=')
            pyop_value.write_repr(out, visited)

        out.write(')')

    def print_traceback(self):
        # One line in the style of a CPython traceback
        if self.is_optimized_out():
            sys.stdout.write('  (frame information optimized out)\n')
            return
        visited = set()
        sys.stdout.write('  File "%s", line %i, in %s\n'
                  % (self.co_filename.proxyval(visited),
                     self.current_line_num(),
                     self.co_name.proxyval(visited)))
class PySetObjectPtr(PyObjectPtr):
    # Wraps a (PySetObject*): both set and frozenset instances
    _typename = 'PySetObject'

    def proxyval(self, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('%s(...)' % self.safe_tp_name())
        visited.add(self.as_address())

        members = []
        table = self.field('table')
        # Walk the whole hash table (mask+1 slots), skipping empty and
        # dummy entries:
        for i in safe_range(self.field('mask')+1):
            setentry = table[i]
            key = setentry['key']
            if key != 0:
                key_proxy = PyObjectPtr.from_pyobject_ptr(key).proxyval(visited)
                if key_proxy != '<dummy key>':
                    members.append(key_proxy)
        if self.safe_tp_name() == 'frozenset':
            return frozenset(members)
        else:
            return set(members)

    def write_repr(self, out, visited):
        # Emulate Python 3's set_repr
        tp_name = self.safe_tp_name()

        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('(...)')
            return
        visited.add(self.as_address())

        # Python 3's set_repr special-cases the empty set:
        if not self.field('used'):
            out.write(tp_name)
            out.write('()')
            return

        # Python 3 uses {} for set literals:
        if tp_name != 'set':
            out.write(tp_name)
            out.write('(')

        out.write('{')
        first = True
        table = self.field('table')
        for i in safe_range(self.field('mask')+1):
            setentry = table[i]
            key = setentry['key']
            if key != 0:
                pyop_key = PyObjectPtr.from_pyobject_ptr(key)
                key_proxy = pyop_key.proxyval(visited) # FIXME!
                if key_proxy != '<dummy key>':
                    if not first:
                        out.write(', ')
                    first = False
                    pyop_key.write_repr(out, visited)
        out.write('}')

        if tp_name != 'set':
            out.write(')')
class PyBytesObjectPtr(PyObjectPtr):
    _typename = 'PyBytesObject'

    def __str__(self):
        # Read ob_size bytes out of the inline ob_sval buffer
        field_ob_size = self.field('ob_size')
        field_ob_sval = self.field('ob_sval')
        char_ptr = field_ob_sval.address.cast(_type_unsigned_char_ptr)
        return ''.join([chr(char_ptr[i]) for i in safe_range(field_ob_size)])

    def proxyval(self, visited):
        return str(self)

    def write_repr(self, out, visited):
        # Write this out as a Python 3 bytes literal, i.e. with a "b" prefix

        # Get a PyStringObject* within the Python 2 gdb process:
        proxy = self.proxyval(visited)

        # Transliteration of Python 3's Objects/bytesobject.c:PyBytes_Repr
        # to Python 2 code:
        quote = "'"
        if "'" in proxy and not '"' in proxy:
            quote = '"'
        out.write('b')
        out.write(quote)
        for byte in proxy:
            if byte == quote or byte == '\\':
                out.write('\\')
                out.write(byte)
            elif byte == '\t':
                out.write('\\t')
            elif byte == '\n':
                out.write('\\n')
            elif byte == '\r':
                out.write('\\r')
            elif byte < ' ' or ord(byte) >= 0x7f:
                # Escape non-printable bytes as \xNN
                out.write('\\x')
                out.write(hexdigits[(ord(byte) & 0xf0) >> 4])
                out.write(hexdigits[ord(byte) & 0xf])
            else:
                out.write(byte)
        out.write(quote)
class PyTupleObjectPtr(PyObjectPtr):
    _typename = 'PyTupleObject'

    def __getitem__(self, i):
        # Get the gdb.Value for the (PyObject*) with the given index:
        field_ob_item = self.field('ob_item')
        return field_ob_item[i]

    def proxyval(self, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            return ProxyAlreadyVisited('(...)')
        visited.add(self.as_address())

        result = tuple([PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
                        for i in safe_range(int_from_int(self.field('ob_size')))])
        return result

    def write_repr(self, out, visited):
        # Guard against infinite loops:
        if self.as_address() in visited:
            out.write('(...)')
            return
        visited.add(self.as_address())

        out.write('(')
        for i in safe_range(int_from_int(self.field('ob_size'))):
            if i > 0:
                out.write(', ')
            element = PyObjectPtr.from_pyobject_ptr(self[i])
            element.write_repr(out, visited)
        # Single-element tuples need the trailing comma:
        if self.field('ob_size') == 1:
            out.write(',)')
        else:
            out.write(')')
class PyTypeObjectPtr(PyObjectPtr):
    """Class wrapping a gdb.Value that's a PyTypeObject*."""
    _typename = 'PyTypeObject'
def _unichr_is_printable(char):
# Logic adapted from Python 3's Tools/unicode/makeunicodedata.py
if char == u" ":
return True
import unicodedata
return unicodedata.category(char) not in ("C", "Z")
# Select an implementation of _unichr depending on whether gdb's own Python
# is a wide (UCS-4) or narrow (UCS-2) build:
if sys.maxunicode >= 0x10000:
    _unichr = unichr
else:
    # Needed for proper surrogate support if sizeof(Py_UNICODE) is 2 in gdb
    def _unichr(x):
        if x < 0x10000:
            return unichr(x)
        # Encode the non-BMP code point as a UTF-16 surrogate pair:
        x -= 0x10000
        ch1 = 0xD800 | (x >> 10)
        ch2 = 0xDC00 | (x & 0x3FF)
        return unichr(ch1) + unichr(ch2)
class PyUnicodeObjectPtr(PyObjectPtr):
    # Wraps a (PyUnicodeObject*) i.e. a unicode/str instance in the inferior
    _typename = 'PyUnicodeObject'

    def char_width(self):
        # sizeof(Py_UNICODE) in the inferior: 2 (narrow) or 4 (wide build)
        _type_Py_UNICODE = gdb.lookup_type('Py_UNICODE')
        return _type_Py_UNICODE.sizeof

    def proxyval(self, visited):
        global _is_pep393
        if _is_pep393 is None:
            # Detect PEP 393 (flexible string representation, Python 3.3+)
            # by the presence of the 'data' field:
            fields = gdb.lookup_type('PyUnicodeObject').target().fields()
            _is_pep393 = 'data' in [f.name for f in fields]
        if _is_pep393:
            # Python 3.3 and newer
            may_have_surrogates = False
            compact = self.field('_base')
            ascii = compact['_base']
            state = ascii['state']
            is_compact_ascii = (int(state['ascii']) and int(state['compact']))
            if not int(state['ready']):
                # string is not ready
                field_length = long(compact['wstr_length'])
                may_have_surrogates = True
                field_str = ascii['wstr']
            else:
                field_length = long(ascii['length'])
                if is_compact_ascii:
                    # Character data follows the PyASCIIObject header inline
                    field_str = ascii.address + 1
                elif int(state['compact']):
                    # Character data follows the PyCompactUnicodeObject header
                    field_str = compact.address + 1
                else:
                    field_str = self.field('data')['any']
                # kind is the per-character width: 1, 2 or 4 bytes
                repr_kind = int(state['kind'])
                if repr_kind == 1:
                    field_str = field_str.cast(_type_unsigned_char_ptr)
                elif repr_kind == 2:
                    field_str = field_str.cast(_type_unsigned_short_ptr)
                elif repr_kind == 4:
                    field_str = field_str.cast(_type_unsigned_int_ptr)
        else:
            # Python 3.2 and earlier
            field_length = long(self.field('length'))
            field_str = self.field('str')
            may_have_surrogates = self.char_width() == 2

        # Gather a list of ints from the Py_UNICODE array; these are either
        # UCS-1, UCS-2 or UCS-4 code points:
        if not may_have_surrogates:
            Py_UNICODEs = [int(field_str[i]) for i in safe_range(field_length)]
        else:
            # A more elaborate routine if sizeof(Py_UNICODE) is 2 in the
            # inferior process: we must join surrogate pairs.
            Py_UNICODEs = []
            i = 0
            limit = safety_limit(field_length)
            while i < limit:
                ucs = int(field_str[i])
                i += 1
                if ucs < 0xD800 or ucs >= 0xDC00 or i == field_length:
                    Py_UNICODEs.append(ucs)
                    continue
                # This could be a surrogate pair.
                ucs2 = int(field_str[i])
                if ucs2 < 0xDC00 or ucs2 > 0xDFFF:
                    continue
                code = (ucs & 0x03FF) << 10
                code |= ucs2 & 0x03FF
                code += 0x00010000
                Py_UNICODEs.append(code)
                i += 1

        # Convert the int code points to unicode characters, and generate a
        # local unicode instance.
        # This splits surrogate pairs if sizeof(Py_UNICODE) is 2 here (in gdb).
        result = u''.join([_unichr(ucs) for ucs in Py_UNICODEs])
        return result

    def write_repr(self, out, visited):
        # Write this out as a Python 3 str literal, i.e. without a "u" prefix

        # Get a PyUnicodeObject* within the Python 2 gdb process:
        proxy = self.proxyval(visited)

        # Transliteration of Python 3's Object/unicodeobject.c:unicode_repr
        # to Python 2:
        if "'" in proxy and '"' not in proxy:
            quote = '"'
        else:
            quote = "'"
        out.write(quote)

        i = 0
        while i < len(proxy):
            ch = proxy[i]
            i += 1

            # Escape quotes and backslashes
            if ch == quote or ch == '\\':
                out.write('\\')
                out.write(ch)

            #  Map special whitespace to '\t', \n', '\r'
            elif ch == '\t':
                out.write('\\t')
            elif ch == '\n':
                out.write('\\n')
            elif ch == '\r':
                out.write('\\r')

            # Map non-printable US ASCII to '\xhh' */
            # NOTE(review): `ch == 0x7F` compares a 1-char string with an int
            # and so never matches; DEL falls through to the non-ASCII branch
            # below -- confirm whether ord(ch) was intended here.
            elif ch < ' ' or ch == 0x7F:
                out.write('\\x')
                out.write(hexdigits[(ord(ch) >> 4) & 0x000F])
                out.write(hexdigits[ord(ch) & 0x000F])

            # Copy ASCII characters as-is
            elif ord(ch) < 0x7F:
                out.write(ch)

            # Non-ASCII characters
            else:
                ucs = ch
                ch2 = None
                if sys.maxunicode < 0x10000:
                    # If sizeof(Py_UNICODE) is 2 here (in gdb), join
                    # surrogate pairs before calling _unichr_is_printable.
                    if (i < len(proxy)
                    and 0xD800 <= ord(ch) < 0xDC00 \
                    and 0xDC00 <= ord(proxy[i]) <= 0xDFFF):
                        ch2 = proxy[i]
                        ucs = ch + ch2
                        i += 1

                # Unfortuately, Python 2's unicode type doesn't seem
                # to expose the "isprintable" method
                printable = _unichr_is_printable(ucs)
                if printable:
                    try:
                        ucs.encode(ENCODING)
                    except UnicodeEncodeError:
                        printable = False

                # Map Unicode whitespace and control characters
                # (categories Z* and C* except ASCII space)
                if not printable:
                    if ch2 is not None:
                        # Match Python 3's representation of non-printable
                        # wide characters.
                        code = (ord(ch) & 0x03FF) << 10
                        code |= ord(ch2) & 0x03FF
                        code += 0x00010000
                    else:
                        code = ord(ucs)

                    # Map 8-bit characters to '\\xhh'
                    if code <= 0xff:
                        out.write('\\x')
                        out.write(hexdigits[(code >> 4) & 0x000F])
                        out.write(hexdigits[code & 0x000F])
                    # Map 21-bit characters to '\U00xxxxxx'
                    elif code >= 0x10000:
                        out.write('\\U')
                        out.write(hexdigits[(code >> 28) & 0x0000000F])
                        out.write(hexdigits[(code >> 24) & 0x0000000F])
                        out.write(hexdigits[(code >> 20) & 0x0000000F])
                        out.write(hexdigits[(code >> 16) & 0x0000000F])
                        out.write(hexdigits[(code >> 12) & 0x0000000F])
                        out.write(hexdigits[(code >> 8) & 0x0000000F])
                        out.write(hexdigits[(code >> 4) & 0x0000000F])
                        out.write(hexdigits[code & 0x0000000F])
                    # Map 16-bit characters to '\uxxxx'
                    else:
                        out.write('\\u')
                        out.write(hexdigits[(code >> 12) & 0x000F])
                        out.write(hexdigits[(code >> 8) & 0x000F])
                        out.write(hexdigits[(code >> 4) & 0x000F])
                        out.write(hexdigits[code & 0x000F])
                else:
                    # Copy characters as-is
                    out.write(ch)
                    if ch2 is not None:
                        out.write(ch2)

        out.write(quote)
def int_from_int(gdbval):
    """Convert a gdb.Value (or any int-like object) to a plain Python int."""
    # Round-trip through str() so both gdb.Value integers and native ints
    # are handled uniformly.
    text = str(gdbval)
    return int(text)
def stringify(val):
    """Render *val* as a string for display."""
    # TODO: repr() puts everything on one line; pformat can be nicer, but
    # can lead to v.long results; this function isolates the choice
    use_repr = True
    if use_repr:
        return repr(val)
    from pprint import pformat
    return pformat(val)
class PyObjectPtrPrinter:
    "Prints a (PyObject*)"

    def __init__ (self, gdbval):
        self.gdbval = gdbval

    def to_string (self):
        # Called by gdb to obtain the display string for the value
        pyop = PyObjectPtr.from_pyobject_ptr(self.gdbval)
        if True:
            return pyop.get_truncated_repr(MAX_OUTPUT_LEN)
        else:
            # Generate full proxy value then stringify it.
            # Doing so could be expensive
            proxyval = pyop.proxyval(set())
            return stringify(proxyval)
def pretty_printer_lookup(gdbval):
    """gdb pretty-printer hook: return a PyObjectPtrPrinter for pointers to
    PyObject/PyFrameObject/PyUnicodeObject, or None for everything else."""
    valtype = gdbval.type.unqualified()
    if valtype.code != gdb.TYPE_CODE_PTR:
        return None
    pointee = str(valtype.target().unqualified())
    if pointee in ("PyObject", "PyFrameObject", "PyUnicodeObject"):
        return PyObjectPtrPrinter(gdbval)
    return None
"""
During development, I've been manually invoking the code in this way:
(gdb) python
import sys
sys.path.append('/home/david/coding/python-gdb')
import libpython
end
then reloading it after each edit like this:
(gdb) python reload(libpython)
The following code should ensure that the prettyprinter is registered
if the code is autoloaded by gdb when visiting libpython.so, provided
that this python file is installed to the same path as the library (or its
.debug file) plus a "-gdb.py" suffix, e.g:
/usr/lib/libpython2.6.so.1.0-gdb.py
/usr/lib/debug/usr/lib/libpython2.6.so.1.0.debug-gdb.py
"""
def register (obj):
    """Append our pretty-printer lookup to *obj* (an objfile), or to gdb
    globally when obj is None."""
    if obj is None:
        obj = gdb

    # Wire up the pretty-printer
    obj.pretty_printers.append(pretty_printer_lookup)

# Register against the objfile currently being loaded (None when run
# interactively, in which case we register globally):
register (gdb.current_objfile ())
# Unfortunately, the exact API exposed by the gdb module varies somewhat
# from build to build
# See http://bugs.python.org/issue8279?#msg102276
class Frame(object):
    '''
    Wrapper for gdb.Frame, adding various methods
    '''
    def __init__(self, gdbframe):
        self._gdbframe = gdbframe

    def older(self):
        # The next outer (caller) frame, or None at the bottom of the stack
        older = self._gdbframe.older()
        if older:
            return Frame(older)
        else:
            return None

    def newer(self):
        # The next inner (callee) frame, or None at the top of the stack
        newer = self._gdbframe.newer()
        if newer:
            return Frame(newer)
        else:
            return None

    def select(self):
        '''If supported, select this frame and return True; return False if unsupported

        Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
        onwards, but absent on Ubuntu buildbot'''
        if not hasattr(self._gdbframe, 'select'):
            print ('Unable to select frame: '
                   'this build of gdb does not expose a gdb.Frame.select method')
            return False
        self._gdbframe.select()
        return True

    def get_index(self):
        '''Calculate index of frame, starting at 0 for the newest frame within
        this thread'''
        index = 0
        # Go down until you reach the newest frame:
        iter_frame = self
        while iter_frame.newer():
            index += 1
            iter_frame = iter_frame.newer()
        return index

    # We divide frames into:
    #   - "python frames":
    #       - "bytecode frames" i.e. PyEval_EvalFrameEx
    #       - "other python frames": things that are of interest from a python
    #         POV, but aren't bytecode (e.g. GC, GIL)
    #   - everything else

    def is_python_frame(self):
        '''Is this a PyEval_EvalFrameEx frame, or some other important
        frame? (see is_other_python_frame for what "important" means in this
        context)'''
        if self.is_evalframeex():
            return True
        if self.is_other_python_frame():
            return True
        return False

    def is_evalframeex(self):
        '''Is this a PyEval_EvalFrameEx frame?'''
        if self._gdbframe.name() == 'PyEval_EvalFrameEx':
            '''
            I believe we also need to filter on the inline
            struct frame_id.inline_depth, only regarding frames with
            an inline depth of 0 as actually being this function

            So we reject those with type gdb.INLINE_FRAME
            '''
            if self._gdbframe.type() == gdb.NORMAL_FRAME:
                # We have a PyEval_EvalFrameEx frame:
                return True

        return False

    def is_other_python_frame(self):
        '''Is this frame worth displaying in python backtraces?
        Examples:
          - waiting on the GIL
          - garbage-collecting
          - within a CFunction
         If it is, return a descriptive string
         For other frames, return False
         '''
        if self.is_waiting_for_gil():
            return 'Waiting for the GIL'
        elif self.is_gc_collect():
            return 'Garbage-collecting'
        else:
            # Detect invocations of PyCFunction instances:
            older = self.older()
            if older and older._gdbframe.name() == 'PyCFunction_Call':
                # Within that frame:
                #   "func" is the local containing the PyObject* of the
                # PyCFunctionObject instance
                #   "f" is the same value, but cast to (PyCFunctionObject*)
                #   "self" is the (PyObject*) of the 'self'
                try:
                    # Use the prettyprinter for the func:
                    func = older._gdbframe.read_var('func')
                    return str(func)
                except RuntimeError:
                    return 'PyCFunction invocation (unable to read "func")'

        # This frame isn't worth reporting:
        return False

    def is_waiting_for_gil(self):
        '''Is this frame waiting on the GIL?'''
        # This assumes the _POSIX_THREADS version of Python/ceval_gil.h:
        name = self._gdbframe.name()
        if name:
            return 'pthread_cond_timedwait' in name

    def is_gc_collect(self):
        '''Is this frame "collect" within the garbage-collector?'''
        return self._gdbframe.name() == 'collect'

    def get_pyop(self):
        # Return the PyFrameObjectPtr for this frame's "f" argument,
        # or None if it cannot be read
        try:
            f = self._gdbframe.read_var('f')
            frame = PyFrameObjectPtr.from_pyobject_ptr(f)
            if not frame.is_optimized_out():
                return frame
            # gdb is unable to get the "f" argument of PyEval_EvalFrameEx()
            # because it was "optimized out". Try to get "f" from the frame
            # of the caller, PyEval_EvalCodeEx().
            orig_frame = frame
            caller = self._gdbframe.older()
            if caller:
                f = caller.read_var('f')
                frame = PyFrameObjectPtr.from_pyobject_ptr(f)
                if not frame.is_optimized_out():
                    return frame
            return orig_frame
        except ValueError:
            return None

    @classmethod
    def get_selected_frame(cls):
        # Wrap gdb's currently-selected frame, or None
        _gdbframe = gdb.selected_frame()
        if _gdbframe:
            return Frame(_gdbframe)
        return None

    @classmethod
    def get_selected_python_frame(cls):
        '''Try to obtain the Frame for the python-related code in the selected
        frame, or None'''
        frame = cls.get_selected_frame()

        while frame:
            if frame.is_python_frame():
                return frame
            frame = frame.older()

        # Not found:
        return None

    @classmethod
    def get_selected_bytecode_frame(cls):
        '''Try to obtain the Frame for the python bytecode interpreter in the
        selected GDB frame, or None'''
        frame = cls.get_selected_frame()

        while frame:
            if frame.is_evalframeex():
                return frame
            frame = frame.older()

        # Not found:
        return None

    def print_summary(self):
        # One-line summary of this frame, used by py-up/py-down/py-bt
        if self.is_evalframeex():
            pyop = self.get_pyop()
            if pyop:
                line = pyop.get_truncated_repr(MAX_OUTPUT_LEN)
                write_unicode(sys.stdout, '#%i %s\n' % (self.get_index(), line))
                if not pyop.is_optimized_out():
                    line = pyop.current_line()
                    if line is not None:
                        sys.stdout.write('    %s\n' % line.strip())
            else:
                sys.stdout.write('#%i (unable to read python frame information)\n' % self.get_index())
        else:
            info = self.is_other_python_frame()
            if info:
                sys.stdout.write('#%i %s\n' % (self.get_index(), info))
            else:
                sys.stdout.write('#%i\n' % self.get_index())

    def print_traceback(self):
        # Traceback-style line for this frame, used by py-bt
        if self.is_evalframeex():
            pyop = self.get_pyop()
            if pyop:
                pyop.print_traceback()
                if not pyop.is_optimized_out():
                    line = pyop.current_line()
                    if line is not None:
                        sys.stdout.write('    %s\n' % line.strip())
            else:
                sys.stdout.write('  (unable to read python frame information)\n')
        else:
            info = self.is_other_python_frame()
            if info:
                sys.stdout.write('  %s\n' % info)
            else:
                sys.stdout.write('  (not a python frame)\n')
class PyList(gdb.Command):
    '''List the current Python source code, if any

    Use
       py-list START
    to list at a different line number within the python source.

    Use
       py-list START, END
    to list a specific range of lines within the python source.
    '''

    def __init__(self):
        gdb.Command.__init__ (self,
                              "py-list",
                              gdb.COMMAND_FILES,
                              gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        import re

        start = None
        end = None

        # "py-list START": 10 lines starting at START
        m = re.match(r'\s*(\d+)\s*', args)
        if m:
            start = int(m.group(0))
            end = start + 10

        # "py-list START, END": an explicit closed range
        m = re.match(r'\s*(\d+)\s*,\s*(\d+)\s*', args)
        if m:
            start, end = map(int, m.groups())

        # py-list requires an actual PyEval_EvalFrameEx frame:
        frame = Frame.get_selected_bytecode_frame()
        if not frame:
            print('Unable to locate gdb frame for python bytecode interpreter')
            return

        pyop = frame.get_pyop()
        if not pyop or pyop.is_optimized_out():
            print('Unable to read information on python frame')
            return

        filename = pyop.filename()
        lineno = pyop.current_line_num()

        if start is None:
            # No arguments: center the listing on the current line
            start = lineno - 5
            end = lineno + 5

        if start<1:
            start = 1

        try:
            f = open(os_fsencode(filename), 'r')
        except IOError as err:
            sys.stdout.write('Unable to open %s: %s\n'
                             % (filename, err))
            return
        with f:
            all_lines = f.readlines()
            # start and end are 1-based, all_lines is 0-based;
            # so [start-1:end] as a python slice gives us [start, end] as a
            # closed interval
            for i, line in enumerate(all_lines[start-1:end]):
                linestr = str(i+start)
                # Highlight current line:
                if i + start == lineno:
                    linestr = '>' + linestr
                sys.stdout.write('%4s    %s' % (linestr, line))


# ...and register the command:
PyList()
def move_in_stack(move_up):
    '''Move up or down the stack (for the py-up/py-down command)'''
    frame = Frame.get_selected_python_frame()
    while frame:
        # Step one native frame in the requested direction:
        candidate = frame.older() if move_up else frame.newer()
        if not candidate:
            break

        if candidate.is_python_frame():
            # Found the next python-level frame; make it the selected frame
            # and describe it (select() can fail on gdb builds lacking
            # gdb.Frame.select):
            if candidate.select():
                candidate.print_summary()
            return

        frame = candidate

    if move_up:
        print('Unable to find an older python frame')
    else:
        print('Unable to find a newer python frame')
class PyUp(gdb.Command):
    'Select and print the python stack frame that called this one (if any)'
    def __init__(self):
        gdb.Command.__init__(self, "py-up",
                             gdb.COMMAND_STACK, gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        # Walk towards older (calling) frames:
        move_in_stack(move_up=True)
class PyDown(gdb.Command):
    'Select and print the python stack frame called by this one (if any)'
    def __init__(self):
        gdb.Command.__init__(self, "py-down",
                             gdb.COMMAND_STACK, gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        # Walk towards newer (called) frames:
        move_in_stack(move_up=False)
# Not all builds of gdb have gdb.Frame.select, which py-up/py-down rely on
# to change the selected frame, so only register those commands when the
# capability is present:
if hasattr(gdb.Frame, 'select'):
    PyUp()
    PyDown()
class PyBacktraceFull(gdb.Command):
    'Display the current python frame and all the frames within its call stack (if any)'
    def __init__(self):
        gdb.Command.__init__(self, "py-bt-full",
                             gdb.COMMAND_STACK, gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        # Walk from the selected python frame towards the oldest frame,
        # printing a one-line summary for every python-level frame:
        cur = Frame.get_selected_python_frame()
        while cur:
            if cur.is_python_frame():
                cur.print_summary()
            cur = cur.older()

PyBacktraceFull()
class PyBacktrace(gdb.Command):
    'Display the current python frame and all the frames within its call stack (if any)'
    def __init__(self):
        gdb.Command.__init__(self, "py-bt",
                             gdb.COMMAND_STACK, gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        # Emit the header, then walk towards older frames, printing a
        # traceback-style entry for every python-level frame:
        sys.stdout.write('Traceback (most recent call first):\n')
        cur = Frame.get_selected_python_frame()
        while cur:
            if cur.is_python_frame():
                cur.print_traceback()
            cur = cur.older()

PyBacktrace()
class PyPrint(gdb.Command):
    'Look up the given python variable name, and print it'
    def __init__(self):
        gdb.Command.__init__(self, "py-print",
                             gdb.COMMAND_DATA, gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        name = str(args)

        # Locate the python frame in the selected gdb frame's stack:
        frame = Frame.get_selected_python_frame()
        if not frame:
            print('Unable to locate python frame')
            return

        pyop_frame = frame.get_pyop()
        if not pyop_frame:
            print('Unable to read information on python frame')
            return

        # Search locals, then globals, then builtins:
        pyop_var, scope = pyop_frame.get_var_by_name(name)
        if not pyop_var:
            print('%r not found' % name)
            return

        print('%s %r = %s'
              % (scope, name, pyop_var.get_truncated_repr(MAX_OUTPUT_LEN)))

PyPrint()
class PyLocals(gdb.Command):
    # Bug fix: the docstring doubles as gdb's help text for the command; the
    # original was copy-pasted from PyPrint and misdescribed what py-locals does.
    'Print the local variables of the currently selected python frame'

    def __init__(self):
        gdb.Command.__init__ (self,
                              "py-locals",
                              gdb.COMMAND_DATA,
                              gdb.COMPLETE_NONE)

    def invoke(self, args, from_tty):
        # (The dead "name = str(args)" assignment was removed: py-locals
        # takes no arguments and never used it.)
        frame = Frame.get_selected_python_frame()
        if not frame:
            print('Unable to locate python frame')
            return

        pyop_frame = frame.get_pyop()
        if not pyop_frame:
            print('Unable to read information on python frame')
            return

        # Dump every readable local as a "name = value" line:
        for pyop_name, pyop_value in pyop_frame.iter_locals():
            print('%s = %s'
                  % (pyop_name.proxyval(set()),
                     pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)))

PyLocals()
| 33.842367 | 102 | 0.574191 |
__future__ import print_function, with_statement
import gdb
import os
import locale
import sys
if sys.version_info[0] >= 3:
unichr = chr
xrange = range
long = int
_type_char_ptr = gdb.lookup_type('char').pointer()
_type_unsigned_char_ptr = gdb.lookup_type('unsigned char').pointer()
_type_void_ptr = gdb.lookup_type('void').pointer()
_type_unsigned_short_ptr = gdb.lookup_type('unsigned short').pointer()
_type_unsigned_int_ptr = gdb.lookup_type('unsigned int').pointer()
_is_pep393 = None
SIZEOF_VOID_P = _type_void_ptr.sizeof
Py_TPFLAGS_HEAPTYPE = (1 << 9)
Py_TPFLAGS_LONG_SUBCLASS = (1 << 24)
Py_TPFLAGS_LIST_SUBCLASS = (1 << 25)
Py_TPFLAGS_TUPLE_SUBCLASS = (1 << 26)
Py_TPFLAGS_BYTES_SUBCLASS = (1 << 27)
Py_TPFLAGS_UNICODE_SUBCLASS = (1 << 28)
Py_TPFLAGS_DICT_SUBCLASS = (1 << 29)
Py_TPFLAGS_BASE_EXC_SUBCLASS = (1 << 30)
Py_TPFLAGS_TYPE_SUBCLASS = (1 << 31)
MAX_OUTPUT_LEN=1024
hexdigits = "0123456789abcdef"
ENCODING = locale.getpreferredencoding()
class NullPyObjectPtr(RuntimeError):
pass
def safety_limit(val):
# break the gdb process too much (e.g. sizes of iterations, sizes of lists)
return min(val, 1000)
def safe_range(val):
# As per range, but don't trust the value too much: cap it to a safety
return xrange(safety_limit(int(val)))
if sys.version_info[0] >= 3:
def write_unicode(file, text):
file.write(text)
else:
def write_unicode(file, text):
if isinstance(text, unicode):
text = text.encode(ENCODING, 'backslashreplace')
file.write(text)
try:
os_fsencode = os.fsencode
except AttributeError:
def os_fsencode(filename):
if not isinstance(filename, unicode):
return filename
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
return filename.encode(encoding)
encoded = []
for char in filename:
# surrogateescape error handler
if 0xDC80 <= ord(char) <= 0xDCFF:
byte = chr(ord(char) - 0xDC00)
else:
byte = char.encode(encoding)
encoded.append(byte)
return ''.join(encoded)
class StringTruncated(RuntimeError):
pass
class TruncatedStringIO(object):
def __init__(self, maxlen=None):
self._val = ''
self.maxlen = maxlen
def write(self, data):
if self.maxlen:
if len(data) + len(self._val) > self.maxlen:
# Truncation:
self._val += data[0:self.maxlen - len(self._val)]
raise StringTruncated()
self._val += data
def getvalue(self):
return self._val
class PyObjectPtr(object):
_typename = 'PyObject'
def __init__(self, gdbval, cast_to=None):
if cast_to:
self._gdbval = gdbval.cast(cast_to)
else:
self._gdbval = gdbval
def field(self, name):
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
pyo_ptr = self._gdbval.cast(PyVarObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name]
def pyop_field(self, name):
return PyObjectPtr.from_pyobject_ptr(self.field(name))
def write_field_repr(self, name, out, visited):
field_obj = self.pyop_field(name)
field_obj.write_repr(out, visited)
def get_truncated_repr(self, maxlen):
out = TruncatedStringIO(maxlen)
try:
self.write_repr(out, set())
except StringTruncated:
# Truncation occurred:
return out.getvalue() + '...(truncated)'
# No truncation occurred:
return out.getvalue()
def type(self):
return PyTypeObjectPtr(self.field('ob_type'))
def is_null(self):
return 0 == long(self._gdbval)
def is_optimized_out(self):
return self._gdbval.is_optimized_out
def safe_tp_name(self):
try:
return self.type().field('tp_name').string()
except NullPyObjectPtr:
# NULL tp_name?
return 'unknown'
except RuntimeError:
# Can't even read the object at all?
return 'unknown'
def proxyval(self, visited):
class FakeRepr(object):
def __init__(self, tp_name, address):
self.tp_name = tp_name
self.address = address
def __repr__(self):
if self.address == 0:
return '0x0'
return '<%s at remote 0x%x>' % (self.tp_name, self.address)
return FakeRepr(self.safe_tp_name(),
long(self._gdbval))
def write_repr(self, out, visited):
return out.write(repr(self.proxyval(visited)))
@classmethod
def subclass_from_type(cls, t):
try:
tp_name = t.field('tp_name').string()
tp_flags = int(t.field('tp_flags'))
except RuntimeError:
return cls
name_map = {'bool': PyBoolObjectPtr,
'classobj': PyClassObjectPtr,
'NoneType': PyNoneStructPtr,
'frame': PyFrameObjectPtr,
'set' : PySetObjectPtr,
'frozenset' : PySetObjectPtr,
'builtin_function_or_method' : PyCFunctionObjectPtr,
}
if tp_name in name_map:
return name_map[tp_name]
if tp_flags & Py_TPFLAGS_HEAPTYPE:
return HeapTypeObjectPtr
if tp_flags & Py_TPFLAGS_LONG_SUBCLASS:
return PyLongObjectPtr
if tp_flags & Py_TPFLAGS_LIST_SUBCLASS:
return PyListObjectPtr
if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS:
return PyTupleObjectPtr
if tp_flags & Py_TPFLAGS_BYTES_SUBCLASS:
return PyBytesObjectPtr
if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS:
return PyUnicodeObjectPtr
if tp_flags & Py_TPFLAGS_DICT_SUBCLASS:
return PyDictObjectPtr
if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS:
return PyBaseExceptionObjectPtr
return cls
@classmethod
def from_pyobject_ptr(cls, gdbval):
try:
p = PyObjectPtr(gdbval)
cls = cls.subclass_from_type(p.type())
return cls(gdbval, cast_to=cls.get_gdb_type())
except RuntimeError:
pass
return cls(gdbval)
@classmethod
def get_gdb_type(cls):
return gdb.lookup_type(cls._typename).pointer()
def as_address(self):
return long(self._gdbval)
class PyVarObjectPtr(PyObjectPtr):
_typename = 'PyVarObject'
class ProxyAlreadyVisited(object):
def __init__(self, rep):
self._rep = rep
def __repr__(self):
return self._rep
def _write_instance_repr(out, visited, name, pyop_attrdict, address):
out.write('<')
out.write(name)
if isinstance(pyop_attrdict, PyDictObjectPtr):
out.write('(')
first = True
for pyop_arg, pyop_val in pyop_attrdict.iteritems():
if not first:
out.write(', ')
first = False
out.write(pyop_arg.proxyval(visited))
out.write('=')
pyop_val.write_repr(out, visited)
out.write(')')
out.write(' at remote 0x%x>' % address)
class InstanceProxy(object):
def __init__(self, cl_name, attrdict, address):
self.cl_name = cl_name
self.attrdict = attrdict
self.address = address
def __repr__(self):
if isinstance(self.attrdict, dict):
kwargs = ', '.join(["%s=%r" % (arg, val)
for arg, val in self.attrdict.iteritems()])
return '<%s(%s) at remote 0x%x>' % (self.cl_name,
kwargs, self.address)
else:
return '<%s at remote 0x%x>' % (self.cl_name,
self.address)
def _PyObject_VAR_SIZE(typeobj, nitems):
if _PyObject_VAR_SIZE._type_size_t is None:
_PyObject_VAR_SIZE._type_size_t = gdb.lookup_type('size_t')
return ( ( typeobj.field('tp_basicsize') +
nitems * typeobj.field('tp_itemsize') +
(SIZEOF_VOID_P - 1)
) & ~(SIZEOF_VOID_P - 1)
).cast(_PyObject_VAR_SIZE._type_size_t)
_PyObject_VAR_SIZE._type_size_t = None
class HeapTypeObjectPtr(PyObjectPtr):
_typename = 'PyObject'
def get_attr_dict(self):
try:
typeobj = self.type()
dictoffset = int_from_int(typeobj.field('tp_dictoffset'))
if dictoffset != 0:
if dictoffset < 0:
type_PyVarObject_ptr = gdb.lookup_type('PyVarObject').pointer()
tsize = int_from_int(self._gdbval.cast(type_PyVarObject_ptr)['ob_size'])
if tsize < 0:
tsize = -tsize
size = _PyObject_VAR_SIZE(typeobj, tsize)
dictoffset += size
assert dictoffset > 0
assert dictoffset % SIZEOF_VOID_P == 0
dictptr = self._gdbval.cast(_type_char_ptr) + dictoffset
PyObjectPtrPtr = PyObjectPtr.get_gdb_type().pointer()
dictptr = dictptr.cast(PyObjectPtrPtr)
return PyObjectPtr.from_pyobject_ptr(dictptr.dereference())
except RuntimeError:
pass
return None
def proxyval(self, visited):
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
return InstanceProxy(tp_name, attr_dict, long(self._gdbval))
def write_repr(self, out, visited):
if self.as_address() in visited:
out.write('<...>')
return
visited.add(self.as_address())
pyop_attrdict = self.get_attr_dict()
_write_instance_repr(out, visited,
self.safe_tp_name(), pyop_attrdict, self.as_address())
class ProxyException(Exception):
def __init__(self, tp_name, args):
self.tp_name = tp_name
self.args = args
def __repr__(self):
return '%s%r' % (self.tp_name, self.args)
class PyBaseExceptionObjectPtr(PyObjectPtr):
_typename = 'PyBaseExceptionObject'
def proxyval(self, visited):
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
arg_proxy = self.pyop_field('args').proxyval(visited)
return ProxyException(self.safe_tp_name(),
arg_proxy)
def write_repr(self, out, visited):
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write(self.safe_tp_name())
self.write_field_repr('args', out, visited)
class PyClassObjectPtr(PyObjectPtr):
_typename = 'PyClassObject'
class BuiltInFunctionProxy(object):
def __init__(self, ml_name):
self.ml_name = ml_name
def __repr__(self):
return "<built-in function %s>" % self.ml_name
class BuiltInMethodProxy(object):
def __init__(self, ml_name, pyop_m_self):
self.ml_name = ml_name
self.pyop_m_self = pyop_m_self
def __repr__(self):
return ('<built-in method %s of %s object at remote 0x%x>'
% (self.ml_name,
self.pyop_m_self.safe_tp_name(),
self.pyop_m_self.as_address())
)
class PyCFunctionObjectPtr(PyObjectPtr):
_typename = 'PyCFunctionObject'
def proxyval(self, visited):
m_ml = self.field('m_ml')
ml_name = m_ml['ml_name'].string()
pyop_m_self = self.pyop_field('m_self')
if pyop_m_self.is_null():
return BuiltInFunctionProxy(ml_name)
else:
return BuiltInMethodProxy(ml_name, pyop_m_self)
class PyCodeObjectPtr(PyObjectPtr):
_typename = 'PyCodeObject'
def addr2line(self, addrq):
co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
lineno = int_from_int(self.field('co_firstlineno'))
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
if addr > addrq:
return lineno
lineno += ord(line_incr)
return lineno
class PyDictObjectPtr(PyObjectPtr):
_typename = 'PyDictObject'
def iteritems(self):
keys = self.field('ma_keys')
values = self.field('ma_values')
for i in safe_range(keys['dk_size']):
ep = keys['dk_entries'].address + i
if long(values):
pyop_value = PyObjectPtr.from_pyobject_ptr(values[i])
else:
pyop_value = PyObjectPtr.from_pyobject_ptr(ep['me_value'])
if not pyop_value.is_null():
pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
yield (pyop_key, pyop_value)
def proxyval(self, visited):
if self.as_address() in visited:
return ProxyAlreadyVisited('{...}')
visited.add(self.as_address())
result = {}
for pyop_key, pyop_value in self.iteritems():
proxy_key = pyop_key.proxyval(visited)
proxy_value = pyop_value.proxyval(visited)
result[proxy_key] = proxy_value
return result
def write_repr(self, out, visited):
if self.as_address() in visited:
out.write('{...}')
return
visited.add(self.as_address())
out.write('{')
first = True
for pyop_key, pyop_value in self.iteritems():
if not first:
out.write(', ')
first = False
pyop_key.write_repr(out, visited)
out.write(': ')
pyop_value.write_repr(out, visited)
out.write('}')
class PyListObjectPtr(PyObjectPtr):
_typename = 'PyListObject'
def __getitem__(self, i):
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
if self.as_address() in visited:
return ProxyAlreadyVisited('[...]')
visited.add(self.as_address())
result = [PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))]
return result
def write_repr(self, out, visited):
if self.as_address() in visited:
out.write('[...]')
return
visited.add(self.as_address())
out.write('[')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
out.write(']')
class PyLongObjectPtr(PyObjectPtr):
_typename = 'PyLongObject'
def proxyval(self, visited):
ob_size = long(self.field('ob_size'))
if ob_size == 0:
return 0
ob_digit = self.field('ob_digit')
if gdb.lookup_type('digit').sizeof == 2:
SHIFT = 15
else:
SHIFT = 30
digits = [long(ob_digit[i]) * 2**(SHIFT*i)
for i in safe_range(abs(ob_size))]
result = sum(digits)
if ob_size < 0:
result = -result
return result
def write_repr(self, out, visited):
proxy = self.proxyval(visited)
out.write("%s" % proxy)
class PyBoolObjectPtr(PyLongObjectPtr):
def proxyval(self, visited):
if PyLongObjectPtr.proxyval(self, visited):
return True
else:
return False
class PyNoneStructPtr(PyObjectPtr):
_typename = 'PyObject'
def proxyval(self, visited):
return None
class PyFrameObjectPtr(PyObjectPtr):
_typename = 'PyFrameObject'
def __init__(self, gdbval, cast_to=None):
PyObjectPtr.__init__(self, gdbval, cast_to)
if not self.is_optimized_out():
self.co = PyCodeObjectPtr.from_pyobject_ptr(self.field('f_code'))
self.co_name = self.co.pyop_field('co_name')
self.co_filename = self.co.pyop_field('co_filename')
self.f_lineno = int_from_int(self.field('f_lineno'))
self.f_lasti = int_from_int(self.field('f_lasti'))
self.co_nlocals = int_from_int(self.co.field('co_nlocals'))
self.co_varnames = PyTupleObjectPtr.from_pyobject_ptr(self.co.field('co_varnames'))
def iter_locals(self):
if self.is_optimized_out():
return
f_localsplus = self.field('f_localsplus')
for i in safe_range(self.co_nlocals):
pyop_value = PyObjectPtr.from_pyobject_ptr(f_localsplus[i])
if not pyop_value.is_null():
pyop_name = PyObjectPtr.from_pyobject_ptr(self.co_varnames[i])
yield (pyop_name, pyop_value)
def iter_globals(self):
if self.is_optimized_out():
return ()
pyop_globals = self.pyop_field('f_globals')
return pyop_globals.iteritems()
def iter_builtins(self):
if self.is_optimized_out():
return ()
pyop_builtins = self.pyop_field('f_builtins')
return pyop_builtins.iteritems()
def get_var_by_name(self, name):
for pyop_name, pyop_value in self.iter_locals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'local'
for pyop_name, pyop_value in self.iter_globals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'global'
for pyop_name, pyop_value in self.iter_builtins():
if name == pyop_name.proxyval(set()):
return pyop_value, 'builtin'
return None, None
def filename(self):
if self.is_optimized_out():
return '(frame information optimized out)'
return self.co_filename.proxyval(set())
def current_line_num(self):
if self.is_optimized_out():
return None
f_trace = self.field('f_trace')
if long(f_trace) != 0:
return self.f_lineno
else:
return self.co.addr2line(self.f_lasti)
def current_line(self):
if self.is_optimized_out():
return '(frame information optimized out)'
filename = self.filename()
try:
f = open(os_fsencode(filename), 'r')
except IOError:
return None
with f:
all_lines = f.readlines()
return all_lines[self.current_line_num()-1]
def write_repr(self, out, visited):
if self.is_optimized_out():
out.write('(frame information optimized out)')
return
out.write('Frame 0x%x, for file %s, line %i, in %s ('
% (self.as_address(),
self.co_filename.proxyval(visited),
self.current_line_num(),
self.co_name.proxyval(visited)))
first = True
for pyop_name, pyop_value in self.iter_locals():
if not first:
out.write(', ')
first = False
out.write(pyop_name.proxyval(visited))
out.write('=')
pyop_value.write_repr(out, visited)
out.write(')')
def print_traceback(self):
if self.is_optimized_out():
sys.stdout.write(' (frame information optimized out)\n')
return
visited = set()
sys.stdout.write(' File "%s", line %i, in %s\n'
% (self.co_filename.proxyval(visited),
self.current_line_num(),
self.co_name.proxyval(visited)))
class PySetObjectPtr(PyObjectPtr):
_typename = 'PySetObject'
def proxyval(self, visited):
if self.as_address() in visited:
return ProxyAlreadyVisited('%s(...)' % self.safe_tp_name())
visited.add(self.as_address())
members = []
table = self.field('table')
for i in safe_range(self.field('mask')+1):
setentry = table[i]
key = setentry['key']
if key != 0:
key_proxy = PyObjectPtr.from_pyobject_ptr(key).proxyval(visited)
if key_proxy != '<dummy key>':
members.append(key_proxy)
if self.safe_tp_name() == 'frozenset':
return frozenset(members)
else:
return set(members)
def write_repr(self, out, visited):
tp_name = self.safe_tp_name()
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
# Python 3's set_repr special-cases the empty set:
if not self.field('used'):
out.write(tp_name)
out.write('()')
return
if tp_name != 'set':
out.write(tp_name)
out.write('(')
out.write('{')
first = True
table = self.field('table')
for i in safe_range(self.field('mask')+1):
setentry = table[i]
key = setentry['key']
if key != 0:
pyop_key = PyObjectPtr.from_pyobject_ptr(key)
key_proxy = pyop_key.proxyval(visited)
if key_proxy != '<dummy key>':
if not first:
out.write(', ')
first = False
pyop_key.write_repr(out, visited)
out.write('}')
if tp_name != 'set':
out.write(')')
class PyBytesObjectPtr(PyObjectPtr):
_typename = 'PyBytesObject'
def __str__(self):
field_ob_size = self.field('ob_size')
field_ob_sval = self.field('ob_sval')
char_ptr = field_ob_sval.address.cast(_type_unsigned_char_ptr)
return ''.join([chr(char_ptr[i]) for i in safe_range(field_ob_size)])
def proxyval(self, visited):
return str(self)
def write_repr(self, out, visited):
proxy = self.proxyval(visited)
# to Python 2 code:
quote = "'"
if "'" in proxy and not '"' in proxy:
quote = '"'
out.write('b')
out.write(quote)
for byte in proxy:
if byte == quote or byte == '\\':
out.write('\\')
out.write(byte)
elif byte == '\t':
out.write('\\t')
elif byte == '\n':
out.write('\\n')
elif byte == '\r':
out.write('\\r')
elif byte < ' ' or ord(byte) >= 0x7f:
out.write('\\x')
out.write(hexdigits[(ord(byte) & 0xf0) >> 4])
out.write(hexdigits[ord(byte) & 0xf])
else:
out.write(byte)
out.write(quote)
class PyTupleObjectPtr(PyObjectPtr):
_typename = 'PyTupleObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
result = tuple([PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))])
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write('(')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
if self.field('ob_size') == 1:
out.write(',)')
else:
out.write(')')
class PyTypeObjectPtr(PyObjectPtr):
_typename = 'PyTypeObject'
def _unichr_is_printable(char):
# Logic adapted from Python 3's Tools/unicode/makeunicodedata.py
if char == u" ":
return True
import unicodedata
return unicodedata.category(char) not in ("C", "Z")
if sys.maxunicode >= 0x10000:
_unichr = unichr
else:
def _unichr(x):
if x < 0x10000:
return unichr(x)
x -= 0x10000
ch1 = 0xD800 | (x >> 10)
ch2 = 0xDC00 | (x & 0x3FF)
return unichr(ch1) + unichr(ch2)
class PyUnicodeObjectPtr(PyObjectPtr):
_typename = 'PyUnicodeObject'
def char_width(self):
_type_Py_UNICODE = gdb.lookup_type('Py_UNICODE')
return _type_Py_UNICODE.sizeof
def proxyval(self, visited):
global _is_pep393
if _is_pep393 is None:
fields = gdb.lookup_type('PyUnicodeObject').target().fields()
_is_pep393 = 'data' in [f.name for f in fields]
if _is_pep393:
may_have_surrogates = False
compact = self.field('_base')
ascii = compact['_base']
state = ascii['state']
is_compact_ascii = (int(state['ascii']) and int(state['compact']))
if not int(state['ready']):
field_length = long(compact['wstr_length'])
may_have_surrogates = True
field_str = ascii['wstr']
else:
field_length = long(ascii['length'])
if is_compact_ascii:
field_str = ascii.address + 1
elif int(state['compact']):
field_str = compact.address + 1
else:
field_str = self.field('data')['any']
repr_kind = int(state['kind'])
if repr_kind == 1:
field_str = field_str.cast(_type_unsigned_char_ptr)
elif repr_kind == 2:
field_str = field_str.cast(_type_unsigned_short_ptr)
elif repr_kind == 4:
field_str = field_str.cast(_type_unsigned_int_ptr)
else:
field_length = long(self.field('length'))
field_str = self.field('str')
may_have_surrogates = self.char_width() == 2
if not may_have_surrogates:
Py_UNICODEs = [int(field_str[i]) for i in safe_range(field_length)]
else:
Py_UNICODEs = []
i = 0
limit = safety_limit(field_length)
while i < limit:
ucs = int(field_str[i])
i += 1
if ucs < 0xD800 or ucs >= 0xDC00 or i == field_length:
Py_UNICODEs.append(ucs)
continue
ucs2 = int(field_str[i])
if ucs2 < 0xDC00 or ucs2 > 0xDFFF:
continue
code = (ucs & 0x03FF) << 10
code |= ucs2 & 0x03FF
code += 0x00010000
Py_UNICODEs.append(code)
i += 1
result = u''.join([_unichr(ucs) for ucs in Py_UNICODEs])
return result
def write_repr(self, out, visited):
proxy = self.proxyval(visited)
# to Python 2:
if "'" in proxy and '"' not in proxy:
quote = '"'
else:
quote = "'"
out.write(quote)
i = 0
while i < len(proxy):
ch = proxy[i]
i += 1
# Escape quotes and backslashes
if ch == quote or ch == '\\':
out.write('\\')
out.write(ch)
# Map special whitespace to '\t', \n', '\r'
elif ch == '\t':
out.write('\\t')
elif ch == '\n':
out.write('\\n')
elif ch == '\r':
out.write('\\r')
elif ch < ' ' or ch == 0x7F:
out.write('\\x')
out.write(hexdigits[(ord(ch) >> 4) & 0x000F])
out.write(hexdigits[ord(ch) & 0x000F])
elif ord(ch) < 0x7F:
out.write(ch)
else:
ucs = ch
ch2 = None
if sys.maxunicode < 0x10000:
if (i < len(proxy)
and 0xD800 <= ord(ch) < 0xDC00 \
and 0xDC00 <= ord(proxy[i]) <= 0xDFFF):
ch2 = proxy[i]
ucs = ch + ch2
i += 1
printable = _unichr_is_printable(ucs)
if printable:
try:
ucs.encode(ENCODING)
except UnicodeEncodeError:
printable = False
if not printable:
if ch2 is not None:
# wide characters.
code = (ord(ch) & 0x03FF) << 10
code |= ord(ch2) & 0x03FF
code += 0x00010000
else:
code = ord(ucs)
# Map 8-bit characters to '\\xhh'
if code <= 0xff:
out.write('\\x')
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
# Map 21-bit characters to '\U00xxxxxx'
elif code >= 0x10000:
out.write('\\U')
out.write(hexdigits[(code >> 28) & 0x0000000F])
out.write(hexdigits[(code >> 24) & 0x0000000F])
out.write(hexdigits[(code >> 20) & 0x0000000F])
out.write(hexdigits[(code >> 16) & 0x0000000F])
out.write(hexdigits[(code >> 12) & 0x0000000F])
out.write(hexdigits[(code >> 8) & 0x0000000F])
out.write(hexdigits[(code >> 4) & 0x0000000F])
out.write(hexdigits[code & 0x0000000F])
# Map 16-bit characters to '\uxxxx'
else:
out.write('\\u')
out.write(hexdigits[(code >> 12) & 0x000F])
out.write(hexdigits[(code >> 8) & 0x000F])
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
else:
# Copy characters as-is
out.write(ch)
if ch2 is not None:
out.write(ch2)
out.write(quote)
def int_from_int(gdbval):
return int(str(gdbval))
def stringify(val):
# TODO: repr() puts everything on one line; pformat can be nicer, but
# can lead to v.long results; this function isolates the choice
if True:
return repr(val)
else:
from pprint import pformat
return pformat(val)
class PyObjectPtrPrinter:
def __init__ (self, gdbval):
self.gdbval = gdbval
def to_string (self):
pyop = PyObjectPtr.from_pyobject_ptr(self.gdbval)
if True:
return pyop.get_truncated_repr(MAX_OUTPUT_LEN)
else:
# Generate full proxy value then stringify it.
# Doing so could be expensive
proxyval = pyop.proxyval(set())
return stringify(proxyval)
def pretty_printer_lookup(gdbval):
type = gdbval.type.unqualified()
if type.code == gdb.TYPE_CODE_PTR:
type = type.target().unqualified()
t = str(type)
if t in ("PyObject", "PyFrameObject", "PyUnicodeObject"):
return PyObjectPtrPrinter(gdbval)
def register (obj):
if obj is None:
obj = gdb
# Wire up the pretty-printer
obj.pretty_printers.append(pretty_printer_lookup)
register (gdb.current_objfile ())
# Unfortunately, the exact API exposed by the gdb module varies somewhat
# from build to build
# See http://bugs.python.org/issue8279?#msg102276
class Frame(object):
def __init__(self, gdbframe):
self._gdbframe = gdbframe
def older(self):
older = self._gdbframe.older()
if older:
return Frame(older)
else:
return None
def newer(self):
newer = self._gdbframe.newer()
if newer:
return Frame(newer)
else:
return None
def select(self):
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True
def get_index(self):
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index
# We divide frames into:
# - "python frames":
# - "bytecode frames" i.e. PyEval_EvalFrameEx
# - "other python frames": things that are of interest from a python
# POV, but aren't bytecode (e.g. GC, GIL)
def is_python_frame(self):
if self.is_evalframeex():
return True
if self.is_other_python_frame():
return True
return False
def is_evalframeex(self):
if self._gdbframe.name() == 'PyEval_EvalFrameEx':
if self._gdbframe.type() == gdb.NORMAL_FRAME:
return True
return False
def is_other_python_frame(self):
if self.is_waiting_for_gil():
return 'Waiting for the GIL'
elif self.is_gc_collect():
return 'Garbage-collecting'
else:
older = self.older()
if older and older._gdbframe.name() == 'PyCFunction_Call':
try:
func = older._gdbframe.read_var('func')
return str(func)
except RuntimeError:
return 'PyCFunction invocation (unable to read "func")'
return False
def is_waiting_for_gil(self):
# This assumes the _POSIX_THREADS version of Python/ceval_gil.h:
name = self._gdbframe.name()
if name:
return 'pthread_cond_timedwait' in name
def is_gc_collect(self):
return self._gdbframe.name() == 'collect'
def get_pyop(self):
try:
f = self._gdbframe.read_var('f')
frame = PyFrameObjectPtr.from_pyobject_ptr(f)
if not frame.is_optimized_out():
return frame
# gdb is unable to get the "f" argument of PyEval_EvalFrameEx()
# because it was "optimized out". Try to get "f" from the frame
# of the caller, PyEval_EvalCodeEx().
orig_frame = frame
caller = self._gdbframe.older()
if caller:
f = caller.read_var('f')
frame = PyFrameObjectPtr.from_pyobject_ptr(f)
if not frame.is_optimized_out():
return frame
return orig_frame
except ValueError:
return None
@classmethod
def get_selected_frame(cls):
_gdbframe = gdb.selected_frame()
if _gdbframe:
return Frame(_gdbframe)
return None
@classmethod
def get_selected_python_frame(cls):
frame = cls.get_selected_frame()
while frame:
if frame.is_python_frame():
return frame
frame = frame.older()
# Not found:
return None
@classmethod
def get_selected_bytecode_frame(cls):
frame = cls.get_selected_frame()
while frame:
if frame.is_evalframeex():
return frame
frame = frame.older()
# Not found:
return None
def print_summary(self):
if self.is_evalframeex():
pyop = self.get_pyop()
if pyop:
line = pyop.get_truncated_repr(MAX_OUTPUT_LEN)
write_unicode(sys.stdout, '
if not pyop.is_optimized_out():
line = pyop.current_line()
if line is not None:
sys.stdout.write(' %s\n' % line.strip())
else:
sys.stdout.write('
else:
info = self.is_other_python_frame()
if info:
sys.stdout.write('
else:
sys.stdout.write('
def print_traceback(self):
if self.is_evalframeex():
pyop = self.get_pyop()
if pyop:
pyop.print_traceback()
if not pyop.is_optimized_out():
line = pyop.current_line()
if line is not None:
sys.stdout.write(' %s\n' % line.strip())
else:
sys.stdout.write(' (unable to read python frame information)\n')
else:
info = self.is_other_python_frame()
if info:
sys.stdout.write(' %s\n' % info)
else:
sys.stdout.write(' (not a python frame)\n')
class PyList(gdb.Command):
    """gdb command "py-list": list source around the current Python frame.

    Usage:
        py-list            - list 11 lines around the current line
        py-list START      - list 10 lines starting at line START
        py-list START,END  - list lines START..END (closed interval)
    """
    def __init__(self):
        gdb.Command.__init__ (self,
                              "py-list",
                              gdb.COMMAND_FILES,
                              gdb.COMPLETE_NONE)
    def invoke(self, args, from_tty):
        import re
        start = None
        end = None
        # Single line number: "py-list 42" lists 42..51.
        # int() tolerates the surrounding whitespace captured by group(0).
        m = re.match(r'\s*(\d+)\s*', args)
        if m:
            start = int(m.group(0))
            end = start + 10
        # Explicit range: "py-list 42,60" (overrides the single-number match).
        m = re.match(r'\s*(\d+)\s*,\s*(\d+)\s*', args)
        if m:
            start, end = map(int, m.groups())
        # py-list requires an actual PyEval_EvalFrameEx frame:
        frame = Frame.get_selected_bytecode_frame()
        if not frame:
            print('Unable to locate gdb frame for python bytecode interpreter')
            return
        pyop = frame.get_pyop()
        if not pyop or pyop.is_optimized_out():
            print('Unable to read information on python frame')
            return
        filename = pyop.filename()
        lineno = pyop.current_line_num()
        # No explicit range given: center the listing on the current line.
        if start is None:
            start = lineno - 5
            end = lineno + 5
        if start<1:
            start = 1
        try:
            f = open(os_fsencode(filename), 'r')
        except IOError as err:
            sys.stdout.write('Unable to open %s: %s\n'
                             % (filename, err))
            return
        with f:
            all_lines = f.readlines()
            # start and end are 1-based, all_lines is 0-based;
            # so [start-1:end] as a python slice gives us [start, end] as a
            # closed interval
            for i, line in enumerate(all_lines[start-1:end]):
                linestr = str(i+start)
                # Highlight current line:
                if i + start == lineno:
                    linestr = '>' + linestr
                sys.stdout.write('%4s %s' % (linestr, line))
# ...and register the command:
PyList()
def move_in_stack(move_up):
    """Select the adjacent Python frame in the given direction and show it.

    Shared implementation for py-up (move_up=True, towards callers) and
    py-down (move_up=False, towards callees).  Prints a diagnostic when no
    further Python frame exists in that direction.
    """
    current = Frame.get_selected_python_frame()
    while current:
        neighbour = current.older() if move_up else current.newer()
        if not neighbour:
            break
        if neighbour.is_python_frame():
            # Found the target frame: make it gdb's selection, then summarize.
            if neighbour.select():
                neighbour.print_summary()
            return
        current = neighbour
    if move_up:
        print('Unable to find an older python frame')
    else:
        print('Unable to find a newer python frame')
class PyUp(gdb.Command):
    """gdb command "py-up": select and print the calling Python frame."""
    def __init__(self):
        gdb.Command.__init__ (self,
                              "py-up",
                              gdb.COMMAND_STACK,
                              gdb.COMPLETE_NONE)
    def invoke(self, args, from_tty):
        # args is unused; walk towards older (caller) frames.
        move_in_stack(move_up=True)
class PyDown(gdb.Command):
    """gdb command "py-down": select and print the called Python frame."""
    def __init__(self):
        gdb.Command.__init__ (self,
                              "py-down",
                              gdb.COMMAND_STACK,
                              gdb.COMPLETE_NONE)
    def invoke(self, args, from_tty):
        # args is unused; walk towards newer (callee) frames.
        move_in_stack(move_up=False)
# Not all builds of gdb have gdb.Frame.select
if hasattr(gdb.Frame, 'select'):
    PyUp()
    PyDown()
class PyBacktraceFull(gdb.Command):
    """gdb command "py-bt-full": per-frame summaries for the whole stack."""
    def __init__(self):
        gdb.Command.__init__ (self,
                              "py-bt-full",
                              gdb.COMMAND_STACK,
                              gdb.COMPLETE_NONE)
    def invoke(self, args, from_tty):
        # Walk the C stack outwards, summarizing every Python-level frame.
        frame = Frame.get_selected_python_frame()
        while frame:
            if frame.is_python_frame():
                frame.print_summary()
            frame = frame.older()
PyBacktraceFull()
class PyBacktrace(gdb.Command):
    """gdb command "py-bt": Python traceback (most recent call first)."""
    def __init__(self):
        gdb.Command.__init__ (self,
                              "py-bt",
                              gdb.COMMAND_STACK,
                              gdb.COMPLETE_NONE)
    def invoke(self, args, from_tty):
        sys.stdout.write('Traceback (most recent call first):\n')
        # Walk the C stack outwards, printing every Python-level frame.
        frame = Frame.get_selected_python_frame()
        while frame:
            if frame.is_python_frame():
                frame.print_traceback()
            frame = frame.older()
PyBacktrace()
class PyPrint(gdb.Command):
    """gdb command "py-print NAME": look up and print a Python variable."""
    def __init__(self):
        gdb.Command.__init__ (self,
                              "py-print",
                              gdb.COMMAND_DATA,
                              gdb.COMPLETE_NONE)
    def invoke(self, args, from_tty):
        name = str(args)
        frame = Frame.get_selected_python_frame()
        if not frame:
            print('Unable to locate python frame')
            return
        pyop_frame = frame.get_pyop()
        if not pyop_frame:
            print('Unable to read information on python frame')
            return
        # get_var_by_name resolves through local/global/builtin scopes and
        # reports which scope matched.
        pyop_var, scope = pyop_frame.get_var_by_name(name)
        if pyop_var:
            print('%s %r = %s'
                  % (scope,
                     name,
                     pyop_var.get_truncated_repr(MAX_OUTPUT_LEN)))
        else:
            print('%r not found' % name)
PyPrint()
class PyLocals(gdb.Command):
    """gdb command "py-locals": print the selected Python frame's locals."""
    def __init__(self):
        gdb.Command.__init__ (self,
                              "py-locals",
                              gdb.COMMAND_DATA,
                              gdb.COMPLETE_NONE)
    def invoke(self, args, from_tty):
        """Print "name = value" for every local of the current Python frame.

        args is ignored: this command always dumps all locals.  (The previous
        `name = str(args)` assignment was dead code and has been removed.)
        """
        frame = Frame.get_selected_python_frame()
        if not frame:
            print('Unable to locate python frame')
            return
        pyop_frame = frame.get_pyop()
        if not pyop_frame:
            print('Unable to read information on python frame')
            return
        for pyop_name, pyop_value in pyop_frame.iter_locals():
            print('%s = %s'
                  % (pyop_name.proxyval(set()),
                     pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)))
# ...and register the command:
PyLocals()
| true | true |
1c2c3d61eee8035467645bf5c46c77b5e5e49dc2 | 8,611 | py | Python | brainMRI/dataset.py | giacomodeodato/BrainMRIDataset | 7f2dd315e7c970c61651e025dcafbed94caa8924 | [
"MIT"
] | 1 | 2022-03-04T06:18:21.000Z | 2022-03-04T06:18:21.000Z | brainMRI/dataset.py | giacomodeodato/BrainMRIDataset | 7f2dd315e7c970c61651e025dcafbed94caa8924 | [
"MIT"
] | null | null | null | brainMRI/dataset.py | giacomodeodato/BrainMRIDataset | 7f2dd315e7c970c61651e025dcafbed94caa8924 | [
"MIT"
] | null | null | null | import os
import numpy as np
import h5py
from skimage.io import imread
from datetime import datetime
from tqdm.auto import tqdm
from .utils import preprocess_volume, preprocess_mask
class Dataset():
    """
    TCGA-LGG dataset of brain MRIs for Lower Grade Glioma segmentation.

    Attributes:
        IMG_SHAPE : tuple
            Shape of the images: (H, W).
        VOLUMES : list
            Names of the volumes (channel order of each sample).

    Methods:
        make_dataset(raw_data_dir='./kaggle_3m', data_dir='./data')
            Creates a virtual HDF5 dataset with preprocessed images and metadata.
    """
    IMG_SHAPE = (256, 256)
    VOLUMES = ["pre-contrast", "FLAIR", "post-contrast"]

    def __init__(self, path="./data/brainMRI.h5", train=True, volume=None, seed=42):
        """Initializes the brain MRI dataset.

        Args:
            path : str, optional
                Path to the virtual HDF5 dataset.
                Default is './data/brainMRI.h5'.
            train : bool, optional
                Slice of the dataset to select.
                Default is True, selects 80% of the patients; False selects
                the complementary ~20%.
            volume : str, optional.
                Volume images to return (one of VOLUMES).
                Default is None, returns all the volumes.
            seed : int, optional.
                Seed for the random number generator used to split the
                patients into train and test sets.

        Raises:
            RuntimeError: if no HDF5 file exists at `path`.

        Returns: instance of the brain MRI dataset
        """
        if not os.path.exists(path):
            raise RuntimeError("Dataset not found at '{}'.\nUse Dataset.make_dataset() to create it.".format(path))
        # NOTE: assert is stripped under `python -O`; kept for backward
        # compatibility (callers may rely on AssertionError).
        assert volume in [None,] + Dataset.VOLUMES, 'volume can only be None or one of {}'.format(Dataset.VOLUMES)
        self.dataset = h5py.File(path, 'r')
        self.volume = volume
        # Start with every slice selected; the `patients` property below
        # reads through this index vector.
        self.index_vector = np.arange(len(self.dataset['slices']))
        patients = np.unique(self.patients)
        rng = np.random.default_rng(seed=seed)
        # Patient-level split: always draw the 80% training patients, then
        # take the complement for the test set.  (Previously the test set was
        # an *independent* rng.choice of 20%, which could overlap the
        # training patients and leak data between splits.)
        train_patients = rng.choice(
            patients,
            size=int(0.8 * len(patients)),
            replace=False,
            shuffle=False
        )
        data_patients = train_patients if train else np.setdiff1d(patients, train_patients)
        selection_mask = np.zeros_like(self.index_vector)
        for patient in data_patients:
            selection_mask = selection_mask + np.array(self.patients == patient)
        self.index_vector = np.where(selection_mask)[0]

    def __getitem__(self, index):
        """Return (image, mask, (patient, slice_index)) for sample `index`."""
        index = self.index_vector[index]
        img = self.dataset['images'][index].astype(np.float32)
        mask = self.dataset['masks'][index]
        patient = self.dataset['patients'][index]
        # Renamed from `slice`, which shadowed the builtin.
        slice_index = self.dataset['slices'][index]
        if self.volume is not None:
            # Keep a leading channel axis even for a single volume.
            img = img[self.VOLUMES.index(self.volume)]
            img = img[np.newaxis, ...]
        return img, mask, (patient, slice_index)

    def __len__(self):
        return len(self.index_vector)

    @property
    def images(self):
        """Images of the selected split, shape (N, n_volumes, H, W)."""
        return self.dataset["images"][self.index_vector]

    @property
    def masks(self):
        """Segmentation masks of the selected split, shape (N, 1, H, W)."""
        return self.dataset["masks"][self.index_vector]

    @property
    def patients(self):
        """Patient identifier for every selected slice."""
        return self.dataset["patients"][self.index_vector]

    @property
    def slices(self):
        """Slice number (within its scan) for every selected slice."""
        return self.dataset["slices"][self.index_vector]

    @staticmethod
    def make_dataset(raw_data_dir='./kaggle_3m', data_dir='./data'):
        """Creates a virtual HDF5 dataset with preprocessed images and metadata.

        Data should be previously downloaded from https://www.kaggle.com/mateuszbuda/lgg-mri-segmentation.

        Args:
            raw_data_dir : str, optional
                Path to the raw data directory.
            data_dir : str, optional
                Path to the processed data directory.

        Raises:
            OSError: if `raw_data_dir` does not exist.
        """
        # check data directories
        if not os.path.exists(raw_data_dir):
            print('{} does not exist.\n You can download raw data at https://www.kaggle.com/mateuszbuda/lgg-mri-segmentation'.format(
                raw_data_dir
            ))
            # Attach the offending path to the exception (was a bare OSError).
            raise OSError('{} does not exist'.format(raw_data_dir))
        if not os.path.exists(data_dir):
            os.mkdir(data_dir)
        h5_dir = os.path.join(data_dir, 'hdf5')
        if not os.path.exists(h5_dir):
            os.mkdir(h5_dir)
        patient_dirs = [
            d
            for d in os.listdir(raw_data_dir)
            if os.path.isdir(
                os.path.join(raw_data_dir, d)
            )
        ]
        pbar = tqdm(total=len(patient_dirs), desc='Retrieving data', leave=False)
        n_samples = 0
        for patient_dir in patient_dirs:
            # retrieve patient images and masks
            pbar.set_description(f'{patient_dir} [Retrieving data]')
            dir_path = os.path.join(raw_data_dir, patient_dir)
            img_names = [
                x
                for x in os.listdir(dir_path)
                if 'mask' not in x
            ]
            # Sort by the slice number encoded in the file name
            # (TCGA_<site>_<id>_<date>_<slice>.tif).
            img_names.sort(
                key=lambda x: int(x.split(".")[0].split("_")[4])
            )
            n_slices = len(img_names)
            n_samples += n_slices
            images = np.empty((n_slices, *Dataset.IMG_SHAPE, len(Dataset.VOLUMES)), dtype=np.uint8)
            masks = np.empty((n_slices, *Dataset.IMG_SHAPE), dtype=np.uint8)
            for i, name in enumerate(img_names):
                img_path = os.path.join(dir_path, name)
                prefix, ext = os.path.splitext(img_path)
                mask_path = prefix + '_mask' + ext
                images[i] = imread(img_path)
                masks[i] = imread(mask_path)
            # preprocess images and metadata
            pbar.set_description(f'{patient_dir} [Preprocessing data]')
            images = preprocess_volume(images)
            masks = preprocess_mask(masks)
            patient = np.array(("_".join(patient_dir.split("_")[:-1]),)*n_slices)
            slices = np.array([
                int(x.split('.')[0].split("_")[4])
                for x in img_names
            ], dtype=np.uint8)
            # create patient dataset
            pbar.set_description(f'{patient_dir} [Saving data]')
            h5_file_path = os.path.join(h5_dir, patient_dir + '.h5')
            with h5py.File(h5_file_path, 'w') as h5_file:
                h5_file.attrs['timestamp'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                h5_file.attrs['info'] = h5py.version.info
                # Store channels-first: (n_slices, n_volumes, H, W) / (n_slices, 1, H, W).
                h5_file.create_dataset("images", data=np.moveaxis(images, 3, 1))
                h5_file.create_dataset("masks", data=np.moveaxis(masks[..., np.newaxis], 3, 1))
                h5_file.create_dataset("patients", data=patient.astype(h5py.string_dtype(encoding='utf-8')))
                h5_file.create_dataset("slices", data=slices)
            pbar.update(1)
        # create virtual layouts
        pbar.set_description('Creating virtual dataset')
        layouts = {
            "images": h5py.VirtualLayout(
                shape=(n_samples, len(Dataset.VOLUMES), *Dataset.IMG_SHAPE),
                dtype=np.float16
            ),
            "masks": h5py.VirtualLayout(
                shape=(n_samples, 1, *Dataset.IMG_SHAPE),
                dtype=np.uint8
            ),
            "patients": h5py.VirtualLayout(
                shape=(n_samples,),
                dtype=h5py.string_dtype(encoding='utf-8')
            ),
            "slices": h5py.VirtualLayout(
                shape=(n_samples,),
                dtype=np.uint8
            )
        }
        # fill the virtual layouts; sorted() makes the global sample order
        # deterministic across runs (os.listdir order is arbitrary)
        i = 0
        for filename in sorted(os.listdir(h5_dir)):
            file_path = os.path.join(h5_dir, filename)
            with h5py.File(file_path, "r") as h5_file:
                n_slices = h5_file['slices'].shape[0]
                for k in h5_file.keys():
                    layouts[k][i:i+n_slices] = h5py.VirtualSource(h5_file[k])
            i += n_slices
        # create virtual dataset
        vds_path = os.path.join(data_dir, 'brainMRI.h5')
        with h5py.File(vds_path, "w") as h5_file:
            h5_file.attrs['timestamp'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            h5_file.attrs['h5py_info'] = h5py.version.info
            h5_file.attrs['dataset'] = 'TCGA-LGG Brain MRI'
            h5_file.attrs['github'] = 'https://github.com/giacomodeodato/BrainMRIDataset'
            h5_file.attrs['website'] = 'https://www.kaggle.com/mateuszbuda/lgg-mri-segmentation'
            for name, layout in layouts.items():
                h5_file.create_virtual_dataset(name, layout)
        pbar.close()
import numpy as np
import h5py
from skimage.io import imread
from datetime import datetime
from tqdm.auto import tqdm
from .utils import preprocess_volume, preprocess_mask
class Dataset():
IMG_SHAPE = (256, 256)
VOLUMES = ["pre-contrast", "FLAIR", "post-contrast"]
def __init__(self, path="./data/brainMRI.h5", train=True, volume=None, seed=42):
if not os.path.exists(path):
raise RuntimeError("Dataset not found at '{}'.\nUse Dataset.make_dataset() to create it.".format(path))
assert volume in [None,] + Dataset.VOLUMES, 'volume can only be None or one of {}'.format(Dataset.VOLUMES)
self.dataset = h5py.File(path, 'r')
self.volume = volume
self.index_vector = np.arange(len(self.dataset['slices']))
patients = np.unique(self.patients)
rng = np.random.default_rng(seed=seed)
data_patients = rng.choice(
patients,
size=int((0.8 if train else 0.2) * len(patients)),
replace=False,
shuffle=False
)
bool_vector = np.zeros_like(self.index_vector)
for patient in data_patients:
bool_vector = bool_vector + np.array(self.patients == patient)
self.index_vector = np.where(bool_vector)[0]
def __getitem__(self, index):
index = self.index_vector[index]
img = self.dataset['images'][index].astype(np.float32)
mask = self.dataset['masks'][index]
patient = self.dataset['patients'][index]
slice = self.dataset['slices'][index]
if self.volume is not None:
img = img[self.VOLUMES.index(self.volume)]
img = img[np.newaxis, ...]
return img, mask, (patient, slice)
def __len__(self):
return len(self.index_vector)
@property
def images(self):
return self.dataset["images"][self.index_vector]
@property
def masks(self):
return self.dataset["masks"][self.index_vector]
@property
def patients(self):
return self.dataset["patients"][self.index_vector]
@property
def slices(self):
return self.dataset["slices"][self.index_vector]
@staticmethod
def make_dataset(raw_data_dir='./kaggle_3m', data_dir='./data'):
if not os.path.exists(raw_data_dir):
print('{} does not exist.\n You can download raw data at https://www.kaggle.com/mateuszbuda/lgg-mri-segmentation'.format(
raw_data_dir
))
raise OSError
if not os.path.exists(data_dir):
os.mkdir(data_dir)
h5_dir = os.path.join(data_dir, 'hdf5')
if not os.path.exists(h5_dir):
os.mkdir(h5_dir)
patient_dirs = [
d
for d in os.listdir(raw_data_dir)
if os.path.isdir(
os.path.join(raw_data_dir, d)
)
]
pbar = tqdm(total=len(patient_dirs), desc='Retrieving data', leave=False)
n_samples = 0
for patient_dir in patient_dirs:
pbar.set_description(f'{patient_dir} [Retrieving data]')
dir_path = os.path.join(raw_data_dir, patient_dir)
img_names = [
x
for x in os.listdir(dir_path)
if 'mask' not in x
]
img_names.sort(
key=lambda x: int(x.split(".")[0].split("_")[4])
)
n_slices = len(img_names)
n_samples += n_slices
images = np.empty((n_slices, *Dataset.IMG_SHAPE, len(Dataset.VOLUMES)), dtype=np.uint8)
masks = np.empty((n_slices, *Dataset.IMG_SHAPE), dtype=np.uint8)
for i, name in enumerate(img_names):
img_path = os.path.join(dir_path, name)
prefix, ext = os.path.splitext(img_path)
mask_path = prefix + '_mask' + ext
images[i] = imread(img_path)
masks[i] = imread(mask_path)
pbar.set_description(f'{patient_dir} [Preprocessing data]')
images = preprocess_volume(images)
masks = preprocess_mask(masks)
patient = np.array(("_".join(patient_dir.split("_")[:-1]),)*n_slices)
slices = np.array([
int(x.split('.')[0].split("_")[4])
for x in img_names
], dtype=np.uint8)
pbar.set_description(f'{patient_dir} [Saving data]')
h5_file_path = os.path.join(h5_dir, patient_dir + '.h5')
with h5py.File(h5_file_path, 'w') as h5_file:
h5_file.attrs['timestamp'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
h5_file.attrs['info'] = h5py.version.info
h5_file.create_dataset("images", data=np.moveaxis(images, 3, 1))
h5_file.create_dataset("masks", data=np.moveaxis(masks[..., np.newaxis], 3, 1))
h5_file.create_dataset("patients", data=patient.astype(h5py.string_dtype(encoding='utf-8')))
h5_file.create_dataset("slices", data=slices)
pbar.update(1)
pbar.set_description('Creating virtual dataset')
layouts = {
"images": h5py.VirtualLayout(
shape=(n_samples, len(Dataset.VOLUMES), *Dataset.IMG_SHAPE),
dtype=np.float16
),
"masks": h5py.VirtualLayout(
shape=(n_samples, 1, *Dataset.IMG_SHAPE),
dtype=np.uint8
),
"patients": h5py.VirtualLayout(
shape=(n_samples,),
dtype=h5py.string_dtype(encoding='utf-8')
),
"slices": h5py.VirtualLayout(
shape=(n_samples,),
dtype=np.uint8
)
}
i = 0
for filename in os.listdir(h5_dir):
file_path = os.path.join(h5_dir, filename)
with h5py.File(file_path, "r") as h5_file:
n_slices = h5_file['slices'].shape[0]
for k in h5_file.keys():
layouts[k][i:i+n_slices] = h5py.VirtualSource(h5_file[k])
i += n_slices
vds_path = os.path.join(data_dir, 'brainMRI.h5')
with h5py.File(vds_path, "w") as h5_file:
h5_file.attrs['timestamp'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
h5_file.attrs['h5py_info'] = h5py.version.info
h5_file.attrs['dataset'] = 'TCGA-LGG Brain MRI'
h5_file.attrs['github'] = 'https://github.com/giacomodeodato/BrainMRIDataset'
h5_file.attrs['website'] = 'https://www.kaggle.com/mateuszbuda/lgg-mri-segmentation'
for name, layout in layouts.items():
h5_file.create_virtual_dataset(name, layout)
pbar.close() | true | true |
1c2c3db7c37d1820053f9f0dfc995aac11ca4116 | 1,837 | py | Python | contentcuration/contentcuration/migrations/0052_auto_20170201_1155.py | Tlazypanda/studio | cd1c2f169c705027cdd808cbbcae907d0a9b21d2 | [
"MIT"
] | 1 | 2019-03-30T18:14:25.000Z | 2019-03-30T18:14:25.000Z | contentcuration/contentcuration/migrations/0052_auto_20170201_1155.py | Tlazypanda/studio | cd1c2f169c705027cdd808cbbcae907d0a9b21d2 | [
"MIT"
] | 2 | 2019-04-06T07:06:08.000Z | 2019-04-08T23:33:53.000Z | contentcuration/contentcuration/migrations/0052_auto_20170201_1155.py | Tlazypanda/studio | cd1c2f169c705027cdd808cbbcae907d0a9b21d2 | [
"MIT"
] | 1 | 2020-10-20T05:21:56.000Z | 2020-10-20T05:21:56.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-02-01 19:55
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.7 (see header): adds
    # AssessmentItem.source_url and refreshes the hard-coded choice lists on
    # FileFormat.extension / FormatPreset.id.  Do not hand-edit the choices;
    # regenerate from the models instead.
    dependencies = [
        ('contentcuration', '0051_auto_20170126_1633'),
    ]
    operations = [
        # New optional field recording where an assessment item was imported from.
        migrations.AddField(
            model_name='assessmentitem',
            name='source_url',
            field=models.CharField(blank=True, max_length=400, null=True),
        ),
        migrations.AlterField(
            model_name='fileformat',
            name='extension',
            field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), (
                'png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip')], max_length=40, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='formatpreset',
            name='id',
            field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('document_thumbnail', 'Thumbnail'), (
                'exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150, primary_key=True, serialize=False),
        ),
    ]
| 54.029412 | 356 | 0.617311 |
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0051_auto_20170126_1633'),
]
operations = [
migrations.AddField(
model_name='assessmentitem',
name='source_url',
field=models.CharField(blank=True, max_length=400, null=True),
),
migrations.AlterField(
model_name='fileformat',
name='extension',
field=models.CharField(choices=[('mp4', 'MP4 Video'), ('vtt', 'VTT Subtitle'), ('srt', 'SRT Subtitle'), ('mp3', 'MP3 Audio'), ('pdf', 'PDF Document'), ('jpg', 'JPG Image'), ('jpeg', 'JPEG Image'), (
'png', 'PNG Image'), ('gif', 'GIF Image'), ('json', 'JSON'), ('svg', 'SVG Image'), ('perseus', 'Perseus Exercise'), ('zip', 'HTML5 Zip')], max_length=40, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='formatpreset',
name='id',
field=models.CharField(choices=[('high_res_video', 'High Resolution'), ('low_res_video', 'Low Resolution'), ('vector_video', 'Vectorized'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), ('audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('document_thumbnail', 'Thumbnail'), (
'exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail'), ('exercise_image', 'Exercise Image'), ('exercise_graphie', 'Exercise Graphie'), ('channel_thumbnail', 'Channel Thumbnail'), ('topic_thumbnail', 'Thumbnail'), ('html5_zip', 'HTML5 Zip'), ('html5_thumbnail', 'HTML5 Thumbnail')], max_length=150, primary_key=True, serialize=False),
),
]
| true | true |
1c2c3fdce517842bc7e8701a457753679cbadd18 | 5,257 | py | Python | docs/conf.py | SylvainCorlay/xhale | 70e2ded348b19ec64d80d3be9b52894f35e73d23 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | SylvainCorlay/xhale | 70e2ded348b19ec64d80d3be9b52894f35e73d23 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | SylvainCorlay/xhale | 70e2ded348b19ec64d80d3be9b52894f35e73d23 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# xhale documentation build configuration file, created by
# sphinx-quickstart on Sun Apr 22 20:02:10 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
def setup(app):
app.add_stylesheet("main_stylesheet.css")
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'xhale'
copyright = '2018, Loic gouarin'
author = 'Loic gouarin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from xhale.version import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_logo = "quantstack-white.svg"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'xhaledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'xhale.tex', 'xhale Documentation',
'Loic gouarin', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'xhale', 'xhale Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'xhale', 'xhale Documentation',
author, 'xhale', 'One line description of project.',
'Miscellaneous'),
]
| 29.869318 | 79 | 0.684231 |
def setup(app):
app.add_stylesheet("main_stylesheet.css")
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = 'xhale'
copyright = '2018, Loic gouarin'
author = 'Loic gouarin'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from xhale.version import __version__
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_logo = "quantstack-white.svg"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'xhaledoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'xhale.tex', 'xhale Documentation',
'Loic gouarin', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'xhale', 'xhale Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'xhale', 'xhale Documentation',
author, 'xhale', 'One line description of project.',
'Miscellaneous'),
]
| true | true |
1c2c402b5349167d4dbd224778cc996b162ae5b3 | 2,332 | py | Python | bin/bootstrapping/schema_conversion_tool.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | null | null | null | bin/bootstrapping/schema_conversion_tool.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | bin/bootstrapping/schema_conversion_tool.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 1 | 2020-07-24T18:47:35.000Z | 2020-07-24T18:47:35.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
"""Wrapper for Gcloud-installed Schema Conversion Tool."""
import os
import bootstrapping
from googlecloudsdk.command_lib.util import java
from googlecloudsdk.core.updater import update_manager
# Path to the unpacked component
_COMPONENT_DIR = os.path.join(bootstrapping.SDK_ROOT,
'platform', 'schema_conversion_tool')
# Path to the directory of unpacked jars relative to the SDK root
_JAR_DIR = os.path.join(_COMPONENT_DIR, 'lib')
_COMPONENT_ID = 'schema-conversion-tool'
def main():
    """Launches the Schema Conversion Tool."""
    # Record the invocation and make sure the component is installed and
    # up to date before launching (may restart this script after install).
    bootstrapping.CommandStart(_COMPONENT_ID, component_id=_COMPONENT_ID)
    bootstrapping.CheckUpdates(_COMPONENT_ID)
    update_manager.UpdateManager.EnsureInstalledAndRestart(
        [_COMPONENT_ID], command=__file__)
    # Prefer Java 9+; fall back to Java 8 and remember which we got, since
    # the 9+ path needs extra --add-opens flags below.
    try:
        java_bin = java.RequireJavaInstalled('Schema Conversion Tool',
                                             min_version=9)
        java_9plus = True
    except java.JavaVersionError:
        java_bin = java.RequireJavaInstalled('Schema Conversion Tool',
                                             min_version=8)
        java_9plus = False
    # Disable the tool's own update checker; gcloud manages updates.
    os.environ.setdefault('SCT_UPDATE_CHECK', 'false')
    jar_name = 'schema_conversion_gui.jar'
    main_jar = os.path.join(_JAR_DIR, jar_name)
    # Accept a platform-appropriate default added as 1st arg in sct.sh/sct.cmd.
    working_dir_default = bootstrapping.GetDecodedArgv()[1]
    flags = [
        '-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager',
        '-Dspring.profiles.active=production',
        '-Dgcloud.component.dir={}'.format(_COMPONENT_DIR),
        '-Dsct.working.dir.default={}'.format(working_dir_default),
        '-jar',
        main_jar,
    ]
    if java_9plus:
        # Open modules to reflection explicitly to avoid Java 9+ warnings.
        flags = [
            '--add-opens', 'java.base/java.io=ALL-UNNAMED',
            '--add-opens', 'java.base/java.lang=ALL-UNNAMED',
            '--add-opens', 'java.base/java.net=ALL-UNNAMED',
            '--add-opens', 'java.rmi/sun.rmi.transport=ALL-UNNAMED',
        ] + flags
    bootstrapping.ExecuteJarTool(
        java_bin,
        _JAR_DIR,
        jar_name,
        None,  # No main classname for Springboot JAR. Use -jar flag instead.
        flags,
        '--server.address=127.0.0.1')
if __name__ == '__main__':
    main()
| 32.388889 | 77 | 0.683105 |
import os
import bootstrapping
from googlecloudsdk.command_lib.util import java
from googlecloudsdk.core.updater import update_manager
_COMPONENT_DIR = os.path.join(bootstrapping.SDK_ROOT,
'platform', 'schema_conversion_tool')
_JAR_DIR = os.path.join(_COMPONENT_DIR, 'lib')
_COMPONENT_ID = 'schema-conversion-tool'
def main():
bootstrapping.CommandStart(_COMPONENT_ID, component_id=_COMPONENT_ID)
bootstrapping.CheckUpdates(_COMPONENT_ID)
update_manager.UpdateManager.EnsureInstalledAndRestart(
[_COMPONENT_ID], command=__file__)
try:
java_bin = java.RequireJavaInstalled('Schema Conversion Tool',
min_version=9)
java_9plus = True
except java.JavaVersionError:
java_bin = java.RequireJavaInstalled('Schema Conversion Tool',
min_version=8)
java_9plus = False
os.environ.setdefault('SCT_UPDATE_CHECK', 'false')
jar_name = 'schema_conversion_gui.jar'
main_jar = os.path.join(_JAR_DIR, jar_name)
working_dir_default = bootstrapping.GetDecodedArgv()[1]
flags = [
'-Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager',
'-Dspring.profiles.active=production',
'-Dgcloud.component.dir={}'.format(_COMPONENT_DIR),
'-Dsct.working.dir.default={}'.format(working_dir_default),
'-jar',
main_jar,
]
if java_9plus:
flags = [
'--add-opens', 'java.base/java.io=ALL-UNNAMED',
'--add-opens', 'java.base/java.lang=ALL-UNNAMED',
'--add-opens', 'java.base/java.net=ALL-UNNAMED',
'--add-opens', 'java.rmi/sun.rmi.transport=ALL-UNNAMED',
] + flags
bootstrapping.ExecuteJarTool(
java_bin,
_JAR_DIR,
jar_name,
None,
flags,
'--server.address=127.0.0.1')
if __name__ == '__main__':
main()
| true | true |
1c2c40d45e4de0a5a61b1cd7ccc4c43ca2496bb6 | 6,188 | py | Python | docs/source/conf.py | delepoulle/rawls | 9e57be0b79d8ea09873690f1820cb52640a2a85d | [
"MIT"
] | null | null | null | docs/source/conf.py | delepoulle/rawls | 9e57be0b79d8ea09873690f1820cb52640a2a85d | [
"MIT"
] | 1 | 2021-04-16T06:51:25.000Z | 2021-04-16T06:51:25.000Z | docs/source/conf.py | delepoulle/rawls | 9e57be0b79d8ea09873690f1820cb52640a2a85d | [
"MIT"
] | 1 | 2021-04-13T08:30:14.000Z | 2021-04-13T08:30:14.000Z | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../../rawls'))
# -- Project information -----------------------------------------------------
project = 'rawls'
copyright = '2020, Jérôme BUISINE'
author = 'Jérôme BUISINE'
# The short X.Y version
version = '1.1.7'
# The full version, including alpha/beta/rc tags
release = 'v1.1.7'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinx.ext.coverage',
#'autoapi.extension'
]
# autoapi_add_toctree_entry = True
# autoapi_template_dir = '_autoapi_templates'
# autoapi_dirs = ['../../rawls']
autosummary_generate = True
autodoc_default_flags = ['members']
autodoc_member_order = 'groupwise'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'canonical_url': '',
#'analytics_id': 'UA-XXXXXXX-1',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
#'vcs_pageview_mode': '',
# Toc options
'collapse_navigation': True,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
html_context = {
'display_github': True,
'github_user': 'prise-3d',
'github_repo': 'rawls',
'github_version': 'master/docs/source/'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'rawlsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'rawls.tex', 'rawls Documentation',
'Jérôme BUISINE', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rawls', 'rawls Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'rawls', 'rawls Documentation',
author, 'rawls', 'raw light simulation image file reader/converter.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration ------------------------------------------------- | 29.051643 | 79 | 0.650452 |
import os
import sys
sys.path.insert(0, os.path.abspath('../../../rawls'))
project = 'rawls'
copyright = '2020, Jérôme BUISINE'
author = 'Jérôme BUISINE'
version = '1.1.7'
release = 'v1.1.7'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinx.ext.coverage',
]
autosummary_generate = True
autodoc_default_flags = ['members']
autodoc_member_order = 'groupwise'
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = []
pygments_style = None
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'canonical_url': '',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
'collapse_navigation': True,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
html_context = {
'display_github': True,
'github_user': 'prise-3d',
'github_repo': 'rawls',
'github_version': 'master/docs/source/'
}
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'rawlsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'rawls.tex', 'rawls Documentation',
'Jérôme BUISINE', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rawls', 'rawls Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'rawls', 'rawls Documentation',
author, 'rawls', 'raw light simulation image file reader/converter.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration ------------------------------------------------- | true | true |
1c2c41715db2b4c61d502f76ba519ca242650b1d | 972 | py | Python | kubernetes/test/test_v1_secret_env_source.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_secret_env_source.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_secret_env_source.py | anemerovsky-essextec/python | 6e40b9169b27c3f1f9422c0f6dd1cd9caef8d57c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_secret_env_source import V1SecretEnvSource
class TestV1SecretEnvSource(unittest.TestCase):
""" V1SecretEnvSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1SecretEnvSource(self):
"""
Test V1SecretEnvSource
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_secret_env_source.V1SecretEnvSource()
pass
if __name__ == '__main__':
unittest.main()
| 21.6 | 105 | 0.711934 |
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_secret_env_source import V1SecretEnvSource
class TestV1SecretEnvSource(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1SecretEnvSource(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
1c2c4174e9a9774b26b5d7443b67579a36b5ab8e | 4,360 | py | Python | instances/pspnet/ade.pspnet.R50_v1c/network.py | dontLoveBugs/MyTorch | d14bd1a231bde7f2e05282f86c640bcce4a55baf | [
"MIT"
] | 1 | 2020-02-25T00:35:00.000Z | 2020-02-25T00:35:00.000Z | instances/pspnet/ade.pspnet.R50_v1c/network.py | dontLoveBugs/MyTorch | d14bd1a231bde7f2e05282f86c640bcce4a55baf | [
"MIT"
] | null | null | null | instances/pspnet/ade.pspnet.R50_v1c/network.py | dontLoveBugs/MyTorch | d14bd1a231bde7f2e05282f86c640bcce4a55baf | [
"MIT"
] | null | null | null | # encoding: utf-8
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.backbone import resnet50
from modules.ops.seg.seg_oprs import ConvBnRelu
# 读取配置文件
from modules.engine.seg.config import Config
config = Config(config_file='./config.json').get_config()
class PSPNet(nn.Module):
def __init__(self, out_planes, criterion, pretrained_model=None,
norm_layer=nn.BatchNorm2d):
super(PSPNet, self).__init__()
self.backbone = resnet50(pretrained_model, norm_layer=norm_layer,
bn_eps=config.model.bn_eps,
bn_momentum=config.model.bn_momentum,
deep_stem=True, stem_width=64)
self.backbone.layer3.apply(partial(self._nostride_dilate, dilate=2))
self.backbone.layer4.apply(partial(self._nostride_dilate, dilate=4))
self.business_layer = []
self.psp_layer = PyramidPooling('psp', out_planes, 2048,
norm_layer=norm_layer)
self.aux_layer = nn.Sequential(
ConvBnRelu(1024, 1024, 3, 1, 1,
has_bn=True,
has_relu=True, has_bias=False, norm_layer=norm_layer),
nn.Dropout2d(0.1, inplace=False),
nn.Conv2d(1024, out_planes, kernel_size=1)
)
self.business_layer.append(self.psp_layer)
self.business_layer.append(self.aux_layer)
self.criterion = criterion
def forward(self, data, label=None):
blocks = self.backbone(data)
psp_fm = self.psp_layer(blocks[-1])
aux_fm = self.aux_layer(blocks[-2])
psp_fm = F.interpolate(psp_fm, scale_factor=8, mode='bilinear',
align_corners=True)
aux_fm = F.interpolate(aux_fm, scale_factor=8, mode='bilinear',
align_corners=True)
psp_fm = F.log_softmax(psp_fm, dim=1)
aux_fm = F.log_softmax(aux_fm, dim=1)
if label is not None:
# TODO: using loss warpper
loss = self.criterion(psp_fm, label)
aux_loss = self.criterion(aux_fm, label)
loss = loss + 0.4 * aux_loss
# TODO: cumpute score
# TODO: visulization
return loss
return psp_fm
# @staticmethod
def _nostride_dilate(self, m, dilate):
if isinstance(m, nn.Conv2d):
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate // 2, dilate // 2)
m.padding = (dilate // 2, dilate // 2)
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
class PyramidPooling(nn.Module):
def __init__(self, name, out_planes, fc_dim=4096, pool_scales=[1, 2, 3, 6],
norm_layer=nn.BatchNorm2d):
super(PyramidPooling, self).__init__()
self.ppm = []
for scale in pool_scales:
self.ppm.append(nn.Sequential(OrderedDict([
('{}/pool_1'.format(name), nn.AdaptiveAvgPool2d(scale)),
('{}/cbr'.format(name),
ConvBnRelu(fc_dim, 512, 1, 1, 0, has_bn=True,
has_relu=True, has_bias=False,
norm_layer=norm_layer))
])))
self.ppm = nn.ModuleList(self.ppm)
self.conv6 = nn.Sequential(
ConvBnRelu(fc_dim + len(pool_scales) * 512, 512, 3, 1, 1,
has_bn=True,
has_relu=True, has_bias=False, norm_layer=norm_layer),
nn.Dropout2d(0.1, inplace=False),
nn.Conv2d(512, out_planes, kernel_size=1)
)
def forward(self, x):
input_size = x.size()
ppm_out = [x]
for pooling in self.ppm:
ppm_out.append(
F.interpolate(pooling(x), size=(input_size[2], input_size[3]),
mode='bilinear', align_corners=True))
ppm_out = torch.cat(ppm_out, 1)
ppm_out = self.conv6(ppm_out)
return ppm_out
if __name__ == "__main__":
model = PSPNet(150, None)
print(model)
| 34.88 | 79 | 0.558028 |
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.backbone import resnet50
from modules.ops.seg.seg_oprs import ConvBnRelu
from modules.engine.seg.config import Config
config = Config(config_file='./config.json').get_config()
class PSPNet(nn.Module):
def __init__(self, out_planes, criterion, pretrained_model=None,
norm_layer=nn.BatchNorm2d):
super(PSPNet, self).__init__()
self.backbone = resnet50(pretrained_model, norm_layer=norm_layer,
bn_eps=config.model.bn_eps,
bn_momentum=config.model.bn_momentum,
deep_stem=True, stem_width=64)
self.backbone.layer3.apply(partial(self._nostride_dilate, dilate=2))
self.backbone.layer4.apply(partial(self._nostride_dilate, dilate=4))
self.business_layer = []
self.psp_layer = PyramidPooling('psp', out_planes, 2048,
norm_layer=norm_layer)
self.aux_layer = nn.Sequential(
ConvBnRelu(1024, 1024, 3, 1, 1,
has_bn=True,
has_relu=True, has_bias=False, norm_layer=norm_layer),
nn.Dropout2d(0.1, inplace=False),
nn.Conv2d(1024, out_planes, kernel_size=1)
)
self.business_layer.append(self.psp_layer)
self.business_layer.append(self.aux_layer)
self.criterion = criterion
def forward(self, data, label=None):
blocks = self.backbone(data)
psp_fm = self.psp_layer(blocks[-1])
aux_fm = self.aux_layer(blocks[-2])
psp_fm = F.interpolate(psp_fm, scale_factor=8, mode='bilinear',
align_corners=True)
aux_fm = F.interpolate(aux_fm, scale_factor=8, mode='bilinear',
align_corners=True)
psp_fm = F.log_softmax(psp_fm, dim=1)
aux_fm = F.log_softmax(aux_fm, dim=1)
if label is not None:
loss = self.criterion(psp_fm, label)
aux_loss = self.criterion(aux_fm, label)
loss = loss + 0.4 * aux_loss
return loss
return psp_fm
def _nostride_dilate(self, m, dilate):
if isinstance(m, nn.Conv2d):
if m.stride == (2, 2):
m.stride = (1, 1)
if m.kernel_size == (3, 3):
m.dilation = (dilate // 2, dilate // 2)
m.padding = (dilate // 2, dilate // 2)
else:
if m.kernel_size == (3, 3):
m.dilation = (dilate, dilate)
m.padding = (dilate, dilate)
class PyramidPooling(nn.Module):
def __init__(self, name, out_planes, fc_dim=4096, pool_scales=[1, 2, 3, 6],
norm_layer=nn.BatchNorm2d):
super(PyramidPooling, self).__init__()
self.ppm = []
for scale in pool_scales:
self.ppm.append(nn.Sequential(OrderedDict([
('{}/pool_1'.format(name), nn.AdaptiveAvgPool2d(scale)),
('{}/cbr'.format(name),
ConvBnRelu(fc_dim, 512, 1, 1, 0, has_bn=True,
has_relu=True, has_bias=False,
norm_layer=norm_layer))
])))
self.ppm = nn.ModuleList(self.ppm)
self.conv6 = nn.Sequential(
ConvBnRelu(fc_dim + len(pool_scales) * 512, 512, 3, 1, 1,
has_bn=True,
has_relu=True, has_bias=False, norm_layer=norm_layer),
nn.Dropout2d(0.1, inplace=False),
nn.Conv2d(512, out_planes, kernel_size=1)
)
def forward(self, x):
input_size = x.size()
ppm_out = [x]
for pooling in self.ppm:
ppm_out.append(
F.interpolate(pooling(x), size=(input_size[2], input_size[3]),
mode='bilinear', align_corners=True))
ppm_out = torch.cat(ppm_out, 1)
ppm_out = self.conv6(ppm_out)
return ppm_out
if __name__ == "__main__":
model = PSPNet(150, None)
print(model)
| true | true |
1c2c423ba66151af6b755dfd20d9f6dce4f1cc45 | 1,099 | py | Python | examples/tutorial/tutorial/app.py | carbonariy/dvhb-hybrid | adbb250767ea255addc607fb6f6755c9add447db | [
"MIT"
] | 27 | 2018-05-08T16:03:24.000Z | 2020-02-20T06:39:19.000Z | examples/tutorial/tutorial/app.py | carbonariy/dvhb-hybrid | adbb250767ea255addc607fb6f6755c9add447db | [
"MIT"
] | 7 | 2018-10-20T16:03:36.000Z | 2021-11-03T11:09:22.000Z | examples/tutorial/tutorial/app.py | carbonariy/dvhb-hybrid | adbb250767ea255addc607fb6f6755c9add447db | [
"MIT"
] | 16 | 2018-12-11T15:34:22.000Z | 2022-01-25T00:20:55.000Z | import os
import aiopg.sa
import django
from aiohttp_apiset import SwaggerRouter
from aiohttp_apiset.middlewares import jsonify
import aioworkers.http
from dvhb_hybrid.amodels import AppModels
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tutorial.settings")
django.setup()
import tutorial
AppModels.import_all_models_from_packages(tutorial)
class Application(aioworkers.http.Application):
def __init__(self, *args, **kwargs):
router = SwaggerRouter(search_dirs=['tutorial'])
kwargs['router'] = router
kwargs.setdefault('middlewares', []).append(jsonify)
super().__init__(**kwargs)
router.include('api.yaml')
cls = type(self)
self.on_startup.append(cls.startup_database)
self.on_cleanup.append(cls.cleanup_database)
async def startup_database(self):
dbparams = self.config.databases.default
self['db'] = await aiopg.sa.create_engine(**dbparams)
self.models = self.m = AppModels(self)
async def cleanup_database(self):
self['db'].close()
await self['db'].wait_closed()
| 26.804878 | 68 | 0.708826 | import os
import aiopg.sa
import django
from aiohttp_apiset import SwaggerRouter
from aiohttp_apiset.middlewares import jsonify
import aioworkers.http
from dvhb_hybrid.amodels import AppModels
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tutorial.settings")
django.setup()
import tutorial
AppModels.import_all_models_from_packages(tutorial)
class Application(aioworkers.http.Application):
def __init__(self, *args, **kwargs):
router = SwaggerRouter(search_dirs=['tutorial'])
kwargs['router'] = router
kwargs.setdefault('middlewares', []).append(jsonify)
super().__init__(**kwargs)
router.include('api.yaml')
cls = type(self)
self.on_startup.append(cls.startup_database)
self.on_cleanup.append(cls.cleanup_database)
async def startup_database(self):
dbparams = self.config.databases.default
self['db'] = await aiopg.sa.create_engine(**dbparams)
self.models = self.m = AppModels(self)
async def cleanup_database(self):
self['db'].close()
await self['db'].wait_closed()
| true | true |
1c2c4382dc2bec243f9120ad2fca826f556dfdf0 | 475 | py | Python | URI1789.py | rashidulhasanhridoy/URI-Online-Judge-Problem-Solve-with-Python-3 | c7db434e2e6e40c2ca3bd56db0d04cf79f69de12 | [
"Apache-2.0"
] | 2 | 2020-07-21T18:01:37.000Z | 2021-11-29T01:08:14.000Z | URI1789.py | rashidulhasanhridoy/URI-Online-Judge-Problem-Solve-with-Python-3 | c7db434e2e6e40c2ca3bd56db0d04cf79f69de12 | [
"Apache-2.0"
] | null | null | null | URI1789.py | rashidulhasanhridoy/URI-Online-Judge-Problem-Solve-with-Python-3 | c7db434e2e6e40c2ca3bd56db0d04cf79f69de12 | [
"Apache-2.0"
] | null | null | null | def main():
while True:
numbers = []
try:
l = int(input())
except:
break
x = input()
numbers = x.split()
for i in range(len(numbers)):
numbers[i] = int(numbers[i])
if max(numbers) < 10:
print("1")
elif max(numbers) >= 20:
print("3")
elif max(numbers) >= 10 or max(numbers) < 20:
print("2")
if __name__ == '__main__':
main() | 25 | 53 | 0.431579 | def main():
while True:
numbers = []
try:
l = int(input())
except:
break
x = input()
numbers = x.split()
for i in range(len(numbers)):
numbers[i] = int(numbers[i])
if max(numbers) < 10:
print("1")
elif max(numbers) >= 20:
print("3")
elif max(numbers) >= 10 or max(numbers) < 20:
print("2")
if __name__ == '__main__':
main() | true | true |
1c2c45b6beabbc089285e913423b74d6687ece7b | 777 | py | Python | src/connect4_zero/lib/data_helper.py | maximerihouey/connect4-alpha-zero | 952a47df1a15d1a4e0696c23c9109acefd727aa0 | [
"MIT"
] | null | null | null | src/connect4_zero/lib/data_helper.py | maximerihouey/connect4-alpha-zero | 952a47df1a15d1a4e0696c23c9109acefd727aa0 | [
"MIT"
] | null | null | null | src/connect4_zero/lib/data_helper.py | maximerihouey/connect4-alpha-zero | 952a47df1a15d1a4e0696c23c9109acefd727aa0 | [
"MIT"
] | null | null | null | import json
import os
from glob import glob
from logging import getLogger
from connect4_zero.config import ResourceConfig
logger = getLogger(__name__)
def get_game_data_filenames(rc: ResourceConfig):
pattern = os.path.join(rc.play_data_dir, rc.play_data_filename_tmpl % "*")
files = list(sorted(glob(pattern)))
return files
def get_next_generation_model_dirs(rc: ResourceConfig):
dir_pattern = os.path.join(
rc.next_generation_model_dir, rc.next_generation_model_dirname_tmpl % "*"
)
dirs = list(sorted(glob(dir_pattern)))
return dirs
def write_game_data_to_file(path, data):
with open(path, "wt") as f:
json.dump(data, f)
def read_game_data_from_file(path):
with open(path, "rt") as f:
return json.load(f)
| 23.545455 | 81 | 0.724582 | import json
import os
from glob import glob
from logging import getLogger
from connect4_zero.config import ResourceConfig
logger = getLogger(__name__)
def get_game_data_filenames(rc: ResourceConfig):
pattern = os.path.join(rc.play_data_dir, rc.play_data_filename_tmpl % "*")
files = list(sorted(glob(pattern)))
return files
def get_next_generation_model_dirs(rc: ResourceConfig):
dir_pattern = os.path.join(
rc.next_generation_model_dir, rc.next_generation_model_dirname_tmpl % "*"
)
dirs = list(sorted(glob(dir_pattern)))
return dirs
def write_game_data_to_file(path, data):
with open(path, "wt") as f:
json.dump(data, f)
def read_game_data_from_file(path):
with open(path, "rt") as f:
return json.load(f)
| true | true |
1c2c460c66670ad2aca1c91fa0fc74defb26f85a | 491 | py | Python | mover.py | OlopesMaster/mover_pastas | 384ba76f0a820f07cc4e186b90e7df93f47dc1a7 | [
"MIT"
] | 1 | 2021-12-26T23:17:19.000Z | 2021-12-26T23:17:19.000Z | mover.py | OlopesMaster/mover_pastas | 384ba76f0a820f07cc4e186b90e7df93f47dc1a7 | [
"MIT"
] | null | null | null | mover.py | OlopesMaster/mover_pastas | 384ba76f0a820f07cc4e186b90e7df93f47dc1a7 | [
"MIT"
] | null | null | null | import os
import pathlib
def mover_arquivos(caminho_origem, caminho_destino, formato):
lista = list(set(str(path.parent) for path in pathlib.Path(f"{caminho_origem}\\.").glob(f"**/*{formato}")))
nome_arquivo = list()
aux = 0
for c in lista:
nome_arquivo.append(pathlib.PureWindowsPath(str(lista[aux])).name)
aux+=1
aux = 0
for arquivo in lista:
os.rename(f"{arquivo}", f"{caminho_destino}\\{nome_arquivo[aux]}")
aux+=1
| 32.733333 | 112 | 0.627291 | import os
import pathlib
def mover_arquivos(caminho_origem, caminho_destino, formato):
lista = list(set(str(path.parent) for path in pathlib.Path(f"{caminho_origem}\\.").glob(f"**/*{formato}")))
nome_arquivo = list()
aux = 0
for c in lista:
nome_arquivo.append(pathlib.PureWindowsPath(str(lista[aux])).name)
aux+=1
aux = 0
for arquivo in lista:
os.rename(f"{arquivo}", f"{caminho_destino}\\{nome_arquivo[aux]}")
aux+=1
| true | true |
1c2c468e4116f074bee492800d8729487553b458 | 41,198 | py | Python | Simulating hydrogen/functionsHydrogenSimulation.py | animucki/2mmn40 | c54c0e4e9c801d63f048fbb5d9abd8fe9432cfdc | [
"Unlicense"
] | null | null | null | Simulating hydrogen/functionsHydrogenSimulation.py | animucki/2mmn40 | c54c0e4e9c801d63f048fbb5d9abd8fe9432cfdc | [
"Unlicense"
] | null | null | null | Simulating hydrogen/functionsHydrogenSimulation.py | animucki/2mmn40 | c54c0e4e9c801d63f048fbb5d9abd8fe9432cfdc | [
"Unlicense"
] | null | null | null | ###################################################################################
################################# FUNCTIONS #######################################
###################################################################################
import numpy as np
import time
from sklearn.metrics.pairwise import euclidean_distances
# Define the structure of different molecules:
def getMoleculeStructure():
# Define the properties of different molecules: masses, bonds, angles, dihedrals, equillibrium length and angles, potential constants, Lennart-Jones parameters, etc.
struc = {"WaterReal": {"atoms": [["O"], ["H"], ["H"]], "masses": np.array([15.9994, 1.0080, 1.0080]), "bonds": np.array([[0, 1], [0, 2]]), "angles": np.array([[1, 0, 2]]), "dihedrals": np.empty([0, 4]), "kb": np.array([[5024.16], [5024.16]]), "r0": np.array([[0.9572], [0.9572]]), "ktheta": np.array([[100*6.2802]]), "theta0": np.array([[104.52*np.pi/180]]), "C1": np.empty([0, 1]), "C2": np.empty([0, 1]), "C3": np.empty([0, 1]), "C4": np.empty([0, 1]), "epsilon": [0.66386, 0, 0], "sigma": [3.15061, 0, 0]},
"Water": {"atoms": [["H"], ["H"]], "masses": np.array([1.0080, 1.0080]), "bonds": np.array([[0, 1]]), "angles": np.empty([0, 3]), "dihedrals": np.empty([0, 4]), "kb": np.array([[1000]]), "r0": np.array([[0.7415]]), "ktheta": np.empty([0, 1]), "theta0": np.empty([0, 1]), "C1": np.empty([0, 1]), "C2": np.empty([0, 1]), "C3": np.empty([0, 1]), "C4": np.empty([0, 1]), "epsilon": [0.307, 0.307], "sigma": [2.9, 2.9]},
"Methane": {"atoms": [["C"], ["H"], ["H"], ["H"], ["H"]], "masses": np.array([12.0110, 1.0080, 1.0080, 1.0080, 1.0080]), "bonds": np.array([[0, 1], [0, 2], [0, 3], [0, 4]]), "angles": np.array([[1, 0, 2], [1, 0, 3], [1, 0, 4], [2, 0, 3], [2, 0, 4], [3, 0, 4]]), "dihedrals": np.empty([0, 4])},
"Ethanol": {"atoms": [["C"], ["H"], ["H"], ["H"], ["C"], ["H"], ["H"], ["O"], ["H"]], "masses": np.array([12.0110, 1.0080, 1.0080, 1.0080, 12.0110, 1.0080, 1.0080, 15.9994, 1.0080]), "bonds": np.array([[0, 1], [0, 2], [0, 3], [0, 4], [4, 5], [4, 6], [4, 7], [7, 8]]), "angles": np.array([[1, 0, 4], [2, 0, 4], [3, 0, 4], [3, 0, 2], [3, 0, 1], [2, 0, 1], [5, 4, 6], [0, 4, 6], [0, 4, 5], [0, 4, 7], [4, 7, 8], [5, 4, 7], [6, 4, 7]]), "dihedrals": np.array([[1, 0, 4, 5], [2, 0, 4, 5], [3, 0, 4, 5], [1, 0, 4, 6], [2, 0, 4, 6], [3, 0, 4, 6], [1, 0, 4, 7], [2, 0, 4, 7], [3, 0, 4, 7], [0, 4, 7, 8], [5, 4, 7, 8], [6, 4, 7, 8]]), "kb": np.array([[2845.12], [2845.12] , [2845.12], [2242.624], [2845.12], [2845.12], [2677.76], [4627.50]]), "r0": np.array([[1.090], [1.090] , [1.090], [1.529], [1.090], [1.090], [1.410], [0.945]]), "ktheta": np.array([[100*2.9288], [100*2.9288] , [100*2.9288], [100*2.76144], [100*2.76144], [100*2.76144], [100*2.76144], [100*3.138], [100*3.138], [100*4.144] , [100*4.6024], [100*2.9288], [100*2.9288]]), "theta0": np.array([[108.5*np.pi/180], [108.5*np.pi/180] , [108.5*np.pi/180], [107.8*np.pi/180], [107.8*np.pi/180], [107.8*np.pi/180], [107.8*np.pi/180], [110.7*np.pi/180], [110.7*np.pi/180], [109.5*np.pi/180] , [108.5*np.pi/180], [109.5*np.pi/180], [109.5*np.pi/180]]), "C1": np.array([[0.62760], [0.62760], [0.62760], [0.62760], [0.62760], [0.62760], [0.97905], [0.97905], [0.97905], [-0.44310], [0.94140], [0.94140]]), "C2": np.array([[1.88280], [1.88280], [1.88280], [1.88280], [1.88280], [1.88280], [2.93716], [2.93716], [2.93716], [3.83255], [2.82420], [2.82420]]), "C3": np.array([[0], [0], [0], [0], [0], [0], [0], [0], [0], [0.72801], [0], [0]]), "C4": np.array([[-3.91622], [-3.91622], [-3.91622], [-3.91622], [-3.91622], [-3.91622], [-3.91622], [-3.91622], [-3.91622], [-4.11705], [-3.76560], [-3.76560]]), "epsilon": [0.276144, 0.125520, 0.125520, 0.125520, 0.276144, 0.125520, 0.125520, 0.711280, 0], "sigma": [3.5, 2.5, 2.5, 2.5, 3.5, 2.5, 2.5, 3.12, 0]}
}
return struc
def getLennartJonesCrossSigma(allTypes, allMoleculeNumbers):
    """Build the matrix of pairwise Lennard-Jones sigma parameters.

    Parameters
    ----------
    allTypes : array of shape (nAtoms, 2)
        Column 1 holds the molecule name of each atom (a key into the
        structure returned by getMoleculeStructure()).
    allMoleculeNumbers : int array of shape (nAtoms, 2)
        Column 1 holds the atom's index within its molecule, which selects
        the per-atom sigma from the molecule definition.

    Returns
    -------
    (nAtoms, nAtoms) float array with the Lorentz combination
    sigma_ij = 0.5 * (sigma_i + sigma_j) for every atom pair.
    """
    struc = getMoleculeStructure()
    # Look each atom's sigma up once (O(n)) instead of twice per pair
    # inside an O(n^2) Python loop, then broadcast to the pair matrix.
    sigma = np.array([struc[allTypes[i, 1]]["sigma"][allMoleculeNumbers[i, 1]]
                      for i in range(allTypes.shape[0])])
    return 0.5 * (sigma[:, np.newaxis] + sigma[np.newaxis, :])
def getLennartJonesCrossEpsilon(allTypes, allMoleculeNumbers):
    """Build the (numAtoms x numAtoms) matrix of Lennard-Jones cross epsilons.

    The cross epsilon of a pair is the geometric mean of the two per-atom
    epsilons (Berthelot combining rule), zeroed for atom pairs belonging to
    the same molecule (intramolecular interactions are handled by the bonded
    potentials instead).

    Parameters:
        allTypes: (numAtoms, 2) string array; column 1 is the molecule name.
        allMoleculeNumbers: (numAtoms, 2) int array; column 0 is the molecule
            index, column 1 the atom's index within its molecule.

    Returns:
        (numAtoms, numAtoms) float array of pairwise epsilon values.
    """
    struc = getMoleculeStructure()
    # Per-atom epsilon lookups hoisted out of the pairwise computation:
    epsPerAtom = np.array([struc[allTypes[i, 1]]["epsilon"][allMoleculeNumbers[i, 1]]
                           for i in range(allTypes.shape[0])])
    molecule = allMoleculeNumbers[:, 0]
    # Mask is 1 for intermolecular pairs, 0 for same-molecule pairs:
    differentMolecule = 1*(molecule[:, None] != molecule[None, :])
    return differentMolecule*np.sqrt(epsPerAtom[:, None]*epsPerAtom[None, :])
# Count atoms/bonds/angles/dihedrals (A) in molecule B
def countIn(A, B):
    """Count how many items of category *A* (e.g. "atoms", "bonds", "angles",
    "dihedrals") molecule *B* ("Water" or "Ethanol") contains."""
    molecule = getMoleculeStructure()[B]
    return len(molecule[A])
# Create an initial configuration of the mixture in the prescribed box:
def initializeConfiguration(totalNumMolecules, percentageEthanol, boxSize):
    """Create an initial configuration of a water/ethanol mixture in a cubic box.

    Molecules are placed on a regular 3D grid spanning the box; which grid
    sites hold ethanol is drawn at random with a fixed seed so runs are
    reproducible.

    Parameters:
        totalNumMolecules: total number of molecules (ideally a perfect cube,
            since they are laid out on a cubic grid).
        percentageEthanol: percentage (0-100) of molecules that are ethanol.
        boxSize: edge length of the cubic simulation box (angstrom).

    Returns a 7-tuple:
        XYZinitial         (totalNumAtoms, 3) initial atom coordinates.
        allBonds           (numBonds, 4): 2 atom indices, kb, r0.
        allAngles          (numAngles, 5): 3 atom indices, ktheta, theta0.
        allDihedrals       (numDihedrals, 8): 4 atom indices, C1..C4.
        allMasses          (totalNumAtoms, 1) atomic masses in amu.
        allTypes           (totalNumAtoms, 2): element symbol, molecule name.
        allMoleculeNumbers (totalNumAtoms, 2): molecule index, atom-in-molecule index.
    """
    structure = getMoleculeStructure()
    # Split the molecule count between the two species:
    numMoleculesEthanol = int(np.round(totalNumMolecules*percentageEthanol/100))
    numMoleculesWater = totalNumMolecules - numMoleculesEthanol
    # Per-molecule topology sizes:
    atomsWater = countIn("atoms", "Water")
    bondsWater = countIn("bonds", "Water")
    anglesWater = countIn("angles", "Water")
    dihedralsWater = countIn("dihedrals", "Water")
    atomsEthanol = countIn("atoms", "Ethanol")
    bondsEthanol = countIn("bonds", "Ethanol")
    anglesEthanol = countIn("angles", "Ethanol")
    dihedralsEthanol = countIn("dihedrals", "Ethanol")
    # System-wide totals:
    totalNumBonds = numMoleculesWater*bondsWater + numMoleculesEthanol*bondsEthanol
    totalNumAngles = numMoleculesWater*anglesWater + numMoleculesEthanol*anglesEthanol
    totalNumDihedrals = numMoleculesWater*dihedralsWater + numMoleculesEthanol*dihedralsEthanol
    totalNumAtoms = numMoleculesWater*atomsWater + numMoleculesEthanol*atomsEthanol
    # Allocate the topology arrays (column layouts as in the docstring):
    allBonds = np.zeros([totalNumBonds, 4])
    allAngles = np.zeros([totalNumAngles, 5])
    allDihedrals = np.zeros([totalNumDihedrals, 8])
    allMasses = np.zeros([totalNumAtoms, 1])
    allTypes = np.empty([totalNumAtoms, 2], dtype="<U7")
    allMoleculeNumbers = np.zeros([totalNumAtoms, 2], dtype=int)
    # Fixed seed so the random ethanol placement is reproducible:
    np.random.seed(0)
    ethanolPositions = np.random.choice(totalNumMolecules, numMoleculesEthanol, replace=False)
    # Species name and atom count per molecule:
    moleculeVector = np.empty([totalNumMolecules, 1], dtype="<U7")
    moleculeVector[:, 0] = "Water"
    moleculeVector[ethanolPositions, 0] = "Ethanol"
    repeatVector = np.zeros([totalNumMolecules, 1], dtype=int)
    repeatVector[:, 0] = atomsWater
    repeatVector[ethanolPositions, 0] = atomsEthanol
    # Molecule name per atom ("Water"/"Ethanol"):
    allTypes[:, 1] = np.repeat(moleculeVector[:, 0], repeatVector[:, 0])
    # Fill the topology arrays molecule by molecule, shifting the per-molecule
    # atom indices into global atom indices as we go:
    bondIdx = 0
    angleIdx = 0
    dihedralIdx = 0
    atomIdx = 0
    for molecule in range(0, totalNumMolecules):
        mol = structure[moleculeVector[molecule, 0]]
        nBonds = len(mol["bonds"])
        nAngles = len(mol["angles"])
        nDihedrals = len(mol["dihedrals"])
        nAtoms = len(mol["atoms"])
        allBonds[bondIdx:(bondIdx + nBonds), :] = np.concatenate(
            (mol["bonds"] + atomIdx, mol["kb"], mol["r0"]), axis=1)
        allAngles[angleIdx:(angleIdx + nAngles), :] = np.concatenate(
            (mol["angles"] + atomIdx, mol["ktheta"], mol["theta0"]), axis=1)
        allDihedrals[dihedralIdx:(dihedralIdx + nDihedrals), :] = np.concatenate(
            (mol["dihedrals"] + atomIdx, mol["C1"], mol["C2"], mol["C3"], mol["C4"]), axis=1)
        allMasses[atomIdx:(atomIdx + nAtoms), :] = mol["masses"].reshape(-1, 1)
        # mol["atoms"] is a list of 1-element lists (e.g. [["C"], ["H"], ...]):
        allTypes[atomIdx:(atomIdx + nAtoms), 0] = np.array(mol["atoms"])[:, 0]
        allMoleculeNumbers[atomIdx:(atomIdx + nAtoms), 0] = molecule
        allMoleculeNumbers[atomIdx:(atomIdx + nAtoms), 1] = np.arange(nAtoms)
        bondIdx += nBonds
        angleIdx += nAngles
        dihedralIdx += nDihedrals
        atomIdx += nAtoms
    # Number of molecules along one box edge (assumes totalNumMolecules ~ k^3):
    numMoleculeOneDirec = int(np.round(totalNumMolecules**(1/3)))
    # Enumerate the grid sites (same ordering as np.meshgrid's default 'xy'):
    grid = np.meshgrid(np.arange(numMoleculeOneDirec), np.arange(numMoleculeOneDirec), np.arange(numMoleculeOneDirec))
    gridVectors = np.concatenate((grid[0].reshape(totalNumMolecules, 1),
                                  grid[1].reshape(totalNumMolecules, 1),
                                  grid[2].reshape(totalNumMolecules, 1)), axis=1)
    # Template geometries for a single molecule (angstrom).
    # NOTE(review): basicWater has 2 rows, while comments elsewhere in this
    # file refer to 3 atoms per water molecule -- confirm the template matches
    # countIn("atoms", "Water").
    basicWater = np.array([[0, 0.5, 0.6], [0.6, 0.9, 1.2]])
    basicEthanol = np.array([[-1.683, -0.523, 1.084], [-1.689, -1.638, 1.101], [-1.171, -0.174, 2.011], [-2.738, -0.167, 1.117], [-0.968, -0.008, -0.167], [0.094, -0.344, -0.200], [-1.490, -0.319, -1.102], [-0.953, 1.395, -0.142], [-1.842, 1.688, -0.250]]) + np.array([2.738, 1.638, 1.102])
    # Place every molecule template at its grid site. The grid spacing is
    # boxSize/numMoleculeOneDirec so the lattice fills the box. (The previous
    # version left the very first molecule unscaled, which only worked because
    # gridVectors[0] happens to be the origin; now all sites are scaled
    # uniformly.) Collect the pieces and concatenate once to avoid quadratic
    # repeated concatenation.
    spacing = (1/numMoleculeOneDirec)*boxSize
    pieces = []
    for m in range(0, totalNumMolecules):
        template = basicWater if moleculeVector[m, 0] == "Water" else basicEthanol
        pieces.append(template + spacing*gridVectors[m])
    XYZinitial = np.concatenate(pieces)
    # Return all initializations:
    return XYZinitial, allBonds, allAngles, allDihedrals, allMasses, allTypes, allMoleculeNumbers
# Initialize the velocities
def initializeV(allMoleculeNumbers, meanV, stDevV):
    """Draw one random initial velocity per molecule and copy it to each of
    the molecule's atoms.

    Parameters:
        allMoleculeNumbers: (numAtoms, 2) int array; column 0 is the molecule
            index of each atom (atoms of a molecule are stored contiguously).
        meanV: mean of the per-component normal velocity distribution.
        stDevV: standard deviation of the per-component normal distribution.

    Returns:
        (numAtoms, 3) velocity array; all atoms of a molecule share a row.
    """
    moleculeColumn = allMoleculeNumbers[:, 0]
    totalNumMolecules = max(moleculeColumn) + 1
    # Run-length encode the molecule column: the boundaries are the indices
    # where the molecule index changes, padded with sentinels at both ends,
    # so their differences give the atom count of each molecule (e.g. 3 for
    # water, 9 for ethanol).
    runEnds = np.where(np.diff(moleculeColumn) != 0)
    boundaries = np.append(np.append([-1], runEnds), [len(moleculeColumn) - 1])
    atomsPerMolecule = np.diff(boundaries)
    # Fixed seed for reproducibility; one i.i.d. normal 3-vector per molecule:
    np.random.seed(1)
    perMolecule = np.random.normal(meanV, stDevV, [totalNumMolecules, 3])
    # Replicate each molecule's velocity over all of its atoms:
    return np.repeat(perMolecule, atomsPerMolecule, axis=0)
# Create a calculateForces function:
def calculateForcesEnergy(atomListXYZNow, bondList, angleList, dihedralList, typeList, moleculeNumberList, boxSize, rLJCutOff, LJsigma, LJepsilon, LJsigma6, LJsigma12):
    """Compute the force on every atom and the total potential energy.

    Four contributions are accumulated: harmonic bonds, harmonic angles,
    dihedral torsions, and intermolecular Lennard-Jones interactions with a
    distance cutoff under periodic (minimum-image) boundary conditions.

    Parameters:
        atomListXYZNow: (numAtoms, 3) current atom coordinates.
        bondList: (numBonds, 4): 2 atom indices, kb, r0.
        angleList: (numAngles, 5): 3 atom indices, ktheta, theta0.
        dihedralList: (numDihedrals, 8): 4 atom indices, C1..C4.
        typeList, moleculeNumberList: per-atom bookkeeping, kept for interface
            compatibility; LJ exclusions are already encoded in LJepsilon,
            which is zero for same-molecule pairs.
        boxSize: cubic box edge length.
        rLJCutOff: Lennard-Jones cutoff distance.
        LJsigma, LJepsilon, LJsigma6, LJsigma12: precomputed pairwise LJ
            parameter matrices (sigma, epsilon, sigma^6, sigma^12).

    Returns:
        (forces, potentialEnergy): (numAtoms, 3) force array and a scalar.
    """
    forces = np.zeros(atomListXYZNow.shape)
    potentialEnergy = 0
    ########### Forces for bond potentials #############
    codeTimer = time.time()
    # Harmonic bond potential V(r) = 0.5*kb*(r - r0)^2 for every bonded pair.
    for b in range(0, bondList.shape[0]):
        atomI = int(bondList[b, 0])
        atomJ = int(bondList[b, 1])
        kb = bondList[b, 2]
        r0 = bondList[b, 3]
        # Current bond length:
        r = np.linalg.norm(atomListXYZNow[atomI] - atomListXYZNow[atomJ])
        potentialEnergy += 0.5*kb*((r - r0)**2)
        # ||F|| = kb*|r - r0|; the force acts along the bond axis, pulling the
        # atoms together when stretched (r > r0) and apart when compressed.
        magnitudeForce = kb*abs(r - r0)
        forceOnIWhenStretched = (atomListXYZNow[atomJ] - atomListXYZNow[atomI])*magnitudeForce/r
        correctSign = np.sign(r - r0)
        forces[atomI] = forces[atomI] + correctSign*forceOnIWhenStretched
        forces[atomJ] = forces[atomJ] - correctSign*forceOnIWhenStretched
    print("Calculating bond forces took " + str(time.time() - codeTimer) + " seconds.")
    ########### Forces for angle potentials #############
    codeTimer = time.time()
    # Harmonic angle potential V(theta) = 0.5*ktheta*(theta - theta0)^2 for
    # every triple atom1 -- atom2 -- atom3, theta measured at the middle atom.
    for a in range(0, angleList.shape[0]):
        atom1 = int(angleList[a, 0])
        atom2 = int(angleList[a, 1])
        atom3 = int(angleList[a, 2])
        XYZatom1 = atomListXYZNow[atom1,]
        XYZatom2 = atomListXYZNow[atom2,]
        XYZatom3 = atomListXYZNow[atom3,]
        k_theta = angleList[a, 3]
        theta0 = angleList[a, 4]
        # Arms of the angle, from the central atom outward:
        v21 = XYZatom1 - XYZatom2
        rv21 = np.linalg.norm(v21)
        v23 = XYZatom3 - XYZatom2
        rv23 = np.linalg.norm(v23)
        theta = np.arccos((np.dot(v21, v23))/(rv21 * rv23))
        potentialEnergy += 0.5*k_theta*((theta - theta0)**2)
        # ||F_atom1|| = |ktheta|*|theta - theta0| / ||q_atom1 - q_atom2||
        # (dV/dtheta times dtheta/dq for the outer atoms):
        magnitudeForceOnAtom1 = k_theta*abs(theta - theta0)/rv21
        magnitudeForceOnAtom3 = k_theta*abs(theta - theta0)/rv23
        # In-plane directions perpendicular to each arm, pointing inwards
        # (the direction that closes the angle when theta > theta0):
        directionForceOnAtom1Inward = np.cross(v21, np.cross(v23, v21))
        directionForceOnAtom1Inward = directionForceOnAtom1Inward/np.linalg.norm(directionForceOnAtom1Inward)
        directionForceOnAtom3Inward = np.cross(v23, np.cross(v21, v23))
        directionForceOnAtom3Inward = directionForceOnAtom3Inward/np.linalg.norm(directionForceOnAtom3Inward)
        # Sign flips the direction when theta < theta0 (angle must open up):
        correctSign = np.sign(theta - theta0)
        forceAtom1 = correctSign*directionForceOnAtom1Inward*magnitudeForceOnAtom1
        forceAtom3 = correctSign*directionForceOnAtom3Inward*magnitudeForceOnAtom3
        forces[atom1] = forces[atom1] + forceAtom1
        forces[atom3] = forces[atom3] + forceAtom3
        # The central atom balances the other two (net force is zero):
        forces[atom2] = forces[atom2] - (forceAtom1 + forceAtom3)
    print("Calculating angle forces took " + str(time.time() - codeTimer) + " seconds.")
    codeTimer = time.time()
    ########### Forces for dihedral potentials #############
    # Torsion potential for every quadruple a -- b -- c -- d, where psi is the
    # dihedral angle between plane abc and plane bcd shifted by pi:
    # V(psi) = 0.5*(C1*(1+cos(psi)) + C2*(1-cos(2*psi)) + C3*(1+cos(3*psi)) + C4*(1-cos(4*psi)))
    for dih in range(0, dihedralList.shape[0]):
        atom_a = int(dihedralList[dih, 0])
        atom_b = int(dihedralList[dih, 1])
        atom_c = int(dihedralList[dih, 2])
        atom_d = int(dihedralList[dih, 3])
        C1 = dihedralList[dih, 4]
        C2 = dihedralList[dih, 5]
        C3 = dihedralList[dih, 6]
        C4 = dihedralList[dih, 7]
        XYZ_a = atomListXYZNow[atom_a, :]
        XYZ_b = atomListXYZNow[atom_b, :]
        XYZ_c = atomListXYZNow[atom_c, :]
        XYZ_d = atomListXYZNow[atom_d, :]
        # Point 0 is the midpoint of the central bond b ----- c:
        XYZ_0 = 0.5*XYZ_b + 0.5*XYZ_c
        v_b_to_a = XYZ_a - XYZ_b
        v_b_to_c = XYZ_c - XYZ_b
        v_c_to_b = XYZ_b - XYZ_c
        v_c_to_d = XYZ_d - XYZ_c
        v_0_to_c = XYZ_c - XYZ_0
        # Unit normals of the two planes:
        n_abc = np.cross(v_b_to_a, v_b_to_c)
        n_abc = n_abc/np.linalg.norm(n_abc)
        n_bcd = np.cross(v_c_to_d, v_c_to_b)
        n_bcd = n_bcd/np.linalg.norm(n_bcd)
        m = -n_abc
        n = n_bcd
        # Numerically stable signed dihedral angle via arctan2:
        theta = np.arctan2(((np.dot(np.cross(m, n), v_c_to_b))/(np.linalg.norm(v_c_to_b))), (np.dot(m, n)))
        psi = theta - np.pi
        # Bending angles a--b--c and b--c--d (enter the force magnitudes):
        theta_abc = np.arccos(np.dot(v_b_to_a, v_b_to_c)/(np.linalg.norm(v_b_to_a)*np.linalg.norm(v_b_to_c)))
        theta_bcd = np.arccos(np.dot(v_c_to_b, v_c_to_d)/(np.linalg.norm(v_c_to_b)*np.linalg.norm(v_c_to_d)))
        # -dV/dpsi, shared by both end-atom force magnitudes:
        part1_of_magnitude = -0.5*(C1*np.sin(psi)-2*C2*np.sin(2*psi)+3*C3*np.sin(3*psi)-4*C4*np.sin(4*psi))
        signed_magnitude_force_a = part1_of_magnitude/(np.sin(theta_abc)*(np.linalg.norm(v_b_to_a)))
        signed_magnitude_force_d = part1_of_magnitude/(np.sin(theta_bcd)*(np.linalg.norm(v_c_to_d)))
        # End-atom forces act along the plane normals; b and c are chosen so
        # that the total force and the total torque both vanish:
        force_a = signed_magnitude_force_a*n_abc
        force_d = signed_magnitude_force_d*n_bcd
        force_c = (1/((np.linalg.norm(v_0_to_c))**2))*np.cross(-(np.cross(v_0_to_c, force_d) + 0.5*np.cross(v_c_to_d, force_d) + 0.5*np.cross(v_b_to_a, force_a)), v_0_to_c)
        force_b = -force_a - force_d - force_c
        forces[atom_a, :] = forces[atom_a, :] + force_a
        forces[atom_b, :] = forces[atom_b, :] + force_b
        forces[atom_c, :] = forces[atom_c, :] + force_c
        forces[atom_d, :] = forces[atom_d, :] + force_d
        potentialEnergy += 0.5*(C1*(1+np.cos(psi))+C2*(1-np.cos(2*psi))+C3*(1+np.cos(3*psi))+C4*(1-np.cos(4*psi)))
    print("Calculating dihedral forces took " + str(time.time() - codeTimer) + " seconds.")
    codeTimer = time.time()
    ########### Forces for Lennart-Jones potentials #############
    # Minimum-image pairwise distances, upper triangle only so every pair is
    # handled once. Pairs with LJepsilon == 0 (same molecule, or atoms with
    # no LJ site) are masked out by zeroing their distance entry.
    pairwiseDistances = np.zeros([atomListXYZNow.shape[0], atomListXYZNow.shape[0]])
    for i in range(0, pairwiseDistances.shape[0]):
        pairwiseDistances[i, (i+1):] = floorDistanceVectorOld(atomListXYZNow[i, :], atomListXYZNow[(i+1):, :], boxSize)
    pairwiseDistances = pairwiseDistances*(1*(LJepsilon > 0))
    print("Calculating Lennart-Jones forces part 1 took " + str(time.time() - codeTimer) + " seconds.")
    # Interacting pairs: inside the cutoff and not masked out:
    LJList = np.where((pairwiseDistances < rLJCutOff) & (pairwiseDistances != 0))
    LJList = np.concatenate((LJList[0].reshape([LJList[0].shape[0], 1]), LJList[1].reshape([LJList[1].shape[0], 1])), axis=1)
    print(LJList.shape[0])
    codeTimer = time.time()
    # Gather per-pair distances and LJ parameters in one vectorized pass:
    pairAtom1 = LJList[:, 0]
    pairAtom2 = LJList[:, 1]
    r = pairwiseDistances[pairAtom1, pairAtom2]
    epsilon = LJepsilon[pairAtom1, pairAtom2]
    sigma = LJsigma[pairAtom1, pairAtom2]
    sigma6 = LJsigma6[pairAtom1, pairAtom2]
    sigma12 = LJsigma12[pairAtom1, pairAtom2]
    rMin7 = r**(-7)
    rMin6 = r*rMin7
    # ||F|| = ||dV/dr|| for V(r) = 4*eps*((sigma/r)^12 - (sigma/r)^6):
    magnitudeLJForce = 24*epsilon*abs(rMin7*(2*sigma12*rMin6 - sigma6))
    potentialEnergy += sum(4*epsilon*((sigma12*((rMin6)**2))-(sigma6*rMin6)))
    # Direction of the force on atom1, wrapped into the box and corrected to
    # the minimum image across the periodic boundary:
    atom1XYZBox = np.mod(atomListXYZNow[pairAtom1, :], boxSize)
    atom2XYZBox = np.mod(atomListXYZNow[pairAtom2, :], boxSize)
    directionForceWrtAtom1LargeR = atom2XYZBox - atom1XYZBox
    directionForceWrtAtom1LargeR = directionForceWrtAtom1LargeR - boxSize*(np.sign(atom2XYZBox - atom1XYZBox))*(abs(atom2XYZBox - atom1XYZBox) > .5*boxSize)
    normalizedDirectionForceWrtAtom1LargeR = directionForceWrtAtom1LargeR/(np.linalg.norm(directionForceWrtAtom1LargeR, axis=1).reshape([directionForceWrtAtom1LargeR.shape[0], 1]))
    # Attractive beyond the potential minimum at 2^(1/6)*sigma, repulsive inside:
    correctSign = np.sign(r - (2**(1/6))*sigma)
    forceAtom1Vec = correctSign.reshape([correctSign.shape[0], 1])*magnitudeLJForce.reshape([correctSign.shape[0], 1])*normalizedDirectionForceWrtAtom1LargeR
    # Scatter-add the pair forces. Plain fancy-indexed "+=" silently drops
    # contributions when an atom index appears in several pairs (NumPy
    # buffering); np.add.at performs the unbuffered accumulation correctly.
    np.add.at(forces, pairAtom1, forceAtom1Vec)
    np.add.at(forces, pairAtom2, -forceAtom1Vec)
    print("Calculating Lennart-Jones forces part 2 took " + str(time.time() - codeTimer) + " seconds.")
    # Return forces and potential energy:
    return forces, potentialEnergy
# Special function for distances when taking into account the boundary conditions:
def floorDistanceVector(b, size):
    """Return the (n, n) matrix of minimum-image distances between the rows of *b*.

    The previous implementation called sklearn's ``euclidean_distances``,
    whose import is commented out at the top of the file, so calling this
    function raised NameError. The per-component pairwise distance matrices
    are now computed with plain NumPy broadcasting (for 1-D coordinates the
    Euclidean distance reduces to the absolute difference).

    Parameters:
        b: (n, 3) coordinates; may lie outside the box.
        size: edge length of the cubic periodic box.

    Returns:
        (n, n) symmetric array of minimum-image Euclidean distances.
    """
    # Wrap all coordinates back into the box:
    c = np.mod(b, size)
    # Absolute pairwise differences per component via broadcasting:
    rx = np.abs(c[:, 0][:, None] - c[:, 0][None, :])
    ry = np.abs(c[:, 1][:, None] - c[:, 1][None, :])
    rz = np.abs(c[:, 2][:, None] - c[:, 2][None, :])
    # Minimum-image convention: fold each component into [-size/2, size/2):
    rxN = rx - size*np.floor(rx/size + 0.5)
    ryN = ry - size*np.floor(ry/size + 0.5)
    rzN = rz - size*np.floor(rz/size + 0.5)
    # Euclidean norm across the three stacked component matrices:
    dim = rxN.shape[0]
    d = np.concatenate((rxN.reshape([1, dim, dim]), ryN.reshape([1, dim, dim]), rzN.reshape([1, dim, dim])), axis=0)
    dist = np.linalg.norm(d, axis=0)
    # Return the distances
    return dist
def floorDistanceVectorOld(a, b, size):
    """Minimum-image distances from point *a* to every row of *b* in a cubic
    periodic box of edge length *size*."""
    # Wrap both operands into the box first:
    origin = np.mod(a, size)
    others = np.mod(b, size)
    # Component-wise absolute differences, broadcast over the rows of b:
    delta = np.abs(origin - others)
    # Minimum-image convention: fold each component into [-size/2, size/2):
    delta = delta - size*np.floor(delta/size + 0.5)
    # Euclidean length per row gives the distances:
    return np.linalg.norm(delta, axis=1)
# Function for the thermostat: find temperature and rescale velocities
def thermostat(v, allMasses, rescale, targetTemperature):
    """Measure the instantaneous temperature and optionally rescale velocities.

    Parameters:
        v: (numAtoms, 3) velocity array.
        allMasses: (numAtoms, 1) masses in amu.
        rescale: 1 to rescale v toward targetTemperature, anything else to
            leave v untouched.
        targetTemperature: desired temperature in K.

    Returns:
        (currentTemperature, velocities) -- the temperature *before* any
        rescaling, and the (possibly rescaled) velocity array.
    """
    # Boltzmann constant in angstrom^2 * amu * fs^-2 * K^-1:
    boltzmannConstant = 1.38064852*6.02214086*(10**(-7))
    totalNumAtoms = allMasses.shape[0]
    # Equipartition: T = (2 / (3*N*kB)) * total kinetic energy:
    currentTemperature = (2/(3*totalNumAtoms*boltzmannConstant))*sum(.5*allMasses[:, 0]*((np.linalg.norm(v, axis=1))**2))
    # Rescale only when requested and the temperature is not (near) zero;
    # the int() truncation guards against dividing by a tiny temperature:
    if (rescale == 1) & (int(currentTemperature) != 0):
        return currentTemperature, np.sqrt(targetTemperature/currentTemperature)*v
    return currentTemperature, v
| 54.857523 | 2,014 | 0.609471 | nol + gridVectors[0]
for m in range(0, totalNumMolecules-1):
if moleculeVector[m+1] == "Water":
XYZinitial = np.concatenate((XYZinitial, basicWater + (1/numMoleculeOneDirec)*boxSize*gridVectors[m + 1]))
else:
XYZinitial = np.concatenate((XYZinitial, basicEthanol + (1/numMoleculeOneDirec)*boxSize*gridVectors[m + 1]))
return XYZinitial, allBonds, allAngles, allDihedrals, allMasses, allTypes, allMoleculeNumbers
def initializeV(allMoleculeNumbers, meanV, stDevV):
    """Draw one random velocity per molecule and copy it to every atom.

    Each molecule receives a single normally distributed 3-vector, so all
    atoms of a molecule start with identical velocity. The RNG is seeded
    with a fixed value, making the result deterministic.

    Parameters
    ----------
    allMoleculeNumbers : ndarray, shape (nAtoms, 1) -- molecule index of
        each atom; atoms of the same molecule are assumed contiguous.
    meanV, stDevV : float -- mean and standard deviation of the normal
        distribution the per-molecule velocities are drawn from.

    Returns
    -------
    ndarray, shape (nAtoms, 3) -- per-atom initial velocities.
    """
    labels = allMoleculeNumbers[:, 0]
    numMolecules = max(labels) + 1
    # Indices where the molecule number changes; with the sentinels -1 and
    # len-1 appended, consecutive differences give each molecule's size.
    boundaries = np.append(np.append([-1], np.where(np.diff(labels) != 0)),
                           [len(labels) - 1])
    atomsPerMolecule = np.diff(boundaries)
    # Fixed seed keeps every simulation run reproducible.
    np.random.seed(1)
    perMolecule = np.random.normal(meanV, stDevV, [numMolecules, 3])
    return np.repeat(perMolecule, atomsPerMolecule, axis=0)
def calculateForcesEnergy(atomListXYZNow, bondList, angleList, dihedralList, typeList, moleculeNumberList, boxSize, rLJCutOff, LJsigma, LJepsilon, LJsigma6, LJsigma12):
forces = np.zeros(atomListXYZNow.shape)
potentialEnergy = 0
List[b, 1])] - atomListXYZNow[int(bondList[b, 0])])*magnitudeForce/r
correctSign = np.sign(r - bondList[b, 3])
forces[int(bondList[b, 0])] = forces[int(bondList[b, 0])] + correctSign*forceVectorAtom0Case1
forces[int(bondList[b, 1])] = forces[int(bondList[b, 1])] - correctSign*forceVectorAtom0Case1
print("Calculating bond forces took " + str(time.time() - codeTimer) + " seconds.")
.norm(v21)
v23 = XYZatom3 - XYZatom2
rv23 = np.linalg.norm(v23)
theta = np.arccos((np.dot(v21, v23))/(rv21 * rv23))
potentialEnergy += 0.5*k_theta*((theta - theta0)**2)
magnitudeForceOnAtom1 = k_theta*abs(theta - theta0)/rv21
magnitudeForceOnAtom3 = k_theta*abs(theta - theta0)/rv23
directionForceOnAtom1WrtCase1 = np.cross(v21, np.cross(v23, v21))
directionForceOnAtom1NormalizedWrtCase1 = directionForceOnAtom1WrtCase1/np.linalg.norm(directionForceOnAtom1WrtCase1)
directionForceOnAtom3WrtCase1 = np.cross(v23, np.cross(v21, v23))
directionForceOnAtom3NormalizedWrtCase1 = directionForceOnAtom3WrtCase1/np.linalg.norm(directionForceOnAtom3WrtCase1)
correctSign = np.sign(theta - angleList[a, 4])
forceAtom1 = correctSign*directionForceOnAtom1NormalizedWrtCase1*magnitudeForceOnAtom1
forceAtom3 = correctSign*directionForceOnAtom3NormalizedWrtCase1*magnitudeForceOnAtom3
forces[atom1] = forces[atom1] + forceAtom1
forces[atom3] = forces[atom3] + forceAtom3
forces[atom2] = forces[atom2] - (forceAtom1 + forceAtom3)
print("Calculating angle forces took " + str(time.time() - codeTimer) + " seconds.")
codeTimer = time.time()
Z_d = atomListXYZNow[atom_d, :]
XYZ_0 = 0.5*XYZ_b + 0.5*XYZ_c
v_b_to_a = XYZ_a - XYZ_b
v_b_to_c = XYZ_c - XYZ_b
v_c_to_b = XYZ_b - XYZ_c
v_c_to_d = XYZ_d - XYZ_c
v_0_to_c = XYZ_c - XYZ_0
n_abc = np.cross(v_b_to_a, v_b_to_c)
n_abc = n_abc/np.linalg.norm(n_abc)
n_bcd = np.cross(v_c_to_d, v_c_to_b)
n_bcd = n_bcd/np.linalg.norm(n_bcd)
m = -n_abc
m = m/np.linalg.norm(m)
n = n_bcd
theta = np.arctan2(((np.dot(np.cross(m, n), v_c_to_b))/(np.linalg.norm(v_c_to_b))), (np.dot(m, n)))
psi = theta - np.pi
theta_abc = np.arccos(np.dot(v_b_to_a, v_b_to_c)/(np.linalg.norm(v_b_to_a)*np.linalg.norm(v_b_to_c)))
theta_bcd = np.arccos(np.dot(v_c_to_b, v_c_to_d)/(np.linalg.norm(v_c_to_b)*np.linalg.norm(v_c_to_d)))
part1_of_magnitude = -0.5*(C1*np.sin(psi)-2*C2*np.sin(2*psi)+3*C3*np.sin(3*psi)-4*C4*np.sin(4*psi))
signed_magnitude_force_a = part1_of_magnitude/(np.sin(theta_abc)*(np.linalg.norm(v_b_to_a)))
signed_magnitude_force_d = part1_of_magnitude/(np.sin(theta_bcd)*(np.linalg.norm(v_c_to_d)))
force_a = signed_magnitude_force_a*n_abc
force_d = signed_magnitude_force_d*n_bcd
force_c = (1/((np.linalg.norm(v_0_to_c))**2))*np.cross(-(np.cross(v_0_to_c, force_d) + 0.5*np.cross(v_c_to_d, force_d) + 0.5*np.cross(v_b_to_a, force_a)), v_0_to_c)
force_b = -force_a - force_d - force_c
forces[atom_a, :] = forces[atom_a, :] + force_a
forces[atom_b, :] = forces[atom_b, :] + force_b
forces[atom_c, :] = forces[atom_c, :] + force_c
forces[atom_d, :] = forces[atom_d, :] + force_d
potentialEnergy += 0.5*(C1*(1+np.cos(psi))+C2*(1-np.cos(2*psi))+C3*(1+np.cos(3*psi))+C4*(1-np.cos(4*psi)))
print("Calculating dihedral forces took " + str(time.time() - codeTimer) + " seconds.")
codeTimer = time.time()
Distances != 0))
LJList = np.concatenate((LJList[0].reshape([LJList[0].shape[0], 1]), LJList[1].reshape([LJList[1].shape[0], 1])), axis=1)
print(LJList.shape[0])
codeTimer = time.time()
atom1 = LJList[:, 0]
atom2 = LJList[:, 1]
r = pairwiseDistances[atom1, atom2]
epsilon = LJepsilon[atom1, atom2]
sigma = LJsigma[atom1, atom2]
sigma6 = LJsigma6[atom1, atom2]
sigma12 = LJsigma12[atom1, atom2]
rMin7 = r**(-7)
rMin6 = r*rMin7
magnitudeLJForce = 24*epsilon*abs(rMin7*(2*sigma12*rMin6 - sigma6))
potentialEnergy += sum(4*epsilon*((sigma12*((rMin6)**2))-(sigma6*rMin6)))
atom1XYZBox = np.mod(atomListXYZNow[atom1, :], boxSize)
atom2XYZBox = np.mod(atomListXYZNow[atom2, :], boxSize)
directionForceWrtAtom1LargeR = atom2XYZBox - atom1XYZBox
directionForceWrtAtom1LargeR = directionForceWrtAtom1LargeR - boxSize*(np.sign(atom2XYZBox - atom1XYZBox))*(abs(atom2XYZBox - atom1XYZBox) > .5*boxSize)
normalizedDirectionForceWrtAtom1LargeR = directionForceWrtAtom1LargeR/(np.linalg.norm(directionForceWrtAtom1LargeR, axis=1).reshape([directionForceWrtAtom1LargeR.shape[0], 1]))
correctSign = np.sign(r - (2**(1/6))*sigma)
forceAtom1Vec = correctSign.reshape([correctSign.shape[0], 1])*magnitudeLJForce.reshape([correctSign.shape[0], 1])*normalizedDirectionForceWrtAtom1LargeR
for lj in range(0, LJList.shape[0]):
forces[LJList[lj, 0], :] = forces[LJList[lj, 0], :] + forceAtom1Vec[lj, :]
forces[LJList[lj, 1], :] = forces[LJList[lj, 1], :] - forceAtom1Vec[lj, :]
" + str(time.time() - codeTimer) + " seconds.")
return forces, potentialEnergy
def floorDistanceVector(b, size):
    """All pairwise minimum-image distances between the rows of ``b``.

    Coordinates are wrapped into the periodic box of edge length ``size``
    and each per-axis separation is folded into the nearest periodic image
    before the Euclidean norm is taken.

    Parameters
    ----------
    b : ndarray, shape (n, 3) -- particle positions.
    size : float -- periodic box edge length.

    Returns
    -------
    ndarray, shape (n, n) -- symmetric matrix of minimum-image distances
    with a zero diagonal.
    """
    # Wrap all coordinates back into the primary box.
    c = np.mod(b, size)
    # Exact absolute pairwise separation per axis. The previous
    # sklearn euclidean_distances call computed the same |x_i - x_j|
    # values via the numerically unstable x^2 + y^2 - 2xy expansion,
    # which can flip the discontinuous np.floor fold below.
    rx = np.abs(c[:, 0][:, None] - c[:, 0][None, :])
    ry = np.abs(c[:, 1][:, None] - c[:, 1][None, :])
    rz = np.abs(c[:, 2][:, None] - c[:, 2][None, :])
    # Fold each separation across the periodic boundary (nearest image).
    rxN = rx - size * np.floor(rx / size + 0.5)
    ryN = ry - size * np.floor(ry / size + 0.5)
    rzN = rz - size * np.floor(rz / size + 0.5)
    # Stack the three folded component matrices and norm over that axis.
    dim = rxN.shape[0]
    d = np.concatenate((rxN.reshape([1, dim, dim]),
                        ryN.reshape([1, dim, dim]),
                        rzN.reshape([1, dim, dim])), axis=0)
    return np.linalg.norm(d, axis=0)
def floorDistanceVectorOld(a, b, size):
    """Minimum-image distances from point ``a`` to every row of ``b``.

    Inputs are wrapped into the periodic box of edge ``size``; each axis
    separation is then folded to the nearest periodic image before the
    row-wise Euclidean norm is taken.
    """
    # Wrap both operands into the primary box.
    point = np.mod(a, size)
    others = np.mod(b, size)
    # Per-axis absolute separations.
    dx = abs(point[0] - others[:, 0])
    dy = abs(point[1] - others[:, 1])
    dz = abs(point[2] - others[:, 2])
    # Fold each separation across the periodic boundary.
    dx = dx - size * np.floor(dx / size + 0.5)
    dy = dy - size * np.floor(dy / size + 0.5)
    dz = dz - size * np.floor(dz / size + 0.5)
    # Row-wise Euclidean norm of the folded displacement vectors.
    stacked = np.concatenate((dx.reshape([len(dx), 1]),
                              dy.reshape([len(dy), 1]),
                              dz.reshape([len(dz), 1])), axis=1)
    return np.linalg.norm(stacked, axis=1)
def thermostat(v, allMasses, rescale, targetTemperature):
    """Measure the kinetic temperature and optionally rescale velocities.

    Returns the temperature computed from ``v`` (kelvin) together with the
    velocities, multiplied by sqrt(target/current) when ``rescale == 1``
    and the current temperature is not numerically zero.
    """
    # Boltzmann constant in angstrom^2 * amu * fs^-2 * K^-1.
    boltzmann = 1.38064852 * 6.02214086 * (10 ** (-7))
    atomCount = allMasses.shape[0]
    # Kinetic temperature via equipartition: T = 2 E_kin / (3 N kB).
    kinetic = sum(.5 * allMasses[:, 0] * (np.linalg.norm(v, axis=1) ** 2))
    temperature = (2 / (3 * atomCount * boltzmann)) * kinetic
    # Guard against division by a (numerically) zero temperature.
    if (rescale == 1) & (int(temperature) != 0):
        adjusted = np.sqrt(targetTemperature / temperature) * v
    else:
        adjusted = v
    # Return the temperature before rescaling and the rescaled velocities.
    return temperature, adjusted
| true | true |
1c2c46f99a82875b917a330d6ec76062222420de | 20,238 | py | Python | python/paddle/fluid/tests/unittests/test_compat.py | ysh329/Paddle | 50ad9046c9a440564d104eaa354eb9df83a35678 | [
"Apache-2.0"
] | 9 | 2017-12-04T02:58:01.000Z | 2020-12-03T14:46:30.000Z | python/paddle/fluid/tests/unittests/test_compat.py | ysh329/Paddle | 50ad9046c9a440564d104eaa354eb9df83a35678 | [
"Apache-2.0"
] | 7 | 2017-12-05T20:29:08.000Z | 2018-10-15T08:57:40.000Z | python/paddle/fluid/tests/unittests/test_compat.py | ysh329/Paddle | 50ad9046c9a440564d104eaa354eb9df83a35678 | [
"Apache-2.0"
] | 6 | 2018-03-19T22:38:46.000Z | 2019-11-01T22:28:27.000Z | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.compat as cpt
import six
class TestCompatible(unittest.TestCase):
    """Unit tests for the paddle.compat (cpt) Python 2/3 helpers.

    Each test branches on six.PY2 / six.PY3 because the expected result
    types (unicode vs str) differ between interpreter major versions.
    Indentation restored; code tokens unchanged.
    """

    def test_type(self):
        """int_type/long_type aliases match the interpreter's int types."""
        if six.PY2:
            self.assertEqual(cpt.int_type, int)
            self.assertEqual(cpt.long_type, long)
        else:
            self.assertEqual(cpt.int_type, int)
            self.assertEqual(cpt.long_type, int)

    def test_to_text(self):
        """to_text converts str/bytes/unicode scalars, lists and sets.

        Covers both the copying (default / inplace=False) and the
        inplace=True container behaviour.
        """
        # Only support python2.x and python3.x now
        self.assertTrue(six.PY2 | six.PY3)
        if six.PY2:
            # check None
            self.assertIsNone(cpt.to_text(None))
            # check all string related types
            self.assertTrue(isinstance(cpt.to_text(str("")), unicode))
            self.assertTrue(isinstance(cpt.to_text(str("123")), unicode))
            self.assertTrue(isinstance(cpt.to_text(b""), unicode))
            self.assertTrue(isinstance(cpt.to_text(b""), unicode))
            self.assertTrue(isinstance(cpt.to_text(u""), unicode))
            self.assertTrue(isinstance(cpt.to_text(u""), unicode))
            self.assertEqual(u"", cpt.to_text(str("")))
            self.assertEqual(u"123", cpt.to_text(str("123")))
            self.assertEqual(u"", cpt.to_text(b""))
            self.assertEqual(u"123", cpt.to_text(b"123"))
            self.assertEqual(u"", cpt.to_text(u""))
            self.assertEqual(u"123", cpt.to_text(u"123"))
            # check list types, not inplace
            l = [""]
            l2 = cpt.to_text(l)
            self.assertTrue(isinstance(l2, list))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([u""], l2)
            l = ["", "123"]
            l2 = cpt.to_text(l)
            self.assertTrue(isinstance(l2, list))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([u"", u"123"], l2)
            l = ["", b'123', u"321"]
            l2 = cpt.to_text(l)
            self.assertTrue(isinstance(l2, list))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([u"", u"123", u"321"], l2)
            for i in l2:
                self.assertTrue(isinstance(i, unicode))
            # check list types, inplace
            l = [""]
            l2 = cpt.to_text(l, inplace=True)
            self.assertTrue(isinstance(l2, list))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([u""], l2)
            l = ["", "123"]
            l2 = cpt.to_text(l, inplace=True)
            self.assertTrue(isinstance(l2, list))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([u"", u"123"], l2)
            l = ["", b"123", u"321"]
            l2 = cpt.to_text(l, inplace=True)
            self.assertTrue(isinstance(l2, list))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([u"", u"123", u"321"], l2)
            # check set types, not inplace
            l = set("")
            l2 = cpt.to_text(l, inplace=False)
            self.assertTrue(isinstance(l2, set))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set(u""), l2)
            l = set([b"", b"123"])
            l2 = cpt.to_text(l, inplace=False)
            self.assertTrue(isinstance(l2, set))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set([u"", u"123"]), l2)
            l = set(["", b"123", u"321"])
            l2 = cpt.to_text(l, inplace=False)
            self.assertTrue(isinstance(l2, set))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set([u"", u"123", u"321"]), l2)
            for i in l2:
                self.assertTrue(isinstance(i, unicode))
            # check set types, inplace
            l = set("")
            l2 = cpt.to_text(l, inplace=True)
            self.assertTrue(isinstance(l2, set))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set(u""), l2)
            l = set([b"", b"123"])
            l2 = cpt.to_text(l, inplace=True)
            self.assertTrue(isinstance(l2, set))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set([u"", u"123"]), l2)
            l = set(["", b"123", u"321"])
            l2 = cpt.to_text(l, inplace=True)
            self.assertTrue(isinstance(l2, set))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set([u"", u"123", u"321"]), l2)
        elif six.PY3:
            self.assertIsNone(cpt.to_text(None))
            self.assertTrue(isinstance(cpt.to_text(str("")), str))
            self.assertTrue(isinstance(cpt.to_text(str("123")), str))
            self.assertTrue(isinstance(cpt.to_text(b""), str))
            self.assertTrue(isinstance(cpt.to_text(b""), str))
            self.assertTrue(isinstance(cpt.to_text(u""), str))
            self.assertTrue(isinstance(cpt.to_text(u""), str))
            self.assertEqual("", cpt.to_text(str("")))
            self.assertEqual("123", cpt.to_text(str("123")))
            self.assertEqual("", cpt.to_text(b""))
            self.assertEqual("123", cpt.to_text(b"123"))
            self.assertEqual("", cpt.to_text(u""))
            self.assertEqual("123", cpt.to_text(u"123"))
            # check list types, not inplace
            l = [""]
            l2 = cpt.to_text(l)
            self.assertTrue(isinstance(l2, list))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([""], l2)
            l = ["", "123"]
            l2 = cpt.to_text(l)
            self.assertTrue(isinstance(l2, list))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(["", "123"], l2)
            l = ["", b"123", u"321"]
            l2 = cpt.to_text(l)
            self.assertTrue(isinstance(l2, list))
            self.assertFalse(l is l2)
            # bytes elements decode to str, so the converted list differs
            self.assertNotEqual(l, l2)
            self.assertEqual(["", "123", "321"], l2)
            # check list types, inplace
            l = [""]
            l2 = cpt.to_text(l, inplace=True)
            self.assertTrue(isinstance(l2, list))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([""], l2)
            l = ["", b"123"]
            l2 = cpt.to_text(l, inplace=True)
            self.assertTrue(isinstance(l2, list))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(["", "123"], l2)
            l = ["", b"123", u"321"]
            l2 = cpt.to_text(l, inplace=True)
            self.assertTrue(isinstance(l2, list))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(["", "123", "321"], l2)
            for i in l2:
                self.assertTrue(isinstance(i, str))
            # check set types, not inplace
            l = set("")
            l2 = cpt.to_text(l, inplace=False)
            self.assertTrue(isinstance(l2, set))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set(""), l2)
            l = set([b"", b"123"])
            l2 = cpt.to_text(l, inplace=False)
            self.assertTrue(isinstance(l2, set))
            self.assertFalse(l is l2)
            self.assertNotEqual(l, l2)
            self.assertEqual(set(["", "123"]), l2)
            l = set(["", b"123", u"321"])
            l2 = cpt.to_text(l, inplace=False)
            self.assertTrue(isinstance(l2, set))
            self.assertFalse(l is l2)
            self.assertNotEqual(l, l2)
            self.assertEqual(set(["", "123", "321"]), l2)
            # check set types, inplace
            l = set("")
            l2 = cpt.to_text(l, inplace=True)
            self.assertTrue(isinstance(l2, set))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set(""), l2)
            l = set([b"", b"123"])
            l2 = cpt.to_text(l, inplace=True)
            self.assertTrue(isinstance(l2, set))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set(["", "123"]), l2)
            l = set(["", b"123", u"321"])
            l2 = cpt.to_text(l, inplace=True)
            self.assertTrue(isinstance(l2, set))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set(["", "123", "321"]), l2)
            for i in l2:
                self.assertTrue(isinstance(i, str))

    def test_to_bytes(self):
        """to_bytes converts str/bytes/unicode scalars, lists and sets.

        Mirrors test_to_text, with bytes as the expected element type.
        """
        # Only support python2.x and python3.x now
        self.assertTrue(six.PY2 | six.PY3)
        if six.PY2:
            # check None
            self.assertIsNone(cpt.to_bytes(None))
            # check all string related types
            self.assertTrue(isinstance(cpt.to_bytes(str("")), bytes))
            self.assertTrue(isinstance(cpt.to_bytes(str("123")), bytes))
            self.assertTrue(isinstance(cpt.to_bytes(b""), bytes))
            self.assertTrue(isinstance(cpt.to_bytes(b""), bytes))
            self.assertTrue(isinstance(cpt.to_bytes(u""), bytes))
            self.assertTrue(isinstance(cpt.to_bytes(u""), bytes))
            self.assertEqual(b"", cpt.to_bytes(str("")))
            self.assertEqual(b"123", cpt.to_bytes(str("123")))
            self.assertEqual(b"", cpt.to_bytes(b""))
            self.assertEqual(b"123", cpt.to_bytes(b"123"))
            self.assertEqual(b"", cpt.to_bytes(u""))
            self.assertEqual(b"123", cpt.to_bytes(u"123"))
            # check list types, not inplace
            l = [""]
            l2 = cpt.to_bytes(l)
            self.assertTrue(isinstance(l2, list))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([b""], l2)
            l = ["", "123"]
            l2 = cpt.to_bytes(l)
            self.assertTrue(isinstance(l2, list))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([b"", b"123"], l2)
            l = ["", b'123', u"321"]
            l2 = cpt.to_bytes(l)
            self.assertTrue(isinstance(l2, list))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([b"", b"123", b"321"], l2)
            for i in l2:
                self.assertTrue(isinstance(i, bytes))
            # check list types, inplace
            l = [""]
            l2 = cpt.to_bytes(l, inplace=True)
            self.assertTrue(isinstance(l2, list))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([b""], l2)
            l = ["", "123"]
            l2 = cpt.to_bytes(l, inplace=True)
            self.assertTrue(isinstance(l2, list))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([b"", b"123"], l2)
            l = ["", b"123", u"321"]
            l2 = cpt.to_bytes(l, inplace=True)
            self.assertTrue(isinstance(l2, list))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([b"", b"123", b"321"], l2)
            # check set types, not inplace
            l = set("")
            l2 = cpt.to_bytes(l, inplace=False)
            self.assertTrue(isinstance(l2, set))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set(b""), l2)
            l = set([b"", b"123"])
            l2 = cpt.to_bytes(l, inplace=False)
            self.assertTrue(isinstance(l2, set))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set([b"", b"123"]), l2)
            l = set(["", b"123", u"321"])
            l2 = cpt.to_bytes(l, inplace=False)
            self.assertTrue(isinstance(l2, set))
            self.assertFalse(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set([b"", b"123", b"321"]), l2)
            for i in l2:
                self.assertTrue(isinstance(i, bytes))
            # check set types, inplace
            l = set("")
            l2 = cpt.to_bytes(l, inplace=True)
            self.assertTrue(isinstance(l2, set))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set(b""), l2)
            l = set([b"", b"123"])
            l2 = cpt.to_bytes(l, inplace=True)
            self.assertTrue(isinstance(l2, set))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set([b"", b"123"]), l2)
            l = set(["", b"123", u"321"])
            l2 = cpt.to_bytes(l, inplace=True)
            self.assertTrue(isinstance(l2, set))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set([b"", b"123", b"321"]), l2)
        elif six.PY3:
            self.assertIsNone(cpt.to_bytes(None))
            self.assertTrue(isinstance(cpt.to_bytes(str("")), bytes))
            self.assertTrue(isinstance(cpt.to_bytes(str("123")), bytes))
            self.assertTrue(isinstance(cpt.to_bytes(b""), bytes))
            self.assertTrue(isinstance(cpt.to_bytes(b""), bytes))
            self.assertTrue(isinstance(cpt.to_bytes(u""), bytes))
            self.assertTrue(isinstance(cpt.to_bytes(u""), bytes))
            self.assertEqual(b"", cpt.to_bytes(str("")))
            self.assertEqual(b"123", cpt.to_bytes(str("123")))
            self.assertEqual(b"", cpt.to_bytes(b""))
            self.assertEqual(b"123", cpt.to_bytes(b"123"))
            self.assertEqual(b"", cpt.to_bytes(u""))
            self.assertEqual(b"123", cpt.to_bytes(u"123"))
            # check list types, not inplace
            l = [""]
            l2 = cpt.to_bytes(l)
            self.assertTrue(isinstance(l2, list))
            self.assertFalse(l is l2)
            # str elements encode to bytes, so the converted list differs
            self.assertNotEqual(l, l2)
            self.assertEqual([b""], l2)
            l = ["", "123"]
            l2 = cpt.to_bytes(l)
            self.assertTrue(isinstance(l2, list))
            self.assertFalse(l is l2)
            self.assertNotEqual(l, l2)
            self.assertEqual([b"", b"123"], l2)
            l = ["", b"123", u"321"]
            l2 = cpt.to_bytes(l)
            self.assertTrue(isinstance(l2, list))
            self.assertFalse(l is l2)
            self.assertNotEqual(l, l2)
            self.assertEqual([b"", b"123", b"321"], l2)
            # check list types, inplace
            l = [""]
            l2 = cpt.to_bytes(l, inplace=True)
            self.assertTrue(isinstance(l2, list))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([b""], l2)
            l = ["", b"123"]
            l2 = cpt.to_bytes(l, inplace=True)
            self.assertTrue(isinstance(l2, list))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([b"", b"123"], l2)
            l = ["", b"123", u"321"]
            l2 = cpt.to_bytes(l, inplace=True)
            self.assertTrue(isinstance(l2, list))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual([b"", b"123", b"321"], l2)
            for i in l2:
                self.assertTrue(isinstance(i, bytes))
            # check set types, not inplace
            l = set([""])
            l2 = cpt.to_bytes(l, inplace=False)
            self.assertTrue(isinstance(l2, set))
            self.assertFalse(l is l2)
            self.assertNotEqual(l, l2)
            self.assertEqual(set([b""]), l2)
            l = set([u"", u"123"])
            l2 = cpt.to_bytes(l, inplace=False)
            self.assertTrue(isinstance(l2, set))
            self.assertFalse(l is l2)
            self.assertNotEqual(l, l2)
            self.assertEqual(set([b"", b"123"]), l2)
            l = set(["", b"123", u"321"])
            l2 = cpt.to_bytes(l, inplace=False)
            self.assertTrue(isinstance(l2, set))
            self.assertFalse(l is l2)
            self.assertNotEqual(l, l2)
            self.assertEqual(set([b"", b"123", b"321"]), l2)
            # check set types, inplace
            l = set("")
            l2 = cpt.to_bytes(l, inplace=True)
            self.assertTrue(isinstance(l2, set))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set(b""), l2)
            l = set([u"", u"123"])
            l2 = cpt.to_bytes(l, inplace=True)
            self.assertTrue(isinstance(l2, set))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set([b"", b"123"]), l2)
            l = set(["", b"123", u"321"])
            l2 = cpt.to_bytes(l, inplace=True)
            self.assertTrue(isinstance(l2, set))
            self.assertTrue(l is l2)
            self.assertEqual(l, l2)
            self.assertEqual(set([b"", b"123", b"321"]), l2)
            for i in l2:
                self.assertTrue(isinstance(i, bytes))

    def test_round(self):
        """cpt.round uses round-half-away-from-zero, unlike Py3 round."""
        self.assertEqual(3.0, cpt.round(3.4))
        self.assertEqual(4.0, cpt.round(3.5))
        self.assertEqual(0.0, cpt.round(0.1))
        self.assertEqual(0.0, cpt.round(0.0))
        self.assertEqual(-0.0, cpt.round(-0.0))
        self.assertEqual(-0.0, cpt.round(-0.1))
        self.assertEqual(-3.0, cpt.round(-3.4))
        self.assertEqual(-4.0, cpt.round(-3.5))
        self.assertEqual(5.0, cpt.round(5))
        self.assertRaises(TypeError, cpt.round, None)

    def test_floor_division(self):
        """floor_division matches // semantics, incl. negative operands."""
        self.assertEqual(0.0, cpt.floor_division(3, 4))
        self.assertEqual(1.0, cpt.floor_division(4, 3))
        self.assertEqual(2.0, cpt.floor_division(6, 3))
        self.assertEqual(-2.0, cpt.floor_division(-4, 3))
        self.assertEqual(-2.0, cpt.floor_division(-6, 3))
        self.assertRaises(ZeroDivisionError, cpt.floor_division, 3, 0)
        self.assertRaises(TypeError, cpt.floor_division, None, None)

    def test_get_exception_message(self):
        """get_exception_message extracts the message from an exception."""
        exception_message = "test_message"
        self.assertRaises(AssertionError, cpt.get_exception_message, None)
        if six.PY2:
            # a bare string has no .message attribute on Python 2
            self.assertRaises(AttributeError, cpt.get_exception_message,
                              exception_message)
            try:
                raise RuntimeError(exception_message)
            except Exception as e:
                self.assertEqual(exception_message,
                                 cpt.get_exception_message(e))
                self.assertIsNotNone(e)
            try:
                raise Exception(exception_message)
            except Exception as e:
                self.assertEqual(exception_message,
                                 cpt.get_exception_message(e))
                self.assertIsNotNone(e)
        if six.PY3:
            try:
                raise RuntimeError(exception_message)
            except Exception as e:
                self.assertEqual(exception_message,
                                 cpt.get_exception_message(e))
                self.assertIsNotNone(e)
            try:
                raise Exception(exception_message)
            except Exception as e:
                self.assertEqual(exception_message,
                                 cpt.get_exception_message(e))
                self.assertIsNotNone(e)
# Run the suite when this test module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 39.996047 | 74 | 0.523322 |
from __future__ import print_function
import unittest
import paddle.compat as cpt
import six
class TestCompatible(unittest.TestCase):
def test_type(self):
if six.PY2:
self.assertEqual(cpt.int_type, int)
self.assertEqual(cpt.long_type, long)
else:
self.assertEqual(cpt.int_type, int)
self.assertEqual(cpt.long_type, int)
def test_to_text(self):
self.assertTrue(six.PY2 | six.PY3)
if six.PY2:
self.assertIsNone(cpt.to_text(None))
self.assertTrue(isinstance(cpt.to_text(str("")), unicode))
self.assertTrue(isinstance(cpt.to_text(str("123")), unicode))
self.assertTrue(isinstance(cpt.to_text(b""), unicode))
self.assertTrue(isinstance(cpt.to_text(b""), unicode))
self.assertTrue(isinstance(cpt.to_text(u""), unicode))
self.assertTrue(isinstance(cpt.to_text(u""), unicode))
self.assertEqual(u"", cpt.to_text(str("")))
self.assertEqual(u"123", cpt.to_text(str("123")))
self.assertEqual(u"", cpt.to_text(b""))
self.assertEqual(u"123", cpt.to_text(b"123"))
self.assertEqual(u"", cpt.to_text(u""))
self.assertEqual(u"123", cpt.to_text(u"123"))
l = [""]
l2 = cpt.to_text(l)
self.assertTrue(isinstance(l2, list))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual([u""], l2)
l = ["", "123"]
l2 = cpt.to_text(l)
self.assertTrue(isinstance(l2, list))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual([u"", u"123"], l2)
l = ["", b'123', u"321"]
l2 = cpt.to_text(l)
self.assertTrue(isinstance(l2, list))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual([u"", u"123", u"321"], l2)
for i in l2:
self.assertTrue(isinstance(i, unicode))
l = [""]
l2 = cpt.to_text(l, inplace=True)
self.assertTrue(isinstance(l2, list))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual([u""], l2)
l = ["", "123"]
l2 = cpt.to_text(l, inplace=True)
self.assertTrue(isinstance(l2, list))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual([u"", u"123"], l2)
l = ["", b"123", u"321"]
l2 = cpt.to_text(l, inplace=True)
self.assertTrue(isinstance(l2, list))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual([u"", u"123", u"321"], l2)
l = set("")
l2 = cpt.to_text(l, inplace=False)
self.assertTrue(isinstance(l2, set))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set(u""), l2)
l = set([b"", b"123"])
l2 = cpt.to_text(l, inplace=False)
self.assertTrue(isinstance(l2, set))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set([u"", u"123"]), l2)
l = set(["", b"123", u"321"])
l2 = cpt.to_text(l, inplace=False)
self.assertTrue(isinstance(l2, set))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set([u"", u"123", u"321"]), l2)
for i in l2:
self.assertTrue(isinstance(i, unicode))
l = set("")
l2 = cpt.to_text(l, inplace=True)
self.assertTrue(isinstance(l2, set))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set(u""), l2)
l = set([b"", b"123"])
l2 = cpt.to_text(l, inplace=True)
self.assertTrue(isinstance(l2, set))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set([u"", u"123"]), l2)
l = set(["", b"123", u"321"])
l2 = cpt.to_text(l, inplace=True)
self.assertTrue(isinstance(l2, set))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set([u"", u"123", u"321"]), l2)
elif six.PY3:
self.assertIsNone(cpt.to_text(None))
self.assertTrue(isinstance(cpt.to_text(str("")), str))
self.assertTrue(isinstance(cpt.to_text(str("123")), str))
self.assertTrue(isinstance(cpt.to_text(b""), str))
self.assertTrue(isinstance(cpt.to_text(b""), str))
self.assertTrue(isinstance(cpt.to_text(u""), str))
self.assertTrue(isinstance(cpt.to_text(u""), str))
self.assertEqual("", cpt.to_text(str("")))
self.assertEqual("123", cpt.to_text(str("123")))
self.assertEqual("", cpt.to_text(b""))
self.assertEqual("123", cpt.to_text(b"123"))
self.assertEqual("", cpt.to_text(u""))
self.assertEqual("123", cpt.to_text(u"123"))
l = [""]
l2 = cpt.to_text(l)
self.assertTrue(isinstance(l2, list))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual([""], l2)
l = ["", "123"]
l2 = cpt.to_text(l)
self.assertTrue(isinstance(l2, list))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual(["", "123"], l2)
l = ["", b"123", u"321"]
l2 = cpt.to_text(l)
self.assertTrue(isinstance(l2, list))
self.assertFalse(l is l2)
self.assertNotEqual(l, l2)
self.assertEqual(["", "123", "321"], l2)
l = [""]
l2 = cpt.to_text(l, inplace=True)
self.assertTrue(isinstance(l2, list))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual([""], l2)
l = ["", b"123"]
l2 = cpt.to_text(l, inplace=True)
self.assertTrue(isinstance(l2, list))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(["", "123"], l2)
l = ["", b"123", u"321"]
l2 = cpt.to_text(l, inplace=True)
self.assertTrue(isinstance(l2, list))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(["", "123", "321"], l2)
for i in l2:
self.assertTrue(isinstance(i, str))
l = set("")
l2 = cpt.to_text(l, inplace=False)
self.assertTrue(isinstance(l2, set))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set(""), l2)
l = set([b"", b"123"])
l2 = cpt.to_text(l, inplace=False)
self.assertTrue(isinstance(l2, set))
self.assertFalse(l is l2)
self.assertNotEqual(l, l2)
self.assertEqual(set(["", "123"]), l2)
l = set(["", b"123", u"321"])
l2 = cpt.to_text(l, inplace=False)
self.assertTrue(isinstance(l2, set))
self.assertFalse(l is l2)
self.assertNotEqual(l, l2)
self.assertEqual(set(["", "123", "321"]), l2)
l = set("")
l2 = cpt.to_text(l, inplace=True)
self.assertTrue(isinstance(l2, set))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set(""), l2)
l = set([b"", b"123"])
l2 = cpt.to_text(l, inplace=True)
self.assertTrue(isinstance(l2, set))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set(["", "123"]), l2)
l = set(["", b"123", u"321"])
l2 = cpt.to_text(l, inplace=True)
self.assertTrue(isinstance(l2, set))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set(["", "123", "321"]), l2)
for i in l2:
self.assertTrue(isinstance(i, str))
def test_to_bytes(self):
self.assertTrue(six.PY2 | six.PY3)
if six.PY2:
self.assertIsNone(cpt.to_bytes(None))
self.assertTrue(isinstance(cpt.to_bytes(str("")), bytes))
self.assertTrue(isinstance(cpt.to_bytes(str("123")), bytes))
self.assertTrue(isinstance(cpt.to_bytes(b""), bytes))
self.assertTrue(isinstance(cpt.to_bytes(b""), bytes))
self.assertTrue(isinstance(cpt.to_bytes(u""), bytes))
self.assertTrue(isinstance(cpt.to_bytes(u""), bytes))
self.assertEqual(b"", cpt.to_bytes(str("")))
self.assertEqual(b"123", cpt.to_bytes(str("123")))
self.assertEqual(b"", cpt.to_bytes(b""))
self.assertEqual(b"123", cpt.to_bytes(b"123"))
self.assertEqual(b"", cpt.to_bytes(u""))
self.assertEqual(b"123", cpt.to_bytes(u"123"))
l = [""]
l2 = cpt.to_bytes(l)
self.assertTrue(isinstance(l2, list))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual([b""], l2)
l = ["", "123"]
l2 = cpt.to_bytes(l)
self.assertTrue(isinstance(l2, list))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual([b"", b"123"], l2)
l = ["", b'123', u"321"]
l2 = cpt.to_bytes(l)
self.assertTrue(isinstance(l2, list))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual([b"", b"123", b"321"], l2)
for i in l2:
self.assertTrue(isinstance(i, bytes))
l = [""]
l2 = cpt.to_bytes(l, inplace=True)
self.assertTrue(isinstance(l2, list))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual([b""], l2)
l = ["", "123"]
l2 = cpt.to_bytes(l, inplace=True)
self.assertTrue(isinstance(l2, list))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual([b"", b"123"], l2)
l = ["", b"123", u"321"]
l2 = cpt.to_bytes(l, inplace=True)
self.assertTrue(isinstance(l2, list))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual([b"", b"123", b"321"], l2)
l = set("")
l2 = cpt.to_bytes(l, inplace=False)
self.assertTrue(isinstance(l2, set))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set(b""), l2)
l = set([b"", b"123"])
l2 = cpt.to_bytes(l, inplace=False)
self.assertTrue(isinstance(l2, set))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set([b"", b"123"]), l2)
l = set(["", b"123", u"321"])
l2 = cpt.to_bytes(l, inplace=False)
self.assertTrue(isinstance(l2, set))
self.assertFalse(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set([b"", b"123", b"321"]), l2)
for i in l2:
self.assertTrue(isinstance(i, bytes))
l = set("")
l2 = cpt.to_bytes(l, inplace=True)
self.assertTrue(isinstance(l2, set))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set(b""), l2)
l = set([b"", b"123"])
l2 = cpt.to_bytes(l, inplace=True)
self.assertTrue(isinstance(l2, set))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set([b"", b"123"]), l2)
l = set(["", b"123", u"321"])
l2 = cpt.to_bytes(l, inplace=True)
self.assertTrue(isinstance(l2, set))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set([b"", b"123", b"321"]), l2)
elif six.PY3:
self.assertIsNone(cpt.to_bytes(None))
self.assertTrue(isinstance(cpt.to_bytes(str("")), bytes))
self.assertTrue(isinstance(cpt.to_bytes(str("123")), bytes))
self.assertTrue(isinstance(cpt.to_bytes(b""), bytes))
self.assertTrue(isinstance(cpt.to_bytes(b""), bytes))
self.assertTrue(isinstance(cpt.to_bytes(u""), bytes))
self.assertTrue(isinstance(cpt.to_bytes(u""), bytes))
self.assertEqual(b"", cpt.to_bytes(str("")))
self.assertEqual(b"123", cpt.to_bytes(str("123")))
self.assertEqual(b"", cpt.to_bytes(b""))
self.assertEqual(b"123", cpt.to_bytes(b"123"))
self.assertEqual(b"", cpt.to_bytes(u""))
self.assertEqual(b"123", cpt.to_bytes(u"123"))
l = [""]
l2 = cpt.to_bytes(l)
self.assertTrue(isinstance(l2, list))
self.assertFalse(l is l2)
self.assertNotEqual(l, l2)
self.assertEqual([b""], l2)
l = ["", "123"]
l2 = cpt.to_bytes(l)
self.assertTrue(isinstance(l2, list))
self.assertFalse(l is l2)
self.assertNotEqual(l, l2)
self.assertEqual([b"", b"123"], l2)
l = ["", b"123", u"321"]
l2 = cpt.to_bytes(l)
self.assertTrue(isinstance(l2, list))
self.assertFalse(l is l2)
self.assertNotEqual(l, l2)
self.assertEqual([b"", b"123", b"321"], l2)
l = [""]
l2 = cpt.to_bytes(l, inplace=True)
self.assertTrue(isinstance(l2, list))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual([b""], l2)
l = ["", b"123"]
l2 = cpt.to_bytes(l, inplace=True)
self.assertTrue(isinstance(l2, list))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual([b"", b"123"], l2)
l = ["", b"123", u"321"]
l2 = cpt.to_bytes(l, inplace=True)
self.assertTrue(isinstance(l2, list))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual([b"", b"123", b"321"], l2)
for i in l2:
self.assertTrue(isinstance(i, bytes))
l = set([""])
l2 = cpt.to_bytes(l, inplace=False)
self.assertTrue(isinstance(l2, set))
self.assertFalse(l is l2)
self.assertNotEqual(l, l2)
self.assertEqual(set([b""]), l2)
l = set([u"", u"123"])
l2 = cpt.to_bytes(l, inplace=False)
self.assertTrue(isinstance(l2, set))
self.assertFalse(l is l2)
self.assertNotEqual(l, l2)
self.assertEqual(set([b"", b"123"]), l2)
l = set(["", b"123", u"321"])
l2 = cpt.to_bytes(l, inplace=False)
self.assertTrue(isinstance(l2, set))
self.assertFalse(l is l2)
self.assertNotEqual(l, l2)
self.assertEqual(set([b"", b"123", b"321"]), l2)
l = set("")
l2 = cpt.to_bytes(l, inplace=True)
self.assertTrue(isinstance(l2, set))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set(b""), l2)
l = set([u"", u"123"])
l2 = cpt.to_bytes(l, inplace=True)
self.assertTrue(isinstance(l2, set))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set([b"", b"123"]), l2)
l = set(["", b"123", u"321"])
l2 = cpt.to_bytes(l, inplace=True)
self.assertTrue(isinstance(l2, set))
self.assertTrue(l is l2)
self.assertEqual(l, l2)
self.assertEqual(set([b"", b"123", b"321"]), l2)
for i in l2:
self.assertTrue(isinstance(i, bytes))
def test_round(self):
    """cpt.round() must round halves away from zero (Python 2 semantics)."""
    # (expected, input) pairs covering positives, negatives, zero and an int.
    cases = [
        (3.0, 3.4),
        (4.0, 3.5),
        (0.0, 0.1),
        (0.0, 0.0),
        (-0.0, -0.0),
        (-0.0, -0.1),
        (-3.0, -3.4),
        (-4.0, -3.5),
        (5.0, 5),
    ]
    for expected, value in cases:
        self.assertEqual(expected, cpt.round(value))
    # Non-numeric input must be rejected.
    self.assertRaises(TypeError, cpt.round, None)
def test_floor_division(self):
    """cpt.floor_division() rounds the quotient towards negative infinity."""
    # (expected, numerator, denominator) triples, including negatives.
    for expected, num, denom in [
        (0.0, 3, 4),
        (1.0, 4, 3),
        (2.0, 6, 3),
        (-2.0, -4, 3),
        (-2.0, -6, 3),
    ]:
        self.assertEqual(expected, cpt.floor_division(num, denom))
    # Division by zero and non-numeric operands must raise.
    self.assertRaises(ZeroDivisionError, cpt.floor_division, 3, 0)
    self.assertRaises(TypeError, cpt.floor_division, None, None)
def test_get_exception_message(self):
    """cpt.get_exception_message() extracts the message from an exception.

    Passing a non-exception must fail: None is rejected with an
    AssertionError; on Python 2 a plain string lacks the expected
    attribute and raises AttributeError.
    """
    exception_message = "test_message"
    self.assertRaises(AssertionError, cpt.get_exception_message, None)
    if six.PY2:
        self.assertRaises(AttributeError, cpt.get_exception_message,
                          exception_message)
    try:
        raise RuntimeError(exception_message)
    except Exception as e:
        self.assertEqual(exception_message,
                         cpt.get_exception_message(e))
        self.assertIsNotNone(e)

    try:
        raise Exception(exception_message)
    except Exception as e:
        self.assertEqual(exception_message,
                         cpt.get_exception_message(e))
        self.assertIsNotNone(e)

    # NOTE(review): this PY3 block repeats the unconditional checks above
    # verbatim, so on Python 3 the same assertions run twice. Harmless,
    # but it looks like leftover duplication -- confirm and simplify.
    if six.PY3:
        try:
            raise RuntimeError(exception_message)
        except Exception as e:
            self.assertEqual(exception_message,
                             cpt.get_exception_message(e))
            self.assertIsNotNone(e)

        try:
            raise Exception(exception_message)
        except Exception as e:
            self.assertEqual(exception_message,
                             cpt.get_exception_message(e))
            self.assertIsNotNone(e)
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| true | true |
1c2c475e3ed7b3a3ebe61cc7556a74907074f8ef | 70,637 | py | Python | nova/compute/resource_tracker.py | mikalstill/nova | 99ca7d60b41f63446f86ed9be86440a2b250f330 | [
"Apache-2.0"
] | null | null | null | nova/compute/resource_tracker.py | mikalstill/nova | 99ca7d60b41f63446f86ed9be86440a2b250f330 | [
"Apache-2.0"
] | null | null | null | nova/compute/resource_tracker.py | mikalstill/nova | 99ca7d60b41f63446f86ed9be86440a2b250f330 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
import collections
import copy
from keystoneauth1 import exceptions as ks_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
import retrying
from nova.compute import claims
from nova.compute import monitors
from nova.compute import stats as compute_stats
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rc_fields as fields
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova import utils
from nova.virt import hardware
# Module-level handles to nova configuration and the module logger.
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)

# Name of the lock used to serialize every claim/usage mutation in this
# module (see the @utils.synchronized decorators below).
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
def _instance_in_resize_state(instance):
    """Returns True if the instance is in one of the resizing states.

    :param instance: `nova.objects.Instance` object
    """
    vm_state = instance.vm_state
    task_state = instance.task_state

    # A finished-but-unconfirmed resize always counts.
    if vm_state == vm_states.RESIZED:
        return True

    # Otherwise the instance must be active/stopped while a resize or
    # rebuild task is in flight.
    resize_tasks = task_states.resizing_states + task_states.rebuild_states
    return (vm_state in (vm_states.ACTIVE, vm_states.STOPPED)
            and task_state in resize_tasks)
def _is_trackable_migration(migration):
# Only look at resize/migrate migration and evacuation records
# NOTE(danms): RT should probably examine live migration
# records as well and do something smart. However, ignore
# those for now to avoid them being included in below calculations.
return migration.migration_type in ('resize', 'migration',
'evacuation')
def _normalize_inventory_from_cn_obj(inv_data, cn):
    """Helper function that injects various information from a compute node
    object into the inventory dict returned from the virt driver's
    get_inventory() method. This function allows us to marry information like
    *_allocation_ratio and reserved memory amounts that are in the
    compute_nodes DB table and that the virt driver doesn't know about with the
    information the virt driver *does* know about.

    Note that if the supplied inv_data contains allocation_ratio, reserved or
    other fields, we DO NOT override the value with that of the compute node.
    This is to ensure that the virt driver is the single source of truth
    regarding inventory information. For instance, the Ironic virt driver will
    always return a very specific inventory with allocation_ratios pinned to
    1.0.

    :param inv_data: Dict, keyed by resource class, of inventory information
                     returned from virt driver's get_inventory() method
    :param cn: `objects.ComputeNode` describing the compute node
    """
    # NOTE: the fix here is the docstring above -- it previously documented a
    # ``compute_node`` parameter that does not exist; the parameter is ``cn``.
    if fields.ResourceClass.VCPU in inv_data:
        cpu_inv = inv_data[fields.ResourceClass.VCPU]
        # setdefault() keeps any driver-provided value, per the note above.
        cpu_inv.setdefault('allocation_ratio', cn.cpu_allocation_ratio)
        cpu_inv.setdefault('reserved', CONF.reserved_host_cpus)

    if fields.ResourceClass.MEMORY_MB in inv_data:
        mem_inv = inv_data[fields.ResourceClass.MEMORY_MB]
        mem_inv.setdefault('allocation_ratio', cn.ram_allocation_ratio)
        mem_inv.setdefault('reserved', CONF.reserved_host_memory_mb)

    if fields.ResourceClass.DISK_GB in inv_data:
        disk_inv = inv_data[fields.ResourceClass.DISK_GB]
        disk_inv.setdefault('allocation_ratio', cn.disk_allocation_ratio)
        if 'reserved' not in disk_inv:
            # TODO(johngarbutt) We should either move to reserved_host_disk_gb
            # or start tracking DISK_MB.
            reserved_mb = CONF.reserved_host_disk_mb
            reserved_gb = compute_utils.convert_mb_to_ceil_gb(reserved_mb)
            disk_inv['reserved'] = reserved_gb
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver):
    """Initialize tracker state for *host*, backed by virt *driver*."""
    self.host = host
    self.driver = driver
    self.pci_tracker = None
    # Dict of objects.ComputeNode objects, keyed by nodename
    self.compute_nodes = {}
    # Dict of Stats objects, keyed by nodename
    self.stats = collections.defaultdict(compute_stats.Stats)
    # Set of UUIDs of instances tracked on this host.
    self.tracked_instances = set()
    # In-progress migrations being tracked, keyed by instance uuid.
    self.tracked_migrations = {}
    self.is_bfv = {}  # dict, keyed by instance uuid, to is_bfv boolean
    monitor_handler = monitors.MonitorHandler(self)
    self.monitors = monitor_handler.monitors
    # Last-seen ComputeNode values per nodename, used by _update() to
    # detect whether anything actually changed.
    self.old_resources = collections.defaultdict(objects.ComputeNode)
    self.scheduler_client = scheduler_client.SchedulerClient()
    self.reportclient = self.scheduler_client.reportclient
    # Configured allocation ratios; copied onto ComputeNode objects in
    # _copy_resources() unless left at their defaults.
    self.ram_allocation_ratio = CONF.ram_allocation_ratio
    self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
    self.disk_allocation_ratio = CONF.disk_allocation_ratio
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def instance_claim(self, context, instance, nodename, limits=None):
    """Indicate that some resources are needed for an upcoming compute
    instance build operation.

    This should be called before the compute node is about to perform
    an instance build operation that will consume additional resources.

    :param context: security context
    :param instance: instance to reserve resources for.
    :type instance: nova.objects.instance.Instance object
    :param nodename: The Ironic nodename selected by the scheduler
    :param limits: Dict of oversubscription limits for memory, disk,
                   and CPUs.
    :returns: A Claim ticket representing the reserved resources.  It can
              be used to revert the resource usage if an error occurs
              during the instance build.
    """
    if self.disabled(nodename):
        # instance_claim() was called before update_available_resource()
        # (which ensures that a compute node exists for nodename). We
        # shouldn't get here but in case we do, just set the instance's
        # host and nodename attribute (probably incorrect) and return a
        # NoopClaim.
        # TODO(jaypipes): Remove all the disabled junk from the resource
        # tracker. Servicegroup API-level active-checking belongs in the
        # nova-compute manager.
        self._set_instance_host_and_node(instance, nodename)
        return claims.NopClaim()

    # sanity checks: host/node must be unset until resources are claimed.
    if instance.host:
        LOG.warning("Host field should not be set on the instance "
                    "until resources have been claimed.",
                    instance=instance)

    if instance.node:
        LOG.warning("Node field should not be set on the instance "
                    "until resources have been claimed.",
                    instance=instance)

    # get the overhead required to build this instance:
    overhead = self.driver.estimate_instance_overhead(instance)
    LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
              "MB", {'flavor': instance.flavor.memory_mb,
                     'overhead': overhead['memory_mb']})
    LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
              "GB", {'flavor': instance.flavor.root_gb,
                     'overhead': overhead.get('disk_gb', 0)})
    LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
              "vCPU(s)", {'flavor': instance.flavor.vcpus,
                          'overhead': overhead.get('vcpus', 0)})

    cn = self.compute_nodes[nodename]
    pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
        context, instance.uuid)
    # Claim construction tests AND consumes the resources (see TODO in
    # _move_claim about separating those concerns).
    claim = claims.Claim(context, instance, nodename, self, cn,
                         pci_requests, overhead=overhead, limits=limits)

    # self._set_instance_host_and_node() will save instance to the DB
    # so set instance.numa_topology first.  We need to make sure
    # that numa_topology is saved while under COMPUTE_RESOURCE_SEMAPHORE
    # so that the resource audit knows about any cpus we've pinned.
    instance_numa_topology = claim.claimed_numa_topology
    instance.numa_topology = instance_numa_topology
    self._set_instance_host_and_node(instance, nodename)

    if self.pci_tracker:
        # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
        # in _update_usage_from_instance().
        self.pci_tracker.claim_instance(context, pci_requests,
                                        instance_numa_topology)

    # Mark resources in-use and update stats
    self._update_usage_from_instance(context, instance, nodename)

    elevated = context.elevated()
    # persist changes to the compute node:
    self._update(elevated, cn)

    return claim
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def rebuild_claim(self, context, instance, nodename, limits=None,
                  image_meta=None, migration=None):
    """Create a claim for a rebuild operation."""
    # A rebuild keeps the instance's current flavor; delegate to the
    # common move-claim path marked as an evacuation.
    return self._move_claim(
        context, instance, instance.flavor, nodename, migration,
        move_type='evacuation', limits=limits, image_meta=image_meta)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def resize_claim(self, context, instance, instance_type, nodename,
                 migration, image_meta=None, limits=None):
    """Create a claim for a resize or cold-migration move."""
    # Thin wrapper: the shared move-claim path infers the move type
    # from the migration record when move_type is not given.
    return self._move_claim(
        context, instance, instance_type, nodename, migration,
        limits=limits, image_meta=image_meta)
def _move_claim(self, context, instance, new_instance_type, nodename,
                migration, move_type=None, image_meta=None, limits=None):
    """Indicate that resources are needed for a move to this host.

    Move can be either a migrate/resize, live-migrate or an
    evacuate/rebuild operation.

    :param context: security context
    :param instance: instance object to reserve resources for
    :param new_instance_type: new instance_type being resized to
    :param nodename: The Ironic nodename selected by the scheduler
    :param image_meta: instance image metadata
    :param move_type: move type - can be one of 'migration', 'resize',
                      'live-migration', 'evacuate'
    :param limits: Dict of oversubscription limits for memory, disk,
                   and CPUs
    :param migration: A migration object if one was already created
                      elsewhere for this operation (otherwise None)
    :returns: A Claim ticket representing the reserved resources.  This
              should be turned into finalize a resource claim or free
              resources after the compute operation is finished.
    """
    image_meta = image_meta or {}
    # Reuse a migration record created by the caller, or make a new one.
    if migration:
        self._claim_existing_migration(migration, nodename)
    else:
        migration = self._create_migration(context, instance,
                                           new_instance_type,
                                           nodename, move_type)

    if self.disabled(nodename):
        # compute_driver doesn't support resource tracking, just
        # generate the migration record and continue the resize:
        return claims.NopClaim(migration=migration)

    # get memory overhead required to build this instance:
    overhead = self.driver.estimate_instance_overhead(new_instance_type)
    LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
              "MB", {'flavor': new_instance_type.memory_mb,
                     'overhead': overhead['memory_mb']})
    LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
              "GB", {'flavor': instance.flavor.root_gb,
                     'overhead': overhead.get('disk_gb', 0)})
    LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
              "vCPU(s)", {'flavor': instance.flavor.vcpus,
                          'overhead': overhead.get('vcpus', 0)})

    cn = self.compute_nodes[nodename]

    # TODO(moshele): we are recreating the pci requests even if
    # there was no change on resize. This will cause allocating
    # the old/new pci device in the resize phase. In the future
    # we would like to optimise this.
    new_pci_requests = pci_request.get_pci_requests_from_flavor(
        new_instance_type)
    new_pci_requests.instance_uuid = instance.uuid
    # PCI requests come from two sources: instance flavor and
    # SR-IOV ports. SR-IOV ports pci_request don't have an alias_name.
    # On resize merge the SR-IOV ports pci_requests with the new
    # instance flavor pci_requests.
    if instance.pci_requests:
        for request in instance.pci_requests.requests:
            if request.alias_name is None:
                new_pci_requests.requests.append(request)
    claim = claims.MoveClaim(context, instance, nodename,
                             new_instance_type, image_meta, self, cn,
                             new_pci_requests, overhead=overhead,
                             limits=limits)

    claim.migration = migration
    claimed_pci_devices_objs = []
    if self.pci_tracker:
        # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
        # in _update_usage_from_instance().
        claimed_pci_devices_objs = self.pci_tracker.claim_instance(
            context, new_pci_requests, claim.claimed_numa_topology)
    claimed_pci_devices = objects.PciDeviceList(
        objects=claimed_pci_devices_objs)

    # TODO(jaypipes): Move claimed_numa_topology out of the Claim's
    # constructor flow so the Claim constructor only tests whether
    # resources can be claimed, not consume the resources directly.
    # The migration context records both old and new resource sets so
    # either side can be cleaned up on confirm/revert.
    mig_context = objects.MigrationContext(
        context=context, instance_uuid=instance.uuid,
        migration_id=migration.id,
        old_numa_topology=instance.numa_topology,
        new_numa_topology=claim.claimed_numa_topology,
        old_pci_devices=instance.pci_devices,
        new_pci_devices=claimed_pci_devices,
        old_pci_requests=instance.pci_requests,
        new_pci_requests=new_pci_requests)

    instance.migration_context = mig_context
    instance.save()

    # Mark the resources in-use for the resize landing on this
    # compute host:
    self._update_usage_from_migration(context, instance, migration,
                                      nodename)
    elevated = context.elevated()
    self._update(elevated, cn)

    return claim
def _create_migration(self, context, instance, new_instance_type,
                      nodename, move_type=None):
    """Create a migration record for the upcoming resize.  This should
    be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
    claim will not be lost if the audit process starts.

    :param context: security context (elevated before writing the record)
    :param instance: instance being moved; supplies source host/node
    :param new_instance_type: target flavor for the move
    :param nodename: destination node name on this host
    :param move_type: explicit migration type; when None it is derived
        from the record itself via determine_migration_type()
    :returns: the persisted objects.Migration in 'pre-migrating' state
    """
    migration = objects.Migration(context=context.elevated())
    migration.dest_compute = self.host
    migration.dest_node = nodename
    migration.dest_host = self.driver.get_host_ip_addr()
    migration.old_instance_type_id = instance.flavor.id
    migration.new_instance_type_id = new_instance_type.id
    migration.status = 'pre-migrating'
    migration.instance_uuid = instance.uuid
    migration.source_compute = instance.host
    migration.source_node = instance.node
    if move_type:
        migration.migration_type = move_type
    else:
        # No explicit type given: infer resize vs. migration from the
        # old/new flavor ids recorded above.
        migration.migration_type = migration_obj.determine_migration_type(
            migration)
    migration.create()
    return migration
def _claim_existing_migration(self, migration, nodename):
    """Make an existing migration record count for resource tracking.

    If a migration record was created already before the request made
    it to this compute host, only set up the migration so it's included in
    resource tracking. This should be done while the
    COMPUTE_RESOURCES_SEMAPHORE is held.
    """
    # Point the record at this host/node and reset it to the state the
    # tracker expects before the move starts.
    migration.dest_compute = self.host
    migration.dest_node = nodename
    migration.dest_host = self.driver.get_host_ip_addr()
    migration.status = 'pre-migrating'
    migration.save()
def _set_instance_host_and_node(self, instance, nodename):
    """Tag the instance as belonging to this host.  This should be done
    while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource claim
    will not be lost if the audit process starts.
    """
    instance.host = self.host
    # launched_on is historical: the host the instance was first built on.
    instance.launched_on = self.host
    instance.node = nodename
    instance.save()
def _unset_instance_host_and_node(self, instance):
    """Untag the instance so it no longer belongs to the host.

    This should be done while the COMPUTE_RESOURCES_SEMAPHORE is held so
    the resource claim will not be lost if the audit process starts.
    """
    # NOTE: launched_on is deliberately left set (see
    # _set_instance_host_and_node); only the current placement is cleared.
    instance.host = None
    instance.node = None
    instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_instance_claim(self, context, instance, nodename):
    """Remove usage from the given instance.

    Reverses instance_claim(): releases the tracked usage, clears the
    claimed NUMA topology, and untags the instance from this host.
    """
    self._update_usage_from_instance(context, instance, nodename,
                                     is_removed=True)

    instance.clear_numa_topology()
    self._unset_instance_host_and_node(instance)

    # Persist the reduced usage to the compute node record.
    self._update(context.elevated(), self.compute_nodes[nodename])
def _drop_pci_devices(self, instance, nodename, prefix):
    """Free the instance's claimed PCI devices back to the tracker.

    *prefix* ('old_' or 'new_') selects which side of the migration
    context to release.
    """
    if not self.pci_tracker:
        return

    # free old/new allocated pci devices
    pci_devices = self._get_migration_context_resource(
        'pci_devices', instance, prefix=prefix)
    if not pci_devices:
        return

    for dev in pci_devices:
        self.pci_tracker.free_device(dev, instance)

    # Refresh the node's device pools to reflect the freed devices.
    pools = self.pci_tracker.stats.to_device_pools_obj()
    self.compute_nodes[nodename].pci_device_pools = pools
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def drop_move_claim(self, context, instance, nodename,
                    instance_type=None, prefix='new_'):
    """Remove usage for an incoming/outgoing migration.

    :param context: Security context.
    :param instance: The instance whose usage is to be removed.
    :param nodename: Host on which to remove usage. If the migration
                     completed successfully, this is normally the source.
                     If it did not complete successfully (failed or
                     reverted), this is normally the destination.
    :param instance_type: The flavor that determines the usage to remove.
                          If the migration completed successfully, this is
                          the old flavor to be removed from the source. If
                          the migration did not complete successfully, this
                          is the new flavor to be removed from the
                          destination.
    :param prefix: Prefix to use when accessing migration context
                   attributes. 'old_' or 'new_', with 'new_' being the
                   default.
    """
    if instance['uuid'] in self.tracked_migrations:
        migration = self.tracked_migrations.pop(instance['uuid'])

        if not instance_type:
            ctxt = context.elevated()
            instance_type = self._get_instance_type(ctxt, instance, prefix,
                                                    migration)

        if instance_type is not None:
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance, prefix=prefix)
            usage = self._get_usage_dict(
                instance_type, instance, numa_topology=numa_topology)
            self._drop_pci_devices(instance, nodename, prefix)
            # sign=-1 subtracts this usage from the node totals.
            self._update_usage(usage, nodename, sign=-1)

            ctxt = context.elevated()
            self._update(ctxt, self.compute_nodes[nodename])
    # Remove usage for an instance that is not tracked in migrations (such
    # as on the source node after a migration).
    # NOTE(lbeliveau): On resize on the same node, the instance is
    # included in both tracked_migrations and tracked_instances.
    elif (instance['uuid'] in self.tracked_instances):
        self.tracked_instances.remove(instance['uuid'])
        self._drop_pci_devices(instance, nodename, prefix)
        # TODO(lbeliveau): Validate if numa needs the same treatment.

        ctxt = context.elevated()
        self._update(ctxt, self.compute_nodes[nodename])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def update_usage(self, context, instance, nodename):
    """Update the resource usage and stats after a change in an
    instance

    No-op when the tracker is disabled for *nodename* or when the
    instance never made a claim on this host.
    """
    if self.disabled(nodename):
        return

    uuid = instance['uuid']

    # don't update usage for this instance unless it submitted a resource
    # claim first:
    if uuid in self.tracked_instances:
        self._update_usage_from_instance(context, instance, nodename)
        self._update(context.elevated(), self.compute_nodes[nodename])
def disabled(self, nodename):
    """Return True when this tracker cannot operate on *nodename*.

    True when no ComputeNode is cached for the node or the virt driver
    reports the node as unavailable.
    """
    if nodename not in self.compute_nodes:
        return True
    return not self.driver.node_is_available(nodename)
def _check_for_nodes_rebalance(self, context, resources, nodename):
    """Check if nodes rebalance has happened.

    The ironic driver maintains a hash ring mapping bare metal nodes
    to compute nodes. If a compute dies, the hash ring is rebuilt, and
    some of its bare metal nodes (more precisely, those not in ACTIVE
    state) are assigned to other computes.

    This method checks for this condition and adjusts the database
    accordingly.

    :param context: security context
    :param resources: initial values
    :param nodename: node name
    :returns: True if a suitable compute node record was found, else False
    """
    if not self.driver.rebalances_nodes:
        return False

    # Its possible ironic just did a node re-balance, so let's
    # check if there is a compute node that already has the correct
    # hypervisor_hostname. We can re-use that rather than create a
    # new one and have to move existing placement allocations
    cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
        context, nodename)

    if len(cn_candidates) == 1:
        # Exactly one existing record: adopt it onto this host.
        cn = cn_candidates[0]
        LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
                 {"name": nodename, "old": cn.host, "new": self.host})
        cn.host = self.host
        self.compute_nodes[nodename] = cn
        self._copy_resources(cn, resources)
        self._setup_pci_tracker(context, cn, resources)
        self._update(context, cn)
        return True
    elif len(cn_candidates) > 1:
        # Ambiguous: refuse to adopt and ask the operator to clean up.
        LOG.error(
            "Found more than one ComputeNode for nodename %s. "
            "Please clean up the orphaned ComputeNode records in your DB.",
            nodename)

    return False
def _init_compute_node(self, context, resources):
    """Initialize the compute node if it does not already exist.

    The resource tracker will be inoperable if compute_node
    is not defined. The compute_node will remain undefined if
    we fail to create it or if there is no associated service
    registered.

    If this method has to create a compute node it needs initial
    values - these come from resources.

    :param context: security context
    :param resources: initial values
    :returns: True if a new compute_nodes table record was created,
              False otherwise
    """
    nodename = resources['hypervisor_hostname']

    # if there is already a compute node just use resources
    # to initialize
    if nodename in self.compute_nodes:
        cn = self.compute_nodes[nodename]
        self._copy_resources(cn, resources)
        self._setup_pci_tracker(context, cn, resources)
        return False

    # now try to get the compute node record from the
    # database. If we get one we use resources to initialize
    cn = self._get_compute_node(context, nodename)
    if cn:
        self.compute_nodes[nodename] = cn
        self._copy_resources(cn, resources)
        self._setup_pci_tracker(context, cn, resources)
        return False

    # A record may exist under another host after an ironic rebalance.
    if self._check_for_nodes_rebalance(context, resources, nodename):
        return False

    # there was no local copy and none in the database
    # so we need to create a new compute node. This needs
    # to be initialized with resource values.
    cn = objects.ComputeNode(context)
    cn.host = self.host
    # initial=True applies CONF.initial_*_allocation_ratio defaults.
    self._copy_resources(cn, resources, initial=True)
    self.compute_nodes[nodename] = cn
    cn.create()
    LOG.info('Compute node record created for '
             '%(host)s:%(node)s with uuid: %(uuid)s',
             {'host': self.host, 'node': nodename, 'uuid': cn.uuid})

    self._setup_pci_tracker(context, cn, resources)
    return True
def _setup_pci_tracker(self, context, compute_node, resources):
    """Lazily create the PCI device tracker and refresh device pools.

    On first call, builds the tracker for this compute node and seeds
    it from the hypervisor's reported passthrough devices (consuming
    that key from *resources*). Always re-publishes the current device
    pools onto the compute node object.
    """
    if not self.pci_tracker:
        n_id = compute_node.id
        self.pci_tracker = pci_manager.PciDevTracker(context, node_id=n_id)
        if 'pci_passthrough_devices' in resources:
            dev_json = resources.pop('pci_passthrough_devices')
            self.pci_tracker.update_devices_from_hypervisor_resources(
                dev_json)

    dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
    compute_node.pci_device_pools = dev_pools_obj
def _copy_resources(self, compute_node, resources, initial=False):
    """Copy resource values to supplied compute_node.

    :param compute_node: `objects.ComputeNode` to update in place
    :param resources: dict of values from the virt driver
    :param initial: True only when the node record is first created,
        which selects the CONF.initial_* allocation ratios
    """
    nodename = resources['hypervisor_hostname']
    stats = self.stats[nodename]
    # purge old stats and init with anything passed in by the driver
    # NOTE(danms): Preserve 'failed_builds' across the stats clearing,
    # as that is not part of resources
    # TODO(danms): Stop doing this when we get a column to store this
    # directly
    prev_failed_builds = stats.get('failed_builds', 0)
    stats.clear()
    stats['failed_builds'] = prev_failed_builds
    stats.digest_stats(resources.get('stats'))
    compute_node.stats = stats

    # Update the allocation ratios for the related ComputeNode object
    # but only if the configured values are not the default; the
    # ComputeNode._from_db_object method takes care of providing default
    # allocation ratios when the config is left at the default, so
    # we'll really end up with something like a
    # ComputeNode.cpu_allocation_ratio of 16.0. We want to avoid
    # resetting the ComputeNode fields to None because that will make
    # the _resource_change method think something changed when really it
    # didn't.
    # NOTE(yikun): The CONF.initial_(cpu|ram|disk)_allocation_ratio would
    # be used when we initialize the compute node object, that means the
    # ComputeNode.(cpu|ram|disk)_allocation_ratio will be set to
    # CONF.initial_(cpu|ram|disk)_allocation_ratio when initial flag is
    # True.
    for res in ('cpu', 'disk', 'ram'):
        attr = '%s_allocation_ratio' % res
        if initial:
            conf_alloc_ratio = getattr(CONF, 'initial_%s' % attr)
        else:
            conf_alloc_ratio = getattr(self, attr)
        # NOTE(yikun): In Stein version, we change the default value of
        # (cpu|ram|disk)_allocation_ratio from 0.0 to None, but we still
        # should allow 0.0 to keep compatibility, and this 0.0 condition
        # will be removed in the next version (T version).
        if conf_alloc_ratio not in (0.0, None):
            setattr(compute_node, attr, conf_alloc_ratio)

    # now copy rest to compute_node
    compute_node.update_from_virt_driver(resources)
def remove_node(self, nodename):
    """Handle node removal/rebalance.

    Clean up any stored data about a compute node no longer
    managed by this host.
    """
    # Drop the node's entry from every per-node mapping we keep.
    for per_node_map in (self.stats, self.compute_nodes,
                         self.old_resources):
        per_node_map.pop(nodename, None)
def _get_host_metrics(self, context, nodename):
    """Get the metrics from monitors and
    notify information to message bus.

    :returns: list of metric dicts (possibly empty) suitable for
        JSON-serializing into ComputeNode.metrics
    """
    metrics = objects.MonitorMetricList()
    metrics_info = {}
    for monitor in self.monitors:
        try:
            monitor.populate_metrics(metrics)
        except NotImplementedError:
            # A monitor may not be supported by this virt driver; that
            # is expected and only worth a debug message.
            LOG.debug("The compute driver doesn't support host "
                      "metrics for %(mon)s", {'mon': monitor})
        except Exception as exc:
            # Best-effort: a failing monitor must not break the audit.
            LOG.warning("Cannot get the metrics from %(mon)s; "
                        "error: %(exc)s",
                        {'mon': monitor, 'exc': exc})
    # TODO(jaypipes): Remove this when compute_node.metrics doesn't need
    # to be populated as a JSONified string.
    metric_list = metrics.to_list()
    if len(metric_list):
        metrics_info['nodename'] = nodename
        metrics_info['metrics'] = metric_list
        metrics_info['host'] = self.host
        metrics_info['host_ip'] = CONF.my_ip
        notifier = rpc.get_notifier(service='compute', host=nodename)
        notifier.info(context, 'compute.metrics.update', metrics_info)
        compute_utils.notify_about_metrics_update(
            context, self.host, CONF.my_ip, nodename, metrics)
    return metric_list
def update_available_resource(self, context, nodename, startup=False):
    """Override in-memory calculations of compute node resource usage based
    on data audited from the hypervisor layer.

    Add in resource claims in progress to account for operations that have
    declared a need for resources, but not necessarily retrieved them from
    the hypervisor layer yet.

    :param nodename: Temporary parameter representing the Ironic resource
                     node. This parameter will be removed once Ironic
                     baremetal resource nodes are handled like any other
                     resource in the system.
    :param startup: Boolean indicating whether we're running this on
                    on startup (True) or periodic (False).
    """
    LOG.debug("Auditing locally available compute resources for "
              "%(host)s (node: %(node)s)",
              {'node': nodename,
               'host': self.host})
    resources = self.driver.get_available_resource(nodename)
    # NOTE(jaypipes): The resources['hypervisor_hostname'] field now
    # contains a non-None value, even for non-Ironic nova-compute hosts. It
    # is this value that will be populated in the compute_nodes table.
    resources['host_ip'] = CONF.my_ip

    # We want the 'cpu_info' to be None from the POV of the
    # virt driver, but the DB requires it to be non-null so
    # just force it to empty string
    if "cpu_info" not in resources or resources["cpu_info"] is None:
        resources["cpu_info"] = ''

    self._verify_resources(resources)

    self._report_hypervisor_resource_view(resources)

    # The heavy lifting (claims, migrations, orphans, DB update) is
    # done under the compute-resources lock.
    self._update_available_resource(context, resources, startup=startup)
def _pair_instances_to_migrations(self, migrations, instance_by_uuid):
    """Attach already-fetched instances to their migration records.

    Avoids a lazy-load per migration later; migrations whose instance
    lives on another host are left untouched.
    """
    for migration in migrations:
        uuid = migration.instance_uuid
        if uuid in instance_by_uuid:
            migration.instance = instance_by_uuid[uuid]
            continue
        # NOTE(danms): If this happens, we don't set it here, and
        # let the code either fail or lazy-load the instance later
        # which is what happened before we added this optimization.
        # NOTE(tdurakov) this situation is possible for resize/cold
        # migration when migration is finished but haven't yet
        # confirmed/reverted in that case instance already changed host
        # to destination and no matching happens
        LOG.debug('Migration for instance %(uuid)s refers to '
                  'another host\'s instance!',
                  {'uuid': uuid})
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def _update_available_resource(self, context, resources, startup=False):
    """Recompute and persist this node's usage under the resource lock."""

    # initialize the compute node object, creating it
    # if it does not already exist.
    is_new_compute_node = self._init_compute_node(context, resources)

    nodename = resources['hypervisor_hostname']

    # if we could not init the compute node the tracker will be
    # disabled and we should quit now
    if self.disabled(nodename):
        return

    # Grab all instances assigned to this node:
    instances = objects.InstanceList.get_by_host_and_node(
        context, self.host, nodename,
        expected_attrs=['system_metadata',
                        'numa_topology',
                        'flavor', 'migration_context'])

    # Now calculate usage based on instance utilization:
    instance_by_uuid = self._update_usage_from_instances(
        context, instances, nodename)

    # Grab all in-progress migrations:
    migrations = objects.MigrationList.get_in_progress_by_host_and_node(
        context, self.host, nodename)

    self._pair_instances_to_migrations(migrations, instance_by_uuid)
    self._update_usage_from_migrations(context, migrations, nodename)

    # A new compute node means there won't be a resource provider yet since
    # that would be created via the _update() call below, and if there is
    # no resource provider then there are no allocations against it.
    if not is_new_compute_node:
        self._remove_deleted_instances_allocations(
            context, self.compute_nodes[nodename], migrations,
            instance_by_uuid)

    # Detect and account for orphaned instances that may exist on the
    # hypervisor, but are not in the DB:
    orphans = self._find_orphaned_instances()
    self._update_usage_from_orphans(orphans, nodename)

    cn = self.compute_nodes[nodename]

    # NOTE(yjiang5): Because pci device tracker status is not cleared in
    # this periodic task, and also because the resource tracker is not
    # notified when instances are deleted, we need remove all usages
    # from deleted instances.
    self.pci_tracker.clean_usage(instances, migrations, orphans)
    dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
    cn.pci_device_pools = dev_pools_obj

    self._report_final_resource_view(nodename)

    metrics = self._get_host_metrics(context, nodename)
    # TODO(pmurray): metrics should not be a json string in ComputeNode,
    # but it is. This should be changed in ComputeNode
    cn.metrics = jsonutils.dumps(metrics)

    # update the compute_node
    self._update(context, cn, startup=startup)
    LOG.debug('Compute_service record updated for %(host)s:%(node)s',
              {'host': self.host, 'node': nodename})
def _get_compute_node(self, context, nodename):
    """Fetch the ComputeNode record for this host/nodename pair.

    Logs a warning and returns None (implicitly) when no record exists.
    """
    try:
        node = objects.ComputeNode.get_by_host_and_nodename(
            context, self.host, nodename)
    except exception.NotFound:
        LOG.warning("No compute node record for %(host)s:%(node)s",
                    {'host': self.host, 'node': nodename})
        return None
    return node
def _report_hypervisor_resource_view(self, resources):
    """Log the hypervisor's view of free resources.

    This is just a snapshot of resource usage recorded by the
    virt driver: free memory, free disk, free CPUs and assignable
    PCI devices.
    """
    total_vcpus = resources['vcpus']
    if total_vcpus:
        free_vcpus = total_vcpus - resources['vcpus_used']
    else:
        # Driver did not report a vCPU count.
        free_vcpus = 'unknown'
    view = {
        'node': resources['hypervisor_hostname'],
        'free_ram': resources['memory_mb'] - resources['memory_mb_used'],
        'free_disk': resources['local_gb'] - resources['local_gb_used'],
        'free_vcpus': free_vcpus,
        'pci_devices': resources.get('pci_passthrough_devices'),
    }
    LOG.debug("Hypervisor/Node resource view: "
              "name=%(node)s "
              "free_ram=%(free_ram)sMB "
              "free_disk=%(free_disk)sGB "
              "free_vcpus=%(free_vcpus)s "
              "pci_devices=%(pci_devices)s", view)
def _report_final_resource_view(self, nodename):
    """Report final calculate of physical memory, used virtual memory,
    disk, usable vCPUs, used virtual CPUs and PCI devices,
    including instance calculations and in-progress resource claims.
    These values will be exposed via the compute node table to the
    scheduler.
    """
    cn = self.compute_nodes[nodename]
    total_vcpus = cn.vcpus
    if total_vcpus:
        used_vcpus = cn.vcpus_used
        LOG.debug("Total usable vcpus: %(tcpu)s, "
                  "total allocated vcpus: %(ucpu)s",
                  {'tcpu': total_vcpus,
                   'ucpu': used_vcpus})
        shown_vcpus = total_vcpus
    else:
        # No vCPU data from the driver; report zeros.
        shown_vcpus = 0
        used_vcpus = 0
    pci_stats = list(cn.pci_device_pools) if cn.pci_device_pools else []
    LOG.debug("Final resource view: "
              "name=%(node)s "
              "phys_ram=%(phys_ram)sMB "
              "used_ram=%(used_ram)sMB "
              "phys_disk=%(phys_disk)sGB "
              "used_disk=%(used_disk)sGB "
              "total_vcpus=%(total_vcpus)s "
              "used_vcpus=%(used_vcpus)s "
              "pci_stats=%(pci_stats)s",
              {'node': nodename,
               'phys_ram': cn.memory_mb,
               'used_ram': cn.memory_mb_used,
               'phys_disk': cn.local_gb,
               'used_disk': cn.local_gb_used,
               'total_vcpus': shown_vcpus,
               'used_vcpus': used_vcpus,
               'pci_stats': pci_stats})
def _resource_change(self, compute_node):
    """Return True when compute_node differs from our last snapshot.

    Comparison ignores 'updated_at'.  When a change is detected, a
    deep copy of the node is cached as the new snapshot.
    """
    nodename = compute_node.hypervisor_hostname
    previous = self.old_resources[nodename]
    unchanged = obj_base.obj_equal_prims(
        compute_node, previous, ['updated_at'])
    if unchanged:
        return False
    self.old_resources[nodename] = copy.deepcopy(compute_node)
    return True
def _update_to_placement(self, context, compute_node, startup):
    """Send resource and inventory changes to placement.

    Tries, in order: the driver's update_provider_tree(), then
    get_inventory(), then the legacy update_compute_node() path,
    falling back on NotImplementedError at each step.

    :param context: security context
    :param compute_node: ComputeNode object whose inventory to report
    :param startup: True when called during compute service startup;
        only then is a ReshapeNeeded signal from the driver handled
        here rather than re-raised.
    """
    # NOTE(jianghuaw): Some resources(e.g. VGPU) are not saved in the
    # object of compute_node; instead the inventory data for these
    # resource is reported by driver's get_inventory(). So even there
    # is no resource change for compute_node as above, we need proceed
    # to get inventory and use scheduler_client interfaces to update
    # inventory to placement. It's scheduler_client's responsibility to
    # ensure the update request to placement only happens when inventory
    # is changed.
    nodename = compute_node.hypervisor_hostname
    # Persist the stats to the Scheduler
    # First try update_provider_tree
    # Retrieve the provider tree associated with this compute node. If
    # it doesn't exist yet, this will create it with a (single, root)
    # provider corresponding to the compute node.
    reportclient = self.scheduler_client.reportclient
    prov_tree = reportclient.get_provider_tree_and_ensure_root(
        context, compute_node.uuid, name=compute_node.hypervisor_hostname)
    # Let the virt driver rearrange the provider tree and set/update
    # the inventory, traits, and aggregates throughout.
    try:
        allocs = None
        try:
            self.driver.update_provider_tree(prov_tree, nodename)
        except exception.ReshapeNeeded:
            if not startup:
                # This isn't supposed to happen during periodic, so raise
                # it up; the compute manager will treat it specially.
                raise
            LOG.info("Performing resource provider inventory and "
                     "allocation data migration during compute service "
                     "startup or fast-forward upgrade.")
            # Fetch current allocations for the whole tree and let the
            # driver reshape them along with the inventory.
            allocs = reportclient.get_allocations_for_provider_tree(
                context, nodename)
            self.driver.update_provider_tree(prov_tree, nodename,
                                             allocations=allocs)
        # Flush any changes. If we processed ReshapeNeeded above, allocs is
        # not None, and this will hit placement's POST /reshaper route.
        reportclient.update_from_provider_tree(context, prov_tree,
                                               allocations=allocs)
    except NotImplementedError:
        # update_provider_tree isn't implemented yet - try get_inventory
        try:
            inv_data = self.driver.get_inventory(nodename)
            _normalize_inventory_from_cn_obj(inv_data, compute_node)
            self.scheduler_client.set_inventory_for_provider(
                context,
                compute_node.uuid,
                compute_node.hypervisor_hostname,
                inv_data,
            )
        except NotImplementedError:
            # Eventually all virt drivers will return an inventory dict in
            # the format that the placement API expects and we'll be able
            # to remove this code branch
            self.scheduler_client.update_compute_node(context,
                                                      compute_node)
# Placement signals a racing provider update with
# ResourceProviderUpdateConflict; retry a few times before giving up.
@retrying.retry(stop_max_attempt_number=4,
                retry_on_exception=lambda e: isinstance(
                    e, exception.ResourceProviderUpdateConflict))
def _update(self, context, compute_node, startup=False):
    """Update partial stats locally and populate them to Scheduler."""
    if self._resource_change(compute_node):
        # If the compute_node's resource changed, update to DB.
        # NOTE(jianghuaw): Once we completely move to use get_inventory()
        # for all resource provider's inv data. We can remove this check.
        # At the moment we still need this check and save compute_node.
        compute_node.save()
    self._update_to_placement(context, compute_node, startup)
    if self.pci_tracker:
        # Persist any PCI device state changes alongside the node update.
        self.pci_tracker.save(context)
def _update_usage(self, usage, nodename, sign=1):
    """Apply (sign=1) or remove (sign=-1) a usage record's resources
    against the tracked compute node, including driver-estimated
    overhead, then refresh the derived free-resource, running-VM and
    NUMA fields.
    """
    overhead = self.driver.estimate_instance_overhead(usage)
    mem_mb = usage['memory_mb'] + overhead['memory_mb']
    root_gb = usage.get('root_gb', 0) + overhead.get('disk_gb', 0)
    vcpus = usage.get('vcpus', 0) + overhead.get('vcpus', 0)
    cn = self.compute_nodes[nodename]
    cn.memory_mb_used += sign * mem_mb
    cn.local_gb_used += sign * root_gb
    cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
    # Swap is recorded in MB; node disk usage is in GB.
    cn.local_gb_used += sign * usage.get('swap', 0) / 1024
    cn.vcpus_used += sign * vcpus
    # free ram and disk may be negative, depending on policy:
    cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
    cn.free_disk_gb = cn.local_gb - cn.local_gb_used
    cn.running_vms = self.stats[nodename].num_instances
    # Recalculate NUMA usage; sign == -1 means we are freeing.
    cn.numa_topology = hardware.get_host_numa_usage_from_instance(
        cn, usage, sign == -1)
def _get_migration_context_resource(self, resource, instance,
prefix='new_'):
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
return None
def _update_usage_from_migration(self, context, instance, migration,
                                 nodename):
    """Update usage for a single migration. The record may
    represent an incoming or outbound migration.

    Picks the relevant flavor ('new_' or 'old_') and NUMA topology
    depending on the migration direction, accounts its usage against
    the node, and records the migration in self.tracked_migrations.
    """
    if not _is_trackable_migration(migration):
        return
    uuid = migration.instance_uuid
    LOG.info("Updating resource usage from migration", instance_uuid=uuid)
    incoming = (migration.dest_compute == self.host and
                migration.dest_node == nodename)
    outbound = (migration.source_compute == self.host and
                migration.source_node == nodename)
    same_node = (incoming and outbound)
    # Already accounted for by _update_usage_from_instances()?
    tracked = uuid in self.tracked_instances
    itype = None
    numa_topology = None
    # sign == 1 additionally claims PCI devices below.
    sign = 0
    if same_node:
        # Same node resize. Record usage for the 'new_' resources. This
        # is executed on resize_claim().
        if (instance['instance_type_id'] ==
                migration.old_instance_type_id):
            itype = self._get_instance_type(context, instance, 'new_',
                                            migration)
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance)
            # Allocate pci device(s) for the instance.
            sign = 1
        else:
            # The instance is already set to the new flavor (this is done
            # by the compute manager on finish_resize()), hold space for a
            # possible revert to the 'old_' resources.
            # NOTE(lbeliveau): When the periodic audit timer gets
            # triggered, the compute usage gets reset. The usage for an
            # instance that is migrated to the new flavor but not yet
            # confirmed/reverted will first get accounted for by
            # _update_usage_from_instances(). This method will then be
            # called, and we need to account for the '_old' resources
            # (just in case).
            itype = self._get_instance_type(context, instance, 'old_',
                                            migration)
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance, prefix='old_')
    elif incoming and not tracked:
        # instance has not yet migrated here:
        itype = self._get_instance_type(context, instance, 'new_',
                                        migration)
        numa_topology = self._get_migration_context_resource(
            'numa_topology', instance)
        # Allocate pci device(s) for the instance.
        sign = 1
    elif outbound and not tracked:
        # instance migrated, but record usage for a possible revert:
        itype = self._get_instance_type(context, instance, 'old_',
                                        migration)
        numa_topology = self._get_migration_context_resource(
            'numa_topology', instance, prefix='old_')
    if itype:
        cn = self.compute_nodes[nodename]
        usage = self._get_usage_dict(
            itype, instance, numa_topology=numa_topology)
        if self.pci_tracker and sign:
            self.pci_tracker.update_pci_for_instance(
                context, instance, sign=sign)
        self._update_usage(usage, nodename)
        if self.pci_tracker:
            obj = self.pci_tracker.stats.to_device_pools_obj()
            cn.pci_device_pools = obj
        else:
            obj = objects.PciDevicePoolList()
            cn.pci_device_pools = obj
        self.tracked_migrations[uuid] = migration
def _update_usage_from_migrations(self, context, migrations, nodename):
    """Rebuild migration usage for this node from *migrations*.

    Clears self.tracked_migrations, filters out bad or stale records,
    keeps only the most recently updated migration per instance and
    accounts usage for each surviving one.
    """
    filtered = {}
    instances = {}
    self.tracked_migrations.clear()
    # do some defensive filtering against bad migrations records in the
    # database:
    for migration in migrations:
        uuid = migration.instance_uuid
        try:
            if uuid not in instances:
                # Lazy-loads the instance; may raise InstanceNotFound.
                instances[uuid] = migration.instance
        except exception.InstanceNotFound as e:
            # migration referencing deleted instance
            LOG.debug('Migration instance not found: %s', e)
            continue
        # skip migration if instance isn't in a resize state:
        if not _instance_in_resize_state(instances[uuid]):
            LOG.warning("Instance not resizing, skipping migration.",
                        instance_uuid=uuid)
            continue
        # filter to most recently updated migration for each instance:
        other_migration = filtered.get(uuid, None)
        # NOTE(claudiub): In Python 3, you cannot compare NoneTypes.
        if other_migration:
            om = other_migration
            other_time = om.updated_at or om.created_at
            migration_time = migration.updated_at or migration.created_at
            if migration_time > other_time:
                filtered[uuid] = migration
        else:
            filtered[uuid] = migration
    for migration in filtered.values():
        instance = instances[migration.instance_uuid]
        # Skip migration (and mark it as error) if it doesn't match the
        # instance migration id.
        # This can happen if we have a stale migration record.
        # We want to proceed if instance.migration_context is None
        if (instance.migration_context is not None and
                instance.migration_context.migration_id != migration.id):
            LOG.info("Current instance migration %(im)s doesn't match "
                     "migration %(m)s, marking migration as error. "
                     "This can occur if a previous migration for this "
                     "instance did not complete.",
                     {'im': instance.migration_context.migration_id,
                      'm': migration.id})
            migration.status = "error"
            migration.save()
            continue
        try:
            self._update_usage_from_migration(context, instance, migration,
                                              nodename)
        except exception.FlavorNotFound:
            LOG.warning("Flavor could not be found, skipping migration.",
                        instance_uuid=instance.uuid)
            continue
def _update_usage_from_instance(self, context, instance, nodename,
                                is_removed=False):
    """Update usage for a single instance.

    Adds (sign=1) resources for newly-tracked instances, subtracts
    (sign=-1) for removed/deleted ones, and refreshes the node's stats,
    workload and PCI device pools either way.
    """
    uuid = instance['uuid']
    is_new_instance = uuid not in self.tracked_instances
    # NOTE(sfinucan): Both brand new instances as well as instances that
    # are being unshelved will have is_new_instance == True
    is_removed_instance = not is_new_instance and (is_removed or
        instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)
    if is_new_instance:
        self.tracked_instances.add(uuid)
        sign = 1
    if is_removed_instance:
        self.tracked_instances.remove(uuid)
        sign = -1
    cn = self.compute_nodes[nodename]
    stats = self.stats[nodename]
    stats.update_stats_for_instance(instance, is_removed_instance)
    cn.stats = stats
    # if it's a new or deleted instance:
    if is_new_instance or is_removed_instance:
        if self.pci_tracker:
            self.pci_tracker.update_pci_for_instance(context,
                                                     instance,
                                                     sign=sign)
        # new instance, update compute node resource usage:
        self._update_usage(self._get_usage_dict(instance, instance),
                           nodename, sign=sign)
    # Stop tracking removed instances in the is_bfv cache. This needs to
    # happen *after* calling _get_usage_dict() since that relies on the
    # is_bfv cache.
    if is_removed_instance and uuid in self.is_bfv:
        del self.is_bfv[uuid]
    cn.current_workload = stats.calculate_workload()
    if self.pci_tracker:
        obj = self.pci_tracker.stats.to_device_pools_obj()
        cn.pci_device_pools = obj
    else:
        cn.pci_device_pools = objects.PciDevicePoolList()
def _update_usage_from_instances(self, context, instances, nodename):
    """Calculate resource usage based on instance utilization. This is
    different than the hypervisor's view as it will account for all
    instances assigned to the local compute host, even if they are not
    currently powered on.

    :returns: dict mapping instance uuid -> instance for all given
        instances (including ones skipped for usage accounting).
    """
    self.tracked_instances.clear()
    cn = self.compute_nodes[nodename]
    # set some initial values, reserve room for host/hypervisor:
    cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
    cn.memory_mb_used = CONF.reserved_host_memory_mb
    cn.vcpus_used = CONF.reserved_host_cpus
    cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
    cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
    cn.current_workload = 0
    cn.running_vms = 0
    instance_by_uuid = {}
    for instance in instances:
        # Instances in ALLOW_RESOURCE_REMOVAL vm states are not
        # counted against the node's usage.
        if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
            self._update_usage_from_instance(context, instance, nodename)
        instance_by_uuid[instance.uuid] = instance
    return instance_by_uuid
def _remove_deleted_instances_allocations(self, context, cn,
                                          migrations, instance_by_uuid):
    """Remove stale placement allocations against this compute node.

    Walks every allocation held against the node's resource provider
    and deletes those belonging to instances that were deleted; other
    mismatches (moved instances, unknown consumers) are only logged.
    """
    migration_uuids = [migration.uuid for migration in migrations
                       if 'uuid' in migration]
    # NOTE(jaypipes): All of this code sucks. It's basically dealing with
    # all the corner cases in move, local delete, unshelve and rebuild
    # operations for when allocations should be deleted when things didn't
    # happen according to the normal flow of events where the scheduler
    # always creates allocations for an instance
    try:
        # pai: report.ProviderAllocInfo namedtuple
        pai = self.reportclient.get_allocations_for_resource_provider(
            context, cn.uuid)
    except (exception.ResourceProviderAllocationRetrievalFailed,
            ks_exc.ClientException) as e:
        LOG.error("Skipping removal of allocations for deleted instances: "
                  "%s", e)
        return
    allocations = pai.allocations
    if not allocations:
        # The main loop below would short-circuit anyway, but this saves us
        # the (potentially expensive) context.elevated construction below.
        return
    read_deleted_context = context.elevated(read_deleted='yes')
    for consumer_uuid, alloc in allocations.items():
        if consumer_uuid in self.tracked_instances:
            LOG.debug("Instance %s actively managed on this compute host "
                      "and has allocations in placement: %s.",
                      consumer_uuid, alloc)
            continue
        if consumer_uuid in migration_uuids:
            LOG.debug("Migration %s is active on this compute host "
                      "and has allocations in placement: %s.",
                      consumer_uuid, alloc)
            continue
        # We know these are instances now, so proceed
        instance_uuid = consumer_uuid
        instance = instance_by_uuid.get(instance_uuid)
        if not instance:
            try:
                # Look the instance up directly, including soft-deleted
                # records, since it wasn't in our host's instance list.
                instance = objects.Instance.get_by_uuid(
                    read_deleted_context, consumer_uuid,
                    expected_attrs=[])
            except exception.InstanceNotFound:
                # The instance isn't even in the database. Either the
                # scheduler _just_ created an allocation for it and we're
                # racing with the creation in the cell database, or the
                # instance was deleted and fully archived before we got a
                # chance to run this. The former is far more likely than
                # the latter. Avoid deleting allocations for a building
                # instance here.
                LOG.info("Instance %(uuid)s has allocations against this "
                         "compute host but is not found in the database.",
                         {'uuid': instance_uuid},
                         exc_info=False)
                continue
        if instance.deleted:
            # The instance is gone, so we definitely want to remove
            # allocations associated with it.
            # NOTE(jaypipes): This will not be true if/when we support
            # cross-cell migrations...
            LOG.debug("Instance %s has been deleted (perhaps locally). "
                      "Deleting allocations that remained for this "
                      "instance against this compute host: %s.",
                      instance_uuid, alloc)
            self.reportclient.delete_allocation_for_instance(context,
                                                             instance_uuid)
            continue
        if not instance.host:
            # Allocations related to instances being scheduled should not
            # be deleted if we already wrote the allocation previously.
            LOG.debug("Instance %s has been scheduled to this compute "
                      "host, the scheduler has made an allocation "
                      "against this compute node but the instance has "
                      "yet to start. Skipping heal of allocation: %s.",
                      instance_uuid, alloc)
            continue
        if (instance.host == cn.host and
                instance.node == cn.hypervisor_hostname):
            # The instance is supposed to be on this compute host but is
            # not in the list of actively managed instances.
            LOG.warning("Instance %s is not being actively managed by "
                        "this compute host but has allocations "
                        "referencing this compute host: %s. Skipping "
                        "heal of allocation because we do not know "
                        "what to do.", instance_uuid, alloc)
            continue
        if instance.host != cn.host:
            # The instance has been moved to another host either via a
            # migration, evacuation or unshelve in between the time when we
            # ran InstanceList.get_by_host_and_node(), added those
            # instances to RT.tracked_instances and the above
            # Instance.get_by_uuid() call. We SHOULD attempt to remove any
            # allocations that reference this compute host if the VM is in
            # a stable terminal state (i.e. it isn't in a state of waiting
            # for resize to confirm/revert), however if the destination
            # host is an Ocata compute host, it will delete the allocation
            # that contains this source compute host information anyway and
            # recreate an allocation that only refers to itself. So we
            # don't need to do anything in that case. Just log the
            # situation here for information but don't attempt to delete or
            # change the allocation.
            LOG.warning("Instance %s has been moved to another host "
                        "%s(%s). There are allocations remaining against "
                        "the source host that might need to be removed: "
                        "%s.",
                        instance_uuid, instance.host, instance.node, alloc)
def delete_allocation_for_evacuated_instance(self, context, instance, node,
                                             node_type='source'):
    """Remove this node's placement allocation for an evacuated instance."""
    self._delete_allocation_for_moved_instance(
        context, instance, node, 'evacuated', node_type=node_type)
def delete_allocation_for_migrated_instance(self, context, instance, node):
    """Remove this node's placement allocation for a migrated instance."""
    self._delete_allocation_for_moved_instance(
        context, instance, node, 'migrated')
def _delete_allocation_for_moved_instance(
        self, context, instance, node, move_type, node_type='source'):
    """Remove the instance's allocation against *node* in placement,
    logging an error when the report client reports failure."""
    # Clean up the instance allocation from this node in placement
    cn_uuid = self.compute_nodes[node].uuid
    removed = self.reportclient.remove_provider_tree_from_instance_allocation(
        context, instance.uuid, cn_uuid)
    if not removed:
        LOG.error("Failed to clean allocation of %s "
                  "instance on the %s node %s",
                  move_type, node_type, cn_uuid, instance=instance)
def delete_allocation_for_failed_resize(self, context, instance, node,
                                        flavor):
    """Delete instance allocations for the node during a failed resize

    :param context: The request context.
    :param instance: The instance being resized/migrated.
    :param node: The node provider on which the instance should have
        allocations to remove. If this is a resize to the same host, then
        the new_flavor resources are subtracted from the single allocation.
    :param flavor: This is the new_flavor during a resize.
    """
    cn = self.compute_nodes[node]
    removed = self.reportclient.remove_provider_tree_from_instance_allocation(
        context, instance.uuid, cn.uuid)
    if removed:
        return
    # Same flavor id means this was a cold migration rather than resize.
    operation = ('migration'
                 if instance.instance_type_id == flavor.id
                 else 'resize')
    LOG.error('Failed to clean allocation after a failed '
              '%(operation)s on node %(node)s',
              {'operation': operation, 'node': cn.uuid},
              instance=instance)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine
if there are any "orphaned" instances left hanging around.
Orphans could be consuming memory and should be accounted for in
usage calculations to guard against potential out of memory
errors.
"""
uuids1 = frozenset(self.tracked_instances)
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
def _update_usage_from_orphans(self, orphans, nodename):
    """Include orphaned instances in usage."""
    for orphan in orphans:
        mem_mb = orphan['memory_mb']
        LOG.warning("Detected running orphan instance: %(uuid)s "
                    "(consuming %(memory_mb)s MB memory)",
                    {'uuid': orphan['uuid'], 'memory_mb': mem_mb})
        # Only the orphan's memory footprint is accounted for.
        self._update_usage({'memory_mb': mem_mb}, nodename)
def delete_allocation_for_shelve_offloaded_instance(self, context,
                                                    instance):
    """Drop the placement allocation held by a shelve-offloaded
    instance."""
    self.reportclient.delete_allocation_for_instance(
        context, instance.uuid)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _get_instance_type(self, context, instance, prefix, migration):
"""Get the instance type from instance."""
stashed_flavors = migration.migration_type in ('resize',)
if stashed_flavors:
return getattr(instance, '%sflavor' % prefix)
else:
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
def _get_usage_dict(self, object_or_dict, instance, **updates):
    """Make a usage dict _update methods expect.

    Accepts a dict or an Instance or Flavor object, and a set of updates.
    Converts the object to a dict and applies the updates.

    :param object_or_dict: instance or flavor as an object or just a dict
    :param instance: nova.objects.Instance for the related operation; this
        is needed to determine if the instance is volume-backed
    :param updates: key-value pairs to update the passed object.
        Currently only considers 'numa_topology', all other keys are
        ignored.
    :returns: a dict with all the information from object_or_dict updated
        with updates
    """
    def _is_bfv():
        # Check to see if we have the is_bfv value cached.
        if instance.uuid in self.is_bfv:
            is_bfv = self.is_bfv[instance.uuid]
        else:
            is_bfv = compute_utils.is_volume_backed_instance(
                instance._context, instance)
            self.is_bfv[instance.uuid] = is_bfv
        return is_bfv
    usage = {}
    if isinstance(object_or_dict, objects.Instance):
        is_bfv = _is_bfv()
        # Volume-backed instances consume no local root disk.
        usage = {'memory_mb': object_or_dict.flavor.memory_mb,
                 'swap': object_or_dict.flavor.swap,
                 'vcpus': object_or_dict.flavor.vcpus,
                 'root_gb': (0 if is_bfv else
                             object_or_dict.flavor.root_gb),
                 'ephemeral_gb': object_or_dict.flavor.ephemeral_gb,
                 'numa_topology': object_or_dict.numa_topology}
    elif isinstance(object_or_dict, objects.Flavor):
        usage = obj_base.obj_to_primitive(object_or_dict)
        if _is_bfv():
            usage['root_gb'] = 0
    else:
        # Plain dict: take it as-is.
        usage.update(object_or_dict)
    for key in ('numa_topology',):
        if key in updates:
            usage[key] = updates[key]
    return usage
def build_failed(self, nodename):
    """Increments the failed_builds stats for the given node."""
    node_stats = self.stats[nodename]
    node_stats.build_failed()
def build_succeeded(self, nodename):
    """Resets the failed_builds stats for the given node."""
    node_stats = self.stats[nodename]
    node_stats.build_succeeded()
| 46.686715 | 79 | 0.617679 |
import collections
import copy
from keystoneauth1 import exceptions as ks_exc
from oslo_log import log as logging
from oslo_serialization import jsonutils
import retrying
from nova.compute import claims
from nova.compute import monitors
from nova.compute import stats as compute_stats
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base as obj_base
from nova.objects import migration as migration_obj
from nova.pci import manager as pci_manager
from nova.pci import request as pci_request
from nova import rc_fields as fields
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova import utils
from nova.virt import hardware
# Global nova configuration object and module-level logger.
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
# Name of the semaphore serializing all resource-claim bookkeeping in
# this module (used with utils.synchronized on the claim methods).
COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
def _instance_in_resize_state(instance):
    """Return True when *instance* is mid-resize or mid-rebuild.

    True for VMs already in the RESIZED state, or ACTIVE/STOPPED VMs
    whose task state is one of the resizing or rebuild task states.
    """
    if instance.vm_state == vm_states.RESIZED:
        return True
    move_tasks = task_states.resizing_states + task_states.rebuild_states
    return (instance.vm_state in (vm_states.ACTIVE, vm_states.STOPPED)
            and instance.task_state in move_tasks)
def _is_trackable_migration(migration):
return migration.migration_type in ('resize', 'migration',
'evacuation')
def _normalize_inventory_from_cn_obj(inv_data, cn):
    """Fill in missing 'allocation_ratio' and 'reserved' fields on the
    VCPU/MEMORY_MB/DISK_GB inventory records from the ComputeNode
    object and the reserved-host configuration options.

    Defaults are evaluated lazily so already-populated inventories do
    not touch the ComputeNode or config at all.
    """
    normalizations = (
        (fields.ResourceClass.VCPU,
         lambda: cn.cpu_allocation_ratio,
         lambda: CONF.reserved_host_cpus),
        (fields.ResourceClass.MEMORY_MB,
         lambda: cn.ram_allocation_ratio,
         lambda: CONF.reserved_host_memory_mb),
        (fields.ResourceClass.DISK_GB,
         lambda: cn.disk_allocation_ratio,
         # Disk reservation is configured in MB but inventoried in GB.
         lambda: compute_utils.convert_mb_to_ceil_gb(
             CONF.reserved_host_disk_mb)),
    )
    for resource_class, ratio_default, reserved_default in normalizations:
        if resource_class not in inv_data:
            continue
        inv = inv_data[resource_class]
        if 'allocation_ratio' not in inv:
            inv['allocation_ratio'] = ratio_default()
        if 'reserved' not in inv:
            inv['reserved'] = reserved_default()
class ResourceTracker(object):
def __init__(self, host, driver):
    """Create a resource tracker for *host* backed by virt *driver*."""
    self.host = host
    self.driver = driver
    # Set up lazily by callers once PCI device data is available.
    self.pci_tracker = None
    # Map of nodename -> ComputeNode object tracked by this host.
    self.compute_nodes = {}
    # Per-node Stats objects, created on first access.
    self.stats = collections.defaultdict(compute_stats.Stats)
    # UUIDs of instances currently accounted against this host.
    self.tracked_instances = set()
    # Map of instance uuid -> Migration currently accounted for.
    self.tracked_migrations = {}
    # Cache of instance uuid -> is-boot-from-volume results.
    self.is_bfv = {}
    monitor_handler = monitors.MonitorHandler(self)
    self.monitors = monitor_handler.monitors
    # Last-seen ComputeNode snapshots, used by _resource_change() to
    # detect whether a DB save is needed.
    self.old_resources = collections.defaultdict(objects.ComputeNode)
    self.scheduler_client = scheduler_client.SchedulerClient()
    self.reportclient = self.scheduler_client.reportclient
    # Overcommit ratios from configuration.
    self.ram_allocation_ratio = CONF.ram_allocation_ratio
    self.cpu_allocation_ratio = CONF.cpu_allocation_ratio
    self.disk_allocation_ratio = CONF.disk_allocation_ratio
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def instance_claim(self, context, instance, nodename, limits=None):
    """Claim resources for a new instance on the given node.

    Sets host/node on the instance, claims memory/disk/CPU/PCI/NUMA
    resources against the tracked compute node and persists the updated
    node.  Returns a claim object; a NopClaim when tracking is disabled
    for the node.

    :param context: security context
    :param instance: instance to reserve resources for
    :param nodename: node the claim is made against
    :param limits: optional oversubscription limits passed to the Claim
    """
    if self.disabled(nodename):
        # With tracking disabled we only record host/node placement.
        self._set_instance_host_and_node(instance, nodename)
        return claims.NopClaim()
    # Sanity checks: host/node should only be set once resources have
    # actually been claimed.
    if instance.host:
        LOG.warning("Host field should not be set on the instance "
                    "until resources have been claimed.",
                    instance=instance)
    if instance.node:
        LOG.warning("Node field should not be set on the instance "
                    "until resources have been claimed.",
                    instance=instance)
    # Driver-estimated overhead on top of the flavor-requested sizes.
    overhead = self.driver.estimate_instance_overhead(instance)
    LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
              "MB", {'flavor': instance.flavor.memory_mb,
                     'overhead': overhead['memory_mb']})
    LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
              "GB", {'flavor': instance.flavor.root_gb,
                     'overhead': overhead.get('disk_gb', 0)})
    LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
              "vCPU(s)", {'flavor': instance.flavor.vcpus,
                          'overhead': overhead.get('vcpus', 0)})
    cn = self.compute_nodes[nodename]
    pci_requests = objects.InstancePCIRequests.get_by_instance_uuid(
        context, instance.uuid)
    claim = claims.Claim(context, instance, nodename, self, cn,
                         pci_requests, overhead=overhead, limits=limits)
    # The claim fixes the instance's NUMA topology for this host.
    instance_numa_topology = claim.claimed_numa_topology
    instance.numa_topology = instance_numa_topology
    self._set_instance_host_and_node(instance, nodename)
    if self.pci_tracker:
        # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
        # in _update_usage_from_instance().
        self.pci_tracker.claim_instance(context, pci_requests,
                                        instance_numa_topology)
    # Mark resources in-use and update stats
    self._update_usage_from_instance(context, instance, nodename)
    elevated = context.elevated()
    # persist changes to the compute node:
    self._update(elevated, cn)
    return claim
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def rebuild_claim(self, context, instance, nodename, limits=None,
                  image_meta=None, migration=None):
    """Create a claim for a rebuild operation (move_type 'evacuation'),
    using the instance's current flavor."""
    instance_type = instance.flavor
    return self._move_claim(context, instance, instance_type, nodename,
                            migration, move_type='evacuation',
                            limits=limits, image_meta=image_meta)
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def resize_claim(self, context, instance, instance_type, nodename,
                 migration, image_meta=None, limits=None):
    """Create a claim for a resize or cold-migration move operation,
    using the given (new) instance_type."""
    return self._move_claim(context, instance, instance_type, nodename,
                            migration, image_meta=image_meta,
                            limits=limits)
def _move_claim(self, context, instance, new_instance_type, nodename,
                migration, move_type=None, image_meta=None, limits=None):
    """Claim resources on this node for an incoming move operation
    (resize, cold migration, evacuation).

    Creates or claims a migration record, claims resources (including
    PCI and NUMA) for the new flavor, stores a MigrationContext on the
    instance and persists the updated compute node.

    :returns: a MoveClaim, or a NopClaim when tracking is disabled.
    """
    image_meta = image_meta or {}
    if migration:
        # Re-point an existing migration record at this host.
        self._claim_existing_migration(migration, nodename)
    else:
        migration = self._create_migration(context, instance,
                                           new_instance_type,
                                           nodename, move_type)
    if self.disabled(nodename):
        # compute_driver doesn't support resource tracking, just
        # generate the migration record and a no-op claim.
        return claims.NopClaim(migration=migration)
    # Driver-estimated overhead on top of the new flavor's sizes.
    overhead = self.driver.estimate_instance_overhead(new_instance_type)
    LOG.debug("Memory overhead for %(flavor)d MB instance; %(overhead)d "
              "MB", {'flavor': new_instance_type.memory_mb,
                     'overhead': overhead['memory_mb']})
    LOG.debug("Disk overhead for %(flavor)d GB instance; %(overhead)d "
              "GB", {'flavor': instance.flavor.root_gb,
                     'overhead': overhead.get('disk_gb', 0)})
    LOG.debug("CPU overhead for %(flavor)d vCPUs instance; %(overhead)d "
              "vCPU(s)", {'flavor': instance.flavor.vcpus,
                          'overhead': overhead.get('vcpus', 0)})
    cn = self.compute_nodes[nodename]
    new_pci_requests = pci_request.get_pci_requests_from_flavor(
        new_instance_type)
    new_pci_requests.instance_uuid = instance.uuid
    # On resize merge the SR-IOV ports pci_requests with the new
    # instance flavor pci_requests.
    if instance.pci_requests:
        for request in instance.pci_requests.requests:
            if request.alias_name is None:
                new_pci_requests.requests.append(request)
    claim = claims.MoveClaim(context, instance, nodename,
                             new_instance_type, image_meta, self, cn,
                             new_pci_requests, overhead=overhead,
                             limits=limits)
    claim.migration = migration
    claimed_pci_devices_objs = []
    if self.pci_tracker:
        # NOTE(jaypipes): ComputeNode.pci_device_pools is set below
        # in _update_usage_from_instance().
        claimed_pci_devices_objs = self.pci_tracker.claim_instance(
            context, new_pci_requests, claim.claimed_numa_topology)
    claimed_pci_devices = objects.PciDeviceList(
        objects=claimed_pci_devices_objs)
    # TODO(jaypipes): Move claimed_numa_topology out of the Claim's
    # constructor flow so the Claim constructor only tests whether
    # resources can be claimed, not consumes them directly.
    mig_context = objects.MigrationContext(
        context=context, instance_uuid=instance.uuid,
        migration_id=migration.id,
        old_numa_topology=instance.numa_topology,
        new_numa_topology=claim.claimed_numa_topology,
        old_pci_devices=instance.pci_devices,
        new_pci_devices=claimed_pci_devices,
        old_pci_requests=instance.pci_requests,
        new_pci_requests=new_pci_requests)
    instance.migration_context = mig_context
    instance.save()
    # Mark the resources in-use for the resize landed on this node.
    self._update_usage_from_migration(context, instance, migration,
                                      nodename)
    elevated = context.elevated()
    self._update(elevated, cn)
    return claim
def _create_migration(self, context, instance, new_instance_type,
                      nodename, move_type=None):
    """Create and persist a migration record for a move to this host.

    :param context: request context (elevated internally for the create)
    :param instance: instance being moved; supplies the source host/node
        and the old flavor
    :param new_instance_type: target flavor for the move
    :param nodename: destination node name on this compute host
    :param move_type: optional explicit migration type; when omitted the
        type is derived from the old/new flavor ids already set on the
        record
    :returns: the persisted objects.Migration in 'pre-migrating' status
    """
    migration = objects.Migration(context=context.elevated())
    migration.dest_compute = self.host
    migration.dest_node = nodename
    migration.dest_host = self.driver.get_host_ip_addr()
    migration.old_instance_type_id = instance.flavor.id
    migration.new_instance_type_id = new_instance_type.id
    migration.status = 'pre-migrating'
    migration.instance_uuid = instance.uuid
    migration.source_compute = instance.host
    migration.source_node = instance.node
    if move_type:
        migration.migration_type = move_type
    else:
        # No explicit type given: derive it from the flavor ids set above
        # (e.g. resize vs. cold migration).
        migration.migration_type = migration_obj.determine_migration_type(
            migration)
    migration.create()
    return migration
def _claim_existing_migration(self, migration, nodename):
migration.dest_compute = self.host
migration.dest_node = nodename
migration.dest_host = self.driver.get_host_ip_addr()
migration.status = 'pre-migrating'
migration.save()
def _set_instance_host_and_node(self, instance, nodename):
instance.host = self.host
instance.launched_on = self.host
instance.node = nodename
instance.save()
def _unset_instance_host_and_node(self, instance):
instance.host = None
instance.node = None
instance.save()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def abort_instance_claim(self, context, instance, nodename):
    """Remove usage from the given instance after an aborted claim."""
    # is_removed=True forces the removal path even though the instance
    # may still be in the tracked set.
    self._update_usage_from_instance(context, instance, nodename,
                                     is_removed=True)

    instance.clear_numa_topology()
    self._unset_instance_host_and_node(instance)

    # Persist the updated compute node usage.
    self._update(context.elevated(), self.compute_nodes[nodename])
def _drop_pci_devices(self, instance, nodename, prefix):
    """Free PCI devices recorded in the instance's migration context.

    :param prefix: 'new_' or 'old_' -- selects which side of the
        migration context to free devices from.
    """
    if self.pci_tracker:
        # free old/new allocated pci devices
        pci_devices = self._get_migration_context_resource(
            'pci_devices', instance, prefix=prefix)
        if pci_devices:
            for pci_device in pci_devices:
                self.pci_tracker.free_device(pci_device, instance)

            # Refresh the node's published device pools after freeing.
            dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
            self.compute_nodes[nodename].pci_device_pools = dev_pools_obj
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def drop_move_claim(self, context, instance, nodename,
                    instance_type=None, prefix='new_'):
    """Remove usage for an incoming/outgoing migration on this node.

    :param instance_type: flavor whose usage should be dropped; looked up
        from the tracked migration when not given
    :param prefix: 'new_' or 'old_' -- which side of the migration
        context to drop resources for
    """
    if instance['uuid'] in self.tracked_migrations:
        migration = self.tracked_migrations.pop(instance['uuid'])

        if not instance_type:
            ctxt = context.elevated()
            instance_type = self._get_instance_type(ctxt, instance, prefix,
                                                    migration)

        if instance_type is not None:
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance, prefix=prefix)
            usage = self._get_usage_dict(
                instance_type, instance, numa_topology=numa_topology)
            self._drop_pci_devices(instance, nodename, prefix)
            # Subtract the migration's usage from the node totals.
            self._update_usage(usage, nodename, sign=-1)

            ctxt = context.elevated()
            self._update(ctxt, self.compute_nodes[nodename])
    # Only drop the instance-level usage when the instance itself is
    # tracked here (e.g. same-host resize).
    elif (instance['uuid'] in self.tracked_instances):
        self.tracked_instances.remove(instance['uuid'])
        self._drop_pci_devices(instance, nodename, prefix)

        ctxt = context.elevated()
        self._update(ctxt, self.compute_nodes[nodename])
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def update_usage(self, context, instance, nodename):
    """Update resource usage and stats after a change in an instance."""
    if self.disabled(nodename):
        return

    uuid = instance['uuid']

    # Don't update usage for this instance unless it submitted a resource
    # claim first:
    if uuid in self.tracked_instances:
        self._update_usage_from_instance(context, instance, nodename)
        self._update(context.elevated(), self.compute_nodes[nodename])
def disabled(self, nodename):
    """Return True when this tracker cannot manage *nodename*: either no
    ComputeNode record is cached for it or the virt driver reports the
    node as unavailable.
    """
    known = nodename in self.compute_nodes
    # Only ask the driver about nodes we actually know about.
    available = known and self.driver.node_is_available(nodename)
    return not available
def _check_for_nodes_rebalance(self, context, resources, nodename):
    """Check if a node was re-balanced here from another compute host.

    Returns True when exactly one existing ComputeNode record for
    *nodename* was found on another host and has been adopted by this
    host; False otherwise.
    """
    if not self.driver.rebalances_nodes:
        return False

    # Its possible ironic just did a node re-balance, so let's
    # look for an existing ComputeNode record with this nodename that
    # currently belongs to another host and adopt it.
    cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
        context, nodename)

    if len(cn_candidates) == 1:
        cn = cn_candidates[0]
        LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
                 {"name": nodename, "old": cn.host, "new": self.host})
        cn.host = self.host
        self.compute_nodes[nodename] = cn
        self._copy_resources(cn, resources)
        self._setup_pci_tracker(context, cn, resources)
        self._update(context, cn)
        return True
    elif len(cn_candidates) > 1:
        # More than one record is ambiguous; refuse to adopt any.
        LOG.error(
            "Found more than one ComputeNode for nodename %s. "
            "Please clean up the orphaned ComputeNode records in your DB.",
            nodename)

    return False
def _init_compute_node(self, context, resources):
    """Initialize the compute node if it does not already exist.

    :param resources: dict of resources reported by the virt driver
    :returns: True if a new compute_nodes table record was created,
        False otherwise
    """
    nodename = resources['hypervisor_hostname']

    # If we already have a local copy, just refresh it from resources.
    if nodename in self.compute_nodes:
        cn = self.compute_nodes[nodename]
        self._copy_resources(cn, resources)
        self._setup_pci_tracker(context, cn, resources)
        return False

    # Next try the database record for this host/node.
    cn = self._get_compute_node(context, nodename)
    if cn:
        self.compute_nodes[nodename] = cn
        self._copy_resources(cn, resources)
        self._setup_pci_tracker(context, cn, resources)
        return False

    # Maybe another host used to own this node (e.g. ironic rebalance).
    if self._check_for_nodes_rebalance(context, resources, nodename):
        return False

    # No local copy and nothing in the database: create a brand new
    # compute node record initialized from the driver's resources.
    cn = objects.ComputeNode(context)
    cn.host = self.host
    self._copy_resources(cn, resources, initial=True)
    self.compute_nodes[nodename] = cn
    cn.create()
    LOG.info('Compute node record created for '
             '%(host)s:%(node)s with uuid: %(uuid)s',
             {'host': self.host, 'node': nodename, 'uuid': cn.uuid})

    self._setup_pci_tracker(context, cn, resources)

    return True
def _setup_pci_tracker(self, context, compute_node, resources):
    """Lazily create the PCI device tracker and sync it with the devices
    reported by the hypervisor, publishing the resulting device pools on
    the compute node record.
    """
    if not self.pci_tracker:
        n_id = compute_node.id
        self.pci_tracker = pci_manager.PciDevTracker(context, node_id=n_id)
    if 'pci_passthrough_devices' in resources:
        # Pop the device blob so it is not copied onto the ComputeNode
        # by the generic resource-copy path.
        dev_json = resources.pop('pci_passthrough_devices')
        self.pci_tracker.update_devices_from_hypervisor_resources(
            dev_json)

        dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
        compute_node.pci_device_pools = dev_pools_obj
def _copy_resources(self, compute_node, resources, initial=False):
    """Copy resource values reported by the virt driver onto the
    supplied compute_node, preserving cross-audit stats and allocation
    ratios.

    :param initial: True on first-time node creation; selects the
        'initial_*' allocation-ratio config options instead of the
        runtime ones.
    """
    nodename = resources['hypervisor_hostname']
    stats = self.stats[nodename]
    # Purge old stats, but preserve 'failed_builds' across the clear
    # since it is not part of the driver-reported resources.
    prev_failed_builds = stats.get('failed_builds', 0)
    stats.clear()
    stats['failed_builds'] = prev_failed_builds
    stats.digest_stats(resources.get('stats'))
    compute_node.stats = stats

    # Only push configured allocation ratios when they are explicitly
    # set (not 0.0/None); otherwise the ComputeNode keeps its own value,
    # e.g. a default ComputeNode.cpu_allocation_ratio of 16.0. We want
    # to avoid resetting the ComputeNode fields to None because that
    # will make the _resource_change method think something changed when
    # really it didn't.
    for res in ('cpu', 'disk', 'ram'):
        attr = '%s_allocation_ratio' % res
        if initial:
            conf_alloc_ratio = getattr(CONF, 'initial_%s' % attr)
        else:
            conf_alloc_ratio = getattr(self, attr)
        if conf_alloc_ratio not in (0.0, None):
            setattr(compute_node, attr, conf_alloc_ratio)

    # Now copy the rest of the driver-reported fields onto compute_node.
    compute_node.update_from_virt_driver(resources)
def remove_node(self, nodename):
    """Forget all locally cached data about *nodename* (stats, compute
    node record and last-reported resources). Missing entries are
    ignored.
    """
    for cache in (self.stats, self.compute_nodes, self.old_resources):
        cache.pop(nodename, None)
def _get_host_metrics(self, context, nodename):
    """Collect metrics from the configured monitors and, when any were
    produced, emit legacy and versioned notifications.

    :returns: the metrics as a list of primitives (possibly empty)
    """
    metrics = objects.MonitorMetricList()
    metrics_info = {}
    for monitor in self.monitors:
        try:
            monitor.populate_metrics(metrics)
        except NotImplementedError:
            LOG.debug("The compute driver doesn't support host "
                      "metrics for %(mon)s", {'mon': monitor})
        except Exception as exc:
            # A failing monitor must not break the resource audit.
            LOG.warning("Cannot get the metrics from %(mon)s; "
                        "error: %(exc)s",
                        {'mon': monitor, 'exc': exc})
    # TODO(jaypipes): Remove this when compute_node.metrics doesn't need
    # a JSONified list representation.
    metric_list = metrics.to_list()
    if len(metric_list):
        metrics_info['nodename'] = nodename
        metrics_info['metrics'] = metric_list
        metrics_info['host'] = self.host
        metrics_info['host_ip'] = CONF.my_ip
        notifier = rpc.get_notifier(service='compute', host=nodename)
        notifier.info(context, 'compute.metrics.update', metrics_info)
        compute_utils.notify_about_metrics_update(
            context, self.host, CONF.my_ip, nodename, metrics)
    return metric_list
def update_available_resource(self, context, nodename, startup=False):
    """Audit locally available compute resources for a node.

    Pulls the hypervisor's view of resources from the virt driver,
    normalizes it, and feeds it into the (locked)
    _update_available_resource accounting pass.

    :param startup: True when running during compute service startup
        rather than the periodic task.
    """
    LOG.debug("Auditing locally available compute resources for "
              "%(host)s (node: %(node)s)",
              {'node': nodename,
               'host': self.host})
    resources = self.driver.get_available_resource(nodename)
    resources['host_ip'] = CONF.my_ip

    # The DB requires cpu_info to be non-null, so coerce a missing/None
    # value to an empty string.
    if "cpu_info" not in resources or resources["cpu_info"] is None:
        resources["cpu_info"] = ''

    self._verify_resources(resources)

    self._report_hypervisor_resource_view(resources)

    self._update_available_resource(context, resources, startup=startup)
def _pair_instances_to_migrations(self, migrations, instance_by_uuid):
for migration in migrations:
try:
migration.instance = instance_by_uuid[migration.instance_uuid]
except KeyError:
# let the code either fail or lazy-load the instance later
# which is what happened before we added this optimization.
# NOTE(tdurakov) this situation is possible for resize/cold
# migration when migration is finished but haven't yet
LOG.debug('Migration for instance %(uuid)s refers to '
'another host\'s instance!',
{'uuid': migration.instance_uuid})
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
def _update_available_resource(self, context, resources, startup=False):
    """Recompute all usage for a node from instances, migrations and
    orphans, then persist the result (DB + placement).
    """
    # initialize the compute node object, creating it
    # if it does not already exist.
    is_new_compute_node = self._init_compute_node(context, resources)

    nodename = resources['hypervisor_hostname']

    # if we could not init the compute node the tracker will be
    # disabled and we should quit now
    if self.disabled(nodename):
        return

    # Grab all instances assigned to this node:
    instances = objects.InstanceList.get_by_host_and_node(
        context, self.host, nodename,
        expected_attrs=['system_metadata',
                        'numa_topology',
                        'flavor', 'migration_context'])

    # Now calculate usage based on instance utilization:
    instance_by_uuid = self._update_usage_from_instances(
        context, instances, nodename)

    # Grab all in-progress migrations:
    migrations = objects.MigrationList.get_in_progress_by_host_and_node(
        context, self.host, nodename)

    self._pair_instances_to_migrations(migrations, instance_by_uuid)
    self._update_usage_from_migrations(context, migrations, nodename)

    # A new compute node means there won't be a resource provider yet, so
    # there is nothing to clean up in placement for it.
    if not is_new_compute_node:
        self._remove_deleted_instances_allocations(
            context, self.compute_nodes[nodename], migrations,
            instance_by_uuid)

    # Account for instances the hypervisor runs but we don't track.
    orphans = self._find_orphaned_instances()
    self._update_usage_from_orphans(orphans, nodename)

    cn = self.compute_nodes[nodename]

    # NOTE(review): pci_tracker is assumed to be set by this point
    # (created in _init_compute_node via _setup_pci_tracker).
    self.pci_tracker.clean_usage(instances, migrations, orphans)
    dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
    cn.pci_device_pools = dev_pools_obj

    self._report_final_resource_view(nodename)

    metrics = self._get_host_metrics(context, nodename)
    # TODO: metrics are stored JSON-serialized on the compute node.
    cn.metrics = jsonutils.dumps(metrics)

    # update the compute_node (and placement)
    self._update(context, cn, startup=startup)
    LOG.debug('Compute_service record updated for %(host)s:%(node)s',
              {'host': self.host, 'node': nodename})
def _get_compute_node(self, context, nodename):
    """Return the ComputeNode record for this host/nodename, or None
    (with a warning logged) when no DB record exists.
    """
    try:
        return objects.ComputeNode.get_by_host_and_nodename(
            context, self.host, nodename)
    except exception.NotFound:
        LOG.warning("No compute node record for %(host)s:%(node)s",
                    {'host': self.host, 'node': nodename})
def _report_hypervisor_resource_view(self, resources):
    """Log the hypervisor's raw view of free resources for the node.

    This is a snapshot of what the virt driver reported before any
    claim/instance accounting is applied.
    """
    nodename = resources['hypervisor_hostname']
    free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
    free_disk_gb = resources['local_gb'] - resources['local_gb_used']
    vcpus = resources['vcpus']
    if vcpus:
        free_vcpus = vcpus - resources['vcpus_used']
    else:
        # Some drivers don't report vcpu counts.
        free_vcpus = 'unknown'

    pci_devices = resources.get('pci_passthrough_devices')

    LOG.debug("Hypervisor/Node resource view: "
              "name=%(node)s "
              "free_ram=%(free_ram)sMB "
              "free_disk=%(free_disk)sGB "
              "free_vcpus=%(free_vcpus)s "
              "pci_devices=%(pci_devices)s",
              {'node': nodename,
               'free_ram': free_ram_mb,
               'free_disk': free_disk_gb,
               'free_vcpus': free_vcpus,
               'pci_devices': pci_devices})
def _report_final_resource_view(self, nodename):
    """Log the final calculated resource view for the node, including
    instance usage and in-progress resource claims.
    """
    cn = self.compute_nodes[nodename]
    vcpus = cn.vcpus
    if vcpus:
        tcpu = vcpus
        ucpu = cn.vcpus_used
        LOG.debug("Total usable vcpus: %(tcpu)s, "
                  "total allocated vcpus: %(ucpu)s",
                  {'tcpu': vcpus,
                   'ucpu': ucpu})
    else:
        tcpu = 0
        ucpu = 0
    pci_stats = (list(cn.pci_device_pools) if
                 cn.pci_device_pools else [])
    LOG.debug("Final resource view: "
              "name=%(node)s "
              "phys_ram=%(phys_ram)sMB "
              "used_ram=%(used_ram)sMB "
              "phys_disk=%(phys_disk)sGB "
              "used_disk=%(used_disk)sGB "
              "total_vcpus=%(total_vcpus)s "
              "used_vcpus=%(used_vcpus)s "
              "pci_stats=%(pci_stats)s",
              {'node': nodename,
               'phys_ram': cn.memory_mb,
               'used_ram': cn.memory_mb_used,
               'phys_disk': cn.local_gb,
               'used_disk': cn.local_gb_used,
               'total_vcpus': tcpu,
               'used_vcpus': ucpu,
               'pci_stats': pci_stats})
def _resource_change(self, compute_node):
    """Return True (and remember the new state) when compute_node's
    fields differ from the last snapshot; 'updated_at' is excluded from
    the comparison.
    """
    nodename = compute_node.hypervisor_hostname
    # NOTE(review): old_resources is presumably a defaultdict so a first
    # lookup yields an empty/None-ish baseline -- confirm at init site.
    old_compute = self.old_resources[nodename]
    if not obj_base.obj_equal_prims(
            compute_node, old_compute, ['updated_at']):
        # Deep copy so later in-place mutation of compute_node doesn't
        # silently update the snapshot too.
        self.old_resources[nodename] = copy.deepcopy(compute_node)
        return True
    return False
def _update_to_placement(self, context, compute_node, startup):
    """Send inventory/provider-tree changes for this node to placement.

    Even when the compute_node record itself is unchanged we must
    proceed to get inventory and use scheduler_client interfaces to
    update inventory to placement. It's scheduler_client's
    responsibility to ensure the update request to placement only
    happens when inventory is changed.
    """
    nodename = compute_node.hypervisor_hostname
    # Persist the stats to the Scheduler
    # First try update_provider_tree
    # Retrieve the provider tree associated with this compute node. If
    # it doesn't exist yet, this will create it with a (single, root)
    # provider for the compute node.
    reportclient = self.scheduler_client.reportclient
    prov_tree = reportclient.get_provider_tree_and_ensure_root(
        context, compute_node.uuid, name=compute_node.hypervisor_hostname)
    try:
        allocs = None
        try:
            self.driver.update_provider_tree(prov_tree, nodename)
        except exception.ReshapeNeeded:
            if not startup:
                # A reshape is only expected on startup/upgrade; raise
                # it up; the compute manager will treat it specially.
                raise
            LOG.info("Performing resource provider inventory and "
                     "allocation data migration during compute service "
                     "startup or fast-forward upgrade.")
            allocs = reportclient.get_allocations_for_provider_tree(
                context, nodename)
            # Retry with the allocations so the driver can move them.
            self.driver.update_provider_tree(prov_tree, nodename,
                                             allocations=allocs)

        # Flush any changes. If we processed ReshapeNeeded above, allocs is
        # not None, and this will hit placement's POST /reshaper route.
        reportclient.update_from_provider_tree(context, prov_tree,
                                               allocations=allocs)
    except NotImplementedError:
        # update_provider_tree isn't implemented; fall back to the
        # older get_inventory interface.
        try:
            inv_data = self.driver.get_inventory(nodename)
            _normalize_inventory_from_cn_obj(inv_data, compute_node)
            self.scheduler_client.set_inventory_for_provider(
                context,
                compute_node.uuid,
                compute_node.hypervisor_hostname,
                inv_data,
            )
        except NotImplementedError:
            # Eventually all virt drivers will return an inventory dict in
            # the format that the placement API expects and we'll be able
            # to remove this legacy fallback.
            self.scheduler_client.update_compute_node(context,
                                                      compute_node)
@retrying.retry(stop_max_attempt_number=4,
                retry_on_exception=lambda e: isinstance(
                    e, exception.ResourceProviderUpdateConflict))
def _update(self, context, compute_node, startup=False):
    """Persist changed node stats to the DB and push them to placement;
    retried up to 4 times on placement update conflicts.
    """
    if self._resource_change(compute_node):
        # NOTE(jianghuaw): Once we completely move to use get_inventory()
        # for all resource provider's inv data. We can remove this check.
        compute_node.save()

    self._update_to_placement(context, compute_node, startup)

    if self.pci_tracker:
        self.pci_tracker.save(context)
def _update_usage(self, usage, nodename, sign=1):
    """Apply a usage dict to the node totals.

    :param usage: dict with 'memory_mb' (required) and optional
        'root_gb', 'vcpus', 'ephemeral_gb', 'swap' (swap is in MB)
    :param sign: +1 to add usage, -1 to remove it
    """
    mem_usage = usage['memory_mb']
    disk_usage = usage.get('root_gb', 0)
    vcpus_usage = usage.get('vcpus', 0)

    # Add driver-reported per-instance overhead on top of the flavor.
    overhead = self.driver.estimate_instance_overhead(usage)
    mem_usage += overhead['memory_mb']
    disk_usage += overhead.get('disk_gb', 0)
    vcpus_usage += overhead.get('vcpus', 0)

    cn = self.compute_nodes[nodename]
    cn.memory_mb_used += sign * mem_usage
    cn.local_gb_used += sign * disk_usage
    cn.local_gb_used += sign * usage.get('ephemeral_gb', 0)
    # swap is tracked in MB while local_gb_used is GB.
    cn.local_gb_used += sign * usage.get('swap', 0) / 1024
    cn.vcpus_used += sign * vcpus_usage

    # Free values may go negative depending on overcommit policy.
    cn.free_ram_mb = cn.memory_mb - cn.memory_mb_used
    cn.free_disk_gb = cn.local_gb - cn.local_gb_used
    stats = self.stats[nodename]
    cn.running_vms = stats.num_instances

    # Recompute host NUMA usage; free=True releases resources.
    free = sign == -1
    updated_numa_topology = hardware.get_host_numa_usage_from_instance(
        cn, usage, free)
    cn.numa_topology = updated_numa_topology
def _get_migration_context_resource(self, resource, instance,
prefix='new_'):
migration_context = instance.migration_context
resource = prefix + resource
if migration_context and resource in migration_context:
return getattr(migration_context, resource)
return None
def _update_usage_from_migration(self, context, instance, migration,
                                 nodename):
    """Update usage for a single migration record; this host may be the
    source, the destination, or both (same-host resize).
    """
    if not _is_trackable_migration(migration):
        return

    uuid = migration.instance_uuid
    LOG.info("Updating resource usage from migration", instance_uuid=uuid)

    incoming = (migration.dest_compute == self.host and
                migration.dest_node == nodename)
    outbound = (migration.source_compute == self.host and
                migration.source_node == nodename)
    same_node = (incoming and outbound)

    tracked = uuid in self.tracked_instances
    itype = None
    numa_topology = None
    sign = 0
    if same_node:
        # Same-node resize: which flavor to account for depends on
        # whether the instance has already flipped to the new flavor.
        if (instance['instance_type_id'] ==
                migration.old_instance_type_id):
            # Still on the old flavor: hold usage for the 'new_' side
            # and allocate PCI devices (sign=1).
            itype = self._get_instance_type(context, instance, 'new_',
                                            migration)
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance)
            sign = 1
        else:
            # Already on the new flavor: hold usage for the old one so
            # a revert remains possible.
            itype = self._get_instance_type(context, instance, 'old_',
                                            migration)
            numa_topology = self._get_migration_context_resource(
                'numa_topology', instance, prefix='old_')
    elif incoming and not tracked:
        # Instance has not yet migrated here: reserve the new flavor.
        itype = self._get_instance_type(context, instance, 'new_',
                                        migration)
        numa_topology = self._get_migration_context_resource(
            'numa_topology', instance)
        sign = 1
    elif outbound and not tracked:
        # Instance migrated away: keep old-flavor usage for a revert.
        itype = self._get_instance_type(context, instance, 'old_',
                                        migration)
        numa_topology = self._get_migration_context_resource(
            'numa_topology', instance, prefix='old_')

    if itype:
        cn = self.compute_nodes[nodename]
        usage = self._get_usage_dict(
            itype, instance, numa_topology=numa_topology)
        if self.pci_tracker and sign:
            self.pci_tracker.update_pci_for_instance(
                context, instance, sign=sign)
        self._update_usage(usage, nodename)
        if self.pci_tracker:
            obj = self.pci_tracker.stats.to_device_pools_obj()
            cn.pci_device_pools = obj
        else:
            obj = objects.PciDevicePoolList()
            cn.pci_device_pools = obj
        self.tracked_migrations[uuid] = migration
def _update_usage_from_migrations(self, context, migrations, nodename):
    """Rebuild tracked-migration usage from the in-progress migration
    list, defensively filtering bad or stale records.
    """
    filtered = {}
    instances = {}
    self.tracked_migrations.clear()

    # Defensive filtering against bad migration records:
    for migration in migrations:
        uuid = migration.instance_uuid

        try:
            if uuid not in instances:
                # Accessing .instance may lazy-load and raise.
                instances[uuid] = migration.instance
        except exception.InstanceNotFound as e:
            # migration referencing deleted instance
            LOG.debug('Migration instance not found: %s', e)
            continue

        # Skip migration if the instance isn't in a resize state:
        if not _instance_in_resize_state(instances[uuid]):
            LOG.warning("Instance not resizing, skipping migration.",
                        instance_uuid=uuid)
            continue

        # filter to most recently updated migration for each instance:
        other_migration = filtered.get(uuid, None)
        # NOTE(claudiub): In Python 3, you cannot compare NoneTypes.
        if other_migration:
            om = other_migration
            other_time = om.updated_at or om.created_at
            migration_time = migration.updated_at or migration.created_at
            if migration_time > other_time:
                filtered[uuid] = migration
        else:
            filtered[uuid] = migration

    for migration in filtered.values():
        instance = instances[migration.instance_uuid]
        # Skip migration (and mark it as error) if it doesn't match the
        # migration id recorded in the instance's migration context;
        # this can occur if a previous migration never completed.
        if (instance.migration_context is not None and
                instance.migration_context.migration_id != migration.id):
            LOG.info("Current instance migration %(im)s doesn't match "
                     "migration %(m)s, marking migration as error. "
                     "This can occur if a previous migration for this "
                     "instance did not complete.",
                     {'im': instance.migration_context.migration_id,
                      'm': migration.id})
            migration.status = "error"
            migration.save()
            continue

        try:
            self._update_usage_from_migration(context, instance, migration,
                                              nodename)
        except exception.FlavorNotFound:
            LOG.warning("Flavor could not be found, skipping migration.",
                        instance_uuid=instance.uuid)
            continue
def _update_usage_from_instance(self, context, instance, nodename,
                                is_removed=False):
    """Update node usage/stats for a single instance (add or remove)."""
    uuid = instance['uuid']
    is_new_instance = uuid not in self.tracked_instances
    # NOTE(sfinucan): Both brand new instances as well as instances that
    # are being unshelved will have is_new_instance == True
    is_removed_instance = not is_new_instance and (is_removed or
        instance['vm_state'] in vm_states.ALLOW_RESOURCE_REMOVAL)

    if is_new_instance:
        self.tracked_instances.add(uuid)
        sign = 1

    if is_removed_instance:
        self.tracked_instances.remove(uuid)
        sign = -1

    cn = self.compute_nodes[nodename]
    stats = self.stats[nodename]
    stats.update_stats_for_instance(instance, is_removed_instance)
    cn.stats = stats

    # Usage totals only change when an instance appears or disappears:
    if is_new_instance or is_removed_instance:
        if self.pci_tracker:
            self.pci_tracker.update_pci_for_instance(context,
                                                     instance,
                                                     sign=sign)
        self._update_usage(self._get_usage_dict(instance, instance),
                           nodename, sign=sign)

    # Drop the cached is_bfv value only after _get_usage_dict() above,
    # which relies on that cache.
    if is_removed_instance and uuid in self.is_bfv:
        del self.is_bfv[uuid]

    cn.current_workload = stats.calculate_workload()
    if self.pci_tracker:
        obj = self.pci_tracker.stats.to_device_pools_obj()
        cn.pci_device_pools = obj
    else:
        cn.pci_device_pools = objects.PciDevicePoolList()
def _update_usage_from_instances(self, context, instances, nodename):
    """Reset node usage to the host-reserved baseline and re-add usage
    for every active instance on the node.

    :returns: dict mapping instance uuid -> instance object for all
        instances considered (including those in removable vm_states)
    """
    self.tracked_instances.clear()
    cn = self.compute_nodes[nodename]
    # Start from the configured host reservations; reserved disk is
    # configured in MB but tracked in GB.
    cn.local_gb_used = CONF.reserved_host_disk_mb / 1024
    cn.memory_mb_used = CONF.reserved_host_memory_mb
    cn.vcpus_used = CONF.reserved_host_cpus
    cn.free_ram_mb = (cn.memory_mb - cn.memory_mb_used)
    cn.free_disk_gb = (cn.local_gb - cn.local_gb_used)
    cn.current_workload = 0
    cn.running_vms = 0

    instance_by_uuid = {}
    for instance in instances:
        # Instances in removable states (e.g. deleted/shelved) do not
        # consume resources but are still returned for later pairing.
        if instance.vm_state not in vm_states.ALLOW_RESOURCE_REMOVAL:
            self._update_usage_from_instance(context, instance, nodename)
        instance_by_uuid[instance.uuid] = instance
    return instance_by_uuid
def _remove_deleted_instances_allocations(self, context, cn,
                                          migrations, instance_by_uuid):
    """Remove placement allocations held against this node by instances
    that have been deleted; log (but do not touch) the other corner
    cases (moves, unscheduled instances, unknown consumers).
    """
    migration_uuids = [migration.uuid for migration in migrations
                       if 'uuid' in migration]
    # Fetch every allocation consumer against this provider so we can
    # reconcile them with what we actually track.
    try:
        pai = self.reportclient.get_allocations_for_resource_provider(
            context, cn.uuid)
    except (exception.ResourceProviderAllocationRetrievalFailed,
            ks_exc.ClientException) as e:
        LOG.error("Skipping removal of allocations for deleted instances: "
                  "%s", e)
        return
    allocations = pai.allocations
    if not allocations:
        # The main loop below would short-circuit anyway, but bail early.
        return
    read_deleted_context = context.elevated(read_deleted='yes')
    for consumer_uuid, alloc in allocations.items():
        if consumer_uuid in self.tracked_instances:
            LOG.debug("Instance %s actively managed on this compute host "
                      "and has allocations in placement: %s.",
                      consumer_uuid, alloc)
            continue
        if consumer_uuid in migration_uuids:
            LOG.debug("Migration %s is active on this compute host "
                      "and has allocations in placement: %s.",
                      consumer_uuid, alloc)
            continue

        # Unknown consumer: treat it as a (possibly deleted) instance.
        instance_uuid = consumer_uuid
        instance = instance_by_uuid.get(instance_uuid)
        if not instance:
            try:
                # read_deleted so we can see locally deleted instances.
                instance = objects.Instance.get_by_uuid(
                    read_deleted_context, consumer_uuid,
                    expected_attrs=[])
            except exception.InstanceNotFound:
                # It is not a deleted instance from this cell; maybe the
                # scheduler _just_ created an allocation for it. Leave it.
                LOG.info("Instance %(uuid)s has allocations against this "
                         "compute host but is not found in the database.",
                         {'uuid': instance_uuid},
                         exc_info=False)
                continue

        if instance.deleted:
            # The allocation is orphaned: the instance is gone, so free
            # the resources it still holds against this node.
            LOG.debug("Instance %s has been deleted (perhaps locally). "
                      "Deleting allocations that remained for this "
                      "instance against this compute host: %s.",
                      instance_uuid, alloc)
            self.reportclient.delete_allocation_for_instance(context,
                                                             instance_uuid)
            continue
        if not instance.host:
            # Scheduled but not yet started; the allocation is expected.
            LOG.debug("Instance %s has been scheduled to this compute "
                      "host, the scheduler has made an allocation "
                      "against this compute node but the instance has "
                      "yet to start. Skipping heal of allocation: %s.",
                      instance_uuid, alloc)
            continue
        if (instance.host == cn.host and
                instance.node == cn.hypervisor_hostname):
            # Inconsistent state we can't safely resolve automatically.
            LOG.warning("Instance %s is not being actively managed by "
                        "this compute host but has allocations "
                        "referencing this compute host: %s. Skipping "
                        "heal of allocation because we do not know "
                        "what to do.", instance_uuid, alloc)
            continue
        if instance.host != cn.host:
            # The allocation against this source host may be legitimate
            # (e.g. pending resize confirm/revert), so only warn about
            # the leftover; do not delete or otherwise
            # change the allocation.
            LOG.warning("Instance %s has been moved to another host "
                        "%s(%s). There are allocations remaining against "
                        "the source host that might need to be removed: "
                        "%s.",
                        instance_uuid, instance.host, instance.node, alloc)
def delete_allocation_for_evacuated_instance(self, context, instance, node,
                                             node_type='source'):
    """Remove the placement allocation an evacuated instance holds
    against the given node (by default the evacuation source).
    """
    self._delete_allocation_for_moved_instance(context, instance, node,
                                               'evacuated', node_type)
def delete_allocation_for_migrated_instance(self, context, instance, node):
    """Remove the placement allocation a migrated instance holds against
    the given (source) node.
    """
    self._delete_allocation_for_moved_instance(
        context, instance, node, 'migrated')
def _delete_allocation_for_moved_instance(
self, context, instance, node, move_type, node_type='source'):
# Clean up the instance allocation from this node in placement
cn_uuid = self.compute_nodes[node].uuid
if not self.reportclient.remove_provider_tree_from_instance_allocation(
context, instance.uuid, cn_uuid):
LOG.error("Failed to clean allocation of %s "
"instance on the %s node %s",
move_type, node_type, cn_uuid, instance=instance)
def delete_allocation_for_failed_resize(self, context, instance, node,
                                        flavor):
    """Delete the instance's allocation against *node* after a failed
    resize/migration; log an error when the removal fails.
    """
    cn = self.compute_nodes[node]
    removed = self.reportclient.remove_provider_tree_from_instance_allocation(
        context, instance.uuid, cn.uuid)
    if not removed:
        # A same-flavor move is a cold migration; otherwise a resize.
        operation = ('migration'
                     if instance.instance_type_id == flavor.id
                     else 'resize')
        LOG.error('Failed to clean allocation after a failed '
                  '%(operation)s on node %(node)s',
                  {'operation': operation, 'node': cn.uuid},
                  instance=instance)
def _find_orphaned_instances(self):
uuids1 = frozenset(self.tracked_instances)
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
def _update_usage_from_orphans(self, orphans, nodename):
for orphan in orphans:
memory_mb = orphan['memory_mb']
LOG.warning("Detected running orphan instance: %(uuid)s "
"(consuming %(memory_mb)s MB memory)",
{'uuid': orphan['uuid'], 'memory_mb': memory_mb})
# just record memory usage for the orphan
usage = {'memory_mb': memory_mb}
self._update_usage(usage, nodename)
def delete_allocation_for_shelve_offloaded_instance(self, context,
                                                    instance):
    """Drop the shelve-offloaded instance's allocation in placement."""
    uuid = instance.uuid
    self.reportclient.delete_allocation_for_instance(context, uuid)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used",
"numa_topology"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _get_instance_type(self, context, instance, prefix, migration):
stashed_flavors = migration.migration_type in ('resize',)
if stashed_flavors:
return getattr(instance, '%sflavor' % prefix)
else:
# NOTE(ndipanov): Certain migration types (all but resize)
# do not change flavors so there is no need to stash
# them. In that case - just get the instance flavor.
return instance.flavor
def _get_usage_dict(self, object_or_dict, instance, **updates):
    """Build the usage dict the _update methods expect.

    :param object_or_dict: an objects.Instance, objects.Flavor, or a
        plain dict of usage values
    :param instance: the related Instance; used to determine (and
        cache) whether it is volume-backed
    :param updates: extra key/value overrides; currently only
        'numa_topology' is honored
    :returns: a usage dict derived from object_or_dict plus updates
    """
    def _is_bfv():
        # Check to see if we have the is_bfv value cached.
        if instance.uuid in self.is_bfv:
            is_bfv = self.is_bfv[instance.uuid]
        else:
            is_bfv = compute_utils.is_volume_backed_instance(
                instance._context, instance)
            self.is_bfv[instance.uuid] = is_bfv
        return is_bfv

    usage = {}
    if isinstance(object_or_dict, objects.Instance):
        # Volume-backed instances consume no local root disk.
        is_bfv = _is_bfv()
        usage = {'memory_mb': object_or_dict.flavor.memory_mb,
                 'swap': object_or_dict.flavor.swap,
                 'vcpus': object_or_dict.flavor.vcpus,
                 'root_gb': (0 if is_bfv else
                             object_or_dict.flavor.root_gb),
                 'ephemeral_gb': object_or_dict.flavor.ephemeral_gb,
                 'numa_topology': object_or_dict.numa_topology}
    elif isinstance(object_or_dict, objects.Flavor):
        usage = obj_base.obj_to_primitive(object_or_dict)
        if _is_bfv():
            usage['root_gb'] = 0
    else:
        usage.update(object_or_dict)

    for key in ('numa_topology',):
        if key in updates:
            usage[key] = updates[key]
    return usage
def build_failed(self, nodename):
    """Increment the failed_builds stat for the given node."""
    node_stats = self.stats[nodename]
    node_stats.build_failed()
def build_succeeded(self, nodename):
    """Reset/decay the failed_builds stat for the given node."""
    node_stats = self.stats[nodename]
    node_stats.build_succeeded()
| true | true |
1c2c4942cd88d34d5c3829e03fcdff03e88a827c | 3,893 | py | Python | pc-compliance-alerts-get.py | sgordon46/pc-toolbox | a00cf2aa41eff683151a7f59ed108eaae0e3b384 | [
"MIT"
] | null | null | null | pc-compliance-alerts-get.py | sgordon46/pc-toolbox | a00cf2aa41eff683151a7f59ed108eaae0e3b384 | [
"MIT"
] | null | null | null | pc-compliance-alerts-get.py | sgordon46/pc-toolbox | a00cf2aa41eff683151a7f59ed108eaae0e3b384 | [
"MIT"
] | null | null | null | from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import argparse
import pc_lib_api
import pc_lib_general
import json
# --Execution Block-- #
# --Parse command line arguments-- #
parser = argparse.ArgumentParser(prog='rltoolbox')
parser.add_argument(
'-u',
'--username',
type=str,
help='*Required* - Prisma Cloud API Access Key ID that you want to set to access your Prisma Cloud account.')
parser.add_argument(
'-p',
'--password',
type=str,
help='*Required* - Prisma Cloud API Secret Key that you want to set to access your Prisma Cloud account.')
parser.add_argument(
'-url',
'--uiurl',
type=str,
help='*Required* - Base URL used in the UI for connecting to Prisma Cloud. '
'Formatted as app.prismacloud.io or app2.prismacloud.io or app.eu.prismacloud.io, etc. '
'You can also input the api version of the URL if you know it and it will be passed through.')
parser.add_argument(
'-y',
'--yes',
action='store_true',
help='(Optional) - Override user input for verification (auto answer for yes).')
parser.add_argument(
'source_compliance_standard_name',
type=str,
help='Name of the compliance standard to filter on. Please enter it exactly as listed in the Prisma Cloud UI')
parser.add_argument(
'source_cloud_account_name',
type=str,
help='Name of the cloud account to filter on. Please enter it exactly as listed in the Prisma Cloud UI')
args = parser.parse_args()
# --End parse command line arguments-- #
# --Main-- #
# Get login details worked out
pc_settings = pc_lib_general.pc_login_get(args.username, args.password, args.uiurl)
# Verification (override with -y)
if not args.yes:
print()
print('Ready to excute commands aginst your Prisma Cloud tenant.')
verification_response = str(input('Would you like to continue (y or yes to continue)?'))
continue_response = {'yes', 'y'}
print()
if verification_response not in continue_response:
pc_lib_general.pc_exit_error(400, 'Verification failed due to user response. Exiting...')
# Sort out API Login
print('API - Getting authentication token...', end='')
pc_settings = pc_lib_api.pc_jwt_get(pc_settings)
print('Done.')
# Get the standard and cloud account name from the command line
compliance_standard_name = args.source_compliance_standard_name
cloud_account_name = args.source_cloud_account_name
# Set some values used in the filter
alert_status = "open"
alert_detail = True
timeRange_type = "to_now"
timeRange_value = "epoch"
# Get the Policies list for the Compliance Standard
print('API - Getting the compliance standard policy list...', end='')
pc_settings, response_package = pc_lib_api.api_compliance_standard_policy_list_get(pc_settings, compliance_standard_name)
compliance_policy_list = response_package['data']
print('Done.')
# Loop through the policy list to collect the related alerts for a given cloud account
alert_list_complete = []
for compliance_policy in compliance_policy_list:
alert_filter = {"detailed": alert_detail,
"timeRange": {"type": timeRange_type, "value": timeRange_value},
"filters": [{"operator": "=", "name": "alert.status", "value": alert_status},
{"operator": "=", "name": "cloud.account", "value": cloud_account_name},
{"name": "policy.id", "operator": "=", "value": compliance_policy['policyId']}]
}
print('API - Getting the alerts for the policy named: ' + compliance_policy['name'] + '...', end='')
pc_settings, response_package = pc_lib_api.api_alert_list_get(pc_settings, data=alert_filter)
alert_list_complete.extend(response_package['data'])
print('Done.')
# Print the resulting data to the console
print()
print(json.dumps(alert_list_complete))
| 35.715596 | 121 | 0.702543 | from __future__ import print_function
# Python 2 compatibility: alias input() to raw_input() so the confirmation
# prompt below always returns the entered text without evaluating it.
try:
    input = raw_input
except NameError:
    pass
import argparse
import pc_lib_api
import pc_lib_general
import json

# --- Command-line handling ---
parser = argparse.ArgumentParser(prog='rltoolbox')
parser.add_argument(
    '-u',
    '--username',
    type=str,
    help='*Required* - Prisma Cloud API Access Key ID that you want to set to access your Prisma Cloud account.')
parser.add_argument(
    '-p',
    '--password',
    type=str,
    help='*Required* - Prisma Cloud API Secret Key that you want to set to access your Prisma Cloud account.')
parser.add_argument(
    '-url',
    '--uiurl',
    type=str,
    help='*Required* - Base URL used in the UI for connecting to Prisma Cloud. '
         'Formatted as app.prismacloud.io or app2.prismacloud.io or app.eu.prismacloud.io, etc. '
         'You can also input the api version of the URL if you know it and it will be passed through.')
parser.add_argument(
    '-y',
    '--yes',
    action='store_true',
    help='(Optional) - Override user input for verification (auto answer for yes).')
parser.add_argument(
    'source_compliance_standard_name',
    type=str,
    help='Name of the compliance standard to filter on. Please enter it exactly as listed in the Prisma Cloud UI')
parser.add_argument(
    'source_cloud_account_name',
    type=str,
    help='Name of the cloud account to filter on. Please enter it exactly as listed in the Prisma Cloud UI')
args = parser.parse_args()

# Log in, then (unless -y/--yes was given) ask the operator to confirm.
pc_settings = pc_lib_general.pc_login_get(args.username, args.password, args.uiurl)
if not args.yes:
    print()
    # Fix: corrected 'excute'/'aginst' typos in the operator-facing prompt.
    print('Ready to execute commands against your Prisma Cloud tenant.')
    verification_response = str(input('Would you like to continue (y or yes to continue)?'))
    continue_response = {'yes', 'y'}
    print()
    if verification_response not in continue_response:
        pc_lib_general.pc_exit_error(400, 'Verification failed due to user response. Exiting...')

# Obtain a JWT for the subsequent API calls.
print('API - Getting authentication token...', end='')
pc_settings = pc_lib_api.pc_jwt_get(pc_settings)
print('Done.')

# Source standard / cloud account come from the positional arguments.
compliance_standard_name = args.source_compliance_standard_name
cloud_account_name = args.source_cloud_account_name

# Values used in the alert filter below.
alert_status = "open"
alert_detail = True
timeRange_type = "to_now"
timeRange_value = "epoch"

# Pull the policy list attached to the compliance standard.
print('API - Getting the compliance standard policy list...', end='')
pc_settings, response_package = pc_lib_api.api_compliance_standard_policy_list_get(pc_settings, compliance_standard_name)
compliance_policy_list = response_package['data']
print('Done.')

# Collect the open alerts for each policy, scoped to the cloud account.
alert_list_complete = []
for compliance_policy in compliance_policy_list:
    alert_filter = {"detailed": alert_detail,
                    "timeRange": {"type": timeRange_type, "value": timeRange_value},
                    "filters": [{"operator": "=", "name": "alert.status", "value": alert_status},
                                {"operator": "=", "name": "cloud.account", "value": cloud_account_name},
                                {"name": "policy.id", "operator": "=", "value": compliance_policy['policyId']}]
                    }
    print('API - Getting the alerts for the policy named: ' + compliance_policy['name'] + '...', end='')
    pc_settings, response_package = pc_lib_api.api_alert_list_get(pc_settings, data=alert_filter)
    alert_list_complete.extend(response_package['data'])
    print('Done.')

# Dump the aggregated alert list as JSON on stdout.
print()
print(json.dumps(alert_list_complete))
1c2c4a54661f935f34012845375fb387b79babf9 | 23,116 | py | Python | neutron/tests/unit/extensions/test_l3_ext_gw_mode.py | deepak-dt/neutron | 5226e50c68084dd3d76861d1247f9f292949b616 | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/extensions/test_l3_ext_gw_mode.py | deepak-dt/neutron | 5226e50c68084dd3d76861d1247f9f292949b616 | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/extensions/test_l3_ext_gw_mode.py | deepak-dt/neutron | 5226e50c68084dd3d76861d1247f9f292949b616 | [
"Apache-2.0"
] | 1 | 2018-08-28T17:13:16.000Z | 2018-08-28T17:13:16.000Z | # Copyright 2013 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import netaddr
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib import constants
from neutron_lib import context as nctx
from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import testscenarios
from webob import exc
from neutron.common import utils
from neutron.db import l3_db
from neutron.db import l3_gwmode_db
from neutron.db.models import l3 as l3_models
from neutron.extensions import l3
from neutron.objects import network as net_obj
from neutron.objects import ports as port_obj
from neutron.objects import router as l3_obj
from neutron.objects import subnet as subnet_obj
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
# Shorthand for generating UUIDs used as fake resource identifiers below.
_uuid = uuidutils.generate_uuid
# Fixed ids/MACs for the fake ports wired up by TestL3GwModeMixin.setUp():
# the router gateway port, the floating-IP external and internal ports,
# and the router's internal interface port.
FAKE_GW_PORT_ID = _uuid()
FAKE_GW_PORT_MAC = 'aa:bb:cc:dd:ee:ff'
FAKE_FIP_EXT_PORT_ID = _uuid()
FAKE_FIP_EXT_PORT_MAC = '11:22:33:44:55:66'
FAKE_FIP_INT_PORT_ID = _uuid()
FAKE_FIP_INT_PORT_MAC = 'aa:aa:aa:aa:aa:aa'
FAKE_ROUTER_PORT_ID = _uuid()
FAKE_ROUTER_PORT_MAC = 'bb:bb:bb:bb:bb:bb'
class TestExtensionManager(object):
    """Minimal extension manager exposing only the L3 API resources."""

    def get_resources(self):
        # Delegate straight to the l3 extension descriptor.
        resources = l3.L3.get_resources()
        return resources

    def get_actions(self):
        # No extra actions are contributed by this manager.
        return list()

    def get_request_extensions(self):
        # No request extensions are contributed by this manager.
        return list()
# A simple class for making a concrete class out of the mixin
# for the case of a plugin that integrates l3 routing.
class TestDbIntPlugin(test_l3.TestL3NatIntPlugin,
                      l3_gwmode_db.L3_NAT_db_mixin):
    """Core plugin that also integrates L3 routing plus ext-gw-mode."""

    supported_extension_aliases = ["external-net", "router", "ext-gw-mode"]
# A simple class for making a concrete class out of the mixin
# for the case of a l3 router service plugin
class TestDbSepPlugin(test_l3.TestL3NatServicePlugin,
                      l3_gwmode_db.L3_NAT_db_mixin):
    """Separate L3 service plugin providing router plus ext-gw-mode."""

    supported_extension_aliases = ["router", "ext-gw-mode"]
class TestGetEnableSnat(testscenarios.WithScenarios, base.BaseTestCase):
    """Exercise L3_NAT_dbonly_mixin._get_enable_snat under both defaults."""

    # testscenarios runs every test once per scenario, injecting the
    # enable_snat_by_default attribute on the test instance.
    scenarios = [
        ('enabled', {'enable_snat_by_default': True}),
        ('disabled', {'enable_snat_by_default': False})]

    def setUp(self):
        super(TestGetEnableSnat, self).setUp()
        self.config(enable_snat_by_default=self.enable_snat_by_default)

    def _test_get_enable_snat(self, expected, info):
        # The helper under test is invoked on the mixin class directly;
        # no instance state is required.
        self.assertEqual(
            expected,
            l3_gwmode_db.L3_NAT_dbonly_mixin._get_enable_snat(info))

    def test_get_enable_snat_without_gw_info(self):
        # No gateway info at all -> fall back to the configured default.
        self._test_get_enable_snat(self.enable_snat_by_default, {})

    def test_get_enable_snat_without_enable_snat(self):
        # Gateway info present but without an explicit enable_snat key.
        self._test_get_enable_snat(self.enable_snat_by_default,
                                   {'network_id': _uuid()})

    def test_get_enable_snat_with_snat_enabled(self):
        self._test_get_enable_snat(True, {'enable_snat': True})

    def test_get_enable_snat_with_snat_disabled(self):
        self._test_get_enable_snat(False, {'enable_snat': False})
class TestL3GwModeMixin(testlib_api.SqlTestCase):
    """DB-level tests for ext-gw-mode helpers against a sample topology.

    setUp builds, directly in the DB session: an external network with a
    router gateway port and a floating-IP port, plus an internal
    network/subnet with a router interface port and a VM port holding the
    floating IP's fixed address.
    """

    def setUp(self):
        super(TestL3GwModeMixin, self).setUp()
        plugin = __name__ + '.' + TestDbIntPlugin.__name__
        self.setup_coreplugin(plugin)
        self.target_object = TestDbIntPlugin()
        # Patch the context
        ctx_patcher = mock.patch('neutron_lib.context', autospec=True)
        mock_context = ctx_patcher.start()
        self.context = mock_context.get_admin_context()
        # This ensure also calls to elevated work in unit tests
        self.context.elevated.return_value = self.context
        self.context.session = db_api.get_writer_session()
        # Create sample data for tests
        self.ext_net_id = _uuid()
        self.int_net_id = _uuid()
        self.int_sub_id = _uuid()
        self.tenant_id = 'the_tenant'
        # External network plus its "external" marker record.
        self.network = net_obj.Network(
            self.context,
            id=self.ext_net_id,
            project_id=self.tenant_id,
            admin_state_up=True,
            status=constants.NET_STATUS_ACTIVE)
        self.net_ext = net_obj.ExternalNetwork(
            self.context, network_id=self.ext_net_id)
        self.network.create()
        self.net_ext.create()
        # Router with SNAT enabled; the gateway port is attached below.
        self.router = l3_models.Router(
            id=_uuid(),
            name=None,
            tenant_id=self.tenant_id,
            admin_state_up=True,
            status=constants.NET_STATUS_ACTIVE,
            enable_snat=True,
            gw_port_id=None)
        self.context.session.add(self.router)
        self.context.session.flush()
        self.router_gw_port = port_obj.Port(
            self.context,
            id=FAKE_GW_PORT_ID,
            project_id=self.tenant_id,
            device_id=self.router.id,
            device_owner=l3_db.DEVICE_OWNER_ROUTER_GW,
            admin_state_up=True,
            status=constants.PORT_STATUS_ACTIVE,
            mac_address=netaddr.EUI(FAKE_GW_PORT_MAC),
            network_id=self.ext_net_id)
        self.router_gw_port.create()
        self.router.gw_port_id = self.router_gw_port.id
        self.context.session.add(self.router)
        self.context.session.flush()
        # External port backing the floating IP.
        self.fip_ext_port = port_obj.Port(
            self.context,
            id=FAKE_FIP_EXT_PORT_ID,
            project_id=self.tenant_id,
            admin_state_up=True,
            device_id=self.router.id,
            device_owner=l3_db.DEVICE_OWNER_FLOATINGIP,
            status=constants.PORT_STATUS_ACTIVE,
            mac_address=netaddr.EUI(FAKE_FIP_EXT_PORT_MAC),
            network_id=self.ext_net_id)
        self.fip_ext_port.create()
        self.context.session.flush()
        # Internal network/subnet and the router's interface on it.
        self.int_net = net_obj.Network(
            self.context,
            id=self.int_net_id,
            project_id=self.tenant_id,
            admin_state_up=True,
            status=constants.NET_STATUS_ACTIVE)
        self.int_sub = subnet_obj.Subnet(self.context,
                                         id=self.int_sub_id,
                                         project_id=self.tenant_id,
                                         ip_version=4,
                                         cidr=utils.AuthenticIPNetwork('3.3.3.0/24'),
                                         gateway_ip=netaddr.IPAddress('3.3.3.1'),
                                         network_id=self.int_net_id)
        self.router_port = port_obj.Port(
            self.context,
            id=FAKE_ROUTER_PORT_ID,
            project_id=self.tenant_id,
            admin_state_up=True,
            device_id=self.router.id,
            device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
            status=constants.PORT_STATUS_ACTIVE,
            mac_address=netaddr.EUI(FAKE_ROUTER_PORT_MAC),
            network_id=self.int_net_id)
        # The router interface owns the subnet gateway address (3.3.3.1).
        self.router_port_ip_info = port_obj.IPAllocation(self.context,
            port_id=self.router_port.id,
            network_id=self.int_net.id,
            subnet_id=self.int_sub_id,
            ip_address='3.3.3.1')
        self.int_net.create()
        self.int_sub.create()
        self.router_port.create()
        self.router_port_ip_info.create()
        self.context.session.flush()
        # Internal (VM) port the floating IP can be associated with.
        self.fip_int_port = port_obj.Port(
            self.context,
            id=FAKE_FIP_INT_PORT_ID,
            project_id=self.tenant_id,
            admin_state_up=True,
            device_id='something',
            device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova',
            status=constants.PORT_STATUS_ACTIVE,
            mac_address=netaddr.EUI(FAKE_FIP_INT_PORT_MAC),
            network_id=self.int_net_id)
        self.fip_int_ip_info = port_obj.IPAllocation(self.context,
            port_id=self.fip_int_port.id,
            network_id=self.int_net.id,
            subnet_id=self.int_sub_id,
            ip_address='3.3.3.3')
        # The floating IP starts unassociated (no fixed port/router).
        self.fip = l3_obj.FloatingIP(
            self.context,
            id=_uuid(),
            floating_ip_address=netaddr.IPAddress('1.1.1.2'),
            floating_network_id=self.ext_net_id,
            floating_port_id=FAKE_FIP_EXT_PORT_ID,
            fixed_port_id=None,
            fixed_ip_address=None,
            router_id=None)
        self.fip_int_port.create()
        self.fip_int_ip_info.create()
        self.fip.create()
        self.context.session.flush()
        self.context.session.expire_all()
        self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID,
                            'tenant_id': self.tenant_id}

    def _get_gwports_dict(self, gw_ports):
        """Map gateway port id -> port, as _build_routers_list expects."""
        return dict((gw_port['id'], gw_port)
                    for gw_port in gw_ports)

    def _reset_ext_gw(self):
        # Reset external gateway
        self.router.gw_port_id = None
        self.context.session.add(self.router)
        self.context.session.flush()

    def _test_update_router_gw(self, current_enable_snat, gw_info=None,
                               expected_enable_snat=True):
        """Drive _update_router_gw_info and check the resulting gw/SNAT state.

        If current_enable_snat is False, a first update is applied to put
        the router into the SNAT-disabled state before the update under test.
        """
        if not current_enable_snat:
            previous_gw_info = {'network_id': self.ext_net_id,
                                'enable_snat': current_enable_snat}
            self.target_object._update_router_gw_info(
                self.context, self.router.id, previous_gw_info)
        self.target_object._update_router_gw_info(
            self.context, self.router.id, gw_info)
        router = self.target_object._get_router(
            self.context, self.router.id)
        try:
            self.assertEqual(FAKE_GW_PORT_ID,
                             router.gw_port.id)
            self.assertEqual(netaddr.EUI(FAKE_GW_PORT_MAC),
                             router.gw_port.mac_address)
        except AttributeError:
            # No gateway port on the router: it must really be unset.
            self.assertIsNone(router.gw_port)
        self.assertEqual(expected_enable_snat, router.enable_snat)

    def test_update_router_gw_with_gw_info_none(self):
        self._test_update_router_gw(current_enable_snat=True)

    def test_update_router_gw_without_info_and_snat_disabled_previously(self):
        self._test_update_router_gw(current_enable_snat=False)

    def test_update_router_gw_with_network_only(self):
        info = {'network_id': self.ext_net_id}
        self._test_update_router_gw(current_enable_snat=True, gw_info=info)

    def test_update_router_gw_with_network_and_snat_disabled_previously(self):
        info = {'network_id': self.ext_net_id}
        self._test_update_router_gw(current_enable_snat=False, gw_info=info)

    def test_update_router_gw_with_snat_disabled(self):
        info = {'network_id': self.ext_net_id,
                'enable_snat': False}
        self._test_update_router_gw(
            current_enable_snat=True, gw_info=info, expected_enable_snat=False)

    def test_update_router_gw_with_snat_enabled(self):
        info = {'network_id': self.ext_net_id,
                'enable_snat': True}
        self._test_update_router_gw(current_enable_snat=False, gw_info=info)

    def test_make_router_dict_no_ext_gw(self):
        self._reset_ext_gw()
        router_dict = self.target_object._make_router_dict(self.router)
        self.assertIsNone(router_dict[l3_apidef.EXTERNAL_GW_INFO])

    def test_make_router_dict_with_ext_gw(self):
        router_dict = self.target_object._make_router_dict(self.router)
        self.assertEqual({'network_id': self.ext_net_id,
                          'enable_snat': True,
                          'external_fixed_ips': []},
                         router_dict[l3_apidef.EXTERNAL_GW_INFO])

    def test_make_router_dict_with_ext_gw_snat_disabled(self):
        self.router.enable_snat = False
        router_dict = self.target_object._make_router_dict(self.router)
        self.assertEqual({'network_id': self.ext_net_id,
                          'enable_snat': False,
                          'external_fixed_ips': []},
                         router_dict[l3_apidef.EXTERNAL_GW_INFO])

    def test_build_routers_list_no_ext_gw(self):
        self._reset_ext_gw()
        router_dict = self.target_object._make_router_dict(self.router)
        routers = self.target_object._build_routers_list(self.context,
                                                         [router_dict],
                                                         [])
        self.assertEqual(1, len(routers))
        router = routers[0]
        self.assertIsNone(router.get('gw_port'))
        self.assertIsNone(router.get('enable_snat'))

    def test_build_routers_list_with_ext_gw(self):
        router_dict = self.target_object._make_router_dict(self.router)
        routers = self.target_object._build_routers_list(
            self.context, [router_dict],
            self._get_gwports_dict([self.router.gw_port]))
        self.assertEqual(1, len(routers))
        router = routers[0]
        self.assertIsNotNone(router.get('gw_port'))
        self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
        self.assertTrue(router.get('enable_snat'))

    def test_build_routers_list_with_ext_gw_snat_disabled(self):
        self.router.enable_snat = False
        router_dict = self.target_object._make_router_dict(self.router)
        routers = self.target_object._build_routers_list(
            self.context, [router_dict],
            self._get_gwports_dict([self.router.gw_port]))
        self.assertEqual(1, len(routers))
        router = routers[0]
        self.assertIsNotNone(router.get('gw_port'))
        self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
        self.assertFalse(router.get('enable_snat'))

    def test_build_routers_list_with_gw_port_mismatch(self):
        # Router has a gw_port_id but the ports dict does not contain it:
        # the gateway info must be dropped from the result.
        router_dict = self.target_object._make_router_dict(self.router)
        routers = self.target_object._build_routers_list(
            self.context, [router_dict], {})
        self.assertEqual(1, len(routers))
        router = routers[0]
        self.assertIsNone(router.get('gw_port'))
        self.assertIsNone(router.get('enable_snat'))
class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
                           test_l3.L3NatTestCaseMixin):
    """API-level ext-gw-mode tests with L3 integrated in the core plugin."""

    def setUp(self, plugin=None, svc_plugins=None, ext_mgr=None):
        plugin = plugin or (
            'neutron.tests.unit.extensions.test_l3_ext_gw_mode.'
            'TestDbIntPlugin')
        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('allow_overlapping_ips', True)
        ext_mgr = ext_mgr or TestExtensionManager()
        super(ExtGwModeIntTestCase, self).setUp(plugin=plugin,
                                                ext_mgr=ext_mgr,
                                                service_plugins=svc_plugins)

    def _set_router_external_gateway(self, router_id, network_id,
                                     snat_enabled=None,
                                     expected_code=exc.HTTPOk.code,
                                     neutron_context=None):
        """PUT the router's external_gateway_info through the API."""
        ext_gw_info = {'network_id': network_id}
        # Need to set enable_snat also if snat_enabled == False
        if snat_enabled is not None:
            ext_gw_info['enable_snat'] = snat_enabled
        return self._update('routers', router_id,
                            {'router': {'external_gateway_info':
                                        ext_gw_info}},
                            expected_code=expected_code,
                            neutron_context=neutron_context)

    def test_router_gateway_set_fail_after_port_create(self):
        # A failure while setting the gateway must not leak the gw port.
        with self.router() as r, self.subnet() as s:
            ext_net_id = s['subnet']['network_id']
            self._set_net_external(ext_net_id)
            plugin = directory.get_plugin()
            with mock.patch.object(plugin, '_get_port',
                                   side_effect=ValueError()):
                self._set_router_external_gateway(r['router']['id'],
                                                  ext_net_id,
                                                  expected_code=500)
            ports = [p for p in plugin.get_ports(nctx.get_admin_context())
                     if p['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_GW]
            self.assertFalse(ports)

    def test_router_gateway_set_retry(self):
        # A transient RetryRequest during gw validation must be retried
        # and the gateway set successfully on the second attempt.
        with self.router() as r, self.subnet() as s:
            ext_net_id = s['subnet']['network_id']
            self._set_net_external(ext_net_id)
            with mock.patch.object(
                    l3_db.L3_NAT_dbonly_mixin, '_validate_gw_info',
                    side_effect=[db_exc.RetryRequest(None), ext_net_id]):
                self._set_router_external_gateway(r['router']['id'],
                                                  ext_net_id)
            res = self._show('routers', r['router']['id'])['router']
            self.assertEqual(ext_net_id,
                             res['external_gateway_info']['network_id'])

    def test_router_create_with_gwinfo_invalid_ext_ip(self):
        # A malformed external fixed IP must yield a 400 with a clear message.
        with self.subnet() as s:
            self._set_net_external(s['subnet']['network_id'])
            ext_info = {
                'network_id': s['subnet']['network_id'],
                'external_fixed_ips': [{'ip_address': '10.0.0.'}]
            }
            error_code = exc.HTTPBadRequest.code
            res = self._create_router(
                self.fmt, _uuid(), arg_list=('external_gateway_info',),
                external_gateway_info=ext_info,
                expected_code=error_code
            )
            msg = ("Invalid input for external_gateway_info. "
                   "Reason: '10.0.0.' is not a valid IP address.")
            body = jsonutils.loads(res.body)
            self.assertEqual(msg, body['NeutronError']['message'])

    def test_router_create_show_no_ext_gwinfo(self):
        name = 'router1'
        tenant_id = _uuid()
        expected_value = [('name', name), ('tenant_id', tenant_id),
                          ('admin_state_up', True), ('status', 'ACTIVE'),
                          ('external_gateway_info', None)]
        with self.router(name=name, admin_state_up=True,
                         tenant_id=tenant_id) as router:
            res = self._show('routers', router['router']['id'])
            for k, v in expected_value:
                self.assertEqual(res['router'][k], v)

    def _test_router_create_show_ext_gwinfo(self, snat_input_value,
                                            snat_expected_value):
        """Create a router with gw info and verify enable_snat in the show."""
        name = 'router1'
        tenant_id = _uuid()
        with self.subnet() as s:
            ext_net_id = s['subnet']['network_id']
            self._set_net_external(ext_net_id)
            input_value = {'network_id': ext_net_id}
            if snat_input_value in (True, False):
                input_value['enable_snat'] = snat_input_value
            expected_value = [('name', name), ('tenant_id', tenant_id),
                              ('admin_state_up', True), ('status', 'ACTIVE'),
                              ('external_gateway_info',
                               {'network_id': ext_net_id,
                                'enable_snat': snat_expected_value,
                                'external_fixed_ips': [{
                                    'ip_address': mock.ANY,
                                    'subnet_id': s['subnet']['id']}]})]
            with self.router(
                name=name, admin_state_up=True, tenant_id=tenant_id,
                external_gateway_info=input_value) as router:
                res = self._show('routers', router['router']['id'])
                for k, v in expected_value:
                    self.assertEqual(res['router'][k], v)

    def test_router_create_show_ext_gwinfo_default(self):
        self._test_router_create_show_ext_gwinfo(None, True)

    def test_router_create_show_ext_gwinfo_with_snat_enabled(self):
        self._test_router_create_show_ext_gwinfo(True, True)

    def test_router_create_show_ext_gwinfo_with_snat_disabled(self):
        self._test_router_create_show_ext_gwinfo(False, False)

    def _test_router_update_ext_gwinfo(self, snat_input_value,
                                       snat_expected_value=False,
                                       expected_http_code=exc.HTTPOk.code):
        """Update the router gateway and verify the resulting enable_snat."""
        with self.router() as r:
            with self.subnet() as s:
                try:
                    ext_net_id = s['subnet']['network_id']
                    self._set_net_external(ext_net_id)
                    self._set_router_external_gateway(
                        r['router']['id'], ext_net_id,
                        snat_enabled=snat_input_value,
                        expected_code=expected_http_code)
                    if expected_http_code != exc.HTTPOk.code:
                        return
                    body = self._show('routers', r['router']['id'])
                    res_gw_info = body['router']['external_gateway_info']
                    self.assertEqual(ext_net_id, res_gw_info['network_id'])
                    self.assertEqual(snat_expected_value,
                                     res_gw_info['enable_snat'])
                finally:
                    # Always detach the gateway so the fixtures tear down.
                    self._remove_external_gateway_from_router(
                        r['router']['id'], ext_net_id)

    def test_router_update_ext_gwinfo_default(self):
        self._test_router_update_ext_gwinfo(None, True)

    def test_router_update_ext_gwinfo_with_snat_enabled(self):
        self._test_router_update_ext_gwinfo(True, True)

    def test_router_update_ext_gwinfo_with_snat_disabled(self):
        self._test_router_update_ext_gwinfo(False, False)

    def test_router_update_ext_gwinfo_with_invalid_snat_setting(self):
        self._test_router_update_ext_gwinfo(
            'xxx', None, expected_http_code=exc.HTTPBadRequest.code)
class ExtGwModeSepTestCase(ExtGwModeIntTestCase):
    """Re-run the ext-gw-mode tests with L3 as a separate service plugin."""

    def setUp(self, plugin=None):
        # Back up the l3 resource attribute map, since the tests mutate it.
        self._l3_attribute_map_bk = {
            resource: l3_apidef.RESOURCE_ATTRIBUTE_MAP[resource].copy()
            for resource in l3_apidef.RESOURCE_ATTRIBUTE_MAP}
        plugin = plugin or (
            'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin')
        # Route L3 through a dedicated service plugin instead of the core.
        l3_plugin = ('neutron.tests.unit.extensions.'
                     'test_l3_ext_gw_mode.TestDbSepPlugin')
        # Overlapping IPs must be allowed for these tests.
        cfg.CONF.set_default('allow_overlapping_ips', True)
        super(ExtGwModeSepTestCase, self).setUp(
            plugin=plugin,
            svc_plugins={'l3_plugin_name': l3_plugin})
| 43.207477 | 79 | 0.62779 |
import mock
import netaddr
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib import constants
from neutron_lib import context as nctx
from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
import testscenarios
from webob import exc
from neutron.common import utils
from neutron.db import l3_db
from neutron.db import l3_gwmode_db
from neutron.db.models import l3 as l3_models
from neutron.extensions import l3
from neutron.objects import network as net_obj
from neutron.objects import ports as port_obj
from neutron.objects import router as l3_obj
from neutron.objects import subnet as subnet_obj
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
# Shorthand for generating UUIDs used as fake resource identifiers below.
_uuid = uuidutils.generate_uuid
# Fixed ids/MACs for the fake ports wired up by TestL3GwModeMixin.setUp().
FAKE_GW_PORT_ID = _uuid()
FAKE_GW_PORT_MAC = 'aa:bb:cc:dd:ee:ff'
FAKE_FIP_EXT_PORT_ID = _uuid()
FAKE_FIP_EXT_PORT_MAC = '11:22:33:44:55:66'
FAKE_FIP_INT_PORT_ID = _uuid()
FAKE_FIP_INT_PORT_MAC = 'aa:aa:aa:aa:aa:aa'
FAKE_ROUTER_PORT_ID = _uuid()
FAKE_ROUTER_PORT_MAC = 'bb:bb:bb:bb:bb:bb'
class TestExtensionManager(object):
    """Minimal extension manager exposing only the L3 API resources."""
    def get_resources(self):
        # Delegate straight to the l3 extension descriptor.
        return l3.L3.get_resources()
    def get_actions(self):
        # No extra actions are contributed by this manager.
        return []
    def get_request_extensions(self):
        # No request extensions are contributed by this manager.
        return []
class TestDbIntPlugin(test_l3.TestL3NatIntPlugin,
                      l3_gwmode_db.L3_NAT_db_mixin):
    """Core plugin that also integrates L3 routing plus ext-gw-mode."""
    supported_extension_aliases = ["external-net", "router", "ext-gw-mode"]
class TestDbSepPlugin(test_l3.TestL3NatServicePlugin,
                      l3_gwmode_db.L3_NAT_db_mixin):
    """Separate L3 service plugin providing router plus ext-gw-mode."""
    supported_extension_aliases = ["router", "ext-gw-mode"]
class TestGetEnableSnat(testscenarios.WithScenarios, base.BaseTestCase):
    """Exercise L3_NAT_dbonly_mixin._get_enable_snat under both defaults."""
    # testscenarios runs every test once per scenario, injecting the
    # enable_snat_by_default attribute on the test instance.
    scenarios = [
        ('enabled', {'enable_snat_by_default': True}),
        ('disabled', {'enable_snat_by_default': False})]
    def setUp(self):
        super(TestGetEnableSnat, self).setUp()
        self.config(enable_snat_by_default=self.enable_snat_by_default)
    def _test_get_enable_snat(self, expected, info):
        # The helper under test needs no mixin instance.
        observed = l3_gwmode_db.L3_NAT_dbonly_mixin._get_enable_snat(info)
        self.assertEqual(expected, observed)
    def test_get_enable_snat_without_gw_info(self):
        # No gateway info at all -> fall back to the configured default.
        self._test_get_enable_snat(self.enable_snat_by_default, {})
    def test_get_enable_snat_without_enable_snat(self):
        # Gateway info present but without an explicit enable_snat key.
        info = {'network_id': _uuid()}
        self._test_get_enable_snat(self.enable_snat_by_default, info)
    def test_get_enable_snat_with_snat_enabled(self):
        self._test_get_enable_snat(True, {'enable_snat': True})
    def test_get_enable_snat_with_snat_disabled(self):
        self._test_get_enable_snat(False, {'enable_snat': False})
class TestL3GwModeMixin(testlib_api.SqlTestCase):
def setUp(self):
super(TestL3GwModeMixin, self).setUp()
plugin = __name__ + '.' + TestDbIntPlugin.__name__
self.setup_coreplugin(plugin)
self.target_object = TestDbIntPlugin()
ctx_patcher = mock.patch('neutron_lib.context', autospec=True)
mock_context = ctx_patcher.start()
self.context = mock_context.get_admin_context()
self.context.elevated.return_value = self.context
self.context.session = db_api.get_writer_session()
self.ext_net_id = _uuid()
self.int_net_id = _uuid()
self.int_sub_id = _uuid()
self.tenant_id = 'the_tenant'
self.network = net_obj.Network(
self.context,
id=self.ext_net_id,
project_id=self.tenant_id,
admin_state_up=True,
status=constants.NET_STATUS_ACTIVE)
self.net_ext = net_obj.ExternalNetwork(
self.context, network_id=self.ext_net_id)
self.network.create()
self.net_ext.create()
self.router = l3_models.Router(
id=_uuid(),
name=None,
tenant_id=self.tenant_id,
admin_state_up=True,
status=constants.NET_STATUS_ACTIVE,
enable_snat=True,
gw_port_id=None)
self.context.session.add(self.router)
self.context.session.flush()
self.router_gw_port = port_obj.Port(
self.context,
id=FAKE_GW_PORT_ID,
project_id=self.tenant_id,
device_id=self.router.id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_GW,
admin_state_up=True,
status=constants.PORT_STATUS_ACTIVE,
mac_address=netaddr.EUI(FAKE_GW_PORT_MAC),
network_id=self.ext_net_id)
self.router_gw_port.create()
self.router.gw_port_id = self.router_gw_port.id
self.context.session.add(self.router)
self.context.session.flush()
self.fip_ext_port = port_obj.Port(
self.context,
id=FAKE_FIP_EXT_PORT_ID,
project_id=self.tenant_id,
admin_state_up=True,
device_id=self.router.id,
device_owner=l3_db.DEVICE_OWNER_FLOATINGIP,
status=constants.PORT_STATUS_ACTIVE,
mac_address=netaddr.EUI(FAKE_FIP_EXT_PORT_MAC),
network_id=self.ext_net_id)
self.fip_ext_port.create()
self.context.session.flush()
self.int_net = net_obj.Network(
self.context,
id=self.int_net_id,
project_id=self.tenant_id,
admin_state_up=True,
status=constants.NET_STATUS_ACTIVE)
self.int_sub = subnet_obj.Subnet(self.context,
id=self.int_sub_id,
project_id=self.tenant_id,
ip_version=4,
cidr=utils.AuthenticIPNetwork('3.3.3.0/24'),
gateway_ip=netaddr.IPAddress('3.3.3.1'),
network_id=self.int_net_id)
self.router_port = port_obj.Port(
self.context,
id=FAKE_ROUTER_PORT_ID,
project_id=self.tenant_id,
admin_state_up=True,
device_id=self.router.id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
status=constants.PORT_STATUS_ACTIVE,
mac_address=netaddr.EUI(FAKE_ROUTER_PORT_MAC),
network_id=self.int_net_id)
self.router_port_ip_info = port_obj.IPAllocation(self.context,
port_id=self.router_port.id,
network_id=self.int_net.id,
subnet_id=self.int_sub_id,
ip_address='3.3.3.1')
self.int_net.create()
self.int_sub.create()
self.router_port.create()
self.router_port_ip_info.create()
self.context.session.flush()
self.fip_int_port = port_obj.Port(
self.context,
id=FAKE_FIP_INT_PORT_ID,
project_id=self.tenant_id,
admin_state_up=True,
device_id='something',
device_owner=constants.DEVICE_OWNER_COMPUTE_PREFIX + 'nova',
status=constants.PORT_STATUS_ACTIVE,
mac_address=netaddr.EUI(FAKE_FIP_INT_PORT_MAC),
network_id=self.int_net_id)
self.fip_int_ip_info = port_obj.IPAllocation(self.context,
port_id=self.fip_int_port.id,
network_id=self.int_net.id,
subnet_id=self.int_sub_id,
ip_address='3.3.3.3')
self.fip = l3_obj.FloatingIP(
self.context,
id=_uuid(),
floating_ip_address=netaddr.IPAddress('1.1.1.2'),
floating_network_id=self.ext_net_id,
floating_port_id=FAKE_FIP_EXT_PORT_ID,
fixed_port_id=None,
fixed_ip_address=None,
router_id=None)
self.fip_int_port.create()
self.fip_int_ip_info.create()
self.fip.create()
self.context.session.flush()
self.context.session.expire_all()
self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID,
'tenant_id': self.tenant_id}
def _get_gwports_dict(self, gw_ports):
return dict((gw_port['id'], gw_port)
for gw_port in gw_ports)
def _reset_ext_gw(self):
self.router.gw_port_id = None
self.context.session.add(self.router)
self.context.session.flush()
def _test_update_router_gw(self, current_enable_snat, gw_info=None,
expected_enable_snat=True):
if not current_enable_snat:
previous_gw_info = {'network_id': self.ext_net_id,
'enable_snat': current_enable_snat}
self.target_object._update_router_gw_info(
self.context, self.router.id, previous_gw_info)
self.target_object._update_router_gw_info(
self.context, self.router.id, gw_info)
router = self.target_object._get_router(
self.context, self.router.id)
try:
self.assertEqual(FAKE_GW_PORT_ID,
router.gw_port.id)
self.assertEqual(netaddr.EUI(FAKE_GW_PORT_MAC),
router.gw_port.mac_address)
except AttributeError:
self.assertIsNone(router.gw_port)
self.assertEqual(expected_enable_snat, router.enable_snat)
def test_update_router_gw_with_gw_info_none(self):
self._test_update_router_gw(current_enable_snat=True)
def test_update_router_gw_without_info_and_snat_disabled_previously(self):
self._test_update_router_gw(current_enable_snat=False)
def test_update_router_gw_with_network_only(self):
info = {'network_id': self.ext_net_id}
self._test_update_router_gw(current_enable_snat=True, gw_info=info)
def test_update_router_gw_with_network_and_snat_disabled_previously(self):
info = {'network_id': self.ext_net_id}
self._test_update_router_gw(current_enable_snat=False, gw_info=info)
def test_update_router_gw_with_snat_disabled(self):
info = {'network_id': self.ext_net_id,
'enable_snat': False}
self._test_update_router_gw(
current_enable_snat=True, gw_info=info, expected_enable_snat=False)
def test_update_router_gw_with_snat_enabled(self):
info = {'network_id': self.ext_net_id,
'enable_snat': True}
self._test_update_router_gw(current_enable_snat=False, gw_info=info)
def test_make_router_dict_no_ext_gw(self):
self._reset_ext_gw()
router_dict = self.target_object._make_router_dict(self.router)
self.assertIsNone(router_dict[l3_apidef.EXTERNAL_GW_INFO])
def test_make_router_dict_with_ext_gw(self):
router_dict = self.target_object._make_router_dict(self.router)
self.assertEqual({'network_id': self.ext_net_id,
'enable_snat': True,
'external_fixed_ips': []},
router_dict[l3_apidef.EXTERNAL_GW_INFO])
def test_make_router_dict_with_ext_gw_snat_disabled(self):
self.router.enable_snat = False
router_dict = self.target_object._make_router_dict(self.router)
self.assertEqual({'network_id': self.ext_net_id,
'enable_snat': False,
'external_fixed_ips': []},
router_dict[l3_apidef.EXTERNAL_GW_INFO])
def test_build_routers_list_no_ext_gw(self):
self._reset_ext_gw()
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(self.context,
[router_dict],
[])
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNone(router.get('gw_port'))
self.assertIsNone(router.get('enable_snat'))
def test_build_routers_list_with_ext_gw(self):
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
self.context, [router_dict],
self._get_gwports_dict([self.router.gw_port]))
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('gw_port'))
self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
self.assertTrue(router.get('enable_snat'))
def test_build_routers_list_with_ext_gw_snat_disabled(self):
self.router.enable_snat = False
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
self.context, [router_dict],
self._get_gwports_dict([self.router.gw_port]))
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('gw_port'))
self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
self.assertFalse(router.get('enable_snat'))
    def test_build_routers_list_with_gw_port_mismatch(self):
        # If the router's gw port id is missing from the ports dict, the
        # gateway information is silently dropped from the built entry.
        router_dict = self.target_object._make_router_dict(self.router)
        routers = self.target_object._build_routers_list(
            self.context, [router_dict], {})
        self.assertEqual(1, len(routers))
        router = routers[0]
        self.assertIsNone(router.get('gw_port'))
        self.assertIsNone(router.get('enable_snat'))
class ExtGwModeIntTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
                           test_l3.L3NatTestCaseMixin):
    """API-level tests for the external gateway mode (enable_snat)
    extension, run against an integrated (monolithic) L3 plugin."""

    def setUp(self, plugin=None, svc_plugins=None, ext_mgr=None):
        # Default to the integrated test plugin and extension manager
        # unless a subclass/caller supplies its own.
        plugin = plugin or (
            'neutron.tests.unit.extensions.test_l3_ext_gw_mode.'
            'TestDbIntPlugin')
        cfg.CONF.set_default('allow_overlapping_ips', True)
        ext_mgr = ext_mgr or TestExtensionManager()
        super(ExtGwModeIntTestCase, self).setUp(plugin=plugin,
                                                ext_mgr=ext_mgr,
                                                service_plugins=svc_plugins)

    def _set_router_external_gateway(self, router_id, network_id,
                                     snat_enabled=None,
                                     expected_code=exc.HTTPOk.code,
                                     neutron_context=None):
        """PUT an external_gateway_info update (optionally forcing
        enable_snat) and assert the expected HTTP status code."""
        ext_gw_info = {'network_id': network_id}
        # Only send enable_snat when a value was explicitly requested, so
        # the server-side default applies otherwise.
        if snat_enabled is not None:
            ext_gw_info['enable_snat'] = snat_enabled
        return self._update('routers', router_id,
                            {'router': {'external_gateway_info':
                                        ext_gw_info}},
                            expected_code=expected_code,
                            neutron_context=neutron_context)

    def test_router_gateway_set_fail_after_port_create(self):
        # A failure while creating the gateway port must roll back the
        # whole update and leave no orphaned router gateway ports behind.
        with self.router() as r, self.subnet() as s:
            ext_net_id = s['subnet']['network_id']
            self._set_net_external(ext_net_id)
            plugin = directory.get_plugin()
            with mock.patch.object(plugin, '_get_port',
                                   side_effect=ValueError()):
                self._set_router_external_gateway(r['router']['id'],
                                                  ext_net_id,
                                                  expected_code=500)
            ports = [p for p in plugin.get_ports(nctx.get_admin_context())
                     if p['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_GW]
            self.assertFalse(ports)

    def test_router_gateway_set_retry(self):
        # A db RetryRequest raised during gateway validation must be
        # retried transparently; the second attempt succeeds.
        with self.router() as r, self.subnet() as s:
            ext_net_id = s['subnet']['network_id']
            self._set_net_external(ext_net_id)
            with mock.patch.object(
                    l3_db.L3_NAT_dbonly_mixin, '_validate_gw_info',
                    side_effect=[db_exc.RetryRequest(None), ext_net_id]):
                self._set_router_external_gateway(r['router']['id'],
                                                  ext_net_id)
            res = self._show('routers', r['router']['id'])['router']
            self.assertEqual(ext_net_id,
                             res['external_gateway_info']['network_id'])

    def test_router_create_with_gwinfo_invalid_ext_ip(self):
        # An invalid fixed IP in the gateway info must fail router
        # creation with HTTP 400 and a descriptive error message.
        with self.subnet() as s:
            self._set_net_external(s['subnet']['network_id'])
            ext_info = {
                'network_id': s['subnet']['network_id'],
                'external_fixed_ips': [{'ip_address': '10.0.0.'}]
            }
            error_code = exc.HTTPBadRequest.code
            res = self._create_router(
                self.fmt, _uuid(), arg_list=('external_gateway_info',),
                external_gateway_info=ext_info,
                expected_code=error_code
            )
            msg = ("Invalid input for external_gateway_info. "
                   "Reason: '10.0.0.' is not a valid IP address.")
            body = jsonutils.loads(res.body)
            self.assertEqual(msg, body['NeutronError']['message'])

    def test_router_create_show_no_ext_gwinfo(self):
        # A router created without gateway info shows
        # external_gateway_info as None.
        name = 'router1'
        tenant_id = _uuid()
        expected_value = [('name', name), ('tenant_id', tenant_id),
                          ('admin_state_up', True), ('status', 'ACTIVE'),
                          ('external_gateway_info', None)]
        with self.router(name=name, admin_state_up=True,
                         tenant_id=tenant_id) as router:
            res = self._show('routers', router['router']['id'])
            for k, v in expected_value:
                self.assertEqual(res['router'][k], v)

    def _test_router_create_show_ext_gwinfo(self, snat_input_value,
                                            snat_expected_value):
        """Create a router with a gateway (optionally forcing enable_snat
        on input) and verify the enable_snat value shown back."""
        name = 'router1'
        tenant_id = _uuid()
        with self.subnet() as s:
            ext_net_id = s['subnet']['network_id']
            self._set_net_external(ext_net_id)
            input_value = {'network_id': ext_net_id}
            # Only pass enable_snat explicitly for real boolean inputs.
            if snat_input_value in (True, False):
                input_value['enable_snat'] = snat_input_value
            expected_value = [('name', name), ('tenant_id', tenant_id),
                              ('admin_state_up', True), ('status', 'ACTIVE'),
                              ('external_gateway_info',
                               {'network_id': ext_net_id,
                                'enable_snat': snat_expected_value,
                                'external_fixed_ips': [{
                                    'ip_address': mock.ANY,
                                    'subnet_id': s['subnet']['id']}]})]
            with self.router(
                    name=name, admin_state_up=True, tenant_id=tenant_id,
                    external_gateway_info=input_value) as router:
                res = self._show('routers', router['router']['id'])
                for k, v in expected_value:
                    self.assertEqual(res['router'][k], v)

    def test_router_create_show_ext_gwinfo_default(self):
        # enable_snat omitted on input -> defaults to True.
        self._test_router_create_show_ext_gwinfo(None, True)

    def test_router_create_show_ext_gwinfo_with_snat_enabled(self):
        self._test_router_create_show_ext_gwinfo(True, True)

    def test_router_create_show_ext_gwinfo_with_snat_disabled(self):
        self._test_router_create_show_ext_gwinfo(False, False)

    def _test_router_update_ext_gwinfo(self, snat_input_value,
                                       snat_expected_value=False,
                                       expected_http_code=exc.HTTPOk.code):
        """Set a gateway on an existing router, then verify the resulting
        enable_snat value (or just the expected error status)."""
        with self.router() as r:
            with self.subnet() as s:
                try:
                    ext_net_id = s['subnet']['network_id']
                    self._set_net_external(ext_net_id)
                    self._set_router_external_gateway(
                        r['router']['id'], ext_net_id,
                        snat_enabled=snat_input_value,
                        expected_code=expected_http_code)
                    # On an expected failure there is nothing more to check.
                    if expected_http_code != exc.HTTPOk.code:
                        return
                    body = self._show('routers', r['router']['id'])
                    res_gw_info = body['router']['external_gateway_info']
                    self.assertEqual(ext_net_id, res_gw_info['network_id'])
                    self.assertEqual(snat_expected_value,
                                     res_gw_info['enable_snat'])
                finally:
                    # Always detach the gateway so fixtures can be cleaned
                    # up even if an assertion above fails.
                    self._remove_external_gateway_from_router(
                        r['router']['id'], ext_net_id)

    def test_router_update_ext_gwinfo_default(self):
        self._test_router_update_ext_gwinfo(None, True)

    def test_router_update_ext_gwinfo_with_snat_enabled(self):
        self._test_router_update_ext_gwinfo(True, True)

    def test_router_update_ext_gwinfo_with_snat_disabled(self):
        self._test_router_update_ext_gwinfo(False, False)

    def test_router_update_ext_gwinfo_with_invalid_snat_setting(self):
        # Non-boolean enable_snat values are rejected with HTTP 400.
        self._test_router_update_ext_gwinfo(
            'xxx', None, expected_http_code=exc.HTTPBadRequest.code)
class ExtGwModeSepTestCase(ExtGwModeIntTestCase):
    """Run the same ext-gw-mode tests with L3 as a separate service
    plugin instead of being baked into the core plugin."""

    def setUp(self, plugin=None):
        # Back up the l3 attribute map before plugins mutate it, so other
        # test cases are not affected by this extension's loading.
        self._l3_attribute_map_bk = {}
        for item in l3_apidef.RESOURCE_ATTRIBUTE_MAP:
            self._l3_attribute_map_bk[item] = (
                l3_apidef.RESOURCE_ATTRIBUTE_MAP[item].copy())
        plugin = plugin or (
            'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin')
        l3_plugin = ('neutron.tests.unit.extensions.test_l3_ext_gw_mode.'
                     'TestDbSepPlugin')
        svc_plugins = {'l3_plugin_name': l3_plugin}
        cfg.CONF.set_default('allow_overlapping_ips', True)
        super(ExtGwModeSepTestCase, self).setUp(plugin=plugin,
                                                svc_plugins=svc_plugins)
| true | true |
1c2c4b18f9235e6e3de66a3d2437f9377283e169 | 545 | py | Python | bumblebee_status/modules/core/time.py | rosalogia/bumblebee-status | 19c3975301d8700743df745ecd5ca2c05ecf5cf0 | [
"MIT"
] | null | null | null | bumblebee_status/modules/core/time.py | rosalogia/bumblebee-status | 19c3975301d8700743df745ecd5ca2c05ecf5cf0 | [
"MIT"
] | null | null | null | bumblebee_status/modules/core/time.py | rosalogia/bumblebee-status | 19c3975301d8700743df745ecd5ca2c05ecf5cf0 | [
"MIT"
] | 1 | 2020-10-22T09:23:54.000Z | 2020-10-22T09:23:54.000Z | # pylint: disable=C0111,R0903
"""Displays the current date and time.
Parameters:
* time.format: strftime()-compatible formatting string
* time.locale: locale to use rather than the system default
"""
import core.decorators
from .datetime import Module
class Module(Module):
@core.decorators.every(seconds=59) # ensures one update per minute
def __init__(self, config, theme):
super().__init__(config, theme)
def default_format(self):
return "%X"
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 22.708333 | 71 | 0.711927 |
import core.decorators
from .datetime import Module
class Module(Module):
    """Display the current time; subclass of the datetime module that only
    changes the default format string."""
    @core.decorators.every(seconds=59)  # throttle: one update per minute
    def __init__(self, config, theme):
        super().__init__(config, theme)
    def default_format(self):
        # "%X": locale-appropriate time-only representation.
        return "%X"
| true | true |
1c2c4c4d7fd9e44abbf1d439a0919ddbed850e12 | 9,329 | py | Python | stretch_render.py | CrunchyDuck/StretchRender | 47d5bf2b34d2bcd6ec2da1317f30ac5e05044add | [
"MIT"
] | null | null | null | stretch_render.py | CrunchyDuck/StretchRender | 47d5bf2b34d2bcd6ec2da1317f30ac5e05044add | [
"MIT"
] | null | null | null | stretch_render.py | CrunchyDuck/StretchRender | 47d5bf2b34d2bcd6ec2da1317f30ac5e05044add | [
"MIT"
] | null | null | null | import bpy
from bpy.types import (Panel, Operator)
import os
os.system("cls") # Clear console
bl_info = {
"name": "Stretch Render",
"description": "Alternative rendering compression method.",
"author": "CrunchyDuck <realvulpes@gmail.com>",
"version": (0, 1, 1),
"blender": (2, 83, 0),
"category": "Render",
"location": "Operator Search"
}
# Thanks to Arix for guiding and motivating me. Visit his cute website at arix.cc where he talks about cool stuff he's been doing.
# I'm quite new to python (This is but my second project so far), so please give me some critique with examples if you look through my code! You can contact me on Discord as CrunchyDuck#2278
# On top of this, I am also quite new to animation in Blender. There's many things like NLA that I have never even touched, so there's a high chance I've missed something I need to store. Please alert me if I have!
class RENDER_OT_stretch_render(Operator):
    """Scan the scene's keyframe data to find frames on which nothing
    visible changes, so those frames can be skipped when rendering.

    Only actions attached to renderable objects (and their data blocks)
    are inspected; scene-level animation is deliberately ignored since it
    should not change what is drawn on screen.
    """
    bl_idname = "render.stretch_render"
    bl_label = "Stretch Render"

    def execute(self, context):
        """Entry point: collect active actions from the view layer and
        report how many frames of the current frame range are static."""
        print("-------------------------------\nEXECUTING STRETCH RENDER")
        target_scene = bpy.context.scene
        start_frame = target_scene.frame_start
        end_frame = target_scene.frame_end
        total_frames = end_frame - start_frame

        target_layer = bpy.context.view_layer
        # A layer collection is a wrapper around a collection; start at the
        # master wrapper and recurse into its children.
        layer_collection_master = target_layer.layer_collection
        children_layers = layer_collection_master.children
        active_actions = []  # every action found on a renderable object

        # Start with every frame flagged "empty" (unchanged); actions will
        # knock their reserved frames out of this list.
        empty_frames = [i for i in range(start_frame, end_frame + 1)]
        render_frames = empty_frames.copy()
        # The very first frame of the animation must always be rendered.
        empty_frames.pop(0)

        for child in children_layers:
            active_actions += self.parse_layer(child)

        empty_frames = self.parse_actions(active_actions, empty_frames)
        # Frames to render are simply the complement of the empty frames.
        render_frames = list(set(render_frames) - set(empty_frames))

        self.output(empty_frames, total_frames)
        return {"FINISHED"}

    def parse_layer(self, layer_wrapper):
        """Recursively collect actions from every renderable object in the
        given layer-collection wrapper; returns a list of actions."""
        collection = layer_wrapper.collection
        actions = []

        # Objects inside a hidden layer cannot render, but nested layers
        # may still be visible, so only object scanning is skipped here.
        if layer_wrapper.is_visible:
            for obj in collection.objects:
                if obj is None or obj.hide_render:
                    continue
                # Animation can live on the object itself...
                anim = obj.animation_data
                if anim is not None and anim.action is not None:
                    actions.append(anim.action)
                # ...or on the object's data block (meshes, lamps, etc.).
                if obj.data is not None:
                    data_anim = obj.data.animation_data
                    if data_anim is not None and data_anim.action is not None:
                        actions.append(data_anim.action)

        for child in layer_wrapper.children:
            actions += self.parse_layer(child)
        return actions

    def output(self, empty_frames, total_frames):
        """Print a short report of how much of the animation is static."""
        num_removed = len(empty_frames)
        # Guard a zero-length frame range (start == end) against ZeroDivisionError.
        if total_frames:
            percent_removed = round((num_removed / total_frames) * 100, 2)
        else:
            percent_removed = 0.0
        print("Number of free frames: {0}".format(num_removed))
        print("Percent of animation removed: {0}%".format(percent_removed))

    def parse_actions(self, actions_array, blank_array):
        """Remove every frame reserved by the given actions from
        blank_array and return only the truly empty frames."""
        empty_frames = blank_array
        for action in actions_array:
            for fcurve in action.fcurves:
                keyframes = fcurve.keyframe_points
                count = len(keyframes)
                for index, keyframe in enumerate(keyframes):
                    keyframe_frame = int(keyframe.co[0])
                    interp = keyframe.interpolation
                    curr_y = keyframe.co[1]
                    # Only look ahead while a "next" keyframe exists.
                    if index < count - 1:
                        next_keyframe = keyframes[index + 1]
                        next_keyframe_frame = int(next_keyframe.co[0])
                        next_y = next_keyframe.co[1]
                        # BUGFIX: compare values with !=, not "is". "is"
                        # tests object identity and is almost never True
                        # for two distinct float objects, so equal-valued
                        # keyframes were wrongly treated as changes.
                        if curr_y != next_y:
                            if interp != "CONSTANT":
                                # Interpolated: every in-between frame
                                # changes, so all of them must render.
                                frames_to_render = [
                                    x for x in range(keyframe_frame + 1,
                                                     next_keyframe_frame + 1)]
                                print("Keyframe on action {0}, curve {1}, frame {2} is animated without CONSTANT interpolation.".format(action.name, fcurve.data_path, str(keyframe_frame)))
                            else:
                                # Constant: only the frame where the next
                                # keyframe lands actually changes.
                                frames_to_render = [next_keyframe_frame]
                            empty_frames = list(
                                set(empty_frames) - set(frames_to_render))
        return empty_frames
class VIEW3D_PT_stretch_render(Panel):
    """Sidebar panel (3D View > N-panel > "Stretch Render") exposing the
    output-path property and the operator button."""
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_category = "Stretch Render"
    bl_label = "Path Select"
    def draw(self, context):
        # Path picker for the timing file, followed by the run button.
        layout = self.layout
        col = layout.column(align=True)
        col.prop(context.scene, "stretch_render_location")
        layout.operator("render.stretch_render")
# All classes (panel + operator) registered/unregistered with Blender.
blender_classes = [
    VIEW3D_PT_stretch_render,
    RENDER_OT_stretch_render
]
def register():
    """Add the scene-level output-path property and register all classes."""
    # Directory the per-frame duration (".fox") file is written to; shown
    # in the panel and consumed by the decompression step.
    bpy.types.Scene.stretch_render_location = bpy.props.StringProperty(
        name=".fox Output",
        subtype="DIR_PATH",
        default="//",
        description="Where the file containing the duration of each frame will be output, to be used in decompression."
    )
    for blend_class in blender_classes:
        bpy.utils.register_class(blend_class)
def unregister():
    """Remove the scene property and unregister all addon classes."""
    del bpy.types.Scene.stretch_render_location
    for blend_class in blender_classes:
        bpy.utils.unregister_class(blend_class)
# Allow running the script directly from Blender's text editor.
if __name__ == "__main__":
    register()
# CONSIDERATIONS:
# Absolutely warn the user that they need to create a duplicate blender file to use this, otherwise I'm going to squish all of their animations.
# I will have to figure out how to get this to work when you have physics animations within your scene. Likely, this will work based off of them being activated/deactivated, but I will need to test further. (Maybe force the user to bake it?)
# Speed, I am almost certainly not going to be able to optimize this on my own, and I will need to see if speed of my operations is a major concern.
# Check to make sure that this works with more than one scene.
# I should allow my code to "print" a log that specifies the location of any non-constant animations.
# Add checks for animations that are not tied to objects (E.G grease pencil animation)
| 50.427027 | 279 | 0.748526 | import bpy
from bpy.types import (Panel, Operator)
import os
os.system("cls")
bl_info = {
"name": "Stretch Render",
"description": "Alternative rendering compression method.",
"author": "CrunchyDuck <realvulpes@gmail.com>",
"version": (0, 1, 1),
"blender": (2, 83, 0),
"category": "Render",
"location": "Operator Search"
}
# I'm quite new to python (This is but my second project so far), so please give me some critique with examples if you look through my code! You can contact me on Discord as CrunchyDuck
class RENDER_OT_stretch_render(Operator):
    """Work out which frames of the scene's frame range actually change
    on screen, so static frames can be skipped when rendering.

    Only object-level and object-data actions are inspected; scene-level
    animation is intentionally ignored (it should not affect pixels).
    """
    bl_idname = "render.stretch_render"
    bl_label = "Stretch Render"

    def execute(self, context):
        """Collect active actions from the view layer hierarchy and report
        how many frames are static."""
        print("-------------------------------\nEXECUTING STRETCH RENDER")
        target_scene = bpy.context.scene
        start_frame = target_scene.frame_start
        end_frame = target_scene.frame_end
        total_frames = end_frame - start_frame

        target_layer = bpy.context.view_layer
        # The master layer collection wraps the scene collection; recurse
        # into its child wrappers to find animated objects.
        layer_collection_master = target_layer.layer_collection
        children_layers = layer_collection_master.children
        active_actions = []

        # Every frame starts out marked "empty"; actions knock reserved
        # frames out of this list below.
        empty_frames = [i for i in range(start_frame, end_frame + 1)]
        render_frames = empty_frames.copy()
        # The first frame must always be rendered.
        empty_frames.pop(0)

        for child in children_layers:
            active_actions += self.parse_layer(child)

        empty_frames = self.parse_actions(active_actions, empty_frames)
        # Frames to render = complement of the empty frames.
        render_frames = list(set(render_frames) - set(empty_frames))

        self.output(empty_frames, total_frames)
        return {"FINISHED"}

    def parse_layer(self, layer_wrapper):
        """Recursively gather actions from renderable objects inside this
        layer-collection wrapper; returns the list of actions found."""
        collection = layer_wrapper.collection
        actions = []

        # Hidden layers cannot render their objects, but nested layers may
        # still be visible, so recursion below always happens.
        if layer_wrapper.is_visible:
            for obj in collection.objects:
                if obj is None or obj.hide_render:
                    continue
                anim = obj.animation_data
                if anim is not None and anim.action is not None:
                    actions.append(anim.action)
                # The object's data block can carry its own animation.
                if obj.data is not None:
                    data_anim = obj.data.animation_data
                    if data_anim is not None and data_anim.action is not None:
                        actions.append(data_anim.action)

        for child in layer_wrapper.children:
            actions += self.parse_layer(child)
        return actions

    def output(self, empty_frames, total_frames):
        """Print a short report of how much of the animation is static."""
        num_removed = len(empty_frames)
        # Avoid ZeroDivisionError for a zero-length frame range.
        if total_frames:
            percent_removed = round((num_removed / total_frames) * 100, 2)
        else:
            percent_removed = 0.0
        print("Number of free frames: {0}".format(num_removed))
        print("Percent of animation removed: {0}%".format(percent_removed))

    def parse_actions(self, actions_array, blank_array):
        """Strip every frame reserved by the given actions out of
        blank_array and return the remaining empty frames."""
        empty_frames = blank_array
        for action in actions_array:
            for fcurve in action.fcurves:
                keyframes = fcurve.keyframe_points
                count = len(keyframes)
                for index, keyframe in enumerate(keyframes):
                    keyframe_frame = int(keyframe.co[0])
                    interp = keyframe.interpolation
                    curr_y = keyframe.co[1]
                    if index < count - 1:
                        next_keyframe = keyframes[index + 1]
                        next_keyframe_frame = int(next_keyframe.co[0])
                        next_y = next_keyframe.co[1]
                        # BUGFIX: value comparison must use !=, not "is";
                        # identity checks on distinct float objects are
                        # almost always False, so equal keyframes were
                        # being treated as changes.
                        if curr_y != next_y:
                            if interp != "CONSTANT":
                                # Interpolated span: all in-between frames
                                # change and must be rendered.
                                frames_to_render = [
                                    x for x in range(keyframe_frame + 1,
                                                     next_keyframe_frame + 1)]
                                print("Keyframe on action {0}, curve {1}, frame {2} is animated without CONSTANT interpolation.".format(action.name, fcurve.data_path, str(keyframe_frame)))
                            else:
                                # Constant: only the next keyframe's frame
                                # actually changes.
                                frames_to_render = [next_keyframe_frame]
                            empty_frames = list(
                                set(empty_frames) - set(frames_to_render))
        return empty_frames
class VIEW3D_PT_stretch_render(Panel):
    """N-panel ("Stretch Render" tab) with the output path and the
    operator button."""
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    bl_category = "Stretch Render"
    bl_label = "Path Select"
    def draw(self, context):
        # Output-path picker followed by the run button.
        layout = self.layout
        col = layout.column(align=True)
        col.prop(context.scene, "stretch_render_location")
        layout.operator("render.stretch_render")
# Classes registered with/unregistered from Blender by this addon.
blender_classes = [
    VIEW3D_PT_stretch_render,
    RENDER_OT_stretch_render
]
def register():
    """Create the scene-level output-path property and register classes."""
    bpy.types.Scene.stretch_render_location = bpy.props.StringProperty(
        name=".fox Output",
        subtype="DIR_PATH",
        default="//",
        description="Where the file containing the duration of each frame will be output, to be used in decompression."
    )
    for blend_class in blender_classes:
        bpy.utils.register_class(blend_class)
def unregister():
    """Tear down: drop the scene property and unregister all classes."""
    del bpy.types.Scene.stretch_render_location
    for blend_class in blender_classes:
        bpy.utils.unregister_class(blend_class)
# Support running directly from Blender's text editor.
if __name__ == "__main__":
    register()
# I will have to figure out how to get this to work when you have physics animations within your scene. Likely, this will work based off of them being activated/deactivated, but I will need to test further. (Maybe force the user to bake it?)
# Speed, I am almost certainly not going to be able to optimize this on my own, and I will need to see if speed of my operations is a major concern.
# Check to make sure that this works with more than one scene.
# I should allow my code to "print" a log that specifies the location of any non-constant animations.
# Add checks for animations that are not tied to objects (E.G grease pencil animation)
| true | true |
1c2c4c5c9c96cc163def26125e1d56f2cde83a4f | 1,522 | py | Python | fontFeatures/feeLib/Conditional.py | m4rc1e/fontFeatures | 522f60ac632ab0a087477cfb57e23a96c33aee99 | [
"BSD-3-Clause"
] | null | null | null | fontFeatures/feeLib/Conditional.py | m4rc1e/fontFeatures | 522f60ac632ab0a087477cfb57e23a96c33aee99 | [
"BSD-3-Clause"
] | null | null | null | fontFeatures/feeLib/Conditional.py | m4rc1e/fontFeatures | 522f60ac632ab0a087477cfb57e23a96c33aee99 | [
"BSD-3-Clause"
] | null | null | null | """
Conditional
===========
Rules can be applied conditionally using the `If` statement. These will make
more sense when you can define variables.
Examples::
If $dosub {
Substitute a -> b;
}
"""
import fontFeatures
GRAMMAR = """
If_Args = boolean_condition:c wsc '{' wsc statement+:s wsc '}' -> (c,s)
boolean_condition = or | and | boolean_term
boolean_term = bracketed | not | boolean_factor
boolean_factor = integer:l ws comparison?:r -> parser.plugin_classes["If"].docomparison(l,r)
comparison = ('>='|'>'|'<='|'<'|'=='|'!='):cmp ws integer:r -> (cmp,r)
not = "not" ws boolean_condition:b -> not bool(b)
bracketed = "(" ws boolean_condition:b ws ")" -> b
or = boolean_term:a ws "or" ws boolean_term:b -> (a or b)
and = boolean_term:a ws "and" ws boolean_term:b -> (a and b)
"""
VERBS = ["If"]
class If:
@classmethod
def action(self, parser, condition, statements):
if bool(condition):
return parser.filterResults(statements)
else:
return []
@classmethod
def docomparison(self, l,r):
if not r:
return bool(l)
left,operator, right = l,r[0],r[1]
if operator == "<":
return left < right
if operator == "<=":
return left < right
if operator == "==":
return left == right
if operator == ">":
return left > right
if operator == ">=":
return left >= right
if operator == "!=":
return left != right
| 26.701754 | 92 | 0.567674 |
import fontFeatures
GRAMMAR = """
If_Args = boolean_condition:c wsc '{' wsc statement+:s wsc '}' -> (c,s)
boolean_condition = or | and | boolean_term
boolean_term = bracketed | not | boolean_factor
boolean_factor = integer:l ws comparison?:r -> parser.plugin_classes["If"].docomparison(l,r)
comparison = ('>='|'>'|'<='|'<'|'=='|'!='):cmp ws integer:r -> (cmp,r)
not = "not" ws boolean_condition:b -> not bool(b)
bracketed = "(" ws boolean_condition:b ws ")" -> b
or = boolean_term:a ws "or" ws boolean_term:b -> (a or b)
and = boolean_term:a ws "and" ws boolean_term:b -> (a and b)
"""
VERBS = ["If"]
class If:
@classmethod
def action(self, parser, condition, statements):
if bool(condition):
return parser.filterResults(statements)
else:
return []
@classmethod
def docomparison(self, l,r):
if not r:
return bool(l)
left,operator, right = l,r[0],r[1]
if operator == "<":
return left < right
if operator == "<=":
return left < right
if operator == "==":
return left == right
if operator == ">":
return left > right
if operator == ">=":
return left >= right
if operator == "!=":
return left != right
| true | true |
1c2c4d37d63ffae4463d667f463fb1de61cc518f | 2,692 | py | Python | tests/models/validators/v2_1_2/jsd_6db9292d4f28a26b.py | nonstdout/dnacentersdk | dbbbc4baa5300aa9e5c9193f2ea71438018095f5 | [
"MIT"
] | null | null | null | tests/models/validators/v2_1_2/jsd_6db9292d4f28a26b.py | nonstdout/dnacentersdk | dbbbc4baa5300aa9e5c9193f2ea71438018095f5 | [
"MIT"
] | null | null | null | tests/models/validators/v2_1_2/jsd_6db9292d4f28a26b.py | nonstdout/dnacentersdk | dbbbc4baa5300aa9e5c9193f2ea71438018095f5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""DNA Center Add Fabric data model.
Copyright (c) 2019-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidator6Db9292D4F28A26B(object):
    """Add Fabric request schema definition.

    Compiles the JSON schema once at construction time; ``validate``
    checks a request payload against it and converts schema errors to
    MalformedRequest.
    """
    def __init__(self):
        super(JSONSchemaValidator6Db9292D4F28A26B, self).__init__()
        # The literal below carries 16 spaces of indentation per line,
        # which the trailing .replace() strips before JSON parsing.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "properties": {
                "description":
                 {
                "description":
                "Description",
                "type": [
                "string",
                "null"
                ]
                },
                "executionStatusUrl": {
                "description":
                "Execution Status Url",
                "type": [
                "string",
                "null"
                ]
                },
                "status": {
                "description":
                "Status",
                "type": [
                "string",
                "null"
                ]
                }
                },
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))

    def validate(self, request):
        """Validate ``request``; raise MalformedRequest if it violates the
        compiled schema."""
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| 32.047619 | 78 | 0.591382 |
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidator6Db9292D4F28A26B(object):
    """Request validator for the DNA Center "Add Fabric" endpoint:
    compiles its JSON schema once and validates payloads on demand."""
    def __init__(self):
        super(JSONSchemaValidator6Db9292D4F28A26B, self).__init__()
        # Each line of the literal is indented 16 spaces; .replace()
        # strips that indentation before the text is parsed as JSON.
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "properties": {
                "description":
                 {
                "description":
                "Description",
                "type": [
                "string",
                "null"
                ]
                },
                "executionStatusUrl": {
                "description":
                "Execution Status Url",
                "type": [
                "string",
                "null"
                ]
                },
                "status": {
                "description":
                "Status",
                "type": [
                "string",
                "null"
                ]
                }
                },
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))
    def validate(self, request):
        # Translate schema violations into the SDK's MalformedRequest.
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| true | true |
1c2c4d5411086ef5f379dfac412a371890acce06 | 18,350 | py | Python | selfdrive/car/toyota/interface.py | iamlukewang/openpilot | d44f8c747b0c8764da8b84f208e40abf6e6b712a | [
"MIT"
] | 1 | 2021-04-29T18:40:03.000Z | 2021-04-29T18:40:03.000Z | selfdrive/car/toyota/interface.py | iamlukewang/openpilot | d44f8c747b0c8764da8b84f208e40abf6e6b712a | [
"MIT"
] | null | null | null | selfdrive/car/toyota/interface.py | iamlukewang/openpilot | d44f8c747b0c8764da8b84f208e40abf6e6b712a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from cereal import car
from selfdrive.config import Conversions as CV
from selfdrive.car.toyota.values import Ecu, ECU_FINGERPRINT, CAR, TSS2_CAR, FINGERPRINTS
from selfdrive.car import STD_CARGO_KG, scale_rot_inertia, scale_tire_stiffness, is_ecu_disconnected, gen_empty_fingerprint
from selfdrive.swaglog import cloudlog
from selfdrive.car.interfaces import CarInterfaceBase
from common.op_params import opParams
op_params = opParams()
use_lqr = op_params.get('use_lqr')
prius_use_pid = op_params.get('prius_use_pid')
corollaTSS2_use_indi = op_params.get('corollaTSS2_use_indi')
EventName = car.CarEvent.EventName
class CarInterface(CarInterfaceBase):
@staticmethod
def compute_gb(accel, speed):
return float(accel) / 3.0
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), has_relay=False, car_fw=[]): # pylint: disable=dangerous-default-value
ret = CarInterfaceBase.get_std_params(candidate, fingerprint, has_relay)
ret.carName = "toyota"
ret.safetyModel = car.CarParams.SafetyModel.toyota
ret.steerActuatorDelay = 0.12 # Default delay, Prius has larger delay
ret.steerLimitTimer = 0.4
ret.hasZss = 0x23 in fingerprint[0] # Detect whether car has accurate ZSS
CARS_NOT_PID = [CAR.RAV4, CAR.RAV4H]
if not prius_use_pid:
CARS_NOT_PID.append(CAR.PRIUS_2020)
CARS_NOT_PID.append(CAR.PRIUS)
if candidate not in CARS_NOT_PID and not use_lqr: # These cars use LQR/INDI
ret.lateralTuning.init('pid')
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0.], [0.]]
ret.lateralTuning.pid.kdBP, ret.lateralTuning.pid.kdV = [[0.], [0.]]
if candidate == CAR.PRIUS:
stop_and_go = True
ret.safetyParam = 50 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.70
ret.steerRatio = 13.4 # unknown end-to-end spec
tire_stiffness_factor = 0.6371 # hand-tune
ret.mass = 3045. * CV.LB_TO_KG + STD_CARGO_KG
if prius_use_pid:
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.38], [0.02]] # todo: parametertize by zss
ret.lateralTuning.pid.kdV = [0.85]
ret.lateralTuning.pid.kf = 0.000068 # full torque for 20 deg at 80mph means 0.00007818594
else:
ret.lateralTuning.init('indi')
ret.lateralTuning.indi.innerLoopGain = 4.0
ret.lateralTuning.indi.outerLoopGainBP = [0]
ret.lateralTuning.indi.outerLoopGainV = [3.0]
ret.lateralTuning.indi.timeConstant = 0.1 if ret.hasZss else 1.0
ret.lateralTuning.indi.actuatorEffectiveness = 1.0
ret.steerActuatorDelay = 0.5
elif candidate == CAR.PRIUS_2020:
stop_and_go = True
ret.safetyParam = 54
ret.wheelbase = 2.6924
ret.steerRatio = 13.4 # unknown end-to-end spec
ret.steerActuatorDelay = 0.55
tire_stiffness_factor = 0.6371 # hand-tune
ret.mass = 3115. * CV.LB_TO_KG + STD_CARGO_KG
if prius_use_pid:
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.36], [0.1]]
ret.lateralTuning.pid.kdV = [2.] # corolla D times gain in PI values
ret.lateralTuning.pid.kf = 0.00007818594
else:
ret.lateralTuning.init('indi')
ret.lateralTuning.indi.innerLoopGain = 4.0
ret.lateralTuning.indi.outerLoopGainBP = [0]
ret.lateralTuning.indi.outerLoopGainV = [3.0]
ret.lateralTuning.indi.timeConstant = 0.1 if ret.hasZss else 1.0
ret.lateralTuning.indi.actuatorEffectiveness = 1.0
elif candidate in [CAR.RAV4, CAR.RAV4H]:
stop_and_go = True if (candidate in CAR.RAV4H) else False
ret.safetyParam = 73
ret.wheelbase = 2.65
ret.steerRatio = 16.88 # 14.5 is spec end-to-end
tire_stiffness_factor = 0.5533
ret.mass = 3650. * CV.LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid
ret.lateralTuning.init('lqr')
ret.lateralTuning.lqr.scale = 1500.0
ret.lateralTuning.lqr.ki = 0.05
ret.lateralTuning.lqr.a = [0., 1., -0.22619643, 1.21822268]
ret.lateralTuning.lqr.b = [-1.92006585e-04, 3.95603032e-05]
ret.lateralTuning.lqr.c = [1., 0.]
ret.lateralTuning.lqr.k = [-110.73572306, 451.22718255]
ret.lateralTuning.lqr.l = [0.3233671, 0.3185757]
ret.lateralTuning.lqr.dcGain = 0.002237852961363602
elif candidate == CAR.COROLLA:
stop_and_go = False
ret.safetyParam = 90
ret.wheelbase = 2.70
ret.steerRatio = 17.8
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 2860. * CV.LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.15], [0.035]]
ret.lateralTuning.pid.kdV = [0.75]
ret.lateralTuning.pid.kf = 0.000084 # full torque for 20 deg at 80mph means 0.00007818594
elif candidate == CAR.LEXUS_RX:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.79
ret.steerRatio = 14.8
tire_stiffness_factor = 0.5533
ret.mass = 4387. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.05]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.LEXUS_RXH:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.79
ret.steerRatio = 16. # 14.8 is spec end-to-end
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 4481. * CV.LB_TO_KG + STD_CARGO_KG # mean between min and max
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006 # full torque for 10 deg at 80mph means 0.00007818594
elif candidate == CAR.LEXUS_RX_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.79
ret.steerRatio = 14.8
tire_stiffness_factor = 0.5533 # not optimized yet
ret.mass = 4387. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.LEXUS_RXH_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.79
ret.steerRatio = 16.0 # 14.8 is spec end-to-end
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 4481.0 * CV.LB_TO_KG + STD_CARGO_KG # mean between min and max
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.15]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate in [CAR.CHR, CAR.CHRH]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.63906
ret.steerRatio = 13.6
tire_stiffness_factor = 0.7933
ret.mass = 3300. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.723], [0.0428]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate in [CAR.CAMRY, CAR.CAMRYH]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.82448
ret.steerRatio = 13.7
tire_stiffness_factor = 0.7933
ret.mass = 3400. * CV.LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate in [CAR.HIGHLANDER_TSS2, CAR.HIGHLANDERH_TSS2]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.84988 # 112.2 in = 2.84988 m
ret.steerRatio = 16.0
tire_stiffness_factor = 0.8
ret.mass = 4700. * CV.LB_TO_KG + STD_CARGO_KG # 4260 + 4-5 people
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.18], [0.015]] # community tuning
ret.lateralTuning.pid.kf = 0.00012 # community tuning
elif candidate in [CAR.HIGHLANDER, CAR.HIGHLANDERH]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.78
ret.steerRatio = 16.0
tire_stiffness_factor = 0.8
ret.mass = 4607. * CV.LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid limited
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.18], [0.015]] # community tuning
ret.lateralTuning.pid.kf = 0.00012 # community tuning
elif candidate == CAR.AVALON:
stop_and_go = False
ret.safetyParam = 73
ret.wheelbase = 2.82
ret.steerRatio = 14.8 # Found at https://pressroom.toyota.com/releases/2016+avalon+product+specs.download
tire_stiffness_factor = 0.7983
ret.mass = 3505. * CV.LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.17], [0.03]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.RAV4_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.68986
ret.steerRatio = 14.3
tire_stiffness_factor = 0.7933
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.15], [0.05]]
ret.lateralTuning.pid.kdV = [0.68]
ret.mass = 3370. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kf = 0.00004
for fw in car_fw:
if fw.ecu == "eps" and fw.fwVersion == b"8965B42170\x00\x00\x00\x00\x00\x00":
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
break
elif candidate == CAR.RAV4H_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.68986
ret.steerRatio = 14.3
tire_stiffness_factor = 0.7933
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.15], [0.05]]
ret.lateralTuning.pid.kdV = [0.68]
ret.mass = 3800. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kf = 0.00004
for fw in car_fw:
if fw.ecu == "eps" and fw.fwVersion == b"8965B42170\x00\x00\x00\x00\x00\x00":
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
break
elif candidate in [CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.63906
ret.steerRatio = 13.9
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 3060. * CV.LB_TO_KG + STD_CARGO_KG
if corollaTSS2_use_indi: # birdman6450#7399's Corolla 2020 TSS2 Tune (PR thanks to majorwolf)
ret.lateralTuning.init('indi')
ret.steerRatio = 15.33
tire_stiffness_factor = 0.996
ret.lateralTuning.indi.outerLoopGainBP = [20, 21]
ret.lateralTuning.indi.outerLoopGainV = [6, 15]
ret.lateralTuning.indi.innerLoopGain = 6.0
ret.lateralTuning.indi.timeConstant = 5.5
ret.lateralTuning.indi.actuatorEffectiveness = 6.0
ret.steerActuatorDelay = 0.60
else:
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kdV = [2.]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate in [CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.8702
ret.steerRatio = 16.0 # not optimized
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 3704. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.SIENNA:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 3.03
ret.steerRatio = 15.5
tire_stiffness_factor = 0.444
ret.mass = 4590. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.19], [0.02]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.LEXUS_IS:
stop_and_go = False
ret.safetyParam = 77
ret.wheelbase = 2.79908
ret.steerRatio = 13.3
tire_stiffness_factor = 0.444
ret.mass = 3736.8 * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.LEXUS_CTH:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.60
ret.steerRatio = 18.6
tire_stiffness_factor = 0.517
ret.mass = 3108 * CV.LB_TO_KG + STD_CARGO_KG # mean between min and max
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]]
ret.lateralTuning.pid.kf = 0.00007
elif candidate == CAR.LEXUS_NXH:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.66
ret.steerRatio = 14.7
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 4070 * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.PRIUS_TSS2:
stop_and_go = True
ret.safetyParam = 50
ret.wheelbase = 2.70
ret.steerRatio = 13.4 # unknown end-to-end spec
tire_stiffness_factor = 0.6371 # hand-tune
ret.steerActuatorDelay = 0.55
ret.mass = 3115. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.5], [0.15]]
ret.lateralTuning.pid.kdBP = [0.]
ret.lateralTuning.pid.kdV = [2.]
ret.lateralTuning.pid.kf = 0.00007818594
if use_lqr: # if user enables lqr, use lqr for all vehicles
ret.lateralTuning.init('lqr')
ret.lateralTuning.lqr.scale = 1500.0
ret.lateralTuning.lqr.ki = 0.05
ret.lateralTuning.lqr.a = [0., 1., -0.22619643, 1.21822268]
ret.lateralTuning.lqr.b = [-1.92006585e-04, 3.95603032e-05]
ret.lateralTuning.lqr.c = [1., 0.]
ret.lateralTuning.lqr.k = [-110.73572306, 451.22718255]
ret.lateralTuning.lqr.l = [0.3233671, 0.3185757]
ret.lateralTuning.lqr.dcGain = 0.002237852961363602
ret.steerRateCost = 0.5 if ret.hasZss else 1.0
ret.centerToFront = ret.wheelbase * 0.44
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront,
tire_stiffness_factor=tire_stiffness_factor)
ret.enableCamera = is_ecu_disconnected(fingerprint[0], FINGERPRINTS, ECU_FINGERPRINT, candidate, Ecu.fwdCamera) or has_relay
# Detect smartDSU, which intercepts ACC_CMD from the DSU allowing openpilot to send it
smartDsu = 0x2FF in fingerprint[0]
# In TSS2 cars the camera does long control
ret.enableDsu = is_ecu_disconnected(fingerprint[0], FINGERPRINTS, ECU_FINGERPRINT, candidate, Ecu.dsu) and candidate not in TSS2_CAR
ret.enableGasInterceptor = 0x201 in fingerprint[0]
# if the smartDSU is detected, openpilot can send ACC_CMD (and the smartDSU will block it from the DSU) or not (the DSU is "connected")
ret.openpilotLongitudinalControl = ret.enableCamera and (smartDsu or ret.enableDsu or candidate in TSS2_CAR)
cloudlog.warning("ECU Camera Simulated: %r", ret.enableCamera)
cloudlog.warning("ECU DSU Simulated: %r", ret.enableDsu)
cloudlog.warning("ECU Gas Interceptor: %r", ret.enableGasInterceptor)
# min speed to enable ACC. if car can do stop and go, then set enabling speed
# to a negative value, so it won't matter.
ret.minEnableSpeed = -1. if (stop_and_go or ret.enableGasInterceptor) else 19. * CV.MPH_TO_MS
# removing the DSU disables AEB and it's considered a community maintained feature
# intercepting the DSU is a community feature since it requires unofficial hardware
ret.communityFeature = ret.enableGasInterceptor or ret.enableDsu or smartDsu
ret.longitudinalTuning.deadzoneBP = [0., 9.]
ret.longitudinalTuning.deadzoneV = [0., .15]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kiBP = [0., 35.]
if ret.enableGasInterceptor:
ret.gasMaxBP = [0., 9., 35]
ret.gasMaxV = [0.2, 0.5, 0.7]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiV = [0.18, 0.12]
else:
ret.gasMaxBP = [0.]
ret.gasMaxV = [0.5]
ret.longitudinalTuning.kpV = [3.6, 2.4, 1.5]
ret.longitudinalTuning.kiV = [0.54, 0.36]
return ret
# returns a car.CarState
def update(self, c, can_strings):
# ******************* do can recv *******************
self.cp.update_strings(can_strings)
self.cp_cam.update_strings(can_strings)
ret = self.CS.update(self.cp, self.cp_cam)
ret.canValid = self.cp.can_valid and self.cp_cam.can_valid
ret.steeringRateLimited = self.CC.steer_rate_limited if self.CC is not None else False
# events
events = self.create_common_events(ret)
if self.cp_cam.can_invalid_cnt >= 200 and self.CP.enableCamera and not self.CP.isPandaBlack:
events.add(EventName.invalidGiraffeToyota)
if self.CS.low_speed_lockout and self.CP.openpilotLongitudinalControl:
events.add(EventName.lowSpeedLockout)
if ret.vEgo < self.CP.minEnableSpeed and self.CP.openpilotLongitudinalControl:
events.add(EventName.belowEngageSpeed)
if c.actuators.gas > 0.1:
# some margin on the actuator to not false trigger cancellation while stopping
events.add(EventName.speedTooLow)
if ret.vEgo < 0.001:
# while in standstill, send a user alert
events.add(EventName.manualRestart)
ret.events = events.to_msg()
self.CS.out = ret.as_reader()
return self.CS.out
# pass in a car.CarControl
# to be called @ 100hz
def apply(self, c):
can_sends = self.CC.update(c.enabled, self.CS, self.frame,
c.actuators, c.cruiseControl.cancel,
c.hudControl.visualAlert, c.hudControl.leftLaneVisible,
c.hudControl.rightLaneVisible, c.hudControl.leadVisible,
c.hudControl.leftLaneDepart, c.hudControl.rightLaneDepart)
self.frame += 1
return can_sends
| 42.575406 | 139 | 0.667248 |
from cereal import car
from selfdrive.config import Conversions as CV
from selfdrive.car.toyota.values import Ecu, ECU_FINGERPRINT, CAR, TSS2_CAR, FINGERPRINTS
from selfdrive.car import STD_CARGO_KG, scale_rot_inertia, scale_tire_stiffness, is_ecu_disconnected, gen_empty_fingerprint
from selfdrive.swaglog import cloudlog
from selfdrive.car.interfaces import CarInterfaceBase
from common.op_params import opParams
op_params = opParams()
use_lqr = op_params.get('use_lqr')
prius_use_pid = op_params.get('prius_use_pid')
corollaTSS2_use_indi = op_params.get('corollaTSS2_use_indi')
EventName = car.CarEvent.EventName
class CarInterface(CarInterfaceBase):
@staticmethod
def compute_gb(accel, speed):
return float(accel) / 3.0
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), has_relay=False, car_fw=[]):
ret = CarInterfaceBase.get_std_params(candidate, fingerprint, has_relay)
ret.carName = "toyota"
ret.safetyModel = car.CarParams.SafetyModel.toyota
ret.steerActuatorDelay = 0.12
ret.steerLimitTimer = 0.4
ret.hasZss = 0x23 in fingerprint[0]
CARS_NOT_PID = [CAR.RAV4, CAR.RAV4H]
if not prius_use_pid:
CARS_NOT_PID.append(CAR.PRIUS_2020)
CARS_NOT_PID.append(CAR.PRIUS)
if candidate not in CARS_NOT_PID and not use_lqr:
ret.lateralTuning.init('pid')
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0.], [0.]]
ret.lateralTuning.pid.kdBP, ret.lateralTuning.pid.kdV = [[0.], [0.]]
if candidate == CAR.PRIUS:
stop_and_go = True
ret.safetyParam = 50
ret.wheelbase = 2.70
ret.steerRatio = 13.4
tire_stiffness_factor = 0.6371
ret.mass = 3045. * CV.LB_TO_KG + STD_CARGO_KG
if prius_use_pid:
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.38], [0.02]]
ret.lateralTuning.pid.kdV = [0.85]
ret.lateralTuning.pid.kf = 0.000068
else:
ret.lateralTuning.init('indi')
ret.lateralTuning.indi.innerLoopGain = 4.0
ret.lateralTuning.indi.outerLoopGainBP = [0]
ret.lateralTuning.indi.outerLoopGainV = [3.0]
ret.lateralTuning.indi.timeConstant = 0.1 if ret.hasZss else 1.0
ret.lateralTuning.indi.actuatorEffectiveness = 1.0
ret.steerActuatorDelay = 0.5
elif candidate == CAR.PRIUS_2020:
stop_and_go = True
ret.safetyParam = 54
ret.wheelbase = 2.6924
ret.steerRatio = 13.4
ret.steerActuatorDelay = 0.55
tire_stiffness_factor = 0.6371
ret.mass = 3115. * CV.LB_TO_KG + STD_CARGO_KG
if prius_use_pid:
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.36], [0.1]]
ret.lateralTuning.pid.kdV = [2.]
ret.lateralTuning.pid.kf = 0.00007818594
else:
ret.lateralTuning.init('indi')
ret.lateralTuning.indi.innerLoopGain = 4.0
ret.lateralTuning.indi.outerLoopGainBP = [0]
ret.lateralTuning.indi.outerLoopGainV = [3.0]
ret.lateralTuning.indi.timeConstant = 0.1 if ret.hasZss else 1.0
ret.lateralTuning.indi.actuatorEffectiveness = 1.0
elif candidate in [CAR.RAV4, CAR.RAV4H]:
stop_and_go = True if (candidate in CAR.RAV4H) else False
ret.safetyParam = 73
ret.wheelbase = 2.65
ret.steerRatio = 16.88
tire_stiffness_factor = 0.5533
ret.mass = 3650. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.init('lqr')
ret.lateralTuning.lqr.scale = 1500.0
ret.lateralTuning.lqr.ki = 0.05
ret.lateralTuning.lqr.a = [0., 1., -0.22619643, 1.21822268]
ret.lateralTuning.lqr.b = [-1.92006585e-04, 3.95603032e-05]
ret.lateralTuning.lqr.c = [1., 0.]
ret.lateralTuning.lqr.k = [-110.73572306, 451.22718255]
ret.lateralTuning.lqr.l = [0.3233671, 0.3185757]
ret.lateralTuning.lqr.dcGain = 0.002237852961363602
elif candidate == CAR.COROLLA:
stop_and_go = False
ret.safetyParam = 90
ret.wheelbase = 2.70
ret.steerRatio = 17.8
tire_stiffness_factor = 0.444
ret.mass = 2860. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.15], [0.035]]
ret.lateralTuning.pid.kdV = [0.75]
ret.lateralTuning.pid.kf = 0.000084
elif candidate == CAR.LEXUS_RX:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.79
ret.steerRatio = 14.8
tire_stiffness_factor = 0.5533
ret.mass = 4387. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.05]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.LEXUS_RXH:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.79
ret.steerRatio = 16.
tire_stiffness_factor = 0.444
ret.mass = 4481. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.LEXUS_RX_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.79
ret.steerRatio = 14.8
tire_stiffness_factor = 0.5533
ret.mass = 4387. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.LEXUS_RXH_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.79
ret.steerRatio = 16.0
tire_stiffness_factor = 0.444
ret.mass = 4481.0 * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.15]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate in [CAR.CHR, CAR.CHRH]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.63906
ret.steerRatio = 13.6
tire_stiffness_factor = 0.7933
ret.mass = 3300. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.723], [0.0428]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate in [CAR.CAMRY, CAR.CAMRYH]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.82448
ret.steerRatio = 13.7
tire_stiffness_factor = 0.7933
ret.mass = 3400. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate in [CAR.HIGHLANDER_TSS2, CAR.HIGHLANDERH_TSS2]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.84988
ret.steerRatio = 16.0
tire_stiffness_factor = 0.8
ret.mass = 4700. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.18], [0.015]]
ret.lateralTuning.pid.kf = 0.00012
elif candidate in [CAR.HIGHLANDER, CAR.HIGHLANDERH]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.78
ret.steerRatio = 16.0
tire_stiffness_factor = 0.8
ret.mass = 4607. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.18], [0.015]]
ret.lateralTuning.pid.kf = 0.00012
elif candidate == CAR.AVALON:
stop_and_go = False
ret.safetyParam = 73
ret.wheelbase = 2.82
ret.steerRatio = 14.8
tire_stiffness_factor = 0.7983
ret.mass = 3505. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.17], [0.03]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.RAV4_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.68986
ret.steerRatio = 14.3
tire_stiffness_factor = 0.7933
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.15], [0.05]]
ret.lateralTuning.pid.kdV = [0.68]
ret.mass = 3370. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kf = 0.00004
for fw in car_fw:
if fw.ecu == "eps" and fw.fwVersion == b"8965B42170\x00\x00\x00\x00\x00\x00":
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
break
elif candidate == CAR.RAV4H_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.68986
ret.steerRatio = 14.3
tire_stiffness_factor = 0.7933
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.15], [0.05]]
ret.lateralTuning.pid.kdV = [0.68]
ret.mass = 3800. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kf = 0.00004
for fw in car_fw:
if fw.ecu == "eps" and fw.fwVersion == b"8965B42170\x00\x00\x00\x00\x00\x00":
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
break
elif candidate in [CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.63906
ret.steerRatio = 13.9
tire_stiffness_factor = 0.444
ret.mass = 3060. * CV.LB_TO_KG + STD_CARGO_KG
      if corollaTSS2_use_indi:
        ret.lateralTuning.init('indi')
        ret.steerRatio = 15.33
tire_stiffness_factor = 0.996
ret.lateralTuning.indi.outerLoopGainBP = [20, 21]
ret.lateralTuning.indi.outerLoopGainV = [6, 15]
ret.lateralTuning.indi.innerLoopGain = 6.0
ret.lateralTuning.indi.timeConstant = 5.5
ret.lateralTuning.indi.actuatorEffectiveness = 6.0
ret.steerActuatorDelay = 0.60
else:
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kdV = [2.]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate in [CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.8702
ret.steerRatio = 16.0 # not optimized
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 3704. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.SIENNA:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 3.03
ret.steerRatio = 15.5
tire_stiffness_factor = 0.444
ret.mass = 4590. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.19], [0.02]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.LEXUS_IS:
stop_and_go = False
ret.safetyParam = 77
ret.wheelbase = 2.79908
ret.steerRatio = 13.3
tire_stiffness_factor = 0.444
ret.mass = 3736.8 * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.LEXUS_CTH:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.60
ret.steerRatio = 18.6
tire_stiffness_factor = 0.517
ret.mass = 3108 * CV.LB_TO_KG + STD_CARGO_KG # mean between min and max
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]]
ret.lateralTuning.pid.kf = 0.00007
elif candidate == CAR.LEXUS_NXH:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.66
ret.steerRatio = 14.7
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 4070 * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.PRIUS_TSS2:
stop_and_go = True
ret.safetyParam = 50
ret.wheelbase = 2.70
ret.steerRatio = 13.4 # unknown end-to-end spec
tire_stiffness_factor = 0.6371 # hand-tune
ret.steerActuatorDelay = 0.55
ret.mass = 3115. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.5], [0.15]]
ret.lateralTuning.pid.kdBP = [0.]
ret.lateralTuning.pid.kdV = [2.]
ret.lateralTuning.pid.kf = 0.00007818594
if use_lqr: # if user enables lqr, use lqr for all vehicles
ret.lateralTuning.init('lqr')
ret.lateralTuning.lqr.scale = 1500.0
ret.lateralTuning.lqr.ki = 0.05
ret.lateralTuning.lqr.a = [0., 1., -0.22619643, 1.21822268]
ret.lateralTuning.lqr.b = [-1.92006585e-04, 3.95603032e-05]
ret.lateralTuning.lqr.c = [1., 0.]
ret.lateralTuning.lqr.k = [-110.73572306, 451.22718255]
ret.lateralTuning.lqr.l = [0.3233671, 0.3185757]
ret.lateralTuning.lqr.dcGain = 0.002237852961363602
ret.steerRateCost = 0.5 if ret.hasZss else 1.0
ret.centerToFront = ret.wheelbase * 0.44
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront,
tire_stiffness_factor=tire_stiffness_factor)
ret.enableCamera = is_ecu_disconnected(fingerprint[0], FINGERPRINTS, ECU_FINGERPRINT, candidate, Ecu.fwdCamera) or has_relay
# Detect smartDSU, which intercepts ACC_CMD from the DSU allowing openpilot to send it
smartDsu = 0x2FF in fingerprint[0]
# In TSS2 cars the camera does long control
ret.enableDsu = is_ecu_disconnected(fingerprint[0], FINGERPRINTS, ECU_FINGERPRINT, candidate, Ecu.dsu) and candidate not in TSS2_CAR
ret.enableGasInterceptor = 0x201 in fingerprint[0]
# if the smartDSU is detected, openpilot can send ACC_CMD (and the smartDSU will block it from the DSU) or not (the DSU is "connected")
ret.openpilotLongitudinalControl = ret.enableCamera and (smartDsu or ret.enableDsu or candidate in TSS2_CAR)
cloudlog.warning("ECU Camera Simulated: %r", ret.enableCamera)
cloudlog.warning("ECU DSU Simulated: %r", ret.enableDsu)
cloudlog.warning("ECU Gas Interceptor: %r", ret.enableGasInterceptor)
# min speed to enable ACC. if car can do stop and go, then set enabling speed
# to a negative value, so it won't matter.
ret.minEnableSpeed = -1. if (stop_and_go or ret.enableGasInterceptor) else 19. * CV.MPH_TO_MS
# intercepting the DSU is a community feature since it requires unofficial hardware
ret.communityFeature = ret.enableGasInterceptor or ret.enableDsu or smartDsu
ret.longitudinalTuning.deadzoneBP = [0., 9.]
ret.longitudinalTuning.deadzoneV = [0., .15]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kiBP = [0., 35.]
if ret.enableGasInterceptor:
ret.gasMaxBP = [0., 9., 35]
ret.gasMaxV = [0.2, 0.5, 0.7]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiV = [0.18, 0.12]
else:
ret.gasMaxBP = [0.]
ret.gasMaxV = [0.5]
ret.longitudinalTuning.kpV = [3.6, 2.4, 1.5]
ret.longitudinalTuning.kiV = [0.54, 0.36]
return ret
# returns a car.CarState
def update(self, c, can_strings):
# ******************* do can recv *******************
self.cp.update_strings(can_strings)
self.cp_cam.update_strings(can_strings)
ret = self.CS.update(self.cp, self.cp_cam)
ret.canValid = self.cp.can_valid and self.cp_cam.can_valid
ret.steeringRateLimited = self.CC.steer_rate_limited if self.CC is not None else False
# events
events = self.create_common_events(ret)
if self.cp_cam.can_invalid_cnt >= 200 and self.CP.enableCamera and not self.CP.isPandaBlack:
events.add(EventName.invalidGiraffeToyota)
if self.CS.low_speed_lockout and self.CP.openpilotLongitudinalControl:
events.add(EventName.lowSpeedLockout)
if ret.vEgo < self.CP.minEnableSpeed and self.CP.openpilotLongitudinalControl:
events.add(EventName.belowEngageSpeed)
if c.actuators.gas > 0.1:
# some margin on the actuator to not false trigger cancellation while stopping
events.add(EventName.speedTooLow)
if ret.vEgo < 0.001:
# while in standstill, send a user alert
events.add(EventName.manualRestart)
ret.events = events.to_msg()
self.CS.out = ret.as_reader()
return self.CS.out
# pass in a car.CarControl
# to be called @ 100hz
def apply(self, c):
can_sends = self.CC.update(c.enabled, self.CS, self.frame,
c.actuators, c.cruiseControl.cancel,
c.hudControl.visualAlert, c.hudControl.leftLaneVisible,
c.hudControl.rightLaneVisible, c.hudControl.leadVisible,
c.hudControl.leftLaneDepart, c.hudControl.rightLaneDepart)
self.frame += 1
return can_sends
| true | true |
1c2c4e7dee45b65ff30006c880c9cbfc23744252 | 19,192 | py | Python | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_12_01/operations/_private_endpoint_connections_operations.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | 2 | 2021-03-24T06:26:11.000Z | 2021-04-18T15:55:59.000Z | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_12_01/operations/_private_endpoint_connections_operations.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | null | null | null | sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_12_01/operations/_private_endpoint_connections_operations.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | 1 | 2021-12-18T20:01:22.000Z | 2021-12-18T20:01:22.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
    # Typing-only imports/aliases: evaluated by type checkers, never at runtime.
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union

    T = TypeVar('T')
    # Signature of the optional `cls` response-transform callback accepted by
    # the operation methods in this module.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations(object):
    """PrivateEndpointConnectionsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerservice.v2020_12_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PrivateEndpointConnectionListResult"
        """Gets a list of private endpoint connections in the specified managed cluster.
        Gets a list of private endpoint connections in the specified managed cluster. The operation
        returns properties of each private endpoint connection.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnectionListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_12_01.models.PrivateEndpointConnectionListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnectionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-12-01"
        accept = "application/json"
        # Construct URL
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        private_endpoint_connection_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PrivateEndpointConnection"
        """Gets the private endpoint connection.
        Gets the details of the private endpoint connection by managed cluster and resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_12_01.models.PrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-12-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    def update(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        private_endpoint_connection_name,  # type: str
        parameters,  # type: "_models.PrivateEndpointConnection"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PrivateEndpointConnection"
        """Updates a private endpoint connection.
        Updates a private endpoint connection in the specified managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        :type private_endpoint_connection_name: str
        :param parameters: Parameters supplied to the Update a private endpoint connection operation.
        :type parameters: ~azure.mgmt.containerservice.v2020_12_01.models.PrivateEndpointConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2020_12_01.models.PrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-12-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        private_endpoint_connection_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Issue the initial DELETE request for a private endpoint connection.
        Internal helper: sends a single DELETE and validates the status code (200/204).
        Callers should use :meth:`begin_delete`, which wraps this in an LRO poller.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-12-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        resource_name,  # type: str
        private_endpoint_connection_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a private endpoint connection.
        Deletes the private endpoint connection in the specified managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
| 52.294278 | 251 | 0.685911 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations(object):
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name,
resource_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections'}
def get(
self,
resource_group_name,
resource_name,
private_endpoint_connection_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}
def update(
self,
resource_group_name,
resource_name,
private_endpoint_connection_name,
parameters,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self.update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}
def _delete_initial(
self,
resource_group_name,
resource_name,
private_endpoint_connection_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
url = self._delete_initial.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}
def begin_delete(
self,
resource_group_name,
resource_name,
private_endpoint_connection_name,
**kwargs
):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'}
| true | true |
1c2c4f726603ea041b7ba928b14302a2c024617d | 63 | py | Python | wagtail_tinify/__init__.py | lucasmoeskops/wagtail-tinify | d0a6bf3f52f8b7ded2b7bd2c408bc7c10edc95a6 | [
"MIT"
] | null | null | null | wagtail_tinify/__init__.py | lucasmoeskops/wagtail-tinify | d0a6bf3f52f8b7ded2b7bd2c408bc7c10edc95a6 | [
"MIT"
] | null | null | null | wagtail_tinify/__init__.py | lucasmoeskops/wagtail-tinify | d0a6bf3f52f8b7ded2b7bd2c408bc7c10edc95a6 | [
"MIT"
] | null | null | null | default_app_config = 'wagtail_tinify.apps.WagtailTinifyConfig'
# Legacy pointer Django uses to locate this app's AppConfig when no explicit
# config is declared in INSTALLED_APPS.
# NOTE(review): default_app_config is deprecated in newer Django (3.2+) —
# confirm the project's minimum supported Django version before removing.
default_app_config = 'wagtail_tinify.apps.WagtailTinifyConfig'
| true | true |
1c2c5152116a83913c132f0a97f1b7bcc9d02645 | 5,399 | py | Python | data/p3BR/R1/benchmark/startQiskit_Class403.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R1/benchmark/startQiskit_Class403.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R1/benchmark/startQiskit_Class403.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=71
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
# Base path prefix for circuit diagram output files (used by the commented-out
# .draw(..., filename=(kernel + ...)) calls in this script).
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """Element-wise XOR of two bit strings, returned in reversed order.

    Each position i contributes str(int(s[i]) ^ int(t[i])); the resulting
    string is reversed before being returned (this matches how the callers
    in this benchmark interpret bit order).
    """
    return ''.join(str(int(s[i]) ^ int(t[i])) for i in reversed(range(len(s))))
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, as a one-character string."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the phase-free oracle O_f as a circuit on n controls + 1 target.

    For every n-bit input whose image under f is "1", a multi-controlled
    Toffoli ('noancilla' mode) flips the target; X gates conjugate the
    controls so the MCT fires on that specific bit pattern.
    """
    ctrl = QuantumRegister(n, "ofc")
    tgt = QuantumRegister(1, "oft")
    circ = QuantumCircuit(ctrl, tgt, name="Of")
    for value in range(2 ** n):
        bits = np.binary_repr(value, n)
        if f(bits) != "1":
            continue
        # Positions holding '0' must be X-conjugated so all controls read 1.
        zero_positions = [j for j in range(n) if bits[j] == "0"]
        for j in zero_positions:
            circ.x(ctrl[j])
        circ.mct(ctrl, tgt[0], None, mode='noancilla')
        for j in zero_positions:
            circ.x(ctrl[j])
    return circ
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the Bernstein-Vazirani circuit for oracle f on n qubits + 1 ancilla.

    The ``# number=N`` gates are benchmark mutations layered on top of the
    textbook circuit; their exact order is part of the benchmark and must not
    be changed.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)
    # NOTE(review): zero/b are computed but never used below — likely dead code
    # left by the generator; kept as-is to preserve the benchmark verbatim.
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin: generator-inserted mutation gates follow
    prog.h(input_qubit[1]) # number=1
    prog.h(input_qubit[1]) # number=70
    prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
    prog.h(input_qubit[1]) # number=33
    prog.y(input_qubit[2]) # number=56
    prog.cz(input_qubit[2],input_qubit[1]) # number=34
    prog.h(input_qubit[1]) # number=35
    prog.h(input_qubit[1]) # number=3
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate prog and return its amplitudes keyed by ``|bitstring>`` labels."""
    backend = Aer.get_backend('statevector_simulator')
    amplitudes = execute(prog, backend).result().get_statevector()
    n_qubits = round(log2(len(amplitudes)))
    return {
        "|" + np.binary_repr(idx, n_qubits) + ">": amplitudes[idx]
        for idx in range(2 ** n_qubits)
    }
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run prog on the named simulator backend and summarize the outcome.

    Returns a dict with the raw measurement counts, the full statevector
    (via get_statevector), the most frequent bitstring reversed (recovered
    secret ``a``), and the constant term ``b`` passed through unchanged.
    """
    # Full amplitudes first, so the report carries both views of the run.
    quantum_state = get_statevector(prog)
    backend = Aer.get_backend(backend_str)
    counts = execute(prog, backend, shots=shots).result().get_counts()
    # Most common outcome, with bit order reversed to match the oracle's
    # register convention.
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """Evaluate f(x) = 011 . x + 1 (mod 2) at the bit string *rep*."""
    return bitwise_xor(bitwise_dot("011", rep), "1")
def bernstein_test_2(rep: str):
    """Evaluate f(x) = 000 . x + 0 (mod 2) at the bit string *rep*."""
    return bitwise_xor(bitwise_dot("000", rep), "0")
def bernstein_test_3(rep: str):
    """Evaluate f(x) = 111 . x + 1 (mod 2) at the bit string *rep*."""
    return bitwise_xor(bitwise_dot("111", rep), "1")
if __name__ == "__main__":
    # Benchmark driver: build the BV circuit for f(x) = 11 . x + 1,
    # transpile it for a fake 5-qubit backend, append two mutation gates,
    # simulate, and dump the counts/depth/diagram to a CSV report.
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    writefile = open("../data/startQiskit_Class403.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    backend = BasicAer.get_backend('statevector_simulator')
    # Transpile against FakeYorktown's coupling map/basis gates.
    circuit1 = transpile(prog, FakeYorktown())
    # Post-transpile mutation gates injected by the benchmark generator.
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| 29.183784 | 140 | 0.629561 |
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bitstrings; the output bits are reversed."""
    width = len(s)
    # Indexing (rather than zip) preserves the original IndexError when
    # `t` is shorter than `s`.
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(width)]
    return ''.join(bits[::-1])
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bitstrings modulo 2, returned as "0" or "1"."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build a bit-flip oracle circuit Of for the boolean function `f`.

    For every n-bit input string where f(rep) == "1", the target qubit is
    flipped via a multi-controlled X gate.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    # Enumerate all 2**n basis states and mark the ones mapped to "1".
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # X the controls whose bit is 0 so that |rep> is mapped onto
            # |11...1>, the state the multi-controlled X fires on.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Undo the X gates to restore the computational basis.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build a Bernstein-Vazirani-style circuit for the function `f`.

    Uses n input qubits plus one ancilla (index n) prepared in |1>, with
    Hadamards around an oracle built from `f`.
    """
    zero = np.binary_repr(0, n)
    # NOTE(review): `b` is computed but never used in this function.
    b = f(zero)
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # Ancilla in |1> so the oracle's bit-flip becomes a phase kickback.
    prog.x(input_qubit[n])
    # NOTE(review): the gate sequence below looks like injected
    # noise/mutation gates; the back-to-back H pairs on qubit 1 cancel
    # to identity — confirm they are intentional.
    prog.h(input_qubit[1])
    prog.h(input_qubit[1])
    prog.rx(-0.09738937226128368,input_qubit[2])
    prog.h(input_qubit[1])
    prog.y(input_qubit[2])
    prog.cz(input_qubit[2],input_qubit[1])
    prog.h(input_qubit[1])
    prog.h(input_qubit[1])
    # Put all inputs (and the ancilla) into superposition.
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # Apply the oracle across the inputs and the ancilla.
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # Final Hadamards map the hidden string back to the computational basis.
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate `prog` and return {ket label: complex amplitude}."""
    state_backend = Aer.get_backend('statevector_simulator')
    statevec = execute(prog, state_backend).result()
    quantum_state = statevec.get_statevector()
    # A k-qubit state has 2**k amplitudes.
    qubits = round(log2(len(quantum_state)))
    # Re-key the flat amplitude array by ket label, e.g. "|010>".
    quantum_state = {
        "|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
        for i in range(2 ** qubits)
    }
    return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Run `prog` on the named Aer backend and summarize the results.

    Returns measurement counts, the ideal statevector, the most frequent
    bitstring (reversed) as "a", and the caller-supplied "b".
    """
    # Ideal amplitudes from a separate statevector simulation.
    quantum_state = get_statevector(prog)
    backend = Aer.get_backend(backend_str)
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # Qiskit bitstrings put qubit 0 rightmost; reverse the winner.
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """Oracle for f(x) = 011 . x + 1 (mod 2)."""
    a = "011"
    b = "1"
    return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
    """Oracle for f(x) = 000 . x + 0 (mod 2)."""
    a = "000"
    b = "0"
    return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
    """Oracle for f(x) = 111 . x + 1 (mod 2)."""
    a = "111"
    b = "1"
    return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
    n = 2
    a = "11"
    b = "1"
    # Hidden Bernstein-Vazirani function f(x) = a . x + b (mod 2).
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot = 4000

    backend = BasicAer.get_backend('statevector_simulator')
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    info = execute(circuit1, backend=backend, shots=sample_shot).result().get_counts()

    # Use a context manager so the report file is closed even if one of
    # the prints raises (original code relied on a manual close()).
    with open("../data/startQiskit_Class403.csv", "w") as writefile:
        print(info, file=writefile)
        print("results end", file=writefile)
        print(circuit1.depth(), file=writefile)
        print(circuit1, file=writefile)
| true | true |
1c2c5293b6e6088817defdb6355b42a48bb97468 | 13,301 | py | Python | expression/core/option.py | erlendvollset/Expression | 888fd757d69e39efa5e3b76228d565b2607c1c5b | [
"MIT"
] | null | null | null | expression/core/option.py | erlendvollset/Expression | 888fd757d69e39efa5e3b76228d565b2607c1c5b | [
"MIT"
] | null | null | null | expression/core/option.py | erlendvollset/Expression | 888fd757d69e39efa5e3b76228d565b2607c1c5b | [
"MIT"
] | null | null | null | """Option module.
Contains a collection of static methods (functions) for operating on
options. All functions takes the source as the last curried
argument, i.e all functions returns a function that takes the source
sequence as the only argument.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import (
TYPE_CHECKING,
Any,
Callable,
Generator,
Iterable,
Iterator,
List,
Optional,
Protocol,
TypeVar,
Union,
cast,
get_origin,
overload,
)
from .error import EffectError
from .match import MatchMixin, SupportsMatch
from .pipe import pipe
if TYPE_CHECKING:
from ..collections.seq import Seq
TSource = TypeVar("TSource")
TResult = TypeVar("TResult")
T1 = TypeVar("T1")
T2 = TypeVar("T2")
T3 = TypeVar("T3")
T4 = TypeVar("T4")
class Option(Iterable[TSource], MatchMixin[TSource], SupportsMatch[Union[TSource, bool]], ABC):
    """Option abstract base class.

    Concrete cases are `Some` (a present value) and the `Nothing`
    singleton (absence of a value).
    """

    @overload
    def pipe(self, __fn1: Callable[["Option[TSource]"], TResult]) -> TResult:
        ...

    @overload
    def pipe(self, __fn1: Callable[["Option[TSource]"], T1], __fn2: Callable[[T1], T2]) -> T2:
        ...

    @overload
    def pipe(
        self, __fn1: Callable[["Option[TSource]"], T1], __fn2: Callable[[T1], T2], __fn3: Callable[[T2], T3]
    ) -> T3:
        ...

    @overload
    def pipe(
        self,
        __fn1: Callable[["Option[TSource]"], T1],
        __fn2: Callable[[T1], T2],
        __fn3: Callable[[T2], T3],
        __fn4: Callable[[T3], T4],
    ) -> T4:
        ...

    def pipe(self, *args: Any) -> Any:
        """Pipe option through the given functions."""
        return pipe(self, *args)

    # Fixed: decorated with @abstractmethod for consistency with every
    # other abstract member of this class (it already only raised
    # NotImplementedError; both visible subclasses implement it).
    @abstractmethod
    def default_value(self, value: TSource) -> TSource:
        """Gets the value of the option if the option is Some, otherwise
        returns the specified default value.
        """
        raise NotImplementedError

    @abstractmethod
    def map(self, mapper: Callable[[TSource], TResult]) -> "Option[TResult]":
        """Map the wrapped value through `mapper` when Some."""
        raise NotImplementedError

    @abstractmethod
    def map2(self, mapper: Callable[[TSource, T2], TResult], other: "Option[T2]") -> "Option[TResult]":
        """Combine two options with a binary `mapper` when both are Some."""
        raise NotImplementedError

    @abstractmethod
    def bind(self, mapper: Callable[[TSource], "Option[TResult]"]) -> "Option[TResult]":
        """Bind option.

        Applies and returns the result of the mapper if the value is
        `Some`. If the value is `Nothing` then `Nothing` is returned.

        Args:
            mapper: A function that takes the value of type TSource from
                an option and transforms it into an option containing a
                value of type TResult.

        Returns:
            An option of the output type of the mapper.
        """
        raise NotImplementedError

    @abstractmethod
    def or_else(self, if_none: "Option[TSource]") -> "Option[TSource]":
        """Returns option if it is Some, otherwise returns `if_none`."""
        raise NotImplementedError

    @abstractmethod
    def to_list(self) -> List[TSource]:
        """Return [value] when Some, [] when Nothing."""
        raise NotImplementedError

    @abstractmethod
    def to_seq(self) -> Seq[TSource]:
        """Return a one-element Seq when Some, an empty Seq when Nothing."""
        raise NotImplementedError

    @abstractmethod
    def is_some(self) -> bool:
        """Returns true if the option is not Nothing."""
        raise NotImplementedError

    @abstractmethod
    def is_none(self) -> bool:
        """Returns true if the option is Nothing."""
        raise NotImplementedError

    @classmethod
    def of_obj(cls, value: Any) -> "Option[Any]":
        """Convert object to an option."""
        # Annotation aligned with the module-level `of_obj`, which takes Any.
        return of_optional(value)

    @classmethod
    def of_optional(cls, value: Optional[TSource]) -> "Option[TSource]":
        """Convert optional value to an option."""
        return of_optional(value)

    @property
    @abstractmethod
    def value(self) -> TSource:
        """Returns the value wrapped by the option.

        A `ValueError` is raised if the option is `Nothing`.
        """
        raise NotImplementedError

    @abstractmethod
    def __eq__(self, other: Any) -> bool:
        raise NotImplementedError

    @abstractmethod
    def __lt__(self, other: Any) -> bool:
        raise NotImplementedError

    def __repr__(self) -> str:
        return self.__str__()
class Some(Option[TSource]):
    """The Some option case class."""

    def __init__(self, value: TSource) -> None:
        # The wrapped value; exposed read-only through the `value` property.
        self._value = value

    def default_value(self, value: TSource) -> TSource:
        """Gets the value of the option if the option is Some, otherwise
        returns the specified default value.
        """
        # Some always carries a value, so the fallback is ignored.
        return self._value

    def is_some(self) -> bool:
        """Returns `True`."""
        return True

    def is_none(self) -> bool:
        """Returns `False`."""
        return False

    def map(self, mapper: Callable[[TSource], TResult]) -> "Option[TResult]":
        """Apply `mapper` to the wrapped value and re-wrap the result."""
        return Some(mapper(self._value))

    def map2(self, mapper: Callable[[TSource, T2], TResult], other: Option[T2]) -> Option[TResult]:
        """Combine with `other` via `mapper`; Nothing if `other` is Nothing."""
        if isinstance(other, Some):
            return Some(mapper(self._value, cast(Some[T2], other).value))
        return Nothing

    def bind(self, mapper: Callable[[TSource], Option[TResult]]) -> Option[TResult]:
        """Bind option.

        Applies and returns the result of the mapper if the value is
        `Some`. If the value is `Nothing` then `Nothing` is returned.

        Args:
            mapper: A function that takes the value of type TSource from
                an option and transforms it into an option containing a
                value of type TResult.

        Returns:
            An option of the output type of the mapper.
        """
        return mapper(self._value)

    def or_else(self, if_none: Option[TSource]) -> Option[TSource]:
        """Returns `self`."""
        return self

    def to_list(self) -> List[TSource]:
        """Return the wrapped value as a one-element list."""
        return [self._value]

    def to_seq(self) -> Seq[TSource]:
        """Return the wrapped value as a one-element Seq."""
        from expression.collections.seq import Seq  # deferred import to avoid circular dependencies
        return Seq.of(self._value)

    @property
    def value(self) -> TSource:
        """Returns the value wrapped by the option.

        A `ValueError` is raised if the option is `Nothing`.
        """
        return self._value

    def __match__(self, pattern: Any) -> Iterable[TSource]:
        # Match against a concrete value (identity or equality) first,
        # then fall back to treating `pattern` as a type, unwrapping
        # generic aliases (e.g. Some[int]) via get_origin.
        if self is pattern or self == pattern:
            return [self.value]
        try:
            origin: Any = get_origin(pattern)
            if isinstance(self, origin or pattern):
                return [self.value]
        except TypeError:
            # `pattern` was not a type usable with isinstance; no match.
            pass
        return []

    def __lt__(self, other: Any) -> bool:
        # Only comparable to other Some values; otherwise not less-than.
        if isinstance(other, Some):
            return self._value < other._value  # type: ignore
        return False

    def __eq__(self, other: Any) -> bool:
        # NOTE(review): defining __eq__ without __hash__ makes Some
        # unhashable (Python sets __hash__ = None) — confirm intended.
        if isinstance(other, Some):
            return self._value == other._value  # type: ignore
        return False

    def __iter__(self) -> Generator[TSource, TSource, TSource]:
        # A generator that yields the wrapped value once and also returns
        # it (the return value is delivered via StopIteration.value).
        return (yield self._value)

    def __str__(self) -> str:
        return f"Some {self._value}"
class Nothing_(Option[TSource], EffectError):
    """The None option case class.

    Do not use. Use the singleton `Nothing` instead. Since Nothing is a
    singleton it can be tested e.g using `is`:

    >>> if xs is Nothing:
    ...     return True
    """

    def default_value(self, value: TSource) -> TSource:
        """Gets the value of the option if the option is Some, otherwise
        returns the specified default value.
        """
        # Nothing carries no value, so the fallback is always returned.
        return value

    def is_some(self) -> bool:
        """Returns `False`."""
        return False

    def is_none(self) -> bool:
        """Returns `True`."""
        return True

    def map(self, mapper: Callable[[TSource], TResult]) -> Option[TResult]:
        """Mapping Nothing yields Nothing; `mapper` is never called."""
        return Nothing

    def map2(self, mapper: Callable[[TSource, T2], TResult], other: Option[T2]) -> Option[TResult]:
        """Combining Nothing with anything yields Nothing."""
        return Nothing

    def bind(self, mapper: Callable[[TSource], Option[TResult]]) -> Option[TResult]:
        """Bind option.

        Applies and returns the result of the mapper if the value is
        `Some`. If the value is `Nothing` then `Nothing` is returned.

        Args:
            mapper: A function that takes the value of type TSource from
                an option and transforms it into an option containing a
                value of type TResult.

        Returns:
            An option of the output type of the mapper.
        """
        return Nothing

    def or_else(self, if_none: Option[TSource]) -> Option[TSource]:
        """Returns `if_none`."""
        return if_none

    def to_list(self) -> List[TSource]:
        """Return an empty list."""
        return []

    def to_seq(self) -> Seq[TSource]:
        """Return an empty Seq."""
        from expression.collections.seq import Seq  # deferred import to avoid circular dependencies
        return Seq()

    @property
    def value(self) -> TSource:
        """Returns the value wrapped by the option.

        A `ValueError` is raised if the option is `Nothing`.
        """
        raise ValueError("There is no value.")

    def __match__(self, pattern: Any) -> "Iterable[bool]":
        # Only matches the Nothing singleton itself (identity check).
        if self is pattern:
            return [True]
        return []

    def __iter__(self) -> Iterator[TSource]:
        """Return iterator for the `Nothing` case.

        We basically want to return nothing, but we have to return
        something to signal fail.
        """
        # The unreachable `yield` below makes this a generator function,
        # so calling __iter__ succeeds and the `raise Nothing` fires on
        # the first iteration step instead (Nothing_ is an EffectError,
        # so raising the singleton aborts effect-style code).
        raise Nothing
        while False:
            yield

    def __lt__(self, other: Any) -> bool:
        # Always True: Nothing orders before any other value.
        return True

    def __eq__(self, other: Any) -> bool:
        # Equality is identity with the Nothing singleton.
        if other is Nothing:
            return True
        return False

    def __str__(self):
        return "Nothing"
class TransformFn(Protocol[TResult]):
    """Option transforming protocol function.

    `Option[TSource]) -> Option[TResult]`
    """

    def __call__(self, __source: Option[TSource]) -> Option[TResult]:
        # Protocol stub: implementations provide the actual transform.
        raise NotImplementedError
# The singleton None class. We use the name 'Nothing' here instead of `None` to
# avoid conflicts with the builtin `None` value in Python.
# Note to self: Must be of type `Nothing_` or pattern matching will not work.
Nothing: Nothing_[Any] = Nothing_()
"""Singleton `Nothing` object.
Since Nothing is a singleton it can be tested e.g using `is`:
>>> if xs is Nothing:
... return True
"""
def bind(mapper: Callable[[TSource], Option[TResult]]) -> TransformFn[TResult]:
    """Curried bind over options.

    Returns a transform that forwards to ``option.bind(mapper)``: a
    `Some` value is fed through `mapper`, while `Nothing` passes through
    unchanged.

    Args:
        mapper: Function turning a TSource value into an Option[TResult].

    Returns:
        A function from Option[TSource] to Option[TResult].
    """
    def transform(option: Option[TSource]) -> Option[TResult]:
        return option.bind(mapper)

    return transform
def default_value(value: TSource) -> Callable[[Option[TSource]], TSource]:
    """Curried accessor: unwrap an option, falling back to `value`.

    The returned function delegates to ``option.default_value(value)``.
    """
    def unwrap(option: Option[TSource]) -> TSource:
        return option.default_value(value)

    return unwrap
def is_none(option: Option[TSource]) -> bool:
    """Return `True` if `option` is Nothing (delegates to the option)."""
    return option.is_none()
def is_some(option: Option[TSource]) -> bool:
    """Return `True` if `option` is Some (delegates to the option)."""
    return option.is_some()
def map(mapper: Callable[[TSource], TResult]) -> TransformFn[TResult]:
    """Curried map: lift `mapper` to operate on options.

    The returned transform delegates to ``option.map(mapper)``.
    """
    def transform(option: Option[TSource]) -> Option[TResult]:
        return option.map(mapper)

    return transform
def map2(mapper: Callable[[T1, T2], TResult]) -> Callable[[Option[T1], Option[T2]], Option[TResult]]:
    """Curried map2: combine two options with a binary `mapper`.

    The returned function delegates to ``first.map2(mapper, second)``.
    """
    def combine(first: Option[T1], second: Option[T2]) -> Option[TResult]:
        return first.map2(mapper, second)

    return combine
def or_else(if_none: Option[TSource]) -> Callable[[Option[TSource]], Option[TSource]]:
    """Curried or_else: keep the option when Some, else use `if_none`.

    The returned function delegates to ``option.or_else(if_none)``.
    """
    def choose(option: Option[TSource]) -> Option[TSource]:
        return option.or_else(if_none)

    return choose
def to_list(option: Option[TSource]) -> List[TSource]:
    """Convert an option to a list (delegates to the option)."""
    return option.to_list()
def to_seq(option: Option[TSource]) -> Seq[TSource]:
    """Convert an option to a Seq (delegates to the option)."""
    return option.to_seq()
def of_optional(value: Optional[TSource]) -> Option[TSource]:
    """Convert an optional value to an option.

    `None` becomes `Nothing`; any other value is wrapped in `Some`.
    Same as `of_obj` but with typed values.

    Args:
        value: The input optional value.

    Return:
        The result option.
    """
    return Nothing if value is None else Some(value)
def of_obj(value: Any) -> Option[Any]:
    """Convert object to an option.

    Convert a value that could be `None` into an `Option` value. This is
    the untyped variant of `of_optional`, to which it delegates.

    Args:
        value: The input object.

    Return:
        The result option.
    """
    return of_optional(value)
__all__ = [
"Option",
"Some",
"Nothing",
"Nothing_",
"bind",
"default_value",
"map",
"map2",
"is_none",
"is_some",
"or_else",
"to_list",
"to_seq",
"of_optional",
"of_obj",
]
| 27.089613 | 108 | 0.618826 | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import (
TYPE_CHECKING,
Any,
Callable,
Generator,
Iterable,
Iterator,
List,
Optional,
Protocol,
TypeVar,
Union,
cast,
get_origin,
overload,
)
from .error import EffectError
from .match import MatchMixin, SupportsMatch
from .pipe import pipe
if TYPE_CHECKING:
from ..collections.seq import Seq
TSource = TypeVar("TSource")
TResult = TypeVar("TResult")
T1 = TypeVar("T1")
T2 = TypeVar("T2")
T3 = TypeVar("T3")
T4 = TypeVar("T4")
class Option(Iterable[TSource], MatchMixin[TSource], SupportsMatch[Union[TSource, bool]], ABC):
@overload
def pipe(self, __fn1: Callable[["Option[TSource]"], TResult]) -> TResult:
...
@overload
def pipe(self, __fn1: Callable[["Option[TSource]"], T1], __fn2: Callable[[T1], T2]) -> T2:
...
@overload
def pipe(
self, __fn1: Callable[["Option[TSource]"], T1], __fn2: Callable[[T1], T2], __fn3: Callable[[T2], T3]
) -> T3:
...
@overload
def pipe(
self,
__fn1: Callable[["Option[TSource]"], T1],
__fn2: Callable[[T1], T2],
__fn3: Callable[[T2], T3],
__fn4: Callable[[T3], T4],
) -> T4:
...
def pipe(self, *args: Any) -> Any:
return pipe(self, *args)
def default_value(self, value: TSource) -> TSource:
raise NotImplementedError
@abstractmethod
def map(self, mapper: Callable[[TSource], TResult]) -> "Option[TResult]":
raise NotImplementedError
@abstractmethod
def map2(self, mapper: Callable[[TSource, T2], TResult], other: "Option[T2]") -> "Option[TResult]":
raise NotImplementedError
@abstractmethod
def bind(self, mapper: Callable[[TSource], "Option[TResult]"]) -> "Option[TResult]":
raise NotImplementedError
@abstractmethod
def or_else(self, if_none: "Option[TSource]") -> "Option[TSource]":
raise NotImplementedError
@abstractmethod
def to_list(self) -> List[TSource]:
raise NotImplementedError
@abstractmethod
def to_seq(self) -> Seq[TSource]:
raise NotImplementedError
@abstractmethod
def is_some(self) -> bool:
raise NotImplementedError
@abstractmethod
def is_none(self) -> bool:
raise NotImplementedError
@classmethod
def of_obj(cls, value: TSource) -> "Option[TSource]":
return of_optional(value)
@classmethod
def of_optional(cls, value: Optional[TSource]) -> "Option[TSource]":
return of_optional(value)
@property
@abstractmethod
def value(self) -> TSource:
raise NotImplementedError
@abstractmethod
def __eq__(self, other: Any) -> bool:
raise NotImplementedError
@abstractmethod
def __lt__(self, other: Any) -> bool:
raise NotImplementedError
def __repr__(self) -> str:
return self.__str__()
class Some(Option[TSource]):
def __init__(self, value: TSource) -> None:
self._value = value
def default_value(self, value: TSource) -> TSource:
return self._value
def is_some(self) -> bool:
return True
def is_none(self) -> bool:
return False
def map(self, mapper: Callable[[TSource], TResult]):
return Some(mapper(self._value))
def map2(self, mapper: Callable[[TSource, T2], TResult], other: Option[T2]) -> Option[TResult]:
if isinstance(other, Some):
return Some(mapper(self._value, cast(Some[T2], other).value))
return Nothing
def bind(self, mapper: Callable[[TSource], Option[TResult]]) -> Option[TResult]:
return mapper(self._value)
def or_else(self, if_none: Option[TSource]) -> Option[TSource]:
return self
def to_list(self) -> List[TSource]:
return [self._value]
def to_seq(self) -> Seq[TSource]:
from expression.collections.seq import Seq
return Seq.of(self._value)
@property
def value(self) -> TSource:
return self._value
def __match__(self, pattern: Any) -> Iterable[TSource]:
if self is pattern or self == pattern:
return [self.value]
try:
origin: Any = get_origin(pattern)
if isinstance(self, origin or pattern):
return [self.value]
except TypeError:
pass
return []
def __lt__(self, other: Any) -> bool:
if isinstance(other, Some):
return self._value < other._value
return False
def __eq__(self, other: Any) -> bool:
if isinstance(other, Some):
return self._value == other._value
return False
def __iter__(self) -> Generator[TSource, TSource, TSource]:
return (yield self._value)
def __str__(self) -> str:
return f"Some {self._value}"
class Nothing_(Option[TSource], EffectError):
def default_value(self, value: TSource) -> TSource:
return value
def is_some(self) -> bool:
return False
def is_none(self) -> bool:
return True
def map(self, mapper: Callable[[TSource], TResult]) -> Option[TResult]:
return Nothing
def map2(self, mapper: Callable[[TSource, T2], TResult], other: Option[T2]) -> Option[TResult]:
return Nothing
def bind(self, mapper: Callable[[TSource], Option[TResult]]) -> Option[TResult]:
return Nothing
def or_else(self, if_none: Option[TSource]) -> Option[TSource]:
return if_none
def to_list(self) -> List[TSource]:
return []
def to_seq(self) -> Seq[TSource]:
from expression.collections.seq import Seq
return Seq()
@property
def value(self) -> TSource:
raise ValueError("There is no value.")
def __match__(self, pattern: Any) -> "Iterable[bool]":
if self is pattern:
return [True]
return []
def __iter__(self) -> Iterator[TSource]:
raise Nothing
while False:
yield
def __lt__(self, other: Any) -> bool:
return True
def __eq__(self, other: Any) -> bool:
if other is Nothing:
return True
return False
def __str__(self):
return "Nothing"
class TransformFn(Protocol[TResult]):
def __call__(self, __source: Option[TSource]) -> Option[TResult]:
raise NotImplementedError
Nothing: Nothing_[Any] = Nothing_()
def bind(mapper: Callable[[TSource], Option[TResult]]) -> TransformFn[TResult]:
def _bind(option: Option[TSource]) -> Option[TResult]:
return option.bind(mapper)
return _bind
def default_value(value: TSource) -> Callable[[Option[TSource]], TSource]:
def _default_value(option: Option[TSource]) -> TSource:
return option.default_value(value)
return _default_value
def is_none(option: Option[TSource]) -> bool:
return option.is_none()
def is_some(option: Option[TSource]) -> bool:
return option.is_some()
def map(mapper: Callable[[TSource], TResult]) -> TransformFn[TResult]:
def _map(option: Option[TSource]) -> Option[TResult]:
return option.map(mapper)
return _map
def map2(mapper: Callable[[T1, T2], TResult]) -> Callable[[Option[T1], Option[T2]], Option[TResult]]:
def _map2(opt1: Option[T1], opt2: Option[T2]) -> Option[TResult]:
return opt1.map2(mapper, opt2)
return _map2
def or_else(if_none: Option[TSource]) -> Callable[[Option[TSource]], Option[TSource]]:
def _or_else(option: Option[TSource]) -> Option[TSource]:
return option.or_else(if_none)
return _or_else
def to_list(option: Option[TSource]) -> List[TSource]:
return option.to_list()
def to_seq(option: Option[TSource]) -> Seq[TSource]:
return option.to_seq()
def of_optional(value: Optional[TSource]) -> Option[TSource]:
if value is None:
return Nothing
return Some(value)
def of_obj(value: Any) -> Option[Any]:
return of_optional(value)
__all__ = [
"Option",
"Some",
"Nothing",
"Nothing_",
"bind",
"default_value",
"map",
"map2",
"is_none",
"is_some",
"or_else",
"to_list",
"to_seq",
"of_optional",
"of_obj",
]
| true | true |
1c2c52d5d734a20158e75ad1077782aa67773b86 | 3,867 | py | Python | tests/test_modules.py | m3g4p0p/jsonscript | 9f55c46bd75c5f55a6fa8235abeffa8e5d8ee31b | [
"MIT"
] | null | null | null | tests/test_modules.py | m3g4p0p/jsonscript | 9f55c46bd75c5f55a6fa8235abeffa8e5d8ee31b | [
"MIT"
] | null | null | null | tests/test_modules.py | m3g4p0p/jsonscript | 9f55c46bd75c5f55a6fa8235abeffa8e5d8ee31b | [
"MIT"
] | null | null | null | import json
import pytest
from jsonscript.interpreter import run
class TestModules:
    """End-to-end tests for jsonscript's module import/export system."""

    @staticmethod
    @pytest.fixture
    def create_json(tmp_path):
        # Factory fixture: dumps `obj` as JSON below tmp_path (creating
        # parent directories as needed) and returns the file path.
        def create(file_path, obj):
            file = tmp_path / file_path
            file.parent.mkdir(exist_ok=True, parents=True)
            with open(file, 'w') as f:
                json.dump(obj, f)
            return file
        return create

    def test_run_file(self, create_json):
        # A bare '#' (return) directive evaluates to its value.
        main = create_json('spam.json', {'#': 42})
        assert run(main) == 42

    def test_imports(self, create_json):
        # Imported bindings can be shadowed ('=foo') and imported
        # functions see the importer's rebinding of 'foo'.
        main = create_json('spam.json', {
            '@import': {
                'eggs': ['func', 'foo']
            },
            '=foo': {'*': ['&foo', 5]},
            'func': [{'+': ['&foo', 1]}]
        })
        create_json('eggs.json', {
            '@export': ['foo', 'bar', 'func'],
            '=foo': 4,
            '=bar': 2,
            'func': {
                '*': ['&0', '&bar']
            }
        })
        assert run(main) == 42

    def test_import_all(self, create_json):
        # '@import': {module: True} pulls in every export; a selective
        # import of only 'foo' must leave 'bar' undefined (KeyError).
        imports_all = create_json('spam.json', {
            '@import': {
                'eggs': True
            },
            '+': ['&foo', '&bar']
        })
        imports_foo = create_json('ham.json', {
            '@import': {
                'eggs': ['foo']
            },
            '+': ['&foo', '&bar']
        })
        create_json('eggs.json', {
            '@export': ['foo', 'bar'],
            '=foo': 41,
            '=bar': 1,
        })
        assert run(imports_all) == 42
        with pytest.raises(KeyError):
            run(imports_foo)

    def test_run_module_only_once(self, create_json, capsys):
        # 'ham' is imported twice (directly and via 'eggs') but its
        # '!print' side effect must run exactly once, in dependency order.
        main = create_json('spam.json', {
            '@import': {
                'eggs': ['foo'],
                'ham': ['bar']
            },
            '!print': ['spam'],
            '+': ['&foo', '&bar']
        })
        create_json('eggs.json', {
            '@import': {
                'ham': ['bar']
            },
            '@export': ['foo'],
            '!print': ['eggs'],
            '=foo': {'+': [40, '&bar']},
        })
        create_json('ham.json', {
            '@export': ['bar'],
            '!print': ['ham'],
            '=bar': 1,
        })
        assert run(main) == 42
        assert capsys.readouterr().out == 'ham\neggs\nspam\n'

    def test_relative_import_paths(self, create_json):
        # Import paths resolve relative to the importing file, with or
        # without an explicit '.json' extension.
        main = create_json('foo/spam.json', {
            '@import': {
                '../bar/eggs': ['value'],
            },
            '#': '&value'
        })
        create_json('bar/eggs.json', {
            '@import': {
                'baz/ham.json': ['value']
            },
            '@export': ['value']
        })
        create_json('bar/baz/ham.json', {
            '@export': ['value'],
            '=value': 42,
        })
        assert run(main) == 42

    def test_sublevel_imports(self, create_json):
        # '@import' works inside a nested scope, alongside top-level imports.
        main = create_json('spam.json', {
            '@import': {
                'eggs': ['foo'],
            },
            '#': {
                '@import': {
                    'eggs': ['bar'],
                },
                '+': ['&foo', '&bar']
            }
        })
        create_json('eggs.json', {
            '@export': ['foo', 'bar'],
            '=foo': 41,
            '=bar': 1
        })
        assert run(main) == 42

    def test_sublevel_exports(self, create_json):
        # '@export' declared in a nested scope is still visible to importers.
        main = create_json('spam.json', {
            '@import': {
                'eggs': ['foo', 'bar'],
            },
            '+': ['&foo', '&bar']
        })
        create_json('eggs.json', {
            '@export': ['foo'],
            '=foo': 41,
            '#': {
                '@export': ['bar'],
                '=bar': 1
            }
        })
        assert run(main) == 42
| 23.723926 | 61 | 0.376519 | import json
import pytest
from jsonscript.interpreter import run
class TestModules:
@staticmethod
@pytest.fixture
def create_json(tmp_path):
def create(file_path, obj):
file = tmp_path / file_path
file.parent.mkdir(exist_ok=True, parents=True)
with open(file, 'w') as f:
json.dump(obj, f)
return file
return create
def test_run_file(self, create_json):
main = create_json('spam.json', {'#': 42})
assert run(main) == 42
def test_imports(self, create_json):
main = create_json('spam.json', {
'@import': {
'eggs': ['func', 'foo']
},
'=foo': {'*': ['&foo', 5]},
'func': [{'+': ['&foo', 1]}]
})
create_json('eggs.json', {
'@export': ['foo', 'bar', 'func'],
'=foo': 4,
'=bar': 2,
'func': {
'*': ['&0', '&bar']
}
})
assert run(main) == 42
def test_import_all(self, create_json):
imports_all = create_json('spam.json', {
'@import': {
'eggs': True
},
'+': ['&foo', '&bar']
})
imports_foo = create_json('ham.json', {
'@import': {
'eggs': ['foo']
},
'+': ['&foo', '&bar']
})
create_json('eggs.json', {
'@export': ['foo', 'bar'],
'=foo': 41,
'=bar': 1,
})
assert run(imports_all) == 42
with pytest.raises(KeyError):
run(imports_foo)
def test_run_module_only_once(self, create_json, capsys):
main = create_json('spam.json', {
'@import': {
'eggs': ['foo'],
'ham': ['bar']
},
'!print': ['spam'],
'+': ['&foo', '&bar']
})
create_json('eggs.json', {
'@import': {
'ham': ['bar']
},
'@export': ['foo'],
'!print': ['eggs'],
'=foo': {'+': [40, '&bar']},
})
create_json('ham.json', {
'@export': ['bar'],
'!print': ['ham'],
'=bar': 1,
})
assert run(main) == 42
assert capsys.readouterr().out == 'ham\neggs\nspam\n'
def test_relative_import_paths(self, create_json):
main = create_json('foo/spam.json', {
'@import': {
'../bar/eggs': ['value'],
},
'#': '&value'
})
create_json('bar/eggs.json', {
'@import': {
'baz/ham.json': ['value']
},
'@export': ['value']
})
create_json('bar/baz/ham.json', {
'@export': ['value'],
'=value': 42,
})
assert run(main) == 42
def test_sublevel_imports(self, create_json):
main = create_json('spam.json', {
'@import': {
'eggs': ['foo'],
},
'#': {
'@import': {
'eggs': ['bar'],
},
'+': ['&foo', '&bar']
}
})
create_json('eggs.json', {
'@export': ['foo', 'bar'],
'=foo': 41,
'=bar': 1
})
assert run(main) == 42
def test_sublevel_exports(self, create_json):
main = create_json('spam.json', {
'@import': {
'eggs': ['foo', 'bar'],
},
'+': ['&foo', '&bar']
})
create_json('eggs.json', {
'@export': ['foo'],
'=foo': 41,
'#': {
'@export': ['bar'],
'=bar': 1
}
})
assert run(main) == 42
| true | true |
1c2c52fb474a12cf3a58bc7c2fe1b5a2f5c53cbc | 3,267 | py | Python | address/migrations/0004_auto__add_field_address_extended_address__chg_field_address_street_add.py | jacinda/django-address | c9a351b20e29c4fa9841a4bda849befa68f1c3de | [
"BSD-3-Clause"
] | 2 | 2015-08-11T08:21:40.000Z | 2016-01-24T05:51:08.000Z | address/migrations/0004_auto__add_field_address_extended_address__chg_field_address_street_add.py | jacinda/django-address | c9a351b20e29c4fa9841a4bda849befa68f1c3de | [
"BSD-3-Clause"
] | null | null | null | address/migrations/0004_auto__add_field_address_extended_address__chg_field_address_street_add.py | jacinda/django-address | c9a351b20e29c4fa9841a4bda849befa68f1c3de | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: add the optional `extended_address` column to
    `address.Address` (a blank-able CharField, max length 35).
    """

    def forwards(self, orm):
        # Adding field 'Address.extended_address'
        db.add_column(u'address_address', 'extended_address',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=35, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Address.extended_address'
        db.delete_column(u'address_address', 'extended_address')

    # Frozen ORM snapshot used by South to reconstruct model state.
    models = {
        u'address.address': {
            'Meta': {'ordering': "('locality', 'street_address')", 'object_name': 'Address'},
            'extended_address': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
            'formatted': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'locality': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'addresses'", 'to': u"orm['address.Locality']"}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'street_address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        u'address.country': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Country'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'blank': 'True'})
        },
        u'address.locality': {
            'Meta': {'ordering': "('state', 'postal_code', 'name')", 'unique_together': "(('name', 'state', 'postal_code'),)", 'object_name': 'Locality'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('address.models.NullCharField', [], {'max_length': '165', 'null': 'True', 'blank': 'True'}),
            'postal_code': ('address.models.NullCharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'state': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'localities'", 'to': u"orm['address.State']"})
        },
        u'address.state': {
            'Meta': {'ordering': "('country', 'code', 'name')", 'unique_together': "(('name', 'code', 'country'),)", 'object_name': 'State'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
            'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'states'", 'to': u"orm['address.Country']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '165', 'blank': 'True'})
        }
    }

    # Fixed: the original line had extraneous non-Python text fused onto
    # it ("| 59.4 | 154 | ..."), which is a syntax error.
    complete_apps = ['address']
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column(u'address_address', 'extended_address',
self.gf('django.db.models.fields.CharField')(default='', max_length=35, blank=True),
keep_default=False)
def backwards(self, orm):
db.delete_column(u'address_address', 'extended_address')
models = {
u'address.address': {
'Meta': {'ordering': "('locality', 'street_address')", 'object_name': 'Address'},
'extended_address': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
'formatted': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'locality': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'addresses'", 'to': u"orm['address.Locality']"}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'street_address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'address.country': {
'Meta': {'ordering': "('name',)", 'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'blank': 'True'})
},
u'address.locality': {
'Meta': {'ordering': "('state', 'postal_code', 'name')", 'unique_together': "(('name', 'state', 'postal_code'),)", 'object_name': 'Locality'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('address.models.NullCharField', [], {'max_length': '165', 'null': 'True', 'blank': 'True'}),
'postal_code': ('address.models.NullCharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'localities'", 'to': u"orm['address.State']"})
},
u'address.state': {
'Meta': {'ordering': "('country', 'code', 'name')", 'unique_together': "(('name', 'code', 'country'),)", 'object_name': 'State'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'states'", 'to': u"orm['address.Country']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '165', 'blank': 'True'})
}
}
complete_apps = ['address'] | true | true |
1c2c53300bd2714dada7580fe3e104cfec20b37b | 11,723 | py | Python | tests/db_engine_specs/mssql_tests.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | tests/db_engine_specs/mssql_tests.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | tests/db_engine_specs/mssql_tests.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | 1 | 2021-07-09T16:29:50.000Z | 2021-07-09T16:29:50.000Z | import unittest.mock as mock
from textwrap import dedent
from sqlalchemy import column, table
from sqlalchemy.dialects import mssql
from sqlalchemy.dialects.mssql import DATE, NTEXT, NVARCHAR, TEXT, VARCHAR
from sqlalchemy.sql import select
from sqlalchemy.types import String, UnicodeText
from rabbitai.db_engine_specs.base import BaseEngineSpec
from rabbitai.db_engine_specs.mssql import MssqlEngineSpec
from rabbitai.errors import ErrorLevel, RabbitaiError, RabbitaiErrorType
from rabbitai.utils.core import GenericDataType
from tests.db_engine_specs.base_tests import TestDbEngineSpec
class TestMssqlEngineSpec(TestDbEngineSpec):
def test_mssql_column_types(self):
def assert_type(type_string, type_expected, generic_type_expected):
if type_expected is None:
type_assigned = MssqlEngineSpec.get_sqla_column_type(type_string)
self.assertIsNone(type_assigned)
else:
column_spec = MssqlEngineSpec.get_column_spec(type_string)
if column_spec != None:
self.assertIsInstance(column_spec.sqla_type, type_expected)
self.assertEquals(column_spec.generic_type, generic_type_expected)
assert_type("STRING", String, GenericDataType.STRING)
assert_type("CHAR(10)", String, GenericDataType.STRING)
assert_type("VARCHAR(10)", String, GenericDataType.STRING)
assert_type("TEXT", String, GenericDataType.STRING)
assert_type("NCHAR(10)", UnicodeText, GenericDataType.STRING)
assert_type("NVARCHAR(10)", UnicodeText, GenericDataType.STRING)
assert_type("NTEXT", UnicodeText, GenericDataType.STRING)
def test_where_clause_n_prefix(self):
dialect = mssql.dialect()
spec = MssqlEngineSpec
type_, _ = spec.get_sqla_column_type("VARCHAR(10)")
str_col = column("col", type_=type_)
type_, _ = spec.get_sqla_column_type("NTEXT")
unicode_col = column("unicode_col", type_=type_)
tbl = table("tbl")
sel = (
select([str_col, unicode_col])
.select_from(tbl)
.where(str_col == "abc")
.where(unicode_col == "abc")
)
query = str(
sel.compile(dialect=dialect, compile_kwargs={"literal_binds": True})
)
query_expected = (
"SELECT col, unicode_col \n"
"FROM tbl \n"
"WHERE col = 'abc' AND unicode_col = N'abc'"
)
self.assertEqual(query, query_expected)
def test_time_exp_mixd_case_col_1y(self):
col = column("MixedCase")
expr = MssqlEngineSpec.get_timestamp_expr(col, None, "P1Y")
result = str(expr.compile(None, dialect=mssql.dialect()))
self.assertEqual(result, "DATEADD(year, DATEDIFF(year, 0, [MixedCase]), 0)")
def test_convert_dttm(self):
dttm = self.get_dttm()
test_cases = (
(
MssqlEngineSpec.convert_dttm("DATE", dttm),
"CONVERT(DATE, '2019-01-02', 23)",
),
(
MssqlEngineSpec.convert_dttm("DATETIME", dttm),
"CONVERT(DATETIME, '2019-01-02T03:04:05.678', 126)",
),
(
MssqlEngineSpec.convert_dttm("SMALLDATETIME", dttm),
"CONVERT(SMALLDATETIME, '2019-01-02 03:04:05', 20)",
),
)
for actual, expected in test_cases:
self.assertEqual(actual, expected)
def test_extract_error_message(self):
test_mssql_exception = Exception(
"(8155, b\"No column name was specified for column 1 of 'inner_qry'."
"DB-Lib error message 20018, severity 16:\\nGeneral SQL Server error: "
'Check messages from the SQL Server\\n")'
)
error_message = MssqlEngineSpec.extract_error_message(test_mssql_exception)
expected_message = (
"mssql error: All your SQL functions need to "
"have an alias on MSSQL. For example: SELECT COUNT(*) AS C1 FROM TABLE1"
)
self.assertEqual(expected_message, error_message)
test_mssql_exception = Exception(
'(8200, b"A correlated expression is invalid because it is not in a '
"GROUP BY clause.\\n\")'"
)
error_message = MssqlEngineSpec.extract_error_message(test_mssql_exception)
expected_message = "mssql error: " + MssqlEngineSpec._extract_error_message(
test_mssql_exception
)
self.assertEqual(expected_message, error_message)
@mock.patch.object(
MssqlEngineSpec, "pyodbc_rows_to_tuples", return_value="converted"
)
def test_fetch_data(self, mock_pyodbc_rows_to_tuples):
data = [(1, "foo")]
with mock.patch.object(
BaseEngineSpec, "fetch_data", return_value=data
) as mock_fetch:
result = MssqlEngineSpec.fetch_data(None, 0)
mock_pyodbc_rows_to_tuples.assert_called_once_with(data)
self.assertEqual(result, "converted")
def test_column_datatype_to_string(self):
test_cases = (
(DATE(), "DATE"),
(VARCHAR(length=255), "VARCHAR(255)"),
(VARCHAR(length=255, collation="utf8_general_ci"), "VARCHAR(255)"),
(NVARCHAR(length=128), "NVARCHAR(128)"),
(TEXT(), "TEXT"),
(NTEXT(collation="utf8_general_ci"), "NTEXT"),
)
for original, expected in test_cases:
actual = MssqlEngineSpec.column_datatype_to_string(
original, mssql.dialect()
)
self.assertEqual(actual, expected)
def test_extract_errors(self):
"""
Test that custom error messages are extracted correctly.
"""
msg = dedent(
"""
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (locahost)
"""
)
result = MssqlEngineSpec.extract_errors(Exception(msg))
assert result == [
RabbitaiError(
error_type=RabbitaiErrorType.CONNECTION_INVALID_HOSTNAME_ERROR,
message='The hostname "locahost" cannot be resolved.',
level=ErrorLevel.ERROR,
extra={
"engine_name": "Microsoft SQL",
"issue_codes": [
{
"code": 1007,
"message": "Issue 1007 - The hostname provided can't be resolved.",
}
],
},
)
]
msg = dedent(
"""
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (localhost)
Net-Lib error during Connection refused (61)
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (localhost)
Net-Lib error during Connection refused (61)
"""
)
result = MssqlEngineSpec.extract_errors(
Exception(msg), context={"port": 12345, "hostname": "localhost"}
)
assert result == [
RabbitaiError(
error_type=RabbitaiErrorType.CONNECTION_PORT_CLOSED_ERROR,
message='Port 12345 on hostname "localhost" refused the connection.',
level=ErrorLevel.ERROR,
extra={
"engine_name": "Microsoft SQL",
"issue_codes": [
{"code": 1008, "message": "Issue 1008 - The port is closed."}
],
},
)
]
msg = dedent(
"""
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (example.com)
Net-Lib error during Operation timed out (60)
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (example.com)
Net-Lib error during Operation timed out (60)
"""
)
result = MssqlEngineSpec.extract_errors(
Exception(msg), context={"port": 12345, "hostname": "example.com"}
)
assert result == [
RabbitaiError(
error_type=RabbitaiErrorType.CONNECTION_HOST_DOWN_ERROR,
message=(
'The host "example.com" might be down, '
"and can't be reached on port 12345."
),
level=ErrorLevel.ERROR,
extra={
"engine_name": "Microsoft SQL",
"issue_codes": [
{
"code": 1009,
"message": "Issue 1009 - The host might be down, and can't be reached on the provided port.",
}
],
},
)
]
msg = dedent(
"""
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (93.184.216.34)
Net-Lib error during Operation timed out (60)
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (93.184.216.34)
Net-Lib error during Operation timed out (60)
"""
)
result = MssqlEngineSpec.extract_errors(
Exception(msg), context={"port": 12345, "hostname": "93.184.216.34"}
)
assert result == [
RabbitaiError(
error_type=RabbitaiErrorType.CONNECTION_HOST_DOWN_ERROR,
message=(
'The host "93.184.216.34" might be down, '
"and can't be reached on port 12345."
),
level=ErrorLevel.ERROR,
extra={
"engine_name": "Microsoft SQL",
"issue_codes": [
{
"code": 1009,
"message": "Issue 1009 - The host might be down, and can't be reached on the provided port.",
}
],
},
)
]
msg = dedent(
"""
DB-Lib error message 20018, severity 14:
General SQL Server error: Check messages from the SQL Server
DB-Lib error message 20002, severity 9:
Adaptive Server connection failed (mssqldb.cxiotftzsypc.us-west-2.rds.amazonaws.com)
DB-Lib error message 20002, severity 9:
Adaptive Server connection failed (mssqldb.cxiotftzsypc.us-west-2.rds.amazonaws.com)
"""
)
result = MssqlEngineSpec.extract_errors(
Exception(msg), context={"username": "testuser", "database": "testdb"}
)
assert result == [
RabbitaiError(
message='Either the username "testuser", password, or database name "testdb" is incorrect.',
error_type=RabbitaiErrorType.CONNECTION_ACCESS_DENIED_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "Microsoft SQL",
"issue_codes": [
{
"code": 1014,
"message": "Issue 1014 - Either the username or "
"the password is wrong.",
},
{
"code": 1015,
"message": "Issue 1015 - Either the database is "
"spelled incorrectly or does not exist.",
},
],
},
)
]
| 39.738983 | 121 | 0.565981 | import unittest.mock as mock
from textwrap import dedent
from sqlalchemy import column, table
from sqlalchemy.dialects import mssql
from sqlalchemy.dialects.mssql import DATE, NTEXT, NVARCHAR, TEXT, VARCHAR
from sqlalchemy.sql import select
from sqlalchemy.types import String, UnicodeText
from rabbitai.db_engine_specs.base import BaseEngineSpec
from rabbitai.db_engine_specs.mssql import MssqlEngineSpec
from rabbitai.errors import ErrorLevel, RabbitaiError, RabbitaiErrorType
from rabbitai.utils.core import GenericDataType
from tests.db_engine_specs.base_tests import TestDbEngineSpec
class TestMssqlEngineSpec(TestDbEngineSpec):
def test_mssql_column_types(self):
def assert_type(type_string, type_expected, generic_type_expected):
if type_expected is None:
type_assigned = MssqlEngineSpec.get_sqla_column_type(type_string)
self.assertIsNone(type_assigned)
else:
column_spec = MssqlEngineSpec.get_column_spec(type_string)
if column_spec != None:
self.assertIsInstance(column_spec.sqla_type, type_expected)
self.assertEquals(column_spec.generic_type, generic_type_expected)
assert_type("STRING", String, GenericDataType.STRING)
assert_type("CHAR(10)", String, GenericDataType.STRING)
assert_type("VARCHAR(10)", String, GenericDataType.STRING)
assert_type("TEXT", String, GenericDataType.STRING)
assert_type("NCHAR(10)", UnicodeText, GenericDataType.STRING)
assert_type("NVARCHAR(10)", UnicodeText, GenericDataType.STRING)
assert_type("NTEXT", UnicodeText, GenericDataType.STRING)
def test_where_clause_n_prefix(self):
dialect = mssql.dialect()
spec = MssqlEngineSpec
type_, _ = spec.get_sqla_column_type("VARCHAR(10)")
str_col = column("col", type_=type_)
type_, _ = spec.get_sqla_column_type("NTEXT")
unicode_col = column("unicode_col", type_=type_)
tbl = table("tbl")
sel = (
select([str_col, unicode_col])
.select_from(tbl)
.where(str_col == "abc")
.where(unicode_col == "abc")
)
query = str(
sel.compile(dialect=dialect, compile_kwargs={"literal_binds": True})
)
query_expected = (
"SELECT col, unicode_col \n"
"FROM tbl \n"
"WHERE col = 'abc' AND unicode_col = N'abc'"
)
self.assertEqual(query, query_expected)
def test_time_exp_mixd_case_col_1y(self):
col = column("MixedCase")
expr = MssqlEngineSpec.get_timestamp_expr(col, None, "P1Y")
result = str(expr.compile(None, dialect=mssql.dialect()))
self.assertEqual(result, "DATEADD(year, DATEDIFF(year, 0, [MixedCase]), 0)")
def test_convert_dttm(self):
dttm = self.get_dttm()
test_cases = (
(
MssqlEngineSpec.convert_dttm("DATE", dttm),
"CONVERT(DATE, '2019-01-02', 23)",
),
(
MssqlEngineSpec.convert_dttm("DATETIME", dttm),
"CONVERT(DATETIME, '2019-01-02T03:04:05.678', 126)",
),
(
MssqlEngineSpec.convert_dttm("SMALLDATETIME", dttm),
"CONVERT(SMALLDATETIME, '2019-01-02 03:04:05', 20)",
),
)
for actual, expected in test_cases:
self.assertEqual(actual, expected)
def test_extract_error_message(self):
test_mssql_exception = Exception(
"(8155, b\"No column name was specified for column 1 of 'inner_qry'."
"DB-Lib error message 20018, severity 16:\\nGeneral SQL Server error: "
'Check messages from the SQL Server\\n")'
)
error_message = MssqlEngineSpec.extract_error_message(test_mssql_exception)
expected_message = (
"mssql error: All your SQL functions need to "
"have an alias on MSSQL. For example: SELECT COUNT(*) AS C1 FROM TABLE1"
)
self.assertEqual(expected_message, error_message)
test_mssql_exception = Exception(
'(8200, b"A correlated expression is invalid because it is not in a '
"GROUP BY clause.\\n\")'"
)
error_message = MssqlEngineSpec.extract_error_message(test_mssql_exception)
expected_message = "mssql error: " + MssqlEngineSpec._extract_error_message(
test_mssql_exception
)
self.assertEqual(expected_message, error_message)
@mock.patch.object(
MssqlEngineSpec, "pyodbc_rows_to_tuples", return_value="converted"
)
def test_fetch_data(self, mock_pyodbc_rows_to_tuples):
data = [(1, "foo")]
with mock.patch.object(
BaseEngineSpec, "fetch_data", return_value=data
) as mock_fetch:
result = MssqlEngineSpec.fetch_data(None, 0)
mock_pyodbc_rows_to_tuples.assert_called_once_with(data)
self.assertEqual(result, "converted")
def test_column_datatype_to_string(self):
test_cases = (
(DATE(), "DATE"),
(VARCHAR(length=255), "VARCHAR(255)"),
(VARCHAR(length=255, collation="utf8_general_ci"), "VARCHAR(255)"),
(NVARCHAR(length=128), "NVARCHAR(128)"),
(TEXT(), "TEXT"),
(NTEXT(collation="utf8_general_ci"), "NTEXT"),
)
for original, expected in test_cases:
actual = MssqlEngineSpec.column_datatype_to_string(
original, mssql.dialect()
)
self.assertEqual(actual, expected)
def test_extract_errors(self):
msg = dedent(
"""
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (locahost)
"""
)
result = MssqlEngineSpec.extract_errors(Exception(msg))
assert result == [
RabbitaiError(
error_type=RabbitaiErrorType.CONNECTION_INVALID_HOSTNAME_ERROR,
message='The hostname "locahost" cannot be resolved.',
level=ErrorLevel.ERROR,
extra={
"engine_name": "Microsoft SQL",
"issue_codes": [
{
"code": 1007,
"message": "Issue 1007 - The hostname provided can't be resolved.",
}
],
},
)
]
msg = dedent(
"""
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (localhost)
Net-Lib error during Connection refused (61)
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (localhost)
Net-Lib error during Connection refused (61)
"""
)
result = MssqlEngineSpec.extract_errors(
Exception(msg), context={"port": 12345, "hostname": "localhost"}
)
assert result == [
RabbitaiError(
error_type=RabbitaiErrorType.CONNECTION_PORT_CLOSED_ERROR,
message='Port 12345 on hostname "localhost" refused the connection.',
level=ErrorLevel.ERROR,
extra={
"engine_name": "Microsoft SQL",
"issue_codes": [
{"code": 1008, "message": "Issue 1008 - The port is closed."}
],
},
)
]
msg = dedent(
"""
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (example.com)
Net-Lib error during Operation timed out (60)
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (example.com)
Net-Lib error during Operation timed out (60)
"""
)
result = MssqlEngineSpec.extract_errors(
Exception(msg), context={"port": 12345, "hostname": "example.com"}
)
assert result == [
RabbitaiError(
error_type=RabbitaiErrorType.CONNECTION_HOST_DOWN_ERROR,
message=(
'The host "example.com" might be down, '
"and can't be reached on port 12345."
),
level=ErrorLevel.ERROR,
extra={
"engine_name": "Microsoft SQL",
"issue_codes": [
{
"code": 1009,
"message": "Issue 1009 - The host might be down, and can't be reached on the provided port.",
}
],
},
)
]
msg = dedent(
"""
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (93.184.216.34)
Net-Lib error during Operation timed out (60)
DB-Lib error message 20009, severity 9:
Unable to connect: Adaptive Server is unavailable or does not exist (93.184.216.34)
Net-Lib error during Operation timed out (60)
"""
)
result = MssqlEngineSpec.extract_errors(
Exception(msg), context={"port": 12345, "hostname": "93.184.216.34"}
)
assert result == [
RabbitaiError(
error_type=RabbitaiErrorType.CONNECTION_HOST_DOWN_ERROR,
message=(
'The host "93.184.216.34" might be down, '
"and can't be reached on port 12345."
),
level=ErrorLevel.ERROR,
extra={
"engine_name": "Microsoft SQL",
"issue_codes": [
{
"code": 1009,
"message": "Issue 1009 - The host might be down, and can't be reached on the provided port.",
}
],
},
)
]
msg = dedent(
"""
DB-Lib error message 20018, severity 14:
General SQL Server error: Check messages from the SQL Server
DB-Lib error message 20002, severity 9:
Adaptive Server connection failed (mssqldb.cxiotftzsypc.us-west-2.rds.amazonaws.com)
DB-Lib error message 20002, severity 9:
Adaptive Server connection failed (mssqldb.cxiotftzsypc.us-west-2.rds.amazonaws.com)
"""
)
result = MssqlEngineSpec.extract_errors(
Exception(msg), context={"username": "testuser", "database": "testdb"}
)
assert result == [
RabbitaiError(
message='Either the username "testuser", password, or database name "testdb" is incorrect.',
error_type=RabbitaiErrorType.CONNECTION_ACCESS_DENIED_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "Microsoft SQL",
"issue_codes": [
{
"code": 1014,
"message": "Issue 1014 - Either the username or "
"the password is wrong.",
},
{
"code": 1015,
"message": "Issue 1015 - Either the database is "
"spelled incorrectly or does not exist.",
},
],
},
)
]
| true | true |
1c2c533fd377e36d4c3a4e909727f095973d5a8e | 12,981 | py | Python | nuitka/nodes/AttributeNodes.py | ronnymajani/Nuitka | 0083a931e0bd085e4ac9991074b3b8bc05be52b1 | [
"Apache-2.0"
] | null | null | null | nuitka/nodes/AttributeNodes.py | ronnymajani/Nuitka | 0083a931e0bd085e4ac9991074b3b8bc05be52b1 | [
"Apache-2.0"
] | null | null | null | nuitka/nodes/AttributeNodes.py | ronnymajani/Nuitka | 0083a931e0bd085e4ac9991074b3b8bc05be52b1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Attribute nodes
Knowing attributes of an object is very important, esp. when it comes to 'self'
and objects and classes.
There will be a methods "computeExpression*Attribute" to aid predicting them,
with many variants for setting, deleting, and accessing. Also there is some
complication in the form of special lookups, that won't go through the normal
path, but just check slots.
Due to ``getattr`` and ``setattr`` built-ins, there is also a different in the
computations for objects and for compile time known strings. This reflects what
CPython also does with "tp_getattr" and "tp_getattro".
These nodes are therefore mostly delegating the work to expressions they
work on, and let them decide and do the heavy lifting of optimization
and annotation is happening in the nodes that implement these compute slots.
"""
from .ExpressionBases import (
ExpressionChildHavingBase,
ExpressionChildrenHavingBase,
)
from .NodeBases import StatementChildHavingBase, StatementChildrenHavingBase
from .NodeMakingHelpers import wrapExpressionWithNodeSideEffects
class StatementAssignmentAttribute(StatementChildrenHavingBase):
"""Assignment to an attribute.
Typically from code like: source.attribute_name = expression
Both source and expression may be complex expressions, the source
is evaluated first. Assigning to an attribute has its on slot on
the source, which gets to decide if it knows it will work or not,
and what value it will be.
"""
__slots__ = ("attribute_name",)
kind = "STATEMENT_ASSIGNMENT_ATTRIBUTE"
named_children = ("source", "expression")
def __init__(self, expression, attribute_name, source, source_ref):
StatementChildrenHavingBase.__init__(
self,
values={"expression": expression, "source": source},
source_ref=source_ref,
)
self.attribute_name = attribute_name
def getDetails(self):
return {"attribute_name": self.attribute_name}
def getAttributeName(self):
return self.attribute_name
def computeStatement(self, trace_collection):
result, change_tags, change_desc = self.computeStatementSubExpressions(
trace_collection=trace_collection
)
if result is not self:
return result, change_tags, change_desc
return self.subnode_expression.computeExpressionSetAttribute(
set_node=self,
attribute_name=self.attribute_name,
value_node=self.subnode_source,
trace_collection=trace_collection,
)
@staticmethod
def getStatementNiceName():
return "attribute assignment statement"
class StatementDelAttribute(StatementChildHavingBase):
"""Deletion of an attribute.
Typically from code like: del source.attribute_name
The source may be complex expression. Deleting an attribute has its on
slot on the source, which gets to decide if it knows it will work or
not, and what value it will be.
"""
kind = "STATEMENT_DEL_ATTRIBUTE"
named_child = "expression"
__slots__ = ("attribute_name",)
def __init__(self, expression, attribute_name, source_ref):
StatementChildHavingBase.__init__(self, value=expression, source_ref=source_ref)
self.attribute_name = attribute_name
def getDetails(self):
return {"attribute_name": self.attribute_name}
def getAttributeName(self):
return self.attribute_name
def computeStatement(self, trace_collection):
result, change_tags, change_desc = self.computeStatementSubExpressions(
trace_collection=trace_collection
)
if result is not self:
return result, change_tags, change_desc
return self.subnode_expression.computeExpressionDelAttribute(
set_node=self,
attribute_name=self.attribute_name,
trace_collection=trace_collection,
)
@staticmethod
def getStatementNiceName():
return "attribute del statement"
class ExpressionAttributeLookup(ExpressionChildHavingBase):
"""Looking up an attribute of an object.
Typically code like: source.attribute_name
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP"
named_child = "expression"
__slots__ = ("attribute_name",)
def __init__(self, expression, attribute_name, source_ref):
ExpressionChildHavingBase.__init__(
self, value=expression, source_ref=source_ref
)
self.attribute_name = attribute_name
def getAttributeName(self):
return self.attribute_name
def getDetails(self):
return {"attribute_name": self.attribute_name}
def computeExpression(self, trace_collection):
return self.subnode_expression.computeExpressionAttribute(
lookup_node=self,
attribute_name=self.attribute_name,
trace_collection=trace_collection,
)
def mayRaiseException(self, exception_type):
return self.subnode_expression.mayRaiseExceptionAttributeLookup(
exception_type=exception_type, attribute_name=self.attribute_name
)
@staticmethod
def isKnownToBeIterable(count):
# TODO: Could be known. We would need for computeExpressionAttribute to
# either return a new node, or a decision maker.
return None
class ExpressionAttributeLookupSpecial(ExpressionAttributeLookup):
"""Special lookup up an attribute of an object.
Typically from code like this: with source: pass
These directly go to slots, and are performed for with statements
of Python2.7 or higher.
"""
kind = "EXPRESSION_ATTRIBUTE_LOOKUP_SPECIAL"
def computeExpression(self, trace_collection):
return self.subnode_expression.computeExpressionAttributeSpecial(
lookup_node=self,
attribute_name=self.attribute_name,
trace_collection=trace_collection,
)
class ExpressionBuiltinGetattr(ExpressionChildrenHavingBase):
"""Built-in "getattr".
Typical code like this: getattr(object_arg, name, default)
The default is optional, but computed before the lookup is done.
"""
kind = "EXPRESSION_BUILTIN_GETATTR"
named_children = ("expression", "name", "default")
def __init__(self, expression, name, default, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values={"expression": expression, "name": name, "default": default},
source_ref=source_ref,
)
def computeExpression(self, trace_collection):
trace_collection.onExceptionRaiseExit(BaseException)
default = self.subnode_default
if default is None or not default.mayHaveSideEffects():
attribute = self.subnode_name
attribute_name = attribute.getStringValue()
if attribute_name is not None:
source = self.subnode_expression
if source.isKnownToHaveAttribute(attribute_name):
# If source has side effects, they must be evaluated, before
# the lookup, meaning, a temporary variable should be assigned.
# For now, we give up in this case.
side_effects = source.extractSideEffects()
if not side_effects:
result = ExpressionAttributeLookup(
expression=source,
attribute_name=attribute_name,
source_ref=self.source_ref,
)
result = wrapExpressionWithNodeSideEffects(
new_node=result, old_node=attribute
)
return (
result,
"new_expression",
"""Replaced call to built-in 'getattr' with constant \
attribute '%s' to mere attribute lookup"""
% attribute_name,
)
return self, None, None
class ExpressionBuiltinSetattr(ExpressionChildrenHavingBase):
"""Built-in "setattr".
Typical code like this: setattr(source, attribute, value)
"""
kind = "EXPRESSION_BUILTIN_SETATTR"
named_children = ("expression", "attribute", "value")
def __init__(self, expression, name, value, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values={"expression": expression, "attribute": name, "value": value},
source_ref=source_ref,
)
def computeExpression(self, trace_collection):
trace_collection.onExceptionRaiseExit(BaseException)
# Note: Might be possible to predict or downgrade to mere attribute set.
return self, None, None
class ExpressionBuiltinHasattr(ExpressionChildrenHavingBase):
kind = "EXPRESSION_BUILTIN_HASATTR"
named_children = ("expression", "attribute")
def __init__(self, expression, name, source_ref):
ExpressionChildrenHavingBase.__init__(
self,
values={"expression": expression, "attribute": name},
source_ref=source_ref,
)
def computeExpression(self, trace_collection):
# We do at least for compile time constants optimization here, but more
# could be done, were we to know shapes.
source = self.subnode_expression
if source.isCompileTimeConstant():
attribute = self.subnode_attribute
attribute_name = attribute.getStringValue()
if attribute_name is not None:
# If source or attribute have side effects, they must be
# evaluated, before the lookup.
(
result,
tags,
change_desc,
) = trace_collection.getCompileTimeComputationResult(
node=self,
computation=lambda: hasattr(
source.getCompileTimeConstant(), attribute_name
),
description="Call to 'hasattr' pre-computed.",
)
result = wrapExpressionWithNodeSideEffects(
new_node=result, old_node=attribute
)
result = wrapExpressionWithNodeSideEffects(
new_node=result, old_node=source
)
return result, tags, change_desc
trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
class ExpressionAttributeCheck(ExpressionChildHavingBase):
kind = "EXPRESSION_ATTRIBUTE_CHECK"
named_child = "expression"
__slots__ = ("attribute_name",)
def __init__(self, expression, attribute_name, source_ref):
ExpressionChildHavingBase.__init__(
self, value=expression, source_ref=source_ref
)
self.attribute_name = attribute_name
def getDetails(self):
return {"attribute_name": self.attribute_name}
def computeExpression(self, trace_collection):
# We do at least for compile time constants optimization here, but more
# could be done, were we to know shapes.
source = self.subnode_expression
if source.isCompileTimeConstant():
(
result,
tags,
change_desc,
) = trace_collection.getCompileTimeComputationResult(
node=self,
computation=lambda: hasattr(
source.getCompileTimeConstant(), self.attribute_name
),
description="Attribute check has been pre-computed.",
)
# If source has has side effects, they must be evaluated.
result = wrapExpressionWithNodeSideEffects(new_node=result, old_node=source)
return result, tags, change_desc
trace_collection.onExceptionRaiseExit(BaseException)
return self, None, None
@staticmethod
def mayRaiseException(exception_type):
return False
def getAttributeName(self):
return self.attribute_name
| 33.199488 | 88 | 0.657191 |
from .ExpressionBases import (
ExpressionChildHavingBase,
ExpressionChildrenHavingBase,
)
from .NodeBases import StatementChildHavingBase, StatementChildrenHavingBase
from .NodeMakingHelpers import wrapExpressionWithNodeSideEffects
class StatementAssignmentAttribute(StatementChildrenHavingBase):
    """Statement node for ``expression.attribute_name = source`` assignments."""
    __slots__ = ("attribute_name",)
    kind = "STATEMENT_ASSIGNMENT_ATTRIBUTE"
    named_children = ("source", "expression")
    def __init__(self, expression, attribute_name, source, source_ref):
        # "source" is the assigned value, "expression" the object assigned to.
        StatementChildrenHavingBase.__init__(
            self,
            values={"expression": expression, "source": source},
            source_ref=source_ref,
        )
        self.attribute_name = attribute_name
    def getDetails(self):
        # Details beyond children, used for node cloning and dumps.
        return {"attribute_name": self.attribute_name}
    def getAttributeName(self):
        return self.attribute_name
    def computeStatement(self, trace_collection):
        # Compute child expressions first; if that already replaced this
        # statement (e.g. a guaranteed raise), propagate that replacement.
        result, change_tags, change_desc = self.computeStatementSubExpressions(
            trace_collection=trace_collection
        )
        if result is not self:
            return result, change_tags, change_desc
        # Delegate to the target expression, which may know the attribute.
        return self.subnode_expression.computeExpressionSetAttribute(
            set_node=self,
            attribute_name=self.attribute_name,
            value_node=self.subnode_source,
            trace_collection=trace_collection,
        )
    @staticmethod
    def getStatementNiceName():
        return "attribute assignment statement"
class StatementDelAttribute(StatementChildHavingBase):
    """Statement node for ``del expression.attribute_name``."""
    kind = "STATEMENT_DEL_ATTRIBUTE"
    named_child = "expression"
    __slots__ = ("attribute_name",)
    def __init__(self, expression, attribute_name, source_ref):
        StatementChildHavingBase.__init__(self, value=expression, source_ref=source_ref)
        self.attribute_name = attribute_name
    def getDetails(self):
        # Details beyond children, used for node cloning and dumps.
        return {"attribute_name": self.attribute_name}
    def getAttributeName(self):
        return self.attribute_name
    def computeStatement(self, trace_collection):
        # Compute child expressions first; propagate any replacement result.
        result, change_tags, change_desc = self.computeStatementSubExpressions(
            trace_collection=trace_collection
        )
        if result is not self:
            return result, change_tags, change_desc
        # Delegate to the target expression's del-attribute handling.
        return self.subnode_expression.computeExpressionDelAttribute(
            set_node=self,
            attribute_name=self.attribute_name,
            trace_collection=trace_collection,
        )
    @staticmethod
    def getStatementNiceName():
        return "attribute del statement"
class ExpressionAttributeLookup(ExpressionChildHavingBase):
    """Expression node for ``expression.attribute_name`` lookups."""
    kind = "EXPRESSION_ATTRIBUTE_LOOKUP"
    named_child = "expression"
    __slots__ = ("attribute_name",)
    def __init__(self, expression, attribute_name, source_ref):
        ExpressionChildHavingBase.__init__(
            self, value=expression, source_ref=source_ref
        )
        self.attribute_name = attribute_name
    def getAttributeName(self):
        return self.attribute_name
    def getDetails(self):
        # Details beyond children, used for node cloning and dumps.
        return {"attribute_name": self.attribute_name}
    def computeExpression(self, trace_collection):
        # The looked-up expression may know the attribute and optimize.
        return self.subnode_expression.computeExpressionAttribute(
            lookup_node=self,
            attribute_name=self.attribute_name,
            trace_collection=trace_collection,
        )
    def mayRaiseException(self, exception_type):
        # Raising depends on whether the child's attribute lookup can raise.
        return self.subnode_expression.mayRaiseExceptionAttributeLookup(
            exception_type=exception_type, attribute_name=self.attribute_name
        )
    @staticmethod
    def isKnownToBeIterable(count):
        # Unknown: the lookup result's iterability cannot be decided here.
        return None
class ExpressionAttributeLookupSpecial(ExpressionAttributeLookup):
    """Attribute lookup variant dispatched through the "special" path.

    Differs from the parent only in which computation hook of the child
    expression is invoked.
    """
    kind = "EXPRESSION_ATTRIBUTE_LOOKUP_SPECIAL"
    def computeExpression(self, trace_collection):
        return self.subnode_expression.computeExpressionAttributeSpecial(
            lookup_node=self,
            attribute_name=self.attribute_name,
            trace_collection=trace_collection,
        )
class ExpressionBuiltinGetattr(ExpressionChildrenHavingBase):
    """Expression node for the built-in ``getattr(expression, name, default)``.

    When the attribute name is a known string, the default is absent or
    side-effect free, and the source is known to have the attribute, the call
    is replaced by a plain attribute lookup.
    """
    kind = "EXPRESSION_BUILTIN_GETATTR"
    named_children = ("expression", "name", "default")
    def __init__(self, expression, name, default, source_ref):
        ExpressionChildrenHavingBase.__init__(
            self,
            values={"expression": expression, "name": name, "default": default},
            source_ref=source_ref,
        )
    def computeExpression(self, trace_collection):
        # getattr may raise for missing attributes; record the exception exit.
        trace_collection.onExceptionRaiseExit(BaseException)
        default = self.subnode_default
        if default is None or not default.mayHaveSideEffects():
            attribute = self.subnode_name
            attribute_name = attribute.getStringValue()
            if attribute_name is not None:
                source = self.subnode_expression
                if source.isKnownToHaveAttribute(attribute_name):
                    # Only rewrite when the source carries no side effects;
                    # otherwise evaluation order would be disturbed.
                    side_effects = source.extractSideEffects()
                    if not side_effects:
                        result = ExpressionAttributeLookup(
                            expression=source,
                            attribute_name=attribute_name,
                            source_ref=self.source_ref,
                        )
                        # Keep any side effects of the name expression.
                        result = wrapExpressionWithNodeSideEffects(
                            new_node=result, old_node=attribute
                        )
                        return (
                            result,
                            "new_expression",
                            """Replaced call to built-in 'getattr' with constant \
attribute '%s' to mere attribute lookup"""
                            % attribute_name,
                        )
        return self, None, None
class ExpressionBuiltinSetattr(ExpressionChildrenHavingBase):
    """Expression node for the built-in ``setattr(expression, name, value)``."""
    kind = "EXPRESSION_BUILTIN_SETATTR"
    named_children = ("expression", "attribute", "value")
    def __init__(self, expression, name, value, source_ref):
        ExpressionChildrenHavingBase.__init__(
            self,
            values={"expression": expression, "attribute": name, "value": value},
            source_ref=source_ref,
        )
    def computeExpression(self, trace_collection):
        # No folding attempted; setattr can always raise, so mark the exit.
        trace_collection.onExceptionRaiseExit(BaseException)
        return self, None, None
class ExpressionBuiltinHasattr(ExpressionChildrenHavingBase):
    """Expression node for the built-in ``hasattr(expression, attribute)``.

    Pre-computed when the source is a compile-time constant and the attribute
    name is a known string; side effects of both children are preserved.
    """
    kind = "EXPRESSION_BUILTIN_HASATTR"
    named_children = ("expression", "attribute")
    def __init__(self, expression, name, source_ref):
        ExpressionChildrenHavingBase.__init__(
            self,
            values={"expression": expression, "attribute": name},
            source_ref=source_ref,
        )
    def computeExpression(self, trace_collection):
        source = self.subnode_expression
        if source.isCompileTimeConstant():
            attribute = self.subnode_attribute
            attribute_name = attribute.getStringValue()
            if attribute_name is not None:
                (
                    result,
                    tags,
                    change_desc,
                ) = trace_collection.getCompileTimeComputationResult(
                    node=self,
                    computation=lambda: hasattr(
                        source.getCompileTimeConstant(), attribute_name
                    ),
                    description="Call to 'hasattr' pre-computed.",
                )
                # Preserve side effects of the attribute name expression, then
                # those of the source expression (evaluated first at runtime).
                result = wrapExpressionWithNodeSideEffects(
                    new_node=result, old_node=attribute
                )
                result = wrapExpressionWithNodeSideEffects(
                    new_node=result, old_node=source
                )
                return result, tags, change_desc
        trace_collection.onExceptionRaiseExit(BaseException)
        return self, None, None
class ExpressionAttributeCheck(ExpressionChildHavingBase):
    """Expression node checking an attribute's presence, ``hasattr`` style.

    Holds one child expression and a fixed attribute name; the optimization
    pass may fold the check when the child is a compile-time constant.
    """
    kind = "EXPRESSION_ATTRIBUTE_CHECK"
    named_child = "expression"
    __slots__ = ("attribute_name",)
    def __init__(self, expression, attribute_name, source_ref):
        ExpressionChildHavingBase.__init__(
            self, value=expression, source_ref=source_ref
        )
        self.attribute_name = attribute_name
    def getDetails(self):
        # Details beyond children, used for node cloning and dumps.
        return {"attribute_name": self.attribute_name}
    def computeExpression(self, trace_collection):
        source = self.subnode_expression
        if source.isCompileTimeConstant():
            # Fold the check at compile time for constants.
            (
                result,
                tags,
                change_desc,
            ) = trace_collection.getCompileTimeComputationResult(
                node=self,
                computation=lambda: hasattr(
                    source.getCompileTimeConstant(), self.attribute_name
                ),
                description="Attribute check has been pre-computed.",
            )
            # If source has side effects, they must still be evaluated.
            result = wrapExpressionWithNodeSideEffects(new_node=result, old_node=source)
            return result, tags, change_desc
        trace_collection.onExceptionRaiseExit(BaseException)
        return self, None, None
    @staticmethod
    def mayRaiseException(exception_type):
        # The check itself cannot raise; hasattr swallows lookup errors.
        return False
    def getAttributeName(self):
        return self.attribute_name
1c2c5435cf1ab2a2633586653446fc7f3f8dc9f4 | 2,289 | py | Python | rusentrel/classic/ctx/att_hidden_z_yang.py | nicolay-r/attitude-extraction-with-attention-and-ds | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | [
"MIT"
] | null | null | null | rusentrel/classic/ctx/att_hidden_z_yang.py | nicolay-r/attitude-extraction-with-attention-and-ds | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | [
"MIT"
] | 1 | 2020-12-16T18:21:11.000Z | 2020-12-30T10:08:27.000Z | rusentrel/classic/ctx/att_hidden_z_yang.py | nicolay-r/attitude-extraction-with-attention-and-ds | fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d | [
"MIT"
] | 1 | 2021-03-29T20:58:26.000Z | 2021-03-29T20:58:26.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import tensorflow as tf
sys.path.append('../../../')
from io_utils import RuSentRelBasedExperimentsIOUtils
from arekit.contrib.networks.context.architectures.att_self_z_yang_bilstm import AttentionSelfZYangBiLSTM
from arekit.contrib.networks.context.configurations.att_self_z_yang_bilstm import AttentionSelfZYangBiLSTMConfig
from arekit.common.evaluation.evaluators.two_class import TwoClassEvaluator
from arekit.contrib.experiments.nn_io.rusentrel import RuSentRelBasedNeuralNetworkIO
from arekit.contrib.experiments.single.model import SingleInstanceTensorflowModel
from rusentrel.ctx_names import ModelNames
from arekit.contrib.experiments.engine import run_testing
from arekit.contrib.experiments.callback import CustomCallback
from rusentrel.classic.common import \
classic_ctx_common_config_settings, \
classic_common_callback_modification_func
def ctx_att_hidden_zyang_bilstm_custom_config(config):
    """Apply model-specific tweaks to a Z. Yang self-attention BiLSTM config."""
    assert isinstance(config, AttentionSelfZYangBiLSTMConfig)
    # Use two bags per minibatch for this architecture.
    config.modify_bags_per_minibatch(2)
    # Initialize trainable weights with the Xavier/Glorot scheme.
    config.modify_weight_initializer(tf.contrib.layers.xavier_initializer())
def run_testing_att_hidden_zyang_bilstm(name_prefix=u'',
                                        cv_count=1,
                                        custom_config_func=ctx_att_hidden_zyang_bilstm_custom_config,
                                        custom_callback_func=classic_common_callback_modification_func):
    """Run the experiment engine for the AttSelfZYangBiLSTM context model.

    name_prefix -- prepended to the model name used for output folders.
    cv_count -- number of cross-validation folds (1 means a fixed split).
    custom_config_func -- hook that adjusts the network configuration.
    custom_callback_func -- hook that adjusts the training callback.
    """
    # All wiring is delegated to the shared run_testing engine; this function
    # only binds the architecture, its configuration, and the I/O classes.
    run_testing(full_model_name=name_prefix + ModelNames().AttSelfZYangBiLSTM,
                create_network=AttentionSelfZYangBiLSTM,
                create_config=AttentionSelfZYangBiLSTMConfig,
                create_nn_io=RuSentRelBasedNeuralNetworkIO,
                cv_count=cv_count,
                create_model=SingleInstanceTensorflowModel,
                evaluator_class=TwoClassEvaluator,
                create_callback=CustomCallback,
                experiments_io=RuSentRelBasedExperimentsIOUtils(),
                common_callback_modification_func=custom_callback_func,
                custom_config_modification_func=custom_config_func,
                common_config_modification_func=classic_ctx_common_config_settings)
# Allow running this experiment configuration directly as a script.
if __name__ == "__main__":
    run_testing_att_hidden_zyang_bilstm()
| 43.188679 | 112 | 0.757536 |
import sys
import tensorflow as tf
sys.path.append('../../../')
from io_utils import RuSentRelBasedExperimentsIOUtils
from arekit.contrib.networks.context.architectures.att_self_z_yang_bilstm import AttentionSelfZYangBiLSTM
from arekit.contrib.networks.context.configurations.att_self_z_yang_bilstm import AttentionSelfZYangBiLSTMConfig
from arekit.common.evaluation.evaluators.two_class import TwoClassEvaluator
from arekit.contrib.experiments.nn_io.rusentrel import RuSentRelBasedNeuralNetworkIO
from arekit.contrib.experiments.single.model import SingleInstanceTensorflowModel
from rusentrel.ctx_names import ModelNames
from arekit.contrib.experiments.engine import run_testing
from arekit.contrib.experiments.callback import CustomCallback
from rusentrel.classic.common import \
classic_ctx_common_config_settings, \
classic_common_callback_modification_func
def ctx_att_hidden_zyang_bilstm_custom_config(config):
    """Apply model-specific tweaks to a Z. Yang self-attention BiLSTM config."""
    assert(isinstance(config, AttentionSelfZYangBiLSTMConfig))
    # Two bags per minibatch; Xavier/Glorot weight initialization.
    config.modify_bags_per_minibatch(2)
    config.modify_weight_initializer(tf.contrib.layers.xavier_initializer())
def run_testing_att_hidden_zyang_bilstm(name_prefix=u'',
                                        cv_count=1,
                                        custom_config_func=ctx_att_hidden_zyang_bilstm_custom_config,
                                        custom_callback_func=classic_common_callback_modification_func):
    """Run the experiment engine for the AttSelfZYangBiLSTM context model.

    name_prefix -- prepended to the model name used for output folders.
    cv_count -- number of cross-validation folds (1 means a fixed split).
    custom_config_func -- hook that adjusts the network configuration.
    custom_callback_func -- hook that adjusts the training callback.
    """
    # All wiring is delegated to the shared run_testing engine; this function
    # only binds the architecture, its configuration, and the I/O classes.
    run_testing(full_model_name=name_prefix + ModelNames().AttSelfZYangBiLSTM,
                create_network=AttentionSelfZYangBiLSTM,
                create_config=AttentionSelfZYangBiLSTMConfig,
                create_nn_io=RuSentRelBasedNeuralNetworkIO,
                cv_count=cv_count,
                create_model=SingleInstanceTensorflowModel,
                evaluator_class=TwoClassEvaluator,
                create_callback=CustomCallback,
                experiments_io=RuSentRelBasedExperimentsIOUtils(),
                common_callback_modification_func=custom_callback_func,
                custom_config_modification_func=custom_config_func,
                common_config_modification_func=classic_ctx_common_config_settings)
# Allow running this experiment configuration directly as a script.
if __name__ == "__main__":
    run_testing_att_hidden_zyang_bilstm()
| true | true |
1c2c546cd2d3e521fb82afe1504f6f706a98dc6b | 59,591 | py | Python | tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py | thrau/moto | 125aeea70ae8dad9f21545e34ae6214f696a8eb2 | [
"Apache-2.0"
] | null | null | null | tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py | thrau/moto | 125aeea70ae8dad9f21545e34ae6214f696a8eb2 | [
"Apache-2.0"
] | 1 | 2021-09-13T04:39:03.000Z | 2021-09-13T04:39:03.000Z | tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py | thrau/moto | 125aeea70ae8dad9f21545e34ae6214f696a8eb2 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
import json
from collections import OrderedDict
from datetime import datetime, timedelta
import pytz
import boto3
from botocore.exceptions import ClientError, ValidationError
import sure # noqa
import pytest
from moto import mock_cloudformation, mock_dynamodb2, mock_s3, mock_sqs, mock_ec2
from moto.core import ACCOUNT_ID
from .test_cloudformation_stack_crud import dummy_template_json2, dummy_template_json4
from tests import EXAMPLE_AMI_ID
dummy_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1",
"Resources": {
"EC2Instance1": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "dummy",
"InstanceType": "t2.micro",
"Tags": [
{"Key": "Description", "Value": "Test tag"},
{"Key": "Name", "Value": "Name tag for tests"},
],
},
}
},
}
dummy_template3 = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 3",
"Resources": {
"VPC": {"Properties": {"CidrBlock": "192.168.0.0/16"}, "Type": "AWS::EC2::VPC"}
},
}
dummy_template_yaml = """---
AWSTemplateFormatVersion: 2010-09-09
Description: Stack1 with yaml template
Resources:
EC2Instance1:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-03cf127a
KeyName: dummy
InstanceType: t2.micro
Tags:
- Key: Description
Value: Test tag
- Key: Name
Value: Name tag for tests
"""
dummy_template_yaml_with_short_form_func = """---
AWSTemplateFormatVersion: 2010-09-09
Description: Stack1 with yaml template
Resources:
EC2Instance1:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-03cf127a
KeyName: !Join [ ":", [ du, m, my ] ]
InstanceType: t2.micro
Tags:
- Key: Description
Value: Test tag
- Key: Name
Value: Name tag for tests
"""
dummy_template_yaml_with_ref = """---
AWSTemplateFormatVersion: 2010-09-09
Description: Stack1 with yaml template
Parameters:
TagDescription:
Type: String
TagName:
Type: String
Resources:
EC2Instance1:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-03cf127a
KeyName: dummy
InstanceType: t2.micro
Tags:
- Key: Description
Value:
Ref: TagDescription
- Key: Name
Value: !Ref TagName
"""
dummy_empty_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Parameters": {},
"Resources": {},
}
dummy_parametrized_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Parameters": {
"KeyName": {"Description": "A template parameter", "Type": "String"}
},
"Resources": {},
}
dummy_update_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Parameters": {
"KeyName": {
"Description": "Name of an existing EC2 KeyPair",
"Type": "AWS::EC2::KeyPair::KeyName",
"ConstraintDescription": "must be the name of an existing EC2 KeyPair.",
}
},
"Resources": {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {"ImageId": EXAMPLE_AMI_ID},
}
},
}
dummy_output_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1",
"Resources": {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {"ImageId": EXAMPLE_AMI_ID},
}
},
"Outputs": {
"StackVPC": {
"Description": "The ID of the VPC",
"Value": "VPCID",
"Export": {"Name": "My VPC ID"},
}
},
}
dummy_import_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"Queue": {
"Type": "AWS::SQS::Queue",
"Properties": {
"QueueName": {"Fn::ImportValue": "My VPC ID"},
"VisibilityTimeout": 60,
},
}
},
}
dummy_redrive_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"MainQueue": {
"Type": "AWS::SQS::Queue",
"Properties": {
"QueueName": "mainqueue.fifo",
"FifoQueue": True,
"ContentBasedDeduplication": False,
"RedrivePolicy": {
"deadLetterTargetArn": {"Fn::GetAtt": ["DeadLetterQueue", "Arn"]},
"maxReceiveCount": 5,
},
},
},
"DeadLetterQueue": {
"Type": "AWS::SQS::Queue",
"Properties": {"FifoQueue": True},
},
},
}
dummy_template_special_chars_in_description = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1 <env>",
"Resources": {
"EC2Instance1": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "dummy",
"InstanceType": "t2.micro",
"Tags": [
{"Key": "Description", "Value": "Test tag"},
{"Key": "Name", "Value": "Name tag for tests"},
],
},
}
},
}
dummy_template_json = json.dumps(dummy_template)
dummy_template_special_chars_in_description_json = json.dumps(
dummy_template_special_chars_in_description
)
dummy_empty_template_json = json.dumps(dummy_empty_template)
dummy_parametrized_template_json = json.dumps(dummy_parametrized_template)
dummy_update_template_json = json.dumps(dummy_update_template)
dummy_output_template_json = json.dumps(dummy_output_template)
dummy_import_template_json = json.dumps(dummy_import_template)
dummy_redrive_template_json = json.dumps(dummy_redrive_template)
@mock_cloudformation
def test_boto3_describe_stack_instances():
    """describe_stack_instance returns the right Region/Account per instance."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-2"],
    )
    usw2_instance = cf_conn.describe_stack_instance(
        StackSetName="test_stack_set",
        StackInstanceAccount=ACCOUNT_ID,
        StackInstanceRegion="us-west-2",
    )
    use1_instance = cf_conn.describe_stack_instance(
        StackSetName="test_stack_set",
        StackInstanceAccount=ACCOUNT_ID,
        StackInstanceRegion="us-east-1",
    )
    usw2_instance["StackInstance"].should.have.key("Region").which.should.equal(
        "us-west-2"
    )
    usw2_instance["StackInstance"].should.have.key("Account").which.should.equal(
        ACCOUNT_ID
    )
    use1_instance["StackInstance"].should.have.key("Region").which.should.equal(
        "us-east-1"
    )
    use1_instance["StackInstance"].should.have.key("Account").which.should.equal(
        ACCOUNT_ID
    )
@mock_cloudformation
def test_boto3_list_stacksets_length():
    """list_stack_sets returns one summary per created stack set."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_set(
        StackSetName="test_stack_set2", TemplateBody=dummy_template_yaml
    )
    stacksets = cf_conn.list_stack_sets()
    stacksets.should.have.length_of(2)
@mock_cloudformation
def test_boto3_filter_stacks():
    """list_stacks filters summaries by StackStatusFilter."""
    conn = boto3.client("cloudformation", region_name="us-east-1")
    conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    conn.create_stack(StackName="test_stack2", TemplateBody=dummy_template_json)
    # Updating one stack moves it from CREATE_COMPLETE to UPDATE_COMPLETE.
    conn.update_stack(StackName="test_stack", TemplateBody=dummy_template_json2)
    stacks = conn.list_stacks(StackStatusFilter=["CREATE_COMPLETE"])
    stacks.get("StackSummaries").should.have.length_of(1)
    stacks = conn.list_stacks(StackStatusFilter=["UPDATE_COMPLETE"])
    stacks.get("StackSummaries").should.have.length_of(1)
@mock_cloudformation
def test_boto3_list_stacksets_contents():
    """list_stack_sets summaries carry the name and ACTIVE status."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    stacksets = cf_conn.list_stack_sets()
    stacksets["Summaries"][0].should.have.key("StackSetName").which.should.equal(
        "test_stack_set"
    )
    stacksets["Summaries"][0].should.have.key("Status").which.should.equal("ACTIVE")
@mock_cloudformation
def test_boto3_stop_stack_set_operation():
    """stop_stack_set_operation marks the latest operation STOPPED."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-1", "us-west-2"],
    )
    # Grab the id of the most recent operation (the instance creation).
    operation_id = cf_conn.list_stack_set_operations(StackSetName="test_stack_set")[
        "Summaries"
    ][-1]["OperationId"]
    cf_conn.stop_stack_set_operation(
        StackSetName="test_stack_set", OperationId=operation_id
    )
    list_operation = cf_conn.list_stack_set_operations(StackSetName="test_stack_set")
    list_operation["Summaries"][-1]["Status"].should.equal("STOPPED")
@mock_cloudformation
def test_boto3_describe_stack_set_operation():
    """describe_stack_set_operation reports status/action; unknown ids raise."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-1", "us-west-2"],
    )
    operation_id = cf_conn.list_stack_set_operations(StackSetName="test_stack_set")[
        "Summaries"
    ][-1]["OperationId"]
    cf_conn.stop_stack_set_operation(
        StackSetName="test_stack_set", OperationId=operation_id
    )
    response = cf_conn.describe_stack_set_operation(
        StackSetName="test_stack_set", OperationId=operation_id
    )
    response["StackSetOperation"]["Status"].should.equal("STOPPED")
    response["StackSetOperation"]["Action"].should.equal("CREATE")
    # A non-existing operation id must produce a ValidationError.
    with pytest.raises(ClientError) as exp:
        cf_conn.describe_stack_set_operation(
            StackSetName="test_stack_set", OperationId="non_existing_operation"
        )
    exp_err = exp.value.response.get("Error")
    exp_metadata = exp.value.response.get("ResponseMetadata")
    exp_err.get("Code").should.match(r"ValidationError")
    exp_err.get("Message").should.match(
        r"Stack with id non_existing_operation does not exist"
    )
    exp_metadata.get("HTTPStatusCode").should.equal(400)
@mock_cloudformation
def test_boto3_list_stack_set_operation_results():
    """list_stack_set_operation_results returns one summary per region."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-1", "us-west-2"],
    )
    operation_id = cf_conn.list_stack_set_operations(StackSetName="test_stack_set")[
        "Summaries"
    ][-1]["OperationId"]
    cf_conn.stop_stack_set_operation(
        StackSetName="test_stack_set", OperationId=operation_id
    )
    response = cf_conn.list_stack_set_operation_results(
        StackSetName="test_stack_set", OperationId=operation_id
    )
    response["Summaries"].should.have.length_of(3)
    response["Summaries"][0].should.have.key("Account").which.should.equal(ACCOUNT_ID)
    response["Summaries"][1].should.have.key("Status").which.should.equal("STOPPED")
@mock_cloudformation
def test_boto3_update_stack_instances():
    """update_stack_instances applies parameter overrides only to the
    targeted regions; untouched instances keep empty overrides."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    param = [
        {"ParameterKey": "SomeParam", "ParameterValue": "StackSetValue"},
        {"ParameterKey": "AnotherParam", "ParameterValue": "StackSetValue2"},
    ]
    param_overrides = [
        {"ParameterKey": "SomeParam", "ParameterValue": "OverrideValue"},
        {"ParameterKey": "AnotherParam", "ParameterValue": "OverrideValue2"},
    ]
    cf_conn.create_stack_set(
        StackSetName="test_stack_set",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=param,
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-1", "us-west-2"],
    )
    # Only override parameters for the two western regions.
    cf_conn.update_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-west-1", "us-west-2"],
        ParameterOverrides=param_overrides,
    )
    usw2_instance = cf_conn.describe_stack_instance(
        StackSetName="test_stack_set",
        StackInstanceAccount=ACCOUNT_ID,
        StackInstanceRegion="us-west-2",
    )
    usw1_instance = cf_conn.describe_stack_instance(
        StackSetName="test_stack_set",
        StackInstanceAccount=ACCOUNT_ID,
        StackInstanceRegion="us-west-1",
    )
    use1_instance = cf_conn.describe_stack_instance(
        StackSetName="test_stack_set",
        StackInstanceAccount=ACCOUNT_ID,
        StackInstanceRegion="us-east-1",
    )
    usw2_instance["StackInstance"]["ParameterOverrides"][0][
        "ParameterKey"
    ].should.equal(param_overrides[0]["ParameterKey"])
    usw2_instance["StackInstance"]["ParameterOverrides"][0][
        "ParameterValue"
    ].should.equal(param_overrides[0]["ParameterValue"])
    usw2_instance["StackInstance"]["ParameterOverrides"][1][
        "ParameterKey"
    ].should.equal(param_overrides[1]["ParameterKey"])
    usw2_instance["StackInstance"]["ParameterOverrides"][1][
        "ParameterValue"
    ].should.equal(param_overrides[1]["ParameterValue"])
    usw1_instance["StackInstance"]["ParameterOverrides"][0][
        "ParameterKey"
    ].should.equal(param_overrides[0]["ParameterKey"])
    usw1_instance["StackInstance"]["ParameterOverrides"][0][
        "ParameterValue"
    ].should.equal(param_overrides[0]["ParameterValue"])
    usw1_instance["StackInstance"]["ParameterOverrides"][1][
        "ParameterKey"
    ].should.equal(param_overrides[1]["ParameterKey"])
    usw1_instance["StackInstance"]["ParameterOverrides"][1][
        "ParameterValue"
    ].should.equal(param_overrides[1]["ParameterValue"])
    # us-east-1 was not updated, so it must have no overrides.
    use1_instance["StackInstance"]["ParameterOverrides"].should.be.empty
@mock_cloudformation
def test_boto3_delete_stack_instances():
    """delete_stack_instances removes only the targeted region's instance."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-2"],
    )
    cf_conn.delete_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1"],
        RetainStacks=False,
    )
    cf_conn.list_stack_instances(StackSetName="test_stack_set")[
        "Summaries"
    ].should.have.length_of(1)
    cf_conn.list_stack_instances(StackSetName="test_stack_set")["Summaries"][0][
        "Region"
    ].should.equal("us-west-2")
@mock_cloudformation
def test_boto3_create_stack_instances():
    """create_stack_instances creates one instance per requested region."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-2"],
    )
    cf_conn.list_stack_instances(StackSetName="test_stack_set")[
        "Summaries"
    ].should.have.length_of(2)
    cf_conn.list_stack_instances(StackSetName="test_stack_set")["Summaries"][0][
        "Account"
    ].should.equal(ACCOUNT_ID)
@mock_cloudformation
def test_boto3_create_stack_instances_with_param_overrides():
    """ParameterOverrides given at instance creation are stored per instance."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    param = [
        {"ParameterKey": "TagDescription", "ParameterValue": "StackSetValue"},
        {"ParameterKey": "TagName", "ParameterValue": "StackSetValue2"},
    ]
    param_overrides = [
        {"ParameterKey": "TagDescription", "ParameterValue": "OverrideValue"},
        {"ParameterKey": "TagName", "ParameterValue": "OverrideValue2"},
    ]
    cf_conn.create_stack_set(
        StackSetName="test_stack_set",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=param,
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-2"],
        ParameterOverrides=param_overrides,
    )
    usw2_instance = cf_conn.describe_stack_instance(
        StackSetName="test_stack_set",
        StackInstanceAccount=ACCOUNT_ID,
        StackInstanceRegion="us-west-2",
    )
    usw2_instance["StackInstance"]["ParameterOverrides"][0][
        "ParameterKey"
    ].should.equal(param_overrides[0]["ParameterKey"])
    usw2_instance["StackInstance"]["ParameterOverrides"][1][
        "ParameterKey"
    ].should.equal(param_overrides[1]["ParameterKey"])
    usw2_instance["StackInstance"]["ParameterOverrides"][0][
        "ParameterValue"
    ].should.equal(param_overrides[0]["ParameterValue"])
    usw2_instance["StackInstance"]["ParameterOverrides"][1][
        "ParameterValue"
    ].should.equal(param_overrides[1]["ParameterValue"])
@mock_cloudformation
def test_update_stack_set():
    """update_stack_set replaces the stack set's parameter values."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    param = [
        {"ParameterKey": "TagDescription", "ParameterValue": "StackSetValue"},
        {"ParameterKey": "TagName", "ParameterValue": "StackSetValue2"},
    ]
    param_overrides = [
        {"ParameterKey": "TagDescription", "ParameterValue": "OverrideValue"},
        {"ParameterKey": "TagName", "ParameterValue": "OverrideValue2"},
    ]
    cf_conn.create_stack_set(
        StackSetName="test_stack_set",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=param,
    )
    cf_conn.update_stack_set(
        StackSetName="test_stack_set",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=param_overrides,
    )
    stackset = cf_conn.describe_stack_set(StackSetName="test_stack_set")
    stackset["StackSet"]["Parameters"][0]["ParameterValue"].should.equal(
        param_overrides[0]["ParameterValue"]
    )
    stackset["StackSet"]["Parameters"][1]["ParameterValue"].should.equal(
        param_overrides[1]["ParameterValue"]
    )
    stackset["StackSet"]["Parameters"][0]["ParameterKey"].should.equal(
        param_overrides[0]["ParameterKey"]
    )
    stackset["StackSet"]["Parameters"][1]["ParameterKey"].should.equal(
        param_overrides[1]["ParameterKey"]
    )
@mock_cloudformation
def test_update_stack_set_with_previous_value():
    """UsePreviousValue keeps the original parameter value on update."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    param = [
        {"ParameterKey": "TagDescription", "ParameterValue": "StackSetValue"},
        {"ParameterKey": "TagName", "ParameterValue": "StackSetValue2"},
    ]
    param_overrides = [
        {"ParameterKey": "TagDescription", "ParameterValue": "OverrideValue"},
        {"ParameterKey": "TagName", "UsePreviousValue": True},
    ]
    cf_conn.create_stack_set(
        StackSetName="test_stack_set",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=param,
    )
    cf_conn.update_stack_set(
        StackSetName="test_stack_set",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=param_overrides,
    )
    stackset = cf_conn.describe_stack_set(StackSetName="test_stack_set")
    stackset["StackSet"]["Parameters"][0]["ParameterValue"].should.equal(
        param_overrides[0]["ParameterValue"]
    )
    # TagName used UsePreviousValue, so the original value must survive.
    stackset["StackSet"]["Parameters"][1]["ParameterValue"].should.equal(
        param[1]["ParameterValue"]
    )
    stackset["StackSet"]["Parameters"][0]["ParameterKey"].should.equal(
        param_overrides[0]["ParameterKey"]
    )
    stackset["StackSet"]["Parameters"][1]["ParameterKey"].should.equal(
        param_overrides[1]["ParameterKey"]
    )
@mock_cloudformation
def test_boto3_list_stack_set_operations():
    """list_stack_set_operations records CREATE and UPDATE operations."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-2"],
    )
    cf_conn.update_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-2"],
    )
    list_operation = cf_conn.list_stack_set_operations(StackSetName="test_stack_set")
    list_operation["Summaries"].should.have.length_of(2)
    list_operation["Summaries"][-1]["Action"].should.equal("UPDATE")
@mock_cloudformation
def test_boto3_bad_list_stack_resources():
    """list_stack_resources on a non-existing stack raises ClientError."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    with pytest.raises(ClientError):
        cf_conn.list_stack_resources(StackName="test_stack_set")
@mock_cloudformation
def test_boto3_delete_stack_set_by_name():
    """Deleting a stack set by name marks its status DELETED."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.delete_stack_set(StackSetName="test_stack_set")
    cf_conn.describe_stack_set(StackSetName="test_stack_set")["StackSet"][
        "Status"
    ].should.equal("DELETED")
@mock_cloudformation
def test_boto3_delete_stack_set_by_id():
    """Deleting a stack set by its id marks its status DELETED."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    response = cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    stack_set_id = response["StackSetId"]
    cf_conn.delete_stack_set(StackSetName=stack_set_id)
    cf_conn.describe_stack_set(StackSetName="test_stack_set")["StackSet"][
        "Status"
    ].should.equal("DELETED")
@mock_cloudformation
def test_boto3_create_stack_set():
    """create_stack_set stores the template body and returns a StackSetId."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    response = cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.describe_stack_set(StackSetName="test_stack_set")["StackSet"][
        "TemplateBody"
    ].should.equal(dummy_template_json)
    response["StackSetId"].should_not.be.empty
@mock_cloudformation
def test_boto3_create_stack_set_with_yaml():
    """YAML template bodies are stored verbatim in the stack set."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_yaml
    )
    cf_conn.describe_stack_set(StackSetName="test_stack_set")["StackSet"][
        "TemplateBody"
    ].should.equal(dummy_template_yaml)
@mock_cloudformation
@mock_s3
def test_create_stack_set_from_s3_url():
    """A stack set can be created from a template hosted at an S3 URL."""
    s3 = boto3.client("s3", region_name="us-east-1")
    s3_conn = boto3.resource("s3", region_name="us-east-1")
    s3_conn.create_bucket(Bucket="foobar")
    s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json)
    key_url = s3.generate_presigned_url(
        ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
    )
    cf_conn = boto3.client("cloudformation", region_name="us-west-1")
    cf_conn.create_stack_set(StackSetName="stack_from_url", TemplateURL=key_url)
    cf_conn.describe_stack_set(StackSetName="stack_from_url")["StackSet"][
        "TemplateBody"
    ].should.equal(dummy_template_json)
@mock_cloudformation
def test_boto3_create_stack_set_with_ref_yaml():
    """YAML templates with Ref parameters are stored verbatim."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    params = [
        {"ParameterKey": "TagDescription", "ParameterValue": "desc_ref"},
        {"ParameterKey": "TagName", "ParameterValue": "name_ref"},
    ]
    cf_conn.create_stack_set(
        StackSetName="test_stack",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=params,
    )
    cf_conn.describe_stack_set(StackSetName="test_stack")["StackSet"][
        "TemplateBody"
    ].should.equal(dummy_template_yaml_with_ref)
@mock_cloudformation
def test_boto3_describe_stack_set_params():
    """describe_stack_set echoes back the parameters given at creation."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    params = [
        {"ParameterKey": "TagDescription", "ParameterValue": "desc_ref"},
        {"ParameterKey": "TagName", "ParameterValue": "name_ref"},
    ]
    cf_conn.create_stack_set(
        StackSetName="test_stack",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=params,
    )
    cf_conn.describe_stack_set(StackSetName="test_stack")["StackSet"][
        "Parameters"
    ].should.equal(params)
@mock_cloudformation
def test_boto3_describe_stack_set_by_id():
    """A stack set can be described by StackSetId as well as by name."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    response = cf_conn.create_stack_set(
        StackSetName="test_stack", TemplateBody=dummy_template_json,
    )
    stack_set_id = response["StackSetId"]
    cf_conn.describe_stack_set(StackSetName=stack_set_id)["StackSet"][
        "TemplateBody"
    ].should.equal(dummy_template_json)
@mock_cloudformation
def test_boto3_create_stack():
    """A created stack's JSON template round-trips through get_template."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    client.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    expected = json.loads(dummy_template_json, object_pairs_hook=OrderedDict)
    stored = client.get_template(StackName="test_stack")["TemplateBody"]
    stored.should.equal(expected)
@mock_cloudformation
def test_boto3_create_stack_fail_missing_parameter():
    """create_stack without a value for a required parameter raises ClientError."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    with pytest.raises(ClientError, match="Missing parameter KeyName"):
        cf_conn.create_stack(
            StackName="test_stack", TemplateBody=dummy_parametrized_template_json
        )
@mock_cloudformation
def test_boto3_create_stack_s3_long_name():
    """Bucket physical IDs derived from a very long stack name stay under 64 chars.

    The generated bucket name must still begin with a (possibly truncated)
    prefix of the stack name followed by "-" and the lower-cased logical ID.
    """
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    stack_name = "MyLongStackName01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012"
    template = '{"Resources":{"HelloBucket":{"Type":"AWS::S3::Bucket"}}}'
    cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
    cf_conn.get_template(StackName=stack_name)["TemplateBody"].should.equal(
        json.loads(template, object_pairs_hook=OrderedDict)
    )
    provisioned_resource = cf_conn.list_stack_resources(StackName=stack_name)[
        "StackResourceSummaries"
    ][0]
    provisioned_bucket_name = provisioned_resource["PhysicalResourceId"]
    len(provisioned_bucket_name).should.be.lower_than(64)
    logical_name_lower_case = provisioned_resource["LogicalResourceId"].lower()
    # Everything before "-<logical id>" must be a prefix of the stack name
    bucket_name_stack_name_prefix = provisioned_bucket_name[
        : provisioned_bucket_name.index("-" + logical_name_lower_case)
    ]
    stack_name.lower().should.contain(bucket_name_stack_name_prefix)
@mock_cloudformation
def test_boto3_create_stack_with_yaml():
    """A YAML template body is returned verbatim by get_template."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_yaml)
    cf_conn.get_template(StackName="test_stack")["TemplateBody"].should.equal(
        dummy_template_yaml
    )
@mock_cloudformation
def test_boto3_create_stack_with_short_form_func_yaml():
    """YAML with a short-form intrinsic (!Join) is accepted and stored as-is."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(
        StackName="test_stack", TemplateBody=dummy_template_yaml_with_short_form_func
    )
    cf_conn.get_template(StackName="test_stack")["TemplateBody"].should.equal(
        dummy_template_yaml_with_short_form_func
    )
@mock_s3
@mock_cloudformation
def test_get_template_summary():
    """get_template_summary works from TemplateBody, StackName, and TemplateURL."""
    s3 = boto3.client("s3", region_name="us-east-1")
    s3_conn = boto3.resource("s3", region_name="us-east-1")
    conn = boto3.client("cloudformation", region_name="us-east-1")
    # Summary from an inline JSON template body
    result = conn.get_template_summary(TemplateBody=json.dumps(dummy_template3))
    result["ResourceTypes"].should.equal(["AWS::EC2::VPC"])
    result["Version"].should.equal("2010-09-09")
    result["Description"].should.equal("Stack 3")
    # Summary from an existing stack
    conn.create_stack(StackName="test_stack", TemplateBody=json.dumps(dummy_template3))
    result = conn.get_template_summary(StackName="test_stack")
    result["ResourceTypes"].should.equal(["AWS::EC2::VPC"])
    result["Version"].should.equal("2010-09-09")
    result["Description"].should.equal("Stack 3")
    # Summary from a presigned S3 URL
    s3_conn.create_bucket(Bucket="foobar")
    s3_conn.Object("foobar", "template-key").put(Body=json.dumps(dummy_template3))
    key_url = s3.generate_presigned_url(
        ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
    )
    conn.create_stack(StackName="stack_from_url", TemplateURL=key_url)
    result = conn.get_template_summary(TemplateURL=key_url)
    result["ResourceTypes"].should.equal(["AWS::EC2::VPC"])
    result["Version"].should.equal("2010-09-09")
    result["Description"].should.equal("Stack 3")
    # Summary from a YAML template body
    conn = boto3.client("cloudformation", region_name="us-east-1")
    result = conn.get_template_summary(TemplateBody=dummy_template_yaml)
    result["ResourceTypes"].should.equal(["AWS::EC2::Instance"])
    result["Version"].should.equal("2010-09-09")
    result["Description"].should.equal("Stack1 with yaml template")
@mock_cloudformation
def test_boto3_create_stack_with_ref_yaml():
    """A YAML template using Ref parameters is stored verbatim on the stack."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    params = [
        {"ParameterKey": "TagDescription", "ParameterValue": "desc_ref"},
        {"ParameterKey": "TagName", "ParameterValue": "name_ref"},
    ]
    cf_conn.create_stack(
        StackName="test_stack",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=params,
    )
    cf_conn.get_template(StackName="test_stack")["TemplateBody"].should.equal(
        dummy_template_yaml_with_ref
    )
@mock_cloudformation
def test_creating_stacks_across_regions():
    """Stacks with the same name in different regions are independent."""
    regions = ("us-west-1", "us-west-2")
    for region in regions:
        cf = boto3.resource("cloudformation", region_name=region)
        cf.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    for region in regions:
        cf = boto3.resource("cloudformation", region_name=region)
        list(cf.stacks.all()).should.have.length_of(1)
@mock_cloudformation
def test_create_stack_with_notification_arn():
    """NotificationARNs supplied at create time are stored on the stack."""
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    cf.create_stack(
        StackName="test_stack_with_notifications",
        TemplateBody=dummy_template_json,
        NotificationARNs=["arn:aws:sns:us-east-1:{}:fake-queue".format(ACCOUNT_ID)],
    )
    stack = list(cf.stacks.all())[0]
    stack.notification_arns.should.contain(
        "arn:aws:sns:us-east-1:{}:fake-queue".format(ACCOUNT_ID)
    )
@mock_cloudformation
def test_create_stack_with_role_arn():
    """The RoleARN supplied at create time is stored on the stack."""
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    cf.create_stack(
        StackName="test_stack_with_notifications",
        TemplateBody=dummy_template_json,
        RoleARN="arn:aws:iam::{}:role/moto".format(ACCOUNT_ID),
    )
    stack = list(cf.stacks.all())[0]
    stack.role_arn.should.equal("arn:aws:iam::{}:role/moto".format(ACCOUNT_ID))
@mock_cloudformation
@mock_s3
def test_create_stack_from_s3_url():
    """A stack can be created from a presigned S3 TemplateURL."""
    s3 = boto3.client("s3", region_name="us-east-1")
    s3_conn = boto3.resource("s3", region_name="us-east-1")
    s3_conn.create_bucket(Bucket="foobar")
    s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json)
    key_url = s3.generate_presigned_url(
        ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
    )
    cf_conn = boto3.client("cloudformation", region_name="us-west-1")
    cf_conn.create_stack(StackName="stack_from_url", TemplateURL=key_url)
    cf_conn.get_template(StackName="stack_from_url")["TemplateBody"].should.equal(
        json.loads(dummy_template_json, object_pairs_hook=OrderedDict)
    )
@mock_cloudformation
def test_boto3_update_stack_fail_missing_new_parameter():
    """update_stack fails when the new template adds a parameter with no value."""
    name = "update_stack_fail_missing_new_parameter"
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName=name, TemplateBody=dummy_empty_template_json)
    with pytest.raises(ClientError, match="Missing parameter KeyName"):
        cf_conn.update_stack(
            StackName=name, TemplateBody=dummy_parametrized_template_json
        )
@mock_cloudformation
def test_boto3_update_stack_deleted_resources_can_reference_deleted_parameters():
    """Updating to an empty template deletes resources that Ref removed parameters."""
    name = "update_stack_deleted_resources_can_reference_deleted_parameters"
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    template_json = json.dumps(
        {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Parameters": {"TimeoutParameter": {"Default": 61, "Type": "String"}},
            "Resources": {
                "Queue": {
                    "Type": "AWS::SQS::Queue",
                    "Properties": {"VisibilityTimeout": {"Ref": "TimeoutParameter"}},
                }
            },
        }
    )
    cf_conn.create_stack(StackName=name, TemplateBody=template_json)
    response = cf_conn.describe_stack_resources(StackName=name)
    len(response["StackResources"]).should.equal(1)
    cf_conn.update_stack(StackName=name, TemplateBody=dummy_empty_template_json)
    response = cf_conn.describe_stack_resources(StackName=name)
    len(response["StackResources"]).should.equal(0)
@mock_cloudformation
def test_boto3_update_stack_deleted_resources_can_reference_deleted_resources():
    """Updating to an empty template deletes resources that Ref other deleted resources."""
    name = "update_stack_deleted_resources_can_reference_deleted_resources"
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    template_json = json.dumps(
        {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Parameters": {"TimeoutParameter": {"Default": 61, "Type": "String"}},
            "Resources": {
                "VPC": {
                    "Type": "AWS::EC2::VPC",
                    "Properties": {"CidrBlock": "10.0.0.0/16"},
                },
                "Subnet": {
                    "Type": "AWS::EC2::Subnet",
                    "Properties": {"VpcId": {"Ref": "VPC"}, "CidrBlock": "10.0.0.0/24"},
                },
            },
        }
    )
    cf_conn.create_stack(StackName=name, TemplateBody=template_json)
    response = cf_conn.describe_stack_resources(StackName=name)
    len(response["StackResources"]).should.equal(2)
    cf_conn.update_stack(StackName=name, TemplateBody=dummy_empty_template_json)
    response = cf_conn.describe_stack_resources(StackName=name)
    len(response["StackResources"]).should.equal(0)
@mock_cloudformation
def test_update_stack_with_previous_value():
    """UsePreviousValue keeps an old parameter value while another is replaced."""
    name = "update_stack_with_previous_value"
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(
        StackName=name,
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=[
            {"ParameterKey": "TagName", "ParameterValue": "foo"},
            {"ParameterKey": "TagDescription", "ParameterValue": "bar"},
        ],
    )
    cf_conn.update_stack(
        StackName=name,
        UsePreviousTemplate=True,
        Parameters=[
            {"ParameterKey": "TagName", "UsePreviousValue": True},
            {"ParameterKey": "TagDescription", "ParameterValue": "not bar"},
        ],
    )
    stack = cf_conn.describe_stacks(StackName=name)["Stacks"][0]
    tag_name = [
        x["ParameterValue"]
        for x in stack["Parameters"]
        if x["ParameterKey"] == "TagName"
    ][0]
    tag_desc = [
        x["ParameterValue"]
        for x in stack["Parameters"]
        if x["ParameterKey"] == "TagDescription"
    ][0]
    assert tag_name == "foo"
    assert tag_desc == "not bar"
@mock_cloudformation
@mock_s3
@mock_ec2
def test_update_stack_from_s3_url():
    """A stack can be updated from a presigned S3 TemplateURL."""
    s3 = boto3.client("s3", region_name="us-east-1")
    s3_conn = boto3.resource("s3", region_name="us-east-1")
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(
        StackName="update_stack_from_url",
        TemplateBody=dummy_template_json,
        Tags=[{"Key": "foo", "Value": "bar"}],
    )
    s3_conn.create_bucket(Bucket="foobar")
    s3_conn.Object("foobar", "template-key").put(Body=dummy_update_template_json)
    key_url = s3.generate_presigned_url(
        ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
    )
    cf_conn.update_stack(
        StackName="update_stack_from_url",
        TemplateURL=key_url,
        Parameters=[{"ParameterKey": "KeyName", "ParameterValue": "value"}],
    )
    cf_conn.get_template(StackName="update_stack_from_url")[
        "TemplateBody"
    ].should.equal(
        json.loads(dummy_update_template_json, object_pairs_hook=OrderedDict)
    )
@mock_cloudformation
@mock_s3
def test_create_change_set_from_s3_url():
    """create_change_set with ChangeSetType=CREATE returns change-set and stack ARNs."""
    s3 = boto3.client("s3", region_name="us-east-1")
    s3_conn = boto3.resource("s3", region_name="us-east-1")
    s3_conn.create_bucket(Bucket="foobar")
    s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json)
    key_url = s3.generate_presigned_url(
        ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
    )
    cf_conn = boto3.client("cloudformation", region_name="us-west-1")
    response = cf_conn.create_change_set(
        StackName="NewStack",
        TemplateURL=key_url,
        ChangeSetName="NewChangeSet",
        ChangeSetType="CREATE",
        Tags=[{"Key": "tag-key", "Value": "tag-value"}],
    )
    # NOTE(review): account id is hard-coded here rather than using ACCOUNT_ID
    # as the other tests do -- presumably it matches the mock backend; confirm.
    assert (
        "arn:aws:cloudformation:us-west-1:123456789:changeSet/NewChangeSet/"
        in response["Id"]
    )
    assert (
        "arn:aws:cloudformation:us-west-1:123456789:stack/NewStack"
        in response["StackId"]
    )
@mock_cloudformation
@mock_ec2
def test_describe_change_set():
    """describe_change_set reports name, status, changes, and execution transitions.

    Covers: a freshly-created CREATE change set, executing it (which creates
    the EC2 instance), and a follow-up UPDATE change set against the same stack.
    """
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_change_set(
        StackName="NewStack",
        TemplateBody=dummy_template_json,
        ChangeSetName="NewChangeSet",
        ChangeSetType="CREATE",
    )
    stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet")
    stack["ChangeSetName"].should.equal("NewChangeSet")
    stack["StackName"].should.equal("NewStack")
    stack["Status"].should.equal("CREATE_COMPLETE")
    stack["ExecutionStatus"].should.equal("AVAILABLE")
    two_secs_ago = datetime.now(tz=pytz.UTC) - timedelta(seconds=2)
    assert (
        two_secs_ago < stack["CreationTime"] < datetime.now(tz=pytz.UTC)
    ), "Change set should have been created recently"
    stack["Changes"].should.have.length_of(1)
    stack["Changes"][0].should.equal(
        dict(
            {
                "Type": "Resource",
                "ResourceChange": {
                    "Action": "Add",
                    "LogicalResourceId": "EC2Instance1",
                    "ResourceType": "AWS::EC2::Instance",
                },
            }
        )
    )
    # Execute change set
    cf_conn.execute_change_set(ChangeSetName="NewChangeSet")
    # Verify that the changes have been applied
    ec2 = boto3.client("ec2", region_name="us-east-1")
    ec2.describe_instances()["Reservations"].should.have.length_of(1)
    change_set = cf_conn.describe_change_set(ChangeSetName="NewChangeSet")
    change_set["Changes"].should.have.length_of(1)
    change_set["ExecutionStatus"].should.equal("EXECUTE_COMPLETE")
    stack = cf_conn.describe_stacks(StackName="NewStack")["Stacks"][0]
    stack["StackStatus"].should.equal("CREATE_COMPLETE")
    # create another change set to update the stack
    cf_conn.create_change_set(
        StackName="NewStack",
        TemplateBody=dummy_update_template_json,
        ChangeSetName="NewChangeSet2",
        ChangeSetType="UPDATE",
        Parameters=[{"ParameterKey": "KeyName", "ParameterValue": "value"}],
    )
    stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet2")
    stack["ChangeSetName"].should.equal("NewChangeSet2")
    stack["StackName"].should.equal("NewStack")
    stack["Changes"].should.have.length_of(2)
    # Execute change set
    cf_conn.execute_change_set(ChangeSetName="NewChangeSet2")
    # Verify that the changes have been applied
    stack = cf_conn.describe_stacks(StackName="NewStack")["Stacks"][0]
    stack["StackStatus"].should.equal("UPDATE_COMPLETE")
@mock_cloudformation
@mock_ec2
def test_execute_change_set_w_arn():
    """A change set can be executed by its ARN; resources appear only after execution."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    ec2 = boto3.client("ec2", region_name="us-east-1")
    # Verify no instances exist at the moment
    ec2.describe_instances()["Reservations"].should.have.length_of(0)
    # Create a Change set, and verify no resources have been created yet
    change_set = cf_conn.create_change_set(
        StackName="NewStack",
        TemplateBody=dummy_template_json,
        ChangeSetName="NewChangeSet",
        ChangeSetType="CREATE",
    )
    ec2.describe_instances()["Reservations"].should.have.length_of(0)
    cf_conn.describe_change_set(ChangeSetName="NewChangeSet")["Status"].should.equal(
        "CREATE_COMPLETE"
    )
    # Execute change set
    cf_conn.execute_change_set(ChangeSetName=change_set["Id"])
    # Verify that the status has changed, and the appropriate resources have been created
    cf_conn.describe_change_set(ChangeSetName="NewChangeSet")["Status"].should.equal(
        "CREATE_COMPLETE"
    )
    ec2.describe_instances()["Reservations"].should.have.length_of(1)
@mock_cloudformation
def test_execute_change_set_w_name():
    """A change set can be executed by name plus the owning stack's name."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_change_set(
        StackName="NewStack",
        TemplateBody=dummy_template_json,
        ChangeSetName="NewChangeSet",
        ChangeSetType="CREATE",
    )
    cf_conn.execute_change_set(ChangeSetName="NewChangeSet", StackName="NewStack")
@mock_cloudformation
def test_describe_stack_pagination():
    """describe_stacks pages 100 stacks as 50 + NextToken + 50."""
    conn = boto3.client("cloudformation", region_name="us-east-1")
    for i in range(100):
        conn.create_stack(
            StackName="test_stack_{}".format(i), TemplateBody=dummy_template_json
        )
    resp = conn.describe_stacks()
    stacks = resp["Stacks"]
    stacks.should.have.length_of(50)
    next_token = resp["NextToken"]
    next_token.should_not.be.none
    resp2 = conn.describe_stacks(NextToken=next_token)
    stacks.extend(resp2["Stacks"])
    stacks.should.have.length_of(100)
    # The second (final) page carries no further token
    assert "NextToken" not in resp2.keys()
@mock_cloudformation
def test_describe_stack_resource():
    """describe_stack_resource returns the detail record for a logical ID."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]
    response = cf_conn.describe_stack_resource(
        StackName=stack["StackName"], LogicalResourceId="EC2Instance1"
    )
    resource = response["StackResourceDetail"]
    resource["LogicalResourceId"].should.equal("EC2Instance1")
    resource["ResourceStatus"].should.equal("CREATE_COMPLETE")
    resource["ResourceType"].should.equal("AWS::EC2::Instance")
    resource["StackId"].should.equal(stack["StackId"])
@mock_cloudformation
def test_describe_stack_resource_when_resource_does_not_exist():
    """describe_stack_resource for an unknown logical ID raises ClientError."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]
    with pytest.raises(ClientError, match="does not exist for stack"):
        cf_conn.describe_stack_resource(
            StackName=stack["StackName"], LogicalResourceId="DoesNotExist"
        )
@mock_cloudformation
def test_describe_stack_resources():
    """describe_stack_resources lists every resource of the stack."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]
    response = cf_conn.describe_stack_resources(StackName=stack["StackName"])
    resource = response["StackResources"][0]
    resource["LogicalResourceId"].should.equal("EC2Instance1")
    resource["ResourceStatus"].should.equal("CREATE_COMPLETE")
    resource["ResourceType"].should.equal("AWS::EC2::Instance")
    resource["StackId"].should.equal(stack["StackId"])
@mock_cloudformation
def test_describe_stack_by_name():
    """A stack is retrievable by name with a recent CreationTime."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]
    stack["StackName"].should.equal("test_stack")
    two_secs_ago = datetime.now(tz=pytz.UTC) - timedelta(seconds=2)
    assert (
        two_secs_ago < stack["CreationTime"] < datetime.now(tz=pytz.UTC)
    ), "Stack should have been created recently"
@mock_cloudformation
def test_describe_stack_by_stack_id():
    """A stack is retrievable by its StackId as well as its name."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]
    stack_by_id = cf_conn.describe_stacks(StackName=stack["StackId"])["Stacks"][0]
    stack_by_id["StackId"].should.equal(stack["StackId"])
    stack_by_id["StackName"].should.equal("test_stack")
@mock_cloudformation
def test_list_change_sets():
    """list_change_sets returns summaries for a stack's change sets."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_change_set(
        StackName="NewStack2",
        TemplateBody=dummy_template_json,
        ChangeSetName="NewChangeSet2",
        ChangeSetType="CREATE",
    )
    change_set = cf_conn.list_change_sets(StackName="NewStack2")["Summaries"][0]
    change_set["StackName"].should.equal("NewStack2")
    change_set["ChangeSetName"].should.equal("NewChangeSet2")
@mock_cloudformation
def test_list_stacks():
    """The stacks collection lists every created stack by name."""
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    cf.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    cf.create_stack(StackName="test_stack2", TemplateBody=dummy_template_json)
    stacks = list(cf.stacks.all())
    stacks.should.have.length_of(2)
    stack_names = [stack.stack_name for stack in stacks]
    stack_names.should.contain("test_stack")
    stack_names.should.contain("test_stack2")
@mock_cloudformation
def test_delete_stack_from_resource():
    """Calling delete() on a Stack resource object removes the stack."""
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    new_stack = cf.create_stack(
        StackName="test_stack", TemplateBody=dummy_template_json
    )
    list(cf.stacks.all()).should.have.length_of(1)
    new_stack.delete()
    list(cf.stacks.all()).should.have.length_of(0)
@mock_cloudformation
@mock_ec2
def test_delete_change_set():
    """A change set can be deleted by name (with stack name) or by its ARN."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_change_set(
        StackName="NewStack",
        TemplateBody=dummy_template_json,
        ChangeSetName="NewChangeSet",
        ChangeSetType="CREATE",
    )
    cf_conn.list_change_sets(StackName="NewStack")["Summaries"].should.have.length_of(1)
    cf_conn.delete_change_set(ChangeSetName="NewChangeSet", StackName="NewStack")
    cf_conn.list_change_sets(StackName="NewStack")["Summaries"].should.have.length_of(0)
    # Testing deletion by arn
    result = cf_conn.create_change_set(
        StackName="NewStack",
        TemplateBody=dummy_template_json,
        ChangeSetName="NewChangeSet1",
        ChangeSetType="CREATE",
    )
    cf_conn.delete_change_set(ChangeSetName=result.get("Id"), StackName="NewStack")
    cf_conn.list_change_sets(StackName="NewStack")["Summaries"].should.have.length_of(0)
@mock_cloudformation
@mock_ec2
def test_delete_stack_by_name():
    """delete_stack removes the stack from describe_stacks results."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    cf_conn.describe_stacks()["Stacks"].should.have.length_of(1)
    cf_conn.delete_stack(StackName="test_stack")
    cf_conn.describe_stacks()["Stacks"].should.have.length_of(0)
@mock_cloudformation
def test_delete_stack():
    """A deleted stack still appears in list_stacks with status DELETE_COMPLETE."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    client.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    client.delete_stack(StackName="test_stack")
    summaries = client.list_stacks()["StackSummaries"]
    assert summaries[0]["StackStatus"] == "DELETE_COMPLETE"
@mock_cloudformation
def test_describe_deleted_stack():
    """A deleted stack can still be described by StackId, with DELETE_COMPLETE status."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]
    stack_id = stack["StackId"]
    cf_conn.delete_stack(StackName=stack["StackId"])
    stack_by_id = cf_conn.describe_stacks(StackName=stack_id)["Stacks"][0]
    stack_by_id["StackId"].should.equal(stack["StackId"])
    stack_by_id["StackName"].should.equal("test_stack")
    stack_by_id["StackStatus"].should.equal("DELETE_COMPLETE")
@mock_cloudformation
def test_describe_stack_with_special_chars():
    """Angle brackets in the template description survive the round trip."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(
        StackName="test_stack_spl",
        TemplateBody=dummy_template_special_chars_in_description_json,
    )
    stack = cf_conn.describe_stacks(StackName="test_stack_spl")["Stacks"][0]
    assert stack.get("StackName") == "test_stack_spl"
    assert stack.get("Description") == "Stack 1 <env>"
@mock_cloudformation
@mock_ec2
def test_describe_updated_stack():
    """After update_stack, describe reports UPDATE_COMPLETE plus the new RoleARN and tags."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(
        StackName="test_stack",
        TemplateBody=dummy_template_json,
        Tags=[{"Key": "foo", "Value": "bar"}],
    )
    cf_conn.update_stack(
        StackName="test_stack",
        RoleARN="arn:aws:iam::{}:role/moto".format(ACCOUNT_ID),
        TemplateBody=dummy_update_template_json,
        Tags=[{"Key": "foo", "Value": "baz"}],
        Parameters=[{"ParameterKey": "KeyName", "ParameterValue": "value"}],
    )
    stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]
    stack_id = stack["StackId"]
    stack_by_id = cf_conn.describe_stacks(StackName=stack_id)["Stacks"][0]
    stack_by_id["StackId"].should.equal(stack["StackId"])
    stack_by_id["StackName"].should.equal("test_stack")
    stack_by_id["StackStatus"].should.equal("UPDATE_COMPLETE")
    stack_by_id["RoleARN"].should.equal("arn:aws:iam::{}:role/moto".format(ACCOUNT_ID))
    stack_by_id["Tags"].should.equal([{"Key": "foo", "Value": "baz"}])
@mock_cloudformation
def test_bad_describe_stack():
    """Describing a stack that was never created raises ClientError."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    with pytest.raises(ClientError):
        client.describe_stacks(StackName="non_existent_stack")
@mock_cloudformation
def test_cloudformation_params():
    """Parameters passed at create time are echoed back on the stack.

    Uses an inline template with one String parameter (APPNAME) and checks
    that the supplied value overrides the template's default.
    """
    # Consistency fix: every other test in this file applies the decorator
    # bare (@mock_cloudformation), not as a call (@mock_cloudformation()).
    dummy_template_with_params = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Description": "Stack 1",
        "Resources": {},
        "Parameters": {
            "APPNAME": {
                "Default": "app-name",
                "Description": "The name of the app",
                "Type": "String",
            }
        },
    }
    dummy_template_with_params_json = json.dumps(dummy_template_with_params)
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    stack = cf.create_stack(
        StackName="test_stack",
        TemplateBody=dummy_template_with_params_json,
        Parameters=[{"ParameterKey": "APPNAME", "ParameterValue": "testing123"}],
    )
    stack.parameters.should.have.length_of(1)
    param = stack.parameters[0]
    param["ParameterKey"].should.equal("APPNAME")
    param["ParameterValue"].should.equal("testing123")
@mock_cloudformation
def test_stack_tags():
    """Tags supplied at create time are stored on the stack (order-insensitive)."""
    tags = [{"Key": "foo", "Value": "bar"}, {"Key": "baz", "Value": "bleh"}]
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    stack = cf.create_stack(
        StackName="test_stack", TemplateBody=dummy_template_json, Tags=tags
    )
    # Flatten both tag lists into sets of (key, value) items so the
    # comparison ignores ordering.
    observed_tag_items = set(
        item for items in [tag.items() for tag in stack.tags] for item in items
    )
    expected_tag_items = set(
        item for items in [tag.items() for tag in tags] for item in items
    )
    observed_tag_items.should.equal(expected_tag_items)
@mock_cloudformation
@mock_ec2
def test_stack_events():
    """Stack events cover create/update/delete in reverse-chronological order.

    Also verifies that requesting events for a non-existent stack raises a
    ValidationError ClientError with HTTP 400.
    """
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    stack = cf.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack.update(
        TemplateBody=dummy_update_template_json,
        Parameters=[{"ParameterKey": "KeyName", "ParameterValue": "value"}],
    )
    stack = cf.Stack(stack.stack_id)
    stack.delete()
    # assert begins and ends with stack events
    events = list(stack.events.all())
    events[0].resource_type.should.equal("AWS::CloudFormation::Stack")
    events[-1].resource_type.should.equal("AWS::CloudFormation::Stack")
    # testing ordering of stack events without assuming resource events will not exist
    # the AWS API returns events in reverse chronological order
    stack_events_to_look_for = iter(
        [
            ("DELETE_COMPLETE", None),
            ("DELETE_IN_PROGRESS", "User Initiated"),
            ("UPDATE_COMPLETE", None),
            ("UPDATE_IN_PROGRESS", "User Initiated"),
            ("CREATE_COMPLETE", None),
            ("CREATE_IN_PROGRESS", "User Initiated"),
        ]
    )
    try:
        for event in events:
            event.stack_id.should.equal(stack.stack_id)
            event.stack_name.should.equal("test_stack")
            event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}")
            if event.resource_type == "AWS::CloudFormation::Stack":
                event.logical_resource_id.should.equal("test_stack")
                event.physical_resource_id.should.equal(stack.stack_id)
                status_to_look_for, reason_to_look_for = next(stack_events_to_look_for)
                event.resource_status.should.equal(status_to_look_for)
                if reason_to_look_for is not None:
                    event.resource_status_reason.should.equal(reason_to_look_for)
    except StopIteration:
        assert False, "Too many stack events"
    list(stack_events_to_look_for).should.be.empty
    with pytest.raises(ClientError) as exp:
        stack = cf.Stack("non_existing_stack")
        events = list(stack.events.all())
    exp_err = exp.value.response.get("Error")
    exp_metadata = exp.value.response.get("ResponseMetadata")
    exp_err.get("Code").should.match(r"ValidationError")
    exp_err.get("Message").should.match(
        r"Stack with id non_existing_stack does not exist"
    )
    exp_metadata.get("HTTPStatusCode").should.equal(400)
@mock_cloudformation
def test_list_exports():
    """A stack output with an Export appears in list_exports."""
    cf_client = boto3.client("cloudformation", region_name="us-east-1")
    cf_resource = boto3.resource("cloudformation", region_name="us-east-1")
    stack = cf_resource.create_stack(
        StackName="test_stack", TemplateBody=dummy_output_template_json
    )
    output_value = "VPCID"
    exports = cf_client.list_exports()["Exports"]
    stack.outputs.should.have.length_of(1)
    stack.outputs[0]["OutputValue"].should.equal(output_value)
    exports.should.have.length_of(1)
    exports[0]["ExportingStackId"].should.equal(stack.stack_id)
    exports[0]["Name"].should.equal("My VPC ID")
    exports[0]["Value"].should.equal(output_value)
@mock_cloudformation
def test_list_exports_with_token():
    """list_exports paginates at 100 exports and honours NextToken.

    Each stack must export a unique name, so a fresh copy of the output
    template is parsed per iteration and its export name suffixed with the
    index.  (The previous version mutated the shared module-level
    ``dummy_output_template`` dict in place, which made the suffixes
    cumulative and leaked the mutation into any other test using it.)
    """
    cf = boto3.client("cloudformation", region_name="us-east-1")
    for i in range(101):
        # Parse a fresh template and append the index so the export name is unique
        template = json.loads(dummy_output_template_json)
        template["Outputs"]["StackVPC"]["Export"]["Name"] += str(i)
        cf.create_stack(
            StackName="test_stack_{}".format(i),
            TemplateBody=json.dumps(template),
        )
    exports = cf.list_exports()
    exports["Exports"].should.have.length_of(100)
    exports.get("NextToken").should_not.be.none
    more_exports = cf.list_exports(NextToken=exports["NextToken"])
    more_exports["Exports"].should.have.length_of(1)
    more_exports.get("NextToken").should.be.none
@mock_cloudformation
def test_delete_stack_with_export():
    """Deleting a stack removes its exports from list_exports."""
    cf = boto3.client("cloudformation", region_name="us-east-1")
    stack = cf.create_stack(
        StackName="test_stack", TemplateBody=dummy_output_template_json
    )
    stack_id = stack["StackId"]
    exports = cf.list_exports()["Exports"]
    exports.should.have.length_of(1)
    cf.delete_stack(StackName=stack_id)
    cf.list_exports()["Exports"].should.have.length_of(0)
@mock_cloudformation
def test_export_names_must_be_unique():
    """Creating a second stack exporting an already-used name raises ClientError."""
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    template = dummy_output_template_json
    cf.create_stack(StackName="test_stack", TemplateBody=template)
    with pytest.raises(ClientError):
        cf.create_stack(StackName="test_stack", TemplateBody=template)
@mock_sqs
@mock_cloudformation
def test_stack_with_imports():
    """A stack can Fn::ImportValue an export from another stack.

    Stack 1 exports a queue name as an output; stack 2 imports it.  The
    queue must then be retrievable from SQS under the exported name.
    """
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    # Fix: this is an SQS resource -- the original name "ec2_resource" was misleading
    sqs_resource = boto3.resource("sqs", region_name="us-east-1")
    output_stack = cf.create_stack(
        StackName="test_stack1", TemplateBody=dummy_output_template_json
    )
    cf.create_stack(StackName="test_stack2", TemplateBody=dummy_import_template_json)
    output_stack.outputs.should.have.length_of(1)
    output = output_stack.outputs[0]["OutputValue"]
    queue = sqs_resource.get_queue_by_name(QueueName=output)
    queue.should_not.be.none
@mock_sqs
@mock_cloudformation
def test_non_json_redrive_policy():
    """Queues with a non-JSON RedrivePolicy in the template still create cleanly."""
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    stack = cf.create_stack(
        StackName="test_stack1", TemplateBody=dummy_redrive_template_json
    )
    stack.Resource("MainQueue").resource_status.should.equal("CREATE_COMPLETE")
    stack.Resource("DeadLetterQueue").resource_status.should.equal("CREATE_COMPLETE")
@mock_cloudformation
def test_boto3_create_duplicate_stack():
    """Creating a stack whose name already exists raises ClientError."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(
        StackName="test_stack", TemplateBody=dummy_template_json,
    )
    with pytest.raises(ClientError):
        cf_conn.create_stack(
            StackName="test_stack", TemplateBody=dummy_template_json,
        )
@mock_dynamodb2
@mock_cloudformation
def test_delete_stack_dynamo_template():
    """Deleting a stack tears down its DynamoDB table; recreation works afterwards."""
    conn = boto3.client("cloudformation", region_name="us-east-1")
    dynamodb_client = boto3.client("dynamodb", region_name="us-east-1")
    conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json4)
    table_desc = dynamodb_client.list_tables()
    len(table_desc.get("TableNames")).should.equal(1)
    conn.delete_stack(StackName="test_stack")
    table_desc = dynamodb_client.list_tables()
    len(table_desc.get("TableNames")).should.equal(0)
    conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json4)
from __future__ import unicode_literals
import json
from collections import OrderedDict
from datetime import datetime, timedelta
import pytz
import boto3
from botocore.exceptions import ClientError, ValidationError
import sure
import pytest
from moto import mock_cloudformation, mock_dynamodb2, mock_s3, mock_sqs, mock_ec2
from moto.core import ACCOUNT_ID
from .test_cloudformation_stack_crud import dummy_template_json2, dummy_template_json4
from tests import EXAMPLE_AMI_ID
dummy_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1",
"Resources": {
"EC2Instance1": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "dummy",
"InstanceType": "t2.micro",
"Tags": [
{"Key": "Description", "Value": "Test tag"},
{"Key": "Name", "Value": "Name tag for tests"},
],
},
}
},
}
dummy_template3 = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 3",
"Resources": {
"VPC": {"Properties": {"CidrBlock": "192.168.0.0/16"}, "Type": "AWS::EC2::VPC"}
},
}
dummy_template_yaml = """---
AWSTemplateFormatVersion: 2010-09-09
Description: Stack1 with yaml template
Resources:
EC2Instance1:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-03cf127a
KeyName: dummy
InstanceType: t2.micro
Tags:
- Key: Description
Value: Test tag
- Key: Name
Value: Name tag for tests
"""
dummy_template_yaml_with_short_form_func = """---
AWSTemplateFormatVersion: 2010-09-09
Description: Stack1 with yaml template
Resources:
EC2Instance1:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-03cf127a
KeyName: !Join [ ":", [ du, m, my ] ]
InstanceType: t2.micro
Tags:
- Key: Description
Value: Test tag
- Key: Name
Value: Name tag for tests
"""
dummy_template_yaml_with_ref = """---
AWSTemplateFormatVersion: 2010-09-09
Description: Stack1 with yaml template
Parameters:
TagDescription:
Type: String
TagName:
Type: String
Resources:
EC2Instance1:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-03cf127a
KeyName: dummy
InstanceType: t2.micro
Tags:
- Key: Description
Value:
Ref: TagDescription
- Key: Name
Value: !Ref TagName
"""
dummy_empty_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Parameters": {},
"Resources": {},
}
dummy_parametrized_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Parameters": {
"KeyName": {"Description": "A template parameter", "Type": "String"}
},
"Resources": {},
}
dummy_update_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Parameters": {
"KeyName": {
"Description": "Name of an existing EC2 KeyPair",
"Type": "AWS::EC2::KeyPair::KeyName",
"ConstraintDescription": "must be the name of an existing EC2 KeyPair.",
}
},
"Resources": {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {"ImageId": EXAMPLE_AMI_ID},
}
},
}
dummy_output_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1",
"Resources": {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {"ImageId": EXAMPLE_AMI_ID},
}
},
"Outputs": {
"StackVPC": {
"Description": "The ID of the VPC",
"Value": "VPCID",
"Export": {"Name": "My VPC ID"},
}
},
}
dummy_import_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"Queue": {
"Type": "AWS::SQS::Queue",
"Properties": {
"QueueName": {"Fn::ImportValue": "My VPC ID"},
"VisibilityTimeout": 60,
},
}
},
}
dummy_redrive_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"MainQueue": {
"Type": "AWS::SQS::Queue",
"Properties": {
"QueueName": "mainqueue.fifo",
"FifoQueue": True,
"ContentBasedDeduplication": False,
"RedrivePolicy": {
"deadLetterTargetArn": {"Fn::GetAtt": ["DeadLetterQueue", "Arn"]},
"maxReceiveCount": 5,
},
},
},
"DeadLetterQueue": {
"Type": "AWS::SQS::Queue",
"Properties": {"FifoQueue": True},
},
},
}
dummy_template_special_chars_in_description = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1 <env>",
"Resources": {
"EC2Instance1": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": EXAMPLE_AMI_ID,
"KeyName": "dummy",
"InstanceType": "t2.micro",
"Tags": [
{"Key": "Description", "Value": "Test tag"},
{"Key": "Name", "Value": "Name tag for tests"},
],
},
}
},
}
dummy_template_json = json.dumps(dummy_template)
dummy_template_special_chars_in_description_json = json.dumps(
dummy_template_special_chars_in_description
)
dummy_empty_template_json = json.dumps(dummy_empty_template)
dummy_parametrized_template_json = json.dumps(dummy_parametrized_template)
dummy_update_template_json = json.dumps(dummy_update_template)
dummy_output_template_json = json.dumps(dummy_output_template)
dummy_import_template_json = json.dumps(dummy_import_template)
dummy_redrive_template_json = json.dumps(dummy_redrive_template)
@mock_cloudformation
def test_boto3_describe_stack_instances():
    """describe_stack_instance returns per-region, per-account instance details."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-2"],
    )
    usw2_instance = cf_conn.describe_stack_instance(
        StackSetName="test_stack_set",
        StackInstanceAccount=ACCOUNT_ID,
        StackInstanceRegion="us-west-2",
    )
    use1_instance = cf_conn.describe_stack_instance(
        StackSetName="test_stack_set",
        StackInstanceAccount=ACCOUNT_ID,
        StackInstanceRegion="us-east-1",
    )
    # Each instance reports the exact region/account pair it was created for.
    usw2_instance["StackInstance"].should.have.key("Region").which.should.equal(
        "us-west-2"
    )
    usw2_instance["StackInstance"].should.have.key("Account").which.should.equal(
        ACCOUNT_ID
    )
    use1_instance["StackInstance"].should.have.key("Region").which.should.equal(
        "us-east-1"
    )
    use1_instance["StackInstance"].should.have.key("Account").which.should.equal(
        ACCOUNT_ID
    )
@mock_cloudformation
def test_boto3_list_stacksets_length():
    """list_stack_sets reports every stack set that was created."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    for name, body in (
        ("test_stack_set", dummy_template_json),
        ("test_stack_set2", dummy_template_yaml),
    ):
        client.create_stack_set(StackSetName=name, TemplateBody=body)
    assert len(client.list_stack_sets()["Summaries"]) == 2
@mock_cloudformation
def test_boto3_filter_stacks():
    """list_stacks honors the StackStatusFilter argument."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    client.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    client.create_stack(StackName="test_stack2", TemplateBody=dummy_template_json)
    client.update_stack(StackName="test_stack", TemplateBody=dummy_template_json2)
    # One stack is freshly created, the other has been updated — filtering on
    # each status must return exactly one summary.
    for status in ("CREATE_COMPLETE", "UPDATE_COMPLETE"):
        summaries = client.list_stacks(StackStatusFilter=[status]).get("StackSummaries")
        assert len(summaries) == 1
@mock_cloudformation
def test_boto3_list_stacksets_contents():
    """list_stack_sets summaries carry the stack set's name and ACTIVE status."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    stacksets = cf_conn.list_stack_sets()
    stacksets["Summaries"][0].should.have.key("StackSetName").which.should.equal(
        "test_stack_set"
    )
    stacksets["Summaries"][0].should.have.key("Status").which.should.equal("ACTIVE")
@mock_cloudformation
def test_boto3_stop_stack_set_operation():
    """stop_stack_set_operation flips the latest operation's status to STOPPED."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-1", "us-west-2"],
    )
    # Grab the id of the most recent operation (the instance creation above).
    operation_id = cf_conn.list_stack_set_operations(StackSetName="test_stack_set")[
        "Summaries"
    ][-1]["OperationId"]
    cf_conn.stop_stack_set_operation(
        StackSetName="test_stack_set", OperationId=operation_id
    )
    list_operation = cf_conn.list_stack_set_operations(StackSetName="test_stack_set")
    list_operation["Summaries"][-1]["Status"].should.equal("STOPPED")
@mock_cloudformation
def test_boto3_describe_stack_set_operation():
    """describe_stack_set_operation reports status/action; unknown ids raise."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-1", "us-west-2"],
    )
    operation_id = cf_conn.list_stack_set_operations(StackSetName="test_stack_set")[
        "Summaries"
    ][-1]["OperationId"]
    cf_conn.stop_stack_set_operation(
        StackSetName="test_stack_set", OperationId=operation_id
    )
    response = cf_conn.describe_stack_set_operation(
        StackSetName="test_stack_set", OperationId=operation_id
    )
    response["StackSetOperation"]["Status"].should.equal("STOPPED")
    response["StackSetOperation"]["Action"].should.equal("CREATE")
    # A non-existent operation id must surface a 400 ValidationError.
    with pytest.raises(ClientError) as exp:
        cf_conn.describe_stack_set_operation(
            StackSetName="test_stack_set", OperationId="non_existing_operation"
        )
    exp_err = exp.value.response.get("Error")
    exp_metadata = exp.value.response.get("ResponseMetadata")
    exp_err.get("Code").should.match(r"ValidationError")
    exp_err.get("Message").should.match(
        r"Stack with id non_existing_operation does not exist"
    )
    exp_metadata.get("HTTPStatusCode").should.equal(400)
@mock_cloudformation
def test_boto3_list_stack_set_operation_results():
    """list_stack_set_operation_results returns one entry per target region."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-1", "us-west-2"],
    )
    operation_id = cf_conn.list_stack_set_operations(StackSetName="test_stack_set")[
        "Summaries"
    ][-1]["OperationId"]
    cf_conn.stop_stack_set_operation(
        StackSetName="test_stack_set", OperationId=operation_id
    )
    response = cf_conn.list_stack_set_operation_results(
        StackSetName="test_stack_set", OperationId=operation_id
    )
    # Three regions were targeted, so three result summaries are expected.
    response["Summaries"].should.have.length_of(3)
    response["Summaries"][0].should.have.key("Account").which.should.equal(ACCOUNT_ID)
    response["Summaries"][1].should.have.key("Status").which.should.equal("STOPPED")
@mock_cloudformation
def test_boto3_update_stack_instances():
    """Parameter overrides apply only to the regions passed to update_stack_instances."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    param = [
        {"ParameterKey": "SomeParam", "ParameterValue": "StackSetValue"},
        {"ParameterKey": "AnotherParam", "ParameterValue": "StackSetValue2"},
    ]
    param_overrides = [
        {"ParameterKey": "SomeParam", "ParameterValue": "OverrideValue"},
        {"ParameterKey": "AnotherParam", "ParameterValue": "OverrideValue2"},
    ]
    cf_conn.create_stack_set(
        StackSetName="test_stack_set",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=param,
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-1", "us-west-2"],
    )
    # Override parameters for the two west regions only; us-east-1 keeps the
    # stack-set level parameters.
    cf_conn.update_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-west-1", "us-west-2"],
        ParameterOverrides=param_overrides,
    )
    usw2_instance = cf_conn.describe_stack_instance(
        StackSetName="test_stack_set",
        StackInstanceAccount=ACCOUNT_ID,
        StackInstanceRegion="us-west-2",
    )
    usw1_instance = cf_conn.describe_stack_instance(
        StackSetName="test_stack_set",
        StackInstanceAccount=ACCOUNT_ID,
        StackInstanceRegion="us-west-1",
    )
    use1_instance = cf_conn.describe_stack_instance(
        StackSetName="test_stack_set",
        StackInstanceAccount=ACCOUNT_ID,
        StackInstanceRegion="us-east-1",
    )
    # Both west-region instances carry the full override list, key and value.
    usw2_instance["StackInstance"]["ParameterOverrides"][0][
        "ParameterKey"
    ].should.equal(param_overrides[0]["ParameterKey"])
    usw2_instance["StackInstance"]["ParameterOverrides"][0][
        "ParameterValue"
    ].should.equal(param_overrides[0]["ParameterValue"])
    usw2_instance["StackInstance"]["ParameterOverrides"][1][
        "ParameterKey"
    ].should.equal(param_overrides[1]["ParameterKey"])
    usw2_instance["StackInstance"]["ParameterOverrides"][1][
        "ParameterValue"
    ].should.equal(param_overrides[1]["ParameterValue"])
    usw1_instance["StackInstance"]["ParameterOverrides"][0][
        "ParameterKey"
    ].should.equal(param_overrides[0]["ParameterKey"])
    usw1_instance["StackInstance"]["ParameterOverrides"][0][
        "ParameterValue"
    ].should.equal(param_overrides[0]["ParameterValue"])
    usw1_instance["StackInstance"]["ParameterOverrides"][1][
        "ParameterKey"
    ].should.equal(param_overrides[1]["ParameterKey"])
    usw1_instance["StackInstance"]["ParameterOverrides"][1][
        "ParameterValue"
    ].should.equal(param_overrides[1]["ParameterValue"])
    # The untouched region has no overrides at all.
    use1_instance["StackInstance"]["ParameterOverrides"].should.be.empty
@mock_cloudformation
def test_boto3_delete_stack_instances():
    """delete_stack_instances removes only the instances in the given regions."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-2"],
    )
    cf_conn.delete_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1"],
        RetainStacks=False,
    )
    # Only the us-west-2 instance should remain.
    cf_conn.list_stack_instances(StackSetName="test_stack_set")[
        "Summaries"
    ].should.have.length_of(1)
    cf_conn.list_stack_instances(StackSetName="test_stack_set")["Summaries"][0][
        "Region"
    ].should.equal("us-west-2")
@mock_cloudformation
def test_boto3_create_stack_instances():
    """create_stack_instances creates one instance per requested region."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-2"],
    )
    cf_conn.list_stack_instances(StackSetName="test_stack_set")[
        "Summaries"
    ].should.have.length_of(2)
    cf_conn.list_stack_instances(StackSetName="test_stack_set")["Summaries"][0][
        "Account"
    ].should.equal(ACCOUNT_ID)
@mock_cloudformation
def test_boto3_create_stack_instances_with_param_overrides():
    """ParameterOverrides passed at instance creation are stored on the instance."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    param = [
        {"ParameterKey": "TagDescription", "ParameterValue": "StackSetValue"},
        {"ParameterKey": "TagName", "ParameterValue": "StackSetValue2"},
    ]
    param_overrides = [
        {"ParameterKey": "TagDescription", "ParameterValue": "OverrideValue"},
        {"ParameterKey": "TagName", "ParameterValue": "OverrideValue2"},
    ]
    cf_conn.create_stack_set(
        StackSetName="test_stack_set",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=param,
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-2"],
        ParameterOverrides=param_overrides,
    )
    usw2_instance = cf_conn.describe_stack_instance(
        StackSetName="test_stack_set",
        StackInstanceAccount=ACCOUNT_ID,
        StackInstanceRegion="us-west-2",
    )
    # Both override entries must round-trip, keys and values alike.
    usw2_instance["StackInstance"]["ParameterOverrides"][0][
        "ParameterKey"
    ].should.equal(param_overrides[0]["ParameterKey"])
    usw2_instance["StackInstance"]["ParameterOverrides"][1][
        "ParameterKey"
    ].should.equal(param_overrides[1]["ParameterKey"])
    usw2_instance["StackInstance"]["ParameterOverrides"][0][
        "ParameterValue"
    ].should.equal(param_overrides[0]["ParameterValue"])
    usw2_instance["StackInstance"]["ParameterOverrides"][1][
        "ParameterValue"
    ].should.equal(param_overrides[1]["ParameterValue"])
@mock_cloudformation
def test_update_stack_set():
    """update_stack_set replaces the stack set's parameter values."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    param = [
        {"ParameterKey": "TagDescription", "ParameterValue": "StackSetValue"},
        {"ParameterKey": "TagName", "ParameterValue": "StackSetValue2"},
    ]
    param_overrides = [
        {"ParameterKey": "TagDescription", "ParameterValue": "OverrideValue"},
        {"ParameterKey": "TagName", "ParameterValue": "OverrideValue2"},
    ]
    cf_conn.create_stack_set(
        StackSetName="test_stack_set",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=param,
    )
    cf_conn.update_stack_set(
        StackSetName="test_stack_set",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=param_overrides,
    )
    stackset = cf_conn.describe_stack_set(StackSetName="test_stack_set")
    # After the update, describe must return the new values for both keys.
    stackset["StackSet"]["Parameters"][0]["ParameterValue"].should.equal(
        param_overrides[0]["ParameterValue"]
    )
    stackset["StackSet"]["Parameters"][1]["ParameterValue"].should.equal(
        param_overrides[1]["ParameterValue"]
    )
    stackset["StackSet"]["Parameters"][0]["ParameterKey"].should.equal(
        param_overrides[0]["ParameterKey"]
    )
    stackset["StackSet"]["Parameters"][1]["ParameterKey"].should.equal(
        param_overrides[1]["ParameterKey"]
    )
@mock_cloudformation
def test_update_stack_set_with_previous_value():
    """UsePreviousValue keeps the original value for that parameter on update."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    param = [
        {"ParameterKey": "TagDescription", "ParameterValue": "StackSetValue"},
        {"ParameterKey": "TagName", "ParameterValue": "StackSetValue2"},
    ]
    param_overrides = [
        {"ParameterKey": "TagDescription", "ParameterValue": "OverrideValue"},
        {"ParameterKey": "TagName", "UsePreviousValue": True},
    ]
    cf_conn.create_stack_set(
        StackSetName="test_stack_set",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=param,
    )
    cf_conn.update_stack_set(
        StackSetName="test_stack_set",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=param_overrides,
    )
    stackset = cf_conn.describe_stack_set(StackSetName="test_stack_set")
    # TagDescription was overridden; TagName kept its original value.
    stackset["StackSet"]["Parameters"][0]["ParameterValue"].should.equal(
        param_overrides[0]["ParameterValue"]
    )
    stackset["StackSet"]["Parameters"][1]["ParameterValue"].should.equal(
        param[1]["ParameterValue"]
    )
    stackset["StackSet"]["Parameters"][0]["ParameterKey"].should.equal(
        param_overrides[0]["ParameterKey"]
    )
    stackset["StackSet"]["Parameters"][1]["ParameterKey"].should.equal(
        param_overrides[1]["ParameterKey"]
    )
@mock_cloudformation
def test_boto3_list_stack_set_operations():
    """list_stack_set_operations records one summary per operation, newest last."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    cf_conn.create_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-2"],
    )
    cf_conn.update_stack_instances(
        StackSetName="test_stack_set",
        Accounts=[ACCOUNT_ID],
        Regions=["us-east-1", "us-west-2"],
    )
    list_operation = cf_conn.list_stack_set_operations(StackSetName="test_stack_set")
    # One CREATE (instances) followed by one UPDATE.
    list_operation["Summaries"].should.have.length_of(2)
    list_operation["Summaries"][-1]["Action"].should.equal("UPDATE")
@mock_cloudformation
def test_boto3_bad_list_stack_resources():
    """Listing resources of a non-existent stack raises ClientError."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    with pytest.raises(ClientError):
        client.list_stack_resources(StackName="test_stack_set")
@mock_cloudformation
def test_boto3_delete_stack_set_by_name():
    """A stack set deleted by name is subsequently reported as DELETED."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    client.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    client.delete_stack_set(StackSetName="test_stack_set")
    described = client.describe_stack_set(StackSetName="test_stack_set")
    assert described["StackSet"]["Status"] == "DELETED"
@mock_cloudformation
def test_boto3_delete_stack_set_by_id():
    """A stack set can be deleted via its StackSetId as well as its name."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    created = client.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    client.delete_stack_set(StackSetName=created["StackSetId"])
    described = client.describe_stack_set(StackSetName="test_stack_set")
    assert described["StackSet"]["Status"] == "DELETED"
@mock_cloudformation
def test_boto3_create_stack_set():
    """create_stack_set stores the template and returns a non-empty StackSetId."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    response = client.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_json
    )
    stack_set = client.describe_stack_set(StackSetName="test_stack_set")["StackSet"]
    assert stack_set["TemplateBody"] == dummy_template_json
    assert response["StackSetId"]
@mock_cloudformation
def test_boto3_create_stack_set_with_yaml():
    """A YAML stack-set template round-trips unchanged through describe_stack_set."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack_set(
        StackSetName="test_stack_set", TemplateBody=dummy_template_yaml
    )
    cf_conn.describe_stack_set(StackSetName="test_stack_set")["StackSet"][
        "TemplateBody"
    ].should.equal(dummy_template_yaml)
@mock_cloudformation
@mock_s3
def test_create_stack_set_from_s3_url():
    """A stack set can source its template from a presigned S3 URL."""
    s3 = boto3.client("s3", region_name="us-east-1")
    s3_conn = boto3.resource("s3", region_name="us-east-1")
    s3_conn.create_bucket(Bucket="foobar")
    s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json)
    key_url = s3.generate_presigned_url(
        ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
    )
    cf_conn = boto3.client("cloudformation", region_name="us-west-1")
    cf_conn.create_stack_set(StackSetName="stack_from_url", TemplateURL=key_url)
    cf_conn.describe_stack_set(StackSetName="stack_from_url")["StackSet"][
        "TemplateBody"
    ].should.equal(dummy_template_json)
@mock_cloudformation
def test_boto3_create_stack_set_with_ref_yaml():
    """A YAML template using Ref/!Ref is stored verbatim on the stack set."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    params = [
        {"ParameterKey": "TagDescription", "ParameterValue": "desc_ref"},
        {"ParameterKey": "TagName", "ParameterValue": "name_ref"},
    ]
    cf_conn.create_stack_set(
        StackSetName="test_stack",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=params,
    )
    cf_conn.describe_stack_set(StackSetName="test_stack")["StackSet"][
        "TemplateBody"
    ].should.equal(dummy_template_yaml_with_ref)
@mock_cloudformation
def test_boto3_describe_stack_set_params():
    """Parameters passed at creation are returned by describe_stack_set."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    params = [
        {"ParameterKey": "TagDescription", "ParameterValue": "desc_ref"},
        {"ParameterKey": "TagName", "ParameterValue": "name_ref"},
    ]
    cf_conn.create_stack_set(
        StackSetName="test_stack",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=params,
    )
    cf_conn.describe_stack_set(StackSetName="test_stack")["StackSet"][
        "Parameters"
    ].should.equal(params)
@mock_cloudformation
def test_boto3_describe_stack_set_by_id():
    """describe_stack_set also accepts the StackSetId in place of the name."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    response = cf_conn.create_stack_set(
        StackSetName="test_stack", TemplateBody=dummy_template_json,
    )
    stack_set_id = response["StackSetId"]
    cf_conn.describe_stack_set(StackSetName=stack_set_id)["StackSet"][
        "TemplateBody"
    ].should.equal(dummy_template_json)
@mock_cloudformation
def test_boto3_create_stack():
    """get_template returns the stored JSON template with key order preserved."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    client.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    expected = json.loads(dummy_template_json, object_pairs_hook=OrderedDict)
    assert client.get_template(StackName="test_stack")["TemplateBody"] == expected
@mock_cloudformation
def test_boto3_create_stack_fail_missing_parameter():
    """Omitting a required template parameter fails with a descriptive message."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    with pytest.raises(ClientError, match="Missing parameter KeyName"):
        client.create_stack(
            StackName="test_stack", TemplateBody=dummy_parametrized_template_json
        )
@mock_cloudformation
def test_boto3_create_stack_s3_long_name():
    """Bucket names derived from very long stack names are truncated below 64 chars.

    The generated physical id is "<truncated-stack-name>-<logical-id>-<suffix>";
    the prefix before the logical id must still be a prefix of the stack name.
    """
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    stack_name = "MyLongStackName01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012"
    template = '{"Resources":{"HelloBucket":{"Type":"AWS::S3::Bucket"}}}'
    cf_conn.create_stack(StackName=stack_name, TemplateBody=template)
    cf_conn.get_template(StackName=stack_name)["TemplateBody"].should.equal(
        json.loads(template, object_pairs_hook=OrderedDict)
    )
    provisioned_resource = cf_conn.list_stack_resources(StackName=stack_name)[
        "StackResourceSummaries"
    ][0]
    provisioned_bucket_name = provisioned_resource["PhysicalResourceId"]
    # S3 bucket names are limited to 63 characters.
    len(provisioned_bucket_name).should.be.lower_than(64)
    logical_name_lower_case = provisioned_resource["LogicalResourceId"].lower()
    bucket_name_stack_name_prefix = provisioned_bucket_name[
        : provisioned_bucket_name.index("-" + logical_name_lower_case)
    ]
    stack_name.lower().should.contain(bucket_name_stack_name_prefix)
@mock_cloudformation
def test_boto3_create_stack_with_yaml():
    """A YAML template round-trips unchanged through get_template."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    client.create_stack(StackName="test_stack", TemplateBody=dummy_template_yaml)
    stored = client.get_template(StackName="test_stack")["TemplateBody"]
    assert stored == dummy_template_yaml
@mock_cloudformation
def test_boto3_create_stack_with_short_form_func_yaml():
    """A YAML template with short-form intrinsics (!Join) round-trips unchanged."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(
        StackName="test_stack", TemplateBody=dummy_template_yaml_with_short_form_func
    )
    cf_conn.get_template(StackName="test_stack")["TemplateBody"].should.equal(
        dummy_template_yaml_with_short_form_func
    )
@mock_s3
@mock_cloudformation
def test_get_template_summary():
    """get_template_summary works with TemplateBody, StackName, TemplateURL and YAML."""
    s3 = boto3.client("s3", region_name="us-east-1")
    s3_conn = boto3.resource("s3", region_name="us-east-1")
    conn = boto3.client("cloudformation", region_name="us-east-1")
    # 1) Inline JSON template body.
    result = conn.get_template_summary(TemplateBody=json.dumps(dummy_template3))
    result["ResourceTypes"].should.equal(["AWS::EC2::VPC"])
    result["Version"].should.equal("2010-09-09")
    result["Description"].should.equal("Stack 3")
    # 2) Summary of an existing stack by name.
    conn.create_stack(StackName="test_stack", TemplateBody=json.dumps(dummy_template3))
    result = conn.get_template_summary(StackName="test_stack")
    result["ResourceTypes"].should.equal(["AWS::EC2::VPC"])
    result["Version"].should.equal("2010-09-09")
    result["Description"].should.equal("Stack 3")
    # 3) Template fetched from a presigned S3 URL.
    s3_conn.create_bucket(Bucket="foobar")
    s3_conn.Object("foobar", "template-key").put(Body=json.dumps(dummy_template3))
    key_url = s3.generate_presigned_url(
        ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
    )
    conn.create_stack(StackName="stack_from_url", TemplateURL=key_url)
    result = conn.get_template_summary(TemplateURL=key_url)
    result["ResourceTypes"].should.equal(["AWS::EC2::VPC"])
    result["Version"].should.equal("2010-09-09")
    result["Description"].should.equal("Stack 3")
    # 4) Inline YAML template body.
    conn = boto3.client("cloudformation", region_name="us-east-1")
    result = conn.get_template_summary(TemplateBody=dummy_template_yaml)
    result["ResourceTypes"].should.equal(["AWS::EC2::Instance"])
    result["Version"].should.equal("2010-09-09")
    result["Description"].should.equal("Stack1 with yaml template")
@mock_cloudformation
def test_boto3_create_stack_with_ref_yaml():
    """A YAML template using Ref/!Ref round-trips unchanged through get_template."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    params = [
        {"ParameterKey": "TagDescription", "ParameterValue": "desc_ref"},
        {"ParameterKey": "TagName", "ParameterValue": "name_ref"},
    ]
    cf_conn.create_stack(
        StackName="test_stack",
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=params,
    )
    cf_conn.get_template(StackName="test_stack")["TemplateBody"].should.equal(
        dummy_template_yaml_with_ref
    )
@mock_cloudformation
def test_creating_stacks_across_regions():
    """Stacks created in different regions are tracked independently."""
    resources = {
        region: boto3.resource("cloudformation", region_name=region)
        for region in ("us-west-1", "us-west-2")
    }
    for cf in resources.values():
        cf.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    # Each region sees exactly one stack.
    for cf in resources.values():
        assert len(list(cf.stacks.all())) == 1
@mock_cloudformation
def test_create_stack_with_notification_arn():
    """NotificationARNs supplied at creation are stored on the stack."""
    topic_arn = "arn:aws:sns:us-east-1:{}:fake-queue".format(ACCOUNT_ID)
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    cf.create_stack(
        StackName="test_stack_with_notifications",
        TemplateBody=dummy_template_json,
        NotificationARNs=[topic_arn],
    )
    stack = list(cf.stacks.all())[0]
    assert topic_arn in stack.notification_arns
@mock_cloudformation
def test_create_stack_with_role_arn():
    """The RoleARN supplied at creation is echoed back on the stack."""
    role_arn = "arn:aws:iam::{}:role/moto".format(ACCOUNT_ID)
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    cf.create_stack(
        StackName="test_stack_with_notifications",
        TemplateBody=dummy_template_json,
        RoleARN=role_arn,
    )
    assert list(cf.stacks.all())[0].role_arn == role_arn
@mock_cloudformation
@mock_s3
def test_create_stack_from_s3_url():
    """A stack can be created from a template stored behind a presigned S3 URL."""
    s3 = boto3.client("s3", region_name="us-east-1")
    s3_conn = boto3.resource("s3", region_name="us-east-1")
    s3_conn.create_bucket(Bucket="foobar")
    s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json)
    key_url = s3.generate_presigned_url(
        ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
    )
    cf_conn = boto3.client("cloudformation", region_name="us-west-1")
    cf_conn.create_stack(StackName="stack_from_url", TemplateURL=key_url)
    cf_conn.get_template(StackName="stack_from_url")["TemplateBody"].should.equal(
        json.loads(dummy_template_json, object_pairs_hook=OrderedDict)
    )
@mock_cloudformation
def test_boto3_update_stack_fail_missing_new_parameter():
    """Updating to a template with a new required parameter fails without it."""
    name = "update_stack_fail_missing_new_parameter"
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName=name, TemplateBody=dummy_empty_template_json)
    with pytest.raises(ClientError, match="Missing parameter KeyName"):
        cf_conn.update_stack(
            StackName=name, TemplateBody=dummy_parametrized_template_json
        )
@mock_cloudformation
def test_boto3_update_stack_deleted_resources_can_reference_deleted_parameters():
    """Removing a resource that references a removed parameter must not fail."""
    name = "update_stack_deleted_resources_can_reference_deleted_parameters"
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    template_json = json.dumps(
        {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Parameters": {"TimeoutParameter": {"Default": 61, "Type": "String"}},
            "Resources": {
                "Queue": {
                    "Type": "AWS::SQS::Queue",
                    "Properties": {"VisibilityTimeout": {"Ref": "TimeoutParameter"}},
                }
            },
        }
    )
    cf_conn.create_stack(StackName=name, TemplateBody=template_json)
    response = cf_conn.describe_stack_resources(StackName=name)
    len(response["StackResources"]).should.equal(1)
    # Updating to an empty template deletes both parameter and resource together.
    cf_conn.update_stack(StackName=name, TemplateBody=dummy_empty_template_json)
    response = cf_conn.describe_stack_resources(StackName=name)
    len(response["StackResources"]).should.equal(0)
@mock_cloudformation
def test_boto3_update_stack_deleted_resources_can_reference_deleted_resources():
    """Removing resources that reference each other (Subnet -> VPC) must not fail."""
    name = "update_stack_deleted_resources_can_reference_deleted_resources"
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    template_json = json.dumps(
        {
            "AWSTemplateFormatVersion": "2010-09-09",
            "Parameters": {"TimeoutParameter": {"Default": 61, "Type": "String"}},
            "Resources": {
                "VPC": {
                    "Type": "AWS::EC2::VPC",
                    "Properties": {"CidrBlock": "10.0.0.0/16"},
                },
                "Subnet": {
                    "Type": "AWS::EC2::Subnet",
                    "Properties": {"VpcId": {"Ref": "VPC"}, "CidrBlock": "10.0.0.0/24"},
                },
            },
        }
    )
    cf_conn.create_stack(StackName=name, TemplateBody=template_json)
    response = cf_conn.describe_stack_resources(StackName=name)
    len(response["StackResources"]).should.equal(2)
    cf_conn.update_stack(StackName=name, TemplateBody=dummy_empty_template_json)
    response = cf_conn.describe_stack_resources(StackName=name)
    len(response["StackResources"]).should.equal(0)
@mock_cloudformation
def test_update_stack_with_previous_value():
    """UsePreviousTemplate/UsePreviousValue keep earlier template and parameter."""
    name = "update_stack_with_previous_value"
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(
        StackName=name,
        TemplateBody=dummy_template_yaml_with_ref,
        Parameters=[
            {"ParameterKey": "TagName", "ParameterValue": "foo"},
            {"ParameterKey": "TagDescription", "ParameterValue": "bar"},
        ],
    )
    # Keep TagName from the previous deployment; change only TagDescription.
    cf_conn.update_stack(
        StackName=name,
        UsePreviousTemplate=True,
        Parameters=[
            {"ParameterKey": "TagName", "UsePreviousValue": True},
            {"ParameterKey": "TagDescription", "ParameterValue": "not bar"},
        ],
    )
    stack = cf_conn.describe_stacks(StackName=name)["Stacks"][0]
    tag_name = [
        x["ParameterValue"]
        for x in stack["Parameters"]
        if x["ParameterKey"] == "TagName"
    ][0]
    tag_desc = [
        x["ParameterValue"]
        for x in stack["Parameters"]
        if x["ParameterKey"] == "TagDescription"
    ][0]
    assert tag_name == "foo"
    assert tag_desc == "not bar"
@mock_cloudformation
@mock_s3
@mock_ec2
def test_update_stack_from_s3_url():
    """update_stack accepts a TemplateURL pointing at a presigned S3 object."""
    s3 = boto3.client("s3", region_name="us-east-1")
    s3_conn = boto3.resource("s3", region_name="us-east-1")
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(
        StackName="update_stack_from_url",
        TemplateBody=dummy_template_json,
        Tags=[{"Key": "foo", "Value": "bar"}],
    )
    s3_conn.create_bucket(Bucket="foobar")
    s3_conn.Object("foobar", "template-key").put(Body=dummy_update_template_json)
    key_url = s3.generate_presigned_url(
        ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
    )
    cf_conn.update_stack(
        StackName="update_stack_from_url",
        TemplateURL=key_url,
        Parameters=[{"ParameterKey": "KeyName", "ParameterValue": "value"}],
    )
    # get_template must now return the updated template.
    cf_conn.get_template(StackName="update_stack_from_url")[
        "TemplateBody"
    ].should.equal(
        json.loads(dummy_update_template_json, object_pairs_hook=OrderedDict)
    )
@mock_cloudformation
@mock_s3
def test_create_change_set_from_s3_url():
    """A CREATE change set can source its template from a presigned S3 URL."""
    s3 = boto3.client("s3", region_name="us-east-1")
    s3_conn = boto3.resource("s3", region_name="us-east-1")
    s3_conn.create_bucket(Bucket="foobar")
    s3_conn.Object("foobar", "template-key").put(Body=dummy_template_json)
    key_url = s3.generate_presigned_url(
        ClientMethod="get_object", Params={"Bucket": "foobar", "Key": "template-key"}
    )
    cf_conn = boto3.client("cloudformation", region_name="us-west-1")
    response = cf_conn.create_change_set(
        StackName="NewStack",
        TemplateURL=key_url,
        ChangeSetName="NewChangeSet",
        ChangeSetType="CREATE",
        Tags=[{"Key": "tag-key", "Value": "tag-value"}],
    )
    # NOTE(review): these ARNs use a hard-coded "123456789" account instead of
    # the ACCOUNT_ID constant ("123456789012") used elsewhere in this file.
    # The test passing implies moto itself embeds this 9-digit account in
    # change-set ARNs — confirm against moto's change-set model before
    # "fixing" this to ACCOUNT_ID.
    assert (
        "arn:aws:cloudformation:us-west-1:123456789:changeSet/NewChangeSet/"
        in response["Id"]
    )
    assert (
        "arn:aws:cloudformation:us-west-1:123456789:stack/NewStack"
        in response["StackId"]
    )
@mock_cloudformation
@mock_ec2
def test_describe_change_set():
    """Full change-set lifecycle: CREATE set, execute it, then an UPDATE set.

    Checks the status fields and Changes payload moto reports before and
    after execution for both change-set types.
    """
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_change_set(
        StackName="NewStack",
        TemplateBody=dummy_template_json,
        ChangeSetName="NewChangeSet",
        ChangeSetType="CREATE",
    )
    # Before execution the set is fully created but still AVAILABLE to run.
    stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet")
    stack["ChangeSetName"].should.equal("NewChangeSet")
    stack["StackName"].should.equal("NewStack")
    stack["Status"].should.equal("CREATE_COMPLETE")
    stack["ExecutionStatus"].should.equal("AVAILABLE")
    two_secs_ago = datetime.now(tz=pytz.UTC) - timedelta(seconds=2)
    assert (
        two_secs_ago < stack["CreationTime"] < datetime.now(tz=pytz.UTC)
    ), "Change set should have been created recently"
    stack["Changes"].should.have.length_of(1)
    stack["Changes"][0].should.equal(
        dict(
            {
                "Type": "Resource",
                "ResourceChange": {
                    "Action": "Add",
                    "LogicalResourceId": "EC2Instance1",
                    "ResourceType": "AWS::EC2::Instance",
                },
            }
        )
    )
    # Executing the CREATE set materializes the EC2 instance from the template.
    cf_conn.execute_change_set(ChangeSetName="NewChangeSet")
    ec2 = boto3.client("ec2", region_name="us-east-1")
    ec2.describe_instances()["Reservations"].should.have.length_of(1)
    change_set = cf_conn.describe_change_set(ChangeSetName="NewChangeSet")
    change_set["Changes"].should.have.length_of(1)
    change_set["ExecutionStatus"].should.equal("EXECUTE_COMPLETE")
    stack = cf_conn.describe_stacks(StackName="NewStack")["Stacks"][0]
    stack["StackStatus"].should.equal("CREATE_COMPLETE")
    # An UPDATE-type change set against the now-existing stack reports 2 changes.
    cf_conn.create_change_set(
        StackName="NewStack",
        TemplateBody=dummy_update_template_json,
        ChangeSetName="NewChangeSet2",
        ChangeSetType="UPDATE",
        Parameters=[{"ParameterKey": "KeyName", "ParameterValue": "value"}],
    )
    stack = cf_conn.describe_change_set(ChangeSetName="NewChangeSet2")
    stack["ChangeSetName"].should.equal("NewChangeSet2")
    stack["StackName"].should.equal("NewStack")
    stack["Changes"].should.have.length_of(2)
    cf_conn.execute_change_set(ChangeSetName="NewChangeSet2")
    stack = cf_conn.describe_stacks(StackName="NewStack")["Stacks"][0]
    stack["StackStatus"].should.equal("UPDATE_COMPLETE")
@mock_cloudformation
@mock_ec2
def test_execute_change_set_w_arn():
    """A change set can be executed by its full ARN (the Id field)."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    ec2 = boto3.client("ec2", region_name="us-east-1")
    assert len(ec2.describe_instances()["Reservations"]) == 0
    change_set = cf_conn.create_change_set(
        StackName="NewStack",
        TemplateBody=dummy_template_json,
        ChangeSetName="NewChangeSet",
        ChangeSetType="CREATE",
    )
    # Creating the change set must not launch anything by itself.
    assert len(ec2.describe_instances()["Reservations"]) == 0
    status = cf_conn.describe_change_set(ChangeSetName="NewChangeSet")["Status"]
    assert status == "CREATE_COMPLETE"
    cf_conn.execute_change_set(ChangeSetName=change_set["Id"])
    status = cf_conn.describe_change_set(ChangeSetName="NewChangeSet")["Status"]
    assert status == "CREATE_COMPLETE"
    # Execution materializes the EC2 instance declared in the template.
    assert len(ec2.describe_instances()["Reservations"]) == 1
@mock_cloudformation
def test_execute_change_set_w_name():
    """A change set can be executed by ChangeSetName + StackName.

    Previously this test issued the calls without checking the outcome;
    it now verifies that executing the CREATE-type set actually leaves the
    stack in CREATE_COMPLETE.
    """
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_change_set(
        StackName="NewStack",
        TemplateBody=dummy_template_json,
        ChangeSetName="NewChangeSet",
        ChangeSetType="CREATE",
    )
    cf_conn.execute_change_set(ChangeSetName="NewChangeSet", StackName="NewStack")
    # Executing a CREATE change set should have created the stack.
    stack = cf_conn.describe_stacks(StackName="NewStack")["Stacks"][0]
    assert stack["StackStatus"] == "CREATE_COMPLETE"
@mock_cloudformation
def test_describe_stack_pagination():
    """describe_stacks pages at 50 stacks and returns a NextToken."""
    conn = boto3.client("cloudformation", region_name="us-east-1")
    for i in range(100):
        conn.create_stack(
            StackName="test_stack_{}".format(i), TemplateBody=dummy_template_json
        )
    first_page = conn.describe_stacks()
    collected = list(first_page["Stacks"])
    assert len(collected) == 50
    token = first_page["NextToken"]
    assert token is not None
    second_page = conn.describe_stacks(NextToken=token)
    collected.extend(second_page["Stacks"])
    assert len(collected) == 100
    # The final page must not advertise another token.
    assert "NextToken" not in second_page
@mock_cloudformation
def test_describe_stack_resource():
    """describe_stack_resource returns detail for a single logical resource."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]
    response = cf_conn.describe_stack_resource(
        StackName=stack["StackName"], LogicalResourceId="EC2Instance1"
    )
    detail = response["StackResourceDetail"]
    expected = {
        "LogicalResourceId": "EC2Instance1",
        "ResourceStatus": "CREATE_COMPLETE",
        "ResourceType": "AWS::EC2::Instance",
        "StackId": stack["StackId"],
    }
    for key, value in expected.items():
        assert detail[key] == value
@mock_cloudformation
def test_describe_stack_resource_when_resource_does_not_exist():
    """Looking up an unknown LogicalResourceId raises a ClientError."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack_name = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0][
        "StackName"
    ]
    with pytest.raises(ClientError, match="does not exist for stack"):
        cf_conn.describe_stack_resource(
            StackName=stack_name, LogicalResourceId="DoesNotExist"
        )
@mock_cloudformation
def test_describe_stack_resources():
    """describe_stack_resources lists the resources of the stack."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]
    resources = cf_conn.describe_stack_resources(StackName=stack["StackName"])[
        "StackResources"
    ]
    first = resources[0]
    assert first["LogicalResourceId"] == "EC2Instance1"
    assert first["ResourceStatus"] == "CREATE_COMPLETE"
    assert first["ResourceType"] == "AWS::EC2::Instance"
    assert first["StackId"] == stack["StackId"]
@mock_cloudformation
def test_describe_stack_by_name():
    """A stack is retrievable by name and carries a recent CreationTime."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]
    assert stack["StackName"] == "test_stack"
    lower_bound = datetime.now(tz=pytz.UTC) - timedelta(seconds=2)
    assert (
        lower_bound < stack["CreationTime"] < datetime.now(tz=pytz.UTC)
    ), "Stack should have been created recently"
@mock_cloudformation
def test_describe_stack_by_stack_id():
    """describe_stacks also accepts the stack's full StackId."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    created = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]
    fetched = cf_conn.describe_stacks(StackName=created["StackId"])["Stacks"][0]
    assert fetched["StackId"] == created["StackId"]
    assert fetched["StackName"] == "test_stack"
@mock_cloudformation
def test_list_change_sets():
    """list_change_sets returns a summary for each change set of a stack."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_change_set(
        StackName="NewStack2",
        TemplateBody=dummy_template_json,
        ChangeSetName="NewChangeSet2",
        ChangeSetType="CREATE",
    )
    summary = cf_conn.list_change_sets(StackName="NewStack2")["Summaries"][0]
    assert summary["StackName"] == "NewStack2"
    assert summary["ChangeSetName"] == "NewChangeSet2"
@mock_cloudformation
def test_list_stacks():
    """The resource API iterates over every stack in the region."""
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    cf.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    cf.create_stack(StackName="test_stack2", TemplateBody=dummy_template_json)
    names = [s.stack_name for s in cf.stacks.all()]
    assert len(names) == 2
    assert "test_stack" in names
    assert "test_stack2" in names
@mock_cloudformation
def test_delete_stack_from_resource():
    """Stack.delete() removes the stack from the resource collection."""
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    stack = cf.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    assert len(list(cf.stacks.all())) == 1
    stack.delete()
    assert len(list(cf.stacks.all())) == 0
@mock_cloudformation
@mock_ec2
def test_delete_change_set():
    """delete_change_set works with either the change-set name or its ARN."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")

    def count_change_sets():
        # Number of change sets currently attached to NewStack.
        return len(cf_conn.list_change_sets(StackName="NewStack")["Summaries"])

    cf_conn.create_change_set(
        StackName="NewStack",
        TemplateBody=dummy_template_json,
        ChangeSetName="NewChangeSet",
        ChangeSetType="CREATE",
    )
    assert count_change_sets() == 1
    # Delete by name.
    cf_conn.delete_change_set(ChangeSetName="NewChangeSet", StackName="NewStack")
    assert count_change_sets() == 0
    result = cf_conn.create_change_set(
        StackName="NewStack",
        TemplateBody=dummy_template_json,
        ChangeSetName="NewChangeSet1",
        ChangeSetType="CREATE",
    )
    # Delete by ARN (the Id returned from create_change_set).
    cf_conn.delete_change_set(ChangeSetName=result.get("Id"), StackName="NewStack")
    assert count_change_sets() == 0
@mock_cloudformation
@mock_ec2
def test_delete_stack_by_name():
    """delete_stack removes the stack from describe_stacks output."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    assert len(cf_conn.describe_stacks()["Stacks"]) == 1
    cf_conn.delete_stack(StackName="test_stack")
    assert len(cf_conn.describe_stacks()["Stacks"]) == 0
@mock_cloudformation
def test_delete_stack():
    """A deleted stack still shows up in list_stacks as DELETE_COMPLETE."""
    cf = boto3.client("cloudformation", region_name="us-east-1")
    cf.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    cf.delete_stack(StackName="test_stack")
    summaries = cf.list_stacks()["StackSummaries"]
    assert summaries[0]["StackStatus"] == "DELETE_COMPLETE"
@mock_cloudformation
def test_describe_deleted_stack():
    """A deleted stack remains describable by StackId with DELETE_COMPLETE."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]
    stack_id = stack["StackId"]
    cf_conn.delete_stack(StackName=stack["StackId"])
    deleted = cf_conn.describe_stacks(StackName=stack_id)["Stacks"][0]
    assert deleted["StackId"] == stack["StackId"]
    assert deleted["StackName"] == "test_stack"
    assert deleted["StackStatus"] == "DELETE_COMPLETE"
@mock_cloudformation
def test_describe_stack_with_special_chars():
    """Angle brackets in the template Description survive the round trip."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(
        StackName="test_stack_spl",
        TemplateBody=dummy_template_special_chars_in_description_json,
    )
    described = cf_conn.describe_stacks(StackName="test_stack_spl")["Stacks"][0]
    assert described["StackName"] == "test_stack_spl"
    assert described["Description"] == "Stack 1 <env>"
@mock_cloudformation
@mock_ec2
def test_describe_updated_stack():
    """After update_stack, describe_stacks reflects new tags, role and status."""
    cf_conn = boto3.client("cloudformation", region_name="us-east-1")
    cf_conn.create_stack(
        StackName="test_stack",
        TemplateBody=dummy_template_json,
        Tags=[{"Key": "foo", "Value": "bar"}],
    )
    role_arn = "arn:aws:iam::{}:role/moto".format(ACCOUNT_ID)
    cf_conn.update_stack(
        StackName="test_stack",
        RoleARN=role_arn,
        TemplateBody=dummy_update_template_json,
        Tags=[{"Key": "foo", "Value": "baz"}],
        Parameters=[{"ParameterKey": "KeyName", "ParameterValue": "value"}],
    )
    stack_id = cf_conn.describe_stacks(StackName="test_stack")["Stacks"][0]["StackId"]
    updated = cf_conn.describe_stacks(StackName=stack_id)["Stacks"][0]
    assert updated["StackId"] == stack_id
    assert updated["StackName"] == "test_stack"
    assert updated["StackStatus"] == "UPDATE_COMPLETE"
    assert updated["RoleARN"] == role_arn
    assert updated["Tags"] == [{"Key": "foo", "Value": "baz"}]
@mock_cloudformation
def test_bad_describe_stack():
    """Describing a nonexistent stack raises a ClientError."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    with pytest.raises(ClientError):
        client.describe_stacks(StackName="non_existent_stack")
@mock_cloudformation()
def test_cloudformation_params():
    """Parameters supplied at create time override template defaults."""
    template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Description": "Stack 1",
        "Resources": {},
        "Parameters": {
            "APPNAME": {
                "Default": "app-name",
                "Description": "The name of the app",
                "Type": "String",
            }
        },
    }
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    stack = cf.create_stack(
        StackName="test_stack",
        TemplateBody=json.dumps(template),
        Parameters=[{"ParameterKey": "APPNAME", "ParameterValue": "testing123"}],
    )
    assert len(stack.parameters) == 1
    param = stack.parameters[0]
    assert param["ParameterKey"] == "APPNAME"
    assert param["ParameterValue"] == "testing123"
@mock_cloudformation
def test_stack_tags():
    """Tags passed to create_stack come back on the stack resource."""
    tags = [{"Key": "foo", "Value": "bar"}, {"Key": "baz", "Value": "bleh"}]
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    stack = cf.create_stack(
        StackName="test_stack", TemplateBody=dummy_template_json, Tags=tags
    )

    def as_pairs(tag_list):
        # Flatten tag dicts into a set of (key, value) pairs so that tag
        # ordering does not affect the comparison.
        return {pair for tag in tag_list for pair in tag.items()}

    assert as_pairs(stack.tags) == as_pairs(tags)
@mock_cloudformation
@mock_ec2
def test_stack_events():
    """Create, update and delete a stack, then verify the emitted event stream.

    The expected (status, reason) pairs are consumed in the order the events
    come back, so the list below is in reverse chronological order (the
    matching implies events are returned newest-first).
    """
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    stack = cf.create_stack(StackName="test_stack", TemplateBody=dummy_template_json)
    stack.update(
        TemplateBody=dummy_update_template_json,
        Parameters=[{"ParameterKey": "KeyName", "ParameterValue": "value"}],
    )
    stack = cf.Stack(stack.stack_id)
    stack.delete()
    events = list(stack.events.all())
    events[0].resource_type.should.equal("AWS::CloudFormation::Stack")
    events[-1].resource_type.should.equal("AWS::CloudFormation::Stack")
    # Expected stack-level events; None means "do not check the reason".
    stack_events_to_look_for = iter(
        [
            ("DELETE_COMPLETE", None),
            ("DELETE_IN_PROGRESS", "User Initiated"),
            ("UPDATE_COMPLETE", None),
            ("UPDATE_IN_PROGRESS", "User Initiated"),
            ("CREATE_COMPLETE", None),
            ("CREATE_IN_PROGRESS", "User Initiated"),
        ]
    )
    try:
        for event in events:
            event.stack_id.should.equal(stack.stack_id)
            event.stack_name.should.equal("test_stack")
            event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}")
            if event.resource_type == "AWS::CloudFormation::Stack":
                event.logical_resource_id.should.equal("test_stack")
                event.physical_resource_id.should.equal(stack.stack_id)
                status_to_look_for, reason_to_look_for = next(stack_events_to_look_for)
                event.resource_status.should.equal(status_to_look_for)
                if reason_to_look_for is not None:
                    event.resource_status_reason.should.equal(reason_to_look_for)
    except StopIteration:
        # next() ran past the expected list: more stack events than anticipated.
        assert False, "Too many stack events"
    # Every expected event must have been consumed by the loop above.
    list(stack_events_to_look_for).should.be.empty
    # Asking for events of an unknown stack must fail with ValidationError.
    with pytest.raises(ClientError) as exp:
        stack = cf.Stack("non_existing_stack")
        events = list(stack.events.all())
    exp_err = exp.value.response.get("Error")
    exp_metadata = exp.value.response.get("ResponseMetadata")
    exp_err.get("Code").should.match(r"ValidationError")
    exp_err.get("Message").should.match(
        r"Stack with id non_existing_stack does not exist"
    )
    exp_metadata.get("HTTPStatusCode").should.equal(400)
@mock_cloudformation
def test_list_exports():
    """Stack outputs flagged for export appear in list_exports."""
    cf_client = boto3.client("cloudformation", region_name="us-east-1")
    cf_resource = boto3.resource("cloudformation", region_name="us-east-1")
    stack = cf_resource.create_stack(
        StackName="test_stack", TemplateBody=dummy_output_template_json
    )
    output_value = "VPCID"
    exports = cf_client.list_exports()["Exports"]
    assert len(stack.outputs) == 1
    assert stack.outputs[0]["OutputValue"] == output_value
    assert len(exports) == 1
    export = exports[0]
    assert export["ExportingStackId"] == stack.stack_id
    assert export["Name"] == "My VPC ID"
    assert export["Value"] == output_value
@mock_cloudformation
def test_list_exports_with_token():
    """list_exports pages at 100 exports and honors NextToken.

    Each stack exports one uniquely named value. The previous version mutated
    the shared ``dummy_output_template`` module global in place (appending to
    the export name cumulatively), leaking state into other tests; the
    template is now copied per iteration instead.
    """
    cf = boto3.client("cloudformation", region_name="us-east-1")
    for i in range(101):
        # Round-trip through JSON to get an independent copy of the template.
        template = json.loads(json.dumps(dummy_output_template))
        template["Outputs"]["StackVPC"]["Export"]["Name"] += str(i)
        cf.create_stack(
            StackName="test_stack_{}".format(i),
            TemplateBody=json.dumps(template),
        )
    exports = cf.list_exports()
    exports["Exports"].should.have.length_of(100)
    exports.get("NextToken").should_not.be.none
    more_exports = cf.list_exports(NextToken=exports["NextToken"])
    more_exports["Exports"].should.have.length_of(1)
    more_exports.get("NextToken").should.be.none
@mock_cloudformation
def test_delete_stack_with_export():
    """Deleting a stack removes its exports."""
    cf = boto3.client("cloudformation", region_name="us-east-1")
    stack_id = cf.create_stack(
        StackName="test_stack", TemplateBody=dummy_output_template_json
    )["StackId"]
    assert len(cf.list_exports()["Exports"]) == 1
    cf.delete_stack(StackName=stack_id)
    assert len(cf.list_exports()["Exports"]) == 0
@mock_cloudformation
def test_export_names_must_be_unique():
    """A second stack exporting an already-used name must be rejected.

    The second stack uses a *different* StackName so that the only conflict
    is the duplicate export name; with an identical stack name the call
    would fail for the duplicate-stack reason instead and the export-name
    check would go untested.
    """
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    cf.create_stack(StackName="test_stack", TemplateBody=dummy_output_template_json)
    with pytest.raises(ClientError):
        cf.create_stack(
            StackName="test_stack2", TemplateBody=dummy_output_template_json
        )
@mock_sqs
@mock_cloudformation
def test_stack_with_imports():
    """Fn::ImportValue in one stack resolves an export from another.

    The importing template creates an SQS queue named after the exported
    output value. (Renamed the misleading ``ec2_resource`` local — it is an
    SQS resource.)
    """
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    sqs_resource = boto3.resource("sqs", region_name="us-east-1")
    output_stack = cf.create_stack(
        StackName="test_stack1", TemplateBody=dummy_output_template_json
    )
    cf.create_stack(StackName="test_stack2", TemplateBody=dummy_import_template_json)
    assert len(output_stack.outputs) == 1
    output = output_stack.outputs[0]["OutputValue"]
    queue = sqs_resource.get_queue_by_name(QueueName=output)
    assert queue is not None
@mock_sqs
@mock_cloudformation
def test_non_json_redrive_policy():
    """A non-JSON RedrivePolicy still creates both queues successfully."""
    cf = boto3.resource("cloudformation", region_name="us-east-1")
    stack = cf.create_stack(
        StackName="test_stack1", TemplateBody=dummy_redrive_template_json
    )
    for logical_id in ("MainQueue", "DeadLetterQueue"):
        assert stack.Resource(logical_id).resource_status == "CREATE_COMPLETE"
@mock_cloudformation
def test_boto3_create_duplicate_stack():
    """Creating a stack whose name already exists raises a ClientError."""
    client = boto3.client("cloudformation", region_name="us-east-1")
    client.create_stack(
        StackName="test_stack", TemplateBody=dummy_template_json,
    )
    with pytest.raises(ClientError):
        client.create_stack(
            StackName="test_stack", TemplateBody=dummy_template_json,
        )
@mock_dynamodb2
@mock_cloudformation
def test_delete_stack_dynamo_template():
    """Deleting a stack tears down the DynamoDB table it created."""
    conn = boto3.client("cloudformation", region_name="us-east-1")
    dynamodb_client = boto3.client("dynamodb", region_name="us-east-1")

    def table_count():
        return len(dynamodb_client.list_tables().get("TableNames"))

    conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json4)
    assert table_count() == 1
    conn.delete_stack(StackName="test_stack")
    assert table_count() == 0
    # The stack (and its table) can be recreated after deletion.
    conn.create_stack(StackName="test_stack", TemplateBody=dummy_template_json4)
| true | true |
1c2c55c0e9953e586f31be7674dab04396932620 | 2,783 | py | Python | jiant-russian-v2/setup.py | RussianNLP/RussianSuperGLUE | dafecf6f734835ba22cff5ea3ec2ff70cca3cd3c | [
"MIT"
] | 74 | 2020-06-11T11:37:57.000Z | 2022-03-07T09:44:05.000Z | jiant-russian-v2/setup.py | RussianNLP/RussianSuperGLUE | dafecf6f734835ba22cff5ea3ec2ff70cca3cd3c | [
"MIT"
] | 1 | 2020-06-11T12:08:50.000Z | 2020-12-15T10:51:50.000Z | jiant-russian-v2/setup.py | RussianNLP/RussianSuperGLUE | dafecf6f734835ba22cff5ea3ec2ff70cca3cd3c | [
"MIT"
] | 13 | 2020-06-18T11:53:19.000Z | 2022-03-23T17:15:44.000Z | """Setuptools package definition for PyPI/pip distribution
Dependencies will need to be updated in the "install_requires" of setup()
below. Those dependencies are used to create the CircleCI virtual environment.
These are generally the same dependencies as in environment.yml, but should be
limited to dependencies required by most users. New directories added under
the jiant directory will also need to be added to the "packages" section of
setup().
Distributions are automatically versioned based on git tags. After creating a
new git tag, a release can be created by running:
# install twine, if necessary
# pip install --user twine
# create distribution
python setup.py sdist bdist_wheel
# upload to PyPI
python -m twine upload dist/*
Twine will prompt for login. Login details can be stored for reuse in the file
"~/.pypirc". See https://docs.python.org/3.3/distutils/packageindex.html#pypirc
If you need to test a distribution before tagging, you can use the following
(with example version 0.1.0rc1), but take care to delete the distribution from
dist before the next twine upload to PyPI:
SETUPTOOLS_SCM_PRETEND_VERSION=0.1.0rc1 python setup.py sdist bdist_wheel
python -m twine upload --repository-url https://test.pypi.org/legacy/ dist/*
"""
import setuptools
# The README doubles as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="jiant",
    author="NYU Machine Learning for Language Group",
    author_email="bowman@nyu.edu",
    description="jiant is a software toolkit for natural language processing research, designed to \
facilitate work on multitask learning and transfer learning for sentence understanding tasks.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/nyu-mll/jiant",
    license="MIT",
    # Every package directory under jiant/ must be listed here; new
    # subdirectories need a matching entry (see module docstring).
    packages=[
        "jiant",
        "jiant.allennlp_mods",
        "jiant.metrics",
        "jiant.modules",
        "jiant.modules.onlstm",
        "jiant.modules.prpn",
        "jiant.huggingface_transformers_interface",
        "jiant.tasks",
        "jiant.utils",
    ],
    # Runtime dependencies: generally the same as environment.yml but limited
    # to what most users need (see module docstring).
    install_requires=[
        "torch==1.0.*",
        "numpy==1.14.5",
        "pandas==0.23.0",
        "allennlp==0.8.4",
        "jsondiff",
        "nltk==3.4.5",
        "pyhocon==0.3.35",
        "python-Levenshtein==0.12.0",
        "sacremoses",
        "transformers==2.3.0",
        "ftfy",
        "spacy",
    ],
    # Distribution version is derived automatically from git tags.
    use_scm_version=True,
    setup_requires=["setuptools_scm"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    include_package_data=True,
    package_data={"": ["jiant/config/**/*.conf"]},
)
| 33.939024 | 100 | 0.679123 | import setuptools
# The README doubles as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="jiant",
    author="NYU Machine Learning for Language Group",
    author_email="bowman@nyu.edu",
    description="jiant is a software toolkit for natural language processing research, designed to \
facilitate work on multitask learning and transfer learning for sentence understanding tasks.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/nyu-mll/jiant",
    license="MIT",
    # Every package directory under jiant/ must be listed here; new
    # subdirectories need a matching entry.
    packages=[
        "jiant",
        "jiant.allennlp_mods",
        "jiant.metrics",
        "jiant.modules",
        "jiant.modules.onlstm",
        "jiant.modules.prpn",
        "jiant.huggingface_transformers_interface",
        "jiant.tasks",
        "jiant.utils",
    ],
    # Runtime dependencies, limited to what most users need.
    install_requires=[
        "torch==1.0.*",
        "numpy==1.14.5",
        "pandas==0.23.0",
        "allennlp==0.8.4",
        "jsondiff",
        "nltk==3.4.5",
        "pyhocon==0.3.35",
        "python-Levenshtein==0.12.0",
        "sacremoses",
        "transformers==2.3.0",
        "ftfy",
        "spacy",
    ],
    # Distribution version is derived automatically from git tags.
    use_scm_version=True,
    setup_requires=["setuptools_scm"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    include_package_data=True,
    package_data={"": ["jiant/config/**/*.conf"]},
)
| true | true |
1c2c5656f8cb8cb081a07cb79d30fe7e82e2ac65 | 2,110 | py | Python | gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/dummy_driver.py | baodongli/group-based-policy | f3b892ecdc1051b204376e18679f73bf457ce7dc | [
"Apache-2.0"
] | null | null | null | gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/dummy_driver.py | baodongli/group-based-policy | f3b892ecdc1051b204376e18679f73bf457ce7dc | [
"Apache-2.0"
] | null | null | null | gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/dummy_driver.py | baodongli/group-based-policy | f3b892ecdc1051b204376e18679f73bf457ce7dc | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import helpers as log
from gbpservice.neutron.services.servicechain.plugins.ncp import driver_base
class NoopNodeDriver(driver_base.NodeDriverBase):
    """Node driver whose lifecycle hooks are all no-ops.

    Every hook is wrapped with ``log.log_method_call`` so invocations are
    logged, but no action is taken.
    """
    # Set to True by initialize(); lets callers check whether the driver is ready.
    initialized = False
    @log.log_method_call
    def initialize(self, name):
        """Store the driver name and flag the driver as initialized."""
        self.initialized = True
        self._name = name
    @log.log_method_call
    def get_plumbing_info(self, context):
        """No plumbing requirements; implicitly returns None."""
        pass
    @log.log_method_call
    def validate_create(self, context):
        """Accept any create request without validation."""
        pass
    @log.log_method_call
    def validate_update(self, context):
        """Accept any update request without validation."""
        pass
    @log.log_method_call
    def create(self, context):
        """No-op create hook."""
        pass
    @log.log_method_call
    def delete(self, context):
        """No-op delete hook."""
        pass
    @log.log_method_call
    def update(self, context):
        """No-op update hook."""
        pass
    @log.log_method_call
    def update_policy_target_added(self, context, policy_target):
        """No-op: ignore a policy target being added."""
        pass
    @log.log_method_call
    def update_policy_target_removed(self, context, policy_target):
        """No-op: ignore a policy target being removed."""
        pass
    @log.log_method_call
    def update_node_consumer_ptg_added(self, context, policy_target_group):
        """No-op: ignore a consumer policy target group being added."""
        pass
    @log.log_method_call
    def update_node_consumer_ptg_removed(self, context, policy_target_group):
        """No-op: ignore a consumer policy target group being removed."""
        pass
    @log.log_method_call
    def notify_chain_parameters_updated(self, context):
        """No-op: ignore chain parameter updates."""
        pass
    @log.log_method_call
    def policy_target_group_updated(self, context, old_policy_target_group,
                                    current_policy_target_group):
        """No-op: ignore policy target group updates."""
        pass
    @property
    def name(self):
        """Name supplied at initialize() time."""
        return self._name
| 26.708861 | 78 | 0.693839 |
from oslo_log import helpers as log
from gbpservice.neutron.services.servicechain.plugins.ncp import driver_base
class NoopNodeDriver(driver_base.NodeDriverBase):
    """Node driver whose lifecycle hooks are all no-ops.

    Every hook is wrapped with ``log.log_method_call`` so invocations are
    logged, but no action is taken.
    """
    # Set to True by initialize(); lets callers check whether the driver is ready.
    initialized = False
    @log.log_method_call
    def initialize(self, name):
        """Store the driver name and flag the driver as initialized."""
        self.initialized = True
        self._name = name
    @log.log_method_call
    def get_plumbing_info(self, context):
        """No plumbing requirements; implicitly returns None."""
        pass
    @log.log_method_call
    def validate_create(self, context):
        """Accept any create request without validation."""
        pass
    @log.log_method_call
    def validate_update(self, context):
        """Accept any update request without validation."""
        pass
    @log.log_method_call
    def create(self, context):
        """No-op create hook."""
        pass
    @log.log_method_call
    def delete(self, context):
        """No-op delete hook."""
        pass
    @log.log_method_call
    def update(self, context):
        """No-op update hook."""
        pass
    @log.log_method_call
    def update_policy_target_added(self, context, policy_target):
        """No-op: ignore a policy target being added."""
        pass
    @log.log_method_call
    def update_policy_target_removed(self, context, policy_target):
        """No-op: ignore a policy target being removed."""
        pass
    @log.log_method_call
    def update_node_consumer_ptg_added(self, context, policy_target_group):
        """No-op: ignore a consumer policy target group being added."""
        pass
    @log.log_method_call
    def update_node_consumer_ptg_removed(self, context, policy_target_group):
        """No-op: ignore a consumer policy target group being removed."""
        pass
    @log.log_method_call
    def notify_chain_parameters_updated(self, context):
        """No-op: ignore chain parameter updates."""
        pass
    @log.log_method_call
    def policy_target_group_updated(self, context, old_policy_target_group,
                                    current_policy_target_group):
        """No-op: ignore policy target group updates."""
        pass
    @property
    def name(self):
        """Name supplied at initialize() time."""
        return self._name
| true | true |
1c2c572a8c7ab69a86c0a51ef94ba47c41f04215 | 621 | py | Python | tasks/migrations/0001_initial.py | akshaygepl1/NewTodoApp | 87b697bd1e454279aa329154ec72ba8e9a40c4e6 | [
"MIT"
] | null | null | null | tasks/migrations/0001_initial.py | akshaygepl1/NewTodoApp | 87b697bd1e454279aa329154ec72ba8e9a40c4e6 | [
"MIT"
] | null | null | null | tasks/migrations/0001_initial.py | akshaygepl1/NewTodoApp | 87b697bd1e454279aa329154ec72ba8e9a40c4e6 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-07-30 09:08
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('complete', models.BooleanField(default=False)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
]
| 25.875 | 114 | 0.57649 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Task table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                # Whether the task has been finished; new tasks start open.
                ('complete', models.BooleanField(default=False)),
                # Timestamp set automatically when the row is first saved.
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| true | true |
1c2c593c533af0bcc8d96984fc534b7dcc96e506 | 19,656 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/protocols.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/protocols.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/protocols.py | Vibaswan/ixnetwork_restpy | 239fedc7050890746cbabd71ea1e91c68d9e5cad | [
"MIT"
] | null | null | null | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Protocols(Base):
"""Allows the user to select a set of protocols that are enabled for a newly added port.
The Protocols class encapsulates a list of protocols resources that are managed by the system.
A list of resources can be retrieved from the server using the Protocols.find() method.
"""
__slots__ = ()
_SDM_NAME = 'protocols'
_SDM_ATT_MAP = {
'ProtocolMaxNodeCount': 'protocolMaxNodeCount',
}
    def __init__(self, parent):
        """Anchor this SDM node under *parent* in the REST object hierarchy."""
        super(Protocols, self).__init__(parent)
@property
def Arp(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.arp_02ad3a87af613c0238e23157d241afd5.Arp): An instance of the Arp class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.arp_02ad3a87af613c0238e23157d241afd5 import Arp
if self._properties.get('Arp', None) is None:
return Arp(self)
else:
return self._properties.get('Arp')
@property
def Bfd(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bfd_d5e88ec401a1941c80be1a5a01aa0e3b.Bfd): An instance of the Bfd class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bfd_d5e88ec401a1941c80be1a5a01aa0e3b import Bfd
if self._properties.get('Bfd', None) is None:
return Bfd(self)._select()
else:
return self._properties.get('Bfd')
@property
def Bgp(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bgp_eaa18e059aea3db7489e5e5975c78393.Bgp): An instance of the Bgp class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bgp_eaa18e059aea3db7489e5e5975c78393 import Bgp
if self._properties.get('Bgp', None) is None:
return Bgp(self)._select()
else:
return self._properties.get('Bgp')
@property
def Cfm(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.cfm_a96182d068c89cc1a09d61bc9826454e.Cfm): An instance of the Cfm class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.cfm_a96182d068c89cc1a09d61bc9826454e import Cfm
if self._properties.get('Cfm', None) is None:
return Cfm(self)._select()
else:
return self._properties.get('Cfm')
@property
def Eigrp(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.eigrp_8c0911879bb0fc33556819b581ef1cd5.Eigrp): An instance of the Eigrp class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.eigrp_8c0911879bb0fc33556819b581ef1cd5 import Eigrp
if self._properties.get('Eigrp', None) is None:
return Eigrp(self)._select()
else:
return self._properties.get('Eigrp')
@property
def Elmi(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.elmi_e04c43a1f4f61fd0c7e21feec7754432.Elmi): An instance of the Elmi class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.elmi_e04c43a1f4f61fd0c7e21feec7754432 import Elmi
if self._properties.get('Elmi', None) is None:
return Elmi(self)._select()
else:
return self._properties.get('Elmi')
@property
def Igmp(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.igmp_9e398aa1578ce37d86fcb3bff949b26a.Igmp): An instance of the Igmp class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.igmp_9e398aa1578ce37d86fcb3bff949b26a import Igmp
if self._properties.get('Igmp', None) is None:
return Igmp(self)._select()
else:
return self._properties.get('Igmp')
@property
def Isis(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.isis_ed275f3afdd4b191a3a0bf3a3e4fcf23.Isis): An instance of the Isis class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.isis_ed275f3afdd4b191a3a0bf3a3e4fcf23 import Isis
if self._properties.get('Isis', None) is None:
return Isis(self)._select()
else:
return self._properties.get('Isis')
@property
def Lacp(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lacp_c536b7fd522e328f25d1b7294149d8d5.Lacp): An instance of the Lacp class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lacp_c536b7fd522e328f25d1b7294149d8d5 import Lacp
if self._properties.get('Lacp', None) is None:
return Lacp(self)._select()
else:
return self._properties.get('Lacp')
@property
def Ldp(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ldp_896c0d96a713502abc9340d8047a5f9d.Ldp): An instance of the Ldp class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ldp_896c0d96a713502abc9340d8047a5f9d import Ldp
if self._properties.get('Ldp', None) is None:
return Ldp(self)._select()
else:
return self._properties.get('Ldp')
@property
def LinkOam(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.linkoam_5924c549b7c15a4bb415f30f997378d2.LinkOam): An instance of the LinkOam class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.linkoam_5924c549b7c15a4bb415f30f997378d2 import LinkOam
if self._properties.get('LinkOam', None) is None:
return LinkOam(self)._select()
else:
return self._properties.get('LinkOam')
@property
def Lisp(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lisp_7412d0bff0b602ec68ba446d28f90626.Lisp): An instance of the Lisp class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lisp_7412d0bff0b602ec68ba446d28f90626 import Lisp
if self._properties.get('Lisp', None) is None:
return Lisp(self)._select()
else:
return self._properties.get('Lisp')
@property
def Mld(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mld_f1161f34d4a4d525e0d1c8288951f7e6.Mld): An instance of the Mld class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mld_f1161f34d4a4d525e0d1c8288951f7e6 import Mld
if self._properties.get('Mld', None) is None:
return Mld(self)._select()
else:
return self._properties.get('Mld')
@property
def MplsOam(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplsoam_c5ec6f5217261009d57292d968097bd8.MplsOam): An instance of the MplsOam class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplsoam_c5ec6f5217261009d57292d968097bd8 import MplsOam
if self._properties.get('MplsOam', None) is None:
return MplsOam(self)._select()
else:
return self._properties.get('MplsOam')
@property
def MplsTp(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplstp_77f517ba5758139a40ffca4f733d6d7d.MplsTp): An instance of the MplsTp class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplstp_77f517ba5758139a40ffca4f733d6d7d import MplsTp
if self._properties.get('MplsTp', None) is None:
return MplsTp(self)._select()
else:
return self._properties.get('MplsTp')
@property
def OpenFlow(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.openflow_ade3a58bbdcdf92d5bc0db71ff6aa555.OpenFlow): An instance of the OpenFlow class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.openflow_ade3a58bbdcdf92d5bc0db71ff6aa555 import OpenFlow
if self._properties.get('OpenFlow', None) is None:
return OpenFlow(self)._select()
else:
return self._properties.get('OpenFlow')
@property
def Ospf(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ospf_ececb387932f5a00700a5d4ee1d36de9.Ospf): An instance of the Ospf class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ospf_ececb387932f5a00700a5d4ee1d36de9 import Ospf
if self._properties.get('Ospf', None) is None:
return Ospf(self)._select()
else:
return self._properties.get('Ospf')
@property
def OspfV3(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ospfv3_34232cdb18da44847a8884d00dc4b2ba.OspfV3): An instance of the OspfV3 class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ospfv3_34232cdb18da44847a8884d00dc4b2ba import OspfV3
if self._properties.get('OspfV3', None) is None:
return OspfV3(self)._select()
else:
return self._properties.get('OspfV3')
@property
def Pimsm(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.pimsm_33ba141f2aa1f04e67b06fb3580eec0d.Pimsm): An instance of the Pimsm class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.pimsm_33ba141f2aa1f04e67b06fb3580eec0d import Pimsm
if self._properties.get('Pimsm', None) is None:
return Pimsm(self)._select()
else:
return self._properties.get('Pimsm')
@property
def Ping(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ping_0ede3677595a61e44be090b8970ef408.Ping): An instance of the Ping class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ping_0ede3677595a61e44be090b8970ef408 import Ping
if self._properties.get('Ping', None) is None:
return Ping(self)
else:
return self._properties.get('Ping')
@property
def Rip(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.rip_ad63955be154e2f3b80204b8ae6114dc.Rip): An instance of the Rip class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.rip_ad63955be154e2f3b80204b8ae6114dc import Rip
if self._properties.get('Rip', None) is None:
return Rip(self)._select()
else:
return self._properties.get('Rip')
@property
def Ripng(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ripng_cf5e0e826b2079e4dfc76d9c1ce9f4e8.Ripng): An instance of the Ripng class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ripng_cf5e0e826b2079e4dfc76d9c1ce9f4e8 import Ripng
if self._properties.get('Ripng', None) is None:
return Ripng(self)._select()
else:
return self._properties.get('Ripng')
@property
def Rsvp(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.rsvp_94f31353a0f6b62a8a1920dfb1c87e99.Rsvp): An instance of the Rsvp class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.rsvp_94f31353a0f6b62a8a1920dfb1c87e99 import Rsvp
if self._properties.get('Rsvp', None) is None:
return Rsvp(self)._select()
else:
return self._properties.get('Rsvp')
@property
def Static(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.static_6aeb02f98d8f1810c1a06cf568987348.Static): An instance of the Static class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.static_6aeb02f98d8f1810c1a06cf568987348 import Static
if self._properties.get('Static', None) is None:
return Static(self)._select()
else:
return self._properties.get('Static')
@property
def Stp(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.stp_4de133a97c1f0320b38f5c69c651d367.Stp): An instance of the Stp class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.stp_4de133a97c1f0320b38f5c69c651d367 import Stp
if self._properties.get('Stp', None) is None:
return Stp(self)._select()
else:
return self._properties.get('Stp')
@property
def ProtocolMaxNodeCount(self):
"""
Returns
-------
- number: Shows maximum number of node.
"""
return self._get_attribute(self._SDM_ATT_MAP['ProtocolMaxNodeCount'])
def find(self, ProtocolMaxNodeCount=None):
"""Finds and retrieves protocols resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve protocols resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all protocols resources from the server.
Args
----
- ProtocolMaxNodeCount (number): Shows maximum number of node.
Returns
-------
- self: This instance with matching protocols resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of protocols data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the protocols resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
| 38.315789 | 165 | 0.661172 |
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Protocols(Base):
__slots__ = ()
_SDM_NAME = 'protocols'
_SDM_ATT_MAP = {
'ProtocolMaxNodeCount': 'protocolMaxNodeCount',
}
def __init__(self, parent):
super(Protocols, self).__init__(parent)
@property
def Arp(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.arp_02ad3a87af613c0238e23157d241afd5 import Arp
if self._properties.get('Arp', None) is None:
return Arp(self)
else:
return self._properties.get('Arp')
@property
def Bfd(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bfd_d5e88ec401a1941c80be1a5a01aa0e3b import Bfd
if self._properties.get('Bfd', None) is None:
return Bfd(self)._select()
else:
return self._properties.get('Bfd')
@property
def Bgp(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bgp_eaa18e059aea3db7489e5e5975c78393 import Bgp
if self._properties.get('Bgp', None) is None:
return Bgp(self)._select()
else:
return self._properties.get('Bgp')
@property
def Cfm(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.cfm_a96182d068c89cc1a09d61bc9826454e import Cfm
if self._properties.get('Cfm', None) is None:
return Cfm(self)._select()
else:
return self._properties.get('Cfm')
@property
def Eigrp(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.eigrp_8c0911879bb0fc33556819b581ef1cd5 import Eigrp
if self._properties.get('Eigrp', None) is None:
return Eigrp(self)._select()
else:
return self._properties.get('Eigrp')
@property
def Elmi(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.elmi_e04c43a1f4f61fd0c7e21feec7754432 import Elmi
if self._properties.get('Elmi', None) is None:
return Elmi(self)._select()
else:
return self._properties.get('Elmi')
@property
def Igmp(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.igmp_9e398aa1578ce37d86fcb3bff949b26a import Igmp
if self._properties.get('Igmp', None) is None:
return Igmp(self)._select()
else:
return self._properties.get('Igmp')
@property
def Isis(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.isis_ed275f3afdd4b191a3a0bf3a3e4fcf23 import Isis
if self._properties.get('Isis', None) is None:
return Isis(self)._select()
else:
return self._properties.get('Isis')
@property
def Lacp(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lacp_c536b7fd522e328f25d1b7294149d8d5 import Lacp
if self._properties.get('Lacp', None) is None:
return Lacp(self)._select()
else:
return self._properties.get('Lacp')
@property
def Ldp(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ldp_896c0d96a713502abc9340d8047a5f9d import Ldp
if self._properties.get('Ldp', None) is None:
return Ldp(self)._select()
else:
return self._properties.get('Ldp')
@property
def LinkOam(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.linkoam_5924c549b7c15a4bb415f30f997378d2 import LinkOam
if self._properties.get('LinkOam', None) is None:
return LinkOam(self)._select()
else:
return self._properties.get('LinkOam')
@property
def Lisp(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.lisp_7412d0bff0b602ec68ba446d28f90626 import Lisp
if self._properties.get('Lisp', None) is None:
return Lisp(self)._select()
else:
return self._properties.get('Lisp')
@property
def Mld(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mld_f1161f34d4a4d525e0d1c8288951f7e6 import Mld
if self._properties.get('Mld', None) is None:
return Mld(self)._select()
else:
return self._properties.get('Mld')
@property
def MplsOam(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplsoam_c5ec6f5217261009d57292d968097bd8 import MplsOam
if self._properties.get('MplsOam', None) is None:
return MplsOam(self)._select()
else:
return self._properties.get('MplsOam')
@property
def MplsTp(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.mplstp_77f517ba5758139a40ffca4f733d6d7d import MplsTp
if self._properties.get('MplsTp', None) is None:
return MplsTp(self)._select()
else:
return self._properties.get('MplsTp')
@property
def OpenFlow(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.openflow_ade3a58bbdcdf92d5bc0db71ff6aa555 import OpenFlow
if self._properties.get('OpenFlow', None) is None:
return OpenFlow(self)._select()
else:
return self._properties.get('OpenFlow')
@property
def Ospf(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ospf_ececb387932f5a00700a5d4ee1d36de9 import Ospf
if self._properties.get('Ospf', None) is None:
return Ospf(self)._select()
else:
return self._properties.get('Ospf')
@property
def OspfV3(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ospfv3_34232cdb18da44847a8884d00dc4b2ba import OspfV3
if self._properties.get('OspfV3', None) is None:
return OspfV3(self)._select()
else:
return self._properties.get('OspfV3')
@property
def Pimsm(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.pimsm_33ba141f2aa1f04e67b06fb3580eec0d import Pimsm
if self._properties.get('Pimsm', None) is None:
return Pimsm(self)._select()
else:
return self._properties.get('Pimsm')
@property
def Ping(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ping_0ede3677595a61e44be090b8970ef408 import Ping
if self._properties.get('Ping', None) is None:
return Ping(self)
else:
return self._properties.get('Ping')
@property
def Rip(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.rip_ad63955be154e2f3b80204b8ae6114dc import Rip
if self._properties.get('Rip', None) is None:
return Rip(self)._select()
else:
return self._properties.get('Rip')
@property
def Ripng(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.ripng_cf5e0e826b2079e4dfc76d9c1ce9f4e8 import Ripng
if self._properties.get('Ripng', None) is None:
return Ripng(self)._select()
else:
return self._properties.get('Ripng')
@property
def Rsvp(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.rsvp_94f31353a0f6b62a8a1920dfb1c87e99 import Rsvp
if self._properties.get('Rsvp', None) is None:
return Rsvp(self)._select()
else:
return self._properties.get('Rsvp')
@property
def Static(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.static_6aeb02f98d8f1810c1a06cf568987348 import Static
if self._properties.get('Static', None) is None:
return Static(self)._select()
else:
return self._properties.get('Static')
@property
def Stp(self):
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.stp_4de133a97c1f0320b38f5c69c651d367 import Stp
if self._properties.get('Stp', None) is None:
return Stp(self)._select()
else:
return self._properties.get('Stp')
@property
def ProtocolMaxNodeCount(self):
return self._get_attribute(self._SDM_ATT_MAP['ProtocolMaxNodeCount'])
def find(self, ProtocolMaxNodeCount=None):
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
return self._read(href)
| true | true |
1c2c59574a1b63bcdd79eda9edb55687ab1f37bd | 9,787 | py | Python | pgn_read/samples/_utilities.py | RogerMarsh/pgn-read | 514031110afcdcda29bb769f532e0993525abc85 | [
"BSD-3-Clause"
] | 2 | 2021-02-21T07:13:48.000Z | 2021-02-25T21:50:15.000Z | pgn_read/samples/_utilities.py | RogerMarsh/pgn-read | 514031110afcdcda29bb769f532e0993525abc85 | [
"BSD-3-Clause"
] | null | null | null | pgn_read/samples/_utilities.py | RogerMarsh/pgn-read | 514031110afcdcda29bb769f532e0993525abc85 | [
"BSD-3-Clause"
] | 1 | 2021-02-21T14:47:29.000Z | 2021-02-21T14:47:29.000Z | # _utilities.py
# Copyright 2020 Roger Marsh
# Licence: See LICENCE (BSD licence)
"""Utilities to run a sample module."""
import tkinter
import tkinter.ttk
import tkinter.filedialog
import tkinter.messagebox
import os
import time
from ..core.parser import PGN
from ..core.game import generate_fen_for_position
def read_pgn(filename, game_class=None, size=10000000):
"""Return ok and error counts, and PGN text in error."""
game_ok_count = 0
game_not_ok_count = 0
game_ok_token_count = 0
game_not_ok_token_count = 0
games_with_error = []
last_fen_before_error = []
error_text = []
error_game_number = []
for y in PGN(game_class=game_class).read_games(
open(filename, encoding="iso-8859-1"), size=size
):
if y.state is None:
game_ok_count += 1
game_ok_token_count += len(y._text)
else:
game_not_ok_count += 1
game_not_ok_token_count += len(y._text)
games_with_error.append(y._text[: y.state])
if y._piece_placement_data:
last_fen_before_error.append(
generate_fen_for_position(
y._piece_placement_data.values(),
y._active_color,
y._castling_availability,
y._en_passant_target_square,
y._halfmove_clock,
y._fullmove_number,
)
)
else:
last_fen_before_error.append("No FEN")
error_text.append(y._text[y.state :])
error_game_number.append(game_ok_count + game_not_ok_count)
return (
game_ok_count,
game_not_ok_count,
game_ok_token_count,
game_not_ok_token_count,
games_with_error,
last_fen_before_error,
error_text,
error_game_number,
)
class Main:
"""Select PGN file to process."""
_START_TEXT = "".join(("Right-click for menu.\n",))
def __init__(self, game_class=None, size=10000000, samples_title=""):
"""Build the user interface."""
self.game_class = game_class
self.size = size
root = tkinter.Tk()
root.wm_title(string=samples_title)
root.wm_resizable(width=tkinter.FALSE, height=tkinter.TRUE)
tkinter.ttk.Label(master=root, text="PGN file").grid(row=0, column=0)
tkinter.ttk.Label(master=root, text="Log").grid(
row=1, column=1, pady=5
)
entry = tkinter.ttk.Entry(master=root)
entry.grid(row=0, column=1, columnspan=2, sticky="ew", pady=5)
pgn_file = tkinter.StringVar(root, "")
entry["textvariable"] = pgn_file
frame = tkinter.ttk.Frame(master=root)
frame.grid(row=2, column=0, columnspan=4, sticky="nsew")
root.rowconfigure(2, weight=1)
text = tkinter.Text(master=frame, wrap=tkinter.WORD)
scrollbar = tkinter.ttk.Scrollbar(
master=frame, orient=tkinter.VERTICAL, command=text.yview
)
text.configure(yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
text.pack(side=tkinter.RIGHT, fill=tkinter.Y)
self.menu = tkinter.Menu(master=frame, tearoff=False)
self.__menu = self.menu
self.root = root
self.text = text
self.entry = entry
self.pgn_file = pgn_file
self.set_menu_and_entry_events(True)
entry.bind("<ButtonPress-3>", self.show_menu)
text.bind("<ButtonPress-3>", self.show_menu)
self.insert_text(self._START_TEXT)
entry.focus_set()
def insert_text(self, text):
"""Wrap Text widget insert with Enable and Disable state configure."""
self.text.insert(tkinter.END, text)
def report_action(self, msg):
"""Show dialogue to report an action."""
self.insert_text("\n")
self.insert_text(" ".join(msg))
tkinter.messagebox.showinfo(master=self.root, message="\n".join(msg))
def report_error(self, msg):
"""Show dialogue to report an error."""
self.insert_text("\n")
self.insert_text(" ".join(msg))
tkinter.messagebox.showerror(master=self.root, message="\n".join(msg))
def show_menu(self, event=None):
"""Show the popup menu for widget."""
self.__menu.tk_popup(*event.widget.winfo_pointerxy())
self.__xy = event.x, event.y
self.__menu = self.menu
def select_pgn_file(self, event=None):
"""Select a PGN file."""
filename = tkinter.filedialog.askopenfilename(
title="PGN file of Games",
defaultextension=".pgn",
filetypes=(("PGN Chess Games", "*.pgn"),),
)
if filename:
self.pgn_file.set(filename)
def process_pgn_file(self, event=None):
"""Process PGN file."""
if self.pgn_file.get() == "":
tkinter.messagebox.showerror(
master=self.root, message="Please select a PGN file."
)
return
path = self.pgn_file.get()
if not os.path.exists(path):
msg = (
"Cannot process\n",
self.pgn_file.get(),
"\nwhich does not exist.",
)
self.report_error(msg)
return
if not os.path.isfile(path):
msg = (
"Cannot process\n",
self.pgn_file.get(),
"\nbecause it is not a file.",
)
self.report_error(msg)
return
start = time.process_time()
goc, gnoc, gotc, gnotc, gwe, lfbe, et, egn = read_pgn(
path, game_class=self.game_class, size=self.size
)
end = time.process_time()
m, s = divmod(round(end - start), 60)
t = ":".join((str(m).zfill(2), str(s).zfill(2)))
self.insert_text("\n")
self.insert_text(" ".join(("PGN file", os.path.basename(path))))
self.insert_text("\n")
self.report_summary(start, end, t, goc, gnoc, gotc, gnotc)
self.insert_text("\n")
for c, z in enumerate(zip(gwe, lfbe, et, egn)):
g, f, e, n = z
self.insert_text("\n")
self.insert_text(
" ".join(
(
"Game number in file:",
str(n),
"\t\tNot ok game number:",
str(c + 1),
)
)
)
self.insert_text("\n")
self.insert_text(" ".join(g))
self.insert_text("".join(("\n", f)))
self.insert_text("\n")
self.insert_text(" ".join(e))
self.insert_text("\n")
if et:
self.report_summary(start, end, t, goc, gnoc, gotc, gnotc)
self.insert_text("\n")
self.report_action(
(
"PGN file",
os.path.basename(path),
"done at",
time.ctime(),
)
)
self.insert_text("\n")
self.pgn_file.set("")
def report_summary(self, start, end, t, goc, gnoc, gotc, gnotc):
"""Add report summary to application report widget."""
self.insert_text("\n")
self.insert_text(" ".join((t, "time to process file")))
if end - start < 10:
self.insert_text("\n")
self.insert_text(
" ".join((str(end - start), "exact process time in seconds"))
)
self.insert_text("\n")
self.insert_text(" ".join(("Game ok count", str(goc))))
self.insert_text("\n")
self.insert_text(" ".join(("Game not ok count", str(gnoc))))
self.insert_text("\n")
self.insert_text(" ".join(("Game ok token count", str(gotc))))
self.insert_text("\n")
self.insert_text(" ".join(("Game not ok token count", str(gnotc))))
def set_menu_and_entry_events(self, active):
"""Turn events for opening a URL on if active is True otherwise off."""
menu = self.menu
if active:
menu.add_separator()
menu.add_command(
label="Process PGN File",
command=self.process_pgn_file,
accelerator="Alt F4",
)
menu.add_separator()
menu.add_command(
label="Select PGN File",
command=self.select_pgn_file,
accelerator="Alt F5",
)
menu.add_separator()
else:
menu.delete(0, tkinter.END)
for entry in (self.text,):
self._bind_for_scrolling_only(entry)
for entry in self.entry, self.text:
entry.bind(
"<Alt-KeyPress-F5>", "" if not active else self.select_pgn_file
)
entry.bind(
"<Alt-KeyPress-F4>",
"" if not active else self.process_pgn_file,
)
entry.bind(
"<KeyPress-Return>",
"" if not active else self.process_pgn_file,
)
def _bind_for_scrolling_only(self, widget):
widget.bind("<KeyPress>", "break")
widget.bind("<Home>", "return")
widget.bind("<Left>", "return")
widget.bind("<Up>", "return")
widget.bind("<Right>", "return")
widget.bind("<Down>", "return")
widget.bind("<Prior>", "return")
widget.bind("<Next>", "return")
widget.bind("<End>", "return")
def main(game_class=None, size=10000000, samples_title=""):
"""Run sample application."""
Main(
game_class=game_class, size=size, samples_title=samples_title
).root.mainloop()
| 35.33213 | 79 | 0.547154 |
import tkinter
import tkinter.ttk
import tkinter.filedialog
import tkinter.messagebox
import os
import time
from ..core.parser import PGN
from ..core.game import generate_fen_for_position
def read_pgn(filename, game_class=None, size=10000000):
game_ok_count = 0
game_not_ok_count = 0
game_ok_token_count = 0
game_not_ok_token_count = 0
games_with_error = []
last_fen_before_error = []
error_text = []
error_game_number = []
for y in PGN(game_class=game_class).read_games(
open(filename, encoding="iso-8859-1"), size=size
):
if y.state is None:
game_ok_count += 1
game_ok_token_count += len(y._text)
else:
game_not_ok_count += 1
game_not_ok_token_count += len(y._text)
games_with_error.append(y._text[: y.state])
if y._piece_placement_data:
last_fen_before_error.append(
generate_fen_for_position(
y._piece_placement_data.values(),
y._active_color,
y._castling_availability,
y._en_passant_target_square,
y._halfmove_clock,
y._fullmove_number,
)
)
else:
last_fen_before_error.append("No FEN")
error_text.append(y._text[y.state :])
error_game_number.append(game_ok_count + game_not_ok_count)
return (
game_ok_count,
game_not_ok_count,
game_ok_token_count,
game_not_ok_token_count,
games_with_error,
last_fen_before_error,
error_text,
error_game_number,
)
class Main:
_START_TEXT = "".join(("Right-click for menu.\n",))
def __init__(self, game_class=None, size=10000000, samples_title=""):
self.game_class = game_class
self.size = size
root = tkinter.Tk()
root.wm_title(string=samples_title)
root.wm_resizable(width=tkinter.FALSE, height=tkinter.TRUE)
tkinter.ttk.Label(master=root, text="PGN file").grid(row=0, column=0)
tkinter.ttk.Label(master=root, text="Log").grid(
row=1, column=1, pady=5
)
entry = tkinter.ttk.Entry(master=root)
entry.grid(row=0, column=1, columnspan=2, sticky="ew", pady=5)
pgn_file = tkinter.StringVar(root, "")
entry["textvariable"] = pgn_file
frame = tkinter.ttk.Frame(master=root)
frame.grid(row=2, column=0, columnspan=4, sticky="nsew")
root.rowconfigure(2, weight=1)
text = tkinter.Text(master=frame, wrap=tkinter.WORD)
scrollbar = tkinter.ttk.Scrollbar(
master=frame, orient=tkinter.VERTICAL, command=text.yview
)
text.configure(yscrollcommand=scrollbar.set)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
text.pack(side=tkinter.RIGHT, fill=tkinter.Y)
self.menu = tkinter.Menu(master=frame, tearoff=False)
self.__menu = self.menu
self.root = root
self.text = text
self.entry = entry
self.pgn_file = pgn_file
self.set_menu_and_entry_events(True)
entry.bind("<ButtonPress-3>", self.show_menu)
text.bind("<ButtonPress-3>", self.show_menu)
self.insert_text(self._START_TEXT)
entry.focus_set()
def insert_text(self, text):
self.text.insert(tkinter.END, text)
def report_action(self, msg):
self.insert_text("\n")
self.insert_text(" ".join(msg))
tkinter.messagebox.showinfo(master=self.root, message="\n".join(msg))
def report_error(self, msg):
self.insert_text("\n")
self.insert_text(" ".join(msg))
tkinter.messagebox.showerror(master=self.root, message="\n".join(msg))
def show_menu(self, event=None):
self.__menu.tk_popup(*event.widget.winfo_pointerxy())
self.__xy = event.x, event.y
self.__menu = self.menu
def select_pgn_file(self, event=None):
filename = tkinter.filedialog.askopenfilename(
title="PGN file of Games",
defaultextension=".pgn",
filetypes=(("PGN Chess Games", "*.pgn"),),
)
if filename:
self.pgn_file.set(filename)
    def process_pgn_file(self, event=None):
        """Parse the selected PGN file and write a per-game report to the log.

        Validates the selected path, runs ``read_pgn`` (defined elsewhere in
        this module), then logs a summary plus one section per game that
        failed to parse. Clears the file selection when done.
        """
        if self.pgn_file.get() == "":
            tkinter.messagebox.showerror(
                master=self.root, message="Please select a PGN file."
            )
            return
        path = self.pgn_file.get()
        if not os.path.exists(path):
            msg = (
                "Cannot process\n",
                self.pgn_file.get(),
                "\nwhich does not exist.",
            )
            self.report_error(msg)
            return
        if not os.path.isfile(path):
            msg = (
                "Cannot process\n",
                self.pgn_file.get(),
                "\nbecause it is not a file.",
            )
            self.report_error(msg)
            return
        start = time.process_time()
        # read_pgn is defined elsewhere in this module. Presumed meaning of
        # the returned tuple (from usage below — confirm against read_pgn):
        # game ok/not-ok counts, ok/not-ok token counts, games-with-errors,
        # lines-following-errors, error texts, error game numbers.
        goc, gnoc, gotc, gnotc, gwe, lfbe, et, egn = read_pgn(
            path, game_class=self.game_class, size=self.size
        )
        end = time.process_time()
        # Format elapsed CPU time as MM:SS.
        m, s = divmod(round(end - start), 60)
        t = ":".join((str(m).zfill(2), str(s).zfill(2)))
        self.insert_text("\n")
        self.insert_text(" ".join(("PGN file", os.path.basename(path))))
        self.insert_text("\n")
        self.report_summary(start, end, t, goc, gnoc, gotc, gnotc)
        self.insert_text("\n")
        for c, z in enumerate(zip(gwe, lfbe, et, egn)):
            g, f, e, n = z
            self.insert_text("\n")
            self.insert_text(
                " ".join(
                    (
                        "Game number in file:",
                        str(n),
                        "\t\tNot ok game number:",
                        str(c + 1),
                    )
                )
            )
            self.insert_text("\n")
            self.insert_text(" ".join(g))
            self.insert_text("".join(("\n", f)))
            self.insert_text("\n")
            self.insert_text(" ".join(e))
            self.insert_text("\n")
        # Repeat the summary after a long error listing so it is visible
        # at the bottom of the log too.
        if et:
            self.report_summary(start, end, t, goc, gnoc, gotc, gnotc)
            self.insert_text("\n")
        self.report_action(
            (
                "PGN file",
                os.path.basename(path),
                "done at",
                time.ctime(),
            )
        )
        self.insert_text("\n")
        # Clear the selection so the same file is not reprocessed by accident.
        self.pgn_file.set("")
def report_summary(self, start, end, t, goc, gnoc, gotc, gnotc):
self.insert_text("\n")
self.insert_text(" ".join((t, "time to process file")))
if end - start < 10:
self.insert_text("\n")
self.insert_text(
" ".join((str(end - start), "exact process time in seconds"))
)
self.insert_text("\n")
self.insert_text(" ".join(("Game ok count", str(goc))))
self.insert_text("\n")
self.insert_text(" ".join(("Game not ok count", str(gnoc))))
self.insert_text("\n")
self.insert_text(" ".join(("Game ok token count", str(gotc))))
self.insert_text("\n")
self.insert_text(" ".join(("Game not ok token count", str(gnotc))))
    def set_menu_and_entry_events(self, active):
        """Populate or clear the context menu and (un)bind its key shortcuts.

        :param active: when True, add menu commands and bind Alt-F4/Alt-F5/
            Return on the entry and text widgets; when False, empty the menu
            and replace those bindings with no-ops.
        """
        menu = self.menu
        if active:
            # NOTE: "accelerator" only labels the menu entry; the actual
            # key bindings are installed in the loop below.
            menu.add_separator()
            menu.add_command(
                label="Process PGN File",
                command=self.process_pgn_file,
                accelerator="Alt F4",
            )
            menu.add_separator()
            menu.add_command(
                label="Select PGN File",
                command=self.select_pgn_file,
                accelerator="Alt F5",
            )
            menu.add_separator()
        else:
            menu.delete(0, tkinter.END)
        # The log pane is always read-only apart from scrolling keys.
        for entry in (self.text,):
            self._bind_for_scrolling_only(entry)
        for entry in self.entry, self.text:
            entry.bind(
                "<Alt-KeyPress-F5>", "" if not active else self.select_pgn_file
            )
            entry.bind(
                "<Alt-KeyPress-F4>",
                "" if not active else self.process_pgn_file,
            )
            entry.bind(
                "<KeyPress-Return>",
                "" if not active else self.process_pgn_file,
            )
    def _bind_for_scrolling_only(self, widget):
        """Make *widget* effectively read-only, keeping navigation keys.

        All key presses are swallowed ("break" stops further event handling),
        then the navigation keys are re-enabled.
        NOTE(review): the "break"/"return" strings are passed as the bind
        callback itself, i.e. executed as Tcl scripts, not returned from a
        Python handler — presumably intentional, but worth confirming against
        the Tk bind documentation.
        """
        widget.bind("<KeyPress>", "break")
        widget.bind("<Home>", "return")
        widget.bind("<Left>", "return")
        widget.bind("<Up>", "return")
        widget.bind("<Right>", "return")
        widget.bind("<Down>", "return")
        widget.bind("<Prior>", "return")
        widget.bind("<Next>", "return")
        widget.bind("<End>", "return")
def main(game_class=None, size=10000000, samples_title=""):
    """Create the PGN-checker window and run the Tk event loop."""
    app = Main(
        game_class=game_class, size=size, samples_title=samples_title
    )
    app.root.mainloop()
| true | true |
1c2c5a98edc05a4e7efc4472c00d75f91a63068e | 4,786 | py | Python | sdk/python/pulumi_azure_nextgen/sql/v20200801preview/_inputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/sql/v20200801preview/_inputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/sql/v20200801preview/_inputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ElasticPoolPerDatabaseSettingsArgs',
'SkuArgs',
]
# NOTE: auto-generated by the Pulumi SDK Generator — regenerate from the Azure
# API spec instead of editing by hand (see the file-header warning).
@pulumi.input_type
class ElasticPoolPerDatabaseSettingsArgs:
    def __init__(__self__, *,
                 max_capacity: Optional[pulumi.Input[float]] = None,
                 min_capacity: Optional[pulumi.Input[float]] = None):
        """
        Per database settings of an elastic pool.
        :param pulumi.Input[float] max_capacity: The maximum capacity any one database can consume.
        :param pulumi.Input[float] min_capacity: The minimum capacity all databases are guaranteed.
        """
        # Only set keys that were provided, so unset fields stay absent
        # from the rendered resource inputs.
        if max_capacity is not None:
            pulumi.set(__self__, "max_capacity", max_capacity)
        if min_capacity is not None:
            pulumi.set(__self__, "min_capacity", min_capacity)

    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> Optional[pulumi.Input[float]]:
        """
        The maximum capacity any one database can consume.
        """
        return pulumi.get(self, "max_capacity")

    @max_capacity.setter
    def max_capacity(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "max_capacity", value)

    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> Optional[pulumi.Input[float]]:
        """
        The minimum capacity all databases are guaranteed.
        """
        return pulumi.get(self, "min_capacity")

    @min_capacity.setter
    def min_capacity(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "min_capacity", value)
# NOTE: auto-generated by the Pulumi SDK Generator — regenerate from the Azure
# API spec instead of editing by hand (see the file-header warning).
@pulumi.input_type
class SkuArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str],
                 capacity: Optional[pulumi.Input[int]] = None,
                 family: Optional[pulumi.Input[str]] = None,
                 size: Optional[pulumi.Input[str]] = None,
                 tier: Optional[pulumi.Input[str]] = None):
        """
        An ARM Resource SKU.
        :param pulumi.Input[str] name: The name of the SKU, typically, a letter + Number code, e.g. P3.
        :param pulumi.Input[int] capacity: Capacity of the particular SKU.
        :param pulumi.Input[str] family: If the service has different generations of hardware, for the same SKU, then that can be captured here.
        :param pulumi.Input[str] size: Size of the particular SKU
        :param pulumi.Input[str] tier: The tier or edition of the particular SKU, e.g. Basic, Premium.
        """
        # "name" is the only required field; optional fields are set only
        # when provided so they stay absent from the rendered inputs.
        pulumi.set(__self__, "name", name)
        if capacity is not None:
            pulumi.set(__self__, "capacity", capacity)
        if family is not None:
            pulumi.set(__self__, "family", family)
        if size is not None:
            pulumi.set(__self__, "size", size)
        if tier is not None:
            pulumi.set(__self__, "tier", tier)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        The name of the SKU, typically, a letter + Number code, e.g. P3.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def capacity(self) -> Optional[pulumi.Input[int]]:
        """
        Capacity of the particular SKU.
        """
        return pulumi.get(self, "capacity")

    @capacity.setter
    def capacity(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "capacity", value)

    @property
    @pulumi.getter
    def family(self) -> Optional[pulumi.Input[str]]:
        """
        If the service has different generations of hardware, for the same SKU, then that can be captured here.
        """
        return pulumi.get(self, "family")

    @family.setter
    def family(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "family", value)

    @property
    @pulumi.getter
    def size(self) -> Optional[pulumi.Input[str]]:
        """
        Size of the particular SKU
        """
        return pulumi.get(self, "size")

    @size.setter
    def size(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "size", value)

    @property
    @pulumi.getter
    def tier(self) -> Optional[pulumi.Input[str]]:
        """
        The tier or edition of the particular SKU, e.g. Basic, Premium.
        """
        return pulumi.get(self, "tier")

    @tier.setter
    def tier(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tier", value)
| 33.468531 | 144 | 0.619933 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ElasticPoolPerDatabaseSettingsArgs',
'SkuArgs',
]
@pulumi.input_type
class ElasticPoolPerDatabaseSettingsArgs:
def __init__(__self__, *,
max_capacity: Optional[pulumi.Input[float]] = None,
min_capacity: Optional[pulumi.Input[float]] = None):
if max_capacity is not None:
pulumi.set(__self__, "max_capacity", max_capacity)
if min_capacity is not None:
pulumi.set(__self__, "min_capacity", min_capacity)
@property
@pulumi.getter(name="maxCapacity")
def max_capacity(self) -> Optional[pulumi.Input[float]]:
return pulumi.get(self, "max_capacity")
@max_capacity.setter
def max_capacity(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "max_capacity", value)
@property
@pulumi.getter(name="minCapacity")
def min_capacity(self) -> Optional[pulumi.Input[float]]:
return pulumi.get(self, "min_capacity")
@min_capacity.setter
def min_capacity(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "min_capacity", value)
@pulumi.input_type
class SkuArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
capacity: Optional[pulumi.Input[int]] = None,
family: Optional[pulumi.Input[str]] = None,
size: Optional[pulumi.Input[str]] = None,
tier: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "name", name)
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if family is not None:
pulumi.set(__self__, "family", family)
if size is not None:
pulumi.set(__self__, "size", size)
if tier is not None:
pulumi.set(__self__, "tier", tier)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def capacity(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter
def family(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "family")
@family.setter
def family(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "family", value)
@property
@pulumi.getter
def size(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "size")
@size.setter
def size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "size", value)
@property
@pulumi.getter
def tier(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "tier")
@tier.setter
def tier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tier", value)
| true | true |
1c2c5bf02958a264687eb3a041c1006eb85b72bd | 2,058 | py | Python | competition/portfolio.py | xfuzzycomp/FuzzyChallenge2021 | 5876450fdb913c6707352bfe9fcc25748f041f52 | [
"MIT"
] | null | null | null | competition/portfolio.py | xfuzzycomp/FuzzyChallenge2021 | 5876450fdb913c6707352bfe9fcc25748f041f52 | [
"MIT"
] | null | null | null | competition/portfolio.py | xfuzzycomp/FuzzyChallenge2021 | 5876450fdb913c6707352bfe9fcc25748f041f52 | [
"MIT"
] | null | null | null | from competition.scenarios import *
# Full competition portfolio: every scenario used for evaluation.
# Scenario objects come from competition.scenarios (star-imported above).
portfolio = [
    threat_test_1,
    threat_test_2,
    threat_test_3,
    threat_test_4,
    accuracy_test_1,
    accuracy_test_2,
    accuracy_test_3,
    accuracy_test_4,
    accuracy_test_5,
    accuracy_test_6,
    accuracy_test_7,
    accuracy_test_8,
    accuracy_test_9,
    accuracy_test_10,
    wall_left_easy,
    wall_right_easy,
    wall_top_easy,
    wall_bottom_easy,
    ring_closing,
    ring_static_left,
    ring_static_right,
    ring_static_top,
    ring_static_bottom,
    wall_right_wrap_1,
    wall_right_wrap_2,
    wall_right_wrap_3,
    wall_right_wrap_4,
    wall_left_wrap_1,
    wall_left_wrap_2,
    wall_left_wrap_3,
    wall_left_wrap_4,
    wall_top_wrap_1,
    wall_top_wrap_2,
    wall_top_wrap_3,
    wall_top_wrap_4,
    wall_bottom_wrap_1,
    wall_bottom_wrap_2,
    wall_bottom_wrap_3,
    wall_bottom_wrap_4,
]

# Reduced subset of `portfolio` used for demonstrations: drops the four
# easiest accuracy tests and the *_wrap_1/_wrap_2 variants.
show_portfolio = [
    threat_test_1,
    threat_test_2,
    threat_test_3,
    threat_test_4,
    accuracy_test_5,
    accuracy_test_6,
    accuracy_test_7,
    accuracy_test_8,
    accuracy_test_9,
    accuracy_test_10,
    wall_left_easy,
    wall_right_easy,
    wall_top_easy,
    wall_bottom_easy,
    ring_closing,
    ring_static_left,
    ring_static_right,
    ring_static_top,
    ring_static_bottom,
    wall_right_wrap_3,
    wall_right_wrap_4,
    wall_left_wrap_3,
    wall_left_wrap_4,
    wall_top_wrap_3,
    wall_top_wrap_4,
    wall_bottom_wrap_3,
    wall_bottom_wrap_4,
]

# Extra scenarios kept outside the scored portfolios.
alternate_scenarios = [
    corridor_left,
    corridor_right,
    corridor_top,
    corridor_bottom,
    # May have to cut these
    moving_corridor_1,
    moving_corridor_2,
    moving_corridor_3,
    moving_corridor_4,
    moving_corridor_angled_1,
    moving_corridor_angled_2,
    moving_corridor_curve_1,
    moving_corridor_curve_2,
    scenario_small_box,
    scenario_big_box,
    scenario_2_still_corridors,
]

# Name -> scenario lookup tables for the two scored portfolios.
portfolio_dict = {scenario.name: scenario for scenario in portfolio}
show_portfolio_dict = {scenario.name: scenario for scenario in show_portfolio}
| 20.787879 | 78 | 0.729349 | from competition.scenarios import *
portfolio = [
threat_test_1,
threat_test_2,
threat_test_3,
threat_test_4,
accuracy_test_1,
accuracy_test_2,
accuracy_test_3,
accuracy_test_4,
accuracy_test_5,
accuracy_test_6,
accuracy_test_7,
accuracy_test_8,
accuracy_test_9,
accuracy_test_10,
wall_left_easy,
wall_right_easy,
wall_top_easy,
wall_bottom_easy,
ring_closing,
ring_static_left,
ring_static_right,
ring_static_top,
ring_static_bottom,
wall_right_wrap_1,
wall_right_wrap_2,
wall_right_wrap_3,
wall_right_wrap_4,
wall_left_wrap_1,
wall_left_wrap_2,
wall_left_wrap_3,
wall_left_wrap_4,
wall_top_wrap_1,
wall_top_wrap_2,
wall_top_wrap_3,
wall_top_wrap_4,
wall_bottom_wrap_1,
wall_bottom_wrap_2,
wall_bottom_wrap_3,
wall_bottom_wrap_4,
]
show_portfolio = [
threat_test_1,
threat_test_2,
threat_test_3,
threat_test_4,
accuracy_test_5,
accuracy_test_6,
accuracy_test_7,
accuracy_test_8,
accuracy_test_9,
accuracy_test_10,
wall_left_easy,
wall_right_easy,
wall_top_easy,
wall_bottom_easy,
ring_closing,
ring_static_left,
ring_static_right,
ring_static_top,
ring_static_bottom,
wall_right_wrap_3,
wall_right_wrap_4,
wall_left_wrap_3,
wall_left_wrap_4,
wall_top_wrap_3,
wall_top_wrap_4,
wall_bottom_wrap_3,
wall_bottom_wrap_4,
]
alternate_scenarios = [
corridor_left,
corridor_right,
corridor_top,
corridor_bottom,
moving_corridor_1,
moving_corridor_2,
moving_corridor_3,
moving_corridor_4,
moving_corridor_angled_1,
moving_corridor_angled_2,
moving_corridor_curve_1,
moving_corridor_curve_2,
scenario_small_box,
scenario_big_box,
scenario_2_still_corridors,
]
portfolio_dict = {scenario.name: scenario for scenario in portfolio}
show_portfolio_dict = {scenario.name: scenario for scenario in show_portfolio}
| true | true |
1c2c5c765623036e293e8c37573a07c2f79363f7 | 27,063 | py | Python | girderformindlogger/models/user.py | shnizzedy/mindlogger-app-backend | a8ba865af82fc95fa3fcc4251afb8a0d22e63cc3 | [
"Apache-2.0"
] | null | null | null | girderformindlogger/models/user.py | shnizzedy/mindlogger-app-backend | a8ba865af82fc95fa3fcc4251afb8a0d22e63cc3 | [
"Apache-2.0"
] | null | null | null | girderformindlogger/models/user.py | shnizzedy/mindlogger-app-backend | a8ba865af82fc95fa3fcc4251afb8a0d22e63cc3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import os
import re
from passlib.context import CryptContext
from passlib.totp import TOTP, TokenError
import six
from girderformindlogger import events
from girderformindlogger.constants import AccessType, CoreEventHandler, TokenScope
from girderformindlogger.exceptions import AccessException, ValidationException
from girderformindlogger.settings import SettingKey
from girderformindlogger.utility import config, mail_utils
from girderformindlogger.utility._cache import rateLimitBuffer
from .model_base import AccessControlledModel
from .setting import Setting
class User(AccessControlledModel):
"""
This model represents the users of the system.
"""
    def initialize(self):
        """Set up collection indices, exposed fields, crypto helpers, and
        the event hooks that run after a user is first created."""
        self.name = 'user'
        self.ensureIndices(['login', 'email', 'groupInvites.groupId', 'size',
                            'created', 'deviceId'])
        self.prefixSearchFields = (
            'login', ('firstName', 'i'), ('displayName', 'i'), 'email')
        self.ensureTextIndex({
            'login': 1,
            'displayName': 1,
            'email': 1,
        }, language='none')
        self.exposeFields(level=AccessType.READ, fields=(
            '_id', 'login', 'public', 'displayName', 'firstName', 'lastName',
            'admin', 'email', 'created'))  # 🔥 delete firstName, lastName and email once fully deprecated
        self.exposeFields(level=AccessType.ADMIN, fields=(
            'size', 'status', 'emailVerified', 'creatorId'))

        # To ensure compatibility with authenticator apps, other defaults shouldn't be changed
        self._TotpFactory = TOTP.using(
            # An application secret could be set here, if it existed
            wallet=None
        )

        self._cryptContext = CryptContext(
            schemes=['bcrypt']
        )

        # Post-create hooks: grant the user admin access to themselves and
        # create the default Public/Private folders.
        events.bind('model.user.save.created',
                    CoreEventHandler.USER_SELF_ACCESS, self._grantSelfAccess)
        events.bind('model.user.save.created',
                    CoreEventHandler.USER_DEFAULT_FOLDERS,
                    self._addDefaultFolders)
    def validate(self, doc):
        """
        Validate the user every time it is stored in the database.

        Normalizes login/email/display fields, enforces login uniqueness,
        and promotes the very first user to site admin.

        :raises ValidationException: for an empty display name, bad status,
            invalid email, or a login that is malformed or already taken.
        """
        # Coerce explicit None values to empty strings before normalizing.
        for s in ['email', 'displayName', 'firstName']:
            if s in doc and doc[s] is None:
                doc[s] = ''
        doc['login'] = doc.get('login', '').lower().strip()
        doc['email'] = doc.get('email', '').lower().strip()
        doc['displayName'] = doc.get(
            'displayName',
            doc.get('firstName', '')
        ).strip()
        doc['firstName'] = doc.get('firstName', '').strip()
        doc['status'] = doc.get('status', 'enabled')
        doc['deviceId'] = doc.get('deviceId', '')

        if 'salt' not in doc:
            # Internal error, this should not happen
            raise Exception('Tried to save user document with no salt.')

        if not doc['displayName']:
            raise ValidationException('Display name must not be empty.',
                                      'displayName')

        if doc['status'] not in ('pending', 'enabled', 'disabled'):
            raise ValidationException(
                'Status must be pending, enabled, or disabled.', 'status')

        if 'hashAlg' in doc:
            # This is a legacy field; hash algorithms are now inline with the password hash
            del doc['hashAlg']

        self._validateLogin(doc['login'])

        # Empty email is allowed; a non-empty one must be well-formed.
        if len(doc['email']) and not mail_utils.validateEmailAddress(
            doc['email']
        ):
            raise ValidationException('Invalid email address.', 'email')

        # Ensure unique logins
        q = {'login': doc['login']}
        if '_id' in doc:
            q['_id'] = {'$ne': doc['_id']}
        existing = self.findOne(q)
        if existing is not None:
            raise ValidationException('That login is already registered.',
                                      'login')

        # If this is the first user being created, make it an admin
        existing = self.findOne({})
        if existing is None:
            doc['admin'] = True
            # Ensure settings don't stop this user from logging in
            doc['emailVerified'] = True
            doc['status'] = 'enabled'

        return doc
def _validateLogin(self, login):
if '@' in login:
# Hard-code this constraint so we can always easily distinguish
# an email address from a login
raise ValidationException('Login may not contain "@".', 'login')
if not re.match(r'^[a-z][\da-z\-\.]{3,}$', login):
raise ValidationException(
'Login must be at least 4 characters, start with a letter, and may only contain '
'letters, numbers, dashes, and dots.', 'login')
    def filter(self, doc, user, additionalKeys=None):
        """Filter *doc* for exposure to *user*; admins additionally see an
        'otp' boolean indicating whether one-time passwords are enabled."""
        filteredDoc = super(User, self).filter(doc, user, additionalKeys)

        level = self.getAccessLevel(doc, user)
        if level >= AccessType.ADMIN:
            # Collapse the stored OTP sub-document to a plain boolean,
            # tolerating missing or malformed 'otp' values.
            filteredDoc['otp'] = doc.get('otp', {})
            filteredDoc['otp'] = filteredDoc['otp'].get(
                'enabled',
                False
            ) if isinstance(filteredDoc['otp'], dict) else False

        return filteredDoc
    def authenticate(self, login, password, otpToken=None, deviceId=None):
        """
        Validate a user login via username and password. If authentication
        fails, an ``AccessException`` is raised.

        :param login: The user's login or email.
        :type login: str
        :param password: The user's password.
        :type password: str
        :param otpToken: A one-time password for the user. If "True", then the
                         one-time password (if required) is assumed to be
                         concatenated to the password.
        :type otpToken: str or bool or None
        :returns: The corresponding user if the login was successful.
        :rtype: dict

        NOTE(review): despite the docstring, the final status checks below
        return ``{'exception': ...}`` dicts instead of raising — callers
        appear to depend on that; confirm before changing.
        NOTE(review): ``deviceId`` is accepted but never used here.
        """
        user = None
        # Plugins may fully handle authentication via this event.
        event = events.trigger('model.user.authenticate', {
            'login': login,
            'password': password
        })

        if event.defaultPrevented and len(event.responses):
            return event.responses[-1]

        login = login.lower().strip()
        loginField = 'email' if '@' in login else 'login'

        if loginField=='login':
            user = self.findOne({loginField: login})
        else:
            # Email-based login is deliberately disallowed.
            raise AccessException(
                'Please log in with your username rather than your email '
                'address.'
            )

        if user is None:
            raise AccessException('Login failed. Username not found.')

        # Handle users with no password
        if not self.hasPassword(user):
            e = events.trigger('no_password_login_attempt', {
                'user': user,
                'password': password
            })

            if len(e.responses):
                return e.responses[-1]

            raise ValidationException(
                'This user does not have a password. You must log in with an '
                'external service, or reset your password.')

        # Handle OTP token concatenation
        if otpToken is True and self.hasOtpEnabled(user):
            # Assume the last (typically 6) characters are the OTP, so split at
            # that point
            otpTokenLength = self._TotpFactory.digits
            otpToken = password[-otpTokenLength:]
            password = password[:-otpTokenLength]

        self._verify_password(password, user)

        # Verify OTP
        if self.hasOtpEnabled(user):
            if otpToken is None:
                raise AccessException(
                    'User authentication must include a one-time password '
                    '(typically in the "Girder-OTP" header).')

            self.verifyOtp(user, otpToken)
        elif isinstance(otpToken, six.string_types):
            raise AccessException(
                'The user has not enabled one-time passwords.'
            )

        # This has the same behavior as User.canLogin, but returns more
        # detailed error messages
        if user.get('status', 'enabled') == 'disabled':
            return { 'exception' : 'Account is disabled.' }

        if self.emailVerificationRequired(user):
            return { 'exception' : 'Email verification is required.' }

        if self.adminApprovalRequired(user):
            return { 'exception' : 'Admin approval required' }

        return user
    def remove(self, user, progress=None, **kwargs):
        """
        Delete a user, and all references to it in the database.

        Cascades through tokens, pending group requests, and the user's
        folders before removing the user document itself.

        :param user: The user document to delete.
        :type user: dict
        :param progress: A progress context to record progress on.
        :type progress: girderformindlogger.utility.progress.ProgressContext or None.
        """
        from .folder import Folder
        from .group import Group
        from .token import Token

        # Delete all authentication tokens owned by this user
        Token().removeWithQuery({'userId': user['_id']})

        # Delete all pending group invites for this user
        Group().update(
            {'requests': user['_id']},
            {'$pull': {'requests': user['_id']}}
        )

        # Delete all of the folders under this user
        folderModel = Folder()
        folders = folderModel.find({
            'parentId': user['_id'],
            'parentCollection': 'user'
        })
        for folder in folders:
            folderModel.remove(folder, progress=progress, **kwargs)

        # Finally, delete the user document itself
        AccessControlledModel.remove(self, user)
        if progress:
            progress.update(increment=1, message='Deleted user ' + user['login'])
def getAdmins(self):
"""
Helper to return a cursor of all site-admin users. The number of site
admins is assumed to be small enough that we will not need to page the
results for now.
"""
return self.find({'admin': True})
def search(self, text=None, user=None, limit=0, offset=0, sort=None):
"""
List all users. Since users are access-controlled, this will filter
them by access policy.
:param text: Pass this to perform a full-text search for users.
:param user: The user running the query. Only returns users that this
user can see.
:param limit: Result limit.
:param offset: Result offset.
:param sort: The sort structure to pass to pymongo.
:returns: Iterable of users.
"""
# Perform the find; we'll do access-based filtering of the result set
# afterward.
if text is not None:
cursor = self.textSearch(text, sort=sort)
else:
cursor = self.find({}, sort=sort)
return self.filterResultsByPermission(
cursor=cursor, user=user, level=AccessType.READ, limit=limit,
offset=offset)
def hasPassword(self, user):
"""
Returns whether or not the given user has a password stored in the
database. If not, it is expected that the user will be authenticated by
an external service.
:param user: The user to test.
:type user: dict
:returns: bool
"""
return user['salt'] is not None
    def setPassword(self, user, password, save=True):
        """
        Change a user's password.

        :param user: The user whose password to change.
        :param password: The new password. If set to None, no password will
                         be stored for this user. This should be done in cases
                         where an external system is responsible for
                         authenticating the user.
        :param save: Whether to persist the user document after updating.
        :raises ValidationException: if the password fails the configured
            complexity regex.
        """
        if password is None:
            user['salt'] = None
        else:
            cur_config = config.getConfig()

            # Normally this would go in validate() but password is a special case.
            if not re.match(cur_config['users']['password_regex'], password):
                raise ValidationException(cur_config['users']['password_description'], 'password')

            # 'salt' actually stores the full bcrypt hash (salt included).
            user['salt'] = self._cryptContext.hash(password)

        if save:
            self.save(user)
    def initializeOtp(self, user):
        """
        Initialize the use of one-time passwords with this user.

        This does not save the modified user model; OTP stays disabled
        ('enabled': False) until verified.

        :param user: The user to modify.
        :return: The new OTP keys, each in KeyUriFormat.
        :rtype: dict
        """
        totp = self._TotpFactory.new()
        user['otp'] = {
            'enabled': False,
            'totp': totp.to_dict()
        }

        # Use the brand name as the OTP issuer if it's non-default (since that's prettier and more
        # meaningful for users), but fallback to the site hostname if the brand name isn't set
        # (to disambiguate otherwise identical "Girder" issuers)
        # Prevent circular import
        from girderformindlogger.api.rest import getUrlParts

        brandName = Setting().get(SettingKey.BRAND_NAME)
        defaultBrandName = Setting().getDefault(SettingKey.BRAND_NAME)
        # OTP URIs ( https://github.com/google/google-authenticator/wiki/Key-Uri-Format ) do not
        # allow colons, so use only the hostname component
        serverHostname = getUrlParts().netloc.partition(':')[0]
        # Normally, the issuer would be set when "self._TotpFactory" is instantiated, but that
        # happens during model initialization, when there's no current request, so the server
        # hostname is not known then
        otpIssuer = brandName if brandName != defaultBrandName else serverHostname

        return {
            'totpUri': totp.to_uri(label=user['login'], issuer=otpIssuer)
        }
def hasOtpEnabled(self, user):
return 'otp' in user and user['otp']['enabled']
    def verifyOtp(self, user, otpToken):
        """Verify a TOTP token for *user*, blacklisting replayed counters.

        Returns None on success; raises AccessException on failure.
        """
        lastCounterKey = 'girderformindlogger.models.user.%s.otp.totp.counter' % user['_id']

        # The last successfully-authenticated key (which is blacklisted from reuse)
        lastCounter = rateLimitBuffer.get(lastCounterKey) or None

        try:
            totpMatch = self._TotpFactory.verify(
                otpToken, user['otp']['totp'], last_counter=lastCounter)
        except TokenError as e:
            raise AccessException('One-time password validation failed: %s' % e)

        # The totpMatch.cache_seconds tells us prospectively how long the counter needs to be cached
        # for, but dogpile.cache expiration times work retrospectively (on "get"), so there's no
        # point to using it (over-caching just wastes cache resources, but does not impact
        # "totp.verify" security)
        rateLimitBuffer.set(lastCounterKey, totpMatch.counter)
    def createUser(self, login, password, displayName="", email="",
                   admin=False, public=False, currentUser=None,
                   firstName="", lastName=""):  # 🔥 delete firstName and lastName once fully deprecated
        """
        Create a new user with the given information.

        Applies the site registration policy, grants the creator read/write
        links, converts any queued group invites for this email, sends the
        verification/approval emails where configured, and absorbs any
        matching proto-user record.

        :param admin: Whether user is global administrator.
        :type admin: bool
        :param public: Whether user is publicly visible.
        :type public: bool
        :returns: The user document that was created.
        """
        from .group import Group
        from .setting import Setting

        requireApproval = Setting(
        ).get(SettingKey.REGISTRATION_POLICY) == 'approve'
        email = "" if not email else email

        # Admin accounts bypass the approval workflow.
        if admin:
            requireApproval = False

        user = {
            'login': login,
            'email': email,
            'displayName': displayName if len(
                displayName
            ) else firstName if firstName is not None else "",
            'firstName': firstName,
            'created': datetime.datetime.utcnow(),
            'emailVerified': False,
            'status': 'pending' if requireApproval else 'enabled',
            'admin': admin,
            'size': 0,
            'deviceId': '',
            'groups': [],
            # Convert invitations queued against this email address into
            # real group invites.
            'groupInvites': [
                {
                    "groupId": gi.get('_id'),
                    "level": 0
                } for gi in list(Group().find(query={"queue": email}))
            ] if len(email) else []
        }

        self.setPassword(user, password, save=False)
        self.setPublic(user, public, save=False)

        if currentUser:
            # Creator and new user get access links to each other.
            self.setUserAccess(
                user, user=currentUser, level=AccessType.WRITE, save=False
            )
            user['creatorId'] = currentUser['_id']

        user = self.save(user)

        if currentUser:
            User().setUserAccess(
                doc=currentUser, user=user, level=AccessType.READ, save=True
            )
        else:
            # Self-created accounts record themselves as creator.
            user['creatorId'] = user['_id']
            user = self.save(user)

        verifyEmail = Setting().get(SettingKey.EMAIL_VERIFICATION) != 'disabled'
        if verifyEmail:
            self._sendVerificationEmail(user)

        if requireApproval:
            self._sendApprovalEmail(user)

        # Remove this email from all group queues now that the invites exist.
        Group().update(
            query={"queue": email},
            update={"$pull": {"queue": email}},
            multi=True
        )

        user = self._getGroupInvitesFromProtoUser(user)
        self._deleteProtoUser(user)

        return(user)
def canLogin(self, user):
"""
Returns True if the user is allowed to login, e.g. email verification
is not needed and admin approval is not needed.
"""
if user.get('status', 'enabled') == 'disabled':
return False
if self.emailVerificationRequired(user):
return False
if self.adminApprovalRequired(user):
return False
return True
def emailVerificationRequired(self, user):
"""
Returns True if email verification is required and this user has not
yet verified their email address.
"""
from .setting import Setting
return (not user['emailVerified']) and \
Setting().get(SettingKey.EMAIL_VERIFICATION) == 'required'
def adminApprovalRequired(self, user):
"""
Returns True if the registration policy requires admin approval and
this user is pending approval.
"""
from .setting import Setting
return user.get('status', 'enabled') == 'pending' and \
Setting().get(SettingKey.REGISTRATION_POLICY) == 'approve'
def _sendApprovalEmail(self, user):
url = '%s#user/%s' % (
mail_utils.getEmailUrlPrefix(), str(user['_id']))
text = mail_utils.renderTemplate('accountApproval.mako', {
'user': user,
'url': url
})
mail_utils.sendMailToAdmins(
'Girder: Account pending approval',
text)
def _sendApprovedEmail(self, user):
text = mail_utils.renderTemplate('accountApproved.mako', {
'user': user,
'url': mail_utils.getEmailUrlPrefix()
})
mail_utils.sendMail(
'Girder: Account approved',
text,
[user.get('email')])
def _sendVerificationEmail(self, user):
from .token import Token
token = Token().createToken(
user, days=1, scope=TokenScope.EMAIL_VERIFICATION)
url = '%s#useraccount/%s/verification/%s' % (
mail_utils.getEmailUrlPrefix(), str(user['_id']), str(token['_id']))
text = mail_utils.renderTemplate('emailVerification.mako', {
'url': url
})
mail_utils.sendMail(
'Girder: Email verification',
text,
[user.get('email')])
def _grantSelfAccess(self, event):
"""
This callback grants a user admin access to itself.
This generally should not be called or overridden directly, but it may
be unregistered from the `model.user.save.created` event.
"""
user = event.info
self.setUserAccess(user, user, level=AccessType.ADMIN, save=True)
    def _addDefaultFolders(self, event):
        """
        This callback creates "Public" and "Private" folders on a user, after
        it is first created.

        Only runs when the USER_DEFAULT_FOLDERS setting is 'public_private'.
        This generally should not be called or overridden directly, but it may
        be unregistered from the `model.user.save.created` event.
        """
        from .folder import Folder
        from .setting import Setting

        if Setting().get(SettingKey.USER_DEFAULT_FOLDERS) == 'public_private':
            user = event.info

            publicFolder = Folder().createFolder(
                user, 'Public', parentType='user', public=True, creator=user)
            privateFolder = Folder().createFolder(
                user, 'Private', parentType='user', public=False, creator=user)
            # Give the user admin access to their own folders
            Folder().setUserAccess(publicFolder, user, AccessType.ADMIN, save=True)
            Folder().setUserAccess(privateFolder, user, AccessType.ADMIN, save=True)
def fileList(self, doc, user=None, path='', includeMetadata=False, subpath=True, data=True):
"""
This function generates a list of 2-tuples whose first element is the
relative path to the file from the user's folders root and whose second
element depends on the value of the `data` flag. If `data=True`, the
second element will be a generator that will generate the bytes of the
file data as stored in the assetstore. If `data=False`, the second
element is the file document itself.
:param doc: the user to list.
:param user: a user used to validate data that is returned.
:param path: a path prefix to add to the results.
:param includeMetadata: if True and there is any metadata, include a
result which is the JSON string of the
metadata. This is given a name of
metadata[-(number).json that is distinct from
any file within the item.
:param subpath: if True, add the user's name to the path.
:param data: If True return raw content of each file as stored in the
assetstore, otherwise return file document.
:type data: bool
"""
from .folder import Folder
if subpath:
path = os.path.join(path, doc['login'])
folderModel = Folder()
# Eagerly evaluate this list, as the MongoDB cursor can time out on long requests
childFolders = list(folderModel.childFolders(
parentType='user', parent=doc, user=user,
fields=['name'] + (['meta'] if includeMetadata else [])
))
for folder in childFolders:
for (filepath, file) in folderModel.fileList(
folder, user, path, includeMetadata, subpath=True, data=data):
yield (filepath, file)
def subtreeCount(self, doc, includeItems=True, user=None, level=None):
"""
Return the size of the user's folders. The user is counted as well.
:param doc: The user.
:param includeItems: Whether to include items in the subtree count, or
just folders.
:type includeItems: bool
:param user: If filtering by permission, the user to filter against.
:param level: If filtering by permission, the required permission level.
:type level: AccessLevel
"""
from .folder import Folder
count = 1
folderModel = Folder()
folders = folderModel.findWithPermissions({
'parentId': doc['_id'],
'parentCollection': 'user'
}, fields='access', user=user, level=level)
count += sum(folderModel.subtreeCount(
folder, includeItems=includeItems, user=user, level=level)
for folder in folders)
return count
    def countFolders(self, user, filterUser=None, level=None):
        """
        Returns the number of top level folders under this user. Access
        checking is optional; to circumvent access checks, pass ``level=None``.

        :param user: The user whose top level folders to count.
        :type user: dict
        :param filterUser: If performing access checks, the user to check
                           against.
        :type filterUser: dict or None
        :param level: The required access level, or None to return the raw
                      top-level folder count.
        """
        from .folder import Folder
        # When not permission-filtering, skip fetching access fields entirely.
        fields = () if level is None else ('access', 'public')
        folderModel = Folder()
        folders = folderModel.findWithPermissions({
            'parentId': user['_id'],
            'parentCollection': 'user'
        }, fields=fields, user=filterUser, level=level)
        return folders.count()
    def updateSize(self, doc):
        """
        Recursively recomputes the size of this user and its underlying
        folders and fixes the sizes as needed.

        :param doc: The user.
        :type doc: dict
        :returns: a 2-tuple of the recomputed total size and the number of
            size fields that had to be corrected (including the user's own).
        """
        from .folder import Folder
        size = 0
        fixes = 0
        folderModel = Folder()
        folders = folderModel.find({
            'parentId': doc['_id'],
            'parentCollection': 'user'
        })
        for folder in folders:
            # fix folder size if needed
            _, f = folderModel.updateSize(folder)
            fixes += f
            # reload before summing: updateSize above may have just
            # rewritten this folder's stored size
            folder = folderModel.load(folder['_id'], force=True)
            size += folderModel.getSizeRecursive(folder)
        # fix the user's own size value if incorrect
        if size != doc.get('size'):
            self.update({'_id': doc['_id']}, update={'$set': {'size': size}})
            fixes += 1
        return size, fixes
def _getGroupInvitesFromProtoUser(self, doc):
"""
"""
from girderformindlogger.models.protoUser import ProtoUser
# Ensure unique emails
q = {'email': doc['email']}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
existing = ProtoUser().findOne(q)
if existing is not None:
doc['groupInvites'] = existing['groupInvites']
return(doc)
def _deleteProtoUser(self, doc):
"""
"""
from girderformindlogger.models.protoUser import ProtoUser
# Ensure unique emails
q = {'email': doc['email']}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
existing = ProtoUser().findOne(q)
if existing is not None:
ProtoUser().remove(existing)
def _verify_password(self, password, user):
# Verify password
if not self._cryptContext.verify(password, user['salt']):
raise AccessException('Login failed.')
else:
return(True)
| 37.797486 | 104 | 0.586409 |
import datetime
import os
import re
from passlib.context import CryptContext
from passlib.totp import TOTP, TokenError
import six
from girderformindlogger import events
from girderformindlogger.constants import AccessType, CoreEventHandler, TokenScope
from girderformindlogger.exceptions import AccessException, ValidationException
from girderformindlogger.settings import SettingKey
from girderformindlogger.utility import config, mail_utils
from girderformindlogger.utility._cache import rateLimitBuffer
from .model_base import AccessControlledModel
from .setting import Setting
class User(AccessControlledModel):
def initialize(self):
self.name = 'user'
self.ensureIndices(['login', 'email', 'groupInvites.groupId', 'size',
'created', 'deviceId'])
self.prefixSearchFields = (
'login', ('firstName', 'i'), ('displayName', 'i'), 'email')
self.ensureTextIndex({
'login': 1,
'displayName': 1,
'email': 1,
}, language='none')
self.exposeFields(level=AccessType.READ, fields=(
'_id', 'login', 'public', 'displayName', 'firstName', 'lastName',
'admin', 'email', 'created'))
self.exposeFields(level=AccessType.ADMIN, fields=(
'size', 'status', 'emailVerified', 'creatorId'))
self._TotpFactory = TOTP.using(
# An application secret could be set here, if it existed
wallet=None
)
self._cryptContext = CryptContext(
schemes=['bcrypt']
)
events.bind('model.user.save.created',
CoreEventHandler.USER_SELF_ACCESS, self._grantSelfAccess)
events.bind('model.user.save.created',
CoreEventHandler.USER_DEFAULT_FOLDERS,
self._addDefaultFolders)
def validate(self, doc):
for s in ['email', 'displayName', 'firstName']:
if s in doc and doc[s] is None:
doc[s] = ''
doc['login'] = doc.get('login', '').lower().strip()
doc['email'] = doc.get('email', '').lower().strip()
doc['displayName'] = doc.get(
'displayName',
doc.get('firstName', '')
).strip()
doc['firstName'] = doc.get('firstName', '').strip()
doc['status'] = doc.get('status', 'enabled')
doc['deviceId'] = doc.get('deviceId', '')
if 'salt' not in doc:
# Internal error, this should not happen
raise Exception('Tried to save user document with no salt.')
if not doc['displayName']:
raise ValidationException('Display name must not be empty.',
'displayName')
if doc['status'] not in ('pending', 'enabled', 'disabled'):
raise ValidationException(
'Status must be pending, enabled, or disabled.', 'status')
if 'hashAlg' in doc:
# This is a legacy field; hash algorithms are now inline with the password hash
del doc['hashAlg']
self._validateLogin(doc['login'])
if len(doc['email']) and not mail_utils.validateEmailAddress(
doc['email']
):
raise ValidationException('Invalid email address.', 'email')
# Ensure unique logins
q = {'login': doc['login']}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
existing = self.findOne(q)
if existing is not None:
raise ValidationException('That login is already registered.',
'login')
# If this is the first user being created, make it an admin
existing = self.findOne({})
if existing is None:
doc['admin'] = True
# Ensure settings don't stop this user from logging in
doc['emailVerified'] = True
doc['status'] = 'enabled'
return doc
def _validateLogin(self, login):
if '@' in login:
raise ValidationException('Login may not contain "@".', 'login')
if not re.match(r'^[a-z][\da-z\-\.]{3,}$', login):
raise ValidationException(
'Login must be at least 4 characters, start with a letter, and may only contain '
'letters, numbers, dashes, and dots.', 'login')
def filter(self, doc, user, additionalKeys=None):
filteredDoc = super(User, self).filter(doc, user, additionalKeys)
level = self.getAccessLevel(doc, user)
if level >= AccessType.ADMIN:
filteredDoc['otp'] = doc.get('otp', {})
filteredDoc['otp'] = filteredDoc['otp'].get(
'enabled',
False
) if isinstance(filteredDoc['otp'], dict) else False
return filteredDoc
def authenticate(self, login, password, otpToken=None, deviceId=None):
user = None
event = events.trigger('model.user.authenticate', {
'login': login,
'password': password
})
if event.defaultPrevented and len(event.responses):
return event.responses[-1]
login = login.lower().strip()
loginField = 'email' if '@' in login else 'login'
if loginField=='login':
user = self.findOne({loginField: login})
else:
raise AccessException(
'Please log in with your username rather than your email '
'address.'
)
if user is None:
raise AccessException('Login failed. Username not found.')
if not self.hasPassword(user):
e = events.trigger('no_password_login_attempt', {
'user': user,
'password': password
})
if len(e.responses):
return e.responses[-1]
raise ValidationException(
'This user does not have a password. You must log in with an '
'external service, or reset your password.')
if otpToken is True and self.hasOtpEnabled(user):
otpTokenLength = self._TotpFactory.digits
otpToken = password[-otpTokenLength:]
password = password[:-otpTokenLength]
self._verify_password(password, user)
if self.hasOtpEnabled(user):
if otpToken is None:
raise AccessException(
'User authentication must include a one-time password '
'(typically in the "Girder-OTP" header).')
self.verifyOtp(user, otpToken)
elif isinstance(otpToken, six.string_types):
raise AccessException(
'The user has not enabled one-time passwords.'
)
if user.get('status', 'enabled') == 'disabled':
return { 'exception' : 'Account is disabled.' }
if self.emailVerificationRequired(user):
return { 'exception' : 'Email verification is required.' }
if self.adminApprovalRequired(user):
return { 'exception' : 'Admin approval required' }
return user
def remove(self, user, progress=None, **kwargs):
from .folder import Folder
from .group import Group
from .token import Token
Token().removeWithQuery({'userId': user['_id']})
Group().update(
{'requests': user['_id']},
{'$pull': {'requests': user['_id']}}
)
folderModel = Folder()
folders = folderModel.find({
'parentId': user['_id'],
'parentCollection': 'user'
})
for folder in folders:
folderModel.remove(folder, progress=progress, **kwargs)
AccessControlledModel.remove(self, user)
if progress:
progress.update(increment=1, message='Deleted user ' + user['login'])
def getAdmins(self):
return self.find({'admin': True})
def search(self, text=None, user=None, limit=0, offset=0, sort=None):
# afterward.
if text is not None:
cursor = self.textSearch(text, sort=sort)
else:
cursor = self.find({}, sort=sort)
return self.filterResultsByPermission(
cursor=cursor, user=user, level=AccessType.READ, limit=limit,
offset=offset)
def hasPassword(self, user):
return user['salt'] is not None
def setPassword(self, user, password, save=True):
if password is None:
user['salt'] = None
else:
cur_config = config.getConfig()
# Normally this would go in validate() but password is a special case.
if not re.match(cur_config['users']['password_regex'], password):
raise ValidationException(cur_config['users']['password_description'], 'password')
user['salt'] = self._cryptContext.hash(password)
if save:
self.save(user)
def initializeOtp(self, user):
totp = self._TotpFactory.new()
user['otp'] = {
'enabled': False,
'totp': totp.to_dict()
}
# Use the brand name as the OTP issuer if it's non-default (since that's prettier and more
# meaningful for users), but fallback to the site hostname if the brand name isn't set
from girderformindlogger.api.rest import getUrlParts
brandName = Setting().get(SettingKey.BRAND_NAME)
defaultBrandName = Setting().getDefault(SettingKey.BRAND_NAME)
serverHostname = getUrlParts().netloc.partition(':')[0]
# hostname is not known then
otpIssuer = brandName if brandName != defaultBrandName else serverHostname
return {
'totpUri': totp.to_uri(label=user['login'], issuer=otpIssuer)
}
def hasOtpEnabled(self, user):
return 'otp' in user and user['otp']['enabled']
def verifyOtp(self, user, otpToken):
lastCounterKey = 'girderformindlogger.models.user.%s.otp.totp.counter' % user['_id']
# The last successfully-authenticated key (which is blacklisted from reuse)
lastCounter = rateLimitBuffer.get(lastCounterKey) or None
try:
totpMatch = self._TotpFactory.verify(
otpToken, user['otp']['totp'], last_counter=lastCounter)
except TokenError as e:
raise AccessException('One-time password validation failed: %s' % e)
# The totpMatch.cache_seconds tells us prospectively how long the counter needs to be cached
# for, but dogpile.cache expiration times work retrospectively (on "get"), so there's no
rateLimitBuffer.set(lastCounterKey, totpMatch.counter)
def createUser(self, login, password, displayName="", email="",
admin=False, public=False, currentUser=None,
firstName="", lastName=""):
from .group import Group
from .setting import Setting
requireApproval = Setting(
).get(SettingKey.REGISTRATION_POLICY) == 'approve'
email = "" if not email else email
if admin:
requireApproval = False
user = {
'login': login,
'email': email,
'displayName': displayName if len(
displayName
) else firstName if firstName is not None else "",
'firstName': firstName,
'created': datetime.datetime.utcnow(),
'emailVerified': False,
'status': 'pending' if requireApproval else 'enabled',
'admin': admin,
'size': 0,
'deviceId': '',
'groups': [],
'groupInvites': [
{
"groupId": gi.get('_id'),
"level": 0
} for gi in list(Group().find(query={"queue": email}))
] if len(email) else []
}
self.setPassword(user, password, save=False)
self.setPublic(user, public, save=False)
if currentUser:
self.setUserAccess(
user, user=currentUser, level=AccessType.WRITE, save=False
)
user['creatorId'] = currentUser['_id']
user = self.save(user)
if currentUser:
User().setUserAccess(
doc=currentUser, user=user, level=AccessType.READ, save=True
)
else:
user['creatorId'] = user['_id']
user = self.save(user)
verifyEmail = Setting().get(SettingKey.EMAIL_VERIFICATION) != 'disabled'
if verifyEmail:
self._sendVerificationEmail(user)
if requireApproval:
self._sendApprovalEmail(user)
Group().update(
query={"queue": email},
update={"$pull": {"queue": email}},
multi=True
)
user = self._getGroupInvitesFromProtoUser(user)
self._deleteProtoUser(user)
return(user)
def canLogin(self, user):
if user.get('status', 'enabled') == 'disabled':
return False
if self.emailVerificationRequired(user):
return False
if self.adminApprovalRequired(user):
return False
return True
def emailVerificationRequired(self, user):
from .setting import Setting
return (not user['emailVerified']) and \
Setting().get(SettingKey.EMAIL_VERIFICATION) == 'required'
def adminApprovalRequired(self, user):
from .setting import Setting
return user.get('status', 'enabled') == 'pending' and \
Setting().get(SettingKey.REGISTRATION_POLICY) == 'approve'
def _sendApprovalEmail(self, user):
url = '%s#user/%s' % (
mail_utils.getEmailUrlPrefix(), str(user['_id']))
text = mail_utils.renderTemplate('accountApproval.mako', {
'user': user,
'url': url
})
mail_utils.sendMailToAdmins(
'Girder: Account pending approval',
text)
def _sendApprovedEmail(self, user):
text = mail_utils.renderTemplate('accountApproved.mako', {
'user': user,
'url': mail_utils.getEmailUrlPrefix()
})
mail_utils.sendMail(
'Girder: Account approved',
text,
[user.get('email')])
def _sendVerificationEmail(self, user):
from .token import Token
token = Token().createToken(
user, days=1, scope=TokenScope.EMAIL_VERIFICATION)
url = '%s#useraccount/%s/verification/%s' % (
mail_utils.getEmailUrlPrefix(), str(user['_id']), str(token['_id']))
text = mail_utils.renderTemplate('emailVerification.mako', {
'url': url
})
mail_utils.sendMail(
'Girder: Email verification',
text,
[user.get('email')])
def _grantSelfAccess(self, event):
user = event.info
self.setUserAccess(user, user, level=AccessType.ADMIN, save=True)
def _addDefaultFolders(self, event):
from .folder import Folder
from .setting import Setting
if Setting().get(SettingKey.USER_DEFAULT_FOLDERS) == 'public_private':
user = event.info
publicFolder = Folder().createFolder(
user, 'Public', parentType='user', public=True, creator=user)
privateFolder = Folder().createFolder(
user, 'Private', parentType='user', public=False, creator=user)
Folder().setUserAccess(publicFolder, user, AccessType.ADMIN, save=True)
Folder().setUserAccess(privateFolder, user, AccessType.ADMIN, save=True)
def fileList(self, doc, user=None, path='', includeMetadata=False, subpath=True, data=True):
from .folder import Folder
if subpath:
path = os.path.join(path, doc['login'])
folderModel = Folder()
childFolders = list(folderModel.childFolders(
parentType='user', parent=doc, user=user,
fields=['name'] + (['meta'] if includeMetadata else [])
))
for folder in childFolders:
for (filepath, file) in folderModel.fileList(
folder, user, path, includeMetadata, subpath=True, data=data):
yield (filepath, file)
def subtreeCount(self, doc, includeItems=True, user=None, level=None):
from .folder import Folder
count = 1
folderModel = Folder()
folders = folderModel.findWithPermissions({
'parentId': doc['_id'],
'parentCollection': 'user'
}, fields='access', user=user, level=level)
count += sum(folderModel.subtreeCount(
folder, includeItems=includeItems, user=user, level=level)
for folder in folders)
return count
def countFolders(self, user, filterUser=None, level=None):
from .folder import Folder
fields = () if level is None else ('access', 'public')
folderModel = Folder()
folders = folderModel.findWithPermissions({
'parentId': user['_id'],
'parentCollection': 'user'
}, fields=fields, user=filterUser, level=level)
return folders.count()
def updateSize(self, doc):
from .folder import Folder
size = 0
fixes = 0
folderModel = Folder()
folders = folderModel.find({
'parentId': doc['_id'],
'parentCollection': 'user'
})
for folder in folders:
_, f = folderModel.updateSize(folder)
fixes += f
folder = folderModel.load(folder['_id'], force=True)
size += folderModel.getSizeRecursive(folder)
if size != doc.get('size'):
self.update({'_id': doc['_id']}, update={'$set': {'size': size}})
fixes += 1
return size, fixes
def _getGroupInvitesFromProtoUser(self, doc):
from girderformindlogger.models.protoUser import ProtoUser
q = {'email': doc['email']}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
existing = ProtoUser().findOne(q)
if existing is not None:
doc['groupInvites'] = existing['groupInvites']
return(doc)
def _deleteProtoUser(self, doc):
from girderformindlogger.models.protoUser import ProtoUser
q = {'email': doc['email']}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
existing = ProtoUser().findOne(q)
if existing is not None:
ProtoUser().remove(existing)
def _verify_password(self, password, user):
if not self._cryptContext.verify(password, user['salt']):
raise AccessException('Login failed.')
else:
return(True)
| true | true |
1c2c5cd62e42cd0bc8971612062aac4c733a7a57 | 466 | py | Python | Apps/contatos/migrations/0021_alter_chat_data.py | arthur-asilva/rc_plataforma | 7e6f7eb7f9a3b9089c02db98518b60d8e481ce4c | [
"BSD-2-Clause"
] | null | null | null | Apps/contatos/migrations/0021_alter_chat_data.py | arthur-asilva/rc_plataforma | 7e6f7eb7f9a3b9089c02db98518b60d8e481ce4c | [
"BSD-2-Clause"
] | null | null | null | Apps/contatos/migrations/0021_alter_chat_data.py | arthur-asilva/rc_plataforma | 7e6f7eb7f9a3b9089c02db98518b60d8e481ce4c | [
"BSD-2-Clause"
] | null | null | null | # Generated by Django 4.0 on 2022-02-24 18:48
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # NOTE(review): applied migrations are frozen -- do not edit in place.
    # The DateTimeField default below is a fixed timestamp captured at
    # generation time; presumably the model declared
    # default=datetime.datetime.now() (called) rather than passing the
    # callable -- confirm against the Chat model's `data` field.

    dependencies = [
        ('contatos', '0020_alter_chat_data'),
    ]

    operations = [
        migrations.AlterField(
            model_name='chat',
            name='data',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2022, 2, 24, 15, 48, 12, 831074), null=True),
        ),
    ]
| 23.3 | 122 | 0.620172 |
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contatos', '0020_alter_chat_data'),
]
operations = [
migrations.AlterField(
model_name='chat',
name='data',
field=models.DateTimeField(blank=True, default=datetime.datetime(2022, 2, 24, 15, 48, 12, 831074), null=True),
),
]
| true | true |
1c2c5cec8efd1af9f39dda1bf64c9fd122edf367 | 2,568 | py | Python | virtual/lib/python3.8/site-packages/pylint/extensions/emptystring.py | erastus-1/GramApp | d926a8a894b5dc673d077b35f9df867dab737372 | [
"MIT"
] | 10 | 2021-05-31T07:18:08.000Z | 2022-03-19T09:20:11.000Z | virtual/lib/python3.8/site-packages/pylint/extensions/emptystring.py | erastus-1/GramApp | d926a8a894b5dc673d077b35f9df867dab737372 | [
"MIT"
] | 10 | 2020-09-30T12:49:45.000Z | 2020-10-04T10:26:33.000Z | virtual/lib/python3.8/site-packages/pylint/extensions/emptystring.py | erastus-1/GramApp | d926a8a894b5dc673d077b35f9df867dab737372 | [
"MIT"
] | 7 | 2021-03-15T13:39:20.000Z | 2022-03-29T12:08:21.000Z | # Copyright (c) 2016 Alexander Todorov <atodorov@otb.bg>
# Copyright (c) 2017-2018, 2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2019 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Looks for comparisons to empty string."""
import itertools
import astroid
from pylint import checkers, interfaces
from pylint.checkers import utils
def _is_constant_empty_str(node):
    """Tell whether *node* is an astroid constant equal to the empty string."""
    if not isinstance(node, astroid.Const):
        return False
    return node.value == ""
class CompareToEmptyStringChecker(checkers.BaseChecker):
    """Checks for comparisons to empty string.

    Most of the times you should use the fact that empty strings are false.
    An exception to this rule is when an empty string value is allowed in the program
    and has a different meaning than None!
    """

    __implements__ = (interfaces.IAstroidChecker,)

    # configuration section name
    name = "compare-to-empty-string"
    # Single message C1901, emitted once per offending comparison window.
    msgs = {
        "C1901": (
            "Avoid comparisons to empty string",
            "compare-to-empty-string",
            "Used when Pylint detects comparison to an empty string constant.",
        )
    }
    priority = -2
    options = ()

    @utils.check_messages("compare-to-empty-string")
    def visit_compare(self, node):
        # Only (in)equality and identity comparisons are relevant; ordering
        # operators against "" are a separate concern.
        _operators = ["!=", "==", "is not", "is"]
        # note: astroid.Compare has the left most operand in node.left
        # while the rest are a list of tuples in node.ops
        # the format of the tuple is ('compare operator sign', node)
        # here we squash everything into `ops` to make it easier for processing later
        ops = [("", node.left)]
        ops.extend(node.ops)
        # After flattening, ops is [sign0, operand0, sign1, operand1, ...],
        # so three consecutive slots form an (operand, operator, operand)
        # window for chained comparisons like a == b == "".
        ops = list(itertools.chain(*ops))
        for ops_idx in range(len(ops) - 2):
            op_1 = ops[ops_idx]
            op_2 = ops[ops_idx + 1]
            op_3 = ops[ops_idx + 2]
            error_detected = False
            # x ?? ""
            if _is_constant_empty_str(op_1) and op_2 in _operators:
                error_detected = True
            # '' ?? X
            elif op_2 in _operators and _is_constant_empty_str(op_3):
                error_detected = True
            if error_detected:
                self.add_message("compare-to-empty-string", node=node)
def register(linter):
    """Required method to auto register this checker.

    Pylint invokes this hook when the extension is loaded as a plugin.
    """
    linter.register_checker(CompareToEmptyStringChecker(linter))
| 33.789474 | 85 | 0.648754 |
import itertools
import astroid
from pylint import checkers, interfaces
from pylint.checkers import utils
def _is_constant_empty_str(node):
return isinstance(node, astroid.Const) and node.value == ""
class CompareToEmptyStringChecker(checkers.BaseChecker):
__implements__ = (interfaces.IAstroidChecker,)
name = "compare-to-empty-string"
msgs = {
"C1901": (
"Avoid comparisons to empty string",
"compare-to-empty-string",
"Used when Pylint detects comparison to an empty string constant.",
)
}
priority = -2
options = ()
@utils.check_messages("compare-to-empty-string")
def visit_compare(self, node):
_operators = ["!=", "==", "is not", "is"]
ops = [("", node.left)]
ops.extend(node.ops)
ops = list(itertools.chain(*ops))
for ops_idx in range(len(ops) - 2):
op_1 = ops[ops_idx]
op_2 = ops[ops_idx + 1]
op_3 = ops[ops_idx + 2]
error_detected = False
if _is_constant_empty_str(op_1) and op_2 in _operators:
error_detected = True
elif op_2 in _operators and _is_constant_empty_str(op_3):
error_detected = True
if error_detected:
self.add_message("compare-to-empty-string", node=node)
def register(linter):
linter.register_checker(CompareToEmptyStringChecker(linter))
| true | true |
1c2c5e5c68e34865d3a608d0d05b86301c5d6ffe | 2,661 | py | Python | app/recipe/tests/test_tags_api.py | JDuskey/recipe-app-api | 911c892b57fcfe62b9ba7fa74f3b7c998d0b7918 | [
"MIT"
] | null | null | null | app/recipe/tests/test_tags_api.py | JDuskey/recipe-app-api | 911c892b57fcfe62b9ba7fa74f3b7c998d0b7918 | [
"MIT"
] | null | null | null | app/recipe/tests/test_tags_api.py | JDuskey/recipe-app-api | 911c892b57fcfe62b9ba7fa74f3b7c998d0b7918 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsAPITests(TestCase):
    """Tests for the tags API endpoints that require no authentication."""

    def setUp(self):
        self.client = APIClient()

    def test_login_required(self):
        """An unauthenticated request for tags is rejected with 401."""
        resp = self.client.get(TAGS_URL)
        self.assertEqual(resp.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsAPITests(TestCase):
    """Tests for the tags API as an authenticated user."""

    def setUp(self):
        # Every request in this suite runs as a fixed, freshly created user.
        self.user = get_user_model().objects.create_user(
            email='test@duskey.io',
            password='password123'
        )
        self.client = APIClient()
        self.client.force_authenticate(self.user)

    def test_retrieve_tags(self):
        """Listing tags returns all of the user's tags, name-descending."""
        for name in ('Vegan', 'Dessert'):
            Tag.objects.create(user=self.user, name=name)

        resp = self.client.get(TAGS_URL)

        expected = TagSerializer(Tag.objects.all().order_by('-name'), many=True)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(resp.data, expected.data)

    def test_tags_limited_to_user(self):
        """Only the authenticated user's own tags are returned."""
        other = get_user_model().objects.create_user(
            email='test2@duskey.io',
            password='testpass123'
        )
        Tag.objects.create(user=other, name='Fruity')
        own_tag = Tag.objects.create(user=self.user, name='Comfort')

        resp = self.client.get(TAGS_URL)

        self.assertEqual(resp.status_code, status.HTTP_200_OK)
        self.assertEqual(len(resp.data), 1)
        self.assertEqual(resp.data[0]['name'], own_tag.name)

    def test_create_tag_successful(self):
        """POSTing a valid name creates the tag for the current user."""
        self.client.post(TAGS_URL, {'name': 'Test Tag'})

        created = Tag.objects.filter(user=self.user, name='Test Tag')
        self.assertTrue(created.exists())

    def test_create_tag_invalid(self):
        """POSTing an empty name is rejected with 400."""
        resp = self.client.post(TAGS_URL, {'name': ''})

        self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)
| 30.238636 | 76 | 0.652011 | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsAPITests(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required(self):
response = self.client.get(TAGS_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateTagsAPITests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
email='test@duskey.io',
password='password123'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
response = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, serializer.data)
def test_tags_limited_to_user(self):
new_user = get_user_model().objects.create_user(
email='test2@duskey.io',
password='testpass123'
)
Tag.objects.create(user=new_user, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Comfort')
response = self.client.get(TAGS_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
self.assertEqual(response.data[0]['name'], tag.name)
def test_create_tag_successful(self):
payload = {
'name': 'Test Tag'
}
self.client.post(TAGS_URL, payload)
self.assertTrue(Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
)
def test_create_tag_invalid(self):
payload = {
'name': ''
}
response = self.client.post(TAGS_URL, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| true | true |
1c2c5e911abaf13e56c422deb67ffab421752834 | 4,993 | py | Python | scripts/mo.py | aafulei/leetcode | e3a0ef9c912abf99a1d6e56eff8802ba44b0057d | [
"MIT"
] | 2 | 2019-04-13T09:55:04.000Z | 2019-05-16T12:47:40.000Z | scripts/mo.py | aafulei/leetcode | e3a0ef9c912abf99a1d6e56eff8802ba44b0057d | [
"MIT"
] | null | null | null | scripts/mo.py | aafulei/leetcode | e3a0ef9c912abf99a1d6e56eff8802ba44b0057d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# 22/06/03 = Fri
# 22/05/27 = Fri
# -----------------
# Markdown Outliner
# -----------------
# A Python module that support the LeetCode Information Tracker. It reads and
# understands a markdown document.
import bisect
import io
import os
import sys
class Section:
    """A node in the outline tree: a heading plus its lines and subsections.

    ``level`` is 1-6 for markdown headings and 0 for the document root;
    level 7 denotes plain (non-heading) content.  ``children`` holds
    plain-text lines first, then child ``Section`` objects.
    """

    def __init__(self, level=0, name="", father=None, children=None):
        # children defaults to None rather than [] so that every instance
        # gets its own fresh list; a shared mutable default argument would
        # silently accumulate children across unrelated instances.
        self.level = level
        self.name = name
        self.father = father
        self.children = [] if not children else children

    def __iter__(self):
        # Depth-first traversal over headings and plain lines.
        return SectionIterator(self)

    def __repr__(self):
        return f"({self.level}:{len(self.children)}) {self.name}"

    def __str__(self):
        hashes = "#" * self.level if self.level == 7 else ""
        return f"{hashes} {self.name}"
class SectionIterator:
    """Depth-first iterator over a Section tree.

    Yields 4-tuples ``(level, line, father, pos)``: ``level`` is the heading
    level (7 for plain lines), ``line`` the heading or text, ``father`` the
    owning Section and ``pos`` the child index within it.
    """

    def __init__(self, section):
        # Explicit stacks instead of recursion: sec_stack is the path of
        # open sections; pos_stack holds the next child index in each.
        self.sec_stack = [section]
        self.pos_stack = [0]
        # A level-0 root has no heading of its own to emit.
        self.head_todo = bool(section.level)

    def _has_next(self):
        # Advance the stacks until positioned on something yieldable
        # (a pending heading or a plain-text child) or the tree is spent.
        while self.sec_stack:
            sec = self.sec_stack[-1]
            pos = self.pos_stack[-1]
            if self.head_todo:
                return True
            if pos == len(sec.children):
                # Finished this section: pop back to its father.
                self.sec_stack.pop()
                self.pos_stack.pop()
            else:
                if isinstance(sec.children[pos], str):
                    return True
                else:
                    # Descend into a subsection; emit its heading first.
                    self.pos_stack[-1] += 1
                    self.sec_stack.append(sec.children[pos])
                    self.pos_stack.append(0)
                    self.head_todo = True
        return False

    def __next__(self):
        if not self._has_next():
            raise StopIteration
        sec = self.sec_stack[-1]
        pos = self.pos_stack[-1]
        if self.head_todo:
            # Emit the section heading. Its position is within the father,
            # whose cursor already advanced one past it in _has_next.
            level = sec.level
            line = sec.name
            fat = sec.father
            pos = self.pos_stack[-2] - 1
            self.head_todo = False
        else:
            # Emit a plain line (level 7) and advance within this section.
            level = 7
            line = sec.children[pos]
            fat = sec
            self.pos_stack[-1] += 1
        return level, line, fat, pos
def parse_line(line):
    """Split a markdown line into ``(level, text)``.

    Heading lines ("#" through "######" followed by a space) yield their
    heading level and the text after the space; any other line yields
    level 7 and the line unchanged.
    """
    hashes = len(line) - len(line.lstrip("#"))
    if 1 <= hashes <= 6 and hashes < len(line) and line[hashes] == ' ':
        return hashes, line[hashes + 1:]
    return 7, line
class Page:
    """A whole markdown document held as a tree of Sections under a root."""

    def __init__(self, filename=None):
        self.root = Section()
        if filename:
            self.load(filename)

    def get_father(self, prev, level):
        """Walk up from *prev* to the section that owns a new *level* heading."""
        while prev.level and level <= prev.level:
            prev = prev.father
        return prev

    def load(self, filename):
        """Parse *filename*, appending its headings and lines into the tree."""
        with open(filename) as file:
            prev = self.root
            for raw in file:
                level, text = parse_line(raw.rstrip())
                fat = self.get_father(prev, level)
                if level == 7:
                    # Plain line: attach to the current section.
                    fat.children.append(text)
                else:
                    # Heading: open a new subsection under its father.
                    sec = Section(level, text, fat)
                    fat.children.append(sec)
                    prev = sec

    def dump(self, filename, newline="\n", override=False):
        """Write the tree back out as markdown.

        Refuses to overwrite an existing file unless *override* is True.
        """
        if not override and os.path.exists(filename):
            raise FileExistsError
        with io.open(filename, "w", newline=newline) as file:
            for level, line, fat, pos in self:
                if level != 7:
                    print("#" * level, end=" ", file=file)
                print(line, file=file)

    def __iter__(self):
        return SectionIterator(self.root)
def add_sections(add_set, fat):
is_section = [isinstance(x, Section) for x in fat.children]
beg = bisect.bisect(is_section, False)
cur_set = set(x.name for x in fat.children[beg:])
diff_set = add_set - cur_set
if not diff_set:
return False
lev = min(fat.level + 1, 6)
add = [Section(level=lev, name=x, father=fat, children=[""])
for x in diff_set]
fat.children.extend(add)
fat.children[beg:] = sorted(fat.children[beg:], key=lambda x: x.name)
return True
def add_line(line, fat, key):
is_section = [isinstance(x, Section) for x in fat.children]
end = bisect.bisect(is_section, False)
values = [key(x) for x in fat.children[:end]]
add_val = key(line)
pos = bisect.bisect_left(values, add_val)
if pos != end:
if fat.children[pos] == line:
return False
elif values[pos] == add_val:
fat.children[pos] = line
else:
fat.children.insert(pos, line)
else:
fat.children.append(line)
fat.children.append("")
return True
| 27.434066 | 77 | 0.527939 |
import bisect
import io
import os
import sys
class Section:
def __init__(self, level=0, name="", father=None, children=None):
self.level = level
self.name = name
self.father = father
self.children = [] if not children else children
def __iter__(self):
return SectionIterator(self)
def __repr__(self):
return f"({self.level}:{len(self.children)}) {self.name}"
def __str__(self):
prefix = "" if self.level != 7 else "#" * self.level
return f"{prefix} {self.name}"
class SectionIterator:
def __init__(self, section):
self.sec_stack = [section]
self.pos_stack = [0]
self.head_todo = bool(section.level)
def _has_next(self):
while self.sec_stack:
sec = self.sec_stack[-1]
pos = self.pos_stack[-1]
if self.head_todo:
return True
if pos == len(sec.children):
self.sec_stack.pop()
self.pos_stack.pop()
else:
if isinstance(sec.children[pos], str):
return True
else:
self.pos_stack[-1] += 1
self.sec_stack.append(sec.children[pos])
self.pos_stack.append(0)
self.head_todo = True
return False
def __next__(self):
if not self._has_next():
raise StopIteration
sec = self.sec_stack[-1]
pos = self.pos_stack[-1]
if self.head_todo:
level = sec.level
line = sec.name
fat = sec.father
pos = self.pos_stack[-2] - 1
self.head_todo = False
else:
level = 7
line = sec.children[pos]
fat = sec
self.pos_stack[-1] += 1
return level, line, fat, pos
def parse_line(line):
count = 0
while count != len(line) and line[count] == '#':
count += 1
if 1 <= count <= 6 and count < len(line) and line[count] == ' ':
level = count
text = line[count + 1:]
else:
level = 7
text = line
return level, text
class Page:
def __init__(self, filename=None):
self.root = Section()
if filename:
self.load(filename)
def get_father(self, prev, level):
while prev.level and level <= prev.level:
prev = prev.father
return prev
def load(self, filename):
with open(filename) as file:
prev = self.root
for line in file:
line = line.rstrip()
level, text = parse_line(line)
fat = self.get_father(prev, level)
if level == 7:
fat.children.append(text)
else:
sec = Section(level, text, fat)
fat.children.append(sec)
prev = sec
def dump(self, filename, newline="\n", override=False):
if not override and os.path.exists(filename):
raise FileExistsError
with io.open(filename, "w", newline=newline) as file:
for level, line, fat, pos in self:
if level != 7:
print("#" * level, end=" ", file=file)
print(line, file=file)
def __iter__(self):
return SectionIterator(self.root)
def add_sections(add_set, fat):
is_section = [isinstance(x, Section) for x in fat.children]
beg = bisect.bisect(is_section, False)
cur_set = set(x.name for x in fat.children[beg:])
diff_set = add_set - cur_set
if not diff_set:
return False
lev = min(fat.level + 1, 6)
add = [Section(level=lev, name=x, father=fat, children=[""])
for x in diff_set]
fat.children.extend(add)
fat.children[beg:] = sorted(fat.children[beg:], key=lambda x: x.name)
return True
def add_line(line, fat, key):
is_section = [isinstance(x, Section) for x in fat.children]
end = bisect.bisect(is_section, False)
values = [key(x) for x in fat.children[:end]]
add_val = key(line)
pos = bisect.bisect_left(values, add_val)
if pos != end:
if fat.children[pos] == line:
return False
elif values[pos] == add_val:
fat.children[pos] = line
else:
fat.children.insert(pos, line)
else:
fat.children.append(line)
fat.children.append("")
return True
| true | true |
1c2c5fb1897434edf813ca11fee52e66fc56491b | 458 | py | Python | mspyteams/aiohttp.py | sander76/mspyteams | b8755afe8533fd1b9c059ca5d176b335f8070310 | [
"MIT"
] | null | null | null | mspyteams/aiohttp.py | sander76/mspyteams | b8755afe8533fd1b9c059ca5d176b335f8070310 | [
"MIT"
] | null | null | null | mspyteams/aiohttp.py | sander76/mspyteams | b8755afe8533fd1b9c059ca5d176b335f8070310 | [
"MIT"
] | null | null | null | """Send card using aiohttp"""
import logging
from typing import TYPE_CHECKING
import aiohttp
if TYPE_CHECKING:
from mspyteams.card import Card
_LOGGER = logging.getLogger(__name__)
async def send(card: "Card", webhook_url, session: aiohttp.ClientSession):
"""Send the card to the teams webhook."""
data = card.card_data()
async with session.post(webhook_url, json=data) as response:
return response.status, await response.text()
| 25.444444 | 74 | 0.735808 |
import logging
from typing import TYPE_CHECKING
import aiohttp
if TYPE_CHECKING:
from mspyteams.card import Card
_LOGGER = logging.getLogger(__name__)
async def send(card: "Card", webhook_url, session: aiohttp.ClientSession):
data = card.card_data()
async with session.post(webhook_url, json=data) as response:
return response.status, await response.text()
| true | true |
1c2c612e60bb50ea7a694d80419862366ba69591 | 2,514 | py | Python | gmprocess/io/asdf/utils.py | baagaard-usgs/groundmotion-processing | 6be2b4460d598bba0935135efa85af2655578565 | [
"Unlicense"
] | 54 | 2019-01-12T02:05:38.000Z | 2022-03-29T19:43:56.000Z | gmprocess/io/asdf/utils.py | baagaard-usgs/groundmotion-processing | 6be2b4460d598bba0935135efa85af2655578565 | [
"Unlicense"
] | 700 | 2018-12-18T19:44:31.000Z | 2022-03-30T20:54:28.000Z | gmprocess/io/asdf/utils.py | baagaard-usgs/groundmotion-processing | 6be2b4460d598bba0935135efa85af2655578565 | [
"Unlicense"
] | 41 | 2018-11-29T23:17:56.000Z | 2022-03-31T04:04:23.000Z | """Utilities for simple operations on an ASDF workspace.
"""
# third party imports
import h5py
class TallyStorage(object):
"""Tally storage used within each group."""
def __init__(self, group_detail=None):
"""Constructor.
Args:
group_detail (list):
List of group names for which to tally subgroups and
datasets. By default only the storage is only reported
for the top-level groups.
"""
self.group_detail = group_detail if group_detail else []
return
@staticmethod
def compute_dataset_storage(dataset):
"""Compute the storage used by a dataset.
Args:
dataset (h5py.Dataset):
HDF5 dataset for which storage is computed.
Returns:
int:
Storage of dataset in bytes.
"""
assert isinstance(dataset, h5py.Dataset)
return dataset.size * dataset.dtype.itemsize
def compute_storage(self, items, store_subtotals=False):
"""Compute the storage for a group of items. By default only the total
storage is stored.
Args:
items (iterable):
Iterable object of items to compute total storage.
store_subtotals (bool):
Store storage for each item in items.
Returns:
int:
Total storage of items, in bytes.
dict:
Dictionary of storage for items.
"""
subtotal_bytes = 0
storage = {}
for name, item in items:
if isinstance(item, h5py.Group):
(item_bytes, item_groups) = self.compute_storage(
item.items(), store_subtotals=name in self.group_detail
)
if store_subtotals:
storage[name] = {
"total_bytes": item_bytes,
"groups": item_groups,
}
elif isinstance(item, h5py.Dataset):
item_bytes = self.compute_dataset_storage(item)
if store_subtotals:
storage[name] = {"total_bytes": item_bytes, "groups": {}}
else:
raise ValueError(
"Group item '{}' is of type '{}', expected "
"'h5.Dataset' or 'h5.Group'".format(name, type(item))
)
subtotal_bytes += item_bytes
return (subtotal_bytes, storage)
| 31.822785 | 78 | 0.536993 |
import h5py
class TallyStorage(object):
def __init__(self, group_detail=None):
self.group_detail = group_detail if group_detail else []
return
@staticmethod
def compute_dataset_storage(dataset):
assert isinstance(dataset, h5py.Dataset)
return dataset.size * dataset.dtype.itemsize
def compute_storage(self, items, store_subtotals=False):
subtotal_bytes = 0
storage = {}
for name, item in items:
if isinstance(item, h5py.Group):
(item_bytes, item_groups) = self.compute_storage(
item.items(), store_subtotals=name in self.group_detail
)
if store_subtotals:
storage[name] = {
"total_bytes": item_bytes,
"groups": item_groups,
}
elif isinstance(item, h5py.Dataset):
item_bytes = self.compute_dataset_storage(item)
if store_subtotals:
storage[name] = {"total_bytes": item_bytes, "groups": {}}
else:
raise ValueError(
"Group item '{}' is of type '{}', expected "
"'h5.Dataset' or 'h5.Group'".format(name, type(item))
)
subtotal_bytes += item_bytes
return (subtotal_bytes, storage)
| true | true |
1c2c615b05e64ea5edd0451a394a00de6035afd7 | 5,596 | py | Python | macro_benchmark/WideDeep/census_test.py | songhappy/ai-matrix | 901078e480c094235c721c49f8141aec7a84e70e | [
"Apache-2.0"
] | 180 | 2018-09-20T07:27:40.000Z | 2022-03-19T07:55:42.000Z | macro_benchmark/WideDeep/census_test.py | songhappy/ai-matrix | 901078e480c094235c721c49f8141aec7a84e70e | [
"Apache-2.0"
] | 80 | 2018-09-26T18:55:56.000Z | 2022-02-10T02:03:26.000Z | macro_benchmark/WideDeep/census_test.py | songhappy/ai-matrix | 901078e480c094235c721c49f8141aec7a84e70e | [
"Apache-2.0"
] | 72 | 2018-08-30T00:49:15.000Z | 2022-02-15T23:22:40.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf # pylint: disable=g-bad-import-order
from utils.testing import integration
import census_dataset
import census_main
import wide_deep_run_loop
tf.logging.set_verbosity(tf.logging.ERROR)
TEST_INPUT = ('18,Self-emp-not-inc,987,Bachelors,12,Married-civ-spouse,abc,'
'Husband,zyx,wvu,34,56,78,tsr,<=50K')
TEST_INPUT_VALUES = {
'age': 18,
'education_num': 12,
'capital_gain': 34,
'capital_loss': 56,
'hours_per_week': 78,
'education': 'Bachelors',
'marital_status': 'Married-civ-spouse',
'relationship': 'Husband',
'workclass': 'Self-emp-not-inc',
'occupation': 'abc',
}
TEST_CSV = os.path.join(os.path.dirname(__file__), 'census_test.csv')
class BaseTest(tf.test.TestCase):
"""Tests for Wide Deep model."""
@classmethod
def setUpClass(cls): # pylint: disable=invalid-name
super(BaseTest, cls).setUpClass()
census_main.define_census_flags()
def setUp(self):
# Create temporary CSV file
self.temp_dir = self.get_temp_dir()
self.input_csv = os.path.join(self.temp_dir, 'test.csv')
with tf.gfile.Open(self.input_csv, 'w') as temp_csv:
temp_csv.write(TEST_INPUT)
with tf.gfile.Open(TEST_CSV, "r") as temp_csv:
test_csv_contents = temp_csv.read()
# Used for end-to-end tests.
for fname in [census_dataset.TRAINING_FILE, census_dataset.EVAL_FILE]:
with tf.gfile.Open(os.path.join(self.temp_dir, fname), 'w') as test_csv:
test_csv.write(test_csv_contents)
def test_input_fn(self):
dataset = census_dataset.input_fn(self.input_csv, 1, False, 1)
features, labels = dataset.make_one_shot_iterator().get_next()
with self.test_session() as sess:
features, labels = sess.run((features, labels))
# Compare the two features dictionaries.
for key in TEST_INPUT_VALUES:
self.assertTrue(key in features)
self.assertEqual(len(features[key]), 1)
feature_value = features[key][0]
# Convert from bytes to string for Python 3.
if isinstance(feature_value, bytes):
feature_value = feature_value.decode()
self.assertEqual(TEST_INPUT_VALUES[key], feature_value)
self.assertFalse(labels)
def build_and_test_estimator(self, model_type):
"""Ensure that model trains and minimizes loss."""
model = census_main.build_estimator(
self.temp_dir, model_type,
model_column_fn=census_dataset.build_model_columns,
inter_op=0, intra_op=0)
# Train for 1 step to initialize model and evaluate initial loss
def get_input_fn(num_epochs, shuffle, batch_size):
def input_fn():
return census_dataset.input_fn(
TEST_CSV, num_epochs=num_epochs, shuffle=shuffle,
batch_size=batch_size)
return input_fn
model.train(input_fn=get_input_fn(1, True, 1), steps=1)
initial_results = model.evaluate(input_fn=get_input_fn(1, False, 1))
# Train for 100 epochs at batch size 3 and evaluate final loss
model.train(input_fn=get_input_fn(100, True, 3))
final_results = model.evaluate(input_fn=get_input_fn(1, False, 1))
print('%s initial results:' % model_type, initial_results)
print('%s final results:' % model_type, final_results)
# Ensure loss has decreased, while accuracy and both AUCs have increased.
self.assertLess(final_results['loss'], initial_results['loss'])
self.assertGreater(final_results['auc'], initial_results['auc'])
self.assertGreater(final_results['auc_precision_recall'],
initial_results['auc_precision_recall'])
self.assertGreater(final_results['accuracy'], initial_results['accuracy'])
def test_wide_deep_estimator_training(self):
self.build_and_test_estimator('wide_deep')
def test_end_to_end_wide(self):
integration.run_synthetic(
main=census_main.main, tmp_root=self.get_temp_dir(),
extra_flags=[
'--data_dir', self.get_temp_dir(),
'--model_type', 'wide',
'--download_if_missing=false'
],
synth=False, max_train=None)
def test_end_to_end_deep(self):
integration.run_synthetic(
main=census_main.main, tmp_root=self.get_temp_dir(),
extra_flags=[
'--data_dir', self.get_temp_dir(),
'--model_type', 'deep',
'--download_if_missing=false'
],
synth=False, max_train=None)
def test_end_to_end_wide_deep(self):
integration.run_synthetic(
main=census_main.main, tmp_root=self.get_temp_dir(),
extra_flags=[
'--data_dir', self.get_temp_dir(),
'--model_type', 'wide_deep',
'--download_if_missing=false'
],
synth=False, max_train=None)
if __name__ == '__main__':
tf.test.main()
| 34.54321 | 80 | 0.682809 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from utils.testing import integration
import census_dataset
import census_main
import wide_deep_run_loop
tf.logging.set_verbosity(tf.logging.ERROR)
TEST_INPUT = ('18,Self-emp-not-inc,987,Bachelors,12,Married-civ-spouse,abc,'
'Husband,zyx,wvu,34,56,78,tsr,<=50K')
TEST_INPUT_VALUES = {
'age': 18,
'education_num': 12,
'capital_gain': 34,
'capital_loss': 56,
'hours_per_week': 78,
'education': 'Bachelors',
'marital_status': 'Married-civ-spouse',
'relationship': 'Husband',
'workclass': 'Self-emp-not-inc',
'occupation': 'abc',
}
TEST_CSV = os.path.join(os.path.dirname(__file__), 'census_test.csv')
class BaseTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
super(BaseTest, cls).setUpClass()
census_main.define_census_flags()
def setUp(self):
self.temp_dir = self.get_temp_dir()
self.input_csv = os.path.join(self.temp_dir, 'test.csv')
with tf.gfile.Open(self.input_csv, 'w') as temp_csv:
temp_csv.write(TEST_INPUT)
with tf.gfile.Open(TEST_CSV, "r") as temp_csv:
test_csv_contents = temp_csv.read()
for fname in [census_dataset.TRAINING_FILE, census_dataset.EVAL_FILE]:
with tf.gfile.Open(os.path.join(self.temp_dir, fname), 'w') as test_csv:
test_csv.write(test_csv_contents)
def test_input_fn(self):
dataset = census_dataset.input_fn(self.input_csv, 1, False, 1)
features, labels = dataset.make_one_shot_iterator().get_next()
with self.test_session() as sess:
features, labels = sess.run((features, labels))
for key in TEST_INPUT_VALUES:
self.assertTrue(key in features)
self.assertEqual(len(features[key]), 1)
feature_value = features[key][0]
if isinstance(feature_value, bytes):
feature_value = feature_value.decode()
self.assertEqual(TEST_INPUT_VALUES[key], feature_value)
self.assertFalse(labels)
def build_and_test_estimator(self, model_type):
model = census_main.build_estimator(
self.temp_dir, model_type,
model_column_fn=census_dataset.build_model_columns,
inter_op=0, intra_op=0)
def get_input_fn(num_epochs, shuffle, batch_size):
def input_fn():
return census_dataset.input_fn(
TEST_CSV, num_epochs=num_epochs, shuffle=shuffle,
batch_size=batch_size)
return input_fn
model.train(input_fn=get_input_fn(1, True, 1), steps=1)
initial_results = model.evaluate(input_fn=get_input_fn(1, False, 1))
model.train(input_fn=get_input_fn(100, True, 3))
final_results = model.evaluate(input_fn=get_input_fn(1, False, 1))
print('%s initial results:' % model_type, initial_results)
print('%s final results:' % model_type, final_results)
self.assertLess(final_results['loss'], initial_results['loss'])
self.assertGreater(final_results['auc'], initial_results['auc'])
self.assertGreater(final_results['auc_precision_recall'],
initial_results['auc_precision_recall'])
self.assertGreater(final_results['accuracy'], initial_results['accuracy'])
def test_wide_deep_estimator_training(self):
self.build_and_test_estimator('wide_deep')
def test_end_to_end_wide(self):
integration.run_synthetic(
main=census_main.main, tmp_root=self.get_temp_dir(),
extra_flags=[
'--data_dir', self.get_temp_dir(),
'--model_type', 'wide',
'--download_if_missing=false'
],
synth=False, max_train=None)
def test_end_to_end_deep(self):
integration.run_synthetic(
main=census_main.main, tmp_root=self.get_temp_dir(),
extra_flags=[
'--data_dir', self.get_temp_dir(),
'--model_type', 'deep',
'--download_if_missing=false'
],
synth=False, max_train=None)
def test_end_to_end_wide_deep(self):
integration.run_synthetic(
main=census_main.main, tmp_root=self.get_temp_dir(),
extra_flags=[
'--data_dir', self.get_temp_dir(),
'--model_type', 'wide_deep',
'--download_if_missing=false'
],
synth=False, max_train=None)
if __name__ == '__main__':
tf.test.main()
| true | true |
1c2c61708ea7a4fbdd8343ddaced6398fa026d2a | 8,750 | py | Python | base-LASED/build/lib/LASED/time_evolution_matrix.py | mvpmanish/LASED | 7793037b3e77ee9205f631d7ff6c511895108400 | [
"MIT"
] | 7 | 2021-06-07T14:58:01.000Z | 2022-03-24T18:08:13.000Z | base-LASED/LASED/.ipynb_checkpoints/time_evolution_matrix-checkpoint.py | mvpmanish/LASED | 7793037b3e77ee9205f631d7ff6c511895108400 | [
"MIT"
] | 13 | 2021-06-07T14:15:54.000Z | 2022-03-29T11:06:10.000Z | base-LASED/LASED/time_evolution_matrix.py | mvpmanish/LASED | 7793037b3e77ee9205f631d7ff6c511895108400 | [
"MIT"
] | 2 | 2021-12-09T06:31:23.000Z | 2022-03-18T17:29:48.000Z | '''
This is a file to define a function to populate the time evolution matrix for a laser-atom system
Author: Manish Patel
Date created: 12/05/2021
'''
from LASED.state import *
from LASED.detuning import *
from LASED.symbolic_print import *
from LASED.half_rabi_freq import *
from LASED.decay_constant import *
from LASED.index import *
from sympy import *
from sympy import Symbol
import numpy as np
def timeEvolutionMatrix(n, E, G, Q, Q_decay, tau, laser_wavelength, laser_intensity,
tau_f = None, detuning = None, symbolic_print = None, numeric_print = None,
rabi_scaling = None, rabi_factors = None, atomic_velocity = None):
"""Function to create and populate the coupled differential equation matrix A for the laser-atom system.
Returns:
ndarray: Matrix which contains all thera coefficients for the set of coupled differential equations describing a laser-atom system.
"""
# Initialise matrix with zeros
A = np.zeros((n*n,n*n), dtype = np.complex)
# Calculate half-Rabi frequency
rabi = halfRabiFreq(laser_intensity, tau, laser_wavelength)
if(rabi_scaling != None): # For normalising the rabi frequency
rabi = rabi*rabi_scaling
else:
rabi_scaling = 1 # For symbolic printing
if(rabi_factors != None):
if(len(rabi_factors) != len(Q)):
print("rabi_factors must be the same length as Q! Each element of Q is multiplied by the corresponding rabi_factor.")
else:
rabi_factors = [1 for q in Q] # Set Rabi factors to 1
# Initialise null parameters
if(atomic_velocity == None):
atomic_velocity = 0
# Populate A matrix
rho_ggpp(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, numeric_print = numeric_print)
rho_eepp(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, tau_f = tau_f, numeric_print = numeric_print)
rho_ge(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, laser_wavelength, atomic_velocity, tau_f = tau_f, detuning = detuning, numeric_print = numeric_print)
rho_eg(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, laser_wavelength, atomic_velocity, tau_f = tau_f, detuning = detuning, numeric_print = numeric_print)
# Symbolic Printing
if(symbolic_print == True):
init_printing()
symbolicPrintSystem(n, E, G, Q, Q_decay, tau_f, detuning,
laser_wavelength, atomic_velocity, rabi_scaling, rabi_factors)
return A
def rho_ggpp(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, numeric_print = None):
""" Function to populate the matrix A with coefficients for populations and atomic coherences of the ground states.
"""
# rho_gg''
for g in G: # Start with looping over g and g'' for rho_gg''
for gpp in G:
row = index(g, gpp, n) # matrix positions of rho_gg'' in 1D array
A[row, row] += -1.j*delta(g, gpp) # first term in equation
for e in E:
column = index(g, e, n)
for i,q in enumerate(Q): # Sum over all polarisations
A[row, column] += coupling(e, gpp, q)*1.j*rabi*rabi_factors[i]
for e in E:
column = index(e, gpp, n)
for i,q in enumerate(Q):
A[row, column] += -1.j*coupling(e, g, q)*rabi*rabi_factors[i]
for ep in E:
for epp in E:
column = index(epp, ep, n)
column2 = index(ep, epp, n)
sum_decay_channels = 0
for gp in G:
for qp in Q_decay: # Sum over decay channel polarisations
sum_decay_channels += abs(coupling(epp, gp, qp)*coupling(ep, gp, qp))
if(sum_decay_channels != 0):
if(ep.label == epp.label):
for qp in Q_decay:
A[row, column] += 1/(2*tau)*abs(coupling(ep, gpp, qp)*coupling(epp,g, qp))/sum_decay_channels
A[row, column2] += 1/(2*tau)*abs(coupling(epp, gpp, qp)*coupling(ep, g, qp))/sum_decay_channels
else:
# Then this is a vertical coherence and the generalised decay constant must be evaluated
decay_const = generalisedDecayConstant(ep, epp, gpp, G, Q_decay)/(2*tau) # Divide by two to take into account double counting of e'e'' and e''e'
A[row, column] += decay_const
A[row, column2] += decay_const
if(numeric_print == True): # Print numerical equations
print("rho_dot", g.label, gpp.label, " = ")
for line in range(n*n):
if (A[row, line] != 0):
print(A[row, line], "rho", getStateLabelsFromLineNo(line, n))
def rho_eepp(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, tau_f = None, numeric_print = None):
""" Function to populate the matrix A with coefficients for populations and atomic coherences of the excited states.
"""
# rho_ee''
for e in E:
for epp in E:
row = index(e, epp, n)
A[row, row] += -1.j*delta(e, epp) - 1/(tau)
if(tau_f != None):
A[row, row] -= 1/tau_f
for g in G:
column = index(e, g, n)
for i,q in enumerate(Q):
A[row, column] += 1.j*coupling(epp, g, q)*rabi*rabi_factors[i]
for g in G:
column = index(g, epp, n)
for i,q in enumerate(Q):
A[row, column] += -1.j*coupling(e, g, q)*rabi*rabi_factors[i]
if(numeric_print == True):
print("rho_dot", e.label, epp.label, " = ")
for line in range(n*n):
if (A[row, line] != 0):
print(A[row, line], "rho", getStateLabelsFromLineNo(line, n))
def rho_ge(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, laser_wavelength, atomic_velocity, tau_f = None, detuning = None, numeric_print = None):
""" Function to populate the matrix A with coefficients for optical coherences between ground and excited states.
"""
# rho_ge
for g in G:
for e in E:
row = index(g, e, n)
A[row, row] += -1.j*dopplerDelta(e, g, w_q = angularFreq(laser_wavelength),
lambda_q = laser_wavelength, v_z = atomic_velocity) - 1/(2*tau)
if(detuning != None):
A[row, row] += -1.j*detuning
if(tau_f != None):
A[row, row] -= 1/(2*tau_f)
for ep in E:
column = index(ep, e, n)
for i,q in enumerate(Q):
A[row, column] += -1.j*coupling(ep, g, q)*rabi*rabi_factors[i]
for gp in G:
column = index(g, gp, n)
for i,q in enumerate(Q):
A[row, column] += 1.j*coupling(e, gp, q)*rabi*rabi_factors[i]
if(numeric_print == True):
print("rho_dot", g.label, e.label, " = ")
for line in range(n*n):
if (A[row, line] != 0):
print(A[row, line], "rho", getStateLabelsFromLineNo(line, n))
def rho_eg(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, laser_wavelength, atomic_velocity, tau_f = None, detuning = None, numeric_print = None):
""" Function to populate the matrix A with coefficients for optical coherences between excited and ground states.
"""
# rho_eg
for e in E:
for g in G:
row = index(e, g, n)
A[row, row] += 1.j*dopplerDelta(e, g, w_q = angularFreq(laser_wavelength),
lambda_q = laser_wavelength, v_z = atomic_velocity) - 1/(2*tau)
if(detuning != None):
A[row, row] += 1.j*detuning
if(tau_f != None):
A[row, row] -= 1/(2*tau_f)
for ep in E:
column = index(e, ep, n)
for i,q in enumerate(Q):
A[row, column] += 1.j*coupling(ep, g, q)*rabi*rabi_factors[i]
for gp in G:
column = index(gp, g, n)
for i,q in enumerate(Q):
A[row, column] += -1.j*coupling(e, gp, q)*rabi*rabi_factors[i]
if(numeric_print == True):
print("rho_dot", e.label, g.label, " = ")
for line in range(n*n):
if (A[row, line] != 0):
print(A[row, line], "rho", getStateLabelsFromLineNo(line, n)) | 49.157303 | 173 | 0.543886 |
from LASED.state import *
from LASED.detuning import *
from LASED.symbolic_print import *
from LASED.half_rabi_freq import *
from LASED.decay_constant import *
from LASED.index import *
from sympy import *
from sympy import Symbol
import numpy as np
def timeEvolutionMatrix(n, E, G, Q, Q_decay, tau, laser_wavelength, laser_intensity,
tau_f = None, detuning = None, symbolic_print = None, numeric_print = None,
rabi_scaling = None, rabi_factors = None, atomic_velocity = None):
A = np.zeros((n*n,n*n), dtype = np.complex)
rabi = halfRabiFreq(laser_intensity, tau, laser_wavelength)
if(rabi_scaling != None):
rabi = rabi*rabi_scaling
else:
rabi_scaling = 1
if(rabi_factors != None):
if(len(rabi_factors) != len(Q)):
print("rabi_factors must be the same length as Q! Each element of Q is multiplied by the corresponding rabi_factor.")
else:
rabi_factors = [1 for q in Q]
if(atomic_velocity == None):
atomic_velocity = 0
rho_ggpp(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, numeric_print = numeric_print)
rho_eepp(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, tau_f = tau_f, numeric_print = numeric_print)
rho_ge(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, laser_wavelength, atomic_velocity, tau_f = tau_f, detuning = detuning, numeric_print = numeric_print)
rho_eg(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, laser_wavelength, atomic_velocity, tau_f = tau_f, detuning = detuning, numeric_print = numeric_print)
if(symbolic_print == True):
init_printing()
symbolicPrintSystem(n, E, G, Q, Q_decay, tau_f, detuning,
laser_wavelength, atomic_velocity, rabi_scaling, rabi_factors)
return A
def rho_ggpp(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, numeric_print = None):
for g in G:
for gpp in G:
row = index(g, gpp, n)
A[row, row] += -1.j*delta(g, gpp)
for e in E:
column = index(g, e, n)
for i,q in enumerate(Q):
A[row, column] += coupling(e, gpp, q)*1.j*rabi*rabi_factors[i]
for e in E:
column = index(e, gpp, n)
for i,q in enumerate(Q):
A[row, column] += -1.j*coupling(e, g, q)*rabi*rabi_factors[i]
for ep in E:
for epp in E:
column = index(epp, ep, n)
column2 = index(ep, epp, n)
sum_decay_channels = 0
for gp in G:
for qp in Q_decay:
sum_decay_channels += abs(coupling(epp, gp, qp)*coupling(ep, gp, qp))
if(sum_decay_channels != 0):
if(ep.label == epp.label):
for qp in Q_decay:
A[row, column] += 1/(2*tau)*abs(coupling(ep, gpp, qp)*coupling(epp,g, qp))/sum_decay_channels
A[row, column2] += 1/(2*tau)*abs(coupling(epp, gpp, qp)*coupling(ep, g, qp))/sum_decay_channels
else:
decay_const = generalisedDecayConstant(ep, epp, gpp, G, Q_decay)/(2*tau)
A[row, column] += decay_const
A[row, column2] += decay_const
if(numeric_print == True):
print("rho_dot", g.label, gpp.label, " = ")
for line in range(n*n):
if (A[row, line] != 0):
print(A[row, line], "rho", getStateLabelsFromLineNo(line, n))
def rho_eepp(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, tau_f = None, numeric_print = None):
for e in E:
for epp in E:
row = index(e, epp, n)
A[row, row] += -1.j*delta(e, epp) - 1/(tau)
if(tau_f != None):
A[row, row] -= 1/tau_f
for g in G:
column = index(e, g, n)
for i,q in enumerate(Q):
A[row, column] += 1.j*coupling(epp, g, q)*rabi*rabi_factors[i]
for g in G:
column = index(g, epp, n)
for i,q in enumerate(Q):
A[row, column] += -1.j*coupling(e, g, q)*rabi*rabi_factors[i]
if(numeric_print == True):
print("rho_dot", e.label, epp.label, " = ")
for line in range(n*n):
if (A[row, line] != 0):
print(A[row, line], "rho", getStateLabelsFromLineNo(line, n))
def rho_ge(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, laser_wavelength, atomic_velocity, tau_f = None, detuning = None, numeric_print = None):
for g in G:
for e in E:
row = index(g, e, n)
A[row, row] += -1.j*dopplerDelta(e, g, w_q = angularFreq(laser_wavelength),
lambda_q = laser_wavelength, v_z = atomic_velocity) - 1/(2*tau)
if(detuning != None):
A[row, row] += -1.j*detuning
if(tau_f != None):
A[row, row] -= 1/(2*tau_f)
for ep in E:
column = index(ep, e, n)
for i,q in enumerate(Q):
A[row, column] += -1.j*coupling(ep, g, q)*rabi*rabi_factors[i]
for gp in G:
column = index(g, gp, n)
for i,q in enumerate(Q):
A[row, column] += 1.j*coupling(e, gp, q)*rabi*rabi_factors[i]
if(numeric_print == True):
print("rho_dot", g.label, e.label, " = ")
for line in range(n*n):
if (A[row, line] != 0):
print(A[row, line], "rho", getStateLabelsFromLineNo(line, n))
def rho_eg(A, n, E, G, Q, Q_decay, tau, rabi, rabi_factors, laser_wavelength, atomic_velocity, tau_f = None, detuning = None, numeric_print = None):
for e in E:
for g in G:
row = index(e, g, n)
A[row, row] += 1.j*dopplerDelta(e, g, w_q = angularFreq(laser_wavelength),
lambda_q = laser_wavelength, v_z = atomic_velocity) - 1/(2*tau)
if(detuning != None):
A[row, row] += 1.j*detuning
if(tau_f != None):
A[row, row] -= 1/(2*tau_f)
for ep in E:
column = index(e, ep, n)
for i,q in enumerate(Q):
A[row, column] += 1.j*coupling(ep, g, q)*rabi*rabi_factors[i]
for gp in G:
column = index(gp, g, n)
for i,q in enumerate(Q):
A[row, column] += -1.j*coupling(e, gp, q)*rabi*rabi_factors[i]
if(numeric_print == True):
print("rho_dot", e.label, g.label, " = ")
for line in range(n*n):
if (A[row, line] != 0):
print(A[row, line], "rho", getStateLabelsFromLineNo(line, n)) | true | true |
1c2c620fe4e04f19a92c2ab7051646f1df26af3b | 23,267 | py | Python | bdgym/envs/driver_assistant/policy.py | RDLLab/benevolent-deception-gym | 4d04e097609097e0f07c661aac221184ebdec2fe | [
"MIT"
] | null | null | null | bdgym/envs/driver_assistant/policy.py | RDLLab/benevolent-deception-gym | 4d04e097609097e0f07c661aac221184ebdec2fe | [
"MIT"
] | null | null | null | bdgym/envs/driver_assistant/policy.py | RDLLab/benevolent-deception-gym | 4d04e097609097e0f07c661aac221184ebdec2fe | [
"MIT"
] | null | null | null | """Driver Policies for the Driver Assistant Environment """
from copy import deepcopy
from typing import Tuple, Optional, List, Dict
import numpy as np
from highway_env.utils import not_zero, do_every
from highway_env.types import Vector
from highway_env.road.lane import AbstractLane
from highway_env.envs.common.action import Action
from highway_env.vehicle.kinematics import Vehicle
from highway_env.vehicle.behavior import IDMVehicle
from highway_env.road.road import Road, LaneIndex, Route
import bdgym.envs.utils as utils
from bdgym.envs.driver_assistant.action import AssistantDiscreteAction
from bdgym.envs.driver_assistant.vehicle import DriverAssistantVehicle
from bdgym.envs.driver_assistant.driver_types import \
sample_driver_config, DRIVER_PARAM_LIMITS
Observation = np.ndarray
class IDMDriverPolicy(IDMVehicle):
    """A driver policy that acts similarly to IDMVehicle.

    Key difference is that its decisions are based on the observations
    of its own position and velocity that it receives from the assistant
    and the noisy observations it receives about the other nearby
    vehicles.
    """

    MAX_SPEED = DriverAssistantVehicle.MAX_SPEED

    def __init__(self,
                 road: Road,
                 position: Vector,
                 heading: float = 0,
                 speed: float = 0,
                 target_lane_index: int = None,
                 target_speed: float = None,
                 route: Route = None,
                 enable_lane_change: bool = True,
                 timer: float = None,
                 acc_max: float = None,
                 comfort_acc_max: float = None,
                 comfort_acc_min: float = None,
                 distance_wanted: float = None,
                 time_wanted: float = None,
                 delta: float = None,
                 politeness: float = None,
                 lane_change_min_acc_gain: float = None,
                 lane_change_max_braking_imposed: float = None,
                 lane_change_delay: float = None,
                 **kwargs):
        """Any IDM/MOBIL parameter left as None falls back to the matching
        IDMVehicle class constant. Extra **kwargs (e.g. the factory's
        'type' key) are accepted and ignored.
        """
        super().__init__(
            road,
            position,
            heading=heading,
            speed=speed,
            target_lane_index=target_lane_index,
            target_speed=target_speed,
            route=route,
            enable_lane_change=enable_lane_change,
            timer=timer
        )
        self.acc_max = self.ACC_MAX if acc_max is None else acc_max
        self.comfort_acc_max = self.COMFORT_ACC_MAX \
            if comfort_acc_max is None else comfort_acc_max
        self.comfort_acc_min = self.COMFORT_ACC_MIN \
            if comfort_acc_min is None else comfort_acc_min
        self.distance_wanted = self.DISTANCE_WANTED \
            if distance_wanted is None else distance_wanted
        self.time_wanted = self.TIME_WANTED \
            if time_wanted is None else time_wanted
        self.delta = self.DELTA if delta is None else delta
        self.politeness = self.POLITENESS if politeness is None else politeness
        self.lane_change_min_acc_gain = self.LANE_CHANGE_MIN_ACC_GAIN \
            if lane_change_min_acc_gain is None else lane_change_min_acc_gain
        self.lane_change_max_braking_imposed = \
            self.LANE_CHANGE_MAX_BRAKING_IMPOSED \
            if lane_change_max_braking_imposed is None \
            else lane_change_max_braking_imposed
        self.lane_change_delay = self.LANE_CHANGE_DELAY \
            if lane_change_delay is None else lane_change_delay
        if timer is None:
            # Position-dependent phase de-synchronises lane-change
            # decisions between vehicles.
            self.timer = (np.sum(self.position)*np.pi) % self.lane_change_delay

    @classmethod
    def create_from(cls, vehicle: Vehicle, **kwargs) -> "IDMDriverPolicy":
        """
        Create a new policy from an existing vehicle.

        The vehicle dynamics are copied, other properties are default.

        :param vehicle: a vehicle
        :return: a new policy at the same dynamical state
        """
        return cls(
            road=vehicle.road,
            position=vehicle.position,
            heading=vehicle.heading,
            speed=vehicle.speed,
            **kwargs
        )

    def get_action(self, obs: np.ndarray, dt: float) -> Action:
        """
        Get the next action driver will take.

        Note: assistant signal and other vehicle observations should
        be non-normalized values.

        :param obs: the driver observation of ego and neighbouring vehicles
            ['presence', 'x', 'y', 'vx', 'vy', 'acceleration', 'steering']
        :param dt: the step size for action
        :return: action ['acceleration', 'steering'] the vehicle would take
        """
        assistant_signal, other_vehicle_obs = self.parse_obs(obs)
        self._update_dynamics(assistant_signal, dt)
        return self._get_idm_action(other_vehicle_obs)

    @staticmethod
    def parse_obs(obs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Parse driver observation into ego and other vehicle obs """
        # for ego vehicle ignore 'presence' feature
        assistant_signal = obs[0][1:]
        # for other vehicle obs ignore 'acceleration' and 'steering' as these
        # are always 0.0 for other vehicles
        other_vehicle_obs = obs[1:, :-2]
        return assistant_signal, other_vehicle_obs

    def _update_dynamics(self, ego_vehicle_obs: Action, dt: float):
        """Update internal kinematic state from the assistant's ego signal.

        :param ego_vehicle_obs: ego observation ['x', 'y', 'vx', 'vy', ...]
        :param dt: time elapsed since the last update (advances the
            lane-change timer)
        """
        self.position = ego_vehicle_obs[0:2]
        vx, vy = ego_vehicle_obs[2], ego_vehicle_obs[3]
        self.speed, self.heading = self._get_speed_and_heading(vx, vy)
        if self.road:
            self.lane_index = self.road.network.get_closest_lane_index(
                self.position
            )
            self.lane = self.road.network.get_lane(self.lane_index)
        self.timer += dt

    @staticmethod
    def _get_speed_and_heading(vx: float, vy: float) -> Tuple[float, float]:
        """Convert a velocity vector into (speed, heading) form."""
        speed = np.sqrt(vx**2 + vy**2)
        if speed == 0.0:
            # vx = vy = 0.0
            heading = 0.0
        elif vx == 0.0:
            heading = np.arcsin(vy / speed)
        elif vy == 0.0:
            heading = np.arccos(vx / speed)
        else:
            # NOTE(review): np.arctan(vy / vx) loses the quadrant for
            # vx < 0 (np.arctan2(vy, vx) would not). Left as-is since
            # vehicles here predominantly travel in +x — confirm intent.
            heading = np.arctan(vy / vx)
        return speed, heading

    @staticmethod
    def _get_direction(heading: float) -> np.ndarray:
        """Unit direction vector [cos(heading), sin(heading)]."""
        return np.array([np.cos(heading), np.sin(heading)])

    @property
    def observation(self) -> np.ndarray:
        """Ego state in standard observation row format:
        ['presence'=1.0, 'x', 'y', 'vx', 'vy'].
        """
        return np.array([1.0, *self.position, *self.velocity])

    @property
    def configuration(self) -> dict:
        """The driver policy parameters """
        config = {
            param: self.__getattribute__(param)
            for param in DRIVER_PARAM_LIMITS
        }
        return config

    def _get_idm_action(self, other_vehicle_obs: np.ndarray) -> np.ndarray:
        """Compute [acceleration, steering] via IDM (longitudinal) and
        MOBIL (lateral) from the observed neighbouring vehicles.
        """
        # Lateral: MOBIL
        self.follow_road()
        if self.enable_lane_change:
            self._change_lane_policy(other_vehicle_obs)
        steering = self.steering_control(self.target_lane_index)
        steering = np.clip(
            steering, -self.MAX_STEERING_ANGLE, self.MAX_STEERING_ANGLE
        )
        # Longitudinal: IDM
        front_vehicle_obs, _ = self._get_neighbours(other_vehicle_obs)
        acceleration = self._acceleration_from_obs(front_vehicle_obs)
        acceleration = np.clip(acceleration, -1*self.acc_max, self.acc_max)
        return np.array([acceleration, steering])

    def _acceleration_from_obs(self, front_vehicle_obs: np.ndarray) -> float:
        """IDM acceleration of the ego vehicle given the (possibly absent)
        observation row of the vehicle directly in front.
        """
        acceleration = self.comfort_acc_max * (
            1 - np.power(max(self.speed, 0) / self.target_speed, self.delta)
        )
        if front_vehicle_obs is not None:
            front_pos = front_vehicle_obs[1:3]
            # Longitudinal gap along the current lane.
            d = (
                self.lane.local_coordinates(front_pos)[0]
                - self.lane.local_coordinates(self.position)[0]
            )
            gap = self._desired_gap_from_obs(front_vehicle_obs)
            gap /= not_zero(d)
            acceleration -= self.comfort_acc_max * np.power(gap, 2)
        return acceleration

    def _acceleration(self,
                      ego_xy: Tuple[float, float],
                      ego_velocity: Tuple[float, float],
                      front_xy: Tuple[float, float] = None,
                      front_velocity: Tuple[float, float] = None,
                      lane: AbstractLane = None) -> float:
        """IDM acceleration for an arbitrary (observed) vehicle, used by
        the MOBIL test to predict the accelerations of followers.
        """
        # NOTE(review): not_zero(0) merely returns a small epsilon, which
        # makes the speed ratio below enormous and the free-flow term
        # strongly negative. The followed vehicle's true target speed is
        # unobservable here, so this looks like a placeholder — confirm
        # intent before changing.
        ego_target_speed = not_zero(0)
        ego_speed, ego_heading = self._get_speed_and_heading(*ego_velocity)
        speed = max(ego_speed, 0) / ego_target_speed
        acceleration = self.comfort_acc_max * (
            1 - np.power(speed, self.delta)
        )
        if front_xy is not None:
            if lane is None:
                lane = self.lane
            d = (
                lane.local_coordinates(front_xy)[0]
                - lane.local_coordinates(ego_xy)[0]
            )
            ego_direction = self._get_direction(ego_heading)
            gap = self._desired_gap(
                ego_velocity, ego_speed, ego_direction, front_velocity
            )
            gap /= not_zero(d)
            acceleration -= self.comfort_acc_max * np.power(gap, 2)
        return acceleration

    def _desired_gap_from_obs(self, vehicle_obs: np.ndarray):
        """Desired gap between ego and the vehicle described by the
        observation row ['presence', 'x', 'y', 'vx', 'vy', ...].
        """
        # Bug fix: velocity is the two components at indices 3 and 4.
        # The previous slice [3:4] dropped 'vy' and broadcast 'vx' over
        # both axes of the relative-velocity vector.
        other_velocity = vehicle_obs[3:5]
        return self._desired_gap(
            self.velocity, self.speed, self.direction, other_velocity
        )

    def _desired_gap(self,
                     ego_velocity: np.ndarray,
                     ego_speed: float,
                     ego_direction: np.ndarray,
                     other_velocity: np.ndarray) -> float:
        """IDM desired gap d* = d0 + v*T + v*dv / (2*sqrt(a*b)), where dv
        is the approach speed projected onto the ego direction.
        """
        d0 = self.distance_wanted
        tau = self.time_wanted
        ab = -1*self.comfort_acc_max * self.comfort_acc_min
        dv = np.dot(ego_velocity - other_velocity, ego_direction)
        d_star = d0 + ego_speed * tau + ego_speed * dv / (2 * np.sqrt(ab))
        return d_star

    def _get_neighbours(self,
                        other_vehicle_obs: np.ndarray,
                        lane_index: LaneIndex = None
                        ) -> Tuple[Optional[np.ndarray],
                                   Optional[np.ndarray]]:
        """Observation rows of the closest vehicles in front of and behind
        the ego vehicle within the specified lane (current lane if None).
        Returns (front, rear), either of which may be None.
        """
        if lane_index is None:
            lane_index = self.lane_index
        lane = self.road.network.get_lane(lane_index)
        ego_lane_x = lane.local_coordinates(self.position)[0]
        closest_front = None
        min_front_distance = float('inf')
        closest_rear = None
        min_rear_distance = float('inf')
        for other in other_vehicle_obs:
            other_xy = other[1:3]
            if not lane.on_lane(other_xy):
                continue
            other_lane_x = lane.local_coordinates(other_xy)[0]
            distance = abs(other_lane_x - ego_lane_x)
            if ego_lane_x < other_lane_x and distance < min_front_distance:
                closest_front = other
                min_front_distance = distance
            elif ego_lane_x > other_lane_x and distance < min_rear_distance:
                closest_rear = other
                min_rear_distance = distance
        return closest_front, closest_rear

    def _change_lane_policy(self, other_vehicle_obs: np.ndarray) -> None:
        """Periodically decide, via MOBIL, whether to change target lane."""
        # at a given frequency,
        if not do_every(self.lane_change_delay, self.timer):
            return
        self.timer = 0
        # decide to make a lane change
        for lane_index in self.road.network.side_lanes(self.lane_index):
            # Is the candidate lane close enough?
            if not self.road.network.get_lane(lane_index).is_reachable_from(
                    self.position):
                continue
            # Does the MOBIL model recommend a lane change?
            if self._mobil(lane_index, other_vehicle_obs):
                self.target_lane_index = lane_index

    def _mobil(self,
               lane_index: LaneIndex,
               other_vehicle_obs: np.ndarray) -> bool:
        """MOBIL lane-change test based on observation rows. Overrides
        parent, which used direct vehicle references.
        """
        # Is the maneuver unsafe for the new following vehicle?
        new_preceding, new_following = self._get_neighbours(
            other_vehicle_obs, lane_index
        )
        lane = self.road.network.get_lane(lane_index)
        ego_obs = self.observation
        new_following_a, new_following_pred_a = self._acceleration_changes(
            following=new_following,
            preceding=new_preceding,
            new_preceding=ego_obs,
            lane=lane
        )
        if new_following_pred_a < -1*self.lane_change_max_braking_imposed:
            return False
        # Do I have a planned route for a specific lane which is safe for me
        # to access?
        old_preceding, old_following = self._get_neighbours(other_vehicle_obs)
        self_pred_a = self._acceleration_from_obs(new_preceding)
        if self.route and self.route[0][2]:
            # Wrong direction
            if np.sign(lane_index[2] - self.target_lane_index[2]) \
                    != np.sign(self.route[0][2] - self.target_lane_index[2]):
                return False
            # Unsafe braking required
            if self_pred_a < -1*self.lane_change_max_braking_imposed:
                return False
        else:
            # Is there an acceleration advantage for me and/or my
            # followers to change lane?
            self_a = self._acceleration_from_obs(old_preceding)
            old_following_a, old_following_pred_a = self._acceleration_changes(
                following=old_following,
                preceding=ego_obs,
                new_preceding=old_preceding
            )
            jerk = self_pred_a - self_a
            jerk += self.politeness * (
                new_following_pred_a - new_following_a
                + old_following_pred_a - old_following_a
            )
            if jerk < self.lane_change_min_acc_gain:
                return False
        # All clear, let's go!
        return True

    def _acceleration_changes(self,
                              following: np.ndarray,
                              preceding: np.ndarray,
                              new_preceding: np.ndarray,
                              lane: AbstractLane = None
                              ) -> Tuple[float, float]:
        """Predicted accelerations of `following` before and after the
        lane change (i.e. following `preceding` vs `new_preceding`).
        Missing neighbours contribute 0.0.
        """
        following_a = 0.0
        following_pred_a = 0.0
        if following is not None and preceding is not None:
            following_a = self._acceleration(
                ego_xy=following[1:3],
                ego_velocity=following[3:5],
                front_xy=preceding[1:3],
                front_velocity=preceding[3:5],
                lane=lane
            )
        if following is not None and new_preceding is not None:
            following_pred_a = self._acceleration(
                ego_xy=following[1:3],
                ego_velocity=following[3:5],
                front_xy=new_preceding[1:3],
                front_velocity=new_preceding[3:5],
                lane=lane
            )
        return following_a, following_pred_a
class IDMAssistantPolicy(IDMDriverPolicy):
    """An IDM Assistant policy that provides vehicle obs and recommendation.

    Specifically:
    - Relays the actual observations received for the driver vehicle
    - Recommends acceleration and steering controls to the driver based
      on the IDM model
    """

    def __init__(self,
                 road: Road,
                 position: Vector,
                 *args,
                 action_ranges: Optional[Dict[str, List[float]]] = None,
                 normalize: bool = True,
                 **kwargs):
        """
        :param action_ranges: map from action feature name to its
            [low, high] range, used when normalizing the action
        :param normalize: whether to map each action feature into [-1, 1]
        """
        super().__init__(road, position, *args, **kwargs)
        self.action_ranges = {} if action_ranges is None else action_ranges
        self.normalize = normalize

    def get_action(self, obs: np.ndarray, dt: float) -> Action:
        """
        Get the next action assistant will take.

        :param obs: full observation of the ego vehicle (row 0) and nearby
            vehicles. Includes ['presence', 'x', 'y', 'vx', 'vy']
        :param dt: the step size for the action
        :return: the assistant action
            ['x', 'y', 'vx', 'vy', 'acceleration', 'steering']
        """
        other_vehicle_obs = obs[1:, :]
        # relay the ego observation unchanged (minus 'presence')
        assistant_signal = obs[0, 1:]
        self._update_dynamics(assistant_signal, dt)
        recommended_controls = self._get_idm_action(other_vehicle_obs)
        action = np.concatenate((assistant_signal, recommended_controls))
        if self.normalize:
            for i, frange in enumerate(self.action_ranges.values()):
                action[i] = utils.lmap(
                    action[i], frange, [-1, 1]
                )
        return action
class RandomAssistantPolicy(IDMAssistantPolicy):
    """A Random Assistant policy.

    Specifically:
    - Returns the actual observations received for the driver vehicle
    - Recommends a random acceleration and steering control to the driver
    """

    def get_action(self, obs: np.ndarray, dt: float) -> Action:
        """ Overrides IDMDriverVehicle.get_action() """
        action = super().get_action(obs, dt)
        # Bug fix: utils.get_truncated_normal returns a frozen distribution
        # (see ChangingGuidedIDMDriverPolicy.INDEPENDENCE_DIST, which calls
        # .rvs() on it), so sample it rather than storing the distribution
        # object in the float action array.
        action[4] = utils.get_truncated_normal(0.0, 1.0, -1.0, 1.0).rvs()
        action[5] = utils.get_truncated_normal(0.0, 1.0, -1.0, 1.0).rvs()
        return action
class RandomDiscreteAssistantPolicy(IDMAssistantPolicy):
    """A Random Assistant policy for the Discrete Assistant Action space.

    Specifically:
    - Returns the actual observations received for the driver vehicle
    - Recommends a random acceleration and steering control to the driver
    """

    def get_action(self, obs: np.ndarray, dt: float) -> Action:
        """ Overrides IDMDriverVehicle.get_action() """
        choices = [
            AssistantDiscreteAction.NOOP,
            AssistantDiscreteAction.UP,
            AssistantDiscreteAction.DOWN
        ]
        # Start from an all-NOOP action, then randomise only the two
        # recommendation slots (acceleration=4, steering=5).
        action = np.full(
            AssistantDiscreteAction.ASSISTANT_DISCRETE_ACTION_SPACE_SIZE,
            AssistantDiscreteAction.NOOP
        )
        for slot in (4, 5):
            action[slot] = np.random.choice(choices)
        return action
class DoNothingDiscreteAssistantPolicy(IDMAssistantPolicy):
    """Assistant policy that recommends no action and applies no deception """

    def get_action(self, obs: np.ndarray, dt: float) -> Action:
        """ Overrides IDMDriverVehicle.get_action(); obs and dt unused """
        noop = AssistantDiscreteAction.NOOP
        return np.full(
            AssistantDiscreteAction.ASSISTANT_DISCRETE_ACTION_SPACE_SIZE,
            noop
        )
class RandomDriverPolicy(IDMDriverPolicy):
    """A driver policy that samples its controls uniformly at random """

    def get_action(self, obs: np.ndarray, dt: float) -> Action:
        """ Overrides IDMDriverVehicle.get_action() """
        ego_signal, _ = self.parse_obs(obs)
        self._update_dynamics(ego_signal, dt)
        # Symmetric bounds: [-acc_max, acc_max] x [-max_angle, max_angle]
        bounds = np.array([self.acc_max, self.MAX_STEERING_ANGLE])
        return np.random.uniform(low=-bounds, high=bounds)
class GuidedIDMDriverPolicy(IDMDriverPolicy):
    """A driver policy that blends assistant recommendations with IDM.

    How much the driver follows the assistant's suggestions versus relying
    on the IDM driver model is controlled by the "independence"
    hyperparameter.
    """

    def __init__(self,
                 road: Road,
                 position: Vector,
                 *args,
                 independence: float = 0.9,
                 **kwargs):
        """
        :param independence: weight given to the driver's own IDM action;
            (1 - independence) is given to the assistant's recommendation
        """
        super().__init__(road, position, *args, **kwargs)
        self.independence = independence

    @property
    def configuration(self) -> dict:
        """The driver policy parameters """
        return {**super().configuration, "independence": self.independence}

    def get_action(self, obs: np.ndarray, dt: float) -> Action:
        """ Overrides IDMDriverVehicle.get_action() """
        signal, other_obs = self.parse_obs(obs)
        self._update_dynamics(signal, dt)
        own_action = self._get_idm_action(other_obs)
        # The assistant's recommendation is the last two signal entries:
        # [acceleration, steering]
        return self._calc_action(signal[-2:], own_action)

    def _calc_action(self,
                     assistant_action: np.ndarray,
                     idm_action: np.ndarray) -> np.ndarray:
        """Independence-weighted blend of the IDM and assistant actions """
        weight = self.independence
        return weight * idm_action + (1 - weight) * assistant_action
class ChangingGuidedIDMDriverPolicy(GuidedIDMDriverPolicy):
    """A GuidedIDMDriverPolicy whose parameters are re-sampled on creation.

    Each call to create_from() draws a fresh driver parameter set via
    sample_driver_config() and a fresh "independence" value from
    INDEPENDENCE_DIST (unless kwargs["force_independent"] is set, in which
    case independence is fixed to 1.0, i.e. the assistant is ignored).

    The action selection itself is inherited unchanged from
    GuidedIDMDriverPolicy (the previous verbatim copies of __init__,
    get_action and _calc_action have been removed).
    """

    # Distribution the driver's "independence" is drawn from: a normal
    # N(0.5, 0.25) truncated to [0.0, 1.0].
    INDEPENDENCE_DIST = utils.get_truncated_normal(
        0.5, 0.25, 0.0, 1.0
    )

    @classmethod
    def create_from(cls, vehicle: Vehicle, **kwargs) -> "IDMDriverPolicy":
        """
        Create a new policy from an existing vehicle, with re-sampled
        driver parameters.

        The vehicle dynamics are copied, other properties are sampled.

        :param vehicle: a vehicle
        :return: a new policy at the same dynamical state
        """
        driver_config = deepcopy(kwargs)
        driver_config.update(sample_driver_config())
        if kwargs.get("force_independent", False):
            driver_config["independence"] = 1.0
        else:
            driver_config["independence"] = cls.INDEPENDENCE_DIST.rvs()
        # Bug fix: previously this passed **kwargs, silently discarding the
        # freshly sampled driver_config (so parameters were never changed).
        return cls(
            road=vehicle.road,
            position=vehicle.position,
            heading=vehicle.heading,
            speed=vehicle.speed,
            **driver_config
        )
def driver_policy_factory(env, driver_config: dict) -> 'IDMDriverPolicy':
    """Get the driver policy for given driver configuration.

    :param env: environment whose ego ``vehicle`` state is copied
    :param driver_config: config dict; its "type" key selects the policy
        class, the remaining keys are forwarded to create_from()
    :raises ValueError: if "type" names an unsupported policy
    """
    supported = {
        "RandomDriverPolicy": RandomDriverPolicy,
        "GuidedIDMDriverPolicy": GuidedIDMDriverPolicy,
        "ChangingGuidedIDMDriverPolicy": ChangingGuidedIDMDriverPolicy,
    }
    driver_type = driver_config["type"]
    if driver_type not in supported:
        raise ValueError(f"Unsupported Driver Type: {driver_type}")
    return supported[driver_type].create_from(env.vehicle, **driver_config)
| 37.588045 | 79 | 0.614261 | from copy import deepcopy
from typing import Tuple, Optional, List, Dict
import numpy as np
from highway_env.utils import not_zero, do_every
from highway_env.types import Vector
from highway_env.road.lane import AbstractLane
from highway_env.envs.common.action import Action
from highway_env.vehicle.kinematics import Vehicle
from highway_env.vehicle.behavior import IDMVehicle
from highway_env.road.road import Road, LaneIndex, Route
import bdgym.envs.utils as utils
from bdgym.envs.driver_assistant.action import AssistantDiscreteAction
from bdgym.envs.driver_assistant.vehicle import DriverAssistantVehicle
from bdgym.envs.driver_assistant.driver_types import \
sample_driver_config, DRIVER_PARAM_LIMITS
Observation = np.ndarray
class IDMDriverPolicy(IDMVehicle):
MAX_SPEED = DriverAssistantVehicle.MAX_SPEED
def __init__(self,
road: Road,
position: Vector,
heading: float = 0,
speed: float = 0,
target_lane_index: int = None,
target_speed: float = None,
route: Route = None,
enable_lane_change: bool = True,
timer: float = None,
acc_max: float = None,
comfort_acc_max: float = None,
comfort_acc_min: float = None,
distance_wanted: float = None,
time_wanted: float = None,
delta: float = None,
politeness: float = None,
lane_change_min_acc_gain: float = None,
lane_change_max_braking_imposed: float = None,
lane_change_delay: float = None,
**kwargs):
super().__init__(
road,
position,
heading=heading,
speed=speed,
target_lane_index=target_lane_index,
target_speed=target_speed,
route=route,
enable_lane_change=enable_lane_change,
timer=timer
)
self.acc_max = self.ACC_MAX if acc_max is None else acc_max
self.comfort_acc_max = self.COMFORT_ACC_MAX \
if comfort_acc_max is None else comfort_acc_max
self.comfort_acc_min = self.COMFORT_ACC_MIN \
if comfort_acc_min is None else comfort_acc_min
self.distance_wanted = self.DISTANCE_WANTED \
if distance_wanted is None else distance_wanted
self.time_wanted = self.TIME_WANTED \
if time_wanted is None else time_wanted
self.delta = self.DELTA if delta is None else delta
self.politeness = self.POLITENESS if politeness is None else politeness
self.lane_change_min_acc_gain = self.LANE_CHANGE_MIN_ACC_GAIN \
if lane_change_min_acc_gain is None else lane_change_min_acc_gain
self.lane_change_max_braking_imposed = \
self.LANE_CHANGE_MAX_BRAKING_IMPOSED \
if lane_change_max_braking_imposed is None \
else lane_change_max_braking_imposed
self.lane_change_delay = self.LANE_CHANGE_DELAY \
if lane_change_delay is None else lane_change_delay
if timer is None:
self.timer = (np.sum(self.position)*np.pi) % self.lane_change_delay
@classmethod
def create_from(cls, vehicle: Vehicle, **kwargs) -> "IDMDriverPolicy":
return cls(
road=vehicle.road,
position=vehicle.position,
heading=vehicle.heading,
speed=vehicle.speed,
**kwargs
)
def get_action(self, obs: np.ndarray, dt: float) -> Action:
assistant_signal, other_vehicle_obs = self.parse_obs(obs)
self._update_dynamics(assistant_signal, dt)
return self._get_idm_action(other_vehicle_obs)
@staticmethod
def parse_obs(obs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
assistant_signal = obs[0][1:]
other_vehicle_obs = obs[1:, :-2]
return assistant_signal, other_vehicle_obs
def _update_dynamics(self, ego_vehicle_obs: Action, dt: float):
self.position = ego_vehicle_obs[0:2]
vx, vy = ego_vehicle_obs[2], ego_vehicle_obs[3]
self.speed, self.heading = self._get_speed_and_heading(vx, vy)
if self.road:
self.lane_index = self.road.network.get_closest_lane_index(
self.position
)
self.lane = self.road.network.get_lane(self.lane_index)
self.timer += dt
@staticmethod
def _get_speed_and_heading(vx: float, vy: float) -> Tuple[float, float]:
speed = np.sqrt(vx**2 + vy**2)
if speed == 0.0:
heading = 0.0
elif vx == 0.0:
heading = np.arcsin(vy / speed)
elif vy == 0.0:
heading = np.arccos(vx / speed)
else:
heading = np.arctan(vy / vx)
return speed, heading
@staticmethod
def _get_direction(heading: float) -> np.ndarray:
return np.array([np.cos(heading), np.sin(heading)])
@property
def observation(self) -> np.ndarray:
return np.array([1.0, *self.position, *self.velocity])
@property
def configuration(self) -> dict:
config = {
param: self.__getattribute__(param)
for param in DRIVER_PARAM_LIMITS
}
return config
def _get_idm_action(self, other_vehicle_obs: np.ndarray) -> np.ndarray:
self.follow_road()
if self.enable_lane_change:
self._change_lane_policy(other_vehicle_obs)
steering = self.steering_control(self.target_lane_index)
steering = np.clip(
steering, -self.MAX_STEERING_ANGLE, self.MAX_STEERING_ANGLE
)
front_vehicle_obs, _ = self._get_neighbours(other_vehicle_obs)
acceleration = self._acceleration_from_obs(front_vehicle_obs)
acceleration = np.clip(acceleration, -1*self.acc_max, self.acc_max)
return np.array([acceleration, steering])
def _acceleration_from_obs(self, front_vehicle_obs: np.ndarray) -> float:
acceleration = self.comfort_acc_max * (
1 - np.power(max(self.speed, 0) / self.target_speed, self.delta)
)
if front_vehicle_obs is not None:
front_pos = front_vehicle_obs[1:3]
d = (
self.lane.local_coordinates(front_pos)[0]
- self.lane.local_coordinates(self.position)[0]
)
gap = self._desired_gap_from_obs(front_vehicle_obs)
gap /= not_zero(d)
acceleration -= self.comfort_acc_max * np.power(gap, 2)
return acceleration
def _acceleration(self,
ego_xy: Tuple[float, float],
ego_velocity: Tuple[float, float],
front_xy: Tuple[float, float] = None,
front_velocity: Tuple[float, float] = None,
lane: AbstractLane = None) -> float:
ego_target_speed = not_zero(0)
ego_speed, ego_heading = self._get_speed_and_heading(*ego_velocity)
speed = max(ego_speed, 0) / ego_target_speed
acceleration = self.comfort_acc_max * (
1 - np.power(speed, self.delta)
)
if front_xy is not None:
if lane is None:
lane = self.lane
d = (
lane.local_coordinates(front_xy)[0]
- lane.local_coordinates(ego_xy)[0]
)
ego_direction = self._get_direction(ego_heading)
gap = self._desired_gap(
ego_velocity, ego_speed, ego_direction, front_velocity
)
gap /= not_zero(d)
acceleration -= self.comfort_acc_max * np.power(gap, 2)
return acceleration
def _desired_gap_from_obs(self, vehicle_obs: np.ndarray):
other_velocity = vehicle_obs[3:4]
return self._desired_gap(
self.velocity, self.speed, self.direction, other_velocity
)
def _desired_gap(self,
ego_velocity: np.ndarray,
ego_speed: float,
ego_direction: float,
other_velocity: np.ndarray) -> float:
d0 = self.distance_wanted
tau = self.time_wanted
ab = -1*self.comfort_acc_max * self.comfort_acc_min
dv = np.dot(ego_velocity - other_velocity, ego_direction)
d_star = d0 + ego_speed * tau + ego_speed * dv / (2 * np.sqrt(ab))
return d_star
def _get_neighbours(self,
other_vehicle_obs: np.ndarray,
lane_index: LaneIndex = None
) -> Tuple[Optional[Vehicle], Optional[Vehicle]]:
if lane_index is None:
lane_index = self.lane_index
lane = self.road.network.get_lane(lane_index)
ego_lane_x = lane.local_coordinates(self.position)[0]
closest_front = None
min_front_distance = float('inf')
closest_rear = None
min_rear_distance = float('inf')
for other in other_vehicle_obs:
other_xy = other[1:3]
if not lane.on_lane(other_xy):
continue
other_lane_x = lane.local_coordinates(other_xy)[0]
distance = abs(other_lane_x - ego_lane_x)
if ego_lane_x < other_lane_x and distance < min_front_distance:
closest_front = other
min_front_distance = distance
elif ego_lane_x > other_lane_x and distance < min_rear_distance:
closest_rear = other
min_rear_distance = distance
return closest_front, closest_rear
def _change_lane_policy(self, other_vehicle_obs: np.ndarray) -> None:
if not do_every(self.lane_change_delay, self.timer):
return
self.timer = 0
for lane_index in self.road.network.side_lanes(self.lane_index):
if not self.road.network.get_lane(lane_index).is_reachable_from(
self.position):
continue
if self._mobil(lane_index, other_vehicle_obs):
self.target_lane_index = lane_index
def _mobil(self,
lane_index: LaneIndex,
other_vehicle_obs: np.ndarray) -> bool:
new_preceding, new_following = self._get_neighbours(
other_vehicle_obs, lane_index
)
lane = self.road.network.get_lane(lane_index)
ego_obs = self.observation
new_following_a, new_following_pred_a = self._acceleration_changes(
following=new_following,
preceding=new_preceding,
new_preceding=ego_obs,
lane=lane
)
if new_following_pred_a < -1*self.lane_change_max_braking_imposed:
return False
old_preceding, old_following = self._get_neighbours(other_vehicle_obs)
self_pred_a = self._acceleration_from_obs(new_preceding)
if self.route and self.route[0][2]:
if np.sign(lane_index[2] - self.target_lane_index[2]) \
!= np.sign(self.route[0][2] - self.target_lane_index[2]):
return False
if self_pred_a < -1*self.lane_change_max_braking_imposed:
return False
else:
self_a = self._acceleration_from_obs(old_preceding)
old_following_a, old_following_pred_a = self._acceleration_changes(
following=old_following,
preceding=ego_obs,
new_preceding=old_preceding
)
jerk = self_pred_a - self_a
jerk += self.politeness * (
new_following_pred_a - new_following_a
+ old_following_pred_a - old_following_a
)
if jerk < self.lane_change_min_acc_gain:
return False
return True
def _acceleration_changes(self,
following: np.ndarray,
preceding: np.ndarray,
new_preceding: np.ndarray,
lane: AbstractLane = None
) -> Tuple[float, float]:
following_a = 0.0
following_pred_a = 0.0
if following is not None and preceding is not None:
following_a = self._acceleration(
ego_xy=following[1:3],
ego_velocity=following[3:5],
front_xy=preceding[1:3],
front_velocity=preceding[3:5],
lane=lane
)
if following is not None and new_preceding is not None:
following_pred_a = self._acceleration(
ego_xy=following[1:3],
ego_velocity=following[3:5],
front_xy=new_preceding[1:3],
front_velocity=new_preceding[3:5],
lane=lane
)
return following_a, following_pred_a
class IDMAssistantPolicy(IDMDriverPolicy):
def __init__(self,
road: Road,
position: Vector,
*args,
action_ranges: Optional[Dict[str, List[float]]] = None,
normalize: bool = True,
**kwargs):
super().__init__(road, position, *args, **kwargs)
self.action_ranges = {}
if action_ranges is not None:
self.action_ranges = action_ranges
self.normalize = normalize
def get_action(self, obs: np.ndarray, dt: float) -> Action:
other_vehicle_obs = obs[1:, :]
assistant_signal = obs[0, 1:]
self._update_dynamics(assistant_signal, dt)
recommended_controls = self._get_idm_action(other_vehicle_obs)
action = np.concatenate((assistant_signal, recommended_controls))
if self.normalize:
for i, frange in enumerate(self.action_ranges.values()):
action[i] = utils.lmap(
action[i], frange, [-1, 1]
)
return action
class RandomAssistantPolicy(IDMAssistantPolicy):
    """Assistant that relays driver observations but recommends random controls."""
    def get_action(self, obs: np.ndarray, dt: float) -> Action:
        """Return the assistant action with randomly sampled recommendations."""
        action = super().get_action(obs, dt)
        # Bug fix: utils.get_truncated_normal returns a frozen distribution
        # (ChangingGuidedIDMDriverPolicy.INDEPENDENCE_DIST calls .rvs() on
        # it), so sample it before storing in the float action array.
        action[4] = utils.get_truncated_normal(0.0, 1.0, -1.0, 1.0).rvs()
        action[5] = utils.get_truncated_normal(0.0, 1.0, -1.0, 1.0).rvs()
        return action
class RandomDiscreteAssistantPolicy(IDMAssistantPolicy):
def get_action(self, obs: np.ndarray, dt: float) -> Action:
avail_actions = [
AssistantDiscreteAction.NOOP,
AssistantDiscreteAction.UP,
AssistantDiscreteAction.DOWN
]
action = np.full(
AssistantDiscreteAction.ASSISTANT_DISCRETE_ACTION_SPACE_SIZE,
AssistantDiscreteAction.NOOP
)
action[4] = np.random.choice(avail_actions)
action[5] = np.random.choice(avail_actions)
return action
class DoNothingDiscreteAssistantPolicy(IDMAssistantPolicy):
    """Assistant policy that recommends no action and applies no deception."""
    def get_action(self, obs: np.ndarray, dt: float) -> Action:
        """Return an all-NOOP discrete assistant action (obs and dt unused)."""
        action = np.full(
            AssistantDiscreteAction.ASSISTANT_DISCRETE_ACTION_SPACE_SIZE,
            AssistantDiscreteAction.NOOP
        )
        return action
class RandomDriverPolicy(IDMDriverPolicy):
    """Driver that updates its state from the assistant signal then acts randomly."""
    def get_action(self, obs: np.ndarray, dt: float) -> Action:
        """Sample [acceleration, steering] uniformly within the control limits."""
        assistant_signal, _ = self.parse_obs(obs)
        self._update_dynamics(assistant_signal, dt)
        return np.random.uniform(
            low=[-1*self.acc_max, -1*self.MAX_STEERING_ANGLE],
            high=[self.acc_max, self.MAX_STEERING_ANGLE],
        )
class GuidedIDMDriverPolicy(IDMDriverPolicy):
def __init__(self,
road: Road,
position: Vector,
*args,
independence: float = 0.9,
**kwargs):
super().__init__(road, position, *args, **kwargs)
self.independence = independence
@property
def configuration(self) -> dict:
config = super().configuration
config["independence"] = self.independence
return config
def get_action(self, obs: np.ndarray, dt: float) -> Action:
assistant_signal, other_vehicle_obs = self.parse_obs(obs)
self._update_dynamics(assistant_signal, dt)
idm_action = self._get_idm_action(other_vehicle_obs)
action = self._calc_action(assistant_signal[-2:], idm_action)
return action
def _calc_action(self,
assistant_action: np.ndarray,
idm_action: np.ndarray) -> np.ndarray:
return (
(1 - self.independence) * assistant_action
+ (self.independence * idm_action)
)
class ChangingGuidedIDMDriverPolicy(GuidedIDMDriverPolicy):
INDEPENDENCE_DIST = utils.get_truncated_normal(
0.5, 0.25, 0.0, 1.0
)
def __init__(self,
road: Road,
position: Vector,
*args,
independence: float = 0.9,
**kwargs):
super().__init__(road, position, *args, **kwargs)
self.independence = independence
def get_action(self, obs: np.ndarray, dt: float) -> Action:
assistant_signal, other_vehicle_obs = self.parse_obs(obs)
self._update_dynamics(assistant_signal, dt)
idm_action = self._get_idm_action(other_vehicle_obs)
action = self._calc_action(assistant_signal[-2:], idm_action)
return action
def _calc_action(self,
assistant_action: np.ndarray,
idm_action: np.ndarray) -> np.ndarray:
return (
(1 - self.independence) * assistant_action
+ (self.independence * idm_action)
)
@classmethod
def create_from(cls, vehicle: Vehicle, **kwargs) -> "IDMDriverPolicy":
driver_config = deepcopy(kwargs)
driver_config.update(sample_driver_config())
if kwargs.get("force_independent", False):
driver_config["independence"] = 1.0
else:
driver_config["independence"] = cls.INDEPENDENCE_DIST.rvs()
return cls(
road=vehicle.road,
position=vehicle.position,
heading=vehicle.heading,
speed=vehicle.speed,
**kwargs
)
def driver_policy_factory(env, driver_config: dict) -> 'IDMDriverPolicy':
    """Instantiate the driver policy named by ``driver_config["type"]``.

    Raises ``ValueError`` for an unrecognised type name.
    """
    policy_classes = {
        "RandomDriverPolicy": RandomDriverPolicy,
        "GuidedIDMDriverPolicy": GuidedIDMDriverPolicy,
        "ChangingGuidedIDMDriverPolicy": ChangingGuidedIDMDriverPolicy,
    }
    policy_cls = policy_classes.get(driver_config["type"])
    if policy_cls is None:
        raise ValueError(f"Unsupported Driver Type: {driver_config['type']}")
    return policy_cls.create_from(env.vehicle, **driver_config)
| true | true |
1c2c635271b21d5a400449f0ebcfa562d6bb73e8 | 6,582 | py | Python | qa/rpc-tests/txn_doublespend.py | raasakh/beenode | b28e026ab14059c5774a13210e65e376725c82ad | [
"MIT"
] | null | null | null | qa/rpc-tests/txn_doublespend.py | raasakh/beenode | b28e026ab14059c5774a13210e65e376725c82ad | [
"MIT"
] | null | null | null | qa/rpc-tests/txn_doublespend.py | raasakh/beenode | b28e026ab14059c5774a13210e65e376725c82ad | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with a double-spend conflict
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
    """Check wallet accounting when a transaction is double-spent.

    The network starts split in two halves: nodes 0/1 see the "victim"
    payments while node 2 mines a conflicting raw transaction spending the
    same funding outputs.  After the halves reconnect, the victim
    transactions must be reported as conflicted (confirmations == -2) and
    every account balance must reflect only the double-spend.
    """
    def add_options(self, parser):
        # --mineblock additionally covers the case where the victim
        # transactions were confirmed once before being conflicted away.
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")
    def setup_network(self):
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)
    def run_test(self):
        # All nodes should start with 12,500 BEENODE:
        starting_balance = 12500
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
        # Assign coins to foo and bar accounts:
        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 12190)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 290)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 12190 - 290 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")
        # First: use raw transaction API to send 12400 BEENODE to node1_address,
        # but don't broadcast:
        doublespend_fee = Decimal('-.02')
        # The double-spend consumes both funding outputs (12190 + 290 = 12480).
        rawtx_input_0 = {}
        rawtx_input_0["txid"] = fund_foo_txid
        rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 12190)
        rawtx_input_1 = {}
        rawtx_input_1["txid"] = fund_bar_txid
        rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 290)
        inputs = [rawtx_input_0, rawtx_input_1]
        change_address = self.nodes[0].getnewaddress()
        outputs = {}
        outputs[node1_address] = 12400
        outputs[change_address] = 12480 - 12400 + doublespend_fee
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        doublespend = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(doublespend["complete"], True)
        # Create two spends using 1 500 BEENODE coin each
        # (the "victim" transactions that will later be conflicted).
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 400, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 200, 0)
        # Have node0 mine a block:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 500BEENODE for another
        # matured block, minus 400, minus 200, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block: expected += 500
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0), 12190+tx1["amount"]+tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 290+tx2["amount"]+tx2["fee"])
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Now give doublespend and its parents to miner:
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        # The double-spend side is the longer chain, so it wins the reorg.
        assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Both transactions should be conflicted
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx2["confirmations"], -2)
        # Node0's total balance should be starting balance, plus 1000BEENODE for
        # two more matured blocks, minus 12400 for the double-spend, plus fees (which are
        # negative):
        expected = starting_balance + 1000 - 12400 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*"), expected)
        # Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
        # fees (which are negative)
        assert_equal(self.nodes[0].getbalance("foo"), 12190)
        assert_equal(self.nodes[0].getbalance("bar"), 290)
        assert_equal(self.nodes[0].getbalance(""), starting_balance
                                                              -12190
                                                              - 290
                                                              -12400
                                                              + 1000
                                                              + fund_foo_tx["fee"]
                                                              + fund_bar_tx["fee"]
                                                              + doublespend_fee)
        # Node1's "from0" account balance should be just the doublespend:
        assert_equal(self.nodes[1].getbalance("from0"), 12400)
if __name__ == '__main__':
    TxnMallTest().main()
| 46.027972 | 111 | 0.609389 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
return super(TxnMallTest, self).setup_network(True)
def run_test(self):
starting_balance = 12500
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("")
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 12190)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 290)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 12190 - 290 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
node1_address = self.nodes[1].getnewaddress("from0")
doublespend_fee = Decimal('-.02')
rawtx_input_0 = {}
rawtx_input_0["txid"] = fund_foo_txid
rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 12190)
rawtx_input_1 = {}
rawtx_input_1["txid"] = fund_bar_txid
rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 290)
inputs = [rawtx_input_0, rawtx_input_1]
change_address = self.nodes[0].getnewaddress()
outputs = {}
outputs[node1_address] = 12400
outputs[change_address] = 12480 - 12400 + doublespend_fee
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransaction(rawtx)
assert_equal(doublespend["complete"], True)
# Create two spends using 1 500 BEENODE coin each
txid1 = self.nodes[0].sendfrom("foo", node1_address, 400, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 200, 0)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 500BEENODE for another
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 500
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("foo", 0), 12190+tx1["amount"]+tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 290+tx2["amount"]+tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend and its parents to miner:
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -2)
assert_equal(tx2["confirmations"], -2)
# Node0's total balance should be starting balance, plus 1000BEENODE for
expected = starting_balance + 1000 - 12400 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*"), expected)
assert_equal(self.nodes[0].getbalance("foo"), 12190)
assert_equal(self.nodes[0].getbalance("bar"), 290)
assert_equal(self.nodes[0].getbalance(""), starting_balance
-12190
- 290
-12400
+ 1000
+ fund_foo_tx["fee"]
+ fund_bar_tx["fee"]
+ doublespend_fee)
assert_equal(self.nodes[1].getbalance("from0"), 12400)
if __name__ == '__main__':
TxnMallTest().main()
| true | true |
1c2c63f5b27e9b7d657b53a9fba7139c1916373d | 3,314 | py | Python | app/app/settings.py | dilnawaz07/recipe-app-api | bc6fe1693d5117390461f09a902dc6799c975b71 | [
"MIT"
] | null | null | null | app/app/settings.py | dilnawaz07/recipe-app-api | bc6fe1693d5117390461f09a902dc6799c975b71 | [
"MIT"
] | null | null | null | app/app/settings.py | dilnawaz07/recipe-app-api | bc6fe1693d5117390461f09a902dc6799c975b71 | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded here; for production it should be
# loaded from the environment like the DB credentials below.
SECRET_KEY = 'u*g%p(%4(b&4dq)yh$1gw_cv3cag-=pqicw(q1y47_0_56pyt$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Django REST framework + token auth support.
    'rest_framework',
    'rest_framework.authtoken',
    # Project apps.
    'core',
    'user',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Connection details come from environment variables (DB_HOST, DB_NAME,
# DB_USER, DB_PASS) — presumably injected by the container/orchestration
# environment; they are None if unset, which makes misconfiguration fail
# only at connection time.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'HOST': os.environ.get('DB_HOST'),
        'NAME': os.environ.get('DB_NAME'),
        'USER': os.environ.get('DB_USER'),
        'PASSWORD': os.environ.get('DB_PASS'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# Swap the built-in auth user for the project's custom model in the
# 'core' app.
AUTH_USER_MODEL = 'core.User'
| 25.492308 | 91 | 0.687085 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'u*g%p(%4(b&4dq)yh$1gw_cv3cag-=pqicw(q1y47_0_56pyt$'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| true | true |
1c2c6486806e2c5d1e0bf1b01c8c40c028b80544 | 5,091 | py | Python | nose/test_pearl.py | finn-dodgson/DeepHalos | 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c | [
"MIT"
] | 2 | 2021-07-26T10:56:33.000Z | 2021-12-20T17:30:53.000Z | nose/test_pearl.py | finn-dodgson/DeepHalos | 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c | [
"MIT"
] | 1 | 2021-11-25T21:01:19.000Z | 2021-12-05T01:40:53.000Z | nose/test_pearl.py | finn-dodgson/DeepHalos | 86e0ac6c24ac97a0a2a0a60a7ea3721a04bd050c | [
"MIT"
] | 1 | 2021-11-27T02:35:10.000Z | 2021-11-27T02:35:10.000Z | import sys
sys.path.append("/home/luisals/DeepHalos")
from dlhalos_code import CNN
from dlhalos_code import custom_regularizers as reg
import dlhalos_code.data_processing as tn
from dlhalos_code import evaluation as evalu
from pickle import dump, load
import numpy as np
import os
########### CREATE GENERATORS FOR TRAINING AND VALIDATION #########

# Create the generators for training
# path = "/lfstev/deepskies/luisals/regression/test_pearl/"
# path_sims = "/lfstev/deepskies/luisals/"

# Output directory for the pickled IDs/generators and the simulation root.
path = "/mnt/beegfs/work/ati/pearl037/regression/test_pearl/"
path_sims = "/mnt/beegfs/work/ati/pearl037/"

all_sims = ["0", "1", "2", "4", "5", "6"]
s = tn.SimulationPreparation(all_sims, path=path_sims)
val_sim = all_sims[-1]

# For each simulation, build a one-simulation training set and persist the
# sampled particle IDs, their labels, and the first generated batch so that
# runs on different machines can be compared for reproducibility.
for sim in all_sims:
    train_sims = [sim]

    # Fix: the original wrapped os.mkdir in a bare ``try/except: pass``,
    # which silently swallowed *every* error (permissions, bad path, even
    # KeyboardInterrupt).  makedirs(exist_ok=True) only tolerates the
    # directory already existing and lets real failures surface.
    os.makedirs(path + 'sim_' + sim, exist_ok=True)

    training_set = tn.InputsPreparation(train_sims, shuffle=False, scaler_type="minmax", return_rescaled_outputs=True,
                                        output_range=(-1, 1), load_ids=False,
                                        random_style="random", random_subset_all=100,
                                        random_subset_each_sim=None,
                                        # random_style="uniform", random_subset_each_sim=1000000, num_per_mass_bin=10000,
                                        path=path_sims)

    dim = (31, 31, 31)
    params_tr = {'batch_size': 100, 'rescale_mean': 1.005, 'rescale_std': 0.05050, 'dim': dim}
    generator_training = tn.DataGenerator(training_set.particle_IDs, training_set.labels_particle_IDS, s.sims_dic,
                                          shuffle=False, **params_tr)

    # Fix: close the pickle files deterministically (the originals leaked
    # the file handles returned by open()).
    with open(path + 'sim_' + sim + '/particle_ids.pkl', 'wb') as f:
        dump(training_set.particle_IDs, f)
    with open(path + 'sim_' + sim + '/labels_particle_ids.pkl', 'wb') as f:
        dump(training_set.labels_particle_IDS, f)
    with open(path + 'sim_' + sim + '/generator_trainings.pkl', 'wb') as f:
        dump(generator_training[0], f)
# sims = ["training_simulation", "reseed1_simulation", "reseed2_simulation", "reseed4_simulation",
# "reseed5_simulation", "reseed6_simulation"]
# sims_num = ["0", "1", "2", "4", "5", "6"]
# path_sims = "/Users/lls/Desktop/data_transfer/"
# path = "/Users/lls/Desktop/PEARL/"
#
# for i, sim in enumerate(sims_num):
# s = tn.SimulationPreparation([sim], path=path_sims)
#
# p_ids = load(open(path + 'sim_' + sim + '/particle_ids.pkl', 'rb'))
# l_ids = load(open(path + 'sim_' + sim + '/labels_particle_ids.pkl', 'rb'))
# gen = load(open(path + 'sim_' + sim + '/generator_trainings.pkl', 'rb'))
#
# params_tr = {'batch_size': 100, 'rescale_mean': 1.005, 'rescale_std': 0.05050, 'dim': (31, 31, 31)}
# generator_training = tn.DataGenerator(p_ids, l_ids, s.sims_dic,
# shuffle=False, **params_tr)
# np.allclose(gen[0], generator_training[0][0])
# np.allclose(gen[1], generator_training[0][1])
# del s
# params_val = {'batch_size': 100, 'rescale_mean': 1.005, 'rescale_std': 0.05050, 'dim': dim}
# generator_validation = tn.DataGenerator(v_set.particle_IDs, v_set.labels_particle_IDS, s.sims_dic,
# shuffle=True, **params_val)
#
# ######### TRAIN THE MODEL ################
#
# log_alpha = -3.5
# alpha = 10 ** log_alpha
#
# # Convolutional layers parameters
#
# params_all_conv = {'activation': "linear", 'relu': True, 'strides': 1, 'padding': 'same', 'bn': False,
# 'kernel_regularizer': reg.l2_norm(alpha)}
# param_conv = {'conv_1': {'num_kernels': 2, 'dim_kernel': (3, 3, 3), 'pool': None, **params_all_conv},
# 'conv_2': {'num_kernels': 2, 'dim_kernel': (3, 3, 3), 'pool': "max", **params_all_conv},
# 'conv_3': {'num_kernels': 2, 'dim_kernel': (3, 3, 3), 'pool': "max", **params_all_conv},
# 'conv_4': {'num_kernels': 2, 'dim_kernel': (3, 3, 3), 'pool': "max", **params_all_conv},
# 'conv_5': {'num_kernels': 2, 'dim_kernel': (3, 3, 3), 'pool': "max", **params_all_conv}
# }
#
# # Dense layers parameters
#
# params_all_fcc = {'bn': False, 'activation': "linear", 'relu': True,
# 'kernel_regularizer': reg.l1_and_l21_group(alpha)}
# param_fcc = {'dense_1': {'neurons': 256, **params_all_fcc}, 'dense_2': {'neurons': 128, **params_all_fcc},
# 'last': {}}
#
# # Regularization parameters + Cauchy likelihood
#
# reg_params = {'init_gamma': 0.2}
#
# Model = CNN.CNNCauchy(param_conv, param_fcc, model_type="regression", dim=generator_training.dim,
# training_generator=generator_training, validation_generator=generator_validation,
# num_epochs=10, validation_freq=1, lr=0.0001, max_queue_size=10,
# use_multiprocessing=False,
# workers=0, verbose=1, num_gpu=1, save_summary=True, path_summary=path,
# compile=True, train=True, load_weights=None,
# load_mse_weights=False, use_mse_n_epoch=1, use_tanh_n_epoch=0,
# **reg_params)
| 48.028302 | 121 | 0.607739 | import sys
sys.path.append("/home/luisals/DeepHalos")
from dlhalos_code import CNN
from dlhalos_code import custom_regularizers as reg
import dlhalos_code.data_processing as tn
from dlhalos_code import evaluation as evalu
from pickle import dump, load
import numpy as np
import os
random_subset_each_sim=None,
path=path_sims)
dim = (31, 31, 31)
params_tr = {'batch_size': 100, 'rescale_mean': 1.005, 'rescale_std': 0.05050, 'dim': dim}
generator_training = tn.DataGenerator(training_set.particle_IDs, training_set.labels_particle_IDS, s.sims_dic,
shuffle=False, **params_tr)
dump(training_set.particle_IDs, open(path + 'sim_' + sim + '/particle_ids.pkl', 'wb'))
dump(training_set.labels_particle_IDS, open(path + 'sim_' + sim + '/labels_particle_ids.pkl', 'wb'))
dump(generator_training[0], open(path + 'sim_' + sim + '/generator_trainings.pkl', 'wb'))
| true | true |
1c2c64d2b856540aff814af4368f6379ad600b93 | 1,598 | py | Python | Numpy XOR/Tensor_ImageRecognition.py | chaseaucoin/Neural-Primer | d5163af8cd74ea5cea620069dffca8c124bffc05 | [
"MIT"
] | 1 | 2017-08-07T04:45:53.000Z | 2017-08-07T04:45:53.000Z | Numpy XOR/Tensor_ImageRecognition.py | chaseaucoin/Neural-Primer | d5163af8cd74ea5cea620069dffca8c124bffc05 | [
"MIT"
] | null | null | null | Numpy XOR/Tensor_ImageRecognition.py | chaseaucoin/Neural-Primer | d5163af8cd74ea5cea620069dffca8c124bffc05 | [
"MIT"
] | null | null | null | from tensorflow.examples.tutorials.mnist import input_data
#Get The MNIST Data
# Downloads (if needed) and loads MNIST with one-hot encoded labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import tensorflow as tf
# Create the model
# NOTE(review): this uses the TensorFlow 1.x graph/session API
# (placeholders, InteractiveSession) — it will not run unmodified on TF 2.x.
# Softmax regression: a single affine layer mapping 28*28=784 pixels to
# 10 class logits.
inputs = tf.placeholder(tf.float32, [None, 784])
weights = tf.Variable(tf.zeros([784, 10]))
biases = tf.Variable(tf.zeros([10]))
predictions = tf.matmul(inputs, weights) + biases
# Define loss and optimizer
expectedPredictions = tf.placeholder(tf.float32, [None, 10])
# The raw formulation of cross-entropy,
#
#   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
#                                 reduction_indices=[1]))
#
# can be numerically unstable.
#
# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
# outputs of 'y', and then average across the batch.
meanError = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=expectedPredictions, logits=predictions))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(meanError)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Train
# 1000 SGD steps on mini-batches of 100 examples.
for _ in range(1000):
  batch_xs, batch_ys = mnist.train.next_batch(100)
  sess.run(train_step, feed_dict={inputs: batch_xs, expectedPredictions: batch_ys})
# Test trained model
# Accuracy = fraction of test examples whose argmax prediction matches the
# one-hot label.
correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(expectedPredictions, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={inputs: mnist.test.images,
                                    expectedPredictions: mnist.test.labels}))
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import tensorflow as tf
inputs = tf.placeholder(tf.float32, [None, 784])
weights = tf.Variable(tf.zeros([784, 10]))
biases = tf.Variable(tf.zeros([10]))
predictions = tf.matmul(inputs, weights) + biases
expectedPredictions = tf.placeholder(tf.float32, [None, 10])
meanError = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=expectedPredictions, logits=predictions))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(meanError)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
for _ in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={inputs: batch_xs, expectedPredictions: batch_ys})
correct_prediction = tf.equal(tf.argmax(predictions, 1), tf.argmax(expectedPredictions, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={inputs: mnist.test.images,
expectedPredictions: mnist.test.labels})) | true | true |
1c2c652f3bebe66d5eab4b30dfd527aeb1c21307 | 252 | py | Python | contest/abc084/B.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | contest/abc084/B.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | contest/abc084/B.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | a, b = map(int, input().split())
s = input()
if len(s) == a + b + 1 and s[a] == '-':
for i in range(len(s)):
if i != a and not ('0' <= s[i] and s[i] <= '9'):
print('No')
exit()
print('Yes')
else:
print('No')
| 22.909091 | 56 | 0.404762 | a, b = map(int, input().split())
s = input()
if len(s) == a + b + 1 and s[a] == '-':
for i in range(len(s)):
if i != a and not ('0' <= s[i] and s[i] <= '9'):
print('No')
exit()
print('Yes')
else:
print('No')
| true | true |
1c2c659cd569322139014dd69e52efc1ae242aa0 | 3,339 | py | Python | test/test_simple/test_multiswitch.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | test/test_simple/test_multiswitch.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | test/test_simple/test_multiswitch.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Jens Krüger <jens.krueger@frm2.tum.de>
#
# *****************************************************************************
"""NICOS generic devices test suite."""
import mock
from nicos.core import ConfigurationError, InvalidValueError, LimitError, \
NicosError, PositionError, status
from test.utils import raises
session_setup = 'multiswitch'
def test_multi_switcher(session):
sc1 = session.getDevice('sc1')
x = session.getDevice('x')
y = session.getDevice('y')
sc1.maw('1')
assert sc1.read(0) == '1'
assert raises(NicosError, sc1.doStart, '123')
assert raises(InvalidValueError, sc1.maw, '23')
assert raises(LimitError, sc1.start, 'outside')
sc1.move('2')
assert sc1.read() in ['2']
assert abs(x.read() - 535.5) < 0.05
x.curstatus = (status.BUSY, 'moving')
sc1.stop()
assert x.status(0)[0] == status.OK
y.curvalue = 0
assert raises(PositionError, sc1.read, 0)
assert sc1.status(0)[0] == status.NOTREACHED
sc2 = session.getDevice('sc2')
sc2.maw('1')
assert sc2.read(0) == '1'
assert sc2.status(0)[0] == status.OK
sc2.move('3')
# case 1: motor in position, but still busy
y.curstatus = (status.BUSY, 'busy')
assert sc2.status(0)[0] != status.OK
# case 2: motor idle, but wronmg position
y.curstatus = (status.OK, 'on target')
y.curvalue = 22.0
assert sc2.status(0)[0] == status.NOTREACHED
y.curvalue = 28.0
assert sc2.status(0)[0] == status.OK
assert raises(InvalidValueError, sc2.maw, '23')
assert raises(LimitError, sc2.start, 'outside')
with mock.patch('nicos.devices.generic.virtual.VirtualMotor.doReset',
create=True) as m:
sc2.reset()
assert m.call_count == 2 # once for x, once for y
def test_multi_switcher_fallback(session):
mswfb = session.getDevice('mswfb')
x = session.getDevice('x')
x.maw(0)
assert mswfb.read(0) == 'unknown'
def test_multi_switcher_fails(session, log):
assert raises(ConfigurationError, session.getDevice, 'msw3')
assert raises(ConfigurationError, session.getDevice, 'msw4')
msw5 = session.getDevice('msw5')
msw5.move('1')
# msw5 has a precision of None for motor 'y', but that motor has
# a jitter set so that it will never be exactly at 0
with log.allow_errors():
assert raises(PositionError, msw5.wait)
| 33.727273 | 79 | 0.648997 |
import mock
from nicos.core import ConfigurationError, InvalidValueError, LimitError, \
NicosError, PositionError, status
from test.utils import raises
session_setup = 'multiswitch'
def test_multi_switcher(session):
sc1 = session.getDevice('sc1')
x = session.getDevice('x')
y = session.getDevice('y')
sc1.maw('1')
assert sc1.read(0) == '1'
assert raises(NicosError, sc1.doStart, '123')
assert raises(InvalidValueError, sc1.maw, '23')
assert raises(LimitError, sc1.start, 'outside')
sc1.move('2')
assert sc1.read() in ['2']
assert abs(x.read() - 535.5) < 0.05
x.curstatus = (status.BUSY, 'moving')
sc1.stop()
assert x.status(0)[0] == status.OK
y.curvalue = 0
assert raises(PositionError, sc1.read, 0)
assert sc1.status(0)[0] == status.NOTREACHED
sc2 = session.getDevice('sc2')
sc2.maw('1')
assert sc2.read(0) == '1'
assert sc2.status(0)[0] == status.OK
sc2.move('3')
y.curstatus = (status.BUSY, 'busy')
assert sc2.status(0)[0] != status.OK
y.curstatus = (status.OK, 'on target')
y.curvalue = 22.0
assert sc2.status(0)[0] == status.NOTREACHED
y.curvalue = 28.0
assert sc2.status(0)[0] == status.OK
assert raises(InvalidValueError, sc2.maw, '23')
assert raises(LimitError, sc2.start, 'outside')
with mock.patch('nicos.devices.generic.virtual.VirtualMotor.doReset',
create=True) as m:
sc2.reset()
assert m.call_count == 2
def test_multi_switcher_fallback(session):
mswfb = session.getDevice('mswfb')
x = session.getDevice('x')
x.maw(0)
assert mswfb.read(0) == 'unknown'
def test_multi_switcher_fails(session, log):
    """Misconfigured switchers fail at creation; unreachable targets at wait().

    msw3/msw4 are intentionally broken in the setup and must raise
    ConfigurationError already when the device is created.
    """
    assert raises(ConfigurationError, session.getDevice, 'msw3')
    assert raises(ConfigurationError, session.getDevice, 'msw4')
    msw5 = session.getDevice('msw5')
    msw5.move('1')
    # msw5 can never report "arrived" here, so wait() must raise
    # PositionError (see the commented copy of this test above in the repo
    # history: motor 'y' has jitter and msw5 has precision None for it)
    with log.allow_errors():
        assert raises(PositionError, msw5.wait)
| true | true |
1c2c6644942a06b5e2cb6058c9c5629cf6809176 | 2,524 | py | Python | pkgs/grep_p2/grep_p2.py | Morphux/installer | a26419838fcfc156a93a42c1516157e2aae54d9c | [
"Apache-2.0"
] | null | null | null | pkgs/grep_p2/grep_p2.py | Morphux/installer | a26419838fcfc156a93a42c1516157e2aae54d9c | [
"Apache-2.0"
] | null | null | null | pkgs/grep_p2/grep_p2.py | Morphux/installer | a26419838fcfc156a93a42c1516157e2aae54d9c | [
"Apache-2.0"
] | null | null | null | ################################### LICENSE ####################################
# Copyright 2016 Morphux #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
##
# grep_p2.py
# Created: 21/12/2016
# By: Louis Solofrizzo <louis@morphux.org>
##
import os
class Grep_P2:
    """Build/install recipe for the `grep` package (Morphux installer).

    The installer framework calls ``init`` once with its configuration dict,
    an executor callable and the install root, then drives the usual
    ``configure`` / ``make`` / ``install`` steps through the executor.
    """

    # Class-level placeholders; ``init`` replaces them with real values.
    conf_lst = {}
    e = False
    root_dir = ""

    def init(self, c_lst, ex, root_dir):
        """Store the framework handles and return this package's metadata."""
        self.conf_lst = c_lst
        self.e = ex
        self.root_dir = root_dir
        config = dict()
        config["name"] = "grep"                     # Name of the package
        config["version"] = "2.25"                  # Version of the package
        config["size"] = 18                         # Installed size (MB)
        config["archive"] = "grep-2.25.tar.xz"      # Archive name
        config["SBU"] = 0.4                         # Relative compilation time
        config["tmp_install"] = False               # Not part of the temp install
        config["next"] = "readline"                 # Next package to install
        config["before"] = False
        config["after"] = False
        # Download URLs; the first one must be the Morphux mirror.
        config["urls"] = [
            "https://install.morphux.org/packages/grep-2.25.tar.xz"
        ]
        self.config = config
        return self.config

    def configure(self):
        """Run ./configure with the standard prefix/bindir layout."""
        cmd = ["./configure", "--prefix=/usr", "--bindir=/bin"]
        return self.e(cmd)

    def make(self):
        """Compile with one job per configured CPU."""
        return self.e(["make", "-j", self.conf_lst["cpus"]])

    def install(self):
        """Install the compiled package."""
        return self.e(["make", "install"])
| 40.709677 | 90 | 0.442155 | true | true | |
1c2c664cd146ce55db4b181d0f81c3dcacfa2a68 | 26,955 | py | Python | flaml/model.py | dan0nchik/FLAML | 9d661759b49de6e403d9288af7a015606528fe7e | [
"Apache-2.0"
] | null | null | null | flaml/model.py | dan0nchik/FLAML | 9d661759b49de6e403d9288af7a015606528fe7e | [
"Apache-2.0"
] | null | null | null | flaml/model.py | dan0nchik/FLAML | 9d661759b49de6e403d9288af7a015606528fe7e | [
"Apache-2.0"
] | null | null | null | '''!
* Copyright (c) 2020-2021 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License.
'''
import numpy as np
import xgboost as xgb
import time
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier, LGBMRegressor
from scipy.sparse import issparse
import pandas as pd
from . import tune
import logging
logger = logging.getLogger(__name__)
class BaseEstimator:
    '''The abstract class for all learners

    Typical example:
        XGBoostEstimator: for regression
        XGBoostSklearnEstimator: for classification
        LGBMEstimator, RandomForestEstimator, LRL1Classifier, LRL2Classifier:
            for both regression and classification
    '''

    def __init__(self, task='binary:logistic', **params):
        '''Constructor

        Args:
            task: A string of the task type, one of
                'binary:logistic', 'multi:softmax', 'regression'
            n_jobs: An integer of the number of parallel threads
            params: A dictionary of the hyperparameter names and values
        '''
        self.params = params
        self.estimator_class = self._model = None
        self._task = task
        if '_estimator_type' in params:
            self._estimator_type = params['_estimator_type']
        else:
            # sklearn-compatible tag consumed by sklearn.base.is_classifier
            # / is_regressor
            self._estimator_type = "regressor" if task == 'regression' \
                else "classifier"

    def get_params(self, deep=False):
        '''Return the constructor parameters (sklearn-compatible).'''
        params = self.params.copy()
        params["task"] = self._task
        if hasattr(self, '_estimator_type'):
            params['_estimator_type'] = self._estimator_type
        return params

    @property
    def classes_(self):
        '''Class labels of the trained model (classification only).'''
        return self._model.classes_

    @property
    def n_features_in_(self):
        '''Number of features seen during fit.'''
        return self.model.n_features_in_

    @property
    def model(self):
        '''Trained model after fit() is called, or None before fit() is called
        '''
        return self._model

    def _preprocess(self, X):
        # Hook for subclasses; the base class applies no transformation.
        return X

    def _fit(self, X_train, y_train, **kwargs):
        '''Instantiate self.estimator_class with self.params and fit it.

        Returns:
            train_time: A float of the training time in seconds
        '''
        current_time = time.time()  # fix: was misspelled 'curent_time'
        X_train = self._preprocess(X_train)
        model = self.estimator_class(**self.params)
        model.fit(X_train, y_train, **kwargs)
        train_time = time.time() - current_time
        self._model = model
        return train_time

    def fit(self, X_train, y_train, budget=None, **kwargs):
        '''Train the model from given training data

        Args:
            X_train: A numpy array of training data in shape n*m
            y_train: A numpy array of labels in shape n*1
            budget: A float of the time budget in seconds (ignored here;
                subclasses may use it to trim the training)

        Returns:
            train_time: A float of the training time in seconds
        '''
        return self._fit(X_train, y_train, **kwargs)

    def predict(self, X_test):
        '''Predict label from features

        Args:
            X_test: A numpy array of featurized instances, shape n*m

        Returns:
            A numpy array of shape n*1.
            Each element is the label for a instance
        '''
        X_test = self._preprocess(X_test)
        return self._model.predict(X_test)

    def predict_proba(self, X_test):
        '''Predict the probability of each class from features

        Only works for classification problems

        Args:
            X_test: A numpy array of featurized instances, shape n*m

        Returns:
            A numpy array of shape n*c. c is the # classes
            Each element at (i,j) is the probability for instance i to be in
            class j

        Raises:
            ValueError: if the task is a regression task.
        '''
        if 'regression' in self._task:
            # fix: raise a ValueError carrying the message instead of
            # printing to stdout and raising a bare ValueError
            raise ValueError('Regression tasks do not support predict_proba')
        X_test = self._preprocess(X_test)
        return self._model.predict_proba(X_test)

    def cleanup(self):
        '''Hook for releasing resources; no-op by default.'''
        pass

    @classmethod
    def search_space(cls, **params):
        '''[required method] search space

        Returns:
            A dictionary of the search space.
            Each key is the name of a hyperparameter, and value is a dict with
                its domain and init_value (optional), cat_hp_cost (optional)
                e.g.,
                {'domain': tune.randint(lower=1, upper=10), 'init_value': 1}
        '''
        return {}

    @classmethod
    def size(cls, config):
        '''[optional method] memory size of the estimator in bytes

        Args:
            config - the dict of the hyperparameter config

        Returns:
            A float of the memory size required by the estimator to train the
            given config
        '''
        return 1.0

    @classmethod
    def cost_relative2lgbm(cls):
        '''[optional method] relative cost compared to lightgbm'''
        return 1.0
class SKLearnEstimator(BaseEstimator):
    '''Base class for estimators backed by scikit-learn models.

    Its only addition over BaseEstimator is input preprocessing: pandas
    'category' columns are replaced by their integer codes, since plain
    sklearn estimators cannot consume the categorical dtype directly.
    '''

    def _preprocess(self, X):
        if not isinstance(X, pd.DataFrame):
            return X
        X = X.copy()
        categorical = X.select_dtypes(include=['category']).columns
        X[categorical] = X[categorical].apply(lambda col: col.cat.codes)
        return X
class LGBMEstimator(BaseEstimator):
    '''LightGBM wrapper for both regression and classification.

    fit() supports a time budget: it measures the cost of 1 and 4 boosting
    rounds on the first call and uses the per-iteration time to cap
    n_estimators so training fits within the budget.
    '''

    @classmethod
    def search_space(cls, data_size, **params):
        '''Hyperparameter search space; tree counts/leaves are capped by
        the training-set size (at most 32768).'''
        upper = min(32768,int(data_size))
        return {
            'n_estimators': {
                'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                'init_value': 4,
            },
            'max_leaves': {
                'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                'init_value': 4,
            },
            'min_child_weight': {
                'domain': tune.loguniform(lower=0.001, upper=20.0),
                'init_value': 20.0,
            },
            'learning_rate': {
                'domain': tune.loguniform(lower=0.01, upper=1.0),
                'init_value': 0.1,
            },
            'subsample': {
                'domain': tune.uniform(lower=0.6, upper=1.0),
                'init_value': 1.0,
            },
            'log_max_bin': {
                'domain': tune.qloguniform(lower=3, upper=10, q=1),
                'init_value': 8,
            },
            'colsample_bytree': {
                'domain': tune.uniform(lower=0.7, upper=1.0),
                'init_value': 1.0,
            },
            'reg_alpha': {
                'domain': tune.loguniform(lower=1e-10, upper=1.0),
                'init_value': 1e-10,
            },
            'reg_lambda': {
                'domain': tune.loguniform(lower=1e-10, upper=1.0),
                'init_value': 1.0,
            },
        }

    @classmethod
    def size(cls, config):
        '''Rough model memory footprint in bytes (8 bytes per tree node).'''
        max_leaves = int(round(config['max_leaves']))
        n_estimators = int(round(config['n_estimators']))
        return (max_leaves*3 + (max_leaves-1)*4 + 1.0)*n_estimators*8

    def __init__(self, task='binary:logistic', n_jobs=1,
        n_estimators=2, max_leaves=2, min_child_weight=1e-3, learning_rate=0.1,
        subsample=1.0, reg_lambda=1.0, reg_alpha=0.0, colsample_bylevel=1.0,
        colsample_bytree=1.0, log_max_bin=8, **params):
        '''Map FLAML hyperparameters onto LGBMRegressor/LGBMClassifier kwargs.

        Explicit 'num_leaves'/'objective'/'max_bin' in **params override the
        derived values.
        '''
        super().__init__(task, **params)
        # Default: ‘regression’ for LGBMRegressor,
        # ‘binary’ or ‘multiclass’ for LGBMClassifier
        if 'regression' in task:
            objective = 'regression'
        elif 'binary' in task:
            objective = 'binary'
        elif 'multi' in task:
            objective = 'multiclass'
        else: objective = 'regression'
        self.params = {
            "n_estimators": int(round(n_estimators)),
            "num_leaves": params[
                'num_leaves'] if 'num_leaves' in params else int(
                    round(max_leaves)),
            'objective': params[
                "objective"] if "objective" in params else objective,
            'n_jobs': n_jobs,
            'learning_rate': float(learning_rate),
            'reg_alpha': float(reg_alpha),
            'reg_lambda': float(reg_lambda),
            'min_child_weight': float(min_child_weight),
            'colsample_bytree':float(colsample_bytree),
            'subsample': float(subsample),
        }
        # max_bin must be (2^log_max_bin)-1 unless given explicitly
        self.params['max_bin'] = params['max_bin'] if 'max_bin' in params else (
            1<<int(round(log_max_bin)))-1
        if 'regression' in task:
            self.estimator_class = LGBMRegressor
        else:
            self.estimator_class = LGBMClassifier
        self._time_per_iter = None
        self._train_size = 0

    def _preprocess(self, X):
        # sparse integer matrices are not accepted by LightGBM; cast to float
        if not isinstance(X, pd.DataFrame) and issparse(
            X) and np.issubdtype(X.dtype, np.integer):
            X = X.astype(float)
        return X

    def fit(self, X_train, y_train, budget=None, **kwargs):
        '''Train within an optional time budget.

        On the first budgeted call (or when the training size changed),
        fits with 1 and then 4 estimators to estimate the per-iteration
        cost, then caps n_estimators so the remaining budget is not
        exceeded. self.params["n_estimators"] is restored afterwards.

        Returns:
            train_time: A float of the training time in seconds
        '''
        start_time = time.time()
        n_iter = self.params["n_estimators"]
        if (not self._time_per_iter or
            abs(self._train_size-X_train.shape[0])>4) and budget is not None:
            # calibrate: 1 iteration, then 4 iterations
            self.params["n_estimators"] = 1
            self._t1 = self._fit(X_train, y_train, **kwargs)
            if self._t1 >= budget:
                self.params["n_estimators"] = n_iter
                return self._t1
            self.params["n_estimators"] = 4
            self._t2 = self._fit(X_train, y_train, **kwargs)
            self._time_per_iter = (self._t2 - self._t1)/(
                self.params["n_estimators"]-1) if self._t2 > self._t1 \
                else self._t1 if self._t1 else 0.001
            self._train_size = X_train.shape[0]
            if self._t1+self._t2>=budget or n_iter==self.params["n_estimators"]:
                self.params["n_estimators"] = n_iter
                return time.time() - start_time
        if budget is not None:
            # cap the iteration count to fit the remaining budget
            self.params["n_estimators"] = min(n_iter, int((budget-time.time()+
                start_time-self._t1)/self._time_per_iter+1))
        if self.params["n_estimators"] > 0:
            self._fit(X_train, y_train, **kwargs)
        self.params["n_estimators"] = n_iter
        train_time = time.time() - start_time
        return train_time
class XGBoostEstimator(SKLearnEstimator):
    ''' not using sklearn API, used for regression '''

    @classmethod
    def search_space(cls, data_size, **params):
        '''Hyperparameter search space; tree counts/leaves are capped by
        the training-set size (at most 32768).'''
        upper = min(32768, int(data_size))
        return {
            'n_estimators': {
                'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                'init_value': 4,
            },
            'max_leaves': {
                'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                'init_value': 4,
            },
            'min_child_weight': {
                'domain': tune.loguniform(lower=0.001, upper=20.0),
                'init_value': 20.0,
            },
            'learning_rate': {
                'domain': tune.loguniform(lower=0.01, upper=1.0),
                'init_value': 0.1,
            },
            'subsample': {
                'domain': tune.uniform(lower=0.6, upper=1.0),
                'init_value': 1.0,
            },
            'colsample_bylevel': {
                'domain': tune.uniform(lower=0.6, upper=1.0),
                'init_value': 1.0,
            },
            'colsample_bytree': {
                'domain': tune.uniform(lower=0.7, upper=1.0),
                'init_value': 1.0,
            },
            'reg_alpha': {
                'domain': tune.loguniform(lower=1e-10, upper=1.0),
                'init_value': 1e-10,
            },
            'reg_lambda': {
                'domain': tune.loguniform(lower=1e-10, upper=1.0),
                'init_value': 1.0,
            },
        }

    @classmethod
    def size(cls, config):
        '''Same footprint model as LightGBM (8 bytes per tree node).'''
        return LGBMEstimator.size(config)

    @classmethod
    def cost_relative2lgbm(cls):
        return 1.6

    def __init__(self, task='regression', all_thread=False, n_jobs=1,
        n_estimators=4, max_leaves=4, subsample=1.0, min_child_weight=1,
        learning_rate=0.1, reg_lambda=1.0, reg_alpha=0.0, colsample_bylevel=1.0,
        colsample_bytree=1.0, tree_method='auto', **params):
        '''Map FLAML hyperparameters onto xgb.train() booster parameters.

        Args:
            all_thread: if True, 'nthread' is omitted so xgboost uses all
                available threads.
        '''
        super().__init__(task, **params)
        self._n_estimators = int(round(n_estimators))
        self._max_leaves = int(round(max_leaves))
        self.params = {
            'max_leaves': int(round(max_leaves)),
            'max_depth': 0,  # 0 disables the depth limit (lossguide growth)
            'grow_policy': params[
                "grow_policy"] if "grow_policy" in params else 'lossguide',
            'tree_method': tree_method,
            'verbosity': 0,
            'nthread': n_jobs,
            'learning_rate': float(learning_rate),
            'subsample': float(subsample),
            'reg_alpha': float(reg_alpha),
            'reg_lambda': float(reg_lambda),
            'min_child_weight': float(min_child_weight),
            'booster': params['booster'] if 'booster' in params else 'gbtree',
            'colsample_bylevel': float(colsample_bylevel),
            'colsample_bytree': float(colsample_bytree),
        }
        if all_thread:
            del self.params['nthread']

    def get_params(self, deep=False):
        params = super().get_params()
        # fix: 'nthread' is absent when all_thread=True; previously this
        # raised KeyError
        if 'nthread' in params:
            params["n_jobs"] = params['nthread']
        return params

    def fit(self, X_train, y_train, budget=None, **kwargs):
        '''Train via the native xgb.train() API.

        Returns:
            train_time in seconds, or None when max_leaves is 0.
        '''
        start_time = time.time()
        if not issparse(X_train):
            self.params['tree_method'] = 'hist'
            X_train = self._preprocess(X_train)
        dtrain = xgb.DMatrix(X_train, label=y_train)
        if self._max_leaves > 0:
            if 'sample_weight' in kwargs:
                # fix: xgb.train() has no `weight` keyword (the old call
                # raised TypeError); per-instance weights must be attached
                # to the DMatrix itself.
                dtrain.set_weight(kwargs['sample_weight'])
            self._model = xgb.train(self.params, dtrain, self._n_estimators)
            del dtrain
            train_time = time.time() - start_time
            return train_time
        else:
            return None

    def predict(self, X_test):
        if not issparse(X_test):
            X_test = self._preprocess(X_test)
        dtest = xgb.DMatrix(X_test)
        return super().predict(dtest)
class XGBoostSklearnEstimator(SKLearnEstimator, LGBMEstimator):
    ''' using sklearn API, used for classification '''

    @classmethod
    def search_space(cls, data_size, **params):
        # identical search space to the native-API XGBoost estimator
        return XGBoostEstimator.search_space(data_size)

    @classmethod
    def cost_relative2lgbm(cls):
        return XGBoostEstimator.cost_relative2lgbm()

    def __init__(self, task='binary:logistic', n_jobs=1,
        n_estimators=4, max_leaves=4, subsample=1.0,
        min_child_weight=1, learning_rate=0.1, reg_lambda=1.0, reg_alpha=0.0,
        colsample_bylevel=1.0, colsample_bytree=1.0, tree_method='hist',
        **params):
        '''Map FLAML hyperparameters onto XGBRegressor/XGBClassifier kwargs.'''
        super().__init__(task, **params)
        self.params = {
            "n_estimators": int(round(n_estimators)),
            'max_leaves': int(round(max_leaves)),
            'max_depth': 0,  # 0 disables the depth limit (lossguide growth)
            'grow_policy': params.get("grow_policy", 'lossguide'),
            'tree_method': tree_method,
            'verbosity': 0,
            'n_jobs': n_jobs,
            'learning_rate': float(learning_rate),
            'subsample': float(subsample),
            'reg_alpha': float(reg_alpha),
            'reg_lambda': float(reg_lambda),
            'min_child_weight': float(min_child_weight),
            'booster': params.get('booster', 'gbtree'),
            'colsample_bylevel': float(colsample_bylevel),
            'colsample_bytree': float(colsample_bytree),
        }
        self.estimator_class = xgb.XGBRegressor if 'regression' in task \
            else xgb.XGBClassifier
        self._time_per_iter = None
        self._train_size = 0

    def fit(self, X_train, y_train, budget=None, **kwargs):
        # for sparse input, let xgboost pick the tree method instead of 'hist'
        if issparse(X_train):
            self.params['tree_method'] = 'auto'
        return super().fit(X_train, y_train, budget, **kwargs)
class RandomForestEstimator(SKLearnEstimator, LGBMEstimator):
    '''Random forest wrapper for both regression and classification.'''

    @classmethod
    def search_space(cls, data_size, task, **params):
        '''Search over tree count, feature fraction and (for classification
        only) the split criterion.'''
        tree_upper = min(2048, int(data_size))
        space = {
            'n_estimators': {
                'domain': tune.qloguniform(lower=4, upper=tree_upper, q=1),
                'init_value': 4,
            },
            'max_features': {
                'domain': tune.loguniform(lower=0.1, upper=1.0),
                'init_value': 1.0,
            },
        }
        if task != 'regression':
            # criterion only applies to classifiers
            space['criterion'] = {
                'domain': tune.choice(['gini', 'entropy']),
            }
        return space

    @classmethod
    def size(cls, config):
        return 1.0

    @classmethod
    def cost_relative2lgbm(cls):
        return 2.0

    def __init__(self, task = 'binary:logistic', n_jobs = 1,
        n_estimators = 4, max_features = 1.0, criterion = 'gini', **params):
        '''Map FLAML hyperparameters onto sklearn RandomForest kwargs.'''
        super().__init__(task, **params)
        self.params = {
            "n_estimators": int(round(n_estimators)),
            "n_jobs": n_jobs,
            'max_features': float(max_features),
        }
        if 'regression' in task:
            self.estimator_class = RandomForestRegressor
        else:
            # criterion is a classifier-only parameter
            self.estimator_class = RandomForestClassifier
            self.params['criterion'] = criterion
        self._time_per_iter = None
        self._train_size = 0

    def get_params(self, deep=False):
        out = super().get_params()
        # encode criterion numerically: 'gini' -> 1, anything else -> 2
        out["criterion"] = 1 if out["criterion"] == 'gini' else 2
        return out
class ExtraTreeEstimator(RandomForestEstimator):
    '''Extremely randomized trees; reuses the random-forest search space.'''

    @classmethod
    def cost_relative2lgbm(cls):
        return 1.9

    def __init__(self, task = 'binary:logistic', **params):
        super().__init__(task, **params)
        # swap in the extra-trees implementations, keeping the parent's params
        regression = 'regression' in task
        self.estimator_class = (
            ExtraTreesRegressor if regression else ExtraTreesClassifier)
class LRL1Classifier(SKLearnEstimator):
    '''Logistic regression with L1 regularization (classification only).'''

    @classmethod
    def search_space(cls, **params):
        '''Search over the inverse regularization strength C.'''
        return {
            'C': {
                'domain': tune.loguniform(lower=0.03125, upper=32768.0),
                'init_value': 1.0,
            },
        }

    @classmethod
    def cost_relative2lgbm(cls):
        return 160

    def __init__(self, task='binary:logistic', n_jobs=1, tol=0.0001, C=1.0,
        **params):
        '''Map FLAML hyperparameters onto sklearn LogisticRegression kwargs.

        Raises:
            NotImplementedError: if a regression task is requested.
        '''
        super().__init__(task, **params)
        self.params = {
            'penalty': 'l1',
            'tol': float(tol),
            'C': float(C),
            'solver': 'saga',  # saga supports the l1 penalty
            'n_jobs': n_jobs,
        }
        if 'regression' in task:
            # fix: raise with a message instead of print + bare raise;
            # the dead `estimator_class = None` assignment is dropped
            raise NotImplementedError('LR does not support regression task')
        self.estimator_class = LogisticRegression
class LRL2Classifier(SKLearnEstimator):
    '''Logistic regression with L2 regularization (classification only).'''

    @classmethod
    def search_space(cls, **params):
        # same search space (C) as the L1 variant
        return LRL1Classifier.search_space(**params)

    @classmethod
    def cost_relative2lgbm(cls):
        return 25

    def __init__(self, task='binary:logistic', n_jobs=1, tol=0.0001, C=1.0,
        **params):
        '''Map FLAML hyperparameters onto sklearn LogisticRegression kwargs.

        Raises:
            NotImplementedError: if a regression task is requested.
        '''
        super().__init__(task, **params)
        self.params = {
            'penalty': 'l2',
            'tol': float(tol),
            'C': float(C),
            'solver': 'lbfgs',  # lbfgs supports the l2 penalty
            'n_jobs': n_jobs,
        }
        if 'regression' in task:
            # fix: raise with a message instead of print + bare raise;
            # the dead `estimator_class = None` assignment is dropped
            raise NotImplementedError('LR does not support regression task')
        self.estimator_class = LogisticRegression
class CatBoostEstimator(BaseEstimator):
    '''CatBoost wrapper with budget-aware fitting.

    NOTE: the per-iteration timing cache (_time_per_iter, _train_size,
    _t1, _smallmodel) is kept on the CLASS, so it is shared across all
    CatBoostEstimator instances in the process.
    '''

    _time_per_iter = None  # seconds per boosting iteration (class-wide cache)
    _train_size = 0        # training-set size the cache was measured on

    @classmethod
    def search_space(cls, data_size, **params):
        '''Search over early-stopping patience and learning rate; the
        patience upper bound shrinks with data size (between 11 and 150).'''
        upper = max(min(round(1500000/data_size),150), 11)
        return {
            'early_stopping_rounds': {
                'domain': tune.qloguniform(lower=10, upper=upper, q=1),
                'init_value': 10,
            },
            'learning_rate': {
                'domain': tune.loguniform(lower=.005, upper=.2),
                'init_value': 0.1,
            },
        }

    @classmethod
    def size(cls, config):
        # worst-case footprint: fixed 8192 trees with 64 leaves each
        n_estimators = 8192
        max_leaves = 64
        return (max_leaves*3 + (max_leaves-1)*4 + 1.0)*n_estimators*8

    @classmethod
    def cost_relative2lgbm(cls):
        return 15

    def __init__(self, task = 'binary:logistic', n_jobs=1,
        n_estimators=8192, learning_rate=0.1, early_stopping_rounds=4, **params):
        '''Map FLAML hyperparameters onto CatBoost kwargs.'''
        super().__init__(task, **params)
        self.params = {
            "early_stopping_rounds": int(round(early_stopping_rounds)),
            "n_estimators": n_estimators,
            'learning_rate': learning_rate,
            'thread_count': n_jobs,
            'verbose': False,
            'random_seed': params[
                "random_seed"] if "random_seed" in params else 10242048,
        }
        if 'regression' in task:
            from catboost import CatBoostRegressor
            self.estimator_class = CatBoostRegressor
        else:
            from catboost import CatBoostClassifier
            self.estimator_class = CatBoostClassifier

    def get_params(self, deep=False):
        params = super().get_params()
        # expose CatBoost's 'thread_count' under the common 'n_jobs' name
        params['n_jobs'] = params['thread_count']
        return params

    def fit(self, X_train, y_train, budget=None, **kwargs):
        '''Train within an optional time budget.

        First calibrates the per-iteration cost with 1 then 4 trees (cached
        class-wide), then caps n_estimators for the remaining budget. The
        final fit holds out the last 10% (at most 1000 rows) as an
        early-stopping eval set. self.params["n_estimators"] is restored
        before returning.
        '''
        start_time = time.time()
        n_iter = self.params["n_estimators"]
        if isinstance(X_train, pd.DataFrame):
            cat_features = list(X_train.select_dtypes(
                include='category').columns)
        else:
            cat_features = []
        if (not CatBoostEstimator._time_per_iter or
            abs(CatBoostEstimator._train_size-len(y_train))>4) and budget:
            # measure the time per iteration
            self.params["n_estimators"] = 1
            CatBoostEstimator._smallmodel = self.estimator_class(**self.params)
            CatBoostEstimator._smallmodel.fit(X_train, y_train,
                cat_features=cat_features, **kwargs)
            CatBoostEstimator._t1 = time.time() - start_time
            if CatBoostEstimator._t1 >= budget:
                self.params["n_estimators"] = n_iter
                self._model = CatBoostEstimator._smallmodel
                return CatBoostEstimator._t1
            self.params["n_estimators"] = 4
            CatBoostEstimator._smallmodel = self.estimator_class(**self.params)
            CatBoostEstimator._smallmodel.fit(X_train, y_train,
                cat_features=cat_features, **kwargs)
            CatBoostEstimator._time_per_iter = (time.time() - start_time -
                CatBoostEstimator._t1)/(self.params["n_estimators"]-1)
            if CatBoostEstimator._time_per_iter <= 0:
                CatBoostEstimator._time_per_iter = CatBoostEstimator._t1
            CatBoostEstimator._train_size = len(y_train)
            if time.time()-start_time>=budget or n_iter==self.params[
                "n_estimators"]:
                self.params["n_estimators"] = n_iter
                self._model = CatBoostEstimator._smallmodel
                return time.time()-start_time
        if budget:
            # cap the iteration count to fit the remaining budget
            train_times = 1
            self.params["n_estimators"] = min(n_iter, int((budget-time.time()+
                start_time-CatBoostEstimator._t1)/train_times/
                CatBoostEstimator._time_per_iter+1))
            self._model = CatBoostEstimator._smallmodel
        if self.params["n_estimators"] > 0:
            # hold out the tail of the data as the early-stopping eval set
            l = max(int(len(y_train)*0.9), len(y_train)-1000)
            X_tr, y_tr = X_train[:l], y_train[:l]
            if 'sample_weight' in kwargs:
                weight = kwargs['sample_weight']
                if weight is not None: kwargs['sample_weight'] = weight[:l]
            else: weight = None
            from catboost import Pool
            model = self.estimator_class(**self.params)
            model.fit(X_tr, y_tr, cat_features=cat_features, eval_set=Pool(
                data=X_train[l:], label=y_train[l:], cat_features=cat_features),
                **kwargs)
            if weight is not None: kwargs['sample_weight'] = weight
            # print(self.params["n_estimators"], model.get_best_iteration())
            self._model = model
        self.params["n_estimators"] = n_iter
        train_time = time.time() - start_time
        # print(budget, train_time)
        return train_time
class KNeighborsEstimator(BaseEstimator):
    '''k-nearest-neighbors wrapper for both regression and classification.'''

    @classmethod
    def search_space(cls, data_size, **params):
        '''Search over the neighbor count, capped at half the data size
        (at most 512).'''
        upper = min(512, int(data_size/2))
        return {
            'n_neighbors': {
                'domain': tune.qloguniform(lower=1, upper=upper, q=1),
                'init_value': 5,
            },
        }

    @classmethod
    def cost_relative2lgbm(cls):
        return 30

    def __init__(self, task='binary:logistic', n_jobs=1,
        n_neighbors=5, **params):
        '''Map FLAML hyperparameters onto sklearn KNeighbors kwargs.'''
        super().__init__(task, **params)
        self.params = {
            'n_neighbors': int(round(n_neighbors)),
            'weights': 'distance',  # distance-weighted votes
            'n_jobs': n_jobs,
        }
        if 'regression' in task:
            from sklearn.neighbors import KNeighborsRegressor
            self.estimator_class = KNeighborsRegressor
        else:
            from sklearn.neighbors import KNeighborsClassifier
            self.estimator_class = KNeighborsClassifier

    def _preprocess(self, X):
        '''Drop categorical columns — kNN distances need numeric features.

        Raises:
            ValueError: if every column is categorical.
        '''
        # (commented-out debug prints removed)
        if isinstance(X, pd.DataFrame):
            cat_columns = X.select_dtypes(['category']).columns
            if X.shape[1] == len(cat_columns):
                raise ValueError(
                    "kneighbor requires at least one numeric feature")
            X = X.drop(cat_columns, axis=1)
        return X
| 36.036096 | 81 | 0.551475 |
import numpy as np
import xgboost as xgb
import time
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import ExtraTreesRegressor, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
from lightgbm import LGBMClassifier, LGBMRegressor
from scipy.sparse import issparse
import pandas as pd
from . import tune
import logging
logger = logging.getLogger(__name__)
class BaseEstimator:
    '''The abstract base class for all learners (comment-stripped copy).'''

    def __init__(self, task = 'binary:logistic', **params):
        '''Store hyperparameters; derive the sklearn estimator-type tag
        from the task unless '_estimator_type' is passed explicitly.'''
        self.params = params
        self.estimator_class = self._model = None
        self._task = task
        if '_estimator_type' in params:
            self._estimator_type = params['_estimator_type']
        else:
            self._estimator_type = "regressor" if task=='regression' \
                else "classifier"

    def get_params(self, deep=False):
        '''Return the constructor parameters (sklearn-compatible).'''
        params = self.params.copy()
        params["task"] = self._task
        if hasattr(self, '_estimator_type'):
            params['_estimator_type'] = self._estimator_type
        return params

    @property
    def classes_(self):
        # class labels of the trained model (classification only)
        return self._model.classes_

    @property
    def n_features_in_(self):
        return self.model.n_features_in_

    @property
    def model(self):
        '''Trained model after fit(), or None before fit() is called.'''
        return self._model

    def _preprocess(self, X):
        # hook for subclasses; no transformation by default
        return X

    def _fit(self, X_train, y_train, **kwargs):
        '''Instantiate self.estimator_class and fit it; returns the
        training time in seconds.'''
        curent_time = time.time()
        X_train = self._preprocess(X_train)
        model = self.estimator_class(**self.params)
        model.fit(X_train, y_train, **kwargs)
        train_time = time.time() - curent_time
        self._model = model
        return train_time

    def fit(self, X_train, y_train, budget=None, **kwargs):
        '''Train on the given data; the budget argument is ignored here
        (subclasses may honor it). Returns the training time in seconds.'''
        return self._fit(X_train, y_train, **kwargs)

    def predict(self, X_test):
        '''Predict labels for the featurized instances in X_test.'''
        X_test = self._preprocess(X_test)
        return self._model.predict(X_test)

    def predict_proba(self, X_test):
        '''Predict per-class probabilities (classification only);
        raises ValueError for regression tasks.'''
        if 'regression' in self._task:
            print('Regression tasks do not support predict_prob')
            raise ValueError
        else:
            X_test = self._preprocess(X_test)
            return self._model.predict_proba(X_test)

    def cleanup(self): pass

    @classmethod
    def search_space(cls, **params):
        '''Hyperparameter search space; empty in the base class.'''
        return {}

    @classmethod
    def size(cls, config):
        '''Estimated memory size in bytes for the given config.'''
        return 1.0

    @classmethod
    def cost_relative2lgbm(cls):
        '''Relative training cost compared to lightgbm.'''
        return 1.0
class SKLearnEstimator(BaseEstimator):
    '''Base class for sklearn-backed estimators: converts pandas
    'category' columns to integer codes before fitting/predicting.'''

    def _preprocess(self, X):
        if isinstance(X, pd.DataFrame):
            X = X.copy()
            cat_columns = X.select_dtypes(include=['category']).columns
            X[cat_columns] = X[cat_columns].apply(lambda x: x.cat.codes)
        return X
class LGBMEstimator(BaseEstimator):
    '''LightGBM wrapper with budget-aware fitting (comment-stripped copy).'''

    @classmethod
    def search_space(cls, data_size, **params):
        '''Hyperparameter search space; tree counts/leaves capped by
        the data size (at most 32768).'''
        upper = min(32768,int(data_size))
        return {
            'n_estimators': {
                'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                'init_value': 4,
            },
            'max_leaves': {
                'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                'init_value': 4,
            },
            'min_child_weight': {
                'domain': tune.loguniform(lower=0.001, upper=20.0),
                'init_value': 20.0,
            },
            'learning_rate': {
                'domain': tune.loguniform(lower=0.01, upper=1.0),
                'init_value': 0.1,
            },
            'subsample': {
                'domain': tune.uniform(lower=0.6, upper=1.0),
                'init_value': 1.0,
            },
            'log_max_bin': {
                'domain': tune.qloguniform(lower=3, upper=10, q=1),
                'init_value': 8,
            },
            'colsample_bytree': {
                'domain': tune.uniform(lower=0.7, upper=1.0),
                'init_value': 1.0,
            },
            'reg_alpha': {
                'domain': tune.loguniform(lower=1e-10, upper=1.0),
                'init_value': 1e-10,
            },
            'reg_lambda': {
                'domain': tune.loguniform(lower=1e-10, upper=1.0),
                'init_value': 1.0,
            },
        }

    @classmethod
    def size(cls, config):
        '''Rough model memory footprint in bytes (8 bytes per tree node).'''
        max_leaves = int(round(config['max_leaves']))
        n_estimators = int(round(config['n_estimators']))
        return (max_leaves*3 + (max_leaves-1)*4 + 1.0)*n_estimators*8

    def __init__(self, task='binary:logistic', n_jobs=1,
        n_estimators=2, max_leaves=2, min_child_weight=1e-3, learning_rate=0.1,
        subsample=1.0, reg_lambda=1.0, reg_alpha=0.0, colsample_bylevel=1.0,
        colsample_bytree=1.0, log_max_bin=8, **params):
        '''Map FLAML hyperparameters onto LGBMRegressor/LGBMClassifier
        kwargs; explicit num_leaves/objective/max_bin in **params win.'''
        super().__init__(task, **params)
        if 'regression' in task:
            objective = 'regression'
        elif 'binary' in task:
            objective = 'binary'
        elif 'multi' in task:
            objective = 'multiclass'
        else: objective = 'regression'
        self.params = {
            "n_estimators": int(round(n_estimators)),
            "num_leaves": params[
                'num_leaves'] if 'num_leaves' in params else int(
                    round(max_leaves)),
            'objective': params[
                "objective"] if "objective" in params else objective,
            'n_jobs': n_jobs,
            'learning_rate': float(learning_rate),
            'reg_alpha': float(reg_alpha),
            'reg_lambda': float(reg_lambda),
            'min_child_weight': float(min_child_weight),
            'colsample_bytree':float(colsample_bytree),
            'subsample': float(subsample),
        }
        # max_bin is (2^log_max_bin)-1 unless given explicitly
        self.params['max_bin'] = params['max_bin'] if 'max_bin' in params else (
            1<<int(round(log_max_bin)))-1
        if 'regression' in task:
            self.estimator_class = LGBMRegressor
        else:
            self.estimator_class = LGBMClassifier
        self._time_per_iter = None
        self._train_size = 0

    def _preprocess(self, X):
        # sparse integer matrices are not accepted by LightGBM; cast to float
        if not isinstance(X, pd.DataFrame) and issparse(
            X) and np.issubdtype(X.dtype, np.integer):
            X = X.astype(float)
        return X

    def fit(self, X_train, y_train, budget=None, **kwargs):
        '''Budget-aware fit: calibrate cost with 1 then 4 estimators,
        then cap n_estimators to the remaining budget. Restores
        self.params["n_estimators"] before returning the elapsed time.'''
        start_time = time.time()
        n_iter = self.params["n_estimators"]
        if (not self._time_per_iter or
            abs(self._train_size-X_train.shape[0])>4) and budget is not None:
            self.params["n_estimators"] = 1
            self._t1 = self._fit(X_train, y_train, **kwargs)
            if self._t1 >= budget:
                self.params["n_estimators"] = n_iter
                return self._t1
            self.params["n_estimators"] = 4
            self._t2 = self._fit(X_train, y_train, **kwargs)
            self._time_per_iter = (self._t2 - self._t1)/(
                self.params["n_estimators"]-1) if self._t2 > self._t1 \
                else self._t1 if self._t1 else 0.001
            self._train_size = X_train.shape[0]
            if self._t1+self._t2>=budget or n_iter==self.params["n_estimators"]:
                self.params["n_estimators"] = n_iter
                return time.time() - start_time
        if budget is not None:
            self.params["n_estimators"] = min(n_iter, int((budget-time.time()+
                start_time-self._t1)/self._time_per_iter+1))
        if self.params["n_estimators"] > 0:
            self._fit(X_train, y_train, **kwargs)
        self.params["n_estimators"] = n_iter
        train_time = time.time() - start_time
        return train_time
class XGBoostEstimator(SKLearnEstimator):
    '''XGBoost via the native (non-sklearn) API; used for regression.

    NOTE(review): the `weight=` keyword passed to xgb.train() below is not
    a valid xgb.train argument — confirm against the xgboost API (weights
    belong on the DMatrix).
    '''

    @classmethod
    def search_space(cls, data_size, **params):
        '''Hyperparameter search space; tree counts/leaves capped by
        the data size (at most 32768).'''
        upper = min(32768,int(data_size))
        return {
            'n_estimators': {
                'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                'init_value': 4,
            },
            'max_leaves': {
                'domain': tune.qloguniform(lower=4, upper=upper, q=1),
                'init_value': 4,
            },
            'min_child_weight': {
                'domain': tune.loguniform(lower=0.001, upper=20.0),
                'init_value': 20.0,
            },
            'learning_rate': {
                'domain': tune.loguniform(lower=0.01, upper=1.0),
                'init_value': 0.1,
            },
            'subsample': {
                'domain': tune.uniform(lower=0.6, upper=1.0),
                'init_value': 1.0,
            },
            'colsample_bylevel': {
                'domain': tune.uniform(lower=0.6, upper=1.0),
                'init_value': 1.0,
            },
            'colsample_bytree': {
                'domain': tune.uniform(lower=0.7, upper=1.0),
                'init_value': 1.0,
            },
            'reg_alpha': {
                'domain': tune.loguniform(lower=1e-10, upper=1.0),
                'init_value': 1e-10,
            },
            'reg_lambda': {
                'domain': tune.loguniform(lower=1e-10, upper=1.0),
                'init_value': 1.0,
            },
        }

    @classmethod
    def size(cls, config):
        # same footprint model as LightGBM
        return LGBMEstimator.size(config)

    @classmethod
    def cost_relative2lgbm(cls):
        return 1.6

    def __init__(self, task='regression', all_thread=False, n_jobs=1,
        n_estimators=4, max_leaves=4, subsample=1.0, min_child_weight=1,
        learning_rate=0.1, reg_lambda=1.0, reg_alpha=0.0, colsample_bylevel=1.0,
        colsample_bytree=1.0, tree_method='auto', **params):
        '''Map FLAML hyperparameters onto xgb.train() booster parameters;
        all_thread=True removes 'nthread' so xgboost uses all threads.'''
        super().__init__(task, **params)
        self._n_estimators = int(round(n_estimators))
        self._max_leaves = int(round(max_leaves))
        self.params = {
            'max_leaves': int(round(max_leaves)),
            'max_depth': 0,
            'grow_policy': params[
                "grow_policy"] if "grow_policy" in params else 'lossguide',
            'tree_method':tree_method,
            'verbosity': 0,
            'nthread':n_jobs,
            'learning_rate': float(learning_rate),
            'subsample': float(subsample),
            'reg_alpha': float(reg_alpha),
            'reg_lambda': float(reg_lambda),
            'min_child_weight': float(min_child_weight),
            'booster': params['booster'] if 'booster' in params else 'gbtree',
            'colsample_bylevel': float(colsample_bylevel),
            'colsample_bytree':float(colsample_bytree),
        }
        if all_thread:
            del self.params['nthread']

    def get_params(self, deep=False):
        # NOTE(review): raises KeyError when all_thread=True removed
        # 'nthread' — confirm intended
        params = super().get_params()
        params["n_jobs"] = params['nthread']
        return params

    def fit(self, X_train, y_train, budget=None, **kwargs):
        '''Train via xgb.train(); returns the elapsed time in seconds,
        or None when max_leaves is 0.'''
        start_time = time.time()
        if not issparse(X_train):
            self.params['tree_method'] = 'hist'
            X_train = self._preprocess(X_train)
        dtrain = xgb.DMatrix(X_train, label=y_train)
        if self._max_leaves>0:
            if 'sample_weight' in kwargs:
                self._model = xgb.train(self.params, dtrain,
                    self._n_estimators, weight=kwargs['sample_weight'])
            else:
                self._model = xgb.train(self.params, dtrain, self._n_estimators)
            del dtrain
            train_time = time.time() - start_time
            return train_time
        else:
            return None

    def predict(self, X_test):
        if not issparse(X_test):
            X_test = self._preprocess(X_test)
        dtest = xgb.DMatrix(X_test)
        return super().predict(dtest)
class XGBoostSklearnEstimator(SKLearnEstimator, LGBMEstimator):
    '''XGBoost via the sklearn API; used for classification.'''

    @classmethod
    def search_space(cls, data_size, **params):
        # identical search space to the native-API XGBoost estimator
        return XGBoostEstimator.search_space(data_size)

    @classmethod
    def cost_relative2lgbm(cls):
        return XGBoostEstimator.cost_relative2lgbm()

    def __init__(self, task='binary:logistic', n_jobs=1,
        n_estimators=4, max_leaves=4, subsample=1.0,
        min_child_weight=1, learning_rate=0.1, reg_lambda=1.0, reg_alpha=0.0,
        colsample_bylevel=1.0, colsample_bytree=1.0, tree_method='hist',
        **params):
        '''Map FLAML hyperparameters onto XGBRegressor/XGBClassifier
        kwargs.'''
        super().__init__(task, **params)
        self.params = {
            "n_estimators": int(round(n_estimators)),
            'max_leaves': int(round(max_leaves)),
            'max_depth': 0,
            'grow_policy': params[
                "grow_policy"] if "grow_policy" in params else 'lossguide',
            'tree_method':tree_method,
            'verbosity': 0,
            'n_jobs': n_jobs,
            'learning_rate': float(learning_rate),
            'subsample': float(subsample),
            'reg_alpha': float(reg_alpha),
            'reg_lambda': float(reg_lambda),
            'min_child_weight': float(min_child_weight),
            'booster': params['booster'] if 'booster' in params else 'gbtree',
            'colsample_bylevel': float(colsample_bylevel),
            'colsample_bytree': float(colsample_bytree),
        }
        if 'regression' in task:
            self.estimator_class = xgb.XGBRegressor
        else:
            self.estimator_class = xgb.XGBClassifier
        self._time_per_iter = None
        self._train_size = 0

    def fit(self, X_train, y_train, budget=None, **kwargs):
        # for sparse input, let xgboost pick the tree method instead of 'hist'
        if issparse(X_train):
            self.params['tree_method'] = 'auto'
        return super().fit(X_train, y_train, budget, **kwargs)
class RandomForestEstimator(SKLearnEstimator, LGBMEstimator):
    """Random-forest estimator with tunable size and feature fraction."""

    @classmethod
    def search_space(cls, data_size, task, **params):
        tree_cap = min(2048, int(data_size))
        space = {
            'n_estimators': {
                'domain': tune.qloguniform(lower=4, upper=tree_cap, q=1),
                'init_value': 4,
            },
            'max_features': {
                'domain': tune.loguniform(lower=0.1, upper=1.0),
                'init_value': 1.0,
            },
        }
        # The split criterion is only tuned for classification tasks.
        if task != 'regression':
            space['criterion'] = {
                'domain': tune.choice(['gini', 'entropy']),
            }
        return space

    @classmethod
    def size(cls, config):
        return 1.0

    @classmethod
    def cost_relative2lgbm(cls):
        return 2.0

    def __init__(self, task='binary:logistic', n_jobs=1,
                 n_estimators=4, max_features=1.0, criterion='gini', **params):
        """Map tuned values onto sklearn RandomForest parameters."""
        super().__init__(task, **params)
        self.params = {
            "n_estimators": int(round(n_estimators)),
            "n_jobs": n_jobs,
            'max_features': float(max_features),
        }
        if 'regression' in task:
            self.estimator_class = RandomForestRegressor
        else:
            self.estimator_class = RandomForestClassifier
            # criterion only exists on the classifier side.
            self.params['criterion'] = criterion
        self._time_per_iter = None
        self._train_size = 0

    def get_params(self, deep=False):
        """Return params with criterion encoded numerically (1=gini, 2=entropy)."""
        params = super().get_params()
        params["criterion"] = 1 if params["criterion"] == 'gini' else 2
        return params
class ExtraTreeEstimator(RandomForestEstimator):
    """Extremely-randomized trees; reuses the random-forest search space."""

    @classmethod
    def cost_relative2lgbm(cls):
        return 1.9

    def __init__(self, task='binary:logistic', **params):
        """Initialize like a random forest, then swap in the ExtraTrees class."""
        super().__init__(task, **params)
        if 'regression' in task:
            self.estimator_class = ExtraTreesRegressor
        else:
            self.estimator_class = ExtraTreesClassifier
class LRL1Classifier(SKLearnEstimator):
    """L1-regularized logistic regression (saga solver). Classification only."""

    @classmethod
    def search_space(cls, **params):
        # Only the inverse regularization strength C is tuned.
        return {
            'C': {
                'domain': tune.loguniform(lower=0.03125, upper=32768.0),
                'init_value': 1.0,
            },
        }

    @classmethod
    def cost_relative2lgbm(cls):
        return 160

    def __init__(self, task='binary:logistic', n_jobs=1, tol=0.0001, C=1.0,
                 **params):
        """Build LogisticRegression parameters.

        Raises:
            NotImplementedError: if a regression task is requested.
        """
        super().__init__(task, **params)
        self.params = {
            'penalty': 'l1',
            'tol': float(tol),
            'C': float(C),
            # saga is required for L1-penalized logistic regression.
            'solver': 'saga',
            'n_jobs': n_jobs,
        }
        if 'regression' in task:
            self.estimator_class = None
            # Fix: report the unsupported task through the exception message
            # instead of a bare print() followed by a message-less raise.
            raise NotImplementedError(
                'LR does not support regression task')
        self.estimator_class = LogisticRegression
class LRL2Classifier(SKLearnEstimator):
    """L2-regularized logistic regression (lbfgs solver). Classification only."""

    @classmethod
    def search_space(cls, **params):
        # Same C range as the L1 variant.
        return LRL1Classifier.search_space(**params)

    @classmethod
    def cost_relative2lgbm(cls):
        return 25

    def __init__(self, task='binary:logistic', n_jobs=1, tol=0.0001, C=1.0,
                 **params):
        """Build LogisticRegression parameters.

        Raises:
            NotImplementedError: if a regression task is requested.
        """
        super().__init__(task, **params)
        self.params = {
            'penalty': 'l2',
            'tol': float(tol),
            'C': float(C),
            'solver': 'lbfgs',
            'n_jobs': n_jobs,
        }
        if 'regression' in task:
            self.estimator_class = None
            # Fix: carry the message in the exception instead of print().
            raise NotImplementedError(
                'LR does not support regression task')
        self.estimator_class = LogisticRegression
class CatBoostEstimator(BaseEstimator):
    """CatBoost wrapper whose fit() adapts n_estimators to a time budget.

    Class-level attributes cache probe-fit timings so repeated fits on
    similarly sized data can reuse them across instances.
    """
    # Measured seconds per boosting iteration (None until the first probe fit).
    _time_per_iter = None
    # Training-set size the cached timing was measured on.
    _train_size = 0
    @classmethod
    def search_space(cls, data_size, **params):
        """Search space: early-stopping patience (data-size dependent) and lr."""
        # Larger datasets get a smaller upper bound on patience, clamped to
        # the range [11, 150].
        upper = max(min(round(1500000/data_size),150), 11)
        return {
            'early_stopping_rounds': {
                'domain': tune.qloguniform(lower=10, upper=upper, q=1),
                'init_value': 10,
            },
            'learning_rate': {
                'domain': tune.loguniform(lower=.005, upper=.2),
                'init_value': 0.1,
            },
        }
    @classmethod
    def size(cls, config):
        """Rough model-size bound in bytes for the fixed maximal config.

        NOTE(review): assumes 8192 trees of 64 leaves with 8-byte fields;
        the derivation of the per-tree term is undocumented - confirm.
        """
        n_estimators = 8192
        max_leaves = 64
        return (max_leaves*3 + (max_leaves-1)*4 + 1.0)*n_estimators*8
    @classmethod
    def cost_relative2lgbm(cls):
        # Relative training-cost factor versus LightGBM.
        return 15
    def __init__(self, task = 'binary:logistic', n_jobs=1,
        n_estimators=8192, learning_rate=0.1, early_stopping_rounds=4, **params):
        """Map the tuned hyperparameters onto catboost constructor params."""
        super().__init__(task, **params)
        self.params = {
            "early_stopping_rounds": int(round(early_stopping_rounds)),
            "n_estimators": n_estimators,
            'learning_rate': learning_rate,
            'thread_count': n_jobs,
            'verbose': False,
            'random_seed': params[
                "random_seed"] if "random_seed" in params else 10242048,
        }
        # catboost is imported lazily so the module loads without it.
        if 'regression' in task:
            from catboost import CatBoostRegressor
            self.estimator_class = CatBoostRegressor
        else:
            from catboost import CatBoostClassifier
            self.estimator_class = CatBoostClassifier
    def get_params(self, deep=False):
        """Expose catboost's 'thread_count' under the generic 'n_jobs' name."""
        params = super().get_params()
        params['n_jobs'] = params['thread_count']
        return params
    def fit(self, X_train, y_train, budget=None, **kwargs):
        """Fit within an optional wall-clock budget; return elapsed seconds.

        Strategy: probe-fit 1 then 4 estimators to estimate the fixed
        startup cost (_t1) and the per-iteration cost, derive how many
        iterations fit in the remaining budget, then train on the first
        portion of the data with the held-out tail as an eval_set.
        """
        start_time = time.time()
        n_iter = self.params["n_estimators"]
        if isinstance(X_train, pd.DataFrame):
            # catboost needs categorical columns declared explicitly.
            cat_features = list(X_train.select_dtypes(
                include='category').columns)
        else:
            cat_features = []
        # Re-measure timings when missing or the data size changed (>4 rows).
        if (not CatBoostEstimator._time_per_iter or
            abs(CatBoostEstimator._train_size-len(y_train))>4) and budget:
            # Probe 1: a single iteration measures the fixed startup cost.
            self.params["n_estimators"] = 1
            CatBoostEstimator._smallmodel = self.estimator_class(**self.params)
            CatBoostEstimator._smallmodel.fit(X_train, y_train,
                cat_features=cat_features, **kwargs)
            CatBoostEstimator._t1 = time.time() - start_time
            if CatBoostEstimator._t1 >= budget:
                # Budget already exhausted; keep the 1-iteration model.
                self.params["n_estimators"] = n_iter
                self._model = CatBoostEstimator._smallmodel
                return CatBoostEstimator._t1
            # Probe 2: 4 iterations give (elapsed - _t1) / 3 per iteration.
            self.params["n_estimators"] = 4
            CatBoostEstimator._smallmodel = self.estimator_class(**self.params)
            CatBoostEstimator._smallmodel.fit(X_train, y_train,
                cat_features=cat_features, **kwargs)
            CatBoostEstimator._time_per_iter = (time.time() - start_time -
                CatBoostEstimator._t1)/(self.params["n_estimators"]-1)
            if CatBoostEstimator._time_per_iter <= 0:
                # Guard against zero/negative estimates at clock resolution.
                CatBoostEstimator._time_per_iter = CatBoostEstimator._t1
            CatBoostEstimator._train_size = len(y_train)
            if time.time()-start_time>=budget or n_iter==self.params[
                "n_estimators"]:
                # Probes consumed the budget (or 4 was already the target).
                self.params["n_estimators"] = n_iter
                self._model = CatBoostEstimator._smallmodel
                return time.time()-start_time
        if budget:
            # Fit as many iterations as the remaining budget allows.
            train_times = 1
            self.params["n_estimators"] = min(n_iter, int((budget-time.time()+
                start_time-CatBoostEstimator._t1)/train_times/
                CatBoostEstimator._time_per_iter+1))
            self._model = CatBoostEstimator._smallmodel
        if self.params["n_estimators"] > 0:
            # Hold out the last 10% (at most 1000 rows) as the eval set.
            l = max(int(len(y_train)*0.9), len(y_train)-1000)
            X_tr, y_tr = X_train[:l], y_train[:l]
            if 'sample_weight' in kwargs:
                weight = kwargs['sample_weight']
                # Temporarily slice the weights to match the training split.
                if weight is not None: kwargs['sample_weight'] = weight[:l]
            else: weight = None
            from catboost import Pool
            model = self.estimator_class(**self.params)
            model.fit(X_tr, y_tr, cat_features=cat_features, eval_set=Pool(
                data=X_train[l:], label=y_train[l:], cat_features=cat_features),
                **kwargs)
            # Restore the caller's full weight array.
            if weight is not None: kwargs['sample_weight'] = weight
            self._model = model
        # Restore the configured n_estimators for subsequent fits.
        self.params["n_estimators"] = n_iter
        train_time = time.time() - start_time
        return train_time
class KNeighborsEstimator(BaseEstimator):
    """Distance-weighted k-nearest-neighbors estimator."""

    @classmethod
    def search_space(cls, data_size, **params):
        # Never search more neighbors than half the data (capped at 512).
        neighbor_cap = min(512, int(data_size/2))
        return {
            'n_neighbors': {
                'domain': tune.qloguniform(lower=1, upper=neighbor_cap, q=1),
                'init_value': 5,
            },
        }

    @classmethod
    def cost_relative2lgbm(cls):
        return 30

    def __init__(self, task='binary:logistic', n_jobs=1,
                 n_neighbors=5, **params):
        """Build KNeighbors parameters; picks regressor or classifier by task."""
        super().__init__(task, **params)
        self.params = {
            'n_neighbors': int(round(n_neighbors)),
            'weights': 'distance',
            'n_jobs': n_jobs,
        }
        if 'regression' in task:
            from sklearn.neighbors import KNeighborsRegressor
            self.estimator_class = KNeighborsRegressor
        else:
            from sklearn.neighbors import KNeighborsClassifier
            self.estimator_class = KNeighborsClassifier

    def _preprocess(self, X):
        """Drop categorical columns: KNN distances need numeric features only."""
        if isinstance(X, pd.DataFrame):
            cat_columns = X.select_dtypes(['category']).columns
            if X.shape[1] == len(cat_columns):
                # Nothing numeric would remain after dropping.
                raise ValueError(
                    "kneighbor requires at least one numeric feature")
            X = X.drop(cat_columns, axis=1)
        return X
| true | true |
1c2c67ee1b5f4c5fbb03ab10efcc9133f3946fa0 | 843 | py | Python | metarecord/migrations/0036_add_classification_function_allowed.py | kerkkoheiskanen/helerm | bdaf801a940d42325a1076b42bb0edef831fbac9 | [
"MIT"
] | 2 | 2017-04-21T15:36:23.000Z | 2020-12-04T09:32:39.000Z | metarecord/migrations/0036_add_classification_function_allowed.py | kerkkoheiskanen/helerm | bdaf801a940d42325a1076b42bb0edef831fbac9 | [
"MIT"
] | 168 | 2016-10-05T12:58:41.000Z | 2021-08-31T14:29:56.000Z | metarecord/migrations/0036_add_classification_function_allowed.py | kerkkoheiskanen/helerm | bdaf801a940d42325a1076b42bb0edef831fbac9 | [
"MIT"
] | 7 | 2016-10-13T12:51:36.000Z | 2021-01-21T13:05:04.000Z | # Generated by Django 2.0.1 on 2018-01-12 12:04
from django.db import migrations, models
def populate_function_allowed(apps, schema_editor):
    """Backfill: a classification allows functions only when it is a leaf."""
    Classification = apps.get_model('metarecord', 'Classification')
    for node in Classification.objects.all():
        # Leaf nodes (no children) are the only ones that may carry functions.
        node.function_allowed = not node.children.exists()
        node.save(update_fields=('function_allowed',))
class Migration(migrations.Migration):
    # Adds Classification.function_allowed (default False) and backfills it
    # so that only leaf classifications allow functions.
    dependencies = [
        ('metarecord', '0035_add_on_deletes'),
    ]
    operations = [
        migrations.AddField(
            model_name='classification',
            name='function_allowed',
            field=models.BooleanField(default=False, verbose_name='function allowed'),
        ),
        # Reverse is a no-op: unapplying AddField drops the column anyway.
        migrations.RunPython(populate_function_allowed, migrations.RunPython.noop)
    ]
| 30.107143 | 86 | 0.699881 |
from django.db import migrations, models
def populate_function_allowed(apps, schema_editor):
    # Backfill step: a classification allows functions only when it is a
    # leaf (has no child classifications).
    Classification = apps.get_model('metarecord', 'Classification')
    for classification in Classification.objects.all():
        classification.function_allowed = not classification.children.exists()
        classification.save(update_fields=('function_allowed',))
class Migration(migrations.Migration):
    # Adds Classification.function_allowed (default False) and backfills it
    # so that only leaf classifications allow functions.
    dependencies = [
        ('metarecord', '0035_add_on_deletes'),
    ]
    operations = [
        migrations.AddField(
            model_name='classification',
            name='function_allowed',
            field=models.BooleanField(default=False, verbose_name='function allowed'),
        ),
        # Reverse is a no-op: unapplying AddField drops the column anyway.
        migrations.RunPython(populate_function_allowed, migrations.RunPython.noop)
    ]
| true | true |
1c2c68c004e78ae9ce7a1151915aa845b798f605 | 1,051 | py | Python | toontown/ai/DistributedBlackCatMgrAI.py | CrankySupertoon01/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2021-02-13T22:40:50.000Z | 2021-02-13T22:40:50.000Z | toontown/ai/DistributedBlackCatMgrAI.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | toontown/ai/DistributedBlackCatMgrAI.py | CrankySupertoonArchive/Toontown-2 | 60893d104528a8e7eb4aced5d0015f22e203466d | [
"MIT"
] | 2 | 2019-12-02T01:39:10.000Z | 2021-02-13T22:41:00.000Z | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from toontown.toon.ToonDNA import ToonDNA
from toontown.toonbase import ToontownGlobals
class DistributedBlackCatMgrAI(DistributedObjectAI):
    # AI-side manager that grants the Black Cat Day transformation.
    notify = DirectNotifyGlobal.directNotify.newCategory("DistributedBlackCatMgrAI")
    def requestBlackCatTransformation(self):
        """Turn the requesting toon into a black cat, if eligible.

        Silently ignored unless Black Cat Day is running and the sender is
        a known avatar that has not acked the tutorial, is a cat, and is
        not already black (head color 0x1a == 26).
        """
        if not self.air.newsManager.isHolidayRunning(ToontownGlobals.BLACK_CAT_DAY):
            return
        avId = self.air.getAvatarIdFromSender()
        av = self.air.doId2do.get(avId)
        if not av or av.getTutorialAck() or av.dna.getAnimal() != 'cat' or av.dna.headColor == 0x1a:
            return
        newDNA = ToonDNA()
        newDNA.makeFromNetString(av.getDNAString())
        # 26 (0x1a) is applied to arms, legs and head - the same value the
        # eligibility check above uses to detect an already-black cat.
        newDNA.updateToonProperties(armColor=26, legColor=26, headColor=26)
        # NOTE(review): the 1 s delay presumably lets the client-side
        # transformation effect start before the DNA swap - confirm.
        taskMgr.doMethodLater(1.0, lambda task: av.b_setDNAString(newDNA.makeNetString()), 'transform-%d' % avId)
        self.sendUpdateToAvatarId(avId, 'doBlackCatTransformation', [])
| 43.791667 | 113 | 0.743102 | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from toontown.toon.ToonDNA import ToonDNA
from toontown.toonbase import ToontownGlobals
class DistributedBlackCatMgrAI(DistributedObjectAI):
    # AI-side manager that grants the Black Cat Day transformation.
    notify = DirectNotifyGlobal.directNotify.newCategory("DistributedBlackCatMgrAI")
    def requestBlackCatTransformation(self):
        """Turn the requesting toon into a black cat, if eligible.

        Silently ignored unless Black Cat Day is running and the sender is
        a known avatar that has not acked the tutorial, is a cat, and is
        not already black (head color 0x1a == 26).
        """
        if not self.air.newsManager.isHolidayRunning(ToontownGlobals.BLACK_CAT_DAY):
            return
        avId = self.air.getAvatarIdFromSender()
        av = self.air.doId2do.get(avId)
        if not av or av.getTutorialAck() or av.dna.getAnimal() != 'cat' or av.dna.headColor == 0x1a:
            return
        newDNA = ToonDNA()
        newDNA.makeFromNetString(av.getDNAString())
        # 26 (0x1a) is applied to arms, legs and head - the same value the
        # eligibility check above uses to detect an already-black cat.
        newDNA.updateToonProperties(armColor=26, legColor=26, headColor=26)
        # NOTE(review): the 1 s delay presumably lets the client-side
        # transformation effect start before the DNA swap - confirm.
        taskMgr.doMethodLater(1.0, lambda task: av.b_setDNAString(newDNA.makeNetString()), 'transform-%d' % avId)
        self.sendUpdateToAvatarId(avId, 'doBlackCatTransformation', [])
| true | true |
1c2c68cda1ba5abb7f297c0c2cbabf3c195e892b | 202 | py | Python | examples/HelloWorld.py | piotrmaslanka/systemy | fff963cb9622f6a449524bea2ee10bb9503bcaa6 | [
"MIT"
] | null | null | null | examples/HelloWorld.py | piotrmaslanka/systemy | fff963cb9622f6a449524bea2ee10bb9503bcaa6 | [
"MIT"
] | null | null | null | examples/HelloWorld.py | piotrmaslanka/systemy | fff963cb9622f6a449524bea2ee10bb9503bcaa6 | [
"MIT"
] | 1 | 2021-11-28T09:52:04.000Z | 2021-11-28T09:52:04.000Z | from yos.rt import BaseTasklet
from yos.tasklets import Tasklet
from yos.io import NetworkSocket
class HelloWorldTasklet(BaseTasklet):
    """Minimal example tasklet: prints a greeting when the runtime starts it."""

    def on_startup(self):
        # Startup hook invoked once by the tasklet runtime.
        print('Hello World!')
| 20.2 | 37 | 0.727723 | from yos.rt import BaseTasklet
from yos.tasklets import Tasklet
from yos.io import NetworkSocket
class HelloWorldTasklet(BaseTasklet):
    # Minimal example tasklet: prints a greeting when the runtime starts it.
    def on_startup(self):
        # Startup hook invoked once by the tasklet runtime.
        print('Hello World!')
| true | true |
1c2c696274b2d93fa0051a0dbadbbce7c014e570 | 1,384 | py | Python | web/books/management/commands/books.py | hdknr/django-books | b4bf6d144240edc4bcfc94180377adaadc9e533c | [
"MIT"
] | null | null | null | web/books/management/commands/books.py | hdknr/django-books | b4bf6d144240edc4bcfc94180377adaadc9e533c | [
"MIT"
] | null | null | null | web/books/management/commands/books.py | hdknr/django-books | b4bf6d144240edc4bcfc94180377adaadc9e533c | [
"MIT"
] | null | null | null | from django.utils import translation ,timezone
from django.conf import settings
from datetime import datetime, time, timedelta
import djclick as click
from logging import getLogger
from collections import OrderedDict
import requests
import yaml
import os
import uuid
import itertools
from books import models
log = getLogger()
translation.activate('ja')
DOC_PATH = os.path.join(os.path.dirname(settings.BASE_DIR), 'docs')
@click.group(invoke_without_command=True)
@click.pass_context
def main(ctx):
    # Root djclick command group; it does nothing itself - the subcommands
    # registered on it below perform the work.
    pass
@main.command()
@click.argument('invoice_id')
@click.pass_context
def dup_invoice(ctx, invoice_id):
    '''Duplicate the invoice with the given id.'''
    invoice = models.Invoice.objects.filter(id=invoice_id).first()
    if invoice:
        # NOTE(review): dup() is called for its side effect; the returned
        # instance is unused.
        instance = invoice.dup()
@main.command()
@click.argument('fisical_year_id')
@click.pass_context
def fisical_year(ctx, fisical_year_id):
    '''Print income and spending totals for the given fiscal year.'''
    year = models.FisicalYear.objects.filter(id=fisical_year_id).first()
    if year:
        click.echo(year.income())
        click.echo(year.spending())
@main.command()
@click.argument('org_id')
@click.pass_context
def payments(ctx, org_id):
    '''Print total payments (spendings) made to the given organization.'''
    data = models.Payment.objects.spendings(organization__id=org_id).totals()
    click.echo(data)
@main.command()
@click.pass_context
def receipts(ctx):
'''入金予定'''
data = models.Receipt.objects.incomes().totals()
click.echo(data) | 22.688525 | 77 | 0.732659 | from django.utils import translation ,timezone
from django.conf import settings
from datetime import datetime, time, timedelta
import djclick as click
from logging import getLogger
from collections import OrderedDict
import requests
import yaml
import os
import uuid
import itertools
from books import models
log = getLogger()
translation.activate('ja')
DOC_PATH = os.path.join(os.path.dirname(settings.BASE_DIR), 'docs')
@click.group(invoke_without_command=True)
@click.pass_context
def main(ctx):
    # Root djclick command group; it does nothing itself - the subcommands
    # registered on it below perform the work.
    pass
@main.command()
@click.argument('invoice_id')
@click.pass_context
def dup_invoice(ctx, invoice_id):
    # Duplicate the invoice with the given id. NOTE(review): dup() is
    # called for its side effect; the returned instance is unused.
    invoice = models.Invoice.objects.filter(id=invoice_id).first()
    if invoice:
        instance = invoice.dup()
@main.command()
@click.argument('fisical_year_id')
@click.pass_context
def fisical_year(ctx, fisical_year_id):
    # Print income and spending totals for the given fiscal year.
    year = models.FisicalYear.objects.filter(id=fisical_year_id).first()
    if year:
        click.echo(year.income())
        click.echo(year.spending())
@main.command()
@click.argument('org_id')
@click.pass_context
def payments(ctx, org_id):
    # Print total payments (spendings) made to the given organization.
    data = models.Payment.objects.spendings(organization__id=org_id).totals()
    click.echo(data)
@main.command()
@click.pass_context
def receipts(ctx):
data = models.Receipt.objects.incomes().totals()
click.echo(data) | true | true |
1c2c6a527f99317b1fa4a91b5989f19c6b3bc987 | 5,605 | py | Python | survival_evaluation/evaluations.py | haiderstats/survival_evaluation | 70e3a4d530a61549609689e3ebd80818f3ab14d9 | [
"MIT"
] | 4 | 2021-03-02T00:33:22.000Z | 2021-10-06T12:33:19.000Z | survival_evaluation/evaluations.py | haiderstats/survival_evaluation | 70e3a4d530a61549609689e3ebd80818f3ab14d9 | [
"MIT"
] | null | null | null | survival_evaluation/evaluations.py | haiderstats/survival_evaluation | 70e3a4d530a61549609689e3ebd80818f3ab14d9 | [
"MIT"
] | null | null | null | from typing import Optional
import numpy as np # type: ignore
from scipy.stats import chi2 # type: ignore
from survival_evaluation.types import NumericArrayLike
from survival_evaluation.utility import (
KaplanMeier,
KaplanMeierArea,
to_array,
validate_size,
)
# pylint: disable=too-many-arguments
def l1(
    event_times: NumericArrayLike,
    event_indicators: NumericArrayLike,
    predictions: NumericArrayLike,
    training_event_times: Optional[NumericArrayLike] = None,
    training_event_indicators: Optional[NumericArrayLike] = None,
    l1_type: str = "hinge",
) -> float:
    """Mean absolute error between predicted and observed survival times.

    l1_type 'hinge': for censored subjects the residual (t_censor - pred)
    is clipped at 0, so predicting beyond the censoring time costs nothing.
    l1_type 'margin': censored subjects are compared against a
    Kaplan-Meier-derived "best guess" time built from the training set,
    weighted by 1 - S(t_censor).

    Raises ValueError for any other l1_type, or when 'margin' is requested
    without training data.
    """
    event_times = to_array(event_times)
    event_indicators = to_array(event_indicators, to_boolean=True)
    predictions = to_array(predictions)
    validate_size(event_times, event_indicators, predictions)
    if l1_type == "hinge":
        scores = event_times - predictions
        # Censored rows: only penalize predictions below the censoring time.
        scores[~event_indicators] = np.maximum(scores[~event_indicators], 0)
        return np.mean(np.abs(scores))
    if l1_type == "margin":
        if training_event_times is None or training_event_indicators is None:
            raise ValueError(
                "If 'margin' is chosen, training set values must be included."
            )
        training_event_times = to_array(training_event_times)
        training_event_indicators = to_array(training_event_indicators, to_boolean=True)
        # Kaplan-Meier estimator fit on the training data only.
        km_model = KaplanMeierArea(training_event_times, training_event_indicators)
        censor_times = event_times[~event_indicators]
        # Confidence weight per censored subject: 1 - S(censor time).
        weights = 1 - km_model.predict(censor_times)
        best_guesses = km_model.best_guess(censor_times)
        scores = np.empty(predictions.size)
        scores[event_indicators] = (
            event_times[event_indicators] - predictions[event_indicators]
        )
        scores[~event_indicators] = weights * (
            best_guesses - predictions[~event_indicators]
        )
        # Normalize by (#events + total censor weight) rather than N.
        weighted_multiplier = 1 / (np.sum(event_indicators) + np.sum(weights))
        return weighted_multiplier * np.sum(np.abs(scores))
    raise ValueError("L1 type must be either 'hinge' or 'margin'.")
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
def one_calibration(
    event_times: NumericArrayLike,
    event_indicators: NumericArrayLike,
    predictions: NumericArrayLike,
    time: float,
    bins: int = 10,
) -> dict:
    """Hosmer-Lemeshow style one-calibration test at a fixed time horizon.

    `predictions` are survival probabilities at `time`; they are converted
    to event (risk) probabilities, subjects are sorted by risk and split
    into `bins` groups, and each group's mean predicted risk is compared
    with the Kaplan-Meier observed event probability at `time`.

    Returns a dict with the chi-square p-value plus the per-bin observed
    and expected probabilities.
    """
    event_times = to_array(event_times)
    event_indicators = to_array(event_indicators, to_boolean=True)
    # Convert survival probabilities into event (risk) probabilities.
    predictions = 1 - to_array(predictions)
    # Sort every array by decreasing risk before binning.
    prediction_order = np.argsort(-predictions)
    predictions = predictions[prediction_order]
    event_times = event_times[prediction_order]
    event_indicators = event_indicators[prediction_order]
    # Can't do np.mean since split array may be of different sizes.
    binned_event_times = np.array_split(event_times, bins)
    binned_event_indicators = np.array_split(event_indicators, bins)
    probability_means = [np.mean(x) for x in np.array_split(predictions, bins)]
    hosmer_lemeshow = 0
    observed_probabilities = list()
    expected_probabilities = list()
    for b in range(bins):
        prob = probability_means[b]
        if prob == 1.0:
            # prob == 1 would divide by zero in the HL denominator below.
            raise ValueError(
                "One-Calibration is not well defined: the risk"
                f"probability of the {b}th bin was {prob}."
            )
        km_model = KaplanMeier(binned_event_times[b], binned_event_indicators[b])
        # Observed event probability in this bin at the evaluation time.
        event_probability = 1 - km_model.predict(time)
        bin_count = len(binned_event_times[b])
        hosmer_lemeshow += (bin_count * event_probability - bin_count * prob) ** 2 / (
            bin_count * prob * (1 - prob)
        )
        observed_probabilities.append(event_probability)
        expected_probabilities.append(prob)
    return dict(
        p_value=1 - chi2.cdf(hosmer_lemeshow, bins - 1),
        observed=observed_probabilities,
        expected=expected_probabilities,
    )
def d_calibration(
    event_indicators: NumericArrayLike,
    predictions: NumericArrayLike,
    bins: int = 10,
) -> dict:
    """Distribution-calibration (D-calibration) chi-square test.

    NOTE(review): `predictions` appear to be each subject's predicted
    survival probability evaluated at their own observed time (the
    standard D-calibration input) - confirm with callers. Uncensored
    subjects contribute 1 to the bin their probability falls in; censored
    subjects spread fractional mass over their own bin and all lower bins.

    Returns the chi-square p-value plus the per-bin proportions, split
    into censored and uncensored contributions.
    """
    event_indicators = to_array(event_indicators, to_boolean=True)
    predictions = to_array(predictions)
    # include minimum to catch if probability = 1.
    bin_index = np.minimum(np.floor(predictions * bins), bins - 1).astype(int)
    censored_bin_indexes = bin_index[~event_indicators]
    uncensored_bin_indexes = bin_index[event_indicators]
    censored_predictions = predictions[~event_indicators]
    # Fraction of a censored subject's mass that stays in its own bin.
    censored_contribution = 1 - (censored_bin_indexes / bins) * (
        1 / censored_predictions
    )
    # Mass assigned to each bin strictly below the subject's own bin.
    censored_following_contribution = 1 / (bins * censored_predictions)
    # Strict lower-triangular mask: row i marks the bins below bin i.
    contribution_pattern = np.tril(np.ones([bins, bins]), k=-1).astype(bool)
    following_contributions = np.matmul(
        censored_following_contribution, contribution_pattern[censored_bin_indexes]
    )
    single_contributions = np.matmul(
        censored_contribution, np.eye(bins)[censored_bin_indexes]
    )
    uncensored_contributions = np.sum(np.eye(bins)[uncensored_bin_indexes], axis=0)
    bin_count = (
        single_contributions + following_contributions + uncensored_contributions
    )
    # Chi-square against the uniform expectation of N/bins mass per bin.
    chi2_statistic = np.sum(
        np.square(bin_count - len(predictions) / bins) / (len(predictions) / bins)
    )
    return dict(
        p_value=1 - chi2.cdf(chi2_statistic, bins - 1),
        bin_proportions=bin_count / len(predictions),
        censored_contributions=(single_contributions + following_contributions)
        / len(predictions),
        uncensored_contributions=uncensored_contributions / len(predictions),
    )
| 36.633987 | 88 | 0.70116 | from typing import Optional
import numpy as np
from scipy.stats import chi2
from survival_evaluation.types import NumericArrayLike
from survival_evaluation.utility import (
KaplanMeier,
KaplanMeierArea,
to_array,
validate_size,
)
def l1(
    event_times: NumericArrayLike,
    event_indicators: NumericArrayLike,
    predictions: NumericArrayLike,
    training_event_times: Optional[NumericArrayLike] = None,
    training_event_indicators: Optional[NumericArrayLike] = None,
    l1_type: str = "hinge",
) -> float:
    """Mean absolute error between predicted and observed survival times.

    l1_type 'hinge': for censored subjects the residual (t_censor - pred)
    is clipped at 0, so predicting beyond the censoring time costs nothing.
    l1_type 'margin': censored subjects are compared against a
    Kaplan-Meier-derived "best guess" time built from the training set,
    weighted by 1 - S(t_censor).

    Raises ValueError for any other l1_type, or when 'margin' is requested
    without training data.
    """
    event_times = to_array(event_times)
    event_indicators = to_array(event_indicators, to_boolean=True)
    predictions = to_array(predictions)
    validate_size(event_times, event_indicators, predictions)
    if l1_type == "hinge":
        scores = event_times - predictions
        # Censored rows: only penalize predictions below the censoring time.
        scores[~event_indicators] = np.maximum(scores[~event_indicators], 0)
        return np.mean(np.abs(scores))
    if l1_type == "margin":
        if training_event_times is None or training_event_indicators is None:
            raise ValueError(
                "If 'margin' is chosen, training set values must be included."
            )
        training_event_times = to_array(training_event_times)
        training_event_indicators = to_array(training_event_indicators, to_boolean=True)
        # Kaplan-Meier estimator fit on the training data only.
        km_model = KaplanMeierArea(training_event_times, training_event_indicators)
        censor_times = event_times[~event_indicators]
        # Confidence weight per censored subject: 1 - S(censor time).
        weights = 1 - km_model.predict(censor_times)
        best_guesses = km_model.best_guess(censor_times)
        scores = np.empty(predictions.size)
        scores[event_indicators] = (
            event_times[event_indicators] - predictions[event_indicators]
        )
        scores[~event_indicators] = weights * (
            best_guesses - predictions[~event_indicators]
        )
        # Normalize by (#events + total censor weight) rather than N.
        weighted_multiplier = 1 / (np.sum(event_indicators) + np.sum(weights))
        return weighted_multiplier * np.sum(np.abs(scores))
    raise ValueError("L1 type must be either 'hinge' or 'margin'.")
def one_calibration(
    event_times: NumericArrayLike,
    event_indicators: NumericArrayLike,
    predictions: NumericArrayLike,
    time: float,
    bins: int = 10,
) -> dict:
    """Hosmer-Lemeshow style one-calibration test at a fixed time horizon.

    `predictions` are survival probabilities at `time`; they are converted
    to event (risk) probabilities, subjects are sorted by risk and split
    into `bins` groups, and each group's mean predicted risk is compared
    with the Kaplan-Meier observed event probability at `time`.

    Returns a dict with the chi-square p-value plus the per-bin observed
    and expected probabilities.
    """
    event_times = to_array(event_times)
    event_indicators = to_array(event_indicators, to_boolean=True)
    # Convert survival probabilities into event (risk) probabilities.
    predictions = 1 - to_array(predictions)
    # Sort every array by decreasing risk before binning.
    prediction_order = np.argsort(-predictions)
    predictions = predictions[prediction_order]
    event_times = event_times[prediction_order]
    event_indicators = event_indicators[prediction_order]
    # array_split tolerates unequal bin sizes; a plain reshape would not.
    binned_event_times = np.array_split(event_times, bins)
    binned_event_indicators = np.array_split(event_indicators, bins)
    probability_means = [np.mean(x) for x in np.array_split(predictions, bins)]
    hosmer_lemeshow = 0
    observed_probabilities = list()
    expected_probabilities = list()
    for b in range(bins):
        prob = probability_means[b]
        if prob == 1.0:
            # prob == 1 would divide by zero in the HL denominator below.
            raise ValueError(
                "One-Calibration is not well defined: the risk"
                f"probability of the {b}th bin was {prob}."
            )
        km_model = KaplanMeier(binned_event_times[b], binned_event_indicators[b])
        # Observed event probability in this bin at the evaluation time.
        event_probability = 1 - km_model.predict(time)
        bin_count = len(binned_event_times[b])
        hosmer_lemeshow += (bin_count * event_probability - bin_count * prob) ** 2 / (
            bin_count * prob * (1 - prob)
        )
        observed_probabilities.append(event_probability)
        expected_probabilities.append(prob)
    return dict(
        p_value=1 - chi2.cdf(hosmer_lemeshow, bins - 1),
        observed=observed_probabilities,
        expected=expected_probabilities,
    )
def d_calibration(
    event_indicators: NumericArrayLike,
    predictions: NumericArrayLike,
    bins: int = 10,
) -> dict:
    """Distribution-calibration (D-calibration) chi-square test.

    NOTE(review): `predictions` appear to be each subject's predicted
    survival probability evaluated at their own observed time (the
    standard D-calibration input) - confirm with callers. Uncensored
    subjects contribute 1 to the bin their probability falls in; censored
    subjects spread fractional mass over their own bin and all lower bins.

    Returns the chi-square p-value plus the per-bin proportions, split
    into censored and uncensored contributions.
    """
    event_indicators = to_array(event_indicators, to_boolean=True)
    predictions = to_array(predictions)
    # include minimum to catch if probability = 1.
    bin_index = np.minimum(np.floor(predictions * bins), bins - 1).astype(int)
    censored_bin_indexes = bin_index[~event_indicators]
    uncensored_bin_indexes = bin_index[event_indicators]
    censored_predictions = predictions[~event_indicators]
    # Fraction of a censored subject's mass that stays in its own bin.
    censored_contribution = 1 - (censored_bin_indexes / bins) * (
        1 / censored_predictions
    )
    # Mass assigned to each bin strictly below the subject's own bin.
    censored_following_contribution = 1 / (bins * censored_predictions)
    # Strict lower-triangular mask: row i marks the bins below bin i.
    contribution_pattern = np.tril(np.ones([bins, bins]), k=-1).astype(bool)
    following_contributions = np.matmul(
        censored_following_contribution, contribution_pattern[censored_bin_indexes]
    )
    single_contributions = np.matmul(
        censored_contribution, np.eye(bins)[censored_bin_indexes]
    )
    uncensored_contributions = np.sum(np.eye(bins)[uncensored_bin_indexes], axis=0)
    bin_count = (
        single_contributions + following_contributions + uncensored_contributions
    )
    # Chi-square against the uniform expectation of N/bins mass per bin.
    chi2_statistic = np.sum(
        np.square(bin_count - len(predictions) / bins) / (len(predictions) / bins)
    )
    return dict(
        p_value=1 - chi2.cdf(chi2_statistic, bins - 1),
        bin_proportions=bin_count / len(predictions),
        censored_contributions=(single_contributions + following_contributions)
        / len(predictions),
        uncensored_contributions=uncensored_contributions / len(predictions),
    )
| true | true |
1c2c6a9b7cfebf83a0d807beb46af8faed3075a5 | 8,978 | py | Python | plugin/plugins/network/check_ibm_san_directors_crcs.py | crazy-canux/xPlugin_Monitoring | 4a66d26f9d2982609489eaa0f57d6afb16aca37c | [
"Apache-2.0"
] | 2 | 2015-12-17T04:00:30.000Z | 2015-12-22T11:49:01.000Z | plugin/plugins/network/check_ibm_san_directors_crcs.py | crazy-canux/xPlugin_Monitoring | 4a66d26f9d2982609489eaa0f57d6afb16aca37c | [
"Apache-2.0"
] | null | null | null | plugin/plugins/network/check_ibm_san_directors_crcs.py | crazy-canux/xPlugin_Monitoring | 4a66d26f9d2982609489eaa0f57d6afb16aca37c | [
"Apache-2.0"
] | 1 | 2017-02-20T22:57:17.000Z | 2017-02-20T22:57:17.000Z | #!/usr/bin/env python2.7
# -*- coding: UTF-8 -*-
#===============================================================================
# Name : check_ibm_san_directors_crcs.py
# Authors : Canux CHENG <canuxcheng@gmail.com>
# Description : Check IBM SAN Directors for CRCs on ports.
#-------------------------------------------------------------------------------
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#===============================================================================
#
import logging as log
from pprint import pformat
import traceback
from time import time
from datetime import datetime
import math
from shared import __version__
from monitoring.nagios.plugin import NagiosPluginSNMP
logger = log.getLogger('plugin')
# Plugin init class
class IBMSanDirectorsCRC(NagiosPluginSNMP):
    """SNMP plugin that checks CRC error counters on IBM SAN director ports."""
    def initialize(self):
        # Record when this run started; used to timestamp the SNMP results.
        super(IBMSanDirectorsCRC, self).initialize()
        # Timestamp at execution
        self.runtime = time()
        logger.debug('-- Plugin runtime: %s' % self.runtime)
    def define_plugin_arguments(self):
        """Add extra specific arguments"""
        super(IBMSanDirectorsCRC, self).define_plugin_arguments()
        # -r: number of stored records to average over (must be >= 2, see
        # verify_plugin_arguments below).
        self.required_args.add_argument('-r',
                                        dest='avgrec',
                                        type=int,
                                        default=2,
                                        help="Make an average for the last <N> records (default to 2).",
                                        )
        # -w / -c: warning and critical thresholds on the averaged CRC count.
        self.required_args.add_argument('-w',
                                        dest='warning',
                                        type=int,
                                        help="Warn if average number of CRCs are above this threshold.",
                                        required=True,
                                        )
        self.required_args.add_argument('-c',
                                        dest='critical',
                                        type=int,
                                        help="Crit if average number of CRCs are above this threshold.",
                                        required=True,
                                        )
    def verify_plugin_arguments(self):
        """Validate option combinations; bad input is reported via self.unknown().

        NOTE(review): self.unknown() presumably terminates the plugin with
        UNKNOWN status (NagiosPluginSNMP behavior) - confirm.
        """
        super(IBMSanDirectorsCRC, self).verify_plugin_arguments()
        # Number of records
        if self.options.avgrec < 2: self.unknown('Number of records must be >= 2 to make an average !')
        # Thresholds
        if self.options.warning > self.options.critical:
            self.unknown('Warning threshold cannot be above critical !')
        elif self.options.warning < 0 or self.options.critical < 0:
            self.unknown('Warning / Critical threshold cannot be below zero !')
# Init plugin
progdesc = 'Check IBM SAN Directors for CRCs on ports.'
plugin = IBMSanDirectorsCRC(version=__version__, description=progdesc)
# Load any existing pickled retention data (history of previous samples).
try:
    # Load retention data
    retention_data = plugin.load_data()
except IOError:
    # No retention data to load yet (first run): start with empty history.
    retention_data = []
# Prepare SNMP query.  OIDs live under enterprise 1588 (presumably the
# Brocade/IBM SW-MIB port table) -- TODO confirm against the MIB.
oids = {
    'name': '1.3.6.1.4.1.1588.2.1.1.1.6.2.1.36',
    'alias': '1.3.6.1.4.1.1588.2.1.1.1.6.2.1.37',
    'crc': '1.3.6.1.4.1.1588.2.1.1.1.6.2.1.22',
}
query = plugin.snmp.getnext(oids)
# Dictionary used to store gathered SNMP data and used by pickle for saving results
snmp_results = {
    'timestamp': plugin.runtime,
    'values': {},  # Store SNMP results, keyed by port alias
}
# Collect per-port name/alias/CRC from the three SNMP walks; rows are
# correlated by SNMP index.
try:
    for port in query['name']:
        data = snmp_results['values']
        name = port.pretty()
        # The [0] lookups raise IndexError when a row has no matching
        # alias/crc entry -- treated like an empty OID below.
        alias = [a.pretty() for a in query['alias'] if a.index == port.index ][0]
        crc = [c.value for c in query['crc'] if c.index == port.index ][0]
        if alias:
            # dict.has_key() was removed in Python 3; `in` works everywhere.
            if alias not in data:
                data[alias] = {}
            if name:
                data[alias]['name'] = name
            else:
                data[alias]['name'] = 'No description'
            data[alias]['crc'] = crc
except (KeyError, IndexError):
    # Unexpected result from the query (empty OID, missing row, etc...)
    message = """Unexpected error ! Please check plugin configuration.
If you see this message that would mean the query returned no result (empty OID) or the equipment does not
support such requests...
%s
Please check your plugin configuration first.""" % traceback.format_exc(limit=1)
    plugin.unknown(message)
logger.debug('-- SNMP data:')
logger.debug(pformat(snmp_results, indent=4))
# Save to pickle file the new data gathered; keep at most 50 records.
retention_data.append(snmp_results)
plugin.save_data(retention_data, limit=50)
# Calculate averaging window: minutes spanned by the last <avgrec> records.
last_records = retention_data[-plugin.options.avgrec:]
avg_record_time = 0
if len(retention_data) >= plugin.options.avgrec:
    calc_total_seconds = datetime.fromtimestamp(plugin.runtime) - datetime.fromtimestamp(last_records[0]['timestamp'])
    avg_record_time = int(math.ceil(calc_total_seconds.total_seconds()/60))
else:
    # Stop execution if not enough records in retention file. Wait next check.
    missing = plugin.options.avgrec - len(retention_data)
    plugin.unknown('Not enough data to generate average, need %d more checks. Waiting next check.' % missing)
# Calculate, per port alias, the CRC counter increase summed over the
# averaging window (consecutive record deltas).
port_stats = {}
logger.debug('-- Processing pickled data for the last %d records.' % plugin.options.avgrec)
# enumerate() replaces the O(n) (and duplicate-unsafe) last_records.index().
for index, data in enumerate(last_records):
    val = data['values']
    if not index:
        # The first record has no predecessor to diff against.
        continue
    prev_val = last_records[index - 1]['values']
    logger.debug("Prev: %s, Next: %s" % (last_records[index - 1]['timestamp'], data['timestamp']))
    for alias, stat in val.items():
        name = stat['name']
        crc = stat['crc']
        # A port absent from the previous record contributes a delta of 0
        # (the old code raised KeyError for newly-appeared aliases).
        # NOTE(review): a counter reset would still yield a negative delta.
        prev_crc = prev_val[alias]['crc'] if alias in prev_val else crc
        if alias not in port_stats:
            port_stats[alias] = {'crc': 0}
        port_stats[alias]['name'] = name
        port_stats[alias]['crc'] += crc - prev_crc
logger.debug('port_stats:')
logger.debug(pformat(port_stats, indent=4))
# Define some Nagios related stuff used in output and status
nagios_output = ""
nagios_longoutput = ""
nagios_perfdata = " | "
nagios_status = None
errors = {
    'warning': [],
    'critical': [],
}
# Classify every port against the thresholds and emit its perfdata counter.
for port, stat in port_stats.items():
    if plugin.options.warning < stat['crc'] <= plugin.options.critical:
        errors['warning'].append(port)
    elif stat['crc'] > plugin.options.critical:
        errors['critical'].append(port)
    nagios_perfdata += "\'Port_{name}\'={crc}c;{opt.warning};{opt.critical};0; ".format(crc=stat['crc'], name=port,
                                                                                        opt=plugin.options)
# Show short message in Nagios output
nbr_warn = len(errors['warning'])
nbr_crit = len(errors['critical'])
if not nbr_warn and not nbr_crit:
    nagios_status = plugin.ok
    nagios_output = "No CRC error detected on ports."
elif nbr_warn and not nbr_crit:
    nagios_status = plugin.warning
    nagios_output = "%d ports have warnings CRC errors !" % nbr_warn
elif nbr_crit and not nbr_warn:
    nagios_status = plugin.critical
    nagios_output = "%d ports have criticals CRC errors !" % nbr_crit
elif nbr_warn and nbr_crit:
    nagios_status = plugin.critical
    nagios_output = "%d ports have criticals, %d ports have warnings CRC errors !" % (nbr_crit, nbr_warn)
# Show average record time
nagios_output = "%s (Average on last %s mins)" % (nagios_output, avg_record_time)
# Error details in long output.  getattr() replaces the previous eval()
# call: same attribute lookup, without evaluating a constructed string.
for status in errors:
    if len(errors[status]):
        nagios_longoutput += "\n{status} ({nbrerr}) (>= {thr}):\n".format(status=status.title(),
                                                                          nbrerr=len(errors[status]),
                                                                          thr=getattr(plugin.options, status))
        for alias in errors[status]:
            nagios_longoutput += "  Port %s: %d crc (%s)\n" % (
                alias,
                port_stats[alias]['crc'],
                port_stats[alias]['name'],
            )
# Output and return status to Nagios (nagios_status is one of the
# plugin.ok/warning/critical exit callables set above).
output = nagios_output + nagios_longoutput + nagios_perfdata
nagios_status(output)
| 37.099174 | 118 | 0.597349 |
import logging as log
from pprint import pformat
import traceback
from time import time
from datetime import datetime
import math
from shared import __version__
from monitoring.nagios.plugin import NagiosPluginSNMP
logger = log.getLogger('plugin')
class IBMSanDirectorsCRC(NagiosPluginSNMP):
def initialize(self):
super(IBMSanDirectorsCRC, self).initialize()
self.runtime = time()
logger.debug('-- Plugin runtime: %s' % self.runtime)
def define_plugin_arguments(self):
super(IBMSanDirectorsCRC, self).define_plugin_arguments()
self.required_args.add_argument('-r',
dest='avgrec',
type=int,
default=2,
help="Make an average for the last <N> records (default to 2).",
)
self.required_args.add_argument('-w',
dest='warning',
type=int,
help="Warn if average number of CRCs are above this threshold.",
required=True,
)
self.required_args.add_argument('-c',
dest='critical',
type=int,
help="Crit if average number of CRCs are above this threshold.",
required=True,
)
def verify_plugin_arguments(self):
super(IBMSanDirectorsCRC, self).verify_plugin_arguments()
if self.options.avgrec < 2: self.unknown('Number of records must be >= 2 to make an average !')
if self.options.warning > self.options.critical:
self.unknown('Warning threshold cannot be above critical !')
elif self.options.warning < 0 or self.options.critical < 0:
self.unknown('Warning / Critical threshold cannot be below zero !')
progdesc = 'Check IBM SAN Directors for CRCs on ports.'
plugin = IBMSanDirectorsCRC(version=__version__, description=progdesc)
try:
retention_data = plugin.load_data()
except IOError:
retention_data = []
oids = {
'name': '1.3.6.1.4.1.1588.2.1.1.1.6.2.1.36',
'alias': '1.3.6.1.4.1.1588.2.1.1.1.6.2.1.37',
'crc': '1.3.6.1.4.1.1588.2.1.1.1.6.2.1.22',
}
query = plugin.snmp.getnext(oids)
snmp_results = {
'timestamp': plugin.runtime,
'values': {},
}
try:
for port in query['name']:
data = snmp_results['values']
name = port.pretty()
alias = [a.pretty() for a in query['alias'] if a.index == port.index ][0]
crc = [c.value for c in query['crc'] if c.index == port.index ][0]
if alias:
if not data.has_key(alias):
data[alias] = {}
if name:
data[alias]['name'] = name
else:
data[alias]['name'] = 'No description'
data[alias]['crc'] = crc
except KeyError:
message = """Unexpected error ! Please check plugin configuration.
If you see this message that would mean the query returned no result (empty OID) or the equipment does not
support such requests...
%s
Please check your plugin configuration first.""" % traceback.format_exc(limit=1)
plugin.unknown(message)
logger.debug('-- SNMP data:')
logger.debug(pformat(snmp_results, indent=4))
retention_data.append(snmp_results)
plugin.save_data(retention_data, limit=50)
last_records = retention_data[-plugin.options.avgrec:]
avg_record_time = 0
if len(retention_data) >= plugin.options.avgrec:
calc_total_seconds = datetime.fromtimestamp(plugin.runtime) - datetime.fromtimestamp(last_records[0]['timestamp'])
avg_record_time = int(math.ceil(calc_total_seconds.total_seconds()/60))
else:
missing = plugin.options.avgrec - len(retention_data)
plugin.unknown('Not enough data to generate average, need %d more checks. Waiting next check.' % missing)
port_stats = {}
logger.debug('-- Processing pickled data for the last %d records.' % plugin.options.avgrec)
for data in last_records:
index = last_records.index(data)
val = data['values']
prev_val = None
if not index:
continue
else:
prev_val = last_records[index-1]['values']
logger.debug("Prev: %s, Next: %s" % (last_records[index-1]['timestamp'], data['timestamp']))
for alias, stat in val.viewitems():
name = stat['name']
crc = stat['crc']
prev_crc = prev_val[alias]['crc']
if prev_val:
prev_crc = prev_val[alias]['crc']
if not port_stats.has_key(alias):
port_stats[alias] = {'crc': 0}
port_stats[alias]['name'] = name
port_stats[alias]['crc'] += crc - prev_crc
logger.debug('port_stats:')
logger.debug(pformat(port_stats, indent=4))
nagios_output = ""
nagios_longoutput = ""
nagios_perfdata = " | "
nagios_status = None
errors = {
'warning': [],
'critical': [],
}
for port, stat in port_stats.viewitems():
if plugin.options.warning < stat['crc'] <= plugin.options.critical:
errors['warning'].append(port)
elif stat['crc'] > plugin.options.critical:
errors['critical'].append(port)
nagios_perfdata += "\'Port_{name}\'={crc}c;{opt.warning};{opt.critical};0; ".format(crc=stat['crc'], name=port,
opt=plugin.options)
nbr_warn = len(errors['warning'])
nbr_crit = len(errors['critical'])
if not nbr_warn and not nbr_crit:
nagios_status = plugin.ok
nagios_output = "No CRC error detected on ports."
elif nbr_warn and not nbr_crit:
nagios_status = plugin.warning
nagios_output = "%d ports have warnings CRC errors !" % nbr_warn
elif nbr_crit and not nbr_warn:
nagios_status = plugin.critical
nagios_output = "%d ports have criticals CRC errors !" % nbr_crit
elif nbr_warn and nbr_crit:
nagios_status = plugin.critical
nagios_output = "%d ports have criticals, %d ports have warnings CRC errors !" % (nbr_crit, nbr_warn)
nagios_output = "%s (Average on last %s mins)" % (nagios_output, avg_record_time)
for status in errors:
if len(errors[status]):
nagios_longoutput += "\n{status} ({nbrerr}) (>= {thr}):\n".format(status=status.title(),
nbrerr=len(errors[status]),
thr=eval('plugin.options.{}'.format(status)))
for alias in errors[status]:
nagios_longoutput += " Port %s: %d crc (%s)\n" % (
alias,
port_stats[alias]['crc'],
port_stats[alias]['name'],
)
output = nagios_output + nagios_longoutput + nagios_perfdata
nagios_status(output)
| true | true |
1c2c6b4afff1c3c11a4c616543565ad5bc21a643 | 11,347 | py | Python | tests/auth_tests/test_validators.py | Fak3/django | 1ae8014a0bbae0cc1d951c1ee0f7888b6141f582 | [
"PSF-2.0",
"BSD-3-Clause"
] | 19 | 2015-07-07T02:08:59.000Z | 2021-11-08T11:05:40.000Z | tests/auth_tests/test_validators.py | Fak3/django | 1ae8014a0bbae0cc1d951c1ee0f7888b6141f582 | [
"PSF-2.0",
"BSD-3-Clause"
] | 2 | 2020-03-13T18:15:37.000Z | 2020-03-16T18:33:38.000Z | tests/auth_tests/test_validators.py | Fak3/django | 1ae8014a0bbae0cc1d951c1ee0f7888b6141f582 | [
"PSF-2.0",
"BSD-3-Clause"
] | 145 | 2019-03-14T18:54:45.000Z | 2022-03-04T20:25:31.000Z | import os
from django.contrib.auth import validators
from django.contrib.auth.models import User
from django.contrib.auth.password_validation import (
CommonPasswordValidator, MinimumLengthValidator, NumericPasswordValidator,
UserAttributeSimilarityValidator, get_default_password_validators,
get_password_validators, password_changed,
password_validators_help_text_html, password_validators_help_texts,
validate_password,
)
from django.core.exceptions import ValidationError
from django.db import models
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.utils import isolate_apps
from django.utils.html import conditional_escape
@override_settings(AUTH_PASSWORD_VALIDATORS=[
    {'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
    {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
        'min_length': 12,
    }},
])
class PasswordValidationTest(SimpleTestCase):
    """Tests for the module-level password-validation helper functions,
    run against a fixed two-validator configuration (common-password check
    plus a 12-character minimum length)."""
    def test_get_default_password_validators(self):
        validators = get_default_password_validators()
        self.assertEqual(len(validators), 2)
        self.assertEqual(validators[0].__class__.__name__, 'CommonPasswordValidator')
        self.assertEqual(validators[1].__class__.__name__, 'MinimumLengthValidator')
        # OPTIONS from settings are forwarded to the validator constructor.
        self.assertEqual(validators[1].min_length, 12)
    def test_get_password_validators_custom(self):
        validator_config = [{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'}]
        validators = get_password_validators(validator_config)
        self.assertEqual(len(validators), 1)
        self.assertEqual(validators[0].__class__.__name__, 'CommonPasswordValidator')
        # An empty config yields an empty validator list.
        self.assertEqual(get_password_validators([]), [])
    def test_validate_password(self):
        self.assertIsNone(validate_password('sufficiently-long'))
        msg_too_short = 'This password is too short. It must contain at least 12 characters.'
        with self.assertRaises(ValidationError) as cm:
            validate_password('django4242')
        self.assertEqual(cm.exception.messages, [msg_too_short])
        self.assertEqual(cm.exception.error_list[0].code, 'password_too_short')
        # Failures from multiple validators are accumulated in one error.
        with self.assertRaises(ValidationError) as cm:
            validate_password('password')
        self.assertEqual(cm.exception.messages, ['This password is too common.', msg_too_short])
        self.assertEqual(cm.exception.error_list[0].code, 'password_too_common')
        # Explicitly passing no validators disables validation entirely.
        self.assertIsNone(validate_password('password', password_validators=[]))
    def test_password_changed(self):
        self.assertIsNone(password_changed('password'))
    def test_password_changed_with_custom_validator(self):
        # password_changed() forwards password and user to each validator's
        # optional password_changed() hook.
        class Validator:
            def password_changed(self, password, user):
                self.password = password
                self.user = user
        user = object()
        validator = Validator()
        password_changed('password', user=user, password_validators=(validator,))
        self.assertIs(validator.user, user)
        self.assertEqual(validator.password, 'password')
    def test_password_validators_help_texts(self):
        help_texts = password_validators_help_texts()
        self.assertEqual(len(help_texts), 2)
        self.assertIn('12 characters', help_texts[1])
        self.assertEqual(password_validators_help_texts(password_validators=[]), [])
    def test_password_validators_help_text_html(self):
        help_text = password_validators_help_text_html()
        self.assertEqual(help_text.count('<li>'), 2)
        self.assertIn('12 characters', help_text)
    def test_password_validators_help_text_html_escaping(self):
        # Validator-supplied help text must be HTML-escaped in the output.
        class AmpersandValidator:
            def get_help_text(self):
                return 'Must contain &'
        help_text = password_validators_help_text_html([AmpersandValidator()])
        self.assertEqual(help_text, '<ul><li>Must contain &amp;</li></ul>')
        # help_text is marked safe and therefore unchanged by conditional_escape().
        self.assertEqual(help_text, conditional_escape(help_text))
    @override_settings(AUTH_PASSWORD_VALIDATORS=[])
    def test_empty_password_validator_help_text_html(self):
        self.assertEqual(password_validators_help_text_html(), '')
class MinimumLengthValidatorTest(SimpleTestCase):
    """Behavior of MinimumLengthValidator with default and custom minimums."""
    def test_validate(self):
        error_template = "This password is too short. It must contain at least %d characters."
        # Passwords at or above the minimum validate silently.
        self.assertIsNone(MinimumLengthValidator().validate('12345678'))
        self.assertIsNone(MinimumLengthValidator(min_length=3).validate('123'))
        # One character short of the default minimum (8).
        with self.assertRaises(ValidationError) as ctx:
            MinimumLengthValidator().validate('1234567')
        self.assertEqual(ctx.exception.messages, [error_template % 8])
        self.assertEqual(ctx.exception.error_list[0].code, 'password_too_short')
        # One character short of a custom minimum (3).
        with self.assertRaises(ValidationError) as ctx:
            MinimumLengthValidator(min_length=3).validate('12')
        self.assertEqual(ctx.exception.messages, [error_template % 3])
    def test_help_text(self):
        expected = "Your password must contain at least 8 characters."
        self.assertEqual(MinimumLengthValidator().get_help_text(), expected)
class UserAttributeSimilarityValidatorTest(TestCase):
    """Tests for UserAttributeSimilarityValidator: rejection of passwords
    resembling user attributes, with configurable attribute list and
    max_similarity threshold."""
    def test_validate(self):
        user = User.objects.create_user(
            username='testclient', password='password', email='testclient@example.com',
            first_name='Test', last_name='Client',
        )
        expected_error = "The password is too similar to the %s."
        # Without a user there is nothing to compare against.
        self.assertIsNone(UserAttributeSimilarityValidator().validate('testclient'))
        with self.assertRaises(ValidationError) as cm:
            UserAttributeSimilarityValidator().validate('testclient', user=user),
        self.assertEqual(cm.exception.messages, [expected_error % "username"])
        self.assertEqual(cm.exception.error_list[0].code, 'password_too_similar')
        with self.assertRaises(ValidationError) as cm:
            UserAttributeSimilarityValidator().validate('example.com', user=user),
        self.assertEqual(cm.exception.messages, [expected_error % "email address"])
        with self.assertRaises(ValidationError) as cm:
            UserAttributeSimilarityValidator(
                user_attributes=['first_name'],
                max_similarity=0.3,
            ).validate('testclient', user=user)
        self.assertEqual(cm.exception.messages, [expected_error % "first name"])
        # max_similarity=1 doesn't allow passwords that are identical to the
        # attribute's value.
        with self.assertRaises(ValidationError) as cm:
            UserAttributeSimilarityValidator(
                user_attributes=['first_name'],
                max_similarity=1,
            ).validate(user.first_name, user=user)
        self.assertEqual(cm.exception.messages, [expected_error % "first name"])
        # max_similarity=0 rejects all passwords.
        with self.assertRaises(ValidationError) as cm:
            UserAttributeSimilarityValidator(
                user_attributes=['first_name'],
                max_similarity=0,
            ).validate('XXX', user=user)
        self.assertEqual(cm.exception.messages, [expected_error % "first name"])
        # Passes validation: 'testclient' is dissimilar enough to 'Test'.
        self.assertIsNone(
            UserAttributeSimilarityValidator(user_attributes=['first_name']).validate('testclient', user=user)
        )
    @isolate_apps('auth_tests')
    def test_validate_property(self):
        # Attributes exposed as properties are compared too.
        class TestUser(models.Model):
            pass
            @property
            def username(self):
                return 'foobar'
        with self.assertRaises(ValidationError) as cm:
            UserAttributeSimilarityValidator().validate('foobar', user=TestUser()),
        self.assertEqual(cm.exception.messages, ['The password is too similar to the username.'])
    def test_help_text(self):
        self.assertEqual(
            UserAttributeSimilarityValidator().get_help_text(),
            "Your password can't be too similar to your other personal information."
        )
class CommonPasswordValidatorTest(SimpleTestCase):
    """Tests for CommonPasswordValidator with the bundled and a custom
    password list."""
    def test_validate(self):
        expected_error = "This password is too common."
        self.assertIsNone(CommonPasswordValidator().validate('a-safe-password'))
        with self.assertRaises(ValidationError) as cm:
            CommonPasswordValidator().validate('godzilla')
        self.assertEqual(cm.exception.messages, [expected_error])
    def test_validate_custom_list(self):
        # A custom password list can be supplied via password_list_path.
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'common-passwords-custom.txt')
        validator = CommonPasswordValidator(password_list_path=path)
        expected_error = "This password is too common."
        self.assertIsNone(validator.validate('a-safe-password'))
        with self.assertRaises(ValidationError) as cm:
            validator.validate('from-my-custom-list')
        self.assertEqual(cm.exception.messages, [expected_error])
        self.assertEqual(cm.exception.error_list[0].code, 'password_too_common')
    def test_validate_django_supplied_file(self):
        # Every entry in the bundled list must already be lowercase.
        validator = CommonPasswordValidator()
        for password in validator.passwords:
            self.assertEqual(password, password.lower())
    def test_help_text(self):
        self.assertEqual(
            CommonPasswordValidator().get_help_text(),
            "Your password can't be a commonly used password."
        )
class NumericPasswordValidatorTest(SimpleTestCase):
    """Behavior of NumericPasswordValidator."""
    def test_validate(self):
        # A password containing non-digits passes.
        self.assertIsNone(NumericPasswordValidator().validate('a-safe-password'))
        # A digits-only password is rejected with the dedicated error code.
        with self.assertRaises(ValidationError) as ctx:
            NumericPasswordValidator().validate('42424242')
        self.assertEqual(ctx.exception.messages, ["This password is entirely numeric."])
        self.assertEqual(ctx.exception.error_list[0].code, 'password_entirely_numeric')
    def test_help_text(self):
        expected = "Your password can't be entirely numeric."
        self.assertEqual(NumericPasswordValidator().get_help_text(), expected)
class UsernameValidatorsTests(SimpleTestCase):
    """Tests for the Unicode and ASCII username validators."""
    def test_unicode_validator(self):
        valid_usernames = ['joe', 'René', 'ᴮᴵᴳᴮᴵᴿᴰ', 'أحمد']
        # Whitespace variants (zero-width, non-breaking) and punctuation
        # outside the allowed set must be rejected.
        invalid_usernames = [
            "o'connell", "عبد ال",
            "zerowidth\u200Bspace", "nonbreaking\u00A0space",
            "en\u2013dash",
        ]
        v = validators.UnicodeUsernameValidator()
        for valid in valid_usernames:
            with self.subTest(valid=valid):
                v(valid)
        for invalid in invalid_usernames:
            with self.subTest(invalid=invalid):
                with self.assertRaises(ValidationError):
                    v(invalid)
    def test_ascii_validator(self):
        valid_usernames = ['glenn', 'GLEnN', 'jean-marc']
        # Non-ASCII letters and spaces are rejected by the ASCII validator.
        invalid_usernames = ["o'connell", 'Éric', 'jean marc', "أحمد"]
        v = validators.ASCIIUsernameValidator()
        for valid in valid_usernames:
            with self.subTest(valid=valid):
                v(valid)
        for invalid in invalid_usernames:
            with self.subTest(invalid=invalid):
                with self.assertRaises(ValidationError):
                    v(invalid)
| 43.30916 | 110 | 0.689345 | import os
from django.contrib.auth import validators
from django.contrib.auth.models import User
from django.contrib.auth.password_validation import (
CommonPasswordValidator, MinimumLengthValidator, NumericPasswordValidator,
UserAttributeSimilarityValidator, get_default_password_validators,
get_password_validators, password_changed,
password_validators_help_text_html, password_validators_help_texts,
validate_password,
)
from django.core.exceptions import ValidationError
from django.db import models
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.utils import isolate_apps
from django.utils.html import conditional_escape
@override_settings(AUTH_PASSWORD_VALIDATORS=[
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
'min_length': 12,
}},
])
class PasswordValidationTest(SimpleTestCase):
def test_get_default_password_validators(self):
validators = get_default_password_validators()
self.assertEqual(len(validators), 2)
self.assertEqual(validators[0].__class__.__name__, 'CommonPasswordValidator')
self.assertEqual(validators[1].__class__.__name__, 'MinimumLengthValidator')
self.assertEqual(validators[1].min_length, 12)
def test_get_password_validators_custom(self):
validator_config = [{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator'}]
validators = get_password_validators(validator_config)
self.assertEqual(len(validators), 1)
self.assertEqual(validators[0].__class__.__name__, 'CommonPasswordValidator')
self.assertEqual(get_password_validators([]), [])
def test_validate_password(self):
self.assertIsNone(validate_password('sufficiently-long'))
msg_too_short = 'This password is too short. It must contain at least 12 characters.'
with self.assertRaises(ValidationError) as cm:
validate_password('django4242')
self.assertEqual(cm.exception.messages, [msg_too_short])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_short')
with self.assertRaises(ValidationError) as cm:
validate_password('password')
self.assertEqual(cm.exception.messages, ['This password is too common.', msg_too_short])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_common')
self.assertIsNone(validate_password('password', password_validators=[]))
def test_password_changed(self):
self.assertIsNone(password_changed('password'))
def test_password_changed_with_custom_validator(self):
class Validator:
def password_changed(self, password, user):
self.password = password
self.user = user
user = object()
validator = Validator()
password_changed('password', user=user, password_validators=(validator,))
self.assertIs(validator.user, user)
self.assertEqual(validator.password, 'password')
def test_password_validators_help_texts(self):
help_texts = password_validators_help_texts()
self.assertEqual(len(help_texts), 2)
self.assertIn('12 characters', help_texts[1])
self.assertEqual(password_validators_help_texts(password_validators=[]), [])
def test_password_validators_help_text_html(self):
help_text = password_validators_help_text_html()
self.assertEqual(help_text.count('<li>'), 2)
self.assertIn('12 characters', help_text)
def test_password_validators_help_text_html_escaping(self):
class AmpersandValidator:
def get_help_text(self):
return 'Must contain &'
help_text = password_validators_help_text_html([AmpersandValidator()])
self.assertEqual(help_text, '<ul><li>Must contain &</li></ul>')
self.assertEqual(help_text, conditional_escape(help_text))
@override_settings(AUTH_PASSWORD_VALIDATORS=[])
def test_empty_password_validator_help_text_html(self):
self.assertEqual(password_validators_help_text_html(), '')
class MinimumLengthValidatorTest(SimpleTestCase):
def test_validate(self):
expected_error = "This password is too short. It must contain at least %d characters."
self.assertIsNone(MinimumLengthValidator().validate('12345678'))
self.assertIsNone(MinimumLengthValidator(min_length=3).validate('123'))
with self.assertRaises(ValidationError) as cm:
MinimumLengthValidator().validate('1234567')
self.assertEqual(cm.exception.messages, [expected_error % 8])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_short')
with self.assertRaises(ValidationError) as cm:
MinimumLengthValidator(min_length=3).validate('12')
self.assertEqual(cm.exception.messages, [expected_error % 3])
def test_help_text(self):
self.assertEqual(
MinimumLengthValidator().get_help_text(),
"Your password must contain at least 8 characters."
)
class UserAttributeSimilarityValidatorTest(TestCase):
def test_validate(self):
user = User.objects.create_user(
username='testclient', password='password', email='testclient@example.com',
first_name='Test', last_name='Client',
)
expected_error = "The password is too similar to the %s."
self.assertIsNone(UserAttributeSimilarityValidator().validate('testclient'))
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator().validate('testclient', user=user),
self.assertEqual(cm.exception.messages, [expected_error % "username"])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_similar')
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator().validate('example.com', user=user),
self.assertEqual(cm.exception.messages, [expected_error % "email address"])
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator(
user_attributes=['first_name'],
max_similarity=0.3,
).validate('testclient', user=user)
self.assertEqual(cm.exception.messages, [expected_error % "first name"])
# attribute's value.
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator(
user_attributes=['first_name'],
max_similarity=1,
).validate(user.first_name, user=user)
self.assertEqual(cm.exception.messages, [expected_error % "first name"])
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator(
user_attributes=['first_name'],
max_similarity=0,
).validate('XXX', user=user)
self.assertEqual(cm.exception.messages, [expected_error % "first name"])
self.assertIsNone(
UserAttributeSimilarityValidator(user_attributes=['first_name']).validate('testclient', user=user)
)
@isolate_apps('auth_tests')
def test_validate_property(self):
class TestUser(models.Model):
pass
@property
def username(self):
return 'foobar'
with self.assertRaises(ValidationError) as cm:
UserAttributeSimilarityValidator().validate('foobar', user=TestUser()),
self.assertEqual(cm.exception.messages, ['The password is too similar to the username.'])
def test_help_text(self):
self.assertEqual(
UserAttributeSimilarityValidator().get_help_text(),
"Your password can't be too similar to your other personal information."
)
class CommonPasswordValidatorTest(SimpleTestCase):
def test_validate(self):
expected_error = "This password is too common."
self.assertIsNone(CommonPasswordValidator().validate('a-safe-password'))
with self.assertRaises(ValidationError) as cm:
CommonPasswordValidator().validate('godzilla')
self.assertEqual(cm.exception.messages, [expected_error])
def test_validate_custom_list(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'common-passwords-custom.txt')
validator = CommonPasswordValidator(password_list_path=path)
expected_error = "This password is too common."
self.assertIsNone(validator.validate('a-safe-password'))
with self.assertRaises(ValidationError) as cm:
validator.validate('from-my-custom-list')
self.assertEqual(cm.exception.messages, [expected_error])
self.assertEqual(cm.exception.error_list[0].code, 'password_too_common')
def test_validate_django_supplied_file(self):
validator = CommonPasswordValidator()
for password in validator.passwords:
self.assertEqual(password, password.lower())
def test_help_text(self):
self.assertEqual(
CommonPasswordValidator().get_help_text(),
"Your password can't be a commonly used password."
)
class NumericPasswordValidatorTest(SimpleTestCase):
def test_validate(self):
expected_error = "This password is entirely numeric."
self.assertIsNone(NumericPasswordValidator().validate('a-safe-password'))
with self.assertRaises(ValidationError) as cm:
NumericPasswordValidator().validate('42424242')
self.assertEqual(cm.exception.messages, [expected_error])
self.assertEqual(cm.exception.error_list[0].code, 'password_entirely_numeric')
def test_help_text(self):
self.assertEqual(
NumericPasswordValidator().get_help_text(),
"Your password can't be entirely numeric."
)
class UsernameValidatorsTests(SimpleTestCase):
def test_unicode_validator(self):
valid_usernames = ['joe', 'René', 'ᴮᴵᴳᴮᴵᴿᴰ', 'أحمد']
invalid_usernames = [
"o'connell", "عبد ال",
"zerowidth\u200Bspace", "nonbreaking\u00A0space",
"en\u2013dash",
]
v = validators.UnicodeUsernameValidator()
for valid in valid_usernames:
with self.subTest(valid=valid):
v(valid)
for invalid in invalid_usernames:
with self.subTest(invalid=invalid):
with self.assertRaises(ValidationError):
v(invalid)
def test_ascii_validator(self):
valid_usernames = ['glenn', 'GLEnN', 'jean-marc']
invalid_usernames = ["o'connell", 'Éric', 'jean marc', "أحمد"]
v = validators.ASCIIUsernameValidator()
for valid in valid_usernames:
with self.subTest(valid=valid):
v(valid)
for invalid in invalid_usernames:
with self.subTest(invalid=invalid):
with self.assertRaises(ValidationError):
v(invalid)
| true | true |
1c2c6ba87184c8614268e8666ecd7ffc3d3da608 | 1,868 | py | Python | tests/acceptance/test_organization_rate_limits.py | JannKleen/sentry | 8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88 | [
"BSD-3-Clause"
] | 1 | 2019-02-27T15:13:06.000Z | 2019-02-27T15:13:06.000Z | tests/acceptance/test_organization_rate_limits.py | rmax/sentry | 8b29c8234bb51a81d5cab821a1f2ed4ea8e8bd88 | [
"BSD-3-Clause"
] | 5 | 2020-07-17T11:20:41.000Z | 2021-05-09T12:16:53.000Z | tests/acceptance/test_organization_rate_limits.py | zaasmi/codeerrorhelp | 1ab8d3e314386b9b2d58dad9df45355bf6014ac9 | [
"BSD-3-Clause"
] | 2 | 2021-01-26T09:53:39.000Z | 2022-03-22T09:01:47.000Z | from __future__ import absolute_import
from django.utils import timezone
from mock import Mock, patch
from sentry.testutils import AcceptanceTestCase
class OrganizationRateLimitsTest(AcceptanceTestCase):
    """Browser acceptance tests for the organization rate-limits settings page.

    Creates an org/team/project with an owner member, logs in, then checks
    that the rate-limit editor renders both with and without a quota
    (the maximum quota is patched via mock in each test).
    """
    def setUp(self):
        super(OrganizationRateLimitsTest, self).setUp()
        self.user = self.create_user('foo@example.com')
        self.org = self.create_organization(
            name='Rowdy Tiger',
            owner=None,
        )
        self.team = self.create_team(organization=self.org, name='Mariachi Band')
        self.project = self.create_project(
            organization=self.org,
            team=self.team,
            name='Bengal',
        )
        self.create_member(
            user=self.user,
            organization=self.org,
            role='owner',
            teams=[self.team],
        )
        self.login_as(self.user)
        self.path = '/organizations/{}/rate-limits/'.format(self.org.slug)
    @patch('sentry.app.quotas.get_maximum_quota', Mock(return_value=(100, 60)))
    def test_with_rate_limits(self):
        # Non-zero quota (100 events / 60s): editor should be present.
        self.project.update(first_event=timezone.now())
        self.browser.get(self.path)
        self.browser.wait_until('.organization-home')
        self.browser.wait_until_not('.loading-indicator')
        self.browser.snapshot('organization rate limits with quota')
        assert self.browser.element_exists('.ref-rate-limit-editor')
    @patch('sentry.app.quotas.get_maximum_quota', Mock(return_value=(0, 60)))
    def test_without_rate_limits(self):
        # Zero quota: page still renders the editor element.
        self.project.update(first_event=timezone.now())
        self.browser.get(self.path)
        self.browser.wait_until('.organization-home')
        self.browser.wait_until_not('.loading-indicator')
        self.browser.snapshot('organization rate limits without quota')
        assert self.browser.element_exists('.ref-rate-limit-editor')
| 38.122449 | 81 | 0.662741 | from __future__ import absolute_import
from django.utils import timezone
from mock import Mock, patch
from sentry.testutils import AcceptanceTestCase
class OrganizationRateLimitsTest(AcceptanceTestCase):
    """Acceptance test: the organization rate-limits page shows its editor
    whether the patched maximum quota is non-zero or zero, and a UI snapshot
    is captured for each case."""
    def setUp(self):
        super(OrganizationRateLimitsTest, self).setUp()
        self.user = self.create_user('foo@example.com')
        self.org = self.create_organization(
            name='Rowdy Tiger',
            owner=None,
        )
        self.team = self.create_team(organization=self.org, name='Mariachi Band')
        self.project = self.create_project(
            organization=self.org,
            team=self.team,
            name='Bengal',
        )
        self.create_member(
            user=self.user,
            organization=self.org,
            role='owner',
            teams=[self.team],
        )
        self.login_as(self.user)
        self.path = '/organizations/{}/rate-limits/'.format(self.org.slug)
    @patch('sentry.app.quotas.get_maximum_quota', Mock(return_value=(100, 60)))
    def test_with_rate_limits(self):
        self.project.update(first_event=timezone.now())
        self.browser.get(self.path)
        self.browser.wait_until('.organization-home')
        self.browser.wait_until_not('.loading-indicator')
        self.browser.snapshot('organization rate limits with quota')
        assert self.browser.element_exists('.ref-rate-limit-editor')
    @patch('sentry.app.quotas.get_maximum_quota', Mock(return_value=(0, 60)))
    def test_without_rate_limits(self):
        self.project.update(first_event=timezone.now())
        self.browser.get(self.path)
        self.browser.wait_until('.organization-home')
        self.browser.wait_until_not('.loading-indicator')
        self.browser.snapshot('organization rate limits without quota')
        assert self.browser.element_exists('.ref-rate-limit-editor')
| true | true |
1c2c6cb9e5f2a0bb11c0027ddc29692246a672bf | 712 | py | Python | Algorithms/Easy/500. Keyboard Row/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Easy/500. Keyboard Row/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | Algorithms/Easy/500. Keyboard Row/answer.py | KenWoo/Algorithm | 4012a2f0a099a502df1e5df2e39faa75fe6463e8 | [
"Apache-2.0"
] | null | null | null | from typing import List
class Solution:
    def findWords(self, words: List[str]) -> List[str]:
        """Return the words typeable with letters from one keyboard row.

        Matching is case-insensitive and input order is preserved.
        Characters that appear on no row are ignored (as before), and a
        word containing no row letters (e.g. the empty string) now counts
        as typeable — the original raised IndexError on such input.
        """
        rows = (set("qwertyuiop"), set("asdfghjkl"), set("zxcvbnm"))
        result = []
        for word in words:
            # Set of distinct row indices this word's letters touch.
            used = {idx
                    for ch in word.lower()
                    for idx, row in enumerate(rows)
                    if ch in row}
            # 0 rows (no keyboard letters) or exactly 1 row: typeable.
            if len(used) <= 1:
                result.append(word)
        return result
if __name__ == "__main__":
    # Ad-hoc smoke run; expected output: ['Alaska', 'Dad']
    solver = Solution()
    answer = solver.findWords(["Hello", "Alaska", "Dad", "Peace"])
    print(answer)
| 23.733333 | 61 | 0.414326 | from typing import List
class Solution:
    def findWords(self, words: List[str]) -> List[str]:
        """Return, in input order, the words whose letters all come from a
        single keyboard row (case-insensitive)."""
        rows = ("qwertyuiop", "asdfghjkl", "zxcvbnm")
        matched = []
        for word in words:
            # Row index of every recognized character, sorted so the word
            # qualifies exactly when first and last index coincide.  (An
            # empty word raises IndexError here, as the original did.)
            hits = sorted(
                idx
                for ch in word
                for idx, row in enumerate(rows)
                if ch.lower() in row
            )
            if hits[0] == hits[-1]:
                matched.append(word)
        return matched
if __name__ == "__main__":
    # Manual check using the sample input from the problem statement.
    print(Solution().findWords(["Hello", "Alaska", "Dad", "Peace"]))
| true | true |
1c2c6d144c9c98ed224fc1dcd860aa913fbc7268 | 5,400 | py | Python | src/sage/repl/configuration.py | fchapoton/sage | 765c5cb3e24dd134708eca97e4c52e0221cd94ba | [
"BSL-1.0"
] | 4 | 2020-07-17T04:49:44.000Z | 2020-07-29T06:33:51.000Z | src/sage/repl/configuration.py | Ivo-Maffei/sage | 467fbc70a08b552b3de33d9065204ee9cbfb02c7 | [
"BSL-1.0"
] | 1 | 2020-04-18T16:30:43.000Z | 2020-04-18T16:30:43.000Z | src/sage/repl/configuration.py | dimpase/sage | 468f23815ade42a2192b0a9cd378de8fdc594dcd | [
"BSL-1.0"
] | 1 | 2020-07-23T10:40:14.000Z | 2020-07-23T10:40:14.000Z | r"""
Sage's IPython Configuration
TESTS:
We check that Sage stdin can be piped in even if stdout is a tty; In that case
the IPython simple prompt is being used::
sage: cmd = 'print([sys.stdin.isatty(), sys.stdout.isatty()])'
sage: import pexpect
sage: output = pexpect.run(
....: 'bash -c \'echo "{0}" | sage\''.format(cmd),
....: ).decode('utf-8', 'surrogateescape')
sage: 'sage: [False, True]' in output
True
"""
#*****************************************************************************
# Copyright (C) 2016 Volker Braun <vbraun.name@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import absolute_import
import sys
import copy
from traitlets.config.loader import Config
from sage.repl.prompts import SagePrompts
# Name of the Sage IPython extension
SAGE_EXTENSION = 'sage'
class SageIpythonConfiguration(object):
    """Sage-specific IPython configuration helpers (colors, prompts, defaults)."""
    def _doctest_mode(self):
        """
        Whether we are in doctest mode
        This returns ``True`` during doctests.
        EXAMPLES::
            sage: from sage.repl.configuration import sage_ipython_config
            sage: sage_ipython_config._doctest_mode()
            True
        """
        from sage.doctest import DOCTEST_MODE
        return DOCTEST_MODE
    def _allow_ansi(self):
        """
        Whether to allow ANSI escape sequences
        This returns ``False`` during doctests to avoid ANSI escape
        sequences.
        EXAMPLES::
            sage: from sage.repl.configuration import sage_ipython_config
            sage: sage_ipython_config._allow_ansi()
            False
        """
        return (not self._doctest_mode()) and sys.stdin.isatty() and sys.stdout.isatty()
    def colors(self):
        """
        Return the IPython color palette
        This returns ``'NoColor'`` during doctests to avoid ANSI escape
        sequences.
        EXAMPLES::
            sage: from sage.repl.configuration import sage_ipython_config
            sage: sage_ipython_config.colors()
            'NoColor'
        """
        return 'LightBG' if self._allow_ansi() else 'NoColor'
    def simple_prompt(self):
        """
        Return whether to use the simple prompt
        This returns ``True`` during doctests to avoid ANSI escape sequences.
        EXAMPLES::
            sage: from sage.repl.configuration import sage_ipython_config
            sage: sage_ipython_config.simple_prompt()
            True
        """
        return not self._allow_ansi()
    def term_title(self):
        """
        Return whether to set the terminal title
        This returns false during doctests to avoid ANSI escape sequences.
        EXAMPLES::
            sage: from sage.repl.configuration import sage_ipython_config
            sage: sage_ipython_config.term_title()
            False
        """
        return self._allow_ansi()
    def default(self):
        """
        Return a new default configuration object
        EXAMPLES::
            sage: from sage.repl.configuration import sage_ipython_config
            sage: conf = sage_ipython_config.default()
            sage: type(conf)
            <class 'traitlets.config.loader.Config'>
            sage: 'InteractiveShell' in conf
            True
        """
        from sage.repl.interpreter import SageTerminalInteractiveShell
        # Use the same config for both InteractiveShell, and its subclass
        # TerminalInteractiveShell (note: in fact some configs like term_title
        # only apply to the latter, but we can still use the same config for
        # both for simplicity's sake; see Trac #28289)
        InteractiveShell=Config(
            prompts_class=SagePrompts,
            ast_node_interactivity='all',
            colors=self.colors(),
            simple_prompt=self.simple_prompt(),
            term_title=self.term_title(),
            confirm_exit=False,
            separate_in=''
        )
        cfg = Config(
            TerminalIPythonApp=Config(
                display_banner=False,
                verbose_crash=True,
                test_shell=False,
                shell_class=SageTerminalInteractiveShell,
            ),
            InteractiveShell=InteractiveShell,
            TerminalInteractiveShell=InteractiveShell,
            InteractiveShellApp=Config(extensions=[SAGE_EXTENSION]),
        )
        if self._doctest_mode():
            # Using the file-backed history causes problems in parallel tests
            cfg.HistoryManager = Config(hist_file=':memory:')
        return cfg
    def copy(self):
        """
        Return a copy of the current configuration
        EXAMPLES::
            sage: from sage.repl.configuration import sage_ipython_config
            sage: conf = sage_ipython_config.copy()
            sage: type(conf)
            <class 'traitlets.config.loader.Config'>
            sage: 'InteractiveShell' in conf
            True
        """
        try:
            return copy.deepcopy(get_ipython().config)
        except NameError:
            # No running IPython: get_ipython is not defined.
            return self.default()
# Module-level singleton used by the rest of sage.repl.
sage_ipython_config = SageIpythonConfiguration()
| 30 | 88 | 0.599074 |
from __future__ import absolute_import
import sys
import copy
from traitlets.config.loader import Config
from sage.repl.prompts import SagePrompts
SAGE_EXTENSION = 'sage'
class SageIpythonConfiguration(object):
    """Helpers producing Sage's IPython configuration."""
    def _doctest_mode(self):
        """Return whether we are running inside the Sage doctest framework."""
        from sage.doctest import DOCTEST_MODE
        return DOCTEST_MODE
    def _allow_ansi(self):
        """Return whether ANSI escape sequences may be emitted.

        False during doctests or when stdin/stdout is not a tty.
        """
        return (not self._doctest_mode()) and sys.stdin.isatty() and sys.stdout.isatty()
    def colors(self):
        """Return the IPython color palette name ('NoColor' when ANSI is disallowed)."""
        return 'LightBG' if self._allow_ansi() else 'NoColor'
    def simple_prompt(self):
        """Return whether to use IPython's simple (escape-free) prompt."""
        return not self._allow_ansi()
    def term_title(self):
        """Return whether IPython may set the terminal title."""
        return self._allow_ansi()
    def default(self):
        """Build and return a fresh default traitlets ``Config`` for Sage's shell."""
        from sage.repl.interpreter import SageTerminalInteractiveShell
        # The same config object is shared by InteractiveShell and its
        # subclass TerminalInteractiveShell.
        InteractiveShell=Config(
            prompts_class=SagePrompts,
            ast_node_interactivity='all',
            colors=self.colors(),
            simple_prompt=self.simple_prompt(),
            term_title=self.term_title(),
            confirm_exit=False,
            separate_in=''
        )
        cfg = Config(
            TerminalIPythonApp=Config(
                display_banner=False,
                verbose_crash=True,
                test_shell=False,
                shell_class=SageTerminalInteractiveShell,
            ),
            InteractiveShell=InteractiveShell,
            TerminalInteractiveShell=InteractiveShell,
            InteractiveShellApp=Config(extensions=[SAGE_EXTENSION]),
        )
        if self._doctest_mode():
            # Using the file-backed history causes problems in parallel tests
            cfg.HistoryManager = Config(hist_file=':memory:')
        return cfg
    def copy(self):
        """Return a deep copy of the running IPython's config, or the default
        config when no IPython is active (get_ipython undefined)."""
        try:
            return copy.deepcopy(get_ipython().config)
        except NameError:
            return self.default()
# Shared singleton configuration instance.
sage_ipython_config = SageIpythonConfiguration()
| true | true |
1c2c6d5ee9b956cecf5d71c01f591b811b204490 | 57,919 | py | Python | test/functional/test_framework/mininode.py | honeycomb-project/litecoin | 636cc4974613040ca15bdedf803ec5ac2bb59c30 | [
"MIT"
] | null | null | null | test/functional/test_framework/mininode.py | honeycomb-project/litecoin | 636cc4974613040ca15bdedf803ec5ac2bb59c30 | [
"MIT"
] | null | null | null | test/functional/test_framework/mininode.py | honeycomb-project/litecoin | 636cc4974613040ca15bdedf803ec5ac2bb59c30 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
NodeConn: an object which manages p2p connectivity to a bitcoin node
NodeConnCB: a base class that describes the interface for receiving
callbacks with network messages from a NodeConn
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization
"""
import asyncore
from codecs import encode
from collections import defaultdict
import copy
import hashlib
from io import BytesIO
import logging
import random
import socket
import struct
import sys
import time
from threading import RLock, Thread
import honeycomb_scrypt
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
# Earliest protocol version that understands ping nonces (BIP 31).
BIP0031_VERSION = 60000
MY_VERSION = 80014  # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
# Protocol limits used when building/validating messages.
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
# Service flag bits advertised in version messages.
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
logger = logging.getLogger("TestFramework.mininode")
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
    """Return the single SHA256 digest of the byte string *s*."""
    return hashlib.sha256(s).digest()
def ripemd160(s):
    # RIPEMD-160 digest of s.  NOTE(review): availability depends on the
    # OpenSSL build backing hashlib -- confirm on deployment targets.
    return hashlib.new('ripemd160', s).digest()
def hash256(s):
    """Return the double-SHA256 digest of *s* (Bitcoin's standard hash)."""
    inner = sha256(s)
    return sha256(inner)
def ser_compact_size(l):
    """Serialize an integer as a Bitcoin CompactSize (varint) byte string."""
    if l < 253:
        return struct.pack("B", l)
    if l < 0x10000:
        return struct.pack("<BH", 253, l)
    if l < 0x100000000:
        return struct.pack("<BI", 254, l)
    return struct.pack("<BQ", 255, l)
def deser_compact_size(f):
    """Read a Bitcoin CompactSize (varint) from stream *f*, returning an int."""
    marker = struct.unpack("<B", f.read(1))[0]
    # 253/254/255 prefix a 2/4/8-byte little-endian extension; anything
    # smaller is the value itself.
    extensions = {253: "<H", 254: "<I", 255: "<Q"}
    fmt = extensions.get(marker)
    if fmt is None:
        return marker
    return struct.unpack(fmt, f.read(struct.calcsize(fmt)))[0]
def deser_string(f):
    """Read a CompactSize-length-prefixed byte string from stream *f*."""
    length = deser_compact_size(f)
    return f.read(length)
def ser_string(s):
    """Serialize the byte string *s* with a CompactSize length prefix."""
    prefix = ser_compact_size(len(s))
    return prefix + s
def deser_uint256(f):
    """Read a 256-bit little-endian integer (eight 32-bit words) from *f*."""
    words = struct.unpack("<IIIIIIII", f.read(32))
    return sum(w << (32 * i) for i, w in enumerate(words))
def ser_uint256(u):
    """Serialize *u* as 32 little-endian bytes (eight 32-bit words)."""
    return b"".join(
        struct.pack("<I", (u >> (32 * i)) & 0xFFFFFFFF) for i in range(8)
    )
def uint256_from_str(s):
    """Convert the first 32 bytes of *s* (little-endian) to an integer."""
    words = struct.unpack("<IIIIIIII", s[:32])
    return sum(w << (32 * i) for i, w in enumerate(words))
def uint256_from_compact(c):
    """Expand a 'compact' (nBits) target encoding into a full integer.

    The top byte is a base-256 exponent and the low three bytes the mantissa.
    """
    exponent = (c >> 24) & 0xFF
    mantissa = c & 0xFFFFFF
    return mantissa << (8 * (exponent - 3))
def deser_vector(f, c):
    """Read a CompactSize-prefixed vector of objects of class *c* from *f*.

    Each element is constructed with c() and filled via its deserialize().
    """
    count = deser_compact_size(f)
    items = []
    for _ in range(count):
        obj = c()
        obj.deserialize(f)
        items.append(obj)
    return items
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
    """Serialize a vector with a CompactSize length prefix.

    ser_function_name optionally names an alternate serializer method on
    each element (used for the witness serialization of transactions).
    """
    out = ser_compact_size(len(l))
    for item in l:
        serializer = getattr(item, ser_function_name) if ser_function_name else item.serialize
        out += serializer()
    return out
def deser_uint256_vector(f):
    """Read a CompactSize-prefixed vector of 256-bit integers from *f*."""
    count = deser_compact_size(f)
    return [deser_uint256(f) for _ in range(count)]
def ser_uint256_vector(l):
    """Serialize a vector of 256-bit integers with a CompactSize prefix."""
    return ser_compact_size(len(l)) + b"".join(ser_uint256(u) for u in l)
def deser_string_vector(f):
    """Read a CompactSize-prefixed vector of byte strings from *f*."""
    count = deser_compact_size(f)
    return [deser_string(f) for _ in range(count)]
def ser_string_vector(l):
    """Serialize a vector of byte strings with a CompactSize prefix."""
    return ser_compact_size(len(l)) + b"".join(ser_string(sv) for sv in l)
def deser_int_vector(f):
    """Read a CompactSize-prefixed vector of signed 32-bit ints from *f*."""
    count = deser_compact_size(f)
    return [struct.unpack("<i", f.read(4))[0] for _ in range(count)]
def ser_int_vector(l):
    """Serialize a vector of signed 32-bit ints with a CompactSize prefix."""
    return ser_compact_size(len(l)) + b"".join(struct.pack("<i", i) for i in l)
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
    """Deserialize *obj* from a hex string (e.g. from RPC) and return it."""
    stream = BytesIO(hex_str_to_bytes(hex_string))
    obj.deserialize(stream)
    return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
    """Serialize *obj* and return the bytes as a hex string (e.g. for RPC)."""
    raw = obj.serialize()
    return bytes_to_hex_str(raw)
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
    """A network address as carried in version/addr messages (IPv4-mapped form)."""

    def __init__(self):
        self.nServices = 1
        # 10 zero bytes + 0xffff marks an IPv4-mapped IPv6 address.
        self.pchReserved = b"\x00" * 10 + b"\xff" * 2
        self.ip = "0.0.0.0"
        self.port = 0

    def deserialize(self, f):
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.pchReserved = f.read(12)
        self.ip = socket.inet_ntoa(f.read(4))
        self.port = struct.unpack(">H", f.read(2))[0]

    def serialize(self):
        parts = [
            struct.pack("<Q", self.nServices),
            self.pchReserved,
            socket.inet_aton(self.ip),
            struct.pack(">H", self.port),  # port is big-endian on the wire
        ]
        return b"".join(parts)

    def __repr__(self):
        return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
                                                         self.ip, self.port)
# Bit ORed into a CInv type to request the witness (BIP 144) serialization.
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
    """Inventory vector: identifies a tx/block (optionally witness) by type and hash."""
    # Human-readable names for the inventory type codes used in __repr__.
    typemap = {
        0: "Error",
        1: "TX",
        2: "Block",
        1|MSG_WITNESS_FLAG: "WitnessTx",
        2|MSG_WITNESS_FLAG : "WitnessBlock",
        4: "CompactBlock"
    }
    def __init__(self, t=0, h=0):
        self.type = t
        self.hash = h
    def deserialize(self, f):
        self.type = struct.unpack("<i", f.read(4))[0]
        self.hash = deser_uint256(f)
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.type)
        r += ser_uint256(self.hash)
        return r
    def __repr__(self):
        return "CInv(type=%s hash=%064x)" \
            % (self.typemap[self.type], self.hash)
class CBlockLocator(object):
    """Block locator: a list of block hashes sent in getblocks/getheaders."""
    def __init__(self):
        self.nVersion = MY_VERSION
        self.vHave = []
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vHave = deser_uint256_vector(f)
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_uint256_vector(self.vHave)
        return r
    def __repr__(self):
        return "CBlockLocator(nVersion=%i vHave=%s)" \
            % (self.nVersion, repr(self.vHave))
class COutPoint(object):
    """Reference to an output of a previous transaction (txid + output index)."""

    def __init__(self, hash=0, n=0):
        self.hash = hash
        self.n = n

    def deserialize(self, f):
        self.hash = deser_uint256(f)
        self.n = struct.unpack("<I", f.read(4))[0]

    def serialize(self):
        return ser_uint256(self.hash) + struct.pack("<I", self.n)

    def __repr__(self):
        return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
    """A transaction input: previous outpoint, scriptSig bytes, and sequence."""
    def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
        if outpoint is None:
            self.prevout = COutPoint()
        else:
            self.prevout = outpoint
        self.scriptSig = scriptSig
        self.nSequence = nSequence
    def deserialize(self, f):
        self.prevout = COutPoint()
        self.prevout.deserialize(f)
        self.scriptSig = deser_string(f)
        self.nSequence = struct.unpack("<I", f.read(4))[0]
    def serialize(self):
        r = b""
        r += self.prevout.serialize()
        r += ser_string(self.scriptSig)
        r += struct.pack("<I", self.nSequence)
        return r
    def __repr__(self):
        return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
            % (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
               self.nSequence)
class CTxOut(object):
    """A transaction output: value (in satoshis) and scriptPubKey bytes."""
    def __init__(self, nValue=0, scriptPubKey=b""):
        self.nValue = nValue
        self.scriptPubKey = scriptPubKey
    def deserialize(self, f):
        self.nValue = struct.unpack("<q", f.read(8))[0]
        self.scriptPubKey = deser_string(f)
    def serialize(self):
        r = b""
        r += struct.pack("<q", self.nValue)
        r += ser_string(self.scriptPubKey)
        return r
    def __repr__(self):
        # Value shown split into whole coins and the 8-digit satoshi remainder.
        return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
            % (self.nValue // COIN, self.nValue % COIN,
               bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
    """The witness for one input: a stack of byte strings."""

    def __init__(self):
        # stack is a vector of strings
        self.stack = []

    def __repr__(self):
        hex_items = [bytes_to_hex_str(x) for x in self.stack]
        return "CScriptWitness(%s)" % ",".join(hex_items)

    def is_null(self):
        """Return True when the witness stack is empty."""
        return not self.stack
class CTxInWitness(object):
    """Per-input witness wrapper around a CScriptWitness stack."""
    def __init__(self):
        self.scriptWitness = CScriptWitness()
    def deserialize(self, f):
        self.scriptWitness.stack = deser_string_vector(f)
    def serialize(self):
        return ser_string_vector(self.scriptWitness.stack)
    def __repr__(self):
        return repr(self.scriptWitness)
    def is_null(self):
        return self.scriptWitness.is_null()
class CTxWitness(object):
    """Witness data for a whole transaction: one CTxInWitness per input."""
    def __init__(self):
        self.vtxinwit = []
    def deserialize(self, f):
        # Caller must have pre-sized vtxinwit to the number of inputs.
        for i in range(len(self.vtxinwit)):
            self.vtxinwit[i].deserialize(f)
    def serialize(self):
        r = b""
        # This is different than the usual vector serialization --
        # we omit the length of the vector, which is required to be
        # the same length as the transaction's vin vector.
        for x in self.vtxinwit:
            r += x.serialize()
        return r
    def __repr__(self):
        return "CTxWitness(%s)" % \
               (';'.join([repr(x) for x in self.vtxinwit]))
    def is_null(self):
        for x in self.vtxinwit:
            if not x.is_null():
                return False
        return True
class CTransaction(object):
    """A transaction, supporting both legacy and BIP 144 witness serialization.

    sha256/hash cache the txid (the hash of the serialization WITHOUT
    witness data); they are reset whenever the tx is (de)serialized.
    """
    def __init__(self, tx=None):
        if tx is None:
            self.nVersion = 1
            self.vin = []
            self.vout = []
            self.wit = CTxWitness()
            self.nLockTime = 0
            self.sha256 = None
            self.hash = None
        else:
            # Copy-construct from another CTransaction (deep copies the
            # mutable members so the two objects are independent).
            self.nVersion = tx.nVersion
            self.vin = copy.deepcopy(tx.vin)
            self.vout = copy.deepcopy(tx.vout)
            self.nLockTime = tx.nLockTime
            self.sha256 = tx.sha256
            self.hash = tx.hash
            self.wit = copy.deepcopy(tx.wit)
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vin = deser_vector(f, CTxIn)
        flags = 0
        if len(self.vin) == 0:
            # An empty vin marks the segwit format: next byte is the flags.
            flags = struct.unpack("<B", f.read(1))[0]
            # Not sure why flags can't be zero, but this
            # matches the implementation in bitcoind
            if (flags != 0):
                self.vin = deser_vector(f, CTxIn)
                self.vout = deser_vector(f, CTxOut)
        else:
            self.vout = deser_vector(f, CTxOut)
        if flags != 0:
            self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
            self.wit.deserialize(f)
        self.nLockTime = struct.unpack("<I", f.read(4))[0]
        self.sha256 = None
        self.hash = None
    def serialize_without_witness(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        r += struct.pack("<I", self.nLockTime)
        return r
    # Only serialize with witness when explicitly called for
    def serialize_with_witness(self):
        flags = 0
        if not self.wit.is_null():
            flags |= 1
        r = b""
        r += struct.pack("<i", self.nVersion)
        if flags:
            # Segwit marker: empty vin vector followed by the flags byte.
            dummy = []
            r += ser_vector(dummy)
            r += struct.pack("<B", flags)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        if flags & 1:
            if (len(self.wit.vtxinwit) != len(self.vin)):
                # vtxinwit must have the same length as vin
                self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
                for i in range(len(self.wit.vtxinwit), len(self.vin)):
                    self.wit.vtxinwit.append(CTxInWitness())
            r += self.wit.serialize()
        r += struct.pack("<I", self.nLockTime)
        return r
    # Regular serialization is without witness -- must explicitly
    # call serialize_with_witness to include witness data.
    def serialize(self):
        return self.serialize_without_witness()
    # Recalculate the txid (transaction hash without witness)
    def rehash(self):
        self.sha256 = None
        self.calc_sha256()
    # We will only cache the serialization without witness in
    # self.sha256 and self.hash -- those are expected to be the txid.
    def calc_sha256(self, with_witness=False):
        if with_witness:
            # Don't cache the result, just return it
            return uint256_from_str(hash256(self.serialize_with_witness()))
        if self.sha256 is None:
            self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
        self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
    def is_valid(self):
        # Basic sanity: all output values within [0, 21M coins].
        self.calc_sha256()
        for tout in self.vout:
            if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
                return False
        return True
    def __repr__(self):
        return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
            % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader(object):
    """An 80-byte block header.

    Caches three derived values: sha256 (block id as int), hash (block id as
    hex string), and scrypt256 (the scrypt proof-of-work hash used by this
    litecoin-family chain; see calc_sha256).
    """
    def __init__(self, header=None):
        if header is None:
            self.set_null()
        else:
            # Copy-construct from another header, then refresh the caches.
            self.nVersion = header.nVersion
            self.hashPrevBlock = header.hashPrevBlock
            self.hashMerkleRoot = header.hashMerkleRoot
            self.nTime = header.nTime
            self.nBits = header.nBits
            self.nNonce = header.nNonce
            self.sha256 = header.sha256
            self.hash = header.hash
            self.scrypt256 = header.scrypt256
            self.calc_sha256()
    def set_null(self):
        self.nVersion = 1
        self.hashPrevBlock = 0
        self.hashMerkleRoot = 0
        self.nTime = 0
        self.nBits = 0
        self.nNonce = 0
        self.sha256 = None
        self.hash = None
        self.scrypt256 = None
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.hashPrevBlock = deser_uint256(f)
        self.hashMerkleRoot = deser_uint256(f)
        self.nTime = struct.unpack("<I", f.read(4))[0]
        self.nBits = struct.unpack("<I", f.read(4))[0]
        self.nNonce = struct.unpack("<I", f.read(4))[0]
        self.sha256 = None
        self.hash = None
        self.scrypt256 = None
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_uint256(self.hashPrevBlock)
        r += ser_uint256(self.hashMerkleRoot)
        r += struct.pack("<I", self.nTime)
        r += struct.pack("<I", self.nBits)
        r += struct.pack("<I", self.nNonce)
        return r
    def calc_sha256(self):
        # Populate the cached sha256/hash/scrypt256 values if not yet set.
        if self.sha256 is None:
            r = b""
            r += struct.pack("<i", self.nVersion)
            r += ser_uint256(self.hashPrevBlock)
            r += ser_uint256(self.hashMerkleRoot)
            r += struct.pack("<I", self.nTime)
            r += struct.pack("<I", self.nBits)
            r += struct.pack("<I", self.nNonce)
            self.sha256 = uint256_from_str(hash256(r))
            self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
            self.scrypt256 = uint256_from_str(honeycomb_scrypt.getPoWHash(r))
    def rehash(self):
        # Invalidate the caches and recompute; returns the new sha256.
        self.sha256 = None
        self.scrypt256 = None
        self.calc_sha256()
        return self.sha256
    def __repr__(self):
        return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
    """A full block: header plus the vector of transactions (vtx)."""
    def __init__(self, header=None):
        super(CBlock, self).__init__(header)
        self.vtx = []
    def deserialize(self, f):
        super(CBlock, self).deserialize(f)
        self.vtx = deser_vector(f, CTransaction)
    def serialize(self, with_witness=False):
        r = b""
        r += super(CBlock, self).serialize()
        if with_witness:
            r += ser_vector(self.vtx, "serialize_with_witness")
        else:
            r += ser_vector(self.vtx)
        return r
    # Calculate the merkle root given a vector of transaction hashes
    @classmethod
    def get_merkle_root(cls, hashes):
        while len(hashes) > 1:
            newhashes = []
            for i in range(0, len(hashes), 2):
                # With an odd count, the last hash is paired with itself.
                i2 = min(i+1, len(hashes)-1)
                newhashes.append(hash256(hashes[i] + hashes[i2]))
            hashes = newhashes
        return uint256_from_str(hashes[0])
    def calc_merkle_root(self):
        hashes = []
        for tx in self.vtx:
            tx.calc_sha256()
            hashes.append(ser_uint256(tx.sha256))
        return self.get_merkle_root(hashes)
    def calc_witness_merkle_root(self):
        # For witness root purposes, the hash of the
        # coinbase, with witness, is defined to be 0...0
        hashes = [ser_uint256(0)]
        for tx in self.vtx[1:]:
            # Calculate the hashes with witness data
            hashes.append(ser_uint256(tx.calc_sha256(True)))
        return self.get_merkle_root(hashes)
    def is_valid(self):
        # Checks PoW (against the scrypt hash), each tx, and the merkle root.
        self.calc_sha256()
        target = uint256_from_compact(self.nBits)
        if self.scrypt256 > target:
            return False
        for tx in self.vtx:
            if not tx.is_valid():
                return False
        if self.calc_merkle_root() != self.hashMerkleRoot:
            return False
        return True
    def solve(self):
        # Grind the nonce until the scrypt PoW hash meets the target.
        self.rehash()
        target = uint256_from_compact(self.nBits)
        while self.scrypt256 > target:
            self.nNonce += 1
            self.rehash()
    def __repr__(self):
        return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
    """Payload of a (legacy) network alert message, before signing."""
    def __init__(self):
        self.nVersion = 1
        self.nRelayUntil = 0
        self.nExpiration = 0
        self.nID = 0
        self.nCancel = 0
        self.setCancel = []
        self.nMinVer = 0
        self.nMaxVer = 0
        self.setSubVer = []
        self.nPriority = 0
        self.strComment = b""
        self.strStatusBar = b""
        self.strReserved = b""
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
        self.nExpiration = struct.unpack("<q", f.read(8))[0]
        self.nID = struct.unpack("<i", f.read(4))[0]
        self.nCancel = struct.unpack("<i", f.read(4))[0]
        self.setCancel = deser_int_vector(f)
        self.nMinVer = struct.unpack("<i", f.read(4))[0]
        self.nMaxVer = struct.unpack("<i", f.read(4))[0]
        self.setSubVer = deser_string_vector(f)
        self.nPriority = struct.unpack("<i", f.read(4))[0]
        self.strComment = deser_string(f)
        self.strStatusBar = deser_string(f)
        self.strReserved = deser_string(f)
    def serialize(self):
        # Field order must mirror deserialize() exactly.
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<q", self.nRelayUntil)
        r += struct.pack("<q", self.nExpiration)
        r += struct.pack("<i", self.nID)
        r += struct.pack("<i", self.nCancel)
        r += ser_int_vector(self.setCancel)
        r += struct.pack("<i", self.nMinVer)
        r += struct.pack("<i", self.nMaxVer)
        r += ser_string_vector(self.setSubVer)
        r += struct.pack("<i", self.nPriority)
        r += ser_string(self.strComment)
        r += ser_string(self.strStatusBar)
        r += ser_string(self.strReserved)
        return r
    def __repr__(self):
        return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
            % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
               self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
               self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
    """Signed network alert: an opaque message blob plus its signature."""

    def __init__(self):
        self.vchMsg = b""
        self.vchSig = b""

    def deserialize(self, f):
        self.vchMsg = deser_string(f)
        self.vchSig = deser_string(f)

    def serialize(self):
        return ser_string(self.vchMsg) + ser_string(self.vchSig)

    def __repr__(self):
        return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
            % (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
    """A (index, transaction) pair prefilled in a BIP 152 compact block."""
    def __init__(self, index=0, tx = None):
        self.index = index
        self.tx = tx
    def deserialize(self, f):
        self.index = deser_compact_size(f)
        self.tx = CTransaction()
        self.tx.deserialize(f)
    def serialize(self, with_witness=False):
        r = b""
        r += ser_compact_size(self.index)
        if with_witness:
            r += self.tx.serialize_with_witness()
        else:
            r += self.tx.serialize_without_witness()
        return r
    def serialize_with_witness(self):
        # Convenience wrapper so ser_vector can select witness serialization.
        return self.serialize(with_witness=True)
    def __repr__(self):
        return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
    """Raw wire format of a BIP 152 cmpctblock message: header, nonce,
    6-byte shortids, and differentially-indexed prefilled transactions."""
    def __init__(self):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids_length = 0
        self.shortids = []
        self.prefilled_txn_length = 0
        self.prefilled_txn = []
    def deserialize(self, f):
        self.header.deserialize(f)
        self.nonce = struct.unpack("<Q", f.read(8))[0]
        self.shortids_length = deser_compact_size(f)
        for i in range(self.shortids_length):
            # shortids are defined to be 6 bytes in the spec, so append
            # two zero bytes and read it in as an 8-byte number
            self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
        self.prefilled_txn = deser_vector(f, PrefilledTransaction)
        self.prefilled_txn_length = len(self.prefilled_txn)
    # When using version 2 compact blocks, we must serialize with_witness.
    def serialize(self, with_witness=False):
        r = b""
        r += self.header.serialize()
        r += struct.pack("<Q", self.nonce)
        r += ser_compact_size(self.shortids_length)
        for x in self.shortids:
            # We only want the first 6 bytes
            r += struct.pack("<Q", x)[0:6]
        if with_witness:
            r += ser_vector(self.prefilled_txn, "serialize_with_witness")
        else:
            r += ser_vector(self.prefilled_txn)
        return r
    def __repr__(self):
        return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
    """Wire format for version-2 compact blocks: always witness serialization."""
    def serialize(self):
        return super().serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
    """Return the BIP 152 short transaction ID: low 48 bits of SipHash."""
    full_hash = siphash256(k0, k1, tx_hash)
    return full_hash & 0x0000ffffffffffff
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
    """Reinterpreted form of a BIP 152 compact block.

    Unlike P2PHeaderAndShortIDs (the raw wire format, which differentially
    encodes prefilled-transaction indices), this class stores absolute
    indices that can be used for direct lookup.
    """
    def __init__(self, p2pheaders_and_shortids = None):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids = []
        self.prefilled_txn = []
        self.use_witness = False
        if p2pheaders_and_shortids != None:
            self.header = p2pheaders_and_shortids.header
            self.nonce = p2pheaders_and_shortids.nonce
            self.shortids = p2pheaders_and_shortids.shortids
            last_index = -1
            for x in p2pheaders_and_shortids.prefilled_txn:
                # Undo the differential index encoding of the wire format.
                self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
                last_index = self.prefilled_txn[-1].index
    def to_p2p(self):
        """Convert back to the wire format (witness variant if use_witness)."""
        if self.use_witness:
            ret = P2PHeaderAndShortWitnessIDs()
        else:
            ret = P2PHeaderAndShortIDs()
        ret.header = self.header
        ret.nonce = self.nonce
        ret.shortids_length = len(self.shortids)
        ret.shortids = self.shortids
        ret.prefilled_txn_length = len(self.prefilled_txn)
        ret.prefilled_txn = []
        last_index = -1
        for x in self.prefilled_txn:
            # Re-apply the differential index encoding.
            ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
            last_index = x.index
        return ret
    def get_siphash_keys(self):
        """Derive the two SipHash keys from sha256(header || nonce), per BIP 152."""
        header_nonce = self.header.serialize()
        header_nonce += struct.pack("<Q", self.nonce)
        hash_header_nonce_as_str = sha256(header_nonce)
        key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
        key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
        return [ key0, key1 ]
    # Version 2 compact blocks use wtxid in shortids (rather than txid)
    def initialize_from_block(self, block, nonce=0, prefill_list = None, use_witness = False):
        """Populate this object from *block*, prefilling the transactions whose
        indices are in prefill_list (default: [0], the coinbase).

        prefill_list now defaults to None instead of a shared mutable [0]
        (mutable-default-argument fix); behavior is unchanged for callers.
        """
        if prefill_list is None:
            prefill_list = [0]
        self.header = CBlockHeader(block)
        self.nonce = nonce
        self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
        self.shortids = []
        self.use_witness = use_witness
        [k0, k1] = self.get_siphash_keys()
        for i in range(len(block.vtx)):
            if i not in prefill_list:
                tx_hash = block.vtx[i].sha256
                if use_witness:
                    tx_hash = block.vtx[i].calc_sha256(with_witness=True)
                self.shortids.append(calculate_shortid(k0, k1, tx_hash))
    def __repr__(self):
        return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
    """getblocktxn payload (BIP 152): block hash plus tx indexes.

    Indexes travel differentially encoded on the wire; use
    from_absolute()/to_absolute() to convert.
    """
    def __init__(self, blockhash=0, indexes=None):
        self.blockhash = blockhash
        # Idiom fix: compare to None with 'is not', not '!='.
        self.indexes = indexes if indexes is not None else []
    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        indexes_length = deser_compact_size(f)
        for i in range(indexes_length):
            self.indexes.append(deser_compact_size(f))
    def serialize(self):
        r = b""
        r += ser_uint256(self.blockhash)
        r += ser_compact_size(len(self.indexes))
        for x in self.indexes:
            r += ser_compact_size(x)
        return r
    # helper to set the differentially encoded indexes from absolute ones
    def from_absolute(self, absolute_indexes):
        self.indexes = []
        last_index = -1
        for x in absolute_indexes:
            self.indexes.append(x - last_index - 1)
            last_index = x
    def to_absolute(self):
        """Return the indexes decoded back to absolute positions."""
        absolute_indexes = []
        last_index = -1
        for x in self.indexes:
            absolute_indexes.append(x + last_index + 1)
            last_index = absolute_indexes[-1]
        return absolute_indexes
    def __repr__(self):
        return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
    """blocktxn payload (BIP 152): the transactions requested via getblocktxn."""
    def __init__(self, blockhash=0, transactions=None):
        self.blockhash = blockhash
        # Idiom fix: compare to None with 'is not', not '!='.
        self.transactions = transactions if transactions is not None else []
    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        self.transactions = deser_vector(f, CTransaction)
    def serialize(self, with_witness=False):
        r = b""
        r += ser_uint256(self.blockhash)
        if with_witness:
            r += ser_vector(self.transactions, "serialize_with_witness")
        else:
            r += ser_vector(self.transactions)
        return r
    def __repr__(self):
        return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
    """version message: the connection handshake opener.

    Field presence in deserialize depends on the peer's announced protocol
    version, so reads must stay in exactly this order.
    """
    command = b"version"
    def __init__(self):
        self.nVersion = MY_VERSION
        self.nServices = 1
        self.nTime = int(time.time())
        self.addrTo = CAddress()
        self.addrFrom = CAddress()
        self.nNonce = random.getrandbits(64)
        self.strSubVer = MY_SUBVERSION
        self.nStartingHeight = -1
        self.nRelay = MY_RELAY
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        # Historical quirk kept from the original client: version 10300 is
        # treated as 300.
        if self.nVersion == 10300:
            self.nVersion = 300
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.nTime = struct.unpack("<q", f.read(8))[0]
        self.addrTo = CAddress()
        self.addrTo.deserialize(f)
        # addrFrom/nonce/subversion only exist from protocol 106 onwards.
        if self.nVersion >= 106:
            self.addrFrom = CAddress()
            self.addrFrom.deserialize(f)
            self.nNonce = struct.unpack("<Q", f.read(8))[0]
            self.strSubVer = deser_string(f)
        else:
            self.addrFrom = None
            self.nNonce = None
            self.strSubVer = None
            self.nStartingHeight = None
        if self.nVersion >= 209:
            self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
        else:
            self.nStartingHeight = None
        if self.nVersion >= 70001:
            # Relay field is optional for version 70001 onwards
            try:
                self.nRelay = struct.unpack("<b", f.read(1))[0]
            except:
                self.nRelay = 0
        else:
            self.nRelay = 0
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<Q", self.nServices)
        r += struct.pack("<q", self.nTime)
        r += self.addrTo.serialize()
        r += self.addrFrom.serialize()
        r += struct.pack("<Q", self.nNonce)
        r += ser_string(self.strSubVer)
        r += struct.pack("<i", self.nStartingHeight)
        r += struct.pack("<b", self.nRelay)
        return r
    def __repr__(self):
        return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
            % (self.nVersion, self.nServices, time.ctime(self.nTime),
               repr(self.addrTo), repr(self.addrFrom), self.nNonce,
               self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
    """verack message: acknowledges a version message. Empty payload."""
    command = b"verack"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_verack()"
class msg_addr(object):
    """addr message: advertises known peer addresses."""
    command = b"addr"

    def __init__(self):
        self.addrs = []

    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress)

    def serialize(self):
        return ser_vector(self.addrs)

    def __repr__(self):
        return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
    """alert message (legacy network alert system)."""
    command = b"alert"

    def __init__(self):
        self.alert = CAlert()

    def deserialize(self, f):
        fresh = CAlert()
        fresh.deserialize(f)
        self.alert = fresh

    def serialize(self):
        return self.alert.serialize()

    def __repr__(self):
        return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
    """inv message: announces known objects (txs/blocks) to the peer."""
    command = b"inv"

    def __init__(self, inv=None):
        self.inv = [] if inv is None else inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
    """getdata message: requests the objects named in the inv list."""
    command = b"getdata"
    def __init__(self, inv=None):
        # Idiom fix: compare to None with 'is not', not '!='.
        self.inv = inv if inv is not None else []
    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)
    def serialize(self):
        return ser_vector(self.inv)
    def __repr__(self):
        return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
    """getblocks message: block locator plus a stop hash."""
    command = b"getblocks"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)

    def __repr__(self):
        return "msg_getblocks(locator=%s hashstop=%064x)" \
            % (repr(self.locator), self.hashstop)
class msg_tx(object):
    """tx message: relays a single transaction (non-witness serialization)."""
    command = b"tx"
    def __init__(self, tx=None):
        # Bug fix: the original default `tx=CTransaction()` is evaluated once
        # at function definition time, so every default-constructed msg_tx
        # shared (and mutated via deserialize) the same CTransaction object.
        self.tx = tx if tx is not None else CTransaction()
    def deserialize(self, f):
        self.tx.deserialize(f)
    def serialize(self):
        return self.tx.serialize_without_witness()
    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
    """Variant of msg_tx that serializes the transaction with witness data."""
    def serialize(self):
        return self.tx.serialize_with_witness()
class msg_block(object):
    """block message: a full serialized block (no witness data)."""
    command = b"block"

    def __init__(self, block=None):
        self.block = CBlock() if block is None else block

    def deserialize(self, f):
        self.block.deserialize(f)

    def serialize(self):
        return self.block.serialize()

    def __repr__(self):
        return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
    """Raw message with caller-supplied command and payload bytes.

    For cases where a test needs tight control over what goes on the wire.
    """
    def __init__(self, command, data=None):
        self.command = command
        self.data = data

    def serialize(self):
        return self.data

    def __repr__(self):
        return "msg_generic()"
class msg_witness_block(msg_block):
    """Variant of msg_block that serializes the block with witness data."""
    def serialize(self):
        r = self.block.serialize(with_witness=True)
        return r
class msg_getaddr(object):
    """getaddr message: asks the peer for known addresses. Empty payload."""
    command = b"getaddr"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_getaddr()"
class msg_ping_prebip31(object):
    """ping message as used before BIP 31 (no nonce payload)."""
    command = b"ping"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_ping() (pre-bip31)"
class msg_ping(object):
    """ping message with a 64-bit nonce (BIP 31)."""
    command = b"ping"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        (self.nonce,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
    """pong message: echoes a ping's nonce (BIP 31)."""
    command = b"pong"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        (self.nonce,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
    """mempool message: requests the peer's mempool contents. Empty payload."""
    command = b"mempool"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_mempool()"
class msg_sendheaders(object):
    """sendheaders message: ask for announcements via headers. Empty payload."""
    command = b"sendheaders"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
    """getheaders message: block locator plus a stop hash.

    hash_stop is the hash of the last desired header; 0 requests as many
    headers as possible.
    """
    command = b"getheaders"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)

    def __repr__(self):
        return "msg_getheaders(locator=%s, stop=%064x)" \
            % (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
    """headers message: a vector of block headers.

    bitcoind serializes headers as blocks with empty tx vectors, so we
    round-trip through CBlock on both directions.
    """
    command = b"headers"

    def __init__(self):
        self.headers = []

    def deserialize(self, f):
        # comment in bitcoind indicates these should be deserialized as blocks
        self.headers.extend(CBlockHeader(b) for b in deser_vector(f, CBlock))

    def serialize(self):
        return ser_vector([CBlock(h) for h in self.headers])

    def __repr__(self):
        return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
    """reject message: the peer rejected one of our messages (BIP 61)."""
    command = b"reject"
    REJECT_MALFORMED = 1

    def __init__(self):
        self.message = b""
        self.code = 0
        self.reason = b""
        self.data = 0

    def _has_data(self):
        # A hash payload follows only for non-malformed block/tx rejections.
        return (self.code != self.REJECT_MALFORMED
                and self.message in (b"block", b"tx"))

    def deserialize(self, f):
        self.message = deser_string(f)
        self.code = struct.unpack("<B", f.read(1))[0]
        self.reason = deser_string(f)
        if self._has_data():
            self.data = deser_uint256(f)

    def serialize(self):
        r = ser_string(self.message)
        r += struct.pack("<B", self.code)
        r += ser_string(self.reason)
        if self._has_data():
            r += ser_uint256(self.data)
        return r

    def __repr__(self):
        return "msg_reject: %s %d %s [%064x]" \
            % (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')):
    """Poll *predicate* (under mininode_lock) until it returns True.

    Returns True as soon as the predicate holds, False once either the
    attempt budget or the wall-clock timeout is exhausted. If neither
    bound is given, a default 60-second timeout applies.
    """
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    # Bug fix: 'elapsed' was previously advanced by a fixed 0.05 per loop
    # iteration, ignoring time spent evaluating the predicate, so the real
    # wait could greatly exceed the requested timeout. Use wall-clock time.
    deadline = time.time() + timeout
    attempt = 0
    while attempt < attempts and time.time() < deadline:
        with mininode_lock:
            if predicate():
                return True
        attempt += 1
        time.sleep(0.05)
    return False
class msg_feefilter(object):
    """feefilter message: minimum fee rate for tx relay (BIP 133)."""
    command = b"feefilter"

    def __init__(self, feerate=0):
        self.feerate = feerate

    def deserialize(self, f):
        (self.feerate,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<Q", self.feerate)

    def __repr__(self):
        return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
    """sendcmpct message: negotiate compact-block relay (BIP 152)."""
    command = b"sendcmpct"

    def __init__(self):
        self.announce = False
        self.version = 1

    def deserialize(self, f):
        (self.announce,) = struct.unpack("<?", f.read(1))
        (self.version,) = struct.unpack("<Q", f.read(8))

    def serialize(self):
        return struct.pack("<?", self.announce) + struct.pack("<Q", self.version)

    def __repr__(self):
        return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
    """cmpctblock message: header + shortids (BIP 152)."""
    command = b"cmpctblock"

    def __init__(self, header_and_shortids=None):
        self.header_and_shortids = header_and_shortids

    def deserialize(self, f):
        self.header_and_shortids = P2PHeaderAndShortIDs()
        self.header_and_shortids.deserialize(f)

    def serialize(self):
        return self.header_and_shortids.serialize()

    def __repr__(self):
        return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
    """getblocktxn message: request missing txs of a compact block (BIP 152)."""
    command = b"getblocktxn"

    def __init__(self):
        self.block_txn_request = None

    def deserialize(self, f):
        self.block_txn_request = BlockTransactionsRequest()
        self.block_txn_request.deserialize(f)

    def serialize(self):
        return self.block_txn_request.serialize()

    def __repr__(self):
        return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
    """blocktxn message: deliver txs requested via getblocktxn (BIP 152)."""
    command = b"blocktxn"

    def __init__(self):
        self.block_transactions = BlockTransactions()

    def deserialize(self, f):
        self.block_transactions.deserialize(f)

    def serialize(self):
        return self.block_transactions.serialize()

    def __repr__(self):
        return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
    """Variant of msg_blocktxn that serializes witness data."""
    def serialize(self):
        r = b""
        r += self.block_transactions.serialize(with_witness=True)
        return r
class NodeConnCB(object):
    """Callback and helper functions for P2P connection to a bitcoind node.
    Individual testcases should subclass this and override the on_* methods
    if they want to alter message handling behaviour.
    """
    def __init__(self):
        # Track whether we have a P2P connection open to the node
        self.connected = False
        self.connection = None
        # Track number of messages of each type received and the most recent
        # message of each type
        self.message_count = defaultdict(int)
        self.last_message = {}
        # A count of the number of ping messages we've sent to the node
        self.ping_counter = 1
        # deliver_sleep_time is helpful for debugging race conditions in p2p
        # tests; it causes message delivery to sleep for the specified time
        # before acquiring the global lock and delivering the next message.
        self.deliver_sleep_time = None
        # Remember the services our peer has advertised
        self.peer_services = None
    # Message receiving methods
    def deliver(self, conn, message):
        """Receive message and dispatch message to appropriate callback.
        We keep a count of how many of each message type has been received
        and the most recent message of each type.
        Optionally waits for deliver_sleep_time before dispatching message.
        """
        deliver_sleep = self.get_deliver_sleep_time()
        if deliver_sleep is not None:
            time.sleep(deliver_sleep)
        with mininode_lock:
            try:
                # Dispatch by naming convention: command 'foo' -> self.on_foo.
                command = message.command.decode('ascii')
                self.message_count[command] += 1
                self.last_message[command] = message
                getattr(self, 'on_' + command)(conn, message)
            except:
                # NOTE(review): bare except hides the original traceback from
                # the test; the error is only printed, not re-raised.
                print("ERROR delivering %s (%s)" % (repr(message),
                                                    sys.exc_info()[0]))
    def set_deliver_sleep_time(self, value):
        with mininode_lock:
            self.deliver_sleep_time = value
    def get_deliver_sleep_time(self):
        with mininode_lock:
            return self.deliver_sleep_time
    # Callback methods. Can be overridden by subclasses in individual test
    # cases to provide custom message handling behaviour.
    def on_open(self, conn):
        self.connected = True
    def on_close(self, conn):
        self.connected = False
        self.connection = None
    # Default no-op handlers; subclasses override the ones they care about.
    def on_addr(self, conn, message): pass
    def on_alert(self, conn, message): pass
    def on_block(self, conn, message): pass
    def on_blocktxn(self, conn, message): pass
    def on_cmpctblock(self, conn, message): pass
    def on_feefilter(self, conn, message): pass
    def on_getaddr(self, conn, message): pass
    def on_getblocks(self, conn, message): pass
    def on_getblocktxn(self, conn, message): pass
    def on_getdata(self, conn, message): pass
    def on_getheaders(self, conn, message): pass
    def on_headers(self, conn, message): pass
    def on_mempool(self, conn): pass
    def on_pong(self, conn, message): pass
    def on_reject(self, conn, message): pass
    def on_sendcmpct(self, conn, message): pass
    def on_sendheaders(self, conn, message): pass
    def on_tx(self, conn, message): pass
    def on_inv(self, conn, message):
        # Default behaviour: request every announced object (type != 0).
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            conn.send_message(want)
    def on_ping(self, conn, message):
        # Pre-BIP31 peers send nonce-less pings and expect no pong.
        if conn.ver_send > BIP0031_VERSION:
            conn.send_message(msg_pong(message.nonce))
    def on_verack(self, conn, message):
        conn.ver_recv = conn.ver_send
        # NOTE(review): verack_received is created lazily here, not in
        # __init__; reading it before a verack raises AttributeError.
        self.verack_received = True
    def on_version(self, conn, message):
        if message.nVersion >= 209:
            conn.send_message(msg_verack())
        conn.ver_send = min(MY_VERSION, message.nVersion)
        if message.nVersion < 209:
            conn.ver_recv = conn.ver_send
        conn.nServices = message.nServices
    # Connection helper methods
    def add_connection(self, conn):
        self.connection = conn
    def wait_for_disconnect(self, timeout=60):
        test_function = lambda: not self.connected
        assert wait_until(test_function, timeout=timeout)
    # Message receiving helper methods
    def wait_for_block(self, blockhash, timeout=60):
        test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
        assert wait_until(test_function, timeout=timeout)
    def wait_for_getdata(self, timeout=60):
        test_function = lambda: self.last_message.get("getdata")
        assert wait_until(test_function, timeout=timeout)
    def wait_for_getheaders(self, timeout=60):
        test_function = lambda: self.last_message.get("getheaders")
        assert wait_until(test_function, timeout=timeout)
    def wait_for_inv(self, expected_inv, timeout=60):
        """Waits for an INV message and checks that the first inv object in the message was as expected."""
        if len(expected_inv) > 1:
            raise NotImplementedError("wait_for_inv() will only verify the first inv object")
        test_function = lambda: self.last_message.get("inv") and \
                self.last_message["inv"].inv[0].type == expected_inv[0].type and \
                self.last_message["inv"].inv[0].hash == expected_inv[0].hash
        assert wait_until(test_function, timeout=timeout)
    def wait_for_verack(self, timeout=60):
        test_function = lambda: self.message_count["verack"]
        assert wait_until(test_function, timeout=timeout)
    # Message sending helper functions
    def send_message(self, message):
        if self.connection:
            self.connection.send_message(message)
        else:
            logger.error("Cannot send message. No connection to node!")
    def send_and_ping(self, message):
        self.send_message(message)
        self.sync_with_ping()
    # Sync up with the node
    def sync_with_ping(self, timeout=60):
        # Send a ping with a fresh nonce and wait for the matching pong,
        # guaranteeing the node has processed everything sent before it.
        self.send_message(msg_ping(nonce=self.ping_counter))
        test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
        assert wait_until(test_function, timeout=timeout)
        self.ping_counter += 1
        return True
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
    """A p2p connection to a single node, driven by the asyncore event loop.

    Handles wire framing (magic + command + length [+ checksum]), message
    (de)serialization via `messagemap`, and delivers parsed messages to the
    supplied callback object (normally a NodeConnCB).
    """
    # Wire command -> message class used to deserialize incoming payloads.
    messagemap = {
        b"version": msg_version,
        b"verack": msg_verack,
        b"addr": msg_addr,
        b"alert": msg_alert,
        b"inv": msg_inv,
        b"getdata": msg_getdata,
        b"getblocks": msg_getblocks,
        b"tx": msg_tx,
        b"block": msg_block,
        b"getaddr": msg_getaddr,
        b"ping": msg_ping,
        b"pong": msg_pong,
        b"headers": msg_headers,
        b"getheaders": msg_getheaders,
        b"reject": msg_reject,
        b"mempool": msg_mempool,
        b"feefilter": msg_feefilter,
        b"sendheaders": msg_sendheaders,
        b"sendcmpct": msg_sendcmpct,
        b"cmpctblock": msg_cmpctblock,
        b"getblocktxn": msg_getblocktxn,
        b"blocktxn": msg_blocktxn
    }
    # Per-network message-start (magic) bytes.
    MAGIC_BYTES = {
        "mainnet": b"\xfb\xc0\xb6\xdb",   # mainnet
        "testnet3": b"\xfc\xc1\xb7\xdc",  # testnet3
        "regtest": b"\xfa\xbf\xb5\xda",   # regtest
    }
    def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
        asyncore.dispatcher.__init__(self, map=mininode_socket_map)
        self.dstaddr = dstaddr
        self.dstport = dstport
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sendbuf = b""
        self.recvbuf = b""
        # Until the peer's version is known, assume protocol 209 (checksummed
        # framing); got_message/on_version adjust these after the handshake.
        self.ver_send = 209
        self.ver_recv = 209
        self.last_sent = 0
        self.state = "connecting"
        self.network = net
        self.cb = callback
        self.disconnect = False
        self.nServices = 0
        if send_version:
            # stuff version msg into sendbuf
            vt = msg_version()
            vt.nServices = services
            vt.addrTo.ip = self.dstaddr
            vt.addrTo.port = self.dstport
            vt.addrFrom.ip = "0.0.0.0"
            vt.addrFrom.port = 0
            self.send_message(vt, True)
        logger.info('Connecting to Honeycomb Node: %s:%d' % (self.dstaddr, self.dstport))
        try:
            self.connect((dstaddr, dstport))
        except:
            self.handle_close()
        self.rpc = rpc
    def handle_connect(self):
        if self.state != "connected":
            logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
            self.state = "connected"
            self.cb.on_open(self)
    def handle_close(self):
        logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
        self.state = "closed"
        self.recvbuf = b""
        self.sendbuf = b""
        try:
            self.close()
        except:
            pass
        self.cb.on_close(self)
    def handle_read(self):
        try:
            t = self.recv(8192)
            if len(t) > 0:
                self.recvbuf += t
                self.got_data()
        except:
            pass
    def readable(self):
        return True
    def writable(self):
        with mininode_lock:
            pre_connection = self.state == "connecting"
            length = len(self.sendbuf)
        return (length > 0 or pre_connection)
    def handle_write(self):
        with mininode_lock:
            # asyncore does not expose socket connection, only the first read/write
            # event, thus we must check connection manually here to know when we
            # actually connect
            if self.state == "connecting":
                self.handle_connect()
            if not self.writable():
                return
            try:
                sent = self.send(self.sendbuf)
            except:
                self.handle_close()
                return
            self.sendbuf = self.sendbuf[sent:]
    def got_data(self):
        """Parse as many complete framed messages as the receive buffer holds.

        Frame layout: magic(4) + command(12, NUL padded) + length(4)
        [+ checksum(4) when protocol >= 209] + payload.
        """
        try:
            while True:
                if len(self.recvbuf) < 4:
                    return
                if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
                    raise ValueError("got garbage %s" % repr(self.recvbuf))
                if self.ver_recv < 209:
                    # Pre-209 framing carries no checksum.
                    if len(self.recvbuf) < 4 + 12 + 4:
                        return
                    command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                    msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                    checksum = None
                    if len(self.recvbuf) < 4 + 12 + 4 + msglen:
                        return
                    msg = self.recvbuf[4+12+4:4+12+4+msglen]
                    self.recvbuf = self.recvbuf[4+12+4+msglen:]
                else:
                    if len(self.recvbuf) < 4 + 12 + 4 + 4:
                        return
                    command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
                    msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
                    checksum = self.recvbuf[4+12+4:4+12+4+4]
                    if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
                        return
                    msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
                    # Checksum is the first 4 bytes of double-SHA256(payload).
                    th = sha256(msg)
                    h = sha256(th)
                    if checksum != h[:4]:
                        raise ValueError("got bad checksum " + repr(self.recvbuf))
                    self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
                if command in self.messagemap:
                    f = BytesIO(msg)
                    t = self.messagemap[command]()
                    t.deserialize(f)
                    self.got_message(t)
                else:
                    logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
        except Exception as e:
            # NOTE(review): 'got_data:' contains no %-placeholder for repr(e),
            # so logging will report a string-formatting error here; consider
            # logger.exception('got_data: %r', e).
            logger.exception('got_data:', repr(e))
    def send_message(self, message, pushbuf=False):
        """Frame *message* and queue it; pushbuf=True allows queueing pre-connect."""
        if self.state != "connected" and not pushbuf:
            raise IOError('Not connected, no pushbuf')
        self._log_message("send", message)
        command = message.command
        data = message.serialize()
        tmsg = self.MAGIC_BYTES[self.network]
        tmsg += command
        tmsg += b"\x00" * (12 - len(command))
        tmsg += struct.pack("<I", len(data))
        if self.ver_send >= 209:
            # Protocol >= 209 frames include a double-SHA256 checksum.
            th = sha256(data)
            h = sha256(th)
            tmsg += h[:4]
        tmsg += data
        with mininode_lock:
            self.sendbuf += tmsg
            self.last_sent = time.time()
    def got_message(self, message):
        if message.command == b"version":
            if message.nVersion <= BIP0031_VERSION:
                # NOTE(review): messagemap is a class attribute, so this
                # mutation affects every NodeConn instance, not just this one.
                self.messagemap[b'ping'] = msg_ping_prebip31
        # Keep-alive: ping if nothing was sent for 30 minutes.
        if self.last_sent + 30 * 60 < time.time():
            self.send_message(self.messagemap[b'ping']())
        self._log_message("receive", message)
        self.cb.deliver(self, message)
    def _log_message(self, direction, msg):
        # direction is "send" or "receive"; message reprs are truncated to
        # keep debug logs readable.
        if direction == "send":
            log_message = "Send message to "
        elif direction == "receive":
            log_message = "Received message from "
        log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
        if len(log_message) > 500:
            log_message += "... (msg truncated)"
        logger.debug(log_message)
    def disconnect_node(self):
        # Flag only; NetworkThread performs the actual close outside asyncore.
        self.disconnect = True
class NetworkThread(Thread):
    """Background thread that runs the asyncore loop for all NodeConns."""
    def run(self):
        # Loop until every connection has been removed from the socket map.
        while mininode_socket_map:
            # We check for whether to disconnect outside of the asyncore
            # loop to workaround the behavior of asyncore when using
            # select
            disconnected = []
            for fd, obj in mininode_socket_map.items():
                if obj.disconnect:
                    disconnected.append(obj)
            [ obj.handle_close() for obj in disconnected ]
            asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
    """Raised when a p2p or rpc disconnect is detected before a test completes."""
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
| 30.923118 | 262 | 0.597593 |
import asyncore
from codecs import encode
from collections import defaultdict
import copy
import hashlib
from io import BytesIO
import logging
import random
import socket
import struct
import sys
import time
from threading import RLock, Thread
import honeycomb_scrypt
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
# Protocol and framework constants.
BIP0031_VERSION = 60000  # peers newer than this use ping nonces (see on_ping)
MY_VERSION = 80014
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000  # base monetary units per coin
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
logger = logging.getLogger("TestFramework.mininode")
# One global asyncore socket map and lock shared by all NodeConn instances.
mininode_socket_map = dict()
mininode_lock = RLock()
def sha256(s):
    """Return the SHA-256 digest of *s* as bytes."""
    return hashlib.sha256(s).digest()
def ripemd160(s):
    """Return the RIPEMD-160 digest of *s* as bytes.

    NOTE(review): relies on the OpenSSL backend exposing 'ripemd160';
    some modern builds omit it, making hashlib.new() raise — confirm in CI.
    """
    return hashlib.new('ripemd160', s).digest()
def hash256(s):
    """Double-SHA256: the standard Bitcoin hash of *s*."""
    return hashlib.sha256(hashlib.sha256(s).digest()).digest()
def ser_compact_size(l):
    """Serialize an integer in Bitcoin's CompactSize (varint) encoding."""
    if l < 253:
        return struct.pack("B", l)
    if l < 0x10000:
        return struct.pack("<BH", 253, l)
    if l < 0x100000000:
        return struct.pack("<BI", 254, l)
    return struct.pack("<BQ", 255, l)
def deser_compact_size(f):
    """Read a CompactSize-encoded integer from file-like object *f*."""
    marker = struct.unpack("<B", f.read(1))[0]
    if marker < 253:
        return marker
    width, fmt = {253: (2, "<H"), 254: (4, "<I"), 255: (8, "<Q")}[marker]
    return struct.unpack(fmt, f.read(width))[0]
def deser_string(f):
    """Read a CompactSize-length-prefixed byte string from *f*."""
    length = deser_compact_size(f)
    return f.read(length)
def ser_string(s):
    """Serialize bytes *s* as a CompactSize length followed by the data."""
    return ser_compact_size(len(s)) + s
def deser_uint256(f):
    """Read a 256-bit little-endian unsigned integer from *f*."""
    acc = 0
    for shift in range(0, 256, 32):
        acc |= struct.unpack("<I", f.read(4))[0] << shift
    return acc
def ser_uint256(u):
    """Serialize the low 256 bits of *u* as 32 little-endian bytes."""
    chunks = []
    for _ in range(8):
        chunks.append(struct.pack("<I", u & 0xFFFFFFFF))
        u >>= 32
    return b"".join(chunks)
def uint256_from_str(s):
    """Interpret the first 32 bytes of *s* as a little-endian 256-bit integer."""
    words = struct.unpack("<IIIIIIII", s[:32])
    return sum(w << (32 * i) for i, w in enumerate(words))
def uint256_from_compact(c):
    """Expand a compact 'nBits' target: mantissa scaled by a byte exponent."""
    exponent = (c >> 24) & 0xFF
    mantissa = c & 0xFFFFFF
    return mantissa << (8 * (exponent - 3))
def deser_vector(f, c):
    """Read a CompactSize-prefixed vector of objects, each built via c().deserialize(f)."""
    count = deser_compact_size(f)
    items = []
    for _ in range(count):
        obj = c()
        obj.deserialize(f)
        items.append(obj)
    return items
def ser_vector(l, ser_function_name=None):
    """Serialize a CompactSize-prefixed vector of objects.

    ser_function_name selects an alternative per-object serializer method
    (e.g. "serialize_with_witness"); default is .serialize().
    """
    parts = [ser_compact_size(len(l))]
    for item in l:
        method = getattr(item, ser_function_name) if ser_function_name else item.serialize
        parts.append(method())
    return b"".join(parts)
def deser_uint256_vector(f):
    """Read a CompactSize-prefixed vector of 256-bit integers."""
    return [deser_uint256(f) for _ in range(deser_compact_size(f))]
def ser_uint256_vector(l):
    """Serialize a CompactSize-prefixed vector of 256-bit integers."""
    return ser_compact_size(len(l)) + b"".join(ser_uint256(u) for u in l)
def deser_string_vector(f):
    """Read a CompactSize-prefixed vector of length-prefixed byte strings."""
    return [deser_string(f) for _ in range(deser_compact_size(f))]
def ser_string_vector(l):
    """Serialize a CompactSize-prefixed vector of byte strings."""
    return ser_compact_size(len(l)) + b"".join(ser_string(sv) for sv in l)
def deser_int_vector(f):
    """Read a CompactSize-prefixed vector of signed 32-bit integers."""
    count = deser_compact_size(f)
    return [struct.unpack("<i", f.read(4))[0] for _ in range(count)]
def ser_int_vector(l):
    """Serialize a CompactSize-prefixed vector of signed 32-bit integers."""
    return ser_compact_size(len(l)) + b"".join(struct.pack("<i", i) for i in l)
def FromHex(obj, hex_string):
    """Deserialize *obj* from a hex string and return it (for chaining)."""
    obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
    return obj
def ToHex(obj):
    """Return the hex-string serialization of *obj*."""
    return bytes_to_hex_str(obj.serialize())
class CAddress(object):
    """Network address record: service bits, IPv4 address, and port."""
    def __init__(self):
        self.nServices = 1
        # 10 zero bytes + 0xffff: the IPv4-mapped-IPv6 prefix.
        self.pchReserved = b"\x00" * 10 + b"\xff" * 2
        self.ip = "0.0.0.0"
        self.port = 0

    def deserialize(self, f):
        (self.nServices,) = struct.unpack("<Q", f.read(8))
        self.pchReserved = f.read(12)
        self.ip = socket.inet_ntoa(f.read(4))
        (self.port,) = struct.unpack(">H", f.read(2))

    def serialize(self):
        parts = [
            struct.pack("<Q", self.nServices),
            self.pchReserved,
            socket.inet_aton(self.ip),
            struct.pack(">H", self.port),
        ]
        return b"".join(parts)

    def __repr__(self):
        return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
                                                         self.ip, self.port)
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
    """Inventory vector entry: an object type plus its 256-bit hash."""
    typemap = {
        0: "Error",
        1: "TX",
        2: "Block",
        1 | MSG_WITNESS_FLAG: "WitnessTx",
        2 | MSG_WITNESS_FLAG: "WitnessBlock",
        4: "CompactBlock"
    }

    def __init__(self, t=0, h=0):
        self.type = t
        self.hash = h

    def deserialize(self, f):
        (self.type,) = struct.unpack("<i", f.read(4))
        self.hash = deser_uint256(f)

    def serialize(self):
        return struct.pack("<i", self.type) + ser_uint256(self.hash)

    def __repr__(self):
        return "CInv(type=%s hash=%064x)" \
            % (self.typemap[self.type], self.hash)
class CBlockLocator(object):
    """Block locator: protocol version plus a vector of block hashes."""
    def __init__(self):
        self.nVersion = MY_VERSION
        self.vHave = []

    def deserialize(self, f):
        (self.nVersion,) = struct.unpack("<i", f.read(4))
        self.vHave = deser_uint256_vector(f)

    def serialize(self):
        return struct.pack("<i", self.nVersion) + ser_uint256_vector(self.vHave)

    def __repr__(self):
        return "CBlockLocator(nVersion=%i vHave=%s)" \
            % (self.nVersion, repr(self.vHave))
class COutPoint(object):
    """Reference to a previous transaction output: (txid hash, output index)."""
    def __init__(self, hash=0, n=0):
        self.hash = hash
        self.n = n

    def deserialize(self, f):
        self.hash = deser_uint256(f)
        (self.n,) = struct.unpack("<I", f.read(4))

    def serialize(self):
        return ser_uint256(self.hash) + struct.pack("<I", self.n)

    def __repr__(self):
        return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
    """Transaction input: previous outpoint, signature script, sequence."""
    def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
        self.prevout = COutPoint() if outpoint is None else outpoint
        self.scriptSig = scriptSig
        self.nSequence = nSequence

    def deserialize(self, f):
        self.prevout = COutPoint()
        self.prevout.deserialize(f)
        self.scriptSig = deser_string(f)
        (self.nSequence,) = struct.unpack("<I", f.read(4))

    def serialize(self):
        return (self.prevout.serialize()
                + ser_string(self.scriptSig)
                + struct.pack("<I", self.nSequence))

    def __repr__(self):
        return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
            % (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
               self.nSequence)
class CTxOut(object):
    """Transaction output: amount (base units) plus locking script."""
    def __init__(self, nValue=0, scriptPubKey=b""):
        self.nValue = nValue
        self.scriptPubKey = scriptPubKey

    def deserialize(self, f):
        (self.nValue,) = struct.unpack("<q", f.read(8))
        self.scriptPubKey = deser_string(f)

    def serialize(self):
        return struct.pack("<q", self.nValue) + ser_string(self.scriptPubKey)

    def __repr__(self):
        return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
            % (self.nValue // COIN, self.nValue % COIN,
               bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
    """A single input's witness: a stack of byte strings."""
    def __init__(self):
        self.stack = []

    def __repr__(self):
        return "CScriptWitness(%s)" % \
            (",".join([bytes_to_hex_str(x) for x in self.stack]))

    def is_null(self):
        return not self.stack
class CTxInWitness(object):
    """Witness wrapper for one transaction input."""
    def __init__(self):
        self.scriptWitness = CScriptWitness()

    def deserialize(self, f):
        self.scriptWitness.stack = deser_string_vector(f)

    def serialize(self):
        return ser_string_vector(self.scriptWitness.stack)

    def __repr__(self):
        return repr(self.scriptWitness)

    def is_null(self):
        return self.scriptWitness.is_null()
class CTxWitness(object):
    """Witness data for all inputs of a transaction."""
    def __init__(self):
        self.vtxinwit = []

    def deserialize(self, f):
        # One entry per existing input; the caller sizes vtxinwit first.
        for entry in self.vtxinwit:
            entry.deserialize(f)

    def serialize(self):
        return b"".join(entry.serialize() for entry in self.vtxinwit)

    def __repr__(self):
        return "CTxWitness(%s)" % (';'.join(repr(entry) for entry in self.vtxinwit))

    def is_null(self):
        return all(entry.is_null() for entry in self.vtxinwit)
class CTransaction(object):
    """A transaction, with optional segregated-witness data.

    serialize_without_witness() emits the legacy format; when witness data is
    present, serialize_with_witness() emits the extended format with the
    marker/flags bytes and per-input witness stacks.
    """
    def __init__(self, tx=None):
        # Copy-construct from tx when given, otherwise start empty.
        if tx is None:
            self.nVersion = 1
            self.vin = []
            self.vout = []
            self.wit = CTxWitness()
            self.nLockTime = 0
            self.sha256 = None  # cached txid (int), None until computed
            self.hash = None    # cached txid as hex string (display byte order)
        else:
            self.nVersion = tx.nVersion
            self.vin = copy.deepcopy(tx.vin)
            self.vout = copy.deepcopy(tx.vout)
            self.nLockTime = tx.nLockTime
            self.sha256 = tx.sha256
            self.hash = tx.hash
            self.wit = copy.deepcopy(tx.wit)
    def deserialize(self, f):
        """Parse either legacy or extended (witness) transaction encoding."""
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vin = deser_vector(f, CTxIn)
        flags = 0
        if len(self.vin) == 0:
            # An "empty" vin is the extended-format marker byte; the next
            # byte holds the flags and the real vin/vout follow.
            flags = struct.unpack("<B", f.read(1))[0]
            # Not sure why flags can't be zero, but this
            if (flags != 0):
                self.vin = deser_vector(f, CTxIn)
                self.vout = deser_vector(f, CTxOut)
        else:
            self.vout = deser_vector(f, CTxOut)
        if flags != 0:
            # One witness entry per input, read in input order.
            self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
            self.wit.deserialize(f)
        self.nLockTime = struct.unpack("<I", f.read(4))[0]
        # Invalidate cached hashes; recomputed lazily by calc_sha256().
        self.sha256 = None
        self.hash = None
    def serialize_without_witness(self):
        """Serialize in the legacy (pre-witness) format."""
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        r += struct.pack("<I", self.nLockTime)
        return r
    def serialize_with_witness(self):
        """Serialize with witness data (marker/flags) when any is present."""
        flags = 0
        if not self.wit.is_null():
            flags |= 1
        r = b""
        r += struct.pack("<i", self.nVersion)
        if flags:
            # Empty vector acts as the marker byte, then the flags byte.
            dummy = []
            r += ser_vector(dummy)
            r += struct.pack("<B", flags)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        if flags & 1:
            if (len(self.wit.vtxinwit) != len(self.vin)):
                # vtxinwit must parallel vin: truncate extras, pad with nulls.
                self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
                for i in range(len(self.wit.vtxinwit), len(self.vin)):
                    self.wit.vtxinwit.append(CTxInWitness())
            r += self.wit.serialize()
        r += struct.pack("<I", self.nLockTime)
        return r
    def serialize(self):
        # Regular serialization is without witness -- witness is only
        # included on explicit request (serialize_with_witness).
        return self.serialize_without_witness()
    def rehash(self):
        """Force recomputation of the cached txid."""
        self.sha256 = None
        self.calc_sha256()
    def calc_sha256(self, with_witness=False):
        """Compute/cache txid; with_witness=True returns the witness hash
        without touching the cached txid fields."""
        if with_witness:
            # Don't cache the witness hash -- it would clobber the txid.
            return uint256_from_str(hash256(self.serialize_with_witness()))
        if self.sha256 is None:
            self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
        self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
    def is_valid(self):
        """Basic sanity check: all output values in [0, 21M * COIN]."""
        self.calc_sha256()
        for tout in self.vout:
            if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
                return False
        return True
    def __repr__(self):
        return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
            % (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader(object):
    """An 80-byte block header.

    Caches three derived values: sha256 (block id as int), hash (block id as
    hex string), and scrypt256 (the proof-of-work hash computed via
    honeycomb_scrypt.getPoWHash).
    """
    def __init__(self, header=None):
        # Copy-construct from header when given, otherwise null header.
        if header is None:
            self.set_null()
        else:
            self.nVersion = header.nVersion
            self.hashPrevBlock = header.hashPrevBlock
            self.hashMerkleRoot = header.hashMerkleRoot
            self.nTime = header.nTime
            self.nBits = header.nBits
            self.nNonce = header.nNonce
            self.sha256 = header.sha256
            self.hash = header.hash
            self.scrypt256 = header.scrypt256
            self.calc_sha256()
    def set_null(self):
        """Reset all fields to their empty/zero values."""
        self.nVersion = 1
        self.hashPrevBlock = 0
        self.hashMerkleRoot = 0
        self.nTime = 0
        self.nBits = 0
        self.nNonce = 0
        self.sha256 = None
        self.hash = None
        self.scrypt256 = None
    def deserialize(self, f):
        """Parse the 80-byte header and invalidate the cached hashes."""
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.hashPrevBlock = deser_uint256(f)
        self.hashMerkleRoot = deser_uint256(f)
        self.nTime = struct.unpack("<I", f.read(4))[0]
        self.nBits = struct.unpack("<I", f.read(4))[0]
        self.nNonce = struct.unpack("<I", f.read(4))[0]
        self.sha256 = None
        self.hash = None
        self.scrypt256 = None
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_uint256(self.hashPrevBlock)
        r += ser_uint256(self.hashMerkleRoot)
        r += struct.pack("<I", self.nTime)
        r += struct.pack("<I", self.nBits)
        r += struct.pack("<I", self.nNonce)
        return r
    def calc_sha256(self):
        """Compute and cache sha256/hash/scrypt256 if not already cached."""
        if self.sha256 is None:
            r = b""
            r += struct.pack("<i", self.nVersion)
            r += ser_uint256(self.hashPrevBlock)
            r += ser_uint256(self.hashMerkleRoot)
            r += struct.pack("<I", self.nTime)
            r += struct.pack("<I", self.nBits)
            r += struct.pack("<I", self.nNonce)
            self.sha256 = uint256_from_str(hash256(r))
            self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
            # Proof-of-work hash uses the scrypt-based honeycomb hasher.
            self.scrypt256 = uint256_from_str(honeycomb_scrypt.getPoWHash(r))
    def rehash(self):
        """Force recomputation of all cached hashes; returns sha256."""
        self.sha256 = None
        self.scrypt256 = None
        self.calc_sha256()
        return self.sha256
    def __repr__(self):
        return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
    """A full block: header plus its transactions (vtx)."""
    def __init__(self, header=None):
        super(CBlock, self).__init__(header)
        self.vtx = []
    def deserialize(self, f):
        # Header first, then the transaction vector.
        super(CBlock, self).deserialize(f)
        self.vtx = deser_vector(f, CTransaction)
    def serialize(self, with_witness=False):
        r = b""
        r += super(CBlock, self).serialize()
        if with_witness:
            # Serialize each tx via its serialize_with_witness method.
            r += ser_vector(self.vtx, "serialize_with_witness")
        else:
            r += ser_vector(self.vtx)
        return r
    # Calculate the merkle root given a vector of transaction hashes
    @classmethod
    def get_merkle_root(cls, hashes):
        """Fold a list of serialized hashes up to the merkle root (as int).

        Odd levels duplicate the last entry (i2 clamps to the final index).
        """
        while len(hashes) > 1:
            newhashes = []
            for i in range(0, len(hashes), 2):
                i2 = min(i+1, len(hashes)-1)
                newhashes.append(hash256(hashes[i] + hashes[i2]))
            hashes = newhashes
        return uint256_from_str(hashes[0])
    def calc_merkle_root(self):
        """Merkle root over the txids of this block's transactions."""
        hashes = []
        for tx in self.vtx:
            tx.calc_sha256()
            hashes.append(ser_uint256(tx.sha256))
        return self.get_merkle_root(hashes)
    def calc_witness_merkle_root(self):
        # For witness root purposes, the hash of the
        # coinbase, with witness, is defined to be 0...0
        hashes = [ser_uint256(0)]
        for tx in self.vtx[1:]:
            # Calculate the hashes with witness data
            hashes.append(ser_uint256(tx.calc_sha256(True)))
        return self.get_merkle_root(hashes)
    def is_valid(self):
        """Check PoW target (against scrypt256), each tx, and merkle root."""
        self.calc_sha256()
        target = uint256_from_compact(self.nBits)
        if self.scrypt256 > target:
            return False
        for tx in self.vtx:
            if not tx.is_valid():
                return False
        if self.calc_merkle_root() != self.hashMerkleRoot:
            return False
        return True
    def solve(self):
        """Grind nNonce until the scrypt PoW hash meets the target."""
        self.rehash()
        target = uint256_from_compact(self.nBits)
        while self.scrypt256 > target:
            self.nNonce += 1
            self.rehash()
    def __repr__(self):
        return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
    """The unsigned payload of a network alert message (legacy alert system)."""
    def __init__(self):
        self.nVersion = 1
        self.nRelayUntil = 0   # relay deadline (unix time)
        self.nExpiration = 0   # alert expiry (unix time)
        self.nID = 0
        self.nCancel = 0       # cancel alerts with id <= nCancel
        self.setCancel = []
        self.nMinVer = 0       # applicable protocol version range
        self.nMaxVer = 0
        self.setSubVer = []
        self.nPriority = 0
        self.strComment = b""
        self.strStatusBar = b""
        self.strReserved = b""
    def deserialize(self, f):
        # Field order mirrors serialize() exactly.
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
        self.nExpiration = struct.unpack("<q", f.read(8))[0]
        self.nID = struct.unpack("<i", f.read(4))[0]
        self.nCancel = struct.unpack("<i", f.read(4))[0]
        self.setCancel = deser_int_vector(f)
        self.nMinVer = struct.unpack("<i", f.read(4))[0]
        self.nMaxVer = struct.unpack("<i", f.read(4))[0]
        self.setSubVer = deser_string_vector(f)
        self.nPriority = struct.unpack("<i", f.read(4))[0]
        self.strComment = deser_string(f)
        self.strStatusBar = deser_string(f)
        self.strReserved = deser_string(f)
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<q", self.nRelayUntil)
        r += struct.pack("<q", self.nExpiration)
        r += struct.pack("<i", self.nID)
        r += struct.pack("<i", self.nCancel)
        r += ser_int_vector(self.setCancel)
        r += struct.pack("<i", self.nMinVer)
        r += struct.pack("<i", self.nMaxVer)
        r += ser_string_vector(self.setSubVer)
        r += struct.pack("<i", self.nPriority)
        r += ser_string(self.strComment)
        r += ser_string(self.strStatusBar)
        r += ser_string(self.strReserved)
        return r
    def __repr__(self):
        return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
            % (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
               self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
               self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
    """A signed alert: the serialized CUnsignedAlert plus its signature."""

    def __init__(self):
        self.vchMsg = b""
        self.vchSig = b""

    def deserialize(self, f):
        self.vchMsg = deser_string(f)
        self.vchSig = deser_string(f)

    def serialize(self):
        return ser_string(self.vchMsg) + ser_string(self.vchSig)

    def __repr__(self):
        return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
            % (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
    """A transaction sent explicitly inside a compact block, paired with its
    index (differentially encoded on the wire)."""

    def __init__(self, index=0, tx = None):
        self.index = index
        self.tx = tx

    def deserialize(self, f):
        self.index = deser_compact_size(f)
        self.tx = CTransaction()
        self.tx.deserialize(f)

    def serialize(self, with_witness=False):
        body = self.tx.serialize_with_witness() if with_witness \
            else self.tx.serialize_without_witness()
        return ser_compact_size(self.index) + body

    def serialize_with_witness(self):
        return self.serialize(with_witness=True)

    def __repr__(self):
        return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
    """Wire format of a BIP 152 compact block (cmpctblock payload):
    header, nonce, shortid list, and prefilled transactions."""
    def __init__(self):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids_length = 0
        self.shortids = []
        self.prefilled_txn_length = 0
        self.prefilled_txn = []

    def deserialize(self, f):
        self.header.deserialize(f)
        self.nonce = struct.unpack("<Q", f.read(8))[0]
        self.shortids_length = deser_compact_size(f)
        # Bug fix: reset rather than append, so a reused object doesn't
        # accumulate shortids from a previous deserialization.
        self.shortids = []
        for i in range(self.shortids_length):
            # shortids are defined to be 6 bytes in the spec, so append
            # two zero bytes and read it in as an 8-byte number
            self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
        self.prefilled_txn = deser_vector(f, PrefilledTransaction)
        self.prefilled_txn_length = len(self.prefilled_txn)

    # When using version 2 compact blocks, we must serialize with_witness.
    def serialize(self, with_witness=False):
        r = b""
        r += self.header.serialize()
        r += struct.pack("<Q", self.nonce)
        r += ser_compact_size(self.shortids_length)
        for x in self.shortids:
            # We only want the first 6 bytes
            r += struct.pack("<Q", x)[0:6]
        if with_witness:
            r += ser_vector(self.prefilled_txn, "serialize_with_witness")
        else:
            r += ser_vector(self.prefilled_txn)
        return r

    def __repr__(self):
        return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
    """Compact block that always serializes with witness data (version 2)."""
    def serialize(self):
        return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
    """Return the BIP 152 shortid: the low 6 bytes of SipHash(k0, k1, hash)."""
    return siphash256(k0, k1, tx_hash) & 0x0000ffffffffffff
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
    """In-memory compact-block representation.

    Unlike P2PHeaderAndShortIDs, prefilled-transaction indexes are stored as
    absolute positions in the block (the wire format encodes them
    differentially); the constructor and to_p2p() convert between the two.
    """
    def __init__(self, p2pheaders_and_shortids = None):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids = []
        self.prefilled_txn = []
        self.use_witness = False

        # Idiom fix: identity comparison with None, not "!= None".
        if p2pheaders_and_shortids is not None:
            self.header = p2pheaders_and_shortids.header
            self.nonce = p2pheaders_and_shortids.nonce
            self.shortids = p2pheaders_and_shortids.shortids
            last_index = -1
            for x in p2pheaders_and_shortids.prefilled_txn:
                # Undo the differential index encoding.
                self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
                last_index = self.prefilled_txn[-1].index

    def to_p2p(self):
        """Convert back to the wire representation (differential indexes)."""
        if self.use_witness:
            ret = P2PHeaderAndShortWitnessIDs()
        else:
            ret = P2PHeaderAndShortIDs()
        ret.header = self.header
        ret.nonce = self.nonce
        ret.shortids_length = len(self.shortids)
        ret.shortids = self.shortids
        ret.prefilled_txn_length = len(self.prefilled_txn)
        ret.prefilled_txn = []
        last_index = -1
        for x in self.prefilled_txn:
            ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
            last_index = x.index
        return ret

    def get_siphash_keys(self):
        """Derive the two SipHash keys from sha256(header || nonce)."""
        header_nonce = self.header.serialize()
        header_nonce += struct.pack("<Q", self.nonce)
        hash_header_nonce_as_str = sha256(header_nonce)
        key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
        key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
        return [ key0, key1 ]

    # Version 2 compact blocks use wtxid in shortids (rather than txid)
    def initialize_from_block(self, block, nonce=0, prefill_list = None, use_witness = False):
        """Populate from a CBlock, prefilling the txs in prefill_list
        (defaults to just the coinbase) and shortid-ing the rest."""
        # Bug fix: avoid a mutable default argument; None means "[0]".
        if prefill_list is None:
            prefill_list = [0]
        self.header = CBlockHeader(block)
        self.nonce = nonce
        self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
        self.shortids = []
        self.use_witness = use_witness
        [k0, k1] = self.get_siphash_keys()
        for i in range(len(block.vtx)):
            if i not in prefill_list:
                tx_hash = block.vtx[i].sha256
                if use_witness:
                    tx_hash = block.vtx[i].calc_sha256(with_witness=True)
                self.shortids.append(calculate_shortid(k0, k1, tx_hash))

    def __repr__(self):
        return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
    """BIP 152 'getblocktxn' payload: a block hash plus differentially
    encoded indexes of the transactions being requested."""

    def __init__(self, blockhash=0, indexes = None):
        self.blockhash = blockhash
        # Idiom fix: identity comparison with None, not "!= None".
        self.indexes = indexes if indexes is not None else []

    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        indexes_length = deser_compact_size(f)
        # Bug fix: reset rather than append, so deserializing into a reused
        # (or index-preloaded) object doesn't accumulate stale entries.
        self.indexes = []
        for i in range(indexes_length):
            self.indexes.append(deser_compact_size(f))

    def serialize(self):
        r = b""
        r += ser_uint256(self.blockhash)
        r += ser_compact_size(len(self.indexes))
        for x in self.indexes:
            r += ser_compact_size(x)
        return r

    # helper to set the differentially encoded indexes from absolute ones
    def from_absolute(self, absolute_indexes):
        self.indexes = []
        last_index = -1
        for x in absolute_indexes:
            self.indexes.append(x-last_index-1)
            last_index = x

    def to_absolute(self):
        """Decode self.indexes back to absolute positions in the block."""
        absolute_indexes = []
        last_index = -1
        for x in self.indexes:
            absolute_indexes.append(x+last_index+1)
            last_index = absolute_indexes[-1]
        return absolute_indexes

    def __repr__(self):
        return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
    """BIP 152 'blocktxn' payload: a block hash plus the requested txs."""

    def __init__(self, blockhash=0, transactions = None):
        self.blockhash = blockhash
        self.transactions = [] if transactions is None else transactions

    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        self.transactions = deser_vector(f, CTransaction)

    def serialize(self, with_witness=False):
        r = ser_uint256(self.blockhash)
        if with_witness:
            r += ser_vector(self.transactions, "serialize_with_witness")
        else:
            r += ser_vector(self.transactions)
        return r

    def __repr__(self):
        return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
    """'version' handshake message; fields after addrTo are gated on the
    peer's protocol version during deserialization."""
    command = b"version"
    def __init__(self):
        self.nVersion = MY_VERSION
        self.nServices = 1
        self.nTime = int(time.time())
        self.addrTo = CAddress()
        self.addrFrom = CAddress()
        self.nNonce = random.getrandbits(64)
        self.strSubVer = MY_SUBVERSION
        self.nStartingHeight = -1
        self.nRelay = MY_RELAY
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        if self.nVersion == 10300:
            # Historical quirk: version 10300 is treated as 300.
            self.nVersion = 300
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.nTime = struct.unpack("<q", f.read(8))[0]
        self.addrTo = CAddress()
        self.addrTo.deserialize(f)
        if self.nVersion >= 106:
            self.addrFrom = CAddress()
            self.addrFrom.deserialize(f)
            self.nNonce = struct.unpack("<Q", f.read(8))[0]
            self.strSubVer = deser_string(f)
        else:
            # Very old peers don't send these fields at all.
            self.addrFrom = None
            self.nNonce = None
            self.strSubVer = None
            self.nStartingHeight = None
        if self.nVersion >= 209:
            self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
        else:
            self.nStartingHeight = None
        if self.nVersion >= 70001:
            # Relay field is optional for version 70001 onwards
            try:
                self.nRelay = struct.unpack("<b", f.read(1))[0]
            except:
                self.nRelay = 0
        else:
            self.nRelay = 0
    def serialize(self):
        # Always emits the full modern layout, including nRelay.
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<Q", self.nServices)
        r += struct.pack("<q", self.nTime)
        r += self.addrTo.serialize()
        r += self.addrFrom.serialize()
        r += struct.pack("<Q", self.nNonce)
        r += ser_string(self.strSubVer)
        r += struct.pack("<i", self.nStartingHeight)
        r += struct.pack("<b", self.nRelay)
        return r
    def __repr__(self):
        return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
            % (self.nVersion, self.nServices, time.ctime(self.nTime),
               repr(self.addrTo), repr(self.addrFrom), self.nNonce,
               self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
    """'verack' handshake acknowledgement; carries no payload."""
    command = b"verack"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_verack()"
class msg_addr(object):
    """'addr' message: a vector of known peer addresses (CAddress)."""
    command = b"addr"
    def __init__(self):
        self.addrs = []
    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress)
    def serialize(self):
        return ser_vector(self.addrs)
    def __repr__(self):
        return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
    """'alert' message wrapping a single CAlert (legacy alert system)."""
    command = b"alert"
    def __init__(self):
        self.alert = CAlert()
    def deserialize(self, f):
        self.alert = CAlert()
        self.alert.deserialize(f)
    def serialize(self):
        r = b""
        r += self.alert.serialize()
        return r
    def __repr__(self):
        return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
    """'inv' message: announces objects (blocks/txs) as CInv entries."""
    command = b"inv"

    def __init__(self, inv=None):
        # Fresh list per instance when no vector is supplied.
        self.inv = [] if inv is None else inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
    """'getdata' message: requests objects previously announced via 'inv'."""
    command = b"getdata"

    def __init__(self, inv=None):
        self.inv = [] if inv is None else inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
    """'getblocks' message: block locator plus a stop hash (0 = no limit)."""
    command = b"getblocks"
    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0
    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)
    def serialize(self):
        r = b""
        r += self.locator.serialize()
        r += ser_uint256(self.hashstop)
        return r
    def __repr__(self):
        return "msg_getblocks(locator=%s hashstop=%064x)" \
            % (repr(self.locator), self.hashstop)
class msg_tx(object):
    """'tx' message carrying a single transaction (non-witness serialization).

    Bug fix: the original default argument was a single shared CTransaction()
    instance, so deserialize() on one default-constructed msg_tx mutated the
    transaction of every other default-constructed msg_tx. Use a None
    sentinel and create a fresh CTransaction per instance instead.
    """
    command = b"tx"
    def __init__(self, tx=None):
        self.tx = CTransaction() if tx is None else tx
    def deserialize(self, f):
        self.tx.deserialize(f)
    def serialize(self):
        return self.tx.serialize_without_witness()
    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
    """'tx' message serialized with witness data included."""
    def serialize(self):
        return self.tx.serialize_with_witness()
class msg_block(object):
    """'block' message carrying a full CBlock (non-witness serialization)."""
    command = b"block"
    def __init__(self, block=None):
        # None means "construct an empty block" rather than sharing one.
        if block is None:
            self.block = CBlock()
        else:
            self.block = block
    def deserialize(self, f):
        self.block.deserialize(f)
    def serialize(self):
        return self.block.serialize()
    def __repr__(self):
        return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
    """Raw message with caller-supplied command and pre-serialized payload,
    for tests that need byte-level control over what goes on the wire."""
    def __init__(self, command, data=None):
        self.command = command
        self.data = data
    def serialize(self):
        return self.data
    def __repr__(self):
        return "msg_generic()"
class msg_witness_block(msg_block):
    """'block' message serialized with witness data included."""
    def serialize(self):
        r = self.block.serialize(with_witness=True)
        return r
class msg_getaddr(object):
    """'getaddr' message: requests known addresses; carries no payload."""
    command = b"getaddr"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_getaddr()"
class msg_ping_prebip31(object):
    """Pre-BIP 31 'ping': no nonce payload (used for peers below that version)."""
    command = b"ping"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_ping() (pre-bip31)"
class msg_ping(object):
    """'ping' message (BIP 31): carries a nonce echoed back in 'pong'."""
    command = b"ping"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        self.nonce = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
    """'pong' message: echoes the nonce of a received 'ping'."""
    command = b"pong"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        self.nonce = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
    """'mempool' message: requests the peer's mempool; carries no payload."""
    command = b"mempool"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_mempool()"
class msg_sendheaders(object):
    """'sendheaders' (BIP 130): request header announcements; no payload."""
    command = b"sendheaders"
    def __init__(self):
        pass
    def deserialize(self, f):
        pass
    def serialize(self):
        return b""
    def __repr__(self):
        return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
    """'getheaders' message: block locator plus a stop hash (0 = as many
    headers as possible)."""
    command = b"getheaders"
    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0
    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)
    def serialize(self):
        r = b""
        r += self.locator.serialize()
        r += ser_uint256(self.hashstop)
        return r
    def __repr__(self):
        return "msg_getheaders(locator=%s, stop=%064x)" \
            % (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
    """'headers' message: a vector of block headers (wire-encoded as blocks
    with empty transaction vectors)."""
    command = b"headers"
    def __init__(self):
        self.headers = []
    def deserialize(self, f):
        # comment in bitcoind indicates these should be deserialized as blocks
        blocks = deser_vector(f, CBlock)
        for x in blocks:
            self.headers.append(CBlockHeader(x))
    def serialize(self):
        # Wrap each header in an (empty) CBlock for wire encoding.
        blocks = [CBlock(x) for x in self.headers]
        return ser_vector(blocks)
    def __repr__(self):
        return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
    """'reject' message (BIP 61): rejected command, code, reason, and --
    for 'block'/'tx' rejections that are not malformed -- the object hash."""
    command = b"reject"
    REJECT_MALFORMED = 1
    def __init__(self):
        self.message = b""
        self.code = 0
        self.reason = b""
        self.data = 0
    def deserialize(self, f):
        self.message = deser_string(f)
        self.code = struct.unpack("<B", f.read(1))[0]
        self.reason = deser_string(f)
        # The extra hash field is only present for block/tx rejections that
        # are not REJECT_MALFORMED.
        if (self.code != self.REJECT_MALFORMED and
                (self.message == b"block" or self.message == b"tx")):
            self.data = deser_uint256(f)
    def serialize(self):
        r = ser_string(self.message)
        r += struct.pack("<B", self.code)
        r += ser_string(self.reason)
        if (self.code != self.REJECT_MALFORMED and
                (self.message == b"block" or self.message == b"tx")):
            r += ser_uint256(self.data)
        return r
    def __repr__(self):
        return "msg_reject: %s %d %s [%064x]" \
            % (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')):
    """Poll predicate() (under mininode_lock) every 50 ms until it returns
    true, the attempt budget is exhausted, or the timeout expires.

    Returns True on success, False otherwise. If neither limit is supplied,
    a 60 second timeout is applied.
    """
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    # Bug fix: the old implementation approximated elapsed time by adding
    # 0.05 per iteration, ignoring the time spent inside predicate() (which
    # runs while holding a lock) -- so the real wait could far exceed the
    # requested timeout. Use the wall clock instead.
    # (time.time() + float('inf') == float('inf'), so "no timeout" still works.)
    attempt = 0
    time_end = time.time() + timeout
    while attempt < attempts and time.time() < time_end:
        with mininode_lock:
            if predicate():
                return True
        attempt += 1
        time.sleep(0.05)
    return False
class msg_feefilter(object):
    """'feefilter' message (BIP 133): minimum fee rate for relayed txs."""
    command = b"feefilter"

    def __init__(self, feerate=0):
        self.feerate = feerate

    def deserialize(self, f):
        self.feerate = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        return struct.pack("<Q", self.feerate)

    def __repr__(self):
        return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
    """'sendcmpct' message (BIP 152): announce flag plus compact-block version."""
    command = b"sendcmpct"

    def __init__(self):
        self.announce = False
        self.version = 1

    def deserialize(self, f):
        self.announce = struct.unpack("<?", f.read(1))[0]
        self.version = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        return struct.pack("<?", self.announce) + struct.pack("<Q", self.version)

    def __repr__(self):
        return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
    """'cmpctblock' message (BIP 152) wrapping a P2PHeaderAndShortIDs."""
    command = b"cmpctblock"

    def __init__(self, header_and_shortids = None):
        self.header_and_shortids = header_and_shortids

    def deserialize(self, f):
        self.header_and_shortids = P2PHeaderAndShortIDs()
        self.header_and_shortids.deserialize(f)

    def serialize(self):
        return self.header_and_shortids.serialize()

    def __repr__(self):
        return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
    """'getblocktxn' message (BIP 152) wrapping a BlockTransactionsRequest."""
    command = b"getblocktxn"

    def __init__(self):
        self.block_txn_request = None

    def deserialize(self, f):
        self.block_txn_request = BlockTransactionsRequest()
        self.block_txn_request.deserialize(f)

    def serialize(self):
        return self.block_txn_request.serialize()

    def __repr__(self):
        return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
    """'blocktxn' message (BIP 152) wrapping BlockTransactions."""
    command = b"blocktxn"
    def __init__(self):
        self.block_transactions = BlockTransactions()
    def deserialize(self, f):
        self.block_transactions.deserialize(f)
    def serialize(self):
        r = b""
        r += self.block_transactions.serialize()
        return r
    def __repr__(self):
        return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
    """'blocktxn' serialized with witness data included."""
    def serialize(self):
        r = b""
        r += self.block_transactions.serialize(with_witness=True)
        return r
class NodeConnCB(object):
    """Callback/message-tracking base class for a NodeConn connection.

    Subclasses override the on_* handlers they care about. Incoming messages
    are counted per command and the most recent of each kind is kept in
    last_message for the wait_for_* helpers.
    """
    def __init__(self):
        # Track whether we have a P2P connection open to the node
        self.connected = False
        self.connection = None
        # Track number of messages of each type received and the most recent
        # message of each type
        self.message_count = defaultdict(int)
        self.last_message = {}
        # A count of the number of ping messages we've sent to the node
        self.ping_counter = 1
        self.deliver_sleep_time = None
        self.peer_services = None
    def deliver(self, conn, message):
        """Record an incoming message and dispatch to on_<command>.

        NOTE(review): the bare except swallows all handler errors (including
        missing on_* attributes) and only prints them.
        """
        deliver_sleep = self.get_deliver_sleep_time()
        if deliver_sleep is not None:
            time.sleep(deliver_sleep)
        with mininode_lock:
            try:
                command = message.command.decode('ascii')
                self.message_count[command] += 1
                self.last_message[command] = message
                getattr(self, 'on_' + command)(conn, message)
            except:
                print("ERROR delivering %s (%s)" % (repr(message),
                                                    sys.exc_info()[0]))
    def set_deliver_sleep_time(self, value):
        # Optional artificial delay before delivering each message.
        with mininode_lock:
            self.deliver_sleep_time = value
    def get_deliver_sleep_time(self):
        with mininode_lock:
            return self.deliver_sleep_time
    def on_open(self, conn):
        self.connected = True
    def on_close(self, conn):
        self.connected = False
        self.connection = None
    # Default message handlers: no-ops, meant to be overridden by subclasses.
    def on_addr(self, conn, message): pass
    def on_alert(self, conn, message): pass
    def on_block(self, conn, message): pass
    def on_blocktxn(self, conn, message): pass
    def on_cmpctblock(self, conn, message): pass
    def on_feefilter(self, conn, message): pass
    def on_getaddr(self, conn, message): pass
    def on_getblocks(self, conn, message): pass
    def on_getblocktxn(self, conn, message): pass
    def on_getdata(self, conn, message): pass
    def on_getheaders(self, conn, message): pass
    def on_headers(self, conn, message): pass
    def on_mempool(self, conn): pass
    def on_pong(self, conn, message): pass
    def on_reject(self, conn, message): pass
    def on_sendcmpct(self, conn, message): pass
    def on_sendheaders(self, conn, message): pass
    def on_tx(self, conn, message): pass
    def on_inv(self, conn, message):
        # Default: request everything announced (except type-0 errors).
        want = msg_getdata()
        for i in message.inv:
            if i.type != 0:
                want.inv.append(i)
        if len(want.inv):
            conn.send_message(want)
    def on_ping(self, conn, message):
        # Only BIP 31 pings carry a nonce and expect a pong.
        if conn.ver_send > BIP0031_VERSION:
            conn.send_message(msg_pong(message.nonce))
    def on_verack(self, conn, message):
        conn.ver_recv = conn.ver_send
        self.verack_received = True
    def on_version(self, conn, message):
        # Acknowledge and negotiate the lowest mutual version.
        if message.nVersion >= 209:
            conn.send_message(msg_verack())
        conn.ver_send = min(MY_VERSION, message.nVersion)
        if message.nVersion < 209:
            conn.ver_recv = conn.ver_send
        conn.nServices = message.nServices
    def add_connection(self, conn):
        self.connection = conn
    # wait_for_* helpers: block (with a timeout) until a condition holds.
    def wait_for_disconnect(self, timeout=60):
        test_function = lambda: not self.connected
        assert wait_until(test_function, timeout=timeout)
    def wait_for_block(self, blockhash, timeout=60):
        test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
        assert wait_until(test_function, timeout=timeout)
    def wait_for_getdata(self, timeout=60):
        test_function = lambda: self.last_message.get("getdata")
        assert wait_until(test_function, timeout=timeout)
    def wait_for_getheaders(self, timeout=60):
        test_function = lambda: self.last_message.get("getheaders")
        assert wait_until(test_function, timeout=timeout)
    def wait_for_inv(self, expected_inv, timeout=60):
        if len(expected_inv) > 1:
            raise NotImplementedError("wait_for_inv() will only verify the first inv object")
        test_function = lambda: self.last_message.get("inv") and \
                                self.last_message["inv"].inv[0].type == expected_inv[0].type and \
                                self.last_message["inv"].inv[0].hash == expected_inv[0].hash
        assert wait_until(test_function, timeout=timeout)
    def wait_for_verack(self, timeout=60):
        test_function = lambda: self.message_count["verack"]
        assert wait_until(test_function, timeout=timeout)
    def send_message(self, message):
        if self.connection:
            self.connection.send_message(message)
        else:
            logger.error("Cannot send message. No connection to node!")
    def send_and_ping(self, message):
        self.send_message(message)
        self.sync_with_ping()
    def sync_with_ping(self, timeout=60):
        """Send a ping and wait for the matching pong -- ensures the node has
        processed everything sent before it."""
        self.send_message(msg_ping(nonce=self.ping_counter))
        test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
        assert wait_until(test_function, timeout=timeout)
        self.ping_counter += 1
        return True
class NodeConn(asyncore.dispatcher):
    """An asyncore-based P2P connection to a node: frames/deframes wire
    messages and hands parsed messages to a NodeConnCB callback object."""
    # Map of wire command -> message class used when deserializing.
    messagemap = {
        b"version": msg_version,
        b"verack": msg_verack,
        b"addr": msg_addr,
        b"alert": msg_alert,
        b"inv": msg_inv,
        b"getdata": msg_getdata,
        b"getblocks": msg_getblocks,
        b"tx": msg_tx,
        b"block": msg_block,
        b"getaddr": msg_getaddr,
        b"ping": msg_ping,
        b"pong": msg_pong,
        b"headers": msg_headers,
        b"getheaders": msg_getheaders,
        b"reject": msg_reject,
        b"mempool": msg_mempool,
        b"feefilter": msg_feefilter,
        b"sendheaders": msg_sendheaders,
        b"sendcmpct": msg_sendcmpct,
        b"cmpctblock": msg_cmpctblock,
        b"getblocktxn": msg_getblocktxn,
        b"blocktxn": msg_blocktxn
    }
    # Network magic bytes prefixing every message, keyed by chain name.
    MAGIC_BYTES = {
        "mainnet": b"\xfb\xc0\xb6\xdb",
        "testnet3": b"\xfc\xc1\xb7\xdc",
        "regtest": b"\xfa\xbf\xb5\xda",
    }
    def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
        """Open a TCP connection to dstaddr:dstport and (optionally) queue an
        initial version message before connecting."""
        asyncore.dispatcher.__init__(self, map=mininode_socket_map)
        self.dstaddr = dstaddr
        self.dstport = dstport
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sendbuf = b""
        self.recvbuf = b""
        # Protocol versions negotiated per direction; start at the minimum
        # that includes message checksums (209).
        self.ver_send = 209
        self.ver_recv = 209
        self.last_sent = 0
        self.state = "connecting"
        self.network = net
        self.cb = callback
        self.disconnect = False
        self.nServices = 0
        if send_version:
            # stuff version msg into sendbuf before the socket connects
            vt = msg_version()
            vt.nServices = services
            vt.addrTo.ip = self.dstaddr
            vt.addrTo.port = self.dstport
            vt.addrFrom.ip = "0.0.0.0"
            vt.addrFrom.port = 0
            self.send_message(vt, True)
        logger.info('Connecting to Honeycomb Node: %s:%d' % (self.dstaddr, self.dstport))
        try:
            self.connect((dstaddr, dstport))
        except:
            self.handle_close()
        self.rpc = rpc
    def handle_connect(self):
        """asyncore callback: socket connected; notify the callback object."""
        if self.state != "connected":
            logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
            self.state = "connected"
            self.cb.on_open(self)
    def handle_close(self):
        """asyncore callback: tear down the socket and drop buffered data."""
        logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
        self.state = "closed"
        self.recvbuf = b""
        self.sendbuf = b""
        try:
            self.close()
        except:
            pass
        self.cb.on_close(self)
    def handle_read(self):
        """asyncore callback: append received bytes and parse any complete
        messages. NOTE(review): the bare except silently drops read errors."""
        try:
            t = self.recv(8192)
            if len(t) > 0:
                self.recvbuf += t
                self.got_data()
        except:
            pass
    def readable(self):
        # asyncore: always interested in reads.
        return True
    def writable(self):
        # asyncore: writable while still connecting or when data is queued.
        with mininode_lock:
            pre_connection = self.state == "connecting"
            length = len(self.sendbuf)
        return (length > 0 or pre_connection)
    def handle_write(self):
        """asyncore callback: flush as much of sendbuf as the socket takes."""
        with mininode_lock:
            # asyncore does not expose socket connection, only flushing of buffers
            if self.state == "connecting":
                self.handle_connect()
            if not self.writable():
                return
            try:
                sent = self.send(self.sendbuf)
            except:
                self.handle_close()
                return
            # Keep the unsent remainder for the next write event.
            self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
except Exception as e:
logger.exception('got_data:', repr(e))
    def send_message(self, message, pushbuf=False):
        """Serialize *message*, frame it and append it to the send buffer.

        pushbuf=True allows queuing before the socket is connected (used for
        the initial version message).  Raises IOError when not connected and
        pushbuf is False.
        """
        if self.state != "connected" and not pushbuf:
            raise IOError('Not connected, no pushbuf')
        self._log_message("send", message)
        command = message.command
        data = message.serialize()
        # Frame: magic | command padded to 12 bytes | length | [checksum] | payload
        tmsg = self.MAGIC_BYTES[self.network]
        tmsg += command
        tmsg += b"\x00" * (12 - len(command))
        tmsg += struct.pack("<I", len(data))
        if self.ver_send >= 209:
            # Checksum (first 4 bytes of double-SHA256) only for >= 209 peers.
            th = sha256(data)
            h = sha256(th)
            tmsg += h[:4]
        tmsg += data
        with mininode_lock:
            self.sendbuf += tmsg
            self.last_sent = time.time()
    def got_message(self, message):
        """Dispatch a fully parsed message to the callback, keeping the link alive."""
        if message.command == b"version":
            if message.nVersion <= BIP0031_VERSION:
                # Old peers use the pre-BIP31 ping message variant.
                self.messagemap[b'ping'] = msg_ping_prebip31
        if self.last_sent + 30 * 60 < time.time():
            # Send a ping if we have been quiet for 30 minutes.
            self.send_message(self.messagemap[b'ping']())
        self._log_message("receive", message)
        self.cb.deliver(self, message)
def _log_message(self, direction, msg):
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
    def disconnect_node(self):
        """Flag this connection so the network thread drops it on its next pass."""
        self.disconnect = True
class NetworkThread(Thread):
    """Background thread driving the asyncore event loop for all connections."""
    def run(self):
        # Run until every connection has been closed and removed from the map.
        while mininode_socket_map:
            # Close any connections flagged for disconnection before polling.
            disconnected = []
            for fd, obj in mininode_socket_map.items():
                if obj.disconnect:
                    disconnected.append(obj)
            [ obj.handle_close() for obj in disconnected ]
            asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
class EarlyDisconnectError(Exception):
    """Raised when a peer drops the connection earlier than the test expected."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
| true | true |
1c2c6e74502a6c621fd01f854bf4f97c4970f8a2 | 986 | py | Python | tf_quant_finance/math/root_search/__init__.py | slowy07/tf-quant-finance | 0976f720fb58a2d7bfd863640c12a2425cd2f94f | [
"Apache-2.0"
] | 3,138 | 2019-07-24T21:43:17.000Z | 2022-03-30T12:11:09.000Z | tf_quant_finance/math/root_search/__init__.py | slowy07/tf-quant-finance | 0976f720fb58a2d7bfd863640c12a2425cd2f94f | [
"Apache-2.0"
] | 63 | 2019-09-07T19:16:03.000Z | 2022-03-29T19:29:40.000Z | tf_quant_finance/math/root_search/__init__.py | slowy07/tf-quant-finance | 0976f720fb58a2d7bfd863640c12a2425cd2f94f | [
"Apache-2.0"
] | 423 | 2019-07-26T21:28:05.000Z | 2022-03-26T13:07:44.000Z | # Lint as: python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ops related to root search."""
from tf_quant_finance.math.root_search.brent import brentq
from tf_quant_finance.math.root_search.newton import root_finder as newton_root
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
_allowed_symbols = [
'brentq',
'newton_root',
]
remove_undocumented(__name__, _allowed_symbols)
| 34 | 109 | 0.776876 |
from tf_quant_finance.math.root_search.brent import brentq
from tf_quant_finance.math.root_search.newton import root_finder as newton_root
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'brentq',
'newton_root',
]
remove_undocumented(__name__, _allowed_symbols)
| true | true |
1c2c6eb508fbb7db0f4bc9542432ad2c73f2504b | 181 | py | Python | modules/__init__.py | trislaz/SimCLR | d7c8950a1afc8adad4e617e7ecd90a0d3828740f | [
"MIT"
] | null | null | null | modules/__init__.py | trislaz/SimCLR | d7c8950a1afc8adad4e617e7ecd90a0d3828740f | [
"MIT"
] | null | null | null | modules/__init__.py | trislaz/SimCLR | d7c8950a1afc8adad4e617e7ecd90a0d3828740f | [
"MIT"
] | null | null | null | from .simclr import SimCLR
from .nt_xent import NT_Xent
from .logistic_regression import LogisticRegression
from .lars import LARS
from .dataloader import dataset, datasetWSI_simple | 36.2 | 51 | 0.856354 | from .simclr import SimCLR
from .nt_xent import NT_Xent
from .logistic_regression import LogisticRegression
from .lars import LARS
from .dataloader import dataset, datasetWSI_simple | true | true |
1c2c6f1bcf8a0d65242015ce640b42950c65e4f4 | 316 | py | Python | errorges/config/docs.py | paurosello/errorges | c91f48282f28224dee33d9feb77ca9abd6e2eb5b | [
"MIT"
] | null | null | null | errorges/config/docs.py | paurosello/errorges | c91f48282f28224dee33d9feb77ca9abd6e2eb5b | [
"MIT"
] | null | null | null | errorges/config/docs.py | paurosello/errorges | c91f48282f28224dee33d9feb77ca9abd6e2eb5b | [
"MIT"
] | null | null | null | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/errorges"
# docs_base_url = "https://[org_name].github.io/errorges"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
    """Populate the docs template context with the brand name to display."""
    setattr(context, "brand_html", "Errorges")
| 26.333333 | 68 | 0.724684 |
def get_context(context):
context.brand_html = "Errorges"
| true | true |
1c2c6f46414369ce2c51604c5dc0bf18f43b745e | 11,171 | py | Python | contrib/runners/orquesta_runner/tests/unit/test_cancel.py | shusugmt/st2 | 31da26badfb4ca3fb3e8cae07cfeec4791191afd | [
"Apache-2.0"
] | null | null | null | contrib/runners/orquesta_runner/tests/unit/test_cancel.py | shusugmt/st2 | 31da26badfb4ca3fb3e8cae07cfeec4791191afd | [
"Apache-2.0"
] | null | null | null | contrib/runners/orquesta_runner/tests/unit/test_cancel.py | shusugmt/st2 | 31da26badfb4ca3fb3e8cae07cfeec4791191afd | [
"Apache-2.0"
] | null | null | null | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
import st2tests
from oslo_config import cfg
# XXX: actionsensor import depends on config being setup.
import st2tests.config as tests_config
tests_config.parse_args()
from tests.unit import base
from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import runnersregistrar
from st2common.constants import action as ac_const
from st2common.models.db import liveaction as lv_db_models
from st2common.persistence import execution as ex_db_access
from st2common.persistence import liveaction as lv_db_access
from st2common.persistence import workflow as wf_db_access
from st2common.runners import base as runners
from st2common.services import action as ac_svc
from st2common.services import workflows as wf_svc
from st2common.transport import liveaction as lv_ac_xport
from st2common.transport import workflow as wf_ex_xport
from st2common.transport import publishers
from st2tests.mocks import liveaction as mock_lv_ac_xport
from st2tests.mocks import workflow as mock_wf_ex_xport
# Fixture packs registered for these tests: the orquesta test pack plus core.
TEST_PACK = 'orquesta_tests'
TEST_PACK_PATH = st2tests.fixturesloader.get_fixtures_packs_base_path() + '/' + TEST_PACK
PACKS = [
    TEST_PACK_PATH,
    st2tests.fixturesloader.get_fixtures_packs_base_path() + '/core'
]
@mock.patch.object(
    publishers.CUDPublisher,
    'publish_update',
    mock.MagicMock(return_value=None))
@mock.patch.object(
    lv_ac_xport.LiveActionPublisher,
    'publish_create',
    mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_create))
@mock.patch.object(
    lv_ac_xport.LiveActionPublisher,
    'publish_state',
    mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_state))
@mock.patch.object(
    wf_ex_xport.WorkflowExecutionPublisher,
    'publish_create',
    mock.MagicMock(side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_create))
@mock.patch.object(
    wf_ex_xport.WorkflowExecutionPublisher,
    'publish_state',
    mock.MagicMock(side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_state))
class OrquestaRunnerCancelTest(st2tests.DbTestCase):
    """Tests cancellation of orquesta workflows, including how a cancel
    request cascades between a parent workflow and its subworkflows.

    The transport publishers are patched above so live action and workflow
    execution state changes are processed synchronously in-process.
    """
    @classmethod
    def setUpClass(cls):
        """Register runners and the fixture packs once for the whole class."""
        super(OrquestaRunnerCancelTest, cls).setUpClass()
        # Register runners.
        runnersregistrar.register_runners()
        # Register test pack(s).
        actions_registrar = actionsregistrar.ActionsRegistrar(
            use_pack_cache=False,
            fail_on_failure=True
        )
        for pack in PACKS:
            actions_registrar.register_from_pack(pack)
    @classmethod
    def get_runner_class(cls, runner_name):
        """Resolve the runner class registered under *runner_name*."""
        return runners.get_runner(runner_name, runner_name).__class__
    @mock.patch.object(
        ac_svc, 'is_children_active',
        mock.MagicMock(return_value=True))
    def test_cancel(self):
        """A running workflow stays in canceling while children are active."""
        wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'sequential.yaml')
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
        lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)
        requester = cfg.CONF.system_user.user
        lv_ac_db, ac_ex_db = ac_svc.request_cancellation(lv_ac_db, requester)
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING)
    def test_cancel_workflow_cascade_down_to_subworkflow(self):
        """Canceling the parent workflow also cancels its running subworkflow."""
        wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'subworkflow.yaml')
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
        lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)
        # Identify the records for the subworkflow.
        wf_ex_dbs = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))
        self.assertEqual(len(wf_ex_dbs), 1)
        tk_ex_dbs = wf_db_access.TaskExecution.query(workflow_execution=str(wf_ex_dbs[0].id))
        self.assertEqual(len(tk_ex_dbs), 1)
        tk_ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(tk_ex_dbs[0].id))
        self.assertEqual(len(tk_ac_ex_dbs), 1)
        tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk_ac_ex_dbs[0].liveaction['id'])
        self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
        # Cancel the main workflow.
        requester = cfg.CONF.system_user.user
        lv_ac_db, ac_ex_db = ac_svc.request_cancellation(lv_ac_db, requester)
        self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING)
        # Assert the subworkflow is canceled.
        tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(tk_lv_ac_db.id))
        self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
        # Assert the main workflow is canceled.
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
    def test_cancel_subworkflow_cascade_up_to_workflow(self):
        """Canceling a subworkflow cascades the cancellation up to its parent."""
        wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'subworkflow.yaml')
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
        lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)
        # Identify the records for the subworkflow.
        wf_ex_dbs = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))
        self.assertEqual(len(wf_ex_dbs), 1)
        tk_ex_dbs = wf_db_access.TaskExecution.query(workflow_execution=str(wf_ex_dbs[0].id))
        self.assertEqual(len(tk_ex_dbs), 1)
        tk_ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(tk_ex_dbs[0].id))
        self.assertEqual(len(tk_ac_ex_dbs), 1)
        tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk_ac_ex_dbs[0].liveaction['id'])
        self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
        # Cancel the subworkflow.
        requester = cfg.CONF.system_user.user
        tk_lv_ac_db, tk_ac_ex_db = ac_svc.request_cancellation(tk_lv_ac_db, requester)
        self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING)
        # Assert the subworkflow is canceled.
        tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(tk_lv_ac_db.id))
        self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
        # Manually handle action execution completion for the task.
        tk_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(tk_ac_ex_db.id))
        self.assertEqual(tk_ac_ex_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
        wf_svc.handle_action_execution_completion(tk_ac_ex_db)
        # Assert the main workflow is canceled.
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
    def test_cancel_subworkflow_cascade_up_to_workflow_with_other_subworkflows(self):
        """Canceling one subworkflow cancels the parent and its sibling subworkflows."""
        wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'subworkflows.yaml')
        lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
        lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)
        # Identify the records for the subworkflow.
        wf_ex_dbs = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))
        self.assertEqual(len(wf_ex_dbs), 1)
        tk_ex_dbs = wf_db_access.TaskExecution.query(workflow_execution=str(wf_ex_dbs[0].id))
        self.assertEqual(len(tk_ex_dbs), 2)
        tk1_ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(tk_ex_dbs[0].id))
        self.assertEqual(len(tk1_ac_ex_dbs), 1)
        tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_dbs[0].liveaction['id'])
        self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
        tk2_ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(tk_ex_dbs[1].id))
        self.assertEqual(len(tk2_ac_ex_dbs), 1)
        tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk2_ac_ex_dbs[0].liveaction['id'])
        self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
        # Cancel the subworkflow which should cascade up to the root.
        requester = cfg.CONF.system_user.user
        tk1_lv_ac_db, tk1_ac_ex_db = ac_svc.request_cancellation(tk1_lv_ac_db, requester)
        self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING)
        # Assert the main workflow is canceling.
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING)
        # Assert both subworkflows are canceled.
        tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(tk1_lv_ac_db.id))
        self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
        tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(tk2_lv_ac_db.id))
        self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
        # Manually handle action execution completion for one of the tasks.
        tk1_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(tk1_ac_ex_db.id))
        self.assertEqual(tk1_ac_ex_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
        wf_svc.handle_action_execution_completion(tk1_ac_ex_db)
        # Manually handle action execution completion for the other task.
        tk2_ac_ex_db = tk2_ac_ex_dbs[0]
        tk2_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(tk2_ac_ex_db.id))
        self.assertEqual(tk2_ac_ex_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
        wf_svc.handle_action_execution_completion(tk2_ac_ex_db)
        # Assert the main workflow is canceled.
        lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
        self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
| 46.740586 | 95 | 0.760183 |
from __future__ import absolute_import
import mock
import st2tests
from oslo_config import cfg
import st2tests.config as tests_config
tests_config.parse_args()
from tests.unit import base
from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import runnersregistrar
from st2common.constants import action as ac_const
from st2common.models.db import liveaction as lv_db_models
from st2common.persistence import execution as ex_db_access
from st2common.persistence import liveaction as lv_db_access
from st2common.persistence import workflow as wf_db_access
from st2common.runners import base as runners
from st2common.services import action as ac_svc
from st2common.services import workflows as wf_svc
from st2common.transport import liveaction as lv_ac_xport
from st2common.transport import workflow as wf_ex_xport
from st2common.transport import publishers
from st2tests.mocks import liveaction as mock_lv_ac_xport
from st2tests.mocks import workflow as mock_wf_ex_xport
TEST_PACK = 'orquesta_tests'
TEST_PACK_PATH = st2tests.fixturesloader.get_fixtures_packs_base_path() + '/' + TEST_PACK
PACKS = [
TEST_PACK_PATH,
st2tests.fixturesloader.get_fixtures_packs_base_path() + '/core'
]
@mock.patch.object(
publishers.CUDPublisher,
'publish_update',
mock.MagicMock(return_value=None))
@mock.patch.object(
lv_ac_xport.LiveActionPublisher,
'publish_create',
mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_create))
@mock.patch.object(
lv_ac_xport.LiveActionPublisher,
'publish_state',
mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_state))
@mock.patch.object(
wf_ex_xport.WorkflowExecutionPublisher,
'publish_create',
mock.MagicMock(side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_create))
@mock.patch.object(
wf_ex_xport.WorkflowExecutionPublisher,
'publish_state',
mock.MagicMock(side_effect=mock_wf_ex_xport.MockWorkflowExecutionPublisher.publish_state))
class OrquestaRunnerCancelTest(st2tests.DbTestCase):
@classmethod
def setUpClass(cls):
super(OrquestaRunnerCancelTest, cls).setUpClass()
runnersregistrar.register_runners()
actions_registrar = actionsregistrar.ActionsRegistrar(
use_pack_cache=False,
fail_on_failure=True
)
for pack in PACKS:
actions_registrar.register_from_pack(pack)
@classmethod
def get_runner_class(cls, runner_name):
return runners.get_runner(runner_name, runner_name).__class__
@mock.patch.object(
ac_svc, 'is_children_active',
mock.MagicMock(return_value=True))
def test_cancel(self):
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'sequential.yaml')
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)
requester = cfg.CONF.system_user.user
lv_ac_db, ac_ex_db = ac_svc.request_cancellation(lv_ac_db, requester)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING)
def test_cancel_workflow_cascade_down_to_subworkflow(self):
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'subworkflow.yaml')
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)
wf_ex_dbs = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))
self.assertEqual(len(wf_ex_dbs), 1)
tk_ex_dbs = wf_db_access.TaskExecution.query(workflow_execution=str(wf_ex_dbs[0].id))
self.assertEqual(len(tk_ex_dbs), 1)
tk_ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(tk_ex_dbs[0].id))
self.assertEqual(len(tk_ac_ex_dbs), 1)
tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk_ac_ex_dbs[0].liveaction['id'])
self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
requester = cfg.CONF.system_user.user
lv_ac_db, ac_ex_db = ac_svc.request_cancellation(lv_ac_db, requester)
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING)
tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(tk_lv_ac_db.id))
self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
def test_cancel_subworkflow_cascade_up_to_workflow(self):
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'subworkflow.yaml')
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)
wf_ex_dbs = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))
self.assertEqual(len(wf_ex_dbs), 1)
tk_ex_dbs = wf_db_access.TaskExecution.query(workflow_execution=str(wf_ex_dbs[0].id))
self.assertEqual(len(tk_ex_dbs), 1)
tk_ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(tk_ex_dbs[0].id))
self.assertEqual(len(tk_ac_ex_dbs), 1)
tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk_ac_ex_dbs[0].liveaction['id'])
self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
requester = cfg.CONF.system_user.user
tk_lv_ac_db, tk_ac_ex_db = ac_svc.request_cancellation(tk_lv_ac_db, requester)
self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING)
tk_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(tk_lv_ac_db.id))
self.assertEqual(tk_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
tk_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(tk_ac_ex_db.id))
self.assertEqual(tk_ac_ex_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
wf_svc.handle_action_execution_completion(tk_ac_ex_db)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
def test_cancel_subworkflow_cascade_up_to_workflow_with_other_subworkflows(self):
wf_meta = base.get_wf_fixture_meta_data(TEST_PACK_PATH, 'subworkflows.yaml')
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta['name'])
lv_ac_db, ac_ex_db = ac_svc.request(lv_ac_db)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING, lv_ac_db.result)
wf_ex_dbs = wf_db_access.WorkflowExecution.query(action_execution=str(ac_ex_db.id))
self.assertEqual(len(wf_ex_dbs), 1)
tk_ex_dbs = wf_db_access.TaskExecution.query(workflow_execution=str(wf_ex_dbs[0].id))
self.assertEqual(len(tk_ex_dbs), 2)
tk1_ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(tk_ex_dbs[0].id))
self.assertEqual(len(tk1_ac_ex_dbs), 1)
tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk1_ac_ex_dbs[0].liveaction['id'])
self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
tk2_ac_ex_dbs = ex_db_access.ActionExecution.query(task_execution=str(tk_ex_dbs[1].id))
self.assertEqual(len(tk2_ac_ex_dbs), 1)
tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(tk2_ac_ex_dbs[0].liveaction['id'])
self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_RUNNING)
requester = cfg.CONF.system_user.user
tk1_lv_ac_db, tk1_ac_ex_db = ac_svc.request_cancellation(tk1_lv_ac_db, requester)
self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELING)
tk1_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(tk1_lv_ac_db.id))
self.assertEqual(tk1_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
tk2_lv_ac_db = lv_db_access.LiveAction.get_by_id(str(tk2_lv_ac_db.id))
self.assertEqual(tk2_lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
tk1_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(tk1_ac_ex_db.id))
self.assertEqual(tk1_ac_ex_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
wf_svc.handle_action_execution_completion(tk1_ac_ex_db)
tk2_ac_ex_db = tk2_ac_ex_dbs[0]
tk2_ac_ex_db = ex_db_access.ActionExecution.get_by_id(str(tk2_ac_ex_db.id))
self.assertEqual(tk2_ac_ex_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
wf_svc.handle_action_execution_completion(tk2_ac_ex_db)
lv_ac_db = lv_db_access.LiveAction.get_by_id(str(lv_ac_db.id))
self.assertEqual(lv_ac_db.status, ac_const.LIVEACTION_STATUS_CANCELED)
| true | true |
1c2c710854e2a2abe4f872614d3e8b926df1f78f | 4,933 | py | Python | pandapower/test/opf/test_dcline.py | Zamwell/pandapower | ce51946342109e969b87b60c8883d7eec02d3060 | [
"BSD-3-Clause"
] | 1 | 2019-06-16T05:06:03.000Z | 2019-06-16T05:06:03.000Z | pandapower/test/opf/test_dcline.py | Zamwell/pandapower | ce51946342109e969b87b60c8883d7eec02d3060 | [
"BSD-3-Clause"
] | null | null | null | pandapower/test/opf/test_dcline.py | Zamwell/pandapower | ce51946342109e969b87b60c8883d7eec02d3060 | [
"BSD-3-Clause"
] | 1 | 2022-02-07T14:11:03.000Z | 2022-02-07T14:11:03.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pytest
from numpy import array, allclose, isclose
import pandapower as pp
from pandapower.test.consistency_checks import consistency_checks
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
@pytest.fixture
def dcline_net():
    """Five-bus 380 kV test grid with two external grids connected through
    three AC lines and one DC line (b2 -> b3), and an 800 MW load at b4."""
    net = pp.create_empty_network()
    b5 = pp.create_bus(net, 380)
    b3 = pp.create_bus(net, 380)
    b2 = pp.create_bus(net, 380)
    b4 = pp.create_bus(net, 380)
    b1 = pp.create_bus(net, 380)
    pp.create_ext_grid(net, b1, 1.02, min_p_mw=0., max_p_mw=1e9)
    pp.create_line(net, b1, b2, 30, "490-AL1/64-ST1A 380.0")
    pp.create_dcline(net, name="dc line", from_bus=b2, to_bus=b3, p_mw=200, loss_percent=1.0,
                     loss_mw=0.5, vm_from_pu=1.01, vm_to_pu=1.012, max_p_mw=1000,
                     in_service=True, index=4)
    pp.create_line(net, b3, b4, 20, "490-AL1/64-ST1A 380.0")
    pp.create_load(net, bus=b4, p_mw=800, controllable=False)
    pp.create_line(net, b4, b5, 20, "490-AL1/64-ST1A 380.0")
    pp.create_ext_grid(net, b5, 1.02, min_p_mw=0., max_p_mw=1e9)
    return net
def test_dispatch1(dcline_net):
    """Run OPF with piecewise-linear ext_grid costs and check losses and
    dispatch results against reference values."""
    net = dcline_net
    pp.create_pwl_cost(net, 0, "ext_grid", [[-1e12, 1e9, 100]])
    pp.create_pwl_cost(net, 1, "ext_grid", [[-1e12, 1e9, 80]])
    net.bus["max_vm_pu"] = 2
    net.bus["min_vm_pu"] = 0  # needs to be constrained more than default
    net.line["max_loading_percent"] = 1000  # does not converge if unconstrained
    pp.runopp(net)
    consistency_checks(net)
    # Relative losses implied by the result must match the configured loss_percent.
    rel_loss_expect = (net.res_dcline.pl_mw - net.dcline.loss_mw) / \
                      (net.res_dcline.p_from_mw - net.res_dcline.pl_mw) * 100
    assert allclose(rel_loss_expect.values, net.dcline.loss_percent.values, rtol=1e-2)
    assert allclose(net.res_ext_grid.p_mw.values, [0.5, 805], atol=0.1)
    assert allclose(net.res_ext_grid.q_mvar.values, [-7.78755773243, 0.62830727889], atol=1e-3)
    assert allclose(net.res_dcline.p_from_mw.values, [0.500754071], atol=1e-3)
    assert allclose(net.res_dcline.q_from_mvar.values, [7.78745600524])
    assert allclose(net.res_dcline.p_to_mw.values, array([-0.0746605e-3]))
    assert allclose(net.res_dcline.q_to_mvar.values, array([-.62712636707]))
def test_dcline_dispatch2(dcline_net):
    """Run OPF with linear (poly) ext_grid costs and verify losses, ext_grid
    dispatch and DC-line power flows against reference values.

    Cleanup: the consistency check was previously called twice in a row with
    identical arguments; one call is sufficient.  Commented-out legacy cost
    setup lines were removed as dead code.
    """
    net = dcline_net
    pp.create_poly_cost(net, 0, "ext_grid", cp1_eur_per_mw=80)
    pp.create_poly_cost(net, 1, "ext_grid", cp1_eur_per_mw=100)
    net.bus["max_vm_pu"] = 2
    net.bus["min_vm_pu"] = 0  # needs to be constrained more than default
    net.line["max_loading_percent"] = 1000  # does not converge if unconstrained
    pp.runopp(net)
    consistency_checks(net, rtol=1e-3)
    # Relative losses implied by the result must match the configured loss_percent.
    rel_loss_expect = (net.res_dcline.pl_mw - net.dcline.loss_mw) / \
                      (net.res_dcline.p_from_mw - net.res_dcline.pl_mw) * 100
    assert allclose(rel_loss_expect.values, net.dcline.loss_percent.values)
    p_eg_expect = array([8.21525358e+02, 5.43498903e-05])
    q_eg_expect = array([-7787.55852923e-3, -21048.59213887e-3])
    assert allclose(net.res_ext_grid.p_mw.values, p_eg_expect)
    assert allclose(net.res_ext_grid.q_mvar.values, q_eg_expect)
    p_from_expect = array([813573.88366999e-3])
    q_from_expect = array([-26446.0473644e-3])
    assert allclose(net.res_dcline.p_from_mw.values, p_from_expect)
    assert allclose(net.res_dcline.q_from_mvar.values, q_from_expect)
    p_to_expect = array([-805023.64719801e-3])
    q_to_expect = array([-21736.31196315e-3])
    assert allclose(net.res_dcline.p_to_mw.values, p_to_expect)
    assert allclose(net.res_dcline.q_to_mvar.values, q_to_expect)
def test_dcline_dispatch3(dcline_net):
    """DC line with transfer costs: it stays idle until the alternative source
    (the ext_grid behind it) is made more expensive."""
    net = dcline_net
    pp.create_poly_cost(net, 4, "dcline", cp1_eur_per_mw=1.5)
    net.bus["max_vm_pu"] = 1.03  # needs to be constrained more than default
    net.line["max_loading_percent"] = 1000  # does not converge if unconstrained
    pp.runopp(net)
    consistency_checks(net, rtol=1e-1)
    # dc line is not dispatched because of the assigned costs
    assert isclose(net.res_dcline.at[4, "p_to_mw"], 0, atol=1e-2)
    assert all(net.res_ext_grid.p_mw.values > 0)
    # costs for ext_grid at the end of the DC line get double the costs of DC line transfer
    pp.create_poly_cost(net, 1, "ext_grid", cp1_eur_per_mw=2000)
    pp.runopp(net)
    # now the total power is supplied through the DC line
    assert (net.res_dcline.at[4, "p_to_mw"]) < 1e3
    assert net.res_ext_grid.p_mw.at[1] < 1
    assert isclose(net.res_cost, net.res_dcline.at[4, "p_from_mw"]*1.5)
if __name__ == "__main__":
pytest.main([__file__])
| 38.539063 | 96 | 0.700993 |
import pytest
from numpy import array, allclose, isclose
import pandapower as pp
from pandapower.test.consistency_checks import consistency_checks
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
@pytest.fixture
def dcline_net():
net = pp.create_empty_network()
b5 = pp.create_bus(net, 380)
b3 = pp.create_bus(net, 380)
b2 = pp.create_bus(net, 380)
b4 = pp.create_bus(net, 380)
b1 = pp.create_bus(net, 380)
pp.create_ext_grid(net, b1, 1.02, min_p_mw=0., max_p_mw=1e9)
pp.create_line(net, b1, b2, 30, "490-AL1/64-ST1A 380.0")
pp.create_dcline(net, name="dc line", from_bus=b2, to_bus=b3, p_mw=200, loss_percent=1.0,
loss_mw=0.5, vm_from_pu=1.01, vm_to_pu=1.012, max_p_mw=1000,
in_service=True, index=4)
pp.create_line(net, b3, b4, 20, "490-AL1/64-ST1A 380.0")
pp.create_load(net, bus=b4, p_mw=800, controllable=False)
pp.create_line(net, b4, b5, 20, "490-AL1/64-ST1A 380.0")
pp.create_ext_grid(net, b5, 1.02, min_p_mw=0., max_p_mw=1e9)
return net
def test_dispatch1(dcline_net):
net = dcline_net
pp.create_pwl_cost(net, 0, "ext_grid", [[-1e12, 1e9, 100]])
pp.create_pwl_cost(net, 1, "ext_grid", [[-1e12, 1e9, 80]])
net.bus["max_vm_pu"] = 2
net.bus["min_vm_pu"] = 0
net.line["max_loading_percent"] = 1000
pp.runopp(net)
consistency_checks(net)
rel_loss_expect = (net.res_dcline.pl_mw - net.dcline.loss_mw) / \
(net.res_dcline.p_from_mw - net.res_dcline.pl_mw) * 100
assert allclose(rel_loss_expect.values, net.dcline.loss_percent.values, rtol=1e-2)
assert allclose(net.res_ext_grid.p_mw.values, [0.5, 805], atol=0.1)
assert allclose(net.res_ext_grid.q_mvar.values, [-7.78755773243, 0.62830727889], atol=1e-3)
assert allclose(net.res_dcline.p_from_mw.values, [0.500754071], atol=1e-3)
assert allclose(net.res_dcline.q_from_mvar.values, [7.78745600524])
assert allclose(net.res_dcline.p_to_mw.values, array([-0.0746605e-3]))
assert allclose(net.res_dcline.q_to_mvar.values, array([-.62712636707]))
def test_dcline_dispatch2(dcline_net):
    """OPF dispatch with linear poly costs on both external grids.

    The cheaper ext_grid (index 0, 80 EUR/MW vs 100 EUR/MW) should supply
    essentially all power; results are pinned against previously validated
    reference values.
    """
    net = dcline_net
    pp.create_poly_cost(net, 0, "ext_grid", cp1_eur_per_mw=80)
    pp.create_poly_cost(net, 1, "ext_grid", cp1_eur_per_mw=100)
    # relax voltage and loading limits so only the cost terms drive the dispatch
    net.bus["max_vm_pu"] = 2
    net.bus["min_vm_pu"] = 0
    net.line["max_loading_percent"] = 1000
    pp.runopp(net)
    # NOTE: was accidentally invoked twice; one check is sufficient
    consistency_checks(net, rtol=1e-3)
    # relative losses implied by the result must match the configured loss_percent
    rel_loss_expect = (net.res_dcline.pl_mw - net.dcline.loss_mw) / \
                      (net.res_dcline.p_from_mw - net.res_dcline.pl_mw) * 100
    assert allclose(rel_loss_expect.values, net.dcline.loss_percent.values)
    p_eg_expect = array([8.21525358e+02, 5.43498903e-05])
    q_eg_expect = array([-7787.55852923e-3, -21048.59213887e-3])
    assert allclose(net.res_ext_grid.p_mw.values, p_eg_expect)
    assert allclose(net.res_ext_grid.q_mvar.values, q_eg_expect)
    p_from_expect = array([813573.88366999e-3])
    q_from_expect = array([-26446.0473644e-3])
    assert allclose(net.res_dcline.p_from_mw.values, p_from_expect)
    assert allclose(net.res_dcline.q_from_mvar.values, q_from_expect)
    p_to_expect = array([-805023.64719801e-3])
    q_to_expect = array([-21736.31196315e-3])
    assert allclose(net.res_dcline.p_to_mw.values, p_to_expect)
    assert allclose(net.res_dcline.q_to_mvar.values, q_to_expect)
def test_dcline_dispatch3(dcline_net):
net = dcline_net
pp.create_poly_cost(net, 4, "dcline", cp1_eur_per_mw=1.5)
net.bus["max_vm_pu"] = 1.03
net.line["max_loading_percent"] = 1000
pp.runopp(net)
consistency_checks(net, rtol=1e-1)
assert isclose(net.res_dcline.at[4, "p_to_mw"], 0, atol=1e-2)
assert all(net.res_ext_grid.p_mw.values > 0)
pp.create_poly_cost(net, 1, "ext_grid", cp1_eur_per_mw=2000)
pp.runopp(net)
assert (net.res_dcline.at[4, "p_to_mw"]) < 1e3
assert net.res_ext_grid.p_mw.at[1] < 1
assert isclose(net.res_cost, net.res_dcline.at[4, "p_from_mw"]*1.5)
if __name__ == "__main__":
pytest.main([__file__])
| true | true |
1c2c711f45577a76148ae8af191d115d86196cec | 8,486 | py | Python | client/filesystem.py | thatch/pyre-check | 31278b71081ec3a3636f0d94da91dc0e4273ece9 | [
"MIT"
] | null | null | null | client/filesystem.py | thatch/pyre-check | 31278b71081ec3a3636f0d94da91dc0e4273ece9 | [
"MIT"
] | null | null | null | client/filesystem.py | thatch/pyre-check | 31278b71081ec3a3636f0d94da91dc0e4273ece9 | [
"MIT"
] | null | null | null | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import errno
import fcntl
import functools
import logging
import os
import shutil
import subprocess
from contextlib import contextmanager
from typing import ContextManager, Dict, Generator, Iterable, List, Optional, Set
from .exceptions import EnvironmentException
LOG: logging.Logger = logging.getLogger(__name__)
class BuckBuilder:
    """Abstract interface for driving Buck builds on behalf of Pyre."""

    def build(self, targets: Iterable[str]) -> Iterable[str]:
        """Build *targets* and return the directories holding their output.

        Concrete subclasses must override this; the base class only defines
        the contract.
        """
        raise NotImplementedError
def translate_path(root: str, path: str) -> str:
    """Resolve *path* relative to *root* when possible.

    Absolute paths are returned untouched.  A relative path is joined onto
    *root*; if the joined location exists on disk its canonical
    (symlink-free) form is returned, otherwise the original path is
    handed back unchanged.
    """
    if not os.path.isabs(path):
        candidate = os.path.join(root, path)
        if os.path.exists(candidate):
            return os.path.realpath(candidate)
    return path
def translate_paths(paths: Set[str], original_directory: str) -> Set[str]:
    """Re-anchor *paths* given the directory the command was invoked from.

    When *original_directory* is not inside the current working directory
    the set is returned unchanged; otherwise each path is translated
    relative to the invocation directory.
    """
    cwd = os.getcwd()
    if not original_directory.startswith(cwd):
        return paths
    relative = os.path.relpath(original_directory, cwd)
    if not relative:
        return paths
    return {translate_path(relative, each) for each in paths}
def find_root(original_directory: str, target_file: str) -> Optional[str]:
    """Walk upward from *original_directory* looking for *target_file*.

    Returns the first ancestor directory (inclusive) containing the file,
    or None if the filesystem root is reached without finding it.
    """
    directory = os.path.abspath(original_directory)
    while directory != "/":
        if os.path.isfile(os.path.join(directory, target_file)):
            return directory
        directory = os.path.dirname(directory)
    return None
def exists(path: str) -> str:
    """Validate that *path* names a regular file and return it unchanged."""
    if os.path.isfile(path):
        return path
    raise ValueError("%s is not a valid file" % path)
def is_parent(parent: str, child: str) -> bool:
    """Return True when *child* lies strictly inside the *parent* directory."""
    prefix = parent.rstrip(os.sep) + os.sep
    return child.startswith(prefix)
def find_paths_with_extensions(root: str, extensions: Iterable[str]) -> List[str]:
    """List regular files and symlinks under *root* with the given extensions.

    Shells out to ``find``; results are absolute paths.  Raises
    subprocess.CalledProcessError if ``find`` fails.
    """
    root = os.path.abspath(root)  # return absolute paths
    # Build "( -name *.a -or -name *.b ... )" for the find expression.
    name_tests: List[str] = []
    for extension in extensions:
        if name_tests:
            name_tests.append("-or")
        name_tests.extend(["-name", "*.{}".format(extension)])
    command = [
        "find",
        root,
        # all files ending with the given extensions ...
        "(",
        *name_tests,
        ")",
        # ... that are regular files or symlinks
        "(",
        "-type",
        "f",
        "-or",
        "-type",
        "l",
        ")",
        "-print",
    ]
    raw = subprocess.check_output(command, stderr=subprocess.DEVNULL)
    output = raw.decode("utf-8").strip()
    return output.split("\n") if output else []
def find_python_paths(root: str) -> List[str]:
    """Return all ``.py``/``.pyi`` files under *root* as absolute paths.

    Wraps ``find`` failures in a user-facing EnvironmentException.
    """
    extensions = ["py", "pyi"]
    try:
        paths = find_paths_with_extensions(root, extensions)
    except subprocess.CalledProcessError:
        raise EnvironmentException(
            "Pyre was unable to locate an analysis directory. "
            "Ensure that your project is built and re-run pyre."
        )
    return paths
def is_empty(path: str) -> bool:
    """True if *path* exists with zero size; False when it is missing."""
    try:
        size = os.stat(path).st_size
    except FileNotFoundError:
        return False
    return size == 0
def remove_if_exists(path: str) -> None:
    """Best-effort removal of *path*, whether it is a file or a directory."""
    # Try the file remover first, then the tree remover; each silently
    # tolerates "wrong kind" or "already gone".
    for remover in (os.remove, shutil.rmtree):
        try:
            remover(path)
        except OSError:
            pass
def _compute_symbolic_link_mapping(
    directory: str, extensions: Iterable[str]
) -> Dict[str, str]:
    """Map actual source files to their symlinks in a shared analysis directory.

    Only files with one of the provided extensions are included.  Watchman
    watches actual source files, so this mapping lets a detected change be
    translated back to the path Pyre tracks.  On a ``find`` failure the
    error is logged and an empty mapping is returned.
    """
    mapping: Dict[str, str] = {}
    try:
        for link in find_paths_with_extensions(directory, extensions):
            mapping[os.path.realpath(link)] = link
    except subprocess.CalledProcessError as error:
        LOG.warning(
            "Exception encountered trying to find source files "
            "in the analysis directory: `%s`",
            error,
        )
        LOG.warning("Starting with an empty set of tracked files.")
    return mapping
def _delete_symbolic_link(link_path: str) -> None:
    # Remove the symlink itself (never the file it points at).
    os.unlink(link_path)
def add_symbolic_link(link_path: str, actual_path: str) -> None:
    """Create (or refresh) a symlink at *link_path* pointing to *actual_path*.

    Missing parent directories are created; an existing link is replaced.
    Other OS errors are logged rather than raised.
    """
    parent = os.path.dirname(link_path)
    try:
        os.makedirs(parent)
    except OSError:
        pass  # parent already exists
    try:
        os.symlink(actual_path, link_path)
    except OSError as error:
        if error.errno != errno.EEXIST:
            LOG.error(str(error))
        else:
            # Replace the stale link atomically enough for our purposes.
            os.unlink(link_path)
            os.symlink(actual_path, link_path)
@contextmanager
def acquire_lock(path: str, blocking: bool) -> Generator[Optional[int], None, None]:
    """Hold an exclusive ``fcntl`` lock on *path* for the duration of the block.

    Yields the lock file's descriptor.  Raises an OSError if the lock can't
    be acquired (non-blocking mode).  If the lock file's directory is
    missing, the failure is logged and None is yielded instead.
    """
    LOG.debug("Trying to acquire lock on file %s", path)
    try:
        with open(path, "w+") as lockfile:
            mode = fcntl.LOCK_EX if blocking else fcntl.LOCK_EX | fcntl.LOCK_NB
            descriptor = lockfile.fileno()
            fcntl.lockf(descriptor, mode)
            yield descriptor
            fcntl.lockf(descriptor, fcntl.LOCK_UN)
    except FileNotFoundError:
        LOG.debug(f"Unable to acquire lock because lock file {path} was not found")
        yield
@contextmanager
def do_nothing() -> Generator[None, None, None]:
    """No-op context manager, used when locking is not required."""
    yield
def acquire_lock_if_needed(
    lock_path: str, blocking: bool, needed: bool
) -> ContextManager[Optional[int]]:
    """Return a real lock context when *needed*, otherwise a no-op one."""
    return acquire_lock(lock_path, blocking) if needed else do_nothing()
class Filesystem:
    """Enumerates files on the plain filesystem via the ``find`` utility."""

    def list(
        self, root: str, patterns: List[str], exclude: Optional[List[str]] = None
    ) -> List[str]:
        """Return the files under *root* matching any of *patterns*.

        Files matching any *exclude* pattern are omitted.

        Note: The `find` command does not understand globs properly.
        e.g. 'a/*.py' will match 'a/b/c.py'
        For this reason, avoid calling this method with glob patterns.
        """
        command = ["find", "."] + self._match_any(patterns)
        if exclude:
            command.extend(["-and", "!"] + self._match_any(exclude))
        completed = subprocess.run(command, stdout=subprocess.PIPE, cwd=root)
        return completed.stdout.decode("utf-8").split()

    def _match_any(self, patterns: List[str]) -> List[str]:
        # Build a parenthesised find expression:
        #   ( -path ./p1 -or -path ./p2 ... )
        tests: List[str] = []
        for pattern in patterns:
            if tests:
                tests.append("-or")
            tests.extend(["-path", "./{}".format(pattern)])
        return ["(", *tests, ")"]
class MercurialBackedFilesystem(Filesystem):
    """File listing backed by ``hg files`` (limited to Mercurial-tracked files)."""

    def list(
        self, root: str, patterns: List[str], exclude: Optional[List[str]] = None
    ) -> List[str]:
        """List tracked files under *root* matching *patterns*.

        Raises EnvironmentException when the ``hg`` executable is missing.
        """
        try:
            command = ["hg", "files"]
            for pattern in patterns:
                command += ["--include", pattern]
            if exclude:
                for pattern in exclude:
                    command += ["--exclude", pattern]
            return (
                subprocess.run(
                    command, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, cwd=root
                )
                .stdout.decode("utf-8")
                .split()
            )
        except FileNotFoundError:
            raise EnvironmentException("hg executable not found.")
@functools.lru_cache(1)
def get_filesystem() -> Filesystem:
    """Pick the filesystem implementation once per process.

    Uses the Mercurial-backed listing when ``hg status`` succeeds in the
    current directory; otherwise falls back to the plain ``find`` version.
    """
    try:
        subprocess.check_output(["hg", "status"], stderr=subprocess.DEVNULL)
        return MercurialBackedFilesystem()
    except (subprocess.CalledProcessError, FileNotFoundError):
        return Filesystem()
| 30.415771 | 88 | 0.604054 |
import errno
import fcntl
import functools
import logging
import os
import shutil
import subprocess
from contextlib import contextmanager
from typing import ContextManager, Dict, Generator, Iterable, List, Optional, Set
from .exceptions import EnvironmentException
LOG: logging.Logger = logging.getLogger(__name__)
class BuckBuilder:
def build(self, targets: Iterable[str]) -> Iterable[str]:
raise NotImplementedError
def translate_path(root: str, path: str) -> str:
if os.path.isabs(path):
return path
translated = os.path.join(root, path)
if os.path.exists(translated):
return os.path.realpath(translated)
return path
def translate_paths(paths: Set[str], original_directory: str) -> Set[str]:
current_directory = os.getcwd()
if not original_directory.startswith(current_directory):
return paths
translation = os.path.relpath(original_directory, current_directory)
if not translation:
return paths
return {translate_path(translation, path) for path in paths}
def find_root(original_directory: str, target_file: str) -> Optional[str]:
current_directory = os.path.abspath(original_directory)
while current_directory != "/":
absolute = os.path.join(current_directory, target_file)
if os.path.isfile(absolute):
return current_directory
current_directory = os.path.dirname(current_directory)
return None
def exists(path: str) -> str:
if not os.path.isfile(path):
raise ValueError("%s is not a valid file" % path)
return path
def is_parent(parent: str, child: str) -> bool:
return child.startswith(parent.rstrip(os.sep) + os.sep)
def find_paths_with_extensions(root: str, extensions: Iterable[str]) -> List[str]:
root = os.path.abspath(root)
extension_filter = []
for extension in extensions:
if len(extension_filter) > 0:
extension_filter.append("-or")
extension_filter.extend(["-name", "*.{}".format(extension)])
output = (
subprocess.check_output(
[
"find",
root,
"(",
*extension_filter,
")",
"(",
"-type",
"f",
"-or",
"-type",
"l",
")",
"-print",
],
stderr=subprocess.DEVNULL,
)
.decode("utf-8")
.strip()
)
return output.split("\n") if output else []
def find_python_paths(root: str) -> List[str]:
try:
return find_paths_with_extensions(root, ["py", "pyi"])
except subprocess.CalledProcessError:
raise EnvironmentException(
"Pyre was unable to locate an analysis directory. "
"Ensure that your project is built and re-run pyre."
)
def is_empty(path: str) -> bool:
try:
return os.stat(path).st_size == 0
except FileNotFoundError:
return False
def remove_if_exists(path: str) -> None:
try:
os.remove(path)
except OSError:
pass
try:
shutil.rmtree(path)
except OSError:
pass
def _compute_symbolic_link_mapping(
directory: str, extensions: Iterable[str]
) -> Dict[str, str]:
symbolic_links = {}
try:
for symbolic_link in find_paths_with_extensions(directory, extensions):
symbolic_links[os.path.realpath(symbolic_link)] = symbolic_link
except subprocess.CalledProcessError as error:
LOG.warning(
"Exception encountered trying to find source files "
"in the analysis directory: `%s`",
error,
)
LOG.warning("Starting with an empty set of tracked files.")
return symbolic_links
def _delete_symbolic_link(link_path: str) -> None:
os.unlink(link_path)
def add_symbolic_link(link_path: str, actual_path: str) -> None:
directory = os.path.dirname(link_path)
try:
os.makedirs(directory)
except OSError:
pass
try:
os.symlink(actual_path, link_path)
except OSError as error:
if error.errno == errno.EEXIST:
os.unlink(link_path)
os.symlink(actual_path, link_path)
else:
LOG.error(str(error))
@contextmanager
def acquire_lock(path: str, blocking: bool) -> Generator[Optional[int], None, None]:
LOG.debug("Trying to acquire lock on file %s", path)
try:
with open(path, "w+") as lockfile:
if not blocking:
lock_command = fcntl.LOCK_EX | fcntl.LOCK_NB
else:
lock_command = fcntl.LOCK_EX
fcntl.lockf(lockfile.fileno(), lock_command)
yield lockfile.fileno()
fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN)
except FileNotFoundError:
LOG.debug(f"Unable to acquire lock because lock file {path} was not found")
yield
@contextmanager
def do_nothing() -> Generator[None, None, None]:
yield
def acquire_lock_if_needed(
lock_path: str, blocking: bool, needed: bool
) -> ContextManager[Optional[int]]:
if needed:
return acquire_lock(lock_path, blocking)
else:
return do_nothing()
class Filesystem:
def list(
self, root: str, patterns: List[str], exclude: Optional[List[str]] = None
) -> List[str]:
command = ["find", "."]
command += self._match_any(patterns)
if exclude:
command += ["-and", "!"]
command += self._match_any(exclude)
return (
subprocess.run(command, stdout=subprocess.PIPE, cwd=root)
.stdout.decode("utf-8")
.split()
)
def _match_any(self, patterns: List[str]) -> List[str]:
expression = []
for pattern in patterns:
if expression:
expression.append("-or")
expression.extend(["-path", "./{}".format(pattern)])
return ["(", *expression, ")"]
class MercurialBackedFilesystem(Filesystem):
def list(
self, root: str, patterns: List[str], exclude: Optional[List[str]] = None
) -> List[str]:
try:
command = ["hg", "files"]
for pattern in patterns:
command += ["--include", pattern]
if exclude:
for pattern in exclude:
command += ["--exclude", pattern]
return (
subprocess.run(
command, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, cwd=root
)
.stdout.decode("utf-8")
.split()
)
except FileNotFoundError:
raise EnvironmentException("hg executable not found.")
@functools.lru_cache(1)
def get_filesystem() -> Filesystem:
try:
subprocess.check_output(["hg", "status"], stderr=subprocess.DEVNULL)
return MercurialBackedFilesystem()
except (subprocess.CalledProcessError, FileNotFoundError):
return Filesystem()
| true | true |
1c2c733fc10a2c7c9bd106f46e4f2937d96ffece | 4,121 | py | Python | test/jpypetest/test_caller_sensitive.py | pitmanst/jpype | 7256261e435b5c9309941c668258bebd1bcdff2d | [
"Apache-2.0"
] | 531 | 2018-07-19T03:30:04.000Z | 2022-03-29T16:52:44.000Z | test/jpypetest/test_caller_sensitive.py | pitmanst/jpype | 7256261e435b5c9309941c668258bebd1bcdff2d | [
"Apache-2.0"
] | 715 | 2018-07-18T09:21:01.000Z | 2022-03-24T17:45:49.000Z | test/jpypetest/test_caller_sensitive.py | pitmanst/jpype | 7256261e435b5c9309941c668258bebd1bcdff2d | [
"Apache-2.0"
] | 80 | 2018-07-18T13:10:55.000Z | 2022-03-31T19:47:16.000Z | # *****************************************************************************
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See NOTICE file for details.
#
# *****************************************************************************
import jpype
import common
class JCallerSensitiveCase(common.JPypeTestCase):
""" Test for caller sensitive methods.
Java uses a special pattern to deal with the security manager. It
uses the stack to find the callers class and then decides if the
call is permitted. But when we call from JNI there is no class.
Thus, we need a special caller pattern which proxies to Java and
then calls the method.
This alternative method has to be tested against all of the
different patterns (static, member), returning (void, primitive, object),
called with (nothing, object, primitive, many, varargs)
Unfortunately, the actual problematic method in Java is private,
so we can't get to it directly. Thus will will perform indirect tests.
For now we do not support caller sensitive constructors.
"""
def setUp(self):
common.JPypeTestCase.setUp(self)
if not jpype.getJVMVersion() > (1, 8, 0):
raise common.unittest.SkipTest
self.Class = jpype.JClass("jpype.method.Caller")
self.obj = self.Class()
def testCallStatic(self):
self.assertIsInstance(self.Class.callObjectStatic(), self.Class)
def testCallStaticAsMember(self):
self.assertIsInstance(self.obj.callObjectStatic(), self.Class)
def testCallMember(self):
self.assertIsInstance(self.obj.callObjectMember(), self.Class)
def testCallMemberFromClass(self):
self.assertIsInstance(
self.Class.callObjectMember(self.obj), self.Class)
def testCallVoidStatic(self):
self.assertEqual(self.Class.callVoidStatic(), None)
def testCallVoidStaticAsMember(self):
self.assertEqual(self.obj.callVoidStatic(), None)
def testCallVoidMemberFromClass(self):
self.assertEqual(self.Class.callVoidMember(self.obj), None)
def testCallIntegerStatic(self):
self.assertEqual(self.Class.callIntegerStatic(), 123)
def testCallIntegerMember(self):
self.assertEqual(self.obj.callIntegerMember(), 123)
def testCallIntegerStaticAsMember(self):
self.assertEqual(self.obj.callIntegerStatic(), 123)
def testCallIntegerMemberFromClass(self):
self.assertEqual(self.Class.callIntegerMember(self.obj), 123)
def testArgs(self):
self.assertEqual(self.obj.callArgs(1, 2), 2)
def testArgsFromClass(self):
self.assertEqual(self.Class.callArgs(self.obj, 1, 2), 2)
def testPrimitive(self):
self.assertEqual(self.obj.callArg1(123), 123)
def testPrimitiveFromClass(self):
self.assertEqual(self.Class.callArg1(self.obj, 125), 125)
def testVarArgs(self):
self.assertEqual(tuple(self.obj.callVarArgs(1, 2, 3)), (2, 3))
def testVarArgsFromClass(self):
self.assertEqual(
tuple(self.Class.callVarArgs(self.obj, 1, 2, 3)), (2, 3))
def testDeclaredMethod(self):
self.assertIsInstance(jpype.java.lang.Object.class_.getDeclaredMethod(
'wait'), jpype.java.lang.reflect.Method)
def testStackWalker1(self):
with self.assertRaises(jpype.java.lang.IllegalCallerException):
self.obj.callStackWalker1()
def testStackWalker2(self):
self.assertEqual(self.obj.callStackWalker2(), jpype.JClass(
jpype.java.lang.Class.forName("org.jpype.JPypeContext")).class_)
| 36.469027 | 79 | 0.67605 |
import jpype
import common
class JCallerSensitiveCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
if not jpype.getJVMVersion() > (1, 8, 0):
raise common.unittest.SkipTest
self.Class = jpype.JClass("jpype.method.Caller")
self.obj = self.Class()
def testCallStatic(self):
self.assertIsInstance(self.Class.callObjectStatic(), self.Class)
def testCallStaticAsMember(self):
self.assertIsInstance(self.obj.callObjectStatic(), self.Class)
def testCallMember(self):
self.assertIsInstance(self.obj.callObjectMember(), self.Class)
def testCallMemberFromClass(self):
self.assertIsInstance(
self.Class.callObjectMember(self.obj), self.Class)
def testCallVoidStatic(self):
self.assertEqual(self.Class.callVoidStatic(), None)
def testCallVoidStaticAsMember(self):
self.assertEqual(self.obj.callVoidStatic(), None)
def testCallVoidMemberFromClass(self):
self.assertEqual(self.Class.callVoidMember(self.obj), None)
def testCallIntegerStatic(self):
self.assertEqual(self.Class.callIntegerStatic(), 123)
def testCallIntegerMember(self):
self.assertEqual(self.obj.callIntegerMember(), 123)
def testCallIntegerStaticAsMember(self):
self.assertEqual(self.obj.callIntegerStatic(), 123)
def testCallIntegerMemberFromClass(self):
self.assertEqual(self.Class.callIntegerMember(self.obj), 123)
def testArgs(self):
self.assertEqual(self.obj.callArgs(1, 2), 2)
def testArgsFromClass(self):
self.assertEqual(self.Class.callArgs(self.obj, 1, 2), 2)
def testPrimitive(self):
self.assertEqual(self.obj.callArg1(123), 123)
def testPrimitiveFromClass(self):
self.assertEqual(self.Class.callArg1(self.obj, 125), 125)
def testVarArgs(self):
self.assertEqual(tuple(self.obj.callVarArgs(1, 2, 3)), (2, 3))
def testVarArgsFromClass(self):
self.assertEqual(
tuple(self.Class.callVarArgs(self.obj, 1, 2, 3)), (2, 3))
def testDeclaredMethod(self):
self.assertIsInstance(jpype.java.lang.Object.class_.getDeclaredMethod(
'wait'), jpype.java.lang.reflect.Method)
def testStackWalker1(self):
with self.assertRaises(jpype.java.lang.IllegalCallerException):
self.obj.callStackWalker1()
def testStackWalker2(self):
self.assertEqual(self.obj.callStackWalker2(), jpype.JClass(
jpype.java.lang.Class.forName("org.jpype.JPypeContext")).class_)
| true | true |
1c2c73f8fcee516f5d2087002b4b428c69c7db50 | 32,923 | py | Python | orgbook-issuer-agent/issuer_controller/src/issuer.py | BrendanBeachBC/jag-lcrb-carla-public | cd270e463ea5995073a609a86be7ca5a1e9b8ca3 | [
"Apache-2.0"
] | 7 | 2019-03-06T20:11:00.000Z | 2021-09-04T13:58:10.000Z | orgbook-issuer-agent/issuer_controller/src/issuer.py | BrendanBeachBC/jag-lcrb-carla-public | cd270e463ea5995073a609a86be7ca5a1e9b8ca3 | [
"Apache-2.0"
] | 47 | 2018-07-21T22:39:11.000Z | 2022-03-02T13:08:36.000Z | orgbook-issuer-agent/issuer_controller/src/issuer.py | BrendanBeachBC/jag-lcrb-carla-public | cd270e463ea5995073a609a86be7ca5a1e9b8ca3 | [
"Apache-2.0"
] | 20 | 2018-12-12T14:15:09.000Z | 2022-03-23T00:01:34.000Z | import json
import os
import threading
import time
from datetime import datetime
import requests
import logging
import random
import requests
from flask import jsonify
from src import config
AGENT_ADMIN_API_KEY = os.environ.get("AGENT_ADMIN_API_KEY")
ADMIN_REQUEST_HEADERS = {"Content-Type": "application/json"}
if AGENT_ADMIN_API_KEY is not None and 0 < len(AGENT_ADMIN_API_KEY):
ADMIN_REQUEST_HEADERS["x-api-key"] = AGENT_ADMIN_API_KEY
TOB_ADMIN_API_KEY = os.environ.get("TOB_ADMIN_API_KEY")
TOB_REQUEST_HEADERS = {}
if TOB_ADMIN_API_KEY is not None and 0 < len(TOB_ADMIN_API_KEY):
TOB_REQUEST_HEADERS = {"x-api-key": TOB_ADMIN_API_KEY}
TRACE_EVENTS = os.getenv("TRACE_EVENTS", "True").lower() == "true"
TRACE_LABEL = os.getenv("TRACE_LABEL", "bcreg.controller")
TRACE_TAG = os.getenv("TRACE_TAG", "acapy.events")
TRACE_LOG_TARGET = "log"
TRACE_TARGET = os.getenv("TRACE_TARGET", TRACE_LOG_TARGET)
# percentage of credential exchanges to trace, between 0 and 100
TRACE_MSG_PCT = int(os.getenv("TRACE_MSG_PCT", "0"))
TRACE_MSG_PCT = max(min(TRACE_MSG_PCT, 100), 0)
ACK_ERROR_PCT = int(os.getenv("ACK_ERROR_PCT", "0"))
ACK_ERROR_PCT = max(min(ACK_ERROR_PCT, 100), 0)
LOG_LEVEL = os.environ.get("LOG_LEVEL", "WARNING").upper()
LOGGER = logging.getLogger(__name__)
if TRACE_EVENTS and TRACE_TARGET == TRACE_LOG_TARGET:
LOGGER.setLevel(logging.INFO)
elif LOG_LEVEL and 0 < len(LOG_LEVEL):
LOGGER.setLevel(LOG_LEVEL)
DT_FMT = "%Y-%m-%d %H:%M:%S.%f%z"
# list of cred defs per schema name/version
app_config = {}
app_config["schemas"] = {}
app_config["running"] = True
app_config["config_services"] = {}
synced = {}
MAX_RETRIES = 3
def agent_post_with_retry(url, payload, headers=None):
    """POST *payload* to *url*, retrying on any error.

    Retries up to MAX_RETRIES additional attempts with a 5 second pause
    between them; re-raises the last error once the retry budget is
    exhausted.  Returns the successful ``requests.Response``.
    """
    retries = 0
    while True:
        try:
            response = requests.post(
                url,
                payload,
                headers=headers,
            )
            # Treat HTTP error statuses as failures so they are retried too.
            response.raise_for_status()
            return response
        except Exception as e:
            LOGGER.error("Error posting %s %s", url, str(e))
            retries = retries + 1
            if retries > MAX_RETRIES:
                raise e
            time.sleep(5)
def agent_schemas_cred_defs(agent_admin_url):
    """Fetch the schemas and credential definitions our agent has published.

    Returns a dict keyed by "<schema name>::<schema version>".  Each entry
    holds the schema body, its ledger sequence number (as "schema_id") and,
    when one exists, the matching credential definition under "cred_def".
    """
    ret_schemas = {}

    # get loaded cred defs and schemas
    response = requests.get(
        agent_admin_url + "/schemas/created",
        headers=ADMIN_REQUEST_HEADERS,
    )
    response.raise_for_status()
    schemas = response.json()["schema_ids"]
    for schema_id in schemas:
        response = requests.get(
            agent_admin_url + "/schemas/" + schema_id,
            headers=ADMIN_REQUEST_HEADERS,
        )
        response.raise_for_status()
        schema = response.json()["schema"]
        if schema:
            schema_key = schema["name"] + "::" + schema["version"]
            ret_schemas[schema_key] = {
                "schema": schema,
                # seqNo (not the id string) is what the cred def references below
                "schema_id": str(schema["seqNo"]),
            }

    response = requests.get(
        agent_admin_url + "/credential-definitions/created",
        headers=ADMIN_REQUEST_HEADERS,
    )
    response.raise_for_status()
    cred_defs = response.json()["credential_definition_ids"]
    for cred_def_id in cred_defs:
        response = requests.get(
            agent_admin_url + "/credential-definitions/" + cred_def_id,
            headers=ADMIN_REQUEST_HEADERS,
        )
        response.raise_for_status()
        cred_def = response.json()["credential_definition"]
        # attach each cred def to the schema whose seqNo it references
        for schema_key in ret_schemas:
            if ret_schemas[schema_key]["schema_id"] == cred_def["schemaId"]:
                ret_schemas[schema_key]["cred_def"] = cred_def
                break
    return ret_schemas
def register_issuer_with_orgbook(connection_id):
    """Send our issuer registration over the given TOB connection.

    Assembles the issuer, schema and credential-definition specs for every
    configured issuer and posts them via the agent admin API, then marks
    the connection as synchronized.  Idempotent: returns immediately if
    this connection is already synced.
    """
    if connection_id in synced and synced[connection_id]:
        return
    app_config["TOB_CONNECTION"] = connection_id
    synced[connection_id] = False

    config_root = app_config["config_root"]
    config_services = app_config["config_services"]
    agent_admin_url = app_config["AGENT_ADMIN_URL"]

    for issuer_name, issuer_info in config_services["issuers"].items():
        # register ourselves (issuer, schema(s), cred def(s)) with TOB
        issuer_config = {
            "name": issuer_name,
            "did": app_config["DID"],
            "config_root": config_root,
        }
        issuer_config.update(issuer_info)
        issuer_spec = config.assemble_issuer_spec(issuer_config)

        credential_types = []
        for credential_type in issuer_info["credential_types"]:
            schema_name = credential_type["schema"]
            schema_info = app_config["schemas"]["SCHEMA_" + schema_name]
            ctype_config = {
                "schema_name": schema_name,
                "schema_version": schema_info["version"],
                "issuer_url": issuer_config["url"],
                "config_root": config_root,
                # cred def id registered at startup for this schema/version
                "credential_def_id": app_config["schemas"][
                    "CRED_DEF_" + schema_name + "_" + schema_info["version"]
                ],
            }
            credential_type["attributes"] = schema_info["attributes"]
            ctype_config.update(credential_type)
            ctype = config.assemble_credential_type_spec(
                ctype_config, schema_info.get("attributes")
            )
            if ctype is not None:
                credential_types.append(ctype)

        issuer_request = {
            "connection_id": app_config["TOB_CONNECTION"],
            "issuer_registration": {
                "credential_types": credential_types,
                "issuer": issuer_spec,
            },
        }

        response = requests.post(
            agent_admin_url + "/issuer_registration/send",
            json.dumps(issuer_request),
            headers=ADMIN_REQUEST_HEADERS,
        )
        response.raise_for_status()
        response.json()
        print("Registered issuer: ", issuer_name)

    synced[connection_id] = True
    print("Connection {} is synchronized".format(connection_id))
class StartupProcessingThread(threading.Thread):
    """Background worker that provisions the agent on startup.

    Loads the schema/service configuration, publishes any missing schemas
    and credential definitions via the agent admin API, establishes (or
    reuses) a connection to TheOrgBook (TOB), and finally registers this
    issuer over that connection.
    """

    def __init__(self, ENV):
        threading.Thread.__init__(self)
        self.ENV = ENV

    def run(self):
        # read configuration files
        config_root = self.ENV.get("CONFIG_ROOT", "../config")
        config_schemas = config.load_config(config_root + "/schemas.yml", env=self.ENV)
        config_services = config.load_config(
            config_root + "/services.yml", env=self.ENV
        )
        app_config["config_root"] = config_root
        app_config["config_services"] = config_services

        agent_admin_url = self.ENV.get("AGENT_ADMIN_URL")
        if not agent_admin_url:
            raise RuntimeError(
                "Error AGENT_ADMIN_URL is not specified, can't connect to Agent."
            )
        app_config["AGENT_ADMIN_URL"] = agent_admin_url

        # get public DID from our agent
        response = requests.get(
            agent_admin_url + "/wallet/did/public",
            headers=ADMIN_REQUEST_HEADERS,
        )
        result = response.json()
        did = result["result"]
        LOGGER.info("Fetched DID from agent: %s", did)
        app_config["DID"] = did["did"]

        # determine pre-registered schemas and cred defs
        existing_schemas = agent_schemas_cred_defs(agent_admin_url)

        # register any schemas and credential definitions not yet published
        for schema in config_schemas:
            schema_name = schema["name"]
            schema_version = schema["version"]
            schema_key = schema_name + "::" + schema_version
            if schema_key not in existing_schemas:
                # attributes may be a {name: description} dict or a plain
                # list; only the attribute names are registered
                if isinstance(schema["attributes"], dict):
                    schema_attrs = list(schema["attributes"].keys())
                else:
                    schema_attrs = list(schema["attributes"])

                # register our schema(s) and credential definition(s)
                schema_request = {
                    "schema_name": schema_name,
                    "schema_version": schema_version,
                    "attributes": schema_attrs,
                }
                response = agent_post_with_retry(
                    agent_admin_url + "/schemas",
                    json.dumps(schema_request),
                    headers=ADMIN_REQUEST_HEADERS,
                )
                response.raise_for_status()
                schema_id = response.json()
            else:
                schema_id = {"schema_id": existing_schemas[schema_key]["schema"]["id"]}
            app_config["schemas"]["SCHEMA_" + schema_name] = schema
            app_config["schemas"][
                "SCHEMA_" + schema_name + "_" + schema_version
            ] = schema_id["schema_id"]
            LOGGER.info("Registered schema: %s", schema_id)

            if (
                schema_key not in existing_schemas
                or "cred_def" not in existing_schemas[schema_key]
            ):
                cred_def_request = {"schema_id": schema_id["schema_id"]}
                response = agent_post_with_retry(
                    agent_admin_url + "/credential-definitions",
                    json.dumps(cred_def_request),
                    headers=ADMIN_REQUEST_HEADERS,
                )
                response.raise_for_status()
                credential_definition_id = response.json()
            else:
                credential_definition_id = {
                    "credential_definition_id": existing_schemas[schema_key][
                        "cred_def"
                    ]["id"]
                }
            app_config["schemas"][
                "CRED_DEF_" + schema_name + "_" + schema_version
            ] = credential_definition_id["credential_definition_id"]
            LOGGER.info(
                "Registered credential definition: %s", credential_definition_id
            )

        # what is the TOB connection name?
        tob_connection_params = config_services["verifiers"]["bctob"]

        # check if we have a TOB connection
        response = requests.get(
            agent_admin_url + "/connections?alias=" + tob_connection_params["alias"],
            headers=ADMIN_REQUEST_HEADERS,
        )
        response.raise_for_status()
        connections = response.json()["results"]
        tob_connection = None
        for connection in connections:
            # check for TOB connection
            if connection["alias"] == tob_connection_params["alias"]:
                tob_connection = connection

        if not tob_connection:
            # if no tob connection then establish one (if we can)
            # (agent_admin_url is provided if we can directly ask the TOB agent for an invitation,
            # ... otherwise the invitation has to be provided manually through the admin api
            # ... WITH THE CORRECT ALIAS)
            if (
                "agent_admin_url" in tob_connection_params["connection"]
                and tob_connection_params["connection"]["agent_admin_url"]
            ):
                tob_agent_admin_url = tob_connection_params["connection"][
                    "agent_admin_url"
                ]
                response = requests.post(
                    tob_agent_admin_url + "/connections/create-invitation",
                    headers=TOB_REQUEST_HEADERS,
                )
                response.raise_for_status()
                invitation = response.json()

                response = requests.post(
                    agent_admin_url
                    + "/connections/receive-invitation?alias="
                    + tob_connection_params["alias"],
                    json.dumps(invitation["invitation"]),
                    headers=ADMIN_REQUEST_HEADERS,
                )
                response.raise_for_status()
                tob_connection = response.json()
                LOGGER.info(
                    "Established tob connection: %s", json.dumps(tob_connection)
                )
                time.sleep(5)

        # if we have a connection to the TOB agent, we can register our issuer
        if tob_connection:
            register_issuer_with_orgbook(tob_connection["connection_id"])
        else:
            print(
                "No TOB connection found or established, awaiting invitation to connect to TOB ..."
            )
def tob_connection_synced():
    """Return truthy once the TOB connection is known and marked as synced."""
    conn_id = app_config.get("TOB_CONNECTION")
    if conn_id is None:
        return False
    return bool(synced.get(conn_id))
def tob_connection_active():
    """
    Return True if there are pending credential requests, False otherwise.
    Note this will return False if the TOB connection is not yet sync'ed.
    """
    # An un-synced connection can have no meaningful pending work.
    return tob_connection_synced() and bool(credential_requests)
def issuer_liveness_check():
    """
    Check if we can shut down the container: stays True while the issuer is
    running, and after a shutdown request stays True only while credential
    requests are still outstanding.
    """
    if not app_config["running"]:
        # shutdown was requested: keep alive only while the queue drains
        return tob_connection_active()
    return True
class ShutdownProcessingThread(threading.Thread):
    """Blocks (1s poll) until the issuer is idle so a clean shutdown can proceed."""

    def run(self):
        # issuer_liveness_check() stays True while no shutdown was requested,
        # or while credential requests remain in flight after one was.
        while issuer_liveness_check():
            LOGGER.error("... Waiting for work queue to clear before shutdown ...")
            time.sleep(1)
def signal_issuer_shutdown(signum, frame):
    """
    Tell the issuer to do a clean shutdown (finish work queue first).
    """
    global app_config
    LOGGER.error(">>> Received shutdown signal!")
    # Stop accepting new work, then block until the queue drains.
    app_config["running"] = False
    waiter = ShutdownProcessingThread()
    waiter.start()
    waiter.join()
    LOGGER.error(">>> Shutting down issuer controller process.")
def startup_init(ENV):
    """Kick off asynchronous startup (schema/cred-def registration) and return the worker thread."""
    global app_config
    worker = StartupProcessingThread(ENV)
    worker.start()
    return worker
# Cross-thread state for in-flight credential exchanges:
#   credential_requests:  cred_exch_id -> threading.Event (set when a response lands)
#   credential_responses: cred_exch_id -> response dict
#   credential_threads:   thread_id <-> cred_exch_id (both directions)
credential_lock = threading.Lock()
credential_requests = {}
credential_responses = {}
credential_threads = {}
# Locking of the above maps can be disabled via env var (defaults to on).
USE_LOCK = os.getenv("USE_LOCK", "True").lower() == "true"
# need to specify an env variable RECORD_TIMINGS=True to get method timings
RECORD_TIMINGS = os.getenv("RECORD_TIMINGS", "False").lower() == "true"
# timing_lock guards the module-level `timings` statistics dict below.
timing_lock = threading.Lock()
timings = {}
def clear_stats():
    """Reset all recorded method timings."""
    global timings
    with timing_lock:
        timings = {}
def get_stats():
    """Return the shared timing-statistics dict (treat as read-only)."""
    with timing_lock:
        return timings
def log_timing_method(method, start_time, end_time, success, data=None):
    """Accumulate per-method timing statistics (no-op unless RECORD_TIMINGS).

    Tracks call/success/fail counts, min/max/total/average elapsed time,
    and optional per-call `data` keyed by the running call count.
    """
    if not RECORD_TIMINGS:
        return
    elapsed = end_time - start_time
    with timing_lock:
        entry = timings.get(method)
        if entry is None:
            entry = {
                "total_count": 1,
                "success_count": 1 if success else 0,
                "fail_count": 0 if success else 1,
                "min_time": elapsed,
                "max_time": elapsed,
                "total_time": elapsed,
                "avg_time": elapsed,
                "data": {},
            }
            timings[method] = entry
        else:
            entry["total_count"] += 1
            if success:
                entry["success_count"] += 1
            else:
                entry["fail_count"] += 1
            entry["max_time"] = max(entry["max_time"], elapsed)
            entry["min_time"] = min(entry["min_time"], elapsed)
            entry["total_time"] += elapsed
            entry["avg_time"] = entry["total_time"] / entry["total_count"]
        if data:
            entry["data"][str(entry["total_count"])] = data
def log_timing_event(method, message, start_time, end_time, success, outcome=None):
    """Record a timing event in the system log or http endpoint.

    A call with ``end_time=None`` records a START event; otherwise a
    SUCCESS/FAIL completion event.  Events are recorded only when tracing
    is enabled globally (TRACE_EVENTS) or per-message (``message["trace"]``),
    and a TRACE_TARGET is configured.
    """
    if (not TRACE_EVENTS) and (not message.get("trace")):
        return
    if not TRACE_TARGET:
        return
    msg_id = "N/A"
    thread_id = message["thread_id"] if message.get("thread_id") else "N/A"
    handler = TRACE_LABEL
    ep_time = time.time()
    str_time = datetime.utcfromtimestamp(ep_time).strftime(DT_FMT)
    if end_time:
        # BUG FIX: previously written as
        #   method + ".SUCCESS" if success else ".FAIL"
        # which parses as (method + ".SUCCESS") if success else ".FAIL",
        # dropping the method name from every failure outcome.  Parenthesize
        # so both outcomes carry the method, matching the ".START" branch.
        str_outcome = method + (".SUCCESS" if success else ".FAIL")
    else:
        str_outcome = method + ".START"
    if outcome:
        str_outcome = str_outcome + "." + outcome
    event = {
        "msg_id": msg_id,
        "thread_id": thread_id if thread_id else msg_id,
        "traced_type": method,
        "timestamp": ep_time,
        "str_time": str_time,
        "handler": str(handler),
        "ellapsed_milli": int(1000 * (end_time - start_time)) if end_time else 0,
        "outcome": str_outcome,
    }
    event_str = json.dumps(event)
    try:
        if TRACE_TARGET == TRACE_LOG_TARGET:
            # write to standard log file
            LOGGER.error(" %s %s", TRACE_TAG, event_str)
        else:
            # should be an http endpoint
            _ = requests.post(
                TRACE_TARGET + TRACE_TAG,
                data=event_str,
                headers={"Content-Type": "application/json"},
            )
    except Exception as e:
        # best-effort tracing: never let a trace failure break the caller
        LOGGER.error(
            "Error logging trace target: %s tag: %s event: %s",
            TRACE_TARGET,
            TRACE_TAG,
            event_str,
        )
        LOGGER.exception(e)
def set_credential_thread_id(cred_exch_id, thread_id):
    """Record the thread_id <-> credential_exchange_id mapping (both directions)."""
    started = time.perf_counter()
    if USE_LOCK:
        credential_lock.acquire()
    try:
        # store both directions so either identifier can be used for lookup
        credential_threads[thread_id] = cred_exch_id
        credential_threads[cred_exch_id] = thread_id
    finally:
        if USE_LOCK:
            credential_lock.release()
    held_for = time.perf_counter() - started
    if held_for > 0.001:
        LOGGER.warn(">>> lock time = %s", str(held_for))
def add_credential_request(cred_exch_id):
    """Register interest in a credential exchange result.

    Returns a ``threading.Event`` that will be set when the response arrives,
    or ``None`` if the response is already available.
    """
    start_time = time.perf_counter()
    if USE_LOCK:
        credential_lock.acquire()
    try:
        # short circuit if we already have the response
        if cred_exch_id in credential_responses:
            return None
        result_available = threading.Event()
        credential_requests[cred_exch_id] = result_available
        return result_available
    finally:
        if USE_LOCK:
            credential_lock.release()
        # BUG FIX: this slow-lock warning previously sat AFTER the try/finally,
        # where it was unreachable (both branches above return).  Moved into
        # the finally block so it actually runs, matching the sibling helpers.
        processing_time = time.perf_counter() - start_time
        if processing_time > 0.001:
            LOGGER.warn(">>> lock time = %s", str(processing_time))
def add_credential_response(cred_exch_id, response):
    """Store a response and wake any thread waiting on this exchange id."""
    started = time.perf_counter()
    if USE_LOCK:
        credential_lock.acquire()
    try:
        credential_responses[cred_exch_id] = response
        # wake (and forget) the waiter, if one is registered
        waiter = credential_requests.pop(cred_exch_id, None)
        if waiter is not None:
            waiter.set()
    finally:
        if USE_LOCK:
            credential_lock.release()
    held_for = time.perf_counter() - started
    if held_for > 0.001:
        LOGGER.warn(">>> lock time = %s", str(held_for))
def add_credential_problem_report(thread_id, response):
    """Route an agent problem report to the request waiting on it.

    Looks up the exchange id via the thread-id cross-reference; when the
    thread id is unknown, falls back to a best-effort guess that is only
    safe when exactly one request is outstanding.
    """
    LOGGER.error(
        "get problem report for thread %s %s", thread_id, str(len(credential_requests))
    )
    if thread_id in credential_threads:
        cred_exch_id = credential_threads[thread_id]
        LOGGER.error(" ... cred_exch_id is %s: %s", cred_exch_id, str(response))
        add_credential_response(cred_exch_id, response)
    else:
        LOGGER.error("thread_id not found %s", thread_id)
        # hack for now: if exactly one request is pending, assume the report
        # belongs to it; otherwise we cannot attribute it and only log
        if 1 == len(list(credential_requests.keys())):
            cred_exch_id = list(credential_requests.keys())[0]
            add_credential_response(cred_exch_id, response)
        elif 0 == len(list(credential_requests.keys())):
            LOGGER.error(
                "NO outstanding requests, can't map problem report to request :-("
            )
            LOGGER.error(credential_requests)
        else:
            LOGGER.error(
                "Too many outstanding requests, can't map problem report to request :-("
            )
            LOGGER.error(credential_requests)
def add_credential_timeout_report(cred_exch_id, thread_id):
    """Record a timeout failure as the response for a credential exchange."""
    LOGGER.error("add timeout report for cred %s %s", thread_id, cred_exch_id)
    failure = {"success": False, "result": thread_id + "::Error thread timeout"}
    add_credential_response(cred_exch_id, failure)
def add_credential_exception_report(cred_exch_id, exc):
    """Record an exception as the response for a credential exchange."""
    LOGGER.error("add exception report for cred %s", cred_exch_id)
    failure = {"success": False, "result": cred_exch_id + "::" + str(exc)}
    add_credential_response(cred_exch_id, failure)
def get_credential_response(cred_exch_id):
    """Pop and return the stored response for a credential exchange, or None.

    Also removes the thread-id cross-references and, unless the result is an
    error payload (contains "::"), rewrites ``response["result"]`` to the
    thread id (which may be None if no mapping was recorded).
    """
    start_time = time.perf_counter()
    if USE_LOCK:
        credential_lock.acquire()
    try:
        if cred_exch_id not in credential_responses:
            return None
        thread_id = None
        response = credential_responses.pop(cred_exch_id)
        if cred_exch_id in credential_threads:
            thread_id = credential_threads.pop(cred_exch_id)
            del credential_threads[thread_id]
        # override returned id with thread_id, if we have it (unless we have
        # received a problem report, whose result embeds "::")
        if "::" not in response["result"]:
            response["result"] = thread_id
        return response
    finally:
        if USE_LOCK:
            credential_lock.release()
        # BUG FIX: this slow-lock warning previously sat AFTER the try/finally,
        # where it was unreachable (every branch above returns).  Moved into
        # the finally block so it actually runs, matching the sibling helpers.
        processing_time = time.perf_counter() - start_time
        if processing_time > 0.001:
            LOGGER.warn(">>> lock time = %s", str(processing_time))
# Webhook topic names posted by the agent to this controller.
TOPIC_CONNECTIONS = "connections"
# NOTE(review): "actvity" looks misspelled — confirm against the webhook
# sender before changing; the string value is part of the wire protocol.
TOPIC_CONNECTIONS_ACTIVITY = "connections_actvity"
TOPIC_CREDENTIALS = "issue_credential"
TOPIC_PRESENTATIONS = "presentations"
TOPIC_GET_ACTIVE_MENU = "get-active-menu"
TOPIC_PERFORM_MENU_ACTION = "perform-menu-action"
TOPIC_ISSUER_REGISTRATION = "issuer_registration"
TOPIC_PROBLEM_REPORT = "problem_report"
# Maximum wait (seconds) for a credential response, to prevent blocking
# forever; overridable via env var, default 120.
MAX_CRED_RESPONSE_TIMEOUT = int(os.getenv("MAX_CRED_RESPONSE_TIMEOUT", "120"))
def handle_connections(state, message):
    """Webhook handler for connection state changes.

    When the TOB (OrgBook) connection becomes active, register this issuer.
    """
    verifier_cfg = app_config["config_services"]["verifiers"]["bctob"]
    # only react to the TOB connection going active
    if state == "active" and message.get("alias") == verifier_cfg["alias"]:
        register_issuer_with_orgbook(message["connection_id"])
    return jsonify({"message": state})
def handle_credentials(state, message):
    """Webhook handler for issue-credential state changes.

    Records the thread-id mapping when present and, on the final ack, stores
    the success response so the waiting SendCredentialThread can complete.
    Returns a JSON ack of the received state.
    """
    start_time = time.perf_counter()
    method = "Handle callback:" + state
    log_timing_event(method, message, start_time, None, False)
    if "thread_id" in message:
        set_credential_thread_id(
            message["credential_exchange_id"], message["thread_id"]
        )
    # (CLEANUP: removed a dead `else: pass` branch and an unused
    # `processing_time` local from the original implementation.)
    if state == "credential_acked":
        # optionally raise a configured percentage (ACK_ERROR_PCT) of fake
        # errors to exercise downstream error handling
        do_error = random.randint(1, 100)
        if do_error <= ACK_ERROR_PCT:
            raise Exception(
                "Fake exception to test error handling: " + message["thread_id"]
            )
        response = {"success": True, "result": message["credential_exchange_id"]}
        add_credential_response(message["credential_exchange_id"], response)
    end_time = time.perf_counter()
    log_timing_event(method, message, start_time, end_time, True, outcome=str(state))
    return jsonify({"message": state})
def handle_presentations(state, message):
    """Webhook handler for presentation (proof) events; currently only acks."""
    # TODO auto-respond to proof requests
    return jsonify({"message": state})
def handle_get_active_menu(message):
    """Webhook handler for get-active-menu; currently a no-op."""
    # TODO add/update issuer info?
    return jsonify({})
def handle_perform_menu_action(message):
    """Webhook handler for perform-menu-action; currently a no-op."""
    # TODO add/update issuer info?
    return jsonify({})
def handle_register_issuer(message):
    """Webhook handler for issuer-registration events; currently a no-op."""
    # TODO add/update issuer info?
    return jsonify({})
def handle_problem_report(message):
    """Webhook handler for problem reports; routes the failure to the waiting request."""
    LOGGER.error("handle_problem_report() %s", json.dumps(message))
    thid = message["~thread"]["thid"]
    # encode the originating thread id into the result so callers can see it
    failure = {"success": False, "result": thid + "::" + message["explain-ltxt"]}
    add_credential_problem_report(thid, failure)
    return jsonify({})
class SendCredentialThread(threading.Thread):
    """Posts one credential offer to the agent and waits for its outcome.

    After ``join()``, ``self.cred_response`` holds the result dict
    (``{"success": bool, "result": ...}``) whether the exchange succeeded,
    timed out, or raised.
    """

    def __init__(self, credential_definition_id, cred_offer, url, headers):
        threading.Thread.__init__(self)
        self.credential_definition_id = credential_definition_id
        self.cred_offer = cred_offer
        self.url = url
        self.headers = headers

    def run(self):
        # Submit the offer, register a waiter keyed by the returned
        # credential_exchange_id, then block (bounded) for the webhook
        # callback to deliver the response.
        start_time = time.perf_counter()
        method = "submit_credential.credential"
        log_timing_event("issue_credential", {}, start_time, None, False)
        LOGGER.info("Sending credential offer: %s", json.dumps(self.cred_offer))
        cred_data = None
        try:
            response = requests.post(
                self.url, json.dumps(self.cred_offer), headers=self.headers
            )
            response.raise_for_status()
            cred_data = response.json()
            if "credential_exchange_id" in cred_data:
                result_available = add_credential_request(
                    cred_data["credential_exchange_id"]
                )
            else:
                raise Exception(json.dumps(cred_data))
            # wait for confirmation from the agent, which will include the credential exchange id
            if result_available and not result_available.wait(
                MAX_CRED_RESPONSE_TIMEOUT
            ):
                # no webhook arrived in time: record a timeout as the response
                add_credential_timeout_report(
                    cred_data["credential_exchange_id"], cred_data["thread_id"]
                )
                LOGGER.error(
                    "Got credential TIMEOUT: %s %s %s",
                    cred_data["thread_id"],
                    cred_data["credential_exchange_id"],
                    cred_data["connection_id"],
                )
                end_time = time.perf_counter()
                log_timing_method(
                    method,
                    start_time,
                    end_time,
                    False,
                    data={
                        "thread_id": cred_data["thread_id"],
                        "credential_exchange_id": cred_data["credential_exchange_id"],
                        "Error": "Timeout",
                        "elapsed_time": (end_time - start_time),
                    },
                )
                success = False
                outcome = "timeout"
            else:
                # response was received for this cred exchange via a web hook
                end_time = time.perf_counter()
                log_timing_method(method, start_time, end_time, True)
                success = True
                outcome = "success"
            # there should be some form of response available
            self.cred_response = get_credential_response(
                cred_data["credential_exchange_id"]
            )
        except Exception as exc:
            LOGGER.error("got credential exception: %s", str(exc))
            # if cred_data is not set we don't have a credential to set status for
            end_time = time.perf_counter()
            success = False
            outcome = str(exc)
            if cred_data:
                add_credential_exception_report(
                    cred_data["credential_exchange_id"], exc
                )
                data = {
                    "thread_id": cred_data["thread_id"],
                    "credential_exchange_id": cred_data["credential_exchange_id"],
                    "Error": str(exc),
                    "elapsed_time": (end_time - start_time),
                }
            else:
                data = {"Error": str(exc), "elapsed_time": (end_time - start_time)}
            log_timing_method(method, start_time, end_time, False, data=data)
            # don't re-raise; we want to log the exception as the credential error response
            self.cred_response = {"success": False, "result": str(exc)}
        processing_time = end_time - start_time
        message = {"thread_id": self.cred_response["result"]}
        log_timing_event(
            "issue_credential", message, start_time, end_time, success, outcome=outcome
        )
def handle_send_credential(cred_input):
    """
    Issue one credential per entry in ``cred_input`` (a list of dicts with
    "schema", "version" and "attributes" keys) over the TOB connection, and
    return a JSON list of per-credential responses.

    # other sample data
    sample_credentials = [
        {
            "schema": "ian-registration.ian-ville",
            "version": "1.0.0",
            "attributes": {
                "corp_num": "ABC12345",
                "registration_date": "2018-01-01",
                "entity_name": "Ima Permit",
                "entity_name_effective": "2018-01-01",
                "entity_status": "ACT",
                "entity_status_effective": "2019-01-01",
                "entity_type": "ABC",
                "registered_jurisdiction": "BC",
                "effective_date": "2019-01-01",
                "expiry_date": ""
            }
        },
        {
            "schema": "ian-permit.ian-ville",
            "version": "1.0.0",
            "attributes": {
                "permit_id": str(uuid.uuid4()),
                "entity_name": "Ima Permit",
                "corp_num": "ABC12345",
                "permit_issued_date": "2018-01-01",
                "permit_type": "ABC",
                "permit_status": "OK",
                "effective_date": "2019-01-01"
            }
        }
    ]
    """
    # construct and send the credential
    # print("Received credentials", cred_input)
    global app_config
    agent_admin_url = app_config["AGENT_ADMIN_URL"]
    start_time = time.perf_counter()
    processing_time = 0
    processed_count = 0
    # let's send a credential!
    cred_responses = []
    for credential in cred_input:
        # look up the cred def registered at startup for this schema+version
        cred_def_key = "CRED_DEF_" + credential["schema"] + "_" + credential["version"]
        credential_definition_id = app_config["schemas"][cred_def_key]
        # flatten the attribute dict into the agent's name/mime-type/value form
        credential_attributes = []
        for attribute in credential["attributes"]:
            credential_attributes.append(
                {
                    "name": attribute,
                    "mime-type": "text/plain",
                    "value": credential["attributes"][attribute],
                }
            )
        cred_offer = {
            "schema_id": app_config["schemas"][
                "SCHEMA_" + credential["schema"] + "_" + credential["version"]
            ],
            "schema_name": credential["schema"],
            "issuer_did": app_config["DID"],
            "schema_version": credential["version"],
            "credential_proposal": {
                "@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview",
                "attributes": credential_attributes,
            },
            "schema_issuer_did": app_config["DID"],
            "cred_def_id": credential_definition_id,
            "comment": "",
            "connection_id": app_config["TOB_CONNECTION"],
        }
        # randomly enable per-message tracing for TRACE_MSG_PCT percent of sends
        do_trace = random.randint(1, 100)
        if do_trace <= TRACE_MSG_PCT:
            cred_offer["trace"] = True
        # send each credential on its own thread and wait for it to finish
        thread = SendCredentialThread(
            credential_definition_id,
            cred_offer,
            agent_admin_url + "/issue-credential/send",
            ADMIN_REQUEST_HEADERS,
        )
        thread.start()
        thread.join()
        cred_responses.append(thread.cred_response)
        processed_count = processed_count + 1
    processing_time = time.perf_counter() - start_time
    print(">>> Processed", processed_count, "credentials in", processing_time)
    print("   ", processing_time / processed_count, "seconds per credential")
    return jsonify(cred_responses)
| 35.515642 | 111 | 0.598184 | import json
import os
import threading
import time
from datetime import datetime
import requests
import logging
import random
import requests
from flask import jsonify
from src import config
AGENT_ADMIN_API_KEY = os.environ.get("AGENT_ADMIN_API_KEY")
ADMIN_REQUEST_HEADERS = {"Content-Type": "application/json"}
if AGENT_ADMIN_API_KEY is not None and 0 < len(AGENT_ADMIN_API_KEY):
ADMIN_REQUEST_HEADERS["x-api-key"] = AGENT_ADMIN_API_KEY
TOB_ADMIN_API_KEY = os.environ.get("TOB_ADMIN_API_KEY")
TOB_REQUEST_HEADERS = {}
if TOB_ADMIN_API_KEY is not None and 0 < len(TOB_ADMIN_API_KEY):
TOB_REQUEST_HEADERS = {"x-api-key": TOB_ADMIN_API_KEY}
TRACE_EVENTS = os.getenv("TRACE_EVENTS", "True").lower() == "true"
TRACE_LABEL = os.getenv("TRACE_LABEL", "bcreg.controller")
TRACE_TAG = os.getenv("TRACE_TAG", "acapy.events")
TRACE_LOG_TARGET = "log"
TRACE_TARGET = os.getenv("TRACE_TARGET", TRACE_LOG_TARGET)
TRACE_MSG_PCT = int(os.getenv("TRACE_MSG_PCT", "0"))
TRACE_MSG_PCT = max(min(TRACE_MSG_PCT, 100), 0)
ACK_ERROR_PCT = int(os.getenv("ACK_ERROR_PCT", "0"))
ACK_ERROR_PCT = max(min(ACK_ERROR_PCT, 100), 0)
LOG_LEVEL = os.environ.get("LOG_LEVEL", "WARNING").upper()
LOGGER = logging.getLogger(__name__)
if TRACE_EVENTS and TRACE_TARGET == TRACE_LOG_TARGET:
LOGGER.setLevel(logging.INFO)
elif LOG_LEVEL and 0 < len(LOG_LEVEL):
LOGGER.setLevel(LOG_LEVEL)
DT_FMT = "%Y-%m-%d %H:%M:%S.%f%z"
app_config = {}
app_config["schemas"] = {}
app_config["running"] = True
app_config["config_services"] = {}
synced = {}
MAX_RETRIES = 3
def agent_post_with_retry(url, payload, headers=None):
retries = 0
while True:
try:
response = requests.post(
url,
payload,
headers=headers,
)
response.raise_for_status()
return response
except Exception as e:
LOGGER.error("Error posting %s %s", url, str(e))
retries = retries + 1
if retries > MAX_RETRIES:
raise e
time.sleep(5)
def agent_schemas_cred_defs(agent_admin_url):
ret_schemas = {}
response = requests.get(
agent_admin_url + "/schemas/created",
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
schemas = response.json()["schema_ids"]
for schema_id in schemas:
response = requests.get(
agent_admin_url + "/schemas/" + schema_id,
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
schema = response.json()["schema"]
if schema:
schema_key = schema["name"] + "::" + schema["version"]
ret_schemas[schema_key] = {
"schema": schema,
"schema_id": str(schema["seqNo"]),
}
response = requests.get(
agent_admin_url + "/credential-definitions/created",
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
cred_defs = response.json()["credential_definition_ids"]
for cred_def_id in cred_defs:
response = requests.get(
agent_admin_url + "/credential-definitions/" + cred_def_id,
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
cred_def = response.json()["credential_definition"]
for schema_key in ret_schemas:
if ret_schemas[schema_key]["schema_id"] == cred_def["schemaId"]:
ret_schemas[schema_key]["cred_def"] = cred_def
break
return ret_schemas
def register_issuer_with_orgbook(connection_id):
if connection_id in synced and synced[connection_id]:
return
app_config["TOB_CONNECTION"] = connection_id
synced[connection_id] = False
config_root = app_config["config_root"]
config_services = app_config["config_services"]
agent_admin_url = app_config["AGENT_ADMIN_URL"]
for issuer_name, issuer_info in config_services["issuers"].items():
issuer_config = {
"name": issuer_name,
"did": app_config["DID"],
"config_root": config_root,
}
issuer_config.update(issuer_info)
issuer_spec = config.assemble_issuer_spec(issuer_config)
credential_types = []
for credential_type in issuer_info["credential_types"]:
schema_name = credential_type["schema"]
schema_info = app_config["schemas"]["SCHEMA_" + schema_name]
ctype_config = {
"schema_name": schema_name,
"schema_version": schema_info["version"],
"issuer_url": issuer_config["url"],
"config_root": config_root,
"credential_def_id": app_config["schemas"][
"CRED_DEF_" + schema_name + "_" + schema_info["version"]
],
}
credential_type["attributes"] = schema_info["attributes"]
ctype_config.update(credential_type)
ctype = config.assemble_credential_type_spec(
ctype_config, schema_info.get("attributes")
)
if ctype is not None:
credential_types.append(ctype)
issuer_request = {
"connection_id": app_config["TOB_CONNECTION"],
"issuer_registration": {
"credential_types": credential_types,
"issuer": issuer_spec,
},
}
response = requests.post(
agent_admin_url + "/issuer_registration/send",
json.dumps(issuer_request),
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
response.json()
print("Registered issuer: ", issuer_name)
synced[connection_id] = True
print("Connection {} is synchronized".format(connection_id))
class StartupProcessingThread(threading.Thread):
global app_config
def __init__(self, ENV):
threading.Thread.__init__(self)
self.ENV = ENV
def run(self):
config_root = self.ENV.get("CONFIG_ROOT", "../config")
config_schemas = config.load_config(config_root + "/schemas.yml", env=self.ENV)
config_services = config.load_config(
config_root + "/services.yml", env=self.ENV
)
app_config["config_root"] = config_root
app_config["config_services"] = config_services
agent_admin_url = self.ENV.get("AGENT_ADMIN_URL")
if not agent_admin_url:
raise RuntimeError(
"Error AGENT_ADMIN_URL is not specified, can't connect to Agent."
)
app_config["AGENT_ADMIN_URL"] = agent_admin_url
# get public DID from our agent
response = requests.get(
agent_admin_url + "/wallet/did/public",
headers=ADMIN_REQUEST_HEADERS,
)
result = response.json()
did = result["result"]
LOGGER.info("Fetched DID from agent: %s", did)
app_config["DID"] = did["did"]
# determine pre-registered schemas and cred defs
existing_schemas = agent_schemas_cred_defs(agent_admin_url)
# register schemas and credential definitions
for schema in config_schemas:
schema_name = schema["name"]
schema_version = schema["version"]
schema_key = schema_name + "::" + schema_version
if schema_key not in existing_schemas:
schema_attrs = []
schema_descs = {}
if isinstance(schema["attributes"], dict):
# each element is a dict
for attr, desc in schema["attributes"].items():
schema_attrs.append(attr)
schema_descs[attr] = desc
else:
# assume it's an array
for attr in schema["attributes"]:
schema_attrs.append(attr)
schema_request = {
"schema_name": schema_name,
"schema_version": schema_version,
"attributes": schema_attrs,
}
response = agent_post_with_retry(
agent_admin_url + "/schemas",
json.dumps(schema_request),
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
schema_id = response.json()
else:
schema_id = {"schema_id": existing_schemas[schema_key]["schema"]["id"]}
app_config["schemas"]["SCHEMA_" + schema_name] = schema
app_config["schemas"][
"SCHEMA_" + schema_name + "_" + schema_version
] = schema_id["schema_id"]
LOGGER.info("Registered schema: %s", schema_id)
if (
schema_key not in existing_schemas
or "cred_def" not in existing_schemas[schema_key]
):
cred_def_request = {"schema_id": schema_id["schema_id"]}
response = agent_post_with_retry(
agent_admin_url + "/credential-definitions",
json.dumps(cred_def_request),
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
credential_definition_id = response.json()
else:
credential_definition_id = {
"credential_definition_id": existing_schemas[schema_key][
"cred_def"
]["id"]
}
app_config["schemas"][
"CRED_DEF_" + schema_name + "_" + schema_version
] = credential_definition_id["credential_definition_id"]
LOGGER.info(
"Registered credential definition: %s", credential_definition_id
)
tob_connection_params = config_services["verifiers"]["bctob"]
response = requests.get(
agent_admin_url + "/connections?alias=" + tob_connection_params["alias"],
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
connections = response.json()["results"]
tob_connection = None
for connection in connections:
if connection["alias"] == tob_connection_params["alias"]:
tob_connection = connection
if not tob_connection:
if (
"agent_admin_url" in tob_connection_params["connection"]
and tob_connection_params["connection"]["agent_admin_url"]
):
tob_agent_admin_url = tob_connection_params["connection"][
"agent_admin_url"
]
response = requests.post(
tob_agent_admin_url + "/connections/create-invitation",
headers=TOB_REQUEST_HEADERS,
)
response.raise_for_status()
invitation = response.json()
response = requests.post(
agent_admin_url
+ "/connections/receive-invitation?alias="
+ tob_connection_params["alias"],
json.dumps(invitation["invitation"]),
headers=ADMIN_REQUEST_HEADERS,
)
response.raise_for_status()
tob_connection = response.json()
LOGGER.info(
"Established tob connection: %s", json.dumps(tob_connection)
)
time.sleep(5)
if tob_connection:
register_issuer_with_orgbook(tob_connection["connection_id"])
else:
print(
"No TOB connection found or established, awaiting invitation to connect to TOB ..."
)
def tob_connection_synced():
return (
("TOB_CONNECTION" in app_config)
and (app_config["TOB_CONNECTION"] in synced)
and (synced[app_config["TOB_CONNECTION"]])
)
def tob_connection_active():
if not tob_connection_synced():
return False
return 0 < len(list(credential_requests.keys()))
def issuer_liveness_check():
global app_config
if app_config["running"]:
return True
return tob_connection_active()
class ShutdownProcessingThread(threading.Thread):
def run(self):
while issuer_liveness_check():
LOGGER.error("... Waiting for work queue to clear before shutdown ...")
time.sleep(1)
def signal_issuer_shutdown(signum, frame):
global app_config
LOGGER.error(">>> Received shutdown signal!")
app_config["running"] = False
thread = ShutdownProcessingThread()
thread.start()
thread.join()
LOGGER.error(">>> Shutting down issuer controller process.")
def startup_init(ENV):
global app_config
thread = StartupProcessingThread(ENV)
thread.start()
return thread
credential_lock = threading.Lock()
credential_requests = {}
credential_responses = {}
credential_threads = {}
USE_LOCK = os.getenv("USE_LOCK", "True").lower() == "true"
RECORD_TIMINGS = os.getenv("RECORD_TIMINGS", "False").lower() == "true"
timing_lock = threading.Lock()
timings = {}
def clear_stats():
global timings
timing_lock.acquire()
try:
timings = {}
finally:
timing_lock.release()
def get_stats():
timing_lock.acquire()
try:
return timings
finally:
timing_lock.release()
def log_timing_method(method, start_time, end_time, success, data=None):
if not RECORD_TIMINGS:
return
timing_lock.acquire()
try:
elapsed_time = end_time - start_time
if not method in timings:
timings[method] = {
"total_count": 1,
"success_count": 1 if success else 0,
"fail_count": 0 if success else 1,
"min_time": elapsed_time,
"max_time": elapsed_time,
"total_time": elapsed_time,
"avg_time": elapsed_time,
"data": {},
}
else:
timings[method]["total_count"] = timings[method]["total_count"] + 1
if success:
timings[method]["success_count"] = timings[method]["success_count"] + 1
else:
timings[method]["fail_count"] = timings[method]["fail_count"] + 1
if elapsed_time > timings[method]["max_time"]:
timings[method]["max_time"] = elapsed_time
if elapsed_time < timings[method]["min_time"]:
timings[method]["min_time"] = elapsed_time
timings[method]["total_time"] = timings[method]["total_time"] + elapsed_time
timings[method]["avg_time"] = (
timings[method]["total_time"] / timings[method]["total_count"]
)
if data:
timings[method]["data"][str(timings[method]["total_count"])] = data
finally:
timing_lock.release()
def log_timing_event(method, message, start_time, end_time, success, outcome=None):
if (not TRACE_EVENTS) and (not message.get("trace")):
return
if not TRACE_TARGET:
return
msg_id = "N/A"
thread_id = message["thread_id"] if message.get("thread_id") else "N/A"
handler = TRACE_LABEL
ep_time = time.time()
str_time = datetime.utcfromtimestamp(ep_time).strftime(DT_FMT)
if end_time:
str_outcome = method + ".SUCCESS" if success else ".FAIL"
else:
str_outcome = method + ".START"
if outcome:
str_outcome = str_outcome + "." + outcome
event = {
"msg_id": msg_id,
"thread_id": thread_id if thread_id else msg_id,
"traced_type": method,
"timestamp": ep_time,
"str_time": str_time,
"handler": str(handler),
"ellapsed_milli": int(1000 * (end_time - start_time)) if end_time else 0,
"outcome": str_outcome,
}
event_str = json.dumps(event)
try:
if TRACE_TARGET == TRACE_LOG_TARGET:
LOGGER.error(" %s %s", TRACE_TAG, event_str)
else:
_ = requests.post(
TRACE_TARGET + TRACE_TAG,
data=event_str,
headers={"Content-Type": "application/json"},
)
except Exception as e:
LOGGER.error(
"Error logging trace target: %s tag: %s event: %s",
TRACE_TARGET,
TRACE_TAG,
event_str,
)
LOGGER.exception(e)
def set_credential_thread_id(cred_exch_id, thread_id):
start_time = time.perf_counter()
if USE_LOCK:
credential_lock.acquire()
try:
credential_threads[thread_id] = cred_exch_id
credential_threads[cred_exch_id] = thread_id
finally:
if USE_LOCK:
credential_lock.release()
processing_time = time.perf_counter() - start_time
if processing_time > 0.001:
LOGGER.warn(">>> lock time = %s", str(processing_time))
def add_credential_request(cred_exch_id):
start_time = time.perf_counter()
if USE_LOCK:
credential_lock.acquire()
try:
if cred_exch_id in credential_responses:
return None
result_available = threading.Event()
credential_requests[cred_exch_id] = result_available
return result_available
finally:
if USE_LOCK:
credential_lock.release()
processing_time = time.perf_counter() - start_time
if processing_time > 0.001:
LOGGER.warn(">>> lock time = %s", str(processing_time))
def add_credential_response(cred_exch_id, response):
start_time = time.perf_counter()
if USE_LOCK:
credential_lock.acquire()
try:
credential_responses[cred_exch_id] = response
if cred_exch_id in credential_requests:
result_available = credential_requests[cred_exch_id]
result_available.set()
del credential_requests[cred_exch_id]
finally:
if USE_LOCK:
credential_lock.release()
processing_time = time.perf_counter() - start_time
if processing_time > 0.001:
LOGGER.warn(">>> lock time = %s", str(processing_time))
def add_credential_problem_report(thread_id, response):
LOGGER.error(
"get problem report for thread %s %s", thread_id, str(len(credential_requests))
)
if thread_id in credential_threads:
cred_exch_id = credential_threads[thread_id]
LOGGER.error(" ... cred_exch_id is %s: %s", cred_exch_id, str(response))
add_credential_response(cred_exch_id, response)
else:
LOGGER.error("thread_id not found %s", thread_id)
if 1 == len(list(credential_requests.keys())):
cred_exch_id = list(credential_requests.keys())[0]
add_credential_response(cred_exch_id, response)
elif 0 == len(list(credential_requests.keys())):
LOGGER.error(
"NO outstanding requests, can't map problem report to request :-("
)
LOGGER.error(credential_requests)
else:
LOGGER.error(
"Too many outstanding requests, can't map problem report to request :-("
)
LOGGER.error(credential_requests)
def add_credential_timeout_report(cred_exch_id, thread_id):
LOGGER.error("add timeout report for cred %s %s", thread_id, cred_exch_id)
response = {"success": False, "result": thread_id + "::Error thread timeout"}
add_credential_response(cred_exch_id, response)
def add_credential_exception_report(cred_exch_id, exc):
LOGGER.error("add exception report for cred %s", cred_exch_id)
response = {"success": False, "result": cred_exch_id + "::" + str(exc)}
add_credential_response(cred_exch_id, response)
def get_credential_response(cred_exch_id):
start_time = time.perf_counter()
if USE_LOCK:
credential_lock.acquire()
try:
if cred_exch_id in credential_responses:
thread_id = None
response = credential_responses[cred_exch_id]
del credential_responses[cred_exch_id]
if cred_exch_id in credential_threads:
thread_id = credential_threads[cred_exch_id]
del credential_threads[cred_exch_id]
del credential_threads[thread_id]
if not "::" in response["result"]:
response["result"] = thread_id
return response
else:
return None
finally:
if USE_LOCK:
credential_lock.release()
processing_time = time.perf_counter() - start_time
if processing_time > 0.001:
LOGGER.warn(">>> lock time = %s", str(processing_time))
TOPIC_CONNECTIONS = "connections"
TOPIC_CONNECTIONS_ACTIVITY = "connections_actvity"
TOPIC_CREDENTIALS = "issue_credential"
TOPIC_PRESENTATIONS = "presentations"
TOPIC_GET_ACTIVE_MENU = "get-active-menu"
TOPIC_PERFORM_MENU_ACTION = "perform-menu-action"
TOPIC_ISSUER_REGISTRATION = "issuer_registration"
TOPIC_PROBLEM_REPORT = "problem_report"
MAX_CRED_RESPONSE_TIMEOUT = int(os.getenv("MAX_CRED_RESPONSE_TIMEOUT", "120"))
def handle_connections(state, message):
    """Webhook handler for connection state changes: once the configured
    OrgBook (bctob) connection becomes active, register this issuer with it.
    Always echoes the state back as JSON."""
    verifier_params = app_config["config_services"]["verifiers"]["bctob"]
    if "alias" in message and state == "active":
        if message["alias"] == verifier_params["alias"]:
            register_issuer_with_orgbook(message["connection_id"])
    return jsonify({"message": state})
def handle_credentials(state, message):
    """Webhook handler for issue_credential state changes.

    Records the agent thread id for the exchange (when present) and, once the
    credential is acked, posts a success response so the waiting
    SendCredentialThread can complete.  ACK_ERROR_PCT percent of acks are
    turned into fake exceptions to exercise the error path.
    """
    start_time = time.perf_counter()
    method = "Handle callback:" + state
    log_timing_event(method, message, start_time, None, False)
    if "thread_id" in message:
        # remember the exchange-id <-> agent-thread-id mapping
        set_credential_thread_id(
            message["credential_exchange_id"], message["thread_id"]
        )
    # (removed a dead `else: pass` branch here)
    if state == "credential_acked":
        # optionally inject a failure to test downstream error handling
        do_error = random.randint(1, 100)
        if do_error <= ACK_ERROR_PCT:
            raise Exception(
                "Fake exception to test error handling: " + message["thread_id"]
            )
        response = {"success": True, "result": message["credential_exchange_id"]}
        add_credential_response(message["credential_exchange_id"], response)
    end_time = time.perf_counter()
    log_timing_event(method, message, start_time, end_time, True, outcome=str(state))
    return jsonify({"message": state})
def handle_presentations(state, message):
    """Webhook stub for presentation events; just echoes the state as JSON."""
    payload = {"message": state}
    return jsonify(payload)
def handle_get_active_menu(message):
    """Webhook stub for the 'get-active-menu' topic; returns an empty JSON object."""
    return jsonify({})
def handle_perform_menu_action(message):
    """Webhook stub for the 'perform-menu-action' topic; returns an empty JSON object."""
    return jsonify({})
def handle_register_issuer(message):
    """Webhook stub for the 'issuer_registration' topic; returns an empty JSON object."""
    return jsonify({})
def handle_problem_report(message):
    """Webhook handler for agent problem reports: route the failure back to
    the credential exchange identified by the report's thread id."""
    LOGGER.error("handle_problem_report() %s", json.dumps(message))
    thread_id = message["~thread"]["thid"]
    msg = thread_id + "::" + message["explain-ltxt"]
    response = {"success": False, "result": msg}
    add_credential_problem_report(thread_id, response)
    return jsonify({})
class SendCredentialThread(threading.Thread):
    """One-shot worker: POST a credential offer to the agent admin API and
    wait for the matching webhook callback; the outcome is stored in
    ``self.cred_response`` for the caller to read after join().
    """
    def __init__(self, credential_definition_id, cred_offer, url, headers):
        # Capture request parameters only; no I/O happens until run().
        threading.Thread.__init__(self)
        self.credential_definition_id = credential_definition_id
        self.cred_offer = cred_offer
        self.url = url
        self.headers = headers
    def run(self):
        """Submit the offer, wait for the webhook response, record timings."""
        start_time = time.perf_counter()
        method = "submit_credential.credential"
        log_timing_event("issue_credential", {}, start_time, None, False)
        LOGGER.info("Sending credential offer: %s", json.dumps(self.cred_offer))
        cred_data = None
        try:
            # POST the offer; the agent answers immediately with exchange ids,
            # the actual credential result arrives later via webhook.
            response = requests.post(
                self.url, json.dumps(self.cred_offer), headers=self.headers
            )
            response.raise_for_status()
            cred_data = response.json()
            if "credential_exchange_id" in cred_data:
                # register a waiter that the webhook handler releases when the
                # response for this exchange id arrives
                result_available = add_credential_request(
                    cred_data["credential_exchange_id"]
                )
            else:
                raise Exception(json.dumps(cred_data))
            # wait() returning False means no response within the timeout
            if result_available and not result_available.wait(
                MAX_CRED_RESPONSE_TIMEOUT
            ):
                add_credential_timeout_report(
                    cred_data["credential_exchange_id"], cred_data["thread_id"]
                )
                LOGGER.error(
                    "Got credential TIMEOUT: %s %s %s",
                    cred_data["thread_id"],
                    cred_data["credential_exchange_id"],
                    cred_data["connection_id"],
                )
                end_time = time.perf_counter()
                log_timing_method(
                    method,
                    start_time,
                    end_time,
                    False,
                    data={
                        "thread_id": cred_data["thread_id"],
                        "credential_exchange_id": cred_data["credential_exchange_id"],
                        "Error": "Timeout",
                        "elapsed_time": (end_time - start_time),
                    },
                )
                success = False
                outcome = "timeout"
                # NOTE(review): self.cred_response is NOT assigned on this
                # timeout path, so the attribute access at the bottom of run()
                # (and the caller reading thread.cred_response) would raise
                # AttributeError after a timeout -- confirm and fix.
            else:
                end_time = time.perf_counter()
                log_timing_method(method, start_time, end_time, True)
                success = True
                outcome = "success"
                self.cred_response = get_credential_response(
                    cred_data["credential_exchange_id"]
                )
        except Exception as exc:
            LOGGER.error("got credential exception: %s", str(exc))
            end_time = time.perf_counter()
            success = False
            outcome = str(exc)
            if cred_data:
                # the request reached the agent -- report against the exchange
                add_credential_exception_report(
                    cred_data["credential_exchange_id"], exc
                )
                data = {
                    "thread_id": cred_data["thread_id"],
                    "credential_exchange_id": cred_data["credential_exchange_id"],
                    "Error": str(exc),
                    "elapsed_time": (end_time - start_time),
                }
            else:
                data = {"Error": str(exc), "elapsed_time": (end_time - start_time)}
            log_timing_method(method, start_time, end_time, False, data=data)
            # don't re-raise; we want to log the exception as the credential error response
            self.cred_response = {"success": False, "result": str(exc)}
        # NOTE(review): processing_time is computed but never used.
        processing_time = end_time - start_time
        message = {"thread_id": self.cred_response["result"]}
        log_timing_event(
            "issue_credential", message, start_time, end_time, success, outcome=outcome
        )
def handle_send_credential(cred_input):
    """Issue one credential per entry in ``cred_input``.

    Each entry must carry "schema", "version" and an "attributes" dict; the
    matching credential-definition / schema ids are looked up in app_config.
    Offers are sent one at a time via SendCredentialThread (start + join, so
    this is effectively synchronous) and the per-credential responses are
    returned as a JSON array.
    """
    global app_config
    agent_admin_url = app_config["AGENT_ADMIN_URL"]
    start_time = time.perf_counter()
    processing_time = 0
    processed_count = 0
    cred_responses = []
    for credential in cred_input:
        cred_def_key = "CRED_DEF_" + credential["schema"] + "_" + credential["version"]
        credential_definition_id = app_config["schemas"][cred_def_key]
        credential_attributes = [
            {
                "name": attribute,
                "mime-type": "text/plain",
                "value": value,
            }
            for attribute, value in credential["attributes"].items()
        ]
        cred_offer = {
            "schema_id": app_config["schemas"][
                "SCHEMA_" + credential["schema"] + "_" + credential["version"]
            ],
            "schema_name": credential["schema"],
            "issuer_did": app_config["DID"],
            "schema_version": credential["version"],
            "credential_proposal": {
                "@type": "did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview",
                "attributes": credential_attributes,
            },
            "schema_issuer_did": app_config["DID"],
            "cred_def_id": credential_definition_id,
            "comment": "",
            "connection_id": app_config["TOB_CONNECTION"],
        }
        # randomly enable agent message tracing on TRACE_MSG_PCT percent of offers
        do_trace = random.randint(1, 100)
        if do_trace <= TRACE_MSG_PCT:
            cred_offer["trace"] = True
        thread = SendCredentialThread(
            credential_definition_id,
            cred_offer,
            agent_admin_url + "/issue-credential/send",
            ADMIN_REQUEST_HEADERS,
        )
        thread.start()
        thread.join()
        cred_responses.append(thread.cred_response)
        processed_count = processed_count + 1
    processing_time = time.perf_counter() - start_time
    # BUG FIX: guard the per-credential average -- an empty cred_input used to
    # raise ZeroDivisionError here.
    if processed_count:
        print(">>> Processed", processed_count, "credentials in", processing_time)
        print("    ", processing_time / processed_count, "seconds per credential")
    return jsonify(cred_responses)
| true | true |
1c2c74ebac6f9b761ec268cc65a3a706ab9a78fd | 620 | py | Python | setup.py | EndevelCZ/flexibee-export | fdc333249ec2ebf438009469d9229ea6f6532b62 | [
"MIT"
] | null | null | null | setup.py | EndevelCZ/flexibee-export | fdc333249ec2ebf438009469d9229ea6f6532b62 | [
"MIT"
] | null | null | null | setup.py | EndevelCZ/flexibee-export | fdc333249ec2ebf438009469d9229ea6f6532b62 | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="flexibee_export",
version="0.1.0",
author="Endevel",
author_email="info@endevel.cz",
description="FlexiBee xml export package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/EndevelCZ/flexibee-export.git",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) | 29.52381 | 59 | 0.670968 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="flexibee_export",
version="0.1.0",
author="Endevel",
author_email="info@endevel.cz",
description="FlexiBee xml export package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/EndevelCZ/flexibee-export.git",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) | true | true |
1c2c7514183a01a411328b1524fc7485a462acfa | 65 | py | Python | python/testData/mover/outsideStatement_afterUp.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/mover/outsideStatement_afterUp.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/mover/outsideStatement_afterUp.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | def foo():
a = 1
a = 3
if a:
pass
a = 4 | 8.125 | 12 | 0.292308 | def foo():
a = 1
a = 3
if a:
pass
a = 4 | true | true |
1c2c75f5f5b645fe3d21e5d7ac21ebfec9f0fd10 | 3,154 | py | Python | submodel/resnet.py | yukyeongleee/CodeTemplate | d0777d501bf64b01ea63787c75b505f7b76ebeda | [
"MIT"
] | null | null | null | submodel/resnet.py | yukyeongleee/CodeTemplate | d0777d501bf64b01ea63787c75b505f7b76ebeda | [
"MIT"
] | 2 | 2022-03-15T09:00:29.000Z | 2022-03-17T03:36:43.000Z | submodel/resnet.py | yukyeongleee/CodeTemplate | d0777d501bf64b01ea63787c75b505f7b76ebeda | [
"MIT"
] | 1 | 2022-03-19T08:08:22.000Z | 2022-03-19T08:08:22.000Z | import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as modelzoo
from lib.utils import conv3x3
resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
class Resnet18(nn.Module):
def __init__(self):
super(Resnet18, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)
self.init_weight()
def forward(self, x):
x = self.conv1(x)
x = F.relu(self.bn1(x))
x = self.maxpool(x)
x = self.layer1(x)
feat8 = self.layer2(x) # 1/8
feat16 = self.layer3(feat8) # 1/16
feat32 = self.layer4(feat16) # 1/32
return feat8, feat16, feat32
def init_weight(self):
state_dict = modelzoo.load_url(resnet18_url)
self_state_dict = self.state_dict()
for k, v in state_dict.items():
if 'fc' in k: continue
self_state_dict.update({k: v})
self.load_state_dict(self_state_dict)
def get_params(self):
wd_params, nowd_params = [], []
for module in self.named_modules():
if isinstance(module, (nn.Linear, nn.Conv2d)):
wd_params.append(module.weight)
if not module.bias is None:
nowd_params.append(module.bias)
elif isinstance(module, nn.BatchNorm2d):
nowd_params += list(module.parameters())
return wd_params, nowd_params
class BasicBlock(nn.Module):
    """Standard two-conv residual block: relu(BN(conv(BN(conv(x)))) + shortcut)."""

    def __init__(self, in_chan, out_chan, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_chan, out_chan, stride)
        self.bn1 = nn.BatchNorm2d(out_chan)
        self.conv2 = conv3x3(out_chan, out_chan)
        self.bn2 = nn.BatchNorm2d(out_chan)
        self.relu = nn.ReLU(inplace=True)
        # Identity shortcut unless the shape changes; then project with a
        # 1x1 conv + BatchNorm.
        if in_chan == out_chan and stride == 1:
            self.downsample = None
        else:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_chan, out_chan,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_chan),
            )

    def forward(self, x):
        """Return relu(residual(x) + shortcut(x))."""
        out = self.conv1(x)
        out = F.relu(self.bn1(out))
        out = self.bn2(self.conv2(out))
        skip = x if self.downsample is None else self.downsample(x)
        return self.relu(skip + out)
def create_layer_basic(in_chan, out_chan, bnum, stride=1):
    """Stack `bnum` BasicBlocks; only the first block may change the channel
    count or stride, the rest map out_chan -> out_chan at stride 1."""
    blocks = [BasicBlock(in_chan, out_chan, stride=stride)]
    blocks.extend(BasicBlock(out_chan, out_chan, stride=1) for _ in range(bnum - 1))
    return nn.Sequential(*blocks)
| 35.044444 | 74 | 0.606214 | import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as modelzoo
from lib.utils import conv3x3
resnet18_url = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'
class Resnet18(nn.Module):
def __init__(self):
super(Resnet18, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)
self.init_weight()
def forward(self, x):
x = self.conv1(x)
x = F.relu(self.bn1(x))
x = self.maxpool(x)
x = self.layer1(x)
feat8 = self.layer2(x)
feat16 = self.layer3(feat8)
feat32 = self.layer4(feat16)
return feat8, feat16, feat32
def init_weight(self):
state_dict = modelzoo.load_url(resnet18_url)
self_state_dict = self.state_dict()
for k, v in state_dict.items():
if 'fc' in k: continue
self_state_dict.update({k: v})
self.load_state_dict(self_state_dict)
def get_params(self):
wd_params, nowd_params = [], []
for module in self.named_modules():
if isinstance(module, (nn.Linear, nn.Conv2d)):
wd_params.append(module.weight)
if not module.bias is None:
nowd_params.append(module.bias)
elif isinstance(module, nn.BatchNorm2d):
nowd_params += list(module.parameters())
return wd_params, nowd_params
class BasicBlock(nn.Module):
def __init__(self, in_chan, out_chan, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_chan, out_chan, stride)
self.bn1 = nn.BatchNorm2d(out_chan)
self.conv2 = conv3x3(out_chan, out_chan)
self.bn2 = nn.BatchNorm2d(out_chan)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
if in_chan != out_chan or stride != 1:
self.downsample = nn.Sequential(
nn.Conv2d(in_chan, out_chan,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_chan),
)
def forward(self, x):
residual = self.conv1(x)
residual = F.relu(self.bn1(residual))
residual = self.conv2(residual)
residual = self.bn2(residual)
shortcut = x
if self.downsample is not None:
shortcut = self.downsample(x)
out = shortcut + residual
out = self.relu(out)
return out
def create_layer_basic(in_chan, out_chan, bnum, stride=1):
layers = [BasicBlock(in_chan, out_chan, stride=stride)]
for i in range(bnum-1):
layers.append(BasicBlock(out_chan, out_chan, stride=1))
return nn.Sequential(*layers)
| true | true |
1c2c76ffd1479b59a53f3ea57987fc92d476200e | 2,931 | py | Python | data/external/repositories_2to3/137656/blundercheck-master/combine/contest_20150210a/data_prep/make_scatterplots.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/137656/blundercheck-master/combine/contest_20150210a/data_prep/make_scatterplots.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/137656/blundercheck-master/combine/contest_20150210a/data_prep/make_scatterplots.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | #!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')  # Must be before importing matplotlib.pyplot or pylab!
import sys
import seaborn as sns
from pandas import read_pickle, qcut
from itertools import combinations
import matplotlib.pyplot as plt
from djeval import *

# Global seaborn styling for every figure produced below.
sns.set_palette("deep", desat=.6)
sns.set_context(rc={"figure.figsize": (8, 4)})

# Load the per-game dataframe produced upstream (path given on command line).
msg("Hello there, reading yy_df.")
yy_df = read_pickle(sys.argv[1])

# Hexbin joint plot of move error vs Elo over the full dataframe.
x = yy_df['nmerror']
y = yy_df['elo']
with sns.axes_style("white"):
    sns.jointplot(x, y, kind="hex")
plt.savefig('/data/seaborn.png')
plt.close()

# Only games with a known Elo are useful for the remaining plots.
with_elo = yy_df[yy_df['elo'].notnull()]

# NOTE(review): `features` is assigned but never used below; kept for
# reference -- confirm before deleting.
features = ['nmerror',
            'blunderrate', 'noblunders',
            'perfectrate',
            'gameoutcome',
            'won_by_checkmate', 'lost_by_checkmate', 'ended_by_checkmate',
            'my_final_equity', 'final_equity',
            'grit', 'any_grit', 'opponent_any_grit', 'major_grit',
            'mate_created', 'mate_destroyed', 'premature_quit',
            'side',
            'drawn_game',
            'gamelength',
            'meanecho',
            'opponent_nmerror', 'opponent_noblunders',
            'mean_depth_clipped',
            'mean_seldepth',
            'min_nmerror', 'max_nmerror', 'max_meanecho',
            'early_lead',
            'q_error_one', 'q_error_two',
            'opponent_q_error_one', 'opponent_q_error_two',
            'pct_sanemoves',
            'opponent_blunderrate', 'opponent_perfectrate',
            'opponent_grit', 'opponent_meanecho',
            'opponent_mate_created', 'opponent_mate_destroyed',
            'mean_seldepth',
            'mean_depths_ar', 'mean_deepest_ar',
            'opponent_mean_depths_ar', 'opponent_mean_deepest_ar',
            'pct_sanemoves',
            'moveelo_weighted'
            ]

# Columns actually plotted below.
plottables = ['elo', 'gbr_prediction', 'gbr_error']
plottables.extend(['gamelength', 'mean_depth_clipped', 'mean_deepest_ar', 'opponent_mean_deepest_ar'])

# Optional per-pair violin plots (disabled by default -- slow, best effort).
do_indivs = False
if do_indivs:
    for a, b in combinations(plottables, 2):
        for first, second in [(a,b), (b,a)]:
            try:
                groupings, bins = qcut(with_elo[first], 10, labels=False, retbins=True)
                sns.violinplot(with_elo[second], groupings)
                plt.savefig('/data/' + first + '_' + second + '.png')
                plt.close()
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; keep the best-effort behaviour
            # but only for ordinary exceptions.
            except Exception:
                print(("Couldnt manage for %s %s" % (first, second)))
#            f, ax = plt.subplots(figsize=(11, 6))
#            sns.violinplot(with_elo[second], groupings, names=[str(b) + str(b+1) for b in bins[:-1]])
#            ax.set(ylim=(-.7, 1.05))
#            sns.despine(left=True, bottom=True)
        # progress marker, one dot per plottable pair
        print('.', end=' ')
        sys.stdout.flush()

# Pairwise scatter matrix over the plottable columns.
g = sns.pairplot(with_elo[plottables], size=2.5)
plt.savefig('/data/pairplot.png')
plt.close()
| 34.892857 | 103 | 0.578983 |
import matplotlib
matplotlib.use('Agg')
import sys
import seaborn as sns
from pandas import read_pickle, qcut
from itertools import combinations
import matplotlib.pyplot as plt
from djeval import *
sns.set_palette("deep", desat=.6)
sns.set_context(rc={"figure.figsize": (8, 4)})
msg("Hello there, reading yy_df.")
yy_df = read_pickle(sys.argv[1])
x = yy_df['nmerror']
y = yy_df['elo']
with sns.axes_style("white"):
sns.jointplot(x, y, kind="hex")
plt.savefig('/data/seaborn.png')
plt.close()
with_elo = yy_df[yy_df['elo'].notnull()]
features = ['nmerror',
'blunderrate', 'noblunders',
'perfectrate',
'gameoutcome',
'won_by_checkmate', 'lost_by_checkmate', 'ended_by_checkmate',
'my_final_equity', 'final_equity',
'grit', 'any_grit', 'opponent_any_grit', 'major_grit',
'mate_created', 'mate_destroyed', 'premature_quit',
'side',
'drawn_game',
'gamelength',
'meanecho',
'opponent_nmerror', 'opponent_noblunders',
'mean_depth_clipped',
'mean_seldepth',
'min_nmerror', 'max_nmerror', 'max_meanecho',
'early_lead',
'q_error_one', 'q_error_two',
'opponent_q_error_one', 'opponent_q_error_two',
'pct_sanemoves',
'opponent_blunderrate', 'opponent_perfectrate',
'opponent_grit', 'opponent_meanecho',
'opponent_mate_created', 'opponent_mate_destroyed',
'mean_seldepth',
'mean_depths_ar', 'mean_deepest_ar',
'opponent_mean_depths_ar', 'opponent_mean_deepest_ar',
'pct_sanemoves',
'moveelo_weighted'
]
plottables = ['elo', 'gbr_prediction', 'gbr_error']
plottables.extend(['gamelength', 'mean_depth_clipped', 'mean_deepest_ar', 'opponent_mean_deepest_ar'])
do_indivs = False
if do_indivs:
for a, b in combinations(plottables, 2):
for first, second in [(a,b), (b,a)]:
try:
groupings, bins = qcut(with_elo[first], 10, labels=False, retbins=True)
sns.violinplot(with_elo[second], groupings)
plt.savefig('/data/' + first + '_' + second + '.png')
plt.close()
except:
print(("Couldnt manage for %s %s" % (first, second)))
print('.', end=' ')
sys.stdout.flush()
g = sns.pairplot(with_elo[plottables], size=2.5)
plt.savefig('/data/pairplot.png')
plt.close()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.