Dataset schema (32 columns; ⌀ marks a nullable column):

- hexsha: string, length 40
- size: int64, 2 to 1.02M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 4 to 245
- max_stars_repo_name: string, length 6 to 130
- max_stars_repo_head_hexsha: string, length 40
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 4 to 245
- max_issues_repo_name: string, length 6 to 130
- max_issues_repo_head_hexsha: string, length 40
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 4 to 245
- max_forks_repo_name: string, length 6 to 130
- max_forks_repo_head_hexsha: string, length 40
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 2 to 1.02M
- avg_line_length: float64, 1 to 417k
- max_line_length: int64, 1 to 987k
- alphanum_fraction: float64, 0 to 1
- content_no_comment: string, length 0 to 1.01M
- is_comment_constant_removed: bool, 1 class
- is_sharp_comment_removed: bool, 1 class

The rows below give one cell per column, in this order, with | marking cell boundaries.
1c3fddd736b4b331ce0e9cce4a4deefad4aaaa9d
| 5,605
|
py
|
Python
|
ibis_mssql/compiler.py
|
safrazRampersaud/ibis-mssql
|
b4349d0d195c77b02c7a2531e4b83e6cf6ecd79a
|
[
"Apache-2.0"
] | null | null | null |
ibis_mssql/compiler.py
|
safrazRampersaud/ibis-mssql
|
b4349d0d195c77b02c7a2531e4b83e6cf6ecd79a
|
[
"Apache-2.0"
] | 15
|
2020-06-04T17:27:26.000Z
|
2021-02-15T16:29:14.000Z
|
ibis_mssql/compiler.py
|
safrazRampersaud/ibis-mssql
|
b4349d0d195c77b02c7a2531e4b83e6cf6ecd79a
|
[
"Apache-2.0"
] | 5
|
2021-01-05T23:20:13.000Z
|
2021-04-17T10:52:53.000Z
|
import pyodbc
import sqlalchemy as sa
import sqlalchemy.dialects.mssql as mssql
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.backends.base_sqlalchemy.alchemy as alch
# used for literal translate
from ibis.backends.base_sqlalchemy.alchemy import fixed_arity, unary
def raise_unsupported_op_error(translator, expr, *args):
msg = "SQLServer backend doesn't support {} operation!"
op = expr.op()
raise com.UnsupportedOperationError(msg.format(type(op)))
# Aggregation
# copied from postgresql compiler
# support for bit columns in aggregate methods
def _reduction(func_name, cast_type='int32'):
def reduction_compiler(t, expr):
arg, where = expr.op().args
if arg.type().equals(dt.boolean):
arg = arg.cast(cast_type)
func = getattr(sa.func, func_name)
if where is not None:
arg = where.ifelse(arg, None)
return func(t.translate(arg))
return reduction_compiler
# String
# TODO: substr and find are copied from SQLite, we should really have a
# "base" set of SQL functions that are the most common APIs across the major
# RDBMS
def _substr(t, expr):
f = sa.func.substring
arg, start, length = expr.op().args
sa_arg = t.translate(arg)
sa_start = t.translate(start)
if length is None:
return f(sa_arg, sa_start + 1)
else:
sa_length = t.translate(length)
return f(sa_arg, sa_start + 1, sa_length)
def _string_find(t, expr):
arg, substr, start, _ = expr.op().args
sa_arg = t.translate(arg)
sa_substr = t.translate(substr)
if start is not None:
sa_start = t.translate(start)
return sa.func.charindex(sa_substr, sa_arg, sa_start) - 1
return sa.func.charindex(sa_substr, sa_arg) - 1
# Numerical
def _floor_divide(t, expr):
left, right = map(t.translate, expr.op().args)
return sa.func.floor(left / right)
def _extract(fmt):
def translator(t, expr):
(arg,) = expr.op().args
sa_arg = t.translate(arg)
        # sa.literal_column is used because it passes the argument through
        # as-is, NOT as a bound parameter
return sa.cast(
sa.func.datepart(sa.literal_column(fmt), sa_arg), sa.SMALLINT
)
return translator
_operation_registry = alch._operation_registry.copy()
_operation_registry.update(
{
# aggregate methods
        ops.Count: _reduction('count'),  # _reduction expects a function name string
ops.Max: _reduction('max'),
ops.Min: _reduction('min'),
ops.Sum: _reduction('sum'),
ops.Mean: _reduction('avg', 'float64'),
# string methods
ops.LStrip: unary(sa.func.ltrim),
ops.Lowercase: unary(sa.func.lower),
ops.RStrip: unary(sa.func.rtrim),
ops.Repeat: fixed_arity(sa.func.replicate, 2),
ops.Reverse: unary(sa.func.reverse),
ops.StringFind: _string_find,
ops.StringLength: unary(sa.func.length),
ops.StringReplace: fixed_arity(sa.func.replace, 3),
ops.Strip: unary(sa.func.trim),
ops.Substring: _substr,
ops.Uppercase: unary(sa.func.upper),
# math
ops.Abs: unary(sa.func.abs),
ops.Acos: unary(sa.func.acos),
ops.Asin: unary(sa.func.asin),
ops.Atan2: fixed_arity(sa.func.atn2, 2),
ops.Atan: unary(sa.func.atan),
ops.Ceil: unary(sa.func.ceiling),
ops.Cos: unary(sa.func.cos),
ops.Floor: unary(sa.func.floor),
ops.FloorDivide: _floor_divide,
ops.Power: fixed_arity(sa.func.power, 2),
ops.Sign: unary(sa.func.sign),
ops.Sin: unary(sa.func.sin),
ops.Sqrt: unary(sa.func.sqrt),
ops.Tan: unary(sa.func.tan),
# timestamp methods
ops.TimestampNow: fixed_arity(sa.func.GETDATE, 0),
ops.ExtractYear: _extract('year'),
ops.ExtractMonth: _extract('month'),
ops.ExtractDay: _extract('day'),
ops.ExtractHour: _extract('hour'),
ops.ExtractMinute: _extract('minute'),
ops.ExtractSecond: _extract('second'),
ops.ExtractMillisecond: _extract('millisecond'),
}
)
_unsupported_ops = [
# standard operations
ops.NotContains,
ops.NullIf,
ops.NotAny,
# miscellaneous
ops.Least,
ops.Greatest,
# numeric
ops.Round,
ops.Log2,
ops.Ln,
ops.Log10,
ops.Log,
ops.Exp,
ops.Modulus,
# string
ops.Contains,
ops.LPad,
ops.RPad,
ops.Capitalize,
ops.RegexSearch,
ops.RegexExtract,
ops.RegexReplace,
ops.StringAscii,
ops.StringSQLLike,
# aggregate methods
ops.CumulativeMax,
ops.CumulativeMin,
ops.CumulativeMean,
ops.CumulativeSum,
# datetime methods
ops.TimestampTruncate,
]
_unsupported_ops = {k: raise_unsupported_op_error for k in _unsupported_ops}
_operation_registry.update(_unsupported_ops)
class MSSQLExprTranslator(alch.AlchemyExprTranslator):
_registry = _operation_registry
_rewrites = alch.AlchemyExprTranslator._rewrites.copy()
_type_map = alch.AlchemyExprTranslator._type_map.copy()
_type_map.update(
{
dt.Boolean: pyodbc.SQL_BIT,
dt.Int8: mssql.TINYINT,
dt.Int32: mssql.INTEGER,
dt.Int64: mssql.BIGINT,
dt.Float: mssql.REAL,
dt.Double: mssql.REAL,
dt.String: mssql.VARCHAR,
}
)
rewrites = MSSQLExprTranslator.rewrites
compiles = MSSQLExprTranslator.compiles
class MSSQLDialect(alch.AlchemyDialect):
translator = MSSQLExprTranslator
dialect = MSSQLDialect
| 27.077295
| 76
| 0.649955
|
import pyodbc
import sqlalchemy as sa
import sqlalchemy.dialects.mssql as mssql
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.backends.base_sqlalchemy.alchemy as alch
from ibis.backends.base_sqlalchemy.alchemy import fixed_arity, unary
def raise_unsupported_op_error(translator, expr, *args):
msg = "SQLServer backend doesn't support {} operation!"
op = expr.op()
raise com.UnsupportedOperationError(msg.format(type(op)))
# Aggregation
# copied from postgresql compiler
# support for bit columns in aggregate methods
def _reduction(func_name, cast_type='int32'):
def reduction_compiler(t, expr):
arg, where = expr.op().args
if arg.type().equals(dt.boolean):
arg = arg.cast(cast_type)
func = getattr(sa.func, func_name)
if where is not None:
arg = where.ifelse(arg, None)
return func(t.translate(arg))
return reduction_compiler
# String
# TODO: substr and find are copied from SQLite, we should really have a
# "base" set of SQL functions that are the most common APIs across the major
# RDBMS
def _substr(t, expr):
f = sa.func.substring
arg, start, length = expr.op().args
sa_arg = t.translate(arg)
sa_start = t.translate(start)
if length is None:
return f(sa_arg, sa_start + 1)
else:
sa_length = t.translate(length)
return f(sa_arg, sa_start + 1, sa_length)
def _string_find(t, expr):
arg, substr, start, _ = expr.op().args
sa_arg = t.translate(arg)
sa_substr = t.translate(substr)
if start is not None:
sa_start = t.translate(start)
return sa.func.charindex(sa_substr, sa_arg, sa_start) - 1
return sa.func.charindex(sa_substr, sa_arg) - 1
# Numerical
def _floor_divide(t, expr):
left, right = map(t.translate, expr.op().args)
return sa.func.floor(left / right)
def _extract(fmt):
def translator(t, expr):
(arg,) = expr.op().args
sa_arg = t.translate(arg)
        # sa.literal_column is used because it passes the argument through
        # as-is, NOT as a bound parameter
return sa.cast(
sa.func.datepart(sa.literal_column(fmt), sa_arg), sa.SMALLINT
)
return translator
_operation_registry = alch._operation_registry.copy()
_operation_registry.update(
{
# aggregate methods
        ops.Count: _reduction('count'),  # _reduction expects a function name string
ops.Max: _reduction('max'),
ops.Min: _reduction('min'),
ops.Sum: _reduction('sum'),
ops.Mean: _reduction('avg', 'float64'),
# string methods
ops.LStrip: unary(sa.func.ltrim),
ops.Lowercase: unary(sa.func.lower),
ops.RStrip: unary(sa.func.rtrim),
ops.Repeat: fixed_arity(sa.func.replicate, 2),
ops.Reverse: unary(sa.func.reverse),
ops.StringFind: _string_find,
ops.StringLength: unary(sa.func.length),
ops.StringReplace: fixed_arity(sa.func.replace, 3),
ops.Strip: unary(sa.func.trim),
ops.Substring: _substr,
ops.Uppercase: unary(sa.func.upper),
# math
ops.Abs: unary(sa.func.abs),
ops.Acos: unary(sa.func.acos),
ops.Asin: unary(sa.func.asin),
ops.Atan2: fixed_arity(sa.func.atn2, 2),
ops.Atan: unary(sa.func.atan),
ops.Ceil: unary(sa.func.ceiling),
ops.Cos: unary(sa.func.cos),
ops.Floor: unary(sa.func.floor),
ops.FloorDivide: _floor_divide,
ops.Power: fixed_arity(sa.func.power, 2),
ops.Sign: unary(sa.func.sign),
ops.Sin: unary(sa.func.sin),
ops.Sqrt: unary(sa.func.sqrt),
ops.Tan: unary(sa.func.tan),
# timestamp methods
ops.TimestampNow: fixed_arity(sa.func.GETDATE, 0),
ops.ExtractYear: _extract('year'),
ops.ExtractMonth: _extract('month'),
ops.ExtractDay: _extract('day'),
ops.ExtractHour: _extract('hour'),
ops.ExtractMinute: _extract('minute'),
ops.ExtractSecond: _extract('second'),
ops.ExtractMillisecond: _extract('millisecond'),
}
)
_unsupported_ops = [
# standard operations
ops.NotContains,
ops.NullIf,
ops.NotAny,
# miscellaneous
ops.Least,
ops.Greatest,
# numeric
ops.Round,
ops.Log2,
ops.Ln,
ops.Log10,
ops.Log,
ops.Exp,
ops.Modulus,
# string
ops.Contains,
ops.LPad,
ops.RPad,
ops.Capitalize,
ops.RegexSearch,
ops.RegexExtract,
ops.RegexReplace,
ops.StringAscii,
ops.StringSQLLike,
# aggregate methods
ops.CumulativeMax,
ops.CumulativeMin,
ops.CumulativeMean,
ops.CumulativeSum,
# datetime methods
ops.TimestampTruncate,
]
_unsupported_ops = {k: raise_unsupported_op_error for k in _unsupported_ops}
_operation_registry.update(_unsupported_ops)
class MSSQLExprTranslator(alch.AlchemyExprTranslator):
_registry = _operation_registry
_rewrites = alch.AlchemyExprTranslator._rewrites.copy()
_type_map = alch.AlchemyExprTranslator._type_map.copy()
_type_map.update(
{
dt.Boolean: pyodbc.SQL_BIT,
dt.Int8: mssql.TINYINT,
dt.Int32: mssql.INTEGER,
dt.Int64: mssql.BIGINT,
dt.Float: mssql.REAL,
dt.Double: mssql.REAL,
dt.String: mssql.VARCHAR,
}
)
rewrites = MSSQLExprTranslator.rewrites
compiles = MSSQLExprTranslator.compiles
class MSSQLDialect(alch.AlchemyDialect):
translator = MSSQLExprTranslator
dialect = MSSQLDialect
| true
| true
|
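The registry in this row maps each ibis operation either to a custom translator (such as _substr or _extract) or to one built by the unary/fixed_arity helpers imported at the top of the file. A toy sketch of what those two helpers do, written against the calling convention used above (an illustration only, not the actual implementation in ibis.backends.base_sqlalchemy.alchemy):

import sqlalchemy as sa

def fixed_arity(sa_func, arity):
    # Build a translator that applies sa_func to exactly `arity` translated args.
    def formatter(translator, expr):
        args = expr.op().args[:arity]
        return sa_func(*(translator.translate(arg) for arg in args))
    return formatter

def unary(sa_func):
    # One-argument special case, e.g. ops.Reverse -> REVERSE(<translated arg>).
    return fixed_arity(sa_func, 1)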
1c3fde21a6ddd37df1568bf727236ba6d5ba10f9
| 707
|
py
|
Python
|
cla-backend/helpers/get_token.py
|
rinkeshbhutwala/easycla
|
b92eae57a696050b0a5c15c12e4084583f3d1c4c
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 52
|
2019-07-03T17:13:03.000Z
|
2022-03-29T20:42:55.000Z
|
cla-backend/helpers/get_token.py
|
rinkeshbhutwala/easycla
|
b92eae57a696050b0a5c15c12e4084583f3d1c4c
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 1,368
|
2019-07-03T21:24:07.000Z
|
2022-03-30T22:56:17.000Z
|
cla-backend/helpers/get_token.py
|
rinkeshbhutwala/easycla
|
b92eae57a696050b0a5c15c12e4084583f3d1c4c
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | 49
|
2019-07-03T21:20:58.000Z
|
2021-12-10T08:22:18.000Z
|
# Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
import sys
sys.path.append('../')
from keycloak import KeycloakOpenID
import cla
# kc = KeycloakOpenID(cla.conf['KEYCLOAK_ENDPOINT'],
# cla.conf['KEYCLOAK_CLIENT_ID'],
# cla.conf['KEYCLOAK_REALM'],
# cla.conf['KEYCLOAK_CLIENT_SECRET'])
# certs = kc.certs()
# token = kc.token('password', 'foobarski', 'foobarski') # Password is same as username for sandbox.
# print(token)
# print(kc.decode_token(token['access_token'], certs))
# token = kc.token('client_credentials')
# print(token)
# print(kc.decode_token(token['access_token'], certs))
| 33.666667
| 100
| 0.678925
|
import sys
sys.path.append('../')
from keycloak import KeycloakOpenID
import cla
| true
| true
|
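The commented-out block in this row sketches the intended python-keycloak flow. A minimal uncommented version using the same configuration keys (a sketch: the credentials are sandbox placeholders from the original comments, and the token() keyword signature is assumed from current python-keycloak):

from keycloak import KeycloakOpenID
import cla

kc = KeycloakOpenID(cla.conf['KEYCLOAK_ENDPOINT'],
                    cla.conf['KEYCLOAK_CLIENT_ID'],
                    cla.conf['KEYCLOAK_REALM'],
                    cla.conf['KEYCLOAK_CLIENT_SECRET'])
certs = kc.certs()  # realm public keys, used to verify token signatures
token = kc.token(username='foobarski', password='foobarski')  # password grant
print(kc.decode_token(token['access_token'], certs))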
1c3fdfdf45eeccd2bbadce8856a0d6f7840228fe
| 1,659
|
py
|
Python
|
dive/soundex/stage1/soundex2c.py
|
abos5/pythontutor
|
eba451700def8bd98d74668d1b6cc08c0ccc0d3c
|
[
"MIT"
] | null | null | null |
dive/soundex/stage1/soundex2c.py
|
abos5/pythontutor
|
eba451700def8bd98d74668d1b6cc08c0ccc0d3c
|
[
"MIT"
] | null | null | null |
dive/soundex/stage1/soundex2c.py
|
abos5/pythontutor
|
eba451700def8bd98d74668d1b6cc08c0ccc0d3c
|
[
"MIT"
] | null | null | null |
import string
import re
allchars = string.uppercase + string.lowercase
charToSoundex = string.maketrans(allchars, "91239129922455912623919292" * 2)
isOnlyChars = re.compile('^[A-Za-z]+$').search
def soundex(source):
"convert string to Soundex equivalent"
if not isOnlyChars(source):
return "0000"
digits = source[0].upper() + source[1:].translate(charToSoundex)
digits2 = digits[0]
for d in digits[1:]:
if digits2[-1] != d:
digits2 += d
digits3 = re.sub('9', '', digits2)
while len(digits3) < 4:
digits3 += "0"
return digits3[:4]
if __name__ == '__main__':
from timeit import Timer
names = ('Woo', 'Pilgrim', 'Flingjingwaller')
print("\n#\n# result of way 2")
for name in names:
statement = "soundex('%s')" % name
t = Timer(statement, "from __main__ import soundex")
print("# "), (name.ljust(15)), (soundex(name)), (
min(t.repeat(20, 50000)))
print("# "),
# result of way 1 20 tests for 50000 each
# Woo W000 0.44571715703
# Pilgrim P426 0.479107457274
# Flingjingwaller F452 0.682555275898
# [Finished in 35.1s]
# result of way 2a
# Woo W000 0.394521652421
# Pilgrim P426 0.472973832852
# Flingjingwaller F452 0.626513547054
# [Finished in 33.2s]
#
# result of way 2b
# Woo W000 0.370683812297
# Pilgrim P426 0.432303317395
# Flingjingwaller F452 0.554885901285
#
# [Finished in 29.7s]
#
# result of way 2c
# Woo W000 0.242933975172
# Pilgrim P426 0.27535503057
# Flingjingwaller F452 0.364904879764
#
# [Finished in 18.9s]
# end of file
| 27.196721
| 76
| 0.621459
|
import string
import re
allchars = string.uppercase + string.lowercase
charToSoundex = string.maketrans(allchars, "91239129922455912623919292" * 2)
isOnlyChars = re.compile('^[A-Za-z]+$').search
def soundex(source):
if not isOnlyChars(source):
return "0000"
digits = source[0].upper() + source[1:].translate(charToSoundex)
digits2 = digits[0]
for d in digits[1:]:
if digits2[-1] != d:
digits2 += d
digits3 = re.sub('9', '', digits2)
while len(digits3) < 4:
digits3 += "0"
return digits3[:4]
if __name__ == '__main__':
from timeit import Timer
names = ('Woo', 'Pilgrim', 'Flingjingwaller')
print("\n#\n# result of way 2")
for name in names:
statement = "soundex('%s')" % name
t = Timer(statement, "from __main__ import soundex")
print("# "), (name.ljust(15)), (soundex(name)), (
min(t.repeat(20, 50000)))
print("# "),
| true
| true
|
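The file in this row is Python 2 (string.uppercase, string.maketrans, print statements). A sketch of the same algorithm ported to Python 3 with str.maketrans; the behaviour is intended to match the original:

import re
import string

allchars = string.ascii_uppercase + string.ascii_lowercase
char_to_soundex = str.maketrans(allchars, "91239129922455912623919292" * 2)
is_only_chars = re.compile('^[A-Za-z]+$').search

def soundex(source):
    """Convert a string to its Soundex equivalent (Python 3 port)."""
    if not is_only_chars(source):
        return "0000"
    # Keep the first letter, map the rest to Soundex digit codes (9 = drop).
    digits = source[0].upper() + source[1:].translate(char_to_soundex)
    # Collapse consecutive duplicates, drop the 9s, pad with zeros to 4 chars.
    collapsed = digits[0]
    for d in digits[1:]:
        if collapsed[-1] != d:
            collapsed += d
    code = collapsed.replace('9', '')
    return (code + "000")[:4]

# Matches the outputs the timing harness above reports:
assert soundex('Woo') == 'W000'
assert soundex('Pilgrim') == 'P426'
assert soundex('Flingjingwaller') == 'F452'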
1c3fe0bfadc33918d956aa1c6d71ceca984dd702
| 28,756
|
py
|
Python
|
archetypal/template/zonedefinition.py
|
louisleroy5/archetypal
|
71f13aaed859c10e663e68624e2b74d816de631f
|
[
"MIT"
] | null | null | null |
archetypal/template/zonedefinition.py
|
louisleroy5/archetypal
|
71f13aaed859c10e663e68624e2b74d816de631f
|
[
"MIT"
] | null | null | null |
archetypal/template/zonedefinition.py
|
louisleroy5/archetypal
|
71f13aaed859c10e663e68624e2b74d816de631f
|
[
"MIT"
] | null | null | null |
################################################################################
# Module: archetypal.template
# Description:
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
import collections
import functools
import math
import sqlite3
import time
from operator import add
import numpy as np
from deprecation import deprecated
from eppy.bunch_subclass import BadEPFieldError
from geomeppy.geom.polygons import Polygon3D
from sigfig import round
from archetypal import __version__, is_referenced, log, settings, timeit
from archetypal.template import (
DomesticHotWaterSetting,
OpaqueConstruction,
UmiBase,
UniqueName,
VentilationSetting,
WindowSetting,
ZoneConditioning,
ZoneConstructionSet,
ZoneLoad,
)
class ZoneDefinition(UmiBase):
"""Class containing HVAC settings: Conditioning, Domestic Hot Water, Loads,
    Ventilation, and Constructions
.. image:: ../images/template/zoneinfo-zone.png
"""
def __init__(
self,
Name,
Constructions=None,
Loads=None,
Conditioning=None,
Ventilation=None,
DomesticHotWater=None,
DaylightMeshResolution=1,
DaylightWorkplaneHeight=0.8,
InternalMassConstruction=None,
InternalMassExposedPerFloorArea=1.05,
Windows=None,
**kwargs,
):
"""Initialize :class:`Zone` object.
Args:
Name (str): Name of the object. Must be Unique.
Constructions (ZoneConstructionSet):
Loads (ZoneLoad): Loads of the zone defined with the lights,
equipment and occupancy parameters (see :class:`ZoneLoad`)
Conditioning (ZoneConditioning): Conditioning of the zone defined
with heating/cooling and mechanical ventilation parameters (see
:class:`ZoneConditioning`)
Ventilation (VentilationSetting): Ventilation settings of the zone
defined with the infiltration rate and natural ventilation
parameters (see :class:`VentilationSetting`)
DomesticHotWater (archetypal.template.dhw.DomesticHotWaterSetting):
DaylightMeshResolution (float):
DaylightWorkplaneHeight (float):
InternalMassConstruction (archetypal.OpaqueConstruction):
InternalMassExposedPerFloorArea:
Windows (WindowSetting): The WindowSetting object associated with
this zone.
**kwargs:
"""
super(ZoneDefinition, self).__init__(Name, **kwargs)
self.Ventilation = Ventilation
self.Loads = Loads
self.Conditioning = Conditioning
self.Constructions = Constructions
self.DaylightMeshResolution = DaylightMeshResolution
self.DaylightWorkplaneHeight = DaylightWorkplaneHeight
self.DomesticHotWater = DomesticHotWater
self.InternalMassConstruction = InternalMassConstruction
self.InternalMassExposedPerFloorArea = InternalMassExposedPerFloorArea
self.Windows = Windows # This is not used in to_json()
self._epbunch = kwargs.get("epbunch", None)
self._zonesurfaces = kwargs.get("zonesurfaces", None)
self._area = None
self._volume = None
self._is_part_of_conditioned_floor_area = None
self._is_part_of_total_floor_area = None
self._multiplier = None
@property
def InternalMassExposedPerFloorArea(self):
return float(self._InternalMassExposedPerFloorArea)
@InternalMassExposedPerFloorArea.setter
def InternalMassExposedPerFloorArea(self, value):
self._InternalMassExposedPerFloorArea = value
def __add__(self, other):
"""
Args:
other (ZoneDefinition):
"""
# create the new merged zone from self
return ZoneDefinition.combine(self, other)
def __hash__(self):
return hash(
(self.__class__.__name__, getattr(self, "Name", None), self.DataSource)
)
def __eq__(self, other):
if not isinstance(other, ZoneDefinition):
return False
else:
return all(
[
self.Conditioning == other.Conditioning,
self.Constructions == other.Constructions,
self.DomesticHotWater == other.DomesticHotWater,
self.Loads == other.Loads,
self.Ventilation == other.Ventilation,
self.Windows == other.Windows,
self.InternalMassConstruction == other.InternalMassConstruction,
self.InternalMassExposedPerFloorArea
== other.InternalMassExposedPerFloorArea,
self.DaylightMeshResolution == other.DaylightMeshResolution,
self.DaylightWorkplaneHeight == other.DaylightWorkplaneHeight,
]
)
@property
def area(self):
"""Calculates the floor surface area of the zone
Returns (float): zone's area in m²
"""
if self._area is None:
zone_surfs = self.zonesurfaces(
exclude=["INTERNALMASS", "WINDOWSHADINGCONTROL"]
)
floors = [s for s in zone_surfs if s.Surface_Type.upper() == "FLOOR"]
area = sum([floor.area for floor in floors])
return area
else:
return self._area
@area.setter
def area(self, value):
self._area = value
@property
def volume(self):
"""Calculates the volume of the zone
Returns (float): zone's volume in m³
"""
if not self._volume:
zone_surfs = self.zonesurfaces(
exclude=["INTERNALMASS", "WINDOWSHADINGCONTROL"]
)
vol = self.get_volume_from_surfs(zone_surfs)
if self._epbunch.Multiplier == "":
multiplier = 1
else:
multiplier = float(self._epbunch.Multiplier)
            # multiply the volume by the zone multiplier.
return vol * multiplier
else:
return self._volume
@volume.setter
def volume(self, value):
self._volume = value
def zonesurfaces(self, exclude=None):
"""Returns list of surfaces belonging to this zone. Optionally filter
surface types.
Args:
exclude (list): exclude surface types, e.g.: ["INTERNALMASS",
"WINDOWSHADINGCONTROL"]. Object key must be in capital letters.
"""
if exclude is None:
exclude = []
if self._zonesurfaces is None:
self._zonesurfaces = [surf for surf in self._epbunch.zonesurfaces]
return [surf for surf in self._zonesurfaces if surf.key.upper() not in exclude]
@property
def is_core(self):
return is_core(self._epbunch)
@property
def multiplier(self):
"""Zone multipliers are designed as a “multiplier” for floor area,
zone loads, and energy consumed by internal gains.
"""
if self._multiplier is None:
with sqlite3.connect(self.idf.sql_file) as conn:
sql_query = "SELECT t.Value FROM TabularDataWithStrings t WHERE TableName='Zone Summary' and ColumnName='Multipliers' and RowName=?"
(res,) = conn.execute(sql_query, (self.Name.upper(),)).fetchone()
self._multiplier = int(float(res))
return self._multiplier
@multiplier.setter
def multiplier(self, value):
self._multiplier = value
@property
def is_part_of_conditioned_floor_area(self):
"""Returns True if zone is conditioned"""
if self._is_part_of_conditioned_floor_area is None:
with sqlite3.connect(self.idf.sql_file) as conn:
sql_query = (
"SELECT t.Value FROM TabularDataWithStrings t WHERE TableName='Zone Summary' and ColumnName='Conditioned (Y/N)' and RowName=?"
""
)
res = conn.execute(sql_query, (self.Name.upper(),)).fetchone()
self._is_part_of_conditioned_floor_area = "Yes" in res
return self._is_part_of_conditioned_floor_area
@property
def is_part_of_total_floor_area(self):
"""Returns True if zone is part of the total floor area"""
if self._is_part_of_total_floor_area is None:
with sqlite3.connect(self.idf.sql_file) as conn:
sql_query = "SELECT t.Value FROM TabularDataWithStrings t WHERE TableName='Zone Summary' and ColumnName='Part of Total Floor Area (Y/N)' and RowName=?"
res = conn.execute(sql_query, (self.Name.upper(),)).fetchone()
self._is_part_of_total_floor_area = "Yes" in res
return self._is_part_of_total_floor_area
@staticmethod
def get_volume_from_surfs(zone_surfs):
"""Calculate the volume of a zone only and only if the surfaces are such
that you can find a point inside so that you can connect every vertex to
the point without crossing a face.
Adapted from: https://stackoverflow.com/a/19125446
Args:
zone_surfs (list): List of zone surfaces (EpBunch)
"""
vol = 0
for surf in zone_surfs:
polygon_d = Polygon3D(surf.coords) # create Polygon3D from surf
n = len(polygon_d.vertices_list)
v2 = polygon_d[0]
x2 = v2.x
y2 = v2.y
z2 = v2.z
for i in range(1, n - 1):
v0 = polygon_d[i]
x0 = v0.x
y0 = v0.y
z0 = v0.z
v1 = polygon_d[i + 1]
x1 = v1.x
y1 = v1.y
z1 = v1.z
# Add volume of tetrahedron formed by triangle and origin
vol += math.fabs(
x0 * y1 * z2
+ x1 * y2 * z0
+ x2 * y0 * z1
- x0 * y2 * z1
- x1 * y0 * z2
- x2 * y1 * z0
)
return vol / 6.0
@timeit
def _internalmassconstruction(self):
"""Specifies the internal mass construction based on InternalMass objects
referenced to the zone. Group internal walls into a ThermalMass
object for this Zone"""
# Check for internal mass objects in all zones.
mass_opaque_constructions = [] # placeholder for possible InternalMass
area = 0 # placeholder for possible InternalMass area.
internal_mass_objs = self.idf.idfobjects["INTERNALMASS"]
# then loop to find referenced InternalMass to zone self
if internal_mass_objs:
# There are InternalMass objects, but is there one assigned to this zone?
for int_obj in internal_mass_objs:
# Looping over possible InternalMass objects
if is_referenced(self.Name, int_obj):
# This InternalMass object (int_obj) is assigned to self,
                    # then create object and append to list. There could be more than
# one.
mass_opaque_constructions.append(
OpaqueConstruction.from_epbunch(
int_obj, Category="Internal Mass"
)
)
area += float(int_obj.Surface_Area)
# If one or more constructions, combine them into one.
if mass_opaque_constructions:
# Combine elements and assign the aggregated Surface Area
self.InternalMassExposedPerFloorArea = float(area) / self.area
return functools.reduce(add, mass_opaque_constructions)
else:
# No InternalMass object assigned to this Zone, then return Zone and set
# floor area to 0
self.InternalMassExposedPerFloorArea = 0
return None
def set_generic_internalmass(self):
"""Creates a valid internal mass object with
InternalMassExposedPerFloorArea = 0 and sets it to the
self.InternalMassConstruction attribute.
"""
self.InternalMassConstruction = OpaqueConstruction.generic_internalmass(
idf=self.idf
)
self.InternalMassExposedPerFloorArea = 0
def to_json(self):
self.validate() # Validate object before trying to get json format
data_dict = collections.OrderedDict()
data_dict["$id"] = str(self.id)
data_dict["Conditioning"] = self.Conditioning.to_dict()
data_dict["Constructions"] = self.Constructions.to_dict()
data_dict["DaylightMeshResolution"] = round(self.DaylightMeshResolution, 2)
data_dict["DaylightWorkplaneHeight"] = round(self.DaylightWorkplaneHeight, 2)
data_dict["DomesticHotWater"] = self.DomesticHotWater.to_dict()
data_dict["InternalMassConstruction"] = self.InternalMassConstruction.to_dict()
data_dict["InternalMassExposedPerFloorArea"] = round(
self.InternalMassExposedPerFloorArea, 2
)
data_dict["Loads"] = self.Loads.to_dict()
data_dict["Ventilation"] = self.Ventilation.to_dict()
data_dict["Category"] = self.Category
data_dict["Comments"] = self.Comments
data_dict["DataSource"] = self.DataSource
data_dict["Name"] = UniqueName(self.Name)
return data_dict
@classmethod
@deprecated(
deprecated_in="1.3.1",
removed_in="1.5",
current_version=__version__,
details="Use from_dict function instead",
)
def from_json(cls, *args, **kwargs):
return cls.from_dict(*args, **kwargs)
@classmethod
def from_dict(
cls,
Conditioning,
Constructions,
DomesticHotWater,
InternalMassConstruction,
Loads,
Ventilation,
*args,
**kwargs,
):
"""
Args:
*args:
**kwargs:
"""
Conditioning = cls.get_classref(Conditioning)
Constructions = cls.get_classref(Constructions)
DomesticHotWater = cls.get_classref(DomesticHotWater)
InternalMassConstruction = cls.get_classref(InternalMassConstruction)
Loads = cls.get_classref(Loads)
Ventilation = cls.get_classref(Ventilation)
zone = cls(
*args,
Conditioning=Conditioning,
Constructions=Constructions,
DomesticHotWater=DomesticHotWater,
InternalMassConstruction=InternalMassConstruction,
Loads=Loads,
Ventilation=Ventilation,
**kwargs,
)
return zone
@classmethod
def from_zone_epbunch(cls, zone_ep, sql, **kwargs):
"""Create a Zone object from an eppy 'ZONE' epbunch.
Args:
zone_ep (eppy.bunch_subclass.EpBunch): The Zone EpBunch.
sql (dict): The sql dict for this IDF object.
"""
start_time = time.time()
log('Constructing :class:`Zone` for zone "{}"'.format(zone_ep.Name))
name = zone_ep.Name
zone = cls(
Name=name,
idf=zone_ep.theidf,
Category=zone_ep.theidf.name,
**kwargs,
)
zone._epbunch = zone_ep
zone._zonesurfaces = zone_ep.zonesurfaces
zone.Constructions = ZoneConstructionSet.from_zone(zone, **kwargs)
zone.Conditioning = ZoneConditioning.from_zone(zone, **kwargs)
zone.Ventilation = VentilationSetting.from_zone(zone, **kwargs)
zone.DomesticHotWater = DomesticHotWaterSetting.from_zone(zone, **kwargs)
zone.Loads = ZoneLoad.from_zone(zone, **kwargs)
zone.InternalMassConstruction = zone._internalmassconstruction()
zone.Windows = WindowSetting.from_zone(zone, **kwargs)
log(
'completed Zone "{}" constructor in {:,.2f} seconds'.format(
zone_ep.Name, time.time() - start_time
)
)
return zone
def combine(self, other, weights=None, allow_duplicates=False):
"""
Args:
other (ZoneDefinition):
weights (list-like, optional): A list-like object of len 2. If None,
the volume of the zones for which self and other belongs is
used.
Todo:
Create Equivalent InternalMassConstruction from partitions when combining
zones.
Returns:
(ZoneDefinition): the combined Zone object.
"""
# Check if other is None. Simply return self
if not other:
return self
if not self:
return other
# Check if other is the same type as self
if not isinstance(other, self.__class__):
msg = "Cannot combine %s with %s" % (
self.__class__.__name__,
other.__class__.__name__,
)
raise NotImplementedError(msg)
meta = self._get_predecessors_meta(other)
if not weights:
zone_weight = settings.zone_weight
weights = [
getattr(self, str(zone_weight)),
getattr(other, str(zone_weight)),
]
log(
'using zone {} "{}" as weighting factor in "{}" '
"combine.".format(
zone_weight,
" & ".join(list(map(str, map(int, weights)))),
self.__class__.__name__,
)
)
new_attr = dict(
Conditioning=ZoneConditioning.combine(
self.Conditioning, other.Conditioning, weights
),
Constructions=ZoneConstructionSet.combine(
self.Constructions, other.Constructions, weights
),
Ventilation=VentilationSetting.combine(
self.Ventilation, other.Ventilation, weights
),
Windows=WindowSetting.combine(self.Windows, other.Windows, weights),
DaylightMeshResolution=self._float_mean(
other, "DaylightMeshResolution", weights=weights
),
DaylightWorkplaneHeight=self._float_mean(
other, "DaylightWorkplaneHeight", weights
),
DomesticHotWater=DomesticHotWaterSetting.combine(
self.DomesticHotWater, other.DomesticHotWater, weights
),
InternalMassConstruction=OpaqueConstruction.combine(
self.InternalMassConstruction, other.InternalMassConstruction
),
InternalMassExposedPerFloorArea=self._float_mean(
other, "InternalMassExposedPerFloorArea", weights
),
Loads=ZoneLoad.combine(self.Loads, other.Loads, weights),
)
new_obj = ZoneDefinition(**meta, **new_attr, idf=self.idf)
new_obj.volume = self.volume + other.volume
new_obj.area = self.area + other.area
if new_attr["Conditioning"]: # Could be None
new_attr["Conditioning"]._belongs_to_zone = new_obj
if new_attr["Constructions"]: # Could be None
new_attr["Constructions"]._belongs_to_zone = new_obj
if new_attr["Ventilation"]: # Could be None
new_attr["Ventilation"]._belongs_to_zone = new_obj
if new_attr["DomesticHotWater"]: # Could be None
new_attr["DomesticHotWater"]._belongs_to_zone = new_obj
if new_attr["Windows"]: # Could be None
new_attr["Windows"]._belongs_to_zone = new_obj
new_obj.predecessors.update(self.predecessors + other.predecessors)
return new_obj
def validate(self):
"""Validates UmiObjects and fills in missing values"""
if not self.InternalMassConstruction:
self.set_generic_internalmass()
self.InternalMassExposedPerFloorArea = 0
log(
f"While validating {self}, the required attribute "
f"'InternalMassConstruction' was filled "
f"with {self.InternalMassConstruction} and the "
f"'InternalMassExposedPerFloorArea' set to"
f" {self.InternalMassExposedPerFloorArea}"
)
if not self.DomesticHotWater:
self.DomesticHotWater = DomesticHotWaterSetting.whole_building(self.idf)
if self.Conditioning is None:
self.Conditioning = ZoneConditioning(Name="Unconditioned Zone")
return self
def mapping(self):
self.validate()
return dict(
Conditioning=self.Conditioning,
Constructions=self.Constructions,
DaylightMeshResolution=self.DaylightMeshResolution,
DaylightWorkplaneHeight=self.DaylightWorkplaneHeight,
DomesticHotWater=self.DomesticHotWater,
InternalMassConstruction=self.InternalMassConstruction,
InternalMassExposedPerFloorArea=self.InternalMassExposedPerFloorArea,
Loads=self.Loads,
Ventilation=self.Ventilation,
Category=self.Category,
Comments=self.Comments,
DataSource=self.DataSource,
Name=self.Name,
)
def get_ref(self, ref):
"""Gets item matching ref id
Args:
ref:
"""
return next(
iter(
[
value
for value in ZoneDefinition.CREATED_OBJECTS
if value.id == ref["$ref"]
]
),
None,
)
def resolve_obco(this):
"""Resolve the outside boundary condition of a surface and return the other
SURFACE epbunch and, if possible, the ZONE epbunch.
Args:
this (EpBunch): The surface for which we are identifying the boundary
object.
Returns:
(EpBunch, EpBunch): A tuple of:
            EpBunch (the other surface) and EpBunch (the other zone).
Notes:
Info on the Outside Boundary Condition Object of a surface of type
BuildingSurface:Detailed:
Non-blank only if the field `Outside Boundary Condition` is *Surface*,
*Zone*, *OtherSideCoefficients* or *OtherSideConditionsModel*. If
Surface, specify name of corresponding surface in adjacent zone or
specify current surface name for internal partition separating like
zones. If Zone, specify the name of the corresponding zone and the
program will generate the corresponding interzone surface. If
Foundation, specify the name of the corresponding Foundation object and
the program will calculate the heat transfer appropriately. If
OtherSideCoefficients, specify name of
SurfaceProperty:OtherSideCoefficients. If OtherSideConditionsModel,
specify name of SurfaceProperty:OtherSideConditionsModel.
"""
# other belongs to which zone?
# for key in this.getfieldidd_item('Outside_Boundary_Condition_Object',
# 'validobjects'):
obc = this.Outside_Boundary_Condition
if obc.upper() == "ZONE":
name = this.Outside_Boundary_Condition_Object
adj_zone = this.theidf.getobject("ZONE", name)
return None, adj_zone
elif obc.upper() == "SURFACE":
obco = this.get_referenced_object("Outside_Boundary_Condition_Object")
adj_zone = obco.theidf.getobject("ZONE", obco.Zone_Name)
return obco, adj_zone
else:
return None, None
def label_surface(row):
"""Takes a boundary and returns its corresponding umi-Category
Args:
row:
"""
# Floors
if row["Surface_Type"] == "Floor":
if row["Outside_Boundary_Condition"] == "Surface":
return "Interior Floor"
if row["Outside_Boundary_Condition"] == "Ground":
return "Ground Floor"
if row["Outside_Boundary_Condition"] == "Outdoors":
return "Exterior Floor"
if row["Outside_Boundary_Condition"] == "Adiabatic":
return "Interior Floor"
else:
return "Other"
# Roofs & Ceilings
if row["Surface_Type"] == "Roof":
return "Roof"
if row["Surface_Type"] == "Ceiling":
return "Interior Floor"
# Walls
if row["Surface_Type"] == "Wall":
if row["Outside_Boundary_Condition"] == "Surface":
return "Partition"
if row["Outside_Boundary_Condition"] == "Outdoors":
return "Facade"
if row["Outside_Boundary_Condition"] == "Adiabatic":
return "Partition"
return "Other"
def type_surface(row):
"""Takes a boundary and returns its corresponding umi-type
Args:
row:
"""
# Floors
if row["Surface_Type"] == "Floor":
if row["Outside_Boundary_Condition"] == "Surface":
return 3 # umi defined
if row["Outside_Boundary_Condition"] == "Ground":
return 2 # umi defined
if row["Outside_Boundary_Condition"] == "Outdoors":
return 4 # umi defined
if row["Outside_Boundary_Condition"] == "Adiabatic":
return 5
else:
            raise ValueError('Cannot find Construction Type for "{}"'.format(row))
# Roofs & Ceilings
elif row["Surface_Type"] == "Roof":
return 1
elif row["Surface_Type"] == "Ceiling":
return 3
# Walls
elif row["Surface_Type"] == "Wall":
if row["Outside_Boundary_Condition"] == "Surface":
return 5 # umi defined
if row["Outside_Boundary_Condition"] == "Outdoors":
return 0 # umi defined
if row["Outside_Boundary_Condition"] == "Adiabatic":
return 5 # umi defined
else:
raise ValueError('Cannot find Construction Type for "{}"'.format(row))
def zone_information(df):
"""Each zone_loads is summarized in a simple set of statements
Args:
df:
Returns:
df
References:
        * `Zone Loads Information
          <https://bigladdersoftware.com/epx/docs/8-3/output-details-and-examples/eplusout.eio.html#zone_loads-information>`_
"""
df = get_from_tabulardata(df)
tbstr = df[
(df.ReportName == "Initialization Summary")
& (df.TableName == "Zone Information")
].reset_index()
# Ignore Zone that are not part of building area
pivoted = tbstr.pivot_table(
index=["RowName"],
columns="ColumnName",
values="Value",
aggfunc=lambda x: " ".join(x),
)
return pivoted.loc[pivoted["Part of Total Building Area"] == "Yes", :]
def get_from_tabulardata(sql):
"""Returns a DataFrame from the 'TabularDataWithStrings' table.
Args:
sql (dict):
Returns:
(pandas.DataFrame)
"""
tab_data_wstring = sql["TabularDataWithStrings"]
tab_data_wstring.index.names = ["Index"]
# strip whitespaces
tab_data_wstring.Value = tab_data_wstring.Value.str.strip()
tab_data_wstring.RowName = tab_data_wstring.RowName.str.strip()
return tab_data_wstring
def is_core(zone):
"""
Args:
zone (eppy.bunch_subclass.EpBunch): The Zone object.
Returns:
(bool): Whether the zone is a core zone or not.
"""
    # a zone is core if no tilted surface (0 < tilt < 180) has boundary
    # condition "Outdoors" or "Ground"
iscore = True
for s in zone.zonesurfaces:
try:
if (abs(int(s.tilt)) < 180) & (abs(int(s.tilt)) > 0):
obc = s.Outside_Boundary_Condition.lower()
if obc in ["outdoors", "ground"]:
iscore = False
break
except BadEPFieldError:
pass # pass surfaces that don't have an OBC,
# eg. InternalMass
return iscore
def iscore(row):
"""Helps to group by core and perimeter zones. If any of "has `core` in
name" and "ExtGrossWallArea == 0" is true, will consider zone_loads as core,
else as perimeter.
Todo:
        * assumes a basement zone_loads will be considered a core zone_loads,
          since basements have no exterior wall area.
Args:
row (pandas.Series): a row
Returns:
str: 'Core' or 'Perimeter'
"""
if any(
[
"core" in row["Zone Name"].lower(),
float(row["Exterior Gross Wall Area {m2}"]) == 0,
]
):
# We look for the string `core` in the Zone_Name
return "Core"
elif row["Part of Total Building Area"] == "No":
return np.NaN
elif "plenum" in row["Zone Name"].lower():
return np.NaN
else:
return "Perimeter"
| 35.326781
| 167
| 0.60224
|
res
return self._is_part_of_total_floor_area
@staticmethod
def get_volume_from_surfs(zone_surfs):
vol = 0
for surf in zone_surfs:
polygon_d = Polygon3D(surf.coords)
n = len(polygon_d.vertices_list)
v2 = polygon_d[0]
x2 = v2.x
y2 = v2.y
z2 = v2.z
for i in range(1, n - 1):
v0 = polygon_d[i]
x0 = v0.x
y0 = v0.y
z0 = v0.z
v1 = polygon_d[i + 1]
x1 = v1.x
y1 = v1.y
z1 = v1.z
vol += math.fabs(
x0 * y1 * z2
+ x1 * y2 * z0
+ x2 * y0 * z1
- x0 * y2 * z1
- x1 * y0 * z2
- x2 * y1 * z0
)
return vol / 6.0
@timeit
def _internalmassconstruction(self):
mass_opaque_constructions = []
area = 0
internal_mass_objs = self.idf.idfobjects["INTERNALMASS"]
if internal_mass_objs:
for int_obj in internal_mass_objs:
if is_referenced(self.Name, int_obj):
mass_opaque_constructions.append(
OpaqueConstruction.from_epbunch(
int_obj, Category="Internal Mass"
)
)
area += float(int_obj.Surface_Area)
if mass_opaque_constructions:
self.InternalMassExposedPerFloorArea = float(area) / self.area
return functools.reduce(add, mass_opaque_constructions)
else:
self.InternalMassExposedPerFloorArea = 0
return None
def set_generic_internalmass(self):
self.InternalMassConstruction = OpaqueConstruction.generic_internalmass(
idf=self.idf
)
self.InternalMassExposedPerFloorArea = 0
def to_json(self):
self.validate()
data_dict = collections.OrderedDict()
data_dict["$id"] = str(self.id)
data_dict["Conditioning"] = self.Conditioning.to_dict()
data_dict["Constructions"] = self.Constructions.to_dict()
data_dict["DaylightMeshResolution"] = round(self.DaylightMeshResolution, 2)
data_dict["DaylightWorkplaneHeight"] = round(self.DaylightWorkplaneHeight, 2)
data_dict["DomesticHotWater"] = self.DomesticHotWater.to_dict()
data_dict["InternalMassConstruction"] = self.InternalMassConstruction.to_dict()
data_dict["InternalMassExposedPerFloorArea"] = round(
self.InternalMassExposedPerFloorArea, 2
)
data_dict["Loads"] = self.Loads.to_dict()
data_dict["Ventilation"] = self.Ventilation.to_dict()
data_dict["Category"] = self.Category
data_dict["Comments"] = self.Comments
data_dict["DataSource"] = self.DataSource
data_dict["Name"] = UniqueName(self.Name)
return data_dict
@classmethod
@deprecated(
deprecated_in="1.3.1",
removed_in="1.5",
current_version=__version__,
details="Use from_dict function instead",
)
def from_json(cls, *args, **kwargs):
return cls.from_dict(*args, **kwargs)
@classmethod
def from_dict(
cls,
Conditioning,
Constructions,
DomesticHotWater,
InternalMassConstruction,
Loads,
Ventilation,
*args,
**kwargs,
):
Conditioning = cls.get_classref(Conditioning)
Constructions = cls.get_classref(Constructions)
DomesticHotWater = cls.get_classref(DomesticHotWater)
InternalMassConstruction = cls.get_classref(InternalMassConstruction)
Loads = cls.get_classref(Loads)
Ventilation = cls.get_classref(Ventilation)
zone = cls(
*args,
Conditioning=Conditioning,
Constructions=Constructions,
DomesticHotWater=DomesticHotWater,
InternalMassConstruction=InternalMassConstruction,
Loads=Loads,
Ventilation=Ventilation,
**kwargs,
)
return zone
@classmethod
def from_zone_epbunch(cls, zone_ep, sql, **kwargs):
start_time = time.time()
log('Constructing :class:`Zone` for zone "{}"'.format(zone_ep.Name))
name = zone_ep.Name
zone = cls(
Name=name,
idf=zone_ep.theidf,
Category=zone_ep.theidf.name,
**kwargs,
)
zone._epbunch = zone_ep
zone._zonesurfaces = zone_ep.zonesurfaces
zone.Constructions = ZoneConstructionSet.from_zone(zone, **kwargs)
zone.Conditioning = ZoneConditioning.from_zone(zone, **kwargs)
zone.Ventilation = VentilationSetting.from_zone(zone, **kwargs)
zone.DomesticHotWater = DomesticHotWaterSetting.from_zone(zone, **kwargs)
zone.Loads = ZoneLoad.from_zone(zone, **kwargs)
zone.InternalMassConstruction = zone._internalmassconstruction()
zone.Windows = WindowSetting.from_zone(zone, **kwargs)
log(
'completed Zone "{}" constructor in {:,.2f} seconds'.format(
zone_ep.Name, time.time() - start_time
)
)
return zone
def combine(self, other, weights=None, allow_duplicates=False):
if not other:
return self
if not self:
return other
if not isinstance(other, self.__class__):
msg = "Cannot combine %s with %s" % (
self.__class__.__name__,
other.__class__.__name__,
)
raise NotImplementedError(msg)
meta = self._get_predecessors_meta(other)
if not weights:
zone_weight = settings.zone_weight
weights = [
getattr(self, str(zone_weight)),
getattr(other, str(zone_weight)),
]
log(
'using zone {} "{}" as weighting factor in "{}" '
"combine.".format(
zone_weight,
" & ".join(list(map(str, map(int, weights)))),
self.__class__.__name__,
)
)
new_attr = dict(
Conditioning=ZoneConditioning.combine(
self.Conditioning, other.Conditioning, weights
),
Constructions=ZoneConstructionSet.combine(
self.Constructions, other.Constructions, weights
),
Ventilation=VentilationSetting.combine(
self.Ventilation, other.Ventilation, weights
),
Windows=WindowSetting.combine(self.Windows, other.Windows, weights),
DaylightMeshResolution=self._float_mean(
other, "DaylightMeshResolution", weights=weights
),
DaylightWorkplaneHeight=self._float_mean(
other, "DaylightWorkplaneHeight", weights
),
DomesticHotWater=DomesticHotWaterSetting.combine(
self.DomesticHotWater, other.DomesticHotWater, weights
),
InternalMassConstruction=OpaqueConstruction.combine(
self.InternalMassConstruction, other.InternalMassConstruction
),
InternalMassExposedPerFloorArea=self._float_mean(
other, "InternalMassExposedPerFloorArea", weights
),
Loads=ZoneLoad.combine(self.Loads, other.Loads, weights),
)
new_obj = ZoneDefinition(**meta, **new_attr, idf=self.idf)
new_obj.volume = self.volume + other.volume
new_obj.area = self.area + other.area
if new_attr["Conditioning"]:
new_attr["Conditioning"]._belongs_to_zone = new_obj
if new_attr["Constructions"]:
new_attr["Constructions"]._belongs_to_zone = new_obj
if new_attr["Ventilation"]:
new_attr["Ventilation"]._belongs_to_zone = new_obj
if new_attr["DomesticHotWater"]:
new_attr["DomesticHotWater"]._belongs_to_zone = new_obj
if new_attr["Windows"]:
new_attr["Windows"]._belongs_to_zone = new_obj
new_obj.predecessors.update(self.predecessors + other.predecessors)
return new_obj
def validate(self):
if not self.InternalMassConstruction:
self.set_generic_internalmass()
self.InternalMassExposedPerFloorArea = 0
log(
f"While validating {self}, the required attribute "
f"'InternalMassConstruction' was filled "
f"with {self.InternalMassConstruction} and the "
f"'InternalMassExposedPerFloorArea' set to"
f" {self.InternalMassExposedPerFloorArea}"
)
if not self.DomesticHotWater:
self.DomesticHotWater = DomesticHotWaterSetting.whole_building(self.idf)
if self.Conditioning is None:
self.Conditioning = ZoneConditioning(Name="Unconditioned Zone")
return self
def mapping(self):
self.validate()
return dict(
Conditioning=self.Conditioning,
Constructions=self.Constructions,
DaylightMeshResolution=self.DaylightMeshResolution,
DaylightWorkplaneHeight=self.DaylightWorkplaneHeight,
DomesticHotWater=self.DomesticHotWater,
InternalMassConstruction=self.InternalMassConstruction,
InternalMassExposedPerFloorArea=self.InternalMassExposedPerFloorArea,
Loads=self.Loads,
Ventilation=self.Ventilation,
Category=self.Category,
Comments=self.Comments,
DataSource=self.DataSource,
Name=self.Name,
)
def get_ref(self, ref):
return next(
iter(
[
value
for value in ZoneDefinition.CREATED_OBJECTS
if value.id == ref["$ref"]
]
),
None,
)
def resolve_obco(this):
obc = this.Outside_Boundary_Condition
if obc.upper() == "ZONE":
name = this.Outside_Boundary_Condition_Object
adj_zone = this.theidf.getobject("ZONE", name)
return None, adj_zone
elif obc.upper() == "SURFACE":
obco = this.get_referenced_object("Outside_Boundary_Condition_Object")
adj_zone = obco.theidf.getobject("ZONE", obco.Zone_Name)
return obco, adj_zone
else:
return None, None
def label_surface(row):
if row["Surface_Type"] == "Floor":
if row["Outside_Boundary_Condition"] == "Surface":
return "Interior Floor"
if row["Outside_Boundary_Condition"] == "Ground":
return "Ground Floor"
if row["Outside_Boundary_Condition"] == "Outdoors":
return "Exterior Floor"
if row["Outside_Boundary_Condition"] == "Adiabatic":
return "Interior Floor"
else:
return "Other"
if row["Surface_Type"] == "Roof":
return "Roof"
if row["Surface_Type"] == "Ceiling":
return "Interior Floor"
if row["Surface_Type"] == "Wall":
if row["Outside_Boundary_Condition"] == "Surface":
return "Partition"
if row["Outside_Boundary_Condition"] == "Outdoors":
return "Facade"
if row["Outside_Boundary_Condition"] == "Adiabatic":
return "Partition"
return "Other"
def type_surface(row):
if row["Surface_Type"] == "Floor":
if row["Outside_Boundary_Condition"] == "Surface":
return 3
if row["Outside_Boundary_Condition"] == "Ground":
return 2
if row["Outside_Boundary_Condition"] == "Outdoors":
return 4
if row["Outside_Boundary_Condition"] == "Adiabatic":
return 5
else:
            raise ValueError('Cannot find Construction Type for "{}"'.format(row))
elif row["Surface_Type"] == "Roof":
return 1
elif row["Surface_Type"] == "Ceiling":
return 3
elif row["Surface_Type"] == "Wall":
if row["Outside_Boundary_Condition"] == "Surface":
return 5
if row["Outside_Boundary_Condition"] == "Outdoors":
return 0
if row["Outside_Boundary_Condition"] == "Adiabatic":
return 5
else:
raise ValueError('Cannot find Construction Type for "{}"'.format(row))
def zone_information(df):
df = get_from_tabulardata(df)
tbstr = df[
(df.ReportName == "Initialization Summary")
& (df.TableName == "Zone Information")
].reset_index()
pivoted = tbstr.pivot_table(
index=["RowName"],
columns="ColumnName",
values="Value",
aggfunc=lambda x: " ".join(x),
)
return pivoted.loc[pivoted["Part of Total Building Area"] == "Yes", :]
def get_from_tabulardata(sql):
tab_data_wstring = sql["TabularDataWithStrings"]
tab_data_wstring.index.names = ["Index"]
tab_data_wstring.Value = tab_data_wstring.Value.str.strip()
tab_data_wstring.RowName = tab_data_wstring.RowName.str.strip()
return tab_data_wstring
def is_core(zone):
iscore = True
for s in zone.zonesurfaces:
try:
if (abs(int(s.tilt)) < 180) & (abs(int(s.tilt)) > 0):
obc = s.Outside_Boundary_Condition.lower()
if obc in ["outdoors", "ground"]:
iscore = False
break
except BadEPFieldError:
pass # pass surfaces that don't have an OBC,
return iscore
def iscore(row):
if any(
[
"core" in row["Zone Name"].lower(),
float(row["Exterior Gross Wall Area {m2}"]) == 0,
]
):
return "Core"
elif row["Part of Total Building Area"] == "No":
return np.NaN
elif "plenum" in row["Zone Name"].lower():
return np.NaN
else:
return "Perimeter"
| true
| true
|
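get_volume_from_surfs in this row computes zone volume by fanning each surface polygon into triangles and summing |det| of the tetrahedra each triangle forms with the origin. A self-contained sketch of the same trick on plain coordinate tuples (it assumes, as the original docstring requires, that the solid is star-shaped with respect to the chosen interior point, here the origin):

import math

def mesh_volume(faces):
    """Volume of a closed solid; each face is a list of (x, y, z) vertices."""
    vol = 0.0
    for face in faces:
        x2, y2, z2 = face[0]  # fan apex, mirroring polygon_d[0] above
        for (x0, y0, z0), (x1, y1, z1) in zip(face[1:-1], face[2:]):
            # |det[v0; v1; v2]| is six times the volume of tetra (0, v0, v1, v2)
            vol += math.fabs(
                x0 * y1 * z2 + x1 * y2 * z0 + x2 * y0 * z1
                - x0 * y2 * z1 - x1 * y0 * z2 - x2 * y1 * z0
            )
    return vol / 6.0

# Unit cube as six quadrilateral faces -> 1.0
cube = [
    [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)],
    [(0, 0, 1), (1, 0, 1), (1, 1, 1), (0, 1, 1)],
    [(0, 0, 0), (1, 0, 0), (1, 0, 1), (0, 0, 1)],
    [(0, 1, 0), (1, 1, 0), (1, 1, 1), (0, 1, 1)],
    [(0, 0, 0), (0, 1, 0), (0, 1, 1), (0, 0, 1)],
    [(1, 0, 0), (1, 1, 0), (1, 1, 1), (1, 0, 1)],
]
assert abs(mesh_volume(cube) - 1.0) < 1e-9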
1c3fe2578a469520372eedadc600c4ff9471a9ef
| 4,503
|
py
|
Python
|
samples/openapi3/client/petstore/python-experimental/petstore_api/models/cat_all_of.py
|
doc22940/openapi-generator
|
50d21cb0d161e7917bb410a7db78811635f0837b
|
[
"Apache-2.0"
] | 1
|
2020-09-16T22:26:09.000Z
|
2020-09-16T22:26:09.000Z
|
samples/openapi3/client/petstore/python-experimental/petstore_api/models/cat_all_of.py
|
doc22940/openapi-generator
|
50d21cb0d161e7917bb410a7db78811635f0837b
|
[
"Apache-2.0"
] | null | null | null |
samples/openapi3/client/petstore/python-experimental/petstore_api/models/cat_all_of.py
|
doc22940/openapi-generator
|
50d21cb0d161e7917bb410a7db78811635f0837b
|
[
"Apache-2.0"
] | 1
|
2020-10-06T15:41:06.000Z
|
2020-10-06T15:41:06.000Z
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class CatAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
        allowed_values (dict): The key is the tuple path to the attribute
            (for var_name this is (var_name,)). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
        validations (dict): The key is the tuple path to the attribute
            (for var_name this is (var_name,)). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
        This must be a class method so a model may have properties that are
        of type self; this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'declawed': (bool,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
'declawed': 'declawed', # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set([
'_data_store',
'_check_type',
'_from_server',
'_path_to_item',
'_configuration',
])
def __init__(self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs): # noqa: E501
"""cat_all_of.CatAllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
declawed (bool): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 33.604478
| 174
| 0.609594
|
from __future__ import absolute_import
import re
import sys
import six
from petstore_api.model_utils import (
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class CatAllOf(ModelNormal):
allowed_values = {
}
validations = {
}
additional_properties_type = None
@staticmethod
def openapi_types():
return {
'declawed': (bool,),
}
@staticmethod
def discriminator():
return None
attribute_map = {
'declawed': 'declawed',
}
@staticmethod
def _composed_schemas():
return None
required_properties = set([
'_data_store',
'_check_type',
'_from_server',
'_path_to_item',
'_configuration',
])
def __init__(self, _check_type=True, _from_server=False, _path_to_item=(), _configuration=None, **kwargs):
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
| true
| true
|
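The openapi_types docstring in this row explains why the type map comes from a method rather than a class attribute: evaluation is deferred to call time, so a model can name its own class, or one that would otherwise create a cyclic import. A minimal illustration of that pattern with a hypothetical Node model:

class Node:
    @staticmethod
    def openapi_types():
        # Deferred evaluation: by the time this runs, Node is fully defined,
        # so a self-referential property type is safe to name here.
        return {
            'name': (str,),
            'parent': (Node,),
        }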
1c3fe3235baa13687662db78218ee5a102d7bf2a
| 1,780
|
py
|
Python
|
src/omission/interface/highscore.py
|
mousepawmedia/omission
|
e2a25d9c510be5c2d3469d064ee764f03050911f
|
[
"BSD-3-Clause"
] | 13
|
2019-06-10T02:30:30.000Z
|
2022-01-09T08:25:48.000Z
|
src/omission/interface/highscore.py
|
mousepawmedia/omission
|
e2a25d9c510be5c2d3469d064ee764f03050911f
|
[
"BSD-3-Clause"
] | null | null | null |
src/omission/interface/highscore.py
|
mousepawmedia/omission
|
e2a25d9c510be5c2d3469d064ee764f03050911f
|
[
"BSD-3-Clause"
] | 2
|
2019-09-02T03:51:38.000Z
|
2020-11-30T01:50:57.000Z
|
"""
Highscore Prompt Interface [Omission]
"""
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.textinput import TextInput
class Highscore(BoxLayout):
"""
Displays the prompt for entering a name for a new high score.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.datastring = None
self.score = None
def set_info(self, datastring, score):
"""
Set the information for the highscore box.
"""
self.datastring = datastring
self.score = score
self.ids.lbl_score.text = str(self.score)
def submit(self, name):
"""
Validate the name, submit the score, and close.
"""
# If we actually have a name...
if not name == "":
# Register the high score.
App.get_running_app().dataloader.add_score(self.datastring,
self.score, name)
# Switch to the menu.
self.parent.show_menu(self)
class NameTextInput(TextInput):
"""
Extend the TextInput widget to introduce a character maximum.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.font_name = App.get_running_app().dataloader.fontloader.decorative()
def insert_text(self, substring, from_undo=False):
"""
Prevent too many characters.
"""
limit = 12
if len(self.text) >= limit:
substring = ""
return super().insert_text(substring, from_undo=from_undo)
def submit(self):
"""
Mirror to parent's submit.
"""
# Get the name entered in the text box.
name = self.text
self.parent.submit(name)
| 27.8125
| 81
| 0.57809
|
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.textinput import TextInput
class Highscore(BoxLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.datastring = None
self.score = None
def set_info(self, datastring, score):
self.datastring = datastring
self.score = score
self.ids.lbl_score.text = str(self.score)
def submit(self, name):
if not name == "":
App.get_running_app().dataloader.add_score(self.datastring,
self.score, name)
self.parent.show_menu(self)
class NameTextInput(TextInput):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.font_name = App.get_running_app().dataloader.fontloader.decorative()
def insert_text(self, substring, from_undo=False):
limit = 12
if len(self.text) >= limit:
substring = ""
return super().insert_text(substring, from_undo=from_undo)
def submit(self):
name = self.text
self.parent.submit(name)
| true
| true
|
1c3fe36663b363fbad6630f23d3c1ce859e431dd
| 158
|
py
|
Python
|
gigs/filter.py
|
djangoLovers/fiverrClone
|
567ea972603b4997590e1351a7d27fba8add91af
|
[
"MIT"
] | 2
|
2021-04-17T19:08:07.000Z
|
2022-03-21T11:11:54.000Z
|
gigs/filter.py
|
djangoLovers/fiverrClone
|
567ea972603b4997590e1351a7d27fba8add91af
|
[
"MIT"
] | null | null | null |
gigs/filter.py
|
djangoLovers/fiverrClone
|
567ea972603b4997590e1351a7d27fba8add91af
|
[
"MIT"
] | 4
|
2021-04-17T19:08:08.000Z
|
2021-09-15T21:04:41.000Z
|
import django_filters
from .models import Gig
class gigFilter(django_filters.FilterSet):
class Meta:
model = Gig
fields = ['category']
| 15.8
| 42
| 0.677215
|
import django_filters
from .models import Gig
class gigFilter(django_filters.FilterSet):
class Meta:
model = Gig
fields = ['category']
| true
| true
|
1c3fe426dc2304d925c2926fa64efd31c2a68274
| 16,293
|
py
|
Python
|
examples/system/ota/native_ota_example/example_test.py
|
moolitayer/esp-idf
|
c1d0daf36d0dca81c23c226001560edfa51c30ea
|
[
"Apache-2.0"
] | 1
|
2020-02-26T02:30:07.000Z
|
2020-02-26T02:30:07.000Z
|
examples/system/ota/native_ota_example/example_test.py
|
moolitayer/esp-idf
|
c1d0daf36d0dca81c23c226001560edfa51c30ea
|
[
"Apache-2.0"
] | null | null | null |
examples/system/ota/native_ota_example/example_test.py
|
moolitayer/esp-idf
|
c1d0daf36d0dca81c23c226001560edfa51c30ea
|
[
"Apache-2.0"
] | null | null | null |
import re
import os
import socket
import BaseHTTPServer
import SimpleHTTPServer
from threading import Thread
import ssl
from tiny_test_fw import DUT
import ttfw_idf
import random
import subprocess
server_cert = "-----BEGIN CERTIFICATE-----\n" \
"MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n"\
"BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n"\
"aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n"\
"MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n"\
"ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n"\
"CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n"\
"nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n"\
"9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n"\
"w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n"\
"3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n"\
"lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n"\
"IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n"\
"DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n"\
"/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n"\
"lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n"\
"6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n"\
"fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n"\
"y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n"\
"hA==\n"\
"-----END CERTIFICATE-----\n"
server_key = "-----BEGIN PRIVATE KEY-----\n"\
"MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n"\
"uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n"\
"iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n"\
"ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n"\
"BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n"\
"1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n"\
"Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n"\
"02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n"\
"4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n"\
"SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n"\
"cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n"\
"8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n"\
"MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n"\
"6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n"\
"CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n"\
"ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n"\
"0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n"\
"5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n"\
"zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n"\
"V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n"\
"RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n"\
"nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n"\
"GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n"\
"9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n"\
"qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n"\
"muhfskWf4MABV0yTUaKcGg==\n"\
"-----END PRIVATE KEY-----\n"
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
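# Editor's note: connecting a UDP socket sends no packets; it only asks the OS
# to select the local interface/route toward 8.8.8.8, so getsockname() yields
# this machine's outbound IP address. No reply from 8.8.8.8 is required.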
def start_https_server(ota_image_dir, server_ip, server_port):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, "server_cert.pem")
cert_file_handle = open(server_file, "w+")
cert_file_handle.write(server_cert)
cert_file_handle.close()
key_file = os.path.join(ota_image_dir, "server_key.pem")
    key_file_handle = open(key_file, "w+")
key_file_handle.write(server_key)
key_file_handle.close()
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
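# Editor's note: ssl.wrap_socket() is deprecated (and removed in Python 3.12).
# A minimal sketch of the equivalent using ssl.SSLContext, assuming the same
# server_file/key_file paths written above:
#
#     context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
#     context.load_cert_chain(certfile=server_file, keyfile=key_file)
#     httpd.socket = context.wrap_socket(httpd.socket, server_side=True)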
def start_chunked_server(ota_image_dir, server_port):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, "server_cert.pem")
cert_file_handle = open(server_file, "w+")
cert_file_handle.write(server_cert)
cert_file_handle.close()
key_file = os.path.join(ota_image_dir, "server_key.pem")
    key_file_handle = open(key_file, "w+")
key_file_handle.write(server_key)
key_file_handle.close()
chunked_server = subprocess.Popen(["openssl", "s_server", "-WWW", "-key", key_file, "-cert", server_file, "-port", str(server_port)])
return chunked_server
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file
    multiple times. The number of iterations can be set in the variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
    # Number of OTA iterations over which the application is validated
iterations = 3
# File to be downloaded. This file is generated after compilation
bin_name = "native_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8002))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
        # Note: threading.Thread has no close(); the daemon HTTPS server thread needs no explicit shutdown.
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8002/" + bin_name))
dut1.write("https://" + host_ip + ":8002/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting OTA example", timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_truncated_bin(env, extra_data):
"""
    This test case validates OTA behavior when the binary file is truncated.
    The application should abort with an error message in this case.
steps: |
1. join AP
2. Generate truncated binary file
3. Fetch OTA image over HTTPS
4. Check working of code if bin is truncated
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
# Original binary file generated after compilation
bin_name = "native_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated.bin"
    # Size of the truncated file to be generated. This value can range from 288 bytes (image header size) to the size of the original binary file
    # truncated_bin_size is set to 64000 to reduce the time consumed by the test case
truncated_bin_size = 64000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "r+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "w+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8002/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":8002/" + truncated_bin_name)
dut1.expect("native_ota_example: Image validation failed, image is corrupted", timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_truncated_header(env, extra_data):
"""
    This test case validates OTA behavior when the headers of the binary file are truncated.
    The application should abort with an error message in this case.
steps: |
1. join AP
2. Generate binary file with truncated headers
3. Fetch OTA image over HTTPS
4. Check working of code if headers are not sent completely
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
# Original binary file generated after compilation
bin_name = "native_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated_header.bin"
    # Size of the truncated file to be generated. This value should be less than 288 bytes (image header size)
truncated_bin_size = 180
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "r+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "w+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8002/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":8002/" + truncated_bin_name)
dut1.expect("native_ota_example: received package is not fit len", timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_random(env, extra_data):
"""
    This test case validates OTA behavior when the binary file contains random data.
Magic byte verification should fail in this case.
steps: |
1. join AP
2. Generate random binary image
3. Fetch OTA image over HTTPS
4. Check working of code for random binary file
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
# Random binary file to be generated
random_bin_name = "random.bin"
    # Size of the random binary file. 32000 is chosen to reduce the time required to run the test case
random_bin_size = 32000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, "w+")
    # The first byte of the binary file is always set to zero. If it were generated
    # randomly, it could be 0xE9 (the ESP image magic byte), which would make the test case fail.
fo.write(str(0))
for i in range(random_bin_size - 1):
fo.write(str(random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8002/" + random_bin_name))
dut1.write("https://" + host_ip + ":8002/" + random_bin_name)
dut1.expect("esp_ota_ops: OTA image has invalid magic byte", timeout=20)
os.remove(binary_file)
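# Editor's sketch: the loop above writes decimal string representations of
# random numbers, which suffices to provoke a magic-byte failure but is not
# truly random binary data. An alternative (an assumption, not the original
# test) that emits raw bytes while still avoiding the 0xE9 ESP magic byte:
#
#     payload = bytearray(os.urandom(random_bin_size))
#     payload[0] = 0x00  # never 0xE9, so magic-byte validation must fail
#     with open(binary_file, "wb") as fo:
#         fo.write(bytes(payload))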
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_chunked(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file over
    HTTPS with chunked transfer encoding and reboots into the new image.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
# File to be downloaded. This file is generated after compilation
bin_name = "native_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
# start test
host_ip = get_my_ip()
chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8070/" + bin_name))
dut1.write("https://" + host_ip + ":8070/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting OTA example", timeout=30)
chunked_server.kill()
os.remove(os.path.join(dut1.app.binary_path, "server_cert.pem"))
os.remove(os.path.join(dut1.app.binary_path, "server_key.pem"))
if __name__ == '__main__':
test_examples_protocol_native_ota_example()
test_examples_protocol_native_ota_example_chunked()
test_examples_protocol_native_ota_example_truncated_bin()
test_examples_protocol_native_ota_example_truncated_header()
test_examples_protocol_native_ota_example_random()
| 48.204142
| 137
| 0.711471
|
import re
import os
import socket
import BaseHTTPServer
import SimpleHTTPServer
from threading import Thread
import ssl
from tiny_test_fw import DUT
import ttfw_idf
import random
import subprocess
server_cert = "-----BEGIN CERTIFICATE-----\n" \
"MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n"\
"BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n"\
"aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n"\
"MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n"\
"ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n"\
"CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n"\
"nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n"\
"9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n"\
"w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n"\
"3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n"\
"lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n"\
"IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n"\
"DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n"\
"/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n"\
"lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n"\
"6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n"\
"fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n"\
"y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n"\
"hA==\n"\
"-----END CERTIFICATE-----\n"
server_key = "-----BEGIN PRIVATE KEY-----\n"\
"MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n"\
"uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n"\
"iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n"\
"ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n"\
"BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n"\
"1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n"\
"Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n"\
"02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n"\
"4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n"\
"SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n"\
"cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n"\
"8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n"\
"MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n"\
"6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n"\
"CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n"\
"ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n"\
"0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n"\
"5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n"\
"zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n"\
"V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n"\
"RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n"\
"nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n"\
"GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n"\
"9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n"\
"qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n"\
"muhfskWf4MABV0yTUaKcGg==\n"\
"-----END PRIVATE KEY-----\n"
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def start_https_server(ota_image_dir, server_ip, server_port):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, "server_cert.pem")
cert_file_handle = open(server_file, "w+")
cert_file_handle.write(server_cert)
cert_file_handle.close()
key_file = os.path.join(ota_image_dir, "server_key.pem")
    key_file_handle = open(key_file, "w+")
key_file_handle.write(server_key)
key_file_handle.close()
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
def start_chunked_server(ota_image_dir, server_port):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, "server_cert.pem")
cert_file_handle = open(server_file, "w+")
cert_file_handle.write(server_cert)
cert_file_handle.close()
key_file = os.path.join(ota_image_dir, "server_key.pem")
    key_file_handle = open(key_file, "w+")
key_file_handle.write(server_key)
key_file_handle.close()
chunked_server = subprocess.Popen(["openssl", "s_server", "-WWW", "-key", key_file, "-cert", server_file, "-port", str(server_port)])
return chunked_server
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example(env, extra_data):
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
iterations = 3
bin_name = "native_ota.bin"
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
host_ip = get_my_ip()
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8002))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8002/" + bin_name))
dut1.write("https://" + host_ip + ":8002/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting OTA example", timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_truncated_bin(env, extra_data):
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
bin_name = "native_ota.bin"
truncated_bin_name = "truncated.bin"
truncated_bin_size = 64000
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "r+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "w+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
host_ip = get_my_ip()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8002/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":8002/" + truncated_bin_name)
dut1.expect("native_ota_example: Image validation failed, image is corrupted", timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_truncated_header(env, extra_data):
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
bin_name = "native_ota.bin"
truncated_bin_name = "truncated_header.bin"
truncated_bin_size = 180
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "r+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "w+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
host_ip = get_my_ip()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8002/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":8002/" + truncated_bin_name)
dut1.expect("native_ota_example: received package is not fit len", timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_random(env, extra_data):
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
random_bin_name = "random.bin"
random_bin_size = 32000
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, "w+")
fo.write(str(0))
for i in range(random_bin_size - 1):
fo.write(str(random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
host_ip = get_my_ip()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=60)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8002/" + random_bin_name))
dut1.write("https://" + host_ip + ":8002/" + random_bin_name)
dut1.expect("esp_ota_ops: OTA image has invalid magic byte", timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_native_ota_example_chunked(env, extra_data):
dut1 = env.get_dut("native_ota_example", "examples/system/ota/native_ota_example", dut_class=ttfw_idf.ESP32DUT)
bin_name = "native_ota.bin"
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("native_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("native_ota_bin_size", bin_size // 1024)
host_ip = get_my_ip()
chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8070/" + bin_name))
dut1.write("https://" + host_ip + ":8070/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting OTA example", timeout=30)
chunked_server.kill()
os.remove(os.path.join(dut1.app.binary_path, "server_cert.pem"))
os.remove(os.path.join(dut1.app.binary_path, "server_key.pem"))
if __name__ == '__main__':
test_examples_protocol_native_ota_example()
test_examples_protocol_native_ota_example_chunked()
test_examples_protocol_native_ota_example_truncated_bin()
test_examples_protocol_native_ota_example_truncated_header()
test_examples_protocol_native_ota_example_random()
| true
| true
|
1c3fe459583d256e465eeff96c43107ef808c2d6
| 1,049
|
py
|
Python
|
Totoro/bin/ic342_lst_range.py
|
sdss/Totoro
|
74befd99bda47ebb8c03a276b57371b5788e154a
|
[
"Apache-2.0"
] | 1
|
2018-08-22T00:34:30.000Z
|
2018-08-22T00:34:30.000Z
|
Totoro/bin/ic342_lst_range.py
|
sdss/Totoro
|
74befd99bda47ebb8c03a276b57371b5788e154a
|
[
"Apache-2.0"
] | 4
|
2018-06-06T22:10:14.000Z
|
2018-06-14T04:47:23.000Z
|
Totoro/bin/ic342_lst_range.py
|
sdss/Totoro
|
74befd99bda47ebb8c03a276b57371b5788e154a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2019-10-07
# @Filename: ic342_lst_range.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import sys
from Totoro.db import getConnection
def load_lst_ranges(lst0, lst1):
lst0 = float(lst0)
lst1 = float(lst1)
ic342_ra = 55.910158
ha0 = lst0 * 15. - ic342_ra
ha1 = lst1 * 15. - ic342_ra
db = getConnection()
session = db.Session()
ic342_plates = session.query(db.mangaDB.Plate).join(
db.plateDB.Plate, db.plateDB.PlateToSurvey, db.plateDB.Survey,
db.plateDB.SurveyMode).filter(db.plateDB.Survey.label == 'MaNGA',
db.plateDB.SurveyMode.label == 'MaNGA 10min').all()
with session.begin():
for plate in ic342_plates:
plate.ha_min = ha0
plate.ha_max = ha1
plate.field_name = 'IC342'
if __name__ == '__main__':
lst0, lst1 = sys.argv[1:3]
load_lst_ranges(lst0, lst1)
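# Worked example (editor's note): for IC342 at RA = 55.910158 deg, an LST
# window of 2.0 h to 5.0 h maps to hour angles of
#   ha0 = 2.0 * 15 - 55.910158 = -25.910158 deg
#   ha1 = 5.0 * 15 - 55.910158 = +19.089842 deg
# i.e. LST hours are converted to degrees (15 deg per hour) before the
# plate's right ascension is subtracted.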
| 24.395349
| 89
| 0.620591
|
import sys
from Totoro.db import getConnection
def load_lst_ranges(lst0, lst1):
lst0 = float(lst0)
lst1 = float(lst1)
ic342_ra = 55.910158
ha0 = lst0 * 15. - ic342_ra
ha1 = lst1 * 15. - ic342_ra
db = getConnection()
session = db.Session()
ic342_plates = session.query(db.mangaDB.Plate).join(
db.plateDB.Plate, db.plateDB.PlateToSurvey, db.plateDB.Survey,
db.plateDB.SurveyMode).filter(db.plateDB.Survey.label == 'MaNGA',
db.plateDB.SurveyMode.label == 'MaNGA 10min').all()
with session.begin():
for plate in ic342_plates:
plate.ha_min = ha0
plate.ha_max = ha1
plate.field_name = 'IC342'
if __name__ == '__main__':
lst0, lst1 = sys.argv[1:3]
load_lst_ranges(lst0, lst1)
| true
| true
|
1c3fe4ea951aef122728a7aed7fc4ecaf8e7607e
| 4,082
|
py
|
Python
|
tests/compare_results.py
|
an1018/PaddleOCR
|
0a8ca67a0c4a4ed468e82a575cc64ce73f21e068
|
[
"Apache-2.0"
] | 1
|
2022-01-21T07:48:15.000Z
|
2022-01-21T07:48:15.000Z
|
tests/compare_results.py
|
an1018/PaddleOCR
|
0a8ca67a0c4a4ed468e82a575cc64ce73f21e068
|
[
"Apache-2.0"
] | null | null | null |
tests/compare_results.py
|
an1018/PaddleOCR
|
0a8ca67a0c4a4ed468e82a575cc64ce73f21e068
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import os
import subprocess
import json
import argparse
import glob
def init_args():
parser = argparse.ArgumentParser()
# params for testing assert allclose
parser.add_argument("--atol", type=float, default=1e-3)
parser.add_argument("--rtol", type=float, default=1e-3)
parser.add_argument("--gt_file", type=str, default="")
parser.add_argument("--log_file", type=str, default="")
parser.add_argument("--precision", type=str, default="fp32")
return parser
def parse_args():
parser = init_args()
return parser.parse_args()
def run_shell_command(cmd):
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
if p.returncode == 0:
return out.decode('utf-8')
else:
return None
def parser_results_from_log_by_name(log_path, names_list):
if not os.path.exists(log_path):
raise ValueError("The log file {} does not exists!".format(log_path))
if names_list is None or len(names_list) < 1:
return []
parser_results = {}
for name in names_list:
cmd = "grep {} {}".format(name, log_path)
outs = run_shell_command(cmd)
outs = outs.split("\n")[0]
result = outs.split("{}".format(name))[-1]
result = json.loads(result)
parser_results[name] = result
return parser_results
def load_gt_from_file(gt_file):
if not os.path.exists(gt_file):
raise ValueError("The log file {} does not exists!".format(gt_file))
with open(gt_file, 'r') as f:
data = f.readlines()
f.close()
parser_gt = {}
for line in data:
image_name, result = line.strip("\n").split("\t")
result = json.loads(result)
parser_gt[image_name] = result
return parser_gt
def load_gt_from_txts(gt_file):
gt_list = glob.glob(gt_file)
gt_collection = {}
for gt_f in gt_list:
gt_dict = load_gt_from_file(gt_f)
basename = os.path.basename(gt_f)
if "fp32" in basename:
gt_collection["fp32"] = [gt_dict, gt_f]
elif "fp16" in basename:
gt_collection["fp16"] = [gt_dict, gt_f]
elif "int8" in basename:
gt_collection["int8"] = [gt_dict, gt_f]
else:
continue
return gt_collection
def collect_predict_from_logs(log_path, key_list):
log_list = glob.glob(log_path)
pred_collection = {}
for log_f in log_list:
pred_dict = parser_results_from_log_by_name(log_f, key_list)
key = os.path.basename(log_f)
pred_collection[key] = pred_dict
return pred_collection
def testing_assert_allclose(dict_x, dict_y, atol=1e-7, rtol=1e-7):
for k in dict_x:
np.testing.assert_allclose(
np.array(dict_x[k]), np.array(dict_y[k]), atol=atol, rtol=rtol)
if __name__ == "__main__":
# Usage:
# python3.7 tests/compare_results.py --gt_file=./tests/results/*.txt --log_file=./tests/output/infer_*.log
args = parse_args()
gt_collection = load_gt_from_txts(args.gt_file)
key_list = gt_collection["fp32"][0].keys()
pred_collection = collect_predict_from_logs(args.log_file, key_list)
for filename in pred_collection.keys():
if "fp32" in filename:
gt_dict, gt_filename = gt_collection["fp32"]
elif "fp16" in filename:
gt_dict, gt_filename = gt_collection["fp16"]
elif "int8" in filename:
gt_dict, gt_filename = gt_collection["int8"]
else:
continue
pred_dict = pred_collection[filename]
try:
testing_assert_allclose(
gt_dict, pred_dict, atol=args.atol, rtol=args.rtol)
print(
"Assert allclose passed! The results of {} and {} are consistent!".
format(filename, gt_filename))
except Exception as E:
print(E)
raise ValueError(
"The results of {} and the results of {} are inconsistent!".
format(filename, gt_filename))
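# Editor's note: load_gt_from_file expects one tab-separated record per line,
# e.g. (hypothetical file contents):
#
#     img_001.jpg\t{"boxes": [[10, 20, 30, 40]], "scores": [0.98]}
#
# and parser_results_from_log_by_name greps each such image name out of the
# inference log, then json-loads whatever follows the name on that line.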
| 30.462687
| 111
| 0.628613
|
import numpy as np
import os
import subprocess
import json
import argparse
import glob
def init_args():
parser = argparse.ArgumentParser()
parser.add_argument("--atol", type=float, default=1e-3)
parser.add_argument("--rtol", type=float, default=1e-3)
parser.add_argument("--gt_file", type=str, default="")
parser.add_argument("--log_file", type=str, default="")
parser.add_argument("--precision", type=str, default="fp32")
return parser
def parse_args():
parser = init_args()
return parser.parse_args()
def run_shell_command(cmd):
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
if p.returncode == 0:
return out.decode('utf-8')
else:
return None
def parser_results_from_log_by_name(log_path, names_list):
if not os.path.exists(log_path):
raise ValueError("The log file {} does not exists!".format(log_path))
if names_list is None or len(names_list) < 1:
return []
parser_results = {}
for name in names_list:
cmd = "grep {} {}".format(name, log_path)
outs = run_shell_command(cmd)
outs = outs.split("\n")[0]
result = outs.split("{}".format(name))[-1]
result = json.loads(result)
parser_results[name] = result
return parser_results
def load_gt_from_file(gt_file):
if not os.path.exists(gt_file):
raise ValueError("The log file {} does not exists!".format(gt_file))
with open(gt_file, 'r') as f:
data = f.readlines()
f.close()
parser_gt = {}
for line in data:
image_name, result = line.strip("\n").split("\t")
result = json.loads(result)
parser_gt[image_name] = result
return parser_gt
def load_gt_from_txts(gt_file):
gt_list = glob.glob(gt_file)
gt_collection = {}
for gt_f in gt_list:
gt_dict = load_gt_from_file(gt_f)
basename = os.path.basename(gt_f)
if "fp32" in basename:
gt_collection["fp32"] = [gt_dict, gt_f]
elif "fp16" in basename:
gt_collection["fp16"] = [gt_dict, gt_f]
elif "int8" in basename:
gt_collection["int8"] = [gt_dict, gt_f]
else:
continue
return gt_collection
def collect_predict_from_logs(log_path, key_list):
log_list = glob.glob(log_path)
pred_collection = {}
for log_f in log_list:
pred_dict = parser_results_from_log_by_name(log_f, key_list)
key = os.path.basename(log_f)
pred_collection[key] = pred_dict
return pred_collection
def testing_assert_allclose(dict_x, dict_y, atol=1e-7, rtol=1e-7):
for k in dict_x:
np.testing.assert_allclose(
np.array(dict_x[k]), np.array(dict_y[k]), atol=atol, rtol=rtol)
if __name__ == "__main__":
args = parse_args()
gt_collection = load_gt_from_txts(args.gt_file)
key_list = gt_collection["fp32"][0].keys()
pred_collection = collect_predict_from_logs(args.log_file, key_list)
for filename in pred_collection.keys():
if "fp32" in filename:
gt_dict, gt_filename = gt_collection["fp32"]
elif "fp16" in filename:
gt_dict, gt_filename = gt_collection["fp16"]
elif "int8" in filename:
gt_dict, gt_filename = gt_collection["int8"]
else:
continue
pred_dict = pred_collection[filename]
try:
testing_assert_allclose(
gt_dict, pred_dict, atol=args.atol, rtol=args.rtol)
print(
"Assert allclose passed! The results of {} and {} are consistent!".
format(filename, gt_filename))
except Exception as E:
print(E)
raise ValueError(
"The results of {} and the results of {} are inconsistent!".
format(filename, gt_filename))
| true
| true
|
1c3fe51d7dfc9a3980ef339a227c06617611f30a
| 4,807
|
py
|
Python
|
tests/forte/data/ontology/ndarray_attribute_test.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
tests/forte/data/ontology/ndarray_attribute_test.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | 1
|
2022-02-23T23:21:03.000Z
|
2022-02-23T23:21:03.000Z
|
tests/forte/data/ontology/ndarray_attribute_test.py
|
KGerring/forte
|
7dc6e6c7d62d9a4126bdfc5ca02d15be3ffd61ca
|
[
"Apache-2.0"
] | null | null | null |
import os
import tempfile
import unittest
from ddt import data, ddt
import numpy as np
from forte.data.data_pack import DataPack
from .test_outputs.ft.onto.test_ndarray import NdEntry1, NdEntry2, NdEntry3
"""
NdEntry1, NdEntry2, and NdEntry3 are sample Entry containing NdArray attributes
for testing.
NdEntry1 has both dtype and shape specified,
while NdEntry2 has only dtype specified and NdEntry3 has only shape specified.
"""
@ddt
class SerializationTest(unittest.TestCase):
@data(
NdEntry1,
NdEntry2,
NdEntry3
)
def test_serialization(self, TestEntry):
data_pack = DataPack()
nd_entry = TestEntry(data_pack)
data_pack.add_entry(nd_entry)
with tempfile.TemporaryDirectory() as output_dir:
output_path = os.path.join(output_dir, "datapack.json")
data_pack.serialize(output_path, indent=2)
datapack_deseri = DataPack.deserialize(output_path)
nd_entry_deseri = datapack_deseri.get_single(TestEntry)
if nd_entry.value.dtype:
self.assertEqual(nd_entry.value.dtype, nd_entry_deseri.value.dtype)
if nd_entry.value.shape:
self.assertEqual(nd_entry.value.shape, nd_entry_deseri.value.shape)
if nd_entry.value.data is not None:
self.assertEqual(np.sum(nd_entry.value.data - nd_entry_deseri.value.data), 0)
@ddt
class PropertyTest(unittest.TestCase):
@data(
(NdEntry1, np.array([1], dtype="int")),
(NdEntry1, np.array([[1, 1], [1, 1]], dtype="float")),
(NdEntry2, np.array([[1, 1], [1, 1]], dtype="float")),
(NdEntry3, np.array([1], dtype="int")),
)
def test_bad_np_array(self, input_data):
"""
        Test for numpy arrays with an invalid dtype or shape.
"""
TestEntry, input_array = input_data
data_pack = DataPack()
nd_entry = TestEntry(data_pack)
if nd_entry.value.dtype and input_array.dtype != nd_entry.value.dtype:
with self.assertRaises(TypeError):
nd_entry.value.data = input_array
if nd_entry.value.shape and input_array.shape != nd_entry.value.shape:
with self.assertRaises(AttributeError):
nd_entry.value.data = input_array
@data(
(NdEntry1, [1]),
(NdEntry3, [1]),
(NdEntry1, [[[1]]]),
(NdEntry3, [[[1]]]),
)
def test_bad_py_list(self, input_data):
"""
Test for python list with invalid shape.
"""
TestEntry, input_list = input_data
data_pack = DataPack()
nd_entry = TestEntry(data_pack)
input_array = np.array(input_list)
if nd_entry.value.shape and input_array.shape != nd_entry.value.shape:
with self.assertRaises(AttributeError):
nd_entry.value.data = input_list
@data(
(NdEntry1, 1),
(NdEntry2, 1),
(NdEntry3, 1),
)
def test_invalid_input(self, input_data):
"""
        Test for invalid input (anything other than a numpy array or a python list).
"""
TestEntry, invalid_value = input_data
data_pack = DataPack()
nd_entry = TestEntry(data_pack)
with self.assertRaises(ValueError):
nd_entry.value.data = invalid_value
@data(
(NdEntry1, [[1, 1], [1, 1]]),
(NdEntry1, [[1., 1.], [1., 1.]]),
(NdEntry2, [[1, 1], [1, 1]]),
(NdEntry2, [1]),
(NdEntry2, [1.]),
(NdEntry2, [[1., 1.], [1., 1.]]),
(NdEntry3, [[1, 1], [1, 1]]),
(NdEntry3, [[1., 1.], [1., 1.]]),
)
def test_valid_py_list(self, input_data):
TestEntry, input_list = input_data
data_pack = DataPack()
nd_entry = TestEntry(data_pack)
try:
nd_entry.value.data = input_list
except Exception:
self.fail()
@data(
(NdEntry1, np.array([[1, 1], [1, 1]], dtype="int")),
(NdEntry2, np.array([[1, 1], [1, 1]], dtype="int")),
(NdEntry2, np.array([1, 1], dtype="int")),
(NdEntry3, np.array([[1, 1], [1, 1]], dtype="int")),
(NdEntry3, np.array([[1, 1], [1, 1]], dtype="float")),
)
def test_valid_np_array(self, input_data):
TestEntry, input_array = input_data
data_pack = DataPack()
nd_entry = TestEntry(data_pack)
try:
nd_entry.value.data = input_array
except Exception:
self.fail()
# If assign value successfully, dtype and shape of
# nd_entry.value should match to input_array's.
self.assertEqual(nd_entry.value.dtype, input_array.dtype)
self.assertEqual(nd_entry.value.shape, input_array.shape)
self.assertEqual(np.sum(nd_entry.value.data - input_array), 0)
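# Editor's sketch: the dtype/shape gating exercised above can be expressed
# without forte, using the numpy import already at the top of this module.
# The function name and error types below mirror the tests' expectations
# (TypeError for a dtype mismatch, AttributeError for a shape mismatch) but
# are otherwise illustrative assumptions, not forte's implementation.
def check_ndarray(arr, expected_dtype=None, expected_shape=None):
    arr = np.asarray(arr)
    if expected_dtype is not None and arr.dtype != np.dtype(expected_dtype):
        raise TypeError("dtype mismatch: got %s" % arr.dtype)
    if expected_shape is not None and arr.shape != tuple(expected_shape):
        raise AttributeError("shape mismatch: got %s" % (arr.shape,))
    return arr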
| 34.092199
| 93
| 0.598294
|
import os
import tempfile
import unittest
from ddt import data, ddt
import numpy as np
from forte.data.data_pack import DataPack
from .test_outputs.ft.onto.test_ndarray import NdEntry1, NdEntry2, NdEntry3
@ddt
class SerializationTest(unittest.TestCase):
@data(
NdEntry1,
NdEntry2,
NdEntry3
)
def test_serialization(self, TestEntry):
data_pack = DataPack()
nd_entry = TestEntry(data_pack)
data_pack.add_entry(nd_entry)
with tempfile.TemporaryDirectory() as output_dir:
output_path = os.path.join(output_dir, "datapack.json")
data_pack.serialize(output_path, indent=2)
datapack_deseri = DataPack.deserialize(output_path)
nd_entry_deseri = datapack_deseri.get_single(TestEntry)
if nd_entry.value.dtype:
self.assertEqual(nd_entry.value.dtype, nd_entry_deseri.value.dtype)
if nd_entry.value.shape:
self.assertEqual(nd_entry.value.shape, nd_entry_deseri.value.shape)
if nd_entry.value.data is not None:
self.assertEqual(np.sum(nd_entry.value.data - nd_entry_deseri.value.data), 0)
@ddt
class PropertyTest(unittest.TestCase):
@data(
(NdEntry1, np.array([1], dtype="int")),
(NdEntry1, np.array([[1, 1], [1, 1]], dtype="float")),
(NdEntry2, np.array([[1, 1], [1, 1]], dtype="float")),
(NdEntry3, np.array([1], dtype="int")),
)
def test_bad_np_array(self, input_data):
TestEntry, input_array = input_data
data_pack = DataPack()
nd_entry = TestEntry(data_pack)
if nd_entry.value.dtype and input_array.dtype != nd_entry.value.dtype:
with self.assertRaises(TypeError):
nd_entry.value.data = input_array
if nd_entry.value.shape and input_array.shape != nd_entry.value.shape:
with self.assertRaises(AttributeError):
nd_entry.value.data = input_array
@data(
(NdEntry1, [1]),
(NdEntry3, [1]),
(NdEntry1, [[[1]]]),
(NdEntry3, [[[1]]]),
)
def test_bad_py_list(self, input_data):
TestEntry, input_list = input_data
data_pack = DataPack()
nd_entry = TestEntry(data_pack)
input_array = np.array(input_list)
if nd_entry.value.shape and input_array.shape != nd_entry.value.shape:
with self.assertRaises(AttributeError):
nd_entry.value.data = input_list
@data(
(NdEntry1, 1),
(NdEntry2, 1),
(NdEntry3, 1),
)
def test_invalid_input(self, input_data):
TestEntry, invalid_value = input_data
data_pack = DataPack()
nd_entry = TestEntry(data_pack)
with self.assertRaises(ValueError):
nd_entry.value.data = invalid_value
@data(
(NdEntry1, [[1, 1], [1, 1]]),
(NdEntry1, [[1., 1.], [1., 1.]]),
(NdEntry2, [[1, 1], [1, 1]]),
(NdEntry2, [1]),
(NdEntry2, [1.]),
(NdEntry2, [[1., 1.], [1., 1.]]),
(NdEntry3, [[1, 1], [1, 1]]),
(NdEntry3, [[1., 1.], [1., 1.]]),
)
def test_valid_py_list(self, input_data):
TestEntry, input_list = input_data
data_pack = DataPack()
nd_entry = TestEntry(data_pack)
try:
nd_entry.value.data = input_list
except Exception:
self.fail()
@data(
(NdEntry1, np.array([[1, 1], [1, 1]], dtype="int")),
(NdEntry2, np.array([[1, 1], [1, 1]], dtype="int")),
(NdEntry2, np.array([1, 1], dtype="int")),
(NdEntry3, np.array([[1, 1], [1, 1]], dtype="int")),
(NdEntry3, np.array([[1, 1], [1, 1]], dtype="float")),
)
def test_valid_np_array(self, input_data):
TestEntry, input_array = input_data
data_pack = DataPack()
nd_entry = TestEntry(data_pack)
try:
nd_entry.value.data = input_array
except Exception:
self.fail()
self.assertEqual(nd_entry.value.dtype, input_array.dtype)
self.assertEqual(nd_entry.value.shape, input_array.shape)
self.assertEqual(np.sum(nd_entry.value.data - input_array), 0)
| true
| true
|
1c3fe5c3f7e422f2e584fd59cb3e47b18251a7e5
| 235
|
py
|
Python
|
pt-1/sem_7/6sem7_ex2_sum_array_elements.py
|
lucaszarza/python_coursera-usp-pt1
|
eaf8d32ec09b82755f9716237ffadb0cf8d46169
|
[
"Apache-2.0"
] | null | null | null |
pt-1/sem_7/6sem7_ex2_sum_array_elements.py
|
lucaszarza/python_coursera-usp-pt1
|
eaf8d32ec09b82755f9716237ffadb0cf8d46169
|
[
"Apache-2.0"
] | null | null | null |
pt-1/sem_7/6sem7_ex2_sum_array_elements.py
|
lucaszarza/python_coursera-usp-pt1
|
eaf8d32ec09b82755f9716237ffadb0cf8d46169
|
[
"Apache-2.0"
] | null | null | null |
def soma_elementos(lista):
sum = 0
for i in lista:
sum += i
return sum
def test_sum():
test = [1, 2, 3, 4, 5]
assert soma_elementos(test) == 15
def test_sum2():
test = [5, 5, 4, 6, 9, 2, 5]
assert soma_elementos(test) == 36
| 16.785714
| 34
| 0.617021
|
def soma_elementos(lista):
sum = 0
for i in lista:
sum += i
return sum
def test_sum():
test = [1, 2, 3, 4, 5]
assert soma_elementos(test) == 15
def test_sum2():
test = [5, 5, 4, 6, 9, 2, 5]
assert soma_elementos(test) == 36
| true
| true
|
1c3fe7b23b37e235bc5dbaeab901904b62b9b074
| 11,279
|
py
|
Python
|
thirdparty/cv_bridge/core.py
|
Tsinghua-OpenICV/carla_icv_bridge
|
4d5f8c26b1847dbb16a81fe43f146bf4a9a8da5e
|
[
"MIT"
] | null | null | null |
thirdparty/cv_bridge/core.py
|
Tsinghua-OpenICV/carla_icv_bridge
|
4d5f8c26b1847dbb16a81fe43f146bf4a9a8da5e
|
[
"MIT"
] | null | null | null |
thirdparty/cv_bridge/core.py
|
Tsinghua-OpenICV/carla_icv_bridge
|
4d5f8c26b1847dbb16a81fe43f146bf4a9a8da5e
|
[
"MIT"
] | 1
|
2020-12-19T05:48:01.000Z
|
2020-12-19T05:48:01.000Z
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# Copyright (c) 2016, Tal Regev.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sensor_msgs.msg
import sys
import cv2
import numpy as np
class CvBridgeError(TypeError):
"""
This is the error raised by :class:`cv_bridge.CvBridge` methods when they fail.
"""
pass
class CvBridge(object):
"""
The CvBridge is an object that converts between OpenCV Images and icv Image messages.
.. doctest::
:options: -ELLIPSIS, +NORMALIZE_WHITESPACE
>>> import cv2
>>> import numpy as np
>>> from cv_bridge import CvBridge
>>> br = CvBridge()
>>> dtype, n_channels = br.encoding_as_cvtype2('8UC3')
>>> im = np.ndarray(shape=(480, 640, n_channels), dtype=dtype)
>>> msg = br.cv2_to_imgmsg(im) # Convert the image to a message
>>> im2 = br.imgmsg_to_cv2(msg) # Convert the message to a new image
>>> cmprsmsg = br.cv2_to_compressed_imgmsg(im) # Convert the image to a compress message
>>> im22 = br.compressed_imgmsg_to_cv2(msg) # Convert the compress message to a new image
>>> cv2.imwrite("this_was_a_message_briefly.png", im2)
"""
def __init__(self):
import cv2
self.cvtype_to_name = {}
self.cvdepth_to_numpy_depth = {cv2.CV_8U: 'uint8', cv2.CV_8S: 'int8', cv2.CV_16U: 'uint16',
cv2.CV_16S: 'int16', cv2.CV_32S:'int32', cv2.CV_32F:'float32',
cv2.CV_64F: 'float64'}
for t in ["8U", "8S", "16U", "16S", "32S", "32F", "64F"]:
for c in [1, 2, 3, 4]:
nm = "%sC%d" % (t, c)
self.cvtype_to_name[getattr(cv2, "CV_%s" % nm)] = nm
self.numpy_type_to_cvtype = {'uint8': '8U', 'int8': '8S', 'uint16': '16U',
'int16': '16S', 'int32': '32S', 'float32': '32F',
'float64': '64F'}
self.numpy_type_to_cvtype.update(dict((v, k) for (k, v) in self.numpy_type_to_cvtype.items()))
def dtype_with_channels_to_cvtype2(self, dtype, n_channels):
return '%sC%d' % (self.numpy_type_to_cvtype[dtype.name], n_channels)
def encoding_to_cvtype2(self, encoding):
if encoding=="bgra8":
return cv2.CV_8UC4
def encoding_to_dtype_with_channels(self, encoding):
if encoding=="bgra8":
dpt=self.cvdepth_to_numpy_depth[cv2.CV_8U]
cha=4
else:
dpt=self.cvdepth_to_numpy_depth[cv2.CV_8U]
cha=4
return dpt,cha
#return self.cvdepth_to_numpy_depth[CV_MAT_DEPTHWrap(cvtype)], CV_MAT_CNWrap(cvtype)
#return self.cvtype2_to_dtype_with_channels(self.encoding_to_cvtype2(encoding))
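    # Editor's note: both branches of encoding_to_dtype_with_channels above
    # return ('uint8', 4), so this bridge effectively assumes 8-bit, 4-channel
    # (bgra8-style) images regardless of the requested encoding;
    # encoding_to_cvtype2 likewise handles only "bgra8" and implicitly
    # returns None for anything else.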
def compressed_imgmsg_to_cv2(self, cmprs_img_msg, desired_encoding = "passthrough"):
"""
Convert a sensor_msgs::CompressedImage message to an OpenCV :cpp:type:`cv::Mat`.
:param cmprs_img_msg: A :cpp:type:`sensor_msgs::CompressedImage` message
:param desired_encoding: The encoding of the image data, one of the following strings:
* ``"passthrough"``
* one of the standard strings in sensor_msgs/image_encodings.h
:rtype: :cpp:type:`cv::Mat`
:raises CvBridgeError: when conversion is not possible.
If desired_encoding is ``"passthrough"``, then the returned image has the same format as img_msg.
Otherwise desired_encoding must be one of the standard image encodings
        This function returns an OpenCV :cpp:type:`cv::Mat` on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.
If the image only has one channel, the shape has size 2 (width and height)
"""
str_msg = cmprs_img_msg.data
buf = np.ndarray(shape=(1, len(str_msg)),
dtype=np.uint8, buffer=cmprs_img_msg.data)
im = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)
if desired_encoding == "passthrough":
return im
return im
# try:
# res = cvtColor2(im, "bgr8", desired_encoding)
# except RuntimeError as e:
# raise CvBridgeError(e)
# return res
def imgmsg_to_cv2(self, img_msg, desired_encoding = "passthrough"):
"""
Convert a sensor_msgs::Image message to an OpenCV :cpp:type:`cv::Mat`.
:param img_msg: A :cpp:type:`sensor_msgs::Image` message
:param desired_encoding: The encoding of the image data, one of the following strings:
* ``"passthrough"``
* one of the standard strings in sensor_msgs/image_encodings.h
:rtype: :cpp:type:`cv::Mat`
:raises CvBridgeError: when conversion is not possible.
If desired_encoding is ``"passthrough"``, then the returned image has the same format as img_msg.
Otherwise desired_encoding must be one of the standard image encodings
        This function returns an OpenCV :cpp:type:`cv::Mat` on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.
If the image only has one channel, the shape has size 2 (width and height)
"""
dtype, n_channels = self.encoding_to_dtype_with_channels(img_msg.encoding)
dtype = np.dtype(dtype)
dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<')
if n_channels == 1:
im = np.ndarray(shape=(img_msg.height, img_msg.width),
dtype=dtype, buffer=img_msg.data)
else:
im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels),
dtype=dtype, buffer=img_msg.data)
        # If the byte order differs between the message and the system.
if img_msg.is_bigendian == (sys.byteorder == 'little'):
im = im.byteswap().newbyteorder()
#res = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
return im
# if desired_encoding == "passthrough":
# from cv2 import cvtColor2
#try:
# res = cvtColor2(im, img_msg.encoding, desired_encoding)
#except RuntimeError as e:
# raise CvBridgeError(e)
#return res
def cv2_to_compressed_imgmsg(self, cvim, dst_format = "jpg"):
"""
Convert an OpenCV :cpp:type:`cv::Mat` type to a icv sensor_msgs::CompressedImage message.
:param cvim: An OpenCV :cpp:type:`cv::Mat`
:param dst_format: The format of the image data, one of the following strings:
* from http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html
* from http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat imread(const string& filename, int flags)
* bmp, dib
* jpeg, jpg, jpe
* jp2
* png
* pbm, pgm, ppm
* sr, ras
* tiff, tif
:rtype: A sensor_msgs.msg.CompressedImage message
:raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``format``
        This function returns a sensor_msgs::CompressedImage message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.
"""
if not isinstance(cvim, (np.ndarray, np.generic)):
raise TypeError('Your input type is not a numpy array')
cmprs_img_msg = sensor_msgs.msg.CompressedImage()
cmprs_img_msg.format = dst_format
ext_format = '.' + dst_format
try:
cmprs_img_msg.data = np.array(cv2.imencode(ext_format, cvim)[1]).tostring()
except RuntimeError as e:
raise CvBridgeError(e)
return cmprs_img_msg
def cv2_to_imgmsg(self, cvim, encoding = "passthrough"):
"""
Convert an OpenCV :cpp:type:`cv::Mat` type to a icv sensor_msgs::Image message.
:param cvim: An OpenCV :cpp:type:`cv::Mat`
:param encoding: The encoding of the image data, one of the following strings:
* ``"passthrough"``
* one of the standard strings in sensor_msgs/image_encodings.h
:rtype: A sensor_msgs.msg.Image message
:raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``encoding``
If encoding is ``"passthrough"``, then the message has the same encoding as the image's OpenCV type.
Otherwise desired_encoding must be one of the standard image encodings
This function returns a sensor_msgs::Image message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.
"""
if not isinstance(cvim, (np.ndarray, np.generic)):
raise TypeError('Your input type is not a numpy array')
img_msg = sensor_msgs.msg.Image()
img_msg.height = cvim.shape[0]
img_msg.width = cvim.shape[1]
if len(cvim.shape) < 3:
cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, 1)
else:
cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, cvim.shape[2])
if encoding == "passthrough":
img_msg.encoding = cv_type
else:
img_msg.encoding = encoding
# Verify that the supplied encoding is compatible with the type of the OpenCV image
if self.cvtype_to_name[self.encoding_to_cvtype2(encoding)] != cv_type:
raise CvBridgeError("encoding specified as %s, but image has incompatible type %s" % (encoding, cv_type))
if cvim.dtype.byteorder == '>':
img_msg.is_bigendian = True
img_msg.data = cvim.tostring()
img_msg.step = len(img_msg.data) // img_msg.height
return img_msg
| 41.164234
| 152
| 0.63862
|
import sensor_msgs.msg
import sys
import cv2
import numpy as np
class CvBridgeError(TypeError):
pass
class CvBridge(object):
def __init__(self):
import cv2
self.cvtype_to_name = {}
self.cvdepth_to_numpy_depth = {cv2.CV_8U: 'uint8', cv2.CV_8S: 'int8', cv2.CV_16U: 'uint16',
cv2.CV_16S: 'int16', cv2.CV_32S:'int32', cv2.CV_32F:'float32',
cv2.CV_64F: 'float64'}
for t in ["8U", "8S", "16U", "16S", "32S", "32F", "64F"]:
for c in [1, 2, 3, 4]:
nm = "%sC%d" % (t, c)
self.cvtype_to_name[getattr(cv2, "CV_%s" % nm)] = nm
self.numpy_type_to_cvtype = {'uint8': '8U', 'int8': '8S', 'uint16': '16U',
'int16': '16S', 'int32': '32S', 'float32': '32F',
'float64': '64F'}
self.numpy_type_to_cvtype.update(dict((v, k) for (k, v) in self.numpy_type_to_cvtype.items()))
def dtype_with_channels_to_cvtype2(self, dtype, n_channels):
return '%sC%d' % (self.numpy_type_to_cvtype[dtype.name], n_channels)
def encoding_to_cvtype2(self, encoding):
if encoding=="bgra8":
return cv2.CV_8UC4
def encoding_to_dtype_with_channels(self, encoding):
if encoding=="bgra8":
dpt=self.cvdepth_to_numpy_depth[cv2.CV_8U]
cha=4
else:
dpt=self.cvdepth_to_numpy_depth[cv2.CV_8U]
cha=4
return dpt,cha
def compressed_imgmsg_to_cv2(self, cmprs_img_msg, desired_encoding = "passthrough"):
str_msg = cmprs_img_msg.data
buf = np.ndarray(shape=(1, len(str_msg)),
dtype=np.uint8, buffer=cmprs_img_msg.data)
im = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)
if desired_encoding == "passthrough":
return im
return im
def imgmsg_to_cv2(self, img_msg, desired_encoding = "passthrough"):
dtype, n_channels = self.encoding_to_dtype_with_channels(img_msg.encoding)
dtype = np.dtype(dtype)
dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<')
if n_channels == 1:
im = np.ndarray(shape=(img_msg.height, img_msg.width),
dtype=dtype, buffer=img_msg.data)
else:
im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels),
dtype=dtype, buffer=img_msg.data)
if img_msg.is_bigendian == (sys.byteorder == 'little'):
im = im.byteswap().newbyteorder()
return im
def cv2_to_compressed_imgmsg(self, cvim, dst_format = "jpg"):
if not isinstance(cvim, (np.ndarray, np.generic)):
raise TypeError('Your input type is not a numpy array')
cmprs_img_msg = sensor_msgs.msg.CompressedImage()
cmprs_img_msg.format = dst_format
ext_format = '.' + dst_format
try:
            cmprs_img_msg.data = np.array(cv2.imencode(ext_format, cvim)[1]).tobytes()
except RuntimeError as e:
raise CvBridgeError(e)
return cmprs_img_msg
def cv2_to_imgmsg(self, cvim, encoding = "passthrough"):
if not isinstance(cvim, (np.ndarray, np.generic)):
raise TypeError('Your input type is not a numpy array')
img_msg = sensor_msgs.msg.Image()
img_msg.height = cvim.shape[0]
img_msg.width = cvim.shape[1]
if len(cvim.shape) < 3:
cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, 1)
else:
cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, cvim.shape[2])
if encoding == "passthrough":
img_msg.encoding = cv_type
else:
img_msg.encoding = encoding
if self.cvtype_to_name[self.encoding_to_cvtype2(encoding)] != cv_type:
raise CvBridgeError("encoding specified as %s, but image has incompatible type %s" % (encoding, cv_type))
if cvim.dtype.byteorder == '>':
img_msg.is_bigendian = True
        img_msg.data = cvim.tobytes()
img_msg.step = len(img_msg.data) // img_msg.height
return img_msg
| true
| true
|
1c3fe8fee0a2b724d2ae89380da2ede97ecde881
| 3,121
|
py
|
Python
|
emtf_nnet/keras/layers/mutated_dense.py
|
jiafulow/emtf-nnet
|
70a6c747c221178f9db940197ea886bdb60bf3ba
|
[
"Apache-2.0"
] | null | null | null |
emtf_nnet/keras/layers/mutated_dense.py
|
jiafulow/emtf-nnet
|
70a6c747c221178f9db940197ea886bdb60bf3ba
|
[
"Apache-2.0"
] | null | null | null |
emtf_nnet/keras/layers/mutated_dense.py
|
jiafulow/emtf-nnet
|
70a6c747c221178f9db940197ea886bdb60bf3ba
|
[
"Apache-2.0"
] | null | null | null |
# The following source code was originally obtained from:
# https://github.com/keras-team/keras/blob/r2.6/keras/layers/core.py#L1066-L1270
# ==============================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras dense layers."""
import tensorflow.compat.v2 as tf
from keras.layers.core import Dense
class MutatedDense(Dense):
"""Dense layer with correction to the gradient."""
def __init__(self,
units,
**kwargs):
super().__init__(units=units, **kwargs)
self.supports_masking = True
self._compute_output_and_mask_jointly = True
def _dense(self, inputs, corr, kernel, bias=None, activation=None, dtype=None):
if dtype:
if inputs.dtype.base_dtype != dtype.base_dtype:
inputs = tf.cast(inputs, dtype)
if corr.dtype.base_dtype != dtype.base_dtype:
corr = tf.cast(corr, dtype)
rank = inputs.shape.rank
if rank == 2:
# Apply correction to the gradient while keeping the same outputs.
# f(x) = x * stop[gx] + stop[fx - x * gx]
# = stop[fx] + ((x - stop[x]) * stop[gx])
# = stop[fx] + 0
# g(x) = stop[gx] + grad[stop[fx - x * gx]]
# = stop[gx] + 0
outputs = tf.raw_ops.AddV2(
x=tf.raw_ops.MatMul(a=tf.raw_ops.Mul(x=inputs, y=tf.stop_gradient(corr)), b=kernel),
y=-tf.stop_gradient(tf.raw_ops.MatMul(a=tf.raw_ops.Mul(x=inputs, y=corr), b=kernel)))
outputs = tf.raw_ops.AddV2(
x=outputs,
y=tf.stop_gradient(tf.raw_ops.MatMul(a=inputs, b=kernel)))
else:
raise ValueError('inputs must be rank 2.')
if bias is not None:
outputs = tf.nn.bias_add(outputs, bias)
if activation is not None:
outputs = activation(outputs)
return outputs
def call(self, inputs, training=None, mask=None):
# Returns Dense(x) with a correction to the gradient
if mask is None:
mask = tf.math.is_finite(inputs)
mask = tf.cast(mask, inputs.dtype)
mean = tf.math.reduce_mean(mask, axis=0) # reduce along the batch dimension
corr = tf.math.reciprocal_no_nan(mean) # corr = 1/mean
outputs = self._dense(
inputs * mask,
corr,
self.kernel,
bias=self.bias,
activation=self.activation,
dtype=self._compute_dtype_object)
# Compute the mask and outputs simultaneously.
outputs._keras_mask = tf.math.is_finite(outputs)
return outputs
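# Hedged sanity check (not part of the original layer): verify the
# stop_gradient identity used in `_dense` above -- the forward value matches a
# plain matmul while the gradient is scaled by `corr`. All numbers invented.
if __name__ == '__main__':
  x = tf.Variable([[1.0, 2.0]])
  w = tf.constant([[1.0], [1.0]])
  corr = tf.constant([2.0, 2.0])
  with tf.GradientTape() as tape:
    y = ((x * tf.stop_gradient(corr)) @ w
         - tf.stop_gradient((x * corr) @ w)
         + tf.stop_gradient(x @ w))
  print(y.numpy(), tape.gradient(y, x).numpy())  # [[3.]] and [[2. 2.]]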
| 36.717647
| 95
| 0.627043
|
import tensorflow.compat.v2 as tf
from keras.layers.core import Dense
class MutatedDense(Dense):
def __init__(self,
units,
**kwargs):
super().__init__(units=units, **kwargs)
self.supports_masking = True
self._compute_output_and_mask_jointly = True
def _dense(self, inputs, corr, kernel, bias=None, activation=None, dtype=None):
if dtype:
if inputs.dtype.base_dtype != dtype.base_dtype:
inputs = tf.cast(inputs, dtype)
if corr.dtype.base_dtype != dtype.base_dtype:
corr = tf.cast(corr, dtype)
rank = inputs.shape.rank
if rank == 2:
outputs = tf.raw_ops.AddV2(
x=tf.raw_ops.MatMul(a=tf.raw_ops.Mul(x=inputs, y=tf.stop_gradient(corr)), b=kernel),
y=-tf.stop_gradient(tf.raw_ops.MatMul(a=tf.raw_ops.Mul(x=inputs, y=corr), b=kernel)))
outputs = tf.raw_ops.AddV2(
x=outputs,
y=tf.stop_gradient(tf.raw_ops.MatMul(a=inputs, b=kernel)))
else:
raise ValueError('inputs must be rank 2.')
if bias is not None:
outputs = tf.nn.bias_add(outputs, bias)
if activation is not None:
outputs = activation(outputs)
return outputs
def call(self, inputs, training=None, mask=None):
if mask is None:
mask = tf.math.is_finite(inputs)
mask = tf.cast(mask, inputs.dtype)
mean = tf.math.reduce_mean(mask, axis=0)
corr = tf.math.reciprocal_no_nan(mean)
outputs = self._dense(
inputs * mask,
corr,
self.kernel,
bias=self.bias,
activation=self.activation,
dtype=self._compute_dtype_object)
outputs._keras_mask = tf.math.is_finite(outputs)
return outputs
| true
| true
|
1c3fe9a0321b68f22254ea683c2077a5ca3f207c
| 5,419
|
py
|
Python
|
enno/utils/annotation.py
|
HPI-Information-Systems/enno
|
25d9c7aaf4a9ef0090bc7eca947684e354621586
|
[
"MIT"
] | 5
|
2019-05-15T05:53:27.000Z
|
2021-01-07T03:00:53.000Z
|
enno/utils/annotation.py
|
HPI-Information-Systems/enno
|
25d9c7aaf4a9ef0090bc7eca947684e354621586
|
[
"MIT"
] | 8
|
2017-09-28T13:41:49.000Z
|
2018-06-27T09:38:51.000Z
|
enno/utils/annotation.py
|
TimRepke/enno
|
25d9c7aaf4a9ef0090bc7eca947684e354621586
|
[
"MIT"
] | 4
|
2017-09-28T14:34:04.000Z
|
2017-10-11T14:23:25.000Z
|
import json
import re
class Annotation:
def __init__(self, wrapper='plaintext'):
self.anno = {}
self.wrapper = wrapper
# some helper "indices"
self.max = {
'denotations': 0,
'relations': 0
}
self.ids = {
'denotations': [],
'relations': []
}
@property
def text(self):
return self.anno.get('text', '')
@text.setter
def text(self, txt):
self.anno['text'] = txt
@property
def id(self):
return self.anno.get('id', '')
@id.setter
def id(self, id):
self.anno['id'] = id
@property
def meta(self):
return self.anno.get('meta', {})
@meta.setter
def meta(self, obj):
tmp = self.meta
tmp.update(obj)
self.anno['meta'] = tmp
@property
def wrapper(self):
return self.anno.get('wrapper', 'plaintext')
@wrapper.setter
def wrapper(self, wrapper_module):
self.anno['wrapper'] = wrapper_module
@property
def relations(self):
return self.anno.get('relations', [])
@relations.setter
def relations(self, lst):
for r in lst:
self.upsert_relation(r['origin'], r['target'], typ=r.get('type', None),
id=r.get('id', None), meta=r.get('meta', None))
@property
def denotations(self):
return self.anno.get('denotations', [])
@denotations.setter
def denotations(self, lst):
for d in lst:
self.upsert_denotation(d['start'], d['end'], text=d.get('text', None),
typ=d.get('type', None), id=d.get('id', None), meta=d.get('meta', None))
def upsert_denotation(self, start, end, text=None, typ=None, id=None, meta=None):
lst = self.denotations
deno = {
'id': id,
'start': start,
'end': end,
'text': text,
'type': typ,
'meta': meta
}
if text is None:
deno['text'] = self.text[start:end]
if id is not None and self.contains_id(id, 'denotations'):
i = self.get_index(id, lst)
lst[i].update(deno)
elif id is not None:
lst.append(deno)
self.ids['denotations'].append(deno['id'])
self.max['denotations'] = max([self.max['denotations'], int(deno['id'])])
else:
deno['id'] = self.max['denotations'] + 1
lst.append(deno)
self.ids['denotations'].append(deno['id'])
self.max['denotations'] += 1
self.anno['denotations'] = lst
return deno
def delete_denotation(self, denotation):
denotations = self.denotations
remove_index = self.get_index(denotation, denotations)
removed_denotation = denotations.pop(remove_index)
relations = self.relations
removed_relations = []
for relation in relations:
if str(relation['origin']) == str(denotation) or str(relation['target']) == str(denotation):
removed_relations.append(self.delete_relation(relation['id']))
self.anno['denotations'] = denotations
return removed_relations
def upsert_relation(self, origin, target, id=None, typ=None, meta=None):
lst = self.relations
rela = {
'id': id,
'origin': origin,
'target': target,
'type': typ,
'meta': meta
}
if id is not None and self.contains_id(id, 'relations'):
i = self.get_index(id, lst)
lst[i].update(rela)
elif id is not None:
lst.append(rela)
self.ids['relations'].append(rela['id'])
self.max['relations'] = max([self.max['relations'], int(rela['id'])])
else:
rela['id'] = self.max['relations'] + 1
lst.append(rela)
self.ids['relations'].append(rela['id'])
self.max['relations'] += 1
self.anno['relations'] = lst
return rela
def delete_relation(self, relation):
relations = self.relations
remove_index = self.get_index(relation, relations)
removed_relation = relations.pop(remove_index)
self.anno['relations'] = relations
return removed_relation
def __ensure_index(self, lst):
if len(self.anno.get(lst, [])) > 0 and len(self.ids[lst]) == 0:
for l in self.anno.get(lst, []):
self.ids[lst].append(l['id'])
self.max[lst] = max([self.max[lst], int(l['id'])])
def contains_id(self, id, lst):
self.__ensure_index(lst)
return id in self.ids[lst]
def get_index(self, id, lst):
for i, d in enumerate(lst):
if str(d['id']) == str(id):
return i
return None
@staticmethod
def from_json(obj):
anno = Annotation()
anno.text = obj['text']
anno.denotations = obj.get('denotations', [])
anno.relations = obj.get('relations', [])
anno.meta = obj.get('meta', {})
return anno
@staticmethod
def from_file(path):
        with open(path, 'r') as f:
            obj = json.loads(f.read())
return Annotation.from_json(obj)
def __repr__(self):
repr = self.anno
return json.dumps(repr, indent=2)
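# Hedged usage sketch (not part of the original module): build an annotation,
# upsert a denotation and a relation, and print the JSON dump. The text and
# labels are invented for illustration.
if __name__ == '__main__':
    anno = Annotation()
    anno.text = 'Alice met Bob.'
    d1 = anno.upsert_denotation(0, 5, typ='Person')    # auto-assigns id 1
    d2 = anno.upsert_denotation(10, 13, typ='Person')  # auto-assigns id 2
    anno.upsert_relation(d1['id'], d2['id'], typ='met')
    print(anno)  # __repr__ dumps the underlying dict as indented JSON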
| 28.824468
| 107
| 0.532017
|
import json
import re
class Annotation:
def __init__(self, wrapper='plaintext'):
self.anno = {}
self.wrapper = wrapper
self.max = {
'denotations': 0,
'relations': 0
}
self.ids = {
'denotations': [],
'relations': []
}
@property
def text(self):
return self.anno.get('text', '')
@text.setter
def text(self, txt):
self.anno['text'] = txt
@property
def id(self):
return self.anno.get('id', '')
@id.setter
def id(self, id):
self.anno['id'] = id
@property
def meta(self):
return self.anno.get('meta', {})
@meta.setter
def meta(self, obj):
tmp = self.meta
tmp.update(obj)
self.anno['meta'] = tmp
@property
def wrapper(self):
return self.anno.get('wrapper', 'plaintext')
@wrapper.setter
def wrapper(self, wrapper_module):
self.anno['wrapper'] = wrapper_module
@property
def relations(self):
return self.anno.get('relations', [])
@relations.setter
def relations(self, lst):
for r in lst:
self.upsert_relation(r['origin'], r['target'], typ=r.get('type', None),
id=r.get('id', None), meta=r.get('meta', None))
@property
def denotations(self):
return self.anno.get('denotations', [])
@denotations.setter
def denotations(self, lst):
for d in lst:
self.upsert_denotation(d['start'], d['end'], text=d.get('text', None),
typ=d.get('type', None), id=d.get('id', None), meta=d.get('meta', None))
def upsert_denotation(self, start, end, text=None, typ=None, id=None, meta=None):
lst = self.denotations
deno = {
'id': id,
'start': start,
'end': end,
'text': text,
'type': typ,
'meta': meta
}
if text is None:
deno['text'] = self.text[start:end]
if id is not None and self.contains_id(id, 'denotations'):
i = self.get_index(id, lst)
lst[i].update(deno)
elif id is not None:
lst.append(deno)
self.ids['denotations'].append(deno['id'])
self.max['denotations'] = max([self.max['denotations'], int(deno['id'])])
else:
deno['id'] = self.max['denotations'] + 1
lst.append(deno)
self.ids['denotations'].append(deno['id'])
self.max['denotations'] += 1
self.anno['denotations'] = lst
return deno
def delete_denotation(self, denotation):
denotations = self.denotations
remove_index = self.get_index(denotation, denotations)
removed_denotation = denotations.pop(remove_index)
relations = self.relations
removed_relations = []
for relation in relations:
if str(relation['origin']) == str(denotation) or str(relation['target']) == str(denotation):
removed_relations.append(self.delete_relation(relation['id']))
self.anno['denotations'] = denotations
return removed_relations
def upsert_relation(self, origin, target, id=None, typ=None, meta=None):
lst = self.relations
rela = {
'id': id,
'origin': origin,
'target': target,
'type': typ,
'meta': meta
}
if id is not None and self.contains_id(id, 'relations'):
i = self.get_index(id, lst)
lst[i].update(rela)
elif id is not None:
lst.append(rela)
self.ids['relations'].append(rela['id'])
self.max['relations'] = max([self.max['relations'], int(rela['id'])])
else:
rela['id'] = self.max['relations'] + 1
lst.append(rela)
self.ids['relations'].append(rela['id'])
self.max['relations'] += 1
self.anno['relations'] = lst
return rela
def delete_relation(self, relation):
relations = self.relations
remove_index = self.get_index(relation, relations)
removed_relation = relations.pop(remove_index)
self.anno['relations'] = relations
return removed_relation
def __ensure_index(self, lst):
if len(self.anno.get(lst, [])) > 0 and len(self.ids[lst]) == 0:
for l in self.anno.get(lst, []):
self.ids[lst].append(l['id'])
self.max[lst] = max([self.max[lst], int(l['id'])])
def contains_id(self, id, lst):
self.__ensure_index(lst)
return id in self.ids[lst]
def get_index(self, id, lst):
for i, d in enumerate(lst):
if str(d['id']) == str(id):
return i
return None
@staticmethod
def from_json(obj):
anno = Annotation()
anno.text = obj['text']
anno.denotations = obj.get('denotations', [])
anno.relations = obj.get('relations', [])
anno.meta = obj.get('meta', {})
return anno
@staticmethod
def from_file(path):
        with open(path, 'r') as f:
            obj = json.loads(f.read())
return Annotation.from_json(obj)
def __repr__(self):
repr = self.anno
return json.dumps(repr, indent=2)
| true
| true
|
1c3fea7ec6376c09b265b6fd7238ec2962f0cb87
| 866
|
py
|
Python
|
scripts/deployment/deploy_multisig_keyholders.py
|
JohnAllerdyce/Sovryn-smart-contracts
|
e0dc44582b4e4ae2fc1bc3f9a7d775384c69d169
|
[
"Apache-2.0"
] | 108
|
2020-08-30T17:52:32.000Z
|
2022-02-26T00:00:15.000Z
|
scripts/deployment/deploy_multisig_keyholders.py
|
JohnAllerdyce/Sovryn-smart-contracts
|
e0dc44582b4e4ae2fc1bc3f9a7d775384c69d169
|
[
"Apache-2.0"
] | 181
|
2020-08-24T09:28:53.000Z
|
2022-02-11T13:22:22.000Z
|
scripts/deployment/deploy_multisig_keyholders.py
|
JohnAllerdyce/Sovryn-smart-contracts
|
e0dc44582b4e4ae2fc1bc3f9a7d775384c69d169
|
[
"Apache-2.0"
] | 36
|
2020-09-10T07:53:41.000Z
|
2022-03-26T00:35:30.000Z
|
from brownie import *
import json
def main():
thisNetwork = network.show_active()
if thisNetwork == "development":
acct = accounts[0]
# configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "testnet" or thisNetwork == "rsk-mainnet":
acct = accounts.load("rskdeployer")
else:
raise Exception("network not supported")
if thisNetwork == "rsk-mainnet":
configFile = open('./scripts/contractInteraction/mainnet_contracts.json')
elif thisNetwork == "testnet":
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
contracts = json.load(configFile)
timelockOwnerAddress = contracts['timelockOwner']
multiSigKeyHolders= acct.deploy(MultiSigKeyHolders)
multiSigKeyHolders.transferOwnership(timelockOwnerAddress)
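    # Hedged post-deploy check (a sketch, not part of the original script):
    # assuming MultiSigKeyHolders exposes the usual Ownable `owner()` getter,
    # ownership should now rest with the timelock.
    # assert multiSigKeyHolders.owner() == timelockOwnerAddress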
| 36.083333
| 84
| 0.700924
|
from brownie import *
import json
def main():
thisNetwork = network.show_active()
if thisNetwork == "development":
acct = accounts[0]
elif thisNetwork == "testnet" or thisNetwork == "rsk-mainnet":
acct = accounts.load("rskdeployer")
else:
raise Exception("network not supported")
if thisNetwork == "rsk-mainnet":
configFile = open('./scripts/contractInteraction/mainnet_contracts.json')
elif thisNetwork == "testnet":
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
contracts = json.load(configFile)
timelockOwnerAddress = contracts['timelockOwner']
multiSigKeyHolders= acct.deploy(MultiSigKeyHolders)
multiSigKeyHolders.transferOwnership(timelockOwnerAddress)
| true
| true
|
1c3fecd87798632e7d2525d71f35386553f5ecb5
| 10,785
|
py
|
Python
|
pyplusplus/creators_factory/sort_algorithms.py
|
electronicvisions/pyplusplus
|
4d88bb8754d22654a61202ae8adc222807953e38
|
[
"BSL-1.0"
] | 5
|
2021-01-29T19:54:34.000Z
|
2022-03-23T11:16:37.000Z
|
pyplusplus/creators_factory/sort_algorithms.py
|
electronicvisions/pyplusplus
|
4d88bb8754d22654a61202ae8adc222807953e38
|
[
"BSL-1.0"
] | null | null | null |
pyplusplus/creators_factory/sort_algorithms.py
|
electronicvisions/pyplusplus
|
4d88bb8754d22654a61202ae8adc222807953e38
|
[
"BSL-1.0"
] | null | null | null |
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from pygccxml import declarations
from pyplusplus import decl_wrappers
class COLOR:
WHITE = 0
GRAY = 1
BLACK = 2
class class_organizer_t(object):
def __init__( self, decls, include_vars=False):
object.__init__( self )
self.__include_vars = include_vars
self.__classes = [x for x in decls if isinstance( x, declarations.class_t )]
self.__classes.sort( key = lambda cls: cls.decl_string )
self.__dependencies_graph = self._build_graph()
self.__time = 0
self.__colors = dict( list(zip( list(self.__dependencies_graph.keys())
, [ COLOR.WHITE ] * len( self.__dependencies_graph ) )) )
self.__class_discovered = dict( list(zip( list(self.__dependencies_graph.keys())
, [ 0 ] * len( self.__dependencies_graph ) )) )
self.__class_treated = dict( list(zip( list(self.__dependencies_graph.keys())
, [ 0 ] * len( self.__dependencies_graph ) )) )
self.__desired_order = []
self._topological_sort()
def _build_graph(self):
full_name = declarations.full_name
        graph = {}
for class_ in self.__classes:
assert isinstance( class_, declarations.class_t )
fname = full_name( class_ )
graph[ fname ] = self.__find_out_class_dependencies( class_ )
return graph
def __find_out_class_dependencies( self, class_ ):
full_name = declarations.full_name
        #class depends on its base classes
i_depend_on_them = set( [ full_name( base.related_class ) for base in class_.bases ] )
        #class depends on all classes that are used as argument types in
        # functions, where those arguments have default values
calldefs = [declaration for declaration in declarations.make_flatten( class_ ) if isinstance( declaration, declarations.calldef_t )]
for calldef in calldefs:
for arg in calldef.arguments:
if declarations.is_enum( arg.decl_type ):
top_class_inst = self.__get_top_class_inst( declarations.enum_declaration( arg.decl_type ) )
if top_class_inst:
i_depend_on_them.add( full_name( top_class_inst ) )
continue
if not arg.default_value:
continue
if declarations.is_pointer( arg.decl_type ) and arg.default_value == 0:
continue
base_type = declarations.base_type( arg.decl_type )
if not isinstance( base_type, declarations.declarated_t ):
continue
top_class_inst = self.__get_top_class_inst( base_type.declaration )
if top_class_inst:
i_depend_on_them.add( full_name( top_class_inst ) )
if self.__include_vars:
vars = [declaration for declaration in declarations.make_flatten( class_ ) if isinstance( declaration, declarations.variable_t )]
for var in vars:
if declarations.is_pointer( var.decl_type ):
continue
base_type = declarations.base_type( var.decl_type )
if not isinstance( base_type, declarations.declarated_t ):
continue
top_class_inst = self.__get_top_class_inst( base_type.declaration )
if top_class_inst:
i_depend_on_them.add( full_name( top_class_inst ) )
for internal_cls in class_.classes(allow_empty=True):
internal_cls_dependencies = self.__find_out_class_dependencies( internal_cls )
i_depend_on_them.update( internal_cls_dependencies )
i_depend_on_them = list( i_depend_on_them )
i_depend_on_them.sort()
return i_depend_on_them
def __get_top_class_inst( self, declaration ):
curr = declaration
while isinstance( curr.parent, declarations.class_t ):
curr = curr.parent
if isinstance( curr, declarations.class_t ):
return curr
def _topological_sort(self):
self._dfs()
def _dfs( self ):
for class_ in sorted( self.__dependencies_graph.keys() ):
if self.__colors[class_] == COLOR.WHITE:
self._dfs_visit(class_)
def _dfs_visit(self, base):
self.__colors[base] = COLOR.GRAY
self.__time += 1
self.__class_discovered[base] = self.__time
for derived in self.__dependencies_graph[base]:
if derived in self.__colors and self.__colors[derived] == COLOR.WHITE:
self._dfs_visit( derived )
else:
pass
                #there is a use case where a base class is defined within
                #some class but its derived classes are defined outside of it.
                #Right now `Py++` doesn't support this situation.
self.__colors[base] = COLOR.BLACK
self.__time += 1
        self.__class_treated[base] = self.__time
self.__desired_order.append(base)
def desired_order(self):
full_name = declarations.full_name
fname2inst = {}
for class_inst in self.__classes:
fname2inst[ full_name( class_inst ) ] = class_inst
answer = []
for fname in self.__desired_order:
answer.append( fname2inst[fname] )
return answer
def cmp_to_key(mycmp):
"""Convert a cmp= function into a key= function"""
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
class calldef_organizer_t( object ):
    #Take a look at this post:
    # http://mail.python.org/pipermail/c++-sig/2006-October/011463.html
    #calldef_organizer_t takes into account only required arguments.
    #The following rules are implemented:
    #1. calldef( bool ) will be the last registered function
    #2. T* will come after T ( const T& )
def __init__( self ):
object.__init__( self )
        #preserve the order in which functions were defined
        self.cmp_calldefs_fallback = lambda d1, d2: (d1.location.line > d2.location.line) - (d1.location.line < d2.location.line)
def build_groups( self, decls ):
groups = { None: [] }
decl2order = {}
for index,d in enumerate( decls ):
decl2order[d] = index
if not isinstance( d, declarations.calldef_t ) or 0 == len( d.required_args ):
groups[ None ].append( d )
else:
key = ( d.name, len( d.required_args ) )
if key not in groups:
groups[ key ] = []
groups[key].append( d )
#keep backward compatibility
to_be_deleted = []
for group, group_decls in groups.items():
if None is group:
continue
if len( group_decls ) == 1:
groups[ None ].append( group_decls[0] )
to_be_deleted.append( group )
for group in to_be_deleted:
del groups[ group ]
groups[ None ].sort( key=lambda d: decl2order[d] )
return groups
def cmp_args_types( self, t1, t2 ):
return decl_wrappers.algorithm.registration_order.is_related( t1, t2 )
def cmp_calldefs( self, f1, f2 ):
result = self.cmp_args_types( f1.required_args[-1].decl_type, f2.required_args[-1].decl_type )
if None is result:
result = self.cmp_calldefs_fallback( f1, f2 )
return result
def sort_groups( self, groups ):
for group in list(groups.keys()):
if None is group:
continue
groups[ group ].sort( key=cmp_to_key(self.cmp_calldefs) )
def join_groups( self, groups ):
decls = []
keys = set(groups.keys())
if None in keys:
keys.remove(None)
for group in sorted(keys):
decls.extend( groups[group] )
return decls
def sort( self, decls ):
groups = self.build_groups( decls )
self.sort_groups(groups)
result = self.join_groups(groups)
return result
def sort_classes( classes, include_vars=False ):
organizer = class_organizer_t( classes, include_vars=include_vars )
return organizer.desired_order()
def sort_calldefs( decls ):
return calldef_organizer_t().sort( decls )
USE_CALLDEF_ORGANIZER = False
#If you understand what problem calldef_organizer_t solves, then maybe you
#should use this.
def sort( decls ):
classes = [x for x in decls if isinstance( x, declarations.class_t )]
ordered = sort_classes( classes )
ids = set( [ id( inst ) for inst in ordered ] )
for declaration in decls:
if id( declaration ) not in ids:
ids.add( id(declaration) )
ordered.append( declaration )
    #a type should be exported before it can be used.
variables = []
enums = []
others = []
classes = []
constructors = []
for inst in ordered:
if isinstance( inst, declarations.variable_t ):
variables.append( inst )
elif isinstance( inst, declarations.enumeration_t ):
enums.append( inst )
elif isinstance( inst, ( declarations.class_t, declarations.class_declaration_t ) ):
classes.append( inst )
elif isinstance( inst, declarations.constructor_t ):
constructors.append( inst )
else:
others.append( inst )
    #this prevents Py++ from changing the order of the generated code
cmp_by_name = lambda d: d.name
cmp_by_line = lambda d: d.location.line
enums.sort( key=cmp_by_name )
variables.sort( key=cmp_by_name )
if USE_CALLDEF_ORGANIZER:
others = sort_calldefs(others)
constructors = sort_calldefs(constructors)
else:
others.sort( key=cmp_by_name )
constructors.sort( key=cmp_by_line )
new_ordered = []
new_ordered.extend( enums )
new_ordered.extend( classes )
new_ordered.extend( constructors )
new_ordered.extend( others )
new_ordered.extend( variables )
    return new_ordered
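# Hedged usage sketch (not part of the original module): `cmp_to_key` above
# mirrors functools.cmp_to_key, turning a three-way comparator into a key
# function. The comparator below is invented for illustration.
if __name__ == '__main__':
    by_length = lambda a, b: (len(a) > len(b)) - (len(a) < len(b))
    print(sorted(['ccc', 'a', 'bb'], key=cmp_to_key(by_length)))  # ['a', 'bb', 'ccc']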
| 38.935018
| 141
| 0.612239
|
from pygccxml import declarations
from pyplusplus import decl_wrappers
class COLOR:
WHITE = 0
GRAY = 1
BLACK = 2
class class_organizer_t(object):
def __init__( self, decls, include_vars=False):
object.__init__( self )
self.__include_vars = include_vars
self.__classes = [x for x in decls if isinstance( x, declarations.class_t )]
self.__classes.sort( key = lambda cls: cls.decl_string )
self.__dependencies_graph = self._build_graph()
self.__time = 0
self.__colors = dict( list(zip( list(self.__dependencies_graph.keys())
, [ COLOR.WHITE ] * len( self.__dependencies_graph ) )) )
self.__class_discovered = dict( list(zip( list(self.__dependencies_graph.keys())
, [ 0 ] * len( self.__dependencies_graph ) )) )
self.__class_treated = dict( list(zip( list(self.__dependencies_graph.keys())
, [ 0 ] * len( self.__dependencies_graph ) )) )
self.__desired_order = []
self._topological_sort()
def _build_graph(self):
full_name = declarations.full_name
graph = {}
for class_ in self.__classes:
assert isinstance( class_, declarations.class_t )
fname = full_name( class_ )
graph[ fname ] = self.__find_out_class_dependencies( class_ )
return graph
def __find_out_class_dependencies( self, class_ ):
full_name = declarations.full_name
i_depend_on_them = set( [ full_name( base.related_class ) for base in class_.bases ] )
        #class depends on all classes that are used as argument types in
        # functions, where those arguments have default values
calldefs = [declaration for declaration in declarations.make_flatten( class_ ) if isinstance( declaration, declarations.calldef_t )]
for calldef in calldefs:
for arg in calldef.arguments:
if declarations.is_enum( arg.decl_type ):
top_class_inst = self.__get_top_class_inst( declarations.enum_declaration( arg.decl_type ) )
if top_class_inst:
i_depend_on_them.add( full_name( top_class_inst ) )
continue
if not arg.default_value:
continue
if declarations.is_pointer( arg.decl_type ) and arg.default_value == 0:
continue
base_type = declarations.base_type( arg.decl_type )
if not isinstance( base_type, declarations.declarated_t ):
continue
top_class_inst = self.__get_top_class_inst( base_type.declaration )
if top_class_inst:
i_depend_on_them.add( full_name( top_class_inst ) )
if self.__include_vars:
vars = [declaration for declaration in declarations.make_flatten( class_ ) if isinstance( declaration, declarations.variable_t )]
for var in vars:
if declarations.is_pointer( var.decl_type ):
continue
base_type = declarations.base_type( var.decl_type )
if not isinstance( base_type, declarations.declarated_t ):
continue
top_class_inst = self.__get_top_class_inst( base_type.declaration )
if top_class_inst:
i_depend_on_them.add( full_name( top_class_inst ) )
for internal_cls in class_.classes(allow_empty=True):
internal_cls_dependencies = self.__find_out_class_dependencies( internal_cls )
i_depend_on_them.update( internal_cls_dependencies )
i_depend_on_them = list( i_depend_on_them )
i_depend_on_them.sort()
return i_depend_on_them
def __get_top_class_inst( self, declaration ):
curr = declaration
while isinstance( curr.parent, declarations.class_t ):
curr = curr.parent
if isinstance( curr, declarations.class_t ):
return curr
def _topological_sort(self):
self._dfs()
def _dfs( self ):
for class_ in sorted( self.__dependencies_graph.keys() ):
if self.__colors[class_] == COLOR.WHITE:
self._dfs_visit(class_)
def _dfs_visit(self, base):
self.__colors[base] = COLOR.GRAY
self.__time += 1
self.__class_discovered[base] = self.__time
for derived in self.__dependencies_graph[base]:
if derived in self.__colors and self.__colors[derived] == COLOR.WHITE:
self._dfs_visit( derived )
else:
pass
                #there is a use case where a base class is defined within
                #some class but its derived classes are defined outside of it.
                #Right now `Py++` doesn't support this situation.
self.__colors[base] = COLOR.BLACK
self.__time += 1
        self.__class_treated[base] = self.__time
self.__desired_order.append(base)
def desired_order(self):
full_name = declarations.full_name
fname2inst = {}
for class_inst in self.__classes:
fname2inst[ full_name( class_inst ) ] = class_inst
answer = []
for fname in self.__desired_order:
answer.append( fname2inst[fname] )
return answer
def cmp_to_key(mycmp):
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
class calldef_organizer_t( object ):
def __init__( self ):
object.__init__( self )
        self.cmp_calldefs_fallback = lambda d1, d2: (d1.location.line > d2.location.line) - (d1.location.line < d2.location.line)
def build_groups( self, decls ):
groups = { None: [] }
decl2order = {}
for index,d in enumerate( decls ):
decl2order[d] = index
if not isinstance( d, declarations.calldef_t ) or 0 == len( d.required_args ):
groups[ None ].append( d )
else:
key = ( d.name, len( d.required_args ) )
if key not in groups:
groups[ key ] = []
groups[key].append( d )
to_be_deleted = []
for group, group_decls in groups.items():
if None is group:
continue
if len( group_decls ) == 1:
groups[ None ].append( group_decls[0] )
to_be_deleted.append( group )
for group in to_be_deleted:
del groups[ group ]
groups[ None ].sort( key=lambda d: decl2order[d] )
return groups
def cmp_args_types( self, t1, t2 ):
return decl_wrappers.algorithm.registration_order.is_related( t1, t2 )
def cmp_calldefs( self, f1, f2 ):
result = self.cmp_args_types( f1.required_args[-1].decl_type, f2.required_args[-1].decl_type )
if None is result:
result = self.cmp_calldefs_fallback( f1, f2 )
return result
def sort_groups( self, groups ):
for group in list(groups.keys()):
if None is group:
continue
groups[ group ].sort( key=cmp_to_key(self.cmp_calldefs) )
def join_groups( self, groups ):
decls = []
keys = set(groups.keys())
if None in keys:
keys.remove(None)
for group in sorted(keys):
decls.extend( groups[group] )
return decls
def sort( self, decls ):
groups = self.build_groups( decls )
self.sort_groups(groups)
result = self.join_groups(groups)
return result
def sort_classes( classes, include_vars=False ):
organizer = class_organizer_t( classes, include_vars=include_vars )
return organizer.desired_order()
def sort_calldefs( decls ):
return calldef_organizer_t().sort( decls )
USE_CALLDEF_ORGANIZER = False
def sort( decls ):
classes = [x for x in decls if isinstance( x, declarations.class_t )]
ordered = sort_classes( classes )
ids = set( [ id( inst ) for inst in ordered ] )
for declaration in decls:
if id( declaration ) not in ids:
ids.add( id(declaration) )
ordered.append( declaration )
variables = []
enums = []
others = []
classes = []
constructors = []
for inst in ordered:
if isinstance( inst, declarations.variable_t ):
variables.append( inst )
elif isinstance( inst, declarations.enumeration_t ):
enums.append( inst )
elif isinstance( inst, ( declarations.class_t, declarations.class_declaration_t ) ):
classes.append( inst )
elif isinstance( inst, declarations.constructor_t ):
constructors.append( inst )
else:
others.append( inst )
cmp_by_name = lambda d: d.name
cmp_by_line = lambda d: d.location.line
enums.sort( key=cmp_by_name )
variables.sort( key=cmp_by_name )
if USE_CALLDEF_ORGANIZER:
others = sort_calldefs(others)
constructors = sort_calldefs(constructors)
else:
others.sort( key=cmp_by_name )
constructors.sort( key=cmp_by_line )
new_ordered = []
new_ordered.extend( enums )
new_ordered.extend( classes )
new_ordered.extend( constructors )
new_ordered.extend( others )
new_ordered.extend( variables )
return new_ordered
| true
| true
|
1c3fef2e535e5662f29af722fb99adb738d0b837
| 3,279
|
py
|
Python
|
jwql/tests/test_calculations.py
|
cracraft/jwql
|
030c1663bc433465e01ad803e1578a2bc53035f4
|
[
"BSD-3-Clause"
] | 42
|
2018-10-03T13:38:18.000Z
|
2022-03-11T12:19:32.000Z
|
jwql/tests/test_calculations.py
|
cracraft/jwql
|
030c1663bc433465e01ad803e1578a2bc53035f4
|
[
"BSD-3-Clause"
] | 723
|
2018-08-29T18:29:49.000Z
|
2022-03-31T21:09:20.000Z
|
jwql/tests/test_calculations.py
|
cracraft/jwql
|
030c1663bc433465e01ad803e1578a2bc53035f4
|
[
"BSD-3-Clause"
] | 30
|
2018-08-29T18:17:32.000Z
|
2022-03-10T19:43:39.000Z
|
#! /usr/bin/env python
"""Tests for the ``calculations`` module.
Authors
-------
- Bryan Hilbert
Use
---
These tests can be run via the command line (omit the ``-s`` to
suppress verbose output to stdout):
::
pytest -s test_calculations.py
"""
import numpy as np
from jwql.utils import calculations
def test_double_gaussian_fit():
"""Test the double Gaussian fitting function"""
amplitude1 = 500
mean_value1 = 0.5
sigma_value1 = 0.05
amplitude2 = 300
mean_value2 = 0.4
sigma_value2 = 0.03
bin_centers = np.arange(0., 1.1, 0.007)
input_params = [amplitude1, mean_value1, sigma_value1, amplitude2, mean_value2, sigma_value2]
input_values = calculations.double_gaussian(bin_centers, *input_params)
initial_params = [np.max(input_values), 0.55, 0.1, np.max(input_values), 0.5, 0.05]
params, sigma = calculations.double_gaussian_fit(bin_centers, input_values, initial_params)
assert np.allclose(np.array(params[0:3]), np.array([amplitude2, mean_value2, sigma_value2]),
atol=0, rtol=0.000001)
assert np.allclose(np.array(params[3:]), np.array([amplitude1, mean_value1, sigma_value1]),
atol=0, rtol=0.000001)
def test_gaussian1d_fit():
"""Test histogram fitting function"""
mean_value = 0.5
sigma_value = 0.05
image = np.random.normal(loc=mean_value, scale=sigma_value, size=(100, 100))
hist, bin_edges = np.histogram(image, bins='auto')
bin_centers = (bin_edges[1:] + bin_edges[0: -1]) / 2.
initial_params = [np.max(hist), 0.55, 0.1]
amplitude, peak, width = calculations.gaussian1d_fit(bin_centers, hist, initial_params)
assert np.isclose(peak[0], mean_value, atol=0.0035, rtol=0.)
assert np.isclose(width[0], sigma_value, atol=0.0035, rtol=0.)
assert ((mean_value <= peak[0] + 7 * peak[1]) & (mean_value >= peak[0] - 7 * peak[1]))
assert ((sigma_value <= width[0] + 7 * width[1]) & (sigma_value >= width[0] - 7 * width[1]))
def test_mean_image():
"""Test the sigma-clipped mean and stdev image calculator"""
# Create a stack of 50 5x5 pixel images
nstack = 50
cube = np.zeros((nstack, 5, 5))
# Set alternating frames equal to 4 and 5
for i in range(nstack):
if i % 2 == 0:
cube[i, :, :] = 4.
else:
cube[i, :, :] = 5.
    # Insert a few signal values that will be removed by sigma clipping.
    # Make sure to "remove" an equal number of 4's and 5's from each
    # pixel in order to keep the mean at 4.5 and the stdev at 0.5
cube[0, 0, 0] = 55.
cube[1, 0, 0] = -78.
cube[3, 3, 3] = 150.
cube[2, 3, 3] = 32.
cube[1, 4, 4] = -96.
cube[4, 4, 4] = -25.
mean_img, dev_img = calculations.mean_image(cube, sigma_threshold=3)
assert np.all(mean_img == 4.5)
assert np.all(dev_img == 0.5)
def test_mean_stdev():
"""Test calcualtion of the sigma-clipped mean from an image"""
image = np.zeros((50, 50)) + 1.
badx = [1, 4, 10, 14, 16, 20, 22, 25, 29, 30]
bady = [13, 27, 43, 21, 1, 32, 25, 21, 9, 14]
for x, y in zip(badx, bady):
image[y, x] = 100.
meanval, stdval = calculations.mean_stdev(image, sigma_threshold=3)
assert meanval == 1.
assert stdval == 0.
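# Hedged illustration (not part of the original tests): the sigma-clipped mean
# these tests exercise can be emulated with plain numpy. A single-pass sketch:
def _sigma_clipped_mean(data, sigma_threshold=3):
    data = np.asarray(data, dtype=float)
    keep = np.abs(data - data.mean()) <= sigma_threshold * data.std()
    return data[keep].mean(), data[keep].std()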
| 30.933962
| 97
| 0.625801
|
import numpy as np
from jwql.utils import calculations
def test_double_gaussian_fit():
amplitude1 = 500
mean_value1 = 0.5
sigma_value1 = 0.05
amplitude2 = 300
mean_value2 = 0.4
sigma_value2 = 0.03
bin_centers = np.arange(0., 1.1, 0.007)
input_params = [amplitude1, mean_value1, sigma_value1, amplitude2, mean_value2, sigma_value2]
input_values = calculations.double_gaussian(bin_centers, *input_params)
initial_params = [np.max(input_values), 0.55, 0.1, np.max(input_values), 0.5, 0.05]
params, sigma = calculations.double_gaussian_fit(bin_centers, input_values, initial_params)
assert np.allclose(np.array(params[0:3]), np.array([amplitude2, mean_value2, sigma_value2]),
atol=0, rtol=0.000001)
assert np.allclose(np.array(params[3:]), np.array([amplitude1, mean_value1, sigma_value1]),
atol=0, rtol=0.000001)
def test_gaussian1d_fit():
mean_value = 0.5
sigma_value = 0.05
image = np.random.normal(loc=mean_value, scale=sigma_value, size=(100, 100))
hist, bin_edges = np.histogram(image, bins='auto')
bin_centers = (bin_edges[1:] + bin_edges[0: -1]) / 2.
initial_params = [np.max(hist), 0.55, 0.1]
amplitude, peak, width = calculations.gaussian1d_fit(bin_centers, hist, initial_params)
assert np.isclose(peak[0], mean_value, atol=0.0035, rtol=0.)
assert np.isclose(width[0], sigma_value, atol=0.0035, rtol=0.)
assert ((mean_value <= peak[0] + 7 * peak[1]) & (mean_value >= peak[0] - 7 * peak[1]))
assert ((sigma_value <= width[0] + 7 * width[1]) & (sigma_value >= width[0] - 7 * width[1]))
def test_mean_image():
nstack = 50
cube = np.zeros((nstack, 5, 5))
for i in range(nstack):
if i % 2 == 0:
cube[i, :, :] = 4.
else:
cube[i, :, :] = 5.
cube[0, 0, 0] = 55.
cube[1, 0, 0] = -78.
cube[3, 3, 3] = 150.
cube[2, 3, 3] = 32.
cube[1, 4, 4] = -96.
cube[4, 4, 4] = -25.
mean_img, dev_img = calculations.mean_image(cube, sigma_threshold=3)
assert np.all(mean_img == 4.5)
assert np.all(dev_img == 0.5)
def test_mean_stdev():
image = np.zeros((50, 50)) + 1.
badx = [1, 4, 10, 14, 16, 20, 22, 25, 29, 30]
bady = [13, 27, 43, 21, 1, 32, 25, 21, 9, 14]
for x, y in zip(badx, bady):
image[y, x] = 100.
meanval, stdval = calculations.mean_stdev(image, sigma_threshold=3)
assert meanval == 1.
assert stdval == 0.
| true
| true
|
1c3fefba1ba14e9ffa62bb96df0e1e0d72ed83e9
| 94
|
py
|
Python
|
output/models/saxon_data/simple/simple046_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/saxon_data/simple/simple046_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/saxon_data/simple/simple046_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.saxon_data.simple.simple046_xsd.simple046 import E
__all__ = [
"E",
]
| 15.666667
| 69
| 0.734043
|
from output.models.saxon_data.simple.simple046_xsd.simple046 import E
__all__ = [
"E",
]
| true
| true
|
1c3feffbedeeffc98326325bf161e1a8938e3714
| 610
|
py
|
Python
|
music/models.py
|
parthrao/Django-Rest
|
7921be3de53b7992409ae0dc6e620925f63f9af0
|
[
"Unlicense"
] | null | null | null |
music/models.py
|
parthrao/Django-Rest
|
7921be3de53b7992409ae0dc6e620925f63f9af0
|
[
"Unlicense"
] | null | null | null |
music/models.py
|
parthrao/Django-Rest
|
7921be3de53b7992409ae0dc6e620925f63f9af0
|
[
"Unlicense"
] | null | null | null |
from django.db import models
# Create your models here.
class Album(models.Model):
artist = models.CharField(max_length=250)
album_title = models.CharField(max_length=500)
genre = models.CharField(max_length=100)
album_logo = models.CharField(max_length=1000)
def __str__(self):
return self.album_title + ' - ' + self.artist
class Song(models.Model):
album = models.ForeignKey(Album, on_delete=models.CASCADE)
file_type = models.CharField(max_length=50)
song_title = models.CharField(max_length=250)
is_favorite = models.BooleanField(default=False)
def __str__(self):
return self.song_title
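# Hedged usage sketch (not part of the original app): creating and querying
# these models from a configured Django shell. Titles below are invented.
# >>> album = Album.objects.create(artist='Adele', album_title='25',
# ...                              genre='Pop', album_logo='http://example.com/25.png')
# >>> Song.objects.create(album=album, file_type='mp3', song_title='Hello')
# >>> album.song_set.count()  # reverse FK manager created by Song.album
# 1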
| 30.5
| 59
| 0.77377
|
from django.db import models
class Album(models.Model):
artist = models.CharField(max_length=250)
album_title = models.CharField(max_length=500)
genre = models.CharField(max_length=100)
album_logo = models.CharField(max_length=1000)
def __str__(self):
return self.album_title + ' - ' + self.artist
class Song(models.Model):
album = models.ForeignKey(Album, on_delete=models.CASCADE)
file_type = models.CharField(max_length=50)
song_title = models.CharField(max_length=250)
is_favorite = models.BooleanField(default=False)
def __str__(self):
return self.song_title
| true
| true
|
1c3ff1466c2b4efc9a30b95f8f66043026bac9d5
| 3,076
|
py
|
Python
|
2020/day/11/seating.py
|
mboos/advent-of-code
|
4477bb32c50b951b0a1be4850ed28a2c6f78e65d
|
[
"Apache-2.0"
] | null | null | null |
2020/day/11/seating.py
|
mboos/advent-of-code
|
4477bb32c50b951b0a1be4850ed28a2c6f78e65d
|
[
"Apache-2.0"
] | null | null | null |
2020/day/11/seating.py
|
mboos/advent-of-code
|
4477bb32c50b951b0a1be4850ed28a2c6f78e65d
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
"""
Solution to https://adventofcode.com/2020/day/11
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from collections import Counter
FLAGS = flags.FLAGS
flags.DEFINE_string("input", None, "Input file.")
flags.mark_flag_as_required("input")
def count_neighbours(seats, cx, cy):
count = 0
min_x = max(cx - 1, 0)
max_x = min(cx + 2, len(seats[0]))
min_y = max(cy - 1, 0)
max_y = min(cy + 2, len(seats))
for x in range(min_x, max_x):
for y in range(min_y, max_y):
if x != cx or y != cy:
if seats[y][x] == '#':
count += 1
return count
def find_visible_neighbours(seats):
dirx = -1, 0, 1, -1, 1, -1, 0, 1
diry = -1, -1, -1, 0, 0, 1, 1, 1
max_dist = max(len(seats), len(seats[0]))
neighbours = []
for y, row in enumerate(seats):
neighbours.append([])
for x, seat in enumerate(row):
neighbours[-1].append([])
for dx, dy in zip(dirx, diry):
for d in range(1, max_dist):
ax = x + d * dx
ay = y + d * dy
if ax < 0 or ax >= len(seats[0]):
break
if ay < 0 or ay >= len(seats):
break
if seats[ay][ax] != '.':
neighbours[-1][-1].append((ax, ay))
break
return neighbours
def count_visible_neighbours(seats, neighbours, x, y):
count = 0
for nx, ny in neighbours[y][x]:
if seats[ny][nx] == '#':
count += 1
return count
def print_seats(seats):
for line in seats:
print(''.join(line))
print()
def main(argv):
if len(argv) > 2:
raise app.UsageError('Too many command-line arguments.')
with open(FLAGS.input) as fp:
seats = list(map(lambda s: list(str.strip(s)), fp))
changes = True
occupied = 0
while changes:
changes = set()
for y, row in enumerate(seats):
for x, seat in enumerate(row):
neighbours = count_neighbours(seats, x, y)
if seat == 'L' and neighbours == 0:
changes.add((x, y, '#'))
occupied += 1
elif seat == '#' and neighbours >= 4:
changes.add((x, y, 'L'))
occupied -= 1
for x, y, seat in changes:
seats[y][x] = seat
#print_seats(seats)
print(f'Occupied seats: {occupied}')
for y, row in enumerate(seats):
for x, seat in enumerate(row):
if seat != '.':
seats[y][x] = 'L'
visible_neighbours = find_visible_neighbours(seats)
changes = True
occupied = 0
while changes:
changes = set()
for y, row in enumerate(seats):
for x, seat in enumerate(row):
neighbours = count_visible_neighbours(seats, visible_neighbours, x, y)
if seat == 'L' and neighbours == 0:
changes.add((x, y, '#'))
occupied += 1
elif seat == '#' and neighbours >= 5:
changes.add((x, y, 'L'))
occupied -= 1
for x, y, seat in changes:
seats[y][x] = seat
print(f'Occupied seats in part 2: {occupied}')
if __name__ == '__main__':
app.run(main)
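# Hedged mini-check (not part of the original solution): count_neighbours on a
# hand-made 3x3 grid; the layout is invented for illustration.
# >>> grid = [list('#.#'), list('.L.'), list('#.#')]
# >>> count_neighbours(grid, 1, 1)
# 4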
| 25.848739
| 78
| 0.575423
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from collections import Counter
FLAGS = flags.FLAGS
flags.DEFINE_string("input", None, "Input file.")
flags.mark_flag_as_required("input")
def count_neighbours(seats, cx, cy):
count = 0
min_x = max(cx - 1, 0)
max_x = min(cx + 2, len(seats[0]))
min_y = max(cy - 1, 0)
max_y = min(cy + 2, len(seats))
for x in range(min_x, max_x):
for y in range(min_y, max_y):
if x != cx or y != cy:
if seats[y][x] == '#':
count += 1
return count
def find_visible_neighbours(seats):
dirx = -1, 0, 1, -1, 1, -1, 0, 1
diry = -1, -1, -1, 0, 0, 1, 1, 1
max_dist = max(len(seats), len(seats[0]))
neighbours = []
for y, row in enumerate(seats):
neighbours.append([])
for x, seat in enumerate(row):
neighbours[-1].append([])
for dx, dy in zip(dirx, diry):
for d in range(1, max_dist):
ax = x + d * dx
ay = y + d * dy
if ax < 0 or ax >= len(seats[0]):
break
if ay < 0 or ay >= len(seats):
break
if seats[ay][ax] != '.':
neighbours[-1][-1].append((ax, ay))
break
return neighbours
def count_visible_neighbours(seats, neighbours, x, y):
count = 0
for nx, ny in neighbours[y][x]:
if seats[ny][nx] == '#':
count += 1
return count
def print_seats(seats):
for line in seats:
print(''.join(line))
print()
def main(argv):
if len(argv) > 2:
raise app.UsageError('Too many command-line arguments.')
with open(FLAGS.input) as fp:
seats = list(map(lambda s: list(str.strip(s)), fp))
changes = True
occupied = 0
while changes:
changes = set()
for y, row in enumerate(seats):
for x, seat in enumerate(row):
neighbours = count_neighbours(seats, x, y)
if seat == 'L' and neighbours == 0:
changes.add((x, y, '#'))
occupied += 1
elif seat == '#' and neighbours >= 4:
changes.add((x, y, 'L'))
occupied -= 1
for x, y, seat in changes:
seats[y][x] = seat
print(f'Occupied seats: {occupied}')
for y, row in enumerate(seats):
for x, seat in enumerate(row):
if seat != '.':
seats[y][x] = 'L'
visible_neighbours = find_visible_neighbours(seats)
changes = True
occupied = 0
while changes:
changes = set()
for y, row in enumerate(seats):
for x, seat in enumerate(row):
neighbours = count_visible_neighbours(seats, visible_neighbours, x, y)
if seat == 'L' and neighbours == 0:
changes.add((x, y, '#'))
occupied += 1
elif seat == '#' and neighbours >= 5:
changes.add((x, y, 'L'))
occupied -= 1
for x, y, seat in changes:
seats[y][x] = seat
print(f'Occupied seats in part 2: {occupied}')
if __name__ == '__main__':
app.run(main)
| true
| true
|
1c3ff15c8a420e719e49ef9f29e7361f57a3b52b
| 5,623
|
py
|
Python
|
network/detecthead.py
|
530824679/side_camera_perception
|
b83fb67b3128a048477def1330bac56f703766e6
|
[
"MIT"
] | null | null | null |
network/detecthead.py
|
530824679/side_camera_perception
|
b83fb67b3128a048477def1330bac56f703766e6
|
[
"MIT"
] | null | null | null |
network/detecthead.py
|
530824679/side_camera_perception
|
b83fb67b3128a048477def1330bac56f703766e6
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# coding=utf-8
#================================================================
# Copyright (C) 2020 * Ltd. All rights reserved.
#
# Editor : pycharm
# File name : train.py
# Author : oscar chen
# Created date: 2020-10-13 9:50:26
# Description :
#
#================================================================
import os
import numpy as np
import tensorflow as tf
from network.ops import conv2d, batch_normalization
from network.backbone import darknet53
class Model(object):
def __init__(self, norm_epsilon, norm_decay, classes_path, anchors_path, pre_train):
self.norm_epsilon = norm_epsilon
self.norm_decay = norm_decay
self.classes_path = classes_path
self.anchors_path = anchors_path
self.pre_train = pre_train
self.anchors = self.get_anchors()
self.classes = self.get_classes()
def get_classes(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def detect_block(self, inputs, filters_num, out_filters, conv_index, training=True, norm_decay=0.99, norm_epsilon=1e-3):
conv = conv2d(inputs, filters_num=filters_num, kernel_size=1, strides=1, name="conv2d_" + str(conv_index))
conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
conv_index += 1
conv = conv2d(conv, filters_num=filters_num * 2, kernel_size=3, strides=1, name="conv2d_" + str(conv_index))
conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
conv_index += 1
conv = conv2d(conv, filters_num=filters_num, kernel_size=1, strides=1, name="conv2d_" + str(conv_index))
conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
conv_index += 1
conv = conv2d(conv, filters_num=filters_num * 2, kernel_size=3, strides=1, name="conv2d_" + str(conv_index))
conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
conv_index += 1
conv = conv2d(conv, filters_num=filters_num, kernel_size=1, strides=1, name="conv2d_" + str(conv_index))
conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
conv_index += 1
route = conv
conv = conv2d(conv, filters_num=filters_num * 2, kernel_size=3, strides=1, name="conv2d_" + str(conv_index))
conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
conv_index += 1
conv = conv2d(conv, filters_num=out_filters, kernel_size=1, strides=1, name="conv2d_" + str(conv_index), use_bias=True)
conv_index += 1
return route, conv, conv_index
def build(self, inputs, num_anchors, num_classes, training=True):
conv_index = 1
conv2d_26, conv2d_43, conv2d_52, conv_index = darknet53(inputs, conv_index, training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)
with tf.variable_scope('yolo'):
conv2d_57, conv2d_59, conv_index = self.detect_block(conv2d_52, 512, num_anchors * (num_classes + 5), conv_index=conv_index, training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)
conv2d_60 = conv2d(conv2d_57, filters_num=256, kernel_size=1, strides=1, name="conv2d_" + str(conv_index))
conv2d_60 = batch_normalization(conv2d_60, name="batch_normalization_" + str(conv_index), training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)
conv_index += 1
            upsample_0 = tf.image.resize_nearest_neighbor(conv2d_60, [2 * tf.shape(conv2d_60)[1], 2 * tf.shape(conv2d_60)[2]], name='upsample_0')
            route0 = tf.concat([upsample_0, conv2d_43], axis=-1, name='route_0')
conv2d_65, conv2d_67, conv_index = self.detect_block(route0, 256, num_anchors * (num_classes + 5), conv_index=conv_index, training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)
conv2d_68 = conv2d(conv2d_65, filters_num=128, kernel_size=1, strides=1, name="conv2d_" + str(conv_index))
conv2d_68 = batch_normalization(conv2d_68, name="batch_normalization_" + str(conv_index), training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)
conv_index += 1
            upsample_1 = tf.image.resize_nearest_neighbor(conv2d_68, [2 * tf.shape(conv2d_68)[1], 2 * tf.shape(conv2d_68)[2]], name='upsample_1')
            route1 = tf.concat([upsample_1, conv2d_26], axis=-1, name='route_1')
_, conv2d_75, _ = self.detect_block(route1, 128, num_anchors * (num_classes + 5), conv_index=conv_index, training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)
return [conv2d_59, conv2d_67, conv2d_75]
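# Hedged usage sketch (not part of the original file): wiring the head to a
# placeholder under TF1-style graph mode, which the tf.variable_scope calls
# above assume. The anchor/class counts and file paths are invented.
# model = Model(norm_epsilon=1e-3, norm_decay=0.99,
#               classes_path='data/classes.txt',
#               anchors_path='data/anchors.txt', pre_train=False)
# inputs = tf.placeholder(tf.float32, [None, 416, 416, 3])
# feats = model.build(inputs, num_anchors=3, num_classes=80, training=False)
# # feats: three maps at strides 32/16/8, each with
# # num_anchors * (num_classes + 5) output channels.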
| 57.377551
| 215
| 0.688067
|
import os
import numpy as np
import tensorflow as tf
from network.ops import conv2d, batch_normalization
from network.backbone import darknet53
class Model(object):
def __init__(self, norm_epsilon, norm_decay, classes_path, anchors_path, pre_train):
self.norm_epsilon = norm_epsilon
self.norm_decay = norm_decay
self.classes_path = classes_path
self.anchors_path = anchors_path
self.pre_train = pre_train
self.anchors = self.get_anchors()
self.classes = self.get_classes()
def get_classes(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def detect_block(self, inputs, filters_num, out_filters, conv_index, training=True, norm_decay=0.99, norm_epsilon=1e-3):
conv = conv2d(inputs, filters_num=filters_num, kernel_size=1, strides=1, name="conv2d_" + str(conv_index))
conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
conv_index += 1
conv = conv2d(conv, filters_num=filters_num * 2, kernel_size=3, strides=1, name="conv2d_" + str(conv_index))
conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
conv_index += 1
conv = conv2d(conv, filters_num=filters_num, kernel_size=1, strides=1, name="conv2d_" + str(conv_index))
conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
conv_index += 1
conv = conv2d(conv, filters_num=filters_num * 2, kernel_size=3, strides=1, name="conv2d_" + str(conv_index))
conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
conv_index += 1
conv = conv2d(conv, filters_num=filters_num, kernel_size=1, strides=1, name="conv2d_" + str(conv_index))
conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
conv_index += 1
route = conv
conv = conv2d(conv, filters_num=filters_num * 2, kernel_size=3, strides=1, name="conv2d_" + str(conv_index))
conv = batch_normalization(conv, name="batch_normalization_" + str(conv_index), training=training, norm_decay=norm_decay, norm_epsilon=norm_epsilon)
conv_index += 1
conv = conv2d(conv, filters_num=out_filters, kernel_size=1, strides=1, name="conv2d_" + str(conv_index), use_bias=True)
conv_index += 1
return route, conv, conv_index
def build(self, inputs, num_anchors, num_classes, training=True):
conv_index = 1
conv2d_26, conv2d_43, conv2d_52, conv_index = darknet53(inputs, conv_index, training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)
with tf.variable_scope('yolo'):
conv2d_57, conv2d_59, conv_index = self.detect_block(conv2d_52, 512, num_anchors * (num_classes + 5), conv_index=conv_index, training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)
conv2d_60 = conv2d(conv2d_57, filters_num=256, kernel_size=1, strides=1, name="conv2d_" + str(conv_index))
conv2d_60 = batch_normalization(conv2d_60, name="batch_normalization_" + str(conv_index), training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)
conv_index += 1
            upsample_0 = tf.image.resize_nearest_neighbor(conv2d_60, [2 * tf.shape(conv2d_60)[1], 2 * tf.shape(conv2d_60)[2]], name='upsample_0')
            route0 = tf.concat([upsample_0, conv2d_43], axis=-1, name='route_0')
conv2d_65, conv2d_67, conv_index = self.detect_block(route0, 256, num_anchors * (num_classes + 5), conv_index=conv_index, training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)
conv2d_68 = conv2d(conv2d_65, filters_num=128, kernel_size=1, strides=1, name="conv2d_" + str(conv_index))
conv2d_68 = batch_normalization(conv2d_68, name="batch_normalization_" + str(conv_index), training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)
conv_index += 1
            upsample_1 = tf.image.resize_nearest_neighbor(conv2d_68, [2 * tf.shape(conv2d_68)[1], 2 * tf.shape(conv2d_68)[2]], name='upsample_1')
            route1 = tf.concat([upsample_1, conv2d_26], axis=-1, name='route_1')
_, conv2d_75, _ = self.detect_block(route1, 128, num_anchors * (num_classes + 5), conv_index=conv_index, training=training, norm_decay=self.norm_decay, norm_epsilon=self.norm_epsilon)
return [conv2d_59, conv2d_67, conv2d_75]
| true
| true
|
1c3ff197fb1a5a69165b018285460b36c006c720
| 5,726
|
py
|
Python
|
zenml/core/steps/split/categorical_ratio_split_step.py
|
Camicb/zenml
|
92788a76c7923a30612c5f5bdaaf5bb9554773a1
|
[
"Apache-2.0"
] | 1
|
2021-05-04T17:11:23.000Z
|
2021-05-04T17:11:23.000Z
|
zenml/core/steps/split/categorical_ratio_split_step.py
|
I-m-Zee/zenml
|
c0bfd70716c0cfed5ec825f467ab04b7bd97343e
|
[
"Apache-2.0"
] | null | null | null |
zenml/core/steps/split/categorical_ratio_split_step.py
|
I-m-Zee/zenml
|
c0bfd70716c0cfed5ec825f467ab04b7bd97343e
|
[
"Apache-2.0"
] | 1
|
2020-12-27T08:16:42.000Z
|
2020-12-27T08:16:42.000Z
|
# Copyright (c) maiot GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Implementation of the ratio-based categorical split."""
from typing import Text, List, Dict, Union
from zenml.core.steps.split import constants
from zenml.core.steps.split.base_split_step import BaseSplit
from zenml.core.steps.split.categorical_domain_split_step import \
CategoricalPartitionFn
from zenml.core.steps.split.utils import partition_cat_list
CategoricalValue = Union[Text, int]
def lint_split_map(split_map: Dict[Text, float]):
"""Small utility to lint the split_map"""
if constants.TRAIN not in split_map.keys():
raise AssertionError(f'You have to define some values for '
f'the {constants.TRAIN} split.')
if len(split_map) <= 1:
raise AssertionError('Please specify more than 1 split name in the '
'split_map!')
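# For example (ratios illustrative, assuming constants.TRAIN == "train"):
# lint_split_map({"train": 0.8}) raises because only one split is named, and
# lint_split_map({"eval": 0.5, "test": 0.5}) raises because the mandatory
# "train" split is missing.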
class CategoricalRatioSplit(BaseSplit):
"""
Categorical ratio split. Use this to split data based on a list of values
of interest in a single categorical column. A categorical column is
defined here as a column with finitely many values of type `integer` or
`string`. In contrast to the categorical domain split, here categorical
values are assigned to different splits by the corresponding percentages,
defined inside a split ratio object.
"""
def __init__(
self,
categorical_column: Text,
categories: List[CategoricalValue],
split_ratio: Dict[Text, float],
unknown_category_policy: Text = constants.SKIP,
statistics=None,
schema=None,
):
"""
        Categorical ratio split constructor.
Use this class to split your data based on values in
a single categorical column. A categorical column is defined here as a
column with finitely many values of type `integer` or `string`.
Example usage:
        # Split on a categorical attribute called "color", with a defined
        # list of categories of interest: half of the categories go entirely
        # into the train set, the other half into the eval set. Other colors,
        # e.g. "purple", are discarded due to the "skip" flag.
>>> split = CategoricalRatioSplit(
... categorical_column="color",
... categories = ["red", "green", "blue", "yellow"],
... split_ratio = {"train": 0.5,
... "eval": 0.5},
... unknown_category_policy="skip")
Supply the unknown_category_policy flag to set the unknown category
handling policy. There are two main options:
Setting unknown_category_policy to any key in the split map indicates
that any missing categories should be put into that particular split.
For example, supplying ``unknown_category_policy="train"`` indicates
that all missing categories should go into the training dataset, while
``unknown_category_policy="eval"`` indicates that all missing
categories should go into the evaluation dataset.
Setting ``unknown_category_policy="skip"`` indicates that data points
with unknown categorical values (i.e., values not present in the
categorical value list) should be taken out of the data set.
Args:
statistics: Parsed statistics from a preceding StatisticsGen.
schema: Parsed schema from a preceding SchemaGen.
categorical_column: Name of the categorical column used for
splitting.
categories: List of categorical values found in the categorical
column on which to split.
split_ratio: A dict mapping { split_name: percentage of categories
in split }.
unknown_category_policy: String, indicates how to handle categories
in the data that are not present in the supplied category list.
"""
self.categorical_column = categorical_column
split_map = partition_cat_list(categories, split_ratio)
lint_split_map(split_map)
self.split_map = split_map
if unknown_category_policy in self.split_map:
self.unknown_category_policy = unknown_category_policy
else:
self.unknown_category_policy = constants.SKIP
super().__init__(statistics=statistics,
schema=schema,
categorical_column=categorical_column,
split_ratio=split_ratio,
categories=categories,
unknown_category_policy=unknown_category_policy)
def partition_fn(self):
return CategoricalPartitionFn, {
'split_map': self.split_map,
'categorical_column': self.categorical_column,
'unknown_category_policy': self.unknown_category_policy
}
def get_split_names(self) -> List[Text]:
split_names = list(self.split_map.keys())
if self.unknown_category_policy in self.split_map:
return split_names
else:
return split_names + [constants.SKIP]
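# A minimal sketch of the unknown-category handling above (values
# illustrative, assuming constants.SKIP == "skip"):
#
#   split = CategoricalRatioSplit(
#       categorical_column="color",
#       categories=["red", "green", "blue", "yellow"],
#       split_ratio={"train": 0.5, "eval": 0.5},
#       unknown_category_policy="eval")
#   split.get_split_names()  # -> ["train", "eval"]; unknown colors go to "eval"
#
#   # with the default "skip" policy a third result set collects the
#   # skipped records: get_split_names() -> ["train", "eval", "skip"]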
| 42.102941
| 79
| 0.663465
|
from typing import Text, List, Dict, Union
from zenml.core.steps.split import constants
from zenml.core.steps.split.base_split_step import BaseSplit
from zenml.core.steps.split.categorical_domain_split_step import \
CategoricalPartitionFn
from zenml.core.steps.split.utils import partition_cat_list
CategoricalValue = Union[Text, int]
def lint_split_map(split_map: Dict[Text, float]):
if constants.TRAIN not in split_map.keys():
raise AssertionError(f'You have to define some values for '
f'the {constants.TRAIN} split.')
if len(split_map) <= 1:
raise AssertionError('Please specify more than 1 split name in the '
'split_map!')
class CategoricalRatioSplit(BaseSplit):
def __init__(
self,
categorical_column: Text,
categories: List[CategoricalValue],
split_ratio: Dict[Text, float],
unknown_category_policy: Text = constants.SKIP,
statistics=None,
schema=None,
):
self.categorical_column = categorical_column
split_map = partition_cat_list(categories, split_ratio)
lint_split_map(split_map)
self.split_map = split_map
if unknown_category_policy in self.split_map:
self.unknown_category_policy = unknown_category_policy
else:
self.unknown_category_policy = constants.SKIP
super().__init__(statistics=statistics,
schema=schema,
categorical_column=categorical_column,
split_ratio=split_ratio,
categories=categories,
unknown_category_policy=unknown_category_policy)
def partition_fn(self):
return CategoricalPartitionFn, {
'split_map': self.split_map,
'categorical_column': self.categorical_column,
'unknown_category_policy': self.unknown_category_policy
}
def get_split_names(self) -> List[Text]:
split_names = list(self.split_map.keys())
if self.unknown_category_policy in self.split_map:
return split_names
else:
return split_names + [constants.SKIP]
| true
| true
|
1c3ff1bdd5bd4f35b40e41df990e6222f62c69db
| 16,550
|
py
|
Python
|
docker-compose/core-network.py
|
kukkalli/oai-cn5g-fed
|
15634fac935ac8671b61654bdf75bf8af07d3c3a
|
[
"Apache-2.0"
] | null | null | null |
docker-compose/core-network.py
|
kukkalli/oai-cn5g-fed
|
15634fac935ac8671b61654bdf75bf8af07d3c3a
|
[
"Apache-2.0"
] | null | null | null |
docker-compose/core-network.py
|
kukkalli/oai-cn5g-fed
|
15634fac935ac8671b61654bdf75bf8af07d3c3a
|
[
"Apache-2.0"
] | null | null | null |
"""
Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The OpenAirInterface Software Alliance licenses this file to You under
the OAI Public License, Version 1.1 (the "License"); you may not use this file
except in compliance with the License.
You may obtain a copy of the License at
http://www.openairinterface.org/?page_id=698
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
------------------------------------------------------------------------------
For more information about the OpenAirInterface (OAI) Software Alliance:
contact@openairinterface.org
------------------------------------------------------------------------------
"""
import yaml
import re
import subprocess
import time
import logging
import argparse
import sys
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stdout,
format="[%(asctime)s] %(name)s:%(levelname)s: %(message)s"
)
# Docker Compose files
MINI_W_NRF = 'docker-compose-mini-nrf.yaml'
MINI_NO_NRF = 'docker-compose-mini-nonrf.yaml'
BASIC_W_NRF = 'docker-compose-basic-nrf.yaml'
BASIC_NO_NRF = 'docker-compose-basic-nonrf.yaml'
BASIC_VPP_W_NRF = 'docker-compose-basic-vpp-nrf.yaml'
BASIC_VPP_NO_NRF = 'docker-compose-basic-vpp-nonrf.yaml'
def _parse_args() -> argparse.Namespace:
"""Parse the command line args
Returns:
        argparse.Namespace: the parsed command-line arguments
"""
example_text = '''example:
python3 core-network.py --type start-mini
python3 core-network.py --type start-basic
python3 core-network.py --type start-basic-vpp
python3 core-network.py --type stop-mini
python3 core-network.py --type start-mini --fqdn no --scenario 1
python3 core-network.py --type start-basic --fqdn no --scenario 1'''
parser = argparse.ArgumentParser(description='OAI 5G CORE NETWORK DEPLOY',
epilog=example_text,
formatter_class=argparse.RawDescriptionHelpFormatter)
# 5GCN function TYPE
parser.add_argument(
'--type', '-t',
action='store',
required=True,
choices=['start-mini', 'start-basic', 'start-basic-vpp', 'stop-mini', 'stop-basic', 'stop-basic-vpp'],
help='Functional type of 5g core network ("start-mini"|"start-basic"|"start-basic-vpp"|"stop-mini"|"stop-basic"|"stop-basic-vpp")',
)
# Deployment scenario with FQDN/IP based
parser.add_argument(
'--fqdn', '-fq',
action='store',
choices=['yes', 'no'],
default='yes',
help='Deployment scenario with FQDN ("yes"|"no")',
)
# Deployment scenario with NRF/ without NRF
parser.add_argument(
'--scenario', '-s',
action='store',
choices=['1', '2'],
default='1',
help='Scenario with NRF ("1") and without NRF ("2")',
)
# Automatic PCAP capture
parser.add_argument(
'--capture', '-c',
action='store',
help='Add an automatic PCAP capture on docker networks to CAPTURE file',
)
return parser.parse_args()
def deploy(file_name, ct, extra_interface=False):
"""Deploy the containers using the docker-compose template
Returns:
None
"""
# Before deploy adapting with fqdn/ip
if args.fqdn == 'no':
subprocess.run(f'sed -i -e "s#USE_FQDN_DNS=yes#USE_FQDN_DNS=no#g" {file_name}', shell=True)
subprocess.run(f'sed -i -e "s#USE_FQDN_NRF=yes#USE_FQDN_NRF=no#g" {file_name}', shell=True)
elif args.fqdn == 'yes':
subprocess.run(f'sed -i -e "s#USE_FQDN_DNS=no#USE_FQDN_DNS=yes#g" {file_name}', shell=True)
subprocess.run(f'sed -i -e "s#USE_FQDN_NRF=no#USE_FQDN_NRF=yes#g" {file_name}', shell=True)
logging.debug('\033[0;34m Starting 5gcn components... Please wait\033[0m....')
if args.capture is None:
# When no capture, just deploy all at once.
cmd = f'docker-compose -f {file_name} up -d'
res = run_cmd(cmd, False)
else:
# First just deploy mysql container, all docker networks will be up.
cmd = f'docker-compose -f {file_name} up -d mysql'
res = run_cmd(cmd, False)
if res is None:
exit(f'\033[0;31m Incorrect/Unsupported executing command {cmd}')
print(res)
# Then we can start the capture on the "demo-oai" interface.
# When we undeploy, the process will terminate automatically.
# Explanation of the capture filter:
# - On all containers but oai-ext-dn
# * `not arp` --> NO ARP packets
# * `not port 53` --> NO DNS requests from any container
# * `not port 2152` --> When running w/ OAI RF simulator, remove all GTP packets
# - On oai-ext-dn container
# * `icmp` --> Only ping packets
cmd = f'nohup sudo tshark -i demo-oai -f "(not host 192.168.70.135 and not arp and not port 53 and not port 2152) or (host 192.168.70.135 and icmp)" -w {args.capture} > /dev/null 2>&1 &'
if extra_interface:
cmd = re.sub('-i demo-oai', '-i demo-oai -i cn5g-core', cmd)
cmd = re.sub('70', '73', cmd)
res = run_cmd(cmd, False)
if res is None:
exit(f'\033[0;31m Incorrect/Unsupported executing command {cmd}')
cmd = f'sleep 20; sudo chmod 666 {args.capture}'
run_cmd(cmd)
# Finally deploy the rest of the network functions.
cmd = f'docker-compose -f {file_name} up -d'
res = run_cmd(cmd, False)
# sometimes first try does not go through
if args.capture is not None:
cmd = f'sudo chmod 666 {args.capture}'
run_cmd(cmd)
if res is None:
exit(f'\033[0;31m Incorrect/Unsupported executing command {cmd}')
print(res)
    logging.debug('\033[0;32m OAI 5G Core network started, checking the health status of the containers... takes a few secs\033[0m....')
notSilentForFirstTime = False
for x in range(40):
cmd = f'docker-compose -f {file_name} ps -a'
res = run_cmd(cmd, notSilentForFirstTime)
notSilentForFirstTime = True
if res is None:
exit(f'\033[0;31m Incorrect/Unsupported executing command "{cmd}"')
time.sleep(2)
cnt = res.count('(healthy)')
if cnt == ct:
logging.debug('\033[0;32m All components are healthy, please see below for more details\033[0m....')
print(res)
break
if cnt != ct:
        logging.error('\033[0;31m Core network is unhealthy, please see below for more details\033[0m....')
print(res)
exit(-1)
check_config(file_name)
def undeploy(file_name):
"""UnDeploy the docker container
Returns:
None
"""
logging.debug('\033[0;34m UnDeploying OAI 5G core components\033[0m....')
cmd = f'docker-compose -f {file_name} down'
res = run_cmd(cmd, False)
if res is None:
exit(f'\033[0;31m Incorrect/Unsupported executing command {cmd}')
print(res)
logging.debug('\033[0;32m OAI 5G core components are UnDeployed\033[0m....')
def check_config(file_name):
"""Checks the container configurations
Returns:
None
"""
logging.debug('\033[0;34m Checking if the containers are configured\033[0m....')
# With NRF configuration check
if args.scenario == '1':
logging.debug('\033[0;34m Checking if AMF, SMF and UPF registered with nrf core network\033[0m....')
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="AMF" | grep -o "192.168.70.132"'
amf_registration_nrf = run_cmd(cmd, False)
if amf_registration_nrf is not None:
print(amf_registration_nrf)
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="SMF" | grep -o "192.168.70.133"'
smf_registration_nrf = run_cmd(cmd, False)
if smf_registration_nrf is not None:
print(smf_registration_nrf)
if file_name == BASIC_VPP_W_NRF:
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="UPF" | grep -o "192.168.70.202"'
else:
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="UPF" | grep -o "192.168.70.134"'
upf_registration_nrf = run_cmd(cmd, False)
if upf_registration_nrf is not None:
print(upf_registration_nrf)
if file_name == BASIC_VPP_W_NRF or file_name == BASIC_W_NRF:
logging.debug('\033[0;34m Checking if AUSF, UDM and UDR registered with nrf core network\033[0m....')
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="AUSF" | grep -o "192.168.70.138"'
ausf_registration_nrf = run_cmd(cmd, False)
if ausf_registration_nrf is not None:
print(ausf_registration_nrf)
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="UDM" | grep -o "192.168.70.137"'
udm_registration_nrf = run_cmd(cmd, False)
if udm_registration_nrf is not None:
print(udm_registration_nrf)
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="UDR" | grep -o "192.168.70.136"'
udr_registration_nrf = run_cmd(cmd, False)
if udr_registration_nrf is not None:
print(udr_registration_nrf)
else:
ausf_registration_nrf = True
udm_registration_nrf = True
udr_registration_nrf = True
if amf_registration_nrf is None or smf_registration_nrf is None or upf_registration_nrf is None or \
ausf_registration_nrf is None or udm_registration_nrf is None or udr_registration_nrf is None:
logging.error('\033[0;31m Registration problem with NRF, check the reason manually\033[0m....')
else:
if file_name == BASIC_VPP_W_NRF or file_name == BASIC_W_NRF:
logging.debug('\033[0;32m AUSF, UDM, UDR, AMF, SMF and UPF are registered to NRF\033[0m....')
else:
logging.debug('\033[0;32m AMF, SMF and UPF are registered to NRF\033[0m....')
if file_name == BASIC_VPP_W_NRF:
logging.debug('\033[0;34m Checking if SMF is able to connect with UPF\033[0m....')
cmd1 = 'docker logs oai-smf | grep "Received N4 ASSOCIATION SETUP RESPONSE from an UPF"'
cmd2 = 'docker logs oai-smf | grep "Node ID Type FQDN: gw1"'
upf_logs1 = run_cmd(cmd1)
upf_logs2 = run_cmd(cmd2)
if upf_logs1 is None or upf_logs2 is None:
logging.error('\033[0;31m UPF did not answer to N4 Association request from SMF\033[0m....')
exit(-1)
else:
logging.debug('\033[0;32m UPF did answer to N4 Association request from SMF\033[0m....')
cmd1 = 'docker logs oai-smf | grep "PFCP HEARTBEAT PROCEDURE"'
upf_logs1 = run_cmd(cmd1)
if upf_logs1 is None:
logging.error('\033[0;31m SMF not receiving heartbeats from UPF\033[0m....')
exit(-1)
else:
                logging.debug('\033[0;32m SMF receiving heartbeats from UPF\033[0m....')
elif file_name == BASIC_W_NRF:
logging.debug('\033[0;34m Checking if SMF is able to connect with UPF\033[0m....')
cmd1 = 'docker logs oai-smf | grep "Received N4 ASSOCIATION SETUP RESPONSE from an UPF"'
cmd2 = 'docker logs oai-smf | grep "Node ID Type FQDN: oai-spgwu"'
upf_logs1 = run_cmd(cmd1)
upf_logs2 = run_cmd(cmd2)
if upf_logs1 is None or upf_logs2 is None:
logging.error('\033[0;31m UPF did not answer to N4 Association request from SMF\033[0m....')
exit(-1)
else:
logging.debug('\033[0;32m UPF did answer to N4 Association request from SMF\033[0m....')
cmd1 = 'docker logs oai-smf | grep "PFCP HEARTBEAT PROCEDURE"'
upf_logs1 = run_cmd(cmd1)
if upf_logs1 is None:
logging.error('\033[0;31m SMF not receiving heartbeats from UPF\033[0m....')
exit(-1)
else:
                logging.debug('\033[0;32m SMF receiving heartbeats from UPF\033[0m....')
else:
logging.debug('\033[0;34m Checking if SMF is able to connect with UPF\033[0m....')
cmd1 = 'docker logs oai-spgwu | grep "Received SX HEARTBEAT RESPONSE"'
cmd2 = 'docker logs oai-spgwu | grep "Received SX HEARTBEAT REQUEST"'
upf_logs1 = run_cmd(cmd1)
upf_logs2 = run_cmd(cmd2)
if upf_logs1 is None and upf_logs2 is None:
logging.error('\033[0;31m UPF not receiving heartbeats from SMF\033[0m....')
exit(-1)
else:
                logging.debug('\033[0;32m UPF receiving heartbeats from SMF\033[0m....')
# With noNRF configuration checks
elif args.scenario == '2':
logging.debug('\033[0;34m Checking if SMF is able to connect with UPF\033[0m....')
if file_name == BASIC_VPP_NO_NRF:
cmd1 = 'docker logs oai-smf | grep "Received N4 ASSOCIATION SETUP RESPONSE from an UPF"'
cmd2 = 'docker logs oai-smf | grep "Node ID Type FQDN: gw1"'
upf_logs1 = run_cmd(cmd1)
upf_logs2 = run_cmd(cmd2)
if upf_logs1 is None or upf_logs2 is None:
logging.error('\033[0;31m UPF did not answer to N4 Association request from SMF\033[0m....')
exit(-1)
else:
logging.debug('\033[0;32m UPF did answer to N4 Association request from SMF\033[0m....')
status = 0
for x in range(4):
cmd = "docker logs oai-smf | grep 'handle_receive(16 bytes)'"
res = run_cmd(cmd)
if res is None:
logging.error('\033[0;31m UPF not receiving heartbeats from SMF, re-trying\033[0m....')
else:
status += 1
if status > 2:
            logging.debug('\033[0;32m UPF receiving heartbeats from SMF\033[0m....')
logging.debug('\033[0;32m OAI 5G Core network is configured and healthy\033[0m....')
def run_cmd(cmd, silent=True):
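    # Run a shell command and return its stripped stdout, or None when the
    # command fails; callers treat a None result as an error condition.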
if not silent:
logging.debug(cmd)
result = None
try:
res = subprocess.run(cmd,
shell=True, check=True,
stdout=subprocess.PIPE,
universal_newlines=True)
result = res.stdout.strip()
    except Exception:
pass
return result
if __name__ == '__main__':
# Parse the arguments to get the deployment instruction
args = _parse_args()
if args.type == 'start-mini':
# Mini function with NRF
if args.scenario == '1':
deploy(MINI_W_NRF, 5)
# Mini function without NRF
elif args.scenario == '2':
deploy(MINI_NO_NRF, 4)
elif args.type == 'start-basic':
# Basic function with NRF
if args.scenario == '1':
deploy(BASIC_W_NRF, 8)
# Basic function without NRF
elif args.scenario == '2':
deploy(BASIC_NO_NRF, 7)
elif args.type == 'start-basic-vpp':
if args.fqdn == 'yes':
logging.error('Configuration not supported yet')
exit(-1)
# Basic function with NRF and VPP-UPF
if args.scenario == '1':
deploy(BASIC_VPP_W_NRF, 8, True)
# Basic function without NRF but with VPP-UPF
elif args.scenario == '2':
deploy(BASIC_VPP_NO_NRF, 7, True)
elif args.type == 'stop-mini':
if args.scenario == '1':
undeploy(MINI_W_NRF)
elif args.scenario == '2':
undeploy(MINI_NO_NRF)
elif args.type == 'stop-basic':
if args.scenario == '1':
undeploy(BASIC_W_NRF)
elif args.scenario == '2':
undeploy(BASIC_NO_NRF)
elif args.type == 'stop-basic-vpp':
if args.scenario == '1':
undeploy(BASIC_VPP_W_NRF)
elif args.scenario == '2':
undeploy(BASIC_VPP_NO_NRF)
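# Example invocation with automatic packet capture enabled (the capture path
# is illustrative):
#   python3 core-network.py --type start-basic --scenario 1 --capture /tmp/5gcn.pcap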
| 45.218579
| 194
| 0.603686
|
import yaml
import re
import subprocess
import time
import logging
import argparse
import sys
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stdout,
format="[%(asctime)s] %(name)s:%(levelname)s: %(message)s"
)
MINI_W_NRF = 'docker-compose-mini-nrf.yaml'
MINI_NO_NRF = 'docker-compose-mini-nonrf.yaml'
BASIC_W_NRF = 'docker-compose-basic-nrf.yaml'
BASIC_NO_NRF = 'docker-compose-basic-nonrf.yaml'
BASIC_VPP_W_NRF = 'docker-compose-basic-vpp-nrf.yaml'
BASIC_VPP_NO_NRF = 'docker-compose-basic-vpp-nonrf.yaml'
def _parse_args() -> argparse.Namespace:
example_text = '''example:
python3 core-network.py --type start-mini
python3 core-network.py --type start-basic
python3 core-network.py --type start-basic-vpp
python3 core-network.py --type stop-mini
python3 core-network.py --type start-mini --fqdn no --scenario 1
python3 core-network.py --type start-basic --fqdn no --scenario 1'''
parser = argparse.ArgumentParser(description='OAI 5G CORE NETWORK DEPLOY',
epilog=example_text,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--type', '-t',
action='store',
required=True,
choices=['start-mini', 'start-basic', 'start-basic-vpp', 'stop-mini', 'stop-basic', 'stop-basic-vpp'],
help='Functional type of 5g core network ("start-mini"|"start-basic"|"start-basic-vpp"|"stop-mini"|"stop-basic"|"stop-basic-vpp")',
)
parser.add_argument(
'--fqdn', '-fq',
action='store',
choices=['yes', 'no'],
default='yes',
help='Deployment scenario with FQDN ("yes"|"no")',
)
parser.add_argument(
'--scenario', '-s',
action='store',
choices=['1', '2'],
default='1',
help='Scenario with NRF ("1") and without NRF ("2")',
)
parser.add_argument(
'--capture', '-c',
action='store',
help='Add an automatic PCAP capture on docker networks to CAPTURE file',
)
return parser.parse_args()
def deploy(file_name, ct, extra_interface=False):
if args.fqdn == 'no':
subprocess.run(f'sed -i -e "s#USE_FQDN_DNS=yes#USE_FQDN_DNS=no#g" {file_name}', shell=True)
subprocess.run(f'sed -i -e "s#USE_FQDN_NRF=yes#USE_FQDN_NRF=no#g" {file_name}', shell=True)
elif args.fqdn == 'yes':
subprocess.run(f'sed -i -e "s#USE_FQDN_DNS=no#USE_FQDN_DNS=yes#g" {file_name}', shell=True)
subprocess.run(f'sed -i -e "s#USE_FQDN_NRF=no#USE_FQDN_NRF=yes#g" {file_name}', shell=True)
logging.debug('\033[0;34m Starting 5gcn components... Please wait\033[0m....')
if args.capture is None:
cmd = f'docker-compose -f {file_name} up -d'
res = run_cmd(cmd, False)
else:
cmd = f'docker-compose -f {file_name} up -d mysql'
res = run_cmd(cmd, False)
if res is None:
exit(f'\033[0;31m Incorrect/Unsupported executing command {cmd}')
print(res)
cmd = f'nohup sudo tshark -i demo-oai -f "(not host 192.168.70.135 and not arp and not port 53 and not port 2152) or (host 192.168.70.135 and icmp)" -w {args.capture} > /dev/null 2>&1 &'
if extra_interface:
cmd = re.sub('-i demo-oai', '-i demo-oai -i cn5g-core', cmd)
cmd = re.sub('70', '73', cmd)
res = run_cmd(cmd, False)
if res is None:
exit(f'\033[0;31m Incorrect/Unsupported executing command {cmd}')
cmd = f'sleep 20; sudo chmod 666 {args.capture}'
run_cmd(cmd)
cmd = f'docker-compose -f {file_name} up -d'
res = run_cmd(cmd, False)
if args.capture is not None:
cmd = f'sudo chmod 666 {args.capture}'
run_cmd(cmd)
if res is None:
exit(f'\033[0;31m Incorrect/Unsupported executing command {cmd}')
print(res)
    logging.debug('\033[0;32m OAI 5G Core network started, checking the health status of the containers... takes a few secs\033[0m....')
notSilentForFirstTime = False
for x in range(40):
cmd = f'docker-compose -f {file_name} ps -a'
res = run_cmd(cmd, notSilentForFirstTime)
notSilentForFirstTime = True
if res is None:
exit(f'\033[0;31m Incorrect/Unsupported executing command "{cmd}"')
time.sleep(2)
cnt = res.count('(healthy)')
if cnt == ct:
logging.debug('\033[0;32m All components are healthy, please see below for more details\033[0m....')
print(res)
break
if cnt != ct:
        logging.error('\033[0;31m Core network is unhealthy, please see below for more details\033[0m....')
print(res)
exit(-1)
check_config(file_name)
def undeploy(file_name):
logging.debug('\033[0;34m UnDeploying OAI 5G core components\033[0m....')
cmd = f'docker-compose -f {file_name} down'
res = run_cmd(cmd, False)
if res is None:
exit(f'\033[0;31m Incorrect/Unsupported executing command {cmd}')
print(res)
logging.debug('\033[0;32m OAI 5G core components are UnDeployed\033[0m....')
def check_config(file_name):
logging.debug('\033[0;34m Checking if the containers are configured\033[0m....')
if args.scenario == '1':
logging.debug('\033[0;34m Checking if AMF, SMF and UPF registered with nrf core network\033[0m....')
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="AMF" | grep -o "192.168.70.132"'
amf_registration_nrf = run_cmd(cmd, False)
if amf_registration_nrf is not None:
print(amf_registration_nrf)
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="SMF" | grep -o "192.168.70.133"'
smf_registration_nrf = run_cmd(cmd, False)
if smf_registration_nrf is not None:
print(smf_registration_nrf)
if file_name == BASIC_VPP_W_NRF:
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="UPF" | grep -o "192.168.70.202"'
else:
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="UPF" | grep -o "192.168.70.134"'
upf_registration_nrf = run_cmd(cmd, False)
if upf_registration_nrf is not None:
print(upf_registration_nrf)
if file_name == BASIC_VPP_W_NRF or file_name == BASIC_W_NRF:
logging.debug('\033[0;34m Checking if AUSF, UDM and UDR registered with nrf core network\033[0m....')
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="AUSF" | grep -o "192.168.70.138"'
ausf_registration_nrf = run_cmd(cmd, False)
if ausf_registration_nrf is not None:
print(ausf_registration_nrf)
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="UDM" | grep -o "192.168.70.137"'
udm_registration_nrf = run_cmd(cmd, False)
if udm_registration_nrf is not None:
print(udm_registration_nrf)
cmd = 'curl -s -X GET http://192.168.70.130/nnrf-nfm/v1/nf-instances?nf-type="UDR" | grep -o "192.168.70.136"'
udr_registration_nrf = run_cmd(cmd, False)
if udr_registration_nrf is not None:
print(udr_registration_nrf)
else:
ausf_registration_nrf = True
udm_registration_nrf = True
udr_registration_nrf = True
if amf_registration_nrf is None or smf_registration_nrf is None or upf_registration_nrf is None or \
ausf_registration_nrf is None or udm_registration_nrf is None or udr_registration_nrf is None:
logging.error('\033[0;31m Registration problem with NRF, check the reason manually\033[0m....')
else:
if file_name == BASIC_VPP_W_NRF or file_name == BASIC_W_NRF:
logging.debug('\033[0;32m AUSF, UDM, UDR, AMF, SMF and UPF are registered to NRF\033[0m....')
else:
logging.debug('\033[0;32m AMF, SMF and UPF are registered to NRF\033[0m....')
if file_name == BASIC_VPP_W_NRF:
logging.debug('\033[0;34m Checking if SMF is able to connect with UPF\033[0m....')
cmd1 = 'docker logs oai-smf | grep "Received N4 ASSOCIATION SETUP RESPONSE from an UPF"'
cmd2 = 'docker logs oai-smf | grep "Node ID Type FQDN: gw1"'
upf_logs1 = run_cmd(cmd1)
upf_logs2 = run_cmd(cmd2)
if upf_logs1 is None or upf_logs2 is None:
logging.error('\033[0;31m UPF did not answer to N4 Association request from SMF\033[0m....')
exit(-1)
else:
logging.debug('\033[0;32m UPF did answer to N4 Association request from SMF\033[0m....')
cmd1 = 'docker logs oai-smf | grep "PFCP HEARTBEAT PROCEDURE"'
upf_logs1 = run_cmd(cmd1)
if upf_logs1 is None:
logging.error('\033[0;31m SMF not receiving heartbeats from UPF\033[0m....')
exit(-1)
else:
                logging.debug('\033[0;32m SMF receiving heartbeats from UPF\033[0m....')
elif file_name == BASIC_W_NRF:
logging.debug('\033[0;34m Checking if SMF is able to connect with UPF\033[0m....')
cmd1 = 'docker logs oai-smf | grep "Received N4 ASSOCIATION SETUP RESPONSE from an UPF"'
cmd2 = 'docker logs oai-smf | grep "Node ID Type FQDN: oai-spgwu"'
upf_logs1 = run_cmd(cmd1)
upf_logs2 = run_cmd(cmd2)
if upf_logs1 is None or upf_logs2 is None:
logging.error('\033[0;31m UPF did not answer to N4 Association request from SMF\033[0m....')
exit(-1)
else:
logging.debug('\033[0;32m UPF did answer to N4 Association request from SMF\033[0m....')
cmd1 = 'docker logs oai-smf | grep "PFCP HEARTBEAT PROCEDURE"'
upf_logs1 = run_cmd(cmd1)
if upf_logs1 is None:
logging.error('\033[0;31m SMF not receiving heartbeats from UPF\033[0m....')
exit(-1)
else:
                logging.debug('\033[0;32m SMF receiving heartbeats from UPF\033[0m....')
else:
logging.debug('\033[0;34m Checking if SMF is able to connect with UPF\033[0m....')
cmd1 = 'docker logs oai-spgwu | grep "Received SX HEARTBEAT RESPONSE"'
cmd2 = 'docker logs oai-spgwu | grep "Received SX HEARTBEAT REQUEST"'
upf_logs1 = run_cmd(cmd1)
upf_logs2 = run_cmd(cmd2)
if upf_logs1 is None and upf_logs2 is None:
logging.error('\033[0;31m UPF not receiving heartbeats from SMF\033[0m....')
exit(-1)
else:
                logging.debug('\033[0;32m UPF receiving heartbeats from SMF\033[0m....')
elif args.scenario == '2':
logging.debug('\033[0;34m Checking if SMF is able to connect with UPF\033[0m....')
if file_name == BASIC_VPP_NO_NRF:
cmd1 = 'docker logs oai-smf | grep "Received N4 ASSOCIATION SETUP RESPONSE from an UPF"'
cmd2 = 'docker logs oai-smf | grep "Node ID Type FQDN: gw1"'
upf_logs1 = run_cmd(cmd1)
upf_logs2 = run_cmd(cmd2)
if upf_logs1 is None or upf_logs2 is None:
logging.error('\033[0;31m UPF did not answer to N4 Association request from SMF\033[0m....')
exit(-1)
else:
logging.debug('\033[0;32m UPF did answer to N4 Association request from SMF\033[0m....')
status = 0
for x in range(4):
cmd = "docker logs oai-smf | grep 'handle_receive(16 bytes)'"
res = run_cmd(cmd)
if res is None:
logging.error('\033[0;31m UPF not receiving heartbeats from SMF, re-trying\033[0m....')
else:
status += 1
if status > 2:
            logging.debug('\033[0;32m UPF receiving heartbeats from SMF\033[0m....')
logging.debug('\033[0;32m OAI 5G Core network is configured and healthy\033[0m....')
def run_cmd(cmd, silent=True):
if not silent:
logging.debug(cmd)
result = None
try:
res = subprocess.run(cmd,
shell=True, check=True,
stdout=subprocess.PIPE,
universal_newlines=True)
result = res.stdout.strip()
    except Exception:
pass
return result
if __name__ == '__main__':
args = _parse_args()
if args.type == 'start-mini':
if args.scenario == '1':
deploy(MINI_W_NRF, 5)
elif args.scenario == '2':
deploy(MINI_NO_NRF, 4)
elif args.type == 'start-basic':
if args.scenario == '1':
deploy(BASIC_W_NRF, 8)
elif args.scenario == '2':
deploy(BASIC_NO_NRF, 7)
elif args.type == 'start-basic-vpp':
if args.fqdn == 'yes':
logging.error('Configuration not supported yet')
exit(-1)
if args.scenario == '1':
deploy(BASIC_VPP_W_NRF, 8, True)
elif args.scenario == '2':
deploy(BASIC_VPP_NO_NRF, 7, True)
elif args.type == 'stop-mini':
if args.scenario == '1':
undeploy(MINI_W_NRF)
elif args.scenario == '2':
undeploy(MINI_NO_NRF)
elif args.type == 'stop-basic':
if args.scenario == '1':
undeploy(BASIC_W_NRF)
elif args.scenario == '2':
undeploy(BASIC_NO_NRF)
elif args.type == 'stop-basic-vpp':
if args.scenario == '1':
undeploy(BASIC_VPP_W_NRF)
elif args.scenario == '2':
undeploy(BASIC_VPP_NO_NRF)
| true
| true
|
1c3ff1dd2ff12e899dc1584fe534fb5dbf17b99d
| 2,578
|
py
|
Python
|
camkes/parser/stage6.py
|
aisamanra/camkes-tool
|
4bcf3f22ef7e73f8755ca1b5e7165dd6a23e89f3
|
[
"BSD-2-Clause"
] | null | null | null |
camkes/parser/stage6.py
|
aisamanra/camkes-tool
|
4bcf3f22ef7e73f8755ca1b5e7165dd6a23e89f3
|
[
"BSD-2-Clause"
] | null | null | null |
camkes/parser/stage6.py
|
aisamanra/camkes-tool
|
4bcf3f22ef7e73f8755ca1b5e7165dd6a23e89f3
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#
'''
Stage 6 parser. The following parser is designed to accept a stage 5 parser,
whose output it consumes. This parser's purpose is to combine multiple assembly
entities into a single top-level assembly.
'''
from __future__ import absolute_import, division, print_function, \
unicode_literals
from camkes.internal.seven import cmp, filter, map, zip
from .base import Transformer
from camkes.ast import Assembly, Composition, Configuration, Group, \
TraversalAction
from .exception import ParseError
def precondition(ast_lifted):
'''
Precondition of this parser. No groups should be present in the AST.
'''
return all(not isinstance(x, Group) for x in ast_lifted)
def postcondition(ast_lifted):
'''
Postcondition of the stage 6 parser. Only a single assembly should remain.
'''
class Post(TraversalAction):
def __init__(self):
self.count = 0
def __call__(self, item):
if isinstance(item, Assembly):
self.count += 1
return item
p = Post()
ast_lifted.postorder(p)
return p.count <= 1
def compose_assemblies(ast):
assembly = ast.assembly
if assembly is None:
raise ParseError('no assembly found in input specification')
# collect pieces from all assemblies
for a in [x for x in ast.items if isinstance(x, Assembly) and
            x is not assembly]:
assembly.composition.instances.extend(a.composition.instances)
assembly.composition.connections.extend(a.composition.connections)
if a.configuration is not None:
assembly.configuration.settings.extend(a.configuration.settings)
# Ensure AST consistency.
assembly.composition.claim_children()
assembly.configuration.claim_children()
# Remove all other assemblies from AST.
ast.filter(lambda x: not isinstance(x, Assembly) or x is assembly)
class Parse6(Transformer):
def precondition(self, ast_lifted, _):
return precondition(ast_lifted)
def postcondition(self, ast_lifted, _):
return postcondition(ast_lifted)
def transform(self, ast_lifted, read):
compose_assemblies(ast_lifted)
return ast_lifted, read
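# A minimal, self-contained sketch of the merge semantics implemented by
# compose_assemblies, using plain stand-in objects instead of the real
# camkes.ast types (illustrative only):
if __name__ == '__main__':
    class _FakeAssembly(object):
        def __init__(self, instances, connections, settings):
            self.instances = instances
            self.connections = connections
            self.settings = settings
    def _merge(assemblies):
        main = assemblies[0]
        for a in assemblies[1:]:
            # pieces of every other assembly fold into the first one
            main.instances.extend(a.instances)
            main.connections.extend(a.connections)
            main.settings.extend(a.settings)
        return main  # a single assembly remains, as the postcondition requires
    merged = _merge([_FakeAssembly(['i1'], ['c1'], []),
                     _FakeAssembly(['i2'], [], ['s1'])])
    assert merged.instances == ['i1', 'i2'] and merged.settings == ['s1']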
| 29.632184
| 79
| 0.703646
|
from __future__ import absolute_import, division, print_function, \
unicode_literals
from camkes.internal.seven import cmp, filter, map, zip
from .base import Transformer
from camkes.ast import Assembly, Composition, Configuration, Group, \
TraversalAction
from .exception import ParseError
def precondition(ast_lifted):
return all(not isinstance(x, Group) for x in ast_lifted)
def postcondition(ast_lifted):
class Post(TraversalAction):
def __init__(self):
self.count = 0
def __call__(self, item):
if isinstance(item, Assembly):
self.count += 1
return item
p = Post()
ast_lifted.postorder(p)
return p.count <= 1
def compose_assemblies(ast):
assembly = ast.assembly
if assembly is None:
raise ParseError('no assembly found in input specification')
for a in [x for x in ast.items if isinstance(x, Assembly) and
            x is not assembly]:
assembly.composition.instances.extend(a.composition.instances)
assembly.composition.connections.extend(a.composition.connections)
if a.configuration is not None:
assembly.configuration.settings.extend(a.configuration.settings)
assembly.composition.claim_children()
assembly.configuration.claim_children()
ast.filter(lambda x: not isinstance(x, Assembly) or x is assembly)
class Parse6(Transformer):
def precondition(self, ast_lifted, _):
return precondition(ast_lifted)
def postcondition(self, ast_lifted, _):
return postcondition(ast_lifted)
def transform(self, ast_lifted, read):
compose_assemblies(ast_lifted)
return ast_lifted, read
| true
| true
|
1c3ff2bbb22ab4f99787b4f2b0fe0a3023f613cf
| 10,932
|
py
|
Python
|
pystac_client/stac_api_io.py
|
ocefpaf/pystac-client
|
ddf0e0566b2b1783a4d32d3d77f9f51b80270df3
|
[
"Apache-2.0"
] | 52
|
2021-04-15T23:24:12.000Z
|
2022-03-09T23:02:27.000Z
|
pystac_client/stac_api_io.py
|
ocefpaf/pystac-client
|
ddf0e0566b2b1783a4d32d3d77f9f51b80270df3
|
[
"Apache-2.0"
] | 119
|
2021-04-13T11:42:01.000Z
|
2022-02-24T10:02:35.000Z
|
pystac_client/stac_api_io.py
|
ocefpaf/pystac-client
|
ddf0e0566b2b1783a4d32d3d77f9f51b80270df3
|
[
"Apache-2.0"
] | 14
|
2021-04-13T19:00:19.000Z
|
2022-02-23T09:17:30.000Z
|
from copy import deepcopy
import json
import logging
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
TYPE_CHECKING,
Union,
)
from urllib.parse import urlparse
import re
from requests import Request, Session
import pystac
from pystac.link import Link
from pystac.serialization import (
merge_common_properties,
identify_stac_object_type,
identify_stac_object,
migrate_to_latest,
)
from pystac.stac_io import DefaultStacIO
import pystac_client
from .exceptions import APIError
from .conformance import ConformanceClasses, CONFORMANCE_URIS
if TYPE_CHECKING:
from pystac.stac_object import STACObject as STACObject_Type
from pystac.catalog import Catalog as Catalog_Type
logger = logging.getLogger(__name__)
class StacApiIO(DefaultStacIO):
def __init__(
self,
headers: Optional[Dict] = None,
conformance: Optional[List[str]] = None,
parameters: Optional[Dict] = None,
):
"""Initialize class for API IO
Args:
headers : Optional dictionary of headers to include in all requests
conformance : Optional list of `Conformance Classes
<https://github.com/radiantearth/stac-api-spec/blob/master/overview.md#conformance-classes>`__.
parameters: Optional dictionary of query string parameters to include in all requests.
Return:
StacApiIO : StacApiIO instance
"""
# TODO - this should super() to parent class
self.session = Session()
self.session.headers.update(headers or {})
self.session.params.update(parameters or {})
self._conformance = conformance
def read_text(self,
source: Union[str, Link],
*args: Any,
parameters: Optional[dict] = {},
**kwargs: Any) -> str:
"""Read text from the given URI.
        Overrides the default method for reading text from a URL or file to
        allow :class:`pystac.Link` instances as input. Error responses from
        the server are raised as :exc:`~pystac_client.exceptions.APIError`
        rather than being caught, so callers can handle different response
        status codes as needed.
"""
if isinstance(source, str):
href = source
if bool(urlparse(href).scheme):
return self.request(href, *args, parameters=parameters, **kwargs)
else:
with open(href) as f:
href_contents = f.read()
return href_contents
elif isinstance(source, Link):
link = source.to_dict()
href = link['href']
# get headers and body from Link and add to request from simple stac resolver
merge = bool(link.get('merge', False))
# If the link object includes a "method" property, use that. If not fall back to 'GET'.
method = link.get('method', 'GET')
# If the link object includes a "headers" property, use that and respect the "merge" property.
headers = link.get('headers', None)
# If "POST" use the body object that and respect the "merge" property.
link_body = link.get('body', {})
if method == 'POST':
parameters = {**parameters, **link_body} if merge else link_body
else:
# parameters are already in the link href
parameters = {}
return self.request(href, *args, method=method, headers=headers, parameters=parameters)
def request(self,
href: str,
method: Optional[str] = 'GET',
headers: Optional[dict] = {},
parameters: Optional[dict] = {}) -> str:
"""Makes a request to an http endpoint
Args:
href (str): The request URL
method (Optional[str], optional): The http method to use, 'GET' or 'POST'. Defaults to 'GET'.
headers (Optional[dict], optional): Additional headers to include in request. Defaults to {}.
parameters (Optional[dict], optional): parameters to send with request. Defaults to {}.
Raises:
APIError: raised if the server returns an error response
Return:
str: The decoded response from the endpoint
"""
if method == 'POST':
request = Request(method=method, url=href, headers=headers, json=parameters)
else:
params = deepcopy(parameters)
if 'intersects' in params:
params['intersects'] = json.dumps(params['intersects'])
request = Request(method=method, url=href, headers=headers, params=params)
try:
prepped = self.session.prepare_request(request)
msg = f"{prepped.method} {prepped.url} Headers: {prepped.headers}"
if method == 'POST':
msg += f" Payload: {json.dumps(request.json)}"
logger.debug(msg)
resp = self.session.send(prepped)
if resp.status_code != 200:
raise APIError(resp.text)
return resp.content.decode("utf-8")
except Exception as err:
raise APIError(str(err))
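    # For example (URL and geometry illustrative only), a GET request with a
    # GeoJSON ``intersects`` parameter has that parameter serialized to a
    # JSON string before being placed in the query string:
    #
    #   stac_io.request("https://example.com/search",
    #                   parameters={"intersects": {"type": "Point",
    #                                              "coordinates": [0, 0]}})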
def write_text_to_href(self, href: str, *args: Any, **kwargs: Any) -> None:
if bool(urlparse(href).scheme):
raise APIError("Transactions not supported")
else:
return super().write_text_to_href(*args, **kwargs)
def stac_object_from_dict(
self,
d: Dict[str, Any],
href: Optional[str] = None,
root: Optional["Catalog_Type"] = None,
preserve_dict: bool = True,
) -> "STACObject_Type":
"""Deserializes a :class:`~pystac.STACObject` sub-class instance from a dictionary.
Args:
d : The dictionary to deserialize
href : Optional href to associate with the STAC object
root : Optional root :class:`~pystac.Catalog` to associate with the
STAC object.
preserve_dict: If ``False``, the dict parameter ``d`` may be modified
during this method call. Otherwise the dict is not mutated.
                Defaults to ``True``, which results in a deepcopy of the
parameter. Set to ``False`` when possible to avoid the performance
hit of a deepcopy.
"""
if identify_stac_object_type(d) == pystac.STACObjectType.ITEM:
collection_cache = None
if root is not None:
collection_cache = root._resolved_objects.as_collection_cache()
# Merge common properties in case this is an older STAC object.
merge_common_properties(d, json_href=href, collection_cache=collection_cache)
info = identify_stac_object(d)
d = migrate_to_latest(d, info)
if info.object_type == pystac.STACObjectType.CATALOG:
result = pystac_client.Client.from_dict(d,
href=href,
root=root,
migrate=False,
preserve_dict=preserve_dict)
result._stac_io = self
return result
if info.object_type == pystac.STACObjectType.COLLECTION:
return pystac_client.CollectionClient.from_dict(d,
href=href,
root=root,
migrate=False,
preserve_dict=preserve_dict)
if info.object_type == pystac.STACObjectType.ITEM:
return pystac.Item.from_dict(d,
href=href,
root=root,
migrate=False,
preserve_dict=preserve_dict)
raise ValueError(f"Unknown STAC object type {info.object_type}")
def get_pages(self, url, method='GET', parameters={}) -> Iterator[Dict]:
"""Iterator that yields dictionaries for each page at a STAC paging endpoint, e.g., /collections, /search
Return:
Dict : JSON content from a single page
"""
page = self.read_json(url, method=method, parameters=parameters)
yield page
next_link = next((link for link in page.get('links', []) if link['rel'] == 'next'), None)
while next_link:
link = Link.from_dict(next_link)
page = self.read_json(link, parameters=parameters)
yield page
# get the next link and make the next request
next_link = next((link for link in page.get('links', []) if link['rel'] == 'next'),
None)
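    # A minimal sketch of consuming the pager above; the endpoint and query
    # are hypothetical (illustrative only):
    #
    #   stac_io = StacApiIO()
    #   for page in stac_io.get_pages("https://example.com/search",
    #                                 method="POST",
    #                                 parameters={"limit": 100}):
    #       for feature in page.get("features", []):
    #           print(feature["id"])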
def assert_conforms_to(self, conformance_class: ConformanceClasses) -> None:
"""Raises a :exc:`NotImplementedError` if the API does not publish the given conformance class. This method
only checks against the ``"conformsTo"`` property from the API landing page and does not make any additional
calls to a ``/conformance`` endpoint even if the API provides such an endpoint.
Args:
conformance_class: The ``ConformanceClasses`` key to check conformance against.
"""
if not self.conforms_to(conformance_class):
raise NotImplementedError(f"{conformance_class} not supported")
def conforms_to(self, conformance_class: ConformanceClasses) -> bool:
"""Whether the API conforms to the given standard. This method only checks against the ``"conformsTo"``
property from the API landing page and does not make any additional calls to a ``/conformance`` endpoint
even if the API provides such an endpoint.
Args:
key : The ``ConformanceClasses`` key to check conformance against.
Return:
bool: Indicates if the API conforms to the given spec or URI.
"""
# Conformance of None means ignore all conformance as opposed to an
# empty array which would indicate the API conforms to nothing
if self._conformance is None:
return True
class_regex = CONFORMANCE_URIS.get(conformance_class.name, None)
if class_regex is None:
raise Exception(f"Invalid conformance class {conformance_class}")
pattern = re.compile(class_regex)
if not any(re.match(pattern, uri) for uri in self._conformance):
return False
return True
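    # For example (conformance URI illustrative; assumes the ITEM_SEARCH
    # member defined in .conformance):
    #
    #   stac_io = StacApiIO(conformance=[
    #       "https://api.stacspec.org/v1.0.0-beta.1/item-search"])
    #   stac_io.conforms_to(ConformanceClasses.ITEM_SEARCH)  # -> True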
def set_conformance(self, conformance: Optional[List[str]]) -> None:
"""Sets (or clears) the conformances for this StacIO."""
self._conformance = conformance
| 41.25283
| 117
| 0.589005
|
from copy import deepcopy
import json
import logging
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
TYPE_CHECKING,
Union,
)
from urllib.parse import urlparse
import re
from requests import Request, Session
import pystac
from pystac.link import Link
from pystac.serialization import (
merge_common_properties,
identify_stac_object_type,
identify_stac_object,
migrate_to_latest,
)
from pystac.stac_io import DefaultStacIO
import pystac_client
from .exceptions import APIError
from .conformance import ConformanceClasses, CONFORMANCE_URIS
if TYPE_CHECKING:
from pystac.stac_object import STACObject as STACObject_Type
from pystac.catalog import Catalog as Catalog_Type
logger = logging.getLogger(__name__)
class StacApiIO(DefaultStacIO):
def __init__(
self,
headers: Optional[Dict] = None,
conformance: Optional[List[str]] = None,
parameters: Optional[Dict] = None,
):
self.session = Session()
self.session.headers.update(headers or {})
self.session.params.update(parameters or {})
self._conformance = conformance
def read_text(self,
source: Union[str, Link],
*args: Any,
parameters: Optional[dict] = {},
**kwargs: Any) -> str:
if isinstance(source, str):
href = source
if bool(urlparse(href).scheme):
return self.request(href, *args, parameters=parameters, **kwargs)
else:
with open(href) as f:
href_contents = f.read()
return href_contents
elif isinstance(source, Link):
link = source.to_dict()
href = link['href']
merge = bool(link.get('merge', False))
method = link.get('method', 'GET')
headers = link.get('headers', None)
link_body = link.get('body', {})
if method == 'POST':
parameters = {**parameters, **link_body} if merge else link_body
else:
parameters = {}
return self.request(href, *args, method=method, headers=headers, parameters=parameters)
def request(self,
href: str,
method: Optional[str] = 'GET',
headers: Optional[dict] = {},
parameters: Optional[dict] = {}) -> str:
if method == 'POST':
request = Request(method=method, url=href, headers=headers, json=parameters)
else:
params = deepcopy(parameters)
if 'intersects' in params:
params['intersects'] = json.dumps(params['intersects'])
request = Request(method=method, url=href, headers=headers, params=params)
try:
prepped = self.session.prepare_request(request)
msg = f"{prepped.method} {prepped.url} Headers: {prepped.headers}"
if method == 'POST':
msg += f" Payload: {json.dumps(request.json)}"
logger.debug(msg)
resp = self.session.send(prepped)
if resp.status_code != 200:
raise APIError(resp.text)
return resp.content.decode("utf-8")
except Exception as err:
raise APIError(str(err))
def write_text_to_href(self, href: str, *args: Any, **kwargs: Any) -> None:
if bool(urlparse(href).scheme):
raise APIError("Transactions not supported")
else:
return super().write_text_to_href(*args, **kwargs)
def stac_object_from_dict(
self,
d: Dict[str, Any],
href: Optional[str] = None,
root: Optional["Catalog_Type"] = None,
preserve_dict: bool = True,
) -> "STACObject_Type":
if identify_stac_object_type(d) == pystac.STACObjectType.ITEM:
collection_cache = None
if root is not None:
collection_cache = root._resolved_objects.as_collection_cache()
merge_common_properties(d, json_href=href, collection_cache=collection_cache)
info = identify_stac_object(d)
d = migrate_to_latest(d, info)
if info.object_type == pystac.STACObjectType.CATALOG:
result = pystac_client.Client.from_dict(d,
href=href,
root=root,
migrate=False,
preserve_dict=preserve_dict)
result._stac_io = self
return result
if info.object_type == pystac.STACObjectType.COLLECTION:
return pystac_client.CollectionClient.from_dict(d,
href=href,
root=root,
migrate=False,
preserve_dict=preserve_dict)
if info.object_type == pystac.STACObjectType.ITEM:
return pystac.Item.from_dict(d,
href=href,
root=root,
migrate=False,
preserve_dict=preserve_dict)
raise ValueError(f"Unknown STAC object type {info.object_type}")
def get_pages(self, url, method='GET', parameters={}) -> Iterator[Dict]:
page = self.read_json(url, method=method, parameters=parameters)
yield page
next_link = next((link for link in page.get('links', []) if link['rel'] == 'next'), None)
while next_link:
link = Link.from_dict(next_link)
page = self.read_json(link, parameters=parameters)
yield page
next_link = next((link for link in page.get('links', []) if link['rel'] == 'next'),
None)
def assert_conforms_to(self, conformance_class: ConformanceClasses) -> None:
if not self.conforms_to(conformance_class):
raise NotImplementedError(f"{conformance_class} not supported")
def conforms_to(self, conformance_class: ConformanceClasses) -> bool:
if self._conformance is None:
return True
class_regex = CONFORMANCE_URIS.get(conformance_class.name, None)
if class_regex is None:
raise Exception(f"Invalid conformance class {conformance_class}")
pattern = re.compile(class_regex)
if not any(re.match(pattern, uri) for uri in self._conformance):
return False
return True
def set_conformance(self, conformance: Optional[List[str]]) -> None:
self._conformance = conformance
| true
| true
|
1c3ff4337d079b6133dc0b44d79eca6a3b4a2637
| 1,919
|
py
|
Python
|
scrap_from_cusat.py
|
abinshoby/Malayalam-word-net
|
c7c3f5915d9fc59f96a381c2d35a65f6bbdd3b43
|
[
"MIT"
] | null | null | null |
scrap_from_cusat.py
|
abinshoby/Malayalam-word-net
|
c7c3f5915d9fc59f96a381c2d35a65f6bbdd3b43
|
[
"MIT"
] | null | null | null |
scrap_from_cusat.py
|
abinshoby/Malayalam-word-net
|
c7c3f5915d9fc59f96a381c2d35a65f6bbdd3b43
|
[
"MIT"
] | null | null | null |
#from cusat using api
# -*- coding: utf-8 -*-
import requests
import csv
import sqlite3
import json
from sqlite3 import Error
import unicodedata
def add_to_csv(l):
f = open('test6_cusat.csv',"a")
csv_file = csv.writer(f)
l = json.loads(l)
csv_file.writerow([str(l[0]['sid']), l[0]['synset'] ,l[0]['meaning'] , l[0]['pos']] + l[0]['hypernym'] + l[0]['hyponym'] + l[0]['meronym'] + l[0]['antonym'])
f.close()
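# add_to_csv expects the API payload to be a JSON array whose first element
# carries the synset record, e.g. (field values illustrative):
# [{"sid": 1, "synset": "...", "meaning": "...", "pos": "noun",
#   "hypernym": [], "hyponym": [], "meronym": [], "antonym": []}]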
def scrap_word_syn(word):
#url = "http://malayalamwordnet.cusat.ac.in/restapi/json/synset?searchWord="+word
url = "http://malayalamwordnet.cusat.ac.in/restapi/json/synset/"+str(word)
response = requests.request("GET", url)
if(response.status_code==200):
print(response.text)
return response.text
else:
return -1
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by the db_file
"""
try:
conn = sqlite3.connect(db_file)
return conn
except Error as e:
print(e)
return None
def select_all_tasks(conn):
"""
Query all rows in the tasks table
"""
cur = conn.cursor()
#count=get_last_count()
cur.execute("SELECT word FROM words_ml where _id;")
rows = cur.fetchall()
return rows
conn=create_connection("enml.db")
if(conn):
rows=select_all_tasks(conn)
#print(scrap_word_syn("നിഘണ്ടു"))
fin=[]
#c=get_last_count() #currently the no of words is limited to 3 you can remove this condition for all words
tot=0
#for row in rows:
#st=str(row[0])
#print(st,"\n")
# q=st.split()
# if(tot>=6):
# break
#for i in q:
i=20028
while(True):
k=scrap_word_syn(i)
i+=1
if(k!=-1 and len(k)>0):
add_to_csv(k)
tot+=1
print(tot)
#c+=1
# inc_count(c)
| 24.922078
| 161
| 0.581553
|
import requests
import csv
import sqlite3
import json
from sqlite3 import Error
import unicodedata
def add_to_csv(l):
f = open('test6_cusat.csv',"a")
csv_file = csv.writer(f)
l = json.loads(l)
csv_file.writerow([str(l[0]['sid']), l[0]['synset'] ,l[0]['meaning'] , l[0]['pos']] + l[0]['hypernym'] + l[0]['hyponym'] + l[0]['meronym'] + l[0]['antonym'])
f.close()
def scrap_word_syn(word):
url = "http://malayalamwordnet.cusat.ac.in/restapi/json/synset/"+str(word)
response = requests.request("GET", url)
if(response.status_code==200):
print(response.text)
return response.text
else:
return -1
def create_connection(db_file):
try:
conn = sqlite3.connect(db_file)
return conn
except Error as e:
print(e)
return None
def select_all_tasks(conn):
cur = conn.cursor()
cur.execute("SELECT word FROM words_ml where _id;")
rows = cur.fetchall()
return rows
conn=create_connection("enml.db")
if(conn):
rows=select_all_tasks(conn)
fin=[]
tot=0
i=20028
while(True):
k=scrap_word_syn(i)
i+=1
if(k!=-1 and len(k)>0):
add_to_csv(k)
tot+=1
print(tot)
| true
| true
|
1c3ff4ce33f9efe7358a1f50a8174fa7eefaac36
| 8,417
|
py
|
Python
|
pointmvsnet/networks.py
|
MarcWong/PointMVSNet
|
b48f20f3695eb4418f522daedb60e7329eebf05f
|
[
"MIT"
] | 419
|
2019-08-13T06:03:32.000Z
|
2022-03-29T06:26:08.000Z
|
pointmvsnet/networks.py
|
MarcWong/PointMVSNet
|
b48f20f3695eb4418f522daedb60e7329eebf05f
|
[
"MIT"
] | 31
|
2019-08-20T19:25:14.000Z
|
2022-03-30T05:14:39.000Z
|
pointmvsnet/networks.py
|
MarcWong/PointMVSNet
|
b48f20f3695eb4418f522daedb60e7329eebf05f
|
[
"MIT"
] | 99
|
2019-08-13T08:52:46.000Z
|
2022-03-20T08:30:22.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from pointmvsnet.functions.gather_knn import gather_knn
from pointmvsnet.nn.conv import *
class EdgeConv(nn.Module):
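    # edge convolution: concatenates each point's feature with (neighbour - centre) differences over its k nearest neighbours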
def __init__(self, in_channels, out_channels):
super(EdgeConv, self).__init__()
self.conv1 = nn.Conv1d(in_channels, out_channels, 1, bias=False)
self.conv2 = nn.Conv1d(in_channels, out_channels, 1, bias=False)
self.bn = nn.BatchNorm2d(2 * out_channels)
def forward(self, feature, knn_inds):
batch_size, _, num_points = feature.shape
k = knn_inds.shape[2]
local_feature = self.conv1(feature) # (batch_size, out_channels, num_points)
edge_feature = self.conv2(feature) # (batch_size, out_channels, num_points)
channels = local_feature.shape[1]
if feature.is_cuda:
# custom improved gather
neighbour_feature = gather_knn(edge_feature, knn_inds)
else:
# pytorch gather
knn_inds_expand = knn_inds.unsqueeze(1).expand(batch_size, channels, num_points, k)
            edge_feature_expand = edge_feature.unsqueeze(2).expand(batch_size, -1, num_points, num_points)
neighbour_feature = torch.gather(edge_feature_expand, 3, knn_inds_expand)
# (batch_size, out_channels, num_points, k)
central_feature = local_feature.unsqueeze(-1).expand(-1, -1, -1, k)
edge_feature = torch.cat([central_feature, neighbour_feature - central_feature], dim=1)
edge_feature = self.bn(edge_feature)
edge_feature = F.relu(edge_feature, inplace=True)
edge_feature = torch.mean(edge_feature, dim=3)
return edge_feature
class EdgeConvNoC(nn.Module):
def __init__(self, in_channels, out_channels):
super(EdgeConvNoC, self).__init__()
self.conv1 = nn.Conv1d(in_channels, out_channels, 1, bias=False)
self.conv2 = nn.Conv1d(in_channels, out_channels, 1, bias=False)
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, feature, knn_inds):
batch_size, _, num_points = feature.shape
k = knn_inds.shape[2]
local_feature = self.conv1(feature) # (batch_size, out_channels, num_points)
edge_feature = self.conv2(feature) # (batch_size, out_channels, num_points)
channels = local_feature.shape[1]
if feature.is_cuda:
# custom improved gather
neighbour_feature = gather_knn(edge_feature, knn_inds)
else:
# pytorch gather
knn_inds_expand = knn_inds.unsqueeze(1).expand(batch_size, channels, num_points, k)
edge_feature_expand = edge_feature.unsqueeze(2).expand(batch_size, -1, num_points, num_points)
neighbour_feature = torch.gather(edge_feature_expand, 3, knn_inds_expand)
# (batch_size, out_channels, num_points, k)
central_feature = local_feature.unsqueeze(-1).expand(-1, -1, -1, k)
edge_feature = neighbour_feature - central_feature
edge_feature = self.bn(edge_feature)
edge_feature = F.relu(edge_feature, inplace=True)
edge_feature = torch.mean(edge_feature, dim=3)
return edge_feature
class ImageConv(nn.Module):
def __init__(self, base_channels):
super(ImageConv, self).__init__()
self.base_channels = base_channels
self.out_channels = 8 * base_channels
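        # four-stage feature pyramid; conv1-conv3 each downsample by 2, so conv3 is at 1/8 resolution with 8x base channels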
self.conv0 = nn.Sequential(
Conv2d(3, base_channels, 3, 1, padding=1),
Conv2d(base_channels, base_channels, 3, 1, padding=1),
)
self.conv1 = nn.Sequential(
Conv2d(base_channels, base_channels * 2, 5, stride=2, padding=2),
Conv2d(base_channels * 2, base_channels * 2, 3, 1, padding=1),
Conv2d(base_channels * 2, base_channels * 2, 3, 1, padding=1),
)
self.conv2 = nn.Sequential(
Conv2d(base_channels * 2, base_channels * 4, 5, stride=2, padding=2),
Conv2d(base_channels * 4, base_channels * 4, 3, 1, padding=1),
Conv2d(base_channels * 4, base_channels * 4, 3, 1, padding=1),
)
self.conv3 = nn.Sequential(
Conv2d(base_channels * 4, base_channels * 8, 5, stride=2, padding=2),
Conv2d(base_channels * 8, base_channels * 8, 3, 1, padding=1),
nn.Conv2d(base_channels * 8, base_channels * 8, 3, padding=1, bias=False)
)
def forward(self, imgs):
out_dict = {}
conv0 = self.conv0(imgs)
out_dict["conv0"] = conv0
conv1 = self.conv1(conv0)
out_dict["conv1"] = conv1
conv2 = self.conv2(conv1)
out_dict["conv2"] = conv2
conv3 = self.conv3(conv2)
out_dict["conv3"] = conv3
return out_dict
class VolumeConv(nn.Module):
def __init__(self, in_channels, base_channels):
super(VolumeConv, self).__init__()
self.in_channels = in_channels
self.out_channels = base_channels * 8
self.base_channels = base_channels
self.conv1_0 = Conv3d(in_channels, base_channels * 2, 3, stride=2, padding=1)
self.conv2_0 = Conv3d(base_channels * 2, base_channels * 4, 3, stride=2, padding=1)
self.conv3_0 = Conv3d(base_channels * 4, base_channels * 8, 3, stride=2, padding=1)
self.conv0_1 = Conv3d(in_channels, base_channels, 3, 1, padding=1)
self.conv1_1 = Conv3d(base_channels * 2, base_channels * 2, 3, 1, padding=1)
self.conv2_1 = Conv3d(base_channels * 4, base_channels * 4, 3, 1, padding=1)
self.conv3_1 = Conv3d(base_channels * 8, base_channels * 8, 3, 1, padding=1)
self.conv4_0 = Deconv3d(base_channels * 8, base_channels * 4, 3, 2, padding=1, output_padding=1)
self.conv5_0 = Deconv3d(base_channels * 4, base_channels * 2, 3, 2, padding=1, output_padding=1)
self.conv6_0 = Deconv3d(base_channels * 2, base_channels, 3, 2, padding=1, output_padding=1)
self.conv6_2 = nn.Conv3d(base_channels, 1, 3, padding=1, bias=False)
def forward(self, x):
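        # encoder-decoder over the volume: three stride-2 Conv3d levels down, three Deconv3d levels up with additive skip connections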
conv0_1 = self.conv0_1(x)
conv1_0 = self.conv1_0(x)
conv2_0 = self.conv2_0(conv1_0)
conv3_0 = self.conv3_0(conv2_0)
conv1_1 = self.conv1_1(conv1_0)
conv2_1 = self.conv2_1(conv2_0)
conv3_1 = self.conv3_1(conv3_0)
conv4_0 = self.conv4_0(conv3_1)
conv5_0 = self.conv5_0(conv4_0 + conv2_1)
conv6_0 = self.conv6_0(conv5_0 + conv1_1)
conv6_2 = self.conv6_2(conv6_0 + conv0_1)
return conv6_2
class MAELoss(nn.Module):
def forward(self, pred_depth_image, gt_depth_image, depth_interval):
"""non zero mean absolute loss for one batch"""
# shape = list(pred_depth_image)
depth_interval = depth_interval.view(-1)
mask_valid = (~torch.eq(gt_depth_image, 0.0)).type(torch.float)
denom = torch.sum(mask_valid, dim=(1, 2, 3)) + 1e-7
masked_abs_error = mask_valid * torch.abs(pred_depth_image - gt_depth_image)
masked_mae = torch.sum(masked_abs_error, dim=(1, 2, 3))
masked_mae = torch.sum((masked_mae / depth_interval) / denom)
return masked_mae
class Valid_MAELoss(nn.Module):
def __init__(self, valid_threshold=2.0):
super(Valid_MAELoss, self).__init__()
self.valid_threshold = valid_threshold
def forward(self, pred_depth_image, gt_depth_image, depth_interval, before_depth_image):
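        # mask out pixels with zero ground truth or whose previous-stage depth differs from GT by more than valid_threshold intervals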
"""non zero mean absolute loss for one batch"""
# shape = list(pred_depth_image)
pred_height = pred_depth_image.size(2)
pred_width = pred_depth_image.size(3)
depth_interval = depth_interval.view(-1)
mask_true = (~torch.eq(gt_depth_image, 0.0)).type(torch.float)
        before_height = before_depth_image.size(2)
        if before_height != pred_height:
before_depth_image = F.interpolate(before_depth_image, (pred_height, pred_width))
diff = torch.abs(gt_depth_image - before_depth_image) / depth_interval.view(-1, 1, 1, 1)
mask_valid = (diff < self.valid_threshold).type(torch.float)
mask_valid = mask_true * mask_valid
denom = torch.sum(mask_valid, dim=(1, 2, 3)) + 1e-7
masked_abs_error = mask_valid * torch.abs(pred_depth_image - gt_depth_image)
masked_mae = torch.sum(masked_abs_error, dim=(1, 2, 3))
masked_mae = torch.sum((masked_mae / depth_interval) / denom)
return masked_mae
| 40.466346
| 107
| 0.653439
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from pointmvsnet.functions.gather_knn import gather_knn
from pointmvsnet.nn.conv import *
class EdgeConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(EdgeConv, self).__init__()
self.conv1 = nn.Conv1d(in_channels, out_channels, 1, bias=False)
self.conv2 = nn.Conv1d(in_channels, out_channels, 1, bias=False)
self.bn = nn.BatchNorm2d(2 * out_channels)
def forward(self, feature, knn_inds):
batch_size, _, num_points = feature.shape
k = knn_inds.shape[2]
local_feature = self.conv1(feature)
edge_feature = self.conv2(feature)
channels = local_feature.shape[1]
if feature.is_cuda:
neighbour_feature = gather_knn(edge_feature, knn_inds)
else:
knn_inds_expand = knn_inds.unsqueeze(1).expand(batch_size, channels, num_points, k)
            edge_feature_expand = edge_feature.unsqueeze(2).expand(batch_size, -1, num_points, num_points)
neighbour_feature = torch.gather(edge_feature_expand, 3, knn_inds_expand)
central_feature = local_feature.unsqueeze(-1).expand(-1, -1, -1, k)
edge_feature = torch.cat([central_feature, neighbour_feature - central_feature], dim=1)
edge_feature = self.bn(edge_feature)
edge_feature = F.relu(edge_feature, inplace=True)
edge_feature = torch.mean(edge_feature, dim=3)
return edge_feature
class EdgeConvNoC(nn.Module):
def __init__(self, in_channels, out_channels):
super(EdgeConvNoC, self).__init__()
self.conv1 = nn.Conv1d(in_channels, out_channels, 1, bias=False)
self.conv2 = nn.Conv1d(in_channels, out_channels, 1, bias=False)
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, feature, knn_inds):
batch_size, _, num_points = feature.shape
k = knn_inds.shape[2]
local_feature = self.conv1(feature)
edge_feature = self.conv2(feature)
channels = local_feature.shape[1]
if feature.is_cuda:
neighbour_feature = gather_knn(edge_feature, knn_inds)
else:
knn_inds_expand = knn_inds.unsqueeze(1).expand(batch_size, channels, num_points, k)
edge_feature_expand = edge_feature.unsqueeze(2).expand(batch_size, -1, num_points, num_points)
neighbour_feature = torch.gather(edge_feature_expand, 3, knn_inds_expand)
central_feature = local_feature.unsqueeze(-1).expand(-1, -1, -1, k)
edge_feature = neighbour_feature - central_feature
edge_feature = self.bn(edge_feature)
edge_feature = F.relu(edge_feature, inplace=True)
edge_feature = torch.mean(edge_feature, dim=3)
return edge_feature
class ImageConv(nn.Module):
def __init__(self, base_channels):
super(ImageConv, self).__init__()
self.base_channels = base_channels
self.out_channels = 8 * base_channels
self.conv0 = nn.Sequential(
Conv2d(3, base_channels, 3, 1, padding=1),
Conv2d(base_channels, base_channels, 3, 1, padding=1),
)
self.conv1 = nn.Sequential(
Conv2d(base_channels, base_channels * 2, 5, stride=2, padding=2),
Conv2d(base_channels * 2, base_channels * 2, 3, 1, padding=1),
Conv2d(base_channels * 2, base_channels * 2, 3, 1, padding=1),
)
self.conv2 = nn.Sequential(
Conv2d(base_channels * 2, base_channels * 4, 5, stride=2, padding=2),
Conv2d(base_channels * 4, base_channels * 4, 3, 1, padding=1),
Conv2d(base_channels * 4, base_channels * 4, 3, 1, padding=1),
)
self.conv3 = nn.Sequential(
Conv2d(base_channels * 4, base_channels * 8, 5, stride=2, padding=2),
Conv2d(base_channels * 8, base_channels * 8, 3, 1, padding=1),
nn.Conv2d(base_channels * 8, base_channels * 8, 3, padding=1, bias=False)
)
def forward(self, imgs):
out_dict = {}
conv0 = self.conv0(imgs)
out_dict["conv0"] = conv0
conv1 = self.conv1(conv0)
out_dict["conv1"] = conv1
conv2 = self.conv2(conv1)
out_dict["conv2"] = conv2
conv3 = self.conv3(conv2)
out_dict["conv3"] = conv3
return out_dict
class VolumeConv(nn.Module):
def __init__(self, in_channels, base_channels):
super(VolumeConv, self).__init__()
self.in_channels = in_channels
self.out_channels = base_channels * 8
self.base_channels = base_channels
self.conv1_0 = Conv3d(in_channels, base_channels * 2, 3, stride=2, padding=1)
self.conv2_0 = Conv3d(base_channels * 2, base_channels * 4, 3, stride=2, padding=1)
self.conv3_0 = Conv3d(base_channels * 4, base_channels * 8, 3, stride=2, padding=1)
self.conv0_1 = Conv3d(in_channels, base_channels, 3, 1, padding=1)
self.conv1_1 = Conv3d(base_channels * 2, base_channels * 2, 3, 1, padding=1)
self.conv2_1 = Conv3d(base_channels * 4, base_channels * 4, 3, 1, padding=1)
self.conv3_1 = Conv3d(base_channels * 8, base_channels * 8, 3, 1, padding=1)
self.conv4_0 = Deconv3d(base_channels * 8, base_channels * 4, 3, 2, padding=1, output_padding=1)
self.conv5_0 = Deconv3d(base_channels * 4, base_channels * 2, 3, 2, padding=1, output_padding=1)
self.conv6_0 = Deconv3d(base_channels * 2, base_channels, 3, 2, padding=1, output_padding=1)
self.conv6_2 = nn.Conv3d(base_channels, 1, 3, padding=1, bias=False)
def forward(self, x):
conv0_1 = self.conv0_1(x)
conv1_0 = self.conv1_0(x)
conv2_0 = self.conv2_0(conv1_0)
conv3_0 = self.conv3_0(conv2_0)
conv1_1 = self.conv1_1(conv1_0)
conv2_1 = self.conv2_1(conv2_0)
conv3_1 = self.conv3_1(conv3_0)
conv4_0 = self.conv4_0(conv3_1)
conv5_0 = self.conv5_0(conv4_0 + conv2_1)
conv6_0 = self.conv6_0(conv5_0 + conv1_1)
conv6_2 = self.conv6_2(conv6_0 + conv0_1)
return conv6_2
class MAELoss(nn.Module):
def forward(self, pred_depth_image, gt_depth_image, depth_interval):
depth_interval = depth_interval.view(-1)
mask_valid = (~torch.eq(gt_depth_image, 0.0)).type(torch.float)
denom = torch.sum(mask_valid, dim=(1, 2, 3)) + 1e-7
masked_abs_error = mask_valid * torch.abs(pred_depth_image - gt_depth_image)
masked_mae = torch.sum(masked_abs_error, dim=(1, 2, 3))
masked_mae = torch.sum((masked_mae / depth_interval) / denom)
return masked_mae
class Valid_MAELoss(nn.Module):
def __init__(self, valid_threshold=2.0):
super(Valid_MAELoss, self).__init__()
self.valid_threshold = valid_threshold
def forward(self, pred_depth_image, gt_depth_image, depth_interval, before_depth_image):
pred_height = pred_depth_image.size(2)
pred_width = pred_depth_image.size(3)
depth_interval = depth_interval.view(-1)
mask_true = (~torch.eq(gt_depth_image, 0.0)).type(torch.float)
        before_height = before_depth_image.size(2)
        if before_height != pred_height:
before_depth_image = F.interpolate(before_depth_image, (pred_height, pred_width))
diff = torch.abs(gt_depth_image - before_depth_image) / depth_interval.view(-1, 1, 1, 1)
mask_valid = (diff < self.valid_threshold).type(torch.float)
mask_valid = mask_true * mask_valid
denom = torch.sum(mask_valid, dim=(1, 2, 3)) + 1e-7
masked_abs_error = mask_valid * torch.abs(pred_depth_image - gt_depth_image)
masked_mae = torch.sum(masked_abs_error, dim=(1, 2, 3))
masked_mae = torch.sum((masked_mae / depth_interval) / denom)
return masked_mae
| true
| true
|
1c3ff586b6fbef23eed4818abfd348673ee07652
| 2,762
|
py
|
Python
|
chars/hud_scripts/HUDTester.py
|
camsdu59/Zelda_BlenderGame
|
0f5d5d15bfa79e9f8ea15f0ebcb76bce92f77a21
|
[
"FSFAP"
] | 27
|
2016-01-13T14:16:13.000Z
|
2022-01-03T05:38:44.000Z
|
chars/hud_scripts/HUDTester.py
|
camsdu59/Zelda_BlenderGame
|
0f5d5d15bfa79e9f8ea15f0ebcb76bce92f77a21
|
[
"FSFAP"
] | 1
|
2017-04-29T00:51:26.000Z
|
2017-04-29T00:54:43.000Z
|
chars/hud_scripts/HUDTester.py
|
camsdu59/Zelda_BlenderGame
|
0f5d5d15bfa79e9f8ea15f0ebcb76bce92f77a21
|
[
"FSFAP"
] | 14
|
2016-01-20T21:02:37.000Z
|
2020-07-19T05:47:20.000Z
|
from bge import logic
from link_scripts.Gamepad import Gamepad
from hud_scripts.MessageBox import MessageBoxMode
from link_scripts.GameInit import initGame
from link_scripts.PlayerInventory import *
scene = logic.getCurrentScene()
def test(cont):
own = cont.owner
inventory = scene.objects['Inventory']
if not 'init' in own:
# init fake Player
logic.globalDict['Player'] = {}
# init fake heart
logic.globalDict['Player']['heartContainer'] = {'heart' : 5, 'maxHeart' : 5}
# init fake rupee
logic.globalDict['Player']['rupeeContainer'] = {'rupee' : 5, 'maxRupee' : 99}
logic.globalDict['Player']['Gamepad'] = Gamepad()
# Init game
initGame()
logic.inventory = PlayerInventory(None)
logic.globalDict['Player']['Inventory']['Equipement']['Swords']['basic_sword']['have'] = True
logic.globalDict['Player']['Inventory']['Equipement']['Swords']['hero_sword']['have'] = True
logic.globalDict['Player']['Inventory']['Equipement']['Shields']['wood_shield']['have'] = True
# Real test
from hud_scripts.HUD import PlayerHUD
from hud_scripts.MessageBox import MessageBox
from hud_scripts.Inventory import Inventory
# Instance
msgBox = MessageBox(scene.objects['MessageBox'])
own = PlayerHUD(own, msgBox)
# Inventory
inventory = Inventory(scene.objects['Inventory'])
#msgBox.setText("Quisque maximus odio nec est efficitur, sit amet feugiat dui aliquam. Praesent dapibus, sem sed auctor venenatis, lorem justo maximus risus, eget dignissim dolor elit id nibh! Curabitur nec interdum orci. Sed ut turpis sagittis, semper orci sed, ullamcorper purus. ")
# Update
own.updateRupee()
own.updateHeart()
own.setMiniMap("dungeon1_enter.png")
# init
own['init'] = True
# active it
#cont.activate('mainState')
else:
        # update control test
gamepad = logic.globalDict['Player']['Gamepad']
if (gamepad.isAttackPressed() and own.messageBox.active == False):
own.messageBox.displayText("Quisque maximus odio nec est efficitur, sit amet feugiat dui aliquam. Praesent dapibus, sem sed auctor venenatis, lorem justo maximus risus, eget dignissim dolor elit id nibh! Curabitur nec interdum orci. Sed ut turpis sagittis, semper orci sed, ullamcorper purus. ",
MessageBoxMode.WAIT_INPUT_TYPE)
if gamepad.isPausePressed() and not inventory.active:
own.displayInventory()
elif inventory.active and gamepad.isPausePressed():
own.closeInventory()
# update
own.main()
# update inventory
inventory.main()
| 38.901408
| 307
| 0.658219
|
from bge import logic
from link_scripts.Gamepad import Gamepad
from hud_scripts.MessageBox import MessageBoxMode
from link_scripts.GameInit import initGame
from link_scripts.PlayerInventory import *
scene = logic.getCurrentScene()
def test(cont):
own = cont.owner
inventory = scene.objects['Inventory']
if not 'init' in own:
logic.globalDict['Player'] = {}
logic.globalDict['Player']['heartContainer'] = {'heart' : 5, 'maxHeart' : 5}
logic.globalDict['Player']['rupeeContainer'] = {'rupee' : 5, 'maxRupee' : 99}
logic.globalDict['Player']['Gamepad'] = Gamepad()
initGame()
logic.inventory = PlayerInventory(None)
logic.globalDict['Player']['Inventory']['Equipement']['Swords']['basic_sword']['have'] = True
logic.globalDict['Player']['Inventory']['Equipement']['Swords']['hero_sword']['have'] = True
logic.globalDict['Player']['Inventory']['Equipement']['Shields']['wood_shield']['have'] = True
from hud_scripts.HUD import PlayerHUD
from hud_scripts.MessageBox import MessageBox
from hud_scripts.Inventory import Inventory
msgBox = MessageBox(scene.objects['MessageBox'])
own = PlayerHUD(own, msgBox)
inventory = Inventory(scene.objects['Inventory'])
own.updateRupee()
own.updateHeart()
own.setMiniMap("dungeon1_enter.png")
own['init'] = True
else:
gamepad = logic.globalDict['Player']['Gamepad']
if (gamepad.isAttackPressed() and own.messageBox.active == False):
own.messageBox.displayText("Quisque maximus odio nec est efficitur, sit amet feugiat dui aliquam. Praesent dapibus, sem sed auctor venenatis, lorem justo maximus risus, eget dignissim dolor elit id nibh! Curabitur nec interdum orci. Sed ut turpis sagittis, semper orci sed, ullamcorper purus. ",
MessageBoxMode.WAIT_INPUT_TYPE)
if gamepad.isPausePressed() and not inventory.active:
own.displayInventory()
elif inventory.active and gamepad.isPausePressed():
own.closeInventory()
own.main()
inventory.main()
| true
| true
|
1c3ff62f20f5327454cb556b874ff0cd2bb8a5d3
| 3,602
|
py
|
Python
|
build.py
|
sfaleron/march-for-science-2017-sign
|
9af7d41a734df1ca8f225f2fbea652169e323185
|
[
"Apache-2.0"
] | null | null | null |
build.py
|
sfaleron/march-for-science-2017-sign
|
9af7d41a734df1ca8f225f2fbea652169e323185
|
[
"Apache-2.0"
] | null | null | null |
build.py
|
sfaleron/march-for-science-2017-sign
|
9af7d41a734df1ca8f225f2fbea652169e323185
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
### Copyright 2017 Christopher Fuller
###
### Licensed under the Apache License, Version 2.0 (the "License");
### you may not use this file except in compliance with the License.
### You may obtain a copy of the License at
###
### http://www.apache.org/licenses/LICENSE-2.0
###
### Unless required by applicable law or agreed to in writing, software
### distributed under the License is distributed on an "AS IS" BASIS,
### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
### See the License for the specific language governing permissions and
### limitations under the License.
############################################################################
############################################################################
# sorry, just not a fan of Bash/Makefile programming
# also, not tested in Python v3, but that'll get fixed soon
# expects to be run from project's root
# decent command line validation, but could still blow up messily
# if the input SVG isn't Just So.
SVG = 'src/sign.svg'
import sys
from string import Template
import xml.etree.ElementTree as ET
TMPLS = {
'outer' : [ Template(s) if s else None for s in (
'inkscape -Cj -i $id -y 255 --export-pdf=tmp/${name}1.pdf $svg',
'mutool poster -x 2 -y 2 tmp/${name}1.pdf tmp/tiles.pdf',
'pdftk tmp/tiles.pdf burst output tmp/tile%d1.pdf',
'pdftk tmp/tile21.pdf background src/copyleft.pdf output tmp/tile22.pdf',
'',
'pdftk tmp/tile14.pdf tmp/tile25.pdf tmp/tile34.pdf tmp/tile44.pdf cat output output/${name}.pdf',
'pdfnup --nup 2x2 --outfile tmp/${name}2.pdf tmp/tile11.pdf tmp/tile22.pdf tmp/tile31.pdf tmp/tile41.pdf',
'convert tmp/${name}2.pdf -background white -alpha remove -geometry 440x340 output/${name}.png',
'zip -jn .png output/sign.zip output/${name}.pdf output/${name}.png src/assembly.pdf',
) ],
'inner' : [ Template(s) for s in (
'pdftk tmp/tile${i}${j}.pdf rotate 1east output tmp/tile${i}${k}.pdf',
'pdf2ps tmp/tile${i}${k}.pdf tmp/tile${i}${k}.ps',
'pstops -p letter "@0.9(0.425in,0.55in)" tmp/tile${i}${k}.ps tmp/tile${i}${m}.ps',
'ps2pdf tmp/tile${i}${m}.ps tmp/tile${i}${m}.pdf',
'pdftk tmp/tile${i}${m}.pdf rotate 1west output tmp/tile${i}${n}.pdf'
) ],
'inner_why_not_this_work' : [ Template(s) for s in (
'./pdfScale.sh -s 0.9 tmp/tile${i}${j}.pdf tmp/tile${i}${k}.pdf',
) ],
}
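# return the part of an ElementTree tag after the closing '}' of its namespace prefix (the local tag name)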
def nonsplz(s):
t = ''
for c in s[::-1]:
if c == '}':
break
else:
t = c+t
return t
if __name__ == '__main__':
argv = sys.argv[1:]
sides_in = argv[:2]
sides = []
while sides_in:
if sides_in[-1] in ('front', 'back'):
sides.append(sides_in[-1])
sides_in.pop()
if len(sides) == 2 and sides[0] == sides[1]:
sides.pop()
if not sides:
sides = ('front', 'back')
tree = ET.parse(SVG)
root = tree.getroot()
IDs = [ e.attrib['id'] for e in root.iter() if nonsplz(e.tag) == 'g' ]
layers = dict(list(zip(('back', 'front'), IDs)))
kwargs = dict(svg=SVG)
print('mkdir -p tmp')
for name in sides:
kwargs['name'] = name
kwargs[ 'id'] = layers[name]
for tplo in TMPLS['outer']:
if tplo:
print(tplo.substitute(**kwargs))
else:
for i,j,k,m,n in zip(
(1,2,3,4), (1,2,1,1), (2,3,2,2), (3,4,3,3), (4,5,4,4) ):
for tpli in TMPLS['inner']:
print(tpli.substitute(i=i, j=j, k=k, m=m, n=n))
print('rm -f tmp/*')
| 30.525424
| 112
| 0.574958
|
from __future__ import print_function
| true
| true
|
1c3ff6645dcf5bed9eac830cbeb144b7de90e41f
| 2,667
|
py
|
Python
|
slap.py
|
LokiL/troutslapbot
|
ddc4b9e2e3fb37f09882ee197dadd173dcccadfb
|
[
"MIT"
] | null | null | null |
slap.py
|
LokiL/troutslapbot
|
ddc4b9e2e3fb37f09882ee197dadd173dcccadfb
|
[
"MIT"
] | null | null | null |
slap.py
|
LokiL/troutslapbot
|
ddc4b9e2e3fb37f09882ee197dadd173dcccadfb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import time
import requests
import sys
if (len(sys.argv) < 2):
print("Usage:")
print(" slap.py <bot_token>")
exit(1)
botapi_url = 'https://api.telegram.org/bot'
token = sys.argv[1]
endpoint = botapi_url + token
offset = 0
print(time.ctime(), ': bot started')
while(True):
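    # long-poll the Bot API: getUpdates returns pending updates, and advancing 'offset' acknowledges them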
try:
method = 'getUpdates'
request = endpoint + '/' + method
query = {'offset': offset}
response = requests.get(request, params=query)
json = response.json()
if (json['result']):
result = json['result']
for update in result:
if 'message' in update:
message = update['message']
if 'text' in message:
text = message['text']
spl = text.split(' ')
chat_id = message['chat']['id']
command = spl[0]
msg_text = ''
if (command[:6] == '/start'):
msg_text = 'Slap someone with a wet trout using /slap username'
elif (command[:5] == '/slap'):
user_from = ''
if 'username' in message['from']:
user_from = '@' + message['from']['username']
else:
user_from = message['from']['first_name']
msg_text += user_from + ' slaps '
if (len(spl) == 1):
msg_text += 'himself'
else:
user_slap = spl[1]
if user_slap[0] != '@':
msg_text += '@'
msg_text += user_slap
msg_text += ' around a bit with a large trout'
else:
continue
method_resp = 'sendMessage'
query_resp = {'chat_id': chat_id, 'text': msg_text}
requests.get(endpoint + '/' + method_resp, params=query_resp)
# move offset
offset = int(update['update_id']) + 1
# print json
time.sleep(1)
except ValueError:
print(time.ctime(), ": Broken response: ", response)
time.sleep(60)
except KeyboardInterrupt:
print(time.ctime(), ": Ctrl-C pressed - exiting")
exit(1)
except:
print(time.ctime(), ": Unexpected error", sys.exc_info()[0])
time.sleep(300)
| 38.1
| 91
| 0.425197
|
import time
import requests
import sys
if (len(sys.argv) < 2):
print("Usage:")
print(" slap.py <bot_token>")
exit(1)
botapi_url = 'https://api.telegram.org/bot'
token = sys.argv[1]
endpoint = botapi_url + token
offset = 0
print(time.ctime(), ': bot started')
while(True):
try:
method = 'getUpdates'
request = endpoint + '/' + method
query = {'offset': offset}
response = requests.get(request, params=query)
json = response.json()
if (json['result']):
result = json['result']
for update in result:
if 'message' in update:
message = update['message']
if 'text' in message:
text = message['text']
spl = text.split(' ')
chat_id = message['chat']['id']
command = spl[0]
msg_text = ''
if (command[:6] == '/start'):
msg_text = 'Slap someone with a wet trout using /slap username'
elif (command[:5] == '/slap'):
user_from = ''
if 'username' in message['from']:
user_from = '@' + message['from']['username']
else:
user_from = message['from']['first_name']
msg_text += user_from + ' slaps '
if (len(spl) == 1):
msg_text += 'himself'
else:
user_slap = spl[1]
if user_slap[0] != '@':
msg_text += '@'
msg_text += user_slap
msg_text += ' around a bit with a large trout'
else:
continue
method_resp = 'sendMessage'
query_resp = {'chat_id': chat_id, 'text': msg_text}
requests.get(endpoint + '/' + method_resp, params=query_resp)
offset = int(update['update_id']) + 1
time.sleep(1)
except ValueError:
print(time.ctime(), ": Broken response: ", response)
time.sleep(60)
except KeyboardInterrupt:
print(time.ctime(), ": Ctrl-C pressed - exiting")
exit(1)
except:
print(time.ctime(), ": Unexpected error", sys.exc_info()[0])
time.sleep(300)
| true
| true
|
1c3ff6a03a9eb1d011ed4514c214a6fdf8f75813
| 23,810
|
py
|
Python
|
src/UIOverlay.py
|
alexberryman/poe-archnemesis-scanner
|
0f274748cbde9d3ebe2e15af59e58c7bf2aa239c
|
[
"Apache-2.0"
] | null | null | null |
src/UIOverlay.py
|
alexberryman/poe-archnemesis-scanner
|
0f274748cbde9d3ebe2e15af59e58c7bf2aa239c
|
[
"Apache-2.0"
] | null | null | null |
src/UIOverlay.py
|
alexberryman/poe-archnemesis-scanner
|
0f274748cbde9d3ebe2e15af59e58c7bf2aa239c
|
[
"Apache-2.0"
] | null | null | null |
from configparser import ConfigParser
import keyboard
import tkinter as tk
import sys
from win32clipboard import OpenClipboard, EmptyClipboard, SetClipboardText, CloseClipboard
from typing import Dict, List, Tuple
from DataClasses import PoeWindowInfo
from ArchnemesisItemsMap import ArchnemesisItemsMap
from ImageScanner import ImageScanner
from DataClasses import RecipeItemNode
from RecipeShopper import RecipeShopper
from constants import COLOR_BG, COLOR_FG_GREEN, COLOR_FG_LIGHT_GREEN, COLOR_FG_ORANGE, COLOR_FG_WHITE, FONT_BIG, FONT_SMALL
class UIOverlay:
"""
Overlay window using tkinter '-topmost' property
"""
def __init__(self, root: tk.Tk, info: PoeWindowInfo, items_map: ArchnemesisItemsMap, image_scanner: ImageScanner, recipe_shopper: RecipeShopper):
self._window_info = info
self._items_map = items_map
self._image_scanner = image_scanner
self._root = root
self._recipe_shopper = recipe_shopper
self._scan_results_window = None
self._recipe_browser_window = None
self._recipe_browser_current_root = ''
self._tooltip_window = None
self._highlight_windows_to_show = list()
self._scan_results_window_saved_position = (-1, 0)
self._settings = Settings(root, items_map, image_scanner, on_scan_hotkey=self._hotkey_pressed)
self._create_controls()
self._root.configure(bg='')
self._root.geometry(f'+{info.x + 5}+{info.y + info.title_bar_height + 5}')
if self._settings.should_run_as_overlay():
self._root.overrideredirect(True)
self._root.wm_attributes('-topmost', True)
self._root.deiconify()
@staticmethod
def create_toplevel_window(bg=''):
w = tk.Toplevel()
w.configure(bg=bg)
# Hide window outline/controls
w.overrideredirect(True)
# Make sure the window is always on top
w.wm_attributes('-topmost', True)
return w
def _hotkey_pressed(self) -> None:
if self._scan_label_text.get() == 'Scan':
self._scan(None)
elif self._scan_label_text.get() == 'Hide':
self._hide(None)
def _create_controls(self) -> None:
l = tk.Button(self._root, text='[X]', fg=COLOR_FG_GREEN, bg=COLOR_BG, font=FONT_SMALL)
l.bind('<Button-1>', sys.exit)
l.bind('<B3-Motion>', lambda event: self._drag(self._root, -5, -5, event))
l.grid(row=0, column=0)
settings = tk.Button(self._root, text='Settings', fg=COLOR_FG_GREEN, bg=COLOR_BG, font=FONT_SMALL)
settings.bind('<Button-1>', lambda _: self._settings.show())
settings.bind('<B3-Motion>', lambda event: self._drag(self._root, -5, -5, event))
settings.grid(row=0, column=1)
self._scan_label_text = tk.StringVar(self._root, value='Scan')
self._scan_label = tk.Button(self._root, textvariable=self._scan_label_text, fg=COLOR_FG_GREEN, bg=COLOR_BG, font=FONT_SMALL)
self._scan_label.bind("<Button-1>", self._scan)
self._scan_label.bind('<B3-Motion>', lambda event: self._drag(self._root, -5, -5, event))
self._scan_label.grid(row=0, column=2)
def _drag(self, window, offset_x: int, offset_y: int, event) -> Tuple[int, int]:
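        # move the window to follow a right-button drag and return its new top-left position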
x = offset_x + event.x + window.winfo_x()
y = offset_y + event.y + window.winfo_y()
window.geometry(f'+{x}+{y}')
return (x, y)
def _scan(self, _) -> None:
self._scan_label_text.set('Scanning...')
self._root.update()
results = self._image_scanner.scan()
shopping_list_mode = self._settings.is_shopping_list_mode() is True
desired_items = [x for x in self._settings.get_shopping_list().split(",") if x]
shopping_list = self._recipe_shopper.get_missing_items(desired_items, results)
print("Missing Items:", shopping_list)
main_recipe_list = self._items_map.recipes()
if shopping_list_mode:
recipe_list = [x for x in self._items_map.recipes() if x[0] in self._recipe_shopper._get_full_shopping_list(desired_items)]
else:
recipe_list = main_recipe_list
if len(results) > 0:
recipes = list()
for item, recipe in recipe_list:
screen_items = [results.get(x) for x in recipe]
if (all(screen_items) or self._settings.should_display_unavailable_recipes()):
recipes.append((item, [x[0] for x in screen_items if x is not None], item in results, all(screen_items)))
if shopping_list_mode:
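                # build a pseudo-recipe 'Trash' from the first position of up to four items in the trash inventory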
trash_inventory = self._recipe_shopper.get_trash_inventory(desired_items, results)
trash_recipe_items = [None] * min(4, len(trash_inventory.keys()))
trash_recipe_items = [trash_inventory[list(trash_inventory.keys())[i]][0] for i,x in enumerate(trash_recipe_items)]
trash_recipe = ('Trash', trash_recipe_items, False, True)
recipes.append(trash_recipe)
self._show_scan_results(results, recipes)
self._scan_label_text.set('Hide')
self._scan_label.bind('<Button-1>', self._hide)
else:
self._hide(None)
def _hide(self, _) -> None:
if self._scan_results_window is not None:
self._scan_results_window.destroy()
if self._recipe_browser_window is not None:
self._recipe_browser_window.destroy()
if self._tooltip_window is not None:
self._tooltip_window.destroy()
self._clear_highlights(None)
self._scan_label_text.set('Scan')
self._scan_label.bind('<Button-1>', self._scan)
def _show_scan_results(self, results: Dict[str, List[Tuple[int, int]]], recipes: List[Tuple[str, List[Tuple[int, int]], bool, bool]]) -> None:
self._scan_results_window = UIOverlay.create_toplevel_window()
x, y = self._scan_results_window_saved_position
if x == -1:
x = self._window_info.x + int(self._window_info.client_width / 3)
y = self._window_info.y + self._window_info.title_bar_height
self._scan_results_window.geometry(f'+{x}+{y}')
last_column = 0
if self._settings.should_display_inventory_items():
last_column = self._show_inventory_list(results)
self._show_recipes_list(results, recipes, last_column + 2)
def _show_inventory_list(self, results: Dict[str, List[Tuple[int, int]]]) -> int:
row = 0
column = 0
for item in self._items_map.items():
inventory_items = results.get(item)
if inventory_items is not None:
row, column = self._show_image_and_label(item, results, inventory_items, COLOR_FG_WHITE, f'x{len(inventory_items)} {item}', True, row, column)
return column
def _show_recipes_list(self, results: Dict[str, List[Tuple[int, int]]], recipes: List[Tuple[str, List[Tuple[int, int]], bool, bool]], column: int) -> None:
row = 0
for item, inventory_items, exists_in_inventory, available in recipes:
if exists_in_inventory:
if available:
fg = COLOR_FG_GREEN
else:
fg = COLOR_FG_LIGHT_GREEN
else:
if available:
fg = COLOR_FG_ORANGE
else:
fg = COLOR_FG_WHITE
row, column = self._show_image_and_label(item, results, inventory_items, fg, item, available, row, column)
def _show_image_and_label(self, item: str, results: Dict[str, List[Tuple[int, int]]], inventory_items: Tuple[int, int], highlight_color: str, label_text: str, highlight, row: int, column: int) -> Tuple[int, int]:
image = tk.Label(self._scan_results_window, image=self._items_map.get_display_small_image(item), bg=COLOR_BG, pady=5)
if highlight:
image.bind('<Enter>', lambda _, arg=inventory_items, color=highlight_color: self._highlight_items_in_inventory(arg, color))
image.bind('<Leave>', self._clear_highlights)
image.bind('<Button-1>', lambda _, arg1=item, arg2=results: self._show_recipe_browser_tree(arg1, arg2))
image.bind('<B3-Motion>', self._scan_results_window_drag_and_save)
image.grid(row=row, column=column)
tk.Label(self._scan_results_window, text=label_text, font=FONT_BIG, fg=highlight_color, bg=COLOR_BG).grid(row=row, column=column + 1, sticky='w', padx=5)
row += 1
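        # after 10 rows, continue in a fresh image/label column pair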
if row % 10 == 0:
column += 2
row = 0
return (row, column)
def _scan_results_window_drag_and_save(self, event) -> None:
self._scan_results_window_saved_position = self._drag(self._scan_results_window, -5, -5, event)
def _show_recipe_browser_tree(self, item: str, results: Dict[str, List[Tuple[int, int]]]) -> None:
if self._recipe_browser_window is not None:
self._recipe_browser_window.destroy()
self._destroy_tooltip_and_clear_highlights(None)
# If the user clicks on the current root then close the tree
if self._recipe_browser_current_root == item:
return
self._recipe_browser_current_root = item
self._recipe_browser_window = UIOverlay.create_toplevel_window()
self._recipe_browser_window.geometry(f'+{self._scan_results_window.winfo_x()}+{self._scan_results_window.winfo_y() + self._scan_results_window.winfo_height() + 40}')
tree = self._items_map.get_subtree_for(item)
if self._settings.should_copy_recipe_to_clipboard():
self._copy_tree_items_to_clipboard(tree)
def draw_tree(node, row, column):
children_column = column
for c in node.components:
children_column = draw_tree(c, row + 2, children_column)
columnspan = max(1, children_column - column)
if node.item in results:
bg = COLOR_FG_GREEN
else:
bg = COLOR_BG
l = tk.Label(self._recipe_browser_window, image=self._items_map.get_display_small_image(node.item), bg=bg, relief=tk.SUNKEN)
l.bind('<Button-1>', lambda _, arg1=node.item, arg2=results: self._show_recipe_browser_tree(arg1, arg2))
l.bind('<B3-Motion>', lambda event: self._drag(self._recipe_browser_window, -5, -5, event))
l.bind('<Enter>', lambda _, arg1=self._recipe_browser_window, arg2=results.get(node.item), arg3=node.item: self._create_tooltip_and_highlight(arg1, arg2, arg3))
l.bind('<Leave>', self._destroy_tooltip_and_clear_highlights)
l.grid(row=row, column=column, columnspan=columnspan)
if len(node.components) > 0:
f = tk.Frame(self._recipe_browser_window, bg=COLOR_BG, width=(self._items_map.small_image_size + 4) * columnspan, height=3)
f.grid(row=row + 1, column=column, columnspan=columnspan)
return children_column + 1
total_columns = draw_tree(tree, 1, 0)
for c in range(total_columns):
self._recipe_browser_window.grid_columnconfigure(c, minsize=self._items_map.small_image_size)
# Show parents on row 0
parents = [RecipeItemNode(p, []) for p in self._items_map.get_parent_recipes_for(item)]
if len(parents) > 0:
tk.Label(self._recipe_browser_window, text='Used in:', bg=COLOR_BG, fg=COLOR_FG_GREEN, font=FONT_BIG).grid(row=0, column=0)
for column, p in enumerate(parents):
# Reuse the same function for convenience
draw_tree(p, 0, column + 1)
def _highlight_items_in_inventory(self, inventory_items: List[Tuple[int, int]], color: str) -> None:
self._highlight_windows_to_show = list()
for (x, y) in inventory_items:
x_offset, y_offset, _, _ = self._image_scanner.scanner_window_size
x += x_offset
y += y_offset
width = int(self._items_map.image_size[0] * 0.7)
height = int(self._items_map.image_size[1] * 0.7)
w = UIOverlay.create_toplevel_window(bg=color)
w.geometry(f'{width}x{height}+{x}+{y}')
self._highlight_windows_to_show.append(w)
def _clear_highlights(self, _) -> None:
for w in self._highlight_windows_to_show:
w.destroy()
def _create_tooltip_and_highlight(self, window, inventory_items, text) -> None:
if self._tooltip_window is not None:
self._tooltip_window.destroy()
self._tooltip_window = UIOverlay.create_toplevel_window()
self._tooltip_window.geometry(f'+{window.winfo_x()}+{window.winfo_y() - 40}')
tk.Label(self._tooltip_window, text=text, font=FONT_BIG, bg=COLOR_BG, fg=COLOR_FG_GREEN).pack()
if inventory_items is not None:
self._highlight_items_in_inventory(inventory_items, COLOR_FG_GREEN)
def _copy_tree_items_to_clipboard(self, tree):
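        # put an anchored alternation regex '^(a|b|c)' of the recipe components on the clipboard (single items are used as-is)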
if len(tree.components) > 0:
search_string = '|'.join((str(x.item) for x in tree.components))
else:
search_string = tree.item
OpenClipboard()
EmptyClipboard()
SetClipboardText('^('+search_string+')')
CloseClipboard()
def _destroy_tooltip_and_clear_highlights(self, _) -> None:
if self._tooltip_window is not None:
self._tooltip_window.destroy()
self._clear_highlights(None)
def run(self) -> None:
self._root.mainloop()
class Settings:
def __init__(self, root: tk.Tk, items_map: ArchnemesisItemsMap, image_scanner, on_scan_hotkey):
self._root = root
self._items_map = items_map
self._image_scanner = image_scanner
self._on_scan_hotkey = on_scan_hotkey
self._window = None
self._config = ConfigParser()
self._config_file = 'settings.ini'
self._config.read(self._config_file)
if 'settings' not in self._config:
self._config.add_section('settings')
s = self._config['settings']
scanner_window_size = s.get('scanner_window')
if scanner_window_size is not None:
self._image_scanner.scanner_window_size = tuple(map(int, scanner_window_size.replace('(', '').replace(')', '').replace(',', '').split()))
self._items_map.scale = float(s.get('image_scale', self._items_map.scale))
self._image_scanner.confidence_threshold = float(s.get('confidence_threshold', self._image_scanner.confidence_threshold))
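        # boolean settings are persisted as strings, so compare against the literal 'True'/'False'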
b = s.get('display_inventory_items')
self._display_inventory_items = True if b is not None and b == 'True' else False
b = s.get('display_unavailable_recipes')
self._display_unavailable_recipes = True if b is not None and b == 'True' else False
b = s.get('copy_recipe_to_clipboard')
self._copy_recipe_to_clipboard = True if b is not None and b == 'True' else False
b = s.get('scan_hotkey')
self._scan_hotkey = b if b is not None else ''
self._set_scan_hotkey()
b = s.get('run_as_overlay')
self._run_as_overlay = True if b is None or b == 'True' else False
b = s.get('shopping_list_mode')
self._shopping_list_mode = False if b is None or b == 'False' else True
b = s.get('shopping_list')
self._shopping_list = '' if b is None else b
def show(self) -> None:
if self._window is not None:
return
self._window = tk.Toplevel()
self._window.geometry('+100+200')
self._window.protocol('WM_DELETE_WINDOW', self._close)
current_scanner_window = f'{self._image_scanner.scanner_window_size}'.replace('(', '').replace(')', '')
v = tk.StringVar(self._window, value=current_scanner_window)
self._scanner_window_entry = tk.Entry(self._window, textvariable=v)
self._scanner_window_entry.grid(row=0, column=0)
tk.Button(self._window, text='Set scanner window', command=self._update_scanner_window).grid(row=0, column=1)
v = tk.DoubleVar(self._window, value=self._items_map.scale)
self._scale_entry = tk.Entry(self._window, textvariable=v)
self._scale_entry.grid(row=1, column=0)
tk.Button(self._window, text='Set image scale', command=self._update_scale).grid(row=1, column=1)
v = tk.DoubleVar(self._window, value=self._image_scanner.confidence_threshold)
self._confidence_threshold_entry = tk.Entry(self._window, textvariable=v)
self._confidence_threshold_entry.grid(row=2, column=0)
tk.Button(self._window, text='Set confidence threshold', command=self._update_confidence_threshold).grid(row=2, column=1)
v = tk.StringVar(self._window, value=self._scan_hotkey)
self._scan_hotkey_entry = tk.Entry(self._window, textvariable=v)
self._scan_hotkey_entry.grid(row=3, column=0)
tk.Button(self._window, text='Set scan/hide hotkey', command=self._update_scan_hotkey).grid(row=3, column=1)
c = tk.Checkbutton(self._window, text='Display inventory items', command=self._update_display_inventory_items)
c.grid(row=4, column=0, columnspan=2)
if self._display_inventory_items:
c.select()
c = tk.Checkbutton(self._window, text='Display unavailable recipes', command=self._update_display_unavailable_recipes)
c.grid(row=5, column=0, columnspan=2)
if self._display_unavailable_recipes:
c.select()
c = tk.Checkbutton(self._window, text='Copy recipe to clipboard', command=self._update_copy_recipe_to_clipboard)
c.grid(row=6, column=0, columnspan=2)
if self._copy_recipe_to_clipboard:
c.select()
c = tk.Checkbutton(self._window, text='Run as overlay', command=self._update_run_as_overlay)
c.grid(row=7, column=0, columnspan=2)
if self._run_as_overlay:
c.select()
c = tk.Checkbutton(self._window, text='Shopping List Mode', command=self._update_shopping_list_mode)
c.grid(row=8, column=0, columnspan=2)
if self._shopping_list_mode:
c.select()
self._shopping_list_label = tk.StringVar()
self._shopping_list_label.set("Enter a comma separated list of items")
c = tk.Label(self._window, textvariable=self._shopping_list_label).grid(row=9, column=0, columnspan=2)
v = tk.StringVar(self._window, value=self._shopping_list)
self._shopping_list_entry = tk.Entry(self._window, textvariable=v)
self._shopping_list_entry.grid(row=10, column=0)
tk.Button(self._window, text='Set shopping list', command=self._update_shopping_list).grid(row=10, column=1)
def _close(self) -> None:
if self._window is not None:
self._window.destroy()
self._window = None
def _save_config(self) -> None:
self._config['settings']['scanner_window'] = str(self._image_scanner.scanner_window_size)
self._config['settings']['image_scale'] = str(self._items_map.scale)
self._config['settings']['confidence_threshold'] = str(self._image_scanner.confidence_threshold)
self._config['settings']['display_inventory_items'] = str(self._display_inventory_items)
self._config['settings']['display_unavailable_recipes'] = str(self._display_unavailable_recipes)
self._config['settings']['copy_recipe_to_clipboard'] = str(self._copy_recipe_to_clipboard)
self._config['settings']['scan_hotkey'] = str(self._scan_hotkey)
self._config['settings']['run_as_overlay'] = str(self._run_as_overlay)
self._config['settings']['shopping_list_mode'] = str(self._shopping_list_mode)
self._config['settings']['shopping_list'] = str(self._shopping_list)
with open(self._config_file, 'w') as f:
self._config.write(f)
def _update_scanner_window(self) -> None:
try:
x, y, width, height = map(int, self._scanner_window_entry.get().replace(',', '').split())
except ValueError:
print('Unable to parse scanner window parameters')
return
scanner_window_to_show = UIOverlay.create_toplevel_window(bg='white')
scanner_window_to_show.geometry(f'{width}x{height}+{x}+{y}')
self._image_scanner.scanner_window_size = (x, y, width, height)
scanner_window_to_show.after(200, scanner_window_to_show.destroy)
self._save_config()
def _update_scale(self) -> None:
try:
new_scale = float(self._scale_entry.get())
except ValueError:
print('Unable to parse image scale parameter')
return
self._items_map.scale = new_scale
self._save_config()
def _update_confidence_threshold(self) -> None:
try:
new_threshold = float(self._confidence_threshold_entry.get())
except ValueError:
print('Unable to parse confidence threshold parameter')
return
self._image_scanner.confidence_threshold = new_threshold
self._save_config()
def _update_display_inventory_items(self) -> None:
self._display_inventory_items = not self._display_inventory_items
self._save_config()
def _update_display_unavailable_recipes(self) -> None:
self._display_unavailable_recipes = not self._display_unavailable_recipes
self._save_config()
def _update_copy_recipe_to_clipboard(self) -> None:
self._copy_recipe_to_clipboard = not self._copy_recipe_to_clipboard
self._save_config()
def _update_scan_hotkey(self) -> None:
try:
keyboard.remove_hotkey(self._scan_hotkey)
except KeyError:
# The hotkey didn't exist or self._scan_hotkey had invalid hotkey
pass
self._scan_hotkey = self._scan_hotkey_entry.get()
self._set_scan_hotkey()
self._save_config()
def _set_scan_hotkey(self) -> None:
if self._scan_hotkey:
try:
keyboard.add_hotkey(self._scan_hotkey, self._on_scan_hotkey)
except ValueError:
# TODO: show the error in the ui
print('Invalid scan hotkey!')
def _update_run_as_overlay(self) -> None:
self._run_as_overlay = not self._run_as_overlay
self._save_config()
def _update_shopping_list_mode(self) -> None:
self._shopping_list_mode = not self._shopping_list_mode
self._save_config()
def _update_shopping_list(self) -> None:
shopping_list = list(map(lambda x: x.strip(), self._shopping_list_entry.get().split(",")))
if len(shopping_list) == 0 or len(self._shopping_list_entry.get().strip()) == 0:
self._update_shopping_list_label("Error: Must enter at least one item")
return
for item in shopping_list:
if item not in self._items_map.items():
self._update_shopping_list_label('Error: unknown item "{0}"'.format(item))
return
self._update_shopping_list_label("Shopping list updated!")
self._shopping_list = ",".join(shopping_list)
self._save_config()
def _update_shopping_list_label(self, value) -> None:
self._shopping_list_label.set(value)
self._window.update_idletasks()
def should_display_inventory_items(self) -> bool:
return self._display_inventory_items
def should_display_unavailable_recipes(self) -> bool:
return self._display_unavailable_recipes
def should_copy_recipe_to_clipboard(self) -> bool:
return self._copy_recipe_to_clipboard
def should_run_as_overlay(self) -> bool:
return self._run_as_overlay
def is_shopping_list_mode(self) -> bool:
return self._shopping_list_mode
def get_shopping_list(self) -> str:
return self._shopping_list
| 47.52495
| 216
| 0.663251
|
from configparser import ConfigParser
import keyboard
import tkinter as tk
import sys
from win32clipboard import OpenClipboard, EmptyClipboard, SetClipboardText, CloseClipboard
from typing import Dict, List, Tuple
from DataClasses import PoeWindowInfo
from ArchnemesisItemsMap import ArchnemesisItemsMap
from ImageScanner import ImageScanner
from DataClasses import RecipeItemNode
from RecipeShopper import RecipeShopper
from constants import COLOR_BG, COLOR_FG_GREEN, COLOR_FG_LIGHT_GREEN, COLOR_FG_ORANGE, COLOR_FG_WHITE, FONT_BIG, FONT_SMALL
class UIOverlay:
def __init__(self, root: tk.Tk, info: PoeWindowInfo, items_map: ArchnemesisItemsMap, image_scanner: ImageScanner, recipe_shopper: RecipeShopper):
self._window_info = info
self._items_map = items_map
self._image_scanner = image_scanner
self._root = root
self._recipe_shopper = recipe_shopper
self._scan_results_window = None
self._recipe_browser_window = None
self._recipe_browser_current_root = ''
self._tooltip_window = None
self._highlight_windows_to_show = list()
self._scan_results_window_saved_position = (-1, 0)
self._settings = Settings(root, items_map, image_scanner, on_scan_hotkey=self._hotkey_pressed)
self._create_controls()
self._root.configure(bg='')
self._root.geometry(f'+{info.x + 5}+{info.y + info.title_bar_height + 5}')
if self._settings.should_run_as_overlay():
self._root.overrideredirect(True)
self._root.wm_attributes('-topmost', True)
self._root.deiconify()
@staticmethod
def create_toplevel_window(bg=''):
w = tk.Toplevel()
w.configure(bg=bg)
w.overrideredirect(True)
w.wm_attributes('-topmost', True)
return w
def _hotkey_pressed(self) -> None:
if self._scan_label_text.get() == 'Scan':
self._scan(None)
elif self._scan_label_text.get() == 'Hide':
self._hide(None)
def _create_controls(self) -> None:
l = tk.Button(self._root, text='[X]', fg=COLOR_FG_GREEN, bg=COLOR_BG, font=FONT_SMALL)
l.bind('<Button-1>', sys.exit)
l.bind('<B3-Motion>', lambda event: self._drag(self._root, -5, -5, event))
l.grid(row=0, column=0)
settings = tk.Button(self._root, text='Settings', fg=COLOR_FG_GREEN, bg=COLOR_BG, font=FONT_SMALL)
settings.bind('<Button-1>', lambda _: self._settings.show())
settings.bind('<B3-Motion>', lambda event: self._drag(self._root, -5, -5, event))
settings.grid(row=0, column=1)
self._scan_label_text = tk.StringVar(self._root, value='Scan')
self._scan_label = tk.Button(self._root, textvariable=self._scan_label_text, fg=COLOR_FG_GREEN, bg=COLOR_BG, font=FONT_SMALL)
self._scan_label.bind("<Button-1>", self._scan)
self._scan_label.bind('<B3-Motion>', lambda event: self._drag(self._root, -5, -5, event))
self._scan_label.grid(row=0, column=2)
def _drag(self, window, offset_x: int, offset_y: int, event) -> Tuple[int, int]:
x = offset_x + event.x + window.winfo_x()
y = offset_y + event.y + window.winfo_y()
window.geometry(f'+{x}+{y}')
return (x, y)
def _scan(self, _) -> None:
self._scan_label_text.set('Scanning...')
self._root.update()
results = self._image_scanner.scan()
shopping_list_mode = self._settings.is_shopping_list_mode() is True
desired_items = [x for x in self._settings.get_shopping_list().split(",") if x]
shopping_list = self._recipe_shopper.get_missing_items(desired_items, results)
print("Missing Items:", shopping_list)
main_recipe_list = self._items_map.recipes()
if shopping_list_mode:
recipe_list = [x for x in self._items_map.recipes() if x[0] in self._recipe_shopper._get_full_shopping_list(desired_items)]
else:
recipe_list = main_recipe_list
if len(results) > 0:
recipes = list()
for item, recipe in recipe_list:
screen_items = [results.get(x) for x in recipe]
if (all(screen_items) or self._settings.should_display_unavailable_recipes()):
recipes.append((item, [x[0] for x in screen_items if x is not None], item in results, all(screen_items)))
if shopping_list_mode:
trash_inventory = self._recipe_shopper.get_trash_inventory(desired_items, results)
trash_recipe_items = [None] * min(4, len(trash_inventory.keys()))
trash_recipe_items = [trash_inventory[list(trash_inventory.keys())[i]][0] for i,x in enumerate(trash_recipe_items)]
trash_recipe = ('Trash', trash_recipe_items, False, True)
recipes.append(trash_recipe)
self._show_scan_results(results, recipes)
self._scan_label_text.set('Hide')
self._scan_label.bind('<Button-1>', self._hide)
else:
self._hide(None)
def _hide(self, _) -> None:
if self._scan_results_window is not None:
self._scan_results_window.destroy()
if self._recipe_browser_window is not None:
self._recipe_browser_window.destroy()
if self._tooltip_window is not None:
self._tooltip_window.destroy()
self._clear_highlights(None)
self._scan_label_text.set('Scan')
self._scan_label.bind('<Button-1>', self._scan)
def _show_scan_results(self, results: Dict[str, List[Tuple[int, int]]], recipes: List[Tuple[str, List[Tuple[int, int]], bool, bool]]) -> None:
self._scan_results_window = UIOverlay.create_toplevel_window()
x, y = self._scan_results_window_saved_position
if x == -1:
x = self._window_info.x + int(self._window_info.client_width / 3)
y = self._window_info.y + self._window_info.title_bar_height
self._scan_results_window.geometry(f'+{x}+{y}')
last_column = 0
if self._settings.should_display_inventory_items():
last_column = self._show_inventory_list(results)
self._show_recipes_list(results, recipes, last_column + 2)
def _show_inventory_list(self, results: Dict[str, List[Tuple[int, int]]]) -> int:
row = 0
column = 0
for item in self._items_map.items():
inventory_items = results.get(item)
if inventory_items is not None:
row, column = self._show_image_and_label(item, results, inventory_items, COLOR_FG_WHITE, f'x{len(inventory_items)} {item}', True, row, column)
return column
def _show_recipes_list(self, results: Dict[str, List[Tuple[int, int]]], recipes: List[Tuple[str, List[Tuple[int, int]], bool, bool]], column: int) -> None:
row = 0
for item, inventory_items, exists_in_inventory, available in recipes:
if exists_in_inventory:
if available:
fg = COLOR_FG_GREEN
else:
fg = COLOR_FG_LIGHT_GREEN
else:
if available:
fg = COLOR_FG_ORANGE
else:
fg = COLOR_FG_WHITE
row, column = self._show_image_and_label(item, results, inventory_items, fg, item, available, row, column)
def _show_image_and_label(self, item: str, results: Dict[str, List[Tuple[int, int]]], inventory_items: Tuple[int, int], highlight_color: str, label_text: str, highlight, row: int, column: int) -> Tuple[int, int]:
image = tk.Label(self._scan_results_window, image=self._items_map.get_display_small_image(item), bg=COLOR_BG, pady=5)
if highlight:
image.bind('<Enter>', lambda _, arg=inventory_items, color=highlight_color: self._highlight_items_in_inventory(arg, color))
image.bind('<Leave>', self._clear_highlights)
image.bind('<Button-1>', lambda _, arg1=item, arg2=results: self._show_recipe_browser_tree(arg1, arg2))
image.bind('<B3-Motion>', self._scan_results_window_drag_and_save)
image.grid(row=row, column=column)
tk.Label(self._scan_results_window, text=label_text, font=FONT_BIG, fg=highlight_color, bg=COLOR_BG).grid(row=row, column=column + 1, sticky='w', padx=5)
row += 1
if row % 10 == 0:
column += 2
row = 0
return (row, column)
def _scan_results_window_drag_and_save(self, event) -> None:
self._scan_results_window_saved_position = self._drag(self._scan_results_window, -5, -5, event)
def _show_recipe_browser_tree(self, item: str, results: Dict[str, List[Tuple[int, int]]]) -> None:
if self._recipe_browser_window is not None:
self._recipe_browser_window.destroy()
self._destroy_tooltip_and_clear_highlights(None)
if self._recipe_browser_current_root == item:
return
self._recipe_browser_current_root = item
self._recipe_browser_window = UIOverlay.create_toplevel_window()
self._recipe_browser_window.geometry(f'+{self._scan_results_window.winfo_x()}+{self._scan_results_window.winfo_y() + self._scan_results_window.winfo_height() + 40}')
tree = self._items_map.get_subtree_for(item)
if self._settings.should_copy_recipe_to_clipboard():
self._copy_tree_items_to_clipboard(tree)
def draw_tree(node, row, column):
children_column = column
for c in node.components:
children_column = draw_tree(c, row + 2, children_column)
columnspan = max(1, children_column - column)
if node.item in results:
bg = COLOR_FG_GREEN
else:
bg = COLOR_BG
l = tk.Label(self._recipe_browser_window, image=self._items_map.get_display_small_image(node.item), bg=bg, relief=tk.SUNKEN)
l.bind('<Button-1>', lambda _, arg1=node.item, arg2=results: self._show_recipe_browser_tree(arg1, arg2))
l.bind('<B3-Motion>', lambda event: self._drag(self._recipe_browser_window, -5, -5, event))
l.bind('<Enter>', lambda _, arg1=self._recipe_browser_window, arg2=results.get(node.item), arg3=node.item: self._create_tooltip_and_highlight(arg1, arg2, arg3))
l.bind('<Leave>', self._destroy_tooltip_and_clear_highlights)
l.grid(row=row, column=column, columnspan=columnspan)
if len(node.components) > 0:
f = tk.Frame(self._recipe_browser_window, bg=COLOR_BG, width=(self._items_map.small_image_size + 4) * columnspan, height=3)
f.grid(row=row + 1, column=column, columnspan=columnspan)
return children_column + 1
total_columns = draw_tree(tree, 1, 0)
for c in range(total_columns):
self._recipe_browser_window.grid_columnconfigure(c, minsize=self._items_map.small_image_size)
parents = [RecipeItemNode(p, []) for p in self._items_map.get_parent_recipes_for(item)]
if len(parents) > 0:
tk.Label(self._recipe_browser_window, text='Used in:', bg=COLOR_BG, fg=COLOR_FG_GREEN, font=FONT_BIG).grid(row=0, column=0)
for column, p in enumerate(parents):
draw_tree(p, 0, column + 1)
def _highlight_items_in_inventory(self, inventory_items: List[Tuple[int, int]], color: str) -> None:
self._highlight_windows_to_show = list()
for (x, y) in inventory_items:
x_offset, y_offset, _, _ = self._image_scanner.scanner_window_size
x += x_offset
y += y_offset
width = int(self._items_map.image_size[0] * 0.7)
height = int(self._items_map.image_size[1] * 0.7)
w = UIOverlay.create_toplevel_window(bg=color)
w.geometry(f'{width}x{height}+{x}+{y}')
self._highlight_windows_to_show.append(w)
def _clear_highlights(self, _) -> None:
for w in self._highlight_windows_to_show:
w.destroy()
def _create_tooltip_and_highlight(self, window, inventory_items, text) -> None:
if self._tooltip_window is not None:
self._tooltip_window.destroy()
self._tooltip_window = UIOverlay.create_toplevel_window()
self._tooltip_window.geometry(f'+{window.winfo_x()}+{window.winfo_y() - 40}')
tk.Label(self._tooltip_window, text=text, font=FONT_BIG, bg=COLOR_BG, fg=COLOR_FG_GREEN).pack()
if inventory_items is not None:
self._highlight_items_in_inventory(inventory_items, COLOR_FG_GREEN)
def _copy_tree_items_to_clipboard(self, tree):
if len(tree.components) > 0:
search_string = '|'.join((str(x.item) for x in tree.components))
else:
search_string = tree.item
OpenClipboard()
EmptyClipboard()
SetClipboardText('^('+search_string+')')
CloseClipboard()
def _destroy_tooltip_and_clear_highlights(self, _) -> None:
if self._tooltip_window is not None:
self._tooltip_window.destroy()
self._clear_highlights(None)
def run(self) -> None:
self._root.mainloop()
class Settings:
def __init__(self, root: tk.Tk, items_map: ArchnemesisItemsMap, image_scanner, on_scan_hotkey):
self._root = root
self._items_map = items_map
self._image_scanner = image_scanner
self._on_scan_hotkey = on_scan_hotkey
self._window = None
self._config = ConfigParser()
self._config_file = 'settings.ini'
self._config.read(self._config_file)
if 'settings' not in self._config:
self._config.add_section('settings')
s = self._config['settings']
scanner_window_size = s.get('scanner_window')
if scanner_window_size is not None:
self._image_scanner.scanner_window_size = tuple(map(int, scanner_window_size.replace('(', '').replace(')', '').replace(',', '').split()))
self._items_map.scale = float(s.get('image_scale', self._items_map.scale))
self._image_scanner.confidence_threshold = float(s.get('confidence_threshold', self._image_scanner.confidence_threshold))
        b = s.get('display_inventory_items')
        self._display_inventory_items = b == 'True'
        b = s.get('display_unavailable_recipes')
        self._display_unavailable_recipes = b == 'True'
        b = s.get('copy_recipe_to_clipboard')
        self._copy_recipe_to_clipboard = b == 'True'
        b = s.get('scan_hotkey')
        self._scan_hotkey = b if b is not None else ''
        self._set_scan_hotkey()
        b = s.get('run_as_overlay')
        self._run_as_overlay = b is None or b == 'True'
        b = s.get('shopping_list_mode')
        self._shopping_list_mode = b is not None and b != 'False'
b = s.get('shopping_list')
self._shopping_list = '' if b is None else b
def show(self) -> None:
if self._window is not None:
return
self._window = tk.Toplevel()
self._window.geometry('+100+200')
self._window.protocol('WM_DELETE_WINDOW', self._close)
current_scanner_window = f'{self._image_scanner.scanner_window_size}'.replace('(', '').replace(')', '')
v = tk.StringVar(self._window, value=current_scanner_window)
self._scanner_window_entry = tk.Entry(self._window, textvariable=v)
self._scanner_window_entry.grid(row=0, column=0)
tk.Button(self._window, text='Set scanner window', command=self._update_scanner_window).grid(row=0, column=1)
v = tk.DoubleVar(self._window, value=self._items_map.scale)
self._scale_entry = tk.Entry(self._window, textvariable=v)
self._scale_entry.grid(row=1, column=0)
tk.Button(self._window, text='Set image scale', command=self._update_scale).grid(row=1, column=1)
v = tk.DoubleVar(self._window, value=self._image_scanner.confidence_threshold)
self._confidence_threshold_entry = tk.Entry(self._window, textvariable=v)
self._confidence_threshold_entry.grid(row=2, column=0)
tk.Button(self._window, text='Set confidence threshold', command=self._update_confidence_threshold).grid(row=2, column=1)
v = tk.StringVar(self._window, value=self._scan_hotkey)
self._scan_hotkey_entry = tk.Entry(self._window, textvariable=v)
self._scan_hotkey_entry.grid(row=3, column=0)
tk.Button(self._window, text='Set scan/hide hotkey', command=self._update_scan_hotkey).grid(row=3, column=1)
c = tk.Checkbutton(self._window, text='Display inventory items', command=self._update_display_inventory_items)
c.grid(row=4, column=0, columnspan=2)
if self._display_inventory_items:
c.select()
c = tk.Checkbutton(self._window, text='Display unavailable recipes', command=self._update_display_unavailable_recipes)
c.grid(row=5, column=0, columnspan=2)
if self._display_unavailable_recipes:
c.select()
c = tk.Checkbutton(self._window, text='Copy recipe to clipboard', command=self._update_copy_recipe_to_clipboard)
c.grid(row=6, column=0, columnspan=2)
if self._copy_recipe_to_clipboard:
c.select()
c = tk.Checkbutton(self._window, text='Run as overlay', command=self._update_run_as_overlay)
c.grid(row=7, column=0, columnspan=2)
if self._run_as_overlay:
c.select()
c = tk.Checkbutton(self._window, text='Shopping List Mode', command=self._update_shopping_list_mode)
c.grid(row=8, column=0, columnspan=2)
if self._shopping_list_mode:
c.select()
self._shopping_list_label = tk.StringVar()
        self._shopping_list_label.set("Enter a comma-separated list of items")
        tk.Label(self._window, textvariable=self._shopping_list_label).grid(row=9, column=0, columnspan=2)
v = tk.StringVar(self._window, value=self._shopping_list)
self._shopping_list_entry = tk.Entry(self._window, textvariable=v)
self._shopping_list_entry.grid(row=10, column=0)
tk.Button(self._window, text='Set shopping list', command=self._update_shopping_list).grid(row=10, column=1)
def _close(self) -> None:
if self._window is not None:
self._window.destroy()
self._window = None
def _save_config(self) -> None:
self._config['settings']['scanner_window'] = str(self._image_scanner.scanner_window_size)
self._config['settings']['image_scale'] = str(self._items_map.scale)
self._config['settings']['confidence_threshold'] = str(self._image_scanner.confidence_threshold)
self._config['settings']['display_inventory_items'] = str(self._display_inventory_items)
self._config['settings']['display_unavailable_recipes'] = str(self._display_unavailable_recipes)
self._config['settings']['copy_recipe_to_clipboard'] = str(self._copy_recipe_to_clipboard)
self._config['settings']['scan_hotkey'] = str(self._scan_hotkey)
self._config['settings']['run_as_overlay'] = str(self._run_as_overlay)
self._config['settings']['shopping_list_mode'] = str(self._shopping_list_mode)
self._config['settings']['shopping_list'] = str(self._shopping_list)
with open(self._config_file, 'w') as f:
self._config.write(f)
def _update_scanner_window(self) -> None:
try:
x, y, width, height = map(int, self._scanner_window_entry.get().replace(',', '').split())
except ValueError:
print('Unable to parse scanner window parameters')
return
scanner_window_to_show = UIOverlay.create_toplevel_window(bg='white')
scanner_window_to_show.geometry(f'{width}x{height}+{x}+{y}')
self._image_scanner.scanner_window_size = (x, y, width, height)
scanner_window_to_show.after(200, scanner_window_to_show.destroy)
self._save_config()
def _update_scale(self) -> None:
try:
new_scale = float(self._scale_entry.get())
except ValueError:
print('Unable to parse image scale parameter')
return
self._items_map.scale = new_scale
self._save_config()
def _update_confidence_threshold(self) -> None:
try:
new_threshold = float(self._confidence_threshold_entry.get())
except ValueError:
print('Unable to parse confidence threshold parameter')
return
self._image_scanner.confidence_threshold = new_threshold
self._save_config()
def _update_display_inventory_items(self) -> None:
self._display_inventory_items = not self._display_inventory_items
self._save_config()
def _update_display_unavailable_recipes(self) -> None:
self._display_unavailable_recipes = not self._display_unavailable_recipes
self._save_config()
def _update_copy_recipe_to_clipboard(self) -> None:
self._copy_recipe_to_clipboard = not self._copy_recipe_to_clipboard
self._save_config()
def _update_scan_hotkey(self) -> None:
try:
keyboard.remove_hotkey(self._scan_hotkey)
except KeyError:
pass
self._scan_hotkey = self._scan_hotkey_entry.get()
self._set_scan_hotkey()
self._save_config()
def _set_scan_hotkey(self) -> None:
if self._scan_hotkey:
try:
keyboard.add_hotkey(self._scan_hotkey, self._on_scan_hotkey)
except ValueError:
# TODO: show the error in the ui
print('Invalid scan hotkey!')
def _update_run_as_overlay(self) -> None:
self._run_as_overlay = not self._run_as_overlay
self._save_config()
def _update_shopping_list_mode(self) -> None:
self._shopping_list_mode = not self._shopping_list_mode
self._save_config()
def _update_shopping_list(self) -> None:
shopping_list = list(map(lambda x: x.strip(), self._shopping_list_entry.get().split(",")))
if len(shopping_list) == 0 or len(self._shopping_list_entry.get().strip()) == 0:
self._update_shopping_list_label("Error: Must enter at least one item")
return
for item in shopping_list:
if item not in self._items_map.items():
self._update_shopping_list_label('Error: unknown item "{0}"'.format(item))
return
self._update_shopping_list_label("Shopping list updated!")
self._shopping_list = ",".join(shopping_list)
self._save_config()
def _update_shopping_list_label(self, value) -> None:
self._shopping_list_label.set(value)
self._window.update_idletasks()
def should_display_inventory_items(self) -> bool:
return self._display_inventory_items
def should_display_unavailable_recipes(self) -> bool:
return self._display_unavailable_recipes
def should_copy_recipe_to_clipboard(self) -> bool:
return self._copy_recipe_to_clipboard
def should_run_as_overlay(self) -> bool:
return self._run_as_overlay
def is_shopping_list_mode(self) -> bool:
return self._shopping_list_mode
def get_shopping_list(self) -> str:
return self._shopping_list
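# A minimal sketch of the settings.ini layout, assuming only the keys written
# by _save_config above; the concrete values are illustrative placeholders.
#
# [settings]
# scanner_window = (0, 0, 1920, 1080)
# image_scale = 1.0
# confidence_threshold = 0.94
# display_inventory_items = True
# display_unavailable_recipes = False
# copy_recipe_to_clipboard = False
# scan_hotkey = alt+n
# run_as_overlay = True
# shopping_list_mode = False
# shopping_list = item_a,item_b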
| true
| true
|
1c3ff6b906e239981544effbc56300d40607524e
| 1,365
|
py
|
Python
|
pysynphot/test/test_spectral_element.py
|
lheinke/pysynphot
|
b4a5eda2a6227b2f5782da22140f00fc087439cb
|
[
"BSD-3-Clause"
] | 24
|
2015-01-04T23:38:21.000Z
|
2022-02-01T00:11:07.000Z
|
pysynphot/test/test_spectral_element.py
|
lheinke/pysynphot
|
b4a5eda2a6227b2f5782da22140f00fc087439cb
|
[
"BSD-3-Clause"
] | 126
|
2015-01-29T14:50:37.000Z
|
2022-02-15T01:58:13.000Z
|
pysynphot/test/test_spectral_element.py
|
lheinke/pysynphot
|
b4a5eda2a6227b2f5782da22140f00fc087439cb
|
[
"BSD-3-Clause"
] | 25
|
2015-02-09T12:12:02.000Z
|
2021-09-09T13:06:54.000Z
|
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from numpy.testing import assert_allclose
from ..obsbandpass import ObsBandpass
from ..spectrum import ArraySpectralElement
def test_sample_units():
"""Test that SpectralElement.sample respects internal units."""
defwave = np.linspace(0.1, 1, 10)
s = ArraySpectralElement(defwave, defwave, 'm', 'TestArray')
assert_allclose(s(defwave * 1E10), s.sample(defwave))
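# Clarifying note: the element above is constructed with wavelengths in
# metres, while calling it directly uses pysynphot's internal Angstrom scale,
# hence the 1E10 factor (1 m == 1e10 Angstrom).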
@pytest.mark.remote_data
@pytest.mark.parametrize(
('obsmode', 'ans'),
[('acs,hrc,f555w', 357.17),
('acs,sbc,f122m', 86.209624),
('acs,wfc1,f775w,pol_v', 444.05),
('cos,boa,nuv,mirrora', 370.65),
('nicmos,1,f090m,dn', 559.59),
('stis,0.2x29,mirror,fuvmama', 134.977476),
('wfc3,ir,f164n', 700.05),
('wfc3,uvis1,f336w', 158.44),
('wfc3,uvis2,f336w', 158.36)])
def test_photbw(obsmode, ans):
"""
    Test that SpectralElement.photbw returns results that agree with
    Synphot to within 0.1%.
.. note::
For stis,0.2x29,mirror,fuvmama, Synphot value was 134.79.
New ref value from STIS data update some time after April 2017.
For acs,sbc,f122m, new ref value from ACS data update in
Oct 2019 (Avila et al.).
"""
band = ObsBandpass(obsmode)
assert_allclose(band.photbw(), ans, rtol=1E-3)
| 29.673913
| 71
| 0.668864
|
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from numpy.testing import assert_allclose
from ..obsbandpass import ObsBandpass
from ..spectrum import ArraySpectralElement
def test_sample_units():
defwave = np.linspace(0.1, 1, 10)
s = ArraySpectralElement(defwave, defwave, 'm', 'TestArray')
assert_allclose(s(defwave * 1E10), s.sample(defwave))
@pytest.mark.remote_data
@pytest.mark.parametrize(
('obsmode', 'ans'),
[('acs,hrc,f555w', 357.17),
('acs,sbc,f122m', 86.209624),
('acs,wfc1,f775w,pol_v', 444.05),
('cos,boa,nuv,mirrora', 370.65),
('nicmos,1,f090m,dn', 559.59),
('stis,0.2x29,mirror,fuvmama', 134.977476),
('wfc3,ir,f164n', 700.05),
('wfc3,uvis1,f336w', 158.44),
('wfc3,uvis2,f336w', 158.36)])
def test_photbw(obsmode, ans):
band = ObsBandpass(obsmode)
assert_allclose(band.photbw(), ans, rtol=1E-3)
| true
| true
|
1c3ff702ef6060894785080eb31a5db45d17ab8c
| 1,321
|
py
|
Python
|
Manny.CIFAR/CIFAR/CIFARPlotter.py
|
MannyGrewal/Manny.CIFAR
|
03aefd7d89728a31e9bf6d0e44f083315816d289
|
[
"MIT"
] | null | null | null |
Manny.CIFAR/CIFAR/CIFARPlotter.py
|
MannyGrewal/Manny.CIFAR
|
03aefd7d89728a31e9bf6d0e44f083315816d289
|
[
"MIT"
] | null | null | null |
Manny.CIFAR/CIFAR/CIFARPlotter.py
|
MannyGrewal/Manny.CIFAR
|
03aefd7d89728a31e9bf6d0e44f083315816d289
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pylab
########################################################################
# 2017 - Manny Grewal
# Purpose of this module is to visualise a list of images from the CIFAR dataset
# How many columns to show in a grid
MAX_COLS = 5
# PlotImages takes a list of images and their respective labels in the second parameter,
# then renders them using matplotlib's imshow method in a MAX_COLS-column grid
def PlotImages(arrayImages, arrayClassLabels, reShapeRequired=False):
    totalImages = len(arrayImages)
    if reShapeRequired:
        arrayImages = np.reshape(arrayImages, (totalImages, 32, 32, 3))
    totalRows = math.ceil(totalImages / MAX_COLS)
    fig = plt.figure(figsize=(5, 5))
    # size the grid by the computed row count rather than the image count
    gs = gridspec.GridSpec(totalRows, MAX_COLS)
# set the space between subplots and the position of the subplots in the figure
gs.update(wspace=0.1, hspace=0.4, left = 0.1, right = 0.7, bottom = 0.1, top = 0.9)
arrayIndex=0
for g in gs:
if(arrayIndex<totalImages):
axes=plt.subplot(g)
axes.set_axis_off()
axes.set_title(arrayClassLabels[arrayIndex])
axes.imshow(arrayImages[arrayIndex])
arrayIndex+=1
#plt.show()
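# A minimal usage sketch; the random pixels and class labels below are
# illustrative placeholders, not real CIFAR data.
if __name__ == '__main__':
    demoImages = np.random.randint(0, 255, size=(6, 3072), dtype=np.uint8)
    demoLabels = ['cat', 'dog', 'ship', 'frog', 'deer', 'bird']
    PlotImages(demoImages, demoLabels, reShapeRequired=True)
    plt.show()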
| 34.763158
| 94
| 0.660106
|
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pylab
| true
| true
|
1c3ff82c8412663f83da8432c464a6225eacbbd2
| 23,071
|
py
|
Python
|
tests/test_dataset_common.py
|
dkajtoch/datasets
|
12ef7f0d541a5aca5b29ebc2dddf5e1214f0e3e9
|
[
"Apache-2.0"
] | 2
|
2021-01-27T15:43:23.000Z
|
2021-03-13T11:04:30.000Z
|
tests/test_dataset_common.py
|
dkajtoch/datasets
|
12ef7f0d541a5aca5b29ebc2dddf5e1214f0e3e9
|
[
"Apache-2.0"
] | null | null | null |
tests/test_dataset_common.py
|
dkajtoch/datasets
|
12ef7f0d541a5aca5b29ebc2dddf5e1214f0e3e9
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import tempfile
import warnings
from functools import wraps
from multiprocessing import Pool
from typing import List, Optional
from unittest import TestCase
from absl.testing import parameterized
from datasets import (
BuilderConfig,
DatasetBuilder,
DownloadConfig,
Features,
GenerateMode,
MockDownloadManager,
Value,
cached_path,
hf_api,
import_main_class,
load_dataset,
prepare_module,
)
from datasets.packaged_modules import _PACKAGED_DATASETS_MODULES
from datasets.search import _has_faiss
from datasets.utils.file_utils import is_remote_url
from datasets.utils.logging import get_logger
from .utils import OfflineSimulationMode, for_all_test_methods, local, offline, packaged, remote, slow
logger = get_logger(__name__)
REQUIRE_FAISS = {"wiki_dpr"}
def skip_if_dataset_requires_faiss(test_case):
@wraps(test_case)
def wrapper(self, dataset_name):
if not _has_faiss and dataset_name in REQUIRE_FAISS:
self.skipTest('"test requires Faiss"')
else:
test_case(self, dataset_name)
return wrapper
def skip_if_not_compatible_with_windows(test_case):
if os.name == "nt": # windows
@wraps(test_case)
def wrapper(self, dataset_name):
try:
test_case(self, dataset_name)
except FileNotFoundError as e:
if "[WinError 206]" in str(e): # if there's a path that exceeds windows' 256 characters limit
warnings.warn("test not compatible with windows ([WinError 206] error)")
self.skipTest('"test not compatible with windows ([WinError 206] error)"')
else:
raise
return wrapper
else:
return test_case
def get_packaged_dataset_dummy_data_files(dataset_name, path_to_dummy_data):
extensions = {"text": "txt", "json": "json", "pandas": "pkl", "csv": "csv"}
return {
"train": os.path.join(path_to_dummy_data, "train." + extensions[dataset_name]),
"test": os.path.join(path_to_dummy_data, "test." + extensions[dataset_name]),
"dev": os.path.join(path_to_dummy_data, "dev." + extensions[dataset_name]),
}
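# Illustrative sketch: assuming a hypothetical dummy path "/tmp/dummy", the
# helper above resolves the packaged "csv" module's files as
#   {"train": "/tmp/dummy/train.csv",
#    "test": "/tmp/dummy/test.csv",
#    "dev": "/tmp/dummy/dev.csv"}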
class DatasetTester(object):
def __init__(self, parent):
self.parent = parent if parent is not None else TestCase()
def load_builder_class(self, dataset_name, is_local=False):
# Download/copy dataset script
if is_local is True:
module_path, _ = prepare_module("./datasets/" + dataset_name)
else:
module_path, _ = prepare_module(dataset_name, download_config=DownloadConfig(force_download=True))
# Get dataset builder class
builder_cls = import_main_class(module_path)
return builder_cls
def load_all_configs(self, dataset_name, is_local=False) -> List[Optional[BuilderConfig]]:
# get builder class
builder_cls = self.load_builder_class(dataset_name, is_local=is_local)
builder = builder_cls
if len(builder.BUILDER_CONFIGS) == 0:
return [None]
return builder.BUILDER_CONFIGS
def check_load_dataset(self, dataset_name, configs, is_local=False, use_local_dummy_data=False):
for config in configs:
with tempfile.TemporaryDirectory() as processed_temp_dir, tempfile.TemporaryDirectory() as raw_temp_dir:
# create config and dataset
dataset_builder_cls = self.load_builder_class(dataset_name, is_local=is_local)
name = config.name if config is not None else None
dataset_builder = dataset_builder_cls(name=name, cache_dir=processed_temp_dir)
# TODO: skip Beam datasets and datasets that lack dummy data for now
if not dataset_builder.test_dummy_data:
logger.info("Skip tests for this dataset for now")
return
if config is not None:
version = config.version
else:
version = dataset_builder.VERSION
def check_if_url_is_valid(url):
if is_remote_url(url) and "\\" in url:
raise ValueError(f"Bad remote url '{url} since it contains a backslash")
# create mock data loader manager that has a special download_and_extract() method to download dummy data instead of real data
mock_dl_manager = MockDownloadManager(
dataset_name=dataset_name,
config=config,
version=version,
cache_dir=raw_temp_dir,
use_local_dummy_data=use_local_dummy_data,
download_callbacks=[check_if_url_is_valid],
)
# packaged datasets like csv, text, json or pandas require some data files
if dataset_builder.__class__.__name__.lower() in _PACKAGED_DATASETS_MODULES:
mock_dl_manager.download_dummy_data()
path_to_dummy_data = mock_dl_manager.dummy_file
dataset_builder.config.data_files = get_packaged_dataset_dummy_data_files(
dataset_builder.__class__.__name__.lower(), path_to_dummy_data
)
# mock size needed for dummy data instead of actual dataset
if dataset_builder.info is not None:
# approximate upper bound of order of magnitude of dummy data files
one_mega_byte = 2 << 19
dataset_builder.info.size_in_bytes = 2 * one_mega_byte
dataset_builder.info.download_size = one_mega_byte
dataset_builder.info.dataset_size = one_mega_byte
# generate examples from dummy data
dataset_builder.download_and_prepare(
dl_manager=mock_dl_manager,
download_mode=GenerateMode.FORCE_REDOWNLOAD,
ignore_verifications=True,
try_from_hf_gcs=False,
)
# get dataset
dataset = dataset_builder.as_dataset(ignore_verifications=True)
# check that dataset is not empty
self.parent.assertListEqual(sorted(dataset_builder.info.splits.keys()), sorted(dataset))
for split in dataset_builder.info.splits.keys():
                    # check that the loaded dataset is not empty
self.parent.assertTrue(len(dataset[split]) > 0)
del dataset
def test_datasets_dir_and_script_names():
for dataset_dir in glob.glob("./datasets/*/"):
name = dataset_dir.split(os.sep)[-2]
if not name.startswith("__") and len(os.listdir(dataset_dir)) > 0: # ignore __pycache__ and empty dirs
if name in _PACKAGED_DATASETS_MODULES:
continue
else:
# check that the script name is the same as the dir name
assert os.path.exists(
os.path.join(dataset_dir, name + ".py")
), f"Bad structure for dataset '{name}'. Please check that the directory name is a valid dataset and that the same the same as the dataset script name."
def get_local_dataset_names():
datasets = [
dataset_dir.split(os.sep)[-2]
for dataset_dir in glob.glob("./datasets/*/")
if os.path.exists(os.path.join(dataset_dir, dataset_dir.split(os.sep)[-2] + ".py"))
]
return [{"testcase_name": x, "dataset_name": x} for x in datasets]
@parameterized.named_parameters(get_local_dataset_names())
@for_all_test_methods(skip_if_dataset_requires_faiss, skip_if_not_compatible_with_windows)
@local
class LocalDatasetTest(parameterized.TestCase):
dataset_name = None
def setUp(self):
self.dataset_tester = DatasetTester(self)
def test_load_dataset(self, dataset_name):
configs = self.dataset_tester.load_all_configs(dataset_name, is_local=True)[:1]
self.dataset_tester.check_load_dataset(dataset_name, configs, is_local=True, use_local_dummy_data=True)
def test_builder_class(self, dataset_name):
builder_cls = self.dataset_tester.load_builder_class(dataset_name, is_local=True)
name = builder_cls.BUILDER_CONFIGS[0].name if builder_cls.BUILDER_CONFIGS else None
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = builder_cls(name=name, cache_dir=tmp_cache_dir)
self.assertTrue(isinstance(builder, DatasetBuilder))
def test_builder_configs(self, dataset_name):
builder_configs = self.dataset_tester.load_all_configs(dataset_name, is_local=True)
self.assertTrue(len(builder_configs) > 0)
if builder_configs[0] is not None:
            for config in builder_configs:
                self.assertTrue(isinstance(config, BuilderConfig))
@slow
def test_load_dataset_all_configs(self, dataset_name):
configs = self.dataset_tester.load_all_configs(dataset_name, is_local=True)
self.dataset_tester.check_load_dataset(dataset_name, configs, is_local=True, use_local_dummy_data=True)
@slow
def test_load_real_dataset(self, dataset_name):
path = "./datasets/" + dataset_name
module_path, hash = prepare_module(path, download_config=DownloadConfig(local_files_only=True), dataset=True)
builder_cls = import_main_class(module_path, dataset=True)
name = builder_cls.BUILDER_CONFIGS[0].name if builder_cls.BUILDER_CONFIGS else None
with tempfile.TemporaryDirectory() as temp_cache_dir:
dataset = load_dataset(
path, name=name, cache_dir=temp_cache_dir, download_mode=GenerateMode.FORCE_REDOWNLOAD
)
for split in dataset.keys():
self.assertTrue(len(dataset[split]) > 0)
del dataset
@slow
def test_load_real_dataset_all_configs(self, dataset_name):
path = "./datasets/" + dataset_name
module_path, hash = prepare_module(path, download_config=DownloadConfig(local_files_only=True), dataset=True)
builder_cls = import_main_class(module_path, dataset=True)
config_names = (
[config.name for config in builder_cls.BUILDER_CONFIGS] if len(builder_cls.BUILDER_CONFIGS) > 0 else [None]
)
for name in config_names:
with tempfile.TemporaryDirectory() as temp_cache_dir:
dataset = load_dataset(
path, name=name, cache_dir=temp_cache_dir, download_mode=GenerateMode.FORCE_REDOWNLOAD
)
for split in dataset.keys():
self.assertTrue(len(dataset[split]) > 0)
del dataset
def get_packaged_dataset_names():
return [{"testcase_name": x, "dataset_name": x} for x in _PACKAGED_DATASETS_MODULES.keys()]
@parameterized.named_parameters(get_packaged_dataset_names())
@packaged
class PackagedDatasetTest(parameterized.TestCase):
dataset_name = None
def setUp(self):
self.dataset_tester = DatasetTester(self)
def test_load_dataset_offline(self, dataset_name):
for offline_simulation_mode in list(OfflineSimulationMode):
with offline(offline_simulation_mode):
configs = self.dataset_tester.load_all_configs(dataset_name)[:1]
self.dataset_tester.check_load_dataset(dataset_name, configs, use_local_dummy_data=True)
def test_builder_class(self, dataset_name):
builder_cls = self.dataset_tester.load_builder_class(dataset_name)
name = builder_cls.BUILDER_CONFIGS[0].name if builder_cls.BUILDER_CONFIGS else None
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = builder_cls(name=name, cache_dir=tmp_cache_dir)
self.assertTrue(isinstance(builder, DatasetBuilder))
def test_builder_configs(self, dataset_name):
builder_configs = self.dataset_tester.load_all_configs(dataset_name)
self.assertTrue(len(builder_configs) > 0)
if builder_configs[0] is not None:
            for config in builder_configs:
                self.assertTrue(isinstance(config, BuilderConfig))
def distributed_load_dataset(args):
data_name, tmp_dir, datafiles = args
dataset = load_dataset(data_name, cache_dir=tmp_dir, data_files=datafiles)
return dataset
class DistributedDatasetTest(TestCase):
def test_load_dataset_distributed(self):
num_workers = 5
with tempfile.TemporaryDirectory() as tmp_dir:
data_name = "csv"
data_base_path = os.path.join("datasets", data_name, "dummy", "0.0.0", "dummy_data.zip")
local_path = cached_path(
data_base_path, cache_dir=tmp_dir, extract_compressed_file=True, force_extract=True
)
datafiles = {
"train": os.path.join(local_path, "dummy_data/train.csv"),
"dev": os.path.join(local_path, "dummy_data/dev.csv"),
"test": os.path.join(local_path, "dummy_data/test.csv"),
}
args = data_name, tmp_dir, datafiles
with Pool(processes=num_workers) as pool: # start num_workers processes
result = pool.apply_async(distributed_load_dataset, (args,))
dataset = result.get(timeout=20)
del result, dataset
datasets = pool.map(distributed_load_dataset, [args] * num_workers)
for _ in range(len(datasets)):
dataset = datasets.pop()
del dataset
def get_remote_dataset_names():
api = hf_api.HfApi()
# fetch all dataset names
datasets = api.dataset_list(with_community_datasets=False, id_only=True)
return [{"testcase_name": x, "dataset_name": x} for x in datasets]
@parameterized.named_parameters(get_remote_dataset_names())
@for_all_test_methods(skip_if_dataset_requires_faiss, skip_if_not_compatible_with_windows)
@remote
class RemoteDatasetTest(parameterized.TestCase):
dataset_name = None
def setUp(self):
self.dataset_tester = DatasetTester(self)
def test_builder_class(self, dataset_name):
builder_cls = self.dataset_tester.load_builder_class(dataset_name)
name = builder_cls.BUILDER_CONFIGS[0].name if builder_cls.BUILDER_CONFIGS else None
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = builder_cls(name=name, cache_dir=tmp_cache_dir)
self.assertTrue(isinstance(builder, DatasetBuilder))
def test_builder_configs(self, dataset_name):
builder_configs = self.dataset_tester.load_all_configs(dataset_name)
self.assertTrue(len(builder_configs) > 0)
if builder_configs[0] is not None:
            for config in builder_configs:
                self.assertTrue(isinstance(config, BuilderConfig))
def test_load_dataset(self, dataset_name):
configs = self.dataset_tester.load_all_configs(dataset_name)[:1]
self.dataset_tester.check_load_dataset(dataset_name, configs)
@slow
def test_load_real_dataset(self, dataset_name):
path = dataset_name
module_path, hash = prepare_module(path, download_config=DownloadConfig(force_download=True), dataset=True)
builder_cls = import_main_class(module_path, dataset=True)
name = builder_cls.BUILDER_CONFIGS[0].name if builder_cls.BUILDER_CONFIGS else None
with tempfile.TemporaryDirectory() as temp_cache_dir:
dataset = load_dataset(
path, name=name, cache_dir=temp_cache_dir, download_mode=GenerateMode.FORCE_REDOWNLOAD
)
for split in dataset.keys():
self.assertTrue(len(dataset[split]) > 0)
del dataset
@slow
def test_load_real_dataset_all_configs(self, dataset_name):
path = dataset_name
module_path, hash = prepare_module(path, download_config=DownloadConfig(force_download=True), dataset=True)
builder_cls = import_main_class(module_path, dataset=True)
config_names = (
[config.name for config in builder_cls.BUILDER_CONFIGS] if len(builder_cls.BUILDER_CONFIGS) > 0 else [None]
)
for name in config_names:
with tempfile.TemporaryDirectory() as temp_cache_dir:
dataset = load_dataset(
path, name=name, cache_dir=temp_cache_dir, download_mode=GenerateMode.FORCE_REDOWNLOAD
)
for split in dataset.keys():
self.assertTrue(len(dataset[split]) > 0)
del dataset
class TextTest(TestCase):
def test_caching(self):
n_samples = 10
with tempfile.TemporaryDirectory() as tmp_dir:
# Use \n for newline. Windows automatically adds the \r when writing the file
# see https://docs.python.org/3/library/os.html#os.linesep
open(os.path.join(tmp_dir, "text.txt"), "w", encoding="utf-8").write(
"\n".join("foo" for _ in range(n_samples))
)
ds = load_dataset("text", data_files=os.path.join(tmp_dir, "text.txt"), cache_dir=tmp_dir, split="train")
data_file = ds.cache_files[0]
fingerprint = ds._fingerprint
self.assertEqual(len(ds), n_samples)
del ds
ds = load_dataset("text", data_files=os.path.join(tmp_dir, "text.txt"), cache_dir=tmp_dir, split="train")
self.assertEqual(ds.cache_files[0], data_file)
self.assertEqual(ds._fingerprint, fingerprint)
del ds
open(os.path.join(tmp_dir, "text.txt"), "w", encoding="utf-8").write(
"\n".join("bar" for _ in range(n_samples))
)
ds = load_dataset("text", data_files=os.path.join(tmp_dir, "text.txt"), cache_dir=tmp_dir, split="train")
self.assertNotEqual(ds.cache_files[0], data_file)
self.assertNotEqual(ds._fingerprint, fingerprint)
self.assertEqual(len(ds), n_samples)
del ds
class CsvTest(TestCase):
def test_caching(self):
n_rows = 10
features = Features({"foo": Value("string"), "bar": Value("string")})
with tempfile.TemporaryDirectory() as tmp_dir:
# Use \n for newline. Windows automatically adds the \r when writing the file
# see https://docs.python.org/3/library/os.html#os.linesep
open(os.path.join(tmp_dir, "table.csv"), "w", encoding="utf-8").write(
"\n".join(",".join(["foo", "bar"]) for _ in range(n_rows + 1))
)
ds = load_dataset("csv", data_files=os.path.join(tmp_dir, "table.csv"), cache_dir=tmp_dir, split="train")
data_file = ds.cache_files[0]
fingerprint = ds._fingerprint
self.assertEqual(len(ds), n_rows)
del ds
ds = load_dataset("csv", data_files=os.path.join(tmp_dir, "table.csv"), cache_dir=tmp_dir, split="train")
self.assertEqual(ds.cache_files[0], data_file)
self.assertEqual(ds._fingerprint, fingerprint)
del ds
ds = load_dataset(
"csv",
data_files=os.path.join(tmp_dir, "table.csv"),
cache_dir=tmp_dir,
split="train",
features=features,
)
self.assertNotEqual(ds.cache_files[0], data_file)
self.assertNotEqual(ds._fingerprint, fingerprint)
del ds
open(os.path.join(tmp_dir, "table.csv"), "w", encoding="utf-8").write(
"\n".join(",".join(["Foo", "Bar"]) for _ in range(n_rows + 1))
)
ds = load_dataset("csv", data_files=os.path.join(tmp_dir, "table.csv"), cache_dir=tmp_dir, split="train")
self.assertNotEqual(ds.cache_files[0], data_file)
self.assertNotEqual(ds._fingerprint, fingerprint)
self.assertEqual(len(ds), n_rows)
del ds
def test_sep(self):
n_rows = 10
n_cols = 3
with tempfile.TemporaryDirectory() as tmp_dir:
open(os.path.join(tmp_dir, "table_comma.csv"), "w", encoding="utf-8").write(
"\n".join(",".join([str(i) for i in range(n_cols)]) for _ in range(n_rows + 1))
)
open(os.path.join(tmp_dir, "table_tab.csv"), "w", encoding="utf-8").write(
"\n".join("\t".join([str(i) for i in range(n_cols)]) for _ in range(n_rows + 1))
)
ds = load_dataset(
"csv",
data_files=os.path.join(tmp_dir, "table_comma.csv"),
cache_dir=tmp_dir,
split="train",
sep=",",
)
self.assertEqual(len(ds), n_rows)
self.assertEqual(len(ds.column_names), n_cols)
del ds
ds = load_dataset(
"csv",
data_files=os.path.join(tmp_dir, "table_tab.csv"),
cache_dir=tmp_dir,
split="train",
sep="\t",
)
self.assertEqual(len(ds), n_rows)
self.assertEqual(len(ds.column_names), n_cols)
del ds
ds = load_dataset(
"csv",
data_files=os.path.join(tmp_dir, "table_comma.csv"),
cache_dir=tmp_dir,
split="train",
sep="\t",
)
self.assertEqual(len(ds), n_rows)
self.assertEqual(len(ds.column_names), 1)
del ds
def test_features(self):
n_rows = 10
n_cols = 3
def get_features(type):
return Features({str(i): Value(type) for i in range(n_cols)})
with tempfile.TemporaryDirectory() as tmp_dir:
open(os.path.join(tmp_dir, "table.csv"), "w", encoding="utf-8").write(
"\n".join(",".join([str(i) for i in range(n_cols)]) for _ in range(n_rows + 1))
)
for type in ["float64", "int8"]:
features = get_features(type)
ds = load_dataset(
"csv",
data_files=os.path.join(tmp_dir, "table.csv"),
cache_dir=tmp_dir,
split="train",
features=features,
)
self.assertEqual(len(ds), n_rows)
self.assertDictEqual(ds.features, features)
del ds
| 43.04291
| 168
| 0.635473
|
import glob
import os
import tempfile
import warnings
from functools import wraps
from multiprocessing import Pool
from typing import List, Optional
from unittest import TestCase
from absl.testing import parameterized
from datasets import (
BuilderConfig,
DatasetBuilder,
DownloadConfig,
Features,
GenerateMode,
MockDownloadManager,
Value,
cached_path,
hf_api,
import_main_class,
load_dataset,
prepare_module,
)
from datasets.packaged_modules import _PACKAGED_DATASETS_MODULES
from datasets.search import _has_faiss
from datasets.utils.file_utils import is_remote_url
from datasets.utils.logging import get_logger
from .utils import OfflineSimulationMode, for_all_test_methods, local, offline, packaged, remote, slow
logger = get_logger(__name__)
REQUIRE_FAISS = {"wiki_dpr"}
def skip_if_dataset_requires_faiss(test_case):
@wraps(test_case)
def wrapper(self, dataset_name):
if not _has_faiss and dataset_name in REQUIRE_FAISS:
self.skipTest('"test requires Faiss"')
else:
test_case(self, dataset_name)
return wrapper
def skip_if_not_compatible_with_windows(test_case):
if os.name == "nt":
@wraps(test_case)
def wrapper(self, dataset_name):
try:
test_case(self, dataset_name)
except FileNotFoundError as e:
if "[WinError 206]" in str(e):
warnings.warn("test not compatible with windows ([WinError 206] error)")
self.skipTest('"test not compatible with windows ([WinError 206] error)"')
else:
raise
return wrapper
else:
return test_case
def get_packaged_dataset_dummy_data_files(dataset_name, path_to_dummy_data):
extensions = {"text": "txt", "json": "json", "pandas": "pkl", "csv": "csv"}
return {
"train": os.path.join(path_to_dummy_data, "train." + extensions[dataset_name]),
"test": os.path.join(path_to_dummy_data, "test." + extensions[dataset_name]),
"dev": os.path.join(path_to_dummy_data, "dev." + extensions[dataset_name]),
}
class DatasetTester(object):
def __init__(self, parent):
self.parent = parent if parent is not None else TestCase()
def load_builder_class(self, dataset_name, is_local=False):
if is_local is True:
module_path, _ = prepare_module("./datasets/" + dataset_name)
else:
module_path, _ = prepare_module(dataset_name, download_config=DownloadConfig(force_download=True))
builder_cls = import_main_class(module_path)
return builder_cls
def load_all_configs(self, dataset_name, is_local=False) -> List[Optional[BuilderConfig]]:
builder_cls = self.load_builder_class(dataset_name, is_local=is_local)
builder = builder_cls
if len(builder.BUILDER_CONFIGS) == 0:
return [None]
return builder.BUILDER_CONFIGS
def check_load_dataset(self, dataset_name, configs, is_local=False, use_local_dummy_data=False):
for config in configs:
with tempfile.TemporaryDirectory() as processed_temp_dir, tempfile.TemporaryDirectory() as raw_temp_dir:
dataset_builder_cls = self.load_builder_class(dataset_name, is_local=is_local)
name = config.name if config is not None else None
dataset_builder = dataset_builder_cls(name=name, cache_dir=processed_temp_dir)
if not dataset_builder.test_dummy_data:
logger.info("Skip tests for this dataset for now")
return
if config is not None:
version = config.version
else:
version = dataset_builder.VERSION
def check_if_url_is_valid(url):
if is_remote_url(url) and "\\" in url:
raise ValueError(f"Bad remote url '{url} since it contains a backslash")
# create mock data loader manager that has a special download_and_extract() method to download dummy data instead of real data
mock_dl_manager = MockDownloadManager(
dataset_name=dataset_name,
config=config,
version=version,
cache_dir=raw_temp_dir,
use_local_dummy_data=use_local_dummy_data,
download_callbacks=[check_if_url_is_valid],
)
# packaged datasets like csv, text, json or pandas require some data files
if dataset_builder.__class__.__name__.lower() in _PACKAGED_DATASETS_MODULES:
mock_dl_manager.download_dummy_data()
path_to_dummy_data = mock_dl_manager.dummy_file
dataset_builder.config.data_files = get_packaged_dataset_dummy_data_files(
dataset_builder.__class__.__name__.lower(), path_to_dummy_data
)
# mock size needed for dummy data instead of actual dataset
if dataset_builder.info is not None:
# approximate upper bound of order of magnitude of dummy data files
one_mega_byte = 2 << 19
dataset_builder.info.size_in_bytes = 2 * one_mega_byte
dataset_builder.info.download_size = one_mega_byte
dataset_builder.info.dataset_size = one_mega_byte
# generate examples from dummy data
dataset_builder.download_and_prepare(
dl_manager=mock_dl_manager,
download_mode=GenerateMode.FORCE_REDOWNLOAD,
ignore_verifications=True,
try_from_hf_gcs=False,
)
# get dataset
dataset = dataset_builder.as_dataset(ignore_verifications=True)
# check that dataset is not empty
self.parent.assertListEqual(sorted(dataset_builder.info.splits.keys()), sorted(dataset))
for split in dataset_builder.info.splits.keys():
                    # check that the loaded dataset is not empty
self.parent.assertTrue(len(dataset[split]) > 0)
del dataset
def test_datasets_dir_and_script_names():
for dataset_dir in glob.glob("./datasets/*/"):
name = dataset_dir.split(os.sep)[-2]
if not name.startswith("__") and len(os.listdir(dataset_dir)) > 0: # ignore __pycache__ and empty dirs
if name in _PACKAGED_DATASETS_MODULES:
continue
else:
# check that the script name is the same as the dir name
assert os.path.exists(
os.path.join(dataset_dir, name + ".py")
), f"Bad structure for dataset '{name}'. Please check that the directory name is a valid dataset and that the same the same as the dataset script name."
def get_local_dataset_names():
datasets = [
dataset_dir.split(os.sep)[-2]
for dataset_dir in glob.glob("./datasets/*/")
if os.path.exists(os.path.join(dataset_dir, dataset_dir.split(os.sep)[-2] + ".py"))
]
return [{"testcase_name": x, "dataset_name": x} for x in datasets]
@parameterized.named_parameters(get_local_dataset_names())
@for_all_test_methods(skip_if_dataset_requires_faiss, skip_if_not_compatible_with_windows)
@local
class LocalDatasetTest(parameterized.TestCase):
dataset_name = None
def setUp(self):
self.dataset_tester = DatasetTester(self)
def test_load_dataset(self, dataset_name):
configs = self.dataset_tester.load_all_configs(dataset_name, is_local=True)[:1]
self.dataset_tester.check_load_dataset(dataset_name, configs, is_local=True, use_local_dummy_data=True)
def test_builder_class(self, dataset_name):
builder_cls = self.dataset_tester.load_builder_class(dataset_name, is_local=True)
name = builder_cls.BUILDER_CONFIGS[0].name if builder_cls.BUILDER_CONFIGS else None
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = builder_cls(name=name, cache_dir=tmp_cache_dir)
self.assertTrue(isinstance(builder, DatasetBuilder))
def test_builder_configs(self, dataset_name):
builder_configs = self.dataset_tester.load_all_configs(dataset_name, is_local=True)
self.assertTrue(len(builder_configs) > 0)
if builder_configs[0] is not None:
            for config in builder_configs:
                self.assertTrue(isinstance(config, BuilderConfig))
@slow
def test_load_dataset_all_configs(self, dataset_name):
configs = self.dataset_tester.load_all_configs(dataset_name, is_local=True)
self.dataset_tester.check_load_dataset(dataset_name, configs, is_local=True, use_local_dummy_data=True)
@slow
def test_load_real_dataset(self, dataset_name):
path = "./datasets/" + dataset_name
module_path, hash = prepare_module(path, download_config=DownloadConfig(local_files_only=True), dataset=True)
builder_cls = import_main_class(module_path, dataset=True)
name = builder_cls.BUILDER_CONFIGS[0].name if builder_cls.BUILDER_CONFIGS else None
with tempfile.TemporaryDirectory() as temp_cache_dir:
dataset = load_dataset(
path, name=name, cache_dir=temp_cache_dir, download_mode=GenerateMode.FORCE_REDOWNLOAD
)
for split in dataset.keys():
self.assertTrue(len(dataset[split]) > 0)
del dataset
@slow
def test_load_real_dataset_all_configs(self, dataset_name):
path = "./datasets/" + dataset_name
module_path, hash = prepare_module(path, download_config=DownloadConfig(local_files_only=True), dataset=True)
builder_cls = import_main_class(module_path, dataset=True)
config_names = (
[config.name for config in builder_cls.BUILDER_CONFIGS] if len(builder_cls.BUILDER_CONFIGS) > 0 else [None]
)
for name in config_names:
with tempfile.TemporaryDirectory() as temp_cache_dir:
dataset = load_dataset(
path, name=name, cache_dir=temp_cache_dir, download_mode=GenerateMode.FORCE_REDOWNLOAD
)
for split in dataset.keys():
self.assertTrue(len(dataset[split]) > 0)
del dataset
def get_packaged_dataset_names():
return [{"testcase_name": x, "dataset_name": x} for x in _PACKAGED_DATASETS_MODULES.keys()]
@parameterized.named_parameters(get_packaged_dataset_names())
@packaged
class PackagedDatasetTest(parameterized.TestCase):
dataset_name = None
def setUp(self):
self.dataset_tester = DatasetTester(self)
def test_load_dataset_offline(self, dataset_name):
for offline_simulation_mode in list(OfflineSimulationMode):
with offline(offline_simulation_mode):
configs = self.dataset_tester.load_all_configs(dataset_name)[:1]
self.dataset_tester.check_load_dataset(dataset_name, configs, use_local_dummy_data=True)
def test_builder_class(self, dataset_name):
builder_cls = self.dataset_tester.load_builder_class(dataset_name)
name = builder_cls.BUILDER_CONFIGS[0].name if builder_cls.BUILDER_CONFIGS else None
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = builder_cls(name=name, cache_dir=tmp_cache_dir)
self.assertTrue(isinstance(builder, DatasetBuilder))
def test_builder_configs(self, dataset_name):
builder_configs = self.dataset_tester.load_all_configs(dataset_name)
self.assertTrue(len(builder_configs) > 0)
if builder_configs[0] is not None:
            for config in builder_configs:
                self.assertTrue(isinstance(config, BuilderConfig))
def distributed_load_dataset(args):
data_name, tmp_dir, datafiles = args
dataset = load_dataset(data_name, cache_dir=tmp_dir, data_files=datafiles)
return dataset
class DistributedDatasetTest(TestCase):
def test_load_dataset_distributed(self):
num_workers = 5
with tempfile.TemporaryDirectory() as tmp_dir:
data_name = "csv"
data_base_path = os.path.join("datasets", data_name, "dummy", "0.0.0", "dummy_data.zip")
local_path = cached_path(
data_base_path, cache_dir=tmp_dir, extract_compressed_file=True, force_extract=True
)
datafiles = {
"train": os.path.join(local_path, "dummy_data/train.csv"),
"dev": os.path.join(local_path, "dummy_data/dev.csv"),
"test": os.path.join(local_path, "dummy_data/test.csv"),
}
args = data_name, tmp_dir, datafiles
with Pool(processes=num_workers) as pool: # start num_workers processes
result = pool.apply_async(distributed_load_dataset, (args,))
dataset = result.get(timeout=20)
del result, dataset
datasets = pool.map(distributed_load_dataset, [args] * num_workers)
for _ in range(len(datasets)):
dataset = datasets.pop()
del dataset
def get_remote_dataset_names():
api = hf_api.HfApi()
# fetch all dataset names
datasets = api.dataset_list(with_community_datasets=False, id_only=True)
return [{"testcase_name": x, "dataset_name": x} for x in datasets]
@parameterized.named_parameters(get_remote_dataset_names())
@for_all_test_methods(skip_if_dataset_requires_faiss, skip_if_not_compatible_with_windows)
@remote
class RemoteDatasetTest(parameterized.TestCase):
dataset_name = None
def setUp(self):
self.dataset_tester = DatasetTester(self)
def test_builder_class(self, dataset_name):
builder_cls = self.dataset_tester.load_builder_class(dataset_name)
name = builder_cls.BUILDER_CONFIGS[0].name if builder_cls.BUILDER_CONFIGS else None
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = builder_cls(name=name, cache_dir=tmp_cache_dir)
self.assertTrue(isinstance(builder, DatasetBuilder))
def test_builder_configs(self, dataset_name):
builder_configs = self.dataset_tester.load_all_configs(dataset_name)
self.assertTrue(len(builder_configs) > 0)
if builder_configs[0] is not None:
            for config in builder_configs:
                self.assertTrue(isinstance(config, BuilderConfig))
def test_load_dataset(self, dataset_name):
configs = self.dataset_tester.load_all_configs(dataset_name)[:1]
self.dataset_tester.check_load_dataset(dataset_name, configs)
@slow
def test_load_real_dataset(self, dataset_name):
path = dataset_name
module_path, hash = prepare_module(path, download_config=DownloadConfig(force_download=True), dataset=True)
builder_cls = import_main_class(module_path, dataset=True)
name = builder_cls.BUILDER_CONFIGS[0].name if builder_cls.BUILDER_CONFIGS else None
with tempfile.TemporaryDirectory() as temp_cache_dir:
dataset = load_dataset(
path, name=name, cache_dir=temp_cache_dir, download_mode=GenerateMode.FORCE_REDOWNLOAD
)
for split in dataset.keys():
self.assertTrue(len(dataset[split]) > 0)
del dataset
@slow
def test_load_real_dataset_all_configs(self, dataset_name):
path = dataset_name
module_path, hash = prepare_module(path, download_config=DownloadConfig(force_download=True), dataset=True)
builder_cls = import_main_class(module_path, dataset=True)
config_names = (
[config.name for config in builder_cls.BUILDER_CONFIGS] if len(builder_cls.BUILDER_CONFIGS) > 0 else [None]
)
for name in config_names:
with tempfile.TemporaryDirectory() as temp_cache_dir:
dataset = load_dataset(
path, name=name, cache_dir=temp_cache_dir, download_mode=GenerateMode.FORCE_REDOWNLOAD
)
for split in dataset.keys():
self.assertTrue(len(dataset[split]) > 0)
del dataset
class TextTest(TestCase):
def test_caching(self):
n_samples = 10
with tempfile.TemporaryDirectory() as tmp_dir:
# Use \n for newline. Windows automatically adds the \r when writing the file
# see https://docs.python.org/3/library/os.html#os.linesep
open(os.path.join(tmp_dir, "text.txt"), "w", encoding="utf-8").write(
"\n".join("foo" for _ in range(n_samples))
)
ds = load_dataset("text", data_files=os.path.join(tmp_dir, "text.txt"), cache_dir=tmp_dir, split="train")
data_file = ds.cache_files[0]
fingerprint = ds._fingerprint
self.assertEqual(len(ds), n_samples)
del ds
ds = load_dataset("text", data_files=os.path.join(tmp_dir, "text.txt"), cache_dir=tmp_dir, split="train")
self.assertEqual(ds.cache_files[0], data_file)
self.assertEqual(ds._fingerprint, fingerprint)
del ds
open(os.path.join(tmp_dir, "text.txt"), "w", encoding="utf-8").write(
"\n".join("bar" for _ in range(n_samples))
)
ds = load_dataset("text", data_files=os.path.join(tmp_dir, "text.txt"), cache_dir=tmp_dir, split="train")
self.assertNotEqual(ds.cache_files[0], data_file)
self.assertNotEqual(ds._fingerprint, fingerprint)
self.assertEqual(len(ds), n_samples)
del ds
class CsvTest(TestCase):
def test_caching(self):
n_rows = 10
features = Features({"foo": Value("string"), "bar": Value("string")})
with tempfile.TemporaryDirectory() as tmp_dir:
# Use \n for newline. Windows automatically adds the \r when writing the file
# see https://docs.python.org/3/library/os.html#os.linesep
open(os.path.join(tmp_dir, "table.csv"), "w", encoding="utf-8").write(
"\n".join(",".join(["foo", "bar"]) for _ in range(n_rows + 1))
)
ds = load_dataset("csv", data_files=os.path.join(tmp_dir, "table.csv"), cache_dir=tmp_dir, split="train")
data_file = ds.cache_files[0]
fingerprint = ds._fingerprint
self.assertEqual(len(ds), n_rows)
del ds
ds = load_dataset("csv", data_files=os.path.join(tmp_dir, "table.csv"), cache_dir=tmp_dir, split="train")
self.assertEqual(ds.cache_files[0], data_file)
self.assertEqual(ds._fingerprint, fingerprint)
del ds
ds = load_dataset(
"csv",
data_files=os.path.join(tmp_dir, "table.csv"),
cache_dir=tmp_dir,
split="train",
features=features,
)
self.assertNotEqual(ds.cache_files[0], data_file)
self.assertNotEqual(ds._fingerprint, fingerprint)
del ds
open(os.path.join(tmp_dir, "table.csv"), "w", encoding="utf-8").write(
"\n".join(",".join(["Foo", "Bar"]) for _ in range(n_rows + 1))
)
ds = load_dataset("csv", data_files=os.path.join(tmp_dir, "table.csv"), cache_dir=tmp_dir, split="train")
self.assertNotEqual(ds.cache_files[0], data_file)
self.assertNotEqual(ds._fingerprint, fingerprint)
self.assertEqual(len(ds), n_rows)
del ds
def test_sep(self):
n_rows = 10
n_cols = 3
with tempfile.TemporaryDirectory() as tmp_dir:
open(os.path.join(tmp_dir, "table_comma.csv"), "w", encoding="utf-8").write(
"\n".join(",".join([str(i) for i in range(n_cols)]) for _ in range(n_rows + 1))
)
open(os.path.join(tmp_dir, "table_tab.csv"), "w", encoding="utf-8").write(
"\n".join("\t".join([str(i) for i in range(n_cols)]) for _ in range(n_rows + 1))
)
ds = load_dataset(
"csv",
data_files=os.path.join(tmp_dir, "table_comma.csv"),
cache_dir=tmp_dir,
split="train",
sep=",",
)
self.assertEqual(len(ds), n_rows)
self.assertEqual(len(ds.column_names), n_cols)
del ds
ds = load_dataset(
"csv",
data_files=os.path.join(tmp_dir, "table_tab.csv"),
cache_dir=tmp_dir,
split="train",
sep="\t",
)
self.assertEqual(len(ds), n_rows)
self.assertEqual(len(ds.column_names), n_cols)
del ds
ds = load_dataset(
"csv",
data_files=os.path.join(tmp_dir, "table_comma.csv"),
cache_dir=tmp_dir,
split="train",
sep="\t",
)
self.assertEqual(len(ds), n_rows)
self.assertEqual(len(ds.column_names), 1)
del ds
def test_features(self):
n_rows = 10
n_cols = 3
def get_features(type):
return Features({str(i): Value(type) for i in range(n_cols)})
with tempfile.TemporaryDirectory() as tmp_dir:
open(os.path.join(tmp_dir, "table.csv"), "w", encoding="utf-8").write(
"\n".join(",".join([str(i) for i in range(n_cols)]) for _ in range(n_rows + 1))
)
for type in ["float64", "int8"]:
features = get_features(type)
ds = load_dataset(
"csv",
data_files=os.path.join(tmp_dir, "table.csv"),
cache_dir=tmp_dir,
split="train",
features=features,
)
self.assertEqual(len(ds), n_rows)
self.assertDictEqual(ds.features, features)
del ds
| true
| true
|
1c3ff83f7aed1ddf9f7b04a5468bb69fb1e9f9d7
| 4,389
|
py
|
Python
|
chia/wallet/transaction_record.py
|
Hydrangea-Network/hydrangea-blockchain-1.3.0
|
241381a87366602ab35136c49536ff6e3e84c3da
|
[
"Apache-2.0"
] | 1
|
2022-03-20T14:52:23.000Z
|
2022-03-20T14:52:23.000Z
|
chia/wallet/transaction_record.py
|
Hydrangea-Network/hydrangea-blockchain-1.3.0
|
241381a87366602ab35136c49536ff6e3e84c3da
|
[
"Apache-2.0"
] | null | null | null |
chia/wallet/transaction_record.py
|
Hydrangea-Network/hydrangea-blockchain-1.3.0
|
241381a87366602ab35136c49536ff6e3e84c3da
|
[
"Apache-2.0"
] | 1
|
2022-03-20T14:51:39.000Z
|
2022-03-20T14:51:39.000Z
|
from dataclasses import dataclass
from typing import List, Optional, Tuple, Dict
from chia.consensus.coinbase import pool_parent_id, farmer_parent_id, timelord_parent_id
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.spend_bundle import SpendBundle
from chia.util.bech32m import encode_puzzle_hash, decode_puzzle_hash
from chia.util.ints import uint8, uint32, uint64
from chia.util.streamable import Streamable, streamable
from chia.wallet.util.transaction_type import TransactionType
@dataclass(frozen=True)
@streamable
class TransactionRecord(Streamable):
"""
Used for storing transaction data and status in wallets.
"""
confirmed_at_height: uint32
created_at_time: uint64
to_puzzle_hash: bytes32
amount: uint64
fee_amount: uint64
confirmed: bool
sent: uint32
spend_bundle: Optional[SpendBundle]
additions: List[Coin]
removals: List[Coin]
wallet_id: uint32
# Represents the list of peers that we sent the transaction to, whether each one
# included it in the mempool, and what the error message (if any) was
sent_to: List[Tuple[str, uint8, Optional[str]]]
trade_id: Optional[bytes32]
type: uint32 # TransactionType
name: bytes32
memos: List[Tuple[bytes32, List[bytes]]]
def is_in_mempool(self) -> bool:
# If one of the nodes we sent it to responded with success, we set it to success
for (_, mis, _) in self.sent_to:
if MempoolInclusionStatus(mis) == MempoolInclusionStatus.SUCCESS:
return True
# Note, transactions pending inclusion (pending) return false
return False
def height_farmed(self, genesis_challenge: bytes32) -> Optional[uint32]:
if not self.confirmed:
return None
if self.type == TransactionType.FEE_REWARD or self.type == TransactionType.COINBASE_REWARD:
for block_index in range(self.confirmed_at_height, self.confirmed_at_height - 100, -1):
if block_index < 0:
return None
timelord_parent = timelord_parent_id(uint32(block_index), genesis_challenge)
pool_parent = pool_parent_id(uint32(block_index), genesis_challenge)
farmer_parent = farmer_parent_id(uint32(block_index), genesis_challenge)
if timelord_parent == self.additions[0].parent_coin_info:
return uint32(block_index)
if pool_parent == self.additions[0].parent_coin_info:
return uint32(block_index)
if farmer_parent == self.additions[0].parent_coin_info:
return uint32(block_index)
return None
def get_memos(self) -> Dict[bytes32, List[bytes]]:
return {coin_id: ms for coin_id, ms in self.memos}
@classmethod
def from_json_dict_convenience(cls, modified_tx_input: Dict):
modified_tx = modified_tx_input.copy()
if "to_address" in modified_tx:
modified_tx["to_puzzle_hash"] = decode_puzzle_hash(modified_tx["to_address"]).hex()
if "to_address" in modified_tx:
del modified_tx["to_address"]
# Converts memos from a flat dict into a nested list
memos_dict: Dict[str, List[str]] = {}
memos_list: List = []
if "memos" in modified_tx:
for coin_id, memo in modified_tx["memos"].items():
if coin_id not in memos_dict:
memos_dict[coin_id] = []
memos_dict[coin_id].append(memo)
for coin_id, memos in memos_dict.items():
memos_list.append((coin_id, memos))
modified_tx["memos"] = memos_list
return cls.from_json_dict(modified_tx)
def to_json_dict_convenience(self, config: Dict) -> Dict:
selected = config["selected_network"]
prefix = config["network_overrides"]["config"][selected]["address_prefix"]
formatted = self.to_json_dict()
formatted["to_address"] = encode_puzzle_hash(self.to_puzzle_hash, prefix)
formatted["memos"] = {
coin_id.hex(): memo.hex()
for coin_id, memos in self.get_memos().items()
for memo in memos
if memo is not None
}
return formatted
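# A minimal standalone sketch of the memo regrouping performed by
# from_json_dict_convenience above: the flat coin-id -> memo mapping used on
# the JSON side is rebuilt into the (coin_id, [memos]) pairs stored on the
# record. The hex strings below are illustrative placeholders.
if __name__ == "__main__":
    flat_memos = {"aa" * 32: "68656c6c6f"}  # hypothetical coin id / memo hex
    memos_by_coin: Dict[str, List[str]] = {}
    for coin_id, memo in flat_memos.items():
        memos_by_coin.setdefault(coin_id, []).append(memo)
    memos_list = list(memos_by_coin.items())
    print(memos_list)  # [('aaaa...aa', ['68656c6c6f'])]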
| 42.61165
| 99
| 0.673046
|
from dataclasses import dataclass
from typing import List, Optional, Tuple, Dict
from chia.consensus.coinbase import pool_parent_id, farmer_parent_id, timelord_parent_id
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.mempool_inclusion_status import MempoolInclusionStatus
from chia.types.spend_bundle import SpendBundle
from chia.util.bech32m import encode_puzzle_hash, decode_puzzle_hash
from chia.util.ints import uint8, uint32, uint64
from chia.util.streamable import Streamable, streamable
from chia.wallet.util.transaction_type import TransactionType
@dataclass(frozen=True)
@streamable
class TransactionRecord(Streamable):
confirmed_at_height: uint32
created_at_time: uint64
to_puzzle_hash: bytes32
amount: uint64
fee_amount: uint64
confirmed: bool
sent: uint32
spend_bundle: Optional[SpendBundle]
additions: List[Coin]
removals: List[Coin]
wallet_id: uint32
sent_to: List[Tuple[str, uint8, Optional[str]]]
trade_id: Optional[bytes32]
type: uint32
name: bytes32
memos: List[Tuple[bytes32, List[bytes]]]
def is_in_mempool(self) -> bool:
for (_, mis, _) in self.sent_to:
if MempoolInclusionStatus(mis) == MempoolInclusionStatus.SUCCESS:
return True
return False
def height_farmed(self, genesis_challenge: bytes32) -> Optional[uint32]:
if not self.confirmed:
return None
if self.type == TransactionType.FEE_REWARD or self.type == TransactionType.COINBASE_REWARD:
for block_index in range(self.confirmed_at_height, self.confirmed_at_height - 100, -1):
if block_index < 0:
return None
timelord_parent = timelord_parent_id(uint32(block_index), genesis_challenge)
pool_parent = pool_parent_id(uint32(block_index), genesis_challenge)
farmer_parent = farmer_parent_id(uint32(block_index), genesis_challenge)
if timelord_parent == self.additions[0].parent_coin_info:
return uint32(block_index)
if pool_parent == self.additions[0].parent_coin_info:
return uint32(block_index)
if farmer_parent == self.additions[0].parent_coin_info:
return uint32(block_index)
return None
def get_memos(self) -> Dict[bytes32, List[bytes]]:
return {coin_id: ms for coin_id, ms in self.memos}
@classmethod
def from_json_dict_convenience(cls, modified_tx_input: Dict):
modified_tx = modified_tx_input.copy()
if "to_address" in modified_tx:
modified_tx["to_puzzle_hash"] = decode_puzzle_hash(modified_tx["to_address"]).hex()
if "to_address" in modified_tx:
del modified_tx["to_address"]
memos_dict: Dict[str, List[str]] = {}
memos_list: List = []
if "memos" in modified_tx:
for coin_id, memo in modified_tx["memos"].items():
if coin_id not in memos_dict:
memos_dict[coin_id] = []
memos_dict[coin_id].append(memo)
for coin_id, memos in memos_dict.items():
memos_list.append((coin_id, memos))
modified_tx["memos"] = memos_list
return cls.from_json_dict(modified_tx)
def to_json_dict_convenience(self, config: Dict) -> Dict:
selected = config["selected_network"]
prefix = config["network_overrides"]["config"][selected]["address_prefix"]
formatted = self.to_json_dict()
formatted["to_address"] = encode_puzzle_hash(self.to_puzzle_hash, prefix)
formatted["memos"] = {
coin_id.hex(): memo.hex()
for coin_id, memos in self.get_memos().items()
for memo in memos
if memo is not None
}
return formatted
| true
| true
|
1c3ff86e371ed76b9e4a252ad92846827c7c3820
| 7,026
|
py
|
Python
|
venv/Lib/site-packages/sklearn/neural_network/_base.py
|
Jos33y/student-performance-knn
|
4e965434f52dd6a1380904aa257df1edfaebb3c4
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/sklearn/neural_network/_base.py
|
Jos33y/student-performance-knn
|
4e965434f52dd6a1380904aa257df1edfaebb3c4
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/sklearn/neural_network/_base.py
|
Jos33y/student-performance-knn
|
4e965434f52dd6a1380904aa257df1edfaebb3c4
|
[
"MIT"
] | null | null | null |
"""Utilities for the neural network modules
"""
# Author: Issam H. Laradji <issam.laradji@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy.special import expit as logistic_sigmoid
from scipy.special import xlogy
def identity(X):
"""Simply return the input array.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Same as the input data.
"""
return X
def logistic(X):
"""Compute the logistic function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
return logistic_sigmoid(X, out=X)
def tanh(X):
"""Compute the hyperbolic tan function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
return np.tanh(X, out=X)
def relu(X):
"""Compute the rectified linear unit function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
np.clip(X, 0, np.finfo(X.dtype).max, out=X)
return X
def softmax(X):
"""Compute the K-way softmax function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
tmp = X - X.max(axis=1)[:, np.newaxis]
np.exp(tmp, out=X)
X /= X.sum(axis=1)[:, np.newaxis]
return X
ACTIVATIONS = {'identity': identity, 'tanh': tanh, 'logistic': logistic,
'relu': relu, 'softmax': softmax}
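# --- Added sketch (not from scikit-learn) ---
# Why softmax above subtracts the per-row max before exponentiating:
# without the shift exp() overflows for large logits, while the shifted
# version yields the exact same probabilities.
if __name__ == "__main__":
    logits = np.array([[1000.0, 1001.0, 1002.0]])
    with np.errstate(over="ignore", invalid="ignore"):
        naive = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
    print(naive)  # [[nan nan nan]] -- exp(1000.0) overflows to inf
    stable = softmax(logits.copy())
    print(stable.round(4))  # [[0.09   0.2447 0.6652]]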
def inplace_identity_derivative(Z, delta):
"""Apply the derivative of the identity function: do nothing.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the identity activation function during
the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
# Nothing to do
def inplace_logistic_derivative(Z, delta):
"""Apply the derivative of the logistic sigmoid function.
It exploits the fact that the derivative is a simple function of the output
value from logistic function.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the logistic activation function during
the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
delta *= Z
delta *= (1 - Z)
def inplace_tanh_derivative(Z, delta):
"""Apply the derivative of the hyperbolic tanh function.
It exploits the fact that the derivative is a simple function of the output
value from hyperbolic tangent.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the hyperbolic tangent activation
function during the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
delta *= (1 - Z ** 2)
def inplace_relu_derivative(Z, delta):
"""Apply the derivative of the relu function.
It exploits the fact that the derivative is a simple function of the output
value from rectified linear units activation function.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the rectified linear units activation
function during the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
delta[Z == 0] = 0
DERIVATIVES = {'identity': inplace_identity_derivative,
'tanh': inplace_tanh_derivative,
'logistic': inplace_logistic_derivative,
'relu': inplace_relu_derivative}
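# --- Added sketch (not from scikit-learn) ---
# A quick check that the inplace derivative matches the analytic tanh
# gradient, d/dx tanh(x) = 1 - tanh(x)**2, applied to an upstream error.
if __name__ == "__main__":
    x = np.array([[0.5, -1.0]])
    Z = tanh(x.copy())           # forward pass, as ACTIVATIONS['tanh']
    delta = np.ones_like(Z)      # pretend upstream gradient of ones
    inplace_tanh_derivative(Z, delta)
    assert np.allclose(delta, 1 - np.tanh(x) ** 2)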
def squared_loss(y_true, y_pred):
"""Compute the squared loss for regression.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) values.
y_pred : array-like or label indicator matrix
Predicted values, as returned by a regression estimator.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
return ((y_true - y_pred) ** 2).mean() / 2
def log_loss(y_true, y_prob):
"""Compute Logistic loss for classification.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_prob : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
if y_true.shape[1] == 1:
y_true = np.append(1 - y_true, y_true, axis=1)
return - xlogy(y_true, y_prob).sum() / y_prob.shape[0]
def binary_log_loss(y_true, y_prob):
"""Compute binary logistic loss for classification.
This is identical to log_loss in binary classification case,
but is kept for its use in multilabel case.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_prob : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
return -(xlogy(y_true, y_prob) +
xlogy(1 - y_true, 1 - y_prob)).sum() / y_prob.shape[0]
LOSS_FUNCTIONS = {'squared_loss': squared_loss, 'log_loss': log_loss,
'binary_log_loss': binary_log_loss}
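# --- Added sketch (not from scikit-learn) ---
# Tiny numeric check of log_loss above: for one-hot targets the loss
# reduces to the mean negative log-probability of the true class.
if __name__ == "__main__":
    y_true = np.array([[1.0, 0.0], [0.0, 1.0]])
    y_prob = np.array([[0.8, 0.2], [0.4, 0.6]])
    expected = -(np.log(0.8) + np.log(0.6)) / 2  # ~0.3669
    assert np.isclose(log_loss(y_true, y_prob), expected)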
| 28.104
| 80
| 0.615571
|
import numpy as np
from scipy.special import expit as logistic_sigmoid
from scipy.special import xlogy
def identity(X):
return X
def logistic(X):
return logistic_sigmoid(X, out=X)
def tanh(X):
return np.tanh(X, out=X)
def relu(X):
np.clip(X, 0, np.finfo(X.dtype).max, out=X)
return X
def softmax(X):
tmp = X - X.max(axis=1)[:, np.newaxis]
np.exp(tmp, out=X)
X /= X.sum(axis=1)[:, np.newaxis]
return X
ACTIVATIONS = {'identity': identity, 'tanh': tanh, 'logistic': logistic,
'relu': relu, 'softmax': softmax}
def inplace_identity_derivative(Z, delta):
    pass
def inplace_logistic_derivative(Z, delta):
delta *= Z
delta *= (1 - Z)
def inplace_tanh_derivative(Z, delta):
delta *= (1 - Z ** 2)
def inplace_relu_derivative(Z, delta):
delta[Z == 0] = 0
DERIVATIVES = {'identity': inplace_identity_derivative,
'tanh': inplace_tanh_derivative,
'logistic': inplace_logistic_derivative,
'relu': inplace_relu_derivative}
def squared_loss(y_true, y_pred):
return ((y_true - y_pred) ** 2).mean() / 2
def log_loss(y_true, y_prob):
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
if y_true.shape[1] == 1:
y_true = np.append(1 - y_true, y_true, axis=1)
return - xlogy(y_true, y_prob).sum() / y_prob.shape[0]
def binary_log_loss(y_true, y_prob):
return -(xlogy(y_true, y_prob) +
xlogy(1 - y_true, 1 - y_prob)).sum() / y_prob.shape[0]
LOSS_FUNCTIONS = {'squared_loss': squared_loss, 'log_loss': log_loss,
'binary_log_loss': binary_log_loss}
| true
| true
|
1c3ff8ac816503ffde423cc95cb5ce90b8da1b2e
| 3,424
|
py
|
Python
|
ptm/lda_gibbs.py
|
YLTsai0609/python-topic-modeling
|
13f6e22d31ebc581cc1bd68e1b05ec560020248d
|
[
"Apache-2.0"
] | null | null | null |
ptm/lda_gibbs.py
|
YLTsai0609/python-topic-modeling
|
13f6e22d31ebc581cc1bd68e1b05ec560020248d
|
[
"Apache-2.0"
] | null | null | null |
ptm/lda_gibbs.py
|
YLTsai0609/python-topic-modeling
|
13f6e22d31ebc581cc1bd68e1b05ec560020248d
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import time
import numpy as np
from scipy.special import gammaln
from six.moves import xrange
from .base import BaseGibbsParamTopicModel
from .formatted_logger import formatted_logger
from .utils import sampling_from_dist
logger = formatted_logger("GibbsLDA")
class GibbsLDA(BaseGibbsParamTopicModel):
"""
Latent Dirichlet allocation,
Blei, David M and Ng, Andrew Y and Jordan, Michael I, 2003
Latent Dirichlet allocation with collapsed Gibbs sampling
Attributes
----------
topic_assignment:
list of topic assignment for each word token
"""
def __init__(self, n_doc, n_voca, n_topic, alpha=0.1, beta=0.01, **kwargs):
super(GibbsLDA, self).__init__(
n_doc=n_doc,
n_voca=n_voca,
n_topic=n_topic,
alpha=alpha,
beta=beta,
**kwargs
)
def random_init(self, docs):
"""
Parameters
----------
docs: list, size=n_doc
"""
for di in range(len(docs)):
doc = docs[di]
topics = np.random.randint(self.n_topic, size=len(doc))
self.topic_assignment.append(topics)
for wi in range(len(doc)):
topic = topics[wi]
word = doc[wi]
self.TW[topic, word] += 1
self.sum_T[topic] += 1
self.DT[di, topic] += 1
def fit(self, docs, max_iter=100):
""" Gibbs sampling for LDA
Parameters
----------
docs
max_iter: int
maximum number of Gibbs sampling iteration
"""
self.random_init(docs)
for iteration in xrange(max_iter):
prev = time.perf_counter()  # time.clock() was removed in Python 3.8
for di in xrange(len(docs)):
doc = docs[di]
for wi in xrange(len(doc)):
word = doc[wi]
old_topic = self.topic_assignment[di][wi]
self.TW[old_topic, word] -= 1
self.sum_T[old_topic] -= 1
self.DT[di, old_topic] -= 1
# compute conditional probability of a topic of current word wi
prob = (self.TW[:, word] / self.sum_T) * (self.DT[di, :])
new_topic = sampling_from_dist(prob)
self.topic_assignment[di][wi] = new_topic
self.TW[new_topic, word] += 1
self.sum_T[new_topic] += 1
self.DT[di, new_topic] += 1
if self.verbose:
logger.info(
"[ITER] %d,\telapsed time:%.2f,\tlog_likelihood:%.2f",
iteration,
time.perf_counter() - prev,
self.log_likelihood(docs),
)
def log_likelihood(self, docs):
"""
likelihood function
"""
ll = len(docs) * gammaln(self.alpha * self.n_topic)
ll -= len(docs) * self.n_topic * gammaln(self.alpha)
ll += self.n_topic * gammaln(self.beta * self.n_voca)
ll -= self.n_topic * self.n_voca * gammaln(self.beta)
for di in xrange(len(docs)):
ll += gammaln(self.DT[di, :]).sum() - gammaln(self.DT[di, :].sum())
for ki in xrange(self.n_topic):
ll += gammaln(self.TW[ki, :]).sum() - gammaln(self.TW[ki, :].sum())
return ll
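# --- Added usage sketch (not part of the original file) ---
# This assumes BaseGibbsParamTopicModel accepts the keyword arguments shown
# in __init__ above and allocates the TW/DT/sum_T count matrices itself;
# neither is visible in this file, so treat it as illustrative only.
if __name__ == "__main__":
    n_voca, n_topic = 50, 3
    rng = np.random.RandomState(0)
    docs = [list(rng.randint(n_voca, size=20)) for _ in range(10)]
    model = GibbsLDA(n_doc=len(docs), n_voca=n_voca, n_topic=n_topic)
    model.fit(docs, max_iter=20)
    print(model.log_likelihood(docs))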
| 29.016949
| 83
| 0.521904
|
from __future__ import print_function
import time
import numpy as np
from scipy.special import gammaln
from six.moves import xrange
from .base import BaseGibbsParamTopicModel
from .formatted_logger import formatted_logger
from .utils import sampling_from_dist
logger = formatted_logger("GibbsLDA")
class GibbsLDA(BaseGibbsParamTopicModel):
def __init__(self, n_doc, n_voca, n_topic, alpha=0.1, beta=0.01, **kwargs):
super(GibbsLDA, self).__init__(
n_doc=n_doc,
n_voca=n_voca,
n_topic=n_topic,
alpha=alpha,
beta=beta,
**kwargs
)
def random_init(self, docs):
for di in range(len(docs)):
doc = docs[di]
topics = np.random.randint(self.n_topic, size=len(doc))
self.topic_assignment.append(topics)
for wi in range(len(doc)):
topic = topics[wi]
word = doc[wi]
self.TW[topic, word] += 1
self.sum_T[topic] += 1
self.DT[di, topic] += 1
def fit(self, docs, max_iter=100):
self.random_init(docs)
for iteration in xrange(max_iter):
prev = time.perf_counter()
for di in xrange(len(docs)):
doc = docs[di]
for wi in xrange(len(doc)):
word = doc[wi]
old_topic = self.topic_assignment[di][wi]
self.TW[old_topic, word] -= 1
self.sum_T[old_topic] -= 1
self.DT[di, old_topic] -= 1
prob = (self.TW[:, word] / self.sum_T) * (self.DT[di, :])
new_topic = sampling_from_dist(prob)
self.topic_assignment[di][wi] = new_topic
self.TW[new_topic, word] += 1
self.sum_T[new_topic] += 1
self.DT[di, new_topic] += 1
if self.verbose:
logger.info(
"[ITER] %d,\telapsed time:%.2f,\tlog_likelihood:%.2f",
iteration,
time.clock() - prev,
self.log_likelihood(docs),
)
def log_likelihood(self, docs):
ll = len(docs) * gammaln(self.alpha * self.n_topic)
ll -= len(docs) * self.n_topic * gammaln(self.alpha)
ll += self.n_topic * gammaln(self.beta * self.n_voca)
ll -= self.n_topic * self.n_voca * gammaln(self.beta)
for di in xrange(len(docs)):
ll += gammaln(self.DT[di, :]).sum() - gammaln(self.DT[di, :].sum())
for ki in xrange(self.n_topic):
ll += gammaln(self.TW[ki, :]).sum() - gammaln(self.TW[ki, :].sum())
return ll
| true
| true
|
1c3ff995ba9abf5e372fb993b2378255bd9a7db1
| 4,026
|
py
|
Python
|
model/resnet.py
|
RKorzeniowski/Cutout
|
932a612d80071dd378c568a1633c711690de8608
|
[
"ECL-2.0"
] | 460
|
2017-11-30T14:36:51.000Z
|
2022-03-26T10:24:07.000Z
|
model/resnet.py
|
RKorzeniowski/Cutout
|
932a612d80071dd378c568a1633c711690de8608
|
[
"ECL-2.0"
] | 15
|
2018-06-12T13:42:36.000Z
|
2022-02-18T01:57:52.000Z
|
model/resnet.py
|
RKorzeniowski/Cutout
|
932a612d80071dd378c568a1633c711690de8608
|
[
"ECL-2.0"
] | 150
|
2018-01-22T14:35:16.000Z
|
2022-03-30T18:42:10.000Z
|
'''ResNet18/34/50/101/152 in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = conv3x3(3,64)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18(num_classes=10):
return ResNet(BasicBlock, [2,2,2,2], num_classes)
def ResNet34(num_classes=10):
return ResNet(BasicBlock, [3,4,6,3], num_classes)
def ResNet50(num_classes=10):
return ResNet(Bottleneck, [3,4,6,3], num_classes)
def ResNet101(num_classes=10):
return ResNet(Bottleneck, [3,4,23,3], num_classes)
def ResNet152(num_classes=10):
return ResNet(Bottleneck, [3,8,36,3], num_classes)
def test_resnet():
net = ResNet50()
y = net(Variable(torch.randn(1,3,32,32)))
print(y.size())
# test_resnet()
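# --- Added sanity check (not part of the original file) ---
# Parameter counts for the CIFAR-style variants defined above.
def count_parameters(net):
    return sum(p.numel() for p in net.parameters())

if __name__ == '__main__':
    for builder in (ResNet18, ResNet34, ResNet50):
        print(builder.__name__, f'{count_parameters(builder()):,} parameters')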
| 33.831933
| 102
| 0.631148
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = conv3x3(3,64)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18(num_classes=10):
return ResNet(BasicBlock, [2,2,2,2], num_classes)
def ResNet34(num_classes=10):
return ResNet(BasicBlock, [3,4,6,3], num_classes)
def ResNet50(num_classes=10):
return ResNet(Bottleneck, [3,4,6,3], num_classes)
def ResNet101(num_classes=10):
return ResNet(Bottleneck, [3,4,23,3], num_classes)
def ResNet152(num_classes=10):
return ResNet(Bottleneck, [3,8,36,3], num_classes)
def test_resnet():
net = ResNet50()
y = net(Variable(torch.randn(1,3,32,32)))
print(y.size())
| true
| true
|
1c3ffa41c6f3e5bfc799516f57d14fa6f67806a9
| 4,315
|
py
|
Python
|
vae.py
|
MarkintoshZ/FontTransformer
|
5051db0d38a4b8ae7602fb22c75c008f9f59d2d1
|
[
"MIT"
] | null | null | null |
vae.py
|
MarkintoshZ/FontTransformer
|
5051db0d38a4b8ae7602fb22c75c008f9f59d2d1
|
[
"MIT"
] | null | null | null |
vae.py
|
MarkintoshZ/FontTransformer
|
5051db0d38a4b8ae7602fb22c75c008f9f59d2d1
|
[
"MIT"
] | null | null | null |
from keras.layers import Dense, Conv2D, Deconvolution2D, \
MaxPool2D, UpSampling2D, Flatten, Dropout, Reshape,\
Concatenate, Lambda
from keras.models import Sequential, Model, load_model, Input
from keras.losses import mse, categorical_crossentropy
from keras.utils import to_categorical
from keras import backend as K
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
import pandas as pd
import numpy as np
from PIL import Image
import os
import cv2
X = []
y_label = []
for path in os.listdir('./datasets'):
print(path)
if path == '.DS_Store':
continue
for image_path in os.listdir('./datasets/' + path):
try:
image = Image.open(os.path.join('./datasets/' + path, image_path))
except OSError:
continue
data = np.asarray(image.convert('L'))
data = data / 255
data = np.clip(data, 0, 1)
assert(data.max() <= 1)
assert(data.min() >= 0)
X.append(data)
y_label.append(image_path[0])
X = np.array(X).reshape(-1, 40, 24, 1)
lb = LabelEncoder()
y_label_transformed = lb.fit_transform(y_label)
y = to_categorical(y_label_transformed)
# reparameterization trick
# instead of sampling from Q(z|X), sample epsilon = N(0,I)
# z = z_mean + sqrt(var) * epsilon
def sampling(args):
"""Reparameterization trick by sampling from an isotropic unit Gaussian.
# Arguments
args (tensor): mean and log of variance of Q(z|X)
# Returns
z (tensor): sampled latent vector
"""
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# by default, random_normal has mean = 0 and std = 1.0
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + K.exp(0.5 * z_log_var) * epsilon
# encoder
input_img = Input(shape=(40, 24, 1))
x = Conv2D(16, 3, activation='selu', padding='same')(input_img)
x = Conv2D(16, 3, activation='selu', padding='same')(x)
# x = MaxPool2D(2, padding='same')(x)
x = Conv2D(16, 3, activation='selu', padding='same')(x)
# x = MaxPool2D(2, padding='same')(x)
x = Flatten()(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='selu')(x)
x = Dense(256, activation='selu')(x)
encoded = Dense(26, activation='softmax')(x)
z_mean = Dense(16, name='z_mean')(x)
z_log_var = Dense(16, name='z_log_var')(x)
# use reparameterization trick to push the sampling out as input
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(16,), name='z')([z_mean, z_log_var])
# instantiate encoder model
encoder = Model(input_img, [encoded, z_mean, z_log_var, z], name='encoder')
encoder.summary()
# decoder
alphabet_inputs = Input(shape=(26,))
latent_inputs = Input(shape=(16,), name='z_sampling')
x = Concatenate()([alphabet_inputs, latent_inputs])
x = Dense(256, activation='selu')(x)
x = Dense(512, activation='selu')(x)
x = Dense(15360)(x)
x = Reshape((40, 24, 16))(x)
# x = UpSampling2D(2)(x)
x = Conv2D(16, 2, activation='selu', padding='same')(x)
# x = UpSampling2D(2)(x)
x = Conv2D(16, 2, activation='selu', padding='same')(x)
x = Conv2D(16, 2, activation='selu', padding='same')(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)
# instantiate decoder model
decoder = Model([alphabet_inputs, latent_inputs], decoded, name='decoder')
decoder.summary()
# instantiate VAE model
encoder_out = encoder(input_img)
outputs = decoder([encoder_out[0], encoder_out[3]])
vae = Model(input_img, [encoder_out[0], outputs], name='vae_mlp')
def custom_loss(y_true, y_pred):
reconstruction_loss = mse(input_img, y_pred[1])
reconstruction_loss *= 960
reconstruction_loss = K.sum(reconstruction_loss, axis=-1)
classification_loss = categorical_crossentropy(y_true, y_pred[0])
classification_loss = K.sum(classification_loss, axis=-1)
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)  # element-wise KL term; K.mean here collapsed z_log_var to a scalar
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss + classification_loss)
return vae_loss
# vae.add_loss(vae_loss)
vae.compile(optimizer='adam', loss=custom_loss)
vae.summary()
vae.fit(X, [y, X], batch_size=32, epochs=100, shuffle=True)
vae.save('vae.h5')
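# --- Added generation sketch (not part of the original script) ---
# After training, new glyphs can be sampled by feeding the decoder a one-hot
# letter plus a latent vector drawn from the N(0, I) prior. Assumptions:
# 'a' is among the labels seen by `lb`, and the decoder weights are the
# ones just fitted above.
letter = np.zeros((1, 26))
letter[0, lb.transform(['a'])[0]] = 1.0
z_sample = np.random.normal(size=(1, 16))
glyph = decoder.predict([letter, z_sample])  # shape (1, 40, 24, 1)
Image.fromarray((glyph[0, :, :, 0] * 255).astype('uint8')).save('sample_a.png')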
| 33.192308
| 78
| 0.689455
|
from keras.layers import Dense, Conv2D, Deconvolution2D, \
MaxPool2D, UpSampling2D, Flatten, Dropout, Reshape,\
Concatenate, Lambda
from keras.models import Sequential, Model, load_model, Input
from keras.losses import mse, categorical_crossentropy
from keras.utils import to_categorical
from keras import backend as K
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
import pandas as pd
import numpy as np
from PIL import Image
import os
import cv2
X = []
y_label = []
for path in os.listdir('./datasets'):
print(path)
if path == '.DS_Store':
continue
for image_path in os.listdir('./datasets/' + path):
try:
image = Image.open(os.path.join('./datasets/' + path, image_path))
except OSError:
continue
data = np.asarray(image.convert('L'))
data = data / 255
data = np.clip(data, 0, 1)
assert(data.max() <= 1)
assert(data.min() >= 0)
X.append(data)
y_label.append(image_path[0])
X = np.array(X).reshape(-1, 40, 24, 1)
lb = LabelEncoder()
y_label_transformed = lb.fit_transform(y_label)
y = to_categorical(y_label_transformed)
def sampling(args):
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + K.exp(0.5 * z_log_var) * epsilon
input_img = Input(shape=(40, 24, 1))
x = Conv2D(16, 3, activation='selu', padding='same')(input_img)
x = Conv2D(16, 3, activation='selu', padding='same')(x)
x = Conv2D(16, 3, activation='selu', padding='same')(x)
x = Flatten()(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='selu')(x)
x = Dense(256, activation='selu')(x)
encoded = Dense(26, activation='softmax')(x)
z_mean = Dense(16, name='z_mean')(x)
z_log_var = Dense(16, name='z_log_var')(x)
z = Lambda(sampling, output_shape=(16,), name='z')([z_mean, z_log_var])
# instantiate encoder model
encoder = Model(input_img, [encoded, z_mean, z_log_var, z], name='encoder')
encoder.summary()
# decoder
alphabet_inputs = Input(shape=(26,))
latent_inputs = Input(shape=(16,), name='z_sampling')
x = Concatenate()([alphabet_inputs, latent_inputs])
x = Dense(256, activation='selu')(x)
x = Dense(512, activation='selu')(x)
x = Dense(15360)(x)
x = Reshape((40, 24, 16))(x)
# x = UpSampling2D(2)(x)
x = Conv2D(16, 2, activation='selu', padding='same')(x)
# x = UpSampling2D(2)(x)
x = Conv2D(16, 2, activation='selu', padding='same')(x)
x = Conv2D(16, 2, activation='selu', padding='same')(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)
# instantiate decoder model
decoder = Model([alphabet_inputs, latent_inputs], decoded, name='decoder')
decoder.summary()
# instantiate VAE model
encoder_out = encoder(input_img)
outputs = decoder([encoder_out[0], encoder_out[3]])
vae = Model(input_img, [encoder_out[0], outputs], name='vae_mlp')
def custom_loss(y_true, y_pred):
reconstruction_loss = mse(input_img, y_pred[1])
reconstruction_loss *= 960
reconstruction_loss = K.sum(reconstruction_loss, axis=-1)
classification_loss = categorical_crossentropy(y_true, y_pred[0])
classification_loss = K.sum(classification_loss, axis=-1)
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss + classification_loss)
return vae_loss
# vae.add_loss(vae_loss)
vae.compile(optimizer='adam', loss=custom_loss)
vae.summary()
vae.fit(X, [y, X], batch_size=32, epochs=100, shuffle=True)
vae.save('vae.h5')
| true
| true
|
1c3ffaec1a29ef25b021b63a4fcd78bed2b78692
| 3,785
|
py
|
Python
|
thorpy/miscgui/functions.py
|
YannThorimbert/Torus
|
367b1de7b6a2751a8dd5ecbfa8946bbd6bf2e580
|
[
"MIT"
] | 2
|
2019-05-29T16:17:29.000Z
|
2021-08-15T23:41:52.000Z
|
thorpy/miscgui/functions.py
|
YannThorimbert/Torus
|
367b1de7b6a2751a8dd5ecbfa8946bbd6bf2e580
|
[
"MIT"
] | null | null | null |
thorpy/miscgui/functions.py
|
YannThorimbert/Torus
|
367b1de7b6a2751a8dd5ecbfa8946bbd6bf2e580
|
[
"MIT"
] | 1
|
2019-04-22T18:42:35.000Z
|
2019-04-22T18:42:35.000Z
|
import pygame.event as pygame_event
import pygame
from thorpy.miscgui import constants, application, parameters
def obtain_valid_painter(painter_class, **kwargs):
"""Returns a valid painter whose class is <painter_class>. You can try any
argument you want ; only arguments existing in painter's __init__ method
will be used.
"""
try:
painter = painter_class(**kwargs)
except TypeError:
painter = painter_class()
args_okay = {}
for arg in kwargs:
if hasattr(painter, arg):
args_okay[arg] = kwargs[arg]
painter = painter_class(**args_okay)
return painter
def keypress(element, newstate):
"""Make <element> goes in state <newstate>, refreshing the display."""
element.change_state(newstate)
element.unblit()
element.blit()
element.update()
def quit_func():
"""Post quit event."""
pygame_event.post(constants.EVENT_QUIT)
def set_current_menu(menu):
debug_msg("Set current menu: ", menu)
application._OLD_MENUS.append(application._CURRENT_MENU)
application._CURRENT_MENU = menu
def quit_menu_func():
"""Leaves the current menu and set the new one as the previous one."""
debug_msg("Quit menu func", application._CURRENT_MENU)
application._CURRENT_MENU.set_leave()
application._CURRENT_MENU = application._OLD_MENUS.pop()
def add_element_to_current_menu(element):
debug_msg("add element to current menu: " + element.get_text())
application._CURRENT_MENU.add_to_population(element)
def get_current_menu():
return application._CURRENT_MENU
def get_current_application():
return application._CURRENT_APPLICATION
def get_screen():
return application._SCREEN
def get_screen_size():
return get_current_application().size
def refresh_current_menu():
"""Refreshes the current menu events. Use it to include newly added
elements. Returns True if a menu has been refreshed, else returns False.
"""
debug_msg("Refreshing current menu.")
current_menu = get_current_menu()
if current_menu:
current_menu.refresh()
return True
else:
return False
def debug_msg(*content):
if application.DEBUG_MODE:
str_content = list()
for e in content:
str_content.append(str(e) + " ")
print("THOR DEBUG : " + ''.join(str_content))
def info_msg(*content):
if application.DEBUG_MODE:
str_content = list()
for e in content:
str_content.append(str(e) + " ")
print("THOR INFO : " + ''.join(str_content))
def get_fps():
return application._CURRENT_MENU.clock.get_fps()
def remove_element(element):
removed = False
current_menu = get_current_menu()
if current_menu:
for e in current_menu.get_population():
if element in e.get_elements():
e.remove_elements([element])
removed = True
if element in current_menu.get_population():
population = current_menu.get_population()
population.remove(element)
removed = True
else:
debug_msg("Could not remove element", element, " since there is no\
current menu.")
if removed:
refresh_current_menu()
def get_default_font_infos():
from thorpy.painting.writer import get_font_name
from thorpy.miscgui import style
return {"name":get_font_name(None), "size":style.FONT_SIZE}
def writing(delay=30,interval=100,interval_pygame=500):
parameters.KEY_DELAY = delay
parameters.KEY_INTERVAL = interval
pygame.key.set_repeat(delay,interval_pygame)
def playing(delay,interval):
parameters.KEY_DELAY = delay
parameters.KEY_INTERVAL = interval
pygame.key.set_repeat(delay,interval)
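# --- Added illustration (toy class, not from thorpy) ---
# obtain_valid_painter silently drops keyword arguments that the painter
# class cannot accept instead of raising:
if __name__ == "__main__":
    class ToyPainter(object):
        def __init__(self, color=(0, 0, 0)):
            self.color = color

    p = obtain_valid_painter(ToyPainter, color=(255, 0, 0), size=(10, 10))
    print(p.color)  # (255, 0, 0): 'size' was filtered out, 'color' kept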
| 31.280992
| 78
| 0.684808
|
import pygame.event as pygame_event
import pygame
from thorpy.miscgui import constants, application, parameters
def obtain_valid_painter(painter_class, **kwargs):
try:
painter = painter_class(**kwargs)
except TypeError:
painter = painter_class()
args_okay = {}
for arg in kwargs:
if hasattr(painter, arg):
args_okay[arg] = kwargs[arg]
painter = painter_class(**args_okay)
return painter
def keypress(element, newstate):
element.change_state(newstate)
element.unblit()
element.blit()
element.update()
def quit_func():
pygame_event.post(constants.EVENT_QUIT)
def set_current_menu(menu):
debug_msg("Set current menu: ", menu)
application._OLD_MENUS.append(application._CURRENT_MENU)
application._CURRENT_MENU = menu
def quit_menu_func():
debug_msg("Quit menu func", application._CURRENT_MENU)
application._CURRENT_MENU.set_leave()
application._CURRENT_MENU = application._OLD_MENUS.pop()
def add_element_to_current_menu(element):
debug_msg("add element to current menu: " + element.get_text())
application._CURRENT_MENU.add_to_population(element)
def get_current_menu():
return application._CURRENT_MENU
def get_current_application():
return application._CURRENT_APPLICATION
def get_screen():
return application._SCREEN
def get_screen_size():
return get_current_application().size
def refresh_current_menu():
debug_msg("Refreshing current menu.")
current_menu = get_current_menu()
if current_menu:
current_menu.refresh()
return True
else:
return False
def debug_msg(*content):
if application.DEBUG_MODE:
str_content = list()
for e in content:
str_content.append(str(e) + " ")
print("THOR DEBUG : " + ''.join(str_content))
def info_msg(*content):
if application.DEBUG_MODE:
str_content = list()
for e in content:
str_content.append(str(e) + " ")
print("THOR INFO : " + ''.join(str_content))
def get_fps():
return application._CURRENT_MENU.clock.get_fps()
def remove_element(element):
removed = False
current_menu = get_current_menu()
if current_menu:
for e in current_menu.get_population():
if element in e.get_elements():
e.remove_elements([element])
removed = True
if element in current_menu.get_population():
population = current_menu.get_population()
population.remove(element)
removed = True
else:
debug_msg("Could not remove element", element, " since there is no\
current menu.")
if removed:
refresh_current_menu()
def get_default_font_infos():
from thorpy.painting.writer import get_font_name
from thorpy.miscgui import style
return {"name":get_font_name(None), "size":style.FONT_SIZE}
def writing(delay=30,interval=100,interval_pygame=500):
parameters.KEY_DELAY = delay
parameters.KEY_INTERVAL = interval
pygame.key.set_repeat(delay,interval_pygame)
def playing(delay,interval):
parameters.KEY_DELAY = delay
parameters.KEY_INTERVAL = interval
pygame.key.set_repeat(delay,interval)
| true
| true
|
1c3ffe0e7312e14c944b616c7e6c57c5b159664d
| 2,662
|
py
|
Python
|
mango/placedorder.py
|
dendisuhubdy/mango-explorer
|
d49294f2cd8d3521062e228013e240b193909ca8
|
[
"MIT"
] | null | null | null |
mango/placedorder.py
|
dendisuhubdy/mango-explorer
|
d49294f2cd8d3521062e228013e240b193909ca8
|
[
"MIT"
] | null | null | null |
mango/placedorder.py
|
dendisuhubdy/mango-explorer
|
d49294f2cd8d3521062e228013e240b193909ca8
|
[
"MIT"
] | null | null | null |
# # ⚠ Warning
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# [🥭 Mango Markets](https://mango.markets/) support is available at:
# [Docs](https://docs.mango.markets/)
# [Discord](https://discord.gg/67jySBhxrg)
# [Twitter](https://twitter.com/mangomarkets)
# [Github](https://github.com/blockworks-foundation)
# [Email](mailto:hello@blockworks.foundation)
import typing
from decimal import Decimal
from .orders import Side
# # 🥭 PlacedOrder tuple
#
# A `PlacedOrder` is a representation of all the data available from an Open Orders account pertaining to a
# particular order.
#
# The information is usually split across 3 collections - 'is bid', 'orders' and 'client ID's. That can be a
# little awkward to use, so this tuple packages it all together, per order.
#
class PlacedOrder(typing.NamedTuple):
id: int
client_id: int
side: Side
@staticmethod
def build_from_open_orders_data(free_slot_bits: Decimal, is_bid_bits: Decimal, order_ids: typing.Sequence[Decimal], client_order_ids: typing.Sequence[Decimal]) -> typing.Sequence["PlacedOrder"]:
int_free_slot_bits = int(free_slot_bits)
int_is_bid_bits = int(is_bid_bits)
placed_orders: typing.List[PlacedOrder] = []
for index in range(len(order_ids)):
if not (int_free_slot_bits & (1 << index)):
order_id = int(order_ids[index])
client_id = int(client_order_ids[index])
side = Side.BUY if int_is_bid_bits & (1 << index) else Side.SELL
placed_orders += [PlacedOrder(id=order_id, client_id=client_id, side=side)]
return placed_orders
def __repr__(self) -> str:
return f"{self}"
def __str__(self) -> str:
return f"« PlacedOrder {self.side} [{self.id}] {self.client_id} »"
# # 🥭 PlacedOrdersContainer protocol
#
# The `PlacedOrdersContainer` protocol exposes commonality between the regular Serum `OpenOrders` class and the
# internally-different `PerpOpenOrders` class. Both have their own `placed_orders` member, but are otherwise
# different enough that a common abstract base class would be a bit kludgy.
#
class PlacedOrdersContainer(typing.Protocol):
placed_orders: typing.Sequence[PlacedOrder]
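# --- Added round-trip sketch (values invented for illustration) ---
# build_from_open_orders_data decodes the three parallel bit/ID arrays:
# below, slot 0 is free, slot 1 holds a bid and slot 2 holds an ask.
if __name__ == "__main__":
    placed = PlacedOrder.build_from_open_orders_data(
        free_slot_bits=Decimal(0b001),   # bit 0 set => slot 0 unused
        is_bid_bits=Decimal(0b010),      # bit 1 set => slot 1 is a bid
        order_ids=[Decimal(0), Decimal(111), Decimal(222)],
        client_order_ids=[Decimal(0), Decimal(7), Decimal(8)],
    )
    for order in placed:
        print(order)  # the bid in slot 1, then the ask in slot 2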
| 40.953846
| 198
| 0.717506
|
import typing
from decimal import Decimal
from .orders import Side
class PlacedOrder(typing.NamedTuple):
id: int
client_id: int
side: Side
@staticmethod
def build_from_open_orders_data(free_slot_bits: Decimal, is_bid_bits: Decimal, order_ids: typing.Sequence[Decimal], client_order_ids: typing.Sequence[Decimal]) -> typing.Sequence["PlacedOrder"]:
int_free_slot_bits = int(free_slot_bits)
int_is_bid_bits = int(is_bid_bits)
placed_orders: typing.List[PlacedOrder] = []
for index in range(len(order_ids)):
if not (int_free_slot_bits & (1 << index)):
order_id = int(order_ids[index])
client_id = int(client_order_ids[index])
side = Side.BUY if int_is_bid_bits & (1 << index) else Side.SELL
placed_orders += [PlacedOrder(id=order_id, client_id=client_id, side=side)]
return placed_orders
def __repr__(self) -> str:
return f"{self}"
def __str__(self) -> str:
return f"« PlacedOrder {self.side} [{self.id}] {self.client_id} »"
class PlacedOrdersContainer(typing.Protocol):
placed_orders: typing.Sequence[PlacedOrder]
| true
| true
|
1c3ffe11d33e1d6f639ee0ab88dbc29ee217b249
| 82,816
|
py
|
Python
|
tools/tensorflow_docs/api_generator/parser.py
|
abhi13-nitb/docs
|
8deb998a040ecf17b8887cf568fa919e16243560
|
[
"Apache-2.0"
] | null | null | null |
tools/tensorflow_docs/api_generator/parser.py
|
abhi13-nitb/docs
|
8deb998a040ecf17b8887cf568fa919e16243560
|
[
"Apache-2.0"
] | null | null | null |
tools/tensorflow_docs/api_generator/parser.py
|
abhi13-nitb/docs
|
8deb998a040ecf17b8887cf568fa919e16243560
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Turn Python docstrings into Markdown for TensorFlow documentation."""
import ast
import collections
import enum
import functools
import inspect
import itertools
import json
import os
import re
import textwrap
import typing
from typing import Any, Dict, List, Tuple, Iterable, NamedTuple, Optional, Union
import astor
from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import doc_generator_visitor
from google.protobuf.message import Message as ProtoMessage
class ObjType(enum.Enum):
"""Enum to standardize object type checks."""
TYPE_ALIAS = 'type_alias'
MODULE = 'module'
CLASS = 'class'
CALLABLE = 'callable'
PROPERTY = 'property'
OTHER = 'other'
def get_obj_type(py_obj: Any) -> ObjType:
"""Get the `ObjType` for the `py_object`."""
if hasattr(py_obj, '__args__') and hasattr(py_obj, '__origin__'):
return ObjType.TYPE_ALIAS
elif inspect.ismodule(py_obj):
return ObjType.MODULE
elif inspect.isclass(py_obj):
return ObjType.CLASS
elif callable(py_obj):
return ObjType.CALLABLE
elif isinstance(py_obj, property):
return ObjType.PROPERTY
else:
return ObjType.OTHER
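# --- Added demonstration (not part of the original file) ---
# How the dispatch above classifies a few representative objects:
if __name__ == '__main__':
  print(get_obj_type(inspect))           # ObjType.MODULE
  print(get_obj_type(ObjType))           # ObjType.CLASS
  print(get_obj_type(get_obj_type))      # ObjType.CALLABLE
  print(get_obj_type(typing.List[int]))  # ObjType.TYPE_ALIAS
  print(get_obj_type(42))                # ObjType.OTHER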
class ParserConfig(object):
"""Stores all indexes required to parse the docs."""
def __init__(self, reference_resolver, duplicates, duplicate_of, tree, index,
reverse_index, base_dir, code_url_prefix):
"""Object with the common config for docs_for_object() calls.
Args:
reference_resolver: An instance of ReferenceResolver.
duplicates: A `dict` mapping fully qualified names to a set of all aliases
of this name. This is used to automatically generate a list of all
aliases for each name.
duplicate_of: A map from duplicate names to preferred names of API
symbols.
tree: A `dict` mapping a fully qualified name to the names of all its
members. Used to populate the members section of a class or module page.
index: A `dict` mapping full names to objects.
reverse_index: A `dict` mapping object ids to full names.
base_dir: A base path that is stripped from file locations written to the
docs.
code_url_prefix: A Url to pre-pend to the links to file locations.
"""
self.reference_resolver = reference_resolver
self.duplicates = duplicates
self.duplicate_of = duplicate_of
self.tree = tree
self.reverse_index = reverse_index
self.index = index
self.base_dir = base_dir
self.code_url_prefix = code_url_prefix
def py_name_to_object(self, full_name):
"""Return the Python object for a Python symbol name."""
return self.index[full_name]
class _FileLocation(object):
"""This class indicates that the object is defined in a regular file.
This can be used for the `defined_in` slot of the `PageInfo` objects.
"""
GITHUB_LINE_NUMBER_TEMPLATE = '#L{start_line:d}-L{end_line:d}'
def __init__(self, rel_path, url=None, start_line=None, end_line=None):
self.rel_path = rel_path
self.url = url
self.start_line = start_line
self.end_line = end_line
github_master_re = 'github.com.*?(blob|tree)/master'
suffix = ''
# Only attach a line number when the GitHub URL is not pinned to the
# moving "master" branch, where line numbers would drift over time.
if self.start_line and not re.search(github_master_re, self.url):
if 'github.com' in self.url:
suffix = self.GITHUB_LINE_NUMBER_TEMPLATE.format(
start_line=self.start_line, end_line=self.end_line)
self.url = self.url + suffix
def is_class_attr(full_name, index):
"""Check if the object's parent is a class.
Args:
full_name: The full name of the object, like `tf.module.symbol`.
index: The {full_name:py_object} dictionary for the public API.
Returns:
True if the object is a class attribute.
"""
parent_name = full_name.rsplit('.', 1)[0]
if inspect.isclass(index[parent_name]):
return True
return False
class TFDocsError(Exception):
pass
def documentation_path(full_name, is_fragment=False):
"""Returns the file path for the documentation for the given API symbol.
Given the fully qualified name of a library symbol, compute the path to which
to write the documentation for that symbol (relative to a base directory).
Documentation files are organized into directories that mirror the python
module/class structure.
Args:
full_name: Fully qualified name of a library symbol.
is_fragment: If `False` produce a direct markdown link (`tf.a.b.c` -->
`tf/a/b/c.md`). If `True` produce fragment link, `tf.a.b.c` -->
`tf/a/b.md#c`
Returns:
The file path to which to write the documentation for `full_name`.
"""
parts = full_name.split('.')
if is_fragment:
parts, fragment = parts[:-1], parts[-1]
result = os.path.join(*parts) + '.md'
if is_fragment:
result = result + '#' + fragment
return result
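# --- Added check (example symbol invented) ---
# The two output shapes produced by documentation_path:
if __name__ == '__main__':
  assert documentation_path('tf.a.b.c') == os.path.join('tf', 'a', 'b', 'c.md')
  assert documentation_path('tf.a.b.c', is_fragment=True) == (
      os.path.join('tf', 'a', 'b.md') + '#c')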
def _get_raw_docstring(py_object):
"""Get the docs for a given python object.
Args:
py_object: A python object to retrieve the docs for (class, function/method,
or module).
Returns:
The docstring, or the empty string if no docstring was found.
"""
if get_obj_type(py_object) is ObjType.TYPE_ALIAS:
if inspect.getdoc(py_object) != inspect.getdoc(py_object.__origin__):
result = inspect.getdoc(py_object)
else:
result = ''
elif get_obj_type(py_object) is not ObjType.OTHER:
result = inspect.getdoc(py_object) or ''
else:
result = ''
result = _StripTODOs()(result)
result = _StripPylints()(result)
result = _AddDoctestFences()(result + '\n')
return result
class _AddDoctestFences(object):
"""Adds ``` fences around doctest caret blocks >>> that don't have them."""
CARET_BLOCK_RE = re.compile(
r"""
(?<=\n)\ *\n # After a blank line.
(?P<indent>\ *)(?P<content>\>\>\>.*?) # Whitespace and a triple caret.
\n\s*?(?=\n|$) # Followed by a blank line""",
re.VERBOSE | re.DOTALL)
def _sub(self, match):
groups = match.groupdict()
fence = f"\n{groups['indent']}```\n"
content = groups['indent'] + groups['content']
return ''.join([fence, content, fence])
def __call__(self, content):
return self.CARET_BLOCK_RE.sub(self._sub, content)
class _StripTODOs(object):
TODO_RE = re.compile('#? *TODO.*')
def __call__(self, content: str) -> str:
return self.TODO_RE.sub('', content)
class _StripPylints(object):
PYLINT_RE = re.compile('# *?pylint:.*')
def __call__(self, content: str) -> str:
return self.PYLINT_RE.sub('', content)
class IgnoreLineInBlock(object):
"""Ignores the lines in a block.
Attributes:
block_start: Contains the start string of a block to ignore.
block_end: Contains the end string of a block to ignore.
"""
def __init__(self, block_start, block_end):
self._block_start = block_start
self._block_end = block_end
self._in_block = False
self._start_end_regex = re.escape(self._block_start) + r'.*?' + re.escape(
self._block_end)
def __call__(self, line):
# If start and end block are on the same line, return True.
if re.match(self._start_end_regex, line):
return True
if not self._in_block:
if self._block_start in line:
self._in_block = True
elif self._block_end in line:
self._in_block = False
# True is being returned here because the last line in the block should
# also be ignored.
return True
return self._in_block
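# --- Added driver (hypothetical input) ---
# Lines inside a ``` fence, including both fence lines, report True:
if __name__ == '__main__':
  in_fence = IgnoreLineInBlock('```', '```')
  lines = ['before', '```', 'code', '```', 'after']
  print([in_fence(line) for line in lines])  # [False, True, True, True, False]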
# ?P<...> helps to find the match by entering the group name instead of the
# index. For example, instead of doing match.group(1) we can do
# match.group('brackets')
AUTO_REFERENCE_RE = re.compile(
r"""
(?P<brackets>\[.*?\]) # find characters inside '[]'
|
`(?P<backticks>[\w\(\[\)\]\{\}.,=\s]+?)` # or find characters inside '``'
""",
flags=re.VERBOSE)
class ReferenceResolver(object):
"""Class for replacing `tf.symbol` references with Markdown links."""
def __init__(
self,
duplicate_of: Dict[str, str],
is_fragment: Dict[str, bool],
py_module_names: List[str],
site_link: Optional[str] = None,
):
"""Initializes a Reference Resolver.
Args:
duplicate_of: A map from duplicate names to preferred names of API
symbols.
is_fragment: A map from full names to bool for each symbol. If True the
object lives at a page fragment `tf.a.b.c` --> `tf/a/b#c`. If False
object has a page to itself: `tf.a.b.c` --> `tf/a/b/c`.
py_module_names: A list of string names of Python modules.
site_link: The website to which these symbols should link to. A prefix
is added before the links to enable cross-site linking if `site_link`
is not None.
"""
self._duplicate_of = duplicate_of
self._is_fragment = is_fragment
self._py_module_names = py_module_names
self._site_link = site_link
self._all_names = set(is_fragment.keys())
self._partial_symbols_dict = self._create_partial_symbols_dict()
@classmethod
def from_visitor(cls, visitor, **kwargs):
"""A factory function for building a ReferenceResolver from a visitor.
Args:
visitor: an instance of `DocGeneratorVisitor`
**kwargs: all remaining args are passed to the constructor
Returns:
an instance of `ReferenceResolver`
"""
is_fragment = {}
for full_name, obj in visitor.index.items():
obj_type = get_obj_type(obj)
if obj_type in (ObjType.CLASS, ObjType.MODULE):
is_fragment[full_name] = False
elif obj_type in (ObjType.CALLABLE, ObjType.TYPE_ALIAS):
if is_class_attr(full_name, visitor.index):
is_fragment[full_name] = True
else:
is_fragment[full_name] = False
else:
is_fragment[full_name] = True
return cls(
duplicate_of=visitor.duplicate_of, is_fragment=is_fragment, **kwargs)
def is_fragment(self, full_name: str):
"""Returns True if the object's doc is a subsection of another page."""
return self._is_fragment[full_name]
@classmethod
def from_json_file(cls, filepath):
"""Initialize the reference resolver via _api_cache.json."""
with open(filepath) as f:
json_dict = json.load(f)
return cls(**json_dict)
def _partial_symbols(self, symbol):
"""Finds the partial symbols given the true symbol.
For example, for the symbol `tf.keras.layers.Conv2D` the partials
returned will be:
partials = ["keras.layers.Conv2D", "layers.Conv2D"]
The full name itself is excluded, and each generated partial keeps at
least one '.' so that a bare trailing component never has to be guessed.
Args:
symbol: String, representing the true symbol.
Returns:
A list of partial symbol names
"""
split_symbol = symbol.split('.')
partials = [
'.'.join(split_symbol[i:]) for i in range(1,
len(split_symbol) - 1)
]
return partials
def _create_partial_symbols_dict(self):
"""Creates a partial symbols dictionary for all the symbols in TensorFlow.
Returns:
A dictionary mapping {partial_symbol: real_symbol}
"""
partial_symbols_dict = collections.defaultdict(list)
for name in sorted(self._all_names):
if 'tf.compat.v' in name or 'tf.contrib' in name:
continue
# TODO(yashkatariya): Remove `tf.experimental.numpy` once `tf.numpy` is
# no longer in the experimental namespace.
if 'tf.experimental.numpy' in name or 'tf.numpy' in name:
continue
partials = self._partial_symbols(name)
for partial in partials:
partial_symbols_dict[partial].append(name)
new_partial_dict = {}
for partial, full_names in partial_symbols_dict.items():
if not full_names:
continue
full_names = [
self._duplicate_of.get(full_name, full_name)
for full_name in full_names
]
new_partial_dict[partial] = full_names[0]
return new_partial_dict
def to_json_file(self, filepath):
"""Converts the RefenceResolver to json and writes it to the specified file.
Args:
filepath: The file path to write the json to.
"""
try:
os.makedirs(os.path.dirname(filepath))
except OSError:
pass
json_dict = {}
for key, value in self.__dict__.items():
# Drop these fields, they are generated by the constructor.
if key == '_all_names' or key == '_partial_symbols_dict':
continue
# Strip off any leading underscores on field names as these are not
# recognized by the constructor.
json_dict[key.lstrip('_')] = value
with open(filepath, 'w') as f:
json.dump(json_dict, f, indent=2, sort_keys=True)
f.write('\n')
def replace_references(self, string, relative_path_to_root, full_name=None):
"""Replace `tf.symbol` references with links to symbol's documentation page.
This function finds all occurrences of "`tf.symbol`" in `string`
and replaces them with markdown links to the documentation page
for "symbol".
`relative_path_to_root` is the relative path from the document
that contains the "`tf.symbol`" reference to the root of the API
documentation that is linked to. If the containing page is part of
the same API docset, `relative_path_to_root` can be set to
`os.path.dirname(documentation_path(name))`, where `name` is the
python name of the object whose documentation page the reference
lives on.
Args:
string: A string in which "`tf.symbol`" references should be replaced.
relative_path_to_root: The relative path from the containing document to
the root of the API documentation that is being linked to.
full_name: (optional) The full name of current object, so replacements can
depend on context.
Returns:
`string`, with "`tf.symbol`" references replaced by Markdown links.
"""
def one_ref(match):
return self._one_ref(match, relative_path_to_root, full_name)
fixed_lines = []
filters = [
IgnoreLineInBlock('<pre class="tfo-notebook-code-cell-output">',
'</pre>'),
IgnoreLineInBlock('```', '```'),
IgnoreLineInBlock(
'<pre class="devsite-click-to-copy prettyprint lang-py">', '</pre>')
]
for line in string.splitlines():
if not any(filter_block(line) for filter_block in filters):
line = re.sub(AUTO_REFERENCE_RE, one_ref, line)
fixed_lines.append(line)
return '\n'.join(fixed_lines)
def python_link(self,
link_text,
ref_full_name,
relative_path_to_root,
code_ref=True):
"""Resolve a "`tf.symbol`" reference to a Markdown link.
This will pick the canonical location for duplicate symbols. The
input to this function should already be stripped of the '@' and
'{}'. This function returns a Markdown link. If `code_ref` is
true, it is assumed that this is a code reference, so the link
text will be rendered as code (using backticks).
`link_text` should refer to a library symbol, starting with 'tf.'.
Args:
link_text: The text of the Markdown link.
ref_full_name: The fully qualified name of the symbol to link to.
relative_path_to_root: The relative path from the location of the current
document to the root of the API documentation.
code_ref: If true (the default), put `link_text` in `...`.
Returns:
A markdown link to the documentation page of `ref_full_name`.
"""
url = self.reference_to_url(ref_full_name, relative_path_to_root)
if self._site_link is not None:
if os.path.isabs(url):
url = os.path.join(self._site_link, url[1:])
else:
url = os.path.join(self._site_link, url)
url = url.replace('.md', '')
if code_ref:
link_text = link_text.join(['<code>', '</code>'])
else:
link_text = self._link_text_to_html(link_text)
return f'<a href="{url}">{link_text}</a>'
@staticmethod
def _link_text_to_html(link_text):
code_re = '`(.*?)`'
return re.sub(code_re, r'<code>\1</code>', link_text)
def py_main_name(self, full_name):
"""Return the main name for a Python symbol name."""
return self._duplicate_of.get(full_name, full_name)
def reference_to_url(self, ref_full_name, relative_path_to_root):
"""Resolve a "`tf.symbol`" reference to a relative path.
The input to this function should already be stripped of the '@'
and '{}', and its output is only the link, not the full Markdown.
If `ref_full_name` is the name of a class member, method, or property, the
link will point to the page of the containing class, and it will include the
method name as an anchor. For example, `tf.module.MyClass.my_method` will be
translated into a link to
`os.join.path(relative_path_to_root, 'tf/module/MyClass.md#my_method')`.
Args:
ref_full_name: The fully qualified name of the symbol to link to.
relative_path_to_root: The relative path from the location of the current
document to the root of the API documentation.
Returns:
      A relative path that links from the current document to the
      documentation page of `ref_full_name`.
Raises:
TFDocsError: If the symbol is not found.
"""
if self._is_fragment.get(ref_full_name, False):
# methods and constants get duplicated. And that's okay.
# Use the main name of their parent.
parent_name, short_name = ref_full_name.rsplit('.', 1)
parent_main_name = self._duplicate_of.get(parent_name, parent_name)
main_name = '.'.join([parent_main_name, short_name])
else:
main_name = self._duplicate_of.get(ref_full_name, ref_full_name)
# Check whether this link exists
if main_name not in self._all_names:
raise TFDocsError(f'Cannot make link to {main_name!r}: Not in index.')
ref_path = documentation_path(main_name, self._is_fragment[main_name])
return os.path.join(relative_path_to_root, ref_path)
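  # Illustrative sketch (not part of the original module): assuming a
  # hypothetical `resolver` instance whose index contains 'tf.nn.relu' as a
  # non-fragment symbol, the relative link is built via `documentation_path`:
  #   >>> resolver.reference_to_url('tf.nn.relu', '..')
  #   '../tf/nn/relu.md'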
def _one_ref(self, match, relative_path_to_root, full_name=None):
"""Return a link for a single "`tf.symbol`" reference."""
if match.group(1):
# Found a '[]' group, return it unmodified.
return match.group('brackets')
# Found a '``' group.
string = match.group('backticks')
link_text = string
string = re.sub(r'(.*)[\(\[].*', r'\1', string)
if string.startswith('compat.v1') or string.startswith('compat.v2'):
string = 'tf.' + string
elif string.startswith('v1') or string.startswith('v2'):
string = 'tf.compat.' + string
elif full_name is None or ('tf.compat.v' not in full_name and
'tf.contrib' not in full_name):
string = self._partial_symbols_dict.get(string, string)
try:
if string.startswith('tensorflow::'):
# C++ symbol
return self._cc_link(string, link_text, relative_path_to_root)
is_python = False
for py_module_name in self._py_module_names:
if string == py_module_name or string.startswith(py_module_name + '.'):
is_python = True
break
if is_python: # Python symbol
return self.python_link(
link_text, string, relative_path_to_root, code_ref=True)
except TFDocsError:
pass
return match.group(0)
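  # Illustrative sketch of the prefix normalization above (not part of the
  # original module); the symbol names are hypothetical:
  #   'compat.v1.losses' -> 'tf.compat.v1.losses'
  #   'v2.add'           -> 'tf.compat.v2.add'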
def _cc_link(self, string, link_text, relative_path_to_root):
"""Generate a link for a `tensorflow::...` reference."""
# TODO(joshl): Fix this hard-coding of paths.
if string == 'tensorflow::ClientSession':
ret = 'class/tensorflow/client-session.md'
elif string == 'tensorflow::Scope':
ret = 'class/tensorflow/scope.md'
elif string == 'tensorflow::Status':
ret = 'class/tensorflow/status.md'
elif string == 'tensorflow::Tensor':
ret = 'class/tensorflow/tensor.md'
elif string == 'tensorflow::ops::Const':
ret = 'namespace/tensorflow/ops.md#const'
else:
raise TFDocsError(f'C++ reference not understood: "{string}"')
# relative_path_to_root gets you to api_docs/python, we go from there
# to api_docs/cc, and then add ret.
cc_relative_path = os.path.normpath(
os.path.join(relative_path_to_root, '../cc', ret))
return f'<a href="{cc_relative_path}"><code>{link_text}</code></a>'
def _handle_compatibility(doc) -> Tuple[str, Dict[str, str]]:
"""Parse and remove compatibility blocks from the main docstring.
Args:
doc: The docstring that contains compatibility notes.
Returns:
    A tuple of the modified doc string and a dict that maps from compatibility
    note type to the text of the note.
"""
compatibility_notes = {}
match_compatibility = re.compile(r'[ \t]*@compatibility\((\w+)\)\s*\n'
r'((?:[^@\n]*\n)+)'
r'\s*@end_compatibility')
for f in match_compatibility.finditer(doc):
compatibility_notes[f.group(1)] = f.group(2)
return match_compatibility.subn(r'', doc)[0], compatibility_notes
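# Illustrative example (not part of the original module), assuming a minimal
# docstring with a single compatibility block:
#   >>> doc = 'Brief.\n\n@compatibility(TF2)\nUse `tf.thing`.\n@end_compatibility'
#   >>> _handle_compatibility(doc)
#   ('Brief.\n\n', {'TF2': 'Use `tf.thing`.\n'})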
def _pairs(items):
"""Given an list of items [a,b,a,b...], generate pairs [(a,b),(a,b)...].
Args:
items: A list of items (length must be even)
Returns:
A list of pairs.
"""
assert len(items) % 2 == 0
return list(zip(items[::2], items[1::2]))
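# Illustrative example (not part of the original module):
#   >>> _pairs(['a', '1', 'b', '2'])
#   [('a', '1'), ('b', '2')]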
# Don't change the width="214px" without consulting with the devsite-team.
TABLE_TEMPLATE = textwrap.dedent("""
<!-- Tabular view -->
<table class="responsive fixed orange">
<colgroup><col width="214px"><col></colgroup>
<tr><th colspan="2">{title}</th></tr>
{text}
{items}
</table>
""")
ITEMS_TEMPLATE = textwrap.dedent("""\
<tr>
<td>
{name}{anchor}
</td>
<td>
{description}
</td>
</tr>""")
TEXT_TEMPLATE = textwrap.dedent("""\
<tr class="alt">
<td colspan="2">
{text}
</td>
</tr>""")
class TitleBlock(object):
"""A class to parse title blocks (like `Args:`) and convert them to markdown.
This handles the "Args/Returns/Raises" blocks and anything similar.
  These are used to extract metadata (argument descriptions, etc.), and to
  convert this `TitleBlock` to markdown.
These blocks are delimited by indentation. There must be a blank line before
the first `TitleBlock` in a series.
The expected format is:
```
Title:
Freeform text
arg1: value1
arg2: value1
```
These are represented as:
```
TitleBlock(
title = "Arguments",
text = "Freeform text",
items=[('arg1', 'value1'), ('arg2', 'value2')])
```
The "text" and "items" fields may be empty. When both are empty the generated
markdown only serves to upgrade the title to a <h4>.
Attributes:
title: The title line, without the colon.
text: Freeform text. Anything between the `title`, and the `items`.
items: A list of (name, value) string pairs. All items must have the same
indentation.
"""
_INDENTATION_REMOVAL_RE = re.compile(r'( *)(.+)')
def __init__(self,
*,
title: Optional[str] = None,
text: str,
items: Iterable[Tuple[str, str]]):
self.title = title
self.text = text
self.items = items
def table_view(self, title_template: Optional[str] = None) -> str:
"""Returns a tabular markdown version of the TitleBlock.
    Tabular view is only used for `Args`, `Returns`, `Raises` and `Attributes`
    blocks; other blocks are rendered with the list view instead.
Args:
title_template: Template for title detailing how to display it.
Returns:
Table containing the content to display.
"""
if title_template is not None:
title = title_template.format(title=self.title)
else:
title = self.title
text = self.text.strip()
if text:
text = TEXT_TEMPLATE.format(text=text)
text = self._INDENTATION_REMOVAL_RE.sub(r'\2', text)
items = []
for name, description in self.items:
if not description:
description = ''
else:
description = description.strip()
item_table = ITEMS_TEMPLATE.format(
name=f'`{name}`', anchor='', description=description)
item_table = self._INDENTATION_REMOVAL_RE.sub(r'\2', item_table)
items.append(item_table)
return '\n' + TABLE_TEMPLATE.format(
title=title, text=text, items=''.join(items)) + '\n'
def list_view(self, title_template: str) -> str:
"""Returns a List markdown version of the TitleBlock.
Args:
title_template: Template for title detailing how to display it.
Returns:
Markdown list containing the content to display.
"""
sub = []
sub.append(title_template.format(title=self.title))
sub.append(textwrap.dedent(self.text))
sub.append('\n')
for name, description in self.items:
description = description.strip()
if not description:
sub.append(f'* <b>`{name}`</b>\n')
else:
sub.append(f'* <b>`{name}`</b>: {description}\n')
return ''.join(sub)
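  # Illustrative example (not part of the original module), with a
  # hypothetical title template:
  #   >>> block = TitleBlock(title='Args', text='', items=[('x', 'An int.')])
  #   >>> block.list_view('#### {title}')
  #   '#### Args\n* <b>`x`</b>: An int.\n'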
# This regex matches an entire title-block.
BLOCK_RE = re.compile(
r"""
(?:^|^\n|\n\n) # After a blank line (non-capturing):
(?P<title>[A-Z][\s\w]{0,20}) # Find a sentence case title, followed by
\s*:\s*?(?=\n) # whitespace, a colon and a new line.
(?P<content>.*?) # Then take everything until
(?=\n\S|$) # look ahead finds a non-indented line
# (a new-line followed by non-whitespace)
""", re.VERBOSE | re.DOTALL)
  # This regex matches the start of each "name: description" item within a
  # title-block's content.
ITEM_RE = re.compile(
r"""
^(\*?\*? # Capture optional *s to allow *args, **kwargs.
\w[\w.]*? # Capture a word character followed by word characters
# or "."s.
)\s*:\s # Allow any whitespace around the colon.""",
re.MULTILINE | re.VERBOSE)
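  # Illustrative example (not part of the original module) of how ITEM_RE
  # splits content into alternating names and descriptions:
  #   >>> TitleBlock.ITEM_RE.split('x: An int.\n**kwargs: Extra.\n')
  #   ['', 'x', 'An int.\n', '**kwargs', 'Extra.\n']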
@classmethod
def split_string(cls, docstring: str):
r"""Given a docstring split it into a list of `str` or `TitleBlock` chunks.
For example the docstring of `tf.nn.relu`:
'''
Computes `max(features, 0)`.
Args:
features: A `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`.
name: A name for the operation (optional).
More freeform markdown text.
'''
This is parsed, and returned as:
```
[
"Computes rectified linear: `max(features, 0)`.",
TitleBlock(
title='Args',
text='',
items=[
('features', ' A `Tensor`. Must be...'),
('name', ' A name for the operation (optional).\n\n')]
),
"More freeform markdown text."
]
```
Args:
docstring: The docstring to parse
Returns:
The docstring split into chunks. Each chunk produces valid markdown when
`str` is called on it (each chunk is a python `str`, or a `TitleBlock`).
"""
parts = []
while docstring:
split = re.split(cls.BLOCK_RE, docstring, maxsplit=1)
# The first chunk in split is all text before the TitleBlock.
before = split.pop(0)
parts.append(before)
# If `split` is empty, there were no other matches, and we're done.
if not split:
break
# If there was a match, split contains three items. The two capturing
# groups in the RE, and the remainder.
title, content, docstring = split
# Now `content` contains the text and the name-value item pairs.
# separate these two parts.
content = textwrap.dedent(content)
split = cls.ITEM_RE.split(content)
text = split.pop(0)
items = _pairs(split)
title_block = cls(title=title, text=text, items=items)
parts.append(title_block)
return parts
class _DocstringInfo(typing.NamedTuple):
brief: str
docstring_parts: List[Union[TitleBlock, str]]
compatibility: Dict[str, str]
def _get_other_member_doc(
obj: Any,
parser_config: ParserConfig,
extra_docs: Optional[Dict[int, str]],
) -> str:
"""Returns the docs for other members of a module."""
if extra_docs is not None:
other_member_extra_doc = extra_docs.get(id(obj), None)
else:
other_member_extra_doc = None
if other_member_extra_doc is not None:
description = other_member_extra_doc
elif doc_generator_visitor.maybe_singleton(obj):
description = f'`{repr(obj)}`'
else:
class_name = parser_config.reverse_index.get(id(type(obj)), None)
if class_name is not None:
description = f'`{class_name}`'
else:
description = ''
return description
def _parse_md_docstring(
py_object: Any,
relative_path_to_root: str,
full_name: str,
parser_config: ParserConfig,
extra_docs: Optional[Dict[int, str]] = None,
) -> _DocstringInfo:
"""Parse the object's docstring and return a `_DocstringInfo`.
This function clears @@'s from the docstring, and replaces `` references
with markdown links.
For links within the same set of docs, the `relative_path_to_root` for a
docstring on the page for `full_name` can be set to:
```python
relative_path_to_root = os.path.relpath(
path='.', start=os.path.dirname(documentation_path(full_name)) or '.')
```
Args:
py_object: A python object to retrieve the docs for (class, function/method,
or module).
relative_path_to_root: The relative path from the location of the current
document to the root of the Python API documentation. This is used to
compute links for "`tf.symbol`" references.
full_name: (optional) The api path to the current object, so replacements
can depend on context.
parser_config: An instance of `ParserConfig`.
    extra_docs: Extra docs for symbols like public constants (list, tuple,
      etc.) that need to be added to the markdown pages created.
Returns:
A _DocstringInfo object, all fields will be empty if no docstring was found.
"""
if get_obj_type(py_object) is ObjType.OTHER:
raw_docstring = _get_other_member_doc(
obj=py_object, parser_config=parser_config, extra_docs=extra_docs)
else:
raw_docstring = _get_raw_docstring(py_object)
raw_docstring = parser_config.reference_resolver.replace_references(
raw_docstring,
relative_path_to_root,
full_name,
)
atat_re = re.compile(r' *@@[a-zA-Z_.0-9]+ *$')
raw_docstring = '\n'.join(
line for line in raw_docstring.split('\n') if not atat_re.match(line))
docstring, compatibility = _handle_compatibility(raw_docstring)
if 'Generated by: tensorflow/tools/api/generator' in docstring:
docstring = ''
# Remove the first-line "brief" docstring.
lines = docstring.split('\n')
brief = lines.pop(0)
docstring = '\n'.join(lines)
docstring_parts = TitleBlock.split_string(docstring)
return _DocstringInfo(brief, docstring_parts, compatibility)
class TypeAnnotationExtractor(ast.NodeVisitor):
"""Extracts the type annotations by parsing the AST of a function."""
def __init__(self):
self.annotation_dict = {}
self.arguments_typehint_exists = False
self.return_typehint_exists = False
def visit_FunctionDef(self, node) -> None: # pylint: disable=invalid-name
"""Visits the `FunctionDef` node in AST tree and extracts the typehints."""
# Capture the return type annotation.
if node.returns:
self.annotation_dict['return'] = astor.to_source(
node.returns).strip().replace('"""', '"')
self.return_typehint_exists = True
# Capture the args type annotation.
for arg in node.args.args:
if arg.annotation:
self.annotation_dict[arg.arg] = astor.to_source(
arg.annotation).strip().replace('"""', '"')
self.arguments_typehint_exists = True
# Capture the kwarg only args type annotation.
for kwarg in node.args.kwonlyargs:
if kwarg.annotation:
self.annotation_dict[kwarg.arg] = astor.to_source(
kwarg.annotation).strip().replace('"""', '"')
self.arguments_typehint_exists = True
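# Illustrative example (not part of the original module): extracting the
# annotations from a hypothetical function's source.
#   >>> visitor = TypeAnnotationExtractor()
#   >>> visitor.visit(ast.parse('def f(x: int, *, y: str = "a") -> bool: ...'))
#   >>> visitor.annotation_dict
#   {'return': 'bool', 'x': 'int', 'y': 'str'}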
class ASTDefaultValueExtractor(ast.NodeVisitor):
"""Extracts the default values by parsing the AST of a function."""
_PAREN_NUMBER_RE = re.compile(r'^\(([0-9.e-]+)\)')
def __init__(self):
self.ast_args_defaults = []
self.ast_kw_only_defaults = []
  def _preprocess(self, val: ast.AST) -> str:
text_default_val = astor.to_source(val).strip().replace(
'\t', '\\t').replace('\n', '\\n').replace('"""', "'")
text_default_val = self._PAREN_NUMBER_RE.sub('\\1', text_default_val)
return text_default_val
def visit_FunctionDef(self, node) -> None: # pylint: disable=invalid-name
"""Visits the `FunctionDef` node and extracts the default values."""
for default_val in node.args.defaults:
if default_val is not None:
text_default_val = self._preprocess(default_val)
self.ast_args_defaults.append(text_default_val)
for default_val in node.args.kw_defaults:
if default_val is not None:
text_default_val = self._preprocess(default_val)
self.ast_kw_only_defaults.append(text_default_val)
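# Illustrative example (not part of the original module): extracting the
# default values of a hypothetical function.
#   >>> visitor = ASTDefaultValueExtractor()
#   >>> visitor.visit(ast.parse("def f(a=1, *, b='x'): ..."))
#   >>> visitor.ast_args_defaults, visitor.ast_kw_only_defaults
#   (['1'], ["'x'"])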
class FormatArguments(object):
"""Formats the arguments and adds type annotations if they exist."""
_INTERNAL_NAMES = {
'ops.GraphKeys': 'tf.GraphKeys',
'_ops.GraphKeys': 'tf.GraphKeys',
'init_ops.zeros_initializer': 'tf.zeros_initializer',
'init_ops.ones_initializer': 'tf.ones_initializer',
'saver_pb2.SaverDef': 'tf.train.SaverDef',
}
_OBJECT_MEMORY_ADDRESS_RE = re.compile(r'<(?P<type>.+) object at 0x[\da-f]+>')
# A regular expression capturing a python identifier.
_IDENTIFIER_RE = r'[a-zA-Z_]\w*'
_INDIVIDUAL_TYPES_RE = re.compile(
r"""
(?P<single_type>
([\w.]*)
(?=$|,| |\]|\[)
)
""", re.IGNORECASE | re.VERBOSE)
_TYPING = frozenset(
list(typing.__dict__.keys()) +
['int', 'str', 'bytes', 'float', 'complex', 'bool', 'None'])
_IMMUTABLE_TYPES = frozenset(
[int, str, bytes, float, complex, bool,
type(None), tuple, frozenset])
def __init__(
self,
type_annotations: Dict[str, str],
parser_config: ParserConfig,
func_full_name: str,
) -> None:
self._type_annotations = type_annotations
self._reverse_index = parser_config.reverse_index
self._reference_resolver = parser_config.reference_resolver
# func_full_name is used to calculate the relative path.
self._func_full_name = func_full_name
self._is_fragment = self._reference_resolver._is_fragment.get(
self._func_full_name, None)
def get_link(self, obj_full_name: str) -> str:
relative_path_to_root = os.path.relpath(
path='.',
start=os.path.dirname(
documentation_path(self._func_full_name, self._is_fragment)) or '.')
return self._reference_resolver.python_link(
link_text=obj_full_name,
ref_full_name=obj_full_name,
relative_path_to_root=relative_path_to_root,
code_ref=True)
def _extract_non_builtin_types(self, arg_obj: Any,
non_builtin_types: List[Any]) -> List[Any]:
"""Extracts the non-builtin types from a type annotations object.
    Recurses if an object contains the `__args__` attribute. If an object is a
    built-in type or `Ellipsis`, it is skipped.
Args:
arg_obj: Type annotation object.
non_builtin_types: List to keep track of the non-builtin types extracted.
Returns:
List of non-builtin types.
"""
annotations = getattr(arg_obj, '__args__', [arg_obj])
if annotations is None:
annotations = [arg_obj]
for anno in annotations:
if self._reverse_index.get(id(anno), None):
non_builtin_types.append(anno)
elif (anno in self._IMMUTABLE_TYPES or anno in typing.__dict__.values() or
anno is Ellipsis):
continue
elif hasattr(anno, '__args__'):
self._extract_non_builtin_types(anno, non_builtin_types)
else:
non_builtin_types.append(anno)
return non_builtin_types
def _get_non_builtin_ast_types(self, ast_typehint: str) -> List[str]:
"""Extracts non-builtin types from a string AST type annotation.
    If the type is a built-in type or `...` (Ellipsis), it is skipped.
Args:
ast_typehint: AST extracted type annotation.
Returns:
List of non-builtin ast types.
"""
non_builtin_ast_types = []
for single_type, _ in self._INDIVIDUAL_TYPES_RE.findall(ast_typehint):
if (not single_type or single_type in self._TYPING or
single_type == '...'):
continue
non_builtin_ast_types.append(single_type)
return non_builtin_ast_types
def _linkify(self, non_builtin_map: Dict[str, Any], match) -> str:
"""Links off to types that can be linked.
Args:
non_builtin_map: Dictionary mapping non-builtin_ast_types to
non_builtin_type_objs
match: Match object returned by `re.sub`.
Returns:
Linked type annotation if the type annotation object exists.
"""
group = match.groupdict()
ast_single_typehint = group['single_type']
    # If the AST type hint is a built-in type hint, an `Ellipsis`, or there is
    # nothing to link, return it as is.
    if ast_single_typehint not in non_builtin_map:
      return ast_single_typehint
# Get the type object from the ast_single_typehint and lookup the object
# in reverse_index to get its full name.
obj_full_name = self._reverse_index.get(
id(non_builtin_map[ast_single_typehint]), None)
if obj_full_name is None:
return ast_single_typehint
return self.get_link(obj_full_name)
def preprocess(self, ast_typehint: str, obj_anno: Any) -> str:
"""Links type annotations to its page if it exists.
Args:
ast_typehint: AST extracted type annotation.
obj_anno: Type annotation object.
Returns:
Linked type annotation if the type annotation object exists.
"""
# If the object annotations exists in the reverse_index, get the link
# directly for the entire annotation.
obj_anno_full_name = self._reverse_index.get(id(obj_anno), None)
if obj_anno_full_name is not None:
return self.get_link(obj_anno_full_name)
non_builtin_ast_types = self._get_non_builtin_ast_types(ast_typehint)
try:
non_builtin_type_objs = self._extract_non_builtin_types(obj_anno, [])
except RecursionError:
      non_builtin_type_objs = []
# If the length doesn't match then don't linkify any type annotation. This
# is done to avoid linking to wrong pages instead of guessing.
if len(non_builtin_type_objs) != len(non_builtin_ast_types):
non_builtin_map = {}
else:
non_builtin_map = dict(zip(non_builtin_ast_types, non_builtin_type_objs))
partial_func = functools.partial(self._linkify, non_builtin_map)
return self._INDIVIDUAL_TYPES_RE.sub(partial_func, ast_typehint)
def _replace_internal_names(self, default_text: str) -> str:
    full_name_re = f'^{self._IDENTIFIER_RE}(\\.{self._IDENTIFIER_RE})+'
match = re.match(full_name_re, default_text)
if match:
for internal_name, public_name in self._INTERNAL_NAMES.items():
if match.group(0).startswith(internal_name):
return public_name + default_text[len(internal_name):]
return default_text
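  # Illustrative example (not part of the original module), given a
  # hypothetical `formatter` instance of this class:
  #   >>> formatter._replace_internal_names('ops.GraphKeys.LOSSES')
  #   'tf.GraphKeys.LOSSES'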
def format_return(self, return_anno: Any) -> str:
return self.preprocess(self._type_annotations['return'], return_anno)
def format_args(self, args: List[inspect.Parameter]) -> List[str]:
"""Creates a text representation of the args in a method/function.
Args:
args: List of args to format.
Returns:
Formatted args with type annotations if they exist.
"""
args_text_repr = []
for arg in args:
arg_name = arg.name
if arg_name in self._type_annotations:
typeanno = self.preprocess(self._type_annotations[arg_name],
arg.annotation)
args_text_repr.append(f'{arg_name}: {typeanno}')
else:
args_text_repr.append(f'{arg_name}')
return args_text_repr
def format_kwargs(self, kwargs: List[inspect.Parameter],
ast_defaults: List[str]) -> List[str]:
"""Creates a text representation of the kwargs in a method/function.
Args:
kwargs: List of kwargs to format.
ast_defaults: Default values extracted from the function's AST tree.
Returns:
Formatted kwargs with type annotations if they exist.
"""
kwargs_text_repr = []
if len(ast_defaults) < len(kwargs):
ast_defaults.extend([None] * (len(kwargs) - len(ast_defaults)))
for kwarg, ast_default in zip(kwargs, ast_defaults):
kname = kwarg.name
default_val = kwarg.default
if id(default_val) in self._reverse_index:
default_text = self._reverse_index[id(default_val)]
elif ast_default is not None:
default_text = ast_default
if default_text != repr(default_val):
default_text = self._replace_internal_names(default_text)
# Kwarg without default value.
elif default_val is kwarg.empty:
kwargs_text_repr.extend(self.format_args([kwarg]))
continue
else:
# Strip object memory addresses to avoid unnecessary doc churn.
default_text = self._OBJECT_MEMORY_ADDRESS_RE.sub(
r'<\g<type>>', repr(default_val))
# Format the kwargs to add the type annotation and default values.
if kname in self._type_annotations:
typeanno = self.preprocess(self._type_annotations[kname],
kwarg.annotation)
kwargs_text_repr.append(f'{kname}: {typeanno} = {default_text}')
else:
kwargs_text_repr.append(f'{kname}={default_text}')
return kwargs_text_repr
class _SignatureComponents(NamedTuple):
"""Contains the components that make up the signature of a function/method."""
arguments: List[str]
arguments_typehint_exists: bool
return_typehint_exists: bool
return_type: Optional[str] = None
def __str__(self):
arguments_signature = ''
if self.arguments:
str_signature = ',\n'.join(self.arguments)
# If there is no type annotation on arguments, then wrap the entire
# signature to width 80.
if not self.arguments_typehint_exists:
str_signature = textwrap.fill(str_signature, width=80)
arguments_signature = '\n' + textwrap.indent(
str_signature, prefix=' ') + '\n'
full_signature = f'({arguments_signature})'
if self.return_typehint_exists:
full_signature += f' -> {self.return_type}'
return full_signature
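# Illustrative example (not part of the original module):
#   >>> str(_SignatureComponents(
#   ...     arguments=['x: int', 'y: int = 0'],
#   ...     arguments_typehint_exists=True,
#   ...     return_typehint_exists=True,
#   ...     return_type='int'))
#   '(\n    x: int,\n    y: int = 0\n) -> int'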
def generate_signature(func: Any, parser_config: ParserConfig,
func_full_name: str) -> _SignatureComponents:
"""Given a function, returns a list of strings representing its args.
This function uses `__name__` for callables if it is available. This can lead
to poor results for functools.partial and other callable objects.
The returned string is Python code, so if it is included in a Markdown
document, it should be typeset as code (using backticks), or escaped.
Args:
func: A function, method, or functools.partial to extract the signature for.
parser_config: `ParserConfig` for the method/function whose signature is
generated.
func_full_name: The full name of a function whose signature is generated.
Returns:
A `_SignatureComponents` NamedTuple.
"""
all_args_list = []
try:
sig = inspect.signature(func)
sig_values = sig.parameters.values()
return_anno = sig.return_annotation
except (ValueError, TypeError):
sig_values = []
return_anno = None
type_annotation_visitor = TypeAnnotationExtractor()
ast_defaults_visitor = ASTDefaultValueExtractor()
try:
func_source = textwrap.dedent(inspect.getsource(func))
func_ast = ast.parse(func_source)
# Extract the type annotation from the parsed ast.
type_annotation_visitor.visit(func_ast)
ast_defaults_visitor.visit(func_ast)
except Exception: # pylint: disable=broad-except
    # A wide variety of errors can be thrown here.
pass
type_annotations = type_annotation_visitor.annotation_dict
arguments_typehint_exists = type_annotation_visitor.arguments_typehint_exists
return_typehint_exists = type_annotation_visitor.return_typehint_exists
#############################################################################
# Process the information about the func.
#############################################################################
pos_only_args = []
args = []
kwargs = []
only_kwargs = []
varargs = None
varkwargs = None
skip_self_cls = True
for index, param in enumerate(sig_values):
kind = param.kind
default = param.default
if skip_self_cls and param.name in ('self', 'cls', '_cls'):
# Only skip the first parameter. If the function contains both
# `self` and `cls`, skip only the first one.
skip_self_cls = False
elif kind == param.POSITIONAL_ONLY:
pos_only_args.append(param)
elif default is param.empty and kind == param.POSITIONAL_OR_KEYWORD:
args.append(param)
elif default is not param.empty and kind == param.POSITIONAL_OR_KEYWORD:
kwargs.append(param)
elif kind == param.VAR_POSITIONAL:
varargs = (index, param)
elif kind == param.KEYWORD_ONLY:
only_kwargs.append(param)
elif kind == param.VAR_KEYWORD:
varkwargs = param
#############################################################################
# Build the text representation of Args and Kwargs.
#############################################################################
formatter = FormatArguments(
type_annotations, parser_config, func_full_name=func_full_name)
if pos_only_args:
all_args_list.extend(formatter.format_args(pos_only_args))
all_args_list.append('/')
if args:
all_args_list.extend(formatter.format_args(args))
if kwargs:
all_args_list.extend(
formatter.format_kwargs(kwargs, ast_defaults_visitor.ast_args_defaults))
if only_kwargs:
if varargs is None:
all_args_list.append('*')
all_args_list.extend(
formatter.format_kwargs(only_kwargs,
ast_defaults_visitor.ast_kw_only_defaults))
if varargs is not None:
all_args_list.insert(varargs[0], '*' + varargs[1].name)
if varkwargs is not None:
all_args_list.append('**' + varkwargs.name)
if return_anno and return_anno is not sig.empty and type_annotations.get(
'return', None):
return_type = formatter.format_return(return_anno)
else:
return_type = 'None'
return _SignatureComponents(
arguments=all_args_list,
arguments_typehint_exists=arguments_typehint_exists,
return_typehint_exists=return_typehint_exists,
return_type=return_type)
def _get_defining_class(py_class, name):
for cls in inspect.getmro(py_class):
if name in cls.__dict__:
return cls
return None
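# Illustrative example (not part of the original module):
#   >>> class A:
#   ...   def f(self): ...
#   >>> class B(A):
#   ...   pass
#   >>> _get_defining_class(B, 'f') is A
#   True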
class MemberInfo(NamedTuple):
"""Describes an attribute of a class or module."""
short_name: str
full_name: str
py_object: Any
doc: _DocstringInfo
url: str
class MethodInfo(NamedTuple):
"""Described a method."""
short_name: str
full_name: str
py_object: Any
doc: _DocstringInfo
url: str
signature: _SignatureComponents
decorators: List[str]
defined_in: Optional[_FileLocation]
@classmethod
def from_member_info(cls, method_info: MemberInfo,
signature: _SignatureComponents, decorators: List[str],
defined_in: Optional[_FileLocation]):
"""Upgrades a `MemberInfo` to a `MethodInfo`."""
return cls(
**method_info._asdict(),
signature=signature,
decorators=decorators,
defined_in=defined_in)
def extract_decorators(func: Any) -> List[str]:
"""Extracts the decorators on top of functions/methods.
Args:
func: The function to extract the decorators from.
Returns:
A List of decorators.
"""
class ASTDecoratorExtractor(ast.NodeVisitor):
def __init__(self):
self.decorator_list = []
def visit_FunctionDef(self, node): # pylint: disable=invalid-name
for dec in node.decorator_list:
self.decorator_list.append(astor.to_source(dec).strip())
visitor = ASTDecoratorExtractor()
try:
func_source = textwrap.dedent(inspect.getsource(func))
func_ast = ast.parse(func_source)
visitor.visit(func_ast)
except Exception: # pylint: disable=broad-except
    # A wide variety of errors can be thrown here.
pass
return visitor.decorator_list
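# Illustrative example (not part of the original module); assumes `cached` is
# defined in a real source file so `inspect.getsource` can reach it:
#   >>> @functools.lru_cache(maxsize=None)
#   ... def cached(): ...
#   >>> extract_decorators(cached)
#   ['functools.lru_cache(maxsize=None)']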
class PageInfo:
"""Base-class for api_pages objects.
Converted to markdown by pretty_docs.py.
Attributes:
full_name: The full, main name, of the object being documented.
short_name: The last part of the full name.
py_object: The object being documented.
defined_in: A _FileLocation describing where the object was defined.
    aliases: A list of full names for all aliases of this object.
doc: A list of objects representing the docstring. These can all be
converted to markdown using str().
"""
def __init__(
self,
full_name: str,
py_object: Any,
extra_docs: Optional[Dict[int, str]] = None,
):
"""Initialize a PageInfo.
Args:
full_name: The full, main name, of the object being documented.
py_object: The object being documented.
      extra_docs: Extra docs for symbols like public constants (list, tuple,
        etc.) that need to be added to the markdown pages created.
"""
self.full_name = full_name
self.py_object = py_object
self._extra_docs = extra_docs
self._defined_in = None
self._aliases = None
self._doc = None
@property
def short_name(self):
"""Returns the documented object's short name."""
return self.full_name.split('.')[-1]
@property
def defined_in(self):
"""Returns the path to the file where the documented object is defined."""
return self._defined_in
def set_defined_in(self, defined_in):
"""Sets the `defined_in` path."""
assert self.defined_in is None
self._defined_in = defined_in
@property
def aliases(self):
"""Returns a list of all full names for the documented object."""
return self._aliases
def set_aliases(self, aliases):
"""Sets the `aliases` list.
Args:
aliases: A list of strings. Containing all the object's full names.
"""
assert self.aliases is None
self._aliases = aliases
@property
def doc(self) -> _DocstringInfo:
"""Returns a `_DocstringInfo` created from the object's docstring."""
return self._doc
def set_doc(self, doc: _DocstringInfo):
"""Sets the `doc` field.
Args:
doc: An instance of `_DocstringInfo`.
"""
assert self.doc is None
self._doc = doc
class FunctionPageInfo(PageInfo):
"""Collects docs For a function Page.
Attributes:
full_name: The full, main name, of the object being documented.
short_name: The last part of the full name.
py_object: The object being documented.
defined_in: A _FileLocation describing where the object was defined.
    aliases: A list of full names for all aliases of this object.
doc: A list of objects representing the docstring. These can all be
converted to markdown using str().
signature: the parsed signature (see: generate_signature)
decorators: A list of decorator names.
"""
def __init__(self, *, full_name: str, py_object: Any, **kwargs):
"""Initialize a FunctionPageInfo.
Args:
full_name: The full, main name, of the object being documented.
py_object: The object being documented.
**kwargs: Extra arguments.
"""
super().__init__(full_name, py_object, **kwargs)
self._signature = None
self._decorators = []
@property
def signature(self):
return self._signature
def collect_docs(self, parser_config):
"""Collect all information necessary to genertate the function page.
Mainly this is details about the function signature.
Args:
parser_config: The ParserConfig for the module being documented.
"""
assert self.signature is None
self._signature = generate_signature(self.py_object, parser_config,
self.full_name)
self._decorators = extract_decorators(self.py_object)
@property
def decorators(self):
return list(self._decorators)
def add_decorator(self, dec):
self._decorators.append(dec)
def get_metadata_html(self):
return Metadata(self.full_name).build_html()
class TypeAliasPageInfo(PageInfo):
"""Collects docs For a type alias page.
Attributes:
full_name: The full, main name, of the object being documented.
short_name: The last part of the full name.
py_object: The object being documented.
defined_in: A _FileLocation describing where the object was defined.
    aliases: A list of full names for all aliases of this object.
doc: A list of objects representing the docstring. These can all be
converted to markdown using str().
    signature: the parsed signature (see: generate_signature)
"""
def __init__(self, *, full_name: str, py_object: Any, **kwargs) -> None:
"""Initialize a `TypeAliasPageInfo`.
Args:
full_name: The full, main name, of the object being documented.
py_object: The object being documented.
**kwargs: Extra arguments.
"""
super().__init__(full_name, py_object, **kwargs)
self._signature = None
@property
  def signature(self) -> Optional[str]:
return self._signature
def _custom_join(self, args: List[str], origin: str) -> str:
"""Custom join for Callable and other type hints.
Args:
args: Args of a type annotation object returned by `__args__`.
origin: Origin of a type annotation object returned by `__origin__`.
Returns:
A joined string containing the right representation of a type annotation.
"""
if 'Callable' in origin:
if args[0] == '...':
return ', '.join(args)
else:
return f"[{', '.join(args[:-1])}], {args[-1]}"
return ', '.join(args)
def _link_type_args(self, obj: Any, reverse_index: Dict[int, str],
linker: FormatArguments) -> str:
"""Recurses into typehint object and links known objects to their pages."""
arg_full_name = reverse_index.get(id(obj), None)
if arg_full_name is not None:
return linker.get_link(arg_full_name)
result = []
if getattr(obj, '__args__', None):
for arg in obj.__args__:
result.append(self._link_type_args(arg, reverse_index, linker))
origin_str = typing._type_repr(obj.__origin__) # pylint: disable=protected-access # pytype: disable=module-attr
result = self._custom_join(result, origin_str)
return f'{origin_str}[{result}]'
else:
return typing._type_repr(obj) # pylint: disable=protected-access # pytype: disable=module-attr
def collect_docs(self, parser_config) -> None:
"""Collect all information necessary to genertate the function page.
Mainly this is details about the function signature.
For the type alias signature, the args are extracted and replaced with the
full_name if the object is present in `parser_config.reverse_index`. They
are also linkified to point to that symbol's page.
For example (If generating docs for symbols in TF library):
```
X = Union[int, str, bool, tf.Tensor, np.ndarray]
```
In this case `tf.Tensor` will get linked to that symbol's page.
Note: In the signature `tf.Tensor` is an object, so it will show up as
`tensorflow.python.framework.ops.Tensor`. That's why we need to query
`parser_config.reverse_index` to get the full_name of the object which will
be `tf.Tensor`. Hence the signature will be:
```
X = Union[int, str, bool, <a href="URL">tf.Tensor</a>, np.ndarray]
```
Args:
parser_config: The ParserConfig for the module being documented.
"""
assert self.signature is None
linker = FormatArguments(
type_annotations={},
parser_config=parser_config,
func_full_name=self.full_name)
sig_args = []
if self.py_object.__origin__:
for arg_obj in self.py_object.__args__:
sig_args.append(
self._link_type_args(arg_obj, parser_config.reverse_index, linker))
sig_args_str = textwrap.indent(',\n'.join(sig_args), ' ')
if self.py_object.__origin__:
sig = f'{self.py_object.__origin__}[\n{sig_args_str}\n]'
else:
sig = repr(self.py_object)
# pytype: enable=module-attr
self._signature = sig.replace('typing.', '')
def get_metadata_html(self) -> str:
return Metadata(self.full_name).build_html()
class ClassPageInfo(PageInfo):
"""Collects docs for a class page.
Attributes:
full_name: The full, main name, of the object being documented.
short_name: The last part of the full name.
py_object: The object being documented.
defined_in: A _FileLocation describing where the object was defined.
    aliases: A list of full names for all aliases of this object.
doc: A list of objects representing the docstring. These can all be
converted to markdown using str().
attributes: A dict mapping from "name" to a docstring
bases: A list of `MemberInfo` objects pointing to the docs for the parent
classes.
methods: A list of `MethodInfo` objects documenting the class' methods.
classes: A list of `MemberInfo` objects pointing to docs for any nested
classes.
    other_members: A list of `MemberInfo` objects documenting any other objects
      defined inside the class object (mostly enum style fields).
attr_block: A `TitleBlock` containing information about the Attributes of
the class.
"""
def __init__(self, *, full_name, py_object, **kwargs):
"""Initialize a ClassPageInfo.
Args:
full_name: The full, main name, of the object being documented.
py_object: The object being documented.
**kwargs: Extra arguments.
"""
super().__init__(full_name, py_object, **kwargs)
self._namedtuplefields = collections.OrderedDict()
if issubclass(py_object, tuple):
namedtuple_attrs = ('_asdict', '_fields', '_make', '_replace')
if all(hasattr(py_object, attr) for attr in namedtuple_attrs):
for name in py_object._fields:
self._namedtuplefields[name] = None
self._properties = collections.OrderedDict()
self._bases = None
self._methods = []
self._classes = []
self._other_members = []
self.attr_block = None
@property
def bases(self):
"""Returns a list of `MemberInfo` objects pointing to the class' parents."""
return self._bases
def set_attr_block(self, attr_block):
assert self.attr_block is None
self.attr_block = attr_block
def _set_bases(self, relative_path, parser_config):
"""Builds the `bases` attribute, to document this class' parent-classes.
    This method sets `bases` to a list of `MemberInfo` objects pointing to the
    doc pages for the class' parents.
Args:
relative_path: The relative path from the doc this object describes to the
documentation root.
parser_config: An instance of `ParserConfig`.
"""
bases = []
obj = parser_config.py_name_to_object(self.full_name)
for base in obj.__bases__:
base_full_name = parser_config.reverse_index.get(id(base), None)
if base_full_name is None:
continue
base_doc = _parse_md_docstring(base, relative_path, self.full_name,
parser_config, self._extra_docs)
base_url = parser_config.reference_resolver.reference_to_url(
base_full_name, relative_path)
link_info = MemberInfo(
short_name=base_full_name.split('.')[-1],
full_name=base_full_name,
py_object=base,
doc=base_doc,
url=base_url)
bases.append(link_info)
self._bases = bases
def _add_property(self, member_info: MemberInfo):
"""Adds an entry to the `properties` list.
Args:
member_info: a `MemberInfo` describing the property.
"""
doc = member_info.doc
    # Hide useless namedtuple docstrings.
if re.match('Alias for field number [0-9]+', doc.brief):
doc = doc._replace(docstring_parts=[], brief='')
new_parts = [doc.brief]
# Strip args/returns/raises from property
new_parts.extend([
str(part)
for part in doc.docstring_parts
if not isinstance(part, TitleBlock)
])
new_parts = [textwrap.indent(part, ' ') for part in new_parts]
new_parts.append('')
desc = '\n'.join(new_parts)
if member_info.short_name in self._namedtuplefields:
self._namedtuplefields[member_info.short_name] = desc
else:
self._properties[member_info.short_name] = desc
@property
def methods(self):
"""Returns a list of `MethodInfo` describing the class' methods."""
return self._methods
def _add_method(
self,
member_info: MemberInfo,
defining_class: Optional[type], # pylint: disable=g-bare-generic
parser_config: ParserConfig) -> None:
"""Adds a `MethodInfo` entry to the `methods` list.
Args:
member_info: a `MemberInfo` describing the method.
defining_class: The `type` object where this method is defined.
parser_config: A `ParserConfig`.
"""
if defining_class is None:
return
# Omit methods defined by namedtuple.
original_method = defining_class.__dict__[member_info.short_name]
if (hasattr(original_method, '__module__') and
(original_method.__module__ or '').startswith('namedtuple')):
return
# Some methods are often overridden without documentation. Because it's
# obvious what they do, don't include them in the docs if there's no
# docstring.
if (not member_info.doc.brief.strip() and
member_info.short_name in ['__del__', '__copy__']):
return
signature = generate_signature(member_info.py_object, parser_config,
member_info.full_name)
decorators = extract_decorators(member_info.py_object)
defined_in = _get_defined_in(member_info.py_object, parser_config)
method_info = MethodInfo.from_member_info(member_info, signature,
decorators, defined_in)
self._methods.append(method_info)
@property
def classes(self):
"""Returns a list of `MemberInfo` pointing to any nested classes."""
return self._classes
def get_metadata_html(self) -> str:
meta_data = Metadata(self.full_name)
for item in itertools.chain(self.classes, self.methods, self.other_members):
meta_data.append(item)
return meta_data.build_html()
def _add_class(self, member_info):
"""Adds a `MemberInfo` for a nested class to `classes` list.
Args:
member_info: a `MemberInfo` describing the class.
"""
self._classes.append(member_info)
@property
def other_members(self):
"""Returns a list of `MemberInfo` describing any other contents."""
return self._other_members
def _add_other_member(self, member_info: MemberInfo):
"""Adds an `MemberInfo` entry to the `other_members` list.
Args:
member_info: a `MemberInfo` describing the object.
"""
self._other_members.append(member_info)
def _add_member(
self,
member_info: MemberInfo,
defining_class: Optional[type], # pylint: disable=g-bare-generic
parser_config: ParserConfig,
) -> None:
"""Adds a member to the class page."""
obj_type = get_obj_type(member_info.py_object)
if obj_type is ObjType.PROPERTY:
self._add_property(member_info)
elif obj_type is ObjType.CLASS:
if defining_class is None:
return
self._add_class(member_info)
elif obj_type is ObjType.CALLABLE:
self._add_method(member_info, defining_class, parser_config)
elif obj_type is ObjType.OTHER:
# Exclude members defined by protobuf that are useless
if issubclass(self.py_object, ProtoMessage):
if (member_info.short_name.endswith('_FIELD_NUMBER') or
member_info.short_name in ['__slots__', 'DESCRIPTOR']):
return
self._add_other_member(member_info)
def collect_docs(self, parser_config):
"""Collects information necessary specifically for a class's doc page.
Mainly, this is details about the class's members.
Args:
parser_config: An instance of ParserConfig.
"""
py_class = self.py_object
doc_path = documentation_path(self.full_name)
relative_path = os.path.relpath(
path='.', start=os.path.dirname(doc_path) or '.')
self._set_bases(relative_path, parser_config)
for child_short_name in parser_config.tree[self.full_name]:
child_full_name = '.'.join([self.full_name, child_short_name])
child = parser_config.py_name_to_object(child_full_name)
# Don't document anything that is defined in object or by protobuf.
defining_class = _get_defining_class(py_class, child_short_name)
if defining_class in [object, type, tuple, BaseException, Exception]:
continue
# The following condition excludes most protobuf-defined symbols.
if (defining_class and
defining_class.__name__ in ['CMessage', 'Message', 'MessageMeta']):
continue
if doc_controls.should_skip_class_attr(py_class, child_short_name):
continue
child_doc = _parse_md_docstring(child, relative_path, self.full_name,
parser_config, self._extra_docs)
child_url = parser_config.reference_resolver.reference_to_url(
child_full_name, relative_path)
member_info = MemberInfo(child_short_name, child_full_name, child,
child_doc, child_url)
self._add_member(member_info, defining_class, parser_config)
self.set_attr_block(self._augment_attributes(self.doc.docstring_parts))
def _augment_attributes(self,
docstring_parts: List[Any]) -> Optional[TitleBlock]:
"""Augments and deletes the "Attr" block of the docstring.
The augmented block is returned and then added to the markdown page by
pretty_docs.py. The existing Attribute block is deleted from the docstring.
Merges `namedtuple` fields and properties into the attrs block.
+ `namedtuple` fields first, in order.
+ Then the docstring `Attr:` block.
+ Then any `properties` not mentioned above.
Args:
docstring_parts: A list of docstring parts.
Returns:
Augmented "Attr" block.
"""
attribute_block = None
for attr_block_index, part in enumerate(docstring_parts):
if isinstance(part, TitleBlock) and part.title.startswith('Attr'):
raw_attrs = collections.OrderedDict(part.items)
break
else:
# Didn't find the attributes block, there may still be attributes so
# add a placeholder for them at the end.
raw_attrs = collections.OrderedDict()
attr_block_index = len(docstring_parts)
docstring_parts.append(None)
attrs = collections.OrderedDict()
# namedtuple fields first.
attrs.update(self._namedtuplefields)
# the contents of the `Attrs:` block from the docstring
attrs.update(raw_attrs)
# properties last.
for name, desc in self._properties.items():
# Don't overwrite existing items
attrs.setdefault(name, desc)
if attrs:
attribute_block = TitleBlock(
title='Attributes', text='', items=attrs.items())
# Delete the Attrs block if it exists or delete the placeholder.
del docstring_parts[attr_block_index]
return attribute_block
class ModulePageInfo(PageInfo):
"""Collects docs for a module page.
Attributes:
full_name: The full, main name, of the object being documented.
short_name: The last part of the full name.
py_object: The object being documented.
defined_in: A _FileLocation describing where the object was defined.
    aliases: A list of full names for all aliases of this object.
doc: A list of objects representing the docstring. These can all be
converted to markdown using str().
classes: A list of `MemberInfo` objects pointing to docs for the classes in
this module.
functions: A list of `MemberInfo` objects pointing to docs for the functions
in this module
modules: A list of `MemberInfo` objects pointing to docs for the modules in
this module.
type_alias: A list of `MemberInfo` objects pointing to docs for the type
aliases in this module.
    other_members: A list of `MemberInfo` objects documenting any other objects
      defined on the module object (mostly enum style fields).
"""
def __init__(self, *, full_name, py_object, **kwargs):
"""Initialize a `ModulePageInfo`.
Args:
full_name: The full, main name, of the object being documented.
py_object: The object being documented.
**kwargs: Extra arguments.
"""
super().__init__(full_name, py_object, **kwargs)
self._modules = []
self._classes = []
self._functions = []
self._other_members = []
self._type_alias = []
@property
def modules(self):
return self._modules
@property
def functions(self):
return self._functions
@property
def classes(self):
return self._classes
@property
def type_alias(self):
return self._type_alias
@property
def other_members(self):
return self._other_members
def _add_module(self, member_info: MemberInfo):
self._modules.append(member_info)
def _add_class(self, member_info: MemberInfo):
self._classes.append(member_info)
def _add_function(self, member_info: MemberInfo):
self._functions.append(member_info)
def _add_type_alias(self, member_info: MemberInfo):
self._type_alias.append(member_info)
def _add_other_member(self, member_info: MemberInfo):
self._other_members.append(member_info)
def get_metadata_html(self):
meta_data = Metadata(self.full_name)
# Objects with their own pages are not added to the metadata list for the
# module, the module only has a link to the object page. No docs.
for item in self.other_members:
meta_data.append(item)
return meta_data.build_html()
def _add_member(self, member_info: MemberInfo) -> None:
"""Adds members of the modules to the respective lists."""
obj_type = get_obj_type(member_info.py_object)
if obj_type is ObjType.MODULE:
self._add_module(member_info)
elif obj_type is ObjType.CLASS:
self._add_class(member_info)
elif obj_type is ObjType.CALLABLE:
self._add_function(member_info)
elif obj_type is ObjType.TYPE_ALIAS:
self._add_type_alias(member_info)
elif obj_type is ObjType.OTHER:
self._add_other_member(member_info)
def collect_docs(self, parser_config):
"""Collect information necessary specifically for a module's doc page.
Mainly this is information about the members of the module.
Args:
parser_config: An instance of ParserConfig.
"""
relative_path = os.path.relpath(
path='.',
start=os.path.dirname(documentation_path(self.full_name)) or '.')
member_names = parser_config.tree.get(self.full_name, [])
for member_short_name in member_names:
if member_short_name in [
'__builtins__', '__doc__', '__file__', '__name__', '__path__',
'__package__', '__cached__', '__loader__', '__spec__',
'absolute_import', 'division', 'print_function', 'unicode_literals'
]:
continue
if self.full_name:
member_full_name = self.full_name + '.' + member_short_name
else:
member_full_name = member_short_name
member = parser_config.py_name_to_object(member_full_name)
member_doc = _parse_md_docstring(member, relative_path, self.full_name,
parser_config, self._extra_docs)
url = parser_config.reference_resolver.reference_to_url(
member_full_name, relative_path)
member_info = MemberInfo(member_short_name, member_full_name, member,
member_doc, url)
self._add_member(member_info)
def docs_for_object(
full_name: str,
py_object: Any,
parser_config: ParserConfig,
extra_docs: Optional[Dict[int, str]] = None,
) -> PageInfo:
"""Return a PageInfo object describing a given object from the TF API.
  This function uses _parse_md_docstring to parse the docs pertaining to
  `py_object`.
This function resolves '`tf.symbol`' references in the docstrings into links
to the appropriate location. It also adds a list of alternative names for the
symbol automatically.
It assumes that the docs for each object live in a file given by
`documentation_path`, and that relative links to files within the
documentation are resolvable.
Args:
full_name: The fully qualified name of the symbol to be documented.
py_object: The Python object to be documented. Its documentation is sourced
from `py_object`'s docstring.
parser_config: A ParserConfig object.
    extra_docs: Extra docs for symbols like public constants (list, tuple,
      etc.) that need to be added to the markdown pages created.
Returns:
Either a `FunctionPageInfo`, `ClassPageInfo`, or a `ModulePageInfo`
depending on the type of the python object being documented.
Raises:
RuntimeError: If an object is encountered for which we don't know how
to make docs.
"""
# Which other aliases exist for the object referenced by full_name?
main_name = parser_config.reference_resolver.py_main_name(full_name)
duplicate_names = parser_config.duplicates.get(main_name, [])
if main_name in duplicate_names:
duplicate_names.remove(main_name)
obj_type = get_obj_type(py_object)
if obj_type is ObjType.CLASS:
page_info = ClassPageInfo(
full_name=main_name, py_object=py_object, extra_docs=extra_docs)
elif obj_type is ObjType.CALLABLE:
page_info = FunctionPageInfo(
full_name=main_name, py_object=py_object, extra_docs=extra_docs)
elif obj_type is ObjType.MODULE:
page_info = ModulePageInfo(
full_name=main_name, py_object=py_object, extra_docs=extra_docs)
elif obj_type is ObjType.TYPE_ALIAS:
page_info = TypeAliasPageInfo(
full_name=main_name, py_object=py_object, extra_docs=extra_docs)
else:
    raise RuntimeError(
        f'Cannot make docs for object {full_name}: {py_object!r}')
relative_path = os.path.relpath(
path='.', start=os.path.dirname(documentation_path(full_name)) or '.')
page_info.set_doc(
_parse_md_docstring(
py_object,
relative_path,
full_name,
parser_config,
extra_docs,
))
page_info.collect_docs(parser_config)
page_info.set_aliases(duplicate_names)
page_info.set_defined_in(_get_defined_in(py_object, parser_config))
return page_info
def _unwrap_obj(obj):
while True:
unwrapped_obj = getattr(obj, '__wrapped__', None)
if unwrapped_obj is None:
break
obj = unwrapped_obj
return obj
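# Illustrative example (not part of the original module): `functools.wraps`
# sets `__wrapped__`, which `_unwrap_obj` follows back to the original:
#   >>> def base(): ...
#   >>> @functools.wraps(base)
#   ... def wrapper(): ...
#   >>> _unwrap_obj(wrapper) is base
#   True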
def _get_defined_in(py_object: Any,
parser_config: ParserConfig) -> Optional[_FileLocation]:
"""Returns a description of where the passed in python object was defined.
Args:
py_object: The Python object.
parser_config: A ParserConfig object.
Returns:
A `_FileLocation`
"""
# Every page gets a note about where this object is defined
base_dirs_and_prefixes = zip(parser_config.base_dir,
parser_config.code_url_prefix)
try:
obj_path = inspect.getfile(_unwrap_obj(py_object))
except TypeError: # getfile throws TypeError if py_object is a builtin.
return None
if not obj_path.endswith(('.py', '.pyc')):
return None
code_url_prefix = None
for base_dir, temp_prefix in base_dirs_and_prefixes:
rel_path = os.path.relpath(path=obj_path, start=base_dir)
# A leading ".." indicates that the file is not inside `base_dir`, and
# the search should continue.
if rel_path.startswith('..'):
continue
else:
code_url_prefix = temp_prefix
break
# No link if the file was not found in a `base_dir`, or the prefix is None.
if code_url_prefix is None:
return None
try:
lines, start_line = inspect.getsourcelines(py_object)
end_line = start_line + len(lines) - 1
if 'MACHINE GENERATED' in lines[0]:
# don't link to files generated by tf_export
return None
except (IOError, TypeError, IndexError):
start_line = None
end_line = None
# In case this is compiled, point to the original
if rel_path.endswith('.pyc'):
# If a PY3 __pycache__/ subdir is being used, omit it.
rel_path = rel_path.replace('__pycache__' + os.sep, '')
# Strip everything after the first . so that variants such as .pyc and
# .cpython-3x.pyc or similar are all handled.
rel_path = rel_path.partition('.')[0] + '.py'
if re.search(r'<[\w\s]+>', rel_path):
# Built-ins emit paths like <embedded stdlib>, <string>, etc.
return None
if '<attrs generated' in rel_path:
return None
if re.match(r'.*/gen_[^/]*\.py$', rel_path):
return _FileLocation(rel_path)
if 'genfiles' in rel_path:
return _FileLocation(rel_path)
elif re.match(r'.*_pb2\.py$', rel_path):
# The _pb2.py files all appear right next to their defining .proto file.
rel_path = rel_path[:-7] + '.proto'
return _FileLocation(
rel_path=rel_path, url=os.path.join(code_url_prefix, rel_path)) # pylint: disable=undefined-loop-variable
else:
return _FileLocation(
rel_path=rel_path,
url=os.path.join(code_url_prefix, rel_path),
start_line=start_line,
end_line=end_line) # pylint: disable=undefined-loop-variable
# TODO(markdaoust): This should just parse, pretty_docs should generate the md.
def generate_global_index(library_name, index, reference_resolver):
"""Given a dict of full names to python objects, generate an index page.
The index page generated contains a list of links for all symbols in `index`
that have their own documentation page.
Args:
library_name: The name for the documented library to use in the title.
index: A dict mapping full names to python objects.
reference_resolver: An instance of ReferenceResolver.
Returns:
A string containing an index page as Markdown.
"""
symbol_links = []
for full_name, py_object in index.items():
obj_type = get_obj_type(py_object)
if obj_type in (ObjType.OTHER, ObjType.PROPERTY):
continue
# In Python 3, unbound methods are functions, so eliminate those.
if obj_type is ObjType.CALLABLE:
if is_class_attr(full_name, index):
continue
symbol_links.append(
(full_name, reference_resolver.python_link(full_name, full_name, '..')))
lines = [f'# All symbols in {library_name}', '']
lines.append('<!-- Insert buttons and diff -->\n')
  # Sort all the symbols once so that the ordering is preserved when the list
  # is broken up into main symbols and compat symbols; the sublists then don't
  # need to be sorted separately.
symbol_links = sorted(symbol_links, key=lambda x: x[0])
compat_v1_symbol_links = []
compat_v2_symbol_links = []
primary_symbol_links = []
for symbol, link in symbol_links:
if symbol.startswith('tf.compat.v1'):
if 'raw_ops' not in symbol:
compat_v1_symbol_links.append(link)
elif symbol.startswith('tf.compat.v2'):
compat_v2_symbol_links.append(link)
else:
primary_symbol_links.append(link)
lines.append('## Primary symbols')
for link in primary_symbol_links:
lines.append(f'* {link}')
if compat_v2_symbol_links:
lines.append('\n## Compat v2 symbols\n')
for link in compat_v2_symbol_links:
lines.append(f'* {link}')
if compat_v1_symbol_links:
lines.append('\n## Compat v1 symbols\n')
for link in compat_v1_symbol_links:
lines.append(f'* {link}')
  # TODO(markdaoust): use a _ModulePageInfo -> pretty_docs.build_md_page()
return '\n'.join(lines)
class Metadata(object):
"""A class for building a page's Metadata block.
Attributes:
name: The name of the page being described by the Metadata block.
version: The source version.
"""
def __init__(self, name, version=None, content=None):
"""Creates a Metadata builder.
Args:
name: The name of the page being described by the Metadata block.
version: The source version.
content: Content to create the metadata from.
"""
self.name = name
self.version = version
if self.version is None:
self.version = 'Stable'
self._content = content
if self._content is None:
self._content = []
def append(self, item):
"""Adds an item from the page to the Metadata block.
Args:
item: The parsed page section to add.
"""
self._content.append(item.short_name)
def build_html(self):
"""Returns the Metadata block as an Html string."""
# Note: A schema is not a URL. It is defined with http: but doesn't resolve.
schema = 'http://developers.google.com/ReferenceObject'
parts = [f'<div itemscope itemtype="{schema}">']
parts.append(f'<meta itemprop="name" content="{self.name}" />')
parts.append(f'<meta itemprop="path" content="{self.version}" />')
for item in self._content:
parts.append(f'<meta itemprop="property" content="{item}"/>')
parts.extend(['</div>', ''])
return '\n'.join(parts)
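# Illustrative example (not part of the original module), using a hypothetical
# page name:
#   >>> print(Metadata('tf.nn.relu').build_html())
#   <div itemscope itemtype="http://developers.google.com/ReferenceObject">
#   <meta itemprop="name" content="tf.nn.relu" />
#   <meta itemprop="path" content="Stable" />
#   </div>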
import ast
import collections
import enum
import functools
import inspect
import itertools
import json
import os
import re
import textwrap
import typing
from typing import Any, Dict, List, Tuple, Iterable, NamedTuple, Optional, Union
import astor
from tensorflow_docs.api_generator import doc_controls
from tensorflow_docs.api_generator import doc_generator_visitor
from google.protobuf.message import Message as ProtoMessage
class ObjType(enum.Enum):
TYPE_ALIAS = 'type_alias'
MODULE = 'module'
CLASS = 'class'
CALLABLE = 'callable'
PROPERTY = 'property'
OTHER = 'other'
def get_obj_type(py_obj: Any) -> ObjType:
if hasattr(py_obj, '__args__') and hasattr(py_obj, '__origin__'):
return ObjType.TYPE_ALIAS
elif inspect.ismodule(py_obj):
return ObjType.MODULE
elif inspect.isclass(py_obj):
return ObjType.CLASS
elif callable(py_obj):
return ObjType.CALLABLE
elif isinstance(py_obj, property):
return ObjType.PROPERTY
else:
return ObjType.OTHER
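# For example:
#
#   >>> import os, typing
#   >>> get_obj_type(os) is ObjType.MODULE
#   True
#   >>> get_obj_type(dict) is ObjType.CLASS
#   True
#   >>> get_obj_type(typing.Union[int, str]) is ObjType.TYPE_ALIAS
#   True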
class ParserConfig(object):
def __init__(self, reference_resolver, duplicates, duplicate_of, tree, index,
reverse_index, base_dir, code_url_prefix):
self.reference_resolver = reference_resolver
self.duplicates = duplicates
self.duplicate_of = duplicate_of
self.tree = tree
self.reverse_index = reverse_index
self.index = index
self.base_dir = base_dir
self.code_url_prefix = code_url_prefix
def py_name_to_object(self, full_name):
return self.index[full_name]
class _FileLocation(object):
GITHUB_LINE_NUMBER_TEMPLATE = '#L{start_line:d}-L{end_line:d}'
def __init__(self, rel_path, url=None, start_line=None, end_line=None):
self.rel_path = rel_path
self.url = url
self.start_line = start_line
self.end_line = end_line
github_main_re = 'github.com.*?(blob|tree)/master'
suffix = ''
if self.start_line and not re.search(github_main_re, self.url):
if 'github.com' in self.url:
suffix = self.GITHUB_LINE_NUMBER_TEMPLATE.format(
start_line=self.start_line, end_line=self.end_line)
self.url = self.url + suffix
def is_class_attr(full_name, index):
parent_name = full_name.rsplit('.', 1)[0]
if inspect.isclass(index[parent_name]):
return True
return False
class TFDocsError(Exception):
pass
def documentation_path(full_name, is_fragment=False):
parts = full_name.split('.')
if is_fragment:
parts, fragment = parts[:-1], parts[-1]
result = os.path.join(*parts) + '.md'
if is_fragment:
result = result + '#' + fragment
return result
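# For example (with POSIX path separators):
#
#   >>> documentation_path('tf.data.Dataset')
#   'tf/data/Dataset.md'
#   >>> documentation_path('tf.data.Dataset.map', is_fragment=True)
#   'tf/data/Dataset.md#map'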
def _get_raw_docstring(py_object):
if get_obj_type(py_object) is ObjType.TYPE_ALIAS:
if inspect.getdoc(py_object) != inspect.getdoc(py_object.__origin__):
result = inspect.getdoc(py_object)
else:
result = ''
elif get_obj_type(py_object) is not ObjType.OTHER:
result = inspect.getdoc(py_object) or ''
else:
result = ''
result = _StripTODOs()(result)
result = _StripPylints()(result)
result = _AddDoctestFences()(result + '\n')
return result
class _AddDoctestFences(object):
CARET_BLOCK_RE = re.compile(
r"""
(?<=\n)\ *\n # After a blank line.
      (?P<indent>\ *)(?P<content>\>\>\>.*?) # Indentation and a doctest prompt ('>>>').
\n\s*?(?=\n|$) # Followed by a blank line""",
re.VERBOSE | re.DOTALL)
def _sub(self, match):
groups = match.groupdict()
fence = f"\n{groups['indent']}```\n"
content = groups['indent'] + groups['content']
return ''.join([fence, content, fence])
def __call__(self, content):
return self.CARET_BLOCK_RE.sub(self._sub, content)
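# Illustrative effect (input/output strings are made up): a doctest block
# surrounded by blank lines gets wrapped in triple-backtick fences.
#
#   >>> _AddDoctestFences()('Example:\n\n>>> 1 + 1\n2\n\n')
#   'Example:\n\n```\n>>> 1 + 1\n2\n```\n\n'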
class _StripTODOs(object):
TODO_RE = re.compile('#? *TODO.*')
def __call__(self, content: str) -> str:
return self.TODO_RE.sub('', content)
class _StripPylints(object):
PYLINT_RE = re.compile('# *?pylint:.*')
def __call__(self, content: str) -> str:
return self.PYLINT_RE.sub('', content)
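# Illustrative effect of the two strippers above:
#
#   >>> _StripTODOs()('text  # TODO(user): fix this.\nmore')
#   'text  \nmore'
#   >>> _StripPylints()('x = 1  # pylint: disable=invalid-name\n')
#   'x = 1  \n'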
class IgnoreLineInBlock(object):
def __init__(self, block_start, block_end):
self._block_start = block_start
self._block_end = block_end
self._in_block = False
self._start_end_regex = re.escape(self._block_start) + r'.*?' + re.escape(
self._block_end)
def __call__(self, line):
if re.match(self._start_end_regex, line):
return True
if not self._in_block:
if self._block_start in line:
self._in_block = True
elif self._block_end in line:
self._in_block = False
return True
return self._in_block
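# Illustrative use: suppress processing of lines inside ``` fenced blocks.
#
#   >>> filt = IgnoreLineInBlock('```', '```')
#   >>> [filt(line) for line in ['text', '```', 'code', '```', 'text']]
#   [False, True, True, True, False]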
AUTO_REFERENCE_RE = re.compile(
r"""
(?P<brackets>\[.*?\]) # find characters inside '[]'
|
`(?P<backticks>[\w\(\[\)\]\{\}.,=\s]+?)` # or find characters inside '``'
""",
flags=re.VERBOSE)
class ReferenceResolver(object):
def __init__(
self,
duplicate_of: Dict[str, str],
is_fragment: Dict[str, bool],
py_module_names: List[str],
site_link: Optional[str] = None,
):
self._duplicate_of = duplicate_of
self._is_fragment = is_fragment
self._py_module_names = py_module_names
self._site_link = site_link
self._all_names = set(is_fragment.keys())
self._partial_symbols_dict = self._create_partial_symbols_dict()
@classmethod
def from_visitor(cls, visitor, **kwargs):
is_fragment = {}
for full_name, obj in visitor.index.items():
obj_type = get_obj_type(obj)
if obj_type in (ObjType.CLASS, ObjType.MODULE):
is_fragment[full_name] = False
elif obj_type in (ObjType.CALLABLE, ObjType.TYPE_ALIAS):
if is_class_attr(full_name, visitor.index):
is_fragment[full_name] = True
else:
is_fragment[full_name] = False
else:
is_fragment[full_name] = True
return cls(
duplicate_of=visitor.duplicate_of, is_fragment=is_fragment, **kwargs)
def is_fragment(self, full_name: str):
return self._is_fragment[full_name]
@classmethod
def from_json_file(cls, filepath):
with open(filepath) as f:
json_dict = json.load(f)
return cls(**json_dict)
def _partial_symbols(self, symbol):
split_symbol = symbol.split('.')
partials = [
'.'.join(split_symbol[i:]) for i in range(1,
len(split_symbol) - 1)
]
return partials
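  # For example (illustrative):
  #
  #   _partial_symbols('tf.keras.layers.Dense')
  #   # -> ['keras.layers.Dense', 'layers.Dense']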
def _create_partial_symbols_dict(self):
partial_symbols_dict = collections.defaultdict(list)
for name in sorted(self._all_names):
if 'tf.compat.v' in name or 'tf.contrib' in name:
continue
if 'tf.experimental.numpy' in name or 'tf.numpy' in name:
continue
partials = self._partial_symbols(name)
for partial in partials:
partial_symbols_dict[partial].append(name)
new_partial_dict = {}
for partial, full_names in partial_symbols_dict.items():
if not full_names:
continue
full_names = [
self._duplicate_of.get(full_name, full_name)
for full_name in full_names
]
new_partial_dict[partial] = full_names[0]
return new_partial_dict
def to_json_file(self, filepath):
try:
os.makedirs(os.path.dirname(filepath))
except OSError:
pass
json_dict = {}
for key, value in self.__dict__.items():
if key == '_all_names' or key == '_partial_symbols_dict':
continue
json_dict[key.lstrip('_')] = value
with open(filepath, 'w') as f:
json.dump(json_dict, f, indent=2, sort_keys=True)
f.write('\n')
def replace_references(self, string, relative_path_to_root, full_name=None):
def one_ref(match):
return self._one_ref(match, relative_path_to_root, full_name)
fixed_lines = []
filters = [
IgnoreLineInBlock('<pre class="tfo-notebook-code-cell-output">',
'</pre>'),
IgnoreLineInBlock('```', '```'),
IgnoreLineInBlock(
'<pre class="devsite-click-to-copy prettyprint lang-py">', '</pre>')
]
for line in string.splitlines():
if not any(filter_block(line) for filter_block in filters):
line = re.sub(AUTO_REFERENCE_RE, one_ref, line)
fixed_lines.append(line)
return '\n'.join(fixed_lines)
def python_link(self,
link_text,
ref_full_name,
relative_path_to_root,
code_ref=True):
url = self.reference_to_url(ref_full_name, relative_path_to_root)
if self._site_link is not None:
if os.path.isabs(url):
url = os.path.join(self._site_link, url[1:])
else:
url = os.path.join(self._site_link, url)
url = url.replace('.md', '')
if code_ref:
link_text = link_text.join(['<code>', '</code>'])
else:
link_text = self._link_text_to_html(link_text)
return f'<a href="{url}">{link_text}</a>'
@staticmethod
def _link_text_to_html(link_text):
code_re = '`(.*?)`'
return re.sub(code_re, r'<code>\1</code>', link_text)
def py_main_name(self, full_name):
return self._duplicate_of.get(full_name, full_name)
def reference_to_url(self, ref_full_name, relative_path_to_root):
if self._is_fragment.get(ref_full_name, False):
      # Use the main name of its parent.
parent_name, short_name = ref_full_name.rsplit('.', 1)
parent_main_name = self._duplicate_of.get(parent_name, parent_name)
main_name = '.'.join([parent_main_name, short_name])
else:
main_name = self._duplicate_of.get(ref_full_name, ref_full_name)
# Check whether this link exists
if main_name not in self._all_names:
raise TFDocsError(f'Cannot make link to {main_name!r}: Not in index.')
ref_path = documentation_path(main_name, self._is_fragment[main_name])
return os.path.join(relative_path_to_root, ref_path)
def _one_ref(self, match, relative_path_to_root, full_name=None):
if match.group(1):
# Found a '[]' group, return it unmodified.
return match.group('brackets')
# Found a '``' group.
string = match.group('backticks')
link_text = string
string = re.sub(r'(.*)[\(\[].*', r'\1', string)
if string.startswith('compat.v1') or string.startswith('compat.v2'):
string = 'tf.' + string
elif string.startswith('v1') or string.startswith('v2'):
string = 'tf.compat.' + string
elif full_name is None or ('tf.compat.v' not in full_name and
'tf.contrib' not in full_name):
string = self._partial_symbols_dict.get(string, string)
try:
if string.startswith('tensorflow::'):
# C++ symbol
return self._cc_link(string, link_text, relative_path_to_root)
is_python = False
for py_module_name in self._py_module_names:
if string == py_module_name or string.startswith(py_module_name + '.'):
is_python = True
break
if is_python: # Python symbol
return self.python_link(
link_text, string, relative_path_to_root, code_ref=True)
except TFDocsError:
pass
return match.group(0)
def _cc_link(self, string, link_text, relative_path_to_root):
# TODO(joshl): Fix this hard-coding of paths.
if string == 'tensorflow::ClientSession':
ret = 'class/tensorflow/client-session.md'
elif string == 'tensorflow::Scope':
ret = 'class/tensorflow/scope.md'
elif string == 'tensorflow::Status':
ret = 'class/tensorflow/status.md'
elif string == 'tensorflow::Tensor':
ret = 'class/tensorflow/tensor.md'
elif string == 'tensorflow::ops::Const':
      ret = 'namespace/tensorflow/ops.md'
else:
raise TFDocsError(f'C++ reference not understood: "{string}"')
# relative_path_to_root gets you to api_docs/python, we go from there
# to api_docs/cc, and then add ret.
cc_relative_path = os.path.normpath(
os.path.join(relative_path_to_root, '../cc', ret))
return f'<a href="{cc_relative_path}"><code>{link_text}</code></a>'
def _handle_compatibility(doc) -> Tuple[str, Dict[str, str]]:
compatibility_notes = {}
match_compatibility = re.compile(r'[ \t]*@compatibility\((\w+)\)\s*\n'
r'((?:[^@\n]*\n)+)'
r'\s*@end_compatibility')
for f in match_compatibility.finditer(doc):
compatibility_notes[f.group(1)] = f.group(2)
return match_compatibility.subn(r'', doc)[0], compatibility_notes
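# Illustrative effect (the docstring is made up):
#
#   >>> doc = 'Body.\n\n@compatibility(numpy)\nBehaves like np.foo.\n@end_compatibility'
#   >>> text, notes = _handle_compatibility(doc)
#   >>> notes
#   {'numpy': 'Behaves like np.foo.\n'}
#   >>> text.strip()
#   'Body.'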
def _pairs(items):
assert len(items) % 2 == 0
return list(zip(items[::2], items[1::2]))
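# For example: ITEM_RE.split produces [text, name, description, name,
# description, ...] runs whose tail _pairs regroups:
#
#   >>> _pairs(['x', 'the input.\n', 'y', 'the output.\n'])
#   [('x', 'the input.\n'), ('y', 'the output.\n')]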
# Don't change the width="214px" without consulting with the devsite-team.
TABLE_TEMPLATE = textwrap.dedent("""
<!-- Tabular view -->
<table class="responsive fixed orange">
<colgroup><col width="214px"><col></colgroup>
<tr><th colspan="2">{title}</th></tr>
{text}
{items}
</table>
""")
ITEMS_TEMPLATE = textwrap.dedent("""\
<tr>
<td>
{name}{anchor}
</td>
<td>
{description}
</td>
</tr>""")
TEXT_TEMPLATE = textwrap.dedent("""\
<tr class="alt">
<td colspan="2">
{text}
</td>
</tr>""")
class TitleBlock(object):
_INDENTATION_REMOVAL_RE = re.compile(r'( *)(.+)')
def __init__(self,
*,
title: Optional[str] = None,
text: str,
items: Iterable[Tuple[str, str]]):
self.title = title
self.text = text
self.items = items
def table_view(self, title_template: Optional[str] = None) -> str:
if title_template is not None:
title = title_template.format(title=self.title)
else:
title = self.title
text = self.text.strip()
if text:
text = TEXT_TEMPLATE.format(text=text)
text = self._INDENTATION_REMOVAL_RE.sub(r'\2', text)
items = []
for name, description in self.items:
if not description:
description = ''
else:
description = description.strip()
item_table = ITEMS_TEMPLATE.format(
name=f'`{name}`', anchor='', description=description)
item_table = self._INDENTATION_REMOVAL_RE.sub(r'\2', item_table)
items.append(item_table)
return '\n' + TABLE_TEMPLATE.format(
title=title, text=text, items=''.join(items)) + '\n'
def list_view(self, title_template: str) -> str:
sub = []
sub.append(title_template.format(title=self.title))
sub.append(textwrap.dedent(self.text))
sub.append('\n')
for name, description in self.items:
description = description.strip()
if not description:
sub.append(f'* <b>`{name}`</b>\n')
else:
sub.append(f'* <b>`{name}`</b>: {description}\n')
return ''.join(sub)
BLOCK_RE = re.compile(
r"""
(?:^|^\n|\n\n) # After a blank line (non-capturing):
(?P<title>[A-Z][\s\w]{0,20}) # Find a sentence case title, followed by
\s*:\s*?(?=\n) # whitespace, a colon and a new line.
(?P<content>.*?) # Then take everything until
(?=\n\S|$) # look ahead finds a non-indented line
# (a new-line followed by non-whitespace)
""", re.VERBOSE | re.DOTALL)
ITEM_RE = re.compile(
r"""
^(\*?\*? # Capture optional *s to allow *args, **kwargs.
\w[\w.]*? # Capture a word character followed by word characters
# or "."s.
)\s*:\s # Allow any whitespace around the colon.""",
re.MULTILINE | re.VERBOSE)
@classmethod
def split_string(cls, docstring: str):
parts = []
while docstring:
split = re.split(cls.BLOCK_RE, docstring, maxsplit=1)
before = split.pop(0)
parts.append(before)
if not split:
break
# If there was a match, split contains three items. The two capturing
# groups in the RE, and the remainder.
title, content, docstring = split
# Now `content` contains the text and the name-value item pairs.
      # Separate these two parts.
content = textwrap.dedent(content)
split = cls.ITEM_RE.split(content)
text = split.pop(0)
items = _pairs(split)
title_block = cls(title=title, text=text, items=items)
parts.append(title_block)
return parts
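# Illustrative: splitting a Google-style docstring body yields alternating
# plain strings and TitleBlock objects (exact text/whitespace elided):
#
#   parts = TitleBlock.split_string('Does a thing.\n\nArgs:\n  x: the input.\n')
#   # parts[0] == 'Does a thing.'
#   # parts[1] is a TitleBlock with title 'Args' and items [('x', 'the input.')]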
class _DocstringInfo(typing.NamedTuple):
brief: str
docstring_parts: List[Union[TitleBlock, str]]
compatibility: Dict[str, str]
def _get_other_member_doc(
obj: Any,
parser_config: ParserConfig,
extra_docs: Optional[Dict[int, str]],
) -> str:
if extra_docs is not None:
other_member_extra_doc = extra_docs.get(id(obj), None)
else:
other_member_extra_doc = None
if other_member_extra_doc is not None:
description = other_member_extra_doc
elif doc_generator_visitor.maybe_singleton(obj):
description = f'`{repr(obj)}`'
else:
class_name = parser_config.reverse_index.get(id(type(obj)), None)
if class_name is not None:
description = f'`{class_name}`'
else:
description = ''
return description
def _parse_md_docstring(
py_object: Any,
relative_path_to_root: str,
full_name: str,
parser_config: ParserConfig,
extra_docs: Optional[Dict[int, str]] = None,
) -> _DocstringInfo:
if get_obj_type(py_object) is ObjType.OTHER:
raw_docstring = _get_other_member_doc(
obj=py_object, parser_config=parser_config, extra_docs=extra_docs)
else:
raw_docstring = _get_raw_docstring(py_object)
raw_docstring = parser_config.reference_resolver.replace_references(
raw_docstring,
relative_path_to_root,
full_name,
)
atat_re = re.compile(r' *@@[a-zA-Z_.0-9]+ *$')
raw_docstring = '\n'.join(
line for line in raw_docstring.split('\n') if not atat_re.match(line))
docstring, compatibility = _handle_compatibility(raw_docstring)
if 'Generated by: tensorflow/tools/api/generator' in docstring:
docstring = ''
# Remove the first-line "brief" docstring.
lines = docstring.split('\n')
brief = lines.pop(0)
docstring = '\n'.join(lines)
docstring_parts = TitleBlock.split_string(docstring)
return _DocstringInfo(brief, docstring_parts, compatibility)
class TypeAnnotationExtractor(ast.NodeVisitor):
def __init__(self):
self.annotation_dict = {}
self.arguments_typehint_exists = False
self.return_typehint_exists = False
def visit_FunctionDef(self, node) -> None: # pylint: disable=invalid-name
# Capture the return type annotation.
if node.returns:
self.annotation_dict['return'] = astor.to_source(
node.returns).strip().replace('"""', '"')
self.return_typehint_exists = True
# Capture the args type annotation.
for arg in node.args.args:
if arg.annotation:
self.annotation_dict[arg.arg] = astor.to_source(
arg.annotation).strip().replace('"""', '"')
self.arguments_typehint_exists = True
# Capture the kwarg only args type annotation.
for kwarg in node.args.kwonlyargs:
if kwarg.annotation:
self.annotation_dict[kwarg.arg] = astor.to_source(
kwarg.annotation).strip().replace('"""', '"')
self.arguments_typehint_exists = True
class ASTDefaultValueExtractor(ast.NodeVisitor):
_PAREN_NUMBER_RE = re.compile(r'^\(([0-9.e-]+)\)')
def __init__(self):
self.ast_args_defaults = []
self.ast_kw_only_defaults = []
def _preprocess(self, val: str) -> str:
text_default_val = astor.to_source(val).strip().replace(
'\t', '\\t').replace('\n', '\\n').replace('"""', "'")
text_default_val = self._PAREN_NUMBER_RE.sub('\\1', text_default_val)
return text_default_val
def visit_FunctionDef(self, node) -> None: # pylint: disable=invalid-name
for default_val in node.args.defaults:
if default_val is not None:
text_default_val = self._preprocess(default_val)
self.ast_args_defaults.append(text_default_val)
for default_val in node.args.kw_defaults:
if default_val is not None:
text_default_val = self._preprocess(default_val)
self.ast_kw_only_defaults.append(text_default_val)
class FormatArguments(object):
_INTERNAL_NAMES = {
'ops.GraphKeys': 'tf.GraphKeys',
'_ops.GraphKeys': 'tf.GraphKeys',
'init_ops.zeros_initializer': 'tf.zeros_initializer',
'init_ops.ones_initializer': 'tf.ones_initializer',
'saver_pb2.SaverDef': 'tf.train.SaverDef',
}
_OBJECT_MEMORY_ADDRESS_RE = re.compile(r'<(?P<type>.+) object at 0x[\da-f]+>')
# A regular expression capturing a python identifier.
_IDENTIFIER_RE = r'[a-zA-Z_]\w*'
_INDIVIDUAL_TYPES_RE = re.compile(
r"""
(?P<single_type>
([\w.]*)
(?=$|,| |\]|\[)
)
""", re.IGNORECASE | re.VERBOSE)
_TYPING = frozenset(
list(typing.__dict__.keys()) +
['int', 'str', 'bytes', 'float', 'complex', 'bool', 'None'])
_IMMUTABLE_TYPES = frozenset(
[int, str, bytes, float, complex, bool,
type(None), tuple, frozenset])
def __init__(
self,
type_annotations: Dict[str, str],
parser_config: ParserConfig,
func_full_name: str,
) -> None:
self._type_annotations = type_annotations
self._reverse_index = parser_config.reverse_index
self._reference_resolver = parser_config.reference_resolver
# func_full_name is used to calculate the relative path.
self._func_full_name = func_full_name
self._is_fragment = self._reference_resolver._is_fragment.get(
self._func_full_name, None)
def get_link(self, obj_full_name: str) -> str:
relative_path_to_root = os.path.relpath(
path='.',
start=os.path.dirname(
documentation_path(self._func_full_name, self._is_fragment)) or '.')
return self._reference_resolver.python_link(
link_text=obj_full_name,
ref_full_name=obj_full_name,
relative_path_to_root=relative_path_to_root,
code_ref=True)
def _extract_non_builtin_types(self, arg_obj: Any,
non_builtin_types: List[Any]) -> List[Any]:
annotations = getattr(arg_obj, '__args__', [arg_obj])
if annotations is None:
annotations = [arg_obj]
for anno in annotations:
if self._reverse_index.get(id(anno), None):
non_builtin_types.append(anno)
elif (anno in self._IMMUTABLE_TYPES or anno in typing.__dict__.values() or
anno is Ellipsis):
continue
elif hasattr(anno, '__args__'):
self._extract_non_builtin_types(anno, non_builtin_types)
else:
non_builtin_types.append(anno)
return non_builtin_types
def _get_non_builtin_ast_types(self, ast_typehint: str) -> List[str]:
non_builtin_ast_types = []
for single_type, _ in self._INDIVIDUAL_TYPES_RE.findall(ast_typehint):
if (not single_type or single_type in self._TYPING or
single_type == '...'):
continue
non_builtin_ast_types.append(single_type)
return non_builtin_ast_types
def _linkify(self, non_builtin_map: Dict[str, Any], match) -> str:
group = match.groupdict()
ast_single_typehint = group['single_type']
# If the AST type hint is a built-in type hint or an `Ellipsis`,
# return it as is.
if ast_single_typehint not in non_builtin_map:
return ast_single_typehint
if not non_builtin_map:
return ast_single_typehint
# Get the type object from the ast_single_typehint and lookup the object
# in reverse_index to get its full name.
obj_full_name = self._reverse_index.get(
id(non_builtin_map[ast_single_typehint]), None)
if obj_full_name is None:
return ast_single_typehint
return self.get_link(obj_full_name)
def preprocess(self, ast_typehint: str, obj_anno: Any) -> str:
# If the object annotations exists in the reverse_index, get the link
# directly for the entire annotation.
obj_anno_full_name = self._reverse_index.get(id(obj_anno), None)
if obj_anno_full_name is not None:
return self.get_link(obj_anno_full_name)
non_builtin_ast_types = self._get_non_builtin_ast_types(ast_typehint)
try:
non_builtin_type_objs = self._extract_non_builtin_types(obj_anno, [])
except RecursionError:
non_builtin_type_objs = {}
# If the length doesn't match then don't linkify any type annotation. This
# is done to avoid linking to wrong pages instead of guessing.
if len(non_builtin_type_objs) != len(non_builtin_ast_types):
non_builtin_map = {}
else:
non_builtin_map = dict(zip(non_builtin_ast_types, non_builtin_type_objs))
partial_func = functools.partial(self._linkify, non_builtin_map)
return self._INDIVIDUAL_TYPES_RE.sub(partial_func, ast_typehint)
def _replace_internal_names(self, default_text: str) -> str:
full_name_re = f'^{self._IDENTIFIER_RE}(.{self._IDENTIFIER_RE})+'
match = re.match(full_name_re, default_text)
if match:
for internal_name, public_name in self._INTERNAL_NAMES.items():
if match.group(0).startswith(internal_name):
return public_name + default_text[len(internal_name):]
return default_text
def format_return(self, return_anno: Any) -> str:
return self.preprocess(self._type_annotations['return'], return_anno)
def format_args(self, args: List[inspect.Parameter]) -> List[str]:
args_text_repr = []
for arg in args:
arg_name = arg.name
if arg_name in self._type_annotations:
typeanno = self.preprocess(self._type_annotations[arg_name],
arg.annotation)
args_text_repr.append(f'{arg_name}: {typeanno}')
else:
args_text_repr.append(f'{arg_name}')
return args_text_repr
def format_kwargs(self, kwargs: List[inspect.Parameter],
ast_defaults: List[str]) -> List[str]:
kwargs_text_repr = []
if len(ast_defaults) < len(kwargs):
ast_defaults.extend([None] * (len(kwargs) - len(ast_defaults)))
for kwarg, ast_default in zip(kwargs, ast_defaults):
kname = kwarg.name
default_val = kwarg.default
if id(default_val) in self._reverse_index:
default_text = self._reverse_index[id(default_val)]
elif ast_default is not None:
default_text = ast_default
if default_text != repr(default_val):
default_text = self._replace_internal_names(default_text)
# Kwarg without default value.
elif default_val is kwarg.empty:
kwargs_text_repr.extend(self.format_args([kwarg]))
continue
else:
# Strip object memory addresses to avoid unnecessary doc churn.
default_text = self._OBJECT_MEMORY_ADDRESS_RE.sub(
r'<\g<type>>', repr(default_val))
# Format the kwargs to add the type annotation and default values.
if kname in self._type_annotations:
typeanno = self.preprocess(self._type_annotations[kname],
kwarg.annotation)
kwargs_text_repr.append(f'{kname}: {typeanno} = {default_text}')
else:
kwargs_text_repr.append(f'{kname}={default_text}')
return kwargs_text_repr
class _SignatureComponents(NamedTuple):
arguments: List[str]
arguments_typehint_exists: bool
return_typehint_exists: bool
return_type: Optional[str] = None
def __str__(self):
arguments_signature = ''
if self.arguments:
str_signature = ',\n'.join(self.arguments)
# If there is no type annotation on arguments, then wrap the entire
# signature to width 80.
if not self.arguments_typehint_exists:
str_signature = textwrap.fill(str_signature, width=80)
arguments_signature = '\n' + textwrap.indent(
str_signature, prefix=' ') + '\n'
full_signature = f'({arguments_signature})'
if self.return_typehint_exists:
full_signature += f' -> {self.return_type}'
return full_signature
def generate_signature(func: Any, parser_config: ParserConfig,
func_full_name: str) -> _SignatureComponents:
all_args_list = []
try:
sig = inspect.signature(func)
sig_values = sig.parameters.values()
return_anno = sig.return_annotation
except (ValueError, TypeError):
sig_values = []
return_anno = None
type_annotation_visitor = TypeAnnotationExtractor()
ast_defaults_visitor = ASTDefaultValueExtractor()
try:
func_source = textwrap.dedent(inspect.getsource(func))
func_ast = ast.parse(func_source)
# Extract the type annotation from the parsed ast.
type_annotation_visitor.visit(func_ast)
ast_defaults_visitor.visit(func_ast)
except Exception: # pylint: disable=broad-except
    # A wide variety of errors can be thrown here.
pass
type_annotations = type_annotation_visitor.annotation_dict
arguments_typehint_exists = type_annotation_visitor.arguments_typehint_exists
return_typehint_exists = type_annotation_visitor.return_typehint_exists
#############################################################################
# Process the information about the func.
#############################################################################
pos_only_args = []
args = []
kwargs = []
only_kwargs = []
varargs = None
varkwargs = None
skip_self_cls = True
for index, param in enumerate(sig_values):
kind = param.kind
default = param.default
if skip_self_cls and param.name in ('self', 'cls', '_cls'):
# Only skip the first parameter. If the function contains both
# `self` and `cls`, skip only the first one.
skip_self_cls = False
elif kind == param.POSITIONAL_ONLY:
pos_only_args.append(param)
elif default is param.empty and kind == param.POSITIONAL_OR_KEYWORD:
args.append(param)
elif default is not param.empty and kind == param.POSITIONAL_OR_KEYWORD:
kwargs.append(param)
elif kind == param.VAR_POSITIONAL:
varargs = (index, param)
elif kind == param.KEYWORD_ONLY:
only_kwargs.append(param)
elif kind == param.VAR_KEYWORD:
varkwargs = param
#############################################################################
# Build the text representation of Args and Kwargs.
#############################################################################
formatter = FormatArguments(
type_annotations, parser_config, func_full_name=func_full_name)
if pos_only_args:
all_args_list.extend(formatter.format_args(pos_only_args))
all_args_list.append('/')
if args:
all_args_list.extend(formatter.format_args(args))
if kwargs:
all_args_list.extend(
formatter.format_kwargs(kwargs, ast_defaults_visitor.ast_args_defaults))
if only_kwargs:
if varargs is None:
all_args_list.append('*')
all_args_list.extend(
formatter.format_kwargs(only_kwargs,
ast_defaults_visitor.ast_kw_only_defaults))
if varargs is not None:
all_args_list.insert(varargs[0], '*' + varargs[1].name)
if varkwargs is not None:
all_args_list.append('**' + varkwargs.name)
if return_anno and return_anno is not sig.empty and type_annotations.get(
'return', None):
return_type = formatter.format_return(return_anno)
else:
return_type = 'None'
return _SignatureComponents(
arguments=all_args_list,
arguments_typehint_exists=arguments_typehint_exists,
return_typehint_exists=return_typehint_exists,
return_type=return_type)
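# A rough usage sketch (hypothetical: `parser_config` must carry a populated
# reverse_index and reference_resolver for links to resolve). Given
# `def my_func(x: int, y=None)`:
#
#   components = generate_signature(my_func, parser_config, 'pkg.my_func')
#   str(components)  # -> '(\n    x: int,\n    y=None\n)'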
def _get_defining_class(py_class, name):
for cls in inspect.getmro(py_class):
if name in cls.__dict__:
return cls
return None
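# For example:
#
#   >>> class A:
#   ...   def f(self): pass
#   >>> class B(A): pass
#   >>> _get_defining_class(B, 'f') is A
#   True
#   >>> _get_defining_class(B, 'g') is None
#   True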
class MemberInfo(NamedTuple):
short_name: str
full_name: str
py_object: Any
doc: _DocstringInfo
url: str
class MethodInfo(NamedTuple):
short_name: str
full_name: str
py_object: Any
doc: _DocstringInfo
url: str
signature: _SignatureComponents
decorators: List[str]
defined_in: Optional[_FileLocation]
@classmethod
def from_member_info(cls, method_info: MemberInfo,
signature: _SignatureComponents, decorators: List[str],
defined_in: Optional[_FileLocation]):
return cls(
**method_info._asdict(),
signature=signature,
decorators=decorators,
defined_in=defined_in)
def extract_decorators(func: Any) -> List[str]:
class ASTDecoratorExtractor(ast.NodeVisitor):
def __init__(self):
self.decorator_list = []
def visit_FunctionDef(self, node): # pylint: disable=invalid-name
for dec in node.decorator_list:
self.decorator_list.append(astor.to_source(dec).strip())
visitor = ASTDecoratorExtractor()
try:
func_source = textwrap.dedent(inspect.getsource(func))
func_ast = ast.parse(func_source)
visitor.visit(func_ast)
except Exception: # pylint: disable=broad-except
    # A wide variety of errors can be thrown here.
pass
return visitor.decorator_list
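# For example, given module-level code (illustrative; the source must be
# reachable by `inspect.getsource`, so this does not work for functions
# defined in a REPL):
#
#   def logged(f): return f
#
#   @logged
#   def fn(): pass
#
#   extract_decorators(fn)  # -> ['logged']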
class PageInfo:
def __init__(
self,
full_name: str,
py_object: Any,
extra_docs: Optional[Dict[int, str]] = None,
):
self.full_name = full_name
self.py_object = py_object
self._extra_docs = extra_docs
self._defined_in = None
self._aliases = None
self._doc = None
@property
def short_name(self):
return self.full_name.split('.')[-1]
@property
def defined_in(self):
return self._defined_in
def set_defined_in(self, defined_in):
assert self.defined_in is None
self._defined_in = defined_in
@property
def aliases(self):
return self._aliases
def set_aliases(self, aliases):
assert self.aliases is None
self._aliases = aliases
@property
def doc(self) -> _DocstringInfo:
return self._doc
def set_doc(self, doc: _DocstringInfo):
assert self.doc is None
self._doc = doc
class FunctionPageInfo(PageInfo):
def __init__(self, *, full_name: str, py_object: Any, **kwargs):
super().__init__(full_name, py_object, **kwargs)
self._signature = None
self._decorators = []
@property
def signature(self):
return self._signature
def collect_docs(self, parser_config):
assert self.signature is None
self._signature = generate_signature(self.py_object, parser_config,
self.full_name)
self._decorators = extract_decorators(self.py_object)
@property
def decorators(self):
return list(self._decorators)
def add_decorator(self, dec):
self._decorators.append(dec)
def get_metadata_html(self):
return Metadata(self.full_name).build_html()
class TypeAliasPageInfo(PageInfo):
def __init__(self, *, full_name: str, py_object: Any, **kwargs) -> None:
super().__init__(full_name, py_object, **kwargs)
self._signature = None
@property
def signature(self) -> None:
return self._signature
def _custom_join(self, args: List[str], origin: str) -> str:
if 'Callable' in origin:
if args[0] == '...':
return ', '.join(args)
else:
return f"[{', '.join(args[:-1])}], {args[-1]}"
return ', '.join(args)
def _link_type_args(self, obj: Any, reverse_index: Dict[int, str],
linker: FormatArguments) -> str:
arg_full_name = reverse_index.get(id(obj), None)
if arg_full_name is not None:
return linker.get_link(arg_full_name)
result = []
if getattr(obj, '__args__', None):
for arg in obj.__args__:
result.append(self._link_type_args(arg, reverse_index, linker))
origin_str = typing._type_repr(obj.__origin__) # pylint: disable=protected-access # pytype: disable=module-attr
result = self._custom_join(result, origin_str)
return f'{origin_str}[{result}]'
else:
return typing._type_repr(obj) # pylint: disable=protected-access # pytype: disable=module-attr
def collect_docs(self, parser_config) -> None:
assert self.signature is None
linker = FormatArguments(
type_annotations={},
parser_config=parser_config,
func_full_name=self.full_name)
sig_args = []
if self.py_object.__origin__:
for arg_obj in self.py_object.__args__:
sig_args.append(
self._link_type_args(arg_obj, parser_config.reverse_index, linker))
sig_args_str = textwrap.indent(',\n'.join(sig_args), ' ')
if self.py_object.__origin__:
sig = f'{self.py_object.__origin__}[\n{sig_args_str}\n]'
else:
sig = repr(self.py_object)
# pytype: enable=module-attr
self._signature = sig.replace('typing.', '')
def get_metadata_html(self) -> str:
return Metadata(self.full_name).build_html()
class ClassPageInfo(PageInfo):
def __init__(self, *, full_name, py_object, **kwargs):
super().__init__(full_name, py_object, **kwargs)
self._namedtuplefields = collections.OrderedDict()
if issubclass(py_object, tuple):
namedtuple_attrs = ('_asdict', '_fields', '_make', '_replace')
if all(hasattr(py_object, attr) for attr in namedtuple_attrs):
for name in py_object._fields:
self._namedtuplefields[name] = None
self._properties = collections.OrderedDict()
self._bases = None
self._methods = []
self._classes = []
self._other_members = []
self.attr_block = None
@property
def bases(self):
return self._bases
def set_attr_block(self, attr_block):
assert self.attr_block is None
self.attr_block = attr_block
def _set_bases(self, relative_path, parser_config):
bases = []
obj = parser_config.py_name_to_object(self.full_name)
for base in obj.__bases__:
base_full_name = parser_config.reverse_index.get(id(base), None)
if base_full_name is None:
continue
base_doc = _parse_md_docstring(base, relative_path, self.full_name,
parser_config, self._extra_docs)
base_url = parser_config.reference_resolver.reference_to_url(
base_full_name, relative_path)
link_info = MemberInfo(
short_name=base_full_name.split('.')[-1],
full_name=base_full_name,
py_object=base,
doc=base_doc,
url=base_url)
bases.append(link_info)
self._bases = bases
def _add_property(self, member_info: MemberInfo):
doc = member_info.doc
    # Hide useless namedtuple docstrings.
if re.match('Alias for field number [0-9]+', doc.brief):
doc = doc._replace(docstring_parts=[], brief='')
new_parts = [doc.brief]
# Strip args/returns/raises from property
new_parts.extend([
str(part)
for part in doc.docstring_parts
if not isinstance(part, TitleBlock)
])
new_parts = [textwrap.indent(part, ' ') for part in new_parts]
new_parts.append('')
desc = '\n'.join(new_parts)
if member_info.short_name in self._namedtuplefields:
self._namedtuplefields[member_info.short_name] = desc
else:
self._properties[member_info.short_name] = desc
@property
def methods(self):
return self._methods
def _add_method(
self,
member_info: MemberInfo,
defining_class: Optional[type], # pylint: disable=g-bare-generic
parser_config: ParserConfig) -> None:
if defining_class is None:
return
# Omit methods defined by namedtuple.
original_method = defining_class.__dict__[member_info.short_name]
if (hasattr(original_method, '__module__') and
(original_method.__module__ or '').startswith('namedtuple')):
return
# Some methods are often overridden without documentation. Because it's
# obvious what they do, don't include them in the docs if there's no
# docstring.
if (not member_info.doc.brief.strip() and
member_info.short_name in ['__del__', '__copy__']):
return
signature = generate_signature(member_info.py_object, parser_config,
member_info.full_name)
decorators = extract_decorators(member_info.py_object)
defined_in = _get_defined_in(member_info.py_object, parser_config)
method_info = MethodInfo.from_member_info(member_info, signature,
decorators, defined_in)
self._methods.append(method_info)
@property
def classes(self):
return self._classes
def get_metadata_html(self) -> str:
meta_data = Metadata(self.full_name)
for item in itertools.chain(self.classes, self.methods, self.other_members):
meta_data.append(item)
return meta_data.build_html()
def _add_class(self, member_info):
self._classes.append(member_info)
@property
def other_members(self):
return self._other_members
def _add_other_member(self, member_info: MemberInfo):
self._other_members.append(member_info)
def _add_member(
self,
member_info: MemberInfo,
defining_class: Optional[type], # pylint: disable=g-bare-generic
parser_config: ParserConfig,
) -> None:
obj_type = get_obj_type(member_info.py_object)
if obj_type is ObjType.PROPERTY:
self._add_property(member_info)
elif obj_type is ObjType.CLASS:
if defining_class is None:
return
self._add_class(member_info)
elif obj_type is ObjType.CALLABLE:
self._add_method(member_info, defining_class, parser_config)
elif obj_type is ObjType.OTHER:
# Exclude members defined by protobuf that are useless
if issubclass(self.py_object, ProtoMessage):
if (member_info.short_name.endswith('_FIELD_NUMBER') or
member_info.short_name in ['__slots__', 'DESCRIPTOR']):
return
self._add_other_member(member_info)
def collect_docs(self, parser_config):
py_class = self.py_object
doc_path = documentation_path(self.full_name)
relative_path = os.path.relpath(
path='.', start=os.path.dirname(doc_path) or '.')
self._set_bases(relative_path, parser_config)
for child_short_name in parser_config.tree[self.full_name]:
child_full_name = '.'.join([self.full_name, child_short_name])
child = parser_config.py_name_to_object(child_full_name)
# Don't document anything that is defined in object or by protobuf.
defining_class = _get_defining_class(py_class, child_short_name)
if defining_class in [object, type, tuple, BaseException, Exception]:
continue
# The following condition excludes most protobuf-defined symbols.
if (defining_class and
defining_class.__name__ in ['CMessage', 'Message', 'MessageMeta']):
continue
if doc_controls.should_skip_class_attr(py_class, child_short_name):
continue
child_doc = _parse_md_docstring(child, relative_path, self.full_name,
parser_config, self._extra_docs)
child_url = parser_config.reference_resolver.reference_to_url(
child_full_name, relative_path)
member_info = MemberInfo(child_short_name, child_full_name, child,
child_doc, child_url)
self._add_member(member_info, defining_class, parser_config)
self.set_attr_block(self._augment_attributes(self.doc.docstring_parts))
def _augment_attributes(self,
docstring_parts: List[Any]) -> Optional[TitleBlock]:
attribute_block = None
for attr_block_index, part in enumerate(docstring_parts):
if isinstance(part, TitleBlock) and part.title.startswith('Attr'):
raw_attrs = collections.OrderedDict(part.items)
break
else:
# Didn't find the attributes block, there may still be attributes so
# add a placeholder for them at the end.
raw_attrs = collections.OrderedDict()
attr_block_index = len(docstring_parts)
docstring_parts.append(None)
attrs = collections.OrderedDict()
# namedtuple fields first.
attrs.update(self._namedtuplefields)
# the contents of the `Attrs:` block from the docstring
attrs.update(raw_attrs)
# properties last.
for name, desc in self._properties.items():
# Don't overwrite existing items
attrs.setdefault(name, desc)
if attrs:
attribute_block = TitleBlock(
title='Attributes', text='', items=attrs.items())
# Delete the Attrs block if it exists or delete the placeholder.
del docstring_parts[attr_block_index]
return attribute_block
class ModulePageInfo(PageInfo):
def __init__(self, *, full_name, py_object, **kwargs):
super().__init__(full_name, py_object, **kwargs)
self._modules = []
self._classes = []
self._functions = []
self._other_members = []
self._type_alias = []
@property
def modules(self):
return self._modules
@property
def functions(self):
return self._functions
@property
def classes(self):
return self._classes
@property
def type_alias(self):
return self._type_alias
@property
def other_members(self):
return self._other_members
def _add_module(self, member_info: MemberInfo):
self._modules.append(member_info)
def _add_class(self, member_info: MemberInfo):
self._classes.append(member_info)
def _add_function(self, member_info: MemberInfo):
self._functions.append(member_info)
def _add_type_alias(self, member_info: MemberInfo):
self._type_alias.append(member_info)
def _add_other_member(self, member_info: MemberInfo):
self._other_members.append(member_info)
def get_metadata_html(self):
meta_data = Metadata(self.full_name)
    # Objects with their own pages are not added to the metadata list for the
    # module; the module only has a link to the object page. No docs.
for item in self.other_members:
meta_data.append(item)
return meta_data.build_html()
def _add_member(self, member_info: MemberInfo) -> None:
obj_type = get_obj_type(member_info.py_object)
if obj_type is ObjType.MODULE:
self._add_module(member_info)
elif obj_type is ObjType.CLASS:
self._add_class(member_info)
elif obj_type is ObjType.CALLABLE:
self._add_function(member_info)
elif obj_type is ObjType.TYPE_ALIAS:
self._add_type_alias(member_info)
elif obj_type is ObjType.OTHER:
self._add_other_member(member_info)
def collect_docs(self, parser_config):
relative_path = os.path.relpath(
path='.',
start=os.path.dirname(documentation_path(self.full_name)) or '.')
member_names = parser_config.tree.get(self.full_name, [])
for member_short_name in member_names:
if member_short_name in [
'__builtins__', '__doc__', '__file__', '__name__', '__path__',
'__package__', '__cached__', '__loader__', '__spec__',
'absolute_import', 'division', 'print_function', 'unicode_literals'
]:
continue
if self.full_name:
member_full_name = self.full_name + '.' + member_short_name
else:
member_full_name = member_short_name
member = parser_config.py_name_to_object(member_full_name)
member_doc = _parse_md_docstring(member, relative_path, self.full_name,
parser_config, self._extra_docs)
url = parser_config.reference_resolver.reference_to_url(
member_full_name, relative_path)
member_info = MemberInfo(member_short_name, member_full_name, member,
member_doc, url)
self._add_member(member_info)
def docs_for_object(
full_name: str,
py_object: Any,
parser_config: ParserConfig,
extra_docs: Optional[Dict[int, str]] = None,
) -> PageInfo:
# Which other aliases exist for the object referenced by full_name?
main_name = parser_config.reference_resolver.py_main_name(full_name)
duplicate_names = parser_config.duplicates.get(main_name, [])
if main_name in duplicate_names:
duplicate_names.remove(main_name)
obj_type = get_obj_type(py_object)
if obj_type is ObjType.CLASS:
page_info = ClassPageInfo(
full_name=main_name, py_object=py_object, extra_docs=extra_docs)
elif obj_type is ObjType.CALLABLE:
page_info = FunctionPageInfo(
full_name=main_name, py_object=py_object, extra_docs=extra_docs)
elif obj_type is ObjType.MODULE:
page_info = ModulePageInfo(
full_name=main_name, py_object=py_object, extra_docs=extra_docs)
elif obj_type is ObjType.TYPE_ALIAS:
page_info = TypeAliasPageInfo(
full_name=main_name, py_object=py_object, extra_docs=extra_docs)
else:
    raise RuntimeError(f'Cannot make docs for object {full_name}: {py_object!r}')
relative_path = os.path.relpath(
path='.', start=os.path.dirname(documentation_path(full_name)) or '.')
page_info.set_doc(
_parse_md_docstring(
py_object,
relative_path,
full_name,
parser_config,
extra_docs,
))
page_info.collect_docs(parser_config)
page_info.set_aliases(duplicate_names)
page_info.set_defined_in(_get_defined_in(py_object, parser_config))
return page_info
def _unwrap_obj(obj):
while True:
unwrapped_obj = getattr(obj, '__wrapped__', None)
if unwrapped_obj is None:
break
obj = unwrapped_obj
return obj
def _get_defined_in(py_object: Any,
parser_config: ParserConfig) -> Optional[_FileLocation]:
# Every page gets a note about where this object is defined
base_dirs_and_prefixes = zip(parser_config.base_dir,
parser_config.code_url_prefix)
try:
obj_path = inspect.getfile(_unwrap_obj(py_object))
except TypeError: # getfile throws TypeError if py_object is a builtin.
return None
if not obj_path.endswith(('.py', '.pyc')):
return None
code_url_prefix = None
for base_dir, temp_prefix in base_dirs_and_prefixes:
rel_path = os.path.relpath(path=obj_path, start=base_dir)
# A leading ".." indicates that the file is not inside `base_dir`, and
# the search should continue.
if rel_path.startswith('..'):
continue
else:
code_url_prefix = temp_prefix
break
# No link if the file was not found in a `base_dir`, or the prefix is None.
if code_url_prefix is None:
return None
try:
lines, start_line = inspect.getsourcelines(py_object)
end_line = start_line + len(lines) - 1
if 'MACHINE GENERATED' in lines[0]:
# don't link to files generated by tf_export
return None
except (IOError, TypeError, IndexError):
start_line = None
end_line = None
# In case this is compiled, point to the original
if rel_path.endswith('.pyc'):
# If a PY3 __pycache__/ subdir is being used, omit it.
rel_path = rel_path.replace('__pycache__' + os.sep, '')
# Strip everything after the first . so that variants such as .pyc and
# .cpython-3x.pyc or similar are all handled.
rel_path = rel_path.partition('.')[0] + '.py'
if re.search(r'<[\w\s]+>', rel_path):
# Built-ins emit paths like <embedded stdlib>, <string>, etc.
return None
if '<attrs generated' in rel_path:
return None
if re.match(r'.*/gen_[^/]*\.py$', rel_path):
return _FileLocation(rel_path)
if 'genfiles' in rel_path:
return _FileLocation(rel_path)
elif re.match(r'.*_pb2\.py$', rel_path):
# The _pb2.py files all appear right next to their defining .proto file.
rel_path = rel_path[:-7] + '.proto'
return _FileLocation(
rel_path=rel_path, url=os.path.join(code_url_prefix, rel_path)) # pylint: disable=undefined-loop-variable
else:
return _FileLocation(
rel_path=rel_path,
url=os.path.join(code_url_prefix, rel_path),
start_line=start_line,
end_line=end_line) # pylint: disable=undefined-loop-variable
# TODO(markdaoust): This should just parse, pretty_docs should generate the md.
def generate_global_index(library_name, index, reference_resolver):
symbol_links = []
for full_name, py_object in index.items():
obj_type = get_obj_type(py_object)
if obj_type in (ObjType.OTHER, ObjType.PROPERTY):
continue
# In Python 3, unbound methods are functions, so eliminate those.
if obj_type is ObjType.CALLABLE:
if is_class_attr(full_name, index):
continue
symbol_links.append(
(full_name, reference_resolver.python_link(full_name, full_name, '..')))
lines = [f'# All symbols in {library_name}', '']
lines.append('<!-- Insert buttons and diff -->\n')
  # Sort all the symbols once, so that the ordering is preserved when it's broken
# up into main symbols and compat symbols and sorting the sublists is not
# required.
symbol_links = sorted(symbol_links, key=lambda x: x[0])
compat_v1_symbol_links = []
compat_v2_symbol_links = []
primary_symbol_links = []
for symbol, link in symbol_links:
if symbol.startswith('tf.compat.v1'):
if 'raw_ops' not in symbol:
compat_v1_symbol_links.append(link)
elif symbol.startswith('tf.compat.v2'):
compat_v2_symbol_links.append(link)
else:
primary_symbol_links.append(link)
lines.append('## Primary symbols')
for link in primary_symbol_links:
lines.append(f'* {link}')
if compat_v2_symbol_links:
lines.append('\n## Compat v2 symbols\n')
for link in compat_v2_symbol_links:
lines.append(f'* {link}')
if compat_v1_symbol_links:
lines.append('\n## Compat v1 symbols\n')
for link in compat_v1_symbol_links:
lines.append(f'* {link}')
  # TODO(markdaoust): use a _ModulePageInfo -> pretty_docs.build_md_page()
return '\n'.join(lines)
class Metadata(object):
def __init__(self, name, version=None, content=None):
self.name = name
self.version = version
if self.version is None:
self.version = 'Stable'
self._content = content
if self._content is None:
self._content = []
def append(self, item):
self._content.append(item.short_name)
def build_html(self):
# Note: A schema is not a URL. It is defined with http: but doesn't resolve.
schema = 'http://developers.google.com/ReferenceObject'
parts = [f'<div itemscope itemtype="{schema}">']
parts.append(f'<meta itemprop="name" content="{self.name}" />')
parts.append(f'<meta itemprop="path" content="{self.version}" />')
for item in self._content:
parts.append(f'<meta itemprop="property" content="{item}"/>')
parts.extend(['</div>', ''])
return '\n'.join(parts)
| true
| true
|
1c3ffe2cf70624885065508ede4bcba03719650b
| 93
|
py
|
Python
|
UpDownMethods/__init__.py
|
codles/UpDownMethods
|
948e469e1d26da85ff620e566be7a104cdf33530
|
[
"MIT"
] | 1
|
2016-10-31T02:16:39.000Z
|
2016-10-31T02:16:39.000Z
|
UpDownMethods/__init__.py
|
codles/UpDownMethods
|
948e469e1d26da85ff620e566be7a104cdf33530
|
[
"MIT"
] | null | null | null |
UpDownMethods/__init__.py
|
codles/UpDownMethods
|
948e469e1d26da85ff620e566be7a104cdf33530
|
[
"MIT"
] | null | null | null |
from plot import *
from process import *
from updownmethods import *
from procedure import *
| 18.6
| 27
| 0.784946
|
| true
| true
|
1c3ffe2e7ff0758cfd78cf696fe6c81519bb4e70
| 2,660
|
py
|
Python
|
scripts/pulse-view/pygraphGL_multsine.py
|
jsheedy/biofeedback-cube
|
178a518d70fdf0dfa3b51226a2a97dbfa68a0543
|
[
"Unlicense"
] | 2
|
2022-03-03T04:07:06.000Z
|
2022-03-15T15:59:20.000Z
|
scripts/pulse-view/pygraphGL_multsine.py
|
jsheedy/biofeedback-cube
|
178a518d70fdf0dfa3b51226a2a97dbfa68a0543
|
[
"Unlicense"
] | null | null | null |
scripts/pulse-view/pygraphGL_multsine.py
|
jsheedy/biofeedback-cube
|
178a518d70fdf0dfa3b51226a2a97dbfa68a0543
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Animated 3D multi-sine plot, adapted from the pyqtgraph
"Animated 3D sinc function" example.
requires:
1. pyqtgraph
- download from http://www.pyqtgraph.org/
2. PyOpenGL
- if you have Anaconda, run the following command:
$ conda install -c anaconda pyopengl
"""
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import numpy as np
import sys
import time
class Visualizer(object):
def __init__(self):
self.traces = dict()
self.app = QtGui.QApplication(sys.argv)
self.w = gl.GLViewWidget()
self.w.opts['distance'] = 40
self.w.setWindowTitle('pyqtgraph example: GLLinePlotItem')
self.w.setGeometry(0, 110, 1920, 1080)
self.w.show()
self.phase = 0
self.lines = 50
self.points = 1000
self.y = np.linspace(-10, 10, self.lines)
self.x = np.linspace(-10, 10, self.points)
for i, line in enumerate(self.y):
y = np.array([line] * self.points)
d = np.sqrt(self.x ** 2 + y ** 2)
sine = 10 * np.sin(d + self.phase)
pts = np.vstack([self.x, y, sine]).transpose()
self.traces[i] = gl.GLLinePlotItem(
pos=pts,
color=pg.glColor((i, self.lines * 1.3)),
width=(i + 1) / 10,
antialias=True
)
self.w.addItem(self.traces[i])
def start(self):
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
def set_plotdata(self, name, points, color, width):
self.traces[name].setData(pos=points, color=color, width=width)
def update(self):
stime = time.time()
for i, line in enumerate(self.y):
y = np.array([line] * self.points)
amp = 10 / (i + 1)
phase = self.phase * (i + 1) - 10
freq = self.x * (i + 1) / 10
sine = amp * np.sin(freq - phase)
pts = np.vstack([self.x, y, sine]).transpose()
self.set_plotdata(
name=i, points=pts,
color=pg.glColor((i, self.lines * 1.3)),
width=3
)
self.phase -= .0002
print('{:.0f} FPS'.format(1 / (time.time() - stime)))
def animation(self):
timer = QtCore.QTimer()
timer.timeout.connect(self.update)
timer.start(10)
self.start()
# Start event loop.
if __name__ == '__main__':
v = Visualizer()
v.animation()
| 29.88764
| 80
| 0.515038
|
| true
| true
|
1c3ffe94b14dd3e7b2ece143d99bc6e06579c84c
| 19,813
|
py
|
Python
|
fairseq/data/iterators.py
|
Epsilon-Lee/fairseq-da
|
fbe7a39717afcb60dd4a3e1cd6abd3c763354fe1
|
[
"MIT"
] | 6
|
2021-07-03T10:16:13.000Z
|
2021-09-22T18:15:23.000Z
|
fairseq/data/iterators.py
|
Epsilon-Lee/fairseq-da
|
fbe7a39717afcb60dd4a3e1cd6abd3c763354fe1
|
[
"MIT"
] | null | null | null |
fairseq/data/iterators.py
|
Epsilon-Lee/fairseq-da
|
fbe7a39717afcb60dd4a3e1cd6abd3c763354fe1
|
[
"MIT"
] | 3
|
2021-07-14T13:12:19.000Z
|
2021-12-04T08:46:29.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
import numpy as np
import torch
from fairseq.data import data_utils
logger = logging.getLogger(__name__)
# Object used by _background_consumer to signal the source is exhausted
# to the main thread.
_sentinel = object()
class CountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count.
Args:
iterable (iterable): iterable to wrap
start (int): starting iteration count. Note that this doesn't
actually advance the iterator.
total (int): override the iterator length returned by
``__len__``. This can be used to truncate *iterator*.
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, start=None, total=None):
self.iterable = iterable
self.itr = iter(self)
if start is None:
self.n = getattr(iterable, "n", 0)
else:
self.n = start
if total is None:
self.total = self.n + len(iterable)
else:
self.total = total
def __len__(self):
return self.total
def __iter__(self):
for x in self.iterable:
if self.n >= self.total:
raise RuntimeError(
"Mismatch between actual and expected iterable length. "
"Please report this to the fairseq developers."
)
self.n += 1
yield x
def __next__(self):
return next(self.itr)
def has_next(self):
"""Whether the iterator has been exhausted."""
return self.n < len(self)
def skip(self, num_to_skip):
"""Fast-forward the iterator by skipping *num_to_skip* elements."""
next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
return self
def take(self, n):
"""
Truncates the iterator to n elements at most.
"""
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
# Only take after what we have already consumed (i.e. after restarting
# from checkpoint mid epoch, we have to subtract self.n which is the
# starting point)
#
        # This is to maintain the invariant self.total = self.n + len(iterable),
# before calling __next__ or __iter__
propagated_take = max(n - self.n, 0)
if hasattr(self.iterable, "take"):
self.iterable.take(propagated_take)
else:
self.iterable = itertools.islice(self.iterable, propagated_take)
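# A small usage sketch (illustrative):
#
#   itr = CountingIterator([10, 11, 12, 13])
#   next(itr)       # -> 10, and itr.n == 1
#   itr.skip(2)     # fast-forwards past 11 and 12, so itr.n == 3
#   itr.has_next()  # -> True; one element (13) remains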
class EpochBatchIterating(object):
def __len__(self) -> int:
raise NotImplementedError
@property
def next_epoch_idx(self):
raise NotImplementedError
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus: ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
"""
raise NotImplementedError
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
raise NotImplementedError
@property
def iterations_in_epoch(self) -> int:
"""The number of consumed batches in the current epoch."""
raise NotImplementedError
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
raise NotImplementedError
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
raise NotImplementedError
class StreamingEpochBatchIterator(EpochBatchIterating):
def __init__(
self,
dataset,
epoch=1,
num_shards=1,
shard_id=0,
):
assert isinstance(dataset, torch.utils.data.IterableDataset)
self.dataset = dataset
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self._current_epoch_iterator = None
self.num_shards = num_shards
self.shard_id = shard_id
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._current_epoch_iterator is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
self.epoch = self.next_epoch_idx
self.dataset.set_epoch(self.epoch)
self._current_epoch_iterator = CountingIterator(
iterable=ShardedIterator(
iterable=self.dataset,
num_shards=self.num_shards,
shard_id=self.shard_id,
),
)
return self._current_epoch_iterator
def end_of_epoch(self) -> bool:
return not self._current_epoch_iterator.has_next()
@property
def iterations_in_epoch(self) -> int:
if self._current_epoch_iterator is not None:
return self._current_epoch_iterator.n
return 0
def state_dict(self):
return {
"epoch": self.epoch,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict["epoch"]
class EpochBatchIterator(EpochBatchIterating):
"""A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.
Compared to :class:`torch.utils.data.DataLoader`, this iterator:
- can be reused across multiple epochs with the :func:`next_epoch_itr`
method (optionally shuffled between epochs)
- can be serialized/deserialized with the :func:`state_dict` and
:func:`load_state_dict` methods
- supports sharding with the *num_shards* and *shard_id* arguments
Args:
dataset (~torch.utils.data.Dataset): dataset from which to load the data
collate_fn (callable): merges a list of samples to form a mini-batch
batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of
indices, or a callable to create such an iterator (~torch.utils.data.Sampler).
A callable batch_sampler will be called for each epoch to enable per epoch dynamic
batch iterators defined by this callable batch_sampler.
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
buffer_size (int, optional): the number of batches to keep ready in the
queue. Helps speed up data loading. When buffer_size is zero, the
default torch.utils.data.DataLoader preloading is used.
timeout (int, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: ``0``)
"""
def __init__(
self,
dataset,
collate_fn,
batch_sampler,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
buffer_size=0,
timeout=0,
):
assert isinstance(dataset, torch.utils.data.Dataset)
self.dataset = dataset
self.collate_fn = collate_fn
self.batch_sampler = batch_sampler
self._frozen_batches = (
tuple(batch_sampler) if not callable(batch_sampler) else None
)
self.seed = seed
self.num_shards = num_shards
self.shard_id = shard_id
self.num_workers = num_workers
# This upper limit here is to prevent people from abusing this feature
# in a shared computing environment.
self.buffer_size = min(buffer_size, 20)
self.timeout = timeout
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self.shuffle = True
self._cur_epoch_itr = None
self._next_epoch_itr = None
self._supports_prefetch = getattr(dataset, "supports_prefetch", False)
@property
def frozen_batches(self):
if self._frozen_batches is None:
self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch))
return self._frozen_batches
@property
def first_batch(self):
if len(self.frozen_batches) == 0:
raise Exception(
"The dataset is empty. This could indicate "
"that all elements in the dataset have been skipped. "
"Try increasing the max number of allowed tokens or using "
"a larger dataset."
)
if self.dataset.supports_fetch_outside_dataloader:
return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0]])
else:
return "DUMMY"
def __len__(self):
return int(math.ceil(len(self.frozen_batches) / float(self.num_shards)))
@property
def n(self):
return self.iterations_in_epoch
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._next_epoch_itr is not None:
return self.epoch
elif self._cur_epoch_itr is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus: ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
"""
self.epoch = self.next_epoch_idx
self.dataset.set_epoch(self.epoch)
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
if callable(self.batch_sampler):
# reset _frozen_batches to refresh the next epoch
self._frozen_batches = None
self._cur_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle,
fix_batches_to_gpus=fix_batches_to_gpus,
)
self.shuffle = shuffle
return self._cur_epoch_itr
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
"""The number of consumed batches in the current epoch."""
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.n
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.n
return 0
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
if self.end_of_epoch():
epoch = self.epoch + 1
iter_in_epoch = 0
else:
epoch = self.epoch
iter_in_epoch = self.iterations_in_epoch
return {
"version": 2,
"epoch": epoch,
"iterations_in_epoch": iter_in_epoch,
"shuffle": self.shuffle,
}
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.epoch = state_dict["epoch"]
itr_pos = state_dict.get("iterations_in_epoch", 0)
version = state_dict.get("version", 1)
if itr_pos > 0:
# fast-forward epoch iterator
self._next_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle=state_dict.get("shuffle", True),
offset=itr_pos,
)
if self._next_epoch_itr is None:
if version == 1:
# legacy behavior: we finished the epoch, increment epoch counter
self.epoch += 1
else:
raise RuntimeError(
"Cannot resume training due to dataloader mismatch, please "
"report this to the fairseq developers. You can relaunch "
"training with `--reset-dataloader` and it should work."
)
else:
self._next_epoch_itr = None
def _get_iterator_for_epoch(
self, epoch, shuffle, fix_batches_to_gpus=False, offset=0
):
def shuffle_batches(batches, seed):
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
return batches
if self._supports_prefetch:
batches = self.frozen_batches
if shuffle and not fix_batches_to_gpus:
batches = shuffle_batches(list(batches), self.seed + epoch)
batches = list(
ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
)
self.dataset.prefetch([i for s in batches for i in s])
if shuffle and fix_batches_to_gpus:
batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)
else:
if shuffle:
batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)
else:
batches = self.frozen_batches
batches = list(
ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[])
)
if offset > 0 and offset >= len(batches):
return None
if self.num_workers > 0:
os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"
# Create data loader
itr = torch.utils.data.DataLoader(
self.dataset,
collate_fn=self.collate_fn,
batch_sampler=batches[offset:],
num_workers=self.num_workers,
timeout=self.timeout,
)
# Wrap with a BufferedIterator if needed
if self.buffer_size > 0:
itr = BufferedIterator(self.buffer_size, itr)
# Wrap with CountingIterator
itr = CountingIterator(itr, start=offset)
return itr
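# A minimal sketch of the checkpoint round-trip. _ToyDataset is a hypothetical
# in-memory dataset (not fairseq's real pipeline); everything else uses only
# the class above. Consume one batch, snapshot state, and resume mid-epoch.
def _demo_epoch_batch_iterator():
    import torch.utils.data

    class _ToyDataset(torch.utils.data.Dataset):
        def __getitem__(self, index):
            return index

        def __len__(self):
            return 8

        def set_epoch(self, epoch):
            pass  # next_epoch_itr calls this hook unconditionally

    batches = [[0, 1], [2, 3], [4, 5], [6, 7]]
    epoch_itr = EpochBatchIterator(_ToyDataset(), collate_fn=list, batch_sampler=batches)
    itr = epoch_itr.next_epoch_itr(shuffle=False)
    next(itr)                                # consume the first batch
    state = epoch_itr.state_dict()           # version/epoch/iterations/shuffle

    resumed = EpochBatchIterator(_ToyDataset(), collate_fn=list, batch_sampler=batches)
    resumed.load_state_dict(state)
    assert resumed.iterations_in_epoch == 1  # picks up after the first batch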
class GroupedIterator(CountingIterator):
"""Wrapper around an iterable that returns groups (chunks) of items.
Args:
iterable (iterable): iterable to wrap
chunk_size (int): size of each chunk
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, chunk_size):
itr = _chunk_iterator(iterable, chunk_size)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, "n", 0) / float(chunk_size))),
total=int(math.ceil(len(iterable) / float(chunk_size))),
)
self.chunk_size = chunk_size
def _chunk_iterator(itr, chunk_size):
chunk = []
for x in itr:
chunk.append(x)
if len(chunk) == chunk_size:
yield chunk
chunk = []
if len(chunk) > 0:
yield chunk
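# A minimal sketch: group 5 items into chunks of 2; the last chunk is short.
def _demo_grouped_iterator():
    grouped = GroupedIterator(list(range(5)), chunk_size=2)
    assert len(grouped) == 3                       # ceil(5 / 2)
    assert list(grouped) == [[0, 1], [2, 3], [4]]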
class ShardedIterator(CountingIterator):
"""A sharded wrapper around an iterable, padded to length.
Args:
iterable (iterable): iterable to wrap
num_shards (int): number of shards to split the iterable into
shard_id (int): which shard to iterator over
fill_value (Any, optional): padding value when the iterable doesn't
evenly divide *num_shards* (default: None).
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, num_shards, shard_id, fill_value=None):
if shard_id < 0 or shard_id >= num_shards:
raise ValueError("shard_id must be between 0 and num_shards")
sharded_len = int(math.ceil(len(iterable) / float(num_shards)))
itr = map(
operator.itemgetter(1),
itertools.zip_longest(
range(sharded_len),
itertools.islice(iterable, shard_id, len(iterable), num_shards),
fillvalue=fill_value,
),
)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, "n", 0) / float(num_shards))),
total=sharded_len,
)
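# A minimal sketch: deal 5 items across 2 shards round-robin; the short shard
# is padded with fill_value so both report the same length.
def _demo_sharded_iterator():
    shard0 = list(ShardedIterator(list(range(5)), num_shards=2, shard_id=0))
    shard1 = list(ShardedIterator(list(range(5)), num_shards=2, shard_id=1, fill_value=-1))
    assert shard0 == [0, 2, 4]
    assert shard1 == [1, 3, -1]   # padded up to sharded_len == 3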
class BackgroundConsumer(Thread):
def __init__(self, queue, source, max_len):
Thread.__init__(self)
self._queue = queue
self._source = source
self._max_len = max_len
self.count = 0
def run(self):
try:
for item in self._source:
self._queue.put(item)
# Stop if we reached the maximum length
self.count += 1
if self._max_len is not None and self.count >= self._max_len:
break
# Signal the consumer we are done.
self._queue.put(_sentinel)
except Exception as e:
self._queue.put(e)
class BufferedIterator(object):
def __init__(self, size, iterable):
self._queue = queue.Queue(size)
self._iterable = iterable
self._consumer = None
self.start_time = time.time()
self.warning_time = None
self.total = len(iterable)
def _create_consumer(self):
self._consumer = BackgroundConsumer(
self._queue,
self._iterable,
self.total,
)
self._consumer.daemon = True
self._consumer.start()
def __iter__(self):
return self
def __len__(self):
return self.total
def take(self, n):
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
if hasattr(self._iterable, "take"):
self._iterable.take(n)
def __next__(self):
# Create consumer if not created yet
if self._consumer is None:
self._create_consumer()
# Notify the user if there is a data loading bottleneck
if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)):
if time.time() - self.start_time > 5 * 60:
if (
self.warning_time is None
or time.time() - self.warning_time > 15 * 60
):
logger.debug(
"Data loading buffer is empty or nearly empty. This may "
"indicate a data loading bottleneck, and increasing the "
"number of workers (--num-workers) may help."
)
self.warning_time = time.time()
# Get next example
item = self._queue.get(True)
if isinstance(item, Exception):
raise item
if item is _sentinel:
raise StopIteration()
return item
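# A minimal sketch: the background thread fills a bounded queue of size 2
# while the consumer drains it; iteration order is unchanged.
def _demo_buffered_iterator():
    buffered = BufferedIterator(2, list(range(6)))
    assert len(buffered) == 6
    assert list(buffered) == [0, 1, 2, 3, 4, 5]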
avg_line_length: 33.984563 | max_line_length: 94 | alphanum_fraction: 0.604401
content_no_comment: same source with comments stripped (verbatim duplicate, omitted)
hexsha: 1c4001234382c3c924fd3f3bd6638f4fdf34b0b8 | size: 287 | ext: py | lang: Python
path: ctfproblems/Lovelace/90_lottery_pt2/grader.py | repo: milesmcc/pactf-2018 | head: cfd9d94a7b6828259220f52ab3c5893a28429c62 | licenses: ["MIT"] | stars/issues/forks: null
def grade(key, submission):
if submission == "3956993139":
return True, "You untwisted it and mastered fate itself! That's worth 65 points; sort of anticlimactic..."
else:
return False, "Fate remains untwisted! Is there any way to master random chance itself..."
avg_line_length: 47.833333 | max_line_length: 114 | alphanum_fraction: 0.69338
content_no_comment: same source with comments stripped (verbatim duplicate, omitted)
hexsha: 1c400335e23bb7eebf107a65485cbca1209d221c | size: 517 | ext: py | lang: Python
path: array/python3/21_subarray_with_sum_zero.py | repo: suvambasak/cp (fork: scodebox/cp) | head: e015b662a4b8906d3363322c10a896e3cef0e69f | licenses: ["MIT"]
stars: null | issues: 1 (2021-02-28T20:17:32.000Z) | forks: 1 (2020-12-12T18:36:24.000Z)
# Calculate all prefix sum
# 1 > If any prefix sum repeats -> True
# 2 > If any prefix sum is zero -> True
# TC: O(n) | SC: O(n)
def solution_1(arr):
sum = 0
sum_set = set()
for num in arr:
sum += num
if 0 == sum or sum in sum_set:
return True
sum_set.add(sum)
return False
if __name__ == '__main__':
    # exercise all three cases instead of overwriting arr before the call
    for arr in ([4, 2, -3, 1, 6], [4, 2, 0, 1, 6], [-3, 2, 3, 1, 6]):  # True, True, False
        print('solution_1: ', solution_1(arr))
avg_line_length: 19.884615 | max_line_length: 43 | alphanum_fraction: 0.510638
content_no_comment: same source with comments stripped (verbatim duplicate, omitted)
hexsha: 1c4003585f72e25173366417eabfafae167b215a | size: 27603 | ext: py | lang: Python
path: session0/ecc.py | repo: jimmysong/pw-exercises | head: 8d7fc065e9fe01399fa240ff88a7b1557901defb | licenses: ["MIT"]
stars: 8 (2019-02-21T04:22:48.000Z to 2020-07-24T11:03:16.000Z) | issues: null | forks: 2 (2020-01-23T16:24:16.000Z to 2020-02-10T23:00:29.000Z)
from io import BytesIO
from random import randint
from unittest import TestCase
import hmac
import hashlib
from helper import (
big_endian_to_int,
encode_base58_checksum,
hash160,
hash256,
int_to_big_endian,
raw_decode_base58,
)
class FieldElement:
def __init__(self, num, prime):
if num >= prime or num < 0:
error = 'Num {} not in field range 0 to {}'.format(
num, prime - 1)
raise ValueError(error)
self.num = num
self.prime = prime
def __eq__(self, other):
if other is None:
return False
return self.num == other.num and self.prime == other.prime
def __ne__(self, other):
# this should be the inverse of the == operator
return not (self == other)
def __repr__(self):
return 'FieldElement_{}({})'.format(self.prime, self.num)
def __add__(self, other):
if self.prime != other.prime:
raise TypeError('Cannot add two numbers in different Fields')
# self.num and other.num are the actual values
num = (self.num + other.num) % self.prime
# self.prime is what you'll need to mod against
prime = self.prime
# You need to return an element of the same class
# use: self.__class__(num, prime)
return self.__class__(num, prime)
def __sub__(self, other):
if self.prime != other.prime:
raise TypeError('Cannot subtract two numbers in different Fields')
# self.num and other.num are the actual values
num = (self.num - other.num) % self.prime
# self.prime is what you'll need to mod against
prime = self.prime
# You need to return an element of the same class
# use: self.__class__(num, prime)
return self.__class__(num, prime)
def __mul__(self, other):
if self.prime != other.prime:
raise TypeError('Cannot multiply two numbers in different Fields')
# self.num and other.num are the actual values
num = (self.num * other.num) % self.prime
# self.prime is what you'll need to mod against
prime = self.prime
# You need to return an element of the same class
# use: self.__class__(num, prime)
return self.__class__(num, prime)
def __pow__(self, n):
# remember Fermat's Little Theorem:
# self.num**(p-1) % p == 1
# you might want to use % operator on n
prime = self.prime
num = pow(self.num, n % (prime - 1), prime)
return self.__class__(num, prime)
def __truediv__(self, other):
if self.prime != other.prime:
raise TypeError('Cannot divide two numbers in different Fields')
# self.num and other.num are the actual values
num = (self.num * pow(other.num, self.prime - 2, self.prime)) % self.prime
# self.prime is what you'll need to mod against
prime = self.prime
# use fermat's little theorem:
# self.num**(p-1) % p == 1
# this means:
# 1/n == pow(n, p-2, p)
# You need to return an element of the same class
# use: self.__class__(num, prime)
return self.__class__(num, prime)
def __rmul__(self, coefficient):
num = (self.num * coefficient) % self.prime
return self.__class__(num=num, prime=self.prime)
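# A minimal sketch of arithmetic in the finite field F_13 using the class
# above; every result is reduced modulo the prime.
def _demo_field_element():
    a = FieldElement(7, 13)
    b = FieldElement(12, 13)
    assert a + b == FieldElement(6, 13)   # (7 + 12) % 13
    assert a * b == FieldElement(6, 13)   # 84 % 13
    assert a**3 == FieldElement(5, 13)    # 343 % 13
    assert b / a == FieldElement(11, 13)  # 12 * 7**(13 - 2) % 13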
class FieldElementTest(TestCase):
def test_ne(self):
a = FieldElement(2, 31)
b = FieldElement(2, 31)
c = FieldElement(15, 31)
self.assertEqual(a, b)
self.assertTrue(a != c)
self.assertFalse(a != b)
def test_add(self):
a = FieldElement(2, 31)
b = FieldElement(15, 31)
self.assertEqual(a + b, FieldElement(17, 31))
a = FieldElement(17, 31)
b = FieldElement(21, 31)
self.assertEqual(a + b, FieldElement(7, 31))
def test_sub(self):
a = FieldElement(29, 31)
b = FieldElement(4, 31)
self.assertEqual(a - b, FieldElement(25, 31))
a = FieldElement(15, 31)
b = FieldElement(30, 31)
self.assertEqual(a - b, FieldElement(16, 31))
def test_mul(self):
a = FieldElement(24, 31)
b = FieldElement(19, 31)
self.assertEqual(a * b, FieldElement(22, 31))
def test_pow(self):
a = FieldElement(17, 31)
self.assertEqual(a**3, FieldElement(15, 31))
a = FieldElement(5, 31)
b = FieldElement(18, 31)
self.assertEqual(a**5 * b, FieldElement(16, 31))
def test_div(self):
a = FieldElement(3, 31)
b = FieldElement(24, 31)
self.assertEqual(a / b, FieldElement(4, 31))
a = FieldElement(17, 31)
self.assertEqual(a**-3, FieldElement(29, 31))
a = FieldElement(4, 31)
b = FieldElement(11, 31)
self.assertEqual(a**-4 * b, FieldElement(13, 31))
class Point:
def __init__(self, x, y, a, b):
self.a = a
self.b = b
self.x = x
self.y = y
# x being None and y being None represents the point at infinity
# Check for that here since the equation below won't make sense
# with None values for both.
if self.x is None and self.y is None:
return
# make sure that the elliptic curve equation is satisfied
# y**2 == x**3 + a*x + b
if self.y**2 != self.x**3 + a * x + b:
# if not, raise a ValueError
raise ValueError('({}, {}) is not on the curve'.format(self.x, self.y))
def __eq__(self, other):
return self.x == other.x and self.y == other.y \
and self.a == other.a and self.b == other.b
def __ne__(self, other):
# this should be the inverse of the == operator
return not (self == other)
def __repr__(self):
if self.x is None:
return 'Point(infinity)'
else:
return 'Point({},{})_{}'.format(self.x.num, self.y.num, self.x.prime)
def __add__(self, other):
if self.a != other.a or self.b != other.b:
raise TypeError('Points {}, {} are not on the same curve'.format(self, other))
# Case 0.0: self is the point at infinity, return other
if self.x is None:
return other
# Case 0.1: other is the point at infinity, return self
if other.x is None:
return self
# Case 1: self.x == other.x, self.y != other.y
# Result is point at infinity
if self.x == other.x and self.y != other.y:
# Remember to return an instance of this class:
# self.__class__(x, y, a, b)
return self.__class__(None, None, self.a, self.b)
# Case 2: self.x != other.x
if self.x != other.x:
# Formula (x3,y3)==(x1,y1)+(x2,y2)
# s=(y2-y1)/(x2-x1)
s = (other.y - self.y) / (other.x - self.x)
# x3=s**2-x1-x2
x = s**2 - self.x - other.x
# y3=s*(x1-x3)-y1
y = s * (self.x - x) - self.y
return self.__class__(x, y, self.a, self.b)
# Case 3: self.x == other.x, self.y == other.y
else:
# Formula (x3,y3)=(x1,y1)+(x1,y1)
# s=(3*x1**2+a)/(2*y1)
s = (3 * self.x**2 + self.a) / (2 * self.y)
# x3=s**2-2*x1
x = s**2 - 2 * self.x
# y3=s*(x1-x3)-y1
y = s * (self.x - x) - self.y
return self.__class__(x, y, self.a, self.b)
def __rmul__(self, coefficient):
# rmul calculates coefficient * self
coef = coefficient
current = self
# start at 0
result = self.__class__(None, None, self.a, self.b)
while coef:
# if the bit at this binary expansion is 1, add
if coef & 1:
result += current
# double the point
current += current
coef >>= 1
return result
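# A minimal sketch of point addition on y^2 = x^3 + 5x + 7 over the reals
# (the same toy curve the tests below use).
def _demo_point_addition():
    p1 = Point(x=-1, y=-1, a=5, b=7)
    p2 = Point(x=2, y=5, a=5, b=7)
    inf = Point(x=None, y=None, a=5, b=7)
    assert p1 + inf == p1                         # infinity is the identity
    assert p1 + p2 == Point(x=3, y=-7, a=5, b=7)  # chord through p1 and p2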
class PointTest(TestCase):
def test_ne(self):
a = Point(x=3, y=-7, a=5, b=7)
b = Point(x=18, y=77, a=5, b=7)
self.assertTrue(a != b)
self.assertFalse(a != a)
def test_on_curve(self):
with self.assertRaises(ValueError):
Point(x=-2, y=4, a=5, b=7)
# these should not raise an error
Point(x=3, y=-7, a=5, b=7)
Point(x=18, y=77, a=5, b=7)
def test_add0(self):
a = Point(x=None, y=None, a=5, b=7)
b = Point(x=2, y=5, a=5, b=7)
c = Point(x=2, y=-5, a=5, b=7)
self.assertEqual(a + b, b)
self.assertEqual(b + a, b)
self.assertEqual(b + c, a)
def test_add1(self):
a = Point(x=3, y=7, a=5, b=7)
b = Point(x=-1, y=-1, a=5, b=7)
self.assertEqual(a + b, Point(x=2, y=-5, a=5, b=7))
def test_add2(self):
a = Point(x=-1, y=1, a=5, b=7)
self.assertEqual(a + a, Point(x=18, y=-77, a=5, b=7))
class ECCTest(TestCase):
def test_on_curve(self):
# tests the following points whether they are on the curve or not
# on curve y^2=x^3-7 over F_223:
# (192,105) (17,56) (200,119) (1,193) (42,99)
# the ones that aren't should raise a ValueError
prime = 223
a = FieldElement(0, prime)
b = FieldElement(7, prime)
valid_points = ((192, 105), (17, 56), (1, 193))
invalid_points = ((200, 119), (42, 99))
# iterate over valid points
for x_raw, y_raw in valid_points:
# Initialize points this way:
# x = FieldElement(x_raw, prime)
# y = FieldElement(y_raw, prime)
# Point(x, y, a, b)
x = FieldElement(x_raw, prime)
y = FieldElement(y_raw, prime)
# Creating the point should not result in an error
Point(x, y, a, b)
# iterate over invalid points
for x_raw, y_raw in invalid_points:
# Initialize points this way:
# x = FieldElement(x_raw, prime)
# y = FieldElement(y_raw, prime)
# Point(x, y, a, b)
x = FieldElement(x_raw, prime)
y = FieldElement(y_raw, prime)
# check that creating the point results in a ValueError
# with self.assertRaises(ValueError):
# Point(x, y, a, b)
with self.assertRaises(ValueError):
Point(x, y, a, b)
def test_add(self):
# tests the following additions on curve y^2=x^3-7 over F_223:
# (192,105) + (17,56)
# (47,71) + (117,141)
# (143,98) + (76,66)
prime = 223
a = FieldElement(0, prime)
b = FieldElement(7, prime)
additions = (
# (x1, y1, x2, y2, x3, y3)
(192, 105, 17, 56, 170, 142),
(47, 71, 117, 141, 60, 139),
(143, 98, 76, 66, 47, 71),
)
# iterate over the additions
for x1_raw, y1_raw, x2_raw, y2_raw, x3_raw, y3_raw in additions:
# Initialize points this way:
# x1 = FieldElement(x1_raw, prime)
# y1 = FieldElement(y1_raw, prime)
# p1 = Point(x1, y1, a, b)
# x2 = FieldElement(x2_raw, prime)
# y2 = FieldElement(y2_raw, prime)
# p2 = Point(x2, y2, a, b)
# x3 = FieldElement(x3_raw, prime)
# y3 = FieldElement(y3_raw, prime)
# p3 = Point(x3, y3, a, b)
x1 = FieldElement(x1_raw, prime)
y1 = FieldElement(y1_raw, prime)
p1 = Point(x1, y1, a, b)
x2 = FieldElement(x2_raw, prime)
y2 = FieldElement(y2_raw, prime)
p2 = Point(x2, y2, a, b)
x3 = FieldElement(x3_raw, prime)
y3 = FieldElement(y3_raw, prime)
p3 = Point(x3, y3, a, b)
# check that p1 + p2 == p3
self.assertEqual(p1 + p2, p3)
def test_rmul(self):
# tests the following scalar multiplications
# 2*(192,105)
# 2*(143,98)
# 2*(47,71)
# 4*(47,71)
# 8*(47,71)
# 21*(47,71)
prime = 223
a = FieldElement(0, prime)
b = FieldElement(7, prime)
multiplications = (
# (coefficient, x1, y1, x2, y2)
(2, 192, 105, 49, 71),
(2, 143, 98, 64, 168),
(2, 47, 71, 36, 111),
(4, 47, 71, 194, 51),
(8, 47, 71, 116, 55),
(21, 47, 71, None, None),
)
# iterate over the multiplications
for s, x1_raw, y1_raw, x2_raw, y2_raw in multiplications:
# Initialize points this way:
# x1 = FieldElement(x1_raw, prime)
# y1 = FieldElement(y1_raw, prime)
# p1 = Point(x1, y1, a, b)
x1 = FieldElement(x1_raw, prime)
y1 = FieldElement(y1_raw, prime)
p1 = Point(x1, y1, a, b)
# initialize the second point based on whether it's the point at infinity
# x2 = FieldElement(x2_raw, prime)
# y2 = FieldElement(y2_raw, prime)
# p2 = Point(x2, y2, a, b)
if x2_raw is None:
p2 = Point(None, None, a, b)
else:
x2 = FieldElement(x2_raw, prime)
y2 = FieldElement(y2_raw, prime)
p2 = Point(x2, y2, a, b)
# check that the product is equal to the expected point
self.assertEqual(s * p1, p2)
A = 0
B = 7
P = 2**256 - 2**32 - 977
N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
class S256Field(FieldElement):
def __init__(self, num, prime=None):
super().__init__(num=num, prime=P)
def hex(self):
return '{:x}'.format(self.num).zfill(64)
def __repr__(self):
return self.hex()
def sqrt(self):
return self**((P + 1) // 4)
class S256Point(Point):
def __init__(self, x, y, a=None, b=None):
a, b = S256Field(A), S256Field(B)
if type(x) == int:
super().__init__(x=S256Field(x), y=S256Field(y), a=a, b=b)
else:
super().__init__(x=x, y=y, a=a, b=b)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __repr__(self):
if self.x is None:
return 'S256Point(infinity)'
else:
return 'S256Point({},{})'.format(hex(self.x.num), hex(self.y.num))
def __rmul__(self, coefficient):
# we want to mod by N to make this simple
coef = coefficient % N
return super().__rmul__(coef)
def sec(self, compressed=True):
# returns the binary version of the sec format, NOT hex
# if compressed, starts with b'\x02' if self.y.num is even, b'\x03' if self.y is odd
# then self.x.num
# remember, you have to convert self.x.num/self.y.num to binary using int_to_big_endian
x = int_to_big_endian(self.x.num, 32)
if compressed:
if self.y.num % 2 == 0:
return b'\x02' + x
else:
return b'\x03' + x
else:
# if non-compressed, starts with b'\x04' followed by self.x and then self.y
y = int_to_big_endian(self.y.num, 32)
return b'\x04' + x + y
def hash160(self, compressed=True):
# get the sec
sec = self.sec(compressed)
# hash160 the sec
return hash160(sec)
def address(self, compressed=True, testnet=False):
'''Returns the p2pkh address string'''
h160 = self.hash160(compressed)
from script import P2PKHScriptPubKey
return P2PKHScriptPubKey(h160).address(testnet)
def verify(self, z, sig):
# remember sig.r and sig.s are the main things we're checking
# remember 1/s = pow(s, N-2, N)
s_inv = pow(sig.s, N - 2, N)
# u = z / s
u = z * s_inv % N
# v = r / s
v = sig.r * s_inv % N
# u*G + v*P should have as the x coordinate, r
total = u * G + v * self
return total.x.num == sig.r
def verify_message(self, message, sig):
    '''Verify a message in the form of bytes. Assumes that the z
    is calculated using hash256 interpreted as a big-endian integer.'''
    # calculate the hash256 of the message
    h256 = hash256(message)
    # z is the big-endian interpretation. use big_endian_to_int
    z = big_endian_to_int(h256)
    # verify the message using the self.verify method
    return self.verify(z, sig)
@classmethod
def parse(cls, sec_bin):
'''returns a Point object from a compressed sec binary (not hex)
'''
if sec_bin[0] == 4:
x = int(sec_bin[1:33].hex(), 16)
y = int(sec_bin[33:65].hex(), 16)
return S256Point(x=x, y=y)
is_even = sec_bin[0] == 2
x = S256Field(int(sec_bin[1:].hex(), 16))
# right side of the equation y^2 = x^3 + 7
alpha = x**3 + S256Field(B)
# solve for left side
beta = alpha.sqrt()
if beta.num % 2 == 0:
even_beta = beta
odd_beta = S256Field(P - beta.num)
else:
even_beta = S256Field(P - beta.num)
odd_beta = beta
if is_even:
return S256Point(x, even_beta)
else:
return S256Point(x, odd_beta)
G = S256Point(
0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798,
0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8)
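# A minimal sketch tying the secp256k1 pieces together with a toy secret
# (never use a small secret for a real key). Note that address() imports the
# repo's script module, so run this from the session directory.
def _demo_s256():
    secret = 12345
    point = secret * G
    assert point == S256Point.parse(point.sec())  # SEC serialize/parse round-trip
    print(point.address(testnet=True))            # base58 p2pkh address string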
class S256Test(TestCase):
def test_order(self):
point = N * G
self.assertIsNone(point.x)
def test_pubpoint(self):
# write a test that tests the public point for the following
points = (
# secret, x, y
(7, 0x5cbdf0646e5db4eaa398f365f2ea7a0e3d419b7e0330e39ce92bddedcac4f9bc, 0x6aebca40ba255960a3178d6d861a54dba813d0b813fde7b5a5082628087264da),
(1485, 0xc982196a7466fbbbb0e27a940b6af926c1a74d5ad07128c82824a11b5398afda, 0x7a91f9eae64438afb9ce6448a1c133db2d8fb9254e4546b6f001637d50901f55),
(2**128, 0x8f68b9d2f63b5f339239c1ad981f162ee88c5678723ea3351b7b444c9ec4c0da, 0x662a9f2dba063986de1d90c2b6be215dbbea2cfe95510bfdf23cbf79501fff82),
(2**240 + 2**31, 0x9577ff57c8234558f293df502ca4f09cbc65a6572c842b39b366f21717945116, 0x10b49c67fa9365ad7b90dab070be339a1daf9052373ec30ffae4f72d5e66d053),
)
# iterate over points
for secret, x, y in points:
# initialize the secp256k1 point (S256Point)
point = S256Point(x, y)
# check that the secret*G is the same as the point
self.assertEqual(secret * G, point)
def test_sec(self):
coefficient = 999**3
uncompressed = '049d5ca49670cbe4c3bfa84c96a8c87df086c6ea6a24ba6b809c9de234496808d56fa15cc7f3d38cda98dee2419f415b7513dde1301f8643cd9245aea7f3f911f9'
compressed = '039d5ca49670cbe4c3bfa84c96a8c87df086c6ea6a24ba6b809c9de234496808d5'
point = coefficient * G
self.assertEqual(point.sec(compressed=False), bytes.fromhex(uncompressed))
self.assertEqual(point.sec(compressed=True), bytes.fromhex(compressed))
coefficient = 123
uncompressed = '04a598a8030da6d86c6bc7f2f5144ea549d28211ea58faa70ebf4c1e665c1fe9b5204b5d6f84822c307e4b4a7140737aec23fc63b65b35f86a10026dbd2d864e6b'
compressed = '03a598a8030da6d86c6bc7f2f5144ea549d28211ea58faa70ebf4c1e665c1fe9b5'
point = coefficient * G
self.assertEqual(point.sec(compressed=False), bytes.fromhex(uncompressed))
self.assertEqual(point.sec(compressed=True), bytes.fromhex(compressed))
coefficient = 42424242
uncompressed = '04aee2e7d843f7430097859e2bc603abcc3274ff8169c1a469fee0f20614066f8e21ec53f40efac47ac1c5211b2123527e0e9b57ede790c4da1e72c91fb7da54a3'
compressed = '03aee2e7d843f7430097859e2bc603abcc3274ff8169c1a469fee0f20614066f8e'
point = coefficient * G
self.assertEqual(point.sec(compressed=False), bytes.fromhex(uncompressed))
self.assertEqual(point.sec(compressed=True), bytes.fromhex(compressed))
def test_address(self):
tests = (
(
888**3,
'148dY81A9BmdpMhvYEVznrM45kWN32vSCN',
'mnabU9NCcRE5zcNZ2C16CnvKPELrFvisn3',
),
(
321,
'1FNgueDbMYjNQ8HT77sHKxTwdrHMdTGwyN',
'mfx3y63A7TfTtXKkv7Y6QzsPFY6QCBCXiP',
),
(
4242424242,
'1HUYfVCXEmp76uh17bE2gA72Vuqv4wrM1a',
'mgY3bVusRUL6ZB2Ss999CSrGVbdRwVpM8s',
),
)
for secret, mainnet_legacy, testnet_legacy in tests:
point = secret * G
self.assertEqual(
point.address(testnet=False), mainnet_legacy)
self.assertEqual(
point.address(compressed=False, testnet=True), testnet_legacy)
def test_verify(self):
point = S256Point(
0x887387e452b8eacc4acfde10d9aaf7f6d9a0f975aabb10d006e4da568744d06c,
0x61de6d95231cd89026e286df3b6ae4a894a3378e393e93a0f45b666329a0ae34)
z = 0xec208baa0fc1c19f708a9ca96fdeff3ac3f230bb4a7ba4aede4942ad003c0f60
r = 0xac8d1c87e51d0d441be8b3dd5b05c8795b48875dffe00b7ffcfac23010d3a395
s = 0x68342ceff8935ededd102dd876ffd6ba72d6a427a3edb13d26eb0781cb423c4
self.assertTrue(point.verify(z, Signature(r, s)))
z = 0x7c076ff316692a3d7eb3c3bb0f8b1488cf72e1afcd929e29307032997a838a3d
r = 0xeff69ef2b1bd93a66ed5219add4fb51e11a840f404876325a1e8ffe0529a2c
s = 0xc7207fee197d27c618aea621406f6bf5ef6fca38681d82b2f06fddbdce6feab6
self.assertTrue(point.verify(z, Signature(r, s)))
def test_parse(self):
sec = bytes.fromhex('0349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278a')
point = S256Point.parse(sec)
want = 0xa56c896489c71dfc65701ce25050f542f336893fb8cd15f4e8e5c124dbf58e47
self.assertEqual(point.y.num, want)
class Signature:
def __init__(self, r, s):
self.r = r
self.s = s
def __repr__(self):
return 'Signature({:x},{:x})'.format(self.r, self.s)
def der(self):
# convert the r part to bytes
rbin = int_to_big_endian(self.r, 32)
# if rbin has a high bit, add a 00
if rbin[0] >= 128:
rbin = b'\x00' + rbin
while rbin[0] == 0:
if rbin[1] >= 128:
break
else:
rbin = rbin[1:]
result = bytes([2, len(rbin)]) + rbin
sbin = int_to_big_endian(self.s, 32)
# if sbin has a high bit, add a 00
if sbin[0] >= 128:
sbin = b'\x00' + sbin
while sbin[0] == 0:
if sbin[1] >= 128:
break
else:
sbin = sbin[1:]
result += bytes([2, len(sbin)]) + sbin
return bytes([0x30, len(result)]) + result
@classmethod
def parse(cls, signature_bin):
s = BytesIO(signature_bin)
compound = s.read(1)[0]
if compound != 0x30:
raise RuntimeError("Bad Signature")
length = s.read(1)[0]
if length + 2 != len(signature_bin):
raise RuntimeError("Bad Signature Length")
marker = s.read(1)[0]
if marker != 0x02:
raise RuntimeError("Bad Signature")
rlength = s.read(1)[0]
r = int(s.read(rlength).hex(), 16)
marker = s.read(1)[0]
if marker != 0x02:
raise RuntimeError("Bad Signature")
slength = s.read(1)[0]
s = int(s.read(slength).hex(), 16)
if len(signature_bin) != 6 + rlength + slength:
raise RuntimeError("Signature too long")
return cls(r, s)
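# A minimal sketch: DER-encode a signature and parse it back unchanged.
def _demo_signature_der():
    sig = Signature(r=1, s=2)
    parsed = Signature.parse(sig.der())
    assert (parsed.r, parsed.s) == (1, 2)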
class SignatureTest(TestCase):
def test_der(self):
testcases = (
(1, 2),
(randint(0, 2**256), randint(0, 2**255)),
(randint(0, 2**256), randint(0, 2**255)),
)
for r, s in testcases:
sig = Signature(r, s)
der = sig.der()
sig2 = Signature.parse(der)
self.assertEqual(sig2.r, r)
self.assertEqual(sig2.s, s)
class PrivateKey:
def __init__(self, secret, testnet=False):
self.secret = secret
self.point = secret * G
self.testnet = testnet
def hex(self):
return '{:x}'.format(self.secret).zfill(64)
def sign(self, z):
# we need use deterministic k
k = self.deterministic_k(z)
# r is the x coordinate of the resulting point k*G
r = (k * G).x.num
# remember 1/k = pow(k, N-2, N)
k_inv = pow(k, N - 2, N)
# s = (z+r*secret) / k
s = (z + r * self.secret) * k_inv % N
if s > N // 2:  # integer division avoids float rounding on the 256-bit N
    s = N - s
# return an instance of Signature:
# Signature(r, s)
return Signature(r, s)
def deterministic_k(self, z):
k = b'\x00' * 32
v = b'\x01' * 32
if z > N:
z -= N
z_bytes = int_to_big_endian(z, 32)
secret_bytes = int_to_big_endian(self.secret, 32)
s256 = hashlib.sha256
k = hmac.new(k, v + b'\x00' + secret_bytes + z_bytes, s256).digest()
v = hmac.new(k, v, s256).digest()
k = hmac.new(k, v + b'\x01' + secret_bytes + z_bytes, s256).digest()
v = hmac.new(k, v, s256).digest()
while True:
v = hmac.new(k, v, s256).digest()
candidate = big_endian_to_int(v)
if candidate >= 1 and candidate < N:
return candidate
k = hmac.new(k, v + b'\x00', s256).digest()
v = hmac.new(k, v, s256).digest()
def sign_message(self, message):
    '''Sign a message in the form of bytes instead of the z. The z should
    be assumed to be the hash256 of the message interpreted as a big-endian
    integer.'''
    # compute the hash256 of the message
    h256 = hash256(message)
    # z is the big-endian interpretation. use big_endian_to_int
    z = big_endian_to_int(h256)
    # sign the message using the self.sign method
    return self.sign(z)
def wif(self, compressed=True):
# convert the secret from integer to a 32-bytes in big endian using int_to_big_endian(x, 32)
secret_bytes = int_to_big_endian(self.secret, 32)
# prepend b'\xef' on testnet, b'\x80' on mainnet
if self.testnet:
prefix = b'\xef'
else:
prefix = b'\x80'
# append b'\x01' if compressed
if compressed:
suffix = b'\x01'
else:
suffix = b''
# encode_base58_checksum the whole thing
return encode_base58_checksum(prefix + secret_bytes + suffix)
@classmethod
def parse(cls, wif):
'''Converts WIF to a PrivateKey object'''
raw = raw_decode_base58(wif)
if len(raw) == 34: # compressed
if raw[-1] != 1:
raise ValueError('Invalid WIF')
raw = raw[:-1]
secret = big_endian_to_int(raw[1:])
if raw[0] == 0xef:
testnet = True
elif raw[0] == 0x80:
testnet = False
else:
raise ValueError('Invalid WIF')
return cls(secret, testnet=testnet)
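# A minimal sketch: deterministic signing round-trip plus WIF encode/decode,
# again with a toy secret.
def _demo_private_key():
    pk = PrivateKey(secret=12345, testnet=True)
    sig = pk.sign(z=2020)
    assert pk.point.verify(2020, sig)
    assert PrivateKey.parse(pk.wif()).secret == pk.secret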
class PrivateKeyTest(TestCase):
def test_sign(self):
pk = PrivateKey(randint(0, 2**256))
z = randint(0, 2**256)
sig = pk.sign(z)
self.assertTrue(pk.point.verify(z, sig))
def test_sign_message(self):
pk = PrivateKey(randint(0, 2**256))
message = b'This is a test message'
sig = pk.sign_message(message)
self.assertTrue(pk.point.verify_message(message, sig))
avg_line_length: 35.479434 | max_line_length: 165 | alphanum_fraction: 0.566786
from io import BytesIO
from random import randint
from unittest import TestCase
import hmac
import hashlib
from helper import (
big_endian_to_int,
encode_base58_checksum,
hash160,
hash256,
int_to_big_endian,
raw_decode_base58,
)
class FieldElement:
def __init__(self, num, prime):
if num >= prime or num < 0:
error = 'Num {} not in field range 0 to {}'.format(
num, prime - 1)
raise ValueError(error)
self.num = num
self.prime = prime
def __eq__(self, other):
if other is None:
return False
return self.num == other.num and self.prime == other.prime
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return 'FieldElement_{}({})'.format(self.prime, self.num)
def __add__(self, other):
if self.prime != other.prime:
raise TypeError('Cannot add two numbers in different Fields')
num = (self.num + other.num) % self.prime
prime = self.prime
# You need to return an element of the same class
# use: self.__class__(num, prime)
return self.__class__(num, prime)
def __sub__(self, other):
if self.prime != other.prime:
raise TypeError('Cannot add two numbers in different Fields')
# self.num and other.num are the actual values
num = (self.num - other.num) % self.prime
# self.prime is what you'll need to mod against
prime = self.prime
return self.__class__(num, prime)
def __mul__(self, other):
if self.prime != other.prime:
raise TypeError('Cannot add two numbers in different Fields')
num = (self.num * other.num) % self.prime
prime = self.prime
# You need to return an element of the same class
# use: self.__class__(num, prime)
return self.__class__(num, prime)
def __pow__(self, n):
# remember Fermat's Little Theorem:
prime = self.prime
num = pow(self.num, n % (prime - 1), prime)
return self.__class__(num, prime)
def __truediv__(self, other):
if self.prime != other.prime:
raise TypeError('Cannot add two numbers in different Fields')
num = (self.num * pow(other.num, self.prime - 2, self.prime)) % self.prime
prime = self.prime
# use fermat's little theorem:
return self.__class__(num, prime)
def __rmul__(self, coefficient):
num = (self.num * coefficient) % self.prime
return self.__class__(num=num, prime=self.prime)
class FieldElementTest(TestCase):
def test_ne(self):
a = FieldElement(2, 31)
b = FieldElement(2, 31)
c = FieldElement(15, 31)
self.assertEqual(a, b)
self.assertTrue(a != c)
self.assertFalse(a != b)
def test_add(self):
a = FieldElement(2, 31)
b = FieldElement(15, 31)
self.assertEqual(a + b, FieldElement(17, 31))
a = FieldElement(17, 31)
b = FieldElement(21, 31)
self.assertEqual(a + b, FieldElement(7, 31))
def test_sub(self):
a = FieldElement(29, 31)
b = FieldElement(4, 31)
self.assertEqual(a - b, FieldElement(25, 31))
a = FieldElement(15, 31)
b = FieldElement(30, 31)
self.assertEqual(a - b, FieldElement(16, 31))
def test_mul(self):
a = FieldElement(24, 31)
b = FieldElement(19, 31)
self.assertEqual(a * b, FieldElement(22, 31))
def test_pow(self):
a = FieldElement(17, 31)
self.assertEqual(a**3, FieldElement(15, 31))
a = FieldElement(5, 31)
b = FieldElement(18, 31)
self.assertEqual(a**5 * b, FieldElement(16, 31))
def test_div(self):
a = FieldElement(3, 31)
b = FieldElement(24, 31)
self.assertEqual(a / b, FieldElement(4, 31))
a = FieldElement(17, 31)
self.assertEqual(a**-3, FieldElement(29, 31))
a = FieldElement(4, 31)
b = FieldElement(11, 31)
self.assertEqual(a**-4 * b, FieldElement(13, 31))
class Point:
def __init__(self, x, y, a, b):
self.a = a
self.b = b
self.x = x
self.y = y
# with None values for both.
if self.x is None and self.y is None:
return
# make sure that the elliptic curve equation is satisfied
# y**2 == x**3 + a*x + b
if self.y**2 != self.x**3 + a * x + b:
# if not, raise a ValueError
raise ValueError('({}, {}) is not on the curve'.format(self.x, self.y))
def __eq__(self, other):
return self.x == other.x and self.y == other.y \
and self.a == other.a and self.b == other.b
def __ne__(self, other):
# this should be the inverse of the == operator
return not (self == other)
def __repr__(self):
if self.x is None:
return 'Point(infinity)'
else:
return 'Point({},{})_{}'.format(self.x.num, self.y.num, self.x.prime)
def __add__(self, other):
if self.a != other.a or self.b != other.b:
raise TypeError('Points {}, {} are not on the same curve'.format(self, other))
# Case 0.0: self is the point at infinity, return other
if self.x is None:
return other
# Case 0.1: other is the point at infinity, return self
if other.x is None:
return self
# Case 1: self.x == other.x, self.y != other.y
# Result is point at infinity
if self.x == other.x and self.y != other.y:
# Remember to return an instance of this class:
# self.__class__(x, y, a, b)
return self.__class__(None, None, self.a, self.b)
# Case 2: self.x != other.x
if self.x != other.x:
# Formula (x3,y3)==(x1,y1)+(x2,y2)
# s=(y2-y1)/(x2-x1)
s = (other.y - self.y) / (other.x - self.x)
# x3=s**2-x1-x2
x = s**2 - self.x - other.x
# y3=s*(x1-x3)-y1
y = s * (self.x - x) - self.y
return self.__class__(x, y, self.a, self.b)
# Case 3: self.x == other.x, self.y == other.y
else:
# Formula (x3,y3)=(x1,y1)+(x1,y1)
# s=(3*x1**2+a)/(2*y1)
s = (3 * self.x**2 + self.a) / (2 * self.y)
# x3=s**2-2*x1
x = s**2 - 2 * self.x
# y3=s*(x1-x3)-y1
y = s * (self.x - x) - self.y
return self.__class__(x, y, self.a, self.b)
def __rmul__(self, coefficient):
# rmul calculates coefficient * self
coef = coefficient
current = self
# start at 0
result = self.__class__(None, None, self.a, self.b)
while coef:
# if the bit at this binary expansion is 1, add
if coef & 1:
result += current
# double the point
current += current
coef >>= 1
return result
class PointTest(TestCase):
def test_ne(self):
a = Point(x=3, y=-7, a=5, b=7)
b = Point(x=18, y=77, a=5, b=7)
self.assertTrue(a != b)
self.assertFalse(a != a)
def test_on_curve(self):
with self.assertRaises(ValueError):
Point(x=-2, y=4, a=5, b=7)
# these should not raise an error
Point(x=3, y=-7, a=5, b=7)
Point(x=18, y=77, a=5, b=7)
def test_add0(self):
a = Point(x=None, y=None, a=5, b=7)
b = Point(x=2, y=5, a=5, b=7)
c = Point(x=2, y=-5, a=5, b=7)
self.assertEqual(a + b, b)
self.assertEqual(b + a, b)
self.assertEqual(b + c, a)
def test_add1(self):
a = Point(x=3, y=7, a=5, b=7)
b = Point(x=-1, y=-1, a=5, b=7)
self.assertEqual(a + b, Point(x=2, y=-5, a=5, b=7))
def test_add2(self):
a = Point(x=-1, y=1, a=5, b=7)
self.assertEqual(a + a, Point(x=18, y=-77, a=5, b=7))
class ECCTest(TestCase):
def test_on_curve(self):
# tests the following points whether they are on the curve or not
# on curve y^2=x^3-7 over F_223:
# (192,105) (17,56) (200,119) (1,193) (42,99)
# the ones that aren't should raise a ValueError
prime = 223
a = FieldElement(0, prime)
b = FieldElement(7, prime)
valid_points = ((192, 105), (17, 56), (1, 193))
invalid_points = ((200, 119), (42, 99))
for x_raw, y_raw in valid_points:
x = FieldElement(x_raw, prime)
y = FieldElement(y_raw, prime)
Point(x, y, a, b)
for x_raw, y_raw in invalid_points:
x = FieldElement(x_raw, prime)
y = FieldElement(y_raw, prime)
with self.assertRaises(ValueError):
Point(x, y, a, b)
def test_add(self):
prime = 223
a = FieldElement(0, prime)
b = FieldElement(7, prime)
additions = (
(192, 105, 17, 56, 170, 142),
(47, 71, 117, 141, 60, 139),
(143, 98, 76, 66, 47, 71),
)
for x1_raw, y1_raw, x2_raw, y2_raw, x3_raw, y3_raw in additions:
x1 = FieldElement(x1_raw, prime)
y1 = FieldElement(y1_raw, prime)
p1 = Point(x1, y1, a, b)
x2 = FieldElement(x2_raw, prime)
y2 = FieldElement(y2_raw, prime)
p2 = Point(x2, y2, a, b)
x3 = FieldElement(x3_raw, prime)
y3 = FieldElement(y3_raw, prime)
p3 = Point(x3, y3, a, b)
self.assertEqual(p1 + p2, p3)
def test_rmul(self):
prime = 223
a = FieldElement(0, prime)
b = FieldElement(7, prime)
multiplications = (
(2, 192, 105, 49, 71),
(2, 143, 98, 64, 168),
(2, 47, 71, 36, 111),
(4, 47, 71, 194, 51),
(8, 47, 71, 116, 55),
(21, 47, 71, None, None),
)
for s, x1_raw, y1_raw, x2_raw, y2_raw in multiplications:
x1 = FieldElement(x1_raw, prime)
y1 = FieldElement(y1_raw, prime)
p1 = Point(x1, y1, a, b)
# x2 = FieldElement(x2_raw, prime)
# y2 = FieldElement(y2_raw, prime)
# p2 = Point(x2, y2, a, b)
if x2_raw is None:
p2 = Point(None, None, a, b)
else:
x2 = FieldElement(x2_raw, prime)
y2 = FieldElement(y2_raw, prime)
p2 = Point(x2, y2, a, b)
# check that the product is equal to the expected point
self.assertEqual(s * p1, p2)
A = 0
B = 7
P = 2**256 - 2**32 - 977
N = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
class S256Field(FieldElement):
def __init__(self, num, prime=None):
super().__init__(num=num, prime=P)
def hex(self):
return '{:x}'.format(self.num).zfill(64)
def __repr__(self):
return self.hex()
def sqrt(self):
return self**((P + 1) // 4)
class S256Point(Point):
def __init__(self, x, y, a=None, b=None):
a, b = S256Field(A), S256Field(B)
if type(x) == int:
super().__init__(x=S256Field(x), y=S256Field(y), a=a, b=b)
else:
super().__init__(x=x, y=y, a=a, b=b)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __repr__(self):
if self.x is None:
return 'S256Point(infinity)'
else:
return 'S256Point({},{})'.format(hex(self.x.num), hex(self.y.num))
def __rmul__(self, coefficient):
# we want to mod by N to make this simple
coef = coefficient % N
return super().__rmul__(coef)
def sec(self, compressed=True):
# returns the binary version of the sec format, NOT hex
# if compressed, starts with b'\x02' if self.y.num is even, b'\x03' if self.y is odd
# then self.x.num
# remember, you have to convert self.x.num/self.y.num to binary using int_to_big_endian
x = int_to_big_endian(self.x.num, 32)
if compressed:
if self.y.num % 2 == 0:
return b'\x02' + x
else:
return b'\x03' + x
else:
# if non-compressed, starts with b'\x04' followod by self.x and then self.y
y = int_to_big_endian(self.y.num, 32)
return b'\x04' + x + y
def hash160(self, compressed=True):
# get the sec
sec = self.sec(compressed)
# hash160 the sec
return hash160(sec)
def address(self, compressed=True, testnet=False):
h160 = self.hash160(compressed)
from script import P2PKHScriptPubKey
return P2PKHScriptPubKey(h160).address(testnet)
def verify(self, z, sig):
# remember sig.r and sig.s are the main things we're checking
s_inv = pow(sig.s, N - 2, N)
u = z * s_inv % N
v = sig.r * s_inv % N
total = u * G + v * self
return total.x.num == sig.r
def verify_message(self, message, sig):
raise NotImplementedError
@classmethod
def parse(self, sec_bin):
if sec_bin[0] == 4:
x = int(sec_bin[1:33].hex(), 16)
y = int(sec_bin[33:65].hex(), 16)
return S256Point(x=x, y=y)
is_even = sec_bin[0] == 2
x = S256Field(int(sec_bin[1:].hex(), 16))
alpha = x**3 + S256Field(B)
beta = alpha.sqrt()
if beta.num % 2 == 0:
even_beta = beta
odd_beta = S256Field(P - beta.num)
else:
even_beta = S256Field(P - beta.num)
odd_beta = beta
if is_even:
return S256Point(x, even_beta)
else:
return S256Point(x, odd_beta)
G = S256Point(
0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798,
0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8)
class S256Test(TestCase):
def test_order(self):
point = N * G
self.assertIsNone(point.x)
def test_pubpoint(self):
points = (
(7, 0x5cbdf0646e5db4eaa398f365f2ea7a0e3d419b7e0330e39ce92bddedcac4f9bc, 0x6aebca40ba255960a3178d6d861a54dba813d0b813fde7b5a5082628087264da),
(1485, 0xc982196a7466fbbbb0e27a940b6af926c1a74d5ad07128c82824a11b5398afda, 0x7a91f9eae64438afb9ce6448a1c133db2d8fb9254e4546b6f001637d50901f55),
(2**128, 0x8f68b9d2f63b5f339239c1ad981f162ee88c5678723ea3351b7b444c9ec4c0da, 0x662a9f2dba063986de1d90c2b6be215dbbea2cfe95510bfdf23cbf79501fff82),
(2**240 + 2**31, 0x9577ff57c8234558f293df502ca4f09cbc65a6572c842b39b366f21717945116, 0x10b49c67fa9365ad7b90dab070be339a1daf9052373ec30ffae4f72d5e66d053),
)
for secret, x, y in points:
point = S256Point(x, y)
self.assertEqual(secret * G, point)
def test_sec(self):
coefficient = 999**3
uncompressed = '049d5ca49670cbe4c3bfa84c96a8c87df086c6ea6a24ba6b809c9de234496808d56fa15cc7f3d38cda98dee2419f415b7513dde1301f8643cd9245aea7f3f911f9'
compressed = '039d5ca49670cbe4c3bfa84c96a8c87df086c6ea6a24ba6b809c9de234496808d5'
point = coefficient * G
self.assertEqual(point.sec(compressed=False), bytes.fromhex(uncompressed))
self.assertEqual(point.sec(compressed=True), bytes.fromhex(compressed))
coefficient = 123
uncompressed = '04a598a8030da6d86c6bc7f2f5144ea549d28211ea58faa70ebf4c1e665c1fe9b5204b5d6f84822c307e4b4a7140737aec23fc63b65b35f86a10026dbd2d864e6b'
compressed = '03a598a8030da6d86c6bc7f2f5144ea549d28211ea58faa70ebf4c1e665c1fe9b5'
point = coefficient * G
self.assertEqual(point.sec(compressed=False), bytes.fromhex(uncompressed))
self.assertEqual(point.sec(compressed=True), bytes.fromhex(compressed))
coefficient = 42424242
uncompressed = '04aee2e7d843f7430097859e2bc603abcc3274ff8169c1a469fee0f20614066f8e21ec53f40efac47ac1c5211b2123527e0e9b57ede790c4da1e72c91fb7da54a3'
compressed = '03aee2e7d843f7430097859e2bc603abcc3274ff8169c1a469fee0f20614066f8e'
point = coefficient * G
self.assertEqual(point.sec(compressed=False), bytes.fromhex(uncompressed))
self.assertEqual(point.sec(compressed=True), bytes.fromhex(compressed))
def test_address(self):
tests = (
(
888**3,
'148dY81A9BmdpMhvYEVznrM45kWN32vSCN',
'mnabU9NCcRE5zcNZ2C16CnvKPELrFvisn3',
),
(
321,
'1FNgueDbMYjNQ8HT77sHKxTwdrHMdTGwyN',
'mfx3y63A7TfTtXKkv7Y6QzsPFY6QCBCXiP',
),
(
4242424242,
'1HUYfVCXEmp76uh17bE2gA72Vuqv4wrM1a',
'mgY3bVusRUL6ZB2Ss999CSrGVbdRwVpM8s',
),
)
for secret, mainnet_legacy, testnet_legacy in tests:
point = secret * G
self.assertEqual(
point.address(testnet=False), mainnet_legacy)
self.assertEqual(
point.address(compressed=False, testnet=True), testnet_legacy)
def test_verify(self):
point = S256Point(
0x887387e452b8eacc4acfde10d9aaf7f6d9a0f975aabb10d006e4da568744d06c,
0x61de6d95231cd89026e286df3b6ae4a894a3378e393e93a0f45b666329a0ae34)
z = 0xec208baa0fc1c19f708a9ca96fdeff3ac3f230bb4a7ba4aede4942ad003c0f60
r = 0xac8d1c87e51d0d441be8b3dd5b05c8795b48875dffe00b7ffcfac23010d3a395
s = 0x68342ceff8935ededd102dd876ffd6ba72d6a427a3edb13d26eb0781cb423c4
self.assertTrue(point.verify(z, Signature(r, s)))
z = 0x7c076ff316692a3d7eb3c3bb0f8b1488cf72e1afcd929e29307032997a838a3d
r = 0xeff69ef2b1bd93a66ed5219add4fb51e11a840f404876325a1e8ffe0529a2c
s = 0xc7207fee197d27c618aea621406f6bf5ef6fca38681d82b2f06fddbdce6feab6
self.assertTrue(point.verify(z, Signature(r, s)))
def test_parse(self):
sec = bytes.fromhex('0349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278a')
point = S256Point.parse(sec)
want = 0xa56c896489c71dfc65701ce25050f542f336893fb8cd15f4e8e5c124dbf58e47
self.assertEqual(point.y.num, want)
class Signature:
def __init__(self, r, s):
self.r = r
self.s = s
def __repr__(self):
return 'Signature({:x},{:x})'.format(self.r, self.s)
def der(self):
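        # DER layout produced here (sketch):
        #   0x30 <total-len> 0x02 <r-len> <r> 0x02 <s-len> <s>
        # a leading 0x00 is added when the high bit is set, since DER
        # integers are signed; surplus leading zero bytes are stripped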
rbin = int_to_big_endian(self.r, 32)
if rbin[0] >= 128:
rbin = b'\x00' + rbin
while rbin[0] == 0:
if rbin[1] >= 128:
break
else:
rbin = rbin[1:]
result = bytes([2, len(rbin)]) + rbin
sbin = int_to_big_endian(self.s, 32)
if sbin[0] >= 128:
sbin = b'\x00' + sbin
while sbin[0] == 0:
if sbin[1] >= 128:
break
else:
sbin = sbin[1:]
result += bytes([2, len(sbin)]) + sbin
return bytes([0x30, len(result)]) + result
@classmethod
def parse(cls, signature_bin):
s = BytesIO(signature_bin)
compound = s.read(1)[0]
if compound != 0x30:
raise RuntimeError("Bad Signature")
length = s.read(1)[0]
if length + 2 != len(signature_bin):
raise RuntimeError("Bad Signature Length")
marker = s.read(1)[0]
if marker != 0x02:
raise RuntimeError("Bad Signature")
rlength = s.read(1)[0]
r = int(s.read(rlength).hex(), 16)
marker = s.read(1)[0]
if marker != 0x02:
raise RuntimeError("Bad Signature")
slength = s.read(1)[0]
s = int(s.read(slength).hex(), 16)
        if len(signature_bin) != 6 + rlength + slength:
            raise RuntimeError("Bad Signature Length")
return cls(r, s)
class SignatureTest(TestCase):
def test_der(self):
testcases = (
(1, 2),
(randint(0, 2**256), randint(0, 2**255)),
(randint(0, 2**256), randint(0, 2**255)),
)
for r, s in testcases:
sig = Signature(r, s)
der = sig.der()
sig2 = Signature.parse(der)
self.assertEqual(sig2.r, r)
self.assertEqual(sig2.s, s)
class PrivateKey:
def __init__(self, secret, testnet=False):
self.secret = secret
self.point = secret * G
self.testnet = testnet
def hex(self):
return '{:x}'.format(self.secret).zfill(64)
def sign(self, z):
k = self.deterministic_k(z)
r = (k * G).x.num
k_inv = pow(k, N - 2, N)
s = (z + r * self.secret) * k_inv % N
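        # use the low-s value to avoid signature malleability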
        if s > N // 2:
s = N - s
return Signature(r, s)
def deterministic_k(self, z):
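        # RFC 6979-style deterministic nonce: derive k from the secret and
        # the message hash via HMAC-SHA256, so no RNG is needed at signing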
k = b'\x00' * 32
v = b'\x01' * 32
if z > N:
z -= N
z_bytes = int_to_big_endian(z, 32)
secret_bytes = int_to_big_endian(self.secret, 32)
s256 = hashlib.sha256
k = hmac.new(k, v + b'\x00' + secret_bytes + z_bytes, s256).digest()
v = hmac.new(k, v, s256).digest()
k = hmac.new(k, v + b'\x01' + secret_bytes + z_bytes, s256).digest()
v = hmac.new(k, v, s256).digest()
while True:
v = hmac.new(k, v, s256).digest()
candidate = big_endian_to_int(v)
if candidate >= 1 and candidate < N:
return candidate
k = hmac.new(k, v + b'\x00', s256).digest()
v = hmac.new(k, v, s256).digest()
def sign_message(self, message):
raise NotImplementedError
def wif(self, compressed=True):
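        # WIF layout (sketch): prefix(1) + secret(32) + optional 0x01
        # compression marker, then base58check-encoded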
secret_bytes = int_to_big_endian(self.secret, 32)
if self.testnet:
prefix = b'\xef'
else:
prefix = b'\x80'
if compressed:
suffix = b'\x01'
else:
suffix = b''
return encode_base58_checksum(prefix + secret_bytes + suffix)
@classmethod
def parse(cls, wif):
raw = raw_decode_base58(wif)
if len(raw) == 34:
if raw[-1] != 1:
raise ValueError('Invalid WIF')
raw = raw[:-1]
secret = big_endian_to_int(raw[1:])
if raw[0] == 0xef:
testnet = True
elif raw[0] == 0x80:
testnet = False
else:
raise ValueError('Invalid WIF')
return cls(secret, testnet=testnet)
class PrivateKeyTest(TestCase):
def test_sign(self):
pk = PrivateKey(randint(0, 2**256))
z = randint(0, 2**256)
sig = pk.sign(z)
self.assertTrue(pk.point.verify(z, sig))
def test_sign_message(self):
pk = PrivateKey(randint(0, 2**256))
message = b'This is a test message'
sig = pk.sign_message(message)
self.assertTrue(pk.point.verify_message(message, sig))
| true
| true
|
1c4006aa0d55930f34f8dd6c895baeb0c2c04c55
| 964
|
py
|
Python
|
infinitd_server/handler/debug_battle_input.py
|
rhofour/InfiniTDBackend
|
8763d64a82d02e4282abff5419e1ab256af41d7e
|
[
"MIT"
] | null | null | null |
infinitd_server/handler/debug_battle_input.py
|
rhofour/InfiniTDBackend
|
8763d64a82d02e4282abff5419e1ab256af41d7e
|
[
"MIT"
] | null | null | null |
infinitd_server/handler/debug_battle_input.py
|
rhofour/InfiniTDBackend
|
8763d64a82d02e4282abff5419e1ab256af41d7e
|
[
"MIT"
] | null | null | null |
import cattr
from infinitd_server.battle_computer import BattleCalculationException
from infinitd_server.game import Game
from infinitd_server.handler.base import BaseHandler
class DebugBattleInputHandler(BaseHandler):
game: Game # See https://github.com/google/pytype/issues/652
def get(self, attackerName, defenderName):
self.logInfo(f"Trying to download battle input for {attackerName} vs {defenderName}")
attacker = self.game.getUserByName(attackerName)
if attacker is None:
self.set_status(404)
self.write(f"Unknown user: {attackerName}")
return
defender = self.game.getUserByName(defenderName)
if defender is None:
self.set_status(404)
self.write(f"Unknown user: {defenderName}")
return
data = {
'battleground': defender.battleground.to_dict(),
'wave': attacker.wave,
}
self.write(data)
| 35.703704
| 93
| 0.665975
|
import cattr
from infinitd_server.battle_computer import BattleCalculationException
from infinitd_server.game import Game
from infinitd_server.handler.base import BaseHandler
class DebugBattleInputHandler(BaseHandler):
game: Game
def get(self, attackerName, defenderName):
self.logInfo(f"Trying to download battle input for {attackerName} vs {defenderName}")
attacker = self.game.getUserByName(attackerName)
if attacker is None:
self.set_status(404)
self.write(f"Unknown user: {attackerName}")
return
defender = self.game.getUserByName(defenderName)
if defender is None:
self.set_status(404)
self.write(f"Unknown user: {defenderName}")
return
data = {
'battleground': defender.battleground.to_dict(),
'wave': attacker.wave,
}
self.write(data)
| true
| true
|
1c4007c0021d8943500c88dc970ef3782332364a
| 146
|
py
|
Python
|
6kyu/(6 kyu) CamelCase Method/(6 kyu) CamelCase Method.py
|
e1r0nd/codewars
|
dc98484281345e7675eb5e8a51c192e2fa77c443
|
[
"MIT"
] | 49
|
2018-04-30T06:42:45.000Z
|
2021-07-22T16:39:02.000Z
|
(6 kyu) CamelCase Method/(6 kyu) CamelCase Method.py
|
novsunheng/codewars
|
c54b1d822356889b91587b088d02ca0bd3d8dc9e
|
[
"MIT"
] | 1
|
2020-08-31T02:36:53.000Z
|
2020-08-31T10:14:00.000Z
|
(6 kyu) CamelCase Method/(6 kyu) CamelCase Method.py
|
novsunheng/codewars
|
c54b1d822356889b91587b088d02ca0bd3d8dc9e
|
[
"MIT"
] | 25
|
2018-04-02T20:57:58.000Z
|
2021-05-28T15:24:51.000Z
|
def camel_case(string):
# #1
# return "".join(c.capitalize() for c in string.split())
# #2
return string.title().replace(" ", "")
| 24.333333
| 60
| 0.568493
|
def camel_case(string):
return string.title().replace(" ", "")
| true
| true
|
1c4007ea8923b4de35b22c4e30e314948e3d4ee2
| 1,604
|
py
|
Python
|
setup.py
|
frafra/django-dataporten
|
4236017611e08d08bd810be0beae1b994cb5fc67
|
[
"MIT"
] | null | null | null |
setup.py
|
frafra/django-dataporten
|
4236017611e08d08bd810be0beae1b994cb5fc67
|
[
"MIT"
] | null | null | null |
setup.py
|
frafra/django-dataporten
|
4236017611e08d08bd810be0beae1b994cb5fc67
|
[
"MIT"
] | null | null | null |
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-dataporten',
version='0.4.0',
packages=find_packages(),
include_package_data=True,
license='MIT License',
description='A simple Django app to fetch and parse data from Dataporten.',
long_description=README,
url='https://github.com/JakobGM/django-dataporten',
author='Jakob Gerhard Martinussen',
author_email='jakobgm@gmail.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
],
install_requires=[
'django',
'pip-tools',
'pytest',
'pytest-django',
'requests',
'requests-cache',
'responses',
'mypy',
'mypy_extensions',
'freezegun',
'factory_boy',
'django-allauth'
]
)
| 30.846154
| 79
| 0.596633
|
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-dataporten',
version='0.4.0',
packages=find_packages(),
include_package_data=True,
license='MIT License',
description='A simple Django app to fetch and parse data from Dataporten.',
long_description=README,
url='https://github.com/JakobGM/django-dataporten',
author='Jakob Gerhard Martinussen',
author_email='jakobgm@gmail.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
],
install_requires=[
'django',
'pip-tools',
'pytest',
'pytest-django',
'requests',
'requests-cache',
'responses',
'mypy',
'mypy_extensions',
'freezegun',
'factory_boy',
'django-allauth'
]
)
| true
| true
|
1c40084536a1e2b5687248de68f5c8bf4fea9c84
| 1,863
|
py
|
Python
|
src/ripe_rainbow/domain/wrappers.py
|
ripe-tech/ripe-rainbow
|
12b430d15102ed6a731d239db00d32dae87384df
|
[
"Apache-2.0"
] | 2
|
2019-06-11T09:19:48.000Z
|
2020-06-30T09:30:29.000Z
|
src/ripe_rainbow/domain/wrappers.py
|
ripe-tech/ripe-rainbow
|
12b430d15102ed6a731d239db00d32dae87384df
|
[
"Apache-2.0"
] | 43
|
2019-06-06T10:06:46.000Z
|
2022-02-02T10:47:53.000Z
|
src/ripe_rainbow/domain/wrappers.py
|
ripe-tech/ripe-rainbow
|
12b430d15102ed6a731d239db00d32dae87384df
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from . import base
from . import logic
BASE_TUPLES = (
("logic", base.LogicPart),
("interactions", base.InteractionsPart),
("waits", base.WaitsPart)
)
COPPER_TUPLES = (
("provision", logic.ProvisionPart),
("id", logic.RipeIdPart),
("core", logic.RipeCorePart),
("copper", logic.RipeCopperPart)
)
PULSE_TUPLES = (
("provision", logic.ProvisionPart),
("id", logic.RipeIdPart),
("core", logic.RipeCorePart),
("pulse", logic.RipePulsePart)
)
RETAIL_TUPLES = (
("provision", logic.ProvisionPart),
("admin", logic.AdminPart),
("core", logic.RipeCorePart),
("retail", logic.RipeRetailPart)
)
UTIL_VUE_TUPLES = (
("provision", logic.ProvisionPart),
("id", logic.RipeIdPart),
("core", logic.RipeCorePart),
("util_vue", logic.RipeUtilVuePart)
)
WHITE_TUPLES = (
("provision", logic.ProvisionPart),
("id", logic.RipeIdPart),
("core", logic.RipeCorePart),
("white", logic.RipeWhitePart)
)
class DomainWrapper(object):
@classmethod
def wrap(cls, instance, tuples):
for name, _cls in tuples:
part = _cls(instance)
setattr(instance, name, part)
@classmethod
def wrap_base(cls, instance):
cls.wrap(instance, BASE_TUPLES)
@classmethod
def wrap_copper(cls, instance):
cls.wrap(instance, COPPER_TUPLES)
@classmethod
def wrap_pulse(cls, instance):
cls.wrap(instance, PULSE_TUPLES)
@classmethod
def wrap_retail(cls, instance):
cls.wrap(instance, RETAIL_TUPLES)
@classmethod
def wrap_util_vue(cls, instance):
cls.wrap(instance, UTIL_VUE_TUPLES)
@classmethod
def wrap_white(cls, instance):
cls.wrap(instance, WHITE_TUPLES)
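# Usage sketch (hypothetical instance): DomainWrapper.wrap_base(instance)
# attaches instance.logic, instance.interactions and instance.waits parts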
| 23.582278
| 45
| 0.613527
|
from . import base
from . import logic
BASE_TUPLES = (
("logic", base.LogicPart),
("interactions", base.InteractionsPart),
("waits", base.WaitsPart)
)
COPPER_TUPLES = (
("provision", logic.ProvisionPart),
("id", logic.RipeIdPart),
("core", logic.RipeCorePart),
("copper", logic.RipeCopperPart)
)
PULSE_TUPLES = (
("provision", logic.ProvisionPart),
("id", logic.RipeIdPart),
("core", logic.RipeCorePart),
("pulse", logic.RipePulsePart)
)
RETAIL_TUPLES = (
("provision", logic.ProvisionPart),
("admin", logic.AdminPart),
("core", logic.RipeCorePart),
("retail", logic.RipeRetailPart)
)
UTIL_VUE_TUPLES = (
("provision", logic.ProvisionPart),
("id", logic.RipeIdPart),
("core", logic.RipeCorePart),
("util_vue", logic.RipeUtilVuePart)
)
WHITE_TUPLES = (
("provision", logic.ProvisionPart),
("id", logic.RipeIdPart),
("core", logic.RipeCorePart),
("white", logic.RipeWhitePart)
)
class DomainWrapper(object):
@classmethod
def wrap(cls, instance, tuples):
for name, _cls in tuples:
part = _cls(instance)
setattr(instance, name, part)
@classmethod
def wrap_base(cls, instance):
cls.wrap(instance, BASE_TUPLES)
@classmethod
def wrap_copper(cls, instance):
cls.wrap(instance, COPPER_TUPLES)
@classmethod
def wrap_pulse(cls, instance):
cls.wrap(instance, PULSE_TUPLES)
@classmethod
def wrap_retail(cls, instance):
cls.wrap(instance, RETAIL_TUPLES)
@classmethod
def wrap_util_vue(cls, instance):
cls.wrap(instance, UTIL_VUE_TUPLES)
@classmethod
def wrap_white(cls, instance):
cls.wrap(instance, WHITE_TUPLES)
| true
| true
|
1c4008e1576b02de561702fde7cd6805892c7693
| 11,504
|
py
|
Python
|
depreciated/AlexNet-paddle/paddlevision/datasets/folder.py
|
dyning/AlexNet-Prod
|
54de9dfcf540997ff227bd92d0c7a73dc73c45aa
|
[
"Apache-2.0"
] | 17
|
2021-08-11T13:42:03.000Z
|
2022-03-30T03:50:27.000Z
|
ResNet_paddle/paddlevision/datasets/folder.py
|
livingbody/resnet-livingbody
|
a8c04faf9cc6896f7c3aef06cddfe38ce74f00ee
|
[
"Apache-2.0"
] | 11
|
2021-08-12T06:29:17.000Z
|
2021-12-23T03:15:39.000Z
|
ResNet_paddle/paddlevision/datasets/folder.py
|
livingbody/resnet-livingbody
|
a8c04faf9cc6896f7c3aef06cddfe38ce74f00ee
|
[
"Apache-2.0"
] | 17
|
2021-08-11T14:12:38.000Z
|
2022-03-30T03:50:31.000Z
|
from .vision import VisionDataset
from PIL import Image
import os
import os.path
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
def has_file_allowed_extension(filename: str,
extensions: Tuple[str, ...]) -> bool:
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (tuple of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
return filename.lower().endswith(extensions)
def is_image_file(filename: str) -> bool:
"""Checks if a file is an allowed image extension.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def find_classes(directory: str) -> Tuple[List[str], Dict[str, int]]:
"""Finds the class folders in a dataset.
See :class:`DatasetFolder` for details.
"""
classes = sorted(
entry.name for entry in os.scandir(directory) if entry.is_dir())
if not classes:
raise FileNotFoundError(
f"Couldn't find any class folder in {directory}.")
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
def make_dataset(
directory: str,
class_to_idx: Optional[Dict[str, int]]=None,
extensions: Optional[Tuple[str, ...]]=None,
is_valid_file: Optional[Callable[[str], bool]]=None, ) -> List[Tuple[
str, int]]:
"""Generates a list of samples of a form (path_to_sample, class).
See :class:`DatasetFolder` for details.
Note: The class_to_idx parameter is here optional and will use the logic of the ``find_classes`` function
by default.
"""
directory = os.path.expanduser(directory)
if class_to_idx is None:
_, class_to_idx = find_classes(directory)
elif not class_to_idx:
raise ValueError(
"'class_to_index' must have at least one entry to collect any samples."
)
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError(
"Both extensions and is_valid_file cannot be None or not None at the same time"
)
if extensions is not None:
def is_valid_file(x: str) -> bool:
return has_file_allowed_extension(
x, cast(Tuple[str, ...], extensions))
is_valid_file = cast(Callable[[str], bool], is_valid_file)
instances = []
available_classes = set()
for target_class in sorted(class_to_idx.keys()):
class_index = class_to_idx[target_class]
target_dir = os.path.join(directory, target_class)
if not os.path.isdir(target_dir):
continue
for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
for fname in sorted(fnames):
if is_valid_file(fname):
path = os.path.join(root, fname)
item = path, class_index
instances.append(item)
if target_class not in available_classes:
available_classes.add(target_class)
empty_classes = set(class_to_idx.keys()) - available_classes
if empty_classes:
msg = f"Found no valid file for the classes {', '.join(sorted(empty_classes))}. "
if extensions is not None:
msg += f"Supported extensions are: {', '.join(extensions)}"
raise FileNotFoundError(msg)
return instances
class DatasetFolder(VisionDataset):
"""A generic data loader.
This default directory structure can be customized by overriding the
:meth:`find_classes` method.
Args:
root (string): Root directory path.
loader (callable): A function to load a sample given its path.
extensions (tuple[string]): A list of allowed extensions.
            Both extensions and is_valid_file should not be passed.
transform (callable, optional): A function/transform that takes in
a sample and returns a transformed version.
            E.g., ``transforms.RandomCrop`` for images.
target_transform (callable, optional): A function/transform that takes
in the target and transforms it.
        is_valid_file (callable, optional): A function that takes the path of a file
            and checks if the file is a valid file (used to check for corrupt files).
            Both extensions and is_valid_file should not be passed.
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
samples (list): List of (sample path, class_index) tuples
targets (list): The class_index value for each image in the dataset
"""
def __init__(
self,
root: str,
loader: Callable[[str], Any],
extensions: Optional[Tuple[str, ...]]=None,
transform: Optional[Callable]=None,
target_transform: Optional[Callable]=None,
is_valid_file: Optional[Callable[[str], bool]]=None, ) -> None:
super(DatasetFolder, self).__init__(
root, transform=transform, target_transform=target_transform)
classes, class_to_idx = self.find_classes(self.root)
samples = self.make_dataset(self.root, class_to_idx, extensions,
is_valid_file)
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
@staticmethod
def make_dataset(
directory: str,
class_to_idx: Dict[str, int],
extensions: Optional[Tuple[str, ...]]=None,
is_valid_file: Optional[Callable[[str], bool]]=None, ) -> List[
Tuple[str, int]]:
"""Generates a list of samples of a form (path_to_sample, class).
This can be overridden to e.g. read files from a compressed zip file instead of from the disk.
Args:
directory (str): root dataset directory, corresponding to ``self.root``.
class_to_idx (Dict[str, int]): Dictionary mapping class name to class index.
extensions (optional): A list of allowed extensions.
Either extensions or is_valid_file should be passed. Defaults to None.
            is_valid_file (optional): A function that takes the path of a file
                and checks if the file is a valid file
                (used to check for corrupt files). Both extensions and
                is_valid_file should not be passed. Defaults to None.
Raises:
ValueError: In case ``class_to_idx`` is empty.
ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None.
FileNotFoundError: In case no valid file was found for any class.
Returns:
List[Tuple[str, int]]: samples of a form (path_to_sample, class)
"""
if class_to_idx is None:
# prevent potential bug since make_dataset() would use the class_to_idx logic of the
# find_classes() function, instead of using that of the find_classes() method, which
# is potentially overridden and thus could have a different logic.
raise ValueError("The class_to_idx parameter cannot be None.")
return make_dataset(
directory,
class_to_idx,
extensions=extensions,
is_valid_file=is_valid_file)
def find_classes(self, directory: str) -> Tuple[List[str], Dict[str, int]]:
"""Find the class folders in a dataset structured as follows::
directory/
├── class_x
│ ├── xxx.ext
│ ├── xxy.ext
│ └── ...
│ └── xxz.ext
└── class_y
├── 123.ext
├── nsdf3.ext
└── ...
└── asd932_.ext
This method can be overridden to only consider
a subset of classes, or to adapt to a different dataset directory structure.
Args:
directory(str): Root directory path, corresponding to ``self.root``
Raises:
FileNotFoundError: If ``dir`` has no class folders.
Returns:
(Tuple[List[str], Dict[str, int]]): List of all classes and dictionary mapping each class to an index.
"""
return find_classes(directory)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self) -> int:
return len(self.samples)
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',
'.tiff', '.webp')
def pil_loader(path: str) -> Image.Image:
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def default_loader(path: str) -> Any:
return pil_loader(path)
class ImageFolder(DatasetFolder):
"""A generic data loader where the images are arranged in this way by default: ::
root/dog/xxx.png
root/dog/xxy.png
root/dog/[...]/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/[...]/asd932_.png
Args:
root (string): Root directory path.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
        is_valid_file (callable, optional): A function that takes the path of an Image file
            and checks if the file is a valid file (used to check for corrupt files)
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
"""
def __init__(
self,
root: str,
transform: Optional[Callable]=None,
target_transform: Optional[Callable]=None,
loader: Callable[[str], Any]=default_loader,
is_valid_file: Optional[Callable[[str], bool]]=None, ):
super(ImageFolder, self).__init__(
root,
loader,
IMG_EXTENSIONS if is_valid_file is None else None,
transform=transform,
target_transform=target_transform,
is_valid_file=is_valid_file)
self.imgs = self.samples
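# Usage sketch (hypothetical paths):
#   dataset = ImageFolder('data/train', transform=None)
#   sample, target = dataset[0]  # PIL image and its class index
#   len(dataset)                 # number of discovered image files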
| 37.109677
| 114
| 0.617872
|
from .vision import VisionDataset
from PIL import Image
import os
import os.path
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
def has_file_allowed_extension(filename: str,
extensions: Tuple[str, ...]) -> bool:
return filename.lower().endswith(extensions)
def is_image_file(filename: str) -> bool:
return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def find_classes(directory: str) -> Tuple[List[str], Dict[str, int]]:
classes = sorted(
entry.name for entry in os.scandir(directory) if entry.is_dir())
if not classes:
raise FileNotFoundError(
f"Couldn't find any class folder in {directory}.")
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
def make_dataset(
directory: str,
class_to_idx: Optional[Dict[str, int]]=None,
extensions: Optional[Tuple[str, ...]]=None,
is_valid_file: Optional[Callable[[str], bool]]=None, ) -> List[Tuple[
str, int]]:
directory = os.path.expanduser(directory)
if class_to_idx is None:
_, class_to_idx = find_classes(directory)
elif not class_to_idx:
raise ValueError(
"'class_to_index' must have at least one entry to collect any samples."
)
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError(
"Both extensions and is_valid_file cannot be None or not None at the same time"
)
if extensions is not None:
def is_valid_file(x: str) -> bool:
return has_file_allowed_extension(
x, cast(Tuple[str, ...], extensions))
is_valid_file = cast(Callable[[str], bool], is_valid_file)
instances = []
available_classes = set()
for target_class in sorted(class_to_idx.keys()):
class_index = class_to_idx[target_class]
target_dir = os.path.join(directory, target_class)
if not os.path.isdir(target_dir):
continue
for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
for fname in sorted(fnames):
if is_valid_file(fname):
path = os.path.join(root, fname)
item = path, class_index
instances.append(item)
if target_class not in available_classes:
available_classes.add(target_class)
empty_classes = set(class_to_idx.keys()) - available_classes
if empty_classes:
msg = f"Found no valid file for the classes {', '.join(sorted(empty_classes))}. "
if extensions is not None:
msg += f"Supported extensions are: {', '.join(extensions)}"
raise FileNotFoundError(msg)
return instances
class DatasetFolder(VisionDataset):
def __init__(
self,
root: str,
loader: Callable[[str], Any],
extensions: Optional[Tuple[str, ...]]=None,
transform: Optional[Callable]=None,
target_transform: Optional[Callable]=None,
is_valid_file: Optional[Callable[[str], bool]]=None, ) -> None:
super(DatasetFolder, self).__init__(
root, transform=transform, target_transform=target_transform)
classes, class_to_idx = self.find_classes(self.root)
samples = self.make_dataset(self.root, class_to_idx, extensions,
is_valid_file)
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
@staticmethod
def make_dataset(
directory: str,
class_to_idx: Dict[str, int],
extensions: Optional[Tuple[str, ...]]=None,
is_valid_file: Optional[Callable[[str], bool]]=None, ) -> List[
Tuple[str, int]]:
if class_to_idx is None:
# prevent potential bug since make_dataset() would use the class_to_idx logic of the
# find_classes() function, instead of using that of the find_classes() method, which
# is potentially overridden and thus could have a different logic.
raise ValueError("The class_to_idx parameter cannot be None.")
return make_dataset(
directory,
class_to_idx,
extensions=extensions,
is_valid_file=is_valid_file)
def find_classes(self, directory: str) -> Tuple[List[str], Dict[str, int]]:
return find_classes(directory)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
path, target = self.samples[index]
sample = self.loader(path)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self) -> int:
return len(self.samples)
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',
'.tiff', '.webp')
def pil_loader(path: str) -> Image.Image:
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def default_loader(path: str) -> Any:
return pil_loader(path)
class ImageFolder(DatasetFolder):
def __init__(
self,
root: str,
transform: Optional[Callable]=None,
target_transform: Optional[Callable]=None,
loader: Callable[[str], Any]=default_loader,
is_valid_file: Optional[Callable[[str], bool]]=None, ):
super(ImageFolder, self).__init__(
root,
loader,
IMG_EXTENSIONS if is_valid_file is None else None,
transform=transform,
target_transform=target_transform,
is_valid_file=is_valid_file)
self.imgs = self.samples
| true
| true
|
1c400d658bd27193884881656fdae4b634a9be03
| 5,179
|
py
|
Python
|
rest_framework_tus/middleware.py
|
v01dXYZ/drf-tus
|
50146fdfcfa062421671e7dee283c7905e91da17
|
[
"MIT"
] | 21
|
2017-03-09T14:38:15.000Z
|
2021-10-18T21:45:11.000Z
|
rest_framework_tus/middleware.py
|
v01dXYZ/drf-tus
|
50146fdfcfa062421671e7dee283c7905e91da17
|
[
"MIT"
] | 10
|
2017-05-29T09:22:42.000Z
|
2020-07-08T10:03:35.000Z
|
rest_framework_tus/middleware.py
|
v01dXYZ/drf-tus
|
50146fdfcfa062421671e7dee283c7905e91da17
|
[
"MIT"
] | 19
|
2017-06-15T13:03:17.000Z
|
2021-08-08T03:30:39.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http.response import HttpResponse
from rest_framework import status
from . import tus_api_version, constants
from .compat import decode_base64
class TusMiddleware(object):
def __init__(self, get_response=None):
self.get_response = get_response
def __call__(self, request):
response = None
if hasattr(self, 'process_request'):
response = self.process_request(request)
if not response:
response = self.get_response(request)
if hasattr(self, 'process_response'):
response = self.process_response(request, response)
return response
def process_request(self, request):
# Parse tus client version
self.parse_tus_version(request)
# Parse upload length
self.parse_upload_length(request)
        # Parse upload offset
self.parse_upload_offset(request)
        # Parse upload defer length
self.parse_upload_defer_length(request)
# Parse upload metadata
self.parse_upload_metadata(request)
# Parse upload checksum
self.parse_upload_checksum(request)
def process_response(self, request, response):
if 'Tus-Resumable' not in response:
response['Tus-Resumable'] = tus_api_version
return response
@classmethod
def parse_tus_version(cls, request):
tus_version = cls.get_header(request, 'Tus-Resumable', None)
if tus_version is None:
return
        # Set tus version
setattr(request, constants.TUS_RESUMABLE_FIELD_NAME, tus_version)
@classmethod
    def parse_upload_defer_length(cls, request):
upload_defer_length = cls.get_header(request, 'Upload-Defer-Length', None)
if not upload_defer_length:
return
upload_defer_length = int(upload_defer_length)
if upload_defer_length != 1:
return HttpResponse('Invalid value for "Upload-Defer-Length" header: {}.'.format(upload_defer_length),
status=status.HTTP_400_BAD_REQUEST)
# Set upload defer length
setattr(request, constants.UPLOAD_DEFER_LENGTH_FIELD_NAME, upload_defer_length)
@classmethod
def parse_upload_offset(cls, request):
upload_offset = cls.get_header(request, 'Upload-Offset', None)
if upload_offset is None:
return
        # Set upload offset
setattr(request, constants.UPLOAD_OFFSET_NAME, int(upload_offset))
@classmethod
def parse_upload_length(cls, request):
upload_length = cls.get_header(request, 'Upload-Length', None)
if upload_length is None:
return
# Set upload length
setattr(request, constants.UPLOAD_LENGTH_FIELD_NAME, int(upload_length))
@classmethod
def parse_upload_checksum(cls, request):
upload_checksum_header = cls.get_header(request, 'Upload-Checksum', None)
if upload_checksum_header is None:
return
upload_checksum = list(upload_checksum_header.split(' '))
if len(upload_checksum) != 2:
return HttpResponse('Invalid value for "Upload-Checksum" header: {}.'.format(upload_checksum_header),
status=status.HTTP_400_BAD_REQUEST)
# Set upload checksum
setattr(request, constants.UPLOAD_CHECKSUM_FIELD_NAME, upload_checksum)
@classmethod
def parse_upload_metadata(cls, request):
upload_meta_header = cls.get_header(request, 'Upload-Metadata', None)
if upload_meta_header is None:
return
upload_metadata = {}
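        # e.g. 'filename ZXhhbXBsZS5iaW4=,relativePath bnVsbA=='
        # parses to {'filename': 'example.bin', 'relativePath': 'null'}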
for key_value_pair in upload_meta_header.split(','):
# Trim whitespace
key_value_pair = key_value_pair.strip()
# Split key and value
key, value = key_value_pair.split(' ')
# Store data
upload_metadata[key] = decode_base64(value.encode('ascii')).decode('utf-8')
# Set upload_metadata
setattr(request, constants.UPLOAD_METADATA_FIELD_NAME, upload_metadata)
@classmethod
def get_header(cls, request, key, default_value=None):
# First, we try to retrieve the key in the "headers" dictionary
result = request.META.get('headers', {}).get(key, None)
# If we didn't find the key, or the value was "None", try to use the "HTTP_{uppercased-key}" key
if result is None:
custom_value = 'HTTP_{}'.format(key.replace('-', '_').upper())
result = request.META.get(custom_value, default_value)
# If we didn't find the key, or the value was "None", try to use the "HTTP_X_{uppercased-key}" key
if result is None:
# https://tools.ietf.org/html/rfc6648
custom_value = 'HTTP_X_{}'.format(key.replace('-', '_').upper())
result = request.META.get(custom_value, default_value)
# If we still didn't find the key, or the value was "None", return the default value
if result is None:
result = default_value
# Return the result
return result
| 32.987261
| 114
| 0.652056
|
from __future__ import unicode_literals
from django.http.response import HttpResponse
from rest_framework import status
from . import tus_api_version, constants
from .compat import decode_base64
class TusMiddleware(object):
def __init__(self, get_response=None):
self.get_response = get_response
def __call__(self, request):
response = None
if hasattr(self, 'process_request'):
response = self.process_request(request)
if not response:
response = self.get_response(request)
if hasattr(self, 'process_response'):
response = self.process_response(request, response)
return response
def process_request(self, request):
self.parse_tus_version(request)
self.parse_upload_length(request)
self.parse_upload_offset(request)
self.parse_upload_defer_length(request)
self.parse_upload_metadata(request)
self.parse_upload_checksum(request)
def process_response(self, request, response):
if 'Tus-Resumable' not in response:
response['Tus-Resumable'] = tus_api_version
return response
@classmethod
def parse_tus_version(cls, request):
tus_version = cls.get_header(request, 'Tus-Resumable', None)
if tus_version is None:
return
setattr(request, constants.TUS_RESUMABLE_FIELD_NAME, tus_version)
@classmethod
    def parse_upload_defer_length(cls, request):
upload_defer_length = cls.get_header(request, 'Upload-Defer-Length', None)
if not upload_defer_length:
return
upload_defer_length = int(upload_defer_length)
if upload_defer_length != 1:
return HttpResponse('Invalid value for "Upload-Defer-Length" header: {}.'.format(upload_defer_length),
status=status.HTTP_400_BAD_REQUEST)
setattr(request, constants.UPLOAD_DEFER_LENGTH_FIELD_NAME, upload_defer_length)
@classmethod
def parse_upload_offset(cls, request):
upload_offset = cls.get_header(request, 'Upload-Offset', None)
if upload_offset is None:
return
setattr(request, constants.UPLOAD_OFFSET_NAME, int(upload_offset))
@classmethod
def parse_upload_length(cls, request):
upload_length = cls.get_header(request, 'Upload-Length', None)
if upload_length is None:
return
setattr(request, constants.UPLOAD_LENGTH_FIELD_NAME, int(upload_length))
@classmethod
def parse_upload_checksum(cls, request):
upload_checksum_header = cls.get_header(request, 'Upload-Checksum', None)
if upload_checksum_header is None:
return
upload_checksum = list(upload_checksum_header.split(' '))
if len(upload_checksum) != 2:
return HttpResponse('Invalid value for "Upload-Checksum" header: {}.'.format(upload_checksum_header),
status=status.HTTP_400_BAD_REQUEST)
setattr(request, constants.UPLOAD_CHECKSUM_FIELD_NAME, upload_checksum)
@classmethod
def parse_upload_metadata(cls, request):
upload_meta_header = cls.get_header(request, 'Upload-Metadata', None)
if upload_meta_header is None:
return
upload_metadata = {}
for key_value_pair in upload_meta_header.split(','):
key_value_pair = key_value_pair.strip()
key, value = key_value_pair.split(' ')
upload_metadata[key] = decode_base64(value.encode('ascii')).decode('utf-8')
setattr(request, constants.UPLOAD_METADATA_FIELD_NAME, upload_metadata)
@classmethod
def get_header(cls, request, key, default_value=None):
result = request.META.get('headers', {}).get(key, None)
if result is None:
custom_value = 'HTTP_{}'.format(key.replace('-', '_').upper())
result = request.META.get(custom_value, default_value)
# If we didn't find the key, or the value was "None", try to use the "HTTP_X_{uppercased-key}" key
if result is None:
custom_value = 'HTTP_X_{}'.format(key.replace('-', '_').upper())
result = request.META.get(custom_value, default_value)
if result is None:
result = default_value
# Return the result
return result
| true
| true
|
1c400ded776e891dffc01de25e92db74f075460f
| 3,957
|
py
|
Python
|
CIM14/IEC61970/Core/Substation.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 58
|
2015-04-22T10:41:03.000Z
|
2022-03-29T16:04:34.000Z
|
CIM14/IEC61970/Core/Substation.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 12
|
2015-08-26T03:57:23.000Z
|
2020-12-11T20:14:42.000Z
|
CIM14/IEC61970/Core/Substation.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 35
|
2015-01-10T12:21:03.000Z
|
2020-09-09T08:18:16.000Z
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.IEC61970.Core.EquipmentContainer import EquipmentContainer
class Substation(EquipmentContainer):
"""A collection of equipment for purposes other than generation or utilization, through which electric energy in bulk is passed for the purposes of switching or modifying its characteristics.
"""
def __init__(self, VoltageLevels=None, Bays=None, Region=None, *args, **kw_args):
"""Initialises a new 'Substation' instance.
@param VoltageLevels: The association is used in the naming hierarchy.
@param Bays: The association is used in the naming hierarchy.
@param Region: The association is used in the naming hierarchy.
"""
self._VoltageLevels = []
self.VoltageLevels = [] if VoltageLevels is None else VoltageLevels
self._Bays = []
self.Bays = [] if Bays is None else Bays
self._Region = None
self.Region = Region
super(Substation, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["VoltageLevels", "Bays", "Region"]
_many_refs = ["VoltageLevels", "Bays"]
def getVoltageLevels(self):
"""The association is used in the naming hierarchy.
"""
return self._VoltageLevels
def setVoltageLevels(self, value):
for x in self._VoltageLevels:
x.Substation = None
for y in value:
y._Substation = self
self._VoltageLevels = value
VoltageLevels = property(getVoltageLevels, setVoltageLevels)
def addVoltageLevels(self, *VoltageLevels):
for obj in VoltageLevels:
obj.Substation = self
def removeVoltageLevels(self, *VoltageLevels):
for obj in VoltageLevels:
obj.Substation = None
def getBays(self):
"""The association is used in the naming hierarchy.
"""
return self._Bays
def setBays(self, value):
for x in self._Bays:
x.Substation = None
for y in value:
y._Substation = self
self._Bays = value
Bays = property(getBays, setBays)
def addBays(self, *Bays):
for obj in Bays:
obj.Substation = self
def removeBays(self, *Bays):
for obj in Bays:
obj.Substation = None
def getRegion(self):
"""The association is used in the naming hierarchy.
"""
return self._Region
def setRegion(self, value):
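        # keep the bidirectional association consistent: detach this
        # substation from the old region, then attach it to the new one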
if self._Region is not None:
filtered = [x for x in self.Region.Substations if x != self]
self._Region._Substations = filtered
self._Region = value
if self._Region is not None:
if self not in self._Region._Substations:
self._Region._Substations.append(self)
Region = property(getRegion, setRegion)
| 35.017699
| 195
| 0.669194
|
from CIM14.IEC61970.Core.EquipmentContainer import EquipmentContainer
class Substation(EquipmentContainer):
def __init__(self, VoltageLevels=None, Bays=None, Region=None, *args, **kw_args):
self._VoltageLevels = []
self.VoltageLevels = [] if VoltageLevels is None else VoltageLevels
self._Bays = []
self.Bays = [] if Bays is None else Bays
self._Region = None
self.Region = Region
super(Substation, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["VoltageLevels", "Bays", "Region"]
_many_refs = ["VoltageLevels", "Bays"]
def getVoltageLevels(self):
return self._VoltageLevels
def setVoltageLevels(self, value):
for x in self._VoltageLevels:
x.Substation = None
for y in value:
y._Substation = self
self._VoltageLevels = value
VoltageLevels = property(getVoltageLevels, setVoltageLevels)
def addVoltageLevels(self, *VoltageLevels):
for obj in VoltageLevels:
obj.Substation = self
def removeVoltageLevels(self, *VoltageLevels):
for obj in VoltageLevels:
obj.Substation = None
def getBays(self):
return self._Bays
def setBays(self, value):
for x in self._Bays:
x.Substation = None
for y in value:
y._Substation = self
self._Bays = value
Bays = property(getBays, setBays)
def addBays(self, *Bays):
for obj in Bays:
obj.Substation = self
def removeBays(self, *Bays):
for obj in Bays:
obj.Substation = None
def getRegion(self):
return self._Region
def setRegion(self, value):
if self._Region is not None:
filtered = [x for x in self.Region.Substations if x != self]
self._Region._Substations = filtered
self._Region = value
if self._Region is not None:
if self not in self._Region._Substations:
self._Region._Substations.append(self)
Region = property(getRegion, setRegion)
| true
| true
|
1c400e32a35995c7e40f35eeb37445d32d6544e3
| 5,201
|
py
|
Python
|
chaospy/quadrature/gauss_legendre.py
|
krystophny/chaospy
|
e09f8e3f6dfc26145f15774edd5b03665140712f
|
[
"MIT"
] | 1
|
2019-12-20T00:32:44.000Z
|
2019-12-20T00:32:44.000Z
|
chaospy/quadrature/gauss_legendre.py
|
QianWanghhu/chaospy
|
18ff6c4fc56c632825e53fb24e17de51a7febd7d
|
[
"MIT"
] | null | null | null |
chaospy/quadrature/gauss_legendre.py
|
QianWanghhu/chaospy
|
18ff6c4fc56c632825e53fb24e17de51a7febd7d
|
[
"MIT"
] | null | null | null |
r"""
The Gauss-Legendre quadrature rule is properly supported in :ref:`gaussian`.
However, as Gauss-Legendre is a special case where the weight function is
constant, it can in principle be used to integrate any weighting function. In
other words, this is the same Gauss-Legendre integration rule, but only in the
context of uniform distribution as weight function. Normalization of the
weights will be used to achieve the general integration form.
It is also worth noting that this specific implementation of Gauss-Legendre is
faster to compute than the general version in :ref:`gaussian`.
Example usage
-------------
The first few orders::
>>> distribution = chaospy.Uniform(0, 1)
>>> for order in [0, 1, 2, 3]:
... abscissas, weights = chaospy.generate_quadrature(
... order, distribution, rule="gauss_legendre")
... print(order, numpy.around(abscissas, 3),
... numpy.around(weights, 3))
0 [[0.5]] [1.]
1 [[0.211 0.789]] [0.5 0.5]
2 [[0.113 0.5 0.887]] [0.278 0.444 0.278]
3 [[0.069 0.33 0.67 0.931]] [0.174 0.326 0.326 0.174]
Using an alternative distribution::
>>> distribution = chaospy.Beta(2, 4)
>>> for order in [0, 1, 2, 3]:
... abscissas, weights = chaospy.generate_quadrature(
... order, distribution, rule="gauss_legendre")
... print(order, numpy.around(abscissas, 3),
... numpy.around(weights, 3))
0 [[0.5]] [1.]
1 [[0.211 0.789]] [0.933 0.067]
2 [[0.113 0.5 0.887]] [0.437 0.556 0.007]
3 [[0.069 0.33 0.67 0.931]] [0.195 0.647 0.157 0.001]
The abscissas stay the same, but the weights are re-adjusted for the new
weight function.
"""
from __future__ import print_function
import numpy
from .recurrence import (
construct_recurrence_coefficients, coefficients_to_quadrature)
from .combine import combine_quadrature
def quad_gauss_legendre(
order,
domain=(0, 1),
rule="fejer",
accuracy=100,
recurrence_algorithm="",
):
r"""
Generate the quadrature nodes and weights in Gauss-Legendre quadrature.
    Note that this rule exists to allow integrating a function against a weight
    function without folding the weight into the quadrature rule itself. Like:
    .. math::
\int_a^b p(x) f(x) dx \approx \sum_i p(X_i) f(X_i) W_i
instead of the more traditional:
    .. math::
\int_a^b p(x) f(x) dx \approx \sum_i f(X_i) W_i
To get the behavior where the weight function is taken into consideration,
use :func:`~chaospy.quadrature.gaussian.quad_gaussian`.
Args:
order (int, numpy.ndarray):
Quadrature order.
domain (chaospy.distributions.baseclass.Dist, numpy.ndarray):
Either distribution or bounding of interval to integrate over.
rule (str):
In the case of ``lanczos`` or ``stieltjes``, defines the
proxy-integration scheme.
accuracy (int):
In the case ``rule`` is used, defines the quadrature order of the
scheme used. In practice, must be at least as large as ``order``.
recurrence_algorithm (str):
Name of the algorithm used to generate abscissas and weights. If
omitted, ``analytical`` will be tried first, and ``stieltjes`` used
if that fails.
Returns:
(numpy.ndarray, numpy.ndarray):
abscissas:
The quadrature points for where to evaluate the model function
with ``abscissas.shape == (len(dist), N)`` where ``N`` is the
number of samples.
weights:
The quadrature weights with ``weights.shape == (N,)``.
Example:
>>> abscissas, weights = quad_gauss_legendre(3)
>>> print(numpy.around(abscissas, 4))
[[0.0694 0.33 0.67 0.9306]]
>>> print(numpy.around(weights, 4))
[0.1739 0.3261 0.3261 0.1739]
"""
from ..distributions.baseclass import Dist
from ..distributions.collection import Uniform
if isinstance(domain, Dist):
abscissas, weights = quad_gauss_legendre(
order, domain.range(), rule, accuracy, recurrence_algorithm)
pdf = domain.pdf(abscissas)
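        # re-weight the plain Legendre weights by the target density, then
        # renormalize below so they still sum to one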
if len(domain) > 1:
weights = (weights.T*pdf).T
else:
weights *= pdf.flatten()
weights /= numpy.sum(weights)
return abscissas, weights
order = numpy.asarray(order, dtype=int).flatten()
lower, upper = numpy.array(domain)
lower = numpy.asarray(lower).flatten()
upper = numpy.asarray(upper).flatten()
dim = max(lower.size, upper.size, order.size)
order = numpy.ones(dim, dtype=int)*order
lower = numpy.ones(dim)*lower
upper = numpy.ones(dim)*upper
coefficients = construct_recurrence_coefficients(
numpy.max(order), Uniform(0, 1), rule, accuracy, recurrence_algorithm)
abscissas, weights = zip(*[coefficients_to_quadrature(
coefficients[:order_+1]) for order_ in order])
abscissas = list(numpy.asarray(abscissas).reshape(dim, -1))
weights = list(numpy.asarray(weights).reshape(dim, -1))
return combine_quadrature(abscissas, weights, (lower, upper))
| 36.626761
| 79
| 0.63757
|
from __future__ import print_function
import numpy
from .recurrence import (
construct_recurrence_coefficients, coefficients_to_quadrature)
from .combine import combine_quadrature
def quad_gauss_legendre(
order,
domain=(0, 1),
rule="fejer",
accuracy=100,
recurrence_algorithm="",
):
from ..distributions.baseclass import Dist
from ..distributions.collection import Uniform
if isinstance(domain, Dist):
abscissas, weights = quad_gauss_legendre(
order, domain.range(), rule, accuracy, recurrence_algorithm)
pdf = domain.pdf(abscissas)
if len(domain) > 1:
weights = (weights.T*pdf).T
else:
weights *= pdf.flatten()
weights /= numpy.sum(weights)
return abscissas, weights
order = numpy.asarray(order, dtype=int).flatten()
lower, upper = numpy.array(domain)
lower = numpy.asarray(lower).flatten()
upper = numpy.asarray(upper).flatten()
dim = max(lower.size, upper.size, order.size)
order = numpy.ones(dim, dtype=int)*order
lower = numpy.ones(dim)*lower
upper = numpy.ones(dim)*upper
coefficients = construct_recurrence_coefficients(
numpy.max(order), Uniform(0, 1), rule, accuracy, recurrence_algorithm)
abscissas, weights = zip(*[coefficients_to_quadrature(
coefficients[:order_+1]) for order_ in order])
abscissas = list(numpy.asarray(abscissas).reshape(dim, -1))
weights = list(numpy.asarray(weights).reshape(dim, -1))
return combine_quadrature(abscissas, weights, (lower, upper))
| true
| true
|
1c400e6dc56b89f7054a5122914fe338bc57dd00
| 12,317
|
py
|
Python
|
management/test/test_endpoints/test_endpoint_utils.py
|
poussa/inference-model-manager
|
33e487be32b8487290a296daaf90529263a09801
|
[
"Apache-2.0"
] | null | null | null |
management/test/test_endpoints/test_endpoint_utils.py
|
poussa/inference-model-manager
|
33e487be32b8487290a296daaf90529263a09801
|
[
"Apache-2.0"
] | null | null | null |
management/test/test_endpoints/test_endpoint_utils.py
|
poussa/inference-model-manager
|
33e487be32b8487290a296daaf90529263a09801
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from management_api.endpoints.endpoint_utils import create_endpoint, delete_endpoint, \
create_url_to_service, update_endpoint, scale_endpoint, list_endpoints, view_endpoint
from kubernetes.client.rest import ApiException
import pytest
from unittest.mock import Mock
from test_utils.token_stuff import user_token
from management_api.utils.errors_handling import KubernetesCreateException, \
KubernetesDeleteException, KubernetesUpdateException, KubernetesGetException, \
TenantDoesNotExistException, EndpointDoesNotExistException
@pytest.mark.parametrize("tenant_exception, raise_error", [(True, False), (False, True)])
def test_create_endpoint(mocker, url_to_service_endpoint_utils,
custom_client_mock_endpoint_utils, tenant_exception,
raise_error, api_client_mock_endpoint_utils,
apps_client_mock_endpoint_utils):
tenant_exists_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.tenant_exists')
if tenant_exception:
with pytest.raises(TenantDoesNotExistException):
tenant_exists_mock.return_value = False
create_endpoint(parameters={'endpointName': "test", 'resources': {}}, namespace="test",
id_token=user_token)
ing_ip_mock, ing_ip_mock_return_values = url_to_service_endpoint_utils
create_custom_client_mock, custom_client = custom_client_mock_endpoint_utils
create_apps_client_mock, apps_client = apps_client_mock_endpoint_utils
verify_endpoint_amount_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.verify_endpoint_amount')
verify_endpoint_amount_mock.return_value = None
validate_quota_compliance_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.validate_quota_compliance')
parameters_resources_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.transform_quota')
if raise_error:
with pytest.raises(KubernetesCreateException):
custom_client.create_namespaced_custom_object.side_effect = ApiException()
create_endpoint(parameters={'endpointName': "test", 'resources': {}}, namespace="test",
id_token=user_token)
else:
tenant_exists_mock.return_value = True
create_endpoint(parameters={'endpointName': "test", 'resources': {}}, namespace="test",
id_token=user_token)
ing_ip_mock.assert_called_once()
validate_quota_compliance_mock.assert_called_once()
parameters_resources_mock.assert_called_once()
custom_client.create_namespaced_custom_object.assert_called_once()
@pytest.mark.parametrize("raise_error", [(False), (True)])
def test_delete_endpoint(custom_client_mock_endpoint_utils,
url_to_service_endpoint_utils, raise_error):
ing_ip_mock, ing_ip_mock_return_values = url_to_service_endpoint_utils
create_custom_client_mock, custom_client = custom_client_mock_endpoint_utils
if raise_error:
with pytest.raises(KubernetesDeleteException):
custom_client.delete_namespaced_custom_object.side_effect = ApiException()
delete_endpoint(parameters={'endpointName': 'test'}, namespace="test",
id_token=user_token)
else:
delete_endpoint(parameters={'endpointName': 'test'}, namespace="test",
id_token=user_token)
ing_ip_mock.assert_called_once()
custom_client.delete_namespaced_custom_object.assert_called_once()
create_custom_client_mock.assert_called_once()
call_data = [(scale_endpoint, {'replicas': 2}),
(update_endpoint, {'modelName': 'test', 'modelVersion': 2})]
@pytest.mark.parametrize("method, arguments", call_data)
def test_read_endpoint_fail(custom_client_mock_endpoint_utils, api_client_mock_endpoint_utils,
url_to_service_endpoint_utils, method, arguments):
ing_ip_mock, ing_ip_mock_return_values = url_to_service_endpoint_utils
create_custom_client_mock, custom_client = custom_client_mock_endpoint_utils
with pytest.raises(KubernetesGetException):
custom_client.get_namespaced_custom_object.side_effect = ApiException()
method(parameters=arguments, namespace="test", endpoint_name="test",
id_token=user_token)
create_custom_client_mock.assert_called_once()
custom_client.get_namespaced_custom_object.assert_called_once()
@pytest.mark.parametrize("method, arguments", call_data)
def test_patch_endpoint_fail(custom_client_mock_endpoint_utils,
url_to_service_endpoint_utils, method, arguments):
ing_ip_mock, ing_ip_mock_return_values = url_to_service_endpoint_utils
create_custom_client_mock, custom_client = custom_client_mock_endpoint_utils
with pytest.raises(KubernetesUpdateException):
custom_client.get_namespaced_custom_object.return_value = {'spec': {}}
custom_client.patch_namespaced_custom_object.side_effect = ApiException()
method(parameters=arguments, namespace="test", endpoint_name="test",
id_token=user_token)
create_custom_client_mock.assert_called_once()
custom_client.get_namespaced_custom_object.assert_called_once()
custom_client.patch_namespaced_custom_object.assert_called_once()
@pytest.mark.parametrize("method, arguments", call_data)
def test_patch_endpoint_success(custom_client_mock_endpoint_utils,
url_to_service_endpoint_utils, method, arguments):
ing_ip_mock, ing_ip_mock_return_values = url_to_service_endpoint_utils
create_custom_client_mock, custom_client = custom_client_mock_endpoint_utils
custom_client.get_namespaced_custom_object.return_value = {'spec': {}}
method(parameters=arguments, namespace="test", endpoint_name="test",
id_token=user_token)
create_custom_client_mock.assert_called_once()
custom_client.get_namespaced_custom_object.assert_called_once()
custom_client.patch_namespaced_custom_object.assert_called_once()
ing_ip_mock.assert_called_once()
@pytest.mark.parametrize("tenant_exception, k8s_exception",
[(True, False),
(False, True),
(False, False)])
def test_list_endpoints(mocker, apps_client_mock_endpoint_utils, tenant_exception, k8s_exception):
tenant_exists_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.tenant_exists')
create_apps_client_mock, apps_client = apps_client_mock_endpoint_utils
if tenant_exception:
with pytest.raises(TenantDoesNotExistException):
tenant_exists_mock.return_value = False
list_endpoints(namespace="test", id_token=user_token)
else:
tenant_exists_mock.return_value = True
if k8s_exception:
with pytest.raises(KubernetesGetException):
apps_client.list_namespaced_deployment.side_effect = ApiException()
list_endpoints(namespace="test", id_token=user_token)
else:
endpoints_name_status_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.get_endpoints_name_status')
endpoints_name_status_mock.return_value = {}
apps_client.list_namespaced_deployment.return_value = {}
list_endpoints(namespace="test", id_token=user_token)
endpoints_name_status_mock.assert_called_once()
create_apps_client_mock.assert_called_once()
apps_client.list_namespaced_deployment.assert_called_once()
tenant_exists_mock.assert_called_once()
def test_create_url_to_service(mocker):
api_client = Mock()
create_custom_client_mock = mocker.patch('management_api.endpoints.endpoint_utils.'
'get_k8s_api_client')
create_custom_client_mock.return_value = api_client
mock_return_value = ['t_end-t_ns.default', 443]
external_address_mock = mocker.patch('management_api.utils.kubernetes_resources.'
'get_ingress_external_ip')
external_address_mock.return_value = mock_return_value
external_address = "{}:{}".format(mock_return_value[0], mock_return_value[1])
expected_output = {'url': external_address}
output = create_url_to_service(endpoint_name='t_end', namespace="t_ns")
assert expected_output == output
@pytest.mark.parametrize("tenant_exception, endpoint_exception",
[(True, False),
(False, True)])
def test_view_endpoint_fail(mocker, tenant_exception, endpoint_exception):
tenant_exists_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.tenant_exists')
endpoint_exists_mock = mocker.patch('management_api.endpoints.endpoint_utils.endpoint_exists')
if tenant_exception:
with pytest.raises(TenantDoesNotExistException):
tenant_exists_mock.return_value = False
view_endpoint(namespace="test", endpoint_name="test", id_token=user_token)
elif endpoint_exception:
with pytest.raises(EndpointDoesNotExistException):
endpoint_exists_mock.return_value = False
view_endpoint(namespace="test", endpoint_name="test", id_token=user_token)
    tenant_exists_mock.assert_called_once()
    endpoint_exists_mock.assert_called_once()
def test_view_endpoint_success(mocker, api_client_mock_endpoint_utils,
custom_client_mock_endpoint_utils, apps_client_mock_endpoint_utils):
tenant_exists_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.tenant_exists')
endpoint_exists_mock = mocker.patch('management_api.endpoints.endpoint_utils.endpoint_exists')
tenant_exists_mock.return_value = True
endpoint_exists_mock.return_value = True
create_api_client_mock, api_client = api_client_mock_endpoint_utils
create_custom_client_mock, custom_client = custom_client_mock_endpoint_utils
create_apps_client_mock, apps_client = apps_client_mock_endpoint_utils
endpoint_status_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.get_endpoint_status', api_instance=api_client)
endpoint_status_mock.return_value = {}
model_path_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.create_url_to_service')
model_path_mock.return_value = {}
subject_name_resources_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.get_crd_subject_name_and_resources',
custom_api_instance=custom_client)
subject_name_resources_mock.return_value = "", ""
replicas_mock = mocker.patch('management_api.endpoints.endpoint_utils.get_replicas',
apps_api_instance=apps_client)
replicas_mock.return_value = 1
view_endpoint(namespace="test", endpoint_name="test", id_token=user_token)
tenant_exists_mock.assert_called_once()
endpoint_exists_mock.assert_called_once()
endpoint_status_mock.assert_called_once()
model_path_mock.assert_called_once()
subject_name_resources_mock.assert_called_once()
replicas_mock.assert_called_once()
create_api_client_mock.assert_called_once()
create_custom_client_mock.assert_called_once()
create_apps_client_mock.assert_called_once()
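# --- Editor's sketch (assumption, not part of the original test module) ---
# The fixtures used above (custom_client_mock_endpoint_utils and friends) live
# in the suite's conftest and are assumed to yield a (patch_mock, client_mock)
# pair. A minimal hypothetical reconstruction; the patch target name is an
# assumption, not necessarily the project's real helper:
@pytest.fixture
def custom_client_mock_endpoint_utils_sketch(mocker):
    custom_client = Mock()
    create_custom_client_mock = mocker.patch(
        'management_api.endpoints.endpoint_utils.get_k8s_api_custom_client',
        return_value=custom_client)
    return create_custom_client_mock, custom_client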
| 51.320833
| 100
| 0.731022
|
from management_api.endpoints.endpoint_utils import create_endpoint, delete_endpoint, \
create_url_to_service, update_endpoint, scale_endpoint, list_endpoints, view_endpoint
from kubernetes.client.rest import ApiException
import pytest
from unittest.mock import Mock
from test_utils.token_stuff import user_token
from management_api.utils.errors_handling import KubernetesCreateException, \
KubernetesDeleteException, KubernetesUpdateException, KubernetesGetException, \
TenantDoesNotExistException, EndpointDoesNotExistException
@pytest.mark.parametrize("tenant_exception, raise_error", [(True, False), (False, True)])
def test_create_endpoint(mocker, url_to_service_endpoint_utils,
custom_client_mock_endpoint_utils, tenant_exception,
raise_error, api_client_mock_endpoint_utils,
apps_client_mock_endpoint_utils):
tenant_exists_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.tenant_exists')
if tenant_exception:
with pytest.raises(TenantDoesNotExistException):
tenant_exists_mock.return_value = False
create_endpoint(parameters={'endpointName': "test", 'resources': {}}, namespace="test",
id_token=user_token)
ing_ip_mock, ing_ip_mock_return_values = url_to_service_endpoint_utils
create_custom_client_mock, custom_client = custom_client_mock_endpoint_utils
create_apps_client_mock, apps_client = apps_client_mock_endpoint_utils
verify_endpoint_amount_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.verify_endpoint_amount')
verify_endpoint_amount_mock.return_value = None
validate_quota_compliance_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.validate_quota_compliance')
parameters_resources_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.transform_quota')
if raise_error:
with pytest.raises(KubernetesCreateException):
custom_client.create_namespaced_custom_object.side_effect = ApiException()
create_endpoint(parameters={'endpointName': "test", 'resources': {}}, namespace="test",
id_token=user_token)
else:
tenant_exists_mock.return_value = True
create_endpoint(parameters={'endpointName': "test", 'resources': {}}, namespace="test",
id_token=user_token)
ing_ip_mock.assert_called_once()
validate_quota_compliance_mock.assert_called_once()
parameters_resources_mock.assert_called_once()
custom_client.create_namespaced_custom_object.assert_called_once()
@pytest.mark.parametrize("raise_error", [(False), (True)])
def test_delete_endpoint(custom_client_mock_endpoint_utils,
url_to_service_endpoint_utils, raise_error):
ing_ip_mock, ing_ip_mock_return_values = url_to_service_endpoint_utils
create_custom_client_mock, custom_client = custom_client_mock_endpoint_utils
if raise_error:
with pytest.raises(KubernetesDeleteException):
custom_client.delete_namespaced_custom_object.side_effect = ApiException()
delete_endpoint(parameters={'endpointName': 'test'}, namespace="test",
id_token=user_token)
else:
delete_endpoint(parameters={'endpointName': 'test'}, namespace="test",
id_token=user_token)
ing_ip_mock.assert_called_once()
custom_client.delete_namespaced_custom_object.assert_called_once()
create_custom_client_mock.assert_called_once()
call_data = [(scale_endpoint, {'replicas': 2}),
(update_endpoint, {'modelName': 'test', 'modelVersion': 2})]
@pytest.mark.parametrize("method, arguments", call_data)
def test_read_endpoint_fail(custom_client_mock_endpoint_utils, api_client_mock_endpoint_utils,
url_to_service_endpoint_utils, method, arguments):
ing_ip_mock, ing_ip_mock_return_values = url_to_service_endpoint_utils
create_custom_client_mock, custom_client = custom_client_mock_endpoint_utils
with pytest.raises(KubernetesGetException):
custom_client.get_namespaced_custom_object.side_effect = ApiException()
method(parameters=arguments, namespace="test", endpoint_name="test",
id_token=user_token)
create_custom_client_mock.assert_called_once()
custom_client.get_namespaced_custom_object.assert_called_once()
@pytest.mark.parametrize("method, arguments", call_data)
def test_patch_endpoint_fail(custom_client_mock_endpoint_utils,
url_to_service_endpoint_utils, method, arguments):
ing_ip_mock, ing_ip_mock_return_values = url_to_service_endpoint_utils
create_custom_client_mock, custom_client = custom_client_mock_endpoint_utils
with pytest.raises(KubernetesUpdateException):
custom_client.get_namespaced_custom_object.return_value = {'spec': {}}
custom_client.patch_namespaced_custom_object.side_effect = ApiException()
method(parameters=arguments, namespace="test", endpoint_name="test",
id_token=user_token)
create_custom_client_mock.assert_called_once()
custom_client.get_namespaced_custom_object.assert_called_once()
custom_client.patch_namespaced_custom_object.assert_called_once()
@pytest.mark.parametrize("method, arguments", call_data)
def test_patch_endpoint_success(custom_client_mock_endpoint_utils,
url_to_service_endpoint_utils, method, arguments):
ing_ip_mock, ing_ip_mock_return_values = url_to_service_endpoint_utils
create_custom_client_mock, custom_client = custom_client_mock_endpoint_utils
custom_client.get_namespaced_custom_object.return_value = {'spec': {}}
method(parameters=arguments, namespace="test", endpoint_name="test",
id_token=user_token)
create_custom_client_mock.assert_called_once()
custom_client.get_namespaced_custom_object.assert_called_once()
custom_client.patch_namespaced_custom_object.assert_called_once()
ing_ip_mock.assert_called_once()
@pytest.mark.parametrize("tenant_exception, k8s_exception",
[(True, False),
(False, True),
(False, False)])
def test_list_endpoints(mocker, apps_client_mock_endpoint_utils, tenant_exception, k8s_exception):
tenant_exists_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.tenant_exists')
create_apps_client_mock, apps_client = apps_client_mock_endpoint_utils
if tenant_exception:
with pytest.raises(TenantDoesNotExistException):
tenant_exists_mock.return_value = False
list_endpoints(namespace="test", id_token=user_token)
else:
tenant_exists_mock.return_value = True
if k8s_exception:
with pytest.raises(KubernetesGetException):
apps_client.list_namespaced_deployment.side_effect = ApiException()
list_endpoints(namespace="test", id_token=user_token)
else:
endpoints_name_status_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.get_endpoints_name_status')
endpoints_name_status_mock.return_value = {}
apps_client.list_namespaced_deployment.return_value = {}
list_endpoints(namespace="test", id_token=user_token)
endpoints_name_status_mock.assert_called_once()
create_apps_client_mock.assert_called_once()
apps_client.list_namespaced_deployment.assert_called_once()
tenant_exists_mock.assert_called_once()
def test_create_url_to_service(mocker):
api_client = Mock()
create_custom_client_mock = mocker.patch('management_api.endpoints.endpoint_utils.'
'get_k8s_api_client')
create_custom_client_mock.return_value = api_client
mock_return_value = ['t_end-t_ns.default', 443]
external_address_mock = mocker.patch('management_api.utils.kubernetes_resources.'
'get_ingress_external_ip')
external_address_mock.return_value = mock_return_value
external_address = "{}:{}".format(mock_return_value[0], mock_return_value[1])
expected_output = {'url': external_address}
output = create_url_to_service(endpoint_name='t_end', namespace="t_ns")
assert expected_output == output
@pytest.mark.parametrize("tenant_exception, endpoint_exception",
[(True, False),
(False, True)])
def test_view_endpoint_fail(mocker, tenant_exception, endpoint_exception):
tenant_exists_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.tenant_exists')
endpoint_exists_mock = mocker.patch('management_api.endpoints.endpoint_utils.endpoint_exists')
if tenant_exception:
with pytest.raises(TenantDoesNotExistException):
tenant_exists_mock.return_value = False
view_endpoint(namespace="test", endpoint_name="test", id_token=user_token)
elif endpoint_exception:
with pytest.raises(EndpointDoesNotExistException):
endpoint_exists_mock.return_value = False
view_endpoint(namespace="test", endpoint_name="test", id_token=user_token)
    tenant_exists_mock.assert_called_once()
    endpoint_exists_mock.assert_called_once()
def test_view_endpoint_success(mocker, api_client_mock_endpoint_utils,
custom_client_mock_endpoint_utils, apps_client_mock_endpoint_utils):
tenant_exists_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.tenant_exists')
endpoint_exists_mock = mocker.patch('management_api.endpoints.endpoint_utils.endpoint_exists')
tenant_exists_mock.return_value = True
endpoint_exists_mock.return_value = True
create_api_client_mock, api_client = api_client_mock_endpoint_utils
create_custom_client_mock, custom_client = custom_client_mock_endpoint_utils
create_apps_client_mock, apps_client = apps_client_mock_endpoint_utils
endpoint_status_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.get_endpoint_status', api_instance=api_client)
endpoint_status_mock.return_value = {}
model_path_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.create_url_to_service')
model_path_mock.return_value = {}
subject_name_resources_mock = mocker.patch(
'management_api.endpoints.endpoint_utils.get_crd_subject_name_and_resources',
custom_api_instance=custom_client)
subject_name_resources_mock.return_value = "", ""
replicas_mock = mocker.patch('management_api.endpoints.endpoint_utils.get_replicas',
apps_api_instance=apps_client)
replicas_mock.return_value = 1
view_endpoint(namespace="test", endpoint_name="test", id_token=user_token)
tenant_exists_mock.assert_called_once()
endpoint_exists_mock.assert_called_once()
endpoint_status_mock.assert_called_once()
model_path_mock.assert_called_once()
subject_name_resources_mock.assert_called_once()
replicas_mock.assert_called_once()
create_api_client_mock.assert_called_once()
create_custom_client_mock.assert_called_once()
create_apps_client_mock.assert_called_once()
| true
| true
|
1c400f0eed5bedcf15ac83b8b0358c7c54ae6b43
| 21
|
py
|
Python
|
salad/__init__.py
|
Work4Labs/salad
|
176869a4437103d501feb3035beaf162c2507435
|
[
"BSD-3-Clause"
] | null | null | null |
salad/__init__.py
|
Work4Labs/salad
|
176869a4437103d501feb3035beaf162c2507435
|
[
"BSD-3-Clause"
] | null | null | null |
salad/__init__.py
|
Work4Labs/salad
|
176869a4437103d501feb3035beaf162c2507435
|
[
"BSD-3-Clause"
] | null | null | null |
VERSION = "0.4.14.2"
| 10.5
| 20
| 0.571429
|
VERSION = "0.4.14.2"
| true
| true
|
1c400fafd6f6f7bdff3f1627fb61ae1acfd933ac
| 3,354
|
py
|
Python
|
python/dgl/nn/mxnet/conv/ginconv.py
|
zhengdao-chen/dgl
|
39503d879e6427d3c9677b3b1fa6df33c60e2f21
|
[
"Apache-2.0"
] | 2
|
2021-12-09T12:36:13.000Z
|
2022-03-01T21:22:36.000Z
|
python/dgl/nn/mxnet/conv/ginconv.py
|
zhengdao-chen/dgl
|
39503d879e6427d3c9677b3b1fa6df33c60e2f21
|
[
"Apache-2.0"
] | null | null | null |
python/dgl/nn/mxnet/conv/ginconv.py
|
zhengdao-chen/dgl
|
39503d879e6427d3c9677b3b1fa6df33c60e2f21
|
[
"Apache-2.0"
] | 2
|
2020-12-07T09:34:01.000Z
|
2020-12-13T06:18:58.000Z
|
"""MXNet Module for Graph Isomorphism Network layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
import mxnet as mx
from mxnet.gluon import nn
from .... import function as fn
from ....utils import expand_as_pair
class GINConv(nn.Block):
r"""Graph Isomorphism Network layer from paper `How Powerful are Graph
Neural Networks? <https://arxiv.org/pdf/1810.00826.pdf>`__.
.. math::
h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
\mathrm{aggregate}\left(\left\{h_j^{l}, j\in\mathcal{N}(i)
\right\}\right)\right)
Parameters
----------
apply_func : callable activation function/layer or None
If not None, apply this function to the updated node feature,
the :math:`f_\Theta` in the formula.
aggregator_type : str
Aggregator type to use (``sum``, ``max`` or ``mean``).
init_eps : float, optional
Initial :math:`\epsilon` value, default: ``0``.
learn_eps : bool, optional
If True, :math:`\epsilon` will be a learnable parameter.
"""
def __init__(self,
apply_func,
aggregator_type,
init_eps=0,
learn_eps=False):
super(GINConv, self).__init__()
if aggregator_type == 'sum':
self._reducer = fn.sum
elif aggregator_type == 'max':
self._reducer = fn.max
elif aggregator_type == 'mean':
self._reducer = fn.mean
else:
raise KeyError('Aggregator type {} not recognized.'.format(aggregator_type))
with self.name_scope():
self.apply_func = apply_func
self.eps = self.params.get('eps',
shape=(1,),
grad_req='write' if learn_eps else 'null',
init=mx.init.Constant(init_eps))
def forward(self, graph, feat):
r"""Compute Graph Isomorphism Network layer.
Parameters
----------
graph : DGLGraph
The graph.
feat : mxnet.NDArray or a pair of mxnet.NDArray
            If an mxnet.NDArray is given, it is the input feature of shape
            :math:`(N, D_{in})`, where :math:`D_{in}` is the size of the input
            feature and :math:`N` is the number of nodes.
            If a pair of mxnet.NDArrays is given, the pair must contain two tensors of
shape :math:`(N_{in}, D_{in})` and :math:`(N_{out}, D_{in})`.
If ``apply_func`` is not None, :math:`D_{in}` should
fit the input dimensionality requirement of ``apply_func``.
Returns
-------
mxnet.NDArray
The output feature of shape :math:`(N, D_{out})` where
:math:`D_{out}` is the output dimensionality of ``apply_func``.
If ``apply_func`` is None, :math:`D_{out}` should be the same
as input dimensionality.
"""
with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat)
graph.srcdata['h'] = feat_src
graph.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
rst = (1 + self.eps.data(feat_dst.context)) * feat_dst + graph.dstdata['neigh']
if self.apply_func is not None:
rst = self.apply_func(rst)
return rst
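# --- Editor's sketch (assumption, not part of the original module) ---
# Minimal usage of the layer above; assumes a DGL build with the MXNet
# backend and the ``dgl.graph`` constructor. Topology and feature sizes are
# arbitrary examples.
if __name__ == '__main__':
    import dgl
    conv = GINConv(nn.Dense(16), aggregator_type='sum')
    conv.initialize()
    g = dgl.graph(([0, 1, 2], [1, 2, 0]))        # 3-node directed cycle
    feat = mx.nd.random.uniform(shape=(3, 8))    # one 8-dim feature per node
    out = conv(g, feat)                          # expected shape: (3, 16)
    print(out.shape)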
| 39.458824
| 91
| 0.560525
|
import mxnet as mx
from mxnet.gluon import nn
from .... import function as fn
from ....utils import expand_as_pair
class GINConv(nn.Block):
def __init__(self,
apply_func,
aggregator_type,
init_eps=0,
learn_eps=False):
super(GINConv, self).__init__()
if aggregator_type == 'sum':
self._reducer = fn.sum
elif aggregator_type == 'max':
self._reducer = fn.max
elif aggregator_type == 'mean':
self._reducer = fn.mean
else:
raise KeyError('Aggregator type {} not recognized.'.format(aggregator_type))
with self.name_scope():
self.apply_func = apply_func
self.eps = self.params.get('eps',
shape=(1,),
grad_req='write' if learn_eps else 'null',
init=mx.init.Constant(init_eps))
def forward(self, graph, feat):
with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat)
graph.srcdata['h'] = feat_src
graph.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
rst = (1 + self.eps.data(feat_dst.context)) * feat_dst + graph.dstdata['neigh']
if self.apply_func is not None:
rst = self.apply_func(rst)
return rst
| true
| true
|
1c400fb076e4857384f6b36220fa468373d8ff69
| 399
|
py
|
Python
|
backend/yalla_33420/wsgi.py
|
crowdbotics-apps/yalla-33420
|
5a0c521f76a50f01012c4fb838cebb45779a939e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/yalla_33420/wsgi.py
|
crowdbotics-apps/yalla-33420
|
5a0c521f76a50f01012c4fb838cebb45779a939e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/yalla_33420/wsgi.py
|
crowdbotics-apps/yalla-33420
|
5a0c521f76a50f01012c4fb838cebb45779a939e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for yalla_33420 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yalla_33420.settings')
application = get_wsgi_application()
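# --- Editor's sketch (assumption, not part of the generated file) ---
# ``application`` above is a standard WSGI callable, so any WSGI server can
# host it; the stdlib server is enough for a local smoke test:
if __name__ == "__main__":
    from wsgiref.simple_server import make_server
    with make_server("127.0.0.1", 8000, application) as httpd:
        httpd.serve_forever()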
| 23.470588
| 78
| 0.789474
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yalla_33420.settings')
application = get_wsgi_application()
| true
| true
|
1c400fd841135e23efbd58b8d45e46398faadef3
| 9,093
|
py
|
Python
|
tests/inventory/pipelines/test_data/fake_backend_services.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | 1
|
2018-03-26T08:15:21.000Z
|
2018-03-26T08:15:21.000Z
|
tests/inventory/pipelines/test_data/fake_backend_services.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | null | null | null |
tests/inventory/pipelines/test_data/fake_backend_services.py
|
pombredanne/forseti-security
|
68a9a88243460065e00b6c131b3d9abd0331fb37
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test backend services data."""
FAKE_API_RESPONSE1 = [
{
"kind": "compute#backendService",
"id": "3072061062494750400",
"creationTimestamp": "2017-04-03T14:01:35.687-07:00",
"name": "bs-1",
"description": "bs-1-desc",
"selfLink": "https://www.googleapis.com/compute/v1/projects/project1/global/backendServices/bs-1",
"backends": [
{
"group": "https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/instanceGroups/bs-1-ig-1",
"balancingMode": "UTILIZATION",
"capacityScaler": 1.0
}
],
"healthChecks": [
"https://www.googleapis.com/compute/v1/projects/project1/global/httpsHealthChecks/hc-1"
],
"timeoutSec": 3610,
"port": 8443,
"protocol": "HTTPS",
"portName": "https",
"enableCDN": False,
"sessionAffinity": "NONE",
"affinityCookieTtlSec": 0,
"loadBalancingScheme": "EXTERNAL",
"connectionDraining": {
"drainingTimeoutSec": 0
}
},
]
FAKE_API_RESPONSE2 = [
{
"kind": "compute#backendService",
"id": "6071052922189792661",
"creationTimestamp": "2017-05-12T11:14:18.559-07:00",
"name": "iap-bs",
"description": "",
"selfLink": "https://www.googleapis.com/compute/v1/projects/project2/global/backendServices/iap-bs",
"backends": [
{
"description": "",
"group": "https://www.googleapis.com/compute/v1/projects/project2/zones/us-east1-c/instanceGroups/instance-group-1",
"balancingMode": "UTILIZATION",
"maxUtilization": 0.8,
"capacityScaler": 1.0
}
],
"healthChecks": [
"https://www.googleapis.com/compute/v1/projects/project2/global/healthChecks/iap-hc"
],
"timeoutSec": 30,
"port": 80,
"protocol": "HTTP",
"portName": "http",
"enableCDN": False,
"sessionAffinity": "NONE",
"affinityCookieTtlSec": 0,
"loadBalancingScheme": "EXTERNAL",
"connectionDraining": {
"drainingTimeoutSec": 300
},
"iap": {
"enabled": True,
"oauth2ClientId": "foo",
"oauth2ClientSecretSha256": "bar"
}
},
]
FAKE_PROJECT_BACKEND_SERVICES_MAP = {
'project1': [
{'kind': 'compute#backendService',
'id': '3072061062494750400',
'creationTimestamp': '2017-04-03T14:01:35.687-07:00',
'name': 'bs-1',
'description': 'bs-1-desc',
'selfLink': 'https://www.googleapis.com/compute/v1/projects/project1/global/backendServices/bs-1',
'backends': [
{
'group': 'https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/instanceGroups/bs-1-ig-1',
'balancingMode': 'UTILIZATION',
'capacityScaler': 1.0
}
],
'healthChecks': [
'https://www.googleapis.com/compute/v1/projects/project1/global/httpsHealthChecks/hc-1'
],
'timeoutSec': 3610,
'port': 8443,
'protocol': 'HTTPS',
'portName': 'https',
'enableCDN': False,
'sessionAffinity': 'NONE',
'affinityCookieTtlSec': 0,
'loadBalancingScheme': 'EXTERNAL',
'connectionDraining': {
'drainingTimeoutSec': 0
},
},
],
'project2': [
{'kind': 'compute#backendService',
'id': '6071052922189792661',
'creationTimestamp': '2017-05-12T11:14:18.559-07:00',
'name': 'iap-bs',
'description': '',
'selfLink': 'https://www.googleapis.com/compute/v1/projects/project2/global/backendServices/iap-bs',
'backends': [
{
'description': '',
'group': 'https://www.googleapis.com/compute/v1/projects/project2/zones/us-east1-c/instanceGroups/instance-group-1',
'balancingMode': 'UTILIZATION',
'maxUtilization': 0.8,
'capacityScaler': 1.0
}
],
'healthChecks': [
'https://www.googleapis.com/compute/v1/projects/project2/global/healthChecks/iap-hc'
],
'timeoutSec': 30,
'port': 80,
'protocol': 'HTTP',
'portName': 'http',
'enableCDN': False,
'sessionAffinity': 'NONE',
'affinityCookieTtlSec': 0,
'loadBalancingScheme': 'EXTERNAL',
'connectionDraining': {
'drainingTimeoutSec': 300
},
'iap': {
'enabled': True,
'oauth2ClientId': 'foo',
'oauth2ClientSecretSha256': 'bar'
}
},
],
}
EXPECTED_LOADABLE_BACKEND_SERVICES = [
{'affinity_cookie_ttl_sec': 0,
'backends': '[{"capacityScaler": 1.0, "group": "https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/instanceGroups/bs-1-ig-1", "balancingMode": "UTILIZATION"}]',
'cdn_policy': '{}',
'connection_draining': '{"drainingTimeoutSec": 0}',
'creation_timestamp': '2017-04-03 14:01:35',
'description': 'bs-1-desc',
'enable_cdn': False,
'health_checks': '["https://www.googleapis.com/compute/v1/projects/project1/global/httpsHealthChecks/hc-1"]',
'iap': '{}',
'id': '3072061062494750400',
'load_balancing_scheme': 'EXTERNAL',
'name': 'bs-1',
'port': 8443,
'port_name': 'https',
'project_id': 'project1',
'protocol': 'HTTPS',
'region': None,
'session_affinity': 'NONE',
'timeout_sec': 3610,
'raw_backend_service': '{"connectionDraining": {"drainingTimeoutSec": 0}, "kind": "compute#backendService", "protocol": "HTTPS", "name": "bs-1", "timeoutSec": 3610, "enableCDN": false, "loadBalancingScheme": "EXTERNAL", "affinityCookieTtlSec": 0, "port": 8443, "backends": [{"capacityScaler": 1.0, "group": "https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/instanceGroups/bs-1-ig-1", "balancingMode": "UTILIZATION"}], "portName": "https", "healthChecks": ["https://www.googleapis.com/compute/v1/projects/project1/global/httpsHealthChecks/hc-1"], "sessionAffinity": "NONE", "creationTimestamp": "2017-04-03T14:01:35.687-07:00", "id": "3072061062494750400", "selfLink": "https://www.googleapis.com/compute/v1/projects/project1/global/backendServices/bs-1", "description": "bs-1-desc"}'},
{'affinity_cookie_ttl_sec': 0,
'backends': '[{"maxUtilization": 0.8, "capacityScaler": 1.0, "group": "https://www.googleapis.com/compute/v1/projects/project2/zones/us-east1-c/instanceGroups/instance-group-1", "description": "", "balancingMode": "UTILIZATION"}]',
'cdn_policy': '{}',
'connection_draining': '{"drainingTimeoutSec": 300}',
'creation_timestamp': '2017-05-12 11:14:18',
'description': '',
'enable_cdn': False,
'health_checks': '["https://www.googleapis.com/compute/v1/projects/project2/global/healthChecks/iap-hc"]',
'iap': '{"oauth2ClientId": "foo", "enabled": true, "oauth2ClientSecretSha256": "bar"}',
'id': '6071052922189792661',
'load_balancing_scheme': 'EXTERNAL',
'name': 'iap-bs',
'port': 80,
'port_name': 'http',
'project_id': 'project2',
'protocol': 'HTTP',
'region': None,
'session_affinity': 'NONE',
'timeout_sec': 30,
'raw_backend_service': '{"connectionDraining": {"drainingTimeoutSec": 300}, "kind": "compute#backendService", "protocol": "HTTP", "name": "iap-bs", "timeoutSec": 30, "enableCDN": false, "loadBalancingScheme": "EXTERNAL", "affinityCookieTtlSec": 0, "port": 80, "backends": [{"maxUtilization": 0.8, "capacityScaler": 1.0, "group": "https://www.googleapis.com/compute/v1/projects/project2/zones/us-east1-c/instanceGroups/instance-group-1", "description": "", "balancingMode": "UTILIZATION"}], "iap": {"oauth2ClientId": "foo", "enabled": true, "oauth2ClientSecretSha256": "bar"}, "portName": "http", "healthChecks": ["https://www.googleapis.com/compute/v1/projects/project2/global/healthChecks/iap-hc"], "sessionAffinity": "NONE", "creationTimestamp": "2017-05-12T11:14:18.559-07:00", "id": "6071052922189792661", "selfLink": "https://www.googleapis.com/compute/v1/projects/project2/global/backendServices/iap-bs", "description": ""}',},
]
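# --- Editor's note (illustrative, not part of the original fixtures) ---
# Each EXPECTED_LOADABLE_BACKEND_SERVICES row flattens one FAKE_API_RESPONSE*
# dict, with nested structures serialized to JSON strings, so the two fixture
# sets can be cross-checked mechanically:
if __name__ == '__main__':
    import json
    assert (json.loads(EXPECTED_LOADABLE_BACKEND_SERVICES[0]['connection_draining'])
            == FAKE_API_RESPONSE1[0]['connectionDraining'])
    assert (json.loads(EXPECTED_LOADABLE_BACKEND_SERVICES[1]['iap'])
            == FAKE_API_RESPONSE2[0]['iap'])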
| 45.238806
| 938
| 0.599692
|
FAKE_API_RESPONSE1 = [
{
"kind": "compute#backendService",
"id": "3072061062494750400",
"creationTimestamp": "2017-04-03T14:01:35.687-07:00",
"name": "bs-1",
"description": "bs-1-desc",
"selfLink": "https://www.googleapis.com/compute/v1/projects/project1/global/backendServices/bs-1",
"backends": [
{
"group": "https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/instanceGroups/bs-1-ig-1",
"balancingMode": "UTILIZATION",
"capacityScaler": 1.0
}
],
"healthChecks": [
"https://www.googleapis.com/compute/v1/projects/project1/global/httpsHealthChecks/hc-1"
],
"timeoutSec": 3610,
"port": 8443,
"protocol": "HTTPS",
"portName": "https",
"enableCDN": False,
"sessionAffinity": "NONE",
"affinityCookieTtlSec": 0,
"loadBalancingScheme": "EXTERNAL",
"connectionDraining": {
"drainingTimeoutSec": 0
}
},
]
FAKE_API_RESPONSE2 = [
{
"kind": "compute#backendService",
"id": "6071052922189792661",
"creationTimestamp": "2017-05-12T11:14:18.559-07:00",
"name": "iap-bs",
"description": "",
"selfLink": "https://www.googleapis.com/compute/v1/projects/project2/global/backendServices/iap-bs",
"backends": [
{
"description": "",
"group": "https://www.googleapis.com/compute/v1/projects/project2/zones/us-east1-c/instanceGroups/instance-group-1",
"balancingMode": "UTILIZATION",
"maxUtilization": 0.8,
"capacityScaler": 1.0
}
],
"healthChecks": [
"https://www.googleapis.com/compute/v1/projects/project2/global/healthChecks/iap-hc"
],
"timeoutSec": 30,
"port": 80,
"protocol": "HTTP",
"portName": "http",
"enableCDN": False,
"sessionAffinity": "NONE",
"affinityCookieTtlSec": 0,
"loadBalancingScheme": "EXTERNAL",
"connectionDraining": {
"drainingTimeoutSec": 300
},
"iap": {
"enabled": True,
"oauth2ClientId": "foo",
"oauth2ClientSecretSha256": "bar"
}
},
]
FAKE_PROJECT_BACKEND_SERVICES_MAP = {
'project1': [
{'kind': 'compute#backendService',
'id': '3072061062494750400',
'creationTimestamp': '2017-04-03T14:01:35.687-07:00',
'name': 'bs-1',
'description': 'bs-1-desc',
'selfLink': 'https://www.googleapis.com/compute/v1/projects/project1/global/backendServices/bs-1',
'backends': [
{
'group': 'https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/instanceGroups/bs-1-ig-1',
'balancingMode': 'UTILIZATION',
'capacityScaler': 1.0
}
],
'healthChecks': [
'https://www.googleapis.com/compute/v1/projects/project1/global/httpsHealthChecks/hc-1'
],
'timeoutSec': 3610,
'port': 8443,
'protocol': 'HTTPS',
'portName': 'https',
'enableCDN': False,
'sessionAffinity': 'NONE',
'affinityCookieTtlSec': 0,
'loadBalancingScheme': 'EXTERNAL',
'connectionDraining': {
'drainingTimeoutSec': 0
},
},
],
'project2': [
{'kind': 'compute#backendService',
'id': '6071052922189792661',
'creationTimestamp': '2017-05-12T11:14:18.559-07:00',
'name': 'iap-bs',
'description': '',
'selfLink': 'https://www.googleapis.com/compute/v1/projects/project2/global/backendServices/iap-bs',
'backends': [
{
'description': '',
'group': 'https://www.googleapis.com/compute/v1/projects/project2/zones/us-east1-c/instanceGroups/instance-group-1',
'balancingMode': 'UTILIZATION',
'maxUtilization': 0.8,
'capacityScaler': 1.0
}
],
'healthChecks': [
'https://www.googleapis.com/compute/v1/projects/project2/global/healthChecks/iap-hc'
],
'timeoutSec': 30,
'port': 80,
'protocol': 'HTTP',
'portName': 'http',
'enableCDN': False,
'sessionAffinity': 'NONE',
'affinityCookieTtlSec': 0,
'loadBalancingScheme': 'EXTERNAL',
'connectionDraining': {
'drainingTimeoutSec': 300
},
'iap': {
'enabled': True,
'oauth2ClientId': 'foo',
'oauth2ClientSecretSha256': 'bar'
}
},
],
}
EXPECTED_LOADABLE_BACKEND_SERVICES = [
{'affinity_cookie_ttl_sec': 0,
'backends': '[{"capacityScaler": 1.0, "group": "https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/instanceGroups/bs-1-ig-1", "balancingMode": "UTILIZATION"}]',
'cdn_policy': '{}',
'connection_draining': '{"drainingTimeoutSec": 0}',
'creation_timestamp': '2017-04-03 14:01:35',
'description': 'bs-1-desc',
'enable_cdn': False,
'health_checks': '["https://www.googleapis.com/compute/v1/projects/project1/global/httpsHealthChecks/hc-1"]',
'iap': '{}',
'id': '3072061062494750400',
'load_balancing_scheme': 'EXTERNAL',
'name': 'bs-1',
'port': 8443,
'port_name': 'https',
'project_id': 'project1',
'protocol': 'HTTPS',
'region': None,
'session_affinity': 'NONE',
'timeout_sec': 3610,
'raw_backend_service': '{"connectionDraining": {"drainingTimeoutSec": 0}, "kind": "compute#backendService", "protocol": "HTTPS", "name": "bs-1", "timeoutSec": 3610, "enableCDN": false, "loadBalancingScheme": "EXTERNAL", "affinityCookieTtlSec": 0, "port": 8443, "backends": [{"capacityScaler": 1.0, "group": "https://www.googleapis.com/compute/v1/projects/project1/regions/us-central1/instanceGroups/bs-1-ig-1", "balancingMode": "UTILIZATION"}], "portName": "https", "healthChecks": ["https://www.googleapis.com/compute/v1/projects/project1/global/httpsHealthChecks/hc-1"], "sessionAffinity": "NONE", "creationTimestamp": "2017-04-03T14:01:35.687-07:00", "id": "3072061062494750400", "selfLink": "https://www.googleapis.com/compute/v1/projects/project1/global/backendServices/bs-1", "description": "bs-1-desc"}'},
{'affinity_cookie_ttl_sec': 0,
'backends': '[{"maxUtilization": 0.8, "capacityScaler": 1.0, "group": "https://www.googleapis.com/compute/v1/projects/project2/zones/us-east1-c/instanceGroups/instance-group-1", "description": "", "balancingMode": "UTILIZATION"}]',
'cdn_policy': '{}',
'connection_draining': '{"drainingTimeoutSec": 300}',
'creation_timestamp': '2017-05-12 11:14:18',
'description': '',
'enable_cdn': False,
'health_checks': '["https://www.googleapis.com/compute/v1/projects/project2/global/healthChecks/iap-hc"]',
'iap': '{"oauth2ClientId": "foo", "enabled": true, "oauth2ClientSecretSha256": "bar"}',
'id': '6071052922189792661',
'load_balancing_scheme': 'EXTERNAL',
'name': 'iap-bs',
'port': 80,
'port_name': 'http',
'project_id': 'project2',
'protocol': 'HTTP',
'region': None,
'session_affinity': 'NONE',
'timeout_sec': 30,
'raw_backend_service': '{"connectionDraining": {"drainingTimeoutSec": 300}, "kind": "compute#backendService", "protocol": "HTTP", "name": "iap-bs", "timeoutSec": 30, "enableCDN": false, "loadBalancingScheme": "EXTERNAL", "affinityCookieTtlSec": 0, "port": 80, "backends": [{"maxUtilization": 0.8, "capacityScaler": 1.0, "group": "https://www.googleapis.com/compute/v1/projects/project2/zones/us-east1-c/instanceGroups/instance-group-1", "description": "", "balancingMode": "UTILIZATION"}], "iap": {"oauth2ClientId": "foo", "enabled": true, "oauth2ClientSecretSha256": "bar"}, "portName": "http", "healthChecks": ["https://www.googleapis.com/compute/v1/projects/project2/global/healthChecks/iap-hc"], "sessionAffinity": "NONE", "creationTimestamp": "2017-05-12T11:14:18.559-07:00", "id": "6071052922189792661", "selfLink": "https://www.googleapis.com/compute/v1/projects/project2/global/backendServices/iap-bs", "description": ""}',},
]
| true
| true
|
1c40117a7c4ffc2d0fd1610c995de60fbfac1c4d
| 13,101
|
py
|
Python
|
main.py
|
boazjohn/pyspark-job-server
|
bda2fa454b7875494869be81c9d75802df194feb
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
boazjohn/pyspark-job-server
|
bda2fa454b7875494869be81c9d75802df194feb
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
boazjohn/pyspark-job-server
|
bda2fa454b7875494869be81c9d75802df194feb
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# Standard Library
import argparse
import json
import time
import os
import sys
import logging
import socket
import traceback
import uuid
from collections import defaultdict
from threading import Thread, Lock
# Third Party
import mixingboard
import pip
from chassis.models import JobHistory
from flask import Flask, jsonify, request
from werkzeug.utils import secure_filename
# Local
# set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# parse args
argParser = argparse.ArgumentParser(description='Run the Quarry server.')
argParser.add_argument('-l', '--local', action='store_true', help='Run spark in local mode')
argParser.add_argument('-p', '--port', type=int, default=1989, help='Set the port')
argParser.add_argument('-H', '--host', type=str, default='127.0.0.1', help='Set the host')
argParser.add_argument('-u', '--iam-username', type=str, required=True, help='The IAM username for this account')
argParser.add_argument('--access-key-id', type=str, required=True, help='The access key id for this account')
argParser.add_argument('--access-key-secret', type=str, required=True, help='The access key secret for this account')
argParser.add_argument('-s', '--spark-home', type=str, required=True, help='The location of the local spark directory')
argParser.add_argument('-S', '--shark-home', type=str, required=True, help='The location of the local shark directory')
argParser.add_argument('-m', '--mysql-jar', type=str, required=True, help='The location of the mysql connector jar')
argParser.add_argument('-C', '--cluster', type=str, required=True, help='The cluster to add this instance to')
argParser.add_argument('-A', '--account', type=str, required=True, help='The account to add this instance to')
args, _ = argParser.parse_known_args()
# put args in sensible all caps variables
LOCAL = args.local
HOST = args.host
PORT = args.port
IAM_USERNAME = args.iam_username
ACCESS_KEY_ID = args.access_key_id
ACCESS_KEY_SECRET = args.access_key_secret
SPARK_HOME = os.path.abspath(args.spark_home)
SHARK_HOME = os.path.abspath(args.shark_home)
MYSQL_JAR = os.path.abspath(args.mysql_jar)
CLUSTER = args.cluster
ACCOUNT = args.account
# add spark/shark home to the os environ
os.environ["SPARK_HOME"] = SPARK_HOME
os.environ["SHARK_HOME"] = SHARK_HOME
os.environ["SPARK_CLASSPATH"] = MYSQL_JAR
# add the pyspark/pyshark directory to our python path
sys.path.append(os.path.join(SHARK_HOME, "python"))
sys.path.append(os.path.join(SPARK_HOME, "python"))
sys.path.append(os.path.join(SPARK_HOME, "python/lib/py4j-0.8.1-src.zip"))
# import the job runner
from lib.runner import JobRunner
# helpers for file uploads
UPLOAD_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'jobs')
ALLOWED_EXTENSIONS = set(['py'])
def extension(filename):
return filename.rsplit('.', 1)[1]
def allowed_file(filename):
return '.' in filename and \
extension(filename) in ALLOWED_EXTENSIONS
# instantiate flask and configure the app
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
sys.path.append(UPLOAD_FOLDER)
# set up and instantiate our globals so we can use them from within flask routes
global jobs
global runner
jobs = defaultdict(dict)
runner = None
###
# Context Management Methods
###
def getSparkMasters():
"""
Retrieve the spark masters.
Returns:
        a list of spark master info dicts (host/port)
"""
return mixingboard.getService("spark-master", account=ACCOUNT, cluster=CLUSTER)
def getSparkMaster():
"""
Retrieve the master for a given
user/account.
Returns:
spark master uri
"""
return getSparkMasters()[0]
def createRunner(name, account, user, conf={}):
"""
Creates a reusable spark runner
Params:
name: a name for the runner
account: an account
user: a user
conf: a json encoded configuration dictionary
"""
master = ""
if LOCAL:
master = "local"
else:
masterInfos = getSparkMasters()
master = "spark://%s" % ','.join(["%s:%s" % (info['host'], info['port']) for info in masterInfos])
return JobRunner(account=account, master=master, conf=conf, uploadFolder=UPLOAD_FOLDER,
iamUsername=IAM_USERNAME, accessKeyId=ACCESS_KEY_ID, accessKeySecret=ACCESS_KEY_SECRET)
###
# Job Management Methods
###
@app.route('/spark/jobs/saved')
def list_saved_jobs():
"""
Get a list of saved jobs
GetParams:
account: an account
user: a user
Returns:
a list of saved jobs
"""
global jobs
    account = request.args['account']
    user = request.args['user']
return jsonify(jobs[account])
@app.route('/spark/jobs/upload', methods = ['POST'])
def upload_job():
"""
Upload a new job
PostParams:
account: an account
user: a user
file: a python file containing a runnable job
name: a name to assign to your job
Returns:
a message and a status code
"""
global jobs
account = request.form['account']
user = request.form['user']
uploadedFile = request.files['file']
jobName = request.form['name']
if uploadedFile and allowed_file(uploadedFile.filename):
jobFolder = os.path.join(app.config['UPLOAD_FOLDER'], account, jobName)
try:
os.makedirs(os.path.join(jobFolder, "deps"))
except OSError:
pass
savePath = os.path.join(jobFolder, "%s_main.py" % jobName)
uploadedFile.save(savePath)
jobDesc = {
"name": jobName,
"created": int(time.time()*1000),
"lastRun": 0,
"timesRan": 0,
"jobFolder": jobFolder,
"jobModule": jobName+"_main",
"jobPath": savePath,
"jobFiles": []
}
jobs[account][jobName] = jobDesc
return jsonify({"success":True,"message":"Successfully created a job named '%s'" % jobName})
else:
return jsonify({"success":False,"message":"You must upload a .py file"}), 400
@app.route('/spark/jobs/<jobName>/upload', methods = ['POST'])
def upload_file_to_job(jobName):
"""
Upload a file to an existing job
PostParams:
account: an account
user: a user
file: a python file containing a runnable job
Returns:
a message and a status code
"""
global jobs
account = request.form['account']
user = request.form['user']
job = jobs[account][jobName]
for uploadedFile in request.files.values():
if not allowed_file(uploadedFile.filename):
return jsonify({"success":False,"message":"You must upload a .py file"}), 400
for uploadedFile in request.files.values():
filename = secure_filename(uploadedFile.filename)
jobFolder = os.path.join(app.config['UPLOAD_FOLDER'], account, jobName)
savePath = os.path.join(jobFolder, filename)
uploadedFile.save(savePath)
job["jobFiles"].append(savePath)
return jsonify({"success":True,"message":"Successfully added %s file(s) to job '%s'" % (len(request.files), jobName)})
@app.route("/spark/job/async/status")
def async_job_status():
"""
Get the status of an async job
GetParams:
account: an account
user: a user
handle: an async job handle
Returns:
        a json-object representing a job's status
"""
global runner
handle = request.args['handle']
account = request.args['account']
user = request.args['user']
return jsonify(runner.getHandleStatus(handle))
@app.route("/spark/job/async/progress")
def async_job_progress():
"""
Get the progress of an async job
GetParams:
account: an account
user: a user
handle: an async job handle
Returns:
        a json-object representing a job's progress
"""
global runner
handle = request.args['handle']
account = request.args['account']
user = request.args['user']
return jsonify(runner.getHandleProgress(handle))
@app.route("/spark/job/async/cancel", methods=["POST"])
def async_job_cancel():
"""
Cancel an async job
GetParams:
account: an account
user: a user
handle: an async job handle
Returns:
a status code
"""
global runner
handle = request.form['handle']
account = request.form['account']
user = request.form['user']
runner.cancelJobWithHandle(handle)
return jsonify({})
@app.route("/spark/job/<jobName>/run", methods=['POST'])
def run_job_async(jobName):
"""
Asynchronously run a previously uploaded job
RouteParams:
jobName: the name of the job to run
GetParams:
account: an account
user: a user
options: options to provide to the job
Returns:
a job handle for getting status updates and results
"""
global jobs
global runner
account = request.form['account']
user = request.form['user']
options = json.loads(request.form.get("options","{}"))
options['user'] = user
try:
job = jobs[account][jobName]
except KeyError:
return jsonify({
"error": "Job '%s' does not exist" % jobName
})
handle = runner.runJobAsync(job, options)
return jsonify({
"handle": handle,
"history": {
"jobHandle": handle,
"accountId": account,
"userId": user,
"event": "job_start",
"data": {
"jobName": options.get("jobName", "Untitled")
},
"jobType": options.get("jobType", "spark")
}
})
@app.route("/spark/job/<jobName>/run/sync", methods=['POST'])
def run_job_sync(jobName):
"""
Synchronously run a previously uploaded job
RouteParams:
jobName: the name of the job to run
GetParams:
account: an account
user: a user
options: options to provide to the job
Returns:
a json object containing the result of the job
"""
global runner
global jobs
account = request.form['account']
user = request.form['user']
options = json.loads(request.form.get("options","{}"))
try:
job = jobs[account][jobName]
except KeyError:
return jsonify({
"error": "Job '%s' does not exist" % jobName
})
result = runner.runJobSync(job, options)
return jsonify({
"result": result,
"history": {
"jobHandle": handle,
"accountId": account,
"userId": user,
"event": "job_start",
"data": {
"jobName": options.get("jobName", "Untitled")
}
}
})
@app.route("/spark/sql/run", methods=['POST'])
def run_sql_async():
"""
Asynchronously run a sql query
GetParams:
account: an account
user: a user
sql: the sql query to run
Returns:
a job handle for getting status updates and results
"""
global runner
account = request.form['account']
user = request.form['user']
options = json.loads(request.form.get("options","{}"))
options['user'] = user
sql = request.form['sql']
handle = runner.runSQLAsync(sql, options)
return jsonify({
"handle": handle
})
@app.route("/spark/sql/run/sync", methods=['POST'])
def run_sql_sync():
"""
Synchronously run a SQL query
GetParams:
account: an account
user: a user
sql: a sql query
Returns:
a json object containing the result of the job
"""
global runner
account = request.form['account']
user = request.form['user']
sql = request.form['sql']
result = runner.runSQLSync(sql)
return jsonify({
"rows": result
})
@app.route("/spark/status")
def status():
"""
Get the overall status for the runner
GetParams:
account: an account
user: a user
Returns:
a json object containing the overall status
"""
global runner
account = request.args['account']
user = request.args['user']
status = runner.getStatus()
return jsonify({
"status": status
})
@app.route("/spark/progress")
def progress():
"""
Get the overall progress for the runner
GetParams:
account: an account
user: a user
Returns:
a json object containing the overall progress
"""
global runner
account = request.args['account']
user = request.args['user']
jobType = request.args.get('jobType', None)
progress = runner.getProgress(jobType)
return jsonify({
"progress": progress
})
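# --- Editor's sketch (assumption, not part of the original server) ---
# How a client is expected to drive the upload -> run -> poll protocol that
# the route docstrings above describe. Host, port, account, file name and the
# shape of the status payload are hypothetical; ``requests`` is the only
# extra dependency. Defined here for illustration, never called by the server.
def _example_client_session(base="http://127.0.0.1:1989"):
    import requests
    import time
    creds = {"account": "myaccount", "user": "me"}
    # 1. upload a job file under the name "wordcount"
    with open("wordcount.py", "rb") as f:
        requests.post(base + "/spark/jobs/upload",
                      data=dict(creds, name="wordcount"), files={"file": f})
    # 2. start an asynchronous run and keep the returned handle
    handle = requests.post(base + "/spark/job/wordcount/run",
                           data=creds).json()["handle"]
    # 3. poll the handle; the terminal-state key below is an assumed example
    while True:
        status = requests.get(base + "/spark/job/async/status",
                              params=dict(creds, handle=handle)).json()
        if status.get("done"):
            return status
        time.sleep(1)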
if __name__ == "__main__":
    # Debug mode is intentionally disabled: it destabilizes the job server.
    # Restart the process manually after code changes.
runner = createRunner('default', ACCOUNT, None)
mixingboard.exposeService('job-server', port=PORT, account=ACCOUNT, cluster=CLUSTER)
app.run(host=HOST, port=PORT, threaded=True)
| 24.906844
| 122
| 0.635982
|
import argparse
import json
import time
import os
import sys
import logging
import socket
import traceback
import uuid
from collections import defaultdict
from threading import Thread, Lock
import mixingboard
import pip
from chassis.models import JobHistory
from flask import Flask, jsonify, request
from werkzeug.utils import secure_filename
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
argParser = argparse.ArgumentParser(description='Run the Quarry server.')
argParser.add_argument('-l', '--local', action='store_true', help='Run spark in local mode')
argParser.add_argument('-p', '--port', type=int, default=1989, help='Set the port')
argParser.add_argument('-H', '--host', type=str, default='127.0.0.1', help='Set the host')
argParser.add_argument('-u', '--iam-username', type=str, required=True, help='The IAM username for this account')
argParser.add_argument('--access-key-id', type=str, required=True, help='The access key id for this account')
argParser.add_argument('--access-key-secret', type=str, required=True, help='The access key secret for this account')
argParser.add_argument('-s', '--spark-home', type=str, required=True, help='The location of the local spark directory')
argParser.add_argument('-S', '--shark-home', type=str, required=True, help='The location of the local shark directory')
argParser.add_argument('-m', '--mysql-jar', type=str, required=True, help='The location of the mysql connector jar')
argParser.add_argument('-C', '--cluster', type=str, required=True, help='The cluster to add this instance to')
argParser.add_argument('-A', '--account', type=str, required=True, help='The account to add this instance to')
args, _ = argParser.parse_known_args()
LOCAL = args.local
HOST = args.host
PORT = args.port
IAM_USERNAME = args.iam_username
ACCESS_KEY_ID = args.access_key_id
ACCESS_KEY_SECRET = args.access_key_secret
SPARK_HOME = os.path.abspath(args.spark_home)
SHARK_HOME = os.path.abspath(args.shark_home)
MYSQL_JAR = os.path.abspath(args.mysql_jar)
CLUSTER = args.cluster
ACCOUNT = args.account
os.environ["SPARK_HOME"] = SPARK_HOME
os.environ["SHARK_HOME"] = SHARK_HOME
os.environ["SPARK_CLASSPATH"] = MYSQL_JAR
sys.path.append(os.path.join(SHARK_HOME, "python"))
sys.path.append(os.path.join(SPARK_HOME, "python"))
sys.path.append(os.path.join(SPARK_HOME, "python/lib/py4j-0.8.1-src.zip"))
from lib.runner import JobRunner
UPLOAD_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'jobs')
ALLOWED_EXTENSIONS = set(['py'])
def extension(filename):
return filename.rsplit('.', 1)[1]
def allowed_file(filename):
return '.' in filename and \
extension(filename) in ALLOWED_EXTENSIONS
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
sys.path.append(UPLOAD_FOLDER)
global jobs
global runner
jobs = defaultdict(dict)
runner = None
def getSparkMasters():
return mixingboard.getService("spark-master", account=ACCOUNT, cluster=CLUSTER)
def getSparkMaster():
return getSparkMasters()[0]
def createRunner(name, account, user, conf={}):
master = ""
if LOCAL:
master = "local"
else:
masterInfos = getSparkMasters()
master = "spark://%s" % ','.join(["%s:%s" % (info['host'], info['port']) for info in masterInfos])
return JobRunner(account=account, master=master, conf=conf, uploadFolder=UPLOAD_FOLDER,
iamUsername=IAM_USERNAME, accessKeyId=ACCESS_KEY_ID, accessKeySecret=ACCESS_KEY_SECRET)
@app.route('/spark/jobs/saved')
def list_saved_jobs():
global jobs
    account = request.args['account']
    user = request.args['user']
return jsonify(jobs[account])
@app.route('/spark/jobs/upload', methods = ['POST'])
def upload_job():
global jobs
account = request.form['account']
user = request.form['user']
uploadedFile = request.files['file']
jobName = request.form['name']
if uploadedFile and allowed_file(uploadedFile.filename):
jobFolder = os.path.join(app.config['UPLOAD_FOLDER'], account, jobName)
try:
os.makedirs(os.path.join(jobFolder, "deps"))
except OSError:
pass
savePath = os.path.join(jobFolder, "%s_main.py" % jobName)
uploadedFile.save(savePath)
jobDesc = {
"name": jobName,
"created": int(time.time()*1000),
"lastRun": 0,
"timesRan": 0,
"jobFolder": jobFolder,
"jobModule": jobName+"_main",
"jobPath": savePath,
"jobFiles": []
}
jobs[account][jobName] = jobDesc
return jsonify({"success":True,"message":"Successfully created a job named '%s'" % jobName})
else:
return jsonify({"success":False,"message":"You must upload a .py file"}), 400
@app.route('/spark/jobs/<jobName>/upload', methods = ['POST'])
def upload_file_to_job(jobName):
global jobs
account = request.form['account']
user = request.form['user']
job = jobs[account][jobName]
for uploadedFile in request.files.values():
if not allowed_file(uploadedFile.filename):
return jsonify({"success":False,"message":"You must upload a .py file"}), 400
for uploadedFile in request.files.values():
filename = secure_filename(uploadedFile.filename)
jobFolder = os.path.join(app.config['UPLOAD_FOLDER'], account, jobName)
savePath = os.path.join(jobFolder, filename)
uploadedFile.save(savePath)
job["jobFiles"].append(savePath)
return jsonify({"success":True,"message":"Successfully added %s file(s) to job '%s'" % (len(request.files), jobName)})
@app.route("/spark/job/async/status")
def async_job_status():
global runner
handle = request.args['handle']
account = request.args['account']
user = request.args['user']
return jsonify(runner.getHandleStatus(handle))
@app.route("/spark/job/async/progress")
def async_job_progress():
global runner
handle = request.args['handle']
account = request.args['account']
user = request.args['user']
return jsonify(runner.getHandleProgress(handle))
@app.route("/spark/job/async/cancel", methods=["POST"])
def async_job_cancel():
global runner
handle = request.form['handle']
account = request.form['account']
user = request.form['user']
runner.cancelJobWithHandle(handle)
return jsonify({})
@app.route("/spark/job/<jobName>/run", methods=['POST'])
def run_job_async(jobName):
global jobs
global runner
account = request.form['account']
user = request.form['user']
options = json.loads(request.form.get("options","{}"))
options['user'] = user
try:
job = jobs[account][jobName]
except KeyError:
return jsonify({
"error": "Job '%s' does not exist" % jobName
})
handle = runner.runJobAsync(job, options)
return jsonify({
"handle": handle,
"history": {
"jobHandle": handle,
"accountId": account,
"userId": user,
"event": "job_start",
"data": {
"jobName": options.get("jobName", "Untitled")
},
"jobType": options.get("jobType", "spark")
}
})
@app.route("/spark/job/<jobName>/run/sync", methods=['POST'])
def run_job_sync(jobName):
global runner
global jobs
account = request.form['account']
user = request.form['user']
options = json.loads(request.form.get("options","{}"))
try:
job = jobs[account][jobName]
except KeyError:
return jsonify({
"error": "Job '%s' does not exist" % jobName
})
result = runner.runJobSync(job, options)
return jsonify({
"result": result,
"history": {
"jobHandle": handle,
"accountId": account,
"userId": user,
"event": "job_start",
"data": {
"jobName": options.get("jobName", "Untitled")
}
}
})
@app.route("/spark/sql/run", methods=['POST'])
def run_sql_async():
global runner
account = request.form['account']
user = request.form['user']
options = json.loads(request.form.get("options","{}"))
options['user'] = user
sql = request.form['sql']
handle = runner.runSQLAsync(sql, options)
return jsonify({
"handle": handle
})
@app.route("/spark/sql/run/sync", methods=['POST'])
def run_sql_sync():
global runner
account = request.form['account']
user = request.form['user']
sql = request.form['sql']
result = runner.runSQLSync(sql)
return jsonify({
"rows": result
})
@app.route("/spark/status")
def status():
global runner
account = request.args['account']
user = request.args['user']
status = runner.getStatus()
return jsonify({
"status": status
})
@app.route("/spark/progress")
def progress():
global runner
account = request.args['account']
user = request.args['user']
jobType = request.args.get('jobType', None)
progress = runner.getProgress(jobType)
return jsonify({
"progress": progress
})
if __name__ == "__main__":
runner = createRunner('default', ACCOUNT, None)
mixingboard.exposeService('job-server', port=PORT, account=ACCOUNT, cluster=CLUSTER)
app.run(host=HOST, port=PORT, threaded=True)
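# A minimal client sketch for the routes above (an illustration, not part of
# the original file). The base URL, account, user, and job name are all
# placeholders; the form/query fields mirror what the handlers read.
#
#     import requests
#
#     base = "http://localhost:9000"  # assumed HOST/PORT
#     started = requests.post(base + "/spark/job/myjob/run",
#                             data={"account": "acct1", "user": "alice"}).json()
#     handle = started["handle"]
#     status = requests.get(base + "/spark/job/async/status",
#                           params={"handle": handle, "account": "acct1",
#                                   "user": "alice"}).json()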
# ---- catch/datasets/jugra.py | broadinstitute/catch @ 2fedca15f921116f580de8b2ae7ac9972932e59e | MIT | 366 bytes ----
"""Dataset with 'Jugra virus' sequences.
A dataset with 1 'Jugra virus' genome.
THIS PYTHON FILE WAS GENERATED BY A COMPUTER PROGRAM! DO NOT EDIT!
"""
import sys
from catch.datasets import GenomesDatasetSingleChrom
ds = GenomesDatasetSingleChrom(__name__, __file__, __spec__)
ds.add_fasta_path("data/jugra.fasta.gz", relative=True)
sys.modules[__name__] = ds
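# Consumption sketch (an addition, not part of the generated file): because the
# module replaces itself in sys.modules with the dataset object, importing it
# yields the GenomesDatasetSingleChrom instance directly.
#
#     import importlib
#     ds = importlib.import_module('catch.datasets.jugra')
#     # 'ds' is the GenomesDatasetSingleChrom built above, with the gzipped
#     # FASTA path already registered via add_fasta_path()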
# ---- django/db/migrations/autodetector.py | terceiro/django @ 5931d2e96ae94b204d146b7f751e0e804da74953 | PSF-2.0 / BSD-3-Clause | 64,078 bytes ----
import functools
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.utils import (
COMPILED_REGEX_TYPE, RegexObject, get_migration_name_timestamp,
)
from django.utils.topological_sort import stable_topological_sort
class MigrationAutodetector:
"""
Take a pair of ProjectStates and compare them to see what the first would
need doing to make it match the second (the second usually being the
project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
self.existing_apps = {app for app, model in from_state.models}
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of applicable changes.
Take a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if isinstance(obj, list):
return [self.deep_deconstruct(value) for value in obj]
elif isinstance(obj, tuple):
return tuple(self.deep_deconstruct(value) for value in obj)
elif isinstance(obj, dict):
return {
key: self.deep_deconstruct(value)
for key, value in obj.items()
}
elif isinstance(obj, functools.partial):
return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords))
elif isinstance(obj, COMPILED_REGEX_TYPE):
return RegexObject(obj)
elif isinstance(obj, type):
# If this is a type that implements 'deconstruct' as an instance method,
# avoid treating this as being deconstructible itself - see #22951
return obj
elif hasattr(obj, 'deconstruct'):
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
{
key: self.deep_deconstruct(value)
for key, value in kwargs.items()
},
)
else:
return obj
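    # Illustrative example (an addition, not original source text): for a plain
    # field the name is dropped and args/kwargs are recursed into, e.g.
    #     models.CharField(max_length=100)
    #     -> ('django.db.models.CharField', [], {'max_length': 100})
    # so two definitions compare equal however they were constructed.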
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to. Used for detecting renames (as,
of course, the related fields change during renames).
"""
fields_def = []
for name, field in sorted(fields):
deconstruction = self.deep_deconstruct(field)
if field.remote_field and field.remote_field.model:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
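    # A consequence worth noting (an observation, not original source text):
    # two ForeignKeys that differ only in their 'to' target compare as equal
    # here, which is exactly what rename detection needs, since renaming a
    # model changes what its inbound relations point to.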
def _detect_changes(self, convert_apps=None, graph=None):
"""
Return a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
convert_apps is the list of apps to convert to use migrations
(i.e. to make initial migrations for, in the usual case)
graph is an optional argument that, if provided, can help improve
dependency generation and avoid potential circular dependencies.
"""
# The first phase is generating all the operations for each app
# and gathering them into a big per-app list.
# Then go through that list, order it, and split into migrations to
# resolve dependencies caused by M2Ms and FKs.
self.generated_operations = {}
self.altered_indexes = {}
self.altered_constraints = {}
# Prepare some old/new state and model lists, separating
# proxy models and ignoring unmigrated apps.
self.old_apps = self.from_state.concrete_apps
self.new_apps = self.to_state.apps
self.old_model_keys = set()
self.old_proxy_keys = set()
self.old_unmanaged_keys = set()
self.new_model_keys = set()
self.new_proxy_keys = set()
self.new_unmanaged_keys = set()
for al, mn in self.from_state.models:
model = self.old_apps.get_model(al, mn)
if not model._meta.managed:
self.old_unmanaged_keys.add((al, mn))
elif al not in self.from_state.real_apps:
if model._meta.proxy:
self.old_proxy_keys.add((al, mn))
else:
self.old_model_keys.add((al, mn))
for al, mn in self.to_state.models:
model = self.new_apps.get_model(al, mn)
if not model._meta.managed:
self.new_unmanaged_keys.add((al, mn))
elif (
al not in self.from_state.real_apps or
(convert_apps and al in convert_apps)
):
if model._meta.proxy:
self.new_proxy_keys.add((al, mn))
else:
self.new_model_keys.add((al, mn))
# Renames have to come first
self.generate_renamed_models()
# Prepare lists of fields and generate through model map
self._prepare_field_lists()
self._generate_through_model_map()
# Generate non-rename model operations
self.generate_deleted_models()
self.generate_created_models()
self.generate_deleted_proxies()
self.generate_created_proxies()
self.generate_altered_options()
self.generate_altered_managers()
# Create the altered indexes and store them in self.altered_indexes.
# This avoids the same computation in generate_removed_indexes()
# and generate_added_indexes().
self.create_altered_indexes()
self.create_altered_constraints()
# Generate index removal operations before field is removed
self.generate_removed_constraints()
self.generate_removed_indexes()
# Generate field operations
self.generate_renamed_fields()
self.generate_removed_fields()
self.generate_added_fields()
self.generate_altered_fields()
self.generate_altered_unique_together()
self.generate_altered_index_together()
self.generate_added_indexes()
self.generate_added_constraints()
self.generate_altered_db_table()
self.generate_altered_order_with_respect_to()
self._sort_migrations()
self._build_migration_list(graph)
self._optimize_migrations()
return self.migrations
def _prepare_field_lists(self):
"""
Prepare field lists and a list of the fields that used through models
in the old state so dependencies can be made from the through model
deletion to the field that uses it.
"""
self.kept_model_keys = self.old_model_keys & self.new_model_keys
self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys
self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys
self.through_users = {}
self.old_field_keys = {
(app_label, model_name, x)
for app_label, model_name in self.kept_model_keys
for x, y in self.from_state.models[
app_label,
self.renamed_models.get((app_label, model_name), model_name)
].fields
}
self.new_field_keys = {
(app_label, model_name, x)
for app_label, model_name in self.kept_model_keys
for x, y in self.to_state.models[app_label, model_name].fields
}
def _generate_through_model_map(self):
"""Through model map generation."""
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields:
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None) and
not old_field.remote_field.through._meta.auto_created):
through_key = (
old_field.remote_field.through._meta.app_label,
old_field.remote_field.through._meta.model_name,
)
self.through_users[through_key] = (app_label, old_model_name, field_name)
@staticmethod
def _resolve_dependency(dependency):
"""
Return the resolved dependency and a boolean denoting whether or not
it was swappable.
"""
if dependency[0] != '__setting__':
return dependency, False
resolved_app_label, resolved_object_name = getattr(settings, dependency[1]).split('.')
return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True
def _build_migration_list(self, graph=None):
"""
Chop the lists of operations up into migrations with dependencies on
each other. Do this by going through an app's list of operations until
one is found that has an outgoing dependency that isn't in another
app's migration yet (hasn't been chopped off its list). Then chop off
the operations before it into a migration and move onto the next app.
        If the loop completes without doing anything, there's a circular
dependency (which _should_ be impossible as the operations are
all split at this point so they can't depend and be depended on).
"""
self.migrations = {}
num_ops = sum(len(x) for x in self.generated_operations.values())
chop_mode = False
while num_ops:
# On every iteration, we step through all the apps and see if there
# is a completed set of operations.
# If we find that a subset of the operations are complete we can
# try to chop it off from the rest and continue, but we only
# do this if we've already been through the list once before
# without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations):
chopped = []
dependencies = set()
for operation in list(self.generated_operations[app_label]):
deps_satisfied = True
operation_dependencies = set()
for dep in operation._auto_deps:
# Temporarily resolve the swappable dependency to
# prevent circular references. While keeping the
# dependency checks on the resolved model, add the
# swappable dependencies.
original_dep = dep
dep, is_swappable_dep = self._resolve_dependency(dep)
if dep[0] != app_label:
# External app dependency. See if it's not yet
# satisfied.
for other_operation in self.generated_operations.get(dep[0], []):
if self.check_dependency(other_operation, dep):
deps_satisfied = False
break
if not deps_satisfied:
break
else:
if is_swappable_dep:
operation_dependencies.add((original_dep[0], original_dep[1]))
elif dep[0] in self.migrations:
operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
else:
# If we can't find the other app, we add a first/last dependency,
# but only if we've already been through once and checked everything
if chop_mode:
# If the app already exists, we add a dependency on the last migration,
# as we don't know which migration contains the target field.
# If it's not yet migrated or has no migrations, we use __first__
if graph and graph.leaf_nodes(dep[0]):
operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
else:
operation_dependencies.add((dep[0], "__first__"))
else:
deps_satisfied = False
if deps_satisfied:
chopped.append(operation)
dependencies.update(operation_dependencies)
del self.generated_operations[app_label][0]
else:
break
# Make a migration! Well, only if there's stuff to put in it
if dependencies or chopped:
if not self.generated_operations[app_label] or chop_mode:
subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
instance.dependencies = list(dependencies)
instance.operations = chopped
instance.initial = app_label not in self.existing_apps
self.migrations.setdefault(app_label, []).append(instance)
chop_mode = False
else:
self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
new_num_ops = sum(len(x) for x in self.generated_operations.values())
if new_num_ops == num_ops:
if not chop_mode:
chop_mode = True
else:
raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
num_ops = new_num_ops
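    # Illustrative walk-through (an addition, not original source text): given
    # one operation per app where app1's operation depends on app2's, the first
    # pass chops app2's operation into migration "app2/auto_1" (it has no
    # unsatisfied external deps); on the next pass app1's operation finds app2
    # in self.migrations and is chopped with a dependency on ("app2", "auto_1").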
def _sort_migrations(self):
"""
Reorder to make things possible. Reordering may be needed so FKs work
nicely inside the same app.
"""
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
# Resolve intra-app dependencies to handle circular
# references involving a swappable model.
dep = self._resolve_dependency(dep)[0]
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for migrations in self.migrations.values():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
def check_dependency(self, operation, dependency):
"""
Return True if the given operation depends on the given dependency,
False otherwise.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name_lower == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name_lower == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Field is removed and part of an index/unique_together
elif dependency[2] is not None and dependency[3] == "foo_together_change":
return (
isinstance(operation, (operations.AlterUniqueTogether,
operations.AlterIndexTogether)) and
operation.name_lower == dependency[1].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency,))
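    # Besides the True/False create/remove flags, the fourth element of a
    # dependency tuple may be one of the special markers handled above:
    # "alter", "order_wrt_unset", or "foo_together_change".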
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
"""
Place potential swappable models first in lists of created models (only
real way to solve #22783).
"""
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
def generate_renamed_models(self):
"""
Find any renamed models, generate the operations for them, and remove
the old entry from the model lists. Must be run before other
model-level generation.
"""
self.renamed_models = {}
self.renamed_models_rel = {}
added_models = self.new_model_keys - self.old_model_keys
for app_label, model_name in sorted(added_models):
model_state = self.to_state.models[app_label, model_name]
model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
removed_models = self.old_model_keys - self.new_model_keys
for rem_app_label, rem_model_name in removed_models:
if rem_app_label == app_label:
rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
if model_fields_def == rem_model_fields_def:
if self.questioner.ask_rename_model(rem_model_state, model_state):
model_opts = self.new_apps.get_model(app_label, model_name)._meta
dependencies = []
for field in model_opts.get_fields():
if field.is_relation:
dependencies.extend(self._get_dependencies_for_foreign_key(field))
self.add_operation(
app_label,
operations.RenameModel(
old_name=rem_model_state.name,
new_name=model_state.name,
),
dependencies=dependencies,
)
self.renamed_models[app_label, model_name] = rem_model_name
renamed_models_rel_key = '%s.%s' % (rem_model_state.app_label, rem_model_state.name)
self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % (
model_state.app_label,
model_state.name,
)
self.old_model_keys.remove((rem_app_label, rem_model_name))
self.old_model_keys.add((app_label, model_name))
break
def generate_created_models(self):
"""
Find all new models (both managed and unmanaged) and make create
operations for them as well as separate operations to create any
foreign key or M2M relationships (these are optimized later, if
possible).
Defer any model options that refer to collections of fields that might
be deferred (e.g. unique_together, index_together).
"""
old_keys = self.old_model_keys | self.old_unmanaged_keys
added_models = self.new_model_keys - old_keys
added_unmanaged_models = self.new_unmanaged_keys - old_keys
all_added_models = chain(
sorted(added_models, key=self.swappable_first_key, reverse=True),
sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
)
for app_label, model_name in all_added_models:
model_state = self.to_state.models[app_label, model_name]
model_opts = self.new_apps.get_model(app_label, model_name)._meta
# Gather related fields
related_fields = {}
primary_key_rel = None
for field in model_opts.local_fields:
if field.remote_field:
if field.remote_field.model:
if field.primary_key:
primary_key_rel = field.remote_field.model
elif not field.remote_field.parent_link:
related_fields[field.name] = field
                    # through will be None on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if (getattr(field.remote_field, "through", None) and
not field.remote_field.through._meta.auto_created):
related_fields[field.name] = field
for field in model_opts.local_many_to_many:
if field.remote_field.model:
related_fields[field.name] = field
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
# Are there indexes/unique|index_together to defer?
indexes = model_state.options.pop('indexes')
constraints = model_state.options.pop('constraints')
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
# Depend on the deletion of any possible proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Depend on the other end of the primary key if it's a relation
if primary_key_rel:
dependencies.append((
primary_key_rel._meta.app_label,
primary_key_rel._meta.object_name,
None,
True
))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[d for d in model_state.fields if d[0] not in related_fields],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
dependencies=dependencies,
beginning=True,
)
# Don't add operations which modify the database for unmanaged models
if not model_opts.managed:
continue
# Generate operations for each related field
for name, field in sorted(related_fields.items()):
dependencies = self._get_dependencies_for_foreign_key(field)
# Depend on our own model being created
dependencies.append((app_label, model_name, None, True))
# Make operation
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=name,
field=field,
),
dependencies=list(set(dependencies)),
)
# Generate other opns
related_dependencies = [
(app_label, model_name, name, True)
for name in sorted(related_fields)
]
related_dependencies.append((app_label, model_name, None, True))
for index in indexes:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
),
dependencies=related_dependencies,
)
for constraint in constraints:
self.add_operation(
app_label,
operations.AddConstraint(
model_name=model_name,
constraint=constraint,
),
dependencies=related_dependencies,
)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=unique_together,
),
dependencies=related_dependencies
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=index_together,
),
dependencies=related_dependencies
)
if order_with_respect_to:
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=order_with_respect_to,
),
dependencies=[
(app_label, model_name, order_with_respect_to, True),
(app_label, model_name, None, True),
]
)
# Fix relationships if the model changed from a proxy model to a
# concrete model.
if (app_label, model_name) in self.old_proxy_keys:
for related_object in model_opts.related_objects:
self.add_operation(
related_object.related_model._meta.app_label,
operations.AlterField(
model_name=related_object.related_model._meta.object_name,
name=related_object.field.name,
field=related_object.field,
),
dependencies=[(app_label, model_name, None, True)],
)
def generate_created_proxies(self):
"""
        Make CreateModel statements for proxy models. Use the same statements
        as for concrete models to keep code duplication down; for proxy models
        it's safe to skip all the field handling and just emit a bare
        CreateModel operation.
"""
added = self.new_proxy_keys - self.old_proxy_keys
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy")
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
(app_label, model_name, None, False),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
# Depend on the deletion of any possible non-proxy version of us
dependencies=dependencies,
)
def generate_deleted_models(self):
"""
Find all deleted models (managed and unmanaged) and make delete
operations for them as well as separate operations to delete any
foreign key or M2M relationships (these are optimized later, if
possible).
Also bring forward removal of any model options that refer to
collections of fields - the inverse of generate_created_models().
"""
new_keys = self.new_model_keys | self.new_unmanaged_keys
deleted_models = self.old_model_keys - new_keys
deleted_unmanaged_models = self.old_unmanaged_keys - new_keys
all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
for app_label, model_name in all_deleted_models:
model_state = self.from_state.models[app_label, model_name]
model = self.old_apps.get_model(app_label, model_name)
# Gather related fields
related_fields = {}
for field in model._meta.local_fields:
if field.remote_field:
if field.remote_field.model:
related_fields[field.name] = field
                    # through will be None on M2Ms on swapped-out models;
# we can treat lack of through as auto_created=True, though.
if (getattr(field.remote_field, "through", None) and
not field.remote_field.through._meta.auto_created):
related_fields[field.name] = field
for field in model._meta.local_many_to_many:
if field.remote_field.model:
related_fields[field.name] = field
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
# Generate option removal first
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=None,
)
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=None,
)
)
# Then remove each related field
for name in sorted(related_fields):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=name,
)
)
# Finally, remove the model.
# This depends on both the removal/alteration of all incoming fields
# and the removal of all its own related fields, and if it's
# a through model the field that references it.
dependencies = []
for related_object in model._meta.related_objects:
related_object_app_label = related_object.related_model._meta.app_label
object_name = related_object.related_model._meta.object_name
field_name = related_object.field.name
dependencies.append((related_object_app_label, object_name, field_name, False))
if not related_object.many_to_many:
dependencies.append((related_object_app_label, object_name, field_name, "alter"))
for name in sorted(related_fields):
dependencies.append((app_label, model_name, name, False))
# We're referenced in another field's through=
through_user = self.through_users.get((app_label, model_state.name_lower))
if through_user:
dependencies.append((through_user[0], through_user[1], through_user[2], False))
# Finally, make the operation, deduping any dependencies
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
dependencies=list(set(dependencies)),
)
def generate_deleted_proxies(self):
"""Make DeleteModel options for proxy models."""
deleted = self.old_proxy_keys - self.new_proxy_keys
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy")
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
def generate_renamed_fields(self):
"""Work out renamed fields."""
self.renamed_fields = {}
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Scan to see if this is actually a rename!
field_dec = self.deep_deconstruct(field)
for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
if rem_app_label == app_label and rem_model_name == model_name:
old_field = old_model_state.get_field_by_name(rem_field_name)
old_field_dec = self.deep_deconstruct(old_field)
if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]:
old_rel_to = old_field_dec[2]['to']
if old_rel_to in self.renamed_models_rel:
old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
old_field.set_attributes_from_name(rem_field_name)
old_db_column = old_field.get_attname_column()[1]
if (old_field_dec == field_dec or (
# Was the field renamed and db_column equal to the
# old field's column added?
old_field_dec[0:2] == field_dec[0:2] and
dict(old_field_dec[2], db_column=old_db_column) == field_dec[2])):
if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
self.add_operation(
app_label,
operations.RenameField(
model_name=model_name,
old_name=rem_field_name,
new_name=field_name,
)
)
self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
self.old_field_keys.add((app_label, model_name, field_name))
self.renamed_fields[app_label, model_name, field_name] = rem_field_name
break
def generate_added_fields(self):
"""Make AddField operations."""
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Fields that are foreignkeys/m2ms depend on stuff
dependencies = []
if field.remote_field and field.remote_field.model:
dependencies.extend(self._get_dependencies_for_foreign_key(field))
# You can't just add NOT NULL fields with no default or fields
# which don't allow empty strings as default.
time_fields = (models.DateField, models.DateTimeField, models.TimeField)
preserve_default = (
field.null or field.has_default() or field.many_to_many or
(field.blank and field.empty_strings_allowed) or
(isinstance(field, time_fields) and field.auto_now)
)
if not preserve_default:
field = field.clone()
if isinstance(field, time_fields) and field.auto_now_add:
field.default = self.questioner.ask_auto_now_add_addition(field_name, model_name)
else:
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
"""Make RemoveField operations."""
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# We might need to depend on the removal of an
# order_with_respect_to or index/unique_together operation;
# this is safely ignored if there isn't one
dependencies=[
(app_label, model_name, field_name, "order_wrt_unset"),
(app_label, model_name, field_name, "foo_together_change"),
],
)
def generate_altered_fields(self):
"""
        Make AlterField operations, or possibly RemoveField/AddField pairs if
        altering in place isn't possible.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys & self.new_field_keys):
# Did the field change?
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
dependencies = []
# Implement any model renames on relations; these are handled by RenameModel
# so we need to exclude them from the comparison
if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None):
rename_key = (
new_field.remote_field.model._meta.app_label,
new_field.remote_field.model._meta.model_name,
)
if rename_key in self.renamed_models:
new_field.remote_field.model = old_field.remote_field.model
# Handle ForeignKey which can only have a single to_field.
remote_field_name = getattr(new_field.remote_field, 'field_name', None)
if remote_field_name:
to_field_rename_key = rename_key + (remote_field_name,)
if to_field_rename_key in self.renamed_fields:
new_field.remote_field.field_name = old_field.remote_field.field_name
# Handle ForeignObjects which can have multiple from_fields/to_fields.
from_fields = getattr(new_field, 'from_fields', None)
if from_fields:
from_rename_key = (app_label, model_name)
new_field.from_fields = tuple([
self.renamed_fields.get(from_rename_key + (from_field,), from_field)
for from_field in from_fields
])
new_field.to_fields = tuple([
self.renamed_fields.get(rename_key + (to_field,), to_field)
for to_field in new_field.to_fields
])
dependencies.extend(self._get_dependencies_for_foreign_key(new_field))
if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "through", None):
rename_key = (
new_field.remote_field.through._meta.app_label,
new_field.remote_field.through._meta.model_name,
)
if rename_key in self.renamed_models:
new_field.remote_field.through = old_field.remote_field.through
old_field_dec = self.deep_deconstruct(old_field)
new_field_dec = self.deep_deconstruct(new_field)
if old_field_dec != new_field_dec:
both_m2m = old_field.many_to_many and new_field.many_to_many
neither_m2m = not old_field.many_to_many and not new_field.many_to_many
if both_m2m or neither_m2m:
# Either both fields are m2m or neither is
preserve_default = True
if (old_field.null and not new_field.null and not new_field.has_default() and
not new_field.many_to_many):
field = new_field.clone()
new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
if new_default is not models.NOT_PROVIDED:
field.default = new_default
preserve_default = False
else:
field = new_field
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
else:
# We cannot alter between m2m and concrete fields
self._generate_removed_field(app_label, model_name, field_name)
self._generate_added_field(app_label, model_name, field_name)
def create_altered_indexes(self):
option_name = operations.AddIndex.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_indexes = old_model_state.options[option_name]
new_indexes = new_model_state.options[option_name]
add_idx = [idx for idx in new_indexes if idx not in old_indexes]
rem_idx = [idx for idx in old_indexes if idx not in new_indexes]
self.altered_indexes.update({
(app_label, model_name): {
'added_indexes': add_idx, 'removed_indexes': rem_idx,
}
})
def generate_added_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes['added_indexes']:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
)
)
def generate_removed_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes['removed_indexes']:
self.add_operation(
app_label,
operations.RemoveIndex(
model_name=model_name,
name=index.name,
)
)
def create_altered_constraints(self):
option_name = operations.AddConstraint.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_constraints = old_model_state.options[option_name]
new_constraints = new_model_state.options[option_name]
add_constraints = [c for c in new_constraints if c not in old_constraints]
rem_constraints = [c for c in old_constraints if c not in new_constraints]
self.altered_constraints.update({
(app_label, model_name): {
'added_constraints': add_constraints, 'removed_constraints': rem_constraints,
}
})
def generate_added_constraints(self):
for (app_label, model_name), alt_constraints in self.altered_constraints.items():
for constraint in alt_constraints['added_constraints']:
self.add_operation(
app_label,
operations.AddConstraint(
model_name=model_name,
constraint=constraint,
)
)
def generate_removed_constraints(self):
for (app_label, model_name), alt_constraints in self.altered_constraints.items():
for constraint in alt_constraints['removed_constraints']:
self.add_operation(
app_label,
operations.RemoveConstraint(
model_name=model_name,
name=constraint.name,
)
)
def _get_dependencies_for_foreign_key(self, field):
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.remote_field.model._meta.app_label
dep_object_name = field.remote_field.model._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
dependencies.append((
field.remote_field.through._meta.app_label,
field.remote_field.through._meta.object_name,
None,
True,
))
return dependencies
def _generate_altered_foo_together(self, operation):
option_name = operation.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name)
old_value = {
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
} if old_value else set()
new_value = new_model_state.options.get(option_name)
new_value = set(new_value) if new_value else set()
if old_value != new_value:
dependencies = []
for foo_togethers in new_value:
for field_name in foo_togethers:
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
if field.remote_field and field.remote_field.model:
dependencies.extend(self._get_dependencies_for_foreign_key(field))
self.add_operation(
app_label,
operation(
name=model_name,
**{option_name: new_value}
),
dependencies=dependencies,
)
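    # Example (illustrative, not from the original source): if a model's
    # unique_together goes from {("a", "b")} to {("a", "c")} purely because
    # field "b" was renamed to "c", the rename mapping above rewrites the old
    # value to {("a", "c")} first, so no spurious Alter*Together operation is
    # emitted.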
def generate_altered_unique_together(self):
self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys, self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get('db_table')
new_db_table_name = new_model_state.options.get('db_table')
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
)
)
def generate_altered_options(self):
"""
Work out if any non-schema-affecting options have changed and make an
operation to represent them in state changes (in case Python code in
migrations needs them).
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys,
self.kept_unmanaged_keys,
# unmanaged converted to managed
self.old_unmanaged_keys & self.new_model_keys,
# managed converted to unmanaged
self.old_model_keys & self.new_unmanaged_keys,
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = {
key: value for key, value in old_model_state.options.items()
if key in AlterModelOptions.ALTER_OPTION_KEYS
}
new_options = {
key: value for key, value in new_model_state.options.items()
if key in AlterModelOptions.ALTER_OPTION_KEYS
}
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if (old_model_state.options.get("order_with_respect_to") !=
new_model_state.options.get("order_with_respect_to")):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to"):
dependencies.append((
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
))
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get('order_with_respect_to'),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
)
)
def arrange_for_graph(self, changes, graph, migration_name=None):
"""
Take a result from changes() and a MigrationGraph, and fix the names
and dependencies of the changes so they extend the graph from the leaf
nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
else:
new_name = "%04i_%s" % (
next_number,
migration_name or self.suggest_name(migration.operations)[:100],
)
name_map[(app_label, migration.name)] = (app_label, new_name)
next_number += 1
migration.name = new_name
# Now fix dependencies
for migrations in changes.values():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
        Take the changes from arrange_for_graph() and a set of app labels, and
        return a modified set of changes that trims out as many migrations as
        possible that don't belong to app_labels. Note that some other
        migrations may still be present, as they may be required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
required_apps.update(*[app_dependencies.get(app_label, ()) for app_label in required_apps])
# Remove all migrations that aren't needed
for app_label in list(changes):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggest a name for the migration they might
        represent. Names are not guaranteed to be unique, but some effort is
        put into the fallback name to avoid VCS conflicts if possible.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name_lower
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name_lower
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif ops:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name_lower for o in ops))
return "auto_%s" % get_migration_name_timestamp()
@classmethod
def parse_number(cls, name):
"""
Given a migration name, try to extract a number from the beginning of
it. If no number is found, return None.
"""
match = re.match(r'^\d+', name)
if match:
return int(match.group())
return None
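# A hedged sketch (not part of the original module) of how this class is
# typically driven, mirroring the makemigrations flow; it assumes a configured
# Django project and uses only public entry points from this Django version.
#
#     from django.apps import apps
#     from django.db.migrations.loader import MigrationLoader
#     from django.db.migrations.questioner import NonInteractiveMigrationQuestioner
#     from django.db.migrations.state import ProjectState
#
#     loader = MigrationLoader(None, ignore_no_migrations=True)
#     autodetector = MigrationAutodetector(
#         loader.project_state(),        # from_state: what existing migrations build
#         ProjectState.from_apps(apps),  # to_state: the current model definitions
#         NonInteractiveMigrationQuestioner(),
#     )
#     changes = autodetector.changes(graph=loader.graph)
#     # 'changes' maps app labels to lists of ready-to-write Migration objects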
import functools
import re
from itertools import chain
from django.conf import settings
from django.db import models
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.migrations.operations.models import AlterModelOptions
from django.db.migrations.optimizer import MigrationOptimizer
from django.db.migrations.questioner import MigrationQuestioner
from django.db.migrations.utils import (
COMPILED_REGEX_TYPE, RegexObject, get_migration_name_timestamp,
)
from django.utils.topological_sort import stable_topological_sort
class MigrationAutodetector:
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
self.existing_apps = {app for app, model in from_state.models}
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def deep_deconstruct(self, obj):
if isinstance(obj, list):
return [self.deep_deconstruct(value) for value in obj]
elif isinstance(obj, tuple):
return tuple(self.deep_deconstruct(value) for value in obj)
elif isinstance(obj, dict):
return {
key: self.deep_deconstruct(value)
for key, value in obj.items()
}
elif isinstance(obj, functools.partial):
return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords))
elif isinstance(obj, COMPILED_REGEX_TYPE):
return RegexObject(obj)
elif isinstance(obj, type):
return obj
elif hasattr(obj, 'deconstruct'):
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
{
key: self.deep_deconstruct(value)
for key, value in kwargs.items()
},
)
else:
return obj
def only_relation_agnostic_fields(self, fields):
fields_def = []
for name, field in sorted(fields):
deconstruction = self.deep_deconstruct(field)
if field.remote_field and field.remote_field.model:
del deconstruction[2]['to']
fields_def.append(deconstruction)
return fields_def
def _detect_changes(self, convert_apps=None, graph=None):
self.generated_operations = {}
self.altered_indexes = {}
self.altered_constraints = {}
self.old_apps = self.from_state.concrete_apps
self.new_apps = self.to_state.apps
self.old_model_keys = set()
self.old_proxy_keys = set()
self.old_unmanaged_keys = set()
self.new_model_keys = set()
self.new_proxy_keys = set()
self.new_unmanaged_keys = set()
for al, mn in self.from_state.models:
model = self.old_apps.get_model(al, mn)
if not model._meta.managed:
self.old_unmanaged_keys.add((al, mn))
elif al not in self.from_state.real_apps:
if model._meta.proxy:
self.old_proxy_keys.add((al, mn))
else:
self.old_model_keys.add((al, mn))
for al, mn in self.to_state.models:
model = self.new_apps.get_model(al, mn)
if not model._meta.managed:
self.new_unmanaged_keys.add((al, mn))
elif (
al not in self.from_state.real_apps or
(convert_apps and al in convert_apps)
):
if model._meta.proxy:
self.new_proxy_keys.add((al, mn))
else:
self.new_model_keys.add((al, mn))
self.generate_renamed_models()
self._prepare_field_lists()
self._generate_through_model_map()
self.generate_deleted_models()
self.generate_created_models()
self.generate_deleted_proxies()
self.generate_created_proxies()
self.generate_altered_options()
self.generate_altered_managers()
self.create_altered_indexes()
self.create_altered_constraints()
self.generate_removed_constraints()
self.generate_removed_indexes()
self.generate_renamed_fields()
self.generate_removed_fields()
self.generate_added_fields()
self.generate_altered_fields()
self.generate_altered_unique_together()
self.generate_altered_index_together()
self.generate_added_indexes()
self.generate_added_constraints()
self.generate_altered_db_table()
self.generate_altered_order_with_respect_to()
self._sort_migrations()
self._build_migration_list(graph)
self._optimize_migrations()
return self.migrations
def _prepare_field_lists(self):
self.kept_model_keys = self.old_model_keys & self.new_model_keys
self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys
self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys
self.through_users = {}
self.old_field_keys = {
(app_label, model_name, x)
for app_label, model_name in self.kept_model_keys
for x, y in self.from_state.models[
app_label,
self.renamed_models.get((app_label, model_name), model_name)
].fields
}
self.new_field_keys = {
(app_label, model_name, x)
for app_label, model_name in self.kept_model_keys
for x, y in self.to_state.models[app_label, model_name].fields
}
def _generate_through_model_map(self):
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields:
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name)
if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None) and
not old_field.remote_field.through._meta.auto_created):
through_key = (
old_field.remote_field.through._meta.app_label,
old_field.remote_field.through._meta.model_name,
)
self.through_users[through_key] = (app_label, old_model_name, field_name)
@staticmethod
def _resolve_dependency(dependency):
if dependency[0] != '__setting__':
return dependency, False
resolved_app_label, resolved_object_name = getattr(settings, dependency[1]).split('.')
return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True
def _build_migration_list(self, graph=None):
self.migrations = {}
num_ops = sum(len(x) for x in self.generated_operations.values())
chop_mode = False
while num_ops:
# without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations):
chopped = []
dependencies = set()
for operation in list(self.generated_operations[app_label]):
deps_satisfied = True
operation_dependencies = set()
for dep in operation._auto_deps:
# Temporarily resolve the swappable dependency to
# prevent circular references. While keeping the
# dependency checks on the resolved model, add the
# swappable dependencies.
original_dep = dep
dep, is_swappable_dep = self._resolve_dependency(dep)
if dep[0] != app_label:
# External app dependency. See if it's not yet
for other_operation in self.generated_operations.get(dep[0], []):
if self.check_dependency(other_operation, dep):
deps_satisfied = False
break
if not deps_satisfied:
break
else:
if is_swappable_dep:
operation_dependencies.add((original_dep[0], original_dep[1]))
elif dep[0] in self.migrations:
operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name))
else:
# but only if we've already been through once and checked everything
if chop_mode:
# If it's not yet migrated or has no migrations, we use __first__
if graph and graph.leaf_nodes(dep[0]):
operation_dependencies.add(graph.leaf_nodes(dep[0])[0])
else:
operation_dependencies.add((dep[0], "__first__"))
else:
deps_satisfied = False
if deps_satisfied:
chopped.append(operation)
dependencies.update(operation_dependencies)
del self.generated_operations[app_label][0]
else:
break
if dependencies or chopped:
if not self.generated_operations[app_label] or chop_mode:
subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label)
instance.dependencies = list(dependencies)
instance.operations = chopped
instance.initial = app_label not in self.existing_apps
self.migrations.setdefault(app_label, []).append(instance)
chop_mode = False
else:
self.generated_operations[app_label] = chopped + self.generated_operations[app_label]
new_num_ops = sum(len(x) for x in self.generated_operations.values())
if new_num_ops == num_ops:
if not chop_mode:
chop_mode = True
else:
raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations)
num_ops = new_num_ops
def _sort_migrations(self):
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
dependency_graph = {op: set() for op in ops}
for op in ops:
for dep in op._auto_deps:
# Resolve intra-app dependencies to handle circular
# references involving a swappable model.
dep = self._resolve_dependency(dep)[0]
if dep[0] == app_label:
for op2 in ops:
if self.check_dependency(op2, dep):
dependency_graph[op].add(op2)
# we use a stable sort for deterministic tests & general behavior
self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph)
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for migrations in self.migrations.values():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label)
    def check_dependency(self, operation, dependency):
        """
        Return True if the given operation can satisfy the given dependency
        tuple of (app_label, object_name, field_name, matcher).
        """
# Created model
if dependency[2] is None and dependency[3] is True:
return (
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower()
)
# Created field
elif dependency[2] is not None and dependency[3] is True:
return (
(
isinstance(operation, operations.CreateModel) and
operation.name_lower == dependency[1].lower() and
any(dependency[2] == x for x, y in operation.fields)
) or
(
isinstance(operation, operations.AddField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
)
# Removed field
elif dependency[2] is not None and dependency[3] is False:
return (
isinstance(operation, operations.RemoveField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# Removed model
elif dependency[2] is None and dependency[3] is False:
return (
isinstance(operation, operations.DeleteModel) and
operation.name_lower == dependency[1].lower()
)
# Field being altered
elif dependency[2] is not None and dependency[3] == "alter":
return (
isinstance(operation, operations.AlterField) and
operation.model_name_lower == dependency[1].lower() and
operation.name_lower == dependency[2].lower()
)
# order_with_respect_to being unset for a field
elif dependency[2] is not None and dependency[3] == "order_wrt_unset":
return (
isinstance(operation, operations.AlterOrderWithRespectTo) and
operation.name_lower == dependency[1].lower() and
(operation.order_with_respect_to or "").lower() != dependency[2].lower()
)
# Field is removed and part of an index/unique_together
elif dependency[2] is not None and dependency[3] == "foo_together_change":
return (
isinstance(operation, (operations.AlterUniqueTogether,
operations.AlterIndexTogether)) and
operation.name_lower == dependency[1].lower()
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency,))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
    def swappable_first_key(self, item):
        """
        Sorting key used so that potentially swappable models (and the model
        named by settings.AUTH_USER_MODEL) end up created first.
        """
try:
model = self.new_apps.get_model(item[0], item[1])
base_names = [base.__name__ for base in model.__bases__]
string_version = "%s.%s" % (item[0], item[1])
if (
model._meta.swappable or
"AbstractUser" in base_names or
"AbstractBaseUser" in base_names or
settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
    def generate_renamed_models(self):
        """
        Find renamed models by comparing added models against removed models
        with identical relation-agnostic field definitions; on a confirmed
        rename, emit RenameModel instead of a create/delete pair.
        """
self.renamed_models = {}
self.renamed_models_rel = {}
added_models = self.new_model_keys - self.old_model_keys
for app_label, model_name in sorted(added_models):
model_state = self.to_state.models[app_label, model_name]
model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
removed_models = self.old_model_keys - self.new_model_keys
for rem_app_label, rem_model_name in removed_models:
if rem_app_label == app_label:
rem_model_state = self.from_state.models[rem_app_label, rem_model_name]
rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields)
if model_fields_def == rem_model_fields_def:
if self.questioner.ask_rename_model(rem_model_state, model_state):
model_opts = self.new_apps.get_model(app_label, model_name)._meta
dependencies = []
for field in model_opts.get_fields():
if field.is_relation:
dependencies.extend(self._get_dependencies_for_foreign_key(field))
self.add_operation(
app_label,
operations.RenameModel(
old_name=rem_model_state.name,
new_name=model_state.name,
),
dependencies=dependencies,
)
self.renamed_models[app_label, model_name] = rem_model_name
renamed_models_rel_key = '%s.%s' % (rem_model_state.app_label, rem_model_state.name)
self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % (
model_state.app_label,
model_state.name,
)
self.old_model_keys.remove((rem_app_label, rem_model_name))
self.old_model_keys.add((app_label, model_name))
break
    def generate_created_models(self):
        """
        Make CreateModel operations for newly added models, splitting related
        fields, indexes, constraints, and ordering options into follow-up
        operations with the appropriate dependencies.
        """
old_keys = self.old_model_keys | self.old_unmanaged_keys
added_models = self.new_model_keys - old_keys
added_unmanaged_models = self.new_unmanaged_keys - old_keys
all_added_models = chain(
sorted(added_models, key=self.swappable_first_key, reverse=True),
sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True)
)
for app_label, model_name in all_added_models:
model_state = self.to_state.models[app_label, model_name]
model_opts = self.new_apps.get_model(app_label, model_name)._meta
related_fields = {}
primary_key_rel = None
for field in model_opts.local_fields:
if field.remote_field:
if field.remote_field.model:
if field.primary_key:
primary_key_rel = field.remote_field.model
elif not field.remote_field.parent_link:
related_fields[field.name] = field
if (getattr(field.remote_field, "through", None) and
not field.remote_field.through._meta.auto_created):
related_fields[field.name] = field
for field in model_opts.local_many_to_many:
if field.remote_field.model:
related_fields[field.name] = field
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
indexes = model_state.options.pop('indexes')
constraints = model_state.options.pop('constraints')
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
order_with_respect_to = model_state.options.pop('order_with_respect_to', None)
dependencies = [
(app_label, model_name, None, False),
]
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
if primary_key_rel:
dependencies.append((
primary_key_rel._meta.app_label,
primary_key_rel._meta.object_name,
None,
True
))
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[d for d in model_state.fields if d[0] not in related_fields],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
dependencies=dependencies,
beginning=True,
)
# Don't add operations which modify the database for unmanaged models
if not model_opts.managed:
continue
for name, field in sorted(related_fields.items()):
dependencies = self._get_dependencies_for_foreign_key(field)
dependencies.append((app_label, model_name, None, True))
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=name,
field=field,
),
dependencies=list(set(dependencies)),
)
related_dependencies = [
(app_label, model_name, name, True)
for name in sorted(related_fields)
]
related_dependencies.append((app_label, model_name, None, True))
for index in indexes:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
),
dependencies=related_dependencies,
)
for constraint in constraints:
self.add_operation(
app_label,
operations.AddConstraint(
model_name=model_name,
constraint=constraint,
),
dependencies=related_dependencies,
)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=unique_together,
),
dependencies=related_dependencies
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=index_together,
),
dependencies=related_dependencies
)
if order_with_respect_to:
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=order_with_respect_to,
),
dependencies=[
(app_label, model_name, order_with_respect_to, True),
(app_label, model_name, None, True),
]
)
if (app_label, model_name) in self.old_proxy_keys:
for related_object in model_opts.related_objects:
self.add_operation(
related_object.related_model._meta.app_label,
operations.AlterField(
model_name=related_object.related_model._meta.object_name,
name=related_object.field.name,
field=related_object.field,
),
dependencies=[(app_label, model_name, None, True)],
)
def generate_created_proxies(self):
added = self.new_proxy_keys - self.old_proxy_keys
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy")
dependencies = [
(app_label, model_name, None, False),
]
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append((base_app_label, base_name, None, True))
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
dependencies=dependencies,
)
def generate_deleted_models(self):
new_keys = self.new_model_keys | self.new_unmanaged_keys
deleted_models = self.old_model_keys - new_keys
deleted_unmanaged_models = self.old_unmanaged_keys - new_keys
all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models))
for app_label, model_name in all_deleted_models:
model_state = self.from_state.models[app_label, model_name]
model = self.old_apps.get_model(app_label, model_name)
related_fields = {}
for field in model._meta.local_fields:
if field.remote_field:
if field.remote_field.model:
related_fields[field.name] = field
if (getattr(field.remote_field, "through", None) and
not field.remote_field.through._meta.auto_created):
related_fields[field.name] = field
for field in model._meta.local_many_to_many:
if field.remote_field.model:
related_fields[field.name] = field
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
related_fields[field.name] = field
unique_together = model_state.options.pop('unique_together', None)
index_together = model_state.options.pop('index_together', None)
if unique_together:
self.add_operation(
app_label,
operations.AlterUniqueTogether(
name=model_name,
unique_together=None,
)
)
if index_together:
self.add_operation(
app_label,
operations.AlterIndexTogether(
name=model_name,
index_together=None,
)
)
for name in sorted(related_fields):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=name,
)
)
            # Deleting the model depends on both the removal of all incoming
            # fields and the removal of all its own related fields, and if
            # it's a through model, the field that references it.
dependencies = []
for related_object in model._meta.related_objects:
related_object_app_label = related_object.related_model._meta.app_label
object_name = related_object.related_model._meta.object_name
field_name = related_object.field.name
dependencies.append((related_object_app_label, object_name, field_name, False))
if not related_object.many_to_many:
dependencies.append((related_object_app_label, object_name, field_name, "alter"))
for name in sorted(related_fields):
dependencies.append((app_label, model_name, name, False))
# We're referenced in another field's through=
through_user = self.through_users.get((app_label, model_state.name_lower))
if through_user:
dependencies.append((through_user[0], through_user[1], through_user[2], False))
# Finally, make the operation, deduping any dependencies
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
dependencies=list(set(dependencies)),
)
def generate_deleted_proxies(self):
deleted = self.old_proxy_keys - self.new_proxy_keys
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy")
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
def generate_renamed_fields(self):
self.renamed_fields = {}
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
# Scan to see if this is actually a rename!
field_dec = self.deep_deconstruct(field)
for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys):
if rem_app_label == app_label and rem_model_name == model_name:
old_field = old_model_state.get_field_by_name(rem_field_name)
old_field_dec = self.deep_deconstruct(old_field)
if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]:
old_rel_to = old_field_dec[2]['to']
if old_rel_to in self.renamed_models_rel:
old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to]
old_field.set_attributes_from_name(rem_field_name)
old_db_column = old_field.get_attname_column()[1]
if (old_field_dec == field_dec or (
# Was the field renamed and db_column equal to the
# old field's column added?
old_field_dec[0:2] == field_dec[0:2] and
dict(old_field_dec[2], db_column=old_db_column) == field_dec[2])):
if self.questioner.ask_rename(model_name, rem_field_name, field_name, field):
self.add_operation(
app_label,
operations.RenameField(
model_name=model_name,
old_name=rem_field_name,
new_name=field_name,
)
)
self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
self.old_field_keys.add((app_label, model_name, field_name))
self.renamed_fields[app_label, model_name, field_name] = rem_field_name
break
def generate_added_fields(self):
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
self._generate_added_field(app_label, model_name, field_name)
def _generate_added_field(self, app_label, model_name, field_name):
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
dependencies = []
if field.remote_field and field.remote_field.model:
dependencies.extend(self._get_dependencies_for_foreign_key(field))
        # You can't just add NOT NULL fields with no default or fields
        # which don't allow empty strings as default.
time_fields = (models.DateField, models.DateTimeField, models.TimeField)
preserve_default = (
field.null or field.has_default() or field.many_to_many or
(field.blank and field.empty_strings_allowed) or
(isinstance(field, time_fields) and field.auto_now)
)
if not preserve_default:
field = field.clone()
if isinstance(field, time_fields) and field.auto_now_add:
field.default = self.questioner.ask_auto_now_add_addition(field_name, model_name)
else:
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
self.add_operation(
app_label,
operations.AddField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
def generate_removed_fields(self):
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
dependencies=[
(app_label, model_name, field_name, "order_wrt_unset"),
(app_label, model_name, field_name, "foo_together_change"),
],
)
def generate_altered_fields(self):
for app_label, model_name, field_name in sorted(self.old_field_keys & self.new_field_keys):
# Did the field change?
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name)
old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name)
new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
dependencies = []
# Implement any model renames on relations; these are handled by RenameModel
# so we need to exclude them from the comparison
if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None):
rename_key = (
new_field.remote_field.model._meta.app_label,
new_field.remote_field.model._meta.model_name,
)
if rename_key in self.renamed_models:
new_field.remote_field.model = old_field.remote_field.model
# Handle ForeignKey which can only have a single to_field.
remote_field_name = getattr(new_field.remote_field, 'field_name', None)
if remote_field_name:
to_field_rename_key = rename_key + (remote_field_name,)
if to_field_rename_key in self.renamed_fields:
new_field.remote_field.field_name = old_field.remote_field.field_name
# Handle ForeignObjects which can have multiple from_fields/to_fields.
from_fields = getattr(new_field, 'from_fields', None)
if from_fields:
from_rename_key = (app_label, model_name)
new_field.from_fields = tuple([
self.renamed_fields.get(from_rename_key + (from_field,), from_field)
for from_field in from_fields
])
new_field.to_fields = tuple([
self.renamed_fields.get(rename_key + (to_field,), to_field)
for to_field in new_field.to_fields
])
dependencies.extend(self._get_dependencies_for_foreign_key(new_field))
if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "through", None):
rename_key = (
new_field.remote_field.through._meta.app_label,
new_field.remote_field.through._meta.model_name,
)
if rename_key in self.renamed_models:
new_field.remote_field.through = old_field.remote_field.through
old_field_dec = self.deep_deconstruct(old_field)
new_field_dec = self.deep_deconstruct(new_field)
if old_field_dec != new_field_dec:
both_m2m = old_field.many_to_many and new_field.many_to_many
neither_m2m = not old_field.many_to_many and not new_field.many_to_many
if both_m2m or neither_m2m:
# Either both fields are m2m or neither is
preserve_default = True
if (old_field.null and not new_field.null and not new_field.has_default() and
not new_field.many_to_many):
field = new_field.clone()
new_default = self.questioner.ask_not_null_alteration(field_name, model_name)
if new_default is not models.NOT_PROVIDED:
field.default = new_default
preserve_default = False
else:
field = new_field
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=field_name,
field=field,
preserve_default=preserve_default,
),
dependencies=dependencies,
)
else:
# We cannot alter between m2m and concrete fields
self._generate_removed_field(app_label, model_name, field_name)
self._generate_added_field(app_label, model_name, field_name)
def create_altered_indexes(self):
option_name = operations.AddIndex.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_indexes = old_model_state.options[option_name]
new_indexes = new_model_state.options[option_name]
add_idx = [idx for idx in new_indexes if idx not in old_indexes]
rem_idx = [idx for idx in old_indexes if idx not in new_indexes]
self.altered_indexes.update({
(app_label, model_name): {
'added_indexes': add_idx, 'removed_indexes': rem_idx,
}
})
def generate_added_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes['added_indexes']:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
)
)
def generate_removed_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes['removed_indexes']:
self.add_operation(
app_label,
operations.RemoveIndex(
model_name=model_name,
name=index.name,
)
)
def create_altered_constraints(self):
option_name = operations.AddConstraint.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_constraints = old_model_state.options[option_name]
new_constraints = new_model_state.options[option_name]
add_constraints = [c for c in new_constraints if c not in old_constraints]
rem_constraints = [c for c in old_constraints if c not in new_constraints]
self.altered_constraints.update({
(app_label, model_name): {
'added_constraints': add_constraints, 'removed_constraints': rem_constraints,
}
})
def generate_added_constraints(self):
for (app_label, model_name), alt_constraints in self.altered_constraints.items():
for constraint in alt_constraints['added_constraints']:
self.add_operation(
app_label,
operations.AddConstraint(
model_name=model_name,
constraint=constraint,
)
)
def generate_removed_constraints(self):
for (app_label, model_name), alt_constraints in self.altered_constraints.items():
for constraint in alt_constraints['removed_constraints']:
self.add_operation(
app_label,
operations.RemoveConstraint(
model_name=model_name,
name=constraint.name,
)
)
def _get_dependencies_for_foreign_key(self, field):
# Account for FKs to swappable models
swappable_setting = getattr(field, 'swappable_setting', None)
if swappable_setting is not None:
dep_app_label = "__setting__"
dep_object_name = swappable_setting
else:
dep_app_label = field.remote_field.model._meta.app_label
dep_object_name = field.remote_field.model._meta.object_name
dependencies = [(dep_app_label, dep_object_name, None, True)]
if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created:
dependencies.append((
field.remote_field.through._meta.app_label,
field.remote_field.through._meta.object_name,
None,
True,
))
return dependencies
def _generate_altered_foo_together(self, operation):
option_name = operation.option_name
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for those
old_value = old_model_state.options.get(option_name)
old_value = {
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
} if old_value else set()
new_value = new_model_state.options.get(option_name)
new_value = set(new_value) if new_value else set()
if old_value != new_value:
dependencies = []
for foo_togethers in new_value:
for field_name in foo_togethers:
field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name)
if field.remote_field and field.remote_field.model:
dependencies.extend(self._get_dependencies_for_foreign_key(field))
self.add_operation(
app_label,
operation(
name=model_name,
**{option_name: new_value}
),
dependencies=dependencies,
)
def generate_altered_unique_together(self):
self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_index_together(self):
self._generate_altered_foo_together(operations.AlterIndexTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(self.kept_proxy_keys, self.kept_unmanaged_keys)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get('db_table')
new_db_table_name = new_model_state.options.get('db_table')
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
)
)
def generate_altered_options(self):
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys,
self.kept_unmanaged_keys,
# unmanaged converted to managed
self.old_unmanaged_keys & self.new_model_keys,
# managed converted to unmanaged
self.old_model_keys & self.new_unmanaged_keys,
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = {
key: value for key, value in old_model_state.options.items()
if key in AlterModelOptions.ALTER_OPTION_KEYS
}
new_options = {
key: value for key, value in new_model_state.options.items()
if key in AlterModelOptions.ALTER_OPTION_KEYS
}
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
)
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if (old_model_state.options.get("order_with_respect_to") !=
new_model_state.options.get("order_with_respect_to")):
                # Make sure it comes second if we're adding
                # (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to"):
dependencies.append((
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
True,
))
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get('order_with_respect_to'),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
)
)
def arrange_for_graph(self, changes, graph, migration_name=None):
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_%s" % migration_name if migration_name else "0001_initial"
else:
new_name = "%04i_%s" % (
next_number,
migration_name or self.suggest_name(migration.operations)[:100],
)
name_map[(app_label, migration.name)] = (app_label, new_name)
next_number += 1
migration.name = new_name
for migrations in changes.values():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
required_apps.update(*[app_dependencies.get(app_label, ()) for app_label in required_apps])
# Remove all migrations that aren't needed
for app_label in list(changes):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name_lower
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name_lower
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower)
elif ops:
if all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name_lower for o in ops))
return "auto_%s" % get_migration_name_timestamp()
@classmethod
def parse_number(cls, name):
match = re.match(r'^\d+', name)
if match:
return int(match.group())
return None
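# Illustrative note (not part of the class above): suggest_name derives a
# migration name from its operations -- a lone CreateModel("Author") yields
# "author", a lone AddField yields "author_birthdate", a lone RemoveField
# yields "remove_author_birthdate", several CreateModels are joined with "_",
# and anything else falls back to "auto_<timestamp>". parse_number("0042_x")
# returns 42, which arrange_for_graph increments when numbering the next
# migration in the app.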
# -----------------------------------------------------------------------------
# FILE: venv/Lib/site-packages/prawcore/sessions.py
# REPO: GuilhermeJC13/storIA (MIT license)
# -----------------------------------------------------------------------------
"""prawcore.sessions: Provides prawcore.Session and prawcore.session."""
import logging
import random
import time
from copy import deepcopy
from urllib.parse import urljoin
from requests.exceptions import (
ChunkedEncodingError,
ConnectionError,
ReadTimeout,
)
from requests.status_codes import codes
from .auth import BaseAuthorizer
from .const import TIMEOUT
from .exceptions import (
BadJSON,
BadRequest,
Conflict,
InvalidInvocation,
NotFound,
Redirect,
RequestException,
ServerError,
SpecialError,
TooLarge,
TooManyRequests,
UnavailableForLegalReasons,
URITooLong,
)
from .rate_limit import RateLimiter
from .util import authorization_error_class
log = logging.getLogger(__package__)
class RetryStrategy(object):
"""An abstract class for scheduling request retries.
The strategy controls both the number and frequency of retry attempts.
Instances of this class are immutable.
"""
def sleep(self):
"""Sleep until we are ready to attempt the request."""
sleep_seconds = self._sleep_seconds()
if sleep_seconds is not None:
message = f"Sleeping: {sleep_seconds:0.2f} seconds prior to retry"
log.debug(message)
time.sleep(sleep_seconds)
class FiniteRetryStrategy(RetryStrategy):
"""A ``RetryStrategy`` that retries requests a finite number of times."""
def _sleep_seconds(self):
if self._retries < 3:
base = 0 if self._retries == 2 else 2
return base + 2 * random.random()
return None
def __init__(self, retries=3):
"""Initialize the strategy.
:param retries: Number of times to attempt a request.
"""
self._retries = retries
def consume_available_retry(self):
"""Allow one fewer retry."""
return type(self)(self._retries - 1)
def should_retry_on_failure(self):
"""Return ``True`` if and only if the strategy will allow another retry."""
return self._retries > 1
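# Hedged sketch of the schedule implemented by FiniteRetryStrategy above,
# assuming the default retries=3: the first attempt sleeps not at all
# (_retries == 3, so _sleep_seconds() is None), the second attempt sleeps
# 0-2 seconds (_retries == 2, base 0), and the third sleeps 2-4 seconds
# (_retries == 1, base 2). For example:
#
#     strategy = FiniteRetryStrategy()                      # 3 attempts left
#     strategy._sleep_seconds()                             # None
#     strategy.consume_available_retry()._sleep_seconds()   # 0.0-2.0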
class Session(object):
"""The low-level connection interface to reddit's API."""
RETRY_EXCEPTIONS = (ChunkedEncodingError, ConnectionError, ReadTimeout)
RETRY_STATUSES = {
520,
522,
codes["bad_gateway"],
codes["gateway_timeout"],
codes["internal_server_error"],
codes["service_unavailable"],
}
STATUS_EXCEPTIONS = {
codes["bad_gateway"]: ServerError,
codes["bad_request"]: BadRequest,
codes["conflict"]: Conflict,
codes["found"]: Redirect,
codes["forbidden"]: authorization_error_class,
codes["gateway_timeout"]: ServerError,
codes["internal_server_error"]: ServerError,
codes["media_type"]: SpecialError,
codes["not_found"]: NotFound,
codes["request_entity_too_large"]: TooLarge,
codes["request_uri_too_large"]: URITooLong,
codes["service_unavailable"]: ServerError,
codes["too_many_requests"]: TooManyRequests,
codes["unauthorized"]: authorization_error_class,
codes["unavailable_for_legal_reasons"]: UnavailableForLegalReasons,
# Cloudflare status (not named in requests)
520: ServerError,
522: ServerError,
}
SUCCESS_STATUSES = {codes["accepted"], codes["created"], codes["ok"]}
@staticmethod
def _log_request(data, method, params, url):
log.debug(f"Fetching: {method} {url}")
log.debug(f"Data: {data}")
log.debug(f"Params: {params}")
def __init__(self, authorizer):
"""Prepare the connection to reddit's API.
:param authorizer: An instance of :class:`Authorizer`.
"""
if not isinstance(authorizer, BaseAuthorizer):
raise InvalidInvocation(f"invalid Authorizer: {authorizer}")
self._authorizer = authorizer
self._rate_limiter = RateLimiter()
self._retry_strategy_class = FiniteRetryStrategy
def __enter__(self):
"""Allow this object to be used as a context manager."""
return self
def __exit__(self, *_args):
"""Allow this object to be used as a context manager."""
self.close()
def _do_retry(
self,
data,
files,
json,
method,
params,
response,
retry_strategy_state,
saved_exception,
timeout,
url,
):
if saved_exception:
status = repr(saved_exception)
else:
status = response.status_code
log.warning(f"Retrying due to {status} status: {method} {url}")
return self._request_with_retries(
data=data,
files=files,
json=json,
method=method,
params=params,
timeout=timeout,
url=url,
retry_strategy_state=retry_strategy_state.consume_available_retry(), # noqa: E501
)
def _make_request(
self,
data,
files,
json,
method,
params,
retry_strategy_state,
timeout,
url,
):
try:
response = self._rate_limiter.call(
self._requestor.request,
self._set_header_callback,
method,
url,
allow_redirects=False,
data=data,
files=files,
json=json,
params=params,
timeout=timeout,
)
log.debug(
f"Response: {response.status_code}"
f" ({response.headers.get('content-length')} bytes)"
)
return response, None
except RequestException as exception:
if (
not retry_strategy_state.should_retry_on_failure()
or not isinstance( # noqa: E501
exception.original_exception, self.RETRY_EXCEPTIONS
)
):
raise
return None, exception.original_exception
def _request_with_retries(
self,
data,
files,
json,
method,
params,
timeout,
url,
retry_strategy_state=None,
):
if retry_strategy_state is None:
retry_strategy_state = self._retry_strategy_class()
retry_strategy_state.sleep()
self._log_request(data, method, params, url)
response, saved_exception = self._make_request(
data,
files,
json,
method,
params,
retry_strategy_state,
timeout,
url,
)
do_retry = False
if (
response is not None
and response.status_code == codes["unauthorized"]
):
self._authorizer._clear_access_token()
if hasattr(self._authorizer, "refresh"):
do_retry = True
if retry_strategy_state.should_retry_on_failure() and (
do_retry
or response is None
or response.status_code in self.RETRY_STATUSES
):
return self._do_retry(
data,
files,
json,
method,
params,
response,
retry_strategy_state,
saved_exception,
timeout,
url,
)
elif response.status_code in self.STATUS_EXCEPTIONS:
raise self.STATUS_EXCEPTIONS[response.status_code](response)
elif response.status_code == codes["no_content"]:
return
assert (
response.status_code in self.SUCCESS_STATUSES
), f"Unexpected status code: {response.status_code}"
if response.headers.get("content-length") == "0":
return ""
try:
return response.json()
except ValueError:
raise BadJSON(response)
def _set_header_callback(self):
if not self._authorizer.is_valid() and hasattr(
self._authorizer, "refresh"
):
self._authorizer.refresh()
return {"Authorization": f"bearer {self._authorizer.access_token}"}
@property
def _requestor(self):
return self._authorizer._authenticator._requestor
def close(self):
"""Close the session and perform any clean up."""
self._requestor.close()
def request(
self,
method,
path,
data=None,
files=None,
json=None,
params=None,
timeout=TIMEOUT,
):
"""Return the json content from the resource at ``path``.
:param method: The request verb. E.g., get, post, put.
:param path: The path of the request. This path will be combined with the
``oauth_url`` of the Requestor.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request.
:param files: Dictionary, mapping ``filename`` to file-like object.
:param json: Object to be serialized to JSON in the body of the request.
:param params: The query parameters to send with the request.
Automatically refreshes the access token if it becomes invalid and a refresh
token is available. Raises InvalidInvocation in such a case if a refresh token
is not available.
"""
params = deepcopy(params) or {}
params["raw_json"] = 1
if isinstance(data, dict):
data = deepcopy(data)
data["api_type"] = "json"
data = sorted(data.items())
if isinstance(json, dict):
json = deepcopy(json)
json["api_type"] = "json"
url = urljoin(self._requestor.oauth_url, path)
return self._request_with_retries(
data=data,
files=files,
json=json,
method=method,
params=params,
timeout=timeout,
url=url,
)
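    # Illustrative request shaping (values hypothetical): request("POST",
    # "/api/submit", data={"title": "t"}) sends params={"raw_json": 1} and
    # data=[("api_type", "json"), ("title", "t")] -- a sorted list of pairs --
    # to urljoin(oauth_url, "/api/submit"), retrying per the strategy above.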
def session(authorizer=None):
"""Return a :class:`Session` instance.
:param authorizer: An instance of :class:`Authorizer`.
"""
return Session(authorizer=authorizer)
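# Hedged usage sketch: the authenticator/authorizer classes and credentials
# below follow the documented prawcore pattern and are not defined in this
# module.
#
#     import prawcore
#     requestor = prawcore.Requestor("demo-user-agent")
#     authenticator = prawcore.TrustedAuthenticator(
#         requestor, "CLIENT_ID", "CLIENT_SECRET")
#     authorizer = prawcore.ReadOnlyAuthorizer(authenticator)
#     authorizer.refresh()
#     with prawcore.session(authorizer) as client:
#         data = client.request("GET", "/r/redditdev/about")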
# -----------------------------------------------------------------------------
# FILE: behave/runner_util.py
# REPO: DisruptiveLabs/behave (BSD-2-Clause license)
# -----------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Contains utility functions and classes for Runners.
"""
from behave import parser
from behave.model import FileLocation
from bisect import bisect
from six import string_types
import glob
import os.path
import re
import sys
# -----------------------------------------------------------------------------
# EXCEPTIONS:
# -----------------------------------------------------------------------------
class FileNotFoundError(LookupError):
pass
class InvalidFileLocationError(LookupError):
pass
class InvalidFilenameError(ValueError):
pass
# -----------------------------------------------------------------------------
# CLASS: FileLocationParser
# -----------------------------------------------------------------------------
class FileLocationParser:
# -- pylint: disable=W0232
# W0232: 84,0:FileLocationParser: Class has no __init__ method
pattern = re.compile(r"^\s*(?P<filename>.*):(?P<line>\d+)\s*$", re.UNICODE)
@classmethod
def parse(cls, text):
match = cls.pattern.match(text)
if match:
filename = match.group("filename").strip()
line = int(match.group("line"))
return FileLocation(filename, line)
else:
# -- NORMAL PATH/FILENAME:
filename = text.strip()
return FileLocation(filename)
# @classmethod
# def compare(cls, location1, location2):
# loc1 = cls.parse(location1)
# loc2 = cls.parse(location2)
# return cmp(loc1, loc2)
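# Illustrative behaviour of FileLocationParser.parse (paths hypothetical):
#     parse("features/alice.feature:10") -> FileLocation(
#         "features/alice.feature", line=10)
#     parse("features/alice.feature")    -> FileLocation(
#         "features/alice.feature")      # no line: selects all scenarios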
# -----------------------------------------------------------------------------
# CLASSES:
# -----------------------------------------------------------------------------
class FeatureScenarioLocationCollector(object):
"""
Collects FileLocation objects for a feature.
This is used to select a subset of scenarios in a feature that should run.
USE CASE:
behave feature/foo.feature:10
behave @selected_features.txt
behave @rerun_failed_scenarios.txt
With features configuration files, like:
# -- file:rerun_failed_scenarios.txt
feature/foo.feature:10
feature/foo.feature:25
feature/bar.feature
# -- EOF
"""
def __init__(self, feature=None, location=None, filename=None):
if not filename and location:
filename = location.filename
self.feature = feature
self.filename = filename
self.use_all_scenarios = False
self.scenario_lines = set()
self.all_scenarios = set()
self.selected_scenarios = set()
if location:
self.add_location(location)
def clear(self):
self.feature = None
self.filename = None
self.use_all_scenarios = False
self.scenario_lines = set()
self.all_scenarios = set()
self.selected_scenarios = set()
def add_location(self, location):
if not self.filename:
self.filename = location.filename
# if self.feature and False:
# self.filename = self.feature.filename
# -- NORMAL CASE:
assert self.filename == location.filename, \
"%s <=> %s" % (self.filename, location.filename)
if location.line:
self.scenario_lines.add(location.line)
else:
# -- LOCATION WITHOUT LINE NUMBER:
# Selects all scenarios in a feature.
self.use_all_scenarios = True
@staticmethod
def select_scenario_line_for(line, scenario_lines):
"""
Select scenario line for any given line.
ALGORITHM: scenario.line <= line < next_scenario.line
:param line: A line number in the file (as number).
:param scenario_lines: Sorted list of scenario lines.
:return: Scenario.line (first line) for the given line.
"""
if not scenario_lines:
return 0 # -- Select all scenarios.
pos = bisect(scenario_lines, line) - 1
if pos < 0:
pos = 0
return scenario_lines[pos]
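    # Worked example for select_scenario_line_for above: with scenario_lines
    # == [3, 10, 25], line 12 maps to 10 (the enclosing scenario), line 25
    # maps to 25, and line 2 (before the first scenario) is clamped to 3.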
def discover_selected_scenarios(self, strict=False):
"""
Discovers selected scenarios based on the provided file locations.
In addition:
* discover all scenarios
* auto-correct BAD LINE-NUMBERS
:param strict: If true, raises exception if file location is invalid.
:return: List of selected scenarios of this feature (as set).
:raises InvalidFileLocationError:
If file location is no exactly correct and strict is true.
"""
assert self.feature
if not self.all_scenarios:
self.all_scenarios = self.feature.walk_scenarios()
# -- STEP: Check if lines are correct.
existing_lines = [scenario.line for scenario in self.all_scenarios]
selected_lines = list(self.scenario_lines)
for line in selected_lines:
new_line = self.select_scenario_line_for(line, existing_lines)
if new_line != line:
# -- AUTO-CORRECT BAD-LINE:
self.scenario_lines.remove(line)
self.scenario_lines.add(new_line)
if strict:
msg = "Scenario location '...:%d' should be: '%s:%d'" % \
(line, self.filename, new_line)
raise InvalidFileLocationError(msg)
# -- STEP: Determine selected scenarios and store them.
scenario_lines = set(self.scenario_lines)
selected_scenarios = set()
for scenario in self.all_scenarios:
if scenario.line in scenario_lines:
selected_scenarios.add(scenario)
scenario_lines.remove(scenario.line)
# -- CHECK ALL ARE RESOLVED:
assert not scenario_lines
return selected_scenarios
def build_feature(self):
"""
Determines which scenarios in the feature are selected and marks the
remaining scenarios as skipped. Scenarios with the following tags
are excluded from skipped-marking:
* @setup
* @teardown
If no file locations are stored, the unmodified feature is returned.
:return: Feature object to use.
"""
use_all_scenarios = not self.scenario_lines or self.use_all_scenarios
if not self.feature or use_all_scenarios:
return self.feature
# -- CASE: Select subset of all scenarios of this feature.
# Mark other scenarios as skipped (except in a few cases).
self.all_scenarios = self.feature.walk_scenarios()
self.selected_scenarios = self.discover_selected_scenarios()
unselected_scenarios = set(self.all_scenarios) - self.selected_scenarios
for scenario in unselected_scenarios:
if "setup" in scenario.tags or "teardown" in scenario.tags:
continue
scenario.mark_skipped()
return self.feature
class FeatureListParser(object):
"""
Read textual file, ala '@features.txt'. This file contains:
* a feature filename or FileLocation on each line
* empty lines (skipped)
* comment lines (skipped)
* wildcards are expanded to select 0..N filenames or directories
Relative path names are evaluated relative to the listfile directory.
A leading '@' (AT) character is removed from the listfile name.
"""
@staticmethod
def parse(text, here=None):
"""
Parse contents of a features list file as text.
:param text: Contents of a features list(file).
:param here: Current working directory to use (optional).
:return: List of FileLocation objects
"""
locations = []
for line in text.splitlines():
filename = line.strip()
if not filename:
continue # SKIP: Over empty line(s).
elif filename.startswith('#'):
continue # SKIP: Over comment line(s).
            if here and not os.path.isabs(filename):
                # Join the stripped filename (not the raw line) so stray
                # whitespace never leaks into the resulting path.
                filename = os.path.join(here, filename)
filename = os.path.normpath(filename)
if glob.has_magic(filename):
# -- WITH WILDCARDS:
for filename2 in glob.iglob(filename):
location = FileLocationParser.parse(filename2)
locations.append(location)
else:
location = FileLocationParser.parse(filename)
locations.append(location)
return locations
@classmethod
def parse_file(cls, filename):
"""
Read textual file, ala '@features.txt'.
:param filename: Name of feature list file.
:return: List of feature file locations.
"""
if filename.startswith('@'):
filename = filename[1:]
if not os.path.isfile(filename):
raise FileNotFoundError(filename)
here = os.path.dirname(filename) or "."
        with open(filename) as f:
            contents = f.read()
return cls.parse(contents, here)
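# Illustrative list file consumed by FeatureListParser.parse_file (contents
# hypothetical):
#
#     # -- file:selected_features.txt
#     features/login.feature:12
#     features/smoke/*.feature
#
# parse_file("@selected_features.txt") drops the "@", resolves relative paths
# against the list file's directory, and expands the wildcard line via glob
# into zero or more FileLocation objects.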
# -----------------------------------------------------------------------------
# FUNCTIONS:
# -----------------------------------------------------------------------------
def parse_features(feature_files, language=None):
"""
Parse feature files and return list of Feature model objects.
Handles:
* feature file names, ala "alice.feature"
* feature file locations, ala: "alice.feature:10"
:param feature_files: List of feature file names to parse.
:param language: Default language to use.
:return: List of feature objects.
"""
scenario_collector = FeatureScenarioLocationCollector()
features = []
for location in feature_files:
if not isinstance(location, FileLocation):
assert isinstance(location, string_types)
location = FileLocation(os.path.normpath(location))
if location.filename == scenario_collector.filename:
scenario_collector.add_location(location)
continue
elif scenario_collector.feature:
# -- ADD CURRENT FEATURE: As collection of scenarios.
current_feature = scenario_collector.build_feature()
features.append(current_feature)
scenario_collector.clear()
# -- NEW FEATURE:
assert isinstance(location, FileLocation)
filename = os.path.abspath(location.filename)
feature = parser.parse_file(filename, language=language)
if feature:
# -- VALID FEATURE:
# SKIP CORNER-CASE: Feature file without any feature(s).
scenario_collector.feature = feature
scenario_collector.add_location(location)
# -- FINALLY:
if scenario_collector.feature:
current_feature = scenario_collector.build_feature()
features.append(current_feature)
return features
def collect_feature_locations(paths, strict=True):
"""
Collect feature file names by processing list of paths (from command line).
A path can be a:
* filename (ending with ".feature")
* location, ala "{filename}:{line_number}"
* features configuration filename, ala "@features.txt"
* directory, to discover and collect all "*.feature" files below.
:param paths: Paths to process.
:return: Feature file locations to use (as list of FileLocations).
"""
locations = []
for path in paths:
if os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
dirnames.sort()
for filename in sorted(filenames):
if filename.endswith(".feature"):
location = FileLocation(os.path.join(dirpath, filename))
locations.append(location)
elif path.startswith('@'):
# -- USE: behave @list_of_features.txt
locations.extend(FeatureListParser.parse_file(path[1:]))
else:
# -- OTHERWISE: Normal filename or location (schema: filename:line)
location = FileLocationParser.parse(path)
if not location.filename.endswith(".feature"):
raise InvalidFilenameError(location.filename)
elif location.exists():
locations.append(location)
elif strict:
raise FileNotFoundError(path)
return locations
def make_undefined_step_snippet(step, language=None):
"""
Helper function to create an undefined-step snippet for a step.
:param step: Step to use (as Step object or step text).
:param language: i18n language, optionally needed for step text parsing.
:return: Undefined-step snippet (as string).
"""
if isinstance(step, string_types):
step_text = step
steps = parser.parse_steps(step_text, language=language)
step = steps[0]
assert step, "ParseError: %s" % step_text
prefix = u""
if sys.version_info[0] == 2:
prefix = u"u"
single_quote = "'"
if single_quote in step.name:
step.name = step.name.replace(single_quote, r"\'")
schema = u"@%s(%s'%s')\ndef step_impl(context):\n assert False\n\n"
snippet = schema % (step.step_type, prefix, step.name)
return snippet
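# Example snippet produced by make_undefined_step_snippet on Python 3 for an
# undefined "when" step "I log in" (step text hypothetical):
#
#     @when('I log in')
#     def step_impl(context):
#         assert False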
def print_undefined_step_snippets(undefined_steps, stream=None, colored=True):
"""
Print snippets for the undefined steps that were discovered.
:param undefined_steps: List of undefined steps (as list<string>).
:param stream: Output stream to use (default: sys.stderr).
:param colored: Indicates if coloring should be used (default: True)
"""
if not undefined_steps:
return
if not stream:
stream = sys.stderr
msg = u"\nYou can implement step definitions for undefined steps with "
msg += u"these snippets:\n\n"
printed = set()
for step in undefined_steps:
if step in printed:
continue
printed.add(step)
msg += make_undefined_step_snippet(step)
if colored:
# -- OOPS: Unclear if stream supports ANSI coloring.
from behave.formatter.ansi_escapes import escapes
msg = escapes['undefined'] + msg + escapes['reset']
stream.write(msg)
stream.flush()
| 35.796992
| 80
| 0.595743
|
from behave import parser
from behave.model import FileLocation
from bisect import bisect
from six import string_types
import glob
import os.path
import re
import sys
class FileNotFoundError(LookupError):
pass
class InvalidFileLocationError(LookupError):
pass
class InvalidFilenameError(ValueError):
pass
class FileLocationParser:
pattern = re.compile(r"^\s*(?P<filename>.*):(?P<line>\d+)\s*$", re.UNICODE)
@classmethod
def parse(cls, text):
match = cls.pattern.match(text)
if match:
filename = match.group("filename").strip()
line = int(match.group("line"))
return FileLocation(filename, line)
else:
filename = text.strip()
return FileLocation(filename)
class FeatureScenarioLocationCollector(object):
def __init__(self, feature=None, location=None, filename=None):
if not filename and location:
filename = location.filename
self.feature = feature
self.filename = filename
self.use_all_scenarios = False
self.scenario_lines = set()
self.all_scenarios = set()
self.selected_scenarios = set()
if location:
self.add_location(location)
def clear(self):
self.feature = None
self.filename = None
self.use_all_scenarios = False
self.scenario_lines = set()
self.all_scenarios = set()
self.selected_scenarios = set()
def add_location(self, location):
if not self.filename:
self.filename = location.filename
assert self.filename == location.filename, \
"%s <=> %s" % (self.filename, location.filename)
if location.line:
self.scenario_lines.add(location.line)
else:
self.use_all_scenarios = True
@staticmethod
def select_scenario_line_for(line, scenario_lines):
if not scenario_lines:
return 0
pos = bisect(scenario_lines, line) - 1
if pos < 0:
pos = 0
return scenario_lines[pos]
def discover_selected_scenarios(self, strict=False):
assert self.feature
if not self.all_scenarios:
self.all_scenarios = self.feature.walk_scenarios()
existing_lines = [scenario.line for scenario in self.all_scenarios]
selected_lines = list(self.scenario_lines)
for line in selected_lines:
new_line = self.select_scenario_line_for(line, existing_lines)
if new_line != line:
self.scenario_lines.remove(line)
self.scenario_lines.add(new_line)
if strict:
msg = "Scenario location '...:%d' should be: '%s:%d'" % \
(line, self.filename, new_line)
raise InvalidFileLocationError(msg)
scenario_lines = set(self.scenario_lines)
selected_scenarios = set()
for scenario in self.all_scenarios:
if scenario.line in scenario_lines:
selected_scenarios.add(scenario)
scenario_lines.remove(scenario.line)
assert not scenario_lines
return selected_scenarios
def build_feature(self):
use_all_scenarios = not self.scenario_lines or self.use_all_scenarios
if not self.feature or use_all_scenarios:
return self.feature
self.all_scenarios = self.feature.walk_scenarios()
self.selected_scenarios = self.discover_selected_scenarios()
unselected_scenarios = set(self.all_scenarios) - self.selected_scenarios
for scenario in unselected_scenarios:
if "setup" in scenario.tags or "teardown" in scenario.tags:
continue
scenario.mark_skipped()
return self.feature
class FeatureListParser(object):
@staticmethod
def parse(text, here=None):
locations = []
for line in text.splitlines():
filename = line.strip()
if not filename:
continue
elif filename.startswith('#'):
continue
            if here and not os.path.isabs(filename):
                filename = os.path.join(here, filename)
filename = os.path.normpath(filename)
if glob.has_magic(filename):
for filename2 in glob.iglob(filename):
location = FileLocationParser.parse(filename2)
locations.append(location)
else:
location = FileLocationParser.parse(filename)
locations.append(location)
return locations
@classmethod
def parse_file(cls, filename):
if filename.startswith('@'):
filename = filename[1:]
if not os.path.isfile(filename):
raise FileNotFoundError(filename)
here = os.path.dirname(filename) or "."
contents = open(filename).read()
return cls.parse(contents, here)
def parse_features(feature_files, language=None):
scenario_collector = FeatureScenarioLocationCollector()
features = []
for location in feature_files:
if not isinstance(location, FileLocation):
assert isinstance(location, string_types)
location = FileLocation(os.path.normpath(location))
if location.filename == scenario_collector.filename:
scenario_collector.add_location(location)
continue
elif scenario_collector.feature:
current_feature = scenario_collector.build_feature()
features.append(current_feature)
scenario_collector.clear()
assert isinstance(location, FileLocation)
filename = os.path.abspath(location.filename)
feature = parser.parse_file(filename, language=language)
if feature:
scenario_collector.feature = feature
scenario_collector.add_location(location)
if scenario_collector.feature:
current_feature = scenario_collector.build_feature()
features.append(current_feature)
return features
def collect_feature_locations(paths, strict=True):
locations = []
for path in paths:
if os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
dirnames.sort()
for filename in sorted(filenames):
if filename.endswith(".feature"):
location = FileLocation(os.path.join(dirpath, filename))
locations.append(location)
elif path.startswith('@'):
locations.extend(FeatureListParser.parse_file(path[1:]))
else:
location = FileLocationParser.parse(path)
if not location.filename.endswith(".feature"):
raise InvalidFilenameError(location.filename)
elif location.exists():
locations.append(location)
elif strict:
raise FileNotFoundError(path)
return locations
def make_undefined_step_snippet(step, language=None):
if isinstance(step, string_types):
step_text = step
steps = parser.parse_steps(step_text, language=language)
step = steps[0]
assert step, "ParseError: %s" % step_text
prefix = u""
if sys.version_info[0] == 2:
prefix = u"u"
single_quote = "'"
if single_quote in step.name:
step.name = step.name.replace(single_quote, r"\'")
schema = u"@%s(%s'%s')\ndef step_impl(context):\n assert False\n\n"
snippet = schema % (step.step_type, prefix, step.name)
return snippet
def print_undefined_step_snippets(undefined_steps, stream=None, colored=True):
if not undefined_steps:
return
if not stream:
stream = sys.stderr
msg = u"\nYou can implement step definitions for undefined steps with "
msg += u"these snippets:\n\n"
printed = set()
for step in undefined_steps:
if step in printed:
continue
printed.add(step)
msg += make_undefined_step_snippet(step)
if colored:
from behave.formatter.ansi_escapes import escapes
msg = escapes['undefined'] + msg + escapes['reset']
stream.write(msg)
stream.flush()
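
For orientation, a small sketch of the location syntax handled by FileLocationParser above (paths are hypothetical; assumes the class is importable from behave.runner_util):

    from behave.runner_util import FileLocationParser

    loc = FileLocationParser.parse("features/login.feature:12")
    print(loc.filename, loc.line)    # features/login.feature 12

    loc = FileLocationParser.parse("features/login.feature")
    print(loc.filename, loc.line)    # features/login.feature None
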
| true
| true
|
1c40177e30c7a93d9b6a4a223742ba6d3c4047f6
| 18,865
|
py
|
Python
|
venv/lib/python3.6/site-packages/twilio/rest/messaging/v1/session/participant.py
|
fernandoleira/stocktext
|
f755f83ffdaee3b179e21de955854354aced9134
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/twilio/rest/messaging/v1/session/participant.py
|
fernandoleira/stocktext
|
f755f83ffdaee3b179e21de955854354aced9134
|
[
"MIT"
] | 11
|
2019-12-26T17:21:03.000Z
|
2022-03-21T22:17:07.000Z
|
venv/Lib/python3.6/site-packages/twilio/rest/messaging/v1/session/participant.py
|
chinmaya-dev/ttbdonation
|
1ea4cb2c279db86465040b68f1fa48dbb5f7e17c
|
[
"MIT"
] | null | null | null |
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ParticipantList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, session_sid):
"""
Initialize the ParticipantList
:param Version version: Version that contains the resource
:param session_sid: The unique id of the Session for this participant.
:returns: twilio.rest.messaging.v1.session.participant.ParticipantList
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantList
"""
super(ParticipantList, self).__init__(version)
# Path Solution
self._solution = {'session_sid': session_sid, }
self._uri = '/Sessions/{session_sid}/Participants'.format(**self._solution)
def create(self, attributes=values.unset, twilio_address=values.unset,
date_created=values.unset, date_updated=values.unset,
identity=values.unset, user_address=values.unset):
"""
Create a new ParticipantInstance
:param unicode attributes: An optional string metadata field you can use to store any data you wish.
:param unicode twilio_address: The address of the Twilio phone number that the participant is in contact with.
:param datetime date_created: The date that this resource was created.
:param datetime date_updated: The date that this resource was last updated.
:param unicode identity: A unique string identifier for the session participant as Chat User.
:param unicode user_address: The address of the participant's device.
:returns: Newly created ParticipantInstance
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantInstance
"""
data = values.of({
'Identity': identity,
'UserAddress': user_address,
'Attributes': attributes,
'TwilioAddress': twilio_address,
'DateCreated': serialize.iso8601_datetime(date_created),
'DateUpdated': serialize.iso8601_datetime(date_updated),
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return ParticipantInstance(self._version, payload, session_sid=self._solution['session_sid'], )
def stream(self, limit=None, page_size=None):
"""
Streams ParticipantInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.messaging.v1.session.participant.ParticipantInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists ParticipantInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: List that will contain up to limit results
:rtype: list[twilio.rest.messaging.v1.session.participant.ParticipantInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of ParticipantInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ParticipantInstance
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantPage
"""
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(
'GET',
self._uri,
params=params,
)
return ParticipantPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of ParticipantInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of ParticipantInstance
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return ParticipantPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a ParticipantContext
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.messaging.v1.session.participant.ParticipantContext
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantContext
"""
return ParticipantContext(self._version, session_sid=self._solution['session_sid'], sid=sid, )
def __call__(self, sid):
"""
Constructs a ParticipantContext
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.messaging.v1.session.participant.ParticipantContext
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantContext
"""
return ParticipantContext(self._version, session_sid=self._solution['session_sid'], sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Messaging.V1.ParticipantList>'
class ParticipantPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, response, solution):
"""
Initialize the ParticipantPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param session_sid: The unique id of the Session for this participant.
:returns: twilio.rest.messaging.v1.session.participant.ParticipantPage
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantPage
"""
super(ParticipantPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ParticipantInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.messaging.v1.session.participant.ParticipantInstance
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantInstance
"""
return ParticipantInstance(self._version, payload, session_sid=self._solution['session_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Messaging.V1.ParticipantPage>'
class ParticipantContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, session_sid, sid):
"""
Initialize the ParticipantContext
:param Version version: Version that contains the resource
:param session_sid: The unique id of the Session for this participant.
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.messaging.v1.session.participant.ParticipantContext
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantContext
"""
super(ParticipantContext, self).__init__(version)
# Path Solution
self._solution = {'session_sid': session_sid, 'sid': sid, }
self._uri = '/Sessions/{session_sid}/Participants/{sid}'.format(**self._solution)
def update(self, attributes=values.unset, date_created=values.unset,
date_updated=values.unset):
"""
Update the ParticipantInstance
:param unicode attributes: An optional string metadata field you can use to store any data you wish.
:param datetime date_created: The date that this resource was created.
:param datetime date_updated: The date that this resource was last updated.
:returns: Updated ParticipantInstance
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantInstance
"""
data = values.of({
'Attributes': attributes,
'DateCreated': serialize.iso8601_datetime(date_created),
'DateUpdated': serialize.iso8601_datetime(date_updated),
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return ParticipantInstance(
self._version,
payload,
session_sid=self._solution['session_sid'],
sid=self._solution['sid'],
)
def fetch(self):
"""
Fetch a ParticipantInstance
:returns: Fetched ParticipantInstance
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return ParticipantInstance(
self._version,
payload,
session_sid=self._solution['session_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the ParticipantInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Messaging.V1.ParticipantContext {}>'.format(context)
class ParticipantInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
class ParticipantType(object):
CHAT = "chat"
SMS = "sms"
def __init__(self, version, payload, session_sid, sid=None):
"""
Initialize the ParticipantInstance
:returns: twilio.rest.messaging.v1.session.participant.ParticipantInstance
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantInstance
"""
super(ParticipantInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'service_sid': payload['service_sid'],
'messaging_service_sid': payload['messaging_service_sid'],
'session_sid': payload['session_sid'],
'sid': payload['sid'],
'identity': payload['identity'],
'twilio_address': payload['twilio_address'],
'user_address': payload['user_address'],
'attributes': payload['attributes'],
'type': payload['type'],
'date_created': deserialize.iso8601_datetime(payload['date_created']),
'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
'url': payload['url'],
}
# Context
self._context = None
self._solution = {'session_sid': session_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ParticipantContext for this ParticipantInstance
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantContext
"""
if self._context is None:
self._context = ParticipantContext(
self._version,
session_sid=self._solution['session_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The unique id of the Account responsible for this session.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def service_sid(self):
"""
:returns: The unique id of the Chat Service this session belongs to.
:rtype: unicode
"""
return self._properties['service_sid']
@property
def messaging_service_sid(self):
"""
:returns: The unique id of the SMS Service this session belongs to.
:rtype: unicode
"""
return self._properties['messaging_service_sid']
@property
def session_sid(self):
"""
:returns: The unique id of the Session for this participant.
:rtype: unicode
"""
return self._properties['session_sid']
@property
def sid(self):
"""
:returns: A 34 character string that uniquely identifies this resource.
:rtype: unicode
"""
return self._properties['sid']
@property
def identity(self):
"""
:returns: A unique string identifier for the session participant as Chat User.
:rtype: unicode
"""
return self._properties['identity']
@property
def twilio_address(self):
"""
:returns: The address of the Twilio phone number that the participant is in contact with.
:rtype: unicode
"""
return self._properties['twilio_address']
@property
def user_address(self):
"""
:returns: The address of the participant's device.
:rtype: unicode
"""
return self._properties['user_address']
@property
def attributes(self):
"""
:returns: An optional string metadata field you can use to store any data you wish.
:rtype: unicode
"""
return self._properties['attributes']
@property
def type(self):
"""
        :returns: The type of Twilio product that the participant is a user of.
:rtype: ParticipantInstance.ParticipantType
"""
return self._properties['type']
@property
def date_created(self):
"""
:returns: The date that this resource was created.
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date that this resource was last updated.
:rtype: datetime
"""
return self._properties['date_updated']
@property
def url(self):
"""
:returns: An absolute URL for this participant.
:rtype: unicode
"""
return self._properties['url']
def update(self, attributes=values.unset, date_created=values.unset,
date_updated=values.unset):
"""
Update the ParticipantInstance
:param unicode attributes: An optional string metadata field you can use to store any data you wish.
:param datetime date_created: The date that this resource was created.
:param datetime date_updated: The date that this resource was last updated.
:returns: Updated ParticipantInstance
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantInstance
"""
return self._proxy.update(
attributes=attributes,
date_created=date_created,
date_updated=date_updated,
)
def fetch(self):
"""
Fetch a ParticipantInstance
:returns: Fetched ParticipantInstance
:rtype: twilio.rest.messaging.v1.session.participant.ParticipantInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the ParticipantInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Messaging.V1.ParticipantInstance {}>'.format(context)
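
A hedged usage sketch for the resource above, assuming a twilio-python version that still exposes the preview Messaging Sessions API; the credentials and sids below are placeholders, not real values:

    from twilio.rest import Client

    # Placeholder credentials -- substitute real account values.
    client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")

    # List up to 20 participants of a (placeholder) session.
    participants = client.messaging \
        .sessions("SESXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
        .participants \
        .list(limit=20)

    for participant in participants:
        print(participant.sid, participant.type)
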
| 35.796964
| 118
| 0.638219
|
from twilio.base import deserialize
from twilio.base import serialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ParticipantList(ListResource):
def __init__(self, version, session_sid):
super(ParticipantList, self).__init__(version)
self._solution = {'session_sid': session_sid, }
self._uri = '/Sessions/{session_sid}/Participants'.format(**self._solution)
def create(self, attributes=values.unset, twilio_address=values.unset,
date_created=values.unset, date_updated=values.unset,
identity=values.unset, user_address=values.unset):
data = values.of({
'Identity': identity,
'UserAddress': user_address,
'Attributes': attributes,
'TwilioAddress': twilio_address,
'DateCreated': serialize.iso8601_datetime(date_created),
'DateUpdated': serialize.iso8601_datetime(date_updated),
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return ParticipantInstance(self._version, payload, session_sid=self._solution['session_sid'], )
def stream(self, limit=None, page_size=None):
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
params = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(
'GET',
self._uri,
params=params,
)
return ParticipantPage(self._version, response, self._solution)
def get_page(self, target_url):
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return ParticipantPage(self._version, response, self._solution)
def get(self, sid):
return ParticipantContext(self._version, session_sid=self._solution['session_sid'], sid=sid, )
def __call__(self, sid):
return ParticipantContext(self._version, session_sid=self._solution['session_sid'], sid=sid, )
def __repr__(self):
return '<Twilio.Messaging.V1.ParticipantList>'
class ParticipantPage(Page):
def __init__(self, version, response, solution):
super(ParticipantPage, self).__init__(version, response)
self._solution = solution
def get_instance(self, payload):
return ParticipantInstance(self._version, payload, session_sid=self._solution['session_sid'], )
def __repr__(self):
return '<Twilio.Messaging.V1.ParticipantPage>'
class ParticipantContext(InstanceContext):
def __init__(self, version, session_sid, sid):
super(ParticipantContext, self).__init__(version)
self._solution = {'session_sid': session_sid, 'sid': sid, }
self._uri = '/Sessions/{session_sid}/Participants/{sid}'.format(**self._solution)
def update(self, attributes=values.unset, date_created=values.unset,
date_updated=values.unset):
data = values.of({
'Attributes': attributes,
'DateCreated': serialize.iso8601_datetime(date_created),
'DateUpdated': serialize.iso8601_datetime(date_updated),
})
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return ParticipantInstance(
self._version,
payload,
session_sid=self._solution['session_sid'],
sid=self._solution['sid'],
)
def fetch(self):
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return ParticipantInstance(
self._version,
payload,
session_sid=self._solution['session_sid'],
sid=self._solution['sid'],
)
def delete(self):
return self._version.delete('delete', self._uri)
def __repr__(self):
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Messaging.V1.ParticipantContext {}>'.format(context)
class ParticipantInstance(InstanceResource):
class ParticipantType(object):
CHAT = "chat"
SMS = "sms"
def __init__(self, version, payload, session_sid, sid=None):
super(ParticipantInstance, self).__init__(version)
self._properties = {
'account_sid': payload['account_sid'],
'service_sid': payload['service_sid'],
'messaging_service_sid': payload['messaging_service_sid'],
'session_sid': payload['session_sid'],
'sid': payload['sid'],
'identity': payload['identity'],
'twilio_address': payload['twilio_address'],
'user_address': payload['user_address'],
'attributes': payload['attributes'],
'type': payload['type'],
'date_created': deserialize.iso8601_datetime(payload['date_created']),
'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
'url': payload['url'],
}
self._context = None
self._solution = {'session_sid': session_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
if self._context is None:
self._context = ParticipantContext(
self._version,
session_sid=self._solution['session_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
return self._properties['account_sid']
@property
def service_sid(self):
return self._properties['service_sid']
@property
def messaging_service_sid(self):
return self._properties['messaging_service_sid']
@property
def session_sid(self):
return self._properties['session_sid']
@property
def sid(self):
return self._properties['sid']
@property
def identity(self):
return self._properties['identity']
@property
def twilio_address(self):
return self._properties['twilio_address']
@property
def user_address(self):
return self._properties['user_address']
@property
def attributes(self):
return self._properties['attributes']
@property
def type(self):
return self._properties['type']
@property
def date_created(self):
return self._properties['date_created']
@property
def date_updated(self):
return self._properties['date_updated']
@property
def url(self):
return self._properties['url']
def update(self, attributes=values.unset, date_created=values.unset,
date_updated=values.unset):
return self._proxy.update(
attributes=attributes,
date_created=date_created,
date_updated=date_updated,
)
def fetch(self):
return self._proxy.fetch()
def delete(self):
return self._proxy.delete()
def __repr__(self):
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Messaging.V1.ParticipantInstance {}>'.format(context)
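
The _proxy property above is a lazily memoized context; the same pattern in isolation (generic stand-in types, not the Twilio classes):

    class LazyProxy(object):
        def __init__(self):
            self._context = None

        @property
        def _proxy(self):
            # Build the (stand-in) context once, then reuse it on every access.
            if self._context is None:
                self._context = object()
            return self._context

    p = LazyProxy()
    assert p._proxy is p._proxy   # same cached object each time
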
| true
| true
|
1c4017ff39b79daa31f30c496461589a2d01b756
| 7,938
|
py
|
Python
|
examples/source_separation/utils/metrics.py
|
popcornell/audio
|
7b6b2d000023e2aa3365b769866c5f375e0d5fda
|
[
"BSD-2-Clause"
] | 1,718
|
2017-05-05T01:15:00.000Z
|
2022-03-31T10:33:51.000Z
|
examples/source_separation/utils/metrics.py
|
popcornell/audio
|
7b6b2d000023e2aa3365b769866c5f375e0d5fda
|
[
"BSD-2-Clause"
] | 1,590
|
2017-05-07T18:38:39.000Z
|
2022-03-31T22:22:10.000Z
|
examples/source_separation/utils/metrics.py
|
popcornell/audio
|
7b6b2d000023e2aa3365b769866c5f375e0d5fda
|
[
"BSD-2-Clause"
] | 464
|
2017-05-05T04:42:43.000Z
|
2022-03-29T20:32:00.000Z
|
import math
from itertools import permutations
from typing import Optional
import torch
def sdr(
estimate: torch.Tensor, reference: torch.Tensor, mask: Optional[torch.Tensor] = None, epsilon: float = 1e-8
) -> torch.Tensor:
"""Computes source-to-distortion ratio.
    1. scale the reference signal with power(s_est * s_ref) / power(s_ref * s_ref)
2. compute SNR between adjusted estimate and reference.
Args:
        estimate (torch.Tensor): Estimated signal.
Shape: [batch, speakers (can be 1), time frame]
reference (torch.Tensor): Reference signal.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: scale-invariant source-to-distortion ratio.
Shape: [batch, speaker]
References:
- Single-channel multi-speaker separation using deep clustering
Y. Isik, J. Le Roux, Z. Chen, S. Watanabe, and J. R. Hershey,
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
Notes:
This function is tested to produce the exact same result as
https://github.com/naplab/Conv-TasNet/blob/e66d82a8f956a69749ec8a4ae382217faa097c5c/utility/sdr.py#L34-L56
"""
reference_pow = reference.pow(2).mean(axis=2, keepdim=True)
mix_pow = (estimate * reference).mean(axis=2, keepdim=True)
scale = mix_pow / (reference_pow + epsilon)
reference = scale * reference
error = estimate - reference
reference_pow = reference.pow(2)
error_pow = error.pow(2)
if mask is None:
reference_pow = reference_pow.mean(axis=2)
error_pow = error_pow.mean(axis=2)
else:
denom = mask.sum(axis=2)
reference_pow = (mask * reference_pow).sum(axis=2) / denom
error_pow = (mask * error_pow).sum(axis=2) / denom
return 10 * torch.log10(reference_pow) - 10 * torch.log10(error_pow)
class PIT(torch.nn.Module):
"""Applies utterance-level speaker permutation
    Computes the maximum possible value of the given utility function
over the permutations of the speakers.
Args:
utility_func (function):
Function that computes the utility (opposite of loss) with signature of
            (estimate: torch.Tensor, reference: torch.Tensor) -> torch.Tensor
where input Tensors are shape of [batch, speakers, frame] and
the output Tensor is shape of [batch, speakers].
References:
- Multi-talker Speech Separation with Utterance-level Permutation Invariant Training of
Deep Recurrent Neural Networks
Morten Kolbæk, Dong Yu, Zheng-Hua Tan and Jesper Jensen
https://arxiv.org/abs/1703.06284
"""
def __init__(self, utility_func):
super().__init__()
self.utility_func = utility_func
def forward(
self,
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8,
) -> torch.Tensor:
"""Compute utterance-level PIT Loss
Args:
estimate (torch.Tensor): Estimated source signals.
                Shape: [batch, speakers, time frame]
reference (torch.Tensor): Reference (original) source signals.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: Maximum criterion over the speaker permutation.
Shape: [batch, ]
"""
assert estimate.shape == reference.shape
batch_size, num_speakers = reference.shape[:2]
num_permute = math.factorial(num_speakers)
util_mat = torch.zeros(batch_size, num_permute, dtype=estimate.dtype, device=estimate.device)
for i, idx in enumerate(permutations(range(num_speakers))):
util = self.utility_func(estimate, reference[:, idx, :], mask=mask, epsilon=epsilon)
util_mat[:, i] = util.mean(dim=1) # take the average over speaker dimension
return util_mat.max(dim=1).values
_sdr_pit = PIT(utility_func=sdr)
def sdr_pit(
estimate: torch.Tensor, reference: torch.Tensor, mask: Optional[torch.Tensor] = None, epsilon: float = 1e-8
):
"""Computes scale-invariant source-to-distortion ratio.
1. adjust both estimate and reference to have 0-mean
    2. scale the reference signal with power(s_est * s_ref) / power(s_ref * s_ref)
3. compute SNR between adjusted estimate and reference.
Args:
        estimate (torch.Tensor): Estimated signal.
Shape: [batch, speakers (can be 1), time frame]
reference (torch.Tensor): Reference signal.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: scale-invariant source-to-distortion ratio.
Shape: [batch, speaker]
References:
- Single-channel multi-speaker separation using deep clustering
Y. Isik, J. Le Roux, Z. Chen, S. Watanabe, and J. R. Hershey,
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
Notes:
This function is tested to produce the exact same result as the reference implementation,
*when the inputs have 0-mean*
https://github.com/naplab/Conv-TasNet/blob/e66d82a8f956a69749ec8a4ae382217faa097c5c/utility/sdr.py#L107-L153
"""
return _sdr_pit(estimate, reference, mask, epsilon)
def sdri(
estimate: torch.Tensor,
reference: torch.Tensor,
mix: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8,
) -> torch.Tensor:
"""Compute the improvement of SDR (SDRi).
    This function computes how much SDR is improved if the estimation is changed from
the original mixture signal to the actual estimated source signals. That is,
``SDR(estimate, reference) - SDR(mix, reference)``.
For computing ``SDR(estimate, reference)``, PIT (permutation invariant training) is applied,
    so that the best pairing between the reference signals and the estimated signals
    is picked.
Args:
estimate (torch.Tensor): Estimated source signals.
Shape: [batch, speakers, time frame]
reference (torch.Tensor): Reference (original) source signals.
Shape: [batch, speakers, time frame]
        mix (torch.Tensor): Mixed source signals, from which the estimated signals were generated.
Shape: [batch, speakers == 1, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: Improved SDR. Shape: [batch, ]
References:
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
"""
sdr_ = sdr_pit(estimate, reference, mask=mask, epsilon=epsilon) # [batch, ]
base_sdr = sdr(mix, reference, mask=mask, epsilon=epsilon) # [batch, speaker]
return sdr_ - base_sdr.mean(dim=1)
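
In equation form, the quantity computed by sdr above is (writing the estimate as \hat{s}, the reference as s, and ignoring the epsilon stabilizer):

    \alpha = \frac{\langle \hat{s}, s \rangle}{\lVert s \rVert^2},
    \qquad
    \mathrm{SDR}(\hat{s}, s) = 10 \log_{10} \frac{\lVert \alpha s \rVert^2}{\lVert \hat{s} - \alpha s \rVert^2}

The code uses per-frame means rather than sums, but the common 1/T factors cancel in both the scale and the final ratio (when no mask is given).
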
| 40.090909
| 116
| 0.660998
|
import math
from itertools import permutations
from typing import Optional
import torch
def sdr(
estimate: torch.Tensor, reference: torch.Tensor, mask: Optional[torch.Tensor] = None, epsilon: float = 1e-8
) -> torch.Tensor:
reference_pow = reference.pow(2).mean(axis=2, keepdim=True)
mix_pow = (estimate * reference).mean(axis=2, keepdim=True)
scale = mix_pow / (reference_pow + epsilon)
reference = scale * reference
error = estimate - reference
reference_pow = reference.pow(2)
error_pow = error.pow(2)
if mask is None:
reference_pow = reference_pow.mean(axis=2)
error_pow = error_pow.mean(axis=2)
else:
denom = mask.sum(axis=2)
reference_pow = (mask * reference_pow).sum(axis=2) / denom
error_pow = (mask * error_pow).sum(axis=2) / denom
return 10 * torch.log10(reference_pow) - 10 * torch.log10(error_pow)
class PIT(torch.nn.Module):
def __init__(self, utility_func):
super().__init__()
self.utility_func = utility_func
def forward(
self,
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8,
) -> torch.Tensor:
assert estimate.shape == reference.shape
batch_size, num_speakers = reference.shape[:2]
num_permute = math.factorial(num_speakers)
util_mat = torch.zeros(batch_size, num_permute, dtype=estimate.dtype, device=estimate.device)
for i, idx in enumerate(permutations(range(num_speakers))):
util = self.utility_func(estimate, reference[:, idx, :], mask=mask, epsilon=epsilon)
util_mat[:, i] = util.mean(dim=1)
return util_mat.max(dim=1).values
_sdr_pit = PIT(utility_func=sdr)
def sdr_pit(
estimate: torch.Tensor, reference: torch.Tensor, mask: Optional[torch.Tensor] = None, epsilon: float = 1e-8
):
return _sdr_pit(estimate, reference, mask, epsilon)
def sdri(
estimate: torch.Tensor,
reference: torch.Tensor,
mix: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8,
) -> torch.Tensor:
sdr_ = sdr_pit(estimate, reference, mask=mask, epsilon=epsilon)
base_sdr = sdr(mix, reference, mask=mask, epsilon=epsilon)
return sdr_ - base_sdr.mean(dim=1)
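
A minimal smoke test for the metrics above, with the functions from this file in scope (assumes torch is installed; shapes follow the docstrings, [batch, speakers, time frame]):

    import torch

    batch, speakers, frames = 4, 2, 16000
    reference = torch.randn(batch, speakers, frames)
    estimate = reference + 0.1 * torch.randn(batch, speakers, frames)
    mix = reference.sum(dim=1, keepdim=True)   # single-channel mixture

    improvement = sdri(estimate, reference, mix)   # shape: [batch]
    print(improvement.shape, improvement.mean().item())
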
| true
| true
|
1c401881165075b2fee60265029483b41229a85b
| 6,234
|
py
|
Python
|
code.py
|
Venkat-77/Dr-VVR-Greyatom_olympic-hero
|
695f93628fa1c69022cf55b7fe9b4bd1b86dfd28
|
[
"MIT"
] | null | null | null |
code.py
|
Venkat-77/Dr-VVR-Greyatom_olympic-hero
|
695f93628fa1c69022cf55b7fe9b4bd1b86dfd28
|
[
"MIT"
] | null | null | null |
code.py
|
Venkat-77/Dr-VVR-Greyatom_olympic-hero
|
695f93628fa1c69022cf55b7fe9b4bd1b86dfd28
|
[
"MIT"
] | null | null | null |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
data.head(10)
#Code starts here
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data['Better_Event'] = None
data['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'], 'Summer', 'Winter')
data['Better_Event'] =np.where(data['Total_Summer'] == data['Total_Winter'],'Both',data['Better_Event'])
better_event = data['Better_Event'].value_counts().idxmax()
print(better_event)
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
set1 = []
set2 = []
set3 = []
s1 = []
common = []
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
top_countries = data.loc[:, ['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'] ]
print(top_countries.head())
top_countries.drop(top_countries.tail(1).index,inplace=True)
def top_ten(df,col):
country_list = []
top_10=df.nlargest(10, col)
#print(top_10)
print("="*50)
country_list = top_10['Country_Name'].values.tolist()
return country_list
top_10_summer = top_ten(top_countries,"Total_Summer")
top_10_winter = top_ten(top_countries,"Total_Winter")
top_10 = top_ten(top_countries,"Total_Medals")
set1 = set(top_10_summer)
set2 = set(top_10_winter)
set3 = set(top_10)
s1 = set1.intersection(set2)
common = list(s1.intersection(set3))
print(common)
# --------------
#Code starts here
import matplotlib.pyplot as plt
path
set1 = []
set2 = []
set3 = []
s1 = []
common = []
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
top_countries = data.loc[:, ['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'] ]
print(top_countries.head())
top_countries.drop(top_countries.tail(1).index,inplace=True)
def top_ten(df,col):
country_list = []
top_10=df.nlargest(10, col)
#print(top_10)
print("="*50)
country_list = top_10['Country_Name'].values.tolist()
return country_list
top_10_summer = top_ten(top_countries,"Total_Summer")
top_10_winter = top_ten(top_countries,"Total_Winter")
top_10 = top_ten(top_countries,"Total_Medals")
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
plt.figure(figsize=[14,8])
plt.xlabel("Country_Summer")
plt.ylabel("No of Medals")
plt.bar(summer_df['Country_Name'],summer_df['Total_Summer'])
plt.show()
plt.figure(figsize=[14,8])
plt.xlabel("Country_Winter")
plt.ylabel("No of Medals")
plt.bar(winter_df['Country_Name'],winter_df['Total_Winter'])
plt.show()
plt.figure(figsize=[14,8])
plt.xlabel("Country_Summer")
plt.ylabel("No of Medals")
plt.bar(summer_df['Country_Name'],top_df['Total_Medals'])
plt.show()
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
top_countries = data.loc[:, ['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'] ]
top_countries.drop(top_countries.tail(1).index,inplace=True)
def top_ten(df,col):
country_list = []
top_10=df.nlargest(10, col)
#print(top_10)
print("="*50)
country_list = top_10['Country_Name'].values.tolist()
return country_list
top_10_summer = top_ten(top_countries,"Total_Summer")
top_10_winter = top_ten(top_countries,"Total_Winter")
top_10 = top_ten(top_countries,"Total_Medals")
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
print(summer_df.head())
summer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio=max(summer_df['Golden_Ratio'])
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
winter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio=max(winter_df['Golden_Ratio'])
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
top_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio=max(top_df['Golden_Ratio'])
top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
print("="*50)
print(summer_max_ratio)
print(summer_country_gold)
print("="*50)
print(winter_max_ratio)
print(winter_country_gold)
print("="*50)
print(top_max_ratio)
print(top_country_gold)
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
data_1 = data.drop(data.tail(1).index)
data_1 = pd.DataFrame(data_1)
print(data_1.head())
data_1['Total_Points'] = 3*data_1['Gold_Total'] + 2*data_1['Silver_Total'] + data_1['Bronze_Total']
most_points = max(data_1['Total_Points'])
best_country = data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
print(most_points)
print(best_country)
# --------------
#Code starts here
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
best_country = "United States"
best = data.loc[data['Country_Name']=="United States",:]
best = best[['Gold_Total','Silver_Total','Bronze_Total']]
print(best)
best.plot.bar()
plt.xlabel("United States")
plt.ylabel("Medals Tally")
plt.xticks(rotation=45)
plt.show()
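
The chained np.where used earlier for Better_Event is a compact three-way classification; a toy sketch that runs standalone:

    import numpy as np
    import pandas as pd

    toy = pd.DataFrame({'Total_Summer': [5, 2, 3], 'Total_Winter': [1, 2, 7]})
    toy['Better_Event'] = np.where(toy['Total_Summer'] > toy['Total_Winter'], 'Summer', 'Winter')
    toy['Better_Event'] = np.where(toy['Total_Summer'] == toy['Total_Winter'], 'Both', toy['Better_Event'])
    print(toy['Better_Event'].tolist())   # ['Summer', 'Both', 'Winter']
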
| 29.40566
| 105
| 0.703722
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
data.head(10)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data['Better_Event'] = None
data['Better_Event'] = np.where(data['Total_Summer']>data['Total_Winter'], 'Summer', 'Winter')
data['Better_Event'] =np.where(data['Total_Summer'] == data['Total_Winter'],'Both',data['Better_Event'])
better_event = data['Better_Event'].value_counts().idxmax()
print(better_event)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
path
set1 = []
set2 = []
set3 = []
s1 = []
common = []
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
top_countries = data.loc[:, ['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'] ]
print(top_countries.head())
top_countries.drop(top_countries.tail(1).index,inplace=True)
def top_ten(df,col):
country_list = []
top_10=df.nlargest(10, col)
print("="*50)
country_list = top_10['Country_Name'].values.tolist()
return country_list
top_10_summer = top_ten(top_countries,"Total_Summer")
top_10_winter = top_ten(top_countries,"Total_Winter")
top_10 = top_ten(top_countries,"Total_Medals")
set1 = set(top_10_summer)
set2 = set(top_10_winter)
set3 = set(top_10)
s1 = set1.intersection(set2)
common = list(s1.intersection(set3))
print(common)
import matplotlib.pyplot as plt
path
set1 = []
set2 = []
set3 = []
s1 = []
common = []
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
top_countries = data.loc[:, ['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'] ]
print(top_countries.head())
top_countries.drop(top_countries.tail(1).index,inplace=True)
def top_ten(df,col):
country_list = []
top_10=df.nlargest(10, col)
print("="*50)
country_list = top_10['Country_Name'].values.tolist()
return country_list
top_10_summer = top_ten(top_countries,"Total_Summer")
top_10_winter = top_ten(top_countries,"Total_Winter")
top_10 = top_ten(top_countries,"Total_Medals")
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
plt.figure(figsize=[14,8])
plt.xlabel("Country_Summer")
plt.ylabel("No of Medals")
plt.bar(summer_df['Country_Name'],summer_df['Total_Summer'])
plt.show()
plt.figure(figsize=[14,8])
plt.xlabel("Country_Winter")
plt.ylabel("No of Medals")
plt.bar(winter_df['Country_Name'],winter_df['Total_Winter'])
plt.show()
plt.figure(figsize=[14,8])
plt.xlabel("Country_Summer")
plt.ylabel("No of Medals")
plt.bar(summer_df['Country_Name'],top_df['Total_Medals'])
plt.show()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
top_countries = data.loc[:, ['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'] ]
top_countries.drop(top_countries.tail(1).index,inplace=True)
def top_ten(df,col):
country_list = []
top_10=df.nlargest(10, col)
print("="*50)
country_list = top_10['Country_Name'].values.tolist()
return country_list
top_10_summer = top_ten(top_countries,"Total_Summer")
top_10_winter = top_ten(top_countries,"Total_Winter")
top_10 = top_ten(top_countries,"Total_Medals")
summer_df = data[data['Country_Name'].isin(top_10_summer)]
winter_df = data[data['Country_Name'].isin(top_10_winter)]
top_df = data[data['Country_Name'].isin(top_10)]
print(summer_df.head())
summer_df['Golden_Ratio']=summer_df['Gold_Summer']/summer_df['Total_Summer']
summer_max_ratio=max(summer_df['Golden_Ratio'])
summer_country_gold=summer_df.loc[summer_df['Golden_Ratio'].idxmax(),'Country_Name']
winter_df['Golden_Ratio']=winter_df['Gold_Winter']/winter_df['Total_Winter']
winter_max_ratio=max(winter_df['Golden_Ratio'])
winter_country_gold=winter_df.loc[winter_df['Golden_Ratio'].idxmax(),'Country_Name']
top_df['Golden_Ratio']=top_df['Gold_Total']/top_df['Total_Medals']
top_max_ratio=max(top_df['Golden_Ratio'])
top_country_gold=top_df.loc[top_df['Golden_Ratio'].idxmax(),'Country_Name']
print("="*50)
print(summer_max_ratio)
print(summer_country_gold)
print("="*50)
print(winter_max_ratio)
print(winter_country_gold)
print("="*50)
print(top_max_ratio)
print(top_country_gold)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
data_1 = data.drop(data.tail(1).index)
data_1 = pd.DataFrame(data_1)
print(data_1.head())
data_1['Total_Points'] = 3*data_1['Gold_Total'] + 2*data_1['Silver_Total'] + data_1['Bronze_Total']
most_points = max(data_1['Total_Points'])
best_country = data_1.loc[data_1['Total_Points'].idxmax(),'Country_Name']
print(most_points)
print(best_country)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
path
data = pd.read_csv(path)
data = pd.DataFrame(data)
data.rename(columns = {'Total':'Total_Medals'}, inplace = True)
best_country = "United States"
best = data.loc[data['Country_Name']=="United States",:]
best = best[['Gold_Total','Silver_Total','Bronze_Total']]
print(best)
best.plot.bar()
plt.xlabel("United States")
plt.ylabel("Medals Tally")
plt.xticks(rotation=45)
plt.show()
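
The top_ten helper above is a thin wrapper around DataFrame.nlargest; a self-contained illustration with toy data:

    import pandas as pd

    toy = pd.DataFrame({'Country_Name': ['A', 'B', 'C'], 'Total_Medals': [10, 30, 20]})
    top_2 = toy.nlargest(2, 'Total_Medals')['Country_Name'].tolist()
    print(top_2)   # ['B', 'C']
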
| true
| true
|
1c4018ebeed7341777b2f8ab65eb0279eba32088
| 1,005
|
py
|
Python
|
official/modeling/hyperparams/__init__.py
|
davidnugent2425/models
|
4b266855705212c21af762df72783d816596a790
|
[
"Apache-2.0"
] | 1
|
2021-05-06T16:04:17.000Z
|
2021-05-06T16:04:17.000Z
|
official/modeling/hyperparams/__init__.py
|
parthsaxena1909/models
|
440e7851f50cc7a7bcc8f4d7a4d6ae3861f60ade
|
[
"Apache-2.0"
] | null | null | null |
official/modeling/hyperparams/__init__.py
|
parthsaxena1909/models
|
440e7851f50cc7a7bcc8f4d7a4d6ae3861f60ade
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hyperparams package definition."""
# pylint: disable=g-multiple-import
from official.modeling.hyperparams.base_config import *
from official.modeling.hyperparams.config_definitions import CallbacksConfig, RuntimeConfig, TensorboardConfig
from official.modeling.hyperparams.params_dict import *
| 47.857143
| 110
| 0.727363
|
from official.modeling.hyperparams.base_config import *
from official.modeling.hyperparams.config_definitions import CallbacksConfig, RuntimeConfig, TensorboardConfig
from official.modeling.hyperparams.params_dict import *
| true
| true
|
1c4019063e9304e210ed555cf2e82f2ec4f42c1b
| 8,620
|
py
|
Python
|
ytopt/search/async_search.py
|
Kerilk/ytopt
|
05cc166d76dbf2a9ec77f3c9ed435ea3ebcb104c
|
[
"BSD-2-Clause"
] | null | null | null |
ytopt/search/async_search.py
|
Kerilk/ytopt
|
05cc166d76dbf2a9ec77f3c9ed435ea3ebcb104c
|
[
"BSD-2-Clause"
] | null | null | null |
ytopt/search/async_search.py
|
Kerilk/ytopt
|
05cc166d76dbf2a9ec77f3c9ed435ea3ebcb104c
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
#from NeuralNetworksDropoutRegressor import NeuralNetworksDropoutRegressor
from mpi4py import MPI
import re
import os
import sys
import time
import json
import math
from skopt import Optimizer
import argparse
from skopt.acquisition import gaussian_ei, gaussian_pi, gaussian_lcb
import numpy as np
from ytopt.search.NeuralNetworksDropoutRegressor import NeuralNetworksDropoutRegressor
from ytopt.search.search import Search
from ytopt.search.utils import tags, saveResults
seed = 1234
class AsyncSearch(Search):
def __init__(self, **kwargs):
super().__init__(**kwargs)
param_dict = kwargs
self.acq_func = param_dict['acq_func']
self.base_estimator=param_dict['base_estimator']
self.kappa = param_dict['kappa']
self.patience_fac = param_dict['patience_fac']
self.acq_optimizer = param_dict['acq_optimizer']
self.n_initial_points = param_dict['n_initial_points']
@staticmethod
def _extend_parser(parser):
parser.add_argument('--base_estimator', action='store', dest='base_estimator',
nargs='?', type=str, default='RF',
help='which base estimator')
parser.add_argument('--kappa', action='store', dest='kappa',
nargs='?', const=2, type=float, default='0',
help='kappa value')
parser.add_argument('--acq_func', action='store', dest='acq_func',
nargs='?', type=str, default='gp_hedge',
help='which acquisition function')
parser.add_argument('--patience_fac', action='store', dest='patience_fac',
nargs='?', const=2, type=float, default='10',
help='patience_fac for early stopping; search stops when no improvement \
is seen for patience_fac * n evals')
parser.add_argument('--acq_optimizer', action='store', dest='acq_optimizer',
nargs='?', type=str, default='sampling',
help='method to minimize acquisition function sampling or lbfgs')
parser.add_argument('--n_initial_points', action='store', dest='n_initial_points',
nargs='?', const=2, type=float, default='10',
help='number of initial points')
return parser
def main(self):
# Initializations and preliminaries
comm = MPI.COMM_WORLD # get MPI communicator object
size = comm.size # total number of processes
rank = comm.rank # rank of this process
status = MPI.Status() # get MPI status object
comm.Barrier()
start_time = time.time()
# Master process executes code below
if rank == 0:
num_workers = size - 1
closed_workers = 0
space = [self.spaceDict[key] for key in self.params]
print("space: ", space)
eval_counter = 0
parDict = {}
evalDict = {}
resultsList = []
parDict['kappa']=self.kappa
init_x = []
delta = 0.05
#patience = max(100, 3 * num_workers-1)
patience = len(self.params) * self.patience_fac
last_imp = 0
curr_best = math.inf
if self.base_estimator =='NND':
opt = Optimizer(space, base_estimator=NeuralNetworksDropoutRegressor(), acq_optimizer='sampling',
acq_func = self.acq_func, acq_func_kwargs=parDict, random_state=seed)
else:
opt = Optimizer(space,
base_estimator=self.base_estimator,
acq_optimizer=self.acq_optimizer,
acq_func=self.acq_func,
acq_func_kwargs=parDict,
random_state=seed,
n_initial_points=self.n_initial_points)
print('Master starting with {} workers'.format(num_workers))
while closed_workers < num_workers:
data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
source = status.Get_source()
tag = status.Get_tag()
elapsed_time = float(time.time() - start_time)
print('elapsed_time:%1.3f'%elapsed_time)
if tag == tags.READY:
if last_imp < patience and eval_counter < self.max_evals and elapsed_time < self.max_time:
if self.starting_point is not None:
x = self.starting_point
if num_workers-1 > 0:
init_x = opt.ask(n_points=num_workers-1)
self.starting_point = None
else:
if len(init_x) > 0:
x = init_x.pop(0)
else:
x = opt.ask(n_points=1, strategy='cl_min')[0]
key = str(x)
print('sample %s' % key)
if key in evalDict.keys():
                            print('%s already evaluated' % key)
evalDict[key] = None
task = {}
task['x'] = x
task['eval_counter'] = eval_counter
task['rank_master'] = rank
#task['start_time'] = elapsed_time
print('Sending task {} to worker {}'.format (eval_counter, source))
comm.send(task, dest=source, tag=tags.START)
eval_counter = eval_counter + 1
else:
comm.send(None, dest=source, tag=tags.EXIT)
elif tag == tags.DONE:
result = data
result['end_time'] = elapsed_time
print('Got data from worker {}'.format(source))
resultsList.append(result)
x = result['x']
y = result['cost']
opt.tell(x, y)
percent_improv = -100*((y+0.1) - (curr_best+0.1))/(curr_best+0.1)
if y < curr_best:
if percent_improv >= delta or curr_best==math.inf:
curr_best = y
last_imp = 0
else:
last_imp = last_imp+1
print('curr_best={} percent_improv={} patience={}/{}'.format(curr_best, percent_improv, last_imp, patience))
elif tag == tags.EXIT:
print('Worker {} exited.'.format(source))
closed_workers = closed_workers + 1
resultsList = data
print('Search finished..')
#resultsList = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status) #comm.recv(source=MPI.ANY_SOURCE, tag=tags.EXIT)
#print(resultsList)
saveResults(resultsList, self.results_json_fname, self.results_csv_fname)
y_best = np.min(opt.yi)
best_index = np.where(opt.yi==y_best)[0][0]
x_best = opt.Xi[best_index]
            print('Best: x = {}; y = {}'.format(x_best, y_best))
else:
# Worker processes execute code below
name = MPI.Get_processor_name()
print("worker with rank %d on %s." % (rank, name))
resultsList = []
while True:
comm.send(None, dest=0, tag=tags.READY)
task = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
tag = status.Get_tag()
if tag == tags.START:
result = self.evaluate(self.problem, task, self.jobs_dir, self.results_dir)
elapsed_time = float(time.time() - start_time)
result['elapsed_time'] = elapsed_time
print(result)
resultsList.append(result)
comm.send(result, dest=0, tag=tags.DONE)
elif tag == tags.EXIT:
print(f'Exit rank={comm.rank}')
break
comm.send(resultsList, dest=0, tag=tags.EXIT)
if __name__ == "__main__":
args = AsyncSearch.parse_args()
search = AsyncSearch(**vars(args))
search.main()
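
The master loop above reduces to scikit-optimize's ask/tell protocol; a serial sketch of the same pattern (assumes skopt is installed; the objective is a stand-in for what a worker would evaluate remotely):

    from skopt import Optimizer
    from skopt.space import Real

    def objective(x):
        # Stand-in for the expensive evaluation a worker would run.
        return (x[0] - 0.3) ** 2

    opt = Optimizer([Real(-1.0, 1.0)], base_estimator='RF',
                    acq_optimizer='sampling', random_state=1234)
    for _ in range(10):
        x = opt.ask()      # master proposes a configuration
        y = objective(x)   # a worker evaluates it (here, inline)
        opt.tell(x, y)     # the result refits the surrogate model
    print(min(opt.yi))
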
| 46.847826
| 148
| 0.520302
|
from __future__ import print_function
from mpi4py import MPI
import re
import os
import sys
import time
import json
import math
from skopt import Optimizer
import argparse
from skopt.acquisition import gaussian_ei, gaussian_pi, gaussian_lcb
import numpy as np
from ytopt.search.NeuralNetworksDropoutRegressor import NeuralNetworksDropoutRegressor
from ytopt.search.search import Search
from ytopt.search.utils import tags, saveResults
seed = 1234
class AsyncSearch(Search):
def __init__(self, **kwargs):
super().__init__(**kwargs)
param_dict = kwargs
self.acq_func = param_dict['acq_func']
self.base_estimator=param_dict['base_estimator']
self.kappa = param_dict['kappa']
self.patience_fac = param_dict['patience_fac']
self.acq_optimizer = param_dict['acq_optimizer']
self.n_initial_points = param_dict['n_initial_points']
@staticmethod
def _extend_parser(parser):
parser.add_argument('--base_estimator', action='store', dest='base_estimator',
nargs='?', type=str, default='RF',
help='which base estimator')
parser.add_argument('--kappa', action='store', dest='kappa',
nargs='?', const=2, type=float, default='0',
help='kappa value')
parser.add_argument('--acq_func', action='store', dest='acq_func',
nargs='?', type=str, default='gp_hedge',
help='which acquisition function')
parser.add_argument('--patience_fac', action='store', dest='patience_fac',
nargs='?', const=2, type=float, default='10',
help='patience_fac for early stopping; search stops when no improvement \
is seen for patience_fac * n evals')
parser.add_argument('--acq_optimizer', action='store', dest='acq_optimizer',
nargs='?', type=str, default='sampling',
help='method to minimize acquisition function sampling or lbfgs')
parser.add_argument('--n_initial_points', action='store', dest='n_initial_points',
nargs='?', const=2, type=float, default='10',
help='number of initial points')
return parser
def main(self):
comm = MPI.COMM_WORLD
size = comm.size
rank = comm.rank
status = MPI.Status()
comm.Barrier()
start_time = time.time()
if rank == 0:
num_workers = size - 1
closed_workers = 0
space = [self.spaceDict[key] for key in self.params]
print("space: ", space)
eval_counter = 0
parDict = {}
evalDict = {}
resultsList = []
parDict['kappa']=self.kappa
init_x = []
delta = 0.05
patience = len(self.params) * self.patience_fac
last_imp = 0
curr_best = math.inf
        if self.base_estimator == 'NND':
            opt = Optimizer(space, base_estimator=NeuralNetworksDropoutRegressor(), acq_optimizer='sampling',
                            acq_func=self.acq_func, acq_func_kwargs=parDict, random_state=seed)
else:
opt = Optimizer(space,
base_estimator=self.base_estimator,
acq_optimizer=self.acq_optimizer,
acq_func=self.acq_func,
acq_func_kwargs=parDict,
random_state=seed,
n_initial_points=self.n_initial_points)
print('Master starting with {} workers'.format(num_workers))
while closed_workers < num_workers:
data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
source = status.Get_source()
tag = status.Get_tag()
elapsed_time = float(time.time() - start_time)
print('elapsed_time:%1.3f'%elapsed_time)
if tag == tags.READY:
if last_imp < patience and eval_counter < self.max_evals and elapsed_time < self.max_time:
if self.starting_point is not None:
x = self.starting_point
if num_workers-1 > 0:
init_x = opt.ask(n_points=num_workers-1)
self.starting_point = None
else:
if len(init_x) > 0:
x = init_x.pop(0)
else:
x = opt.ask(n_points=1, strategy='cl_min')[0]
key = str(x)
print('sample %s' % key)
if key in evalDict.keys():
                    print('%s already evaluated' % key)
evalDict[key] = None
task = {}
task['x'] = x
task['eval_counter'] = eval_counter
task['rank_master'] = rank
                print('Sending task {} to worker {}'.format(eval_counter, source))
comm.send(task, dest=source, tag=tags.START)
eval_counter = eval_counter + 1
else:
comm.send(None, dest=source, tag=tags.EXIT)
elif tag == tags.DONE:
result = data
result['end_time'] = elapsed_time
print('Got data from worker {}'.format(source))
resultsList.append(result)
x = result['x']
y = result['cost']
opt.tell(x, y)
percent_improv = -100*((y+0.1) - (curr_best+0.1))/(curr_best+0.1)
if y < curr_best:
if percent_improv >= delta or curr_best==math.inf:
curr_best = y
last_imp = 0
else:
last_imp = last_imp+1
print('curr_best={} percent_improv={} patience={}/{}'.format(curr_best, percent_improv, last_imp, patience))
elif tag == tags.EXIT:
print('Worker {} exited.'.format(source))
closed_workers = closed_workers + 1
resultsList = data
print('Search finished..')
        saveResults(resultsList, self.results_json_fname, self.results_csv_fname)
y_best = np.min(opt.yi)
best_index = np.where(opt.yi==y_best)[0][0]
x_best = opt.Xi[best_index]
        print('Best: x = {}; y={}'.format(x_best, y_best))
else:
name = MPI.Get_processor_name()
print("worker with rank %d on %s." % (rank, name))
resultsList = []
while True:
comm.send(None, dest=0, tag=tags.READY)
task = comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
tag = status.Get_tag()
if tag == tags.START:
result = self.evaluate(self.problem, task, self.jobs_dir, self.results_dir)
elapsed_time = float(time.time() - start_time)
result['elapsed_time'] = elapsed_time
print(result)
resultsList.append(result)
comm.send(result, dest=0, tag=tags.DONE)
elif tag == tags.EXIT:
print(f'Exit rank={comm.rank}')
break
comm.send(resultsList, dest=0, tag=tags.EXIT)
if __name__ == "__main__":
args = AsyncSearch.parse_args()
search = AsyncSearch(**vars(args))
search.main()
| true
| true
|
1c40190902964cf6e1c37651233e06b92a761122
| 1,379
|
py
|
Python
|
src/entities/challenge.py
|
koddas/python-oop-consistency-lab
|
8ee3124aa230359d296fdfbe0c23773602769c8c
|
[
"MIT"
] | null | null | null |
src/entities/challenge.py
|
koddas/python-oop-consistency-lab
|
8ee3124aa230359d296fdfbe0c23773602769c8c
|
[
"MIT"
] | null | null | null |
src/entities/challenge.py
|
koddas/python-oop-consistency-lab
|
8ee3124aa230359d296fdfbe0c23773602769c8c
|
[
"MIT"
] | null | null | null |
from entities.token import Token
class Challenge:
'''
Challenge represents a challenge for the participants to solve.
'''
# Please don't fiddle with these variables!
#__question: str = ""
#__response: int = 0
#__token: Token = None
#__counter: int = 0
def __init__(self, token: Token):
self.__question = ""
self.__response = 0
self.__token = token
self.__counter = 0
def set_question(self, question: str) -> None:
'''
Sets the content of the question.
'''
self.__counter += 1
self.__question = question
def get_question(self) -> str:
'''
Returns the question.
'''
self.__counter += 1
return self.__question
def set_response(self, response: int) -> None:
'''
Sets the response to the question.
'''
self.__counter += 1
if type(response) is int:
self.__response = response
def get_response(self) -> int:
'''
Returns the response from this object.
'''
self.__counter += 1
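        # Note: the returned value is offset by the access counter, so
        # repeated reads disagree with what was set; presumably this is the
        # intentional inconsistency this lab asks participants to find.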
return self.__response + self.__counter
def get_token(self) -> Token:
'''
Returns the token associated with this challenge.
'''
self.__counter += 1
return self.__token
| 25.072727
| 68
| 0.546048
|
from entities.token import Token
class Challenge:
#__question: str = ""
#__response: int = 0
#__token: Token = None
#__counter: int = 0
def __init__(self, token: Token):
self.__question = ""
self.__response = 0
self.__token = token
self.__counter = 0
def set_question(self, question: str) -> None:
self.__counter += 1
self.__question = question
def get_question(self) -> str:
self.__counter += 1
return self.__question
def set_response(self, response: int) -> None:
self.__counter += 1
if type(response) is int:
self.__response = response
def get_response(self) -> int:
self.__counter += 1
return self.__response + self.__counter
def get_token(self) -> Token:
self.__counter += 1
return self.__token
| true
| true
|
1c401a62963e50ff04a74be5b15427dba187edf6
| 20,060
|
py
|
Python
|
nova/virt/vmwareapi/images.py
|
belmiromoreira/nova
|
d03ef34b0b1ed96a2f2bea1f5f01f09436c55125
|
[
"Apache-2.0"
] | null | null | null |
nova/virt/vmwareapi/images.py
|
belmiromoreira/nova
|
d03ef34b0b1ed96a2f2bea1f5f01f09436c55125
|
[
"Apache-2.0"
] | 1
|
2019-01-02T01:30:35.000Z
|
2019-01-02T01:38:02.000Z
|
nova/virt/vmwareapi/images.py
|
jeffrey4l/nova
|
35375133398d862a61334783c1e7a90b95f34cdb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility functions for Image transfer and manipulation.
"""
import os
import tarfile
import tempfile
from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import units
from oslo_vmware import rw_handles
import six
from nova import exception
from nova.i18n import _, _LE, _LI
from nova import image
from nova.objects import fields
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import io_util
# NOTE(mdbooth): We use use_linked_clone below, but don't have to import it
# because nova.virt.vmwareapi.driver is imported first. In fact, it is not
# possible to import it here, as nova.virt.vmwareapi.driver calls
# CONF.register_opts() after the import chain which imports this module. This
# is not a problem as long as the import order doesn't change.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
QUEUE_BUFFER_SIZE = 10
class VMwareImage(object):
def __init__(self, image_id,
file_size=0,
os_type=constants.DEFAULT_OS_TYPE,
adapter_type=constants.DEFAULT_ADAPTER_TYPE,
disk_type=constants.DEFAULT_DISK_TYPE,
container_format=constants.CONTAINER_FORMAT_BARE,
file_type=constants.DEFAULT_DISK_FORMAT,
linked_clone=None,
vif_model=constants.DEFAULT_VIF_MODEL):
"""VMwareImage holds values for use in building VMs.
image_id (str): uuid of the image
file_size (int): size of file in bytes
os_type (str): name of guest os (use vSphere names only)
adapter_type (str): name of the adapter's type
disk_type (str): type of disk in thin, thick, etc
container_format (str): container format (bare or ova)
file_type (str): vmdk or iso
linked_clone (bool): use linked clone, or don't
vif_model (str): virtual machine network interface
"""
self.image_id = image_id
self.file_size = file_size
self.os_type = os_type
self.adapter_type = adapter_type
self.container_format = container_format
self.disk_type = disk_type
self.file_type = file_type
# NOTE(vui): This should be removed when we restore the
# descriptor-based validation.
if (self.file_type is not None and
self.file_type not in constants.DISK_FORMATS_ALL):
raise exception.InvalidDiskFormat(disk_format=self.file_type)
if linked_clone is not None:
self.linked_clone = linked_clone
else:
self.linked_clone = CONF.vmware.use_linked_clone
self.vif_model = vif_model
@property
def file_size_in_kb(self):
return self.file_size / units.Ki
@property
def is_sparse(self):
return self.disk_type == constants.DISK_TYPE_SPARSE
@property
def is_iso(self):
return self.file_type == constants.DISK_FORMAT_ISO
@property
def is_ova(self):
return self.container_format == constants.CONTAINER_FORMAT_OVA
@classmethod
def from_image(cls, image_id, image_meta):
"""Returns VMwareImage, the subset of properties the driver uses.
:param image_id - image id of image
:param image_meta - image metadata object we are working with
:return: vmware image object
:rtype: nova.virt.vmwareapi.images.VmwareImage
"""
properties = image_meta.properties
# calculate linked_clone flag, allow image properties to override the
# global property set in the configurations.
image_linked_clone = properties.get('img_linked_clone',
CONF.vmware.use_linked_clone)
# catch any string values that need to be interpreted as boolean values
linked_clone = strutils.bool_from_string(image_linked_clone)
if image_meta.obj_attr_is_set('container_format'):
container_format = image_meta.container_format
else:
container_format = None
props = {
'image_id': image_id,
'linked_clone': linked_clone,
'container_format': container_format
}
if image_meta.obj_attr_is_set('size'):
props['file_size'] = image_meta.size
if image_meta.obj_attr_is_set('disk_format'):
props['file_type'] = image_meta.disk_format
hw_disk_bus = properties.get('hw_disk_bus')
if hw_disk_bus:
mapping = {
fields.SCSIModel.LSILOGIC:
constants.DEFAULT_ADAPTER_TYPE,
fields.SCSIModel.LSISAS1068:
constants.ADAPTER_TYPE_LSILOGICSAS,
fields.SCSIModel.BUSLOGIC:
constants.ADAPTER_TYPE_BUSLOGIC,
fields.SCSIModel.VMPVSCSI:
constants.ADAPTER_TYPE_PARAVIRTUAL,
}
if hw_disk_bus == fields.DiskBus.IDE:
props['adapter_type'] = constants.ADAPTER_TYPE_IDE
elif hw_disk_bus == fields.DiskBus.SCSI:
hw_scsi_model = properties.get('hw_scsi_model')
props['adapter_type'] = mapping.get(hw_scsi_model)
props_map = {
'os_distro': 'os_type',
'hw_disk_type': 'disk_type',
'hw_vif_model': 'vif_model'
}
for k, v in six.iteritems(props_map):
if properties.obj_attr_is_set(k):
props[v] = properties.get(k)
return cls(**props)
def start_transfer(context, read_file_handle, data_size,
write_file_handle=None, image_id=None, image_meta=None):
"""Start the data transfer from the reader to the writer.
Reader writes to the pipe and the writer reads from the pipe. This means
that the total transfer time boils down to the slower of the read/write
and not the addition of the two times.
"""
if not image_meta:
image_meta = {}
# The pipe that acts as an intermediate store of data for reader to write
# to and writer to grab from.
thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
# The read thread. In case of glance it is the instance of the
# GlanceFileRead class. The glance client read returns an iterator
# and this class wraps that iterator to provide datachunks in calls
# to read.
read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
# In case of Glance - VMware transfer, we just need a handle to the
# HTTP Connection that is to send transfer data to the VMware datastore.
if write_file_handle:
write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
# In case of VMware - Glance transfer, we relinquish VMware HTTP file read
# handle to Glance Client instance, but to be sure of the transfer we need
# to be sure of the status of the image on glance changing to active.
# The GlanceWriteThread handles the same for us.
elif image_id:
write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
image_id, image_meta)
# Start the read and write threads.
read_event = read_thread.start()
write_event = write_thread.start()
try:
# Wait on the read and write events to signal their end
read_event.wait()
write_event.wait()
except Exception as exc:
        # In case any of the reads or writes raises an exception, stop the
        # threads so that we don't unnecessarily keep the other one waiting.
read_thread.stop()
write_thread.stop()
# Log and raise the exception.
LOG.exception(_LE('Transfer data failed'))
raise exception.NovaException(exc)
finally:
# No matter what, try closing the read and write handles, if it so
# applies.
read_file_handle.close()
if write_file_handle:
write_file_handle.close()
def upload_iso_to_datastore(iso_path, instance, **kwargs):
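    # Stream a local ISO file to the datastore through a FileWriteHandle,
    # copying in 64 KiB (0x10000-byte) chunks.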
LOG.debug("Uploading iso %s to datastore", iso_path,
instance=instance)
with open(iso_path, 'r') as iso_file:
write_file_handle = rw_handles.FileWriteHandle(
kwargs.get("host"),
kwargs.get("port"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
kwargs.get("cookies"),
kwargs.get("file_path"),
os.fstat(iso_file.fileno()).st_size)
LOG.debug("Uploading iso of size : %s ",
os.fstat(iso_file.fileno()).st_size)
block_size = 0x10000
data = iso_file.read(block_size)
while len(data) > 0:
write_file_handle.write(data)
data = iso_file.read(block_size)
write_file_handle.close()
LOG.debug("Uploaded iso %s to datastore", iso_path,
instance=instance)
def fetch_image(context, instance, host, port, dc_name, ds_name, file_path,
cookies=None):
"""Download image from the glance image server."""
image_ref = instance.image_ref
LOG.debug("Downloading image file data %(image_ref)s to the "
"data store %(data_store_name)s",
{'image_ref': image_ref,
'data_store_name': ds_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
read_iter = IMAGE_API.download(context, image_ref)
read_file_handle = rw_handles.ImageReadHandle(read_iter)
write_file_handle = rw_handles.FileWriteHandle(
host, port, dc_name, ds_name, cookies, file_path, file_size)
start_transfer(context, read_file_handle, file_size,
write_file_handle=write_file_handle)
LOG.debug("Downloaded image file data %(image_ref)s to "
"%(upload_name)s on the data store "
"%(data_store_name)s",
{'image_ref': image_ref,
'upload_name': 'n/a' if file_path is None else file_path,
'data_store_name': 'n/a' if ds_name is None else ds_name},
instance=instance)
def _build_shadow_vm_config_spec(session, name, size_kb, disk_type, ds_name):
"""Return spec for creating a shadow VM for image disk.
The VM is never meant to be powered on. When used in importing
a disk it governs the directory name created for the VM
and the disk type of the disk image to convert to.
:param name: Name of the backing
:param size_kb: Size in KB of the backing
:param disk_type: VMDK type for the disk
:param ds_name: Datastore name where the disk is to be provisioned
:return: Spec for creation
"""
cf = session.vim.client.factory
controller_device = cf.create('ns0:VirtualLsiLogicController')
controller_device.key = -100
controller_device.busNumber = 0
controller_device.sharedBus = 'noSharing'
controller_spec = cf.create('ns0:VirtualDeviceConfigSpec')
controller_spec.operation = 'add'
controller_spec.device = controller_device
disk_device = cf.create('ns0:VirtualDisk')
# for very small disks allocate at least 1KB
disk_device.capacityInKB = max(1, int(size_kb))
disk_device.key = -101
disk_device.unitNumber = 0
disk_device.controllerKey = -100
disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo')
if disk_type == constants.DISK_TYPE_EAGER_ZEROED_THICK:
disk_device_bkng.eagerlyScrub = True
elif disk_type == constants.DISK_TYPE_THIN:
disk_device_bkng.thinProvisioned = True
disk_device_bkng.fileName = '[%s]' % ds_name
disk_device_bkng.diskMode = 'persistent'
disk_device.backing = disk_device_bkng
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.operation = 'add'
disk_spec.fileOperation = 'create'
disk_spec.device = disk_device
vm_file_info = cf.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = '[%s]' % ds_name
create_spec = cf.create('ns0:VirtualMachineConfigSpec')
create_spec.name = name
create_spec.guestId = 'otherGuest'
create_spec.numCPUs = 1
create_spec.memoryMB = 128
create_spec.deviceChange = [controller_spec, disk_spec]
create_spec.files = vm_file_info
return create_spec
def _build_import_spec_for_import_vapp(session, vm_name, datastore_name):
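    # Wrap a shadow-VM config spec in a VirtualMachineImportSpec for use with
    # ImportVApp. The size is 0 and the disk thin-provisioned because the
    # actual disk contents are streamed in afterwards.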
vm_create_spec = _build_shadow_vm_config_spec(
session, vm_name, 0, constants.DISK_TYPE_THIN, datastore_name)
client_factory = session.vim.client.factory
vm_import_spec = client_factory.create('ns0:VirtualMachineImportSpec')
vm_import_spec.configSpec = vm_create_spec
return vm_import_spec
def fetch_image_stream_optimized(context, instance, session, vm_name,
ds_name, vm_folder_ref, res_pool_ref):
"""Fetch image from Glance to ESX datastore."""
image_ref = instance.image_ref
LOG.debug("Downloading image file data %(image_ref)s to the ESX "
"as VM named '%(vm_name)s'",
{'image_ref': image_ref, 'vm_name': vm_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
vm_import_spec = _build_import_spec_for_import_vapp(
session, vm_name, ds_name)
read_iter = IMAGE_API.download(context, image_ref)
read_handle = rw_handles.ImageReadHandle(read_iter)
write_handle = rw_handles.VmdkWriteHandle(session,
session._host,
session._port,
res_pool_ref,
vm_folder_ref,
vm_import_spec,
file_size)
start_transfer(context,
read_handle,
file_size,
write_file_handle=write_handle)
imported_vm_ref = write_handle.get_imported_vm()
LOG.info(_LI("Downloaded image file data %(image_ref)s"),
{'image_ref': instance.image_ref}, instance=instance)
session._call_method(session.vim, "UnregisterVM", imported_vm_ref)
LOG.info(_LI("The imported VM was unregistered"), instance=instance)
def get_vmdk_name_from_ovf(xmlstr):
"""Parse the OVA descriptor to extract the vmdk name."""
ovf = etree.fromstring(xmlstr)
nsovf = "{%s}" % ovf.nsmap["ovf"]
disk = ovf.find("./%sDiskSection/%sDisk" % (nsovf, nsovf))
file_id = disk.get("%sfileRef" % nsovf)
file = ovf.find('./%sReferences/%sFile[@%sid="%s"]' % (nsovf, nsovf,
nsovf, file_id))
vmdk_name = file.get("%shref" % nsovf)
return vmdk_name
def fetch_image_ova(context, instance, session, vm_name, ds_name,
vm_folder_ref, res_pool_ref):
"""Download the OVA image from the glance image server to the
Nova compute node.
"""
image_ref = instance.image_ref
LOG.debug("Downloading OVA image file %(image_ref)s to the ESX "
"as VM named '%(vm_name)s'",
{'image_ref': image_ref, 'vm_name': vm_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
vm_import_spec = _build_import_spec_for_import_vapp(
session, vm_name, ds_name)
read_iter = IMAGE_API.download(context, image_ref)
ova_fd, ova_path = tempfile.mkstemp()
try:
# NOTE(arnaud): Look to eliminate first writing OVA to file system
with os.fdopen(ova_fd, 'w') as fp:
for chunk in read_iter:
fp.write(chunk)
with tarfile.open(ova_path, mode="r") as tar:
vmdk_name = None
for tar_info in tar:
if tar_info and tar_info.name.endswith(".ovf"):
extracted = tar.extractfile(tar_info.name)
xmlstr = extracted.read()
vmdk_name = get_vmdk_name_from_ovf(xmlstr)
elif vmdk_name and tar_info.name.startswith(vmdk_name):
# Actual file name is <vmdk_name>.XXXXXXX
extracted = tar.extractfile(tar_info.name)
write_handle = rw_handles.VmdkWriteHandle(
session,
session._host,
session._port,
res_pool_ref,
vm_folder_ref,
vm_import_spec,
file_size)
start_transfer(context,
extracted,
file_size,
write_file_handle=write_handle)
extracted.close()
LOG.info(_LI("Downloaded OVA image file %(image_ref)s"),
{'image_ref': instance.image_ref}, instance=instance)
imported_vm_ref = write_handle.get_imported_vm()
session._call_method(session.vim, "UnregisterVM",
imported_vm_ref)
LOG.info(_LI("The imported VM was unregistered"),
instance=instance)
return
raise exception.ImageUnacceptable(
reason=_("Extracting vmdk from OVA failed."),
image_id=image_ref)
finally:
os.unlink(ova_path)
def upload_image_stream_optimized(context, image_id, instance, session,
vm, vmdk_size):
"""Upload the snapshotted vm disk file to Glance image server."""
LOG.debug("Uploading image %s", image_id, instance=instance)
metadata = IMAGE_API.get(context, image_id)
read_handle = rw_handles.VmdkReadHandle(session,
session._host,
session._port,
vm,
None,
vmdk_size)
# Set the image properties. It is important to set the 'size' to 0.
# Otherwise, the image service client will use the VM's disk capacity
# which will not be the image size after upload, since it is converted
# to a stream-optimized sparse disk.
image_metadata = {'disk_format': 'vmdk',
'is_public': metadata['is_public'],
'name': metadata['name'],
'status': 'active',
'container_format': 'bare',
'size': 0,
'properties': {'vmware_image_version': 1,
'vmware_disktype': 'streamOptimized',
'owner_id': instance.project_id}}
# Passing 0 as the file size since data size to be transferred cannot be
# predetermined.
start_transfer(context,
read_handle,
0,
image_id=image_id,
image_meta=image_metadata)
LOG.debug("Uploaded image %s to the Glance image server", image_id,
instance=instance)
| 39.960159
| 79
| 0.625174
|
import os
import tarfile
import tempfile
from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from oslo_utils import units
from oslo_vmware import rw_handles
import six
from nova import exception
from nova.i18n import _, _LE, _LI
from nova import image
from nova.objects import fields
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import io_util
# NOTE(mdbooth): We use use_linked_clone below, but don't have to import it
# because nova.virt.vmwareapi.driver is imported first. In fact, it is not
# possible to import it here, as nova.virt.vmwareapi.driver calls
# CONF.register_opts() after the import chain which imports this module. This
# is not a problem as long as the import order doesn't change.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
IMAGE_API = image.API()
QUEUE_BUFFER_SIZE = 10
class VMwareImage(object):
def __init__(self, image_id,
file_size=0,
os_type=constants.DEFAULT_OS_TYPE,
adapter_type=constants.DEFAULT_ADAPTER_TYPE,
disk_type=constants.DEFAULT_DISK_TYPE,
container_format=constants.CONTAINER_FORMAT_BARE,
file_type=constants.DEFAULT_DISK_FORMAT,
linked_clone=None,
vif_model=constants.DEFAULT_VIF_MODEL):
self.image_id = image_id
self.file_size = file_size
self.os_type = os_type
self.adapter_type = adapter_type
self.container_format = container_format
self.disk_type = disk_type
self.file_type = file_type
if (self.file_type is not None and
self.file_type not in constants.DISK_FORMATS_ALL):
raise exception.InvalidDiskFormat(disk_format=self.file_type)
if linked_clone is not None:
self.linked_clone = linked_clone
else:
self.linked_clone = CONF.vmware.use_linked_clone
self.vif_model = vif_model
@property
def file_size_in_kb(self):
return self.file_size / units.Ki
@property
def is_sparse(self):
return self.disk_type == constants.DISK_TYPE_SPARSE
@property
def is_iso(self):
return self.file_type == constants.DISK_FORMAT_ISO
@property
def is_ova(self):
return self.container_format == constants.CONTAINER_FORMAT_OVA
@classmethod
def from_image(cls, image_id, image_meta):
properties = image_meta.properties
image_linked_clone = properties.get('img_linked_clone',
CONF.vmware.use_linked_clone)
linked_clone = strutils.bool_from_string(image_linked_clone)
if image_meta.obj_attr_is_set('container_format'):
container_format = image_meta.container_format
else:
container_format = None
props = {
'image_id': image_id,
'linked_clone': linked_clone,
'container_format': container_format
}
if image_meta.obj_attr_is_set('size'):
props['file_size'] = image_meta.size
if image_meta.obj_attr_is_set('disk_format'):
props['file_type'] = image_meta.disk_format
hw_disk_bus = properties.get('hw_disk_bus')
if hw_disk_bus:
mapping = {
fields.SCSIModel.LSILOGIC:
constants.DEFAULT_ADAPTER_TYPE,
fields.SCSIModel.LSISAS1068:
constants.ADAPTER_TYPE_LSILOGICSAS,
fields.SCSIModel.BUSLOGIC:
constants.ADAPTER_TYPE_BUSLOGIC,
fields.SCSIModel.VMPVSCSI:
constants.ADAPTER_TYPE_PARAVIRTUAL,
}
if hw_disk_bus == fields.DiskBus.IDE:
props['adapter_type'] = constants.ADAPTER_TYPE_IDE
elif hw_disk_bus == fields.DiskBus.SCSI:
hw_scsi_model = properties.get('hw_scsi_model')
props['adapter_type'] = mapping.get(hw_scsi_model)
props_map = {
'os_distro': 'os_type',
'hw_disk_type': 'disk_type',
'hw_vif_model': 'vif_model'
}
for k, v in six.iteritems(props_map):
if properties.obj_attr_is_set(k):
props[v] = properties.get(k)
return cls(**props)
def start_transfer(context, read_file_handle, data_size,
write_file_handle=None, image_id=None, image_meta=None):
if not image_meta:
image_meta = {}
thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
if write_file_handle:
write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
elif image_id:
write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe,
image_id, image_meta)
read_event = read_thread.start()
write_event = write_thread.start()
try:
read_event.wait()
write_event.wait()
except Exception as exc:
read_thread.stop()
write_thread.stop()
# Log and raise the exception.
LOG.exception(_LE('Transfer data failed'))
raise exception.NovaException(exc)
finally:
# No matter what, try closing the read and write handles, if it so
# applies.
read_file_handle.close()
if write_file_handle:
write_file_handle.close()
def upload_iso_to_datastore(iso_path, instance, **kwargs):
LOG.debug("Uploading iso %s to datastore", iso_path,
instance=instance)
with open(iso_path, 'r') as iso_file:
write_file_handle = rw_handles.FileWriteHandle(
kwargs.get("host"),
kwargs.get("port"),
kwargs.get("data_center_name"),
kwargs.get("datastore_name"),
kwargs.get("cookies"),
kwargs.get("file_path"),
os.fstat(iso_file.fileno()).st_size)
LOG.debug("Uploading iso of size : %s ",
os.fstat(iso_file.fileno()).st_size)
block_size = 0x10000
data = iso_file.read(block_size)
while len(data) > 0:
write_file_handle.write(data)
data = iso_file.read(block_size)
write_file_handle.close()
LOG.debug("Uploaded iso %s to datastore", iso_path,
instance=instance)
def fetch_image(context, instance, host, port, dc_name, ds_name, file_path,
cookies=None):
image_ref = instance.image_ref
LOG.debug("Downloading image file data %(image_ref)s to the "
"data store %(data_store_name)s",
{'image_ref': image_ref,
'data_store_name': ds_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
read_iter = IMAGE_API.download(context, image_ref)
read_file_handle = rw_handles.ImageReadHandle(read_iter)
write_file_handle = rw_handles.FileWriteHandle(
host, port, dc_name, ds_name, cookies, file_path, file_size)
start_transfer(context, read_file_handle, file_size,
write_file_handle=write_file_handle)
LOG.debug("Downloaded image file data %(image_ref)s to "
"%(upload_name)s on the data store "
"%(data_store_name)s",
{'image_ref': image_ref,
'upload_name': 'n/a' if file_path is None else file_path,
'data_store_name': 'n/a' if ds_name is None else ds_name},
instance=instance)
def _build_shadow_vm_config_spec(session, name, size_kb, disk_type, ds_name):
cf = session.vim.client.factory
controller_device = cf.create('ns0:VirtualLsiLogicController')
controller_device.key = -100
controller_device.busNumber = 0
controller_device.sharedBus = 'noSharing'
controller_spec = cf.create('ns0:VirtualDeviceConfigSpec')
controller_spec.operation = 'add'
controller_spec.device = controller_device
disk_device = cf.create('ns0:VirtualDisk')
# for very small disks allocate at least 1KB
disk_device.capacityInKB = max(1, int(size_kb))
disk_device.key = -101
disk_device.unitNumber = 0
disk_device.controllerKey = -100
disk_device_bkng = cf.create('ns0:VirtualDiskFlatVer2BackingInfo')
if disk_type == constants.DISK_TYPE_EAGER_ZEROED_THICK:
disk_device_bkng.eagerlyScrub = True
elif disk_type == constants.DISK_TYPE_THIN:
disk_device_bkng.thinProvisioned = True
disk_device_bkng.fileName = '[%s]' % ds_name
disk_device_bkng.diskMode = 'persistent'
disk_device.backing = disk_device_bkng
disk_spec = cf.create('ns0:VirtualDeviceConfigSpec')
disk_spec.operation = 'add'
disk_spec.fileOperation = 'create'
disk_spec.device = disk_device
vm_file_info = cf.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = '[%s]' % ds_name
create_spec = cf.create('ns0:VirtualMachineConfigSpec')
create_spec.name = name
create_spec.guestId = 'otherGuest'
create_spec.numCPUs = 1
create_spec.memoryMB = 128
create_spec.deviceChange = [controller_spec, disk_spec]
create_spec.files = vm_file_info
return create_spec
def _build_import_spec_for_import_vapp(session, vm_name, datastore_name):
vm_create_spec = _build_shadow_vm_config_spec(
session, vm_name, 0, constants.DISK_TYPE_THIN, datastore_name)
client_factory = session.vim.client.factory
vm_import_spec = client_factory.create('ns0:VirtualMachineImportSpec')
vm_import_spec.configSpec = vm_create_spec
return vm_import_spec
def fetch_image_stream_optimized(context, instance, session, vm_name,
ds_name, vm_folder_ref, res_pool_ref):
image_ref = instance.image_ref
LOG.debug("Downloading image file data %(image_ref)s to the ESX "
"as VM named '%(vm_name)s'",
{'image_ref': image_ref, 'vm_name': vm_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
vm_import_spec = _build_import_spec_for_import_vapp(
session, vm_name, ds_name)
read_iter = IMAGE_API.download(context, image_ref)
read_handle = rw_handles.ImageReadHandle(read_iter)
write_handle = rw_handles.VmdkWriteHandle(session,
session._host,
session._port,
res_pool_ref,
vm_folder_ref,
vm_import_spec,
file_size)
start_transfer(context,
read_handle,
file_size,
write_file_handle=write_handle)
imported_vm_ref = write_handle.get_imported_vm()
LOG.info(_LI("Downloaded image file data %(image_ref)s"),
{'image_ref': instance.image_ref}, instance=instance)
session._call_method(session.vim, "UnregisterVM", imported_vm_ref)
LOG.info(_LI("The imported VM was unregistered"), instance=instance)
def get_vmdk_name_from_ovf(xmlstr):
ovf = etree.fromstring(xmlstr)
nsovf = "{%s}" % ovf.nsmap["ovf"]
disk = ovf.find("./%sDiskSection/%sDisk" % (nsovf, nsovf))
file_id = disk.get("%sfileRef" % nsovf)
file = ovf.find('./%sReferences/%sFile[@%sid="%s"]' % (nsovf, nsovf,
nsovf, file_id))
vmdk_name = file.get("%shref" % nsovf)
return vmdk_name
def fetch_image_ova(context, instance, session, vm_name, ds_name,
vm_folder_ref, res_pool_ref):
image_ref = instance.image_ref
LOG.debug("Downloading OVA image file %(image_ref)s to the ESX "
"as VM named '%(vm_name)s'",
{'image_ref': image_ref, 'vm_name': vm_name},
instance=instance)
metadata = IMAGE_API.get(context, image_ref)
file_size = int(metadata['size'])
vm_import_spec = _build_import_spec_for_import_vapp(
session, vm_name, ds_name)
read_iter = IMAGE_API.download(context, image_ref)
ova_fd, ova_path = tempfile.mkstemp()
try:
# NOTE(arnaud): Look to eliminate first writing OVA to file system
with os.fdopen(ova_fd, 'w') as fp:
for chunk in read_iter:
fp.write(chunk)
with tarfile.open(ova_path, mode="r") as tar:
vmdk_name = None
for tar_info in tar:
if tar_info and tar_info.name.endswith(".ovf"):
extracted = tar.extractfile(tar_info.name)
xmlstr = extracted.read()
vmdk_name = get_vmdk_name_from_ovf(xmlstr)
elif vmdk_name and tar_info.name.startswith(vmdk_name):
# Actual file name is <vmdk_name>.XXXXXXX
extracted = tar.extractfile(tar_info.name)
write_handle = rw_handles.VmdkWriteHandle(
session,
session._host,
session._port,
res_pool_ref,
vm_folder_ref,
vm_import_spec,
file_size)
start_transfer(context,
extracted,
file_size,
write_file_handle=write_handle)
extracted.close()
LOG.info(_LI("Downloaded OVA image file %(image_ref)s"),
{'image_ref': instance.image_ref}, instance=instance)
imported_vm_ref = write_handle.get_imported_vm()
session._call_method(session.vim, "UnregisterVM",
imported_vm_ref)
LOG.info(_LI("The imported VM was unregistered"),
instance=instance)
return
raise exception.ImageUnacceptable(
reason=_("Extracting vmdk from OVA failed."),
image_id=image_ref)
finally:
os.unlink(ova_path)
def upload_image_stream_optimized(context, image_id, instance, session,
vm, vmdk_size):
LOG.debug("Uploading image %s", image_id, instance=instance)
metadata = IMAGE_API.get(context, image_id)
read_handle = rw_handles.VmdkReadHandle(session,
session._host,
session._port,
vm,
None,
vmdk_size)
# Set the image properties. It is important to set the 'size' to 0.
    # Otherwise, the image service client will use the VM's disk capacity
    # which will not be the image size after upload, since it is converted
    # to a stream-optimized sparse disk.
image_metadata = {'disk_format': 'vmdk',
'is_public': metadata['is_public'],
'name': metadata['name'],
'status': 'active',
'container_format': 'bare',
'size': 0,
'properties': {'vmware_image_version': 1,
'vmware_disktype': 'streamOptimized',
'owner_id': instance.project_id}}
start_transfer(context,
read_handle,
0,
image_id=image_id,
image_meta=image_metadata)
LOG.debug("Uploaded image %s to the Glance image server", image_id,
instance=instance)
| true
| true
|
1c401b0419c86d9f826bfaacb0a8386932017d6d
| 6,167
|
py
|
Python
|
webapp/survey_molecular_similarity/retrieve_user_data.py
|
enricogandini/paper_similarity_prediction
|
ef7762edc8c55ccfcb5c791685eac8ef93f0d554
|
[
"MIT"
] | null | null | null |
webapp/survey_molecular_similarity/retrieve_user_data.py
|
enricogandini/paper_similarity_prediction
|
ef7762edc8c55ccfcb5c791685eac8ef93f0d554
|
[
"MIT"
] | null | null | null |
webapp/survey_molecular_similarity/retrieve_user_data.py
|
enricogandini/paper_similarity_prediction
|
ef7762edc8c55ccfcb5c791685eac8ef93f0d554
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Created on Fri Mar 26 16:35:26 2021
# Copyright © Enrico Gandini <enricogandini93@gmail.com>
#
# Distributed under terms of the MIT License.
"""Retrieve data from database, and save it as CSV files
that can be further analyzed.
Files will be saved in a separate directory each day this script is executed.
So, various versions of the same queries will be present on the disk, and you
can monitor the progress of the survey.
"""
import argparse
import datetime
from os import environ
from pathlib import Path
import subprocess
import pandas as pd
from sqlalchemy import func, case, cast, Integer
from database_utils import MolecularPair, User, Answer
from database_utils import create_db_engine_and_session
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--where",
action="store",
choices=["local", "heroku"],
help=("Whether to initiate tables on a local database"
" or an existing Postgres database on Heroku"),
required=True,
)
args = parser.parse_args()
location_db = args.where
db_objects = {}
#Get Database URL
if location_db == "local":
db_objects["url"] = environ.get("DATABASE_URL") #Use local Database
elif location_db == "heroku":
process_get_db_url = subprocess.run("heroku config:get DATABASE_URL",
capture_output=True,
shell=True,
)
db_objects["url"] = process_get_db_url.stdout.decode()
#SQLAlchemy >= 1.4 has changed Postgres URL,
#but Heroku still has old style Postgres URL.
db_objects["url"] = db_objects["url"].replace("postgres:", "postgresql:")
else:
pass
create_db_engine_and_session(db_objects)
session = db_objects["session"]
dir_results = Path("../results_survey_molecular_similarity")
today = datetime.date.today()
dir_today = Path(dir_results,
f"queried_{location_db}_{today.isoformat()}"
)
dir_today.mkdir(parents=True, exist_ok=True)
print(f"Saving results to: `{dir_today.as_posix()}`")
#Query the database and insert results in DataFrames.
#See this:
#https://stackoverflow.com/questions/29525808/sqlalchemy-orm-conversion-to-pandas-dataframe
#for help on `pd.read_sql` usage with SQLAlchemy.
#All Users.
query_all_users = session.query(User)
df_all_users = pd.read_sql(query_all_users.statement,
session.bind,
index_col="id",
)
df_all_users.to_csv(Path(dir_today, "all_users.csv"))
#All Answers.
query_all_answers = session.query(Answer)
df_all_answers = pd.read_sql(query_all_answers.statement,
session.bind,
index_col="id",
)
df_all_answers.to_csv(Path(dir_today, "all_answers.csv"))
#Define a time interval: only answers in the time interval will be considered
#for further analysis.
start_date = datetime.date(year=2021,
month=4,
day=14,
)
query_time_interval = (session
.query(User.id)
.filter(User.date >= start_date)
)
#Save information about used time interval.
file_ti = Path(dir_today, "used_time_interval.txt")
with open(file_ti, "w") as f:
print(f"Start Date: {start_date}", file=f)
#All Users (in time interval).
query_ti_users = (query_all_users
.filter(User.id.in_(query_time_interval))
)
df_ti_users = pd.read_sql(query_ti_users.statement,
session.bind,
index_col="id",
)
df_ti_users.to_csv(Path(dir_today, "time_interval_users.csv"))
#All Answers (in time interval).
query_ti_answers = (query_all_answers
.filter(Answer.id_user.in_(query_time_interval))
)
df_ti_answers = pd.read_sql(query_ti_answers.statement,
session.bind,
index_col="id",
)
df_ti_answers.to_csv(Path(dir_today, "time_interval_answers.csv"))
#Calculate aggregated properties.
count_answers = func.count(Answer.id)
#Get number of answers that each molecular pair received during the course
#of the whole survey, and fraction of "Yes" answers for each molecular pair.
similar_to_1 = case([(Answer.similar == "Yes", 1.0),
(Answer.similar == "No", 0.0),
])
sum_similar = func.sum(similar_to_1)
frac_similar = sum_similar / count_answers
query_agg = (session
.query(MolecularPair.id,
count_answers.label("n_answers"),
cast(sum_similar, Integer).label("n_similar"),
frac_similar.label("frac_similar"),
)
.outerjoin(Answer)
.filter(Answer.id_user.in_(query_time_interval))
.group_by(MolecularPair.id)
)
df_agg = pd.read_sql(query_agg.statement,
session.bind,
index_col="id",
)
#Close connection to Database.
db_objects["engine"].dispose()
#Read DataFrame of manually chosen pairs
basename_files_divergence = "similarity_divergence_interesting_targets_compounds"
file_chosen = Path(f"manuallyChosen_{basename_files_divergence}.csv")
df_chosen = pd.read_csv(file_chosen,
index_col="id_chosenPair",
)
#Merge DataFrame of manually chosen pairs with DataFrame
#of aggregated answers.
df_merged = pd.merge(left=df_chosen,
right=df_agg,
how="left",
left_index=True,
right_index=True,
)
df_merged.to_csv(Path(dir_today, "aggregated_survey_answers.csv"))
| 33.335135
| 91
| 0.60921
|
import argparse
import datetime
from os import environ
from pathlib import Path
import subprocess
import pandas as pd
from sqlalchemy import func, case, cast, Integer
from database_utils import MolecularPair, User, Answer
from database_utils import create_db_engine_and_session
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument("--where",
action="store",
choices=["local", "heroku"],
help=("Whether to initiate tables on a local database"
" or an existing Postgres database on Heroku"),
required=True,
)
args = parser.parse_args()
location_db = args.where
db_objects = {}
if location_db == "local":
db_objects["url"] = environ.get("DATABASE_URL")
elif location_db == "heroku":
process_get_db_url = subprocess.run("heroku config:get DATABASE_URL",
capture_output=True,
shell=True,
)
db_objects["url"] = process_get_db_url.stdout.decode()
db_objects["url"] = db_objects["url"].replace("postgres:", "postgresql:")
else:
pass
create_db_engine_and_session(db_objects)
session = db_objects["session"]
dir_results = Path("../results_survey_molecular_similarity")
today = datetime.date.today()
dir_today = Path(dir_results,
f"queried_{location_db}_{today.isoformat()}"
)
dir_today.mkdir(parents=True, exist_ok=True)
print(f"Saving results to: `{dir_today.as_posix()}`")
query_all_users = session.query(User)
df_all_users = pd.read_sql(query_all_users.statement,
session.bind,
index_col="id",
)
df_all_users.to_csv(Path(dir_today, "all_users.csv"))
query_all_answers = session.query(Answer)
df_all_answers = pd.read_sql(query_all_answers.statement,
session.bind,
index_col="id",
)
df_all_answers.to_csv(Path(dir_today, "all_answers.csv"))
start_date = datetime.date(year=2021,
month=4,
day=14,
)
query_time_interval = (session
.query(User.id)
.filter(User.date >= start_date)
)
file_ti = Path(dir_today, "used_time_interval.txt")
with open(file_ti, "w") as f:
print(f"Start Date: {start_date}", file=f)
query_ti_users = (query_all_users
.filter(User.id.in_(query_time_interval))
)
df_ti_users = pd.read_sql(query_ti_users.statement,
session.bind,
index_col="id",
)
df_ti_users.to_csv(Path(dir_today, "time_interval_users.csv"))
query_ti_answers = (query_all_answers
.filter(Answer.id_user.in_(query_time_interval))
)
df_ti_answers = pd.read_sql(query_ti_answers.statement,
session.bind,
index_col="id",
)
df_ti_answers.to_csv(Path(dir_today, "time_interval_answers.csv"))
count_answers = func.count(Answer.id)
similar_to_1 = case([(Answer.similar == "Yes", 1.0),
(Answer.similar == "No", 0.0),
])
sum_similar = func.sum(similar_to_1)
frac_similar = sum_similar / count_answers
query_agg = (session
.query(MolecularPair.id,
count_answers.label("n_answers"),
cast(sum_similar, Integer).label("n_similar"),
frac_similar.label("frac_similar"),
)
.outerjoin(Answer)
.filter(Answer.id_user.in_(query_time_interval))
.group_by(MolecularPair.id)
)
df_agg = pd.read_sql(query_agg.statement,
session.bind,
index_col="id",
)
db_objects["engine"].dispose()
basename_files_divergence = "similarity_divergence_interesting_targets_compounds"
file_chosen = Path(f"manuallyChosen_{basename_files_divergence}.csv")
df_chosen = pd.read_csv(file_chosen,
index_col="id_chosenPair",
)
df_merged = pd.merge(left=df_chosen,
right=df_agg,
how="left",
left_index=True,
right_index=True,
)
df_merged.to_csv(Path(dir_today, "aggregated_survey_answers.csv"))
| true
| true
|
1c401cab3fd9f8fe9bd8a9d3bfb697d74c325a68
| 310
|
py
|
Python
|
other/dingding/dingtalk/api/rest/OapiEduPeriodGetRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
other/dingding/dingtalk/api/rest/OapiEduPeriodGetRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
other/dingding/dingtalk/api/rest/OapiEduPeriodGetRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
'''
Created by auto_sdk on 2019.07.09
'''
from dingtalk.api.base import RestApi
class OapiEduPeriodGetRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.period_id = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.edu.period.get'
| 20.666667
| 39
| 0.748387
|
from dingtalk.api.base import RestApi
class OapiEduPeriodGetRequest(RestApi):
def __init__(self,url=None):
RestApi.__init__(self,url)
self.period_id = None
def getHttpMethod(self):
return 'POST'
def getapiname(self):
return 'dingtalk.oapi.edu.period.get'
| true
| true
|
1c401cb24281af4a5f86e6e43da587c11b8cb10d
| 11,564
|
py
|
Python
|
rplugin/python3/deoplete/sources/deoplete_go.py
|
khogeland/deoplete-go
|
71f8363b179bc24c2d85185ee72362051d1041e1
|
[
"MIT"
] | null | null | null |
rplugin/python3/deoplete/sources/deoplete_go.py
|
khogeland/deoplete-go
|
71f8363b179bc24c2d85185ee72362051d1041e1
|
[
"MIT"
] | null | null | null |
rplugin/python3/deoplete/sources/deoplete_go.py
|
khogeland/deoplete-go
|
71f8363b179bc24c2d85185ee72362051d1041e1
|
[
"MIT"
] | null | null | null |
import os
import re
import platform
import subprocess
from collections import OrderedDict
from .base import Base
from deoplete.util import charpos2bytepos, expand, getlines, load_external_module
load_external_module(__file__, 'sources/deoplete_go')
from cgo import cgo
from stdlib import stdlib
try:
load_external_module(__file__, '')
from ujson import loads
except ImportError:
from json import loads
known_goos = (
'appengine', 'android', 'darwin', 'dragonfly', 'freebsd', 'linux', 'nacl',
'netbsd', 'openbsd', 'plan9', 'solaris', 'windows')
class Source(Base):
def __init__(self, vim):
super(Source, self).__init__(vim)
self.name = 'go'
self.mark = '[Go]'
self.filetypes = ['go']
self.input_pattern = r'(?:\b[^\W\d]\w*|[\]\)])\.(?:[^\W\d]\w*)?'
self.rank = 500
def on_init(self, context):
vars = context['vars']
self.gocode_binary = \
expand(vars.get('deoplete#sources#go#gocode_binary', ''))
self.package_dot = \
vars.get('deoplete#sources#go#package_dot', False)
self.sort_class = \
vars.get('deoplete#sources#go#sort_class', [])
self.pointer = \
vars.get('deoplete#sources#go#pointer', False)
self.auto_goos = \
vars.get('deoplete#sources#go#auto_goos', False)
self.goos = \
vars.get('deoplete#sources#go#goos', '')
self.goarch = \
vars.get('deoplete#sources#go#goarch', '')
self.sock = \
vars.get('deoplete#sources#go#gocode_sock', '')
self.cgo = \
vars.get('deoplete#sources#go#cgo', False)
self.source_importer = \
vars.get('deoplete#sources#go#source_importer', False)
self.builtin_objects = \
vars.get('deoplete#sources#go#builtin_objects', False)
self.unimported_packages = \
vars.get('deoplete#sources#go#unimported_packages', False)
self.fallback_to_source = \
vars.get('deoplete#sources#go#fallback_to_source', False)
self.loaded_gocode_binary = False
self.complete_pos = re.compile(r'\w*$|(?<=")[./\-\w]*$')
if self.pointer:
self.complete_pos = re.compile(self.complete_pos.pattern + r'|\*$')
self.input_pattern += r'|\*'
if self.cgo:
load_external_module(__file__, 'clang')
import clang.cindex as clang
self.libclang_path = \
vars.get('deoplete#sources#go#cgo#libclang_path', '')
if self.libclang_path == '':
return
self.cgo_options = {
'std':
vars.get('deoplete#sources#go#cgo#std', 'c11'),
'sort_algo':
vars.get('deoplete#sources#cgo#sort_algo', None)
}
if not clang.Config.loaded and \
clang.Config.library_path != self.libclang_path:
clang.Config.set_library_file(self.libclang_path)
clang.Config.set_compatibility_check(False)
# Set 'C.' complete pattern
self.cgo_complete_pattern = re.compile(r'[^\W\d]*C\.')
# Create clang.cindex.Index database
self.index = clang.Index.create(0)
# initialize in-memory cache
self.cgo_cache, self.cgo_inline_source = dict(), None
def get_complete_position(self, context):
m = self.complete_pos.search(context['input'])
return m.start() if m else -1
def gather_candidates(self, context):
# If enabled self.cgo, and matched self.cgo_complete_pattern pattern
if self.cgo and self.cgo_complete_pattern.search(context['input']):
return self.cgo_completion(getlines(self.vim))
bufname = self.vim.current.buffer.name
if not os.path.isfile(bufname):
bufname = self.vim.call('tempname')
result = self.get_complete_result(
context, getlines(self.vim), bufname)
try:
if result[1][0]['class'] == 'PANIC':
self.print_error('gocode panicked')
return []
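            # Bucket candidates by their gocode class and emit them in the
            # user-configured sort_class order; 'import' results bypass the
            # bucketing so they always appear.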
if self.sort_class:
class_dict = OrderedDict((x, []) for x in self.sort_class)
out = []
sep = ' '
for complete in result[1]:
word = complete['name']
info = complete['type']
_class = complete['class']
abbr = str(word + sep + info).replace(' func', '', 1)
kind = _class
if _class == 'package' and self.package_dot:
word += '.'
if self.pointer and \
str(context['input']
[context['complete_position']:]) == '*':
word = '*' + word
candidates = dict(
word=word, abbr=abbr, kind=kind, info=info, dup=1
)
if not self.sort_class or _class == 'import':
out.append(candidates)
elif _class in class_dict.keys():
class_dict[_class].append(candidates)
if self.sort_class:
for v in class_dict.values():
out += v
return out
except Exception:
return []
def cgo_completion(self, buffer):
# No include header
if cgo.get_inline_source(buffer)[0] == 0:
return
count, inline_source = cgo.get_inline_source(buffer)
        # If 'self.cgo_inline_source' exists, the inline source is unchanged,
        # and the cgo completion candidates are already cached
if self.cgo_inline_source is not None and \
self.cgo_inline_source == inline_source and \
self.cgo_cache[self.cgo_inline_source]:
            # Use the in-memory cache (self.cgo_cache)
return self.cgo_cache[self.cgo_inline_source]
else:
self.cgo_inline_source = inline_source
        # return candidates using libclang-python3
return cgo.complete(
self.index, self.cgo_cache, self.cgo_options, count,
self.cgo_inline_source
)
def get_complete_result(self, context, buffer, bufname):
offset = self.get_cursor_offset(context)
env = os.environ.copy()
env['GOPATH'] = self.vim.eval('$GOPATH')
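        # Infer GOOS when requested: first from a known OS suffix in the file
        # name (e.g. foo_linux.go), then from any '// +build' directives in
        # the buffer; cgo is disabled when cross-targeting another OS.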
if self.auto_goos:
name = os.path.basename(os.path.splitext(bufname)[0])
if '_' in name:
for part in name.rsplit('_', 2):
if part in known_goos:
env['GOOS'] = part
break
if 'GOOS' not in env:
for line in buffer:
if line.startswith('package '):
break
elif not line.startswith('// +build'):
continue
directives = [
x.split(',', 1)[0] for x in line[9:].strip().split()
]
if platform.system().lower() not in directives:
for plat in directives:
if plat in known_goos:
env['GOOS'] = plat
break
elif self.goos != '':
env['GOOS'] = self.goos
if 'GOOS' in env and env['GOOS'] != platform.system().lower():
env['CGO_ENABLED'] = '0'
if self.goarch != '':
env['GOARCH'] = self.goarch
gocode = self.find_gocode_binary()
if not gocode:
return []
args = [gocode, '-f=json']
if self.source_importer:
args.append('-source')
if self.builtin_objects:
args.append('-builtin')
if self.unimported_packages:
args.append('-unimported-packages')
if self.fallback_to_source:
args.append('-fallback-to-source')
        # basically, the '-sock' option for mdempsky/gocode;
        # probably meaningless in nsf/gocode, which already runs the rpc server
if self.sock != '' and self.sock in ['unix', 'tcp', 'none']:
args.append('-sock={}'.format(self.sock))
args += ['autocomplete', bufname, str(offset)]
process = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
start_new_session=True,
env=env
)
stdout_data, stderr_data = process.communicate(
'\n'.join(buffer).encode()
)
result = []
try:
result = loads(stdout_data.decode())
except Exception as e:
self.print_error('gocode decode error')
self.print_error(stdout_data.decode())
self.print_error(stderr_data.decode())
return result
def get_cursor_offset(self, context):
line = self.vim.current.window.cursor[0]
column = context['complete_position']
count = self.vim.call('line2byte', line)
if self.vim.current.buffer.options['fileformat'] == 'dos':
# Note: line2byte() counts "\r\n" in DOS format. It must be "\n"
# in gocode.
count -= line - 1
return count + charpos2bytepos(
'utf-8', context['input'][: column], column) - 1
def parse_import_package(self, buffer):
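        # Scan the buffer for an import block and return a list of dicts with
        # the trailing package name and its library path ('none' for
        # single-segment imports).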
start = 0
packages = []
for line, b in enumerate(buffer):
if re.match(r'^\s*import \w*|^\s*import \(', b):
start = line
continue
elif re.match(r'\)', b):
break
elif line > start:
package_name = re.sub(r'\t|"', '', b)
if str(package_name).find(r'/', 0) > 0:
full_package_name = str(package_name).split('/', -1)
package_name = \
full_package_name[len(full_package_name) - 1]
library = '/'.join(
full_package_name[:len(full_package_name) - 1]
                    )
packages.append(
dict(
library=library, package=package_name
)
)
else:
packages.append(dict(library='none', package=package_name))
return packages
def find_gocode_binary(self):
if self.gocode_binary != '' and self.loaded_gocode_binary:
return self.gocode_binary
self.loaded_gocode_binary = os.path.isfile(self.gocode_binary)
if self.loaded_gocode_binary:
return self.gocode_binary
elif platform.system().lower() == 'windows':
return self.find_binary_path('gocode.exe')
else:
return self.find_binary_path('gocode')
def find_binary_path(self, path):
def is_exec(bin_path):
return os.path.isfile(bin_path) and os.access(bin_path, os.X_OK)
dirpath, binary = os.path.split(path)
if dirpath:
if is_exec(path):
return path
else:
for p in os.environ["PATH"].split(os.pathsep):
p = p.strip('"')
binary = os.path.join(p, path)
if is_exec(binary):
return binary
return self.print_error(path + ' binary not found')
| 35.472393
| 81
| 0.52992
|
import os
import re
import platform
import subprocess
from collections import OrderedDict
from .base import Base
from deoplete.util import charpos2bytepos, expand, getlines, load_external_module
load_external_module(__file__, 'sources/deoplete_go')
from cgo import cgo
from stdlib import stdlib
try:
load_external_module(__file__, '')
from ujson import loads
except ImportError:
from json import loads
known_goos = (
'appengine', 'android', 'darwin', 'dragonfly', 'freebsd', 'linux', 'nacl',
'netbsd', 'openbsd', 'plan9', 'solaris', 'windows')
class Source(Base):
def __init__(self, vim):
super(Source, self).__init__(vim)
self.name = 'go'
self.mark = '[Go]'
self.filetypes = ['go']
self.input_pattern = r'(?:\b[^\W\d]\w*|[\]\)])\.(?:[^\W\d]\w*)?'
self.rank = 500
def on_init(self, context):
vars = context['vars']
self.gocode_binary = \
expand(vars.get('deoplete#sources#go#gocode_binary', ''))
self.package_dot = \
vars.get('deoplete#sources#go#package_dot', False)
self.sort_class = \
vars.get('deoplete#sources#go#sort_class', [])
self.pointer = \
vars.get('deoplete#sources#go#pointer', False)
self.auto_goos = \
vars.get('deoplete#sources#go#auto_goos', False)
self.goos = \
vars.get('deoplete#sources#go#goos', '')
self.goarch = \
vars.get('deoplete#sources#go#goarch', '')
self.sock = \
vars.get('deoplete#sources#go#gocode_sock', '')
self.cgo = \
vars.get('deoplete#sources#go#cgo', False)
self.source_importer = \
vars.get('deoplete#sources#go#source_importer', False)
self.builtin_objects = \
vars.get('deoplete#sources#go#builtin_objects', False)
self.unimported_packages = \
vars.get('deoplete#sources#go#unimported_packages', False)
self.fallback_to_source = \
vars.get('deoplete#sources#go#fallback_to_source', False)
self.loaded_gocode_binary = False
self.complete_pos = re.compile(r'\w*$|(?<=")[./\-\w]*$')
if self.pointer:
self.complete_pos = re.compile(self.complete_pos.pattern + r'|\*$')
self.input_pattern += r'|\*'
if self.cgo:
load_external_module(__file__, 'clang')
import clang.cindex as clang
self.libclang_path = \
vars.get('deoplete#sources#go#cgo#libclang_path', '')
if self.libclang_path == '':
return
self.cgo_options = {
'std':
vars.get('deoplete#sources#go#cgo#std', 'c11'),
'sort_algo':
vars.get('deoplete#sources#cgo#sort_algo', None)
}
if not clang.Config.loaded and \
clang.Config.library_path != self.libclang_path:
clang.Config.set_library_file(self.libclang_path)
clang.Config.set_compatibility_check(False)
# Set 'C.' complete pattern
self.cgo_complete_pattern = re.compile(r'[^\W\d]*C\.')
            # Create the clang.cindex.Index instance
self.index = clang.Index.create(0)
# initialize in-memory cache
self.cgo_cache, self.cgo_inline_source = dict(), None
def get_complete_position(self, context):
m = self.complete_pos.search(context['input'])
return m.start() if m else -1
def gather_candidates(self, context):
        # If cgo is enabled and the input matches self.cgo_complete_pattern
if self.cgo and self.cgo_complete_pattern.search(context['input']):
return self.cgo_completion(getlines(self.vim))
bufname = self.vim.current.buffer.name
if not os.path.isfile(bufname):
bufname = self.vim.call('tempname')
result = self.get_complete_result(
context, getlines(self.vim), bufname)
try:
if result[1][0]['class'] == 'PANIC':
self.print_error('gocode panicked')
return []
if self.sort_class:
class_dict = OrderedDict((x, []) for x in self.sort_class)
out = []
sep = ' '
for complete in result[1]:
word = complete['name']
info = complete['type']
_class = complete['class']
abbr = str(word + sep + info).replace(' func', '', 1)
kind = _class
if _class == 'package' and self.package_dot:
word += '.'
if self.pointer and \
str(context['input']
[context['complete_position']:]) == '*':
word = '*' + word
candidates = dict(
word=word, abbr=abbr, kind=kind, info=info, dup=1
)
if not self.sort_class or _class == 'import':
out.append(candidates)
elif _class in class_dict.keys():
class_dict[_class].append(candidates)
if self.sort_class:
for v in class_dict.values():
out += v
return out
except Exception:
return []
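    # Hedged note: `result` above follows gocode's `-f=json` output, a
    # two-element array of completion-prefix length and candidate list. The
    # values below are illustrative only, not taken from a real gocode run:
    #
    #   [6, [{"class": "func", "name": "Printf",
    #         "type": "func(format string, a ...interface{}) (n int, err error)"}]]
    #
    # hence the indexing of result[1] and the 'name'/'type'/'class' keys.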
def cgo_completion(self, buffer):
        # No C include header found in the buffer; nothing to complete
if cgo.get_inline_source(buffer)[0] == 0:
return
count, inline_source = cgo.get_inline_source(buffer)
        # If self.cgo_inline_source exists, the inline source is unchanged,
        # and the cgo completion candidates are already cached
if self.cgo_inline_source is not None and \
self.cgo_inline_source == inline_source and \
self.cgo_cache[self.cgo_inline_source]:
            # Use the in-memory cache (self.cgo_cache)
return self.cgo_cache[self.cgo_inline_source]
else:
self.cgo_inline_source = inline_source
# return candidates use libclang-python3
return cgo.complete(
self.index, self.cgo_cache, self.cgo_options, count,
self.cgo_inline_source
)
def get_complete_result(self, context, buffer, bufname):
offset = self.get_cursor_offset(context)
env = os.environ.copy()
env['GOPATH'] = self.vim.eval('$GOPATH')
if self.auto_goos:
name = os.path.basename(os.path.splitext(bufname)[0])
if '_' in name:
for part in name.rsplit('_', 2):
if part in known_goos:
env['GOOS'] = part
break
if 'GOOS' not in env:
for line in buffer:
if line.startswith('package '):
break
elif not line.startswith('// +build'):
continue
directives = [
x.split(',', 1)[0] for x in line[9:].strip().split()
]
if platform.system().lower() not in directives:
for plat in directives:
if plat in known_goos:
env['GOOS'] = plat
break
elif self.goos != '':
env['GOOS'] = self.goos
if 'GOOS' in env and env['GOOS'] != platform.system().lower():
env['CGO_ENABLED'] = '0'
if self.goarch != '':
env['GOARCH'] = self.goarch
gocode = self.find_gocode_binary()
if not gocode:
return []
args = [gocode, '-f=json']
if self.source_importer:
args.append('-source')
if self.builtin_objects:
args.append('-builtin')
if self.unimported_packages:
args.append('-unimported-packages')
if self.fallback_to_source:
args.append('-fallback-to-source')
        # Basically, the '-sock' option is for mdempsky/gocode; it is probably
        # meaningless for nsf/gocode, which already runs the RPC server
if self.sock != '' and self.sock in ['unix', 'tcp', 'none']:
args.append('-sock={}'.format(self.sock))
args += ['autocomplete', bufname, str(offset)]
process = subprocess.Popen(
args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
start_new_session=True,
env=env
)
stdout_data, stderr_data = process.communicate(
'\n'.join(buffer).encode()
)
result = []
try:
result = loads(stdout_data.decode())
except Exception as e:
self.print_error('gocode decode error')
self.print_error(stdout_data.decode())
self.print_error(stderr_data.decode())
return result
def get_cursor_offset(self, context):
line = self.vim.current.window.cursor[0]
column = context['complete_position']
count = self.vim.call('line2byte', line)
if self.vim.current.buffer.options['fileformat'] == 'dos':
# Note: line2byte() counts "\r\n" in DOS format. It must be "\n"
# in gocode.
count -= line - 1
return count + charpos2bytepos(
'utf-8', context['input'][: column], column) - 1
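    # Hedged worked example with made-up numbers: in a DOS-format buffer whose
    # first two lines are "ab\r\n", line2byte(3) returns 9 (each line takes 4
    # bytes). Subtracting line - 1 = 2 yields 7, the value line2byte would give
    # with plain "\n" endings. With 4 input bytes before the completion
    # position, the offset handed to gocode is 7 + 4 - 1 = 10.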
def parse_import_package(self, buffer):
start = 0
packages = []
for line, b in enumerate(buffer):
if re.match(r'^\s*import \w*|^\s*import \(', b):
start = line
continue
elif re.match(r'\)', b):
break
elif line > start:
package_name = re.sub(r'\t|"', '', b)
if str(package_name).find(r'/', 0) > 0:
full_package_name = str(package_name).split('/', -1)
package_name = \
full_package_name[len(full_package_name) - 1]
                    library = '/'.join(
                        full_package_name[:len(full_package_name) - 1]
                    )
packages.append(
dict(
library=library, package=package_name
)
)
else:
packages.append(dict(library='none', package=package_name))
return packages
def find_gocode_binary(self):
if self.gocode_binary != '' and self.loaded_gocode_binary:
return self.gocode_binary
self.loaded_gocode_binary = os.path.isfile(self.gocode_binary)
if self.loaded_gocode_binary:
return self.gocode_binary
elif platform.system().lower() == 'windows':
return self.find_binary_path('gocode.exe')
else:
return self.find_binary_path('gocode')
def find_binary_path(self, path):
def is_exec(bin_path):
return os.path.isfile(bin_path) and os.access(bin_path, os.X_OK)
dirpath, binary = os.path.split(path)
if dirpath:
if is_exec(path):
return path
else:
for p in os.environ["PATH"].split(os.pathsep):
p = p.strip('"')
binary = os.path.join(p, path)
if is_exec(binary):
return binary
return self.print_error(path + ' binary not found')
| true
| true
|
1c401dd16d84eb14e77c53697579fd3180578310
| 31
|
py
|
Python
|
army_ant/server/__init__.py
|
feup-infolab/army-ant
|
7b33120d5160f73d7a41a05e6336489c917fb75c
|
[
"BSD-3-Clause"
] | 5
|
2018-01-18T14:11:52.000Z
|
2020-10-23T16:02:25.000Z
|
army_ant/server/__init__.py
|
feup-infolab/army-ant
|
7b33120d5160f73d7a41a05e6336489c917fb75c
|
[
"BSD-3-Clause"
] | 10
|
2018-02-02T20:19:36.000Z
|
2020-10-05T08:46:36.000Z
|
army_ant/server/__init__.py
|
feup-infolab/army-ant
|
7b33120d5160f73d7a41a05e6336489c917fb75c
|
[
"BSD-3-Clause"
] | null | null | null |
from .server import * # noqa
| 15.5
| 30
| 0.645161
|
from .server import *
| true
| true
|
1c401ebfa08df6fdf5e1fa623582ec6382f5c9e9
| 2,738
|
py
|
Python
|
pysnmp-with-texts/ADTX-SMI-S2.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/ADTX-SMI-S2.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/ADTX-SMI-S2.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module ADTX-SMI-S2 (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ADTX-SMI-S2
# Produced by pysmi-0.3.4 at Wed May 1 11:15:06 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, MibIdentifier, enterprises, ModuleIdentity, Integer32, Gauge32, TimeTicks, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, ObjectIdentity, IpAddress, Unsigned32, iso, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "MibIdentifier", "enterprises", "ModuleIdentity", "Integer32", "Gauge32", "TimeTicks", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "ObjectIdentity", "IpAddress", "Unsigned32", "iso", "Counter32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
adtx = ModuleIdentity((1, 3, 6, 1, 4, 1, 2653))
adtx.setRevisions(('2003-01-22 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: adtx.setRevisionsDescriptions(('The initial revision of this MIB module.',))
if mibBuilder.loadTexts: adtx.setLastUpdated('200304220000Z')
if mibBuilder.loadTexts: adtx.setOrganization('ADTX (Advanced Technology and Systems Co., Ltd.)')
if mibBuilder.loadTexts: adtx.setContactInfo('Customer Service Officer Postal: Yokohama Business Park East Tower 9F 134 Goudo-cho, Hodogaya-ku, Yokohama-shi, Kanagawa-ken 240-0005 Japan Tel: +81-45-334-0040 E-mail: cso@adtx.com')
if mibBuilder.loadTexts: adtx.setDescription('The Structure of Management Information for the ADTX enterprise.')
adtxReg = MibIdentifier((1, 3, 6, 1, 4, 1, 2653, 1))
adtxGeneric = MibIdentifier((1, 3, 6, 1, 4, 1, 2653, 2))
adtxProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 2653, 3))
adtxExpr = MibIdentifier((1, 3, 6, 1, 4, 1, 2653, 4))
avc = MibIdentifier((1, 3, 6, 1, 4, 1, 2653, 3, 1))
mibBuilder.exportSymbols("ADTX-SMI-S2", adtxExpr=adtxExpr, adtxGeneric=adtxGeneric, adtxReg=adtxReg, adtxProducts=adtxProducts, avc=avc, PYSNMP_MODULE_ID=adtx, adtx=adtx)
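# Hedged usage sketch (not part of the generated module): a pysmi-compiled MIB
# like this one is normally consumed through pysnmp's MibBuilder, which also
# injects the `mibBuilder` symbol used above. The directory path below is a
# placeholder:
#
#   from pysnmp.smi import builder
#   mibBuilder = builder.MibBuilder()
#   mibBuilder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))
#   mibBuilder.loadModules('ADTX-SMI-S2')
#   adtx, = mibBuilder.importSymbols('ADTX-SMI-S2', 'adtx')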
| 94.413793
| 505
| 0.762966
|
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, MibIdentifier, enterprises, ModuleIdentity, Integer32, Gauge32, TimeTicks, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, ObjectIdentity, IpAddress, Unsigned32, iso, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "MibIdentifier", "enterprises", "ModuleIdentity", "Integer32", "Gauge32", "TimeTicks", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "ObjectIdentity", "IpAddress", "Unsigned32", "iso", "Counter32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
adtx = ModuleIdentity((1, 3, 6, 1, 4, 1, 2653))
adtx.setRevisions(('2003-01-22 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: adtx.setRevisionsDescriptions(('The initial revision of this MIB module.',))
if mibBuilder.loadTexts: adtx.setLastUpdated('200304220000Z')
if mibBuilder.loadTexts: adtx.setOrganization('ADTX (Advanced Technology and Systems Co., Ltd.)')
if mibBuilder.loadTexts: adtx.setContactInfo('Customer Service Officer Postal: Yokohama Business Park East Tower 9F 134 Goudo-cho, Hodogaya-ku, Yokohama-shi, Kanagawa-ken 240-0005 Japan Tel: +81-45-334-0040 E-mail: cso@adtx.com')
if mibBuilder.loadTexts: adtx.setDescription('The Structure of Management Information for the ADTX enterprise.')
adtxReg = MibIdentifier((1, 3, 6, 1, 4, 1, 2653, 1))
adtxGeneric = MibIdentifier((1, 3, 6, 1, 4, 1, 2653, 2))
adtxProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 2653, 3))
adtxExpr = MibIdentifier((1, 3, 6, 1, 4, 1, 2653, 4))
avc = MibIdentifier((1, 3, 6, 1, 4, 1, 2653, 3, 1))
mibBuilder.exportSymbols("ADTX-SMI-S2", adtxExpr=adtxExpr, adtxGeneric=adtxGeneric, adtxReg=adtxReg, adtxProducts=adtxProducts, avc=avc, PYSNMP_MODULE_ID=adtx, adtx=adtx)
| true
| true
|
1c401f08edba009994f2bcd60453c7d9b0eae9e9
| 3,850
|
py
|
Python
|
validation/test_cluster_install.py
|
afcollins/openshift-toolkit
|
16bf5b054fd5bdfb5018c1f4e06b80470fde716f
|
[
"Apache-2.0"
] | 227
|
2017-05-20T05:33:32.000Z
|
2022-01-17T01:42:36.000Z
|
validation/test_cluster_install.py
|
afcollins/openshift-toolkit
|
16bf5b054fd5bdfb5018c1f4e06b80470fde716f
|
[
"Apache-2.0"
] | 88
|
2017-04-10T20:43:12.000Z
|
2020-07-17T12:15:39.000Z
|
validation/test_cluster_install.py
|
afcollins/openshift-toolkit
|
16bf5b054fd5bdfb5018c1f4e06b80470fde716f
|
[
"Apache-2.0"
] | 163
|
2017-04-07T17:10:11.000Z
|
2021-07-06T17:20:54.000Z
|
from .lib import k8sHelper
import pytest
# Instantiate k8s_helper class from k8s_helper library.
k8s_client = k8sHelper.k8sHelper()
# Master Test Section #
@pytest.mark.master
def test_master_controllers(master_node_count):
assert k8s_client.get_running_pods_by_label(
'kube-system', 'openshift.io/component=controllers') == int(master_node_count), \
"Should have {} master controller pods".format(master_node_count)
@pytest.mark.master
def test_master_api(master_node_count):
assert k8s_client.get_running_pods_by_label(
'kube-system', 'openshift.io/component=api') == int(master_node_count), \
"Should have {} master api pods".format(master_node_count)
@pytest.mark.master
def test_master_etcd(etcd_node_count):
assert k8s_client.get_running_pods_by_label(
'kube-system', 'openshift.io/component=etcd') == int(etcd_node_count), \
"Should have {} etcd pods".format(etcd_node_count)
# Infra Test Section #
@pytest.mark.infra
def test_infra_router(router_node_count):
assert k8s_client.get_running_pods_by_label(
'default', 'deploymentconfig=router') == int(router_node_count), \
"Should have {} router pods".format(router_node_count)
@pytest.mark.infra
def test_infra_registry(registry_pod_count):
assert k8s_client.get_running_pods_by_label(
'default', 'deploymentconfig=docker-registry') == int(registry_pod_count), \
"Should have {} registry pods".format(registry_pod_count)
# Logging Test Section #
@pytest.mark.logging
def test_logging_fluentd():
assert k8s_client.get_running_pods_by_label(
'openshift-logging', 'component=fluentd') == k8s_client.get_node_count(), \
"Should have one fluentd pod for every node in the cluster"
@pytest.mark.logging
def test_logging_elasticsearch(es_pod_count):
assert k8s_client.get_running_pods_by_label(
'openshift-logging', 'component=es') >= int(es_pod_count), \
"Should have {} Elasticsearch pod in the cluster".format(es_pod_count)
@pytest.mark.logging
def test_logging_kibana(kibana_pod_count):
assert k8s_client.get_running_pods_by_label(
'openshift-logging', 'component=kibana') == int(kibana_pod_count), \
"Should have {} Kibana pod in the cluster".format(kibana_pod_count)
# Monitoring Test Section #
@pytest.mark.monitoring
def test_monitoring_node_exporter():
assert k8s_client.get_running_pods_by_label(
'openshift-monitoring', 'app=node-exporter') == k8s_client.get_node_count(), \
"Should have {} fluentd pod in the cluster as total number of node is {}.".format(k8s_client.get_node_count())
@pytest.mark.monitoring
def test_monitoring_prometheus(prom_pod_count):
assert k8s_client.get_running_pods_by_label(
'openshift-monitoring', 'app=prometheus') == int(prom_pod_count), \
"Should have {} prometheus pod in the cluster".format(prom_pod_count)
@pytest.mark.monitoring
def test_monitoring_alertmanager(alertmanager_pod_count):
assert k8s_client.get_running_pods_by_label(
'openshift-monitoring', 'app=alertmanager') == int(alertmanager_pod_count), \
"Should have {} alertmanager pod in the cluster".format(alertmanager_pod_count)
@pytest.mark.monitoring
def test_monitoring_grafana(grafana_pod_count):
assert k8s_client.get_running_pods_by_label(
'openshift-monitoring', 'app=grafana') == int(grafana_pod_count), \
"Should have {} grafana pod in the cluster".format(grafana_pod_count)
@pytest.mark.monitoring
def test_monitoring_kube_state_metrics(kube_state_metrics_pod_count):
assert k8s_client.get_running_pods_by_label(
'openshift-monitoring', 'app=kube-state-metrics') == int(kube_state_metrics_pod_count), \
"Should have {} kube-state-metrics pod in the cluster".format(kube_state_metrics_pod_count)
| 38.118812
| 118
| 0.750909
|
from .lib import k8sHelper
import pytest
k8s_client = k8sHelper.k8sHelper()
@pytest.mark.master
def test_master_controllers(master_node_count):
assert k8s_client.get_running_pods_by_label(
'kube-system', 'openshift.io/component=controllers') == int(master_node_count), \
"Should have {} master controller pods".format(master_node_count)
@pytest.mark.master
def test_master_api(master_node_count):
assert k8s_client.get_running_pods_by_label(
'kube-system', 'openshift.io/component=api') == int(master_node_count), \
"Should have {} master api pods".format(master_node_count)
@pytest.mark.master
def test_master_etcd(etcd_node_count):
assert k8s_client.get_running_pods_by_label(
'kube-system', 'openshift.io/component=etcd') == int(etcd_node_count), \
"Should have {} etcd pods".format(etcd_node_count)
@pytest.mark.infra
def test_infra_router(router_node_count):
assert k8s_client.get_running_pods_by_label(
'default', 'deploymentconfig=router') == int(router_node_count), \
"Should have {} router pods".format(router_node_count)
@pytest.mark.infra
def test_infra_registry(registry_pod_count):
assert k8s_client.get_running_pods_by_label(
'default', 'deploymentconfig=docker-registry') == int(registry_pod_count), \
"Should have {} registry pods".format(registry_pod_count)
@pytest.mark.logging
def test_logging_fluentd():
assert k8s_client.get_running_pods_by_label(
'openshift-logging', 'component=fluentd') == k8s_client.get_node_count(), \
"Should have one fluentd pod for every node in the cluster"
@pytest.mark.logging
def test_logging_elasticsearch(es_pod_count):
assert k8s_client.get_running_pods_by_label(
'openshift-logging', 'component=es') >= int(es_pod_count), \
"Should have {} Elasticsearch pod in the cluster".format(es_pod_count)
@pytest.mark.logging
def test_logging_kibana(kibana_pod_count):
assert k8s_client.get_running_pods_by_label(
'openshift-logging', 'component=kibana') == int(kibana_pod_count), \
"Should have {} Kibana pod in the cluster".format(kibana_pod_count)
@pytest.mark.monitoring
def test_monitoring_node_exporter():
assert k8s_client.get_running_pods_by_label(
'openshift-monitoring', 'app=node-exporter') == k8s_client.get_node_count(), \
"Should have {} fluentd pod in the cluster as total number of node is {}.".format(k8s_client.get_node_count())
@pytest.mark.monitoring
def test_monitoring_prometheus(prom_pod_count):
assert k8s_client.get_running_pods_by_label(
'openshift-monitoring', 'app=prometheus') == int(prom_pod_count), \
"Should have {} prometheus pod in the cluster".format(prom_pod_count)
@pytest.mark.monitoring
def test_monitoring_alertmanager(alertmanager_pod_count):
assert k8s_client.get_running_pods_by_label(
'openshift-monitoring', 'app=alertmanager') == int(alertmanager_pod_count), \
"Should have {} alertmanager pod in the cluster".format(alertmanager_pod_count)
@pytest.mark.monitoring
def test_monitoring_grafana(grafana_pod_count):
assert k8s_client.get_running_pods_by_label(
'openshift-monitoring', 'app=grafana') == int(grafana_pod_count), \
"Should have {} grafana pod in the cluster".format(grafana_pod_count)
@pytest.mark.monitoring
def test_monitoring_kube_state_metrics(kube_state_metrics_pod_count):
assert k8s_client.get_running_pods_by_label(
'openshift-monitoring', 'app=kube-state-metrics') == int(kube_state_metrics_pod_count), \
"Should have {} kube-state-metrics pod in the cluster".format(kube_state_metrics_pod_count)
| true
| true
|
1c401fc39fc8f8e717d1bc2feb82071a4adfba43
| 13,328
|
py
|
Python
|
airflow/providers/google/cloud/operators/bigquery_dts.py
|
ncolomer/airflow
|
cb7c67dea9cd9b9c5de10e355b63039446003149
|
[
"Apache-2.0"
] | 2
|
2021-07-30T17:25:56.000Z
|
2021-08-03T13:51:09.000Z
|
airflow/providers/google/cloud/operators/bigquery_dts.py
|
ncolomer/airflow
|
cb7c67dea9cd9b9c5de10e355b63039446003149
|
[
"Apache-2.0"
] | null | null | null |
airflow/providers/google/cloud/operators/bigquery_dts.py
|
ncolomer/airflow
|
cb7c67dea9cd9b9c5de10e355b63039446003149
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google BigQuery Data Transfer Service operators.
"""
from typing import Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.protobuf.json_format import MessageToDict
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery_dts import BiqQueryDataTransferServiceHook, get_object_id
from airflow.utils.decorators import apply_defaults
class BigQueryCreateDataTransferOperator(BaseOperator):
"""
Creates a new data transfer configuration.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryCreateDataTransferOperator`
:param transfer_config: Data transfer configuration to create.
:type transfer_config: dict
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection
is used.
:type project_id: str
:param authorization_code: authorization code to use with this transfer configuration.
This is required if new credentials are needed.
:type authorization_code: Optional[str]
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = (
"transfer_config",
"project_id",
"authorization_code",
"gcp_conn_id",
"impersonation_chain",
)
@apply_defaults
def __init__(
self,
*,
transfer_config: dict,
project_id: Optional[str] = None,
authorization_code: Optional[str] = None,
retry: Retry = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id="google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.transfer_config = transfer_config
self.authorization_code = authorization_code
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = BiqQueryDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Creating DTS transfer config")
response = hook.create_transfer_config(
project_id=self.project_id,
transfer_config=self.transfer_config,
authorization_code=self.authorization_code,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = MessageToDict(response)
self.log.info("Created DTS transfer config %s", get_object_id(result))
self.xcom_push(context, key="transfer_config_id", value=get_object_id(result))
return result
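# Hedged usage sketch: a minimal DAG fragment for the operator above. The
# project id and transfer_config payload are illustrative placeholders, not
# values documented by this module:
#
#   from datetime import datetime
#   from airflow import DAG
#
#   with DAG("example_bigquery_dts", start_date=datetime(2021, 1, 1),
#            schedule_interval=None) as dag:
#       create_transfer = BigQueryCreateDataTransferOperator(
#           task_id="create_transfer",
#           project_id="my-gcp-project",                  # placeholder
#           transfer_config={
#               "destination_dataset_id": "my_dataset",   # placeholder
#               "display_name": "example transfer",
#               "data_source_id": "google_cloud_storage",
#               "params": {},                             # source-specific
#           },
#       )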
class BigQueryDeleteDataTransferConfigOperator(BaseOperator):
"""
Deletes transfer configuration.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryDeleteDataTransferConfigOperator`
:param transfer_config_id: Id of transfer config to be used.
:type transfer_config_id: str
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = (
"transfer_config_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
@apply_defaults
def __init__(
self,
*,
transfer_config_id: str,
project_id: Optional[str] = None,
retry: Retry = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id="google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.transfer_config_id = transfer_config_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context) -> None:
hook = BiqQueryDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.delete_transfer_config(
transfer_config_id=self.transfer_config_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class BigQueryDataTransferServiceStartTransferRunsOperator(BaseOperator):
"""
Start manual transfer runs to be executed now with schedule_time equal
to current time. The transfer runs can be created for a time range where
the run_time is between start_time (inclusive) and end_time
(exclusive), or for a specific run_time.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BigQueryDataTransferServiceStartTransferRunsOperator`
:param transfer_config_id: Id of transfer config to be used.
:type transfer_config_id: str
:param requested_time_range: Time range for the transfer runs that should be started.
If a dict is provided, it must be of the same form as the protobuf
message `~google.cloud.bigquery_datatransfer_v1.types.TimeRange`
:type requested_time_range: Union[dict, ~google.cloud.bigquery_datatransfer_v1.types.TimeRange]
:param requested_run_time: Specific run_time for a transfer run to be started. The
requested_run_time must not be in the future. If a dict is provided, it
must be of the same form as the protobuf message
`~google.cloud.bigquery_datatransfer_v1.types.Timestamp`
:type requested_run_time: Union[dict, ~google.cloud.bigquery_datatransfer_v1.types.Timestamp]
:param project_id: The BigQuery project id where the transfer configuration should be
created. If set to None or missing, the default project_id from the Google Cloud connection is used.
:type project_id: str
:param retry: A retry object used to retry requests. If `None` is
specified, requests will not be retried.
:type retry: Optional[google.api_core.retry.Retry]
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each individual
attempt.
:type timeout: Optional[float]
:param metadata: Additional metadata that is provided to the method.
:type metadata: Optional[Sequence[Tuple[str, str]]]
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields = (
"transfer_config_id",
"project_id",
"requested_time_range",
"requested_run_time",
"gcp_conn_id",
"impersonation_chain",
)
@apply_defaults
def __init__(
self,
*,
transfer_config_id: str,
project_id: Optional[str] = None,
requested_time_range: Optional[dict] = None,
requested_run_time: Optional[dict] = None,
retry: Retry = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id="google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.transfer_config_id = transfer_config_id
self.requested_time_range = requested_time_range
self.requested_run_time = requested_run_time
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = BiqQueryDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info('Submitting manual transfer for %s', self.transfer_config_id)
response = hook.start_manual_transfer_runs(
transfer_config_id=self.transfer_config_id,
requested_time_range=self.requested_time_range,
requested_run_time=self.requested_run_time,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = MessageToDict(response)
run_id = None
if 'runs' in result:
run_id = get_object_id(result['runs'][0])
self.xcom_push(context, key="run_id", value=run_id)
self.log.info('Transfer run %s submitted successfully.', run_id)
return result
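# Hedged continuation of the sketch after BigQueryCreateDataTransferOperator:
# chaining the three operators and pulling the XCom value pushed above. Task
# ids and the templated lookup are illustrative:
#
#   start_runs = BigQueryDataTransferServiceStartTransferRunsOperator(
#       task_id="start_runs",
#       transfer_config_id="{{ task_instance.xcom_pull("
#                          "task_ids='create_transfer', "
#                          "key='transfer_config_id') }}",
#   )
#   delete_config = BigQueryDeleteDataTransferConfigOperator(
#       task_id="delete_config",
#       transfer_config_id="{{ task_instance.xcom_pull("
#                          "task_ids='create_transfer', "
#                          "key='transfer_config_id') }}",
#   )
#   create_transfer >> start_runs >> delete_config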
| 44.27907
| 108
| 0.698454
|
from typing import Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.protobuf.json_format import MessageToDict
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery_dts import BiqQueryDataTransferServiceHook, get_object_id
from airflow.utils.decorators import apply_defaults
class BigQueryCreateDataTransferOperator(BaseOperator):
template_fields = (
"transfer_config",
"project_id",
"authorization_code",
"gcp_conn_id",
"impersonation_chain",
)
@apply_defaults
def __init__(
self,
*,
transfer_config: dict,
project_id: Optional[str] = None,
authorization_code: Optional[str] = None,
retry: Retry = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id="google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.transfer_config = transfer_config
self.authorization_code = authorization_code
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = BiqQueryDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info("Creating DTS transfer config")
response = hook.create_transfer_config(
project_id=self.project_id,
transfer_config=self.transfer_config,
authorization_code=self.authorization_code,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = MessageToDict(response)
self.log.info("Created DTS transfer config %s", get_object_id(result))
self.xcom_push(context, key="transfer_config_id", value=get_object_id(result))
return result
class BigQueryDeleteDataTransferConfigOperator(BaseOperator):
template_fields = (
"transfer_config_id",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
@apply_defaults
def __init__(
self,
*,
transfer_config_id: str,
project_id: Optional[str] = None,
retry: Retry = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id="google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.transfer_config_id = transfer_config_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context) -> None:
hook = BiqQueryDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
hook.delete_transfer_config(
transfer_config_id=self.transfer_config_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
class BigQueryDataTransferServiceStartTransferRunsOperator(BaseOperator):
template_fields = (
"transfer_config_id",
"project_id",
"requested_time_range",
"requested_run_time",
"gcp_conn_id",
"impersonation_chain",
)
@apply_defaults
def __init__(
self,
*,
transfer_config_id: str,
project_id: Optional[str] = None,
requested_time_range: Optional[dict] = None,
requested_run_time: Optional[dict] = None,
retry: Retry = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
gcp_conn_id="google_cloud_default",
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.transfer_config_id = transfer_config_id
self.requested_time_range = requested_time_range
self.requested_run_time = requested_run_time
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context):
hook = BiqQueryDataTransferServiceHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
self.log.info('Submitting manual transfer for %s', self.transfer_config_id)
response = hook.start_manual_transfer_runs(
transfer_config_id=self.transfer_config_id,
requested_time_range=self.requested_time_range,
requested_run_time=self.requested_run_time,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = MessageToDict(response)
run_id = None
if 'runs' in result:
run_id = get_object_id(result['runs'][0])
self.xcom_push(context, key="run_id", value=run_id)
self.log.info('Transfer run %s submitted successfully.', run_id)
return result
| true
| true
|
1c40202510961b7ae320ca6faf2d5bab4a73c076
| 20,855
|
py
|
Python
|
src/residual_anomaly_detector/exps/StarAiFlow.py
|
eliavw/residual-anomaly-detector
|
8840a56aa226120456d0af8e6cca927a7e0e712b
|
[
"MIT"
] | null | null | null |
src/residual_anomaly_detector/exps/StarAiFlow.py
|
eliavw/residual-anomaly-detector
|
8840a56aa226120456d0af8e6cca927a7e0e712b
|
[
"MIT"
] | null | null | null |
src/residual_anomaly_detector/exps/StarAiFlow.py
|
eliavw/residual-anomaly-detector
|
8840a56aa226120456d0af8e6cca927a7e0e712b
|
[
"MIT"
] | null | null | null |
import time
import warnings
from pathlib import Path
import mercs
import numpy as np
from mercs import Mercs
import pandas as pd
from mercs.utils.encoding import code_to_query, query_to_code
from sklearn.metrics import (
accuracy_score,
average_precision_score,
f1_score,
roc_auc_score,
)
from affe.flow import Flow
from affe.io import (
FN_TEMPLATE_CLASSIC_FLOW,
abspath,
check_existence_of_directory,
dump_object,
get_default_model_filename,
get_filepath,
get_flow_directory,
get_subdirectory_paths,
get_template_filenames,
insert_subdirectory,
load_object,
mimic_fs,
)
from .io import dataset_filepath, query_filepath
from copy import deepcopy
class StarAiFlow(Flow):
STR = "STARAIFlow"
def __init__(self, timeout_s=60, verbose=False, **kwargs):
self._data = None
self._metadata = None
self._qry = None
self._queries = None
self._y_true = None
self._analysis = None
self._retention = None
self.verbose = verbose
# Init all configs
self.config = dict()
self.config["io"] = self._init_io(**kwargs)
self.config["data"] = self._init_data_config(**kwargs)
self.config["qry"] = self._init_qry_config(**kwargs)
self.config["algo"] = self._init_algo_config(**kwargs)
self.config["analysis"] = self._init_analysis_config(**kwargs)
self.config["retention"] = self._init_retention_config(**kwargs)
# Superclass init
log_filepath = self.io["flow_filepaths"]["logs"]
flow_filepath = self.io["flow_filepaths"]["flows"]
super().__init__(
config=self.config,
log_filepath=log_filepath,
flow_filepath=flow_filepath,
timeout_s=timeout_s,
)
return
# IO
@property
def io(self):
return self.config["io"]
@property
def io_config(self):
return self.io
# (Meta)Data
@property
def metadata(self):
if self._metadata is None:
name = self.data_config["data_identifier"]
n_features = self.data["train"].shape[1]
n_instances = self.data["train"].shape[0]
n_instances_train = n_instances
n_instances_test = self.data["test"].shape[0]
self._metadata = dict(
name=name,
n_features=n_features,
n_instances=n_instances,
n_instances_train=n_instances_train,
n_instances_test=n_instances_test,
)
else:
pass
return self._metadata
@property
def data_config(self):
return self.config["data"]
@property
def data(self):
if self._data is None:
self._data = dict(
train=pd.read_csv(self.data_config["train_fpath"]),
test=pd.read_csv(self.data_config["test_fpath"]),
)
else:
pass
return self._data
# Query
@property
def qry_config(self):
return self.config["qry"]
@property
def qry(self):
if self._qry is None:
self._qry = load_object(self.qry_config["filepath"])
else:
pass
return self._qry
@property
def q_codes(self):
return self.qry
def q_code(self, n):
return self.qry[n, :]
@property
def queries(self):
if self._queries is None:
q_desc = []
q_targ = []
q_miss = []
for q_code in self.q_codes:
d, t, m = code_to_query(q_code, return_list=True)
q_desc.append(d)
q_targ.append(t)
q_miss.append(m)
self._queries = (q_desc, q_targ, q_miss)
else:
pass
return self._queries
@property
def q_desc(self):
return self.queries[0]
@property
def q_targ(self):
return self.queries[1]
@property
def q_miss(self):
return self.queries[2]
def get_q_desc(self, n=None):
if n is None:
return self.q_desc
else:
return self.q_desc[n]
def get_q_targ(self, n=None):
if n is None:
return self.q_targ
else:
return self.q_targ[n]
def get_q_miss(self, n=None):
if n is None:
return self.q_miss
else:
return self.q_miss[n]
@property
def n_qrys(self):
return self.qry.shape[0]
# Algo
@property
def algo_config(self):
return self.config["algo"]
@property
def model(self):
m_algo = getattr(self, "m_algo", None)
if m_algo is None:
return None
else:
return m_algo.get("model", None)
@property
def algo(self):
return self.model
# Predictions
@property
def predictions(self):
a_algo = getattr(self, "a_algo", None)
if a_algo is None:
return None
else:
return a_algo.get("predictions", None)
@property
def y_pred(self):
return self.predictions
def get_y_pred(self, n):
if n is None:
return self.predictions
else:
return self.predictions[n]
@property
def y_true(self):
if self._y_true is None:
self._y_true = dict()
for q_idx, q_targ in enumerate(self.q_targ):
self._y_true[q_idx] = self.data["test"].values[:, q_targ]
else:
pass
return self._y_true
def get_y_true(self, n):
if n is None:
return self.y_true
else:
return self.y_true[n]
# Analysis
@property
def analysis_config(self):
return self.config["analysis"]
@property
def analysis(self):
if self._analysis is None:
self._analysis = self.get_analysis()
else:
pass
return self._analysis
@analysis.setter
def analysis(self, analysis):
assert isinstance(analysis, dict), "Analysis needs to be a dict"
self._analysis = analysis
return
@property
def results(self):
analysis = self.analysis
metadata = self.metadata
return dict(analysis=analysis, metadata=metadata)
# Retention
@property
def retention_config(self):
return self.config["retention"]
@property
def retention(self):
if self._retention is None:
self._retention = self.get_retention()
else:
pass
return self._retention
@retention.setter
def retention(self, retention):
assert isinstance(retention, bool), "Retention is a bool"
self._retention = retention
return
# Inits
def _init_io(
self,
flow_id=0,
flow_identifier="manual",
root_levels_up=2,
fs_depth=1,
out_directory="out",
out_parent="root",
basename=None,
save_model=False,
load_model=False,
model_identifier=None,
data_identifier=None,
exclude_in_scan={"notebooks", "visualisation", "tests", "admercs"},
**kwargs,
):
# Perform duties
fs = mimic_fs(
root_levels_up=root_levels_up, depth=fs_depth, exclude=exclude_in_scan,
)
## Build the filesystem we desire
fs, out_key = insert_subdirectory(
fs, parent=out_parent, child=out_directory, return_key=True
)
flow_directory = get_flow_directory(keyword=flow_identifier)
fs, flow_key = insert_subdirectory(
fs, parent=out_key, child=flow_directory, return_key=True
)
check_existence_of_directory(fs)
flow_dirpaths = get_subdirectory_paths(fs, flow_key)
flow_filepaths = get_template_filenames(
flow_dirpaths,
basename=basename,
idx=flow_id,
template=FN_TEMPLATE_CLASSIC_FLOW,
)
## Model IO
model_filepath = self._get_model_filepath(
fs,
load_model,
save_model,
data_identifier=data_identifier,
model_identifier=model_identifier,
basename=basename,
)
# collect outgoing information
io = dict(
flow_id=flow_id,
flow_identifier=flow_identifier,
fs=fs,
flow_key=flow_key,
flow_dirpaths=flow_dirpaths,
flow_filepaths=flow_filepaths,
model_filepath=model_filepath,
load_model=load_model,
save_model=save_model,
)
return io
def _init_data_config(self, data_identifier=None, step=1, **kwargs):
data_dir_filepath = Path(abspath(self.io["fs"], node="data"))
train_fpath = dataset_filepath(
name=data_identifier,
kind="train",
step=step,
data_dir_filepath=data_dir_filepath,
extension="csv",
check=True,
)
test_fpath = dataset_filepath(
name=data_identifier,
kind="test",
step=step,
data_dir_filepath=data_dir_filepath,
extension="csv",
check=True,
)
data_config = dict(
data_identifier=data_identifier,
step=step,
train_fpath=train_fpath,
test_fpath=test_fpath,
)
return data_config
def _init_qry_config(
self, data_identifier=None, qry_keyword="default", n_queries=None, **kwargs
):
qry_dir_filepath = Path(abspath(self.io["fs"], node="query"))
qry_filepath = query_filepath(
name=data_identifier,
keyword=qry_keyword,
query_dir_filepath=qry_dir_filepath,
extension="npy",
)
qry_config = dict(
filepath=qry_filepath, keyword=qry_keyword, n_queries=n_queries
)
return qry_config
def _init_algo_config(self, **kwargs):
algo_config = dict()
return algo_config
def _init_analysis_config(
self, macro_f1_score=True, micro_f1_score=False, accuracy_score=True, **kwargs
):
return dict(
macro_f1_score=macro_f1_score,
micro_f1_score=micro_f1_score,
accuracy_score=accuracy_score,
)
def _init_retention_config(
self, save_results=True, save_model=False, save_config=True, **kwargs
):
return dict(
save_results=save_results, save_model=save_model, save_config=save_config
)
# Actual algorithm
def get_algo(self, train, model=None):
return dict(model=None, fit_time_s=None)
def ask_algo(self, test):
        assert self.model is not None, "You need a model before you can call this function"
return dict(predictions=None, predict_time_s=None)
# Analysis
def get_analysis(self):
cfg = self.analysis_config
analysis = dict()
if cfg["macro_f1_score"]:
analysis["macro_f1_score"] = []
if cfg["micro_f1_score"]:
analysis["micro_f1_score"] = []
if cfg["accuracy_score"]:
analysis["accuracy_score"] = []
for q_idx in range(self.n_qrys):
y_true = self.get_y_true(q_idx)
y_pred = self.get_y_pred(q_idx)
if cfg["macro_f1_score"]:
macro_f1_score = f1_score(y_true, y_pred, average="macro")
analysis["macro_f1_score"].append(macro_f1_score)
if cfg["micro_f1_score"]:
micro_f1_score = f1_score(y_true, y_pred, average="micro")
analysis["micro_f1_score"].append(micro_f1_score)
if cfg["accuracy_score"]:
accuracy = accuracy_score(y_true, y_pred)
analysis["accuracy_score"].append(accuracy)
return analysis
# Save
def get_retention(self):
        # collect incoming information
oks = []
cfg = self.retention_config
io = self.io
if cfg["save_results"]:
results = self.results
fp_results = io["flow_filepaths"]["results"]
ok = dump_object(results, fp_results)
oks.append(ok)
if cfg["save_model"]:
model = self.model
fp_model = io["model_filepath"]
ok = dump_object(model, fp_model)
oks.append(ok)
if cfg["save_config"]:
dcfg = self._get_dumpable_config()
fp_config = io["flow_filepaths"]["config"]
ok = dump_object(dcfg, fp_config)
oks.append(ok)
return all(oks)
# Flows
def flow(self):
# Get data
train, test = self._get_train_test()
# Load model
if self.io["load_model"]:
model = load_object(self.io["model_filepath"])
else:
model = None
# Train your model
self.m_algo = self.get_algo(train, model=model)
# Use your model
self.a_algo = self.ask_algo(test)
# Get analysis
self.analysis = self.get_analysis()
# Get retention (=Save the things you want to save)
self.retention = self.get_retention()
return
# Helpers
def _get_model_filepath(
self,
fs,
load_model,
save_model,
data_identifier=None,
model_identifier=None,
basename=None,
):
model_filename = self._get_model_filename(
data_identifier=data_identifier,
model_identifier=model_identifier,
basename=basename,
)
if load_model:
return get_filepath(
tree=fs, node="models", filename=model_filename, check_file=True
)
elif save_model:
return get_filepath(
tree=fs, node="models", filename=model_filename, check_file=False
)
else:
return
@staticmethod
def _get_model_filename(
data_identifier=None, model_identifier=None, basename=None,
):
if model_identifier is not None:
model_filename = get_default_model_filename(
data_identifier=data_identifier, model_identifier=model_identifier
)
else:
model_filename = get_default_model_filename(
data_identifier=data_identifier, model_identifier=basename
)
return model_filename
def _get_train_test(self):
return self.data["train"], self.data["test"]
def _get_dumpable_config(self):
dumpable_config = deepcopy(self.config)
def _convert_entries(d):
for k, v in d.items():
if isinstance(v, type(Path())):
# PosixPath to String conversion
d[k] = str(v)
elif isinstance(v, dict):
d[k] = _convert_entries(v)
else:
pass
return d
return _convert_entries(dumpable_config)
class MercsStarAiFlow(StarAiFlow):
def _init_algo_config(
self,
reconfigure_algo=True,
max_depth=None,
min_samples_leaf=5,
criterion="gini",
min_impurity_decrease=0.0,
**kwargs,
):
return {k: v for k, v in dict(locals()).items() if k not in {"kwargs", "self"}}
# Actual algorithm
def get_algo(self, train, model=None):
algo_config = self.algo_config
        # collect incoming information
X = train.values
X = X.astype(float)
nominal_ids = set(range(X.shape[1]))
# perform duty
if model is None:
model = Mercs(**algo_config)
tick = time.time()
model.fit(X, nominal_attributes=nominal_ids, **algo_config)
tock = time.time()
fit_time_s = tock - tick
elif isinstance(model, Mercs):
if algo_config["reconfigure_algo"]:
model = self.reconfigure_algo(model, **algo_config)
fit_time_s = model.model_data["ind_time"]
else:
raise ValueError(
"I expect either no model or a Mercs model. Not {}".format(model)
)
return dict(model=model, fit_time_s=fit_time_s)
def reconfigure_algo(self, model, **algo_config):
raise NotImplementedError
def ask_algo(self, test):
algo_config = self.algo_config
model = self.model
q_codes = self.q_codes
assert isinstance(q_codes, np.ndarray)
assert model is not None, "You need a model before you can call this function"
# Preprocessing
X = test.copy().values
X = X.astype(float)
predictions = dict()
predict_time_s = dict()
for q_idx, q_code in enumerate(q_codes):
targ_ids = list(self.get_q_targ(q_idx))
miss_ids = list(self.get_q_miss(q_idx))
if self.verbose:
msg = """
targ_ids: {}
miss_ids: {}
""".format(
targ_ids, miss_ids
)
print(msg)
X_test = X.copy()
X_test[:, targ_ids] = np.nan
X_test[:, miss_ids] = np.nan
assert np.sum(np.isnan(X_test[0, :])) == len(targ_ids) + len(
miss_ids
), "Not the correct amount of missing data"
if algo_config["reconfigure_algo"]:
y_pred = model.predict(X_test, q_code=q_code, **algo_config)
else:
y_pred = model.predict(X_test, q_code=q_code)
inf_time = model.model_data["inf_time"]
predictions[q_idx] = y_pred
predict_time_s[q_idx] = inf_time
return dict(predictions=predictions, predict_time_s=predict_time_s)
class BayesFusionStarAiFlow(StarAiFlow):
def _init_algo_config(
self,
reconfigure_algo=True,
max_depth=None,
min_samples_leaf=5,
criterion="gini",
min_impurity_decrease=0.0,
**kwargs,
):
return {k: v for k, v in dict(locals()).items() if k not in {"kwargs", "self"}}
# Actual algorithm
def get_algo(self, train, model=None):
algo_config = self.algo_config
        # collect incoming information
X = train.values
X = X.astype(float)
nominal_ids = set(range(X.shape[1]))
# perform duty
if model is None:
model = Mercs(**algo_config)
tick = time.time()
model.fit(X, nominal_attributes=nominal_ids, **algo_config)
tock = time.time()
fit_time_s = tock - tick
elif isinstance(model, Mercs):
if algo_config["reconfigure_algo"]:
model = self.reconfigure_algo(model, **algo_config)
fit_time_s = model.model_data["ind_time"]
else:
raise ValueError(
"I expect either no model or a Mercs model. Not {}".format(model)
)
return dict(model=model, fit_time_s=fit_time_s)
def reconfigure_algo(self, model, **algo_config):
raise NotImplementedError
def ask_algo(self, test):
algo_config = self.algo_config
model = self.model
q_codes = self.q_codes
assert isinstance(q_codes, np.ndarray)
assert model is not None, "You need a model before you can call this function"
# Preprocessing
X = test.copy().values
X = X.astype(float)
predictions = dict()
predict_time_s = dict()
for q_idx, q_code in enumerate(q_codes):
targ_ids = list(self.get_q_targ(q_idx))
miss_ids = list(self.get_q_miss(q_idx))
if self.verbose:
msg = """
targ_ids: {}
miss_ids: {}
""".format(
targ_ids, miss_ids
)
print(msg)
X_test = X.copy()
X_test[:, targ_ids] = np.nan
X_test[:, miss_ids] = np.nan
assert np.sum(np.isnan(X_test[0, :])) == len(targ_ids) + len(
miss_ids
), "Not the correct amount of missing data"
if algo_config["reconfigure_algo"]:
y_pred = model.predict(X_test, q_code=q_code, **algo_config)
else:
y_pred = model.predict(X_test, q_code=q_code)
inf_time = model.model_data["inf_time"]
predictions[q_idx] = y_pred
predict_time_s[q_idx] = inf_time
return dict(predictions=predictions, predict_time_s=predict_time_s)
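# Hedged usage sketch: how a concrete flow might be configured and executed.
# All keyword values are placeholders; the affe Flow base class may expose its
# own runner, but the flow() method defined above can also be called directly:
#
#   flow = MercsStarAiFlow(
#       data_identifier="my_dataset",   # placeholder
#       flow_identifier="manual",
#       qry_keyword="default",
#       max_depth=8,
#       timeout_s=600,
#   )
#   flow.flow()           # train, predict, analyse, and persist results
#   print(flow.results)   # dict(analysis=..., metadata=...)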
| 27.659151
| 87
| 0.569456
|
import time
import warnings
from pathlib import Path
import mercs
import numpy as np
from mercs import Mercs
import pandas as pd
from mercs.utils.encoding import code_to_query, query_to_code
from sklearn.metrics import (
accuracy_score,
average_precision_score,
f1_score,
roc_auc_score,
)
from affe.flow import Flow
from affe.io import (
FN_TEMPLATE_CLASSIC_FLOW,
abspath,
check_existence_of_directory,
dump_object,
get_default_model_filename,
get_filepath,
get_flow_directory,
get_subdirectory_paths,
get_template_filenames,
insert_subdirectory,
load_object,
mimic_fs,
)
from .io import dataset_filepath, query_filepath
from copy import deepcopy
class StarAiFlow(Flow):
STR = "STARAIFlow"
def __init__(self, timeout_s=60, verbose=False, **kwargs):
self._data = None
self._metadata = None
self._qry = None
self._queries = None
self._y_true = None
self._analysis = None
self._retention = None
self.verbose = verbose
self.config = dict()
self.config["io"] = self._init_io(**kwargs)
self.config["data"] = self._init_data_config(**kwargs)
self.config["qry"] = self._init_qry_config(**kwargs)
self.config["algo"] = self._init_algo_config(**kwargs)
self.config["analysis"] = self._init_analysis_config(**kwargs)
self.config["retention"] = self._init_retention_config(**kwargs)
log_filepath = self.io["flow_filepaths"]["logs"]
flow_filepath = self.io["flow_filepaths"]["flows"]
super().__init__(
config=self.config,
log_filepath=log_filepath,
flow_filepath=flow_filepath,
timeout_s=timeout_s,
)
return
@property
def io(self):
return self.config["io"]
@property
def io_config(self):
return self.io
@property
def metadata(self):
if self._metadata is None:
name = self.data_config["data_identifier"]
n_features = self.data["train"].shape[1]
n_instances = self.data["train"].shape[0]
n_instances_train = n_instances
n_instances_test = self.data["test"].shape[0]
self._metadata = dict(
name=name,
n_features=n_features,
n_instances=n_instances,
n_instances_train=n_instances_train,
n_instances_test=n_instances_test,
)
else:
pass
return self._metadata
@property
def data_config(self):
return self.config["data"]
@property
def data(self):
if self._data is None:
self._data = dict(
train=pd.read_csv(self.data_config["train_fpath"]),
test=pd.read_csv(self.data_config["test_fpath"]),
)
else:
pass
return self._data
@property
def qry_config(self):
return self.config["qry"]
@property
def qry(self):
if self._qry is None:
self._qry = load_object(self.qry_config["filepath"])
else:
pass
return self._qry
@property
def q_codes(self):
return self.qry
def q_code(self, n):
return self.qry[n, :]
@property
def queries(self):
if self._queries is None:
q_desc = []
q_targ = []
q_miss = []
for q_code in self.q_codes:
d, t, m = code_to_query(q_code, return_list=True)
q_desc.append(d)
q_targ.append(t)
q_miss.append(m)
self._queries = (q_desc, q_targ, q_miss)
else:
pass
return self._queries
@property
def q_desc(self):
return self.queries[0]
@property
def q_targ(self):
return self.queries[1]
@property
def q_miss(self):
return self.queries[2]
def get_q_desc(self, n=None):
if n is None:
return self.q_desc
else:
return self.q_desc[n]
def get_q_targ(self, n=None):
if n is None:
return self.q_targ
else:
return self.q_targ[n]
def get_q_miss(self, n=None):
if n is None:
return self.q_miss
else:
return self.q_miss[n]
@property
def n_qrys(self):
return self.qry.shape[0]
@property
def algo_config(self):
return self.config["algo"]
@property
def model(self):
m_algo = getattr(self, "m_algo", None)
if m_algo is None:
return None
else:
return m_algo.get("model", None)
@property
def algo(self):
return self.model
@property
def predictions(self):
a_algo = getattr(self, "a_algo", None)
if a_algo is None:
return None
else:
return a_algo.get("predictions", None)
@property
def y_pred(self):
return self.predictions
def get_y_pred(self, n):
if n is None:
return self.predictions
else:
return self.predictions[n]
@property
def y_true(self):
if self._y_true is None:
self._y_true = dict()
for q_idx, q_targ in enumerate(self.q_targ):
self._y_true[q_idx] = self.data["test"].values[:, q_targ]
else:
pass
return self._y_true
def get_y_true(self, n):
if n is None:
return self.y_true
else:
return self.y_true[n]
@property
def analysis_config(self):
return self.config["analysis"]
@property
def analysis(self):
if self._analysis is None:
self._analysis = self.get_analysis()
else:
pass
return self._analysis
@analysis.setter
def analysis(self, analysis):
assert isinstance(analysis, dict), "Analysis needs to be a dict"
self._analysis = analysis
return
@property
def results(self):
analysis = self.analysis
metadata = self.metadata
return dict(analysis=analysis, metadata=metadata)
@property
def retention_config(self):
return self.config["retention"]
@property
def retention(self):
if self._retention is None:
self._retention = self.get_retention()
else:
pass
return self._retention
@retention.setter
def retention(self, retention):
assert isinstance(retention, bool), "Retention is a bool"
self._retention = retention
return
def _init_io(
self,
flow_id=0,
flow_identifier="manual",
root_levels_up=2,
fs_depth=1,
out_directory="out",
out_parent="root",
basename=None,
save_model=False,
load_model=False,
model_identifier=None,
data_identifier=None,
exclude_in_scan={"notebooks", "visualisation", "tests", "admercs"},
**kwargs,
):
fs = mimic_fs(
root_levels_up=root_levels_up, depth=fs_depth, exclude=exclude_in_scan,
)
        fs, out_key = insert_subdirectory(
            fs, parent=out_parent, child=out_directory, return_key=True
        )
flow_directory = get_flow_directory(keyword=flow_identifier)
fs, flow_key = insert_subdirectory(
fs, parent=out_key, child=flow_directory, return_key=True
)
check_existence_of_directory(fs)
flow_dirpaths = get_subdirectory_paths(fs, flow_key)
flow_filepaths = get_template_filenames(
flow_dirpaths,
basename=basename,
idx=flow_id,
template=FN_TEMPLATE_CLASSIC_FLOW,
)
        model_filepath = self._get_model_filepath(
fs,
load_model,
save_model,
data_identifier=data_identifier,
model_identifier=model_identifier,
basename=basename,
)
io = dict(
flow_id=flow_id,
flow_identifier=flow_identifier,
fs=fs,
flow_key=flow_key,
flow_dirpaths=flow_dirpaths,
flow_filepaths=flow_filepaths,
model_filepath=model_filepath,
load_model=load_model,
save_model=save_model,
)
return io
def _init_data_config(self, data_identifier=None, step=1, **kwargs):
data_dir_filepath = Path(abspath(self.io["fs"], node="data"))
train_fpath = dataset_filepath(
name=data_identifier,
kind="train",
step=step,
data_dir_filepath=data_dir_filepath,
extension="csv",
check=True,
)
test_fpath = dataset_filepath(
name=data_identifier,
kind="test",
step=step,
data_dir_filepath=data_dir_filepath,
extension="csv",
check=True,
)
data_config = dict(
data_identifier=data_identifier,
step=step,
train_fpath=train_fpath,
test_fpath=test_fpath,
)
return data_config
def _init_qry_config(
self, data_identifier=None, qry_keyword="default", n_queries=None, **kwargs
):
qry_dir_filepath = Path(abspath(self.io["fs"], node="query"))
qry_filepath = query_filepath(
name=data_identifier,
keyword=qry_keyword,
query_dir_filepath=qry_dir_filepath,
extension="npy",
)
qry_config = dict(
filepath=qry_filepath, keyword=qry_keyword, n_queries=n_queries
)
return qry_config
def _init_algo_config(self, **kwargs):
algo_config = dict()
return algo_config
def _init_analysis_config(
self, macro_f1_score=True, micro_f1_score=False, accuracy_score=True, **kwargs
):
return dict(
macro_f1_score=macro_f1_score,
micro_f1_score=micro_f1_score,
accuracy_score=accuracy_score,
)
def _init_retention_config(
self, save_results=True, save_model=False, save_config=True, **kwargs
):
return dict(
save_results=save_results, save_model=save_model, save_config=save_config
)
def get_algo(self, train, model=None):
return dict(model=None, fit_time_s=None)
def ask_algo(self, test):
        assert self.model is not None, "You need a model before you can call this function"
return dict(predictions=None, predict_time_s=None)
def get_analysis(self):
cfg = self.analysis_config
analysis = dict()
if cfg["macro_f1_score"]:
analysis["macro_f1_score"] = []
if cfg["micro_f1_score"]:
analysis["micro_f1_score"] = []
if cfg["accuracy_score"]:
analysis["accuracy_score"] = []
for q_idx in range(self.n_qrys):
y_true = self.get_y_true(q_idx)
y_pred = self.get_y_pred(q_idx)
if cfg["macro_f1_score"]:
macro_f1_score = f1_score(y_true, y_pred, average="macro")
analysis["macro_f1_score"].append(macro_f1_score)
if cfg["micro_f1_score"]:
micro_f1_score = f1_score(y_true, y_pred, average="micro")
analysis["micro_f1_score"].append(micro_f1_score)
if cfg["accuracy_score"]:
accuracy = accuracy_score(y_true, y_pred)
analysis["accuracy_score"].append(accuracy)
return analysis
def get_retention(self):
oks = []
cfg = self.retention_config
io = self.io
if cfg["save_results"]:
results = self.results
fp_results = io["flow_filepaths"]["results"]
ok = dump_object(results, fp_results)
oks.append(ok)
if cfg["save_model"]:
model = self.model
fp_model = io["model_filepath"]
ok = dump_object(model, fp_model)
oks.append(ok)
if cfg["save_config"]:
dcfg = self._get_dumpable_config()
fp_config = io["flow_filepaths"]["config"]
ok = dump_object(dcfg, fp_config)
oks.append(ok)
return all(oks)
def flow(self):
train, test = self._get_train_test()
if self.io["load_model"]:
model = load_object(self.io["model_filepath"])
else:
model = None
self.m_algo = self.get_algo(train, model=model)
self.a_algo = self.ask_algo(test)
self.analysis = self.get_analysis()
self.retention = self.get_retention()
return
def _get_model_filepath(
self,
fs,
load_model,
save_model,
data_identifier=None,
model_identifier=None,
basename=None,
):
model_filename = self._get_model_filename(
data_identifier=data_identifier,
model_identifier=model_identifier,
basename=basename,
)
if load_model:
return get_filepath(
tree=fs, node="models", filename=model_filename, check_file=True
)
elif save_model:
return get_filepath(
tree=fs, node="models", filename=model_filename, check_file=False
)
else:
return
@staticmethod
def _get_model_filename(
data_identifier=None, model_identifier=None, basename=None,
):
if model_identifier is not None:
model_filename = get_default_model_filename(
data_identifier=data_identifier, model_identifier=model_identifier
)
else:
model_filename = get_default_model_filename(
data_identifier=data_identifier, model_identifier=basename
)
return model_filename
def _get_train_test(self):
return self.data["train"], self.data["test"]
def _get_dumpable_config(self):
dumpable_config = deepcopy(self.config)
def _convert_entries(d):
for k, v in d.items():
if isinstance(v, type(Path())):
d[k] = str(v)
elif isinstance(v, dict):
d[k] = _convert_entries(v)
else:
pass
return d
return _convert_entries(dumpable_config)
class MercsStarAiFlow(StarAiFlow):
def _init_algo_config(
self,
reconfigure_algo=True,
max_depth=None,
min_samples_leaf=5,
criterion="gini",
min_impurity_decrease=0.0,
**kwargs,
):
return {k: v for k, v in dict(locals()).items() if k not in {"kwargs", "self"}}
def get_algo(self, train, model=None):
algo_config = self.algo_config
X = train.values
X = X.astype(float)
nominal_ids = set(range(X.shape[1]))
if model is None:
model = Mercs(**algo_config)
tick = time.time()
model.fit(X, nominal_attributes=nominal_ids, **algo_config)
tock = time.time()
fit_time_s = tock - tick
elif isinstance(model, Mercs):
if algo_config["reconfigure_algo"]:
model = self.reconfigure_algo(model, **algo_config)
fit_time_s = model.model_data["ind_time"]
else:
raise ValueError(
"I expect either no model or a Mercs model. Not {}".format(model)
)
return dict(model=model, fit_time_s=fit_time_s)
def reconfigure_algo(self, model, **algo_config):
raise NotImplementedError
def ask_algo(self, test):
algo_config = self.algo_config
model = self.model
q_codes = self.q_codes
assert isinstance(q_codes, np.ndarray)
assert model is not None, "You need a model before you can call this function"
X = test.copy().values
X = X.astype(float)
predictions = dict()
predict_time_s = dict()
for q_idx, q_code in enumerate(q_codes):
targ_ids = list(self.get_q_targ(q_idx))
miss_ids = list(self.get_q_miss(q_idx))
if self.verbose:
msg = """
targ_ids: {}
miss_ids: {}
""".format(
targ_ids, miss_ids
)
print(msg)
X_test = X.copy()
X_test[:, targ_ids] = np.nan
X_test[:, miss_ids] = np.nan
assert np.sum(np.isnan(X_test[0, :])) == len(targ_ids) + len(
miss_ids
), "Not the correct amount of missing data"
if algo_config["reconfigure_algo"]:
y_pred = model.predict(X_test, q_code=q_code, **algo_config)
else:
y_pred = model.predict(X_test, q_code=q_code)
inf_time = model.model_data["inf_time"]
predictions[q_idx] = y_pred
predict_time_s[q_idx] = inf_time
return dict(predictions=predictions, predict_time_s=predict_time_s)
class BayesFusionStarAiFlow(StarAiFlow):
def _init_algo_config(
self,
reconfigure_algo=True,
max_depth=None,
min_samples_leaf=5,
criterion="gini",
min_impurity_decrease=0.0,
**kwargs,
):
return {k: v for k, v in dict(locals()).items() if k not in {"kwargs", "self"}}
def get_algo(self, train, model=None):
algo_config = self.algo_config
X = train.values
X = X.astype(float)
nominal_ids = set(range(X.shape[1]))
if model is None:
model = Mercs(**algo_config)
tick = time.time()
model.fit(X, nominal_attributes=nominal_ids, **algo_config)
tock = time.time()
fit_time_s = tock - tick
elif isinstance(model, Mercs):
if algo_config["reconfigure_algo"]:
model = self.reconfigure_algo(model, **algo_config)
fit_time_s = model.model_data["ind_time"]
else:
raise ValueError(
"I expect either no model or a Mercs model. Not {}".format(model)
)
return dict(model=model, fit_time_s=fit_time_s)
def reconfigure_algo(self, model, **algo_config):
raise NotImplementedError
def ask_algo(self, test):
algo_config = self.algo_config
model = self.model
q_codes = self.q_codes
assert isinstance(q_codes, np.ndarray)
assert model is not None, "You need a model before you can call this function"
X = test.copy().values
X = X.astype(float)
predictions = dict()
predict_time_s = dict()
for q_idx, q_code in enumerate(q_codes):
targ_ids = list(self.get_q_targ(q_idx))
miss_ids = list(self.get_q_miss(q_idx))
if self.verbose:
msg = """
targ_ids: {}
miss_ids: {}
""".format(
targ_ids, miss_ids
)
print(msg)
X_test = X.copy()
X_test[:, targ_ids] = np.nan
X_test[:, miss_ids] = np.nan
assert np.sum(np.isnan(X_test[0, :])) == len(targ_ids) + len(
miss_ids
), "Not the correct amount of missing data"
if algo_config["reconfigure_algo"]:
y_pred = model.predict(X_test, q_code=q_code, **algo_config)
else:
y_pred = model.predict(X_test, q_code=q_code)
inf_time = model.model_data["inf_time"]
predictions[q_idx] = y_pred
predict_time_s[q_idx] = inf_time
return dict(predictions=predictions, predict_time_s=predict_time_s)
| true
| true
|
1c4020f22073f142cf5c3deab3c63e1457cb284f
| 440
|
py
|
Python
|
payments/forms.py
|
ugohuche/ugohshopping
|
331ecdbbc8a6aae9d16d49fc4fc94cc285edc39f
|
[
"MIT"
] | 1
|
2020-09-09T16:29:26.000Z
|
2020-09-09T16:29:26.000Z
|
payments/forms.py
|
ugohuche/ugohshopping
|
331ecdbbc8a6aae9d16d49fc4fc94cc285edc39f
|
[
"MIT"
] | 9
|
2021-03-30T14:17:30.000Z
|
2022-03-12T00:44:55.000Z
|
payments/forms.py
|
ugohuche/ugohshopping
|
331ecdbbc8a6aae9d16d49fc4fc94cc285edc39f
|
[
"MIT"
] | null | null | null |
from django import forms
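# Single-field form for entering a promo/coupon code; the attrs dict
# styles the input as a Bootstrap input-group control.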
class CouponForm(forms.Form):
code = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Promo code',
'aria-label': 'Recipient\'s username',
'aria-describedby': 'basic-addon2'
}))
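# Refund request form: the order's reference code, a free-text reason,
# and a contact email address.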
class RefundForm(forms.Form):
reference_code = forms.CharField()
message = forms.CharField(widget=forms.Textarea(attrs={
'rows': 4
}))
email = forms.EmailField()
| 24.444444
| 57
| 0.679545
|
from django import forms
class CouponForm(forms.Form):
code = forms.CharField(widget=forms.TextInput(attrs={
'class': 'form-control',
'placeholder': 'Promo code',
'aria-label': 'Recipient\'s username',
'aria-describedby': 'basic-addon2'
}))
class RefundForm(forms.Form):
reference_code = forms.CharField()
message = forms.CharField(widget=forms.Textarea(attrs={
'rows': 4
}))
email = forms.EmailField()
| true
| true
|
1c40214352ed30453b417f615dbf2c359872fb8a
| 23,907
|
py
|
Python
|
cinder/volume/drivers/windows/smbfs.py
|
liangintel/stx-cinder
|
f4c43797a3f8c0caebfd8fb67244c084d26d9741
|
[
"Apache-2.0"
] | null | null | null |
cinder/volume/drivers/windows/smbfs.py
|
liangintel/stx-cinder
|
f4c43797a3f8c0caebfd8fb67244c084d26d9741
|
[
"Apache-2.0"
] | 2
|
2018-10-25T13:04:01.000Z
|
2019-08-17T13:15:24.000Z
|
cinder/volume/drivers/windows/smbfs.py
|
liangintel/stx-cinder
|
f4c43797a3f8c0caebfd8fb67244c084d26d9741
|
[
"Apache-2.0"
] | 2
|
2018-10-17T13:32:50.000Z
|
2018-11-08T08:39:39.000Z
|
# Copyright (c) 2014 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from os_brick.remotefs import windows_remotefs as remotefs_brick
from os_win import utilsfactory
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import units
from cinder import context
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers import remotefs as remotefs_drv
VERSION = '1.1.0'
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('smbfs_shares_config',
default=r'C:\OpenStack\smbfs_shares.txt',
help='File with the list of available smbfs shares.'),
cfg.StrOpt('smbfs_allocation_info_file_path',
default=r'C:\OpenStack\allocation_data.txt',
help=('The path of the automatically generated file containing '
'information about volume disk space allocation.'),
deprecated_for_removal=True,
deprecated_since="11.0.0",
deprecated_reason="This allocation file is no longer used."),
cfg.StrOpt('smbfs_default_volume_format',
default='vhd',
choices=['vhd', 'vhdx'],
help=('Default format that will be used when creating volumes '
'if no volume format is specified.')),
cfg.BoolOpt('smbfs_sparsed_volumes',
default=True,
help=('Create volumes as sparsed files which take no space '
'rather than regular files when using raw format, '
'in which case volume creation takes lot of time.')),
cfg.FloatOpt('smbfs_used_ratio',
default=None,
help=('Percent of ACTUAL usage of the underlying volume '
'before no new volumes can be allocated to the volume '
'destination.'),
deprecated_for_removal=True),
cfg.FloatOpt('smbfs_oversub_ratio',
default=None,
help=('This will compare the allocated to available space on '
'the volume destination. If the ratio exceeds this '
'number, the destination will no longer be valid.'),
deprecated_for_removal=True),
cfg.StrOpt('smbfs_mount_point_base',
default=r'C:\OpenStack\_mnt',
help=('Base dir containing mount points for smbfs shares.')),
cfg.DictOpt('smbfs_pool_mappings',
default={},
help=('Mappings between share locations and pool names. '
'If not specified, the share names will be used as '
'pool names. Example: '
'//addr/share:pool_name,//addr/share2:pool_name2')),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
# TODO(lpetrut): drop the following default values. The according
# smbfs driver opts are getting deprecated but we want to preserve
# their defaults until we completely remove them.
CONF.set_default('max_over_subscription_ratio', 1)
CONF.set_default('reserved_percentage', 5)
@interface.volumedriver
class WindowsSmbfsDriver(remotefs_drv.RemoteFSPoolMixin,
remotefs_drv.RemoteFSSnapDriverDistributed):
VERSION = VERSION
driver_volume_type = 'smbfs'
driver_prefix = 'smbfs'
volume_backend_name = 'Generic_SMBFS'
SHARE_FORMAT_REGEX = r'//.+/.+'
VERSION = VERSION
_DISK_FORMAT_VHD = 'vhd'
_DISK_FORMAT_VHD_LEGACY = 'vpc'
_DISK_FORMAT_VHDX = 'vhdx'
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Microsoft_iSCSI_CI"
_MINIMUM_QEMU_IMG_VERSION = '1.6'
_SUPPORTED_IMAGE_FORMATS = [_DISK_FORMAT_VHD, _DISK_FORMAT_VHDX]
_VALID_IMAGE_EXTENSIONS = _SUPPORTED_IMAGE_FORMATS
_always_use_temp_snap_when_cloning = False
_thin_provisioning_support = True
def __init__(self, *args, **kwargs):
self._remotefsclient = None
super(WindowsSmbfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.base = getattr(self.configuration,
'smbfs_mount_point_base')
self._remotefsclient = remotefs_brick.WindowsRemoteFsClient(
'cifs', root_helper=None, smbfs_mount_point_base=self.base,
local_path_for_loopback=True)
self._vhdutils = utilsfactory.get_vhdutils()
self._pathutils = utilsfactory.get_pathutils()
self._smbutils = utilsfactory.get_smbutils()
self._diskutils = utilsfactory.get_diskutils()
def do_setup(self, context):
self._check_os_platform()
if self.configuration.smbfs_oversub_ratio is not None:
self.configuration.max_over_subscription_ratio = (
self.configuration.smbfs_oversub_ratio)
if self.configuration.smbfs_used_ratio is not None:
self.configuration.reserved_percentage = (
1 - self.configuration.smbfs_used_ratio) * 100
super(WindowsSmbfsDriver, self).do_setup(context)
image_utils.check_qemu_img_version(self._MINIMUM_QEMU_IMG_VERSION)
config = self.configuration.smbfs_shares_config
if not config:
msg = (_("SMBFS config file not set (smbfs_shares_config)."))
LOG.error(msg)
raise exception.SmbfsException(msg)
if not os.path.exists(config):
msg = (_("SMBFS config file at %(config)s doesn't exist.") %
{'config': config})
LOG.error(msg)
raise exception.SmbfsException(msg)
if not os.path.isabs(self.base):
msg = _("Invalid mount point base: %s") % self.base
LOG.error(msg)
raise exception.SmbfsException(msg)
if not self.configuration.max_over_subscription_ratio > 0:
msg = _(
"SMBFS config 'max_over_subscription_ratio' invalid. "
"Must be > 0: %s"
) % self.configuration.max_over_subscription_ratio
LOG.error(msg)
raise exception.SmbfsException(msg)
if not 0 <= self.configuration.reserved_percentage <= 100:
msg = _(
"SMBFS config 'reserved_percentage' invalid. "
"Must be > 0 and <= 100: %s"
) % self.configuration.reserved_percentage
LOG.error(msg)
raise exception.SmbfsException(msg)
self.shares = {} # address : options
self._ensure_shares_mounted()
self._setup_pool_mappings()
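    # Build the share -> pool-name map from config, rejecting duplicate
    # pool names and falling back to the share name for unmapped shares.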
def _setup_pool_mappings(self):
self._pool_mappings = self.configuration.smbfs_pool_mappings
pools = list(self._pool_mappings.values())
duplicate_pools = set([pool for pool in pools
if pools.count(pool) > 1])
if duplicate_pools:
msg = _("Found multiple mappings for pools %(pools)s. "
"Requested pool mappings: %(pool_mappings)s")
raise exception.SmbfsException(
msg % dict(pools=duplicate_pools,
pool_mappings=self._pool_mappings))
shares_missing_mappings = (
set(self.shares).difference(set(self._pool_mappings)))
for share in shares_missing_mappings:
msg = ("No pool name was requested for share %(share)s "
"Using the share name instead.")
LOG.warning(msg, dict(share=share))
self._pool_mappings[share] = self._get_share_name(share)
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info.
:param volume: volume reference
:param connector: connector reference
"""
# Find active image
active_file = self.get_active_image_from_info(volume)
fmt = self.get_volume_format(volume)
data = {'export': volume.provider_location,
'format': fmt,
'name': active_file}
if volume.provider_location in self.shares:
data['options'] = self.shares[volume.provider_location]
return {
'driver_volume_type': self.driver_volume_type,
'data': data,
'mount_point_base': self._get_mount_point_base()
}
def _check_os_platform(self):
if sys.platform != 'win32':
_msg = _("This system platform (%s) is not supported. This "
"driver supports only Win32 platforms.") % sys.platform
raise exception.SmbfsException(_msg)
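    # Sum the sizes (GiB) of all volumes the DB reports for this pool's
    # host, converting the total to bytes.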
def _get_total_allocated(self, smbfs_share):
pool_name = self._get_pool_name_from_share(smbfs_share)
host = "#".join([self.host, pool_name])
vol_sz_sum = self.db.volume_data_get_for_host(
context=context.get_admin_context(),
host=host)[1]
return float(vol_sz_sum * units.Gi)
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference
"""
volume_path_template = self._get_local_volume_path_template(volume)
volume_path = self._lookup_local_volume_path(volume_path_template)
if volume_path:
return volume_path
# The image does not exist, so retrieve the volume format
# in order to build the path.
fmt = self.get_volume_format(volume)
volume_path = volume_path_template + '.' + fmt
return volume_path
def _get_local_volume_path_template(self, volume):
local_dir = self._local_volume_dir(volume)
local_path_template = os.path.join(local_dir, volume.name)
return local_path_template
def _lookup_local_volume_path(self, volume_path_template):
for ext in self._SUPPORTED_IMAGE_FORMATS:
volume_path = (volume_path_template + '.' + ext
if ext else volume_path_template)
if os.path.exists(volume_path):
return volume_path
def _get_new_snap_path(self, snapshot):
vol_path = self.local_path(snapshot.volume)
snap_path, ext = os.path.splitext(vol_path)
snap_path += '.' + snapshot.id + ext
return snap_path
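    # Resolve the volume's disk format: prefer the extension of an
    # existing image file, otherwise fall back to type/metadata specs or
    # the configured default. 'vhd' maps to qemu's legacy 'vpc' name.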
def get_volume_format(self, volume, qemu_format=False):
volume_path_template = self._get_local_volume_path_template(volume)
volume_path = self._lookup_local_volume_path(volume_path_template)
if volume_path:
ext = os.path.splitext(volume_path)[1].strip('.').lower()
if ext in self._SUPPORTED_IMAGE_FORMATS:
volume_format = ext
else:
# Hyper-V relies on file extensions so we're enforcing them.
raise exception.SmbfsException(
_("Invalid image file extension: %s") % ext)
else:
volume_format = (
self._get_volume_format_spec(volume) or
self.configuration.smbfs_default_volume_format)
if qemu_format and volume_format == self._DISK_FORMAT_VHD:
volume_format = self._DISK_FORMAT_VHD_LEGACY
elif volume_format == self._DISK_FORMAT_VHD_LEGACY:
volume_format = self._DISK_FORMAT_VHD
return volume_format
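    # Format precedence: 'volume_format' extra spec/metadata, then
    # 'smbfs:volume_format', then the configured default.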
def _get_volume_format_spec(self, volume):
vol_type = volume.volume_type
extra_specs = {}
if vol_type and vol_type.extra_specs:
extra_specs = vol_type.extra_specs
extra_specs.update(volume.metadata or {})
return (extra_specs.get('volume_format') or
extra_specs.get('smbfs:volume_format') or
self.configuration.smbfs_default_volume_format)
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def create_volume(self, volume):
return super(WindowsSmbfsDriver, self).create_volume(volume)
def _do_create_volume(self, volume):
volume_path = self.local_path(volume)
volume_format = self.get_volume_format(volume)
volume_size_bytes = volume.size * units.Gi
if os.path.exists(volume_path):
err_msg = _('File already exists at: %s') % volume_path
raise exception.InvalidVolume(err_msg)
if volume_format not in self._SUPPORTED_IMAGE_FORMATS:
err_msg = _("Unsupported volume format: %s ") % volume_format
raise exception.InvalidVolume(err_msg)
self._vhdutils.create_dynamic_vhd(volume_path, volume_size_bytes)
def _ensure_share_mounted(self, smbfs_share):
mnt_flags = None
if self.shares.get(smbfs_share) is not None:
mnt_flags = self.shares[smbfs_share]
self._remotefsclient.mount(smbfs_share, mnt_flags)
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def delete_volume(self, volume):
"""Deletes a logical volume."""
if not volume.provider_location:
LOG.warning('Volume %s does not have provider_location '
'specified, skipping.', volume.name)
return
self._ensure_share_mounted(volume.provider_location)
volume_dir = self._local_volume_dir(volume)
mounted_path = os.path.join(volume_dir,
self.get_active_image_from_info(volume))
if os.path.exists(mounted_path):
self._delete(mounted_path)
else:
LOG.debug("Skipping deletion of volume %s as it does not exist.",
mounted_path)
info_path = self._local_path_volume_info(volume)
self._delete(info_path)
def _delete(self, path):
fileutils.delete_if_exists(path)
def _get_capacity_info(self, smbfs_share):
"""Calculate available space on the SMBFS share.
:param smbfs_share: example //172.18.194.100/var/smbfs
"""
mount_point = self._get_mount_point_for_share(smbfs_share)
total_size, total_available = self._diskutils.get_disk_capacity(
mount_point)
total_allocated = self._get_total_allocated(smbfs_share)
return_value = [total_size, total_available, total_allocated]
LOG.info('Smb share %(share)s Total size %(size)s '
'Total allocated %(allocated)s',
{'share': smbfs_share, 'size': total_size,
'allocated': total_allocated})
return [float(x) for x in return_value]
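    # Merging a snapshot VHD folds its data back into the parent image.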
def _img_commit(self, snapshot_path):
self._vhdutils.merge_vhd(snapshot_path)
def _rebase_img(self, image, backing_file, volume_format):
# Relative path names are not supported in this case.
image_dir = os.path.dirname(image)
backing_file_path = os.path.join(image_dir, backing_file)
self._vhdutils.reconnect_parent_vhd(image, backing_file_path)
def _qemu_img_info(self, path, volume_name=None):
# This code expects to deal only with relative filenames.
# As this method is needed by the upper class and qemu-img does
# not fully support vhdx images, for the moment we'll use Win32 API
# for retrieving image information.
parent_path = self._vhdutils.get_vhd_parent_path(path)
file_format = os.path.splitext(path)[1][1:].lower()
if parent_path:
backing_file_name = os.path.split(parent_path)[1].lower()
else:
backing_file_name = None
class ImageInfo(object):
def __init__(self, image, backing_file):
self.image = image
self.backing_file = backing_file
self.file_format = file_format
return ImageInfo(os.path.basename(path),
backing_file_name)
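    # Offline snapshots are created as differencing VHDs layered on the
    # current image; for in-use volumes the snapshot is Nova-assisted.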
def _do_create_snapshot(self, snapshot, backing_file, new_snap_path):
if snapshot.volume.status == 'in-use':
LOG.debug("Snapshot is in-use. Performing Nova "
"assisted creation.")
return
backing_file_full_path = os.path.join(
self._local_volume_dir(snapshot.volume),
backing_file)
self._vhdutils.create_differencing_vhd(new_snap_path,
backing_file_full_path)
def _extend_volume(self, volume, size_gb):
self._check_extend_volume_support(volume, size_gb)
volume_path = self._local_path_active_image(volume)
LOG.info('Resizing file %(volume_path)s to %(size_gb)sGB.',
dict(volume_path=volume_path, size_gb=size_gb))
self._vhdutils.resize_vhd(volume_path, size_gb * units.Gi,
is_file_max_size=False)
def _delete_snapshot(self, snapshot):
# NOTE(lpetrut): We're slightly diverging from the super class
# workflow. The reason is that we cannot query in-use vhd/x images,
# nor can we add or remove images from a vhd/x chain in this case.
volume_status = snapshot.volume.status
if volume_status != 'in-use':
return super(WindowsSmbfsDriver, self)._delete_snapshot(snapshot)
info_path = self._local_path_volume_info(snapshot.volume)
snap_info = self._read_info_file(info_path, empty_if_missing=True)
if snapshot.id not in snap_info:
LOG.info('Snapshot record for %s is not present, allowing '
'snapshot_delete to proceed.', snapshot.id)
return
file_to_merge = snap_info[snapshot.id]
delete_info = {'file_to_merge': file_to_merge,
'volume_id': snapshot.volume.id}
self._nova_assisted_vol_snap_delete(
snapshot._context, snapshot, delete_info)
# At this point, the image file should no longer be in use, so we
# may safely query it so that we can update the 'active' image
# reference, if needed.
merged_img_path = os.path.join(
self._local_volume_dir(snapshot.volume),
file_to_merge)
if utils.paths_normcase_equal(snap_info['active'], file_to_merge):
new_active_file_path = self._vhdutils.get_vhd_parent_path(
merged_img_path).lower()
snap_info['active'] = os.path.basename(new_active_file_path)
self._delete(merged_img_path)
# TODO(lpetrut): drop snapshot info file usage.
        del snap_info[snapshot.id]
self._write_info_file(info_path, snap_info)
def _check_extend_volume_support(self, volume, size_gb):
snapshots_exist = self._snapshots_exist(volume)
fmt = self.get_volume_format(volume)
if snapshots_exist and fmt == self._DISK_FORMAT_VHD:
msg = _('Extending volumes backed by VHD images is not supported '
'when snapshots exist. Please use VHDX images.')
raise exception.InvalidVolume(msg)
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
backing_file = self._vhdutils.get_vhd_parent_path(active_file_path)
root_file_fmt = self.get_volume_format(volume)
temp_path = None
try:
if backing_file:
temp_file_name = '%s.temp_image.%s.%s' % (
volume.id,
image_meta['id'],
root_file_fmt)
temp_path = os.path.join(self._local_volume_dir(volume),
temp_file_name)
self._vhdutils.convert_vhd(active_file_path, temp_path)
upload_path = temp_path
else:
upload_path = active_file_path
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path,
root_file_fmt)
finally:
if temp_path:
self._delete(temp_path)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
volume_path = self.local_path(volume)
volume_format = self.get_volume_format(volume, qemu_format=True)
self._delete(volume_path)
image_utils.fetch_to_volume_format(
context, image_service, image_id,
volume_path, volume_format,
self.configuration.volume_dd_blocksize)
self._vhdutils.resize_vhd(self.local_path(volume),
volume.size * units.Gi,
is_file_max_size=False)
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
"""Copy data from snapshot to destination volume."""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s",
{'snap': snapshot.id,
'vol': volume.id,
'size': snapshot.volume_size})
info_path = self._local_path_volume_info(snapshot.volume)
snap_info = self._read_info_file(info_path)
vol_dir = self._local_volume_dir(snapshot.volume)
forward_file = snap_info[snapshot.id]
forward_path = os.path.join(vol_dir, forward_file)
# Find the file which backs this file, which represents the point
# when this snapshot was created.
img_info = self._qemu_img_info(forward_path)
snapshot_path = os.path.join(vol_dir, img_info.backing_file)
volume_path = self.local_path(volume)
self._delete(volume_path)
self._vhdutils.convert_vhd(snapshot_path,
volume_path)
self._vhdutils.resize_vhd(volume_path, volume_size * units.Gi,
is_file_max_size=False)
def _copy_volume_image(self, src_path, dest_path):
self._pathutils.copy(src_path, dest_path)
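    # Extract the share name from a UNC-style location, e.g.
    # //addr/share -> 'share'.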
def _get_share_name(self, share):
return share.replace('/', '\\').lstrip('\\').split('\\', 1)[1]
def _get_pool_name_from_share(self, share):
return self._pool_mappings[share]
def _get_share_from_pool_name(self, pool_name):
mappings = {pool: share
for share, pool in self._pool_mappings.items()}
share = mappings.get(pool_name)
if not share:
msg = _("Could not find any share for pool %(pool_name)s. "
"Pool mappings: %(pool_mappings)s.")
raise exception.SmbfsException(
msg % dict(pool_name=pool_name,
pool_mappings=self._pool_mappings))
return share
| 41.006861
| 79
| 0.633538
|
import os
import sys
from os_brick.remotefs import windows_remotefs as remotefs_brick
from os_win import utilsfactory
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import units
from cinder import context
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import interface
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers import remotefs as remotefs_drv
VERSION = '1.1.0'
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('smbfs_shares_config',
default=r'C:\OpenStack\smbfs_shares.txt',
help='File with the list of available smbfs shares.'),
cfg.StrOpt('smbfs_allocation_info_file_path',
default=r'C:\OpenStack\allocation_data.txt',
help=('The path of the automatically generated file containing '
'information about volume disk space allocation.'),
deprecated_for_removal=True,
deprecated_since="11.0.0",
deprecated_reason="This allocation file is no longer used."),
cfg.StrOpt('smbfs_default_volume_format',
default='vhd',
choices=['vhd', 'vhdx'],
help=('Default format that will be used when creating volumes '
'if no volume format is specified.')),
cfg.BoolOpt('smbfs_sparsed_volumes',
default=True,
help=('Create volumes as sparsed files which take no space '
'rather than regular files when using raw format, '
'in which case volume creation takes lot of time.')),
cfg.FloatOpt('smbfs_used_ratio',
default=None,
help=('Percent of ACTUAL usage of the underlying volume '
'before no new volumes can be allocated to the volume '
'destination.'),
deprecated_for_removal=True),
cfg.FloatOpt('smbfs_oversub_ratio',
default=None,
help=('This will compare the allocated to available space on '
'the volume destination. If the ratio exceeds this '
'number, the destination will no longer be valid.'),
deprecated_for_removal=True),
cfg.StrOpt('smbfs_mount_point_base',
default=r'C:\OpenStack\_mnt',
help=('Base dir containing mount points for smbfs shares.')),
cfg.DictOpt('smbfs_pool_mappings',
default={},
help=('Mappings between share locations and pool names. '
'If not specified, the share names will be used as '
'pool names. Example: '
'//addr/share:pool_name,//addr/share2:pool_name2')),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, group=configuration.SHARED_CONF_GROUP)
CONF.set_default('max_over_subscription_ratio', 1)
CONF.set_default('reserved_percentage', 5)
@interface.volumedriver
class WindowsSmbfsDriver(remotefs_drv.RemoteFSPoolMixin,
remotefs_drv.RemoteFSSnapDriverDistributed):
VERSION = VERSION
driver_volume_type = 'smbfs'
driver_prefix = 'smbfs'
volume_backend_name = 'Generic_SMBFS'
SHARE_FORMAT_REGEX = r'//.+/.+'
VERSION = VERSION
_DISK_FORMAT_VHD = 'vhd'
_DISK_FORMAT_VHD_LEGACY = 'vpc'
_DISK_FORMAT_VHDX = 'vhdx'
CI_WIKI_NAME = "Microsoft_iSCSI_CI"
_MINIMUM_QEMU_IMG_VERSION = '1.6'
_SUPPORTED_IMAGE_FORMATS = [_DISK_FORMAT_VHD, _DISK_FORMAT_VHDX]
_VALID_IMAGE_EXTENSIONS = _SUPPORTED_IMAGE_FORMATS
_always_use_temp_snap_when_cloning = False
_thin_provisioning_support = True
def __init__(self, *args, **kwargs):
self._remotefsclient = None
super(WindowsSmbfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self.base = getattr(self.configuration,
'smbfs_mount_point_base')
self._remotefsclient = remotefs_brick.WindowsRemoteFsClient(
'cifs', root_helper=None, smbfs_mount_point_base=self.base,
local_path_for_loopback=True)
self._vhdutils = utilsfactory.get_vhdutils()
self._pathutils = utilsfactory.get_pathutils()
self._smbutils = utilsfactory.get_smbutils()
self._diskutils = utilsfactory.get_diskutils()
def do_setup(self, context):
self._check_os_platform()
if self.configuration.smbfs_oversub_ratio is not None:
self.configuration.max_over_subscription_ratio = (
self.configuration.smbfs_oversub_ratio)
if self.configuration.smbfs_used_ratio is not None:
self.configuration.reserved_percentage = (
1 - self.configuration.smbfs_used_ratio) * 100
super(WindowsSmbfsDriver, self).do_setup(context)
image_utils.check_qemu_img_version(self._MINIMUM_QEMU_IMG_VERSION)
config = self.configuration.smbfs_shares_config
if not config:
msg = (_("SMBFS config file not set (smbfs_shares_config)."))
LOG.error(msg)
raise exception.SmbfsException(msg)
if not os.path.exists(config):
msg = (_("SMBFS config file at %(config)s doesn't exist.") %
{'config': config})
LOG.error(msg)
raise exception.SmbfsException(msg)
if not os.path.isabs(self.base):
msg = _("Invalid mount point base: %s") % self.base
LOG.error(msg)
raise exception.SmbfsException(msg)
if not self.configuration.max_over_subscription_ratio > 0:
msg = _(
"SMBFS config 'max_over_subscription_ratio' invalid. "
"Must be > 0: %s"
) % self.configuration.max_over_subscription_ratio
LOG.error(msg)
raise exception.SmbfsException(msg)
if not 0 <= self.configuration.reserved_percentage <= 100:
msg = _(
"SMBFS config 'reserved_percentage' invalid. "
"Must be > 0 and <= 100: %s"
) % self.configuration.reserved_percentage
LOG.error(msg)
raise exception.SmbfsException(msg)
self.shares = {} # address : options
self._ensure_shares_mounted()
self._setup_pool_mappings()
def _setup_pool_mappings(self):
self._pool_mappings = self.configuration.smbfs_pool_mappings
pools = list(self._pool_mappings.values())
duplicate_pools = set([pool for pool in pools
if pools.count(pool) > 1])
if duplicate_pools:
msg = _("Found multiple mappings for pools %(pools)s. "
"Requested pool mappings: %(pool_mappings)s")
raise exception.SmbfsException(
msg % dict(pools=duplicate_pools,
pool_mappings=self._pool_mappings))
shares_missing_mappings = (
set(self.shares).difference(set(self._pool_mappings)))
for share in shares_missing_mappings:
msg = ("No pool name was requested for share %(share)s "
"Using the share name instead.")
LOG.warning(msg, dict(share=share))
self._pool_mappings[share] = self._get_share_name(share)
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def initialize_connection(self, volume, connector):
# Find active image
active_file = self.get_active_image_from_info(volume)
fmt = self.get_volume_format(volume)
data = {'export': volume.provider_location,
'format': fmt,
'name': active_file}
if volume.provider_location in self.shares:
data['options'] = self.shares[volume.provider_location]
return {
'driver_volume_type': self.driver_volume_type,
'data': data,
'mount_point_base': self._get_mount_point_base()
}
def _check_os_platform(self):
if sys.platform != 'win32':
_msg = _("This system platform (%s) is not supported. This "
"driver supports only Win32 platforms.") % sys.platform
raise exception.SmbfsException(_msg)
def _get_total_allocated(self, smbfs_share):
pool_name = self._get_pool_name_from_share(smbfs_share)
host = "#".join([self.host, pool_name])
vol_sz_sum = self.db.volume_data_get_for_host(
context=context.get_admin_context(),
host=host)[1]
return float(vol_sz_sum * units.Gi)
def local_path(self, volume):
volume_path_template = self._get_local_volume_path_template(volume)
volume_path = self._lookup_local_volume_path(volume_path_template)
if volume_path:
return volume_path
# The image does not exist, so retrieve the volume format
# in order to build the path.
fmt = self.get_volume_format(volume)
volume_path = volume_path_template + '.' + fmt
return volume_path
def _get_local_volume_path_template(self, volume):
local_dir = self._local_volume_dir(volume)
local_path_template = os.path.join(local_dir, volume.name)
return local_path_template
def _lookup_local_volume_path(self, volume_path_template):
for ext in self._SUPPORTED_IMAGE_FORMATS:
volume_path = (volume_path_template + '.' + ext
if ext else volume_path_template)
if os.path.exists(volume_path):
return volume_path
def _get_new_snap_path(self, snapshot):
vol_path = self.local_path(snapshot.volume)
snap_path, ext = os.path.splitext(vol_path)
snap_path += '.' + snapshot.id + ext
return snap_path
def get_volume_format(self, volume, qemu_format=False):
volume_path_template = self._get_local_volume_path_template(volume)
volume_path = self._lookup_local_volume_path(volume_path_template)
if volume_path:
ext = os.path.splitext(volume_path)[1].strip('.').lower()
if ext in self._SUPPORTED_IMAGE_FORMATS:
volume_format = ext
else:
# Hyper-V relies on file extensions so we're enforcing them.
raise exception.SmbfsException(
_("Invalid image file extension: %s") % ext)
else:
volume_format = (
self._get_volume_format_spec(volume) or
self.configuration.smbfs_default_volume_format)
if qemu_format and volume_format == self._DISK_FORMAT_VHD:
volume_format = self._DISK_FORMAT_VHD_LEGACY
elif volume_format == self._DISK_FORMAT_VHD_LEGACY:
volume_format = self._DISK_FORMAT_VHD
return volume_format
def _get_volume_format_spec(self, volume):
vol_type = volume.volume_type
extra_specs = {}
if vol_type and vol_type.extra_specs:
extra_specs = vol_type.extra_specs
extra_specs.update(volume.metadata or {})
return (extra_specs.get('volume_format') or
extra_specs.get('smbfs:volume_format') or
self.configuration.smbfs_default_volume_format)
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def create_volume(self, volume):
return super(WindowsSmbfsDriver, self).create_volume(volume)
def _do_create_volume(self, volume):
volume_path = self.local_path(volume)
volume_format = self.get_volume_format(volume)
volume_size_bytes = volume.size * units.Gi
if os.path.exists(volume_path):
err_msg = _('File already exists at: %s') % volume_path
raise exception.InvalidVolume(err_msg)
if volume_format not in self._SUPPORTED_IMAGE_FORMATS:
err_msg = _("Unsupported volume format: %s ") % volume_format
raise exception.InvalidVolume(err_msg)
self._vhdutils.create_dynamic_vhd(volume_path, volume_size_bytes)
def _ensure_share_mounted(self, smbfs_share):
mnt_flags = None
if self.shares.get(smbfs_share) is not None:
mnt_flags = self.shares[smbfs_share]
self._remotefsclient.mount(smbfs_share, mnt_flags)
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def delete_volume(self, volume):
if not volume.provider_location:
LOG.warning('Volume %s does not have provider_location '
'specified, skipping.', volume.name)
return
self._ensure_share_mounted(volume.provider_location)
volume_dir = self._local_volume_dir(volume)
mounted_path = os.path.join(volume_dir,
self.get_active_image_from_info(volume))
if os.path.exists(mounted_path):
self._delete(mounted_path)
else:
LOG.debug("Skipping deletion of volume %s as it does not exist.",
mounted_path)
info_path = self._local_path_volume_info(volume)
self._delete(info_path)
def _delete(self, path):
fileutils.delete_if_exists(path)
def _get_capacity_info(self, smbfs_share):
mount_point = self._get_mount_point_for_share(smbfs_share)
total_size, total_available = self._diskutils.get_disk_capacity(
mount_point)
total_allocated = self._get_total_allocated(smbfs_share)
return_value = [total_size, total_available, total_allocated]
LOG.info('Smb share %(share)s Total size %(size)s '
'Total allocated %(allocated)s',
{'share': smbfs_share, 'size': total_size,
'allocated': total_allocated})
return [float(x) for x in return_value]
def _img_commit(self, snapshot_path):
self._vhdutils.merge_vhd(snapshot_path)
def _rebase_img(self, image, backing_file, volume_format):
image_dir = os.path.dirname(image)
backing_file_path = os.path.join(image_dir, backing_file)
self._vhdutils.reconnect_parent_vhd(image, backing_file_path)
def _qemu_img_info(self, path, volume_name=None):
# for retrieving image information.
parent_path = self._vhdutils.get_vhd_parent_path(path)
file_format = os.path.splitext(path)[1][1:].lower()
if parent_path:
backing_file_name = os.path.split(parent_path)[1].lower()
else:
backing_file_name = None
class ImageInfo(object):
def __init__(self, image, backing_file):
self.image = image
self.backing_file = backing_file
self.file_format = file_format
return ImageInfo(os.path.basename(path),
backing_file_name)
def _do_create_snapshot(self, snapshot, backing_file, new_snap_path):
if snapshot.volume.status == 'in-use':
LOG.debug("Snapshot is in-use. Performing Nova "
"assisted creation.")
return
backing_file_full_path = os.path.join(
self._local_volume_dir(snapshot.volume),
backing_file)
self._vhdutils.create_differencing_vhd(new_snap_path,
backing_file_full_path)
def _extend_volume(self, volume, size_gb):
self._check_extend_volume_support(volume, size_gb)
volume_path = self._local_path_active_image(volume)
LOG.info('Resizing file %(volume_path)s to %(size_gb)sGB.',
dict(volume_path=volume_path, size_gb=size_gb))
self._vhdutils.resize_vhd(volume_path, size_gb * units.Gi,
is_file_max_size=False)
def _delete_snapshot(self, snapshot):
# NOTE(lpetrut): We're slightly diverging from the super class
volume_status = snapshot.volume.status
if volume_status != 'in-use':
return super(WindowsSmbfsDriver, self)._delete_snapshot(snapshot)
info_path = self._local_path_volume_info(snapshot.volume)
snap_info = self._read_info_file(info_path, empty_if_missing=True)
if snapshot.id not in snap_info:
LOG.info('Snapshot record for %s is not present, allowing '
'snapshot_delete to proceed.', snapshot.id)
return
file_to_merge = snap_info[snapshot.id]
delete_info = {'file_to_merge': file_to_merge,
'volume_id': snapshot.volume.id}
self._nova_assisted_vol_snap_delete(
snapshot._context, snapshot, delete_info)
merged_img_path = os.path.join(
self._local_volume_dir(snapshot.volume),
file_to_merge)
if utils.paths_normcase_equal(snap_info['active'], file_to_merge):
new_active_file_path = self._vhdutils.get_vhd_parent_path(
merged_img_path).lower()
snap_info['active'] = os.path.basename(new_active_file_path)
self._delete(merged_img_path)
        del snap_info[snapshot.id]
self._write_info_file(info_path, snap_info)
def _check_extend_volume_support(self, volume, size_gb):
snapshots_exist = self._snapshots_exist(volume)
fmt = self.get_volume_format(volume)
if snapshots_exist and fmt == self._DISK_FORMAT_VHD:
msg = _('Extending volumes backed by VHD images is not supported '
'when snapshots exist. Please use VHDX images.')
raise exception.InvalidVolume(msg)
@coordination.synchronized('{self.driver_prefix}-{volume.id}')
def copy_volume_to_image(self, context, volume, image_service, image_meta):
active_file = self.get_active_image_from_info(volume)
active_file_path = os.path.join(self._local_volume_dir(volume),
active_file)
backing_file = self._vhdutils.get_vhd_parent_path(active_file_path)
root_file_fmt = self.get_volume_format(volume)
temp_path = None
try:
if backing_file:
temp_file_name = '%s.temp_image.%s.%s' % (
volume.id,
image_meta['id'],
root_file_fmt)
temp_path = os.path.join(self._local_volume_dir(volume),
temp_file_name)
self._vhdutils.convert_vhd(active_file_path, temp_path)
upload_path = temp_path
else:
upload_path = active_file_path
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path,
root_file_fmt)
finally:
if temp_path:
self._delete(temp_path)
def copy_image_to_volume(self, context, volume, image_service, image_id):
volume_path = self.local_path(volume)
volume_format = self.get_volume_format(volume, qemu_format=True)
self._delete(volume_path)
image_utils.fetch_to_volume_format(
context, image_service, image_id,
volume_path, volume_format,
self.configuration.volume_dd_blocksize)
self._vhdutils.resize_vhd(self.local_path(volume),
volume.size * units.Gi,
is_file_max_size=False)
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s",
{'snap': snapshot.id,
'vol': volume.id,
'size': snapshot.volume_size})
info_path = self._local_path_volume_info(snapshot.volume)
snap_info = self._read_info_file(info_path)
vol_dir = self._local_volume_dir(snapshot.volume)
forward_file = snap_info[snapshot.id]
forward_path = os.path.join(vol_dir, forward_file)
img_info = self._qemu_img_info(forward_path)
snapshot_path = os.path.join(vol_dir, img_info.backing_file)
volume_path = self.local_path(volume)
self._delete(volume_path)
self._vhdutils.convert_vhd(snapshot_path,
volume_path)
self._vhdutils.resize_vhd(volume_path, volume_size * units.Gi,
is_file_max_size=False)
def _copy_volume_image(self, src_path, dest_path):
self._pathutils.copy(src_path, dest_path)
def _get_share_name(self, share):
return share.replace('/', '\\').lstrip('\\').split('\\', 1)[1]
def _get_pool_name_from_share(self, share):
return self._pool_mappings[share]
def _get_share_from_pool_name(self, pool_name):
mappings = {pool: share
for share, pool in self._pool_mappings.items()}
share = mappings.get(pool_name)
if not share:
msg = _("Could not find any share for pool %(pool_name)s. "
"Pool mappings: %(pool_mappings)s.")
raise exception.SmbfsException(
msg % dict(pool_name=pool_name,
pool_mappings=self._pool_mappings))
return share
| true
| true
|
1c402220bf1776fdb1e333358163fcdcc1643862
| 6,550
|
py
|
Python
|
setup.py
|
basnijholt/ipyparaview
|
6550093d6d5df4c04d1b50e57d9c3a773789ec21
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
basnijholt/ipyparaview
|
6550093d6d5df4c04d1b50e57d9c3a773789ec21
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
basnijholt/ipyparaview
|
6550093d6d5df4c04d1b50e57d9c3a773789ec21
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import os
import platform
import sys
from subprocess import check_call
from setuptools import Command, find_packages, setup
from setuptools.command.build_py import build_py
from setuptools.command.egg_info import egg_info
from setuptools.command.sdist import sdist
here = os.path.dirname(os.path.abspath(__file__))
node_root = os.path.join(here, "js")
is_repo = os.path.exists(os.path.join(here, ".git"))
npm_path = os.pathsep.join(
[
os.path.join(node_root, "node_modules", ".bin"),
os.environ.get("PATH", os.defpath),
]
)
from distutils import log
log.set_verbosity(log.DEBUG)
log.info("setup.py entered")
log.info("$PATH=%s" % os.environ["PATH"])
LONG_DESCRIPTION = "A widget for interactive server-side ParaView rendering"
def js_prerelease(command, strict=False):
"""decorator for building minified js/css prior to another command"""
class DecoratedCommand(command):
def run(self):
jsdeps = self.distribution.get_command_obj("jsdeps")
if not is_repo and all(os.path.exists(t) for t in jsdeps.targets):
# sdist, nothing to do
command.run(self)
return
try:
self.distribution.run_command("jsdeps")
except Exception as e:
missing = [t for t in jsdeps.targets if not os.path.exists(t)]
if strict or missing:
log.warn("rebuilding js and css failed")
if missing:
log.error("missing files: %s" % missing)
raise e
else:
log.warn("rebuilding js and css failed (not a problem)")
log.warn(str(e))
command.run(self)
update_package_data(self.distribution)
return DecoratedCommand
def update_package_data(distribution):
"""update package_data to catch changes during setup"""
build_py = distribution.get_command_obj("build_py")
# distribution.package_data = find_package_data()
# re-init build_py options which load package_data
build_py.finalize_options()
class NPM(Command):
description = "install package.json dependencies using npm"
user_options = []
node_modules = os.path.join(node_root, "node_modules")
targets = [
os.path.join(here, "ipyparaview", "static", "extension.js"),
os.path.join(here, "ipyparaview", "static", "index.js"),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def get_npm_name(self):
npmName = "npm"
if platform.system() == "Windows":
npmName = "npm.cmd"
return npmName
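    # Probe for npm by invoking `npm --version`; any failure (missing
    # binary, non-zero exit) means npm is unavailable.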
def has_npm(self):
npmName = self.get_npm_name()
try:
check_call([npmName, "--version"])
return True
        except Exception:
return False
def should_run_npm_install(self):
package_json = os.path.join(node_root, "package.json")
node_modules_exists = os.path.exists(self.node_modules)
return self.has_npm()
def run(self):
has_npm = self.has_npm()
if not has_npm:
log.error(
"`npm` unavailable. If you're running this command using sudo, make sure `npm` is available to sudo"
)
env = os.environ.copy()
env["PATH"] = npm_path
if self.should_run_npm_install():
log.info(
"Installing build dependencies with npm. This may take a while..."
)
npmName = self.get_npm_name()
# NOTE: this is a dirty hack to get around permissions issues with npm in docker
# It's not portable, or reliable. It may kill your dog without warning.
import getpass
if getpass.getuser() == "root":
check_call(
[npmName, "install", "--unsafe-perm"],
cwd=node_root,
stdout=sys.stdout,
stderr=sys.stderr,
)
else:
check_call(
[npmName, "install"],
cwd=node_root,
stdout=sys.stdout,
stderr=sys.stderr,
)
os.utime(self.node_modules, None)
for t in self.targets:
if not os.path.exists(t):
msg = "Missing file: %s" % t
if not has_npm:
msg += "\nnpm is required to build a development version of a widget extension"
raise ValueError(msg)
# update package data in case this created new files
update_package_data(self.distribution)
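# Read __version__ from ipyparaview/_version.py without importing the
# package (which may not be installed yet at build time).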
version_ns = {}
with open(os.path.join(here, "ipyparaview", "_version.py")) as f:
exec(f.read(), {}, version_ns)
setup_args = {
"name": "ipyparaview",
"version": version_ns["__version__"],
"description": "A widget for interactive server-side ParaView rendering",
"long_description": LONG_DESCRIPTION,
"include_package_data": True,
"data_files": [
(
"share/jupyter/nbextensions/ipyparaview",
[
"ipyparaview/static/extension.js",
"ipyparaview/static/index.js",
"ipyparaview/static/index.js.map",
],
),
("etc/jupyter/nbconfig/notebook.d", ["ipyparaview.json"]),
],
"install_requires": [
"ipywidgets>=7.0.0",
"pillow>=7.0.0" "numpy",
],
"packages": find_packages(),
"zip_safe": False,
"cmdclass": {
"build_py": js_prerelease(build_py),
"egg_info": js_prerelease(egg_info),
"sdist": js_prerelease(sdist, strict=True),
"jsdeps": NPM,
},
"author": "Nick Leaf",
"author_email": "nleaf@nvidia.com",
"url": "https://github.com/NVIDIA/ipyparaview",
"keywords": [
"ipython",
"jupyter",
"widgets",
],
"classifiers": [
"Development Status :: 4 - Beta",
"Framework :: IPython",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Multimedia :: Graphics",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
}
setup(**setup_args)
| 31.042654
| 117
| 0.575725
|
from __future__ import print_function
import os
import platform
import sys
from subprocess import check_call
from setuptools import Command, find_packages, setup
from setuptools.command.build_py import build_py
from setuptools.command.egg_info import egg_info
from setuptools.command.sdist import sdist
here = os.path.dirname(os.path.abspath(__file__))
node_root = os.path.join(here, "js")
is_repo = os.path.exists(os.path.join(here, ".git"))
npm_path = os.pathsep.join(
[
os.path.join(node_root, "node_modules", ".bin"),
os.environ.get("PATH", os.defpath),
]
)
from distutils import log
log.set_verbosity(log.DEBUG)
log.info("setup.py entered")
log.info("$PATH=%s" % os.environ["PATH"])
LONG_DESCRIPTION = "A widget for interactive server-side ParaView rendering"
def js_prerelease(command, strict=False):
class DecoratedCommand(command):
def run(self):
jsdeps = self.distribution.get_command_obj("jsdeps")
if not is_repo and all(os.path.exists(t) for t in jsdeps.targets):
command.run(self)
return
try:
self.distribution.run_command("jsdeps")
except Exception as e:
missing = [t for t in jsdeps.targets if not os.path.exists(t)]
if strict or missing:
log.warn("rebuilding js and css failed")
if missing:
log.error("missing files: %s" % missing)
raise e
else:
log.warn("rebuilding js and css failed (not a problem)")
log.warn(str(e))
command.run(self)
update_package_data(self.distribution)
return DecoratedCommand
def update_package_data(distribution):
build_py = distribution.get_command_obj("build_py")
build_py.finalize_options()
class NPM(Command):
description = "install package.json dependencies using npm"
user_options = []
node_modules = os.path.join(node_root, "node_modules")
targets = [
os.path.join(here, "ipyparaview", "static", "extension.js"),
os.path.join(here, "ipyparaview", "static", "index.js"),
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def get_npm_name(self):
npmName = "npm"
if platform.system() == "Windows":
npmName = "npm.cmd"
return npmName
def has_npm(self):
npmName = self.get_npm_name()
try:
check_call([npmName, "--version"])
return True
        except Exception:
return False
def should_run_npm_install(self):
package_json = os.path.join(node_root, "package.json")
node_modules_exists = os.path.exists(self.node_modules)
return self.has_npm()
def run(self):
has_npm = self.has_npm()
if not has_npm:
log.error(
"`npm` unavailable. If you're running this command using sudo, make sure `npm` is available to sudo"
)
env = os.environ.copy()
env["PATH"] = npm_path
if self.should_run_npm_install():
log.info(
"Installing build dependencies with npm. This may take a while..."
)
npmName = self.get_npm_name()
# NOTE: this is a dirty hack to get around permissions issues with npm in docker
# It's not portable, or reliable. It may kill your dog without warning.
import getpass
if getpass.getuser() == "root":
check_call(
[npmName, "install", "--unsafe-perm"],
cwd=node_root,
stdout=sys.stdout,
stderr=sys.stderr,
)
else:
check_call(
[npmName, "install"],
cwd=node_root,
stdout=sys.stdout,
stderr=sys.stderr,
)
os.utime(self.node_modules, None)
for t in self.targets:
if not os.path.exists(t):
msg = "Missing file: %s" % t
if not has_npm:
msg += "\nnpm is required to build a development version of a widget extension"
raise ValueError(msg)
update_package_data(self.distribution)
version_ns = {}
with open(os.path.join(here, "ipyparaview", "_version.py")) as f:
exec(f.read(), {}, version_ns)
setup_args = {
"name": "ipyparaview",
"version": version_ns["__version__"],
"description": "A widget for interactive server-side ParaView rendering",
"long_description": LONG_DESCRIPTION,
"include_package_data": True,
"data_files": [
(
"share/jupyter/nbextensions/ipyparaview",
[
"ipyparaview/static/extension.js",
"ipyparaview/static/index.js",
"ipyparaview/static/index.js.map",
],
),
("etc/jupyter/nbconfig/notebook.d", ["ipyparaview.json"]),
],
"install_requires": [
"ipywidgets>=7.0.0",
"pillow>=7.0.0" "numpy",
],
"packages": find_packages(),
"zip_safe": False,
"cmdclass": {
"build_py": js_prerelease(build_py),
"egg_info": js_prerelease(egg_info),
"sdist": js_prerelease(sdist, strict=True),
"jsdeps": NPM,
},
"author": "Nick Leaf",
"author_email": "nleaf@nvidia.com",
"url": "https://github.com/NVIDIA/ipyparaview",
"keywords": [
"ipython",
"jupyter",
"widgets",
],
"classifiers": [
"Development Status :: 4 - Beta",
"Framework :: IPython",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Topic :: Multimedia :: Graphics",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
}
setup(**setup_args)
| true
| true
|
1c4022651ca89b6ae53bd0f71df3a270235c0479
| 7,515
|
py
|
Python
|
terroroftinytown/tracker/app.py
|
Flashfire42/terroroftinytown
|
c52be7ac0f7abc37f4c90955e5c96b91f935903a
|
[
"MIT"
] | null | null | null |
terroroftinytown/tracker/app.py
|
Flashfire42/terroroftinytown
|
c52be7ac0f7abc37f4c90955e5c96b91f935903a
|
[
"MIT"
] | null | null | null |
terroroftinytown/tracker/app.py
|
Flashfire42/terroroftinytown
|
c52be7ac0f7abc37f4c90955e5c96b91f935903a
|
[
"MIT"
] | null | null | null |
# encoding=utf-8
import functools
import os.path
from tornado.web import URLSpec as U
import tornado.web
from terroroftinytown.client.alphabet import str_to_int, int_to_str
from terroroftinytown.services.registry import registry
from terroroftinytown.tracker import account, admin, project, api
from terroroftinytown.tracker import model
from terroroftinytown.tracker.base import BaseHandler
from terroroftinytown.tracker.errors import UserIsBanned
from terroroftinytown.tracker.form import CalculatorForm
from terroroftinytown.tracker.model import GlobalSetting, ErrorReport
from terroroftinytown.tracker.stats import Stats
from terroroftinytown.tracker.ui import FormUIModule
class Application(tornado.web.Application):
def __init__(self, database, redis=None, **kwargs):
self.db = database
self.redis = redis
handlers = [
U(r'/', IndexHandler, name='index'),
U(r'/admin/', admin.AdminHandler, name='admin.overview'),
U(r'/admin/banned', admin.BannedHandler, name='admin.banned'),
U(r'/admin/login', account.LoginHandler, name='admin.login'),
U(r'/admin/logout', account.LogoutHandler, name='admin.logout'),
U(r'/admin/results', admin.ResultsHandler, name='admin.results'),
U(r'/admin/error_reports', admin.ErrorReportsListHandler,
name='admin.error_reports'),
U(r'/admin/error_reports/delete_all',
admin.ErrorReportsDeleteAllHandler,
name='admin.error_reports.delete_all'),
U(r'/admin/error_reports/delete_one/(.+)',
admin.ErrorReportsDeleteOneHandler,
name='admin.error_reports.delete_one'),
U(r'/admin/error_reports/auto_delete_setting',
admin.AutoDeleteErrorReportsSettingHandler,
name='admin.error_reports.auto_delete_setting'),
U(r'/users/', account.AllUsersHandler, name='users.overview'),
U(r'/user/([a-z0-9_-]*)', account.UserHandler, name='user.overview'),
U(r'/projects/overview', project.AllProjectsHandler, name='projects.overview'),
U(r'/project/([a-z0-9_-]*)', project.ProjectHandler, name='project.overview'),
U(r'/project/([a-z0-9_-]*)/queue', project.QueueHandler, name='project.queue'),
U(r'/project/([a-z0-9_-]*)/claims', project.ClaimsHandler, name='project.claims'),
U(r'/project/([a-z0-9_-]*)/settings', project.SettingsHandler, name='project.settings'),
U(r'/project/([a-z0-9_-]*)/delete', project.DeleteHandler, name='project.delete'),
U(r'/api/live_stats', api.LiveStatsHandler, name='api.live_stats'),
U(r'/api/project_settings', api.ProjectSettingsHandler, name='api.project_settings'),
U(r'/api/get', api.GetHandler, name='api.get'),
U(r'/api/done', api.DoneHandler, name='api.done'),
U(r'/api/error', api.ErrorHandler, name='api.error'),
U(r'/status', StatusHandler, name='index.status'),
U(r'/calculator', CalculatorHandler, name='index.calculator'),
]
static_path = os.path.join(
os.path.dirname(__file__), 'static'
)
template_path = os.path.join(
os.path.dirname(__file__), 'template'
)
ui_modules = {
'Form': FormUIModule,
}
super(Application, self).__init__(
handlers,
static_path=static_path,
template_path=template_path,
login_url='/admin/login',
ui_modules=ui_modules,
**kwargs
)
def job_task():
if self.is_maintenance_in_progress():
return
model.Item.release_old(autoqueue_only=True)
model.Budget.calculate_budgets()
job_task()
self._job_timer = tornado.ioloop.PeriodicCallback(
job_task,
60 * 1000
)
self._job_timer.start()
def clean_error_reports():
if self.is_maintenance_in_progress():
return
enabled = GlobalSetting.get_value(
GlobalSetting.AUTO_DELETE_ERROR_REPORTS)
if enabled:
ErrorReport.delete_orphaned()
clean_error_reports()
self._clean_error_reports_timer = tornado.ioloop.PeriodicCallback(
clean_error_reports,
300 * 1000
)
self._clean_error_reports_timer.start()
def checkout_item(self, username, ip_address=None, version=-1, client_version=-1):
if model.BlockedUser.is_username_blocked(username, ip_address):
raise UserIsBanned()
return model.checkout_item(username, ip_address, version, client_version)
def checkin_item(self, item_id, tamper_key, results):
return model.checkin_item(item_id, tamper_key, results)
def report_error(self, item_id, tamper_key, message):
model.report_error(item_id, tamper_key, message)
def is_maintenance_in_progress(self):
sentinel_path = self.settings.get('maintenance_sentinel')
return sentinel_path and os.path.exists(sentinel_path)
class IndexHandler(BaseHandler):
def get(self):
lifetime_list = [
(username, found, scanned)
for username, (found, scanned)
in Stats.instance.get_lifetime().items()
]
lifetime_list = sorted(lifetime_list, key=lambda item: item[2],
reverse=True)
stats = {
'global': Stats.instance.get_global(),
'lifetime': lifetime_list[:300],
'live': Stats.instance.get_live(),
}
self.render('index.html', stats=stats)
class StatusHandler(BaseHandler):
GIT_HASH = model.get_git_hash()
def get(self):
        projects = [
            model.Project.get_plain(name)
            for name in model.Project.all_project_names()]
project_stats = Stats.instance.get_project()
self.render('status.html', projects=projects, services=registry,
project_stats=project_stats,
git_hash=self.GIT_HASH)
class CalculatorHandler(BaseHandler):
def get_current_user(self):
# No need for database access
pass
def _show_maintenance_page(self):
pass
def get(self):
form = CalculatorForm(self.request.arguments)
message = None
convert_direction = self.get_argument('convert', None)
if convert_direction and form.validate():
try:
if convert_direction == 'up':
source_number = self.get_argument('number_2')
source_alphabet = self.get_argument('alphabet_2')
target_alphabet = self.get_argument('alphabet_1')
num = str_to_int(source_number, source_alphabet)
form.number_1.data = int_to_str(num, target_alphabet)
else:
source_number = self.get_argument('number_1')
source_alphabet = self.get_argument('alphabet_1')
target_alphabet = self.get_argument('alphabet_2')
num = str_to_int(source_number, source_alphabet)
form.number_2.data = int_to_str(num, target_alphabet)
except ValueError as error:
message = str(error)
self.render('calculator.html', form=form, message=message)
| 37.575
| 100
| 0.623021
|
import functools
import os.path
from tornado.web import URLSpec as U
import tornado.web
from terroroftinytown.client.alphabet import str_to_int, int_to_str
from terroroftinytown.services.registry import registry
from terroroftinytown.tracker import account, admin, project, api
from terroroftinytown.tracker import model
from terroroftinytown.tracker.base import BaseHandler
from terroroftinytown.tracker.errors import UserIsBanned
from terroroftinytown.tracker.form import CalculatorForm
from terroroftinytown.tracker.model import GlobalSetting, ErrorReport
from terroroftinytown.tracker.stats import Stats
from terroroftinytown.tracker.ui import FormUIModule
class Application(tornado.web.Application):
def __init__(self, database, redis=None, **kwargs):
self.db = database
self.redis = redis
handlers = [
U(r'/', IndexHandler, name='index'),
U(r'/admin/', admin.AdminHandler, name='admin.overview'),
U(r'/admin/banned', admin.BannedHandler, name='admin.banned'),
U(r'/admin/login', account.LoginHandler, name='admin.login'),
U(r'/admin/logout', account.LogoutHandler, name='admin.logout'),
U(r'/admin/results', admin.ResultsHandler, name='admin.results'),
U(r'/admin/error_reports', admin.ErrorReportsListHandler,
name='admin.error_reports'),
U(r'/admin/error_reports/delete_all',
admin.ErrorReportsDeleteAllHandler,
name='admin.error_reports.delete_all'),
U(r'/admin/error_reports/delete_one/(.+)',
admin.ErrorReportsDeleteOneHandler,
name='admin.error_reports.delete_one'),
U(r'/admin/error_reports/auto_delete_setting',
admin.AutoDeleteErrorReportsSettingHandler,
name='admin.error_reports.auto_delete_setting'),
U(r'/users/', account.AllUsersHandler, name='users.overview'),
U(r'/user/([a-z0-9_-]*)', account.UserHandler, name='user.overview'),
U(r'/projects/overview', project.AllProjectsHandler, name='projects.overview'),
U(r'/project/([a-z0-9_-]*)', project.ProjectHandler, name='project.overview'),
U(r'/project/([a-z0-9_-]*)/queue', project.QueueHandler, name='project.queue'),
U(r'/project/([a-z0-9_-]*)/claims', project.ClaimsHandler, name='project.claims'),
U(r'/project/([a-z0-9_-]*)/settings', project.SettingsHandler, name='project.settings'),
U(r'/project/([a-z0-9_-]*)/delete', project.DeleteHandler, name='project.delete'),
U(r'/api/live_stats', api.LiveStatsHandler, name='api.live_stats'),
U(r'/api/project_settings', api.ProjectSettingsHandler, name='api.project_settings'),
U(r'/api/get', api.GetHandler, name='api.get'),
U(r'/api/done', api.DoneHandler, name='api.done'),
U(r'/api/error', api.ErrorHandler, name='api.error'),
U(r'/status', StatusHandler, name='index.status'),
U(r'/calculator', CalculatorHandler, name='index.calculator'),
]
static_path = os.path.join(
os.path.dirname(__file__), 'static'
)
template_path = os.path.join(
os.path.dirname(__file__), 'template'
)
ui_modules = {
'Form': FormUIModule,
}
super(Application, self).__init__(
handlers,
static_path=static_path,
template_path=template_path,
login_url='/admin/login',
ui_modules=ui_modules,
**kwargs
)
def job_task():
if self.is_maintenance_in_progress():
return
model.Item.release_old(autoqueue_only=True)
model.Budget.calculate_budgets()
job_task()
self._job_timer = tornado.ioloop.PeriodicCallback(
job_task,
60 * 1000
)
self._job_timer.start()
def clean_error_reports():
if self.is_maintenance_in_progress():
return
enabled = GlobalSetting.get_value(
GlobalSetting.AUTO_DELETE_ERROR_REPORTS)
if enabled:
ErrorReport.delete_orphaned()
clean_error_reports()
self._clean_error_reports_timer = tornado.ioloop.PeriodicCallback(
clean_error_reports,
300 * 1000
)
self._clean_error_reports_timer.start()
def checkout_item(self, username, ip_address=None, version=-1, client_version=-1):
if model.BlockedUser.is_username_blocked(username, ip_address):
raise UserIsBanned()
return model.checkout_item(username, ip_address, version, client_version)
def checkin_item(self, item_id, tamper_key, results):
return model.checkin_item(item_id, tamper_key, results)
def report_error(self, item_id, tamper_key, message):
model.report_error(item_id, tamper_key, message)
def is_maintenance_in_progress(self):
sentinel_path = self.settings.get('maintenance_sentinel')
return sentinel_path and os.path.exists(sentinel_path)
class IndexHandler(BaseHandler):
def get(self):
lifetime_list = [
(username, found, scanned)
for username, (found, scanned)
in Stats.instance.get_lifetime().items()
]
lifetime_list = sorted(lifetime_list, key=lambda item: item[2],
reverse=True)
stats = {
'global': Stats.instance.get_global(),
'lifetime': lifetime_list[:300],
'live': Stats.instance.get_live(),
}
self.render('index.html', stats=stats)
class StatusHandler(BaseHandler):
GIT_HASH = model.get_git_hash()
def get(self):
        projects = [
            model.Project.get_plain(name)
            for name in model.Project.all_project_names()]
project_stats = Stats.instance.get_project()
self.render('status.html', projects=projects, services=registry,
project_stats=project_stats,
git_hash=self.GIT_HASH)
class CalculatorHandler(BaseHandler):
def get_current_user(self):
pass
def _show_maintenance_page(self):
pass
def get(self):
form = CalculatorForm(self.request.arguments)
message = None
convert_direction = self.get_argument('convert', None)
if convert_direction and form.validate():
try:
if convert_direction == 'up':
source_number = self.get_argument('number_2')
source_alphabet = self.get_argument('alphabet_2')
target_alphabet = self.get_argument('alphabet_1')
num = str_to_int(source_number, source_alphabet)
form.number_1.data = int_to_str(num, target_alphabet)
else:
source_number = self.get_argument('number_1')
source_alphabet = self.get_argument('alphabet_1')
target_alphabet = self.get_argument('alphabet_2')
num = str_to_int(source_number, source_alphabet)
form.number_2.data = int_to_str(num, target_alphabet)
except ValueError as error:
message = str(error)
self.render('calculator.html', form=form, message=message)
| true
| true
|
1c40251801215384e7fc1e986dec8972ef669060
| 4,347
|
py
|
Python
|
tests/scripts/thread-cert/Cert_9_2_14_PanIdQuery.py
|
yuzhyang/openthread
|
38f206c6708d8fc7eae21db6ff3e3a50a2053b58
|
[
"BSD-3-Clause"
] | 1
|
2018-09-25T15:27:26.000Z
|
2018-09-25T15:27:26.000Z
|
tests/scripts/thread-cert/Cert_9_2_14_PanIdQuery.py
|
yuzhyang/openthread
|
38f206c6708d8fc7eae21db6ff3e3a50a2053b58
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/thread-cert/Cert_9_2_14_PanIdQuery.py
|
yuzhyang/openthread
|
38f206c6708d8fc7eae21db6ff3e3a50a2053b58
|
[
"BSD-3-Clause"
] | 1
|
2019-08-03T17:35:08.000Z
|
2019-08-03T17:35:08.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import config
import node
COMMISSIONER = 1
LEADER1 = 2
ROUTER1 = 3
LEADER2 = 4
class Cert_9_2_14_PanIdQuery(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1,5):
self.nodes[i] = node.Node(i, simulator=self.simulator)
self.nodes[COMMISSIONER].set_panid(0xface)
self.nodes[COMMISSIONER].set_mode('rsdn')
self.nodes[COMMISSIONER].add_whitelist(self.nodes[LEADER1].get_addr64())
self.nodes[COMMISSIONER].enable_whitelist()
self.nodes[COMMISSIONER].set_router_selection_jitter(1)
self.nodes[LEADER1].set_panid(0xface)
self.nodes[LEADER1].set_mode('rsdn')
self.nodes[LEADER1].add_whitelist(self.nodes[COMMISSIONER].get_addr64())
self.nodes[LEADER1].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER1].enable_whitelist()
self.nodes[ROUTER1].set_panid(0xface)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER1].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER2].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[LEADER2].set_panid(0xdead)
self.nodes[LEADER2].set_mode('rsdn')
self.nodes[LEADER2].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER2].enable_whitelist()
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
del self.simulator
def test(self):
self.nodes[LEADER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER1].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(3)
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[LEADER2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER2].get_state(), 'leader')
ipaddrs = self.nodes[ROUTER1].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
break
self.nodes[COMMISSIONER].panid_query(0xdead, 0xffffffff, ipaddr)
self.nodes[COMMISSIONER].panid_query(0xdead, 0xffffffff, 'ff33:0040:fdde:ad00:beef:0:0:1')
self.assertTrue(self.nodes[COMMISSIONER].ping(ipaddr))
if __name__ == '__main__':
unittest.main()
| 39.162162
| 98
| 0.701173
|
import time
import unittest
import config
import node
COMMISSIONER = 1
LEADER1 = 2
ROUTER1 = 3
LEADER2 = 4
class Cert_9_2_14_PanIdQuery(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1,5):
self.nodes[i] = node.Node(i, simulator=self.simulator)
self.nodes[COMMISSIONER].set_panid(0xface)
self.nodes[COMMISSIONER].set_mode('rsdn')
self.nodes[COMMISSIONER].add_whitelist(self.nodes[LEADER1].get_addr64())
self.nodes[COMMISSIONER].enable_whitelist()
self.nodes[COMMISSIONER].set_router_selection_jitter(1)
self.nodes[LEADER1].set_panid(0xface)
self.nodes[LEADER1].set_mode('rsdn')
self.nodes[LEADER1].add_whitelist(self.nodes[COMMISSIONER].get_addr64())
self.nodes[LEADER1].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER1].enable_whitelist()
self.nodes[ROUTER1].set_panid(0xface)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER1].get_addr64())
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER2].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
self.nodes[LEADER2].set_panid(0xdead)
self.nodes[LEADER2].set_mode('rsdn')
self.nodes[LEADER2].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER2].enable_whitelist()
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
del self.simulator
def test(self):
self.nodes[LEADER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER1].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(3)
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[LEADER2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER2].get_state(), 'leader')
ipaddrs = self.nodes[ROUTER1].get_addrs()
for ipaddr in ipaddrs:
if ipaddr[0:4] != 'fe80':
break
self.nodes[COMMISSIONER].panid_query(0xdead, 0xffffffff, ipaddr)
self.nodes[COMMISSIONER].panid_query(0xdead, 0xffffffff, 'ff33:0040:fdde:ad00:beef:0:0:1')
self.assertTrue(self.nodes[COMMISSIONER].ping(ipaddr))
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c4027d3d9397504f77f1fd2e9f27861c20ffbc9
| 14,780
|
py
|
Python
|
tensorflow_data_validation/statistics/generators/image_stats_generator_test.py
|
brills/data-validation
|
4f8a5d12b3d5db7383ae53d5fe184af1d781449a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_data_validation/statistics/generators/image_stats_generator_test.py
|
brills/data-validation
|
4f8a5d12b3d5db7383ae53d5fe184af1d781449a
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_data_validation/statistics/generators/image_stats_generator_test.py
|
brills/data-validation
|
4f8a5d12b3d5db7383ae53d5fe184af1d781449a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image statistics generator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import pickle
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pyarrow as pa
import tensorflow as tf
from tensorflow_data_validation.statistics.generators import image_stats_generator
from tensorflow_data_validation.utils import test_util
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import statistics_pb2
class FakeImageDecoder(image_stats_generator.ImageDecoderInterface):
"""Fake ImageDecoderInterface implementation for testing."""
@staticmethod
def encode_image_metadata(image_format, image_height, image_width):
image_metadata = {
'format': image_format,
'height': image_height,
'width': image_width
}
return json.dumps(image_metadata)
def get_formats(self, value_list):
return np.array([json.loads(value)['format'] for value in value_list],
dtype=np.object)
def get_sizes(self, value_list):
loaded_metadata = [json.loads(value) for value in value_list]
return np.array([[meta['height'], meta['width']]
for meta in loaded_metadata])
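# Example (illustrative): FakeImageDecoder.encode_image_metadata('PNG', 2, 4)
# produces the JSON string '{"format": "PNG", "height": 2, "width": 4}', which
# get_formats() and get_sizes() parse back into format strings and
# [height, width] pairs for the generator under test.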
class ImageStatsGeneratorTest(test_util.CombinerFeatureStatsGeneratorTest,
parameterized.TestCase):
@parameterized.named_parameters(
('EmptyList', []), # Line-break comment for readability.
('EmptyBatch', [pa.Column.from_array('feature', pa.array([]))]),
('NumericalShouldInvalidateImageStats', [
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('TIFF', 5, 1),
FakeImageDecoder.encode_image_metadata('JPEG', 1, 1),
FakeImageDecoder.encode_image_metadata('TIFF', 3, 7),
]
])),
pa.Column.from_array('feature', pa.array([[1]])),
]))
def test_cases_with_no_image_stats(self, batches):
"""Test cases that should not generate image statistics."""
image_decoder = FakeImageDecoder()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator,
statistics_pb2.FeatureNameStatistics())
def test_image_stats_generator_with_missing_feature(self):
"""Test with missing values for a batch."""
batches = [
pa.Column.from_array('feature', pa.array([])),
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('JPEG', 10, 1),
]
])),
]
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'JPEG'
sample_count: 1
}
}
}
custom_stats {
name: 'image_max_width'
num: 1.0
}
custom_stats {
name: 'image_max_height'
num: 10.0
}""", statistics_pb2.FeatureNameStatistics())
image_decoder = FakeImageDecoder()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_image_stats_generator_values_threshold_check(self):
"""Check values_threshold with a feature that is all images."""
batches = [
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('PNG', 2, 4),
FakeImageDecoder.encode_image_metadata('JPEG', 4, 2),
],
[
FakeImageDecoder.encode_image_metadata('TIFF', 5, 1),
FakeImageDecoder.encode_image_metadata('JPEG', -1, -1),
FakeImageDecoder.encode_image_metadata('TIFF', 3, 7)
]
])),
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('GIF', 2, 1),
]
])),
]
# With values_threshold = 7 statistics should not be generated.
image_decoder = FakeImageDecoder()
expected_result = statistics_pb2.FeatureNameStatistics()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
values_threshold=7,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
# With values_threshold = 6 statistics should be generated.
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'GIF'
sample_count: 1
}
buckets {
label: 'JPEG'
sample_count: 2
}
buckets {
label: 'PNG'
sample_count: 1
}
buckets {
label: 'TIFF'
sample_count: 2
}
}
}
custom_stats {
name: 'image_max_width'
num: 7.0
}
custom_stats {
name: 'image_max_height'
num: 5.0
}
""", statistics_pb2.FeatureNameStatistics())
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
values_threshold=6,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_image_stats_generator_check_is_image_ratio(self):
"""Check is_image_ratio with a feature that has partially images."""
# The image ratio is: 0.83
batches = [
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('PNG', 2, 4),
FakeImageDecoder.encode_image_metadata('JPEG', 4, 2),
],
[
FakeImageDecoder.encode_image_metadata('TIFF', 5, 1),
FakeImageDecoder.encode_image_metadata('', -1, -1),
FakeImageDecoder.encode_image_metadata('TIFF', 3, 7)
]
])),
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('GIF', 2, 1),
]
])),
]
    # For image_ratio_threshold=0.85 we do not expect stats.
expected_result = statistics_pb2.FeatureNameStatistics()
image_decoder = FakeImageDecoder()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
is_image_ratio_threshold=0.85,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
# For image_ratio_threshold=0.8 we expect stats.
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'UNKNOWN'
sample_count: 1
}
buckets {
label: 'GIF'
sample_count: 1
}
buckets {
label: 'JPEG'
sample_count: 1
}
buckets {
label: 'PNG'
sample_count: 1
}
buckets {
label: 'TIFF'
sample_count: 2
}
}
}
custom_stats {
name: 'image_max_width'
num: 7.0
}
custom_stats {
name: 'image_max_height'
num: 5.0
}
""", statistics_pb2.FeatureNameStatistics())
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
is_image_ratio_threshold=0.8,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_image_stats_generator_disable_size_stats(self):
"""Test the enable_size_stats_option."""
# Identical input to test_image_stats_generator_check_is_image_ratio
batches = [
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('PNG', 2, 4),
FakeImageDecoder.encode_image_metadata('JPEG', 4, 2),
],
[
FakeImageDecoder.encode_image_metadata('TIFF', 5, 1),
FakeImageDecoder.encode_image_metadata('', -1, -1),
FakeImageDecoder.encode_image_metadata('TIFF', 3, 7)
]
])),
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('GIF', 2, 1),
]
])),
]
# Stats should be identical but without stats for image size.
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'UNKNOWN'
sample_count: 1
}
buckets {
label: 'GIF'
sample_count: 1
}
buckets {
label: 'JPEG'
sample_count: 1
}
buckets {
label: 'PNG'
sample_count: 1
}
buckets {
label: 'TIFF'
sample_count: 2
}
}
}
""", statistics_pb2.FeatureNameStatistics())
image_decoder = FakeImageDecoder()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
is_image_ratio_threshold=0.8,
values_threshold=1,
enable_size_stats=False)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def _read_file(filepath):
"""Helper method for reading a file in binary mode."""
f = tf.gfile.Open(filepath, mode='rb')
return f.read()
class ImageStatsGeneratorRealImageTest(
test_util.CombinerFeatureStatsGeneratorTest):
def test_image_stats_generator_real_image(self):
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
batches = [
pa.Column.from_array(
'feature',
pa.array([
[
_read_file(os.path.join(test_data_dir, 'image1.gif')),
_read_file(os.path.join(test_data_dir, 'image2.png')),
_read_file(os.path.join(test_data_dir, 'not_a_image.abc'))
],
[
_read_file(os.path.join(test_data_dir, 'image3.bmp')),
b'not_a_image'
]
])),
pa.Column.from_array(
'feature',
pa.array([
[
_read_file(os.path.join(test_data_dir, 'image4.png')),
]
])),
]
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'UNKNOWN'
sample_count: 2
}
buckets {
label: 'bmp'
sample_count: 1
}
buckets {
label: 'gif'
sample_count: 1
}
buckets {
label: 'png'
sample_count: 2
}
}
}
custom_stats {
name: 'image_max_width'
num: 51.0
}
custom_stats {
name: 'image_max_height'
num: 26.0
}
""", statistics_pb2.FeatureNameStatistics())
generator = image_stats_generator.ImageStatsGenerator(
is_image_ratio_threshold=0.6,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_image_stats_generator_pickle_success(self):
"""Ensure that decoder and generator implementations are pickle-able."""
image_decoder = image_stats_generator.TfImageDecoder()
pickle.dumps(image_decoder)
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
is_image_ratio_threshold=0.6,
values_threshold=1)
pickle.dumps(generator)
if __name__ == '__main__':
absltest.main()
| 33.438914
| 82
| 0.542355
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import pickle
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pyarrow as pa
import tensorflow as tf
from tensorflow_data_validation.statistics.generators import image_stats_generator
from tensorflow_data_validation.utils import test_util
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import statistics_pb2
class FakeImageDecoder(image_stats_generator.ImageDecoderInterface):
@staticmethod
def encode_image_metadata(image_format, image_height, image_width):
image_metadata = {
'format': image_format,
'height': image_height,
'width': image_width
}
return json.dumps(image_metadata)
def get_formats(self, value_list):
return np.array([json.loads(value)['format'] for value in value_list],
dtype=np.object)
def get_sizes(self, value_list):
loaded_metadata = [json.loads(value) for value in value_list]
return np.array([[meta['height'], meta['width']]
for meta in loaded_metadata])
class ImageStatsGeneratorTest(test_util.CombinerFeatureStatsGeneratorTest,
parameterized.TestCase):
@parameterized.named_parameters(
('EmptyList', []),
('EmptyBatch', [pa.Column.from_array('feature', pa.array([]))]),
('NumericalShouldInvalidateImageStats', [
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('TIFF', 5, 1),
FakeImageDecoder.encode_image_metadata('JPEG', 1, 1),
FakeImageDecoder.encode_image_metadata('TIFF', 3, 7),
]
])),
pa.Column.from_array('feature', pa.array([[1]])),
]))
def test_cases_with_no_image_stats(self, batches):
image_decoder = FakeImageDecoder()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator,
statistics_pb2.FeatureNameStatistics())
def test_image_stats_generator_with_missing_feature(self):
batches = [
pa.Column.from_array('feature', pa.array([])),
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('JPEG', 10, 1),
]
])),
]
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'JPEG'
sample_count: 1
}
}
}
custom_stats {
name: 'image_max_width'
num: 1.0
}
custom_stats {
name: 'image_max_height'
num: 10.0
}""", statistics_pb2.FeatureNameStatistics())
image_decoder = FakeImageDecoder()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_image_stats_generator_values_threshold_check(self):
batches = [
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('PNG', 2, 4),
FakeImageDecoder.encode_image_metadata('JPEG', 4, 2),
],
[
FakeImageDecoder.encode_image_metadata('TIFF', 5, 1),
FakeImageDecoder.encode_image_metadata('JPEG', -1, -1),
FakeImageDecoder.encode_image_metadata('TIFF', 3, 7)
]
])),
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('GIF', 2, 1),
]
])),
]
image_decoder = FakeImageDecoder()
expected_result = statistics_pb2.FeatureNameStatistics()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
values_threshold=7,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'GIF'
sample_count: 1
}
buckets {
label: 'JPEG'
sample_count: 2
}
buckets {
label: 'PNG'
sample_count: 1
}
buckets {
label: 'TIFF'
sample_count: 2
}
}
}
custom_stats {
name: 'image_max_width'
num: 7.0
}
custom_stats {
name: 'image_max_height'
num: 5.0
}
""", statistics_pb2.FeatureNameStatistics())
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
values_threshold=6,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_image_stats_generator_check_is_image_ratio(self):
batches = [
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('PNG', 2, 4),
FakeImageDecoder.encode_image_metadata('JPEG', 4, 2),
],
[
FakeImageDecoder.encode_image_metadata('TIFF', 5, 1),
FakeImageDecoder.encode_image_metadata('', -1, -1),
FakeImageDecoder.encode_image_metadata('TIFF', 3, 7)
]
])),
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('GIF', 2, 1),
]
])),
]
expected_result = statistics_pb2.FeatureNameStatistics()
image_decoder = FakeImageDecoder()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
is_image_ratio_threshold=0.85,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'UNKNOWN'
sample_count: 1
}
buckets {
label: 'GIF'
sample_count: 1
}
buckets {
label: 'JPEG'
sample_count: 1
}
buckets {
label: 'PNG'
sample_count: 1
}
buckets {
label: 'TIFF'
sample_count: 2
}
}
}
custom_stats {
name: 'image_max_width'
num: 7.0
}
custom_stats {
name: 'image_max_height'
num: 5.0
}
""", statistics_pb2.FeatureNameStatistics())
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
is_image_ratio_threshold=0.8,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_image_stats_generator_disable_size_stats(self):
batches = [
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('PNG', 2, 4),
FakeImageDecoder.encode_image_metadata('JPEG', 4, 2),
],
[
FakeImageDecoder.encode_image_metadata('TIFF', 5, 1),
FakeImageDecoder.encode_image_metadata('', -1, -1),
FakeImageDecoder.encode_image_metadata('TIFF', 3, 7)
]
])),
pa.Column.from_array(
'feature',
pa.array([
[
FakeImageDecoder.encode_image_metadata('GIF', 2, 1),
]
])),
]
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'UNKNOWN'
sample_count: 1
}
buckets {
label: 'GIF'
sample_count: 1
}
buckets {
label: 'JPEG'
sample_count: 1
}
buckets {
label: 'PNG'
sample_count: 1
}
buckets {
label: 'TIFF'
sample_count: 2
}
}
}
""", statistics_pb2.FeatureNameStatistics())
image_decoder = FakeImageDecoder()
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
is_image_ratio_threshold=0.8,
values_threshold=1,
enable_size_stats=False)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def _read_file(filepath):
f = tf.gfile.Open(filepath, mode='rb')
return f.read()
class ImageStatsGeneratorRealImageTest(
test_util.CombinerFeatureStatsGeneratorTest):
def test_image_stats_generator_real_image(self):
test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')
batches = [
pa.Column.from_array(
'feature',
pa.array([
[
_read_file(os.path.join(test_data_dir, 'image1.gif')),
_read_file(os.path.join(test_data_dir, 'image2.png')),
_read_file(os.path.join(test_data_dir, 'not_a_image.abc'))
],
[
_read_file(os.path.join(test_data_dir, 'image3.bmp')),
b'not_a_image'
]
])),
pa.Column.from_array(
'feature',
pa.array([
[
_read_file(os.path.join(test_data_dir, 'image4.png')),
]
])),
]
expected_result = text_format.Parse(
"""
custom_stats {
name: 'domain_info'
str: 'image_domain {}'
}
custom_stats {
name: 'image_format_histogram'
rank_histogram {
buckets {
label: 'UNKNOWN'
sample_count: 2
}
buckets {
label: 'bmp'
sample_count: 1
}
buckets {
label: 'gif'
sample_count: 1
}
buckets {
label: 'png'
sample_count: 2
}
}
}
custom_stats {
name: 'image_max_width'
num: 51.0
}
custom_stats {
name: 'image_max_height'
num: 26.0
}
""", statistics_pb2.FeatureNameStatistics())
generator = image_stats_generator.ImageStatsGenerator(
is_image_ratio_threshold=0.6,
values_threshold=1,
enable_size_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_image_stats_generator_pickle_success(self):
image_decoder = image_stats_generator.TfImageDecoder()
pickle.dumps(image_decoder)
generator = image_stats_generator.ImageStatsGenerator(
image_decoder=image_decoder,
is_image_ratio_threshold=0.6,
values_threshold=1)
pickle.dumps(generator)
if __name__ == '__main__':
absltest.main()
| true
| true
|
1c4028347673966a10a131e5c8132d694a7847cb
| 814
|
py
|
Python
|
src/algoritmos-ordenacion/propuestos/strings.py
|
GokoshiJr/algoritmos2-py
|
106dcbed31739309c193a77c671522aac17f6e45
|
[
"MIT"
] | null | null | null |
src/algoritmos-ordenacion/propuestos/strings.py
|
GokoshiJr/algoritmos2-py
|
106dcbed31739309c193a77c671522aac17f6e45
|
[
"MIT"
] | null | null | null |
src/algoritmos-ordenacion/propuestos/strings.py
|
GokoshiJr/algoritmos2-py
|
106dcbed31739309c193a77c671522aac17f6e45
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
def seleccion(stringArray: list) -> list:
    # Selection sort: compares words case-insensitively.
    for i in range (0, len(stringArray) - 1):
        minimo = i
        # Find the index of the smallest remaining word.
        for j in range (i + 1, len(stringArray)):
            if stringArray[j].lower() < stringArray[minimo].lower():
                minimo = j
        # Swap it into position i.
        temporal = stringArray[i]
        stringArray[i] = stringArray[minimo]
        stringArray[minimo] = temporal
    return stringArray
def mostrarStrings(stringArray: list):
for palabra in stringArray:
print('\t{0}'.format(palabra.lower()))
if __name__ == "__main__":
try:
palabras = ['luNES','marTes','Miercoles','Jueves','VIERNES','sabado','domingo']
mostrarStrings(seleccion(palabras))
    except Exception:
        print('Error...')
| 32.56
| 96
| 0.563882
|
def seleccion(stringArray: list) -> list:
for i in range (0, len(stringArray) - 1):
minimo = i
for j in range (i + 1, len(stringArray)):
if stringArray[j].lower() < stringArray[minimo].lower():
minimo = j
temporal = stringArray[i]
stringArray[i] = stringArray[minimo]
stringArray[minimo] = temporal
return stringArray
def mostrarStrings(stringArray: list):
for palabra in stringArray:
print('\t{0}'.format(palabra.lower()))
if __name__ == "__main__":
try:
palabras = ['luNES','marTes','Miercoles','Jueves','VIERNES','sabado','domingo']
mostrarStrings(seleccion(palabras))
    except Exception:
        print('Error...')
| true
| true
|
1c40292a31e03b7306247700a6e10cd93b2c1eb6
| 4,792
|
py
|
Python
|
myhealthapp/views.py
|
SORARAwo4649/HamatteruProject
|
72d5d1a4fc3fa4ef6d09048b6852256500feed84
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
myhealthapp/views.py
|
SORARAwo4649/HamatteruProject
|
72d5d1a4fc3fa4ef6d09048b6852256500feed84
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
myhealthapp/views.py
|
SORARAwo4649/HamatteruProject
|
72d5d1a4fc3fa4ef6d09048b6852256500feed84
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect, resolve_url
from django.urls import reverse_lazy
from django.views.generic import DetailView, UpdateView, CreateView, ListView, \
DeleteView
from .forms import ListForm, StaffCommentForm
from .models import List
from .writer import g_spread
@login_required
def index(request):
return render(request, "myhealthapp/index.html")
def home(request):
return render(request, "myhealthapp/home.html")
class ListCreateView(LoginRequiredMixin, CreateView):
model = List
template_name = "myhealthapp/create.html"
form_class = ListForm
def post(self, request, *args, **kwargs):
print("POSTPOST")
print(request.POST)
"""
フォームを手作業で取り出す方法
https://docs.djangoproject.com/ja/3.1/topics/forms/
Djangoのフォームにモデルのレコードを初期値として入れたい
https://teratail.com/questions/259820
モデルの操作
https://opendata-web.site/blog/entry/22/
フォームAPIのリファレンス
https://docs.djangoproject.com/en/3.1/ref/forms/api/
:param request:
:param args:
:param kwargs:
:return:
"""
form = ListForm(request.POST)
# print(form)
print(form.is_valid())
print(form.errors)
if form.is_valid() is False:
for ele in form:
print("******************************")
print(ele)
if form.is_valid():
print("通った")
form.instance.created_by = self.request.user
instance_form = form.save()
            # Calculate the sleep duration.
            # Get the time values entered in the form.
go_to_bed_time = form.cleaned_data["go_to_bed"]
wake_up_time = form.cleaned_data["wakeup"]
sleep_time_time = wake_up_time - go_to_bed_time
            # Helper that converts a timedelta into hours and minutes.
def timedelta_to_hm(td):
sec = td.total_seconds()
hh = int(sec // 3600)
mm = int(sec % 3600 // 60)
return hh, mm
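            # Worked example (illustrative): a 7 h 45 min sleep is a
            # 27900 s delta, so hh = 27900 // 3600 = 7 and
            # mm = 27900 % 3600 // 60 = 45.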
sleep_time_h, sleep_time_m = timedelta_to_hm(sleep_time_time)
            # Get the ID of the saved record.
instance_id = str(instance_form.id)
            # Fetch the record to update by its ID.
insert_sleep_time = \
List.objects.filter(id=instance_id).first()
            # Convert the computed sleep duration to a string, then update.
insert_sleep_time.sleep_time = str(f'{sleep_time_h}:{sleep_time_m}')
            # Save it to the database.
insert_sleep_time.save()
# return redirect("/myhealthapp/lists/", {"form": form})
return redirect("myhealthapp:lists_list")
else:
print("エラー文")
print(form.errors)
if form.errors:
for error in form.errors:
print(error)
print("エラーが表示されるはず")
# print(form.non_field_errors())
# return render(request, "myhealthapp/lists/form_failed.html", {"form": form})
return render(
request,
"myhealthapp/create.html",
{"form": form}
)
class ListListView(LoginRequiredMixin, ListView):
model = List
template_name = "myhealthapp/list.html"
    # Show the list ordered by date.
def get_queryset(self):
current_user = self.request.user
print(current_user)
        # Superusers and staff see every record in the list.
if current_user.is_superuser or current_user.is_staff:
return List.objects.all().order_by("date").reverse()
        # Regular users see only their own records.
else:
return List.objects.filter(created_by=current_user.id).order_by("date").reverse()
class ListDetailView(LoginRequiredMixin, DetailView):
model = List
template_name = "myhealthapp/detail.html"
class ListUpdateView(LoginRequiredMixin, UpdateView):
model = List
template_name = "myhealthapp/update.html"
form_class = ListForm
def get_success_url(self):
return resolve_url('myhealthapp:lists_detail', pk=self.kwargs['pk'])
class StaffCommentView(LoginRequiredMixin, UpdateView):
model = List
template_name = "myhealthapp/staff_comments.html"
form_class = StaffCommentForm
def get_success_url(self):
return resolve_url("myhealthapp:lists_detail", pk=self.kwargs["pk"])
class ListDeleteView(LoginRequiredMixin, DeleteView):
model = List
template_name = "myhealthapp/delete.html"
form_class = ListForm
success_url = reverse_lazy("myhealthapp:lists_list")
@login_required
def form_failed(request):
return render(request, "/myhealthapp/form_failed.html")
| 29.580247
| 93
| 0.625626
|
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect, resolve_url
from django.urls import reverse_lazy
from django.views.generic import DetailView, UpdateView, CreateView, ListView, \
DeleteView
from .forms import ListForm, StaffCommentForm
from .models import List
from .writer import g_spread
@login_required
def index(request):
return render(request, "myhealthapp/index.html")
def home(request):
return render(request, "myhealthapp/home.html")
class ListCreateView(LoginRequiredMixin, CreateView):
model = List
template_name = "myhealthapp/create.html"
form_class = ListForm
def post(self, request, *args, **kwargs):
print("POSTPOST")
print(request.POST)
form = ListForm(request.POST)
print(form.is_valid())
print(form.errors)
if form.is_valid() is False:
for ele in form:
print("******************************")
print(ele)
if form.is_valid():
print("通った")
form.instance.created_by = self.request.user
instance_form = form.save()
go_to_bed_time = form.cleaned_data["go_to_bed"]
wake_up_time = form.cleaned_data["wakeup"]
sleep_time_time = wake_up_time - go_to_bed_time
def timedelta_to_hm(td):
sec = td.total_seconds()
hh = int(sec // 3600)
mm = int(sec % 3600 // 60)
return hh, mm
sleep_time_h, sleep_time_m = timedelta_to_hm(sleep_time_time)
instance_id = str(instance_form.id)
insert_sleep_time = \
List.objects.filter(id=instance_id).first()
insert_sleep_time.sleep_time = str(f'{sleep_time_h}:{sleep_time_m}')
insert_sleep_time.save()
return redirect("myhealthapp:lists_list")
else:
print("エラー文")
print(form.errors)
if form.errors:
for error in form.errors:
print(error)
print("エラーが表示されるはず")
return render(
request,
"myhealthapp/create.html",
{"form": form}
)
class ListListView(LoginRequiredMixin, ListView):
model = List
template_name = "myhealthapp/list.html"
def get_queryset(self):
current_user = self.request.user
print(current_user)
if current_user.is_superuser or current_user.is_staff:
return List.objects.all().order_by("date").reverse()
else:
return List.objects.filter(created_by=current_user.id).order_by("date").reverse()
class ListDetailView(LoginRequiredMixin, DetailView):
model = List
template_name = "myhealthapp/detail.html"
class ListUpdateView(LoginRequiredMixin, UpdateView):
model = List
template_name = "myhealthapp/update.html"
form_class = ListForm
def get_success_url(self):
return resolve_url('myhealthapp:lists_detail', pk=self.kwargs['pk'])
class StaffCommentView(LoginRequiredMixin, UpdateView):
model = List
template_name = "myhealthapp/staff_comments.html"
form_class = StaffCommentForm
def get_success_url(self):
return resolve_url("myhealthapp:lists_detail", pk=self.kwargs["pk"])
class ListDeleteView(LoginRequiredMixin, DeleteView):
model = List
template_name = "myhealthapp/delete.html"
form_class = ListForm
success_url = reverse_lazy("myhealthapp:lists_list")
@login_required
def form_failed(request):
return render(request, "/myhealthapp/form_failed.html")
| true
| true
|
1c4029d47a1c7c126aba0f3c556081df61aa58d5
| 1,020
|
py
|
Python
|
desktop/core/ext-py/odfpy-1.4.1/examples/loadsave.py
|
e11it/hue-1
|
436704c40b5fa6ffd30bd972bf50ffeec738d091
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/odfpy-1.4.1/examples/loadsave.py
|
e11it/hue-1
|
436704c40b5fa6ffd30bd972bf50ffeec738d091
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/odfpy-1.4.1/examples/loadsave.py
|
e11it/hue-1
|
436704c40b5fa6ffd30bd972bf50ffeec738d091
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Søren Roug, European Environment Agency
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
#
# This script simply loads a document into memory and saves it again.
# It takes the filename as its argument.
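# Example (illustrative): `python loadsave.py report.odt` saves a copy
# to `report-bak.odt` alongside the original.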
import sys
from odf.opendocument import load
infile = sys.argv[1]
doc = load(infile)
outfile = infile[:-4] + "-bak" + infile[-4:]
doc.save(outfile)
| 32.903226
| 80
| 0.742157
|
import sys
from odf.opendocument import load
infile = sys.argv[1]
doc = load(infile)
outfile = infile[:-4] + "-bak" + infile[-4:]
doc.save(outfile)
| true
| true
|
1c402aa994bdd161da5cc185a9ce804ea220c003
| 3,669
|
py
|
Python
|
bindings/python/ensmallen/datasets/string/candidatuswolfebacteriabacteriumrifcsphigho201full4822.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5
|
2021-02-17T00:44:45.000Z
|
2021-08-09T16:41:47.000Z
|
bindings/python/ensmallen/datasets/string/candidatuswolfebacteriabacteriumrifcsphigho201full4822.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18
|
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/candidatuswolfebacteriabacteriumrifcsphigho201full4822.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3
|
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Candidatus Wolfebacteria bacterium RIFCSPHIGHO2_01_FULL_48_22.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def CandidatusWolfebacteriaBacteriumRifcsphigho201Full4822(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Candidatus Wolfebacteria bacterium RIFCSPHIGHO2_01_FULL_48_22 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
    load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Candidatus Wolfebacteria bacterium RIFCSPHIGHO2_01_FULL_48_22 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="CandidatusWolfebacteriaBacteriumRifcsphigho201Full4822",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
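# Usage sketch (illustrative; assumes the ensmallen package is installed):
#
#     from ensmallen.datasets.string import (
#         CandidatusWolfebacteriaBacteriumRifcsphigho201Full4822
#     )
#     graph = CandidatusWolfebacteriaBacteriumRifcsphigho201Full4822(
#         directed=False,
#         version="links.v11.5",
#     )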
| 34.942857
| 223
| 0.693377
|
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def CandidatusWolfebacteriaBacteriumRifcsphigho201Full4822(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="CandidatusWolfebacteriaBacteriumRifcsphigho201Full4822",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true
| true
|
1c402c1157900ff1ad5c6c296a409c9e8fb96d2b
| 538
|
py
|
Python
|
contrail-openstack/hooks/charmhelpers/core/kernel_factory/centos.py
|
exsdev0/tf-charms
|
a7a3cfc7463332e5bc0b335c0304dace1025f18c
|
[
"Apache-2.0"
] | 19
|
2016-04-17T04:00:53.000Z
|
2020-05-06T14:18:16.000Z
|
contrail-openstack/hooks/charmhelpers/core/kernel_factory/centos.py
|
exsdev0/tf-charms
|
a7a3cfc7463332e5bc0b335c0304dace1025f18c
|
[
"Apache-2.0"
] | 313
|
2017-09-15T13:22:58.000Z
|
2022-02-25T17:55:01.000Z
|
contrail-openstack/hooks/charmhelpers/core/kernel_factory/centos.py
|
exsdev0/tf-charms
|
a7a3cfc7463332e5bc0b335c0304dace1025f18c
|
[
"Apache-2.0"
] | 136
|
2017-09-19T13:37:33.000Z
|
2022-03-29T11:08:00.000Z
|
import subprocess
import os
def persistent_modprobe(module):
"""Load a kernel module and configure for auto-load on reboot."""
if not os.path.exists('/etc/rc.modules'):
open('/etc/rc.modules', 'a').close()
os.chmod('/etc/rc.modules', 0o111)  # octal 0o111 (execute bits), not decimal 111
with open('/etc/rc.modules', 'r+') as modules:
if module not in modules.read():
modules.write('modprobe %s\n' % module)
def update_initramfs(version='all'):
"""Updates an initramfs image."""
return subprocess.check_call(["dracut", "-f", version])
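A hedged usage sketch for the two helpers above (requires root on a CentOS host; the module name ip_vs is illustrative). Note that persistent_modprobe, as written, only records the module in /etc/rc.modules for the next boot rather than loading it immediately:

# Illustrative only; writes to /etc and shells out to dracut, so run as root.
persistent_modprobe('ip_vs')   # appended to /etc/rc.modules, loaded at boot
update_initramfs()             # rebuild all initramfs images via dracut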
| 29.888889
| 69
| 0.635688
|
import subprocess
import os
def persistent_modprobe(module):
if not os.path.exists('/etc/rc.modules'):
open('/etc/rc.modules', 'a').close()
os.chmod('/etc/rc.modules', 0o111)
with open('/etc/rc.modules', 'r+') as modules:
if module not in modules.read():
modules.write('modprobe %s\n' % module)
def update_initramfs(version='all'):
return subprocess.check_call(["dracut", "-f", version])
| true
| true
|
1c402c625b8cb842ac8f5996a1b4775910c1cdc3
| 2,631
|
py
|
Python
|
exams/signals.py
|
Wassaf-Shahzad/micromasters
|
b1340a8c233499b1d8d22872a6bc1fe7f49fd323
|
[
"BSD-3-Clause"
] | 32
|
2016-03-25T01:03:13.000Z
|
2022-01-15T19:35:42.000Z
|
exams/signals.py
|
Wassaf-Shahzad/micromasters
|
b1340a8c233499b1d8d22872a6bc1fe7f49fd323
|
[
"BSD-3-Clause"
] | 4,858
|
2016-03-03T13:48:30.000Z
|
2022-03-29T22:09:51.000Z
|
exams/signals.py
|
umarmughal824/micromasters
|
ea92d3bcea9be4601150fc497302ddacc1161622
|
[
"BSD-3-Clause"
] | 20
|
2016-08-18T22:07:44.000Z
|
2021-11-15T13:35:35.000Z
|
"""
Signals for exams
"""
import logging
from django.db import transaction
from django.db.models.signals import post_save
from django.dispatch import receiver
from courses.models import CourseRun
from dashboard.models import CachedEnrollment
from dashboard.utils import get_mmtrack
from ecommerce.models import Order
from exams.api import authorize_user_for_schedulable_exam_runs
from exams.models import (
ExamProfile,
ExamRun,
)
from exams.utils import is_eligible_for_exam
from grades.api import update_existing_combined_final_grade_for_exam_run
from grades.models import FinalGrade
log = logging.getLogger(__name__)
@receiver(post_save, sender=ExamRun, dispatch_uid="update_exam_run")
def update_exam_run(sender, instance, created, **kwargs): # pylint: disable=unused-argument
"""If we update an ExamRun, update ExamAuthorizations accordingly"""
if not created:
transaction.on_commit(lambda: update_existing_combined_final_grade_for_exam_run(instance))
@receiver(post_save, sender=FinalGrade, dispatch_uid="update_exam_authorization_final_grade")
def update_exam_authorization_final_grade(sender, instance, **kwargs): # pylint: disable=unused-argument
"""
Signal handler to trigger an exam profile and authorization for FinalGrade creation.
"""
authorize_user_for_schedulable_exam_runs(instance.user, instance.course_run)
@receiver(post_save, sender=Order, dispatch_uid="authorize_exams_order")
def update_exam_authorization_order(sender, instance, **kwargs): # pylint: disable=unused-argument
"""
Signal handler to trigger an exam profile and authorization for Order fulfillment.
"""
if not Order.is_fulfilled(instance.status):
return
paid_edx_course_keys = instance.line_set.values_list('course_key', flat=True)
for course_run in CourseRun.objects.filter(
edx_course_key__in=paid_edx_course_keys
).select_related('course__program'):
authorize_user_for_schedulable_exam_runs(instance.user, course_run)
@receiver(post_save, sender=CachedEnrollment, dispatch_uid="update_exam_authorization_cached_enrollment")
def update_exam_authorization_cached_enrollment(sender, instance, **kwargs): # pylint: disable=unused-argument
"""
Signal handler to trigger an exam profile when user enroll in a course.
"""
mmtrack = get_mmtrack(instance.user, instance.course_run.course.program)
if is_eligible_for_exam(mmtrack, instance.course_run):
# if user paid for a course then create his exam profile if it is not created yet.
ExamProfile.objects.get_or_create(profile=mmtrack.user.profile)
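The receivers above rely on Django's signal dispatch. For readers unfamiliar with the pattern, here is a self-contained toy sketch of connect/send-style dispatch (an illustration only, not Django's actual Signal implementation; all names are hypothetical):

class ToySignal:
    """Minimal stand-in for django.dispatch.Signal (illustrative only)."""

    def __init__(self):
        self._receivers = {}

    def connect(self, func, sender, dispatch_uid):
        # dispatch_uid de-duplicates registrations, as in the receivers above
        self._receivers[(sender, dispatch_uid)] = func

    def send(self, sender, **kwargs):
        for (registered_sender, _), func in self._receivers.items():
            if registered_sender is sender:
                func(sender=sender, **kwargs)

toy_post_save = ToySignal()

class DemoModel:
    pass

def on_saved(sender, instance, created, **kwargs):
    print('saved:', type(instance).__name__, 'created:', created)

toy_post_save.connect(on_saved, sender=DemoModel, dispatch_uid='demo')
toy_post_save.send(sender=DemoModel, instance=DemoModel(), created=True)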
| 38.130435
| 111
| 0.789434
|
import logging
from django.db import transaction
from django.db.models.signals import post_save
from django.dispatch import receiver
from courses.models import CourseRun
from dashboard.models import CachedEnrollment
from dashboard.utils import get_mmtrack
from ecommerce.models import Order
from exams.api import authorize_user_for_schedulable_exam_runs
from exams.models import (
ExamProfile,
ExamRun,
)
from exams.utils import is_eligible_for_exam
from grades.api import update_existing_combined_final_grade_for_exam_run
from grades.models import FinalGrade
log = logging.getLogger(__name__)
@receiver(post_save, sender=ExamRun, dispatch_uid="update_exam_run")
def update_exam_run(sender, instance, created, **kwargs):
if not created:
transaction.on_commit(lambda: update_existing_combined_final_grade_for_exam_run(instance))
@receiver(post_save, sender=FinalGrade, dispatch_uid="update_exam_authorization_final_grade")
def update_exam_authorization_final_grade(sender, instance, **kwargs):
authorize_user_for_schedulable_exam_runs(instance.user, instance.course_run)
@receiver(post_save, sender=Order, dispatch_uid="authorize_exams_order")
def update_exam_authorization_order(sender, instance, **kwargs):
if not Order.is_fulfilled(instance.status):
return
paid_edx_course_keys = instance.line_set.values_list('course_key', flat=True)
for course_run in CourseRun.objects.filter(
edx_course_key__in=paid_edx_course_keys
).select_related('course__program'):
authorize_user_for_schedulable_exam_runs(instance.user, course_run)
@receiver(post_save, sender=CachedEnrollment, dispatch_uid="update_exam_authorization_cached_enrollment")
def update_exam_authorization_cached_enrollment(sender, instance, **kwargs):
mmtrack = get_mmtrack(instance.user, instance.course_run.course.program)
if is_eligible_for_exam(mmtrack, instance.course_run):
ExamProfile.objects.get_or_create(profile=mmtrack.user.profile)
| true
| true
|
1c402c9cfa73e3ef59b9cbae5d064c0a06d1cbc7
| 22
|
py
|
Python
|
pineboolib/system_module/forms/__init__.py
|
juanjosepablos/pineboo
|
f6ce515aec6e0139821bb9c1d62536d9fb50dae4
|
[
"MIT"
] | 2
|
2017-12-10T23:06:16.000Z
|
2017-12-10T23:06:23.000Z
|
pineboolib/system_module/forms/__init__.py
|
Aulla/pineboo
|
3ad6412d365a6ad65c3bb2bdc03f5798d7c37004
|
[
"MIT"
] | 36
|
2017-11-05T21:13:47.000Z
|
2020-08-26T15:56:15.000Z
|
pineboolib/system_module/forms/__init__.py
|
Aulla/pineboo
|
3ad6412d365a6ad65c3bb2bdc03f5798d7c37004
|
[
"MIT"
] | 8
|
2017-11-05T15:56:31.000Z
|
2019-04-25T16:32:28.000Z
|
"""Forms packages."""
| 11
| 21
| 0.590909
| true
| true
|
|
1c402e150e2445a9b07e007e65b56c3dca9a80e4
| 25,773
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/__init__.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/__init__.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2018_10_01/models/__init__.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AdditionalCapabilities
from ._models_py3 import AdditionalUnattendContent
from ._models_py3 import ApiEntityReference
from ._models_py3 import ApiError
from ._models_py3 import ApiErrorBase
from ._models_py3 import AutomaticOSUpgradePolicy
from ._models_py3 import AutomaticOSUpgradeProperties
from ._models_py3 import AutomaticRepairsPolicy
from ._models_py3 import AvailabilitySet
from ._models_py3 import AvailabilitySetListResult
from ._models_py3 import AvailabilitySetUpdate
from ._models_py3 import BootDiagnostics
from ._models_py3 import BootDiagnosticsInstanceView
from ._models_py3 import Components1H8M3EpSchemasVirtualmachineidentityPropertiesUserassignedidentitiesAdditionalproperties
from ._models_py3 import ComponentsNj115SSchemasVirtualmachinescalesetidentityPropertiesUserassignedidentitiesAdditionalproperties
from ._models_py3 import ComputeOperationListResult
from ._models_py3 import ComputeOperationValue
from ._models_py3 import DataDisk
from ._models_py3 import DataDiskImage
from ._models_py3 import DiagnosticsProfile
from ._models_py3 import DiffDiskSettings
from ._models_py3 import DiskEncryptionSettings
from ._models_py3 import DiskInstanceView
from ._models_py3 import HardwareProfile
from ._models_py3 import Image
from ._models_py3 import ImageDataDisk
from ._models_py3 import ImageListResult
from ._models_py3 import ImageOSDisk
from ._models_py3 import ImageReference
from ._models_py3 import ImageStorageProfile
from ._models_py3 import ImageUpdate
from ._models_py3 import InnerError
from ._models_py3 import InstanceViewStatus
from ._models_py3 import KeyVaultKeyReference
from ._models_py3 import KeyVaultSecretReference
from ._models_py3 import LinuxConfiguration
from ._models_py3 import ListUsagesResult
from ._models_py3 import LogAnalyticsInputBase
from ._models_py3 import LogAnalyticsOperationResult
from ._models_py3 import LogAnalyticsOutput
from ._models_py3 import MaintenanceRedeployStatus
from ._models_py3 import ManagedDiskParameters
from ._models_py3 import NetworkInterfaceReference
from ._models_py3 import NetworkProfile
from ._models_py3 import OSDisk
from ._models_py3 import OSDiskImage
from ._models_py3 import OSProfile
from ._models_py3 import Plan
from ._models_py3 import ProximityPlacementGroup
from ._models_py3 import ProximityPlacementGroupListResult
from ._models_py3 import ProximityPlacementGroupUpdate
from ._models_py3 import PurchasePlan
from ._models_py3 import RecoveryWalkResponse
from ._models_py3 import RequestRateByIntervalInput
from ._models_py3 import Resource
from ._models_py3 import RollbackStatusInfo
from ._models_py3 import RollingUpgradePolicy
from ._models_py3 import RollingUpgradeProgressInfo
from ._models_py3 import RollingUpgradeRunningStatus
from ._models_py3 import RollingUpgradeStatusInfo
from ._models_py3 import RunCommandDocument
from ._models_py3 import RunCommandDocumentBase
from ._models_py3 import RunCommandInput
from ._models_py3 import RunCommandInputParameter
from ._models_py3 import RunCommandListResult
from ._models_py3 import RunCommandParameterDefinition
from ._models_py3 import RunCommandResult
from ._models_py3 import Sku
from ._models_py3 import SshConfiguration
from ._models_py3 import SshPublicKey
from ._models_py3 import StorageProfile
from ._models_py3 import SubResource
from ._models_py3 import SubResourceReadOnly
from ._models_py3 import ThrottledRequestsInput
from ._models_py3 import UpdateResource
from ._models_py3 import UpgradeOperationHistoricalStatusInfo
from ._models_py3 import UpgradeOperationHistoricalStatusInfoProperties
from ._models_py3 import UpgradeOperationHistoryStatus
from ._models_py3 import UpgradePolicy
from ._models_py3 import Usage
from ._models_py3 import UsageName
from ._models_py3 import VaultCertificate
from ._models_py3 import VaultSecretGroup
from ._models_py3 import VirtualHardDisk
from ._models_py3 import VirtualMachine
from ._models_py3 import VirtualMachineAgentInstanceView
from ._models_py3 import VirtualMachineCaptureParameters
from ._models_py3 import VirtualMachineCaptureResult
from ._models_py3 import VirtualMachineExtension
from ._models_py3 import VirtualMachineExtensionHandlerInstanceView
from ._models_py3 import VirtualMachineExtensionImage
from ._models_py3 import VirtualMachineExtensionInstanceView
from ._models_py3 import VirtualMachineExtensionUpdate
from ._models_py3 import VirtualMachineExtensionsListResult
from ._models_py3 import VirtualMachineHealthStatus
from ._models_py3 import VirtualMachineIdentity
from ._models_py3 import VirtualMachineImage
from ._models_py3 import VirtualMachineImageResource
from ._models_py3 import VirtualMachineInstanceView
from ._models_py3 import VirtualMachineListResult
from ._models_py3 import VirtualMachineReimageParameters
from ._models_py3 import VirtualMachineScaleSet
from ._models_py3 import VirtualMachineScaleSetDataDisk
from ._models_py3 import VirtualMachineScaleSetExtension
from ._models_py3 import VirtualMachineScaleSetExtensionListResult
from ._models_py3 import VirtualMachineScaleSetExtensionProfile
from ._models_py3 import VirtualMachineScaleSetIPConfiguration
from ._models_py3 import VirtualMachineScaleSetIdentity
from ._models_py3 import VirtualMachineScaleSetInstanceView
from ._models_py3 import VirtualMachineScaleSetInstanceViewStatusesSummary
from ._models_py3 import VirtualMachineScaleSetIpTag
from ._models_py3 import VirtualMachineScaleSetListOSUpgradeHistory
from ._models_py3 import VirtualMachineScaleSetListResult
from ._models_py3 import VirtualMachineScaleSetListSkusResult
from ._models_py3 import VirtualMachineScaleSetListWithLinkResult
from ._models_py3 import VirtualMachineScaleSetManagedDiskParameters
from ._models_py3 import VirtualMachineScaleSetNetworkConfiguration
from ._models_py3 import VirtualMachineScaleSetNetworkConfigurationDnsSettings
from ._models_py3 import VirtualMachineScaleSetNetworkProfile
from ._models_py3 import VirtualMachineScaleSetOSDisk
from ._models_py3 import VirtualMachineScaleSetOSProfile
from ._models_py3 import VirtualMachineScaleSetPublicIPAddressConfiguration
from ._models_py3 import VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
from ._models_py3 import VirtualMachineScaleSetReimageParameters
from ._models_py3 import VirtualMachineScaleSetSku
from ._models_py3 import VirtualMachineScaleSetSkuCapacity
from ._models_py3 import VirtualMachineScaleSetStorageProfile
from ._models_py3 import VirtualMachineScaleSetUpdate
from ._models_py3 import VirtualMachineScaleSetUpdateIPConfiguration
from ._models_py3 import VirtualMachineScaleSetUpdateNetworkConfiguration
from ._models_py3 import VirtualMachineScaleSetUpdateNetworkProfile
from ._models_py3 import VirtualMachineScaleSetUpdateOSDisk
from ._models_py3 import VirtualMachineScaleSetUpdateOSProfile
from ._models_py3 import VirtualMachineScaleSetUpdatePublicIPAddressConfiguration
from ._models_py3 import VirtualMachineScaleSetUpdateStorageProfile
from ._models_py3 import VirtualMachineScaleSetUpdateVMProfile
from ._models_py3 import VirtualMachineScaleSetVM
from ._models_py3 import VirtualMachineScaleSetVMExtensionsSummary
from ._models_py3 import VirtualMachineScaleSetVMInstanceIDs
from ._models_py3 import VirtualMachineScaleSetVMInstanceRequiredIDs
from ._models_py3 import VirtualMachineScaleSetVMInstanceView
from ._models_py3 import VirtualMachineScaleSetVMListResult
from ._models_py3 import VirtualMachineScaleSetVMProfile
from ._models_py3 import VirtualMachineScaleSetVMReimageParameters
from ._models_py3 import VirtualMachineSize
from ._models_py3 import VirtualMachineSizeListResult
from ._models_py3 import VirtualMachineStatusCodeCount
from ._models_py3 import VirtualMachineUpdate
from ._models_py3 import WinRMConfiguration
from ._models_py3 import WinRMListener
from ._models_py3 import WindowsConfiguration
except (SyntaxError, ImportError):
from ._models import AdditionalCapabilities # type: ignore
from ._models import AdditionalUnattendContent # type: ignore
from ._models import ApiEntityReference # type: ignore
from ._models import ApiError # type: ignore
from ._models import ApiErrorBase # type: ignore
from ._models import AutomaticOSUpgradePolicy # type: ignore
from ._models import AutomaticOSUpgradeProperties # type: ignore
from ._models import AutomaticRepairsPolicy # type: ignore
from ._models import AvailabilitySet # type: ignore
from ._models import AvailabilitySetListResult # type: ignore
from ._models import AvailabilitySetUpdate # type: ignore
from ._models import BootDiagnostics # type: ignore
from ._models import BootDiagnosticsInstanceView # type: ignore
from ._models import Components1H8M3EpSchemasVirtualmachineidentityPropertiesUserassignedidentitiesAdditionalproperties # type: ignore
from ._models import ComponentsNj115SSchemasVirtualmachinescalesetidentityPropertiesUserassignedidentitiesAdditionalproperties # type: ignore
from ._models import ComputeOperationListResult # type: ignore
from ._models import ComputeOperationValue # type: ignore
from ._models import DataDisk # type: ignore
from ._models import DataDiskImage # type: ignore
from ._models import DiagnosticsProfile # type: ignore
from ._models import DiffDiskSettings # type: ignore
from ._models import DiskEncryptionSettings # type: ignore
from ._models import DiskInstanceView # type: ignore
from ._models import HardwareProfile # type: ignore
from ._models import Image # type: ignore
from ._models import ImageDataDisk # type: ignore
from ._models import ImageListResult # type: ignore
from ._models import ImageOSDisk # type: ignore
from ._models import ImageReference # type: ignore
from ._models import ImageStorageProfile # type: ignore
from ._models import ImageUpdate # type: ignore
from ._models import InnerError # type: ignore
from ._models import InstanceViewStatus # type: ignore
from ._models import KeyVaultKeyReference # type: ignore
from ._models import KeyVaultSecretReference # type: ignore
from ._models import LinuxConfiguration # type: ignore
from ._models import ListUsagesResult # type: ignore
from ._models import LogAnalyticsInputBase # type: ignore
from ._models import LogAnalyticsOperationResult # type: ignore
from ._models import LogAnalyticsOutput # type: ignore
from ._models import MaintenanceRedeployStatus # type: ignore
from ._models import ManagedDiskParameters # type: ignore
from ._models import NetworkInterfaceReference # type: ignore
from ._models import NetworkProfile # type: ignore
from ._models import OSDisk # type: ignore
from ._models import OSDiskImage # type: ignore
from ._models import OSProfile # type: ignore
from ._models import Plan # type: ignore
from ._models import ProximityPlacementGroup # type: ignore
from ._models import ProximityPlacementGroupListResult # type: ignore
from ._models import ProximityPlacementGroupUpdate # type: ignore
from ._models import PurchasePlan # type: ignore
from ._models import RecoveryWalkResponse # type: ignore
from ._models import RequestRateByIntervalInput # type: ignore
from ._models import Resource # type: ignore
from ._models import RollbackStatusInfo # type: ignore
from ._models import RollingUpgradePolicy # type: ignore
from ._models import RollingUpgradeProgressInfo # type: ignore
from ._models import RollingUpgradeRunningStatus # type: ignore
from ._models import RollingUpgradeStatusInfo # type: ignore
from ._models import RunCommandDocument # type: ignore
from ._models import RunCommandDocumentBase # type: ignore
from ._models import RunCommandInput # type: ignore
from ._models import RunCommandInputParameter # type: ignore
from ._models import RunCommandListResult # type: ignore
from ._models import RunCommandParameterDefinition # type: ignore
from ._models import RunCommandResult # type: ignore
from ._models import Sku # type: ignore
from ._models import SshConfiguration # type: ignore
from ._models import SshPublicKey # type: ignore
from ._models import StorageProfile # type: ignore
from ._models import SubResource # type: ignore
from ._models import SubResourceReadOnly # type: ignore
from ._models import ThrottledRequestsInput # type: ignore
from ._models import UpdateResource # type: ignore
from ._models import UpgradeOperationHistoricalStatusInfo # type: ignore
from ._models import UpgradeOperationHistoricalStatusInfoProperties # type: ignore
from ._models import UpgradeOperationHistoryStatus # type: ignore
from ._models import UpgradePolicy # type: ignore
from ._models import Usage # type: ignore
from ._models import UsageName # type: ignore
from ._models import VaultCertificate # type: ignore
from ._models import VaultSecretGroup # type: ignore
from ._models import VirtualHardDisk # type: ignore
from ._models import VirtualMachine # type: ignore
from ._models import VirtualMachineAgentInstanceView # type: ignore
from ._models import VirtualMachineCaptureParameters # type: ignore
from ._models import VirtualMachineCaptureResult # type: ignore
from ._models import VirtualMachineExtension # type: ignore
from ._models import VirtualMachineExtensionHandlerInstanceView # type: ignore
from ._models import VirtualMachineExtensionImage # type: ignore
from ._models import VirtualMachineExtensionInstanceView # type: ignore
from ._models import VirtualMachineExtensionUpdate # type: ignore
from ._models import VirtualMachineExtensionsListResult # type: ignore
from ._models import VirtualMachineHealthStatus # type: ignore
from ._models import VirtualMachineIdentity # type: ignore
from ._models import VirtualMachineImage # type: ignore
from ._models import VirtualMachineImageResource # type: ignore
from ._models import VirtualMachineInstanceView # type: ignore
from ._models import VirtualMachineListResult # type: ignore
from ._models import VirtualMachineReimageParameters # type: ignore
from ._models import VirtualMachineScaleSet # type: ignore
from ._models import VirtualMachineScaleSetDataDisk # type: ignore
from ._models import VirtualMachineScaleSetExtension # type: ignore
from ._models import VirtualMachineScaleSetExtensionListResult # type: ignore
from ._models import VirtualMachineScaleSetExtensionProfile # type: ignore
from ._models import VirtualMachineScaleSetIPConfiguration # type: ignore
from ._models import VirtualMachineScaleSetIdentity # type: ignore
from ._models import VirtualMachineScaleSetInstanceView # type: ignore
from ._models import VirtualMachineScaleSetInstanceViewStatusesSummary # type: ignore
from ._models import VirtualMachineScaleSetIpTag # type: ignore
from ._models import VirtualMachineScaleSetListOSUpgradeHistory # type: ignore
from ._models import VirtualMachineScaleSetListResult # type: ignore
from ._models import VirtualMachineScaleSetListSkusResult # type: ignore
from ._models import VirtualMachineScaleSetListWithLinkResult # type: ignore
from ._models import VirtualMachineScaleSetManagedDiskParameters # type: ignore
from ._models import VirtualMachineScaleSetNetworkConfiguration # type: ignore
from ._models import VirtualMachineScaleSetNetworkConfigurationDnsSettings # type: ignore
from ._models import VirtualMachineScaleSetNetworkProfile # type: ignore
from ._models import VirtualMachineScaleSetOSDisk # type: ignore
from ._models import VirtualMachineScaleSetOSProfile # type: ignore
from ._models import VirtualMachineScaleSetPublicIPAddressConfiguration # type: ignore
from ._models import VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings # type: ignore
from ._models import VirtualMachineScaleSetReimageParameters # type: ignore
from ._models import VirtualMachineScaleSetSku # type: ignore
from ._models import VirtualMachineScaleSetSkuCapacity # type: ignore
from ._models import VirtualMachineScaleSetStorageProfile # type: ignore
from ._models import VirtualMachineScaleSetUpdate # type: ignore
from ._models import VirtualMachineScaleSetUpdateIPConfiguration # type: ignore
from ._models import VirtualMachineScaleSetUpdateNetworkConfiguration # type: ignore
from ._models import VirtualMachineScaleSetUpdateNetworkProfile # type: ignore
from ._models import VirtualMachineScaleSetUpdateOSDisk # type: ignore
from ._models import VirtualMachineScaleSetUpdateOSProfile # type: ignore
from ._models import VirtualMachineScaleSetUpdatePublicIPAddressConfiguration # type: ignore
from ._models import VirtualMachineScaleSetUpdateStorageProfile # type: ignore
from ._models import VirtualMachineScaleSetUpdateVMProfile # type: ignore
from ._models import VirtualMachineScaleSetVM # type: ignore
from ._models import VirtualMachineScaleSetVMExtensionsSummary # type: ignore
from ._models import VirtualMachineScaleSetVMInstanceIDs # type: ignore
from ._models import VirtualMachineScaleSetVMInstanceRequiredIDs # type: ignore
from ._models import VirtualMachineScaleSetVMInstanceView # type: ignore
from ._models import VirtualMachineScaleSetVMListResult # type: ignore
from ._models import VirtualMachineScaleSetVMProfile # type: ignore
from ._models import VirtualMachineScaleSetVMReimageParameters # type: ignore
from ._models import VirtualMachineSize # type: ignore
from ._models import VirtualMachineSizeListResult # type: ignore
from ._models import VirtualMachineStatusCodeCount # type: ignore
from ._models import VirtualMachineUpdate # type: ignore
from ._models import WinRMConfiguration # type: ignore
from ._models import WinRMListener # type: ignore
from ._models import WindowsConfiguration # type: ignore
from ._compute_management_client_enums import (
AvailabilitySetSkuTypes,
CachingTypes,
DiffDiskOptions,
DiskCreateOptionTypes,
IPVersion,
IntervalInMins,
MaintenanceOperationResultCodeTypes,
OperatingSystemStateTypes,
OperatingSystemTypes,
ProtocolTypes,
ProximityPlacementGroupType,
ResourceIdentityType,
RollingUpgradeActionType,
RollingUpgradeStatusCode,
SettingNames,
StatusLevelTypes,
StorageAccountTypes,
UpgradeMode,
UpgradeOperationInvoker,
UpgradeState,
VirtualMachineEvictionPolicyTypes,
VirtualMachinePriorityTypes,
VirtualMachineScaleSetSkuScaleType,
VirtualMachineSizeTypes,
)
__all__ = [
'AdditionalCapabilities',
'AdditionalUnattendContent',
'ApiEntityReference',
'ApiError',
'ApiErrorBase',
'AutomaticOSUpgradePolicy',
'AutomaticOSUpgradeProperties',
'AutomaticRepairsPolicy',
'AvailabilitySet',
'AvailabilitySetListResult',
'AvailabilitySetUpdate',
'BootDiagnostics',
'BootDiagnosticsInstanceView',
'Components1H8M3EpSchemasVirtualmachineidentityPropertiesUserassignedidentitiesAdditionalproperties',
'ComponentsNj115SSchemasVirtualmachinescalesetidentityPropertiesUserassignedidentitiesAdditionalproperties',
'ComputeOperationListResult',
'ComputeOperationValue',
'DataDisk',
'DataDiskImage',
'DiagnosticsProfile',
'DiffDiskSettings',
'DiskEncryptionSettings',
'DiskInstanceView',
'HardwareProfile',
'Image',
'ImageDataDisk',
'ImageListResult',
'ImageOSDisk',
'ImageReference',
'ImageStorageProfile',
'ImageUpdate',
'InnerError',
'InstanceViewStatus',
'KeyVaultKeyReference',
'KeyVaultSecretReference',
'LinuxConfiguration',
'ListUsagesResult',
'LogAnalyticsInputBase',
'LogAnalyticsOperationResult',
'LogAnalyticsOutput',
'MaintenanceRedeployStatus',
'ManagedDiskParameters',
'NetworkInterfaceReference',
'NetworkProfile',
'OSDisk',
'OSDiskImage',
'OSProfile',
'Plan',
'ProximityPlacementGroup',
'ProximityPlacementGroupListResult',
'ProximityPlacementGroupUpdate',
'PurchasePlan',
'RecoveryWalkResponse',
'RequestRateByIntervalInput',
'Resource',
'RollbackStatusInfo',
'RollingUpgradePolicy',
'RollingUpgradeProgressInfo',
'RollingUpgradeRunningStatus',
'RollingUpgradeStatusInfo',
'RunCommandDocument',
'RunCommandDocumentBase',
'RunCommandInput',
'RunCommandInputParameter',
'RunCommandListResult',
'RunCommandParameterDefinition',
'RunCommandResult',
'Sku',
'SshConfiguration',
'SshPublicKey',
'StorageProfile',
'SubResource',
'SubResourceReadOnly',
'ThrottledRequestsInput',
'UpdateResource',
'UpgradeOperationHistoricalStatusInfo',
'UpgradeOperationHistoricalStatusInfoProperties',
'UpgradeOperationHistoryStatus',
'UpgradePolicy',
'Usage',
'UsageName',
'VaultCertificate',
'VaultSecretGroup',
'VirtualHardDisk',
'VirtualMachine',
'VirtualMachineAgentInstanceView',
'VirtualMachineCaptureParameters',
'VirtualMachineCaptureResult',
'VirtualMachineExtension',
'VirtualMachineExtensionHandlerInstanceView',
'VirtualMachineExtensionImage',
'VirtualMachineExtensionInstanceView',
'VirtualMachineExtensionUpdate',
'VirtualMachineExtensionsListResult',
'VirtualMachineHealthStatus',
'VirtualMachineIdentity',
'VirtualMachineImage',
'VirtualMachineImageResource',
'VirtualMachineInstanceView',
'VirtualMachineListResult',
'VirtualMachineReimageParameters',
'VirtualMachineScaleSet',
'VirtualMachineScaleSetDataDisk',
'VirtualMachineScaleSetExtension',
'VirtualMachineScaleSetExtensionListResult',
'VirtualMachineScaleSetExtensionProfile',
'VirtualMachineScaleSetIPConfiguration',
'VirtualMachineScaleSetIdentity',
'VirtualMachineScaleSetInstanceView',
'VirtualMachineScaleSetInstanceViewStatusesSummary',
'VirtualMachineScaleSetIpTag',
'VirtualMachineScaleSetListOSUpgradeHistory',
'VirtualMachineScaleSetListResult',
'VirtualMachineScaleSetListSkusResult',
'VirtualMachineScaleSetListWithLinkResult',
'VirtualMachineScaleSetManagedDiskParameters',
'VirtualMachineScaleSetNetworkConfiguration',
'VirtualMachineScaleSetNetworkConfigurationDnsSettings',
'VirtualMachineScaleSetNetworkProfile',
'VirtualMachineScaleSetOSDisk',
'VirtualMachineScaleSetOSProfile',
'VirtualMachineScaleSetPublicIPAddressConfiguration',
'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings',
'VirtualMachineScaleSetReimageParameters',
'VirtualMachineScaleSetSku',
'VirtualMachineScaleSetSkuCapacity',
'VirtualMachineScaleSetStorageProfile',
'VirtualMachineScaleSetUpdate',
'VirtualMachineScaleSetUpdateIPConfiguration',
'VirtualMachineScaleSetUpdateNetworkConfiguration',
'VirtualMachineScaleSetUpdateNetworkProfile',
'VirtualMachineScaleSetUpdateOSDisk',
'VirtualMachineScaleSetUpdateOSProfile',
'VirtualMachineScaleSetUpdatePublicIPAddressConfiguration',
'VirtualMachineScaleSetUpdateStorageProfile',
'VirtualMachineScaleSetUpdateVMProfile',
'VirtualMachineScaleSetVM',
'VirtualMachineScaleSetVMExtensionsSummary',
'VirtualMachineScaleSetVMInstanceIDs',
'VirtualMachineScaleSetVMInstanceRequiredIDs',
'VirtualMachineScaleSetVMInstanceView',
'VirtualMachineScaleSetVMListResult',
'VirtualMachineScaleSetVMProfile',
'VirtualMachineScaleSetVMReimageParameters',
'VirtualMachineSize',
'VirtualMachineSizeListResult',
'VirtualMachineStatusCodeCount',
'VirtualMachineUpdate',
'WinRMConfiguration',
'WinRMListener',
'WindowsConfiguration',
'AvailabilitySetSkuTypes',
'CachingTypes',
'DiffDiskOptions',
'DiskCreateOptionTypes',
'IPVersion',
'IntervalInMins',
'MaintenanceOperationResultCodeTypes',
'OperatingSystemStateTypes',
'OperatingSystemTypes',
'ProtocolTypes',
'ProximityPlacementGroupType',
'ResourceIdentityType',
'RollingUpgradeActionType',
'RollingUpgradeStatusCode',
'SettingNames',
'StatusLevelTypes',
'StorageAccountTypes',
'UpgradeMode',
'UpgradeOperationInvoker',
'UpgradeState',
'VirtualMachineEvictionPolicyTypes',
'VirtualMachinePriorityTypes',
'VirtualMachineScaleSetSkuScaleType',
'VirtualMachineSizeTypes',
]
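The try/except at the top of this module is the generated SDK's standard fallback: _models_py3 uses Python-3-only annotation syntax, so importing it under Python 2 raises SyntaxError and the annotation-free _models module is used instead. A self-contained sketch of the same idiom using real modules (assumes the tomli backport is installed when running under Python < 3.11):

# The same "py3-first import with fallback" idiom, stdlib/backport edition.
try:
    import tomllib            # standard library on Python >= 3.11
except ImportError:
    import tomli as tomllib   # third-party backport (assumed installed)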
| 49.754826
| 146
| 0.789858
|
try:
from ._models_py3 import AdditionalCapabilities
from ._models_py3 import AdditionalUnattendContent
from ._models_py3 import ApiEntityReference
from ._models_py3 import ApiError
from ._models_py3 import ApiErrorBase
from ._models_py3 import AutomaticOSUpgradePolicy
from ._models_py3 import AutomaticOSUpgradeProperties
from ._models_py3 import AutomaticRepairsPolicy
from ._models_py3 import AvailabilitySet
from ._models_py3 import AvailabilitySetListResult
from ._models_py3 import AvailabilitySetUpdate
from ._models_py3 import BootDiagnostics
from ._models_py3 import BootDiagnosticsInstanceView
from ._models_py3 import Components1H8M3EpSchemasVirtualmachineidentityPropertiesUserassignedidentitiesAdditionalproperties
from ._models_py3 import ComponentsNj115SSchemasVirtualmachinescalesetidentityPropertiesUserassignedidentitiesAdditionalproperties
from ._models_py3 import ComputeOperationListResult
from ._models_py3 import ComputeOperationValue
from ._models_py3 import DataDisk
from ._models_py3 import DataDiskImage
from ._models_py3 import DiagnosticsProfile
from ._models_py3 import DiffDiskSettings
from ._models_py3 import DiskEncryptionSettings
from ._models_py3 import DiskInstanceView
from ._models_py3 import HardwareProfile
from ._models_py3 import Image
from ._models_py3 import ImageDataDisk
from ._models_py3 import ImageListResult
from ._models_py3 import ImageOSDisk
from ._models_py3 import ImageReference
from ._models_py3 import ImageStorageProfile
from ._models_py3 import ImageUpdate
from ._models_py3 import InnerError
from ._models_py3 import InstanceViewStatus
from ._models_py3 import KeyVaultKeyReference
from ._models_py3 import KeyVaultSecretReference
from ._models_py3 import LinuxConfiguration
from ._models_py3 import ListUsagesResult
from ._models_py3 import LogAnalyticsInputBase
from ._models_py3 import LogAnalyticsOperationResult
from ._models_py3 import LogAnalyticsOutput
from ._models_py3 import MaintenanceRedeployStatus
from ._models_py3 import ManagedDiskParameters
from ._models_py3 import NetworkInterfaceReference
from ._models_py3 import NetworkProfile
from ._models_py3 import OSDisk
from ._models_py3 import OSDiskImage
from ._models_py3 import OSProfile
from ._models_py3 import Plan
from ._models_py3 import ProximityPlacementGroup
from ._models_py3 import ProximityPlacementGroupListResult
from ._models_py3 import ProximityPlacementGroupUpdate
from ._models_py3 import PurchasePlan
from ._models_py3 import RecoveryWalkResponse
from ._models_py3 import RequestRateByIntervalInput
from ._models_py3 import Resource
from ._models_py3 import RollbackStatusInfo
from ._models_py3 import RollingUpgradePolicy
from ._models_py3 import RollingUpgradeProgressInfo
from ._models_py3 import RollingUpgradeRunningStatus
from ._models_py3 import RollingUpgradeStatusInfo
from ._models_py3 import RunCommandDocument
from ._models_py3 import RunCommandDocumentBase
from ._models_py3 import RunCommandInput
from ._models_py3 import RunCommandInputParameter
from ._models_py3 import RunCommandListResult
from ._models_py3 import RunCommandParameterDefinition
from ._models_py3 import RunCommandResult
from ._models_py3 import Sku
from ._models_py3 import SshConfiguration
from ._models_py3 import SshPublicKey
from ._models_py3 import StorageProfile
from ._models_py3 import SubResource
from ._models_py3 import SubResourceReadOnly
from ._models_py3 import ThrottledRequestsInput
from ._models_py3 import UpdateResource
from ._models_py3 import UpgradeOperationHistoricalStatusInfo
from ._models_py3 import UpgradeOperationHistoricalStatusInfoProperties
from ._models_py3 import UpgradeOperationHistoryStatus
from ._models_py3 import UpgradePolicy
from ._models_py3 import Usage
from ._models_py3 import UsageName
from ._models_py3 import VaultCertificate
from ._models_py3 import VaultSecretGroup
from ._models_py3 import VirtualHardDisk
from ._models_py3 import VirtualMachine
from ._models_py3 import VirtualMachineAgentInstanceView
from ._models_py3 import VirtualMachineCaptureParameters
from ._models_py3 import VirtualMachineCaptureResult
from ._models_py3 import VirtualMachineExtension
from ._models_py3 import VirtualMachineExtensionHandlerInstanceView
from ._models_py3 import VirtualMachineExtensionImage
from ._models_py3 import VirtualMachineExtensionInstanceView
from ._models_py3 import VirtualMachineExtensionUpdate
from ._models_py3 import VirtualMachineExtensionsListResult
from ._models_py3 import VirtualMachineHealthStatus
from ._models_py3 import VirtualMachineIdentity
from ._models_py3 import VirtualMachineImage
from ._models_py3 import VirtualMachineImageResource
from ._models_py3 import VirtualMachineInstanceView
from ._models_py3 import VirtualMachineListResult
from ._models_py3 import VirtualMachineReimageParameters
from ._models_py3 import VirtualMachineScaleSet
from ._models_py3 import VirtualMachineScaleSetDataDisk
from ._models_py3 import VirtualMachineScaleSetExtension
from ._models_py3 import VirtualMachineScaleSetExtensionListResult
from ._models_py3 import VirtualMachineScaleSetExtensionProfile
from ._models_py3 import VirtualMachineScaleSetIPConfiguration
from ._models_py3 import VirtualMachineScaleSetIdentity
from ._models_py3 import VirtualMachineScaleSetInstanceView
from ._models_py3 import VirtualMachineScaleSetInstanceViewStatusesSummary
from ._models_py3 import VirtualMachineScaleSetIpTag
from ._models_py3 import VirtualMachineScaleSetListOSUpgradeHistory
from ._models_py3 import VirtualMachineScaleSetListResult
from ._models_py3 import VirtualMachineScaleSetListSkusResult
from ._models_py3 import VirtualMachineScaleSetListWithLinkResult
from ._models_py3 import VirtualMachineScaleSetManagedDiskParameters
from ._models_py3 import VirtualMachineScaleSetNetworkConfiguration
from ._models_py3 import VirtualMachineScaleSetNetworkConfigurationDnsSettings
from ._models_py3 import VirtualMachineScaleSetNetworkProfile
from ._models_py3 import VirtualMachineScaleSetOSDisk
from ._models_py3 import VirtualMachineScaleSetOSProfile
from ._models_py3 import VirtualMachineScaleSetPublicIPAddressConfiguration
from ._models_py3 import VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
from ._models_py3 import VirtualMachineScaleSetReimageParameters
from ._models_py3 import VirtualMachineScaleSetSku
from ._models_py3 import VirtualMachineScaleSetSkuCapacity
from ._models_py3 import VirtualMachineScaleSetStorageProfile
from ._models_py3 import VirtualMachineScaleSetUpdate
from ._models_py3 import VirtualMachineScaleSetUpdateIPConfiguration
from ._models_py3 import VirtualMachineScaleSetUpdateNetworkConfiguration
from ._models_py3 import VirtualMachineScaleSetUpdateNetworkProfile
from ._models_py3 import VirtualMachineScaleSetUpdateOSDisk
from ._models_py3 import VirtualMachineScaleSetUpdateOSProfile
from ._models_py3 import VirtualMachineScaleSetUpdatePublicIPAddressConfiguration
from ._models_py3 import VirtualMachineScaleSetUpdateStorageProfile
from ._models_py3 import VirtualMachineScaleSetUpdateVMProfile
from ._models_py3 import VirtualMachineScaleSetVM
from ._models_py3 import VirtualMachineScaleSetVMExtensionsSummary
from ._models_py3 import VirtualMachineScaleSetVMInstanceIDs
from ._models_py3 import VirtualMachineScaleSetVMInstanceRequiredIDs
from ._models_py3 import VirtualMachineScaleSetVMInstanceView
from ._models_py3 import VirtualMachineScaleSetVMListResult
from ._models_py3 import VirtualMachineScaleSetVMProfile
from ._models_py3 import VirtualMachineScaleSetVMReimageParameters
from ._models_py3 import VirtualMachineSize
from ._models_py3 import VirtualMachineSizeListResult
from ._models_py3 import VirtualMachineStatusCodeCount
from ._models_py3 import VirtualMachineUpdate
from ._models_py3 import WinRMConfiguration
from ._models_py3 import WinRMListener
from ._models_py3 import WindowsConfiguration
except (SyntaxError, ImportError):
from ._models import AdditionalCapabilities
from ._models import AdditionalUnattendContent
from ._models import ApiEntityReference
from ._models import ApiError
from ._models import ApiErrorBase
from ._models import AutomaticOSUpgradePolicy
from ._models import AutomaticOSUpgradeProperties
from ._models import AutomaticRepairsPolicy
from ._models import AvailabilitySet
from ._models import AvailabilitySetListResult
from ._models import AvailabilitySetUpdate
from ._models import BootDiagnostics
from ._models import BootDiagnosticsInstanceView
from ._models import Components1H8M3EpSchemasVirtualmachineidentityPropertiesUserassignedidentitiesAdditionalproperties
from ._models import ComponentsNj115SSchemasVirtualmachinescalesetidentityPropertiesUserassignedidentitiesAdditionalproperties
from ._models import ComputeOperationListResult
from ._models import ComputeOperationValue
from ._models import DataDisk
from ._models import DataDiskImage
from ._models import DiagnosticsProfile
from ._models import DiffDiskSettings
from ._models import DiskEncryptionSettings
from ._models import DiskInstanceView
from ._models import HardwareProfile
from ._models import Image
from ._models import ImageDataDisk
from ._models import ImageListResult
from ._models import ImageOSDisk
from ._models import ImageReference
from ._models import ImageStorageProfile
from ._models import ImageUpdate
from ._models import InnerError
from ._models import InstanceViewStatus
from ._models import KeyVaultKeyReference
from ._models import KeyVaultSecretReference
from ._models import LinuxConfiguration
from ._models import ListUsagesResult
from ._models import LogAnalyticsInputBase
from ._models import LogAnalyticsOperationResult
from ._models import LogAnalyticsOutput
from ._models import MaintenanceRedeployStatus
from ._models import ManagedDiskParameters
from ._models import NetworkInterfaceReference
from ._models import NetworkProfile
from ._models import OSDisk
from ._models import OSDiskImage
from ._models import OSProfile
from ._models import Plan
from ._models import ProximityPlacementGroup
from ._models import ProximityPlacementGroupListResult
from ._models import ProximityPlacementGroupUpdate
from ._models import PurchasePlan
from ._models import RecoveryWalkResponse
from ._models import RequestRateByIntervalInput
from ._models import Resource
from ._models import RollbackStatusInfo
from ._models import RollingUpgradePolicy
from ._models import RollingUpgradeProgressInfo
from ._models import RollingUpgradeRunningStatus
from ._models import RollingUpgradeStatusInfo
from ._models import RunCommandDocument
from ._models import RunCommandDocumentBase
from ._models import RunCommandInput
from ._models import RunCommandInputParameter
from ._models import RunCommandListResult
from ._models import RunCommandParameterDefinition
from ._models import RunCommandResult
from ._models import Sku
from ._models import SshConfiguration
from ._models import SshPublicKey
from ._models import StorageProfile
from ._models import SubResource
from ._models import SubResourceReadOnly
from ._models import ThrottledRequestsInput
from ._models import UpdateResource
from ._models import UpgradeOperationHistoricalStatusInfo
from ._models import UpgradeOperationHistoricalStatusInfoProperties
from ._models import UpgradeOperationHistoryStatus
from ._models import UpgradePolicy
from ._models import Usage
from ._models import UsageName
from ._models import VaultCertificate
from ._models import VaultSecretGroup
from ._models import VirtualHardDisk
from ._models import VirtualMachine
from ._models import VirtualMachineAgentInstanceView
from ._models import VirtualMachineCaptureParameters
from ._models import VirtualMachineCaptureResult
from ._models import VirtualMachineExtension
from ._models import VirtualMachineExtensionHandlerInstanceView
from ._models import VirtualMachineExtensionImage
from ._models import VirtualMachineExtensionInstanceView
from ._models import VirtualMachineExtensionUpdate
from ._models import VirtualMachineExtensionsListResult
from ._models import VirtualMachineHealthStatus
from ._models import VirtualMachineIdentity
from ._models import VirtualMachineImage
from ._models import VirtualMachineImageResource
from ._models import VirtualMachineInstanceView
from ._models import VirtualMachineListResult
from ._models import VirtualMachineReimageParameters
from ._models import VirtualMachineScaleSet
from ._models import VirtualMachineScaleSetDataDisk
from ._models import VirtualMachineScaleSetExtension
from ._models import VirtualMachineScaleSetExtensionListResult
from ._models import VirtualMachineScaleSetExtensionProfile
from ._models import VirtualMachineScaleSetIPConfiguration
from ._models import VirtualMachineScaleSetIdentity
from ._models import VirtualMachineScaleSetInstanceView
from ._models import VirtualMachineScaleSetInstanceViewStatusesSummary
from ._models import VirtualMachineScaleSetIpTag
from ._models import VirtualMachineScaleSetListOSUpgradeHistory
from ._models import VirtualMachineScaleSetListResult
from ._models import VirtualMachineScaleSetListSkusResult
from ._models import VirtualMachineScaleSetListWithLinkResult
from ._models import VirtualMachineScaleSetManagedDiskParameters
from ._models import VirtualMachineScaleSetNetworkConfiguration
from ._models import VirtualMachineScaleSetNetworkConfigurationDnsSettings
from ._models import VirtualMachineScaleSetNetworkProfile
from ._models import VirtualMachineScaleSetOSDisk
from ._models import VirtualMachineScaleSetOSProfile
from ._models import VirtualMachineScaleSetPublicIPAddressConfiguration
from ._models import VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings
from ._models import VirtualMachineScaleSetReimageParameters
from ._models import VirtualMachineScaleSetSku
from ._models import VirtualMachineScaleSetSkuCapacity
from ._models import VirtualMachineScaleSetStorageProfile
from ._models import VirtualMachineScaleSetUpdate
from ._models import VirtualMachineScaleSetUpdateIPConfiguration
from ._models import VirtualMachineScaleSetUpdateNetworkConfiguration
from ._models import VirtualMachineScaleSetUpdateNetworkProfile
from ._models import VirtualMachineScaleSetUpdateOSDisk
from ._models import VirtualMachineScaleSetUpdateOSProfile
from ._models import VirtualMachineScaleSetUpdatePublicIPAddressConfiguration
from ._models import VirtualMachineScaleSetUpdateStorageProfile
from ._models import VirtualMachineScaleSetUpdateVMProfile
from ._models import VirtualMachineScaleSetVM
from ._models import VirtualMachineScaleSetVMExtensionsSummary
from ._models import VirtualMachineScaleSetVMInstanceIDs
from ._models import VirtualMachineScaleSetVMInstanceRequiredIDs
from ._models import VirtualMachineScaleSetVMInstanceView
from ._models import VirtualMachineScaleSetVMListResult
from ._models import VirtualMachineScaleSetVMProfile
from ._models import VirtualMachineScaleSetVMReimageParameters
from ._models import VirtualMachineSize
from ._models import VirtualMachineSizeListResult
from ._models import VirtualMachineStatusCodeCount
from ._models import VirtualMachineUpdate
from ._models import WinRMConfiguration
from ._models import WinRMListener
from ._models import WindowsConfiguration
from ._compute_management_client_enums import (
AvailabilitySetSkuTypes,
CachingTypes,
DiffDiskOptions,
DiskCreateOptionTypes,
IPVersion,
IntervalInMins,
MaintenanceOperationResultCodeTypes,
OperatingSystemStateTypes,
OperatingSystemTypes,
ProtocolTypes,
ProximityPlacementGroupType,
ResourceIdentityType,
RollingUpgradeActionType,
RollingUpgradeStatusCode,
SettingNames,
StatusLevelTypes,
StorageAccountTypes,
UpgradeMode,
UpgradeOperationInvoker,
UpgradeState,
VirtualMachineEvictionPolicyTypes,
VirtualMachinePriorityTypes,
VirtualMachineScaleSetSkuScaleType,
VirtualMachineSizeTypes,
)
__all__ = [
'AdditionalCapabilities',
'AdditionalUnattendContent',
'ApiEntityReference',
'ApiError',
'ApiErrorBase',
'AutomaticOSUpgradePolicy',
'AutomaticOSUpgradeProperties',
'AutomaticRepairsPolicy',
'AvailabilitySet',
'AvailabilitySetListResult',
'AvailabilitySetUpdate',
'BootDiagnostics',
'BootDiagnosticsInstanceView',
'Components1H8M3EpSchemasVirtualmachineidentityPropertiesUserassignedidentitiesAdditionalproperties',
'ComponentsNj115SSchemasVirtualmachinescalesetidentityPropertiesUserassignedidentitiesAdditionalproperties',
'ComputeOperationListResult',
'ComputeOperationValue',
'DataDisk',
'DataDiskImage',
'DiagnosticsProfile',
'DiffDiskSettings',
'DiskEncryptionSettings',
'DiskInstanceView',
'HardwareProfile',
'Image',
'ImageDataDisk',
'ImageListResult',
'ImageOSDisk',
'ImageReference',
'ImageStorageProfile',
'ImageUpdate',
'InnerError',
'InstanceViewStatus',
'KeyVaultKeyReference',
'KeyVaultSecretReference',
'LinuxConfiguration',
'ListUsagesResult',
'LogAnalyticsInputBase',
'LogAnalyticsOperationResult',
'LogAnalyticsOutput',
'MaintenanceRedeployStatus',
'ManagedDiskParameters',
'NetworkInterfaceReference',
'NetworkProfile',
'OSDisk',
'OSDiskImage',
'OSProfile',
'Plan',
'ProximityPlacementGroup',
'ProximityPlacementGroupListResult',
'ProximityPlacementGroupUpdate',
'PurchasePlan',
'RecoveryWalkResponse',
'RequestRateByIntervalInput',
'Resource',
'RollbackStatusInfo',
'RollingUpgradePolicy',
'RollingUpgradeProgressInfo',
'RollingUpgradeRunningStatus',
'RollingUpgradeStatusInfo',
'RunCommandDocument',
'RunCommandDocumentBase',
'RunCommandInput',
'RunCommandInputParameter',
'RunCommandListResult',
'RunCommandParameterDefinition',
'RunCommandResult',
'Sku',
'SshConfiguration',
'SshPublicKey',
'StorageProfile',
'SubResource',
'SubResourceReadOnly',
'ThrottledRequestsInput',
'UpdateResource',
'UpgradeOperationHistoricalStatusInfo',
'UpgradeOperationHistoricalStatusInfoProperties',
'UpgradeOperationHistoryStatus',
'UpgradePolicy',
'Usage',
'UsageName',
'VaultCertificate',
'VaultSecretGroup',
'VirtualHardDisk',
'VirtualMachine',
'VirtualMachineAgentInstanceView',
'VirtualMachineCaptureParameters',
'VirtualMachineCaptureResult',
'VirtualMachineExtension',
'VirtualMachineExtensionHandlerInstanceView',
'VirtualMachineExtensionImage',
'VirtualMachineExtensionInstanceView',
'VirtualMachineExtensionUpdate',
'VirtualMachineExtensionsListResult',
'VirtualMachineHealthStatus',
'VirtualMachineIdentity',
'VirtualMachineImage',
'VirtualMachineImageResource',
'VirtualMachineInstanceView',
'VirtualMachineListResult',
'VirtualMachineReimageParameters',
'VirtualMachineScaleSet',
'VirtualMachineScaleSetDataDisk',
'VirtualMachineScaleSetExtension',
'VirtualMachineScaleSetExtensionListResult',
'VirtualMachineScaleSetExtensionProfile',
'VirtualMachineScaleSetIPConfiguration',
'VirtualMachineScaleSetIdentity',
'VirtualMachineScaleSetInstanceView',
'VirtualMachineScaleSetInstanceViewStatusesSummary',
'VirtualMachineScaleSetIpTag',
'VirtualMachineScaleSetListOSUpgradeHistory',
'VirtualMachineScaleSetListResult',
'VirtualMachineScaleSetListSkusResult',
'VirtualMachineScaleSetListWithLinkResult',
'VirtualMachineScaleSetManagedDiskParameters',
'VirtualMachineScaleSetNetworkConfiguration',
'VirtualMachineScaleSetNetworkConfigurationDnsSettings',
'VirtualMachineScaleSetNetworkProfile',
'VirtualMachineScaleSetOSDisk',
'VirtualMachineScaleSetOSProfile',
'VirtualMachineScaleSetPublicIPAddressConfiguration',
'VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings',
'VirtualMachineScaleSetReimageParameters',
'VirtualMachineScaleSetSku',
'VirtualMachineScaleSetSkuCapacity',
'VirtualMachineScaleSetStorageProfile',
'VirtualMachineScaleSetUpdate',
'VirtualMachineScaleSetUpdateIPConfiguration',
'VirtualMachineScaleSetUpdateNetworkConfiguration',
'VirtualMachineScaleSetUpdateNetworkProfile',
'VirtualMachineScaleSetUpdateOSDisk',
'VirtualMachineScaleSetUpdateOSProfile',
'VirtualMachineScaleSetUpdatePublicIPAddressConfiguration',
'VirtualMachineScaleSetUpdateStorageProfile',
'VirtualMachineScaleSetUpdateVMProfile',
'VirtualMachineScaleSetVM',
'VirtualMachineScaleSetVMExtensionsSummary',
'VirtualMachineScaleSetVMInstanceIDs',
'VirtualMachineScaleSetVMInstanceRequiredIDs',
'VirtualMachineScaleSetVMInstanceView',
'VirtualMachineScaleSetVMListResult',
'VirtualMachineScaleSetVMProfile',
'VirtualMachineScaleSetVMReimageParameters',
'VirtualMachineSize',
'VirtualMachineSizeListResult',
'VirtualMachineStatusCodeCount',
'VirtualMachineUpdate',
'WinRMConfiguration',
'WinRMListener',
'WindowsConfiguration',
'AvailabilitySetSkuTypes',
'CachingTypes',
'DiffDiskOptions',
'DiskCreateOptionTypes',
'IPVersion',
'IntervalInMins',
'MaintenanceOperationResultCodeTypes',
'OperatingSystemStateTypes',
'OperatingSystemTypes',
'ProtocolTypes',
'ProximityPlacementGroupType',
'ResourceIdentityType',
'RollingUpgradeActionType',
'RollingUpgradeStatusCode',
'SettingNames',
'StatusLevelTypes',
'StorageAccountTypes',
'UpgradeMode',
'UpgradeOperationInvoker',
'UpgradeState',
'VirtualMachineEvictionPolicyTypes',
'VirtualMachinePriorityTypes',
'VirtualMachineScaleSetSkuScaleType',
'VirtualMachineSizeTypes',
]
| true
| true
|
1c402f34e07ad043935b8aaecd277213c8f6f438
| 16,675
|
py
|
Python
|
code/train_pc_img.py
|
Mehooz/VGSNet
|
18ddae20fb3ccc440a38bd8b23cba8fcaa753518
|
[
"MIT"
] | 3
|
2020-10-26T19:52:53.000Z
|
2021-12-27T07:59:36.000Z
|
code/train_pc_img.py
|
Mehooz/VGSNet
|
18ddae20fb3ccc440a38bd8b23cba8fcaa753518
|
[
"MIT"
] | null | null | null |
code/train_pc_img.py
|
Mehooz/VGSNet
|
18ddae20fb3ccc440a38bd8b23cba8fcaa753518
|
[
"MIT"
] | null | null | null |
"""
This is the main trainer script for point cloud AE/VAE experiments.
Use scripts/train_ae_pc_chair.sh or scripts/train_vae_pc_chair.sh to run.
Before that, you need to run scripts/pretrain_part_pc_ae_chair.sh or scripts/pretrain_part_pc_vae_chair.sh
to pretrain part geometry AE/VAE.
"""
import os
import time
import sys
import shutil
import random
from time import strftime
from argparse import ArgumentParser
import numpy as np
import torch
import torch.utils.data
from config import add_train_vae_args
from data import PartNetDataset, Tree
import utils
# Use 1-4 CPU threads to train.
# Don't use too many CPU threads, which will slow down the training.
# torch.set_num_threads(1)
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def train(conf):
# load network model
conf.model_version = 'model_pc_img'
conf.exp_name = 'pc_ae_table_3000_test'
conf.data_path = '/home/zhangxc/tmp/structurenet/data/partnetdata/table_hier_3000'
conf.train_dataset = 'train_no_other_less_than_10_parts.txt'
conf.val_dataset = 'val_no_other_less_than_10_parts.txt'
conf.epochs = 200
conf.part_pc_exp_name = 'part_pc_ae_table'
conf.part_pc_model_epoch = 194
conf.batch_size = 64
conf.num_point = 3000
conf.checkpoint_interval = 54
models = utils.get_model_module(conf.model_version)
# check if training run already exists. If so, delete it.
if os.path.exists(os.path.join(conf.log_path, conf.exp_name)) or \
os.path.exists(os.path.join(conf.model_path, conf.exp_name)):
response = input('A training run named "%s" already exists, overwrite? (y/n) ' % (conf.exp_name))
if response != 'y':
sys.exit()
if os.path.exists(os.path.join(conf.log_path, conf.exp_name)):
shutil.rmtree(os.path.join(conf.log_path, conf.exp_name))
if os.path.exists(os.path.join(conf.model_path, conf.exp_name)):
shutil.rmtree(os.path.join(conf.model_path, conf.exp_name))
# create directories for this run
os.makedirs(os.path.join(conf.model_path, conf.exp_name))
os.makedirs(os.path.join(conf.log_path, conf.exp_name))
# file log
flog = open(os.path.join(conf.log_path, conf.exp_name, 'train.log'), 'w')
# set training device
device = torch.device(conf.device)
print(f'Using device: {conf.device}')
flog.write(f'Using device: {conf.device}\n')
# log the object category information
print(f'Object Category: {conf.category}')
flog.write(f'Object Category: {conf.category}\n')
# control randomness
if conf.seed < 0:
conf.seed = random.randint(1, 10000)
print("Random Seed: %d" % (conf.seed))
flog.write(f'Random Seed: {conf.seed}\n')
random.seed(conf.seed)
np.random.seed(conf.seed)
torch.manual_seed(conf.seed)
# save config
torch.save(conf, os.path.join(conf.model_path, conf.exp_name, 'conf.pth'))
# create models
encoder = models.RecursiveEncoder(conf, variational=True, probabilistic=not conf.non_variational)
decoder = models.RecursiveDecoder(conf)
models = [encoder, decoder]
model_names = ['encoder', 'decoder']
'''
# load pretrained part AE/VAE
pretrain_ckpt_dir = os.path.join(conf.model_path, conf.part_pc_exp_name)
print(pretrain_ckpt_dir)
pretrain_ckpt_epoch = conf.part_pc_model_epoch
print(f'Loading ckpt from {pretrain_ckpt_dir}: epoch {pretrain_ckpt_epoch}')
__ = utils.load_checkpoint(
models=[encoder.node_encoder.part_encoder, decoder.node_decoder.part_decoder],
model_names=['part_pc_encoder', 'part_pc_decoder'],
dirname=pretrain_ckpt_dir,
epoch=pretrain_ckpt_epoch,
strict=True)
# set part_encoder and part_decoder BatchNorm to eval mode
encoder.node_encoder.part_encoder.eval()
for param in encoder.node_encoder.part_encoder.parameters():
param.requires_grad = False
decoder.node_decoder.part_decoder.eval()
for param in decoder.node_decoder.part_decoder.parameters():
param.requires_grad = False
'''
# create optimizers
encoder_opt = torch.optim.Adam(encoder.parameters(), lr=conf.lr)
decoder_opt = torch.optim.Adam(decoder.parameters(), lr=conf.lr)
optimizers = [encoder_opt, decoder_opt]
optimizer_names = ['encoder', 'decoder']
# learning rate scheduler
encoder_scheduler = torch.optim.lr_scheduler.StepLR(encoder_opt, \
step_size=conf.lr_decay_every, gamma=conf.lr_decay_by)
decoder_scheduler = torch.optim.lr_scheduler.StepLR(decoder_opt, \
step_size=conf.lr_decay_every, gamma=conf.lr_decay_by)
# create training and validation datasets and data loaders
data_features = ['object']
train_dataset = PartNetDataset(conf.data_path, conf.train_dataset, data_features, \
load_geo=conf.load_geo)
valdt_dataset = PartNetDataset(conf.data_path, conf.val_dataset, data_features, \
load_geo=conf.load_geo)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=conf.batch_size, \
shuffle=True, collate_fn=utils.collate_feats)
valdt_dataloader = torch.utils.data.DataLoader(valdt_dataset, batch_size=conf.batch_size, \
shuffle=True, collate_fn=utils.collate_feats)
# create logs
if not conf.no_console_log:
header = ' Time Epoch Dataset Iteration Progress(%) LR LatentLoss GeoLoss CenterLoss ScaleLoss StructLoss EdgeExists KLDivLoss SymLoss AdjLoss TotalLoss'
    # create tensorboard writers; default to None so the later forward() calls
    # do not raise a NameError when tensorboard logging is disabled
    train_writer, valdt_writer = None, None
    if not conf.no_tb_log:
        # https://github.com/lanpa/tensorboardX
        from tensorboardX import SummaryWriter
        train_writer = SummaryWriter(os.path.join(conf.log_path, conf.exp_name, 'train'))
        valdt_writer = SummaryWriter(os.path.join(conf.log_path, conf.exp_name, 'val'))
# send parameters to device
for m in models:
m.to(device)
for o in optimizers:
utils.optimizer_to_device(o, device)
# start training
print("Starting training ...... ")
flog.write('Starting training ......\n')
start_time = time.time()
last_checkpoint_step = None
last_train_console_log_step, last_valdt_console_log_step = None, None
train_num_batch, valdt_num_batch = len(train_dataloader), len(valdt_dataloader)
# train for every epoch
for epoch in range(conf.epochs):
if not conf.no_console_log:
print(f'training run {conf.exp_name}')
flog.write(f'training run {conf.exp_name}\n')
print(header)
flog.write(header+'\n')
train_batches = enumerate(train_dataloader, 0)
valdt_batches = enumerate(valdt_dataloader, 0)
train_fraction_done, valdt_fraction_done = 0.0, 0.0
valdt_batch_ind = -1
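        # validation batches are drained lazily inside the training loop so that
        # validation progress keeps pace with training progress within the epoch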
# train for every batch
for train_batch_ind, batch in train_batches:
train_fraction_done = (train_batch_ind + 1) / train_num_batch
train_step = epoch * train_num_batch + train_batch_ind
log_console = not conf.no_console_log and (last_train_console_log_step is None or \
train_step - last_train_console_log_step >= conf.console_log_interval)
if log_console:
last_train_console_log_step = train_step
# make sure the models are in eval mode to deactivate BatchNorm for PartEncoder and PartDecoder
# there are no other BatchNorm / Dropout in the rest of the network
for m in models:
m.eval()
# forward pass (including logging)
total_loss = forward(
batch=batch, data_features=data_features, encoder=encoder, decoder=decoder, device=device, conf=conf,
is_valdt=False, step=train_step, epoch=epoch, batch_ind=train_batch_ind, num_batch=train_num_batch, start_time=start_time,
log_console=log_console, log_tb=not conf.no_tb_log, tb_writer=train_writer,
lr=encoder_opt.param_groups[0]['lr'], flog=flog)
            # optimize one step; scheduler.step() runs after optimizer.step(),
            # as required by PyTorch >= 1.1, so the first lr value is not skipped
            encoder_opt.zero_grad()
            decoder_opt.zero_grad()
            total_loss.backward()
            encoder_opt.step()
            decoder_opt.step()
            encoder_scheduler.step()
            decoder_scheduler.step()
# save checkpoint
with torch.no_grad():
if last_checkpoint_step is None or \
train_step - last_checkpoint_step >= conf.checkpoint_interval:
print("Saving checkpoint ...... ", end='', flush=True)
flog.write("Saving checkpoint ...... ")
utils.save_checkpoint(
models=models, model_names=model_names, dirname=os.path.join(conf.model_path, conf.exp_name),
                        epoch=epoch, prepend_epoch=True, optimizers=optimizers, optimizer_names=optimizer_names)
print("DONE")
flog.write("DONE\n")
last_checkpoint_step = train_step
# validate one batch
while valdt_fraction_done <= train_fraction_done and valdt_batch_ind+1 < valdt_num_batch:
valdt_batch_ind, batch = next(valdt_batches)
valdt_fraction_done = (valdt_batch_ind + 1) / valdt_num_batch
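                # place the validation point on the training-step axis so the
                # train and validation curves line up in tensorboard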
valdt_step = (epoch + valdt_fraction_done) * train_num_batch - 1
log_console = not conf.no_console_log and (last_valdt_console_log_step is None or \
valdt_step - last_valdt_console_log_step >= conf.console_log_interval)
if log_console:
last_valdt_console_log_step = valdt_step
# set models to evaluation mode
for m in models:
m.eval()
with torch.no_grad():
# forward pass (including logging)
__ = forward(
batch=batch, data_features=data_features, encoder=encoder, decoder=decoder, device=device, conf=conf,
is_valdt=True, step=valdt_step, epoch=epoch, batch_ind=valdt_batch_ind, num_batch=valdt_num_batch, start_time=start_time,
log_console=log_console, log_tb=not conf.no_tb_log, tb_writer=valdt_writer,
lr=encoder_opt.param_groups[0]['lr'], flog=flog)
# save the final models
print("Saving final checkpoint ...... ", end='', flush=True)
flog.write("Saving final checkpoint ...... ")
utils.save_checkpoint(
models=models, model_names=model_names, dirname=os.path.join(conf.model_path, conf.exp_name),
epoch=epoch, prepend_epoch=False, optimizers=optimizers, optimizer_names=optimizer_names)
print("DONE")
flog.write("DONE\n")
flog.close()
def forward(batch, data_features, encoder, decoder, device, conf,
is_valdt=False, step=None, epoch=None, batch_ind=0, num_batch=1, start_time=0,
log_console=False, log_tb=False, tb_writer=None, lr=None, flog=None):
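    # runs one batch through the encoder/decoder, averages and weights the
    # per-object losses, and logs to console/file/tensorboard as a side effect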
objects = batch[data_features.index('object')]
losses = {
'latent': torch.zeros(1, device=device),
'geo': torch.zeros(1, device=device),
'center': torch.zeros(1, device=device),
'scale': torch.zeros(1, device=device),
'leaf': torch.zeros(1, device=device),
'exists': torch.zeros(1, device=device),
'semantic': torch.zeros(1, device=device),
'edge_exists': torch.zeros(1, device=device),
'kldiv': torch.zeros(1, device=device),
'sym': torch.zeros(1, device=device),
'adj': torch.zeros(1, device=device)}
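    # accumulate each object's losses over the batch; they are averaged and
    # weighted further below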
# process every data in the batch individually
for obj in objects:
obj.to(device)
# encode object to get root code
root_code = encoder.encode_structure(obj=obj)
# get kldiv loss
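        # (the encoder appears to pack the KL term into the second half of its
        # output channels, so torch.chunk splits the latent code from the KL term)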
if not conf.non_variational:
root_code, obj_kldiv_loss = torch.chunk(root_code, 2, 1)
obj_kldiv_loss = -obj_kldiv_loss.sum() # negative kldiv, sum over feature dimensions
losses['kldiv'] = losses['kldiv'] + obj_kldiv_loss
# decode root code to get reconstruction loss
img_fea = obj.root.image.to(device)
# print(img_fea.shape)
        obj_losses = decoder.structure_recon_loss(z=root_code, gt_tree=obj, img_fea=img_fea)
for loss_name, loss in obj_losses.items():
losses[loss_name] = losses[loss_name] + loss
for loss_name in losses.keys():
losses[loss_name] = losses[loss_name] / len(objects)
losses['latent'] *= conf.loss_weight_latent
losses['geo'] *= conf.loss_weight_geo
losses['center'] *= conf.loss_weight_center
losses['scale'] *= conf.loss_weight_scale
losses['leaf'] *= conf.loss_weight_leaf
losses['exists'] *= conf.loss_weight_exists
losses['semantic'] *= conf.loss_weight_semantic
losses['edge_exists'] *= conf.loss_weight_edge_exists
losses['kldiv'] *= conf.loss_weight_kldiv
losses['sym'] *= conf.loss_weight_sym
losses['adj'] *= conf.loss_weight_adj
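    # sum the weighted terms into a single scalar used for backprop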
total_loss = 0
for loss in losses.values():
total_loss += loss
with torch.no_grad():
# log to console
if log_console:
print(
f'''{strftime("%H:%M:%S", time.gmtime(time.time()-start_time)):>9s} '''
f'''{epoch:>5.0f}/{conf.epochs:<5.0f} '''
f'''{'validation' if is_valdt else 'training':^10s} '''
f'''{batch_ind:>5.0f}/{num_batch:<5.0f} '''
f'''{100. * (1+batch_ind+num_batch*epoch) / (num_batch*conf.epochs):>9.1f}% '''
f'''{lr:>5.2E} '''
f'''{losses['latent'].item():>11.2f} '''
f'''{losses['geo'].item():>11.2f} '''
f'''{losses['center'].item():>11.2f} '''
f'''{losses['scale'].item():>11.2f} '''
f'''{(losses['leaf']+losses['exists']+losses['semantic']).item():>11.2f} '''
f'''{losses['edge_exists'].item():>11.2f} '''
f'''{losses['kldiv'].item():>10.2f} '''
f'''{losses['sym'].item():>10.2f} '''
f'''{losses['adj'].item():>10.2f} '''
f'''{total_loss.item():>10.2f}''')
flog.write(
f'''{strftime("%H:%M:%S", time.gmtime(time.time()-start_time)):>9s} '''
f'''{epoch:>5.0f}/{conf.epochs:<5.0f} '''
f'''{'validation' if is_valdt else 'training':^10s} '''
f'''{batch_ind:>5.0f}/{num_batch:<5.0f} '''
f'''{100. * (1+batch_ind+num_batch*epoch) / (num_batch*conf.epochs):>9.1f}% '''
f'''{lr:>5.2E} '''
f'''{losses['latent'].item():>11.2f} '''
f'''{losses['geo'].item():>11.2f} '''
f'''{losses['center'].item():>11.2f} '''
f'''{losses['scale'].item():>11.2f} '''
f'''{(losses['leaf']+losses['exists']+losses['semantic']).item():>11.2f} '''
f'''{losses['edge_exists'].item():>11.2f} '''
f'''{losses['kldiv'].item():>10.2f} '''
f'''{losses['sym'].item():>10.2f} '''
f'''{losses['adj'].item():>10.2f} '''
f'''{total_loss.item():>10.2f}\n''')
flog.flush()
# log to tensorboard
if log_tb and tb_writer is not None:
tb_writer.add_scalar('loss', total_loss.item(), step)
tb_writer.add_scalar('lr', lr, step)
tb_writer.add_scalar('latent_loss', losses['latent'].item(), step)
tb_writer.add_scalar('geo_loss', losses['geo'].item(), step)
tb_writer.add_scalar('center_loss', losses['center'].item(), step)
tb_writer.add_scalar('scale_loss', losses['scale'].item(), step)
tb_writer.add_scalar('leaf_loss', losses['leaf'].item(), step)
tb_writer.add_scalar('exists_loss', losses['exists'].item(), step)
tb_writer.add_scalar('semantic_loss', losses['semantic'].item(), step)
tb_writer.add_scalar('edge_exists_loss', losses['edge_exists'].item(), step)
tb_writer.add_scalar('kldiv_loss', losses['kldiv'].item(), step)
tb_writer.add_scalar('sym_loss', losses['sym'].item(), step)
tb_writer.add_scalar('adj_loss', losses['adj'].item(), step)
return total_loss
if __name__ == '__main__':
sys.setrecursionlimit(5000) # this code uses recursion a lot for code simplicity
parser = ArgumentParser()
parser = add_train_vae_args(parser)
config = parser.parse_args()
config.category = 'Table'
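    # hard-coded category override applied after argument parsing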
Tree.load_category_info(config.category)
train(config)
| 44.113757
| 206
| 0.630045
|
import os
import time
import sys
import shutil
import random
from time import strftime
from argparse import ArgumentParser
import numpy as np
import torch
import torch.utils.data
from config import add_train_vae_args
from data import PartNetDataset, Tree
import utils
# torch.set_num_threads(1)
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
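# the mask only takes effect if it is set before any CUDA context is created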
def train(conf):
# load network model
conf.model_version = 'model_pc_img'
conf.exp_name = 'pc_ae_table_3000_test'
conf.data_path = '/home/zhangxc/tmp/structurenet/data/partnetdata/table_hier_3000'
conf.train_dataset = 'train_no_other_less_than_10_parts.txt'
conf.val_dataset = 'val_no_other_less_than_10_parts.txt'
conf.epochs = 200
conf.part_pc_exp_name = 'part_pc_ae_table'
conf.part_pc_model_epoch = 194
conf.batch_size = 64
conf.num_point = 3000
conf.checkpoint_interval = 54
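    # the hard-coded settings above pin this particular experiment, overriding
    # the corresponding parsed command-line arguments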
models = utils.get_model_module(conf.model_version)
# check if training run already exists. If so, delete it.
if os.path.exists(os.path.join(conf.log_path, conf.exp_name)) or \
os.path.exists(os.path.join(conf.model_path, conf.exp_name)):
response = input('A training run named "%s" already exists, overwrite? (y/n) ' % (conf.exp_name))
if response != 'y':
sys.exit()
if os.path.exists(os.path.join(conf.log_path, conf.exp_name)):
shutil.rmtree(os.path.join(conf.log_path, conf.exp_name))
if os.path.exists(os.path.join(conf.model_path, conf.exp_name)):
shutil.rmtree(os.path.join(conf.model_path, conf.exp_name))
# create directories for this run
os.makedirs(os.path.join(conf.model_path, conf.exp_name))
os.makedirs(os.path.join(conf.log_path, conf.exp_name))
# file log
flog = open(os.path.join(conf.log_path, conf.exp_name, 'train.log'), 'w')
# set training device
device = torch.device(conf.device)
print(f'Using device: {conf.device}')
flog.write(f'Using device: {conf.device}\n')
# log the object category information
print(f'Object Category: {conf.category}')
flog.write(f'Object Category: {conf.category}\n')
# control randomness
if conf.seed < 0:
conf.seed = random.randint(1, 10000)
print("Random Seed: %d" % (conf.seed))
flog.write(f'Random Seed: {conf.seed}\n')
random.seed(conf.seed)
np.random.seed(conf.seed)
torch.manual_seed(conf.seed)
# save config
torch.save(conf, os.path.join(conf.model_path, conf.exp_name, 'conf.pth'))
# create models
encoder = models.RecursiveEncoder(conf, variational=True, probabilistic=not conf.non_variational)
decoder = models.RecursiveDecoder(conf)
models = [encoder, decoder]
model_names = ['encoder', 'decoder']
# create optimizers
encoder_opt = torch.optim.Adam(encoder.parameters(), lr=conf.lr)
decoder_opt = torch.optim.Adam(decoder.parameters(), lr=conf.lr)
optimizers = [encoder_opt, decoder_opt]
optimizer_names = ['encoder', 'decoder']
# learning rate scheduler
encoder_scheduler = torch.optim.lr_scheduler.StepLR(encoder_opt, \
step_size=conf.lr_decay_every, gamma=conf.lr_decay_by)
decoder_scheduler = torch.optim.lr_scheduler.StepLR(decoder_opt, \
step_size=conf.lr_decay_every, gamma=conf.lr_decay_by)
# create training and validation datasets and data loaders
data_features = ['object']
train_dataset = PartNetDataset(conf.data_path, conf.train_dataset, data_features, \
load_geo=conf.load_geo)
valdt_dataset = PartNetDataset(conf.data_path, conf.val_dataset, data_features, \
load_geo=conf.load_geo)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=conf.batch_size, \
shuffle=True, collate_fn=utils.collate_feats)
valdt_dataloader = torch.utils.data.DataLoader(valdt_dataset, batch_size=conf.batch_size, \
shuffle=True, collate_fn=utils.collate_feats)
# create logs
if not conf.no_console_log:
header = ' Time Epoch Dataset Iteration Progress(%) LR LatentLoss GeoLoss CenterLoss ScaleLoss StructLoss EdgeExists KLDivLoss SymLoss AdjLoss TotalLoss'
    train_writer, valdt_writer = None, None
    if not conf.no_tb_log:
        # https://github.com/lanpa/tensorboardX
        from tensorboardX import SummaryWriter
        train_writer = SummaryWriter(os.path.join(conf.log_path, conf.exp_name, 'train'))
        valdt_writer = SummaryWriter(os.path.join(conf.log_path, conf.exp_name, 'val'))
# send parameters to device
for m in models:
m.to(device)
for o in optimizers:
utils.optimizer_to_device(o, device)
# start training
print("Starting training ...... ")
flog.write('Starting training ......\n')
start_time = time.time()
last_checkpoint_step = None
last_train_console_log_step, last_valdt_console_log_step = None, None
train_num_batch, valdt_num_batch = len(train_dataloader), len(valdt_dataloader)
# train for every epoch
for epoch in range(conf.epochs):
if not conf.no_console_log:
print(f'training run {conf.exp_name}')
flog.write(f'training run {conf.exp_name}\n')
print(header)
flog.write(header+'\n')
train_batches = enumerate(train_dataloader, 0)
valdt_batches = enumerate(valdt_dataloader, 0)
train_fraction_done, valdt_fraction_done = 0.0, 0.0
valdt_batch_ind = -1
# train for every batch
for train_batch_ind, batch in train_batches:
train_fraction_done = (train_batch_ind + 1) / train_num_batch
train_step = epoch * train_num_batch + train_batch_ind
log_console = not conf.no_console_log and (last_train_console_log_step is None or \
train_step - last_train_console_log_step >= conf.console_log_interval)
if log_console:
last_train_console_log_step = train_step
# make sure the models are in eval mode to deactivate BatchNorm for PartEncoder and PartDecoder
# there are no other BatchNorm / Dropout in the rest of the network
for m in models:
m.eval()
# forward pass (including logging)
total_loss = forward(
batch=batch, data_features=data_features, encoder=encoder, decoder=decoder, device=device, conf=conf,
is_valdt=False, step=train_step, epoch=epoch, batch_ind=train_batch_ind, num_batch=train_num_batch, start_time=start_time,
log_console=log_console, log_tb=not conf.no_tb_log, tb_writer=train_writer,
lr=encoder_opt.param_groups[0]['lr'], flog=flog)
            # optimize one step
            encoder_opt.zero_grad()
            decoder_opt.zero_grad()
            total_loss.backward()
            encoder_opt.step()
            decoder_opt.step()
            encoder_scheduler.step()
            decoder_scheduler.step()
# save checkpoint
with torch.no_grad():
if last_checkpoint_step is None or \
train_step - last_checkpoint_step >= conf.checkpoint_interval:
print("Saving checkpoint ...... ", end='', flush=True)
flog.write("Saving checkpoint ...... ")
utils.save_checkpoint(
models=models, model_names=model_names, dirname=os.path.join(conf.model_path, conf.exp_name),
                        epoch=epoch, prepend_epoch=True, optimizers=optimizers, optimizer_names=optimizer_names)
print("DONE")
flog.write("DONE\n")
last_checkpoint_step = train_step
# validate one batch
while valdt_fraction_done <= train_fraction_done and valdt_batch_ind+1 < valdt_num_batch:
valdt_batch_ind, batch = next(valdt_batches)
valdt_fraction_done = (valdt_batch_ind + 1) / valdt_num_batch
valdt_step = (epoch + valdt_fraction_done) * train_num_batch - 1
log_console = not conf.no_console_log and (last_valdt_console_log_step is None or \
valdt_step - last_valdt_console_log_step >= conf.console_log_interval)
if log_console:
last_valdt_console_log_step = valdt_step
# set models to evaluation mode
for m in models:
m.eval()
with torch.no_grad():
# forward pass (including logging)
__ = forward(
batch=batch, data_features=data_features, encoder=encoder, decoder=decoder, device=device, conf=conf,
is_valdt=True, step=valdt_step, epoch=epoch, batch_ind=valdt_batch_ind, num_batch=valdt_num_batch, start_time=start_time,
log_console=log_console, log_tb=not conf.no_tb_log, tb_writer=valdt_writer,
lr=encoder_opt.param_groups[0]['lr'], flog=flog)
# save the final models
print("Saving final checkpoint ...... ", end='', flush=True)
flog.write("Saving final checkpoint ...... ")
utils.save_checkpoint(
models=models, model_names=model_names, dirname=os.path.join(conf.model_path, conf.exp_name),
epoch=epoch, prepend_epoch=False, optimizers=optimizers, optimizer_names=optimizer_names)
print("DONE")
flog.write("DONE\n")
flog.close()
def forward(batch, data_features, encoder, decoder, device, conf,
is_valdt=False, step=None, epoch=None, batch_ind=0, num_batch=1, start_time=0,
log_console=False, log_tb=False, tb_writer=None, lr=None, flog=None):
objects = batch[data_features.index('object')]
losses = {
'latent': torch.zeros(1, device=device),
'geo': torch.zeros(1, device=device),
'center': torch.zeros(1, device=device),
'scale': torch.zeros(1, device=device),
'leaf': torch.zeros(1, device=device),
'exists': torch.zeros(1, device=device),
'semantic': torch.zeros(1, device=device),
'edge_exists': torch.zeros(1, device=device),
'kldiv': torch.zeros(1, device=device),
'sym': torch.zeros(1, device=device),
'adj': torch.zeros(1, device=device)}
# process every data in the batch individually
for obj in objects:
obj.to(device)
# encode object to get root code
root_code = encoder.encode_structure(obj=obj)
# get kldiv loss
if not conf.non_variational:
root_code, obj_kldiv_loss = torch.chunk(root_code, 2, 1)
obj_kldiv_loss = -obj_kldiv_loss.sum() # negative kldiv, sum over feature dimensions
losses['kldiv'] = losses['kldiv'] + obj_kldiv_loss
# decode root code to get reconstruction loss
img_fea = obj.root.image.to(device)
# print(img_fea.shape)
        obj_losses = decoder.structure_recon_loss(z=root_code, gt_tree=obj, img_fea=img_fea)
for loss_name, loss in obj_losses.items():
losses[loss_name] = losses[loss_name] + loss
for loss_name in losses.keys():
losses[loss_name] = losses[loss_name] / len(objects)
losses['latent'] *= conf.loss_weight_latent
losses['geo'] *= conf.loss_weight_geo
losses['center'] *= conf.loss_weight_center
losses['scale'] *= conf.loss_weight_scale
losses['leaf'] *= conf.loss_weight_leaf
losses['exists'] *= conf.loss_weight_exists
losses['semantic'] *= conf.loss_weight_semantic
losses['edge_exists'] *= conf.loss_weight_edge_exists
losses['kldiv'] *= conf.loss_weight_kldiv
losses['sym'] *= conf.loss_weight_sym
losses['adj'] *= conf.loss_weight_adj
total_loss = 0
for loss in losses.values():
total_loss += loss
with torch.no_grad():
# log to console
if log_console:
print(
f'''{strftime("%H:%M:%S", time.gmtime(time.time()-start_time)):>9s} '''
f'''{epoch:>5.0f}/{conf.epochs:<5.0f} '''
f'''{'validation' if is_valdt else 'training':^10s} '''
f'''{batch_ind:>5.0f}/{num_batch:<5.0f} '''
f'''{100. * (1+batch_ind+num_batch*epoch) / (num_batch*conf.epochs):>9.1f}% '''
f'''{lr:>5.2E} '''
f'''{losses['latent'].item():>11.2f} '''
f'''{losses['geo'].item():>11.2f} '''
f'''{losses['center'].item():>11.2f} '''
f'''{losses['scale'].item():>11.2f} '''
f'''{(losses['leaf']+losses['exists']+losses['semantic']).item():>11.2f} '''
f'''{losses['edge_exists'].item():>11.2f} '''
f'''{losses['kldiv'].item():>10.2f} '''
f'''{losses['sym'].item():>10.2f} '''
f'''{losses['adj'].item():>10.2f} '''
f'''{total_loss.item():>10.2f}''')
flog.write(
f'''{strftime("%H:%M:%S", time.gmtime(time.time()-start_time)):>9s} '''
f'''{epoch:>5.0f}/{conf.epochs:<5.0f} '''
f'''{'validation' if is_valdt else 'training':^10s} '''
f'''{batch_ind:>5.0f}/{num_batch:<5.0f} '''
f'''{100. * (1+batch_ind+num_batch*epoch) / (num_batch*conf.epochs):>9.1f}% '''
f'''{lr:>5.2E} '''
f'''{losses['latent'].item():>11.2f} '''
f'''{losses['geo'].item():>11.2f} '''
f'''{losses['center'].item():>11.2f} '''
f'''{losses['scale'].item():>11.2f} '''
f'''{(losses['leaf']+losses['exists']+losses['semantic']).item():>11.2f} '''
f'''{losses['edge_exists'].item():>11.2f} '''
f'''{losses['kldiv'].item():>10.2f} '''
f'''{losses['sym'].item():>10.2f} '''
f'''{losses['adj'].item():>10.2f} '''
f'''{total_loss.item():>10.2f}\n''')
flog.flush()
# log to tensorboard
if log_tb and tb_writer is not None:
tb_writer.add_scalar('loss', total_loss.item(), step)
tb_writer.add_scalar('lr', lr, step)
tb_writer.add_scalar('latent_loss', losses['latent'].item(), step)
tb_writer.add_scalar('geo_loss', losses['geo'].item(), step)
tb_writer.add_scalar('center_loss', losses['center'].item(), step)
tb_writer.add_scalar('scale_loss', losses['scale'].item(), step)
tb_writer.add_scalar('leaf_loss', losses['leaf'].item(), step)
tb_writer.add_scalar('exists_loss', losses['exists'].item(), step)
tb_writer.add_scalar('semantic_loss', losses['semantic'].item(), step)
tb_writer.add_scalar('edge_exists_loss', losses['edge_exists'].item(), step)
tb_writer.add_scalar('kldiv_loss', losses['kldiv'].item(), step)
tb_writer.add_scalar('sym_loss', losses['sym'].item(), step)
tb_writer.add_scalar('adj_loss', losses['adj'].item(), step)
return total_loss
if __name__ == '__main__':
sys.setrecursionlimit(5000) # this code uses recursion a lot for code simplicity
parser = ArgumentParser()
parser = add_train_vae_args(parser)
config = parser.parse_args()
config.category = 'Table'
Tree.load_category_info(config.category)
train(config)
| true
| true
|
1c402fbf836bccd97ab91e5f40db2a16171cb7b4
| 2,931
|
py
|
Python
|
homeassistant/components/syncthru/binary_sensor.py
|
SergioBPereira/core
|
4501906da369e23b304857b8a3512798696f26a0
|
[
"Apache-2.0"
] | 5
|
2017-01-26T16:33:09.000Z
|
2018-07-20T13:50:47.000Z
|
homeassistant/components/syncthru/binary_sensor.py
|
SergioBPereira/core
|
4501906da369e23b304857b8a3512798696f26a0
|
[
"Apache-2.0"
] | 87
|
2020-07-06T22:22:54.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/syncthru/binary_sensor.py
|
SergioBPereira/core
|
4501906da369e23b304857b8a3512798696f26a0
|
[
"Apache-2.0"
] | 7
|
2018-10-04T10:12:45.000Z
|
2021-12-29T20:55:40.000Z
|
"""Support for Samsung Printers with SyncThru web interface."""
from pysyncthru import SyncThru, SyncthruState
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_PROBLEM,
BinarySensorEntity,
)
from homeassistant.const import CONF_NAME
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from . import device_identifiers
from .const import DOMAIN
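# maps printer states to the problem sensor: True flags a problem, False is
# healthy, and None (printer offline) leaves the sensor state unknown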
SYNCTHRU_STATE_PROBLEM = {
SyncthruState.INVALID: True,
SyncthruState.OFFLINE: None,
SyncthruState.NORMAL: False,
SyncthruState.UNKNOWN: True,
SyncthruState.WARNING: True,
SyncthruState.TESTING: False,
SyncthruState.ERROR: True,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up from config entry."""
coordinator: DataUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
name = config_entry.data[CONF_NAME]
entities = [
SyncThruOnlineSensor(coordinator, name),
SyncThruProblemSensor(coordinator, name),
]
async_add_entities(entities)
class SyncThruBinarySensor(CoordinatorEntity, BinarySensorEntity):
"""Implementation of an abstract Samsung Printer binary sensor platform."""
def __init__(self, coordinator, name):
"""Initialize the sensor."""
super().__init__(coordinator)
self.syncthru: SyncThru = coordinator.data
self._name = name
self._id_suffix = ""
@property
def unique_id(self):
"""Return unique ID for the sensor."""
serial = self.syncthru.serial_number()
return f"{serial}{self._id_suffix}" if serial else None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_info(self):
"""Return device information."""
return {"identifiers": device_identifiers(self.syncthru)}
class SyncThruOnlineSensor(SyncThruBinarySensor):
"""Implementation of a sensor that checks whether is turned on/online."""
_attr_device_class = DEVICE_CLASS_CONNECTIVITY
def __init__(self, syncthru, name):
"""Initialize the sensor."""
super().__init__(syncthru, name)
self._id_suffix = "_online"
@property
def is_on(self):
"""Set the state to whether the printer is online."""
return self.syncthru.is_online()
class SyncThruProblemSensor(SyncThruBinarySensor):
"""Implementation of a sensor that checks whether the printer works correctly."""
_attr_device_class = DEVICE_CLASS_PROBLEM
def __init__(self, syncthru, name):
"""Initialize the sensor."""
super().__init__(syncthru, name)
self._id_suffix = "_problem"
@property
def is_on(self):
"""Set the state to whether there is a problem with the printer."""
return SYNCTHRU_STATE_PROBLEM[self.syncthru.device_status()]
| 29.019802
| 85
| 0.701467
|
from pysyncthru import SyncThru, SyncthruState
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_PROBLEM,
BinarySensorEntity,
)
from homeassistant.const import CONF_NAME
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from . import device_identifiers
from .const import DOMAIN
SYNCTHRU_STATE_PROBLEM = {
SyncthruState.INVALID: True,
SyncthruState.OFFLINE: None,
SyncthruState.NORMAL: False,
SyncthruState.UNKNOWN: True,
SyncthruState.WARNING: True,
SyncthruState.TESTING: False,
SyncthruState.ERROR: True,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
coordinator: DataUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
name = config_entry.data[CONF_NAME]
entities = [
SyncThruOnlineSensor(coordinator, name),
SyncThruProblemSensor(coordinator, name),
]
async_add_entities(entities)
class SyncThruBinarySensor(CoordinatorEntity, BinarySensorEntity):
def __init__(self, coordinator, name):
super().__init__(coordinator)
self.syncthru: SyncThru = coordinator.data
self._name = name
self._id_suffix = ""
@property
def unique_id(self):
serial = self.syncthru.serial_number()
return f"{serial}{self._id_suffix}" if serial else None
@property
def name(self):
return self._name
@property
def device_info(self):
return {"identifiers": device_identifiers(self.syncthru)}
class SyncThruOnlineSensor(SyncThruBinarySensor):
_attr_device_class = DEVICE_CLASS_CONNECTIVITY
def __init__(self, syncthru, name):
super().__init__(syncthru, name)
self._id_suffix = "_online"
@property
def is_on(self):
return self.syncthru.is_online()
class SyncThruProblemSensor(SyncThruBinarySensor):
_attr_device_class = DEVICE_CLASS_PROBLEM
def __init__(self, syncthru, name):
super().__init__(syncthru, name)
self._id_suffix = "_problem"
@property
def is_on(self):
return SYNCTHRU_STATE_PROBLEM[self.syncthru.device_status()]
| true
| true
|
1c40300dd755571a66248088c4c3405a029e7a3f
| 1,064
|
py
|
Python
|
src/app/models.py
|
wking/spdx-online-tools
|
b9e8373303a90c379cf2afb57904fc930413649e
|
[
"Apache-2.0"
] | null | null | null |
src/app/models.py
|
wking/spdx-online-tools
|
b9e8373303a90c379cf2afb57904fc930413649e
|
[
"Apache-2.0"
] | null | null | null |
src/app/models.py
|
wking/spdx-online-tools
|
b9e8373303a90c379cf2afb57904fc930413649e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Rohit Lodha
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from django.db import models
from datetime import datetime
from django import forms
from django.contrib.auth.models import User
class UserID(models.Model):
    user = models.OneToOneField(User, on_delete=models.CASCADE)  # on_delete is mandatory from Django 2.0 onwards
    organisation = models.CharField("Organisation", max_length=64, null=False, blank=False)
    lastlogin = models.DateField("Last Login", default=datetime.now, blank=True)
def __str__(self):
return self.user.username
| 36.689655
| 90
| 0.760338
|
from __future__ import unicode_literals
from django.db import models
from datetime import datetime
from django import forms
from django.contrib.auth.models import User
class UserID(models.Model):
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    organisation = models.CharField("Organisation", max_length=64, null=False, blank=False)
    lastlogin = models.DateField("Last Login", default=datetime.now, blank=True)
def __str__(self):
return self.user.username
| true
| true
|
1c403039845af0999712ad952c18ca9c4f246b16
| 60
|
py
|
Python
|
river/base/multi_output.py
|
fox-ds/river
|
9ce947ebfc012ec7059de0a09c765b2da7fc1d25
|
[
"BSD-3-Clause"
] | 2,184
|
2020-11-11T12:31:12.000Z
|
2022-03-31T16:45:41.000Z
|
river/base/multi_output.py
|
raphaelsty/river
|
2e0b25a2ef2d2ba9ec080cf86a491f7465433b18
|
[
"BSD-3-Clause"
] | 328
|
2019-01-25T13:48:43.000Z
|
2020-11-11T11:41:44.000Z
|
river/base/multi_output.py
|
raphaelsty/river
|
2e0b25a2ef2d2ba9ec080cf86a491f7465433b18
|
[
"BSD-3-Clause"
] | 240
|
2020-11-11T14:25:03.000Z
|
2022-03-31T08:25:50.000Z
|
class MultiOutputMixin:
"""A multi-output estimator."""
| 20
| 35
| 0.7
|
class MultiOutputMixin:
| true
| true
|