hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0098a592b7c07c057dde9192d552f78e133677
| 15,206
|
py
|
Python
|
gs_quant/target/content.py
|
webclinic017/gs-quant
|
ebb8ee5e1d954ab362aa567293906ce51818cfa8
|
[
"Apache-2.0"
] | null | null | null |
gs_quant/target/content.py
|
webclinic017/gs-quant
|
ebb8ee5e1d954ab362aa567293906ce51818cfa8
|
[
"Apache-2.0"
] | null | null | null |
gs_quant/target/content.py
|
webclinic017/gs-quant
|
ebb8ee5e1d954ab362aa567293906ce51818cfa8
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from gs_quant.base import *
from gs_quant.common import *
import datetime
from typing import Dict, Optional, Tuple, Union
from dataclasses import dataclass, field
from dataclasses_json import LetterCase, config, dataclass_json
from enum import Enum
class Division(EnumBase, Enum):
    """Enumeration of division codes a content piece can be tagged with."""
    SECDIV = 'SECDIV'
    IBD = 'IBD'
    RISK = 'RISK'
    GIR = 'GIR'
    EO = 'EO'
    KENSHO = 'KENSHO'
class InvestmentRecommendationDirection(EnumBase, Enum):
    """Direction of an investment recommendation."""
    Buy = 'Buy'
    Hold = 'Hold'
    Sell = 'Sell'
    Strategy = 'Strategy'
class Language(EnumBase, Enum):
    """ISO 639-1 language code for the content piece.

    Member names mirror the two-letter codes; codes that collide with
    Python keywords (``as``, ``is``, ``or``) carry a leading underscore,
    while their string values keep the plain code.
    """
    an = 'an'
    ar = 'ar'
    _as = 'as'  # 'as' is a Python keyword
    av = 'av'
    ay = 'ay'
    az = 'az'
    ba = 'ba'
    be = 'be'
    bg = 'bg'
    bh = 'bh'
    bi = 'bi'
    bm = 'bm'
    bn = 'bn'
    bo = 'bo'
    br = 'br'
    bs = 'bs'
    ca = 'ca'
    ce = 'ce'
    ch = 'ch'
    co = 'co'
    cr = 'cr'
    cs = 'cs'
    cu = 'cu'
    cv = 'cv'
    cy = 'cy'
    da = 'da'
    de = 'de'
    dv = 'dv'
    dz = 'dz'
    ee = 'ee'
    el = 'el'
    en = 'en'
    eo = 'eo'
    es = 'es'
    et = 'et'
    eu = 'eu'
    fa = 'fa'
    ff = 'ff'
    fi = 'fi'
    fj = 'fj'
    fo = 'fo'
    fr = 'fr'
    fy = 'fy'
    ga = 'ga'
    gd = 'gd'
    gl = 'gl'
    gn = 'gn'
    gu = 'gu'
    gv = 'gv'
    ha = 'ha'
    he = 'he'
    hi = 'hi'
    ho = 'ho'
    hr = 'hr'
    ht = 'ht'
    hu = 'hu'
    hy = 'hy'
    hz = 'hz'
    ia = 'ia'
    id = 'id'
    ie = 'ie'
    ig = 'ig'
    ii = 'ii'
    ik = 'ik'
    io = 'io'
    _is = 'is'  # 'is' is a Python keyword
    it = 'it'
    iu = 'iu'
    ja = 'ja'
    jv = 'jv'
    ka = 'ka'
    kg = 'kg'
    ki = 'ki'
    kj = 'kj'
    kk = 'kk'
    kl = 'kl'
    km = 'km'
    kn = 'kn'
    ko = 'ko'
    kr = 'kr'
    ks = 'ks'
    ku = 'ku'
    kv = 'kv'
    kw = 'kw'
    ky = 'ky'
    la = 'la'
    lb = 'lb'
    lg = 'lg'
    li = 'li'
    ln = 'ln'
    lo = 'lo'
    lt = 'lt'
    lu = 'lu'
    lv = 'lv'
    mg = 'mg'
    mh = 'mh'
    mi = 'mi'
    mk = 'mk'
    ml = 'ml'
    mn = 'mn'
    mr = 'mr'
    ms = 'ms'
    mt = 'mt'
    my = 'my'
    na = 'na'
    nb = 'nb'
    nd = 'nd'
    ne = 'ne'
    ng = 'ng'
    nl = 'nl'
    nn = 'nn'
    no = 'no'
    nr = 'nr'
    nv = 'nv'
    ny = 'ny'
    oc = 'oc'
    oj = 'oj'
    om = 'om'
    _or = 'or'  # 'or' is a Python keyword
    os = 'os'
    pa = 'pa'
    pi = 'pi'
    pl = 'pl'
    ps = 'ps'
    pt = 'pt'
    qu = 'qu'
    rm = 'rm'
    rn = 'rn'
    ro = 'ro'
    ru = 'ru'
    rw = 'rw'
    sa = 'sa'
    sc = 'sc'
    sd = 'sd'
    se = 'se'
    sg = 'sg'
    si = 'si'
    sk = 'sk'
    sl = 'sl'
    sm = 'sm'
    sn = 'sn'
    so = 'so'
    sq = 'sq'
    sr = 'sr'
    ss = 'ss'
    st = 'st'
    su = 'su'
    sv = 'sv'
    sw = 'sw'
    ta = 'ta'
    te = 'te'
    tg = 'tg'
    th = 'th'
    ti = 'ti'
    tk = 'tk'
    tl = 'tl'
    tn = 'tn'
    to = 'to'
    tr = 'tr'
    ts = 'ts'
    tt = 'tt'
    tw = 'tw'
    ty = 'ty'
    ug = 'ug'
    uk = 'uk'
    ur = 'ur'
    uz = 'uz'
    ve = 've'
    vi = 'vi'
    vo = 'vo'
    wa = 'wa'
    wo = 'wo'
    xh = 'xh'
    yi = 'yi'
    yo = 'yo'
    za = 'za'
    zh = 'zh'
    zu = 'zu'
class Origin(EnumBase, Enum):
    """Where the content piece originated from."""
    WEB = 'WEB'
    API = 'API'
    EMAIL = 'EMAIL'
    BLOG = 'BLOG'
    ARTICLE = 'ARTICLE'
class QueryableStatus(EnumBase, Enum):
    """Status/state of a content piece that can be queried by a user."""
    Draft = 'Draft'
    Published = 'Published'
    Replaced = 'Replaced'
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class Content(Base):
    """Raw payload of a content piece: body text plus its MIME type and encoding.

    Serialized to/from camelCase JSON via dataclasses_json.
    """
    # Annotated non-Optional but defaulted to None — presumably required by
    # the API and validated elsewhere; pattern used throughout this module.
    body: str = field(default=None, metadata=field_metadata)
    mime_type: object = field(default=None, metadata=field_metadata)
    encoding: object = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
class Object(DictBase):
    """Free-form dictionary-backed object with no declared fields."""
    pass
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class Author(Base):
    """Author of a content piece: identifier, name, division and contact details."""
    # Serialized under JSON key "id"; excluded from output when None.
    id_: Optional[str] = field(default=None, metadata=config(field_name='id', exclude=exclude_none))
    name: Optional[str] = field(default=None, metadata=field_metadata)
    division: Optional[Division] = field(default=None, metadata=field_metadata)
    email: Optional[str] = field(default=None, metadata=field_metadata)
    title: Optional[str] = field(default=None, metadata=field_metadata)
    kerberos: Optional[str] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class BulkDeleteContentResponse(Base):
    """Response envelope for a bulk content delete: status, message and string data."""
    status: Optional[int] = field(default=None, metadata=field_metadata)
    message: Optional[str] = field(default=None, metadata=field_metadata)
    data: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class Certification(Base):
    """Certification details for a content piece (submission id/state, version,
    allowed distribution and workflow tags)."""
    submission_id: str = field(default=None, metadata=field_metadata)
    version: str = field(default=None, metadata=field_metadata)
    submission_state: object = field(default=None, metadata=field_metadata)
    allowed_distribution: Tuple[Object, ...] = field(default=None, metadata=field_metadata)
    etask_process_instance_id: Optional[str] = field(default=None, metadata=field_metadata)
    # NOTE(review): element type is Tuple[None, ...] in the generated schema —
    # looks odd; confirm against the service definition before relying on it.
    tags: Optional[Tuple[None, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class DeleteContentResponse(Base):
    """Response envelope for a single content delete: status, message and string data."""
    status: Optional[int] = field(default=None, metadata=field_metadata)
    message: Optional[str] = field(default=None, metadata=field_metadata)
    data: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class InvestmentRecommendationAsset(Base):
    """Recommendation for a known asset, referenced by asset id."""
    asset_id: str = field(default=None, metadata=field_metadata)
    direction: Optional[InvestmentRecommendationDirection] = field(default=None, metadata=field_metadata)
    currency: Optional[str] = field(default=None, metadata=field_metadata)
    price: Optional[float] = field(default=None, metadata=field_metadata)
    price_target: Optional[float] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class InvestmentRecommendationCustomAsset(Base):
    """Recommendation for a custom asset, referenced by name rather than id.

    Mirrors InvestmentRecommendationAsset except the asset is identified
    by ``asset_name`` instead of ``asset_id``.
    """
    asset_name: str = field(default=None, metadata=field_metadata)
    direction: Optional[InvestmentRecommendationDirection] = field(default=None, metadata=field_metadata)
    currency: Optional[str] = field(default=None, metadata=field_metadata)
    price: Optional[float] = field(default=None, metadata=field_metadata)
    price_target: Optional[float] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ContentResponse(Base):
    """A single content piece as returned by the service, including version,
    entitlements, audit timestamps and the payload itself."""
    # Serialized under JSON key "id"; excluded from output when None.
    id_: Optional[str] = field(default=None, metadata=config(field_name='id', exclude=exclude_none))
    version: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=field_metadata)
    entitlements: Optional[Entitlements] = field(default=None, metadata=field_metadata)
    entitlement_exclusions: Optional[EntitlementExclusions] = field(default=None, metadata=field_metadata)
    created_by_id: Optional[str] = field(default=None, metadata=field_metadata)
    created_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    last_updated_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    channels: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    content: Optional[Content] = field(default=None, metadata=field_metadata)
    language: Optional[Language] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ContentUpdateRequest(Base):
    """Request payload to update an existing content piece; all fields optional."""
    name: Optional[str] = field(default=None, metadata=field_metadata)
    entitlements: Optional[Entitlements] = field(default=None, metadata=field_metadata)
    entitlement_exclusions: Optional[EntitlementExclusions] = field(default=None, metadata=field_metadata)
    content: Optional[Content] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class InvestmentRecommendations(Base):
    """Container grouping asset-id-based and custom (name-based) recommendations."""
    assets: Tuple[InvestmentRecommendationAsset, ...] = field(default=None, metadata=field_metadata)
    custom_assets: Optional[Tuple[InvestmentRecommendationCustomAsset, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class BulkContentUpdateRequestItem(Base):
    """One item of a bulk update: the target content id plus its update payload."""
    # Serialized under JSON key "id"; excluded from output when None.
    id_: Optional[str] = field(default=None, metadata=config(field_name='id', exclude=exclude_none))
    update: Optional[ContentUpdateRequest] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ContentAuditFields(Base):
    """Audit metadata of a content piece: ids, entitlements, authors and timestamps."""
    # Serialized under JSON key "id"; excluded from output when None.
    id_: Optional[str] = field(default=None, metadata=config(field_name='id', exclude=exclude_none))
    version: Optional[str] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=field_metadata)
    entitlements: Optional[Entitlements] = field(default=None, metadata=field_metadata)
    entitlement_exclusions: Optional[EntitlementExclusions] = field(default=None, metadata=field_metadata)
    created_by_id: Optional[str] = field(default=None, metadata=field_metadata)
    authors: Optional[Tuple[Author, ...]] = field(default=None, metadata=field_metadata)
    created_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
    last_updated_time: Optional[datetime.datetime] = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ContentParameters(Base):
    """Descriptive parameters of a content piece: authors, language, status,
    tags, attachments, certification and recommendation metadata."""
    # author_ids and language are annotated non-Optional but defaulted to
    # None — presumably required by the API and validated elsewhere.
    author_ids: Tuple[str, ...] = field(default=None, metadata=field_metadata)
    language: Language = field(default=None, metadata=field_metadata)
    status: Optional[object] = field(default=None, metadata=field_metadata)
    source: Optional[Division] = field(default=None, metadata=field_metadata)
    tags: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    slug: Optional[str] = field(default=None, metadata=field_metadata)
    attachments: Optional[Tuple[Content, ...]] = field(default=None, metadata=field_metadata)
    certification: Optional[Certification] = field(default=None, metadata=field_metadata)
    certification_type: Optional[object] = field(default=None, metadata=field_metadata)
    asset_ids: Optional[Tuple[str, ...]] = field(default=None, metadata=field_metadata)
    origin: Optional[Origin] = field(default=None, metadata=field_metadata)
    investment_recommendations: Optional[InvestmentRecommendations] = field(default=None, metadata=field_metadata)
    is_flow: Optional[bool] = field(default=None, metadata=field_metadata)
    is_research_summary: Optional[bool] = field(default=None, metadata=field_metadata)
    is_restricted: Optional[bool] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class GetManyContentsResponse(Base):
    """Response envelope for a multi-content query: status, message and a
    tuple of ContentResponse items."""
    status: Optional[int] = field(default=None, metadata=field_metadata)
    message: Optional[str] = field(default=None, metadata=field_metadata)
    data: Optional[Tuple[ContentResponse, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class BulkContentUpdateResponse(Base):
    """Response envelope for a bulk update: status, message and the audit
    fields of each updated content piece."""
    status: Optional[int] = field(default=None, metadata=field_metadata)
    message: Optional[str] = field(default=None, metadata=field_metadata)
    data: Optional[Tuple[ContentAuditFields, ...]] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ContentCreateRequest(Base):
    """Request payload to create a content piece.

    All fields are annotated non-Optional but defaulted to None —
    presumably required by the API and validated elsewhere.
    """
    name: str = field(default=None, metadata=field_metadata)
    entitlements: Entitlements = field(default=None, metadata=field_metadata)
    entitlement_exclusions: EntitlementExclusions = field(default=None, metadata=field_metadata)
    content: Content = field(default=None, metadata=field_metadata)
    parameters: ContentParameters = field(default=None, metadata=field_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ContentCreateResponse(Base):
    """Response envelope for a content create: status, message and the new
    content's audit fields."""
    status: Optional[int] = field(default=None, metadata=field_metadata)
    message: Optional[str] = field(default=None, metadata=field_metadata)
    data: Optional[ContentAuditFields] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
@handle_camel_case_args
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(unsafe_hash=True, repr=False)
class ContentUpdateResponse(Base):
    """Response envelope for a content update: status, message and the updated
    content's audit fields."""
    status: Optional[int] = field(default=None, metadata=field_metadata)
    message: Optional[str] = field(default=None, metadata=field_metadata)
    data: Optional[ContentAuditFields] = field(default=None, metadata=field_metadata)
    name: Optional[str] = field(default=None, metadata=name_metadata)
| 32.91342
| 123
| 0.691964
|
4a00990617d75ff066209837f8687f4c6daaf3df
| 8,564
|
py
|
Python
|
pysnmp-with-texts/DCS3FRU-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/DCS3FRU-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/DCS3FRU-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module DCS3FRU-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DCS3FRU-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:37:15 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Resolve the symbols this MIB depends on through the MIB builder.
# NOTE(review): `mibBuilder` is injected by the pysnmp MIB loader when this
# generated module is executed; it is not defined in this file.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibIdentifier, Integer32, ObjectIdentity, Unsigned32, Bits, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Counter32, Counter64, IpAddress, Gauge32, NotificationType, enterprises, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Integer32", "ObjectIdentity", "Unsigned32", "Bits", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Counter32", "Counter64", "IpAddress", "Gauge32", "NotificationType", "enterprises", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# Object identifier prefixes: Dell enterprise arc (1.3.6.1.4.1.674) down to
# the FRU group used by the table definitions below.
dell = MibIdentifier((1, 3, 6, 1, 4, 1, 674))
server3 = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10892))
baseboardGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10892, 1))
fruGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000))
class DellObjectRange(Integer32):
    """Integer constrained to the range 1..128 (one-based object indexes)."""
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 128)
class DellUnsigned8BitRange(Integer32):
    """Integer constrained to the unsigned 8-bit range 0..255."""
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 255)
class DellUnsigned16BitRange(Integer32):
    """Integer constrained to the unsigned 16-bit range 0..65535."""
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 65535)
class DellUnsigned32BitRange(Integer32):
    """Integer constrained to the unsigned 32-bit range 0..4294967295."""
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 4294967295)
class DellDateName(DisplayString):
    """Fixed-length (exactly 25 characters) display string used for dates."""
    subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(25, 25)
    fixedLength = 25
class DellStatus(Integer32):
    """Status enumeration: other(1), unknown(2), ok(3), nonCritical(4),
    critical(5), nonRecoverable(6)."""
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
    namedValues = NamedValues(("other", 1), ("unknown", 2), ("ok", 3), ("nonCritical", 4), ("critical", 5), ("nonRecoverable", 6))
class DellFRUInformationState(Integer32):
    """FRU information state: ok(1), notSupported(2), notAvailable(3),
    checksumInvalid(4), corrupted(5)."""
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
    namedValues = NamedValues(("ok", 1), ("notSupported", 2), ("notAvailable", 3), ("checksumInvalid", 4), ("corrupted", 5))
# Field Replaceable Unit (FRU) table, OID 1.3.6.1.4.1.674.10892.1.2000.10,
# indexed by (fruChassisIndex, fruIndex). setStatus/setDescription calls run
# only when the MIB builder was configured to load texts.
fruTable = MibTable((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10), )
if mibBuilder.loadTexts: fruTable.setStatus('mandatory')
if mibBuilder.loadTexts: fruTable.setDescription('2000.0010 This object defines the Field Replaceable Unit table.')
fruTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10, 1), ).setIndexNames((0, "DCS3FRU-MIB", "fruChassisIndex"), (0, "DCS3FRU-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruTableEntry.setStatus('mandatory')
if mibBuilder.loadTexts: fruTableEntry.setDescription('2000.0010.0001 This object defines the Field Replaceable Unit table entry.')
# Table columns 1..11: indexes, status/state, then descriptive strings.
fruChassisIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10, 1, 1), DellObjectRange()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruChassisIndex.setStatus('mandatory')
if mibBuilder.loadTexts: fruChassisIndex.setDescription('2000.0010.0001.0001 This attribute defines the index (one based) of the chassis containing the field replaceable unit.')
fruIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10, 1, 2), DellObjectRange()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruIndex.setStatus('mandatory')
if mibBuilder.loadTexts: fruIndex.setDescription('2000.0010.0001.0002 This attribute defines the index (one based) of the field replaceable unit.')
fruInformationStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10, 1, 3), DellStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruInformationStatus.setStatus('mandatory')
if mibBuilder.loadTexts: fruInformationStatus.setDescription('2000.0010.0001.0003 This attribute defines the status of the field replaceable unit information.')
fruInformationState = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10, 1, 4), DellFRUInformationState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruInformationState.setStatus('mandatory')
if mibBuilder.loadTexts: fruInformationState.setDescription('2000.0010.0001.0004 This attribute defines the state of the field replaceable unit information. Some information for the field replaceable unit may not be available if the state is other than ok(1).')
fruDeviceName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDeviceName.setStatus('mandatory')
if mibBuilder.loadTexts: fruDeviceName.setDescription('2000.0010.0001.0005 This attribute defines the device name of the field replaceable unit.')
fruManufacturerName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruManufacturerName.setStatus('mandatory')
if mibBuilder.loadTexts: fruManufacturerName.setDescription('2000.0010.0001.0006 This attribute defines the manufacturer of the field replaceable unit.')
fruSerialNumberName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruSerialNumberName.setStatus('mandatory')
if mibBuilder.loadTexts: fruSerialNumberName.setDescription('2000.0010.0001.0007 This attribute defines the serial number of the field replaceable unit.')
fruPartNumberName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPartNumberName.setStatus('mandatory')
if mibBuilder.loadTexts: fruPartNumberName.setDescription('2000.0010.0001.0008 This attribute defines the part number of the field replaceable unit.')
fruRevisionName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruRevisionName.setStatus('mandatory')
if mibBuilder.loadTexts: fruRevisionName.setDescription('2000.0010.0001.0009 This attribute defines the revision of the field replaceable unit.')
fruManufacturingDateName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10, 1, 10), DellDateName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruManufacturingDateName.setStatus('mandatory')
if mibBuilder.loadTexts: fruManufacturingDateName.setDescription('2000.0010.0001.0010 This attribute defines the manufacturing date of the of the field replaceable unit.')
fruAssetTagName = MibTableColumn((1, 3, 6, 1, 4, 1, 674, 10892, 1, 2000, 10, 1, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruAssetTagName.setStatus('mandatory')
if mibBuilder.loadTexts: fruAssetTagName.setDescription('2000.0010.0001.0011 This attribute defines the asset tag of the field replaceable unit.')
# Export every symbol defined above so other MIB modules can import them.
mibBuilder.exportSymbols("DCS3FRU-MIB", DellStatus=DellStatus, dell=dell, fruAssetTagName=fruAssetTagName, DellUnsigned32BitRange=DellUnsigned32BitRange, fruPartNumberName=fruPartNumberName, fruGroup=fruGroup, baseboardGroup=baseboardGroup, fruSerialNumberName=fruSerialNumberName, fruTableEntry=fruTableEntry, fruChassisIndex=fruChassisIndex, fruManufacturerName=fruManufacturerName, DellFRUInformationState=DellFRUInformationState, fruManufacturingDateName=fruManufacturingDateName, fruDeviceName=fruDeviceName, fruInformationState=fruInformationState, DellUnsigned8BitRange=DellUnsigned8BitRange, fruTable=fruTable, DellObjectRange=DellObjectRange, DellDateName=DellDateName, DellUnsigned16BitRange=DellUnsigned16BitRange, server3=server3, fruIndex=fruIndex, fruRevisionName=fruRevisionName, fruInformationStatus=fruInformationStatus)
| 104.439024
| 837
| 0.78468
|
4a009b1865caf9bc2209f3bb3babffcaceb1b472
| 3,529
|
py
|
Python
|
idomoo/models/body.py
|
Idomoo-RnD/idomoo-python-sdk
|
d5b7c6a55f75196145a7e6d8f53772a92e4ee2ac
|
[
"MIT"
] | 1
|
2018-05-01T10:47:47.000Z
|
2018-05-01T10:47:47.000Z
|
idomoo/models/body.py
|
Idomoo-RnD/idomoo-python-sdk
|
d5b7c6a55f75196145a7e6d8f53772a92e4ee2ac
|
[
"MIT"
] | 3
|
2018-06-06T08:14:43.000Z
|
2021-03-15T18:35:52.000Z
|
idomoo/models/body.py
|
Idomoo-RnD/idomoo-python-sdk
|
d5b7c6a55f75196145a7e6d8f53772a92e4ee2ac
|
[
"MIT"
] | 2
|
2018-06-26T09:34:20.000Z
|
2019-11-14T10:23:44.000Z
|
# coding: utf-8
"""
Idomoo API
OpenAPI spec version: 2.0
Contact: dev.support@idomoo.com
"""
import pprint
import six
class Body(object):
    """Swagger model for a Scene Library request body.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    swagger_types = {
        'name': 'str',
        'description': 'str'
    }

    attribute_map = {
        'name': 'name',
        'description': 'description'
    }

    def __init__(self, name=None, description=None):
        """Body - a model defined in Swagger"""
        self._name = None
        self._description = None
        self.discriminator = None
        self.name = name
        if description is not None:
            self.description = description

    @property
    def name(self):
        """Gets the name of this Body.

        A descriptive name for the Scene Library.

        :return: The name of this Body.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this Body.

        A descriptive name for the Scene Library.

        :param name: The name of this Body.
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._name = name

    @property
    def description(self):
        """Gets the description of this Body.

        A description of the Scene Library. What’s its purpose?

        :return: The description of this Body.
        :rtype: str
        """
        return self._description

    @description.setter
    def description(self, description):
        """Sets the description of this Body.

        A description of the Scene Library. What’s its purpose?

        :param description: The description of this Body.
        :type: str
        """
        self._description = description

    @staticmethod
    def _serialize(value):
        """Convert one attribute value for to_dict: nested models become
        dicts, one level deep inside lists and dict values."""
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()}
        return value

    def to_dict(self):
        """Returns the model properties as a dict"""
        return {attr: self._serialize(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, Body) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 24.678322
| 80
| 0.547181
|
4a009b2d9d45b3f52fc5e6fdf73577a55a9cfe42
| 12,816
|
py
|
Python
|
tensorflow/python/ops/numpy_ops/np_math_ops_test.py
|
ashutom/tensorflow-upstream
|
c16069c19de9e286dd664abb78d0ea421e9f32d4
|
[
"Apache-2.0"
] | 10
|
2021-05-25T17:43:04.000Z
|
2022-03-08T10:46:09.000Z
|
tensorflow/python/ops/numpy_ops/np_math_ops_test.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 1,056
|
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/python/ops/numpy_ops/np_math_ops_test.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 6
|
2016-09-07T04:00:15.000Z
|
2022-01-12T01:47:38.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf numpy mathematical methods."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl.testing import parameterized
import numpy as np
from six.moves import range
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops.numpy_ops import np_array_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.numpy_ops import np_math_ops
from tensorflow.python.platform import test
class MathTest(test.TestCase, parameterized.TestCase):
  """Checks tf.experimental.numpy math ops against reference NumPy behavior."""

  def setUp(self):
    super(MathTest, self).setUp()
    # Wrappers that turn raw operands into every accepted input kind:
    # plain Python values, tf Tensors, np arrays, and tf-numpy ndarrays.
    self.array_transforms = [
        lambda x: x,  # Identity,
        ops.convert_to_tensor,
        np.array,
        lambda x: np.array(x, dtype=np.float32),
        lambda x: np.array(x, dtype=np.float64),
        np_array_ops.array,
        lambda x: np_array_ops.array(x, dtype=np.float32),
        lambda x: np_array_ops.array(x, dtype=np.float64),
    ]
    # Dtypes exercised by the type-promotion checks in _testBinaryOp.
    self.types = [np.int32, np.int64, np.float32, np.float64]

  def _testBinaryOp(self,
                    math_fun,
                    np_fun,
                    name,
                    operands=None,
                    extra_operands=None,
                    check_promotion=True,
                    check_promotion_result_type=True):
    """Compares a binary tf-numpy op against its NumPy counterpart.

    Args:
      math_fun: the tf-numpy function under test.
      np_fun: the reference NumPy function.
      name: op name used in assertion messages.
      operands: optional list of (a, b) pairs; defaults to a mixed
        scalar/vector/matrix set below.
      extra_operands: additional (a, b) pairs appended to the defaults.
      check_promotion: if False, skip mixed-dtype pairs.
      check_promotion_result_type: if False, only values (not result
        dtypes) are compared for the promotion cases.
    """

    def run_test(a, b):
      # Exercise every combination of input wrappers.
      for fn in self.array_transforms:
        arg1 = fn(a)
        arg2 = fn(b)
        self.match(
            math_fun(arg1, arg2),
            np_fun(arg1, arg2),
            msg='{}({}, {})'.format(name, arg1, arg2))
      # Tests type promotion
      for type_a in self.types:
        for type_b in self.types:
          if not check_promotion and type_a != type_b:
            continue
          arg1 = np_array_ops.array(a, dtype=type_a)
          arg2 = np_array_ops.array(b, dtype=type_b)
          self.match(
              math_fun(arg1, arg2),
              np_fun(arg1, arg2),
              msg='{}({}, {})'.format(name, arg1, arg2),
              check_dtype=check_promotion_result_type)

    if operands is None:
      operands = [(5, 2), (5, [2, 3]), (5, [[2, 3], [6, 7]]), ([1, 2, 3], 7),
                  ([1, 2, 3], [5, 6, 7])]
    for operand1, operand2 in operands:
      run_test(operand1, operand2)
    if extra_operands is not None:
      for operand1, operand2 in extra_operands:
        run_test(operand1, operand2)

  def testDot(self):
    """dot, including the rank-1 x rank-2 and rank-3 x rank-3 cases."""
    extra_operands = [([1, 2], [[5, 6, 7], [8, 9, 10]]),
                      (np.arange(2 * 3 * 5).reshape([2, 3, 5]).tolist(),
                       np.arange(5 * 7 * 11).reshape([7, 5, 11]).tolist())]
    return self._testBinaryOp(
        np_math_ops.dot, np.dot, 'dot', extra_operands=extra_operands)

  def testMinimum(self):
    # The numpy version has strange result type when promotion happens,
    # so set check_promotion_result_type to False.
    return self._testBinaryOp(
        np_math_ops.minimum,
        np.minimum,
        'minimum',
        check_promotion_result_type=False)

  def testMaximum(self):
    # The numpy version has strange result type when promotion happens,
    # so set check_promotion_result_type to False.
    return self._testBinaryOp(
        np_math_ops.maximum,
        np.maximum,
        'maximum',
        check_promotion_result_type=False)

  def testMatmul(self):
    """matmul on a conforming (1x2) @ (2x3) pair."""
    operands = [([[1, 2]], [[3, 4, 5], [6, 7, 8]])]
    return self._testBinaryOp(
        np_math_ops.matmul, np.matmul, 'matmul', operands=operands)

  def testMatmulError(self):
    """matmul must reject scalar (rank-0) arguments on either side."""
    with self.assertRaisesRegex(ValueError, r''):
      np_math_ops.matmul(
          np_array_ops.ones([], np.int32), np_array_ops.ones([2, 3], np.int32))
    with self.assertRaisesRegex(ValueError, r''):
      np_math_ops.matmul(
          np_array_ops.ones([2, 3], np.int32), np_array_ops.ones([], np.int32))

  def testVDot(self):
    """vdot flattens its inputs, so mixed-shape operands are valid."""
    operands = [([[1, 2], [3, 4]], [[3, 4], [6, 7]]),
                ([[1, 2], [3, 4]], [3, 4, 6, 7])]
    return self._testBinaryOp(
        np_math_ops.vdot, np.vdot, 'vdot', operands=operands)

  def _testUnaryOp(self, math_fun, np_fun, name):
    """Compares a unary tf-numpy op with NumPy on scalar/vector/matrix data."""

    def run_test(a):
      for fn in self.array_transforms:
        arg1 = fn(a)
        self.match(
            math_fun(arg1), np_fun(arg1), msg='{}({})'.format(name, arg1))

    run_test(5)
    run_test([2, 3])
    run_test([[2, -3], [-6, 7]])

  def testLog(self):
    self._testUnaryOp(np_math_ops.log, np.log, 'log')

  def testExp(self):
    self._testUnaryOp(np_math_ops.exp, np.exp, 'exp')

  def testTanh(self):
    self._testUnaryOp(np_math_ops.tanh, np.tanh, 'tanh')

  def testSqrt(self):
    self._testUnaryOp(np_math_ops.sqrt, np.sqrt, 'sqrt')

  def match(self, actual, expected, msg='', check_dtype=True):
    """Asserts `actual` (tf-numpy) equals `expected` (NumPy).

    Checks the result is an ndarray, optionally compares dtypes, compares
    shapes, and compares values with a small relative tolerance.
    """
    self.assertIsInstance(actual, np_arrays.ndarray)
    if check_dtype:
      self.assertEqual(
          actual.dtype, expected.dtype,
          'Dtype mismatch.\nActual: {}\nExpected: {}\n{}'.format(
              actual.dtype.as_numpy_dtype, expected.dtype, msg))
    self.assertEqual(
        actual.shape, expected.shape,
        'Shape mismatch.\nActual: {}\nExpected: {}\n{}'.format(
            actual.shape, expected.shape, msg))
    np.testing.assert_allclose(actual.tolist(), expected.tolist(), rtol=1e-6)

  def testArgsort(self):
    self._testUnaryOp(np_math_ops.argsort, np.argsort, 'argsort')

    # Test stability
    r = np.arange(100)
    a = np.zeros(100)
    np.testing.assert_equal(np_math_ops.argsort(a, kind='stable'), r)

  def testArgMaxArgMin(self):
    """argmax/argmin over every axis of data from rank 0 through rank 3."""
    data = [
        0,
        5,
        [1],
        [1, 2, 3],
        [[1, 2, 3]],
        [[4, 6], [7, 8]],
        [[[4, 6], [9, 10]], [[7, 8], [12, 34]]],
    ]
    for fn, d in itertools.product(self.array_transforms, data):
      arr = fn(d)
      self.match(np_math_ops.argmax(arr), np.argmax(arr))
      self.match(np_math_ops.argmin(arr), np.argmin(arr))
      if hasattr(arr, 'shape'):
        ndims = len(arr.shape)
      else:
        ndims = np_array_ops.array(arr, copy=False).ndim
      if ndims == 0:
        # Numpy flattens the scalar ndarray and treats it as a 1-d array of
        # size 1.
        ndims = 1
      for axis in range(-ndims, ndims):
        self.match(
            np_math_ops.argmax(arr, axis=axis), np.argmax(arr, axis=axis))
        self.match(
            np_math_ops.argmin(arr, axis=axis), np.argmin(arr, axis=axis))

  @parameterized.parameters([False, True])
  def testIsCloseEqualNan(self, equal_nan):
    """isclose with NaN on either or both sides, for both equal_nan modes."""
    a = np.asarray([1, 1, np.nan, 1, np.nan], np.float32)
    b = np.asarray([1, 2, 1, np.nan, np.nan], np.float32)
    self.match(
        np_math_ops.isclose(a, b, equal_nan=equal_nan),
        np.isclose(a, b, equal_nan=equal_nan))

  def testAverageWrongShape(self):
    """average must reject weights whose shape does not match the input."""
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError, r''):
      np_math_ops.average(np.ones([2, 3]), weights=np.ones([2, 4]))
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError, r''):
      np_math_ops.average(np.ones([2, 3]), axis=0, weights=np.ones([2, 4]))
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError, r''):
      np_math_ops.average(np.ones([2, 3]), axis=0, weights=np.ones([]))
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError, r''):
      np_math_ops.average(np.ones([2, 3]), axis=0, weights=np.ones([5]))

  def testClip(self):
    """clip with scalar, one-sided (None) and per-element bounds."""

    def run_test(arr, *args, **kwargs):
      check_dtype = kwargs.pop('check_dtype', True)
      for fn in self.array_transforms:
        arr = fn(arr)
        self.match(
            np_math_ops.clip(arr, *args, **kwargs),
            np.clip(arr, *args, **kwargs),
            check_dtype=check_dtype)

    # NumPy exhibits weird typing behavior when a/a_min/a_max are scalars v/s
    # lists, e.g.,
    #
    # np.clip(np.array(0, dtype=np.int32), -5, 5).dtype == np.int64
    # np.clip(np.array([0], dtype=np.int32), -5, 5).dtype == np.int32
    # np.clip(np.array([0], dtype=np.int32), [-5], [5]).dtype == np.int64
    #
    # So we skip matching type. In tf-numpy the type of the output array is
    # always the same as the input array.
    run_test(0, -1, 5, check_dtype=False)
    run_test(-1, -1, 5, check_dtype=False)
    run_test(5, -1, 5, check_dtype=False)
    run_test(-10, -1, 5, check_dtype=False)
    run_test(10, -1, 5, check_dtype=False)
    run_test(10, None, 5, check_dtype=False)
    run_test(10, -1, None, check_dtype=False)
    run_test([0, 20, -5, 4], -1, 5, check_dtype=False)
    run_test([0, 20, -5, 4], None, 5, check_dtype=False)
    run_test([0, 20, -5, 4], -1, None, check_dtype=False)
    run_test([0.5, 20.2, -5.7, 4.4], -1.5, 5.1, check_dtype=False)
    run_test([0, 20, -5, 4], [-5, 0, -5, 0], [0, 5, 0, 5], check_dtype=False)
    run_test([[1, 2, 3], [4, 5, 6]], [2, 0, 2], 5, check_dtype=False)
    run_test([[1, 2, 3], [4, 5, 6]], 0, [5, 3, 1], check_dtype=False)

  def testPtp(self):
    """ptp (max - min) over the flat array and each axis, including negative."""

    def run_test(arr, *args, **kwargs):
      for fn in self.array_transforms:
        arg = fn(arr)
        self.match(
            np_math_ops.ptp(arg, *args, **kwargs), np.ptp(arg, *args, **kwargs))

    run_test([1, 2, 3])
    run_test([1., 2., 3.])
    run_test([[1, 2], [3, 4]], axis=1)
    run_test([[1, 2], [3, 4]], axis=0)
    run_test([[1, 2], [3, 4]], axis=-1)
    run_test([[1, 2], [3, 4]], axis=-2)

  def testLinSpace(self):
    """linspace with every wrapper combination for start/stop endpoints."""
    array_transforms = [
        lambda x: x,  # Identity,
        ops.convert_to_tensor,
        np.array,
        lambda x: np.array(x, dtype=np.float32),
        lambda x: np.array(x, dtype=np.float64),
        np_array_ops.array,
        lambda x: np_array_ops.array(x, dtype=np.float32),
        lambda x: np_array_ops.array(x, dtype=np.float64)
    ]

    def run_test(start, stop, **kwargs):
      for fn1 in array_transforms:
        for fn2 in array_transforms:
          arg1 = fn1(start)
          arg2 = fn2(stop)
          self.match(
              np_math_ops.linspace(arg1, arg2, **kwargs),
              np.linspace(arg1, arg2, **kwargs),
              msg='linspace({}, {})'.format(arg1, arg2))

    run_test(0, 1)
    run_test(0, 1, num=10)
    run_test(0, 1, endpoint=False)
    run_test(0, -1)
    run_test(0, -1, num=10)
    run_test(0, -1, endpoint=False)

  def testLogSpace(self):
    """logspace over positive/negative ranges, custom num, endpoint, base."""
    array_transforms = [
        lambda x: x,  # Identity,
        ops.convert_to_tensor,
        np.array,
        lambda x: np.array(x, dtype=np.float32),
        lambda x: np.array(x, dtype=np.float64),
        np_array_ops.array,
        lambda x: np_array_ops.array(x, dtype=np.float32),
        lambda x: np_array_ops.array(x, dtype=np.float64)
    ]

    def run_test(start, stop, **kwargs):
      for fn1 in array_transforms:
        for fn2 in array_transforms:
          arg1 = fn1(start)
          arg2 = fn2(stop)
          self.match(
              np_math_ops.logspace(arg1, arg2, **kwargs),
              np.logspace(arg1, arg2, **kwargs),
              msg='logspace({}, {})'.format(arg1, arg2))

    run_test(0, 5)
    run_test(0, 5, num=10)
    run_test(0, 5, endpoint=False)
    run_test(0, 5, base=2.0)
    run_test(0, -5)
    run_test(0, -5, num=10)
    run_test(0, -5, endpoint=False)
    run_test(0, -5, base=2.0)

  def testGeomSpace(self):
    """geomspace for increasing/decreasing and all-negative ranges."""

    def run_test(start, stop, **kwargs):
      arg1 = start
      arg2 = stop
      self.match(
          np_math_ops.geomspace(arg1, arg2, **kwargs),
          np.geomspace(arg1, arg2, **kwargs),
          msg='geomspace({}, {})'.format(arg1, arg2))

    run_test(1, 1000, num=5)
    run_test(1, 1000, num=5, endpoint=False)
    run_test(-1, -1000, num=5)
    run_test(-1, -1000, num=5, endpoint=False)

  @parameterized.parameters([
      'T', 'ndim', 'size', 'data', '__pos__', '__round__', 'tolist',
      'transpose', 'reshape', 'ravel', 'clip', 'astype', 'max', 'mean', 'min'])
  def testNumpyMethodsOnTensor(self, np_method):
    # enable_numpy_methods_on_tensor() (see __main__) must have patched each
    # of these NumPy-style attributes onto tf.Tensor.
    a = ops.convert_to_tensor([1, 2])
    self.assertTrue(hasattr(a, np_method))
if __name__ == '__main__':
  # The tests assume eager mode, NumPy-style type promotion, and ndarray
  # methods patched onto tf.Tensor (testNumpyMethodsOnTensor relies on it).
  ops.enable_eager_execution()
  ops.enable_numpy_style_type_promotion()
  np_math_ops.enable_numpy_methods_on_tensor()
  test.main()
| 35.305785
| 80
| 0.604791
|
4a009b498e40635268231cf5e0dcc34b237afe7b
| 2,928
|
py
|
Python
|
custom_components/bosch/bosch_entity.py
|
pszafer/home-assistant-bosch-custom-component
|
82660d39a989bc7120ae8261f22c477e71aa1e52
|
[
"Apache-2.0"
] | 10
|
2019-05-19T19:21:15.000Z
|
2020-01-24T20:13:42.000Z
|
custom_components/bosch/bosch_entity.py
|
pszafer/home-assistant-bosch-custom-component
|
82660d39a989bc7120ae8261f22c477e71aa1e52
|
[
"Apache-2.0"
] | 24
|
2019-05-19T16:41:41.000Z
|
2020-04-15T14:37:13.000Z
|
custom_components/bosch/bosch_entity.py
|
pszafer/home-assistant-bosch-custom-component
|
82660d39a989bc7120ae8261f22c477e71aa1e52
|
[
"Apache-2.0"
] | 5
|
2019-11-01T10:15:35.000Z
|
2020-04-02T16:25:45.000Z
|
"""Bosch base entity."""
from homeassistant.const import TEMP_CELSIUS
from .const import DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP, DOMAIN
class BoschEntity:
    """Base class shared by every Bosch entity in this integration."""

    def __init__(self, **kwargs):
        """Capture hass, the upstream Bosch object, the gateway and its uuid."""
        for attr, key in (
            ("hass", "hass"),
            ("_bosch_object", "bosch_object"),
            ("_gateway", "gateway"),
            ("_uuid", "uuid"),
        ):
            setattr(self, attr, kwargs.get(key))

    @property
    def name(self):
        """Entity display name (set by subclasses)."""
        return self._name

    @property
    def unique_id(self):
        """Stable unique ID for the entity registry (set by subclasses)."""
        return self._unique_id

    @property
    def bosch_object(self):
        """Upstream Bosch component; used when refreshing state."""
        return self._bosch_object

    async def async_added_to_hass(self):
        """Subscribe this entity's update callback to its dispatcher signal."""
        dispatcher = self.hass.helpers.dispatcher
        dispatcher.async_dispatcher_connect(self.signal, self.async_update)

    @property
    def device_info(self):
        """Describe the device for the Home Assistant device registry."""
        return dict(
            identifiers=self._domain_identifier,
            manufacturer=self._gateway.device_model,
            model=self._gateway.device_type,
            name=self.device_name,
            sw_version=self._gateway.firmware,
            via_hub=(DOMAIN, self._uuid),
        )
class BoschClimateWaterEntity(BoschEntity):
    """Shared base for Bosch climate and water-heater entities."""

    def __init__(self, **kwargs):
        """Derive name/unique id from the Bosch object; start with no readings."""
        super().__init__(**kwargs)
        self._name = self._bosch_object.name
        self._temperature_unit = TEMP_CELSIUS
        self._unique_id = self._name + self._uuid
        # No temperature/state data until the first refresh.
        self._current_temperature = self._state = self._target_temperature = None

    @property
    def _domain_identifier(self):
        return {(DOMAIN, self._unique_id)}

    @property
    def device_name(self):
        """Name shown in device_info (prefix supplied by subclasses)."""
        return f"{self._name_prefix} {self._name}"

    @property
    def temperature_unit(self):
        """Unit of measurement (always Celsius for Bosch gateways)."""
        return self._temperature_unit

    @property
    def current_temperature(self):
        """Latest measured temperature, or None before first refresh."""
        return self._current_temperature

    @property
    def target_temperature(self):
        """Setpoint temperature, or None before first refresh."""
        return self._target_temperature

    @property
    def min_temp(self):
        """Minimum settable temperature; falls back to the integration default."""
        lower = self._bosch_object.min_temp
        return lower if lower else DEFAULT_MIN_TEMP

    @property
    def max_temp(self):
        """Maximum settable temperature; falls back to the integration default."""
        upper = self._bosch_object.max_temp
        return upper if upper else DEFAULT_MAX_TEMP
| 28.153846
| 62
| 0.620219
|
4a009ba7f8312a524087ce0917e5d34dc65fe765
| 1,532
|
py
|
Python
|
tests/apps/user/commands/test_create.py
|
cheesycod/piccolo
|
254750cdd2f40f118a200074f97e93c7dae4461c
|
[
"MIT"
] | null | null | null |
tests/apps/user/commands/test_create.py
|
cheesycod/piccolo
|
254750cdd2f40f118a200074f97e93c7dae4461c
|
[
"MIT"
] | null | null | null |
tests/apps/user/commands/test_create.py
|
cheesycod/piccolo
|
254750cdd2f40f118a200074f97e93c7dae4461c
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import patch
from piccolo.apps.user.commands.create import create
from piccolo.apps.user.tables import BaseUser
class TestCreate(TestCase):
    """Tests the interactive `piccolo user create` command with mocked prompts."""

    def setUp(self):
        # Fresh user table per test run.
        BaseUser.create_table(if_not_exists=True).run_sync()

    def tearDown(self):
        BaseUser.alter().drop_table().run_sync()

    # Each patch replaces one interactive prompt with a canned answer;
    # decorators apply bottom-up, so the mocks arrive in *args in reverse
    # order (unused here).
    @patch(
        "piccolo.apps.user.commands.create.get_username",
        return_value="bob123",
    )
    @patch(
        "piccolo.apps.user.commands.create.get_email",
        return_value="bob@test.com",
    )
    @patch(
        "piccolo.apps.user.commands.create.get_password",
        return_value="password123",
    )
    @patch(
        "piccolo.apps.user.commands.create.get_confirmed_password",
        return_value="password123",
    )
    @patch(
        "piccolo.apps.user.commands.create.get_is_admin", return_value=True,
    )
    @patch(
        "piccolo.apps.user.commands.create.get_is_superuser",
        return_value=True,
    )
    @patch(
        "piccolo.apps.user.commands.create.get_is_active", return_value=True,
    )
    def test_create(self, *args, **kwargs):
        """Running create() persists a user matching every prompted answer."""
        create()

        self.assertTrue(
            BaseUser.exists()
            .where(
                (BaseUser.admin == True)  # noqa: E712
                & (BaseUser.username == "bob123")
                & (BaseUser.email == "bob@test.com")
                & (BaseUser.superuser == True)
                & (BaseUser.active == True)
            )
            .run_sync()
        )
| 27.854545
| 77
| 0.597258
|
4a009c3ca0154c5108e65c364e6a9a8daee6170f
| 1,945
|
py
|
Python
|
antipetros_discordbot/utility/data.py
|
official-antistasi-community/Antipetros_Discord_Bot
|
1b5c8b61c09e61cdff671e259f0478d343a50c8d
|
[
"MIT"
] | null | null | null |
antipetros_discordbot/utility/data.py
|
official-antistasi-community/Antipetros_Discord_Bot
|
1b5c8b61c09e61cdff671e259f0478d343a50c8d
|
[
"MIT"
] | null | null | null |
antipetros_discordbot/utility/data.py
|
official-antistasi-community/Antipetros_Discord_Bot
|
1b5c8b61c09e61cdff671e259f0478d343a50c8d
|
[
"MIT"
] | 1
|
2021-02-12T01:10:51.000Z
|
2021-02-12T01:10:51.000Z
|
# File extensions treated as images when handling attachments.
IMAGE_EXTENSIONS = {'png', 'jpg', 'jpeg', 'tif', 'tiff', 'tga', 'gif'}
# Maps a permission category to (config-key suffix, default value) used when
# reading per-command options from the config file.
COMMAND_CONFIG_SUFFIXES = {'enabled': ('_enabled', True), 'channels': ('_allowed_channels', ''), 'roles': ('_allowed_roles', ''), 'dm_ids': ('_allowed_dm_ids', '')}
# Config option names consulted when a command has no specific override.
DEFAULT_CONFIG_OPTION_NAMES = {'dm_ids': 'default_allowed_dm_ids', 'channels': 'default_allowed_channels', 'roles': 'default_allowed_roles'}
# Attribute names on a cog's permission checker for each category.
COG_CHECKER_ATTRIBUTE_NAMES = {'dm_ids': "allowed_dm_ids", 'channels': 'allowed_channels', 'roles': 'allowed_roles'}
# Template written into a fresh config file; the embedded '#' lines are part
# of the generated INI content, so the string must be kept verbatim.
DEFAULT_CONFIG_SECTION = """# settings here are used if the options are not specified in the sections
[DEFAULT]
# the default roles that are allowed to invoke commands
# as comma seperated list
default_allowed_roles = Dev Helper, Admin, Dev Team
# default allowed channels, set to testing so if i forgot to specify a channel it at least is confined to testing, comma seperated list
default_allowed_channels = bot-testing
# default role that is allowed to delete data, ie = delete the save suggestion database and so on, also set so in worst case it defaults to admin
# - as comma seperated list cave
delete_all_allowed_roles = Admin, Back End Team
# member to contact mostly for bot related stuff, if someone thinks his blacklist is actually a bug or so.
notify_contact_member = Giddi
# default roles for elevated commands if there are ones
# -- as comma seperated list
allowed_elevated_roles = Admin
# list of user ids that are allowed to invoke restriced (but not admin) dm commands, needs to be and id list as in dms there are no roles
# --- as comma seperated list
# currently --> chubchub, vlash
default_allowed_in_dms = 413109712695984130, 122348088319803392
# from here on these are cog specific settings, a section is the config name for an cog ie = "purge_message_cog" --> [purge_message]
# ----------------------------------------------------------------------------------------------------------------------------------
"""
| 45.232558
| 164
| 0.703856
|
4a009c477522a8cd8e172a00bf963366e3ba14cc
| 14,969
|
py
|
Python
|
es/ch15/pruebas.py
|
skypather/GeneticAlgorithmsWithPython
|
d6d99f59e6e2b77380cadf0a1158d2442298fe89
|
[
"Apache-2.0"
] | 2
|
2018-04-08T15:05:42.000Z
|
2018-04-08T15:06:30.000Z
|
es/ch15/pruebas.py
|
skypather/GeneticAlgorithmsWithPython
|
d6d99f59e6e2b77380cadf0a1158d2442298fe89
|
[
"Apache-2.0"
] | null | null | null |
es/ch15/pruebas.py
|
skypather/GeneticAlgorithmsWithPython
|
d6d99f59e6e2b77380cadf0a1158d2442298fe89
|
[
"Apache-2.0"
] | 1
|
2019-10-09T01:28:38.000Z
|
2019-10-09T01:28:38.000Z
|
# File: pruebas.py
# Del capítulo 15 de _Algoritmos Genéticos con Python_
#
# Author: Clinton Sheppard <fluentcoder@gmail.com>
# Copyright (c) 2017 Clinton Sheppard
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import datetime
import random
import unittest
import genetic
from cortadora import *
def obtener_aptitud(genes, fnEvaluar):
    """Run the candidate program and score mowed squares, length and steps."""
    resultado = fnEvaluar(genes)
    campo, cortadora = resultado[0], resultado[1]
    return Aptitud(campo.cuente_cortada(), len(genes), cortadora.CuentaDePasos)
def mostrar(candidato, horaInicio, fnEvaluar):
    """Print the candidate's field, its fitness with elapsed seconds, and its program."""
    campo, cortadora, programa = fnEvaluar(candidato.Genes)
    transcurrido = (datetime.datetime.now() - horaInicio).total_seconds()
    campo.mostrar(cortadora)
    print("{}\t{}".format(candidato.Aptitud, transcurrido))
    programa.print()
def mudar(genes, geneSet, mínGenes, máxGenes, fnObtenerAptitud, rondasMáximas):
    """Mutate `genes` in place for up to `rondasMáximas` random rounds.

    Each round either appends a new random gene (favored when the sequence
    is empty or short), occasionally removes one, or replaces one.  Stops
    early as soon as fitness improves over the starting fitness.
    """
    cuenta = random.randint(1, rondasMáximas)
    aptitudInicial = fnObtenerAptitud(genes)
    while cuenta > 0:
        cuenta -= 1
        # Early exit: a previous round already improved the candidate.
        if fnObtenerAptitud(genes) > aptitudInicial:
            return
        # ~1/6 chance to grow (always grow when empty and below the cap).
        añadiendo = len(genes) == 0 or \
                    (len(genes) < máxGenes and random.randint(0, 5) == 0)
        if añadiendo:
            genes.append(random.choice(geneSet)())
            continue
        # Rare (~1/51) shrink, only while above the minimum length.
        eliminando = len(genes) > mínGenes and random.randint(0, 50) == 0
        if eliminando:
            índice = random.randrange(0, len(genes))
            del genes[índice]
            continue
        # Default action: replace one gene with a fresh random one.
        índice = random.randrange(0, len(genes))
        genes[índice] = random.choice(geneSet)()
def crear(geneSet, mínGenes, máxGenes):
    """Build a random gene sequence from factories in `geneSet`.

    NOTE(review): `range(1, cantidad)` yields `cantidad - 1` genes, so the
    result has between mínGenes-1 and máxGenes-1 elements — presumably
    intentional per the book's implementation; confirm before changing.
    """
    cantidad = random.randint(mínGenes, máxGenes)
    resultado = []
    for _ in range(1, cantidad):
        resultado.append(random.choice(geneSet)())
    return resultado
def intercambiar(padre, otroPadre):
    """Single-segment crossover.

    Copies `padre` and overwrites one random contiguous slice with the same
    slice taken from `otroPadre`.  If either parent is too short, the copy
    is returned unchanged.
    """
    niño = list(padre)
    if len(padre) <= 2 or len(otroPadre) < 2:
        return niño
    longitud = random.randint(1, len(padre) - 2)
    principio = random.randrange(0, len(padre) - longitud)
    fin = principio + longitud
    # Slice assignment tolerates a shorter otroPadre: the child simply
    # shrinks by the missing elements, exactly as the original did.
    niño[principio:fin] = otroPadre[principio:fin]
    return niño
class PruebasDeCortadora(unittest.TestCase):
    """Evolves mower programs with growing instruction sets (ch. 15 tests)."""

    def test_corta_gira(self):
        """Mow/turn only: needs one instruction per square plus turns."""
        anchura = altura = 8
        geneSet = [lambda: Corta(),
                   lambda: Gira()]
        mínGenes = anchura * altura
        máxGenes = int(1.5 * mínGenes)
        rondasMáximasDeMutación = 3
        númeroEsperadoDeInstrucciones = 78

        def fnCrearCampo():
            return CampoToroidal(anchura, altura,
                                 ContenidoDelCampo.Hierba)

        self.ejecutar_con(geneSet, anchura, altura, mínGenes, máxGenes,
                          númeroEsperadoDeInstrucciones,
                          rondasMáximasDeMutación, fnCrearCampo,
                          númeroEsperadoDeInstrucciones)

    def test_corta_gira_salta(self):
        """Adding Salta (jump) shrinks the expected program length."""
        anchura = altura = 8
        geneSet = [lambda: Corta(),
                   lambda: Gira(),
                   lambda: Salta(random.randint(0, min(anchura, altura)),
                                 random.randint(0, min(anchura, altura)))]
        mínGenes = anchura * altura
        máxGenes = int(1.5 * mínGenes)
        rondasMáximasDeMutación = 1
        númeroEsperadoDeInstrucciones = 64

        def fnCrearCampo():
            return CampoToroidal(anchura, altura,
                                 ContenidoDelCampo.Hierba)

        self.ejecutar_con(geneSet, anchura, altura, mínGenes, máxGenes,
                          númeroEsperadoDeInstrucciones,
                          rondasMáximasDeMutación, fnCrearCampo,
                          númeroEsperadoDeInstrucciones)

    def test_corta_gira_salta_validando(self):
        """Same instruction set on a validating (non-wrapping) field."""
        anchura = altura = 8
        geneSet = [lambda: Corta(),
                   lambda: Gira(),
                   lambda: Salta(random.randint(0, min(anchura, altura)),
                                 random.randint(0, min(anchura, altura)))]
        mínGenes = anchura * altura
        máxGenes = int(1.5 * mínGenes)
        rondasMáximasDeMutación = 3
        númeroEsperadoDeInstrucciones = 79

        def fnCrearCampo():
            return CampoValidando(anchura, altura,
                                  ContenidoDelCampo.Hierba)

        self.ejecutar_con(geneSet, anchura, altura, mínGenes, máxGenes,
                          númeroEsperadoDeInstrucciones,
                          rondasMáximasDeMutación, fnCrearCampo,
                          númeroEsperadoDeInstrucciones)

    def test_corta_gira_repite(self):
        """Repite allows short programs that expand into many steps."""
        anchura = altura = 8
        geneSet = [lambda: Corta(),
                   lambda: Gira(),
                   lambda: Repite(random.randint(0, 8),
                                  random.randint(0, 8))]
        mínGenes = 3
        máxGenes = 20
        rondasMáximasDeMutación = 3
        númeroEsperadoDeInstrucciones = 9
        # With Repite, instruction count and executed-step count diverge.
        númeroEsperadoDePasos = 88

        def fnCrearCampo():
            return CampoToroidal(anchura, altura,
                                 ContenidoDelCampo.Hierba)

        self.ejecutar_con(geneSet, anchura, altura, mínGenes, máxGenes,
                          númeroEsperadoDeInstrucciones,
                          rondasMáximasDeMutación, fnCrearCampo,
                          númeroEsperadoDePasos)

    def test_corta_gira_salta_func(self):
        """Func wraps trailing genes into an implicitly-called subroutine."""
        anchura = altura = 8
        geneSet = [lambda: Corta(),
                   lambda: Gira(),
                   lambda: Salta(random.randint(0, min(anchura, altura)),
                                 random.randint(0, min(anchura, altura))),
                   lambda: Func()]
        mínGenes = 3
        máxGenes = 20
        rondasMáximasDeMutación = 3
        númeroEsperadoDeInstrucciones = 17
        númeroEsperadoDePasos = 64

        def fnCrearCampo():
            return CampoToroidal(anchura, altura,
                                 ContenidoDelCampo.Hierba)

        self.ejecutar_con(geneSet, anchura, altura, mínGenes, máxGenes,
                          númeroEsperadoDeInstrucciones,
                          rondasMáximasDeMutación, fnCrearCampo,
                          númeroEsperadoDePasos)

    def test_corta_gira_salta_llama(self):
        """Llama invokes numbered Funcs (Func(expectLlama=True) gets an id)."""
        anchura = altura = 8
        geneSet = [lambda: Corta(),
                   lambda: Gira(),
                   lambda: Salta(random.randint(0, min(anchura, altura)),
                                 random.randint(0, min(anchura, altura))),
                   lambda: Func(expectLlama=True),
                   lambda: Llama(random.randint(0, 5))]
        mínGenes = 3
        máxGenes = 20
        rondasMáximasDeMutación = 3
        númeroEsperadoDeInstrucciones = 18
        númeroEsperadoDePasos = 65

        def fnCrearCampo():
            return CampoToroidal(anchura, altura,
                                 ContenidoDelCampo.Hierba)

        self.ejecutar_con(geneSet, anchura, altura, mínGenes, máxGenes,
                          númeroEsperadoDeInstrucciones,
                          rondasMáximasDeMutación, fnCrearCampo,
                          númeroEsperadoDePasos)

    def ejecutar_con(self, geneSet, anchura, altura, mínGenes, máxGenes,
                     númeroEsperadoDeInstrucciones, rondasMáximasDeMutación,
                     fnCrearCampo, númeroEsperadoDePasos):
        """Shared driver: wires up the genetic engine and asserts the result
        reaches the optimal fitness (all squares mowed within the expected
        instruction and step budget)."""
        # Mower always starts in the middle of the field, facing south.
        ubicaciónInicialDelCortador = Ubicación(int(anchura / 2),
                                                int(altura / 2))
        direcciónInicialDelCortador = Direcciones.Sur.value

        def fnCrear():
            return crear(geneSet, 1, altura)

        def fnEvaluar(instrucciones):
            programa = Programa(instrucciones)
            cortadora = Cortadora(ubicaciónInicialDelCortador,
                                  direcciónInicialDelCortador)
            campo = fnCrearCampo()
            try:
                programa.evaluar(cortadora, campo)
            except RecursionError:
                # Runaway recursive Llama chains simply score as-is.
                pass
            return campo, cortadora, programa

        def fnObtenerAptitud(genes):
            return obtener_aptitud(genes, fnEvaluar)

        horaInicio = datetime.datetime.now()

        def fnMostrar(candidato):
            mostrar(candidato, horaInicio, fnEvaluar)

        def fnMudar(niño):
            mudar(niño, geneSet, mínGenes, máxGenes, fnObtenerAptitud,
                  rondasMáximasDeMutación)

        aptitudÓptima = Aptitud(anchura * altura,
                                númeroEsperadoDeInstrucciones,
                                númeroEsperadoDePasos)
        mejor = genetic.obtener_mejor(fnObtenerAptitud, None, aptitudÓptima,
                                      None, fnMostrar, fnMudar, fnCrear,
                                      edadMáxima=None, tamañoDePiscina=10,
                                      intercambiar=intercambiar)
        self.assertTrue(not aptitudÓptima > mejor.Aptitud)
class Corta:
    """Instruction: mow the square the mower is standing on."""

    def __init__(self):
        pass

    @staticmethod
    def ejecutar(cortadora, campo):
        cortadora.corta(campo)

    def __str__(self):
        return "corta"
class Gira:
    """Instruction: rotate the mower 90 degrees counter-clockwise."""

    def __init__(self):
        pass

    @staticmethod
    def ejecutar(cortadora, campo):
        cortadora.girar_a_la_izquierda()

    def __str__(self):
        return "gira"
class Salta:
    """Instruction: jump `Adelante` squares forward and `Derecha` to the right."""

    def __init__(self, adelante, derecha):
        self.Adelante = adelante
        self.Derecha = derecha

    def ejecutar(self, cortadora, campo):
        cortadora.salta(campo, self.Adelante, self.Derecha)

    def __str__(self):
        return f"salta({self.Adelante},{self.Derecha})"
class Repite:
    """Instruction: repeat the next `NúmeroDeOperaciones` instructions `Veces` times.

    `Instrucciones` is populated later by Programa, which pulls the following
    genes into this node.
    """

    def __init__(self, númeroDeOperaciones, veces):
        self.NúmeroDeOperaciones = númeroDeOperaciones
        self.Veces = veces
        self.Instrucciones = []

    def ejecutar(self, cortadora, campo):
        for _ in range(self.Veces):
            for instrucción in self.Instrucciones:
                instrucción.ejecutar(cortadora, campo)

    def __str__(self):
        # Before Programa fills Instrucciones, show the raw operand count.
        if self.Instrucciones:
            cuerpo = ' '.join(str(i) for i in self.Instrucciones)
        else:
            cuerpo = self.NúmeroDeOperaciones
        return "repite({},{})".format(cuerpo, self.Veces)
class Func:
    """A subroutine built by Programa from the genes that follow it.

    When `ExpectLlama` is true the function receives a numeric `Id` so that
    Llama instructions can target it explicitly.
    """

    def __init__(self, expectLlama=False):
        self.Instrucciones = []
        self.ExpectLlama = expectLlama
        self.Id = None

    def ejecutar(self, cortadora, campo):
        for instrucción in self.Instrucciones:
            instrucción.ejecutar(cortadora, campo)

    def __str__(self):
        etiqueta = '' if self.Id is None else self.Id
        return "func{1}: {0}".format(
            ' '.join(map(str, self.Instrucciones)), etiqueta)
class Llama:
    """Instruction: invoke function `FuncId` (func 0 when unspecified).

    `Funcs` is injected later by Programa; out-of-range ids are a no-op.
    """

    def __init__(self, funcId=None):
        self.FuncId = funcId
        self.Funcs = None

    def ejecutar(self, cortadora, campo):
        objetivo = self.FuncId if self.FuncId is not None else 0
        if objetivo < len(self.Funcs):
            self.Funcs[objetivo].ejecutar(cortadora, campo)

    def __str__(self):
        return "llama-{}".format('func' if self.FuncId is None else self.FuncId)
class Programa:
    """Compiles a flat gene list into an executable program.

    Scanning the genes right-to-left:
      * each Repite absorbs the following NúmeroDeOperaciones genes as its body;
      * each Llama gets a reference to the shared funcs list;
      * each Func absorbs everything after it into a subroutine — unless a
        func already exists and this one does not expect Llama, in which case
        it degrades into an implicit Llama().
    Finally, Llama instructions that target missing or empty functions are
    pruned from both the functions and the main program.
    """

    def __init__(self, genes):
        temp = genes[:]
        funcs = []

        for índice in reversed(range(len(temp))):
            if type(temp[índice]) is Repite:
                # Pull the next N genes (clamped to the end) into the loop body.
                principio = índice + 1
                fin = min(índice + temp[índice].NúmeroDeOperaciones + 1,
                          len(temp))
                temp[índice].Instrucciones = temp[principio:fin]
                del temp[principio:fin]
                continue

            if type(temp[índice]) is Llama:
                temp[índice].Funcs = funcs

            if type(temp[índice]) is Func:
                if len(funcs) > 0 and not temp[índice].ExpectLlama:
                    # A function already exists; treat this gene as a call
                    # to it instead of defining another.
                    temp[índice] = Llama()
                    temp[índice].Funcs = funcs
                    continue
                # Everything after this gene becomes the function body;
                # empty Repite nodes are dropped from it.
                principio = índice + 1
                fin = len(temp)
                func = Func()
                if temp[índice].ExpectLlama:
                    func.Id = len(funcs)
                func.Instrucciones = [i for i in temp[principio:fin]
                                      if type(i) is not Repite or
                                      type(i) is Repite and len(
                                          i.Instrucciones) > 0]
                funcs.append(func)
                del temp[índice:fin]

        # Prune calls (inside functions) that reference missing/empty funcs.
        for func in funcs:
            for índice in reversed(range(len(func.Instrucciones))):
                if type(func.Instrucciones[índice]) is Llama:
                    func_id = func.Instrucciones[índice].FuncId
                    if func_id is None:
                        continue
                    if func_id >= len(funcs) or \
                            len(funcs[func_id].Instrucciones) == 0:
                        del func.Instrucciones[índice]
        # Same pruning for the main program.
        for índice in reversed(range(len(temp))):
            if type(temp[índice]) is Llama:
                func_id = temp[índice].FuncId
                if func_id is None:
                    continue
                if func_id >= len(funcs) or \
                        len(funcs[func_id].Instrucciones) == 0:
                    del temp[índice]
        self.Principal = temp
        self.Funcs = funcs

    def evaluar(self, cortadora, campo):
        """Execute the main program against the given mower and field."""
        for i, instrucción in enumerate(self.Principal):
            instrucción.ejecutar(cortadora, campo)

    def print(self):
        """Print each non-empty function followed by the main program."""
        if self.Funcs is not None:
            for func in self.Funcs:
                if func.Id is not None and len(func.Instrucciones) == 0:
                    continue
                print(func)
        print(' '.join(map(str, self.Principal)))
class Aptitud:
    """Fitness ranking: more mowed squares, then fewer steps, then fewer instructions."""

    def __init__(self, totalCortada, instruccionesTotales, cuentaDePasos):
        self.TotalCortada = totalCortada
        self.InstruccionesTotales = instruccionesTotales
        self.CuentaDePasos = cuentaDePasos

    def __gt__(self, otro):
        if self.TotalCortada != otro.TotalCortada:
            return self.TotalCortada > otro.TotalCortada
        # Same coverage: prefer fewer steps, then fewer instructions.
        return (self.CuentaDePasos < otro.CuentaDePasos
                if self.CuentaDePasos != otro.CuentaDePasos
                else self.InstruccionesTotales < otro.InstruccionesTotales)

    def __str__(self):
        return (f"{self.TotalCortada} segados con "
                f"{self.InstruccionesTotales} instrucciones y "
                f"{self.CuentaDePasos} pasos")
if __name__ == '__main__':
    # Run this chapter's unit tests when executed directly.
    unittest.main()
| 35.138498
| 79
| 0.573452
|
4a009c6be2386eca896e1c67f07b5c72e5633a86
| 6,382
|
py
|
Python
|
userbot/plugins/alive.py
|
pro-boy/Marshmello
|
4cf6d96b69a7e0617ba5ced96eb5ee557b318b4c
|
[
"MIT"
] | 2
|
2020-12-06T03:46:08.000Z
|
2022-02-19T20:34:52.000Z
|
userbot/plugins/alive.py
|
pro-boy/Marshmello
|
4cf6d96b69a7e0617ba5ced96eb5ee557b318b4c
|
[
"MIT"
] | 4
|
2020-11-07T07:39:51.000Z
|
2020-11-10T03:46:41.000Z
|
userbot/plugins/alive.py
|
pro-boy/Marshmello
|
4cf6d96b69a7e0617ba5ced96eb5ee557b318b4c
|
[
"MIT"
] | 9
|
2020-11-28T11:30:44.000Z
|
2021-06-01T07:11:57.000Z
|
# Thanks to Sipak bro and Aryan..
# animation Idea by @(Sipakisking) && @Hell boy_pikachu
# Made by @hellboi_atul ....and thanks to @Crackexy for the logos...
# Kang with credits else gay...
import asyncio
import os
import requests
import time
from PIL import Image
from io import BytesIO
from datetime import datetime
import random
from telethon import events
from userbot.utils import admin_cmd, sudo_cmd
from userbot import ALIVE_NAME
from telethon.tl.types import ChannelParticipantsAdmins
# Display name shown in the alive card; falls back when ALIVE_NAME is unset.
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "DARK COBRA"
# Optional URL/path of a custom alive photo, taken from the environment.
ALIVE_PHOTTO = os.environ.get("ALIVE_PHOTTO" , None)
# Thanks to Sipak bro and Raganork..
# animation Idea by @NOOB_GUY_OP (Sipakisking)
# Made by @hellboi_atul ....and thanks to @Crackexy for the logos...
# Kang with credits else gay...
# alive.py for DC(DARK COBRA)
# NOTE(review): `global` at module scope is a no-op; `borg` is presumably
# injected into builtins/globals by the userbot loader before plugins run —
# confirm, otherwise this raises NameError at import time.
global ghanti
ghanti = borg.uid
# Seconds to wait between animation frames in the .mello handler.
edit_time = 5
""" =======================CONSTANTS====================== """
# Media frames used by the alive animation (all currently the same clip).
file1 = "https://telegra.ph/file/419a921708a9592578665.mp4"
file2 = "https://telegra.ph/file/419a921708a9592578665.mp4"
file3 = "https://telegra.ph/file/419a921708a9592578665.mp4"
file4 = "https://telegra.ph/file/419a921708a9592578665.mp4"
""" =======================CONSTANTS====================== """
# Markdown caption sent with the alive card (user-facing text; kept verbatim).
pm_caption = " ᴍᴀʀsʜᴍᴇʟʟᴏ 🤟🤟 IS օռʟɨռɛ..!! **🔥🔥\n\n"
pm_caption += "⚔️⚔️ **master, Am Alive And Systems Are Working Perfectly As It Should Be...**⚔️⚔️\n\n"
pm_caption += "༆༄☠︎︎About My System \n\n"
pm_caption += "🔥🔥 **ᴛᴇʟᴇᴛʜᴏɴ**🔥🔥 >>》 15.0.0\n"
pm_caption += "🚨🚨 **group**🚨🚨 >>》 [ʝօɨռ](https://t.me/Marshmellobot_official)\n"
pm_caption += f"🔰🔰**ᴍᴀsᴛᴇʀ**🔰🔰 >>》 {DEFAULTUSER}\n"
pm_caption += "🌏🌏 **ᴄʀᴇᴀᴛᴏʀ**🌏🌏 >>》 [ᴏᴡɴᴇʀ](https://t.me/beast_boy_shubu)\n\n"
pm_caption += "🔶🔶 **ᴄʀᴇᴅɪᴛs**🔶🔶 >>》 [ᴛᴇᴀᴍ-ᴄᴏʙʀᴀ](https://t.me/dark_cobra_support)\n\n"
pm_caption += "[....▄███▄███▄\n....█████████\n.......▀█████▀\n...............▀█▀\n](https://t.me/itsproplugins)\n\n"
@borg.on(admin_cmd(pattern=r"mello"))
@borg.on(sudo_cmd(pattern=r"sudo", allow_sudo=True))
async def amireallyalive(yes):
    """Animated alive handler for .mello.

    Sends the first media frame with the alive caption, cycles through the
    remaining frames in place, deletes the triggering message and, if an
    ALIVE_PHOTTO is configured, finishes with the photo card.
    """
    global ghanti
    ghanti = borg.uid
    # First frame carries the caption; subsequent edits only swap the media.
    mensaje = await borg.send_file(yes.chat_id, file=file1, caption=pm_caption)
    for frame in (file2, file3, file1, file3, file2, file1, file4):
        await asyncio.sleep(edit_time)
        mensaje = await borg.edit_message(yes.chat_id, mensaje, file=frame)
    # Bug fix: the original referenced undefined names `alive` (the event
    # parameter is `yes`) and `PM_IMG`, raising NameError after the
    # animation finished. Delete the trigger message and optionally send
    # the configured alive photo instead.
    await yes.delete()
    if ALIVE_PHOTTO:
        await borg.send_file(yes.chat_id, ALIVE_PHOTTO, caption=pm_caption)
def get_readable_time(seconds: int) -> str:
    """Render a second count as a compact uptime string.

    Splits into seconds/minutes (base 60) then hours/days (base 24 — note
    this mirrors the original's unusual hour base), e.g. 3661 -> '1h:1m:1s'
    and 90061 -> '1days, 1h:1m:1s'. Returns '' for 0.
    """
    campos = []
    sufijos = ["s", "m", "h", "days"]
    restante = seconds
    for posición in range(4):
        base = 60 if posición < 2 else 24
        cociente, resto = divmod(restante, base)
        # Stop once nothing is left to split (matches the original's guard).
        if restante == 0 and cociente == 0:
            break
        campos.append(str(int(resto)) + sufijos[posición])
        restante = int(cociente)
    resultado = ""
    if len(campos) == 4:
        # The days field is set off with a comma rather than a colon.
        resultado = campos.pop() + ", "
    campos.reverse()
    return resultado + ":".join(campos)
@borg.on(admin_cmd(outgoing=True, pattern="salive"))
@borg.on(sudo_cmd(pattern=r"salive", allow_sudo=True))
async def amireallyalive(alive):
    """Alive handler for .salive.

    With ALIVE_PHOTTO configured: delete the trigger and send the photo with
    a caption. Otherwise: download the default logo, convert it to a webp
    sticker, send it, then send the text card.
    """
    if ALIVE_PHOTTO:
        # Caption text is user-facing output; kept byte-for-byte.
        caption = "**Marshmello 𝙸𝚂 🅾︎🅽🅻🅸🅽🅴**\n"
        caption += f"**𝕄𝕪 𝔹𝕠𝕤𝕤** : {DEFAULTUSER}\n"
        caption += "𝚃𝙴𝙻𝙴𝚃𝙷𝙾𝙽 𝚅𝙴𝚁𝚂𝙸𝙾𝙽 : 1.17.5\n"
        caption += "𝙿𝚈𝚃𝙷𝙾𝙽 𝚅𝙴𝚁𝚂𝙸𝙾𝙽 : 3.9.0\n"
        caption += "Creator : [BOSS-DJ](https://t.me/Beast_boy_shubu)\n"
        caption += "Bot Status : Working perfectly\n"
        caption += "𝘓𝘐𝘚𝘌𝘕𝘊𝘌 : [AGPL-3.0 ʟɪᴄᴇɴꜱᴇ](https://jenaatul8.wixsite.com/Beast_boy_shubu)\n"
        caption += "𝘾𝙊𝙋𝙔𝙍𝙄𝙂𝙃𝙏 𝘽𝙔 : [ MARSHMELLO ](https://t.me/Marshmello_op)\n"
        caption += "[┏┓━┏┓━━━━┏┓━┏┓━━━━━\n ┃┃━┃┃━━━━┃┃━┃┃━━━━━\n ┃┗━┛┃┏━━┓┃┃━┃┃━┏━━┓\n ┃┏━┓┃┃┏┓┃┃┃━┃┃━┃┏┓┃ \n ┃┃━┃┃┃┃━┫┃┗┓┃┗┓┃┗┛┃ \n ┗┛━┗┛┗━━┛┗━┛┗━┛┗━━┛](https://t.me/Cyber_legendss)"
        # Bug fix: the original called `allive.delete()` (undefined name,
        # NameError) after already deleting the trigger; delete once.
        await alive.delete()
        await borg.send_file(alive.chat_id, ALIVE_PHOTTO,
                             caption=caption, link_preview=False)
        return
    # Fallback: fetch the default logo and send it as a webp sticker.
    req = requests.get("https://telegra.ph/file/d39ef0f5a3d7d684f2e33.png")
    req.raise_for_status()
    file = BytesIO(req.content)
    file.seek(0)
    img = Image.open(file)
    with BytesIO() as sticker:
        img.save(sticker, "webp")
        sticker.name = "sticker.webp"
        sticker.seek(0)
        await borg.send_file(alive.chat_id, file=sticker)
    await borg.send_message(alive.chat_id,"**Marshmello 𝙸𝚂 🅾︎🅽🅻🅸🅽🅴**\n"
                            f"**𝕄𝕪 𝔹𝕠𝕤𝕤** : {DEFAULTUSER}\n"
                            "𝚃𝙴𝙻𝙴𝚃𝙷𝙾𝙽 𝚅𝙴𝚁𝚂𝙸𝙾𝙽 : 1.17.5\n"
                            "𝙿𝚈𝚃𝙷𝙾𝙽 𝚅𝙴𝚁𝚂𝙸𝙾𝙽 : 3.9.0\n"
                            "𝚂𝚄𝙿𝙿𝙾𝚁𝚃 𝙲𝙷𝙰𝙽𝙽𝙴𝙻 : [ᴊᴏɪɴ](https://t.me/PROBOY_GFX)\n"
                            "𝚂𝚄𝙿𝙿𝙾𝚁𝚃 𝙶𝚁𝙾𝚄𝙿 : [ᴊᴏɪɴ](https://t.me/CYBER_LEGENDSS)\n"
                            "𝘓𝘐𝘚𝘌𝘕𝘊𝘌 : [AGPL-3.0 ʟɪᴄᴇɴꜱᴇ](https://jenaatul8.wixsite.com/BEAST_BOY_SHUBU)\n"
                            "𝘾𝙊𝙋𝙔𝙍𝙄𝙂𝙃𝙏 𝘽𝙔 : [ DARK-COBRA ](https://t.me/DARK-COBRA)\n"
                            "[ ┏┓━┏┓━━━━┏┓━┏┓━━━━━\n ┃┃━┃┃━━━━┃┃━┃┃━━━━━\n ┃┗━┛┃┏━━┓┃┃━┃┃━┏━━┓\n ┃┏━┓┃┃┏┓┃┃┃━┃┃━┃┏┓┃ \n ┃┃━┃┃┃┃━┫┃┗┓┃┗┓┃┗┛┃ \n ┗┛━┗┛┗━━┛┗━┛┗━┛┗━━┛](https://t.me/CYBER_LEGENDSS)" , link_preview = False)
    await alive.delete()
| 42.546667
| 222
| 0.580852
|
4a009c9a28a9369d2f11ca59a57e2a0a7984695e
| 706
|
py
|
Python
|
app/models/owner.py
|
ashrafmahmood/oscarine-api
|
775f3ad5a3d3c50e36e228ee348ef40f075e95ec
|
[
"MIT"
] | null | null | null |
app/models/owner.py
|
ashrafmahmood/oscarine-api
|
775f3ad5a3d3c50e36e228ee348ef40f075e95ec
|
[
"MIT"
] | 96
|
2021-05-12T11:05:37.000Z
|
2022-03-15T02:20:26.000Z
|
app/models/owner.py
|
ashrafmahmood/oscarine-api
|
775f3ad5a3d3c50e36e228ee348ef40f075e95ec
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import Optional
from pydantic import AnyUrl, BaseModel, EmailStr, StrictBool
class OwnerCreate(BaseModel):
email: EmailStr
password: str
class OwnerDetails(BaseModel):
state: Optional[str] = None
city: Optional[str] = None
phone_number: Optional[str] = None
name: Optional[str] = None
email: EmailStr
last_seen: datetime
id: int
avatar_image: Optional[AnyUrl] = None
email_verified: bool
class Config:
orm_mode = True
class OwnerUpdate(BaseModel):
email: EmailStr = None
name: str = None
phone_number: str = None
avatar_image: AnyUrl = None
city: str = None
state: str = None
| 20.764706
| 60
| 0.685552
|
4a009cd7aeeda27506aea9ebbaac75e8bd3cff78
| 1,694
|
py
|
Python
|
raysect/optical/material/modifiers/__init__.py
|
raysect/source
|
11f03089d0379fc7fb4d23c6f60c3d255673cec9
|
[
"BSD-3-Clause"
] | 71
|
2015-10-25T16:50:18.000Z
|
2022-03-02T03:46:19.000Z
|
raysect/optical/material/modifiers/__init__.py
|
raysect/source
|
11f03089d0379fc7fb4d23c6f60c3d255673cec9
|
[
"BSD-3-Clause"
] | 336
|
2015-02-11T22:39:54.000Z
|
2022-02-22T18:42:32.000Z
|
raysect/optical/material/modifiers/__init__.py
|
raysect/source
|
11f03089d0379fc7fb4d23c6f60c3d255673cec9
|
[
"BSD-3-Clause"
] | 24
|
2016-09-11T17:12:10.000Z
|
2022-02-24T22:57:09.000Z
|
# Copyright (c) 2014-2021, Dr Alex Meakins, Raysect Project
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Raysect Project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .roughen import *
from .transform import *
from .blend import *
from .add import *
| 49.823529
| 79
| 0.764463
|
4a009d71e42e7ee2b66baf0176b45022cd9e2aa6
| 259
|
py
|
Python
|
produto.py
|
paulo-caixeta/00_CURSO_PYTHON
|
03097c7ed625796560a79c01511b8990f37efa6a
|
[
"MIT"
] | null | null | null |
produto.py
|
paulo-caixeta/00_CURSO_PYTHON
|
03097c7ed625796560a79c01511b8990f37efa6a
|
[
"MIT"
] | null | null | null |
produto.py
|
paulo-caixeta/00_CURSO_PYTHON
|
03097c7ed625796560a79c01511b8990f37efa6a
|
[
"MIT"
] | null | null | null |
tamanho = int(input("Digite o tamanho da sequência de números: "))
produto = 1
i=0
while i < tamanho:
valor = float(input("Digite um valor a ser multiplicado: "))
produto = produto * valor
i = i + 1
print ("O produto dos valores digitados é:", produto)
| 23.545455
| 67
| 0.687259
|
4a009dcf2b9f181b5ad4495c0fc98bfe40a76523
| 2,983
|
py
|
Python
|
huaweicloud-sdk-mpc/huaweicloudsdkmpc/v1/model/create_reset_tracks_task_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-11-03T07:54:50.000Z
|
2021-11-03T07:54:50.000Z
|
huaweicloud-sdk-mpc/huaweicloudsdkmpc/v1/model/create_reset_tracks_task_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-mpc/huaweicloudsdkmpc/v1/model/create_reset_tracks_task_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class CreateResetTracksTaskResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'task_id': 'str'
}
attribute_map = {
'task_id': 'task_id'
}
def __init__(self, task_id=None):
"""CreateResetTracksTaskResponse - a model defined in huaweicloud sdk"""
super(CreateResetTracksTaskResponse, self).__init__()
self._task_id = None
self.discriminator = None
if task_id is not None:
self.task_id = task_id
@property
def task_id(self):
"""Gets the task_id of this CreateResetTracksTaskResponse.
任务ID。 如果返回值为200 OK,为接受任务后产生的任务ID。
:return: The task_id of this CreateResetTracksTaskResponse.
:rtype: str
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""Sets the task_id of this CreateResetTracksTaskResponse.
任务ID。 如果返回值为200 OK,为接受任务后产生的任务ID。
:param task_id: The task_id of this CreateResetTracksTaskResponse.
:type: str
"""
self._task_id = task_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateResetTracksTaskResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.873874
| 80
| 0.56051
|
4a009e4486a32815520a275c9bf6b4bbba4da247
| 1,114
|
py
|
Python
|
tests/code/test_code_finder.py
|
sentrip/pygamehack
|
90a8722172d6a45bf7468e306a23d89832a9ea44
|
[
"MIT"
] | 2
|
2021-07-25T16:07:17.000Z
|
2021-11-22T15:33:35.000Z
|
tests/code/test_code_finder.py
|
sentrip/pygamehack
|
90a8722172d6a45bf7468e306a23d89832a9ea44
|
[
"MIT"
] | null | null | null |
tests/code/test_code_finder.py
|
sentrip/pygamehack
|
90a8722172d6a45bf7468e306a23d89832a9ea44
|
[
"MIT"
] | null | null | null |
import pytest
import pygamehack as gh
from pygamehack.code import CodeFinder, CodeFindTarget
@pytest.mark.skip
def test_code_finder(hack, app, set_cleanup):
def cleanup():
hack.write_u32(app.addr.marker + 0x8, 0)
hack.write_u32(app.addr.marker + 0xC, 0)
set_cleanup(cleanup)
class Player(gh.Struct):
name: gh.str[8] = 0x0
pos_x: gh.uint = 0x8
pos_y: gh.uint = 0xC
class Game(gh.Struct):
players: gh.ptr[gh.arr[Player, 4]] = 0x10
class TestProgram(gh.Struct):
marker: gh.uint = 0x0
flag: gh.uint = 0x8
update: gh.uint = 0xC
game: gh.ptr[Game] = 0x4C
hack.attach(app.pid)
t = TestProgram(gh.Address(hack, app.addr.marker))
print(t.game.players[0].pos_x)
# getattr(t, 'update')
# finder = CodeFinder()
# finder.add_target('update', CodeFindTarget(t.variables['update'].address,
# watch_trigger=lambda h, a: h.write_u32(a.value - 4, 1)))
# results = finder.find(hack)
#
# for name, code in results:
# print(name, code)
| 27.85
| 105
| 0.598743
|
4a009f0833f6488509ac6d5c3f7386b3070939cb
| 218,958
|
py
|
Python
|
corporate/tests/test_stripe.py
|
yuroitaki/zulip
|
95303a9929424b55a1f7c7cce9313c4619a9533b
|
[
"Apache-2.0"
] | 1
|
2022-01-26T14:45:16.000Z
|
2022-01-26T14:45:16.000Z
|
corporate/tests/test_stripe.py
|
jai2201/zulip
|
95303a9929424b55a1f7c7cce9313c4619a9533b
|
[
"Apache-2.0"
] | null | null | null |
corporate/tests/test_stripe.py
|
jai2201/zulip
|
95303a9929424b55a1f7c7cce9313c4619a9533b
|
[
"Apache-2.0"
] | null | null | null |
import json
import operator
import os
import random
import re
import sys
import uuid
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from decimal import Decimal
from functools import wraps
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
)
from unittest.mock import Mock, patch
import orjson
import responses
import stripe
import stripe.util
from django.conf import settings
from django.core import signing
from django.urls.resolvers import get_resolver
from django.utils.crypto import get_random_string
from django.utils.timezone import now as timezone_now
from typing_extensions import ParamSpec
from corporate.lib.stripe import (
DEFAULT_INVOICE_DAYS_UNTIL_DUE,
MAX_INVOICED_LICENSES,
MIN_INVOICED_LICENSES,
STRIPE_API_VERSION,
BillingError,
InvalidBillingSchedule,
InvalidTier,
StripeCardError,
add_months,
approve_sponsorship,
attach_discount_to_realm,
catch_stripe_errors,
compute_plan_parameters,
customer_has_credit_card_as_default_payment_method,
do_change_remote_server_plan_type,
do_create_stripe_customer,
do_deactivate_remote_server,
downgrade_small_realms_behind_on_payments_as_needed,
get_discount_for_realm,
get_latest_seat_count,
get_plan_renewal_or_end_date,
get_price_per_license,
get_realms_to_default_discount_dict,
invoice_plan,
invoice_plans_as_needed,
is_free_trial_offer_enabled,
is_realm_on_free_trial,
is_sponsored_realm,
make_end_of_cycle_updates_if_needed,
next_month,
process_initial_upgrade,
sign_string,
stripe_customer_has_credit_card_as_default_payment_method,
stripe_get_customer,
switch_realm_from_standard_to_plus_plan,
unsign_string,
update_billing_method_of_current_plan,
update_license_ledger_for_automanaged_plan,
update_license_ledger_for_manual_plan,
update_license_ledger_if_needed,
update_or_create_stripe_customer,
update_sponsorship_status,
void_all_open_invoices,
)
from corporate.models import (
Customer,
CustomerPlan,
Event,
LicenseLedger,
PaymentIntent,
Session,
ZulipSponsorshipRequest,
get_current_plan_by_customer,
get_current_plan_by_realm,
get_customer_by_realm,
)
from zerver.actions.create_realm import do_create_realm
from zerver.actions.create_user import (
do_activate_mirror_dummy_user,
do_create_user,
do_reactivate_user,
)
from zerver.actions.realm_settings import do_deactivate_realm, do_reactivate_realm
from zerver.actions.users import do_deactivate_user
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.utils import assert_is_not_none
from zerver.models import (
Message,
Realm,
RealmAuditLog,
Recipient,
UserProfile,
get_realm,
get_system_bot,
)
from zilencer.models import RemoteZulipServer, RemoteZulipServerAuditLog
if TYPE_CHECKING:
from django.test.client import _MonkeyPatchedWSGIResponse as TestHttpResponse
CallableT = TypeVar("CallableT", bound=Callable[..., Any])
ParamT = ParamSpec("ParamT")
ReturnT = TypeVar("ReturnT")
STRIPE_FIXTURES_DIR = "corporate/tests/stripe_fixtures"
def create_payment_method(card_number: str) -> stripe.PaymentMethod:
return stripe.PaymentMethod.create(
type="card",
card={
"number": card_number,
"exp_month": 3,
"exp_year": 2033,
"cvc": "333",
},
)
def stripe_fixture_path(
decorated_function_name: str, mocked_function_name: str, call_count: int
) -> str:
# Make the eventual filename a bit shorter, and also we conventionally
# use test_* for the python test files
if decorated_function_name[:5] == "test_":
decorated_function_name = decorated_function_name[5:]
return f"{STRIPE_FIXTURES_DIR}/{decorated_function_name}--{mocked_function_name[7:]}.{call_count}.json"
def fixture_files_for_function(decorated_function: CallableT) -> List[str]: # nocoverage
decorated_function_name = decorated_function.__name__
if decorated_function_name[:5] == "test_":
decorated_function_name = decorated_function_name[5:]
return sorted(
f"{STRIPE_FIXTURES_DIR}/{f}"
for f in os.listdir(STRIPE_FIXTURES_DIR)
if f.startswith(decorated_function_name + "--")
)
def generate_and_save_stripe_fixture(
decorated_function_name: str, mocked_function_name: str, mocked_function: CallableT
) -> Callable[[Any, Any], Any]: # nocoverage
def _generate_and_save_stripe_fixture(*args: Any, **kwargs: Any) -> Any:
# Note that mock is not the same as mocked_function, even though their
# definitions look the same
mock = operator.attrgetter(mocked_function_name)(sys.modules[__name__])
fixture_path = stripe_fixture_path(
decorated_function_name, mocked_function_name, mock.call_count
)
try:
with responses.RequestsMock() as request_mock:
request_mock.add_passthru("https://api.stripe.com")
# Talk to Stripe
stripe_object = mocked_function(*args, **kwargs)
except stripe.error.StripeError as e:
with open(fixture_path, "w") as f:
error_dict = e.__dict__
error_dict["headers"] = dict(error_dict["headers"])
f.write(
json.dumps(error_dict, indent=2, separators=(",", ": "), sort_keys=True) + "\n"
)
raise e
with open(fixture_path, "w") as f:
if stripe_object is not None:
f.write(str(stripe_object) + "\n")
else:
f.write("{}\n")
return stripe_object
return _generate_and_save_stripe_fixture
def read_stripe_fixture(
decorated_function_name: str, mocked_function_name: str
) -> Callable[[Any, Any], Any]:
def _read_stripe_fixture(*args: Any, **kwargs: Any) -> Any:
mock = operator.attrgetter(mocked_function_name)(sys.modules[__name__])
fixture_path = stripe_fixture_path(
decorated_function_name, mocked_function_name, mock.call_count
)
with open(fixture_path, "rb") as f:
fixture = orjson.loads(f.read())
# Check for StripeError fixtures
if "json_body" in fixture:
requestor = stripe.api_requestor.APIRequestor()
# This function will raise the relevant StripeError according to the fixture
requestor.interpret_response(
fixture["http_body"], fixture["http_status"], fixture["headers"]
)
return stripe.util.convert_to_stripe_object(fixture)
return _read_stripe_fixture
def delete_fixture_data(decorated_function: CallableT) -> None: # nocoverage
for fixture_file in fixture_files_for_function(decorated_function):
os.remove(fixture_file)
def normalize_fixture_data(
decorated_function: CallableT, tested_timestamp_fields: Sequence[str] = []
) -> None: # nocoverage
# stripe ids are all of the form cus_D7OT2jf5YAtZQ2
id_lengths = [
("test", 12),
("cus", 14),
("prod", 14),
("req", 14),
("si", 14),
("sli", 14),
("sub", 14),
("acct", 16),
("card", 24),
("ch", 24),
("ii", 24),
("il", 24),
("in", 24),
("pi", 24),
("price", 24),
("src", 24),
("src_client_secret", 24),
("tok", 24),
("txn", 24),
("invst", 26),
("rcpt", 31),
]
# We'll replace cus_D7OT2jf5YAtZQ2 with something like cus_NORMALIZED0001
pattern_translations = {
f"{prefix}_[A-Za-z0-9]{{{length}}}": f"{prefix}_NORMALIZED%0{length - 10}d"
for prefix, length in id_lengths
}
# We'll replace "invoice_prefix": "A35BC4Q" with something like "invoice_prefix": "NORMA01"
pattern_translations.update(
{
'"invoice_prefix": "([A-Za-z0-9]{7,8})"': "NORMA%02d",
'"fingerprint": "([A-Za-z0-9]{16})"': "NORMALIZED%06d",
'"number": "([A-Za-z0-9]{7,8}-[A-Za-z0-9]{4})"': "NORMALI-%04d",
'"address": "([A-Za-z0-9]{9}-test_[A-Za-z0-9]{12})"': "000000000-test_NORMALIZED%02d",
# Don't use (..) notation, since the matched strings may be small integers that will also match
# elsewhere in the file
'"realm_id": "[0-9]+"': '"realm_id": "%d"',
r'"account_name": "[\w\s]+"': '"account_name": "NORMALIZED-%d"',
}
)
# Normalizing across all timestamps still causes a lot of variance run to run, which is
# why we're doing something a bit more complicated
for i, timestamp_field in enumerate(tested_timestamp_fields):
# Don't use (..) notation, since the matched timestamp can easily appear in other fields
pattern_translations[
f'"{timestamp_field}": 1[5-9][0-9]{{8}}(?![0-9-])'
] = f'"{timestamp_field}": 1{i+1:02}%07d'
normalized_values: Dict[str, Dict[str, str]] = {
pattern: {} for pattern in pattern_translations.keys()
}
for fixture_file in fixture_files_for_function(decorated_function):
with open(fixture_file) as f:
file_content = f.read()
for pattern, translation in pattern_translations.items():
for match in re.findall(pattern, file_content):
if match not in normalized_values[pattern]:
normalized_values[pattern][match] = translation % (
len(normalized_values[pattern]) + 1,
)
file_content = file_content.replace(match, normalized_values[pattern][match])
file_content = re.sub(r'(?<="risk_score": )(\d+)', "0", file_content)
file_content = re.sub(r'(?<="times_redeemed": )(\d+)', "0", file_content)
file_content = re.sub(
r'(?<="idempotency-key": )"([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f-]*)"',
'"00000000-0000-0000-0000-000000000000"',
file_content,
)
# Dates
file_content = re.sub(r'(?<="Date": )"(.* GMT)"', '"NORMALIZED DATETIME"', file_content)
file_content = re.sub(r"[0-3]\d [A-Z][a-z]{2} 20[1-2]\d", "NORMALIZED DATE", file_content)
# IP addresses
file_content = re.sub(r'"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"', '"0.0.0.0"', file_content)
# All timestamps not in tested_timestamp_fields
file_content = re.sub(r": (1[5-9][0-9]{8})(?![0-9-])", ": 1000000000", file_content)
with open(fixture_file, "w") as f:
f.write(file_content)
MOCKED_STRIPE_FUNCTION_NAMES = [
f"stripe.{name}"
for name in [
"checkout.Session.create",
"checkout.Session.list",
"Charge.create",
"Charge.list",
"Coupon.create",
"Customer.create",
"Customer.create_balance_transaction",
"Customer.list_balance_transactions",
"Customer.retrieve",
"Customer.save",
"Customer.list",
"Customer.modify",
"Event.list",
"Invoice.create",
"Invoice.finalize_invoice",
"Invoice.list",
"Invoice.pay",
"Invoice.refresh",
"Invoice.upcoming",
"Invoice.void_invoice",
"InvoiceItem.create",
"InvoiceItem.list",
"PaymentIntent.confirm",
"PaymentIntent.create",
"PaymentIntent.list",
"PaymentIntent.retrieve",
"PaymentMethod.attach",
"PaymentMethod.create",
"PaymentMethod.detach",
"PaymentMethod.list",
"Plan.create",
"Product.create",
"SetupIntent.create",
"SetupIntent.list",
"SetupIntent.retrieve",
"Subscription.create",
"Subscription.delete",
"Subscription.retrieve",
"Subscription.save",
"Token.create",
]
]
def mock_stripe(
tested_timestamp_fields: Sequence[str] = [], generate: bool = settings.GENERATE_STRIPE_FIXTURES
) -> Callable[[Callable[ParamT, ReturnT]], Callable[ParamT, ReturnT]]:
def _mock_stripe(decorated_function: Callable[ParamT, ReturnT]) -> Callable[ParamT, ReturnT]:
generate_fixture = generate
if generate_fixture: # nocoverage
assert stripe.api_key
for mocked_function_name in MOCKED_STRIPE_FUNCTION_NAMES:
mocked_function = operator.attrgetter(mocked_function_name)(sys.modules[__name__])
if generate_fixture:
side_effect = generate_and_save_stripe_fixture(
decorated_function.__name__, mocked_function_name, mocked_function
) # nocoverage
else:
side_effect = read_stripe_fixture(decorated_function.__name__, mocked_function_name)
decorated_function = patch(
mocked_function_name,
side_effect=side_effect,
autospec=mocked_function_name.endswith(".refresh"),
)(decorated_function)
@wraps(decorated_function)
def wrapped(*args: ParamT.args, **kwargs: ParamT.kwargs) -> ReturnT:
if generate_fixture: # nocoverage
delete_fixture_data(decorated_function)
val = decorated_function(*args, **kwargs)
normalize_fixture_data(decorated_function, tested_timestamp_fields)
return val
else:
return decorated_function(*args, **kwargs)
return wrapped
return _mock_stripe
class StripeTestCase(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
realm = get_realm("zulip")
# Explicitly limit our active users to 6 regular users,
# to make seat_count less prone to changes in our test data.
# We also keep a guest user and a bot to make the data
# slightly realistic.
active_emails = [
self.example_email("AARON"),
self.example_email("cordelia"),
self.example_email("hamlet"),
self.example_email("iago"),
self.example_email("othello"),
self.example_email("desdemona"),
self.example_email("polonius"), # guest
self.example_email("default_bot"), # bot
]
# Deactivate all users in our realm that aren't in our whitelist.
for user_profile in UserProfile.objects.filter(realm_id=realm.id).exclude(
delivery_email__in=active_emails
):
do_deactivate_user(user_profile, acting_user=None)
# sanity check our 8 expected users are active
self.assertEqual(
UserProfile.objects.filter(realm=realm, is_active=True).count(),
8,
)
# Make sure we have active users outside our realm (to make
# sure relevant queries restrict on realm).
self.assertEqual(
UserProfile.objects.exclude(realm=realm).filter(is_active=True).count(),
10,
)
# Our seat count excludes our guest user and bot, and
# we want this to be predictable for certain tests with
# arithmetic calculations.
self.assertEqual(get_latest_seat_count(realm), 6)
self.seat_count = 6
self.signed_seat_count, self.salt = sign_string(str(self.seat_count))
# Choosing dates with corresponding timestamps below 1500000000 so that they are
# not caught by our timestamp normalization regex in normalize_fixture_data
self.now = datetime(2012, 1, 2, 3, 4, 5, tzinfo=timezone.utc)
self.next_month = datetime(2012, 2, 2, 3, 4, 5, tzinfo=timezone.utc)
self.next_year = datetime(2013, 1, 2, 3, 4, 5, tzinfo=timezone.utc)
def get_signed_seat_count_from_response(self, response: "TestHttpResponse") -> Optional[str]:
match = re.search(r"name=\"signed_seat_count\" value=\"(.+)\"", response.content.decode())
return match.group(1) if match else None
def get_salt_from_response(self, response: "TestHttpResponse") -> Optional[str]:
match = re.search(r"name=\"salt\" value=\"(\w+)\"", response.content.decode())
return match.group(1) if match else None
def get_test_card_number(
self,
attaches_to_customer: bool,
charge_succeeds: Optional[bool] = None,
card_provider: Optional[str] = None,
) -> str:
if attaches_to_customer:
assert charge_succeeds is not None
if charge_succeeds:
if card_provider == "visa":
return "4242424242424242"
if card_provider == "mastercard":
return "5555555555554444"
raise AssertionError("Unreachable code path")
else:
return "4000000000000341"
else:
return "4000000000000002"
def assert_details_of_valid_session_from_event_status_endpoint(
self, stripe_session_id: str, expected_details: Dict[str, Any]
) -> None:
json_response = self.client_get(
"/json/billing/event/status",
{
"stripe_session_id": stripe_session_id,
},
)
response_dict = self.assert_json_success(json_response)
self.assertEqual(response_dict["session"], expected_details)
def assert_details_of_valid_payment_intent_from_event_status_endpoint(
self,
stripe_payment_intent_id: str,
expected_details: Dict[str, Any],
) -> None:
json_response = self.client_get(
"/json/billing/event/status",
{
"stripe_payment_intent_id": stripe_payment_intent_id,
},
)
response_dict = self.assert_json_success(json_response)
self.assertEqual(response_dict["payment_intent"], expected_details)
def trigger_stripe_checkout_session_completed_webhook(
self,
payment_method: stripe.PaymentMethod,
stripe_session: Optional[stripe.checkout.Session] = None,
) -> None:
[checkout_setup_intent] = stripe.SetupIntent.list(limit=1)
stripe_setup_intent = stripe.SetupIntent.create(
payment_method=payment_method.id,
confirm=True,
payment_method_types=checkout_setup_intent.payment_method_types,
customer=checkout_setup_intent.customer,
metadata=checkout_setup_intent.metadata,
usage=checkout_setup_intent.usage,
)
if stripe_session is None:
[stripe_session] = stripe.checkout.Session.list(limit=1)
stripe_session_dict = stripe_session.to_dict_recursive()
stripe_session_dict["setup_intent"] = stripe_setup_intent.id
event_payload = {
"id": f"evt_{get_random_string(24)}",
"object": "event",
"data": {"object": stripe_session_dict},
"type": "checkout.session.completed",
"api_version": STRIPE_API_VERSION,
}
response = self.client_post(
"/stripe/webhook/", event_payload, content_type="application/json"
)
assert response.status_code == 200
def send_stripe_webhook_event(self, event: stripe.Event) -> None:
response = self.client_post(
"/stripe/webhook/", event.to_dict_recursive(), content_type="application/json"
)
assert response.status_code == 200
def send_stripe_webhook_events(self, most_recent_event: stripe.Event) -> None:
while True:
events_old_to_new = list(reversed(stripe.Event.list(ending_before=most_recent_event)))
if len(events_old_to_new) == 0:
break
for event in events_old_to_new:
self.send_stripe_webhook_event(event)
most_recent_event = events_old_to_new[-1]
def send_last_stripe_webhook_event(self) -> None:
[last_event] = stripe.Event.list(limit=1)
self.send_stripe_webhook_event(last_event)
def upgrade(
self,
invoice: bool = False,
talk_to_stripe: bool = True,
onboarding: bool = False,
realm: Optional[Realm] = None,
payment_method: Optional[stripe.PaymentMethod] = None,
upgrade_page_response: Optional["TestHttpResponse"] = None,
del_args: Sequence[str] = [],
**kwargs: Any,
) -> "TestHttpResponse":
host_args = {}
if realm is not None: # nocoverage: TODO
host_args["HTTP_HOST"] = realm.host
if upgrade_page_response is None:
upgrade_page_response = self.client_get("/upgrade/", {}, **host_args)
params: Dict[str, Any] = {
"schedule": "annual",
"signed_seat_count": self.get_signed_seat_count_from_response(upgrade_page_response),
"salt": self.get_salt_from_response(upgrade_page_response),
}
if invoice: # send_invoice
params.update(
billing_modality="send_invoice",
licenses=kwargs.get("licenses", 123),
)
else: # charge_automatically
params.update(
billing_modality="charge_automatically",
license_management="automatic",
)
if onboarding:
params.update(
onboarding="true",
)
params.update(kwargs)
for key in del_args:
if key in params:
del params[key]
if talk_to_stripe:
[last_event] = stripe.Event.list(limit=1)
upgrade_json_response = self.client_post("/json/billing/upgrade", params, **host_args)
if invoice or not talk_to_stripe:
return upgrade_json_response
expected_session_details = {"status": "created"}
if is_free_trial_offer_enabled():
if onboarding:
expected_session_details["type"] = "free_trial_upgrade_from_onboarding_page"
else:
expected_session_details["type"] = "free_trial_upgrade_from_billing_page"
else:
last_stripe_payment_intent = PaymentIntent.objects.last()
assert last_stripe_payment_intent is not None
expected_session_details["type"] = "upgrade_from_billing_page"
expected_session_details[
"stripe_payment_intent_id"
] = last_stripe_payment_intent.stripe_payment_intent_id
response_dict = self.assert_json_success(upgrade_json_response)
self.assert_details_of_valid_session_from_event_status_endpoint(
response_dict["stripe_session_id"], expected_session_details
)
if payment_method is None:
payment_method = create_payment_method(
self.get_test_card_number(
attaches_to_customer=True, charge_succeeds=True, card_provider="visa"
)
)
self.trigger_stripe_checkout_session_completed_webhook(payment_method)
self.send_stripe_webhook_events(last_event)
return upgrade_json_response
# Upgrade without talking to Stripe
def local_upgrade(
self,
licenses: int,
automanage_licenses: bool,
billing_schedule: int,
charge_automatically: bool,
free_trial: bool,
) -> None:
class StripeMock(Mock):
def __init__(self, depth: int = 1):
super().__init__(spec=stripe.Card)
self.id = "id"
self.created = "1000"
self.last4 = "4242"
def upgrade_func(
licenses: int,
automanage_licenses: bool,
billing_schedule: int,
charge_automatically: bool,
free_trial: bool,
*mock_args: Any,
) -> Any:
return process_initial_upgrade(
self.example_user("hamlet"),
licenses,
automanage_licenses,
billing_schedule,
charge_automatically,
free_trial,
)
for mocked_function_name in MOCKED_STRIPE_FUNCTION_NAMES:
upgrade_func = patch(mocked_function_name, return_value=StripeMock())(upgrade_func)
upgrade_func(
licenses, automanage_licenses, billing_schedule, charge_automatically, free_trial
)
class StripeTest(StripeTestCase):
def test_catch_stripe_errors(self) -> None:
@catch_stripe_errors
def raise_invalid_request_error() -> None:
raise stripe.error.InvalidRequestError("message", "param", "code", json_body={})
with self.assertLogs("corporate.stripe", "ERROR") as error_log:
with self.assertRaises(BillingError) as billing_context:
raise_invalid_request_error()
self.assertEqual("other stripe error", billing_context.exception.error_description)
self.assertEqual(
error_log.output, ["ERROR:corporate.stripe:Stripe error: None None None None"]
)
@catch_stripe_errors
def raise_card_error() -> None:
error_message = "The card number is not a valid credit card number."
json_body = {"error": {"message": error_message}}
raise stripe.error.CardError(
error_message, "number", "invalid_number", json_body=json_body
)
with self.assertLogs("corporate.stripe", "INFO") as info_log:
with self.assertRaises(StripeCardError) as card_context:
raise_card_error()
self.assertIn("not a valid credit card", str(card_context.exception))
self.assertEqual("card error", card_context.exception.error_description)
self.assertEqual(
info_log.output, ["INFO:corporate.stripe:Stripe card error: None None None None"]
)
def test_billing_not_enabled(self) -> None:
iago = self.example_user("iago")
with self.settings(BILLING_ENABLED=False):
self.login_user(iago)
response = self.client_get("/upgrade/", follow=True)
self.assertEqual(response.status_code, 404)
    @mock_stripe(tested_timestamp_fields=["created"])
    def test_upgrade_by_card(self, *mocks: Mock) -> None:
        """End-to-end card upgrade: go through Stripe Checkout, then verify
        the Stripe-side objects (Customer, Charge, Invoice, line items), the
        Zulip-side objects (Customer, CustomerPlan, LicenseLedger,
        RealmAuditLog, Realm plan type), and the resulting /billing page.

        NOTE: the Stripe calls replay ordered recorded fixtures via
        @mock_stripe, so the sequence of API calls here must not change.
        """
        user = self.example_user("hamlet")
        self.login_user(user)
        response = self.client_get("/upgrade/")
        self.assert_in_success_response(["Pay annually"], response)
        self.assertNotEqual(user.realm.plan_type, Realm.PLAN_TYPE_STANDARD)
        self.assertFalse(Customer.objects.filter(realm=user.realm).exists())
        # Click "Make payment" in Stripe Checkout
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            response = self.upgrade()
        # Exactly one PaymentIntent should exist, and its handler should have succeeded.
        [payment_intent] = PaymentIntent.objects.all()
        assert payment_intent.stripe_payment_intent_id is not None
        response_dict = self.assert_json_success(response)
        self.assert_details_of_valid_session_from_event_status_endpoint(
            response_dict["stripe_session_id"],
            {
                "type": "upgrade_from_billing_page",
                "status": "completed",
                "stripe_payment_intent_id": payment_intent.stripe_payment_intent_id,
                "event_handler": {
                    "status": "succeeded",
                },
            },
        )
        self.assert_details_of_valid_payment_intent_from_event_status_endpoint(
            payment_intent.stripe_payment_intent_id,
            {"status": "succeeded", "event_handler": {"status": "succeeded"}},
        )
        # Check that we correctly created a Customer object in Stripe
        stripe_customer = stripe_get_customer(
            assert_is_not_none(Customer.objects.get(realm=user.realm).stripe_customer_id)
        )
        self.assertTrue(stripe_customer_has_credit_card_as_default_payment_method(stripe_customer))
        self.assertEqual(stripe_customer.description, "zulip (Zulip Dev)")
        self.assertEqual(stripe_customer.discount, None)
        self.assertEqual(stripe_customer.email, user.delivery_email)
        metadata_dict = dict(stripe_customer.metadata)
        self.assertEqual(metadata_dict["realm_str"], "zulip")
        try:
            int(metadata_dict["realm_id"])
        except ValueError:  # nocoverage
            raise AssertionError("realm_id is not a number")
        # Check Charges in Stripe
        [charge] = stripe.Charge.list(customer=stripe_customer.id)
        self.assertEqual(charge.amount, 8000 * self.seat_count)
        # TODO: fix Decimal
        self.assertEqual(
            charge.description, f"Upgrade to Zulip Cloud Standard, $80.0 x {self.seat_count}"
        )
        self.assertEqual(charge.receipt_email, user.delivery_email)
        self.assertEqual(charge.statement_descriptor, "Zulip Cloud Standard")
        # Check Invoices in Stripe
        [invoice] = stripe.Invoice.list(customer=stripe_customer.id)
        self.assertIsNotNone(invoice.status_transitions.finalized_at)
        invoice_params = {
            # auto_advance is False because the invoice has been paid
            "amount_due": 0,
            "amount_paid": 0,
            "auto_advance": False,
            "collection_method": "charge_automatically",
            "charge": None,
            "status": "paid",
            "total": 0,
        }
        for key, value in invoice_params.items():
            self.assertEqual(invoice.get(key), value)
        # Check Line Items on Stripe Invoice
        [item0, item1] = invoice.lines
        line_item_params = {
            "amount": 8000 * self.seat_count,
            "description": "Zulip Cloud Standard",
            "discountable": False,
            "period": {
                "end": datetime_to_timestamp(self.next_year),
                "start": datetime_to_timestamp(self.now),
            },
            # There's no unit_amount on Line Items, probably because it doesn't show up on the
            # user-facing invoice. We could pull the Invoice Item instead and test unit_amount there,
            # but testing the amount and quantity seems sufficient.
            "plan": None,
            "proration": False,
            "quantity": self.seat_count,
        }
        for key, value in line_item_params.items():
            self.assertEqual(item0.get(key), value)
        # The second line item offsets the invoice with the card payment.
        line_item_params = {
            "amount": -8000 * self.seat_count,
            "description": "Payment (Card ending in 4242)",
            "discountable": False,
            "plan": None,
            "proration": False,
            "quantity": 1,
        }
        for key, value in line_item_params.items():
            self.assertEqual(item1.get(key), value)
        # Check that we correctly populated Customer, CustomerPlan, and LicenseLedger in Zulip
        customer = Customer.objects.get(stripe_customer_id=stripe_customer.id, realm=user.realm)
        plan = CustomerPlan.objects.get(
            customer=customer,
            automanage_licenses=True,
            price_per_license=8000,
            fixed_price=None,
            discount=None,
            billing_cycle_anchor=self.now,
            billing_schedule=CustomerPlan.ANNUAL,
            invoiced_through=LicenseLedger.objects.first(),
            next_invoice_date=self.next_month,
            tier=CustomerPlan.STANDARD,
            status=CustomerPlan.ACTIVE,
        )
        LicenseLedger.objects.get(
            plan=plan,
            is_renewal=True,
            event_time=self.now,
            licenses=self.seat_count,
            licenses_at_next_renewal=self.seat_count,
        )
        # Check RealmAuditLog
        audit_log_entries = list(
            RealmAuditLog.objects.filter(acting_user=user)
            .values_list("event_type", "event_time")
            .order_by("id")
        )
        self.assertEqual(
            audit_log_entries[:3],
            [
                (
                    RealmAuditLog.STRIPE_CUSTOMER_CREATED,
                    timestamp_to_datetime(stripe_customer.created),
                ),
                (RealmAuditLog.STRIPE_CARD_CHANGED, self.now),
                (RealmAuditLog.CUSTOMER_PLAN_CREATED, self.now),
            ],
        )
        self.assertEqual(audit_log_entries[3][0], RealmAuditLog.REALM_PLAN_TYPE_CHANGED)
        self.assertEqual(
            orjson.loads(
                assert_is_not_none(
                    RealmAuditLog.objects.filter(event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED)
                    .values_list("extra_data", flat=True)
                    .first()
                )
            )["automanage_licenses"],
            True,
        )
        # Check that we correctly updated Realm
        realm = get_realm("zulip")
        self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        # Check that we can no longer access /upgrade
        response = self.client_get("/upgrade/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual("/billing/", response["Location"])
        # Check /billing has the correct information
        with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
            response = self.client_get("/billing/")
        self.assert_not_in_success_response(["Pay annually"], response)
        for substring in [
            "Zulip Cloud Standard",
            str(self.seat_count),
            "You are using",
            f"{self.seat_count} of {self.seat_count} licenses",
            "Licenses are automatically managed by Zulip; when you add",
            "Your plan will renew on",
            "January 2, 2013",
            f"${80 * self.seat_count}.00",
            f"Billing email: <strong>{user.delivery_email}</strong>",
            "visa ending in 4242",
            "Update card",
        ]:
            self.assert_in_response(substring, response)
        # Manual-license-management UI must not appear for automanaged plans.
        self.assert_not_in_success_response(
            [
                "You can only increase the number of licenses.",
                "Number of licenses",
                "Licenses in next renewal",
            ],
            response,
        )
    @mock_stripe(tested_timestamp_fields=["created"])
    def test_upgrade_by_invoice(self, *mocks: Mock) -> None:
        """End-to-end pay-by-invoice upgrade for a fixed 123 licenses: no
        card or Charge is created, a single open send_invoice Invoice is,
        and the plan is created with manual license management.

        NOTE: Stripe calls replay ordered recorded fixtures via
        @mock_stripe; do not reorder the API calls in this test.
        """
        user = self.example_user("hamlet")
        self.login_user(user)
        # Click "Make payment" in Stripe Checkout
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.upgrade(invoice=True)
        # Check that we correctly created a Customer in Stripe
        stripe_customer = stripe_get_customer(
            assert_is_not_none(Customer.objects.get(realm=user.realm).stripe_customer_id)
        )
        self.assertFalse(stripe_customer_has_credit_card_as_default_payment_method(stripe_customer))
        # It can take a second for Stripe to attach the source to the customer, and in
        # particular it may not be attached at the time stripe_get_customer is called above,
        # causing test flakes.
        # So commenting the next line out, but leaving it here so future readers know what
        # is supposed to happen here
        # self.assertEqual(stripe_customer.default_source.type, 'ach_credit_transfer')
        # Check Charges in Stripe
        self.assertFalse(stripe.Charge.list(customer=stripe_customer.id))
        # Check Invoices in Stripe
        [invoice] = stripe.Invoice.list(customer=stripe_customer.id)
        self.assertIsNotNone(invoice.due_date)
        self.assertIsNotNone(invoice.status_transitions.finalized_at)
        invoice_params = {
            "amount_due": 8000 * 123,
            "amount_paid": 0,
            "attempt_count": 0,
            "auto_advance": True,
            "collection_method": "send_invoice",
            "statement_descriptor": "Zulip Cloud Standard",
            "status": "open",
            "total": 8000 * 123,
        }
        for key, value in invoice_params.items():
            self.assertEqual(invoice.get(key), value)
        # Check Line Items on Stripe Invoice
        [item] = invoice.lines
        line_item_params = {
            "amount": 8000 * 123,
            "description": "Zulip Cloud Standard",
            "discountable": False,
            "period": {
                "end": datetime_to_timestamp(self.next_year),
                "start": datetime_to_timestamp(self.now),
            },
            "plan": None,
            "proration": False,
            "quantity": 123,
        }
        for key, value in line_item_params.items():
            self.assertEqual(item.get(key), value)
        # Check that we correctly populated Customer, CustomerPlan and LicenseLedger in Zulip
        customer = Customer.objects.get(stripe_customer_id=stripe_customer.id, realm=user.realm)
        plan = CustomerPlan.objects.get(
            customer=customer,
            automanage_licenses=False,
            charge_automatically=False,
            price_per_license=8000,
            fixed_price=None,
            discount=None,
            billing_cycle_anchor=self.now,
            billing_schedule=CustomerPlan.ANNUAL,
            invoiced_through=LicenseLedger.objects.first(),
            next_invoice_date=self.next_year,
            tier=CustomerPlan.STANDARD,
            status=CustomerPlan.ACTIVE,
        )
        LicenseLedger.objects.get(
            plan=plan,
            is_renewal=True,
            event_time=self.now,
            licenses=123,
            licenses_at_next_renewal=123,
        )
        # Check RealmAuditLog
        audit_log_entries = list(
            RealmAuditLog.objects.filter(acting_user=user)
            .values_list("event_type", "event_time")
            .order_by("id")
        )
        self.assertEqual(
            audit_log_entries[:2],
            [
                (
                    RealmAuditLog.STRIPE_CUSTOMER_CREATED,
                    timestamp_to_datetime(stripe_customer.created),
                ),
                (RealmAuditLog.CUSTOMER_PLAN_CREATED, self.now),
            ],
        )
        self.assertEqual(audit_log_entries[2][0], RealmAuditLog.REALM_PLAN_TYPE_CHANGED)
        self.assertEqual(
            orjson.loads(
                assert_is_not_none(
                    RealmAuditLog.objects.filter(event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED)
                    .values_list("extra_data", flat=True)
                    .first()
                )
            )["automanage_licenses"],
            False,
        )
        # Check that we correctly updated Realm
        realm = get_realm("zulip")
        self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
        self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
        # Check that we can no longer access /upgrade
        response = self.client_get("/upgrade/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual("/billing/", response["Location"])
        # Check /billing has the correct information
        with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
            response = self.client_get("/billing/")
        self.assert_not_in_success_response(["Pay annually", "Update card"], response)
        for substring in [
            "Zulip Cloud Standard",
            str(123),
            "You are using",
            f"{self.seat_count} of {123} licenses",
            "Licenses are manually managed. You will not be able to add ",
            "Your plan will renew on",
            "January 2, 2013",
            "$9,840.00",  # 9840 = 80 * 123
            f"Billing email: <strong>{user.delivery_email}</strong>",
            "Billed by invoice",
            "You can only increase the number of licenses.",
            "Number of licenses",
            "Licenses in next renewal",
        ]:
            self.assert_in_response(substring, response)
    @mock_stripe(tested_timestamp_fields=["created"])
    def test_free_trial_upgrade_by_card(self, *mocks: Mock) -> None:
        """Card upgrade with FREE_TRIAL_DAYS=60: signup creates a customer
        with a card but no charge/invoice and a FREE_TRIAL plan; the test
        then walks time forward to verify license-ledger updates during the
        trial, activation and first invoice at trial end, and subsequent
        invoicing for added licenses.

        NOTE: Stripe calls replay ordered recorded fixtures via
        @mock_stripe; do not reorder the API calls in this test.
        """
        user = self.example_user("hamlet")
        self.login_user(user)
        with self.settings(FREE_TRIAL_DAYS=60):
            response = self.client_get("/upgrade/")
            free_trial_end_date = self.now + timedelta(days=60)
            self.assert_in_success_response(["Pay annually", "Free Trial", "60 day"], response)
            self.assertNotEqual(user.realm.plan_type, Realm.PLAN_TYPE_STANDARD)
            self.assertFalse(Customer.objects.filter(realm=user.realm).exists())
            with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
                response = self.upgrade()
            # No payment is collected when the plan starts as a free trial.
            self.assertEqual(PaymentIntent.objects.count(), 0)
            response_dict = self.assert_json_success(response)
            self.assert_details_of_valid_session_from_event_status_endpoint(
                response_dict["stripe_session_id"],
                {
                    "type": "free_trial_upgrade_from_billing_page",
                    "status": "completed",
                    "event_handler": {"status": "succeeded"},
                },
            )
            stripe_customer = stripe_get_customer(
                assert_is_not_none(Customer.objects.get(realm=user.realm).stripe_customer_id)
            )
            self.assertTrue(
                stripe_customer_has_credit_card_as_default_payment_method(stripe_customer)
            )
            self.assertEqual(stripe_customer.description, "zulip (Zulip Dev)")
            self.assertEqual(stripe_customer.discount, None)
            self.assertEqual(stripe_customer.email, user.delivery_email)
            metadata_dict = dict(stripe_customer.metadata)
            self.assertEqual(metadata_dict["realm_str"], "zulip")
            try:
                int(metadata_dict["realm_id"])
            except ValueError:  # nocoverage
                raise AssertionError("realm_id is not a number")
            # Nothing is billed during the trial.
            self.assertFalse(stripe.Charge.list(customer=stripe_customer.id))
            self.assertFalse(stripe.Invoice.list(customer=stripe_customer.id))
            customer = Customer.objects.get(stripe_customer_id=stripe_customer.id, realm=user.realm)
            plan = CustomerPlan.objects.get(
                customer=customer,
                automanage_licenses=True,
                price_per_license=8000,
                fixed_price=None,
                discount=None,
                billing_cycle_anchor=self.now,
                billing_schedule=CustomerPlan.ANNUAL,
                invoiced_through=LicenseLedger.objects.first(),
                next_invoice_date=free_trial_end_date,
                tier=CustomerPlan.STANDARD,
                status=CustomerPlan.FREE_TRIAL,
            )
            LicenseLedger.objects.get(
                plan=plan,
                is_renewal=True,
                event_time=self.now,
                licenses=self.seat_count,
                licenses_at_next_renewal=self.seat_count,
            )
            audit_log_entries = list(
                RealmAuditLog.objects.filter(acting_user=user)
                .values_list("event_type", "event_time")
                .order_by("id")
            )
            self.assertEqual(
                audit_log_entries[:3],
                [
                    (
                        RealmAuditLog.STRIPE_CUSTOMER_CREATED,
                        timestamp_to_datetime(stripe_customer.created),
                    ),
                    (
                        RealmAuditLog.STRIPE_CARD_CHANGED,
                        self.now,
                    ),
                    (RealmAuditLog.CUSTOMER_PLAN_CREATED, self.now),
                ],
            )
            self.assertEqual(audit_log_entries[3][0], RealmAuditLog.REALM_PLAN_TYPE_CHANGED)
            self.assertEqual(
                orjson.loads(
                    assert_is_not_none(
                        RealmAuditLog.objects.filter(event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED)
                        .values_list("extra_data", flat=True)
                        .first()
                    )
                )["automanage_licenses"],
                True,
            )
            realm = get_realm("zulip")
            self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
            self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
            with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
                response = self.client_get("/billing/")
            self.assert_not_in_success_response(["Pay annually"], response)
            for substring in [
                "Zulip Cloud Standard",
                "Free Trial",
                str(self.seat_count),
                "You are using",
                f"{self.seat_count} of {self.seat_count} licenses",
                "Your plan will be upgraded to",
                "March 2, 2012",
                f"${80 * self.seat_count}.00",
                f"Billing email: <strong>{user.delivery_email}</strong>",
                "visa ending in 4242",
                "Update card",
            ]:
                self.assert_in_response(substring, response)
            self.assert_not_in_success_response(["Go to your Zulip organization"], response)
            # The onboarding variant of /billing shows an extra link back to the org.
            with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
                response = self.client_get("/billing/", {"onboarding": "true"})
            self.assert_in_success_response(["Go to your Zulip organization"], response)
            # Seat-count changes during the trial update the ledger immediately.
            with patch("corporate.lib.stripe.get_latest_seat_count", return_value=12):
                update_license_ledger_if_needed(realm, self.now)
            self.assertEqual(
                LicenseLedger.objects.order_by("-id")
                .values_list("licenses", "licenses_at_next_renewal")
                .first(),
                (12, 12),
            )
            with patch("corporate.lib.stripe.get_latest_seat_count", return_value=15):
                update_license_ledger_if_needed(realm, self.next_month)
            self.assertEqual(
                LicenseLedger.objects.order_by("-id")
                .values_list("licenses", "licenses_at_next_renewal")
                .first(),
                (15, 15),
            )
            # Still in the trial: no invoice yet, plan status unchanged.
            invoice_plans_as_needed(self.next_month)
            self.assertFalse(stripe.Invoice.list(customer=stripe_customer.id))
            customer_plan = CustomerPlan.objects.get(customer=customer)
            self.assertEqual(customer_plan.status, CustomerPlan.FREE_TRIAL)
            self.assertEqual(customer_plan.next_invoice_date, free_trial_end_date)
            # At trial end the plan activates and the first invoice is issued.
            invoice_plans_as_needed(free_trial_end_date)
            customer_plan.refresh_from_db()
            realm.refresh_from_db()
            self.assertEqual(customer_plan.status, CustomerPlan.ACTIVE)
            self.assertEqual(customer_plan.next_invoice_date, add_months(free_trial_end_date, 1))
            self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
            [invoice] = stripe.Invoice.list(customer=stripe_customer.id)
            invoice_params = {
                "amount_due": 15 * 80 * 100,
                "amount_paid": 0,
                "amount_remaining": 15 * 80 * 100,
                "auto_advance": True,
                "collection_method": "charge_automatically",
                "customer_email": self.example_email("hamlet"),
                "discount": None,
                "paid": False,
                "status": "open",
                "total": 15 * 80 * 100,
            }
            for key, value in invoice_params.items():
                self.assertEqual(invoice.get(key), value)
            [invoice_item] = invoice.get("lines")
            invoice_item_params = {
                "amount": 15 * 80 * 100,
                "description": "Zulip Cloud Standard - renewal",
                "plan": None,
                "quantity": 15,
                "subscription": None,
                "discountable": False,
                "period": {
                    "start": datetime_to_timestamp(free_trial_end_date),
                    "end": datetime_to_timestamp(add_months(free_trial_end_date, 12)),
                },
            }
            for key, value in invoice_item_params.items():
                self.assertEqual(invoice_item[key], value)
            # One month after trial end: no new invoice is generated.
            invoice_plans_as_needed(add_months(free_trial_end_date, 1))
            [invoice] = stripe.Invoice.list(customer=stripe_customer.id)
            with patch("corporate.lib.stripe.get_latest_seat_count", return_value=19):
                update_license_ledger_if_needed(realm, add_months(free_trial_end_date, 10))
            self.assertEqual(
                LicenseLedger.objects.order_by("-id")
                .values_list("licenses", "licenses_at_next_renewal")
                .first(),
                (19, 19),
            )
            # The added licenses get a prorated invoice at the next cycle.
            invoice_plans_as_needed(add_months(free_trial_end_date, 10))
            [invoice0, invoice1] = stripe.Invoice.list(customer=stripe_customer.id)
            # NOTE(review): the two dicts below are built but never compared
            # against invoice0 — the assertion loops appear to be missing.
            # Confirm whether this was intentional.
            invoice_params = {
                "amount_due": 5172,
                "auto_advance": True,
                "collection_method": "charge_automatically",
                "customer_email": "hamlet@zulip.com",
            }
            [invoice_item] = invoice0.get("lines")
            invoice_item_params = {
                "amount": 5172,
                "description": "Additional license (Jan 2, 2013 - Mar 2, 2013)",
                "discountable": False,
                "quantity": 4,
                "period": {
                    "start": datetime_to_timestamp(add_months(free_trial_end_date, 10)),
                    "end": datetime_to_timestamp(add_months(free_trial_end_date, 12)),
                },
            }
            # The annual renewal produces a third invoice.
            invoice_plans_as_needed(add_months(free_trial_end_date, 12))
            [invoice0, invoice1, invoice2] = stripe.Invoice.list(customer=stripe_customer.id)
@mock_stripe(tested_timestamp_fields=["created"])
def test_free_trial_upgrade_by_card_from_onboarding_page(self, *mocks: Mock) -> None:
user = self.example_user("hamlet")
self.login_user(user)
with self.settings(FREE_TRIAL_DAYS=60):
free_trial_end_date = self.now + timedelta(days=60)
self.assertNotEqual(user.realm.plan_type, Realm.PLAN_TYPE_STANDARD)
self.assertFalse(Customer.objects.filter(realm=user.realm).exists())
with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
response = self.upgrade(onboarding=True)
self.assertEqual(PaymentIntent.objects.all().count(), 0)
response_dict = self.assert_json_success(response)
self.assert_details_of_valid_session_from_event_status_endpoint(
response_dict["stripe_session_id"],
{
"type": "free_trial_upgrade_from_onboarding_page",
"status": "completed",
"event_handler": {"status": "succeeded"},
},
)
stripe_customer = stripe_get_customer(
assert_is_not_none(Customer.objects.get(realm=user.realm).stripe_customer_id)
)
self.assertTrue(
stripe_customer_has_credit_card_as_default_payment_method(stripe_customer)
)
self.assertEqual(stripe_customer.description, "zulip (Zulip Dev)")
self.assertEqual(stripe_customer.discount, None)
self.assertEqual(stripe_customer.email, user.delivery_email)
metadata_dict = dict(stripe_customer.metadata)
self.assertEqual(metadata_dict["realm_str"], "zulip")
try:
int(metadata_dict["realm_id"])
except ValueError: # nocoverage
raise AssertionError("realm_id is not a number")
self.assertFalse(stripe.Charge.list(customer=stripe_customer.id))
self.assertFalse(stripe.Invoice.list(customer=stripe_customer.id))
customer = Customer.objects.get(stripe_customer_id=stripe_customer.id, realm=user.realm)
customer = Customer.objects.get(stripe_customer_id=stripe_customer.id, realm=user.realm)
plan = CustomerPlan.objects.get(
customer=customer,
automanage_licenses=True,
price_per_license=8000,
fixed_price=None,
discount=None,
billing_cycle_anchor=self.now,
billing_schedule=CustomerPlan.ANNUAL,
invoiced_through=LicenseLedger.objects.first(),
next_invoice_date=free_trial_end_date,
tier=CustomerPlan.STANDARD,
status=CustomerPlan.FREE_TRIAL,
)
LicenseLedger.objects.get(
plan=plan,
is_renewal=True,
event_time=self.now,
licenses=self.seat_count,
licenses_at_next_renewal=self.seat_count,
)
# We don't test anything else since test_free_trial_upgrade_by_card does this already.
    @mock_stripe(tested_timestamp_fields=["created"])
    def test_free_trial_upgrade_by_invoice(self, *mocks: Mock) -> None:
        """Pay-by-invoice upgrade with FREE_TRIAL_DAYS=60 and 123 licenses:
        no invoice is issued during the trial; at trial end the plan
        activates and a single open send_invoice Invoice for the full year
        is created; subsequent runs only invoice at the annual renewal.

        NOTE: Stripe calls replay ordered recorded fixtures via
        @mock_stripe; do not reorder the API calls in this test.
        """
        user = self.example_user("hamlet")
        self.login_user(user)
        free_trial_end_date = self.now + timedelta(days=60)
        with self.settings(FREE_TRIAL_DAYS=60):
            response = self.client_get("/upgrade/")
            self.assert_in_success_response(["Pay annually", "Free Trial", "60 day"], response)
            self.assertNotEqual(user.realm.plan_type, Realm.PLAN_TYPE_STANDARD)
            self.assertFalse(Customer.objects.filter(realm=user.realm).exists())
            with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
                self.upgrade(invoice=True)
            stripe_customer = stripe_get_customer(
                assert_is_not_none(Customer.objects.get(realm=user.realm).stripe_customer_id)
            )
            self.assertEqual(stripe_customer.discount, None)
            self.assertEqual(stripe_customer.email, user.delivery_email)
            metadata_dict = dict(stripe_customer.metadata)
            self.assertEqual(metadata_dict["realm_str"], "zulip")
            try:
                int(metadata_dict["realm_id"])
            except ValueError:  # nocoverage
                raise AssertionError("realm_id is not a number")
            # No invoice is issued while the trial is running.
            self.assertFalse(stripe.Invoice.list(customer=stripe_customer.id))
            customer = Customer.objects.get(stripe_customer_id=stripe_customer.id, realm=user.realm)
            plan = CustomerPlan.objects.get(
                customer=customer,
                automanage_licenses=False,
                price_per_license=8000,
                fixed_price=None,
                discount=None,
                billing_cycle_anchor=self.now,
                billing_schedule=CustomerPlan.ANNUAL,
                invoiced_through=LicenseLedger.objects.first(),
                next_invoice_date=free_trial_end_date,
                tier=CustomerPlan.STANDARD,
                status=CustomerPlan.FREE_TRIAL,
            )
            LicenseLedger.objects.get(
                plan=plan,
                is_renewal=True,
                event_time=self.now,
                licenses=123,
                licenses_at_next_renewal=123,
            )
            audit_log_entries = list(
                RealmAuditLog.objects.filter(acting_user=user)
                .values_list("event_type", "event_time")
                .order_by("id")
            )
            self.assertEqual(
                audit_log_entries[:2],
                [
                    (
                        RealmAuditLog.STRIPE_CUSTOMER_CREATED,
                        timestamp_to_datetime(stripe_customer.created),
                    ),
                    (RealmAuditLog.CUSTOMER_PLAN_CREATED, self.now),
                ],
            )
            self.assertEqual(audit_log_entries[2][0], RealmAuditLog.REALM_PLAN_TYPE_CHANGED)
            self.assertEqual(
                orjson.loads(
                    assert_is_not_none(
                        RealmAuditLog.objects.filter(event_type=RealmAuditLog.CUSTOMER_PLAN_CREATED)
                        .values_list("extra_data", flat=True)
                        .first()
                    )
                )["automanage_licenses"],
                False,
            )
            realm = get_realm("zulip")
            self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
            self.assertEqual(realm.max_invites, Realm.INVITES_STANDARD_REALM_DAILY_MAX)
            with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
                response = self.client_get("/billing/")
            self.assert_not_in_success_response(["Pay annually"], response)
            for substring in [
                "Zulip Cloud Standard",
                "Free Trial",
                str(self.seat_count),
                "You are using",
                f"{self.seat_count} of {123} licenses",
                "Your plan will be upgraded to",
                "March 2, 2012",
                f"{80 * 123:,.2f}",
                f"Billing email: <strong>{user.delivery_email}</strong>",
                "Billed by invoice",
            ]:
                self.assert_in_response(substring, response)
            # Mid-trial invoicing runs must be a no-op.
            with patch("corporate.lib.stripe.invoice_plan") as mocked:
                invoice_plans_as_needed(self.next_month)
            mocked.assert_not_called()
            mocked.reset_mock()
            customer_plan = CustomerPlan.objects.get(customer=customer)
            self.assertEqual(customer_plan.status, CustomerPlan.FREE_TRIAL)
            self.assertEqual(customer_plan.next_invoice_date, free_trial_end_date)
            # At trial end the plan activates and the annual invoice is issued.
            invoice_plans_as_needed(free_trial_end_date)
            customer_plan.refresh_from_db()
            realm.refresh_from_db()
            self.assertEqual(customer_plan.status, CustomerPlan.ACTIVE)
            self.assertEqual(customer_plan.next_invoice_date, add_months(free_trial_end_date, 12))
            self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
            [invoice] = stripe.Invoice.list(customer=stripe_customer.id)
            invoice_params = {
                "amount_due": 123 * 80 * 100,
                "amount_paid": 0,
                "amount_remaining": 123 * 80 * 100,
                "auto_advance": True,
                "collection_method": "send_invoice",
                "customer_email": self.example_email("hamlet"),
                "discount": None,
                "paid": False,
                "status": "open",
                "total": 123 * 80 * 100,
            }
            for key, value in invoice_params.items():
                self.assertEqual(invoice.get(key), value)
            [invoice_item] = invoice.get("lines")
            invoice_item_params = {
                "amount": 123 * 80 * 100,
                "description": "Zulip Cloud Standard - renewal",
                "plan": None,
                "quantity": 123,
                "subscription": None,
                "discountable": False,
                "period": {
                    "start": datetime_to_timestamp(free_trial_end_date),
                    "end": datetime_to_timestamp(add_months(free_trial_end_date, 12)),
                },
            }
            for key, value in invoice_item_params.items():
                self.assertEqual(invoice_item[key], value)
            # Later monthly runs create no new invoice until the annual renewal.
            invoice_plans_as_needed(add_months(free_trial_end_date, 1))
            [invoice] = stripe.Invoice.list(customer=stripe_customer.id)
            invoice_plans_as_needed(add_months(free_trial_end_date, 10))
            [invoice] = stripe.Invoice.list(customer=stripe_customer.id)
            invoice_plans_as_needed(add_months(free_trial_end_date, 12))
            [invoice0, invoice1] = stripe.Invoice.list(customer=stripe_customer.id)
@mock_stripe(tested_timestamp_fields=["created"])
def test_upgrade_by_card_with_outdated_seat_count(self, *mocks: Mock) -> None:
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
new_seat_count = 23
# Change the seat count while the user is going through the upgrade flow
with patch("corporate.lib.stripe.get_latest_seat_count", return_value=new_seat_count):
with patch(
"corporate.views.upgrade.get_latest_seat_count", return_value=self.seat_count
):
self.upgrade()
customer = Customer.objects.first()
assert customer is not None
stripe_customer_id: str = assert_is_not_none(customer.stripe_customer_id)
# Check that the Charge used the old quantity, not new_seat_count
[charge] = stripe.Charge.list(customer=stripe_customer_id)
self.assertEqual(8000 * self.seat_count, charge.amount)
# Check that the invoice has a credit for the old amount and a charge for the new one
[stripe_invoice] = stripe.Invoice.list(customer=stripe_customer_id)
self.assertEqual(
[8000 * new_seat_count, -8000 * self.seat_count],
[item.amount for item in stripe_invoice.lines],
)
# Check LicenseLedger has the new amount
ledger_entry = LicenseLedger.objects.first()
assert ledger_entry is not None
self.assertEqual(ledger_entry.licenses, new_seat_count)
self.assertEqual(ledger_entry.licenses_at_next_renewal, new_seat_count)
    @mock_stripe()
    def test_upgrade_first_card_fails_and_retry_with_another_card_without_starting_from_begining(
        self, *mocks: Mock
    ) -> None:
        """A declined card leaves a Customer but no CustomerPlan; the user can
        then retry the same PaymentIntent with a working card via the
        start_retry_payment_intent_session endpoint, after which the upgrade
        completes and a second retry is rejected.

        NOTE: Stripe calls (including the webhook replay via
        send_stripe_webhook_events) follow ordered recorded fixtures; do not
        reorder the API calls in this test.
        """
        user = self.example_user("hamlet")
        self.login_user(user)
        # From https://stripe.com/docs/testing#cards: Attaching this card to
        # a Customer object succeeds, but attempts to charge the customer fail.
        with self.assertLogs("corporate.stripe", "INFO") as m:
            response = self.upgrade(
                payment_method=create_payment_method(
                    self.get_test_card_number(attaches_to_customer=True, charge_succeeds=False)
                )
            )
            self.assertEqual(
                m.output,
                [
                    "INFO:corporate.stripe:Stripe payment intent failed: zulip card_error card_declined None"
                ],
            )
        [payment_intent] = PaymentIntent.objects.all()
        response_dict = self.assert_json_success(response)
        # The checkout session completed even though the charge failed...
        self.assert_details_of_valid_session_from_event_status_endpoint(
            response_dict["stripe_session_id"],
            {
                "type": "upgrade_from_billing_page",
                "status": "completed",
                "stripe_payment_intent_id": payment_intent.stripe_payment_intent_id,
                "event_handler": {
                    "status": "succeeded",
                },
            },
        )
        # ...but the PaymentIntent is left awaiting a new payment method.
        self.assert_details_of_valid_payment_intent_from_event_status_endpoint(
            payment_intent.stripe_payment_intent_id,
            {
                "status": "requires_payment_method",
                "last_payment_error": {
                    "message": "Your card was declined.",
                    "description": "card_error",
                },
                "event_handler": {"status": "succeeded"},
            },
        )
        # Check that we created a Customer object but no CustomerPlan
        stripe_customer_id = Customer.objects.get(realm=get_realm("zulip")).stripe_customer_id
        assert stripe_customer_id is not None
        self.assertFalse(CustomerPlan.objects.exists())
        # Check that we created a Customer in stripe, a failed Charge, and no Invoices or Invoice Items
        self.assertTrue(stripe_get_customer(stripe_customer_id))
        [charge] = stripe.Charge.list(customer=stripe_customer_id)
        self.assertEqual(charge.failure_code, "card_declined")
        self.assertFalse(stripe.Invoice.list(customer=stripe_customer_id))
        self.assertFalse(stripe.InvoiceItem.list(customer=stripe_customer_id))
        # Check that we correctly populated RealmAuditLog
        audit_log_entries = list(
            RealmAuditLog.objects.filter(acting_user=user)
            .values_list("event_type", flat=True)
            .order_by("id")
        )
        self.assertEqual(
            audit_log_entries,
            [RealmAuditLog.STRIPE_CUSTOMER_CREATED, RealmAuditLog.STRIPE_CARD_CHANGED],
        )
        # Check that we did not update Realm
        realm = get_realm("zulip")
        self.assertNotEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
        # Check that we still get redirected to /upgrade
        response = self.client_get("/billing/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual("/upgrade/", response["Location"])
        # Remember the latest Stripe event so the webhook replay below can
        # start from this point.
        [last_event] = stripe.Event.list(limit=1)
        retry_payment_intent_json_response = self.client_post(
            "/json/billing/session/start_retry_payment_intent_session",
            {
                "stripe_payment_intent_id": payment_intent.stripe_payment_intent_id,
            },
        )
        self.assert_json_success(retry_payment_intent_json_response)
        [payment_intent] = PaymentIntent.objects.all()
        response_dict = self.assert_json_success(retry_payment_intent_json_response)
        self.assert_details_of_valid_session_from_event_status_endpoint(
            response_dict["stripe_session_id"],
            {
                "type": "retry_upgrade_with_another_payment_method",
                "status": "created",
                "stripe_payment_intent_id": payment_intent.stripe_payment_intent_id,
            },
        )
        # Complete checkout with a card that does charge successfully.
        self.trigger_stripe_checkout_session_completed_webhook(
            create_payment_method(
                self.get_test_card_number(
                    attaches_to_customer=True, charge_succeeds=True, card_provider="visa"
                )
            )
        )
        self.assert_details_of_valid_payment_intent_from_event_status_endpoint(
            payment_intent.stripe_payment_intent_id,
            {"status": "processing"},
        )
        self.send_stripe_webhook_events(last_event)
        response_dict = self.assert_json_success(retry_payment_intent_json_response)
        self.assert_details_of_valid_session_from_event_status_endpoint(
            response_dict["stripe_session_id"],
            {
                "type": "retry_upgrade_with_another_payment_method",
                "status": "completed",
                "stripe_payment_intent_id": payment_intent.stripe_payment_intent_id,
                "event_handler": {
                    "status": "succeeded",
                },
            },
        )
        self.assert_details_of_valid_payment_intent_from_event_status_endpoint(
            payment_intent.stripe_payment_intent_id,
            {"status": "succeeded", "event_handler": {"status": "succeeded"}},
        )
        # Retrying an already-succeeded PaymentIntent is rejected.
        retry_payment_intent_json_response = self.client_post(
            "/json/billing/session/start_retry_payment_intent_session",
            {
                "stripe_payment_intent_id": payment_intent.stripe_payment_intent_id,
            },
        )
        self.assert_json_error(retry_payment_intent_json_response, "Payment already succeeded.")
        customer = Customer.objects.get(realm=get_realm("zulip"))
        # It's impossible to create two Customers, but check that we didn't
        # change stripe_customer_id
        self.assertEqual(customer.stripe_customer_id, stripe_customer_id)
        # Check that we successfully added a CustomerPlan, and have the right number of licenses
        plan = CustomerPlan.objects.get(customer=customer)
        ledger_entry = LicenseLedger.objects.get(plan=plan)
        self.assertEqual(ledger_entry.licenses, self.seat_count)
        self.assertEqual(ledger_entry.licenses_at_next_renewal, self.seat_count)
        # Check the Charges and Invoices in Stripe
        [charge0, charge1] = stripe.Charge.list(customer=stripe_customer_id)
        self.assertEqual(8000 * self.seat_count, charge0.amount)
        [stripe_invoice] = stripe.Invoice.list(customer=stripe_customer_id)
        self.assertEqual(
            [8000 * self.seat_count, -8000 * self.seat_count],
            [item.amount for item in stripe_invoice.lines],
        )
        # Check that we correctly populated RealmAuditLog
        audit_log_entries = list(
            RealmAuditLog.objects.filter(acting_user=user)
            .values_list("event_type", flat=True)
            .order_by("id")
        )
        self.assertEqual(
            audit_log_entries,
            [
                RealmAuditLog.STRIPE_CUSTOMER_CREATED,
                RealmAuditLog.STRIPE_CARD_CHANGED,
                RealmAuditLog.STRIPE_CARD_CHANGED,
                RealmAuditLog.CUSTOMER_PLAN_CREATED,
                RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
            ],
        )
        # Check that we correctly updated Realm
        realm = get_realm("zulip")
        self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
        # Check that we can no longer access /upgrade
        response = self.client_get("/upgrade/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual("/billing/", response["Location"])
    @mock_stripe()
    def test_upgrade_first_card_fails_and_restart_from_begining(self, *mocks: Mock) -> None:
        """A declined first card fails the upgrade cleanly; a later retry succeeds.

        Verifies that after the failed charge we have a Customer but no
        CustomerPlan, the realm stays unupgraded and /billing/ still redirects
        to /upgrade/, and that a fresh attempt with a valid card (and a larger
        seat count of 23) completes the upgrade end to end.
        """
        user = self.example_user("hamlet")
        self.login_user(user)
        # From https://stripe.com/docs/testing#cards: Attaching this card to
        # a Customer object succeeds, but attempts to charge the customer fail.
        with self.assertLogs("corporate.stripe", "INFO") as m:
            response = self.upgrade(
                payment_method=create_payment_method(
                    self.get_test_card_number(attaches_to_customer=True, charge_succeeds=False)
                )
            )
            self.assertEqual(
                m.output,
                [
                    "INFO:corporate.stripe:Stripe payment intent failed: zulip card_error card_declined None"
                ],
            )
        [payment_intent] = PaymentIntent.objects.all()
        assert payment_intent.stripe_payment_intent_id is not None
        response_dict = self.assert_json_success(response)
        # The checkout session itself completed; only the charge failed.
        self.assert_details_of_valid_session_from_event_status_endpoint(
            response_dict["stripe_session_id"],
            {
                "type": "upgrade_from_billing_page",
                "status": "completed",
                "stripe_payment_intent_id": payment_intent.stripe_payment_intent_id,
                "event_handler": {
                    "status": "succeeded",
                },
            },
        )
        self.assert_details_of_valid_payment_intent_from_event_status_endpoint(
            payment_intent.stripe_payment_intent_id,
            {
                "status": "requires_payment_method",
                "last_payment_error": {
                    "message": "Your card was declined.",
                    "description": "card_error",
                },
                "event_handler": {"status": "succeeded"},
            },
        )
        # Check that we created a Customer object but no CustomerPlan
        stripe_customer_id = Customer.objects.get(realm=get_realm("zulip")).stripe_customer_id
        assert stripe_customer_id is not None
        self.assertFalse(CustomerPlan.objects.exists())
        # Check that we created a Customer in stripe, a failed Charge, and no Invoices or Invoice Items
        self.assertTrue(stripe_get_customer(stripe_customer_id))
        [charge] = stripe.Charge.list(customer=stripe_customer_id)
        self.assertEqual(charge.failure_code, "card_declined")
        # TODO: figure out what these actually are
        self.assertFalse(stripe.Invoice.list(customer=stripe_customer_id))
        self.assertFalse(stripe.InvoiceItem.list(customer=stripe_customer_id))
        # Check that we correctly populated RealmAuditLog
        audit_log_entries = list(
            RealmAuditLog.objects.filter(acting_user=user)
            .values_list("event_type", flat=True)
            .order_by("id")
        )
        self.assertEqual(
            audit_log_entries,
            [RealmAuditLog.STRIPE_CUSTOMER_CREATED, RealmAuditLog.STRIPE_CARD_CHANGED],
        )
        # Check that we did not update Realm
        realm = get_realm("zulip")
        self.assertNotEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
        # Check that we still get redirected to /upgrade
        response = self.client_get("/billing/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual("/upgrade/", response["Location"])
        # Try again, with a valid card, after they added a few users
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=23):
            with patch("corporate.views.upgrade.get_latest_seat_count", return_value=23):
                response = self.upgrade()
        [second_payment_intent, _] = PaymentIntent.objects.all().order_by("-id")
        assert second_payment_intent.stripe_payment_intent_id is not None
        response_dict = self.assert_json_success(response)
        self.assert_details_of_valid_session_from_event_status_endpoint(
            response_dict["stripe_session_id"],
            {
                "type": "upgrade_from_billing_page",
                "status": "completed",
                "stripe_payment_intent_id": second_payment_intent.stripe_payment_intent_id,
                "event_handler": {
                    "status": "succeeded",
                },
            },
        )
        self.assert_details_of_valid_payment_intent_from_event_status_endpoint(
            second_payment_intent.stripe_payment_intent_id,
            {"status": "succeeded", "event_handler": {"status": "succeeded"}},
        )
        customer = Customer.objects.get(realm=get_realm("zulip"))
        # It's impossible to create two Customers, but check that we didn't
        # change stripe_customer_id
        self.assertEqual(customer.stripe_customer_id, stripe_customer_id)
        # Check that we successfully added a CustomerPlan, and have the right number of licenses
        plan = CustomerPlan.objects.get(customer=customer)
        ledger_entry = LicenseLedger.objects.get(plan=plan)
        self.assertEqual(ledger_entry.licenses, 23)
        self.assertEqual(ledger_entry.licenses_at_next_renewal, 23)
        # Check the Charges and Invoices in Stripe
        [charge0, charge1] = stripe.Charge.list(customer=stripe_customer_id)
        self.assertEqual(8000 * 23, charge0.amount)
        [stripe_invoice] = stripe.Invoice.list(customer=stripe_customer_id)
        self.assertEqual([8000 * 23, -8000 * 23], [item.amount for item in stripe_invoice.lines])
        # Check that we correctly populated RealmAuditLog
        audit_log_entries = list(
            RealmAuditLog.objects.filter(acting_user=user)
            .values_list("event_type", flat=True)
            .order_by("id")
        )
        self.assertEqual(
            audit_log_entries,
            [
                RealmAuditLog.STRIPE_CUSTOMER_CREATED,
                RealmAuditLog.STRIPE_CARD_CHANGED,
                RealmAuditLog.STRIPE_CARD_CHANGED,
                RealmAuditLog.CUSTOMER_PLAN_CREATED,
                RealmAuditLog.REALM_PLAN_TYPE_CHANGED,
            ],
        )
        # Check that we correctly updated Realm
        realm = get_realm("zulip")
        self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
        # Check that we can no longer access /upgrade
        response = self.client_get("/upgrade/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual("/billing/", response["Location"])
def test_upgrade_with_tampered_seat_count(self) -> None:
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
with self.assertLogs("corporate.stripe", "WARNING"):
response = self.upgrade(talk_to_stripe=False, salt="badsalt")
self.assert_json_error_contains(response, "Something went wrong. Please contact")
self.assertEqual(orjson.loads(response.content)["error_description"], "tampered seat count")
    @mock_stripe()
    def test_upgrade_race_condition_during_card_upgrade(self, *mocks: Mock) -> None:
        """Two admins racing through a card upgrade: the loser fails gracefully.

        Hamlet starts (but does not finish) a checkout session, then Othello
        completes a full upgrade first. Hamlet's late webhook and payment
        intent must both fail with the "already subscribed" error, and his
        charge must be credited back to the Stripe customer balance.
        """
        hamlet = self.example_user("hamlet")
        othello = self.example_user("othello")
        self.login_user(hamlet)
        hamlet_upgrade_page_response = self.client_get("/upgrade/")
        self.client_post(
            "/json/billing/upgrade",
            {
                "billing_modality": "charge_automatically",
                "schedule": "annual",
                "signed_seat_count": self.get_signed_seat_count_from_response(
                    hamlet_upgrade_page_response
                ),
                "salt": self.get_salt_from_response(hamlet_upgrade_page_response),
                "license_management": "automatic",
            },
        )
        [hamlet_stripe_session] = stripe.checkout.Session.list(limit=1)
        [hamlet_payment_intent] = stripe.PaymentIntent.list(limit=1)
        self.login_user(othello)
        self.upgrade()
        self.login_user(hamlet)
        # Checkout session cannot be started since the organization has been already upgraded.
        with self.assertLogs("corporate.stripe", "WARNING"):
            response = self.client_post(
                "/json/billing/upgrade",
                {
                    "billing_modality": "charge_automatically",
                    "schedule": "annual",
                    "signed_seat_count": self.get_signed_seat_count_from_response(
                        hamlet_upgrade_page_response
                    ),
                    "salt": self.get_salt_from_response(hamlet_upgrade_page_response),
                    "license_management": "automatic",
                },
            )
            self.assert_json_error_contains(
                response,
                "The organization is already subscribed to a plan. Please reload the billing page.",
            )
        payment_method = create_payment_method(
            self.get_test_card_number(
                attaches_to_customer=True, charge_succeeds=True, card_provider="visa"
            )
        )
        # Organization has been upgraded by the time hamlet completes the checkout session.
        with self.assertLogs("corporate.stripe", "WARNING"):
            self.trigger_stripe_checkout_session_completed_webhook(
                payment_method, hamlet_stripe_session
            )
        self.assert_details_of_valid_session_from_event_status_endpoint(
            hamlet_stripe_session.id,
            {
                "type": "upgrade_from_billing_page",
                "status": "completed",
                "stripe_payment_intent_id": hamlet_payment_intent.id,
                "event_handler": {
                    "status": "failed",
                    "error": {
                        "message": "The organization is already subscribed to a plan. Please reload the billing page.",
                        "description": "subscribing with existing subscription",
                    },
                },
            },
        )
        # Organization has been upgraded by the time payment intent is successful.
        stripe.PaymentIntent.confirm(
            hamlet_payment_intent.id,
            payment_method=payment_method.id,
            off_session=True,
        )
        with self.assertLogs("corporate.stripe", "WARNING"):
            self.send_last_stripe_webhook_event()
        self.assert_details_of_valid_payment_intent_from_event_status_endpoint(
            hamlet_payment_intent.id,
            {
                "status": "succeeded",
                "event_handler": {
                    "status": "failed",
                    "error": {
                        "message": "The organization is already subscribed to a plan. Please reload the billing page.",
                        "description": "subscribing with existing subscription",
                    },
                },
            },
        )
        # The duplicate charge is refunded as a negative invoice / customer balance.
        charged_amount = self.seat_count * 8000
        customer = get_customer_by_realm(get_realm("zulip"))
        assert customer is not None
        assert customer.stripe_customer_id is not None
        [invoice, _] = stripe.Invoice.list(customer=customer.stripe_customer_id)
        self.assertEqual(invoice.total, -1 * charged_amount)
        stripe_customer = stripe.Customer.retrieve(customer.stripe_customer_id)
        self.assertEqual(stripe_customer.balance, -1 * charged_amount)
def test_upgrade_race_condition_during_invoice_upgrade(self) -> None:
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
with self.assertLogs("corporate.stripe", "WARNING") as m:
with self.assertRaises(BillingError) as context:
self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
self.assertEqual(
"subscribing with existing subscription", context.exception.error_description
)
self.assertEqual(
m.output[0],
"WARNING:corporate.stripe:Upgrade of zulip failed because of existing active plan.",
)
self.assert_length(m.output, 1)
def test_check_upgrade_parameters(self) -> None:
# Tests all the error paths except 'not enough licenses'
def check_error(
error_message: str,
error_description: str,
upgrade_params: Mapping[str, Any],
del_args: Sequence[str] = [],
) -> None:
if error_description:
with self.assertLogs("corporate.stripe", "WARNING"):
response = self.upgrade(
talk_to_stripe=False, del_args=del_args, **upgrade_params
)
self.assertEqual(
orjson.loads(response.content)["error_description"], error_description
)
else:
response = self.upgrade(talk_to_stripe=False, del_args=del_args, **upgrade_params)
self.assert_json_error_contains(response, error_message)
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
check_error("Invalid billing_modality", "", {"billing_modality": "invalid"})
check_error("Invalid schedule", "", {"schedule": "invalid"})
check_error("Invalid license_management", "", {"license_management": "invalid"})
check_error(
"You must invoice for at least 30 users.",
"not enough licenses",
{"billing_modality": "send_invoice", "licenses": -1},
)
check_error(
"You must invoice for at least 30 users.",
"not enough licenses",
{"billing_modality": "send_invoice"},
)
check_error(
"You must invoice for at least 30 users.",
"not enough licenses",
{"billing_modality": "send_invoice", "licenses": 25},
)
check_error(
"Invoices with more than 1000 licenses can't be processed from this page",
"too many licenses",
{"billing_modality": "send_invoice", "licenses": 10000},
)
check_error(
"You must invoice for at least 6 users.",
"not enough licenses",
{"billing_modality": "charge_automatically", "license_management": "manual"},
)
check_error(
"You must invoice for at least 6 users.",
"not enough licenses",
{
"billing_modality": "charge_automatically",
"license_management": "manual",
"licenses": 3,
},
)
def test_upgrade_license_counts(self) -> None:
def check_min_licenses_error(
invoice: bool,
licenses: Optional[int],
min_licenses_in_response: int,
upgrade_params: Dict[str, Any] = {},
) -> None:
if licenses is None:
del_args = ["licenses"]
else:
del_args = []
upgrade_params["licenses"] = licenses
with self.assertLogs("corporate.stripe", "WARNING"):
response = self.upgrade(
invoice=invoice, talk_to_stripe=False, del_args=del_args, **upgrade_params
)
self.assert_json_error_contains(response, f"at least {min_licenses_in_response} users")
self.assertEqual(
orjson.loads(response.content)["error_description"], "not enough licenses"
)
def check_max_licenses_error(licenses: int) -> None:
with self.assertLogs("corporate.stripe", "WARNING"):
response = self.upgrade(invoice=True, talk_to_stripe=False, licenses=licenses)
self.assert_json_error_contains(
response, f"with more than {MAX_INVOICED_LICENSES} licenses"
)
self.assertEqual(
orjson.loads(response.content)["error_description"], "too many licenses"
)
def check_success(
invoice: bool, licenses: Optional[int], upgrade_params: Dict[str, Any] = {}
) -> None:
if licenses is None:
del_args = ["licenses"]
else:
del_args = []
upgrade_params["licenses"] = licenses
with patch("corporate.views.upgrade.process_initial_upgrade"):
stripe_session = stripe.checkout.Session()
stripe_session.id = "stripe_session_id"
stripe_session.url = "stripe_session_url"
with patch(
"corporate.views.upgrade.setup_upgrade_checkout_session_and_payment_intent",
return_value=stripe_session,
):
response = self.upgrade(
invoice=invoice, talk_to_stripe=False, del_args=del_args, **upgrade_params
)
self.assert_json_success(response)
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
# Autopay with licenses < seat count
check_min_licenses_error(
False, self.seat_count - 1, self.seat_count, {"license_management": "manual"}
)
# Autopay with not setting licenses
check_min_licenses_error(False, None, self.seat_count, {"license_management": "manual"})
# Invoice with licenses < MIN_INVOICED_LICENSES
check_min_licenses_error(True, MIN_INVOICED_LICENSES - 1, MIN_INVOICED_LICENSES)
# Invoice with licenses < seat count
with patch("corporate.lib.stripe.MIN_INVOICED_LICENSES", 3):
check_min_licenses_error(True, 4, self.seat_count)
# Invoice with not setting licenses
check_min_licenses_error(True, None, MIN_INVOICED_LICENSES)
# Invoice exceeding max licenses
check_max_licenses_error(MAX_INVOICED_LICENSES + 1)
with patch(
"corporate.lib.stripe.get_latest_seat_count", return_value=MAX_INVOICED_LICENSES + 5
):
check_max_licenses_error(MAX_INVOICED_LICENSES + 5)
# Autopay with automatic license_management
check_success(False, None)
# Autopay with automatic license_management, should just ignore the licenses entry
check_success(False, self.seat_count)
# Autopay
check_success(False, self.seat_count, {"license_management": "manual"})
# Autopay has no limit on max licenses
check_success(False, MAX_INVOICED_LICENSES + 1, {"license_management": "manual"})
# Invoice
check_success(True, self.seat_count + MIN_INVOICED_LICENSES)
# Invoice
check_success(True, MAX_INVOICED_LICENSES)
def test_upgrade_with_uncaught_exception(self) -> None:
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
with patch(
"corporate.views.upgrade.setup_upgrade_checkout_session_and_payment_intent",
side_effect=Exception,
), self.assertLogs("corporate.stripe", "WARNING") as m:
response = self.upgrade(talk_to_stripe=False)
self.assertIn("ERROR:corporate.stripe:Uncaught exception in billing", m.output[0])
self.assertIn(m.records[0].stack_info, m.output[0])
self.assert_json_error_contains(
response, "Something went wrong. Please contact desdemona+admin@zulip.com."
)
self.assertEqual(
orjson.loads(response.content)["error_description"], "uncaught exception during upgrade"
)
    @mock_stripe()
    def test_checkout_session_completed_with_uncaught_exception(self, *mock_args: Any) -> None:
        """An exception in the checkout.session.completed handler surfaces as a
        'failed' event_handler status on the event-status endpoint."""
        hamlet = self.example_user("hamlet")
        self.login_user(hamlet)
        # Force the webhook handler to blow up while the upgrade itself proceeds.
        with patch(
            "corporate.lib.stripe_event_handler.update_or_create_stripe_customer",
            side_effect=Exception,
        ), self.assertLogs("corporate.stripe", "WARNING"):
            response = self.upgrade()
        response_dict = self.assert_json_success(response)
        self.assert_details_of_valid_session_from_event_status_endpoint(
            response_dict["stripe_session_id"],
            {
                "type": "upgrade_from_billing_page",
                "status": "completed",
                "stripe_payment_intent_id": PaymentIntent.objects.get().stripe_payment_intent_id,
                "event_handler": {
                    "status": "failed",
                    "error": {
                        "message": "Something went wrong. Please contact desdemona+admin@zulip.com.",
                        "description": "uncaught exception in checkout.session.completed event handler",
                    },
                },
            },
        )
    @mock_stripe()
    def test_payment_intent_succeeded_event_with_uncaught_exception(self, *mock_args: Any) -> None:
        """An exception in the payment_intent.succeeded handler surfaces as a
        'failed' event_handler status, while the session handler still succeeds."""
        hamlet = self.example_user("hamlet")
        self.login_user(hamlet)
        # Force the post-payment upgrade step to blow up.
        with patch(
            "corporate.lib.stripe_event_handler.process_initial_upgrade", side_effect=Exception
        ), self.assertLogs("corporate.stripe", "WARNING"):
            response = self.upgrade()
        [payment_intent] = PaymentIntent.objects.all().order_by("-id")
        response_dict = self.assert_json_success(response)
        self.assert_details_of_valid_session_from_event_status_endpoint(
            response_dict["stripe_session_id"],
            {
                "type": "upgrade_from_billing_page",
                "status": "completed",
                "stripe_payment_intent_id": payment_intent.stripe_payment_intent_id,
                "event_handler": {
                    "status": "succeeded",
                },
            },
        )
        self.assert_details_of_valid_payment_intent_from_event_status_endpoint(
            payment_intent.stripe_payment_intent_id,
            {
                "status": "succeeded",
                "event_handler": {
                    "status": "failed",
                    "error": {
                        "message": "Something went wrong. Please contact desdemona+admin@zulip.com.",
                        "description": "uncaught exception in payment_intent.succeeded event handler",
                    },
                },
            },
        )
    @mock_stripe()
    def test_restart_payment_intent_session_errors(self, *mocks: Any) -> None:
        """Error paths for /json/billing/session/start_retry_payment_intent_session.

        Missing argument, no Customer yet, and an unknown payment intent id
        must each return a distinct JSON error.
        """
        user = self.example_user("hamlet")
        self.login_user(user)
        json_response = self.client_post("/json/billing/session/start_retry_payment_intent_session")
        self.assert_json_error(json_response, "Missing 'stripe_payment_intent_id' argument")
        json_response = self.client_post(
            "/json/billing/session/start_retry_payment_intent_session",
            {"stripe_payment_intent_id": "stripe_payment_intent_id"},
        )
        self.assert_json_error(json_response, "Please create a customer first.")
        # Start (but do not complete) an upgrade so that a Customer exists.
        upgrade_page_response = self.client_get("/upgrade/")
        self.client_post(
            "/json/billing/upgrade",
            {
                "billing_modality": "charge_automatically",
                "schedule": "monthly",
                "signed_seat_count": self.get_signed_seat_count_from_response(
                    upgrade_page_response
                ),
                "salt": self.get_salt_from_response(upgrade_page_response),
                "license_management": "automatic",
            },
        )
        # A payment intent id that does not belong to this customer is rejected.
        response = self.client_post(
            "/json/billing/session/start_retry_payment_intent_session",
            {"stripe_payment_intent_id": "stripe_payment_intent_id"},
        )
        self.assert_json_error(response, "Invalid payment intent id.")
def test_request_sponsorship_form_with_invalid_url(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
data = {
"organization-type": Realm.ORG_TYPES["opensource"]["id"],
"website": "invalid-url",
"description": "Infinispan is a distributed in-memory key/value data store with optional schema.",
}
response = self.client_post("/json/billing/sponsorship", data)
self.assert_json_error(response, "Enter a valid URL.")
def test_request_sponsorship_form_with_blank_url(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
data = {
"organization-type": Realm.ORG_TYPES["opensource"]["id"],
"website": "",
"description": "Infinispan is a distributed in-memory key/value data store with optional schema.",
}
response = self.client_post("/json/billing/sponsorship", data)
self.assert_json_success(response)
def test_support_request(self) -> None:
user = self.example_user("hamlet")
self.assertIsNone(get_customer_by_realm(user.realm))
self.login_user(user)
result = self.client_get("/support/")
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["Contact support"], result)
data = {
"request_subject": "Not getting messages.",
"request_message": "Running into this weird issue.",
}
result = self.client_post("/support/", data)
self.assert_in_success_response(["Thanks for contacting us!"], result)
from django.core.mail import outbox
self.assert_length(outbox, 1)
for message in outbox:
self.assert_length(message.to, 1)
self.assertEqual(message.to[0], "desdemona+admin@zulip.com")
self.assertEqual(message.subject, "Support request for zulip")
self.assertEqual(message.reply_to, ["hamlet@zulip.com"])
self.assertEqual(self.email_envelope_from(message), settings.NOREPLY_EMAIL_ADDRESS)
self.assertIn("Zulip Support <noreply-", self.email_display_from(message))
self.assertIn("Requested by: King Hamlet (Member)", message.body)
self.assertIn(
"Support URL: http://zulip.testserver/activity/support?q=zulip", message.body
)
self.assertIn("Subject: Not getting messages.", message.body)
self.assertIn("Message:\nRunning into this weird issue", message.body)
    def test_request_sponsorship(self) -> None:
        """A sponsorship request records a ZulipSponsorshipRequest, marks the
        Customer pending, emails support, and changes what billing pages show."""
        user = self.example_user("hamlet")
        self.assertIsNone(get_customer_by_realm(user.realm))
        self.login_user(user)
        data = {
            "organization-type": Realm.ORG_TYPES["opensource"]["id"],
            "website": "https://infinispan.org/",
            "description": "Infinispan is a distributed in-memory key/value data store with optional schema.",
        }
        response = self.client_post("/json/billing/sponsorship", data)
        self.assert_json_success(response)
        sponsorship_request = ZulipSponsorshipRequest.objects.filter(
            realm=user.realm, requested_by=user
        ).first()
        assert sponsorship_request is not None
        self.assertEqual(sponsorship_request.org_website, data["website"])
        self.assertEqual(sponsorship_request.org_description, data["description"])
        self.assertEqual(
            sponsorship_request.org_type,
            Realm.ORG_TYPES["opensource"]["id"],
        )
        customer = get_customer_by_realm(user.realm)
        assert customer is not None
        self.assertEqual(customer.sponsorship_pending, True)
        from django.core.mail import outbox
        self.assert_length(outbox, 1)
        for message in outbox:
            self.assert_length(message.to, 1)
            self.assertEqual(message.to[0], "desdemona+admin@zulip.com")
            self.assertEqual(message.subject, "Sponsorship request (Open-source project) for zulip")
            self.assertEqual(message.reply_to, ["hamlet@zulip.com"])
            self.assertEqual(self.email_envelope_from(message), settings.NOREPLY_EMAIL_ADDRESS)
            self.assertIn("Zulip sponsorship <noreply-", self.email_display_from(message))
            self.assertIn("Requested by: King Hamlet (Member)", message.body)
            self.assertIn(
                "Support URL: http://zulip.testserver/activity/support?q=zulip", message.body
            )
            self.assertIn("Website: https://infinispan.org", message.body)
            self.assertIn("Organization type: Open-source", message.body)
            self.assertIn("Description:\nInfinispan is a distributed in-memory", message.body)
        # A pending request redirects /upgrade/ to /billing/ and shows a notice there.
        response = self.client_get("/upgrade/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response["Location"], "/billing/")
        response = self.client_get("/billing/")
        self.assert_in_success_response(
            ["Your organization has requested sponsored or discounted hosting."], response
        )
        # Non-billing-admin users cannot view the billing page.
        self.login_user(self.example_user("othello"))
        response = self.client_get("/billing/")
        self.assert_in_success_response(
            ["You must be an organization owner or a billing administrator to view this page."],
            response,
        )
        # Once sponsored, the billing page reflects the free Standard plan.
        user.realm.plan_type = Realm.PLAN_TYPE_STANDARD_FREE
        user.realm.save()
        self.login_user(self.example_user("hamlet"))
        response = self.client_get("/billing/")
        self.assert_in_success_response(
            ["Your organization is fully sponsored and is on the <b>Zulip Cloud Standard</b>"],
            response,
        )
def test_redirect_for_billing_home(self) -> None:
user = self.example_user("iago")
self.login_user(user)
response = self.client_get("/billing/")
self.assertEqual(response.status_code, 302)
self.assertEqual("/upgrade/", response["Location"])
user.realm.plan_type = Realm.PLAN_TYPE_STANDARD_FREE
user.realm.save()
response = self.client_get("/billing/")
self.assertEqual(response.status_code, 200)
user.realm.plan_type = Realm.PLAN_TYPE_LIMITED
user.realm.save()
Customer.objects.create(realm=user.realm, stripe_customer_id="cus_123")
response = self.client_get("/billing/")
self.assertEqual(response.status_code, 302)
self.assertEqual("/upgrade/", response["Location"])
    def test_redirect_for_upgrade_page(self) -> None:
        """/upgrade/ is served only while the realm has no active paid or
        sponsored plan; otherwise it redirects to /billing/."""
        user = self.example_user("iago")
        self.login_user(user)
        response = self.client_get("/upgrade/")
        self.assertEqual(response.status_code, 200)
        # Sponsored realms have nothing to upgrade to.
        user.realm.plan_type = Realm.PLAN_TYPE_STANDARD_FREE
        user.realm.save()
        response = self.client_get("/upgrade/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response["Location"], "/billing/")
        user.realm.plan_type = Realm.PLAN_TYPE_LIMITED
        user.realm.save()
        # A Customer alone (no CustomerPlan) still allows the upgrade page.
        customer = Customer.objects.create(realm=user.realm, stripe_customer_id="cus_123")
        response = self.client_get("/upgrade/")
        self.assertEqual(response.status_code, 200)
        # Once a plan exists, /upgrade/ redirects away.
        CustomerPlan.objects.create(
            customer=customer,
            billing_cycle_anchor=timezone_now(),
            billing_schedule=CustomerPlan.ANNUAL,
            tier=CustomerPlan.STANDARD,
        )
        response = self.client_get("/upgrade/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response["Location"], "/billing/")
        with self.settings(FREE_TRIAL_DAYS=30):
            response = self.client_get("/upgrade/")
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response["Location"], "/billing/")
            # The onboarding flag is preserved across the redirect.
            response = self.client_get("/upgrade/", {"onboarding": "true"})
            self.assertEqual(response.status_code, 302)
            self.assertEqual(response["Location"], "/billing/?onboarding=true")
def test_get_latest_seat_count(self) -> None:
realm = get_realm("zulip")
initial_count = get_latest_seat_count(realm)
user1 = UserProfile.objects.create(
realm=realm, email="user1@zulip.com", delivery_email="user1@zulip.com"
)
user2 = UserProfile.objects.create(
realm=realm, email="user2@zulip.com", delivery_email="user2@zulip.com"
)
self.assertEqual(get_latest_seat_count(realm), initial_count + 2)
# Test that bots aren't counted
user1.is_bot = True
user1.save(update_fields=["is_bot"])
self.assertEqual(get_latest_seat_count(realm), initial_count + 1)
# Test that inactive users aren't counted
do_deactivate_user(user2, acting_user=None)
self.assertEqual(get_latest_seat_count(realm), initial_count)
# Test guests
# Adding a guest to a realm with a lot of members shouldn't change anything
UserProfile.objects.create(
realm=realm,
email="user3@zulip.com",
delivery_email="user3@zulip.com",
role=UserProfile.ROLE_GUEST,
)
self.assertEqual(get_latest_seat_count(realm), initial_count)
# Test 1 member and 5 guests
realm = do_create_realm(string_id="second", name="second")
UserProfile.objects.create(
realm=realm, email="member@second.com", delivery_email="member@second.com"
)
for i in range(5):
UserProfile.objects.create(
realm=realm,
email=f"guest{i}@second.com",
delivery_email=f"guest{i}@second.com",
role=UserProfile.ROLE_GUEST,
)
self.assertEqual(get_latest_seat_count(realm), 1)
# Test 1 member and 6 guests
UserProfile.objects.create(
realm=realm,
email="guest5@second.com",
delivery_email="guest5@second.com",
role=UserProfile.ROLE_GUEST,
)
self.assertEqual(get_latest_seat_count(realm), 2)
def test_sign_string(self) -> None:
string = "abc"
signed_string, salt = sign_string(string)
self.assertEqual(string, unsign_string(signed_string, salt))
with self.assertRaises(signing.BadSignature):
unsign_string(signed_string, "randomsalt")
    # This tests both the payment method string, and also is a very basic
    # test that the various upgrade paths involving non-standard payment
    # histories don't throw errors
    @mock_stripe()
    def test_payment_method_string(self, *mocks: Mock) -> None:
        """Placeholder: the card case is covered by test_initial_upgrade; the
        commented-out scenarios below document upgrade paths (invoice billing,
        downgrade with card on file) still awaiting coverage."""
        pass
        # If you sign up with a card, we should show your card as the payment method
        # Already tested in test_initial_upgrade
        # If you pay by invoice, your payment method should be
        # "Billed by invoice", even if you have a card on file
        # user = self.example_user("hamlet")
        # do_create_stripe_customer(user, stripe_create_token().id)
        # self.login_user(user)
        # self.upgrade(invoice=True)
        # stripe_customer = stripe_get_customer(Customer.objects.get(realm=user.realm).stripe_customer_id)
        # self.assertEqual('Billed by invoice', payment_method_string(stripe_customer))
        # If you sign up with a card and then downgrade, we still have your
        # card on file, and should show it
        # TODO
    @mock_stripe()
    def test_attach_discount_to_realm(self, *mocks: Mock) -> None:
        """attach_discount_to_realm works before and after a Stripe customer
        exists, reprices an active plan in place, and writes audit-log entries."""
        # Attach discount before Stripe customer exists
        user = self.example_user("hamlet")
        attach_discount_to_realm(user.realm, Decimal(85), acting_user=user)
        realm_audit_log = RealmAuditLog.objects.filter(
            event_type=RealmAuditLog.REALM_DISCOUNT_CHANGED
        ).last()
        assert realm_audit_log is not None
        expected_extra_data = str({"old_discount": None, "new_discount": Decimal("85")})
        self.assertEqual(realm_audit_log.extra_data, expected_extra_data)
        self.login_user(user)
        # Check that the discount appears in page_params
        self.assert_in_success_response(["85"], self.client_get("/upgrade/"))
        # Check that the customer was charged the discounted amount
        self.upgrade()
        customer = Customer.objects.first()
        assert customer is not None
        [charge] = stripe.Charge.list(customer=customer.stripe_customer_id)
        # 85% off the 8000/seat annual price = 1200/seat.
        self.assertEqual(1200 * self.seat_count, charge.amount)
        stripe_customer_id = customer.stripe_customer_id
        assert stripe_customer_id is not None
        [invoice] = stripe.Invoice.list(customer=stripe_customer_id)
        self.assertEqual(
            [1200 * self.seat_count, -1200 * self.seat_count],
            [item.amount for item in invoice.lines],
        )
        # Check CustomerPlan reflects the discount
        plan = CustomerPlan.objects.get(price_per_license=1200, discount=Decimal(85))
        # Attach discount to existing Stripe customer
        plan.status = CustomerPlan.ENDED
        plan.save(update_fields=["status"])
        attach_discount_to_realm(user.realm, Decimal(25), acting_user=user)
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.upgrade(license_management="automatic", billing_modality="charge_automatically")
        [charge, _] = stripe.Charge.list(customer=customer.stripe_customer_id)
        # 25% off 8000 = 6000/seat.
        self.assertEqual(6000 * self.seat_count, charge.amount)
        stripe_customer_id = customer.stripe_customer_id
        assert stripe_customer_id is not None
        [invoice, _] = stripe.Invoice.list(customer=stripe_customer_id)
        self.assertEqual(
            [6000 * self.seat_count, -6000 * self.seat_count],
            [item.amount for item in invoice.lines],
        )
        plan = CustomerPlan.objects.get(price_per_license=6000, discount=Decimal(25))
        # Changing the discount mid-plan reprices the existing plan in place.
        attach_discount_to_realm(user.realm, Decimal(50), acting_user=user)
        plan.refresh_from_db()
        self.assertEqual(plan.price_per_license, 4000)
        self.assertEqual(plan.discount, 50)
        customer.refresh_from_db()
        self.assertEqual(customer.default_discount, 50)
        # The renewal invoice a year later uses the updated per-license price.
        invoice_plans_as_needed(self.next_year + timedelta(days=10))
        stripe_customer_id = customer.stripe_customer_id
        assert stripe_customer_id is not None
        [invoice, _, _] = stripe.Invoice.list(customer=stripe_customer_id)
        self.assertEqual([4000 * self.seat_count], [item.amount for item in invoice.lines])
        realm_audit_log = RealmAuditLog.objects.filter(
            event_type=RealmAuditLog.REALM_DISCOUNT_CHANGED
        ).last()
        assert realm_audit_log is not None
        expected_extra_data = str(
            {"old_discount": Decimal("25.0000"), "new_discount": Decimal("50")}
        )
        self.assertEqual(realm_audit_log.extra_data, expected_extra_data)
        self.assertEqual(realm_audit_log.acting_user, user)
    def test_approve_sponsorship(self) -> None:
        """Approving sponsorship upgrades the realm to STANDARD_FREE and sends
        a notification-bot PM announcing the upgrade."""
        user = self.example_user("hamlet")
        approve_sponsorship(user.realm, acting_user=user)
        realm = get_realm("zulip")
        self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD_FREE)
        expected_message = "Your organization's request for sponsored hosting has been approved! :tada:.\nYou have been upgraded to Zulip Cloud Standard, free of charge."
        sender = get_system_bot(settings.NOTIFICATION_BOT, user.realm_id)
        # desdemona is the recipient of the notification PM — presumably as an
        # organization owner of the zulip realm; verify against approve_sponsorship.
        recipient_id = self.example_user("desdemona").recipient_id
        message = Message.objects.filter(sender=sender.id).first()
        assert message is not None
        self.assertEqual(message.content, expected_message)
        self.assertEqual(message.recipient.type, Recipient.PERSONAL)
        self.assertEqual(message.recipient_id, recipient_id)
def test_update_sponsorship_status(self) -> None:
lear = get_realm("lear")
iago = self.example_user("iago")
update_sponsorship_status(lear, True, acting_user=iago)
customer = get_customer_by_realm(realm=lear)
assert customer is not None
self.assertTrue(customer.sponsorship_pending)
realm_audit_log = RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_SPONSORSHIP_PENDING_STATUS_CHANGED
).last()
assert realm_audit_log is not None
expected_extra_data = {"sponsorship_pending": True}
self.assertEqual(realm_audit_log.extra_data, str(expected_extra_data))
self.assertEqual(realm_audit_log.acting_user, iago)
def test_get_discount_for_realm(self) -> None:
user = self.example_user("hamlet")
self.assertEqual(get_discount_for_realm(user.realm), None)
attach_discount_to_realm(user.realm, Decimal(85), acting_user=None)
self.assertEqual(get_discount_for_realm(user.realm), 85)
    @mock_stripe()
    def test_replace_payment_method(self, *mocks: Mock) -> None:
        """Exercise the Stripe Checkout card-update flow end to end: a card
        that fails to attach, a card that attaches but is declined, detaching
        all cards, and a successful replacement. Also verifies access control
        on the event-status endpoint and RealmAuditLog bookkeeping."""
        user = self.example_user("hamlet")
        self.login_user(user)
        self.upgrade()
        # Create an open invoice
        customer = Customer.objects.first()
        assert customer is not None
        stripe_customer_id = customer.stripe_customer_id
        assert stripe_customer_id is not None
        stripe.InvoiceItem.create(amount=5000, currency="usd", customer=stripe_customer_id)
        stripe_invoice = stripe.Invoice.create(customer=stripe_customer_id)
        stripe.Invoice.finalize_invoice(stripe_invoice)
        # Clear card-change audit rows left over from self.upgrade() so the
        # counts asserted below reflect only this test's card changes.
        RealmAuditLog.objects.filter(event_type=RealmAuditLog.STRIPE_CARD_CHANGED).delete()
        start_session_json_response = self.client_post(
            "/json/billing/session/start_card_update_session"
        )
        response_dict = self.assert_json_success(start_session_json_response)
        self.assert_details_of_valid_session_from_event_status_endpoint(
            response_dict["stripe_session_id"],
            {
                "type": "card_update_from_billing_page",
                "status": "created",
            },
        )
        with self.assertRaises(stripe.error.CardError):
            # We don't have to handle this since the Stripe Checkout page would
            # ask Customer to enter a valid card number. trigger_stripe_checkout_session_completed_webhook
            # emulates what happens in the Stripe Checkout page. Adding this check mostly for coverage of
            # create_payment_method.
            self.trigger_stripe_checkout_session_completed_webhook(
                create_payment_method(self.get_test_card_number(attaches_to_customer=False))
            )
        # Second attempt: the card attaches but the charge is declined; the
        # failure is surfaced through the event-status endpoint.
        start_session_json_response = self.client_post(
            "/json/billing/session/start_card_update_session"
        )
        response_dict = self.assert_json_success(start_session_json_response)
        self.assert_details_of_valid_session_from_event_status_endpoint(
            response_dict["stripe_session_id"],
            {
                "type": "card_update_from_billing_page",
                "status": "created",
            },
        )
        with self.assertLogs("corporate.stripe", "INFO") as m:
            self.trigger_stripe_checkout_session_completed_webhook(
                create_payment_method(
                    self.get_test_card_number(attaches_to_customer=True, charge_succeeds=False)
                )
            )
            self.assertEqual(
                m.output[0],
                "INFO:corporate.stripe:Stripe card error: 402 card_error card_declined None",
            )
        response_dict = self.assert_json_success(start_session_json_response)
        self.assert_details_of_valid_session_from_event_status_endpoint(
            response_dict["stripe_session_id"],
            {
                "type": "card_update_from_billing_page",
                "status": "completed",
                "event_handler": {
                    "status": "failed",
                    "error": {"message": "Your card was declined.", "description": "card error"},
                },
            },
        )
        # Even though the charge was declined, the declined card is now on
        # file and a STRIPE_CARD_CHANGED audit entry was written.
        response = self.client_get("/billing/")
        self.assert_in_success_response(["payment method: <strong>visa ending in 0341"], response)
        assert RealmAuditLog.objects.filter(event_type=RealmAuditLog.STRIPE_CARD_CHANGED).exists()
        stripe_payment_methods = stripe.PaymentMethod.list(customer=stripe_customer_id, type="card")
        self.assert_length(stripe_payment_methods, 2)
        # Detach every card so the billing page shows "no payment method".
        for stripe_payment_method in stripe_payment_methods:
            stripe.PaymentMethod.detach(stripe_payment_method.id)
        response = self.client_get("/billing/")
        self.assert_in_success_response(
            ["payment method: <strong>No payment method on file"], response
        )
        # Third attempt: a Mastercard that attaches and charges successfully.
        start_session_json_response = self.client_post(
            "/json/billing/session/start_card_update_session"
        )
        self.assert_json_success(start_session_json_response)
        self.trigger_stripe_checkout_session_completed_webhook(
            create_payment_method(
                self.get_test_card_number(
                    attaches_to_customer=True, charge_succeeds=True, card_provider="mastercard"
                )
            )
        )
        response_dict = self.assert_json_success(start_session_json_response)
        self.assert_details_of_valid_session_from_event_status_endpoint(
            response_dict["stripe_session_id"],
            {
                "type": "card_update_from_billing_page",
                "status": "completed",
                "event_handler": {"status": "succeeded"},
            },
        )
        # Only billing admins / org owners may poll the event-status endpoint.
        self.login_user(self.example_user("iago"))
        response = self.client_get(
            "/json/billing/event/status",
            {"stripe_session_id": response_dict["stripe_session_id"]},
        )
        self.assert_json_error_contains(
            response, "Must be a billing administrator or an organization owner"
        )
        self.login_user(self.example_user("hamlet"))
        response = self.client_get("/billing/")
        self.assert_in_success_response(
            ["payment method: <strong>mastercard ending in 4444"], response
        )
        self.assert_length(stripe.PaymentMethod.list(customer=stripe_customer_id, type="card"), 1)
        # Ideally we'd also test that we don't pay invoices with collection_method=='send_invoice'
        for stripe_invoice in stripe.Invoice.list(customer=stripe_customer_id):
            self.assertEqual(stripe_invoice.status, "paid")
        self.assertEqual(
            2, RealmAuditLog.objects.filter(event_type=RealmAuditLog.STRIPE_CARD_CHANGED).count()
        )
    def test_downgrade(self) -> None:
        """Schedule a downgrade at end of cycle and walk the plan through the
        rest of its lifecycle: ledger updates continue during the cycle, the
        realm is downgraded once the cycle ends, and invoicing stops after the
        final invoice_plan call."""
        user = self.example_user("hamlet")
        self.login_user(user)
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        plan = get_current_plan_by_realm(user.realm)
        assert plan is not None
        self.assertEqual(plan.licenses(), self.seat_count)
        self.assertEqual(plan.licenses_at_next_renewal(), self.seat_count)
        # Schedule the downgrade via the billing endpoint and check the
        # INFO-level status-change log line it emits.
        with self.assertLogs("corporate.stripe", "INFO") as m:
            with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
                response = self.client_patch(
                    "/json/billing/plan", {"status": CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE}
                )
                stripe_customer_id = Customer.objects.get(realm=user.realm).id
                new_plan = get_current_plan_by_realm(user.realm)
                assert new_plan is not None
                expected_log = f"INFO:corporate.stripe:Change plan status: Customer.id: {stripe_customer_id}, CustomerPlan.id: {new_plan.id}, status: {CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE}"
                self.assertEqual(m.output[0], expected_log)
            self.assert_json_success(response)
        # A pending downgrade means no licenses at next renewal.
        plan.refresh_from_db()
        self.assertEqual(plan.licenses(), self.seat_count)
        self.assertEqual(plan.licenses_at_next_renewal(), None)
        with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
            mock_customer = Mock(email=user.delivery_email)
            with patch(
                "corporate.views.billing_page.stripe_get_customer", return_value=mock_customer
            ):
                response = self.client_get("/billing/")
                self.assert_in_success_response(
                    [
                        "Your plan will be downgraded to <strong>Zulip Limited</strong> on "
                        "<strong>January 2, 2013</strong>",
                        "You plan is scheduled for downgrade on <strong>January 2, 2013</strong>",
                        "Cancel downgrade",
                    ],
                    response,
                )
        # Verify that we still write LicenseLedger rows during the remaining
        # part of the cycle
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=20):
            update_license_ledger_if_needed(user.realm, self.now)
        self.assertEqual(
            LicenseLedger.objects.order_by("-id")
            .values_list("licenses", "licenses_at_next_renewal")
            .first(),
            (20, 20),
        )
        # Verify that we invoice them for the additional users
        from stripe import Invoice
        Invoice.create = lambda **args: None # type: ignore[assignment] # cleaner than mocking
        Invoice.finalize_invoice = lambda *args: None # type: ignore[assignment] # cleaner than mocking
        with patch("stripe.InvoiceItem.create") as mocked:
            invoice_plans_as_needed(self.next_month)
        mocked.assert_called_once()
        mocked.reset_mock()
        # Check that we downgrade properly if the cycle is over
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=30):
            update_license_ledger_if_needed(user.realm, self.next_year)
        plan = CustomerPlan.objects.first()
        assert plan is not None
        self.assertEqual(get_realm("zulip").plan_type, Realm.PLAN_TYPE_LIMITED)
        self.assertEqual(plan.status, CustomerPlan.ENDED)
        self.assertEqual(
            LicenseLedger.objects.order_by("-id")
            .values_list("licenses", "licenses_at_next_renewal")
            .first(),
            (20, 20),
        )
        # Verify that we don't write LicenseLedger rows once we've downgraded
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=40):
            update_license_ledger_if_needed(user.realm, self.next_year)
        self.assertEqual(
            LicenseLedger.objects.order_by("-id")
            .values_list("licenses", "licenses_at_next_renewal")
            .first(),
            (20, 20),
        )
        # Verify that we call invoice_plan once more after cycle end but
        # don't invoice them for users added after the cycle end
        plan = CustomerPlan.objects.first()
        assert plan is not None
        self.assertIsNotNone(plan.next_invoice_date)
        with patch("stripe.InvoiceItem.create") as mocked:
            invoice_plans_as_needed(self.next_year + timedelta(days=32))
        mocked.assert_not_called()
        mocked.reset_mock()
        # Check that we updated next_invoice_date in invoice_plan
        plan = CustomerPlan.objects.first()
        assert plan is not None
        self.assertIsNone(plan.next_invoice_date)
        # Check that we don't call invoice_plan after that final call
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=50):
            update_license_ledger_if_needed(user.realm, self.next_year + timedelta(days=80))
        with patch("corporate.lib.stripe.invoice_plan") as mocked:
            invoice_plans_as_needed(self.next_year + timedelta(days=400))
        mocked.assert_not_called()
    @mock_stripe()
    def test_switch_from_monthly_plan_to_annual_plan_for_automatic_license_management(
        self, *mocks: Mock
    ) -> None:
        """With automatic license management, switching a monthly plan to
        annual at end of cycle should end the monthly plan, create the annual
        plan anchored at the switch date, carry the license ledger over, write
        an audit-log entry, and generate the expected Stripe invoices."""
        user = self.example_user("hamlet")
        self.login_user(user)
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.upgrade(schedule="monthly")
        monthly_plan = get_current_plan_by_realm(user.realm)
        assert monthly_plan is not None
        self.assertEqual(monthly_plan.automanage_licenses, True)
        self.assertEqual(monthly_plan.billing_schedule, CustomerPlan.MONTHLY)
        stripe_customer_id = Customer.objects.get(realm=user.realm).id
        new_plan = get_current_plan_by_realm(user.realm)
        assert new_plan is not None
        # Request the switch and verify the status-change log line.
        with self.assertLogs("corporate.stripe", "INFO") as m:
            with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
                response = self.client_patch(
                    "/json/billing/plan",
                    {"status": CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE},
                )
                expected_log = f"INFO:corporate.stripe:Change plan status: Customer.id: {stripe_customer_id}, CustomerPlan.id: {new_plan.id}, status: {CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE}"
                self.assertEqual(m.output[0], expected_log)
            self.assert_json_success(response)
        monthly_plan.refresh_from_db()
        self.assertEqual(monthly_plan.status, CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE)
        with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
            response = self.client_get("/billing/")
        self.assert_in_success_response(
            ["be switched from monthly to annual billing on <strong>February 2, 2012"], response
        )
        # Seat-count changes before the cycle boundary still land on the
        # monthly plan's ledger.
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=20):
            update_license_ledger_if_needed(user.realm, self.now)
        self.assertEqual(LicenseLedger.objects.filter(plan=monthly_plan).count(), 2)
        self.assertEqual(
            LicenseLedger.objects.order_by("-id")
            .values_list("licenses", "licenses_at_next_renewal")
            .first(),
            (20, 20),
        )
        # Crossing the cycle boundary triggers the switch: the monthly plan
        # ends and a new annual plan takes over.
        with patch("corporate.lib.stripe.timezone_now", return_value=self.next_month):
            with patch("corporate.lib.stripe.get_latest_seat_count", return_value=25):
                update_license_ledger_if_needed(user.realm, self.next_month)
        self.assertEqual(LicenseLedger.objects.filter(plan=monthly_plan).count(), 2)
        customer = get_customer_by_realm(user.realm)
        assert customer is not None
        self.assertEqual(CustomerPlan.objects.filter(customer=customer).count(), 2)
        monthly_plan.refresh_from_db()
        self.assertEqual(monthly_plan.status, CustomerPlan.ENDED)
        self.assertEqual(monthly_plan.next_invoice_date, self.next_month)
        annual_plan = get_current_plan_by_realm(user.realm)
        assert annual_plan is not None
        self.assertEqual(annual_plan.status, CustomerPlan.ACTIVE)
        self.assertEqual(annual_plan.billing_schedule, CustomerPlan.ANNUAL)
        self.assertEqual(annual_plan.invoicing_status, CustomerPlan.INITIAL_INVOICE_TO_BE_SENT)
        self.assertEqual(annual_plan.billing_cycle_anchor, self.next_month)
        self.assertEqual(annual_plan.next_invoice_date, self.next_month)
        self.assertEqual(annual_plan.invoiced_through, None)
        # The annual plan starts with a renewal entry (20 seats) plus the
        # post-switch seat bump to 25.
        annual_ledger_entries = LicenseLedger.objects.filter(plan=annual_plan).order_by("id")
        self.assert_length(annual_ledger_entries, 2)
        self.assertEqual(annual_ledger_entries[0].is_renewal, True)
        self.assertEqual(
            annual_ledger_entries.values_list("licenses", "licenses_at_next_renewal")[0], (20, 20)
        )
        self.assertEqual(annual_ledger_entries[1].is_renewal, False)
        self.assertEqual(
            annual_ledger_entries.values_list("licenses", "licenses_at_next_renewal")[1], (25, 25)
        )
        # The switch is recorded in the audit log with both plan ids.
        audit_log = RealmAuditLog.objects.get(
            event_type=RealmAuditLog.CUSTOMER_SWITCHED_FROM_MONTHLY_TO_ANNUAL_PLAN
        )
        extra_data: str = assert_is_not_none(audit_log.extra_data)
        self.assertEqual(audit_log.realm, user.realm)
        self.assertEqual(orjson.loads(extra_data)["monthly_plan_id"], monthly_plan.id)
        self.assertEqual(orjson.loads(extra_data)["annual_plan_id"], annual_plan.id)
        invoice_plans_as_needed(self.next_month)
        annual_ledger_entries = LicenseLedger.objects.filter(plan=annual_plan).order_by("id")
        self.assert_length(annual_ledger_entries, 2)
        annual_plan.refresh_from_db()
        self.assertEqual(annual_plan.invoicing_status, CustomerPlan.DONE)
        self.assertEqual(annual_plan.invoiced_through, annual_ledger_entries[1])
        self.assertEqual(annual_plan.billing_cycle_anchor, self.next_month)
        self.assertEqual(annual_plan.next_invoice_date, add_months(self.next_month, 1))
        monthly_plan.refresh_from_db()
        self.assertEqual(monthly_plan.next_invoice_date, None)
        assert customer.stripe_customer_id
        # Newest invoice first: invoice0 is the annual plan's (renewal +
        # 5 additional licenses), invoice1 is the monthly plan's final one.
        [invoice0, invoice1, invoice2] = stripe.Invoice.list(customer=customer.stripe_customer_id)
        [invoice_item0, invoice_item1] = invoice0.get("lines")
        annual_plan_invoice_item_params = {
            "amount": 5 * 80 * 100,
            "description": "Additional license (Feb 2, 2012 - Feb 2, 2013)",
            "plan": None,
            "quantity": 5,
            "subscription": None,
            "discountable": False,
            "period": {
                "start": datetime_to_timestamp(self.next_month),
                "end": datetime_to_timestamp(add_months(self.next_month, 12)),
            },
        }
        for key, value in annual_plan_invoice_item_params.items():
            self.assertEqual(invoice_item0[key], value)
        annual_plan_invoice_item_params = {
            "amount": 20 * 80 * 100,
            "description": "Zulip Cloud Standard - renewal",
            "plan": None,
            "quantity": 20,
            "subscription": None,
            "discountable": False,
            "period": {
                "start": datetime_to_timestamp(self.next_month),
                "end": datetime_to_timestamp(add_months(self.next_month, 12)),
            },
        }
        for key, value in annual_plan_invoice_item_params.items():
            self.assertEqual(invoice_item1[key], value)
        [monthly_plan_invoice_item] = invoice1.get("lines")
        monthly_plan_invoice_item_params = {
            "amount": 14 * 8 * 100,
            "description": "Additional license (Jan 2, 2012 - Feb 2, 2012)",
            "plan": None,
            "quantity": 14,
            "subscription": None,
            "discountable": False,
            "period": {
                "start": datetime_to_timestamp(self.now),
                "end": datetime_to_timestamp(self.next_month),
            },
        }
        for key, value in monthly_plan_invoice_item_params.items():
            self.assertEqual(monthly_plan_invoice_item[key], value)
        # Seats added after the switch are invoiced pro-rated on the annual plan.
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=30):
            update_license_ledger_if_needed(user.realm, add_months(self.next_month, 1))
        invoice_plans_as_needed(add_months(self.next_month, 1))
        [invoice0, invoice1, invoice2, invoice3] = stripe.Invoice.list(
            customer=customer.stripe_customer_id
        )
        [monthly_plan_invoice_item] = invoice0.get("lines")
        monthly_plan_invoice_item_params = {
            "amount": 5 * 7366,
            "description": "Additional license (Mar 2, 2012 - Feb 2, 2013)",
            "plan": None,
            "quantity": 5,
            "subscription": None,
            "discountable": False,
            "period": {
                "start": datetime_to_timestamp(add_months(self.next_month, 1)),
                "end": datetime_to_timestamp(add_months(self.next_month, 12)),
            },
        }
        for key, value in monthly_plan_invoice_item_params.items():
            self.assertEqual(monthly_plan_invoice_item[key], value)
        # A year later, the annual plan renews at the latest seat count (30).
        invoice_plans_as_needed(add_months(self.now, 13))
        [invoice0, invoice1, invoice2, invoice3, invoice4] = stripe.Invoice.list(
            customer=customer.stripe_customer_id
        )
        [invoice_item] = invoice0.get("lines")
        annual_plan_invoice_item_params = {
            "amount": 30 * 80 * 100,
            "description": "Zulip Cloud Standard - renewal",
            "plan": None,
            "quantity": 30,
            "subscription": None,
            "discountable": False,
            "period": {
                "start": datetime_to_timestamp(add_months(self.next_month, 12)),
                "end": datetime_to_timestamp(add_months(self.next_month, 24)),
            },
        }
        for key, value in annual_plan_invoice_item_params.items():
            self.assertEqual(invoice_item[key], value)
    @mock_stripe()
    def test_switch_from_monthly_plan_to_annual_plan_for_manual_license_management(
        self, *mocks: Mock
    ) -> None:
        """Same monthly-to-annual switch as the automatic-management test,
        but with manual license management: the license count stays fixed at
        the purchased amount and the annual plan renews at that count."""
        user = self.example_user("hamlet")
        num_licenses = 35
        self.login_user(user)
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.upgrade(schedule="monthly", license_management="manual", licenses=num_licenses)
        monthly_plan = get_current_plan_by_realm(user.realm)
        assert monthly_plan is not None
        self.assertEqual(monthly_plan.automanage_licenses, False)
        self.assertEqual(monthly_plan.billing_schedule, CustomerPlan.MONTHLY)
        stripe_customer_id = Customer.objects.get(realm=user.realm).id
        new_plan = get_current_plan_by_realm(user.realm)
        assert new_plan is not None
        # Request the switch and verify the status-change log line.
        with self.assertLogs("corporate.stripe", "INFO") as m:
            with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
                response = self.client_patch(
                    "/json/billing/plan",
                    {"status": CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE},
                )
            self.assertEqual(
                m.output[0],
                f"INFO:corporate.stripe:Change plan status: Customer.id: {stripe_customer_id}, CustomerPlan.id: {new_plan.id}, status: {CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE}",
            )
            self.assert_json_success(response)
        monthly_plan.refresh_from_db()
        self.assertEqual(monthly_plan.status, CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE)
        with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
            response = self.client_get("/billing/")
        self.assert_in_success_response(
            ["be switched from monthly to annual billing on <strong>February 2, 2012"], response
        )
        # At the cycle boundary the monthly plan ends and the annual plan is
        # created, anchored at the switch date.
        invoice_plans_as_needed(self.next_month)
        self.assertEqual(LicenseLedger.objects.filter(plan=monthly_plan).count(), 1)
        customer = get_customer_by_realm(user.realm)
        assert customer is not None
        self.assertEqual(CustomerPlan.objects.filter(customer=customer).count(), 2)
        monthly_plan.refresh_from_db()
        self.assertEqual(monthly_plan.status, CustomerPlan.ENDED)
        self.assertEqual(monthly_plan.next_invoice_date, None)
        annual_plan = get_current_plan_by_realm(user.realm)
        assert annual_plan is not None
        self.assertEqual(annual_plan.status, CustomerPlan.ACTIVE)
        self.assertEqual(annual_plan.billing_schedule, CustomerPlan.ANNUAL)
        self.assertEqual(annual_plan.invoicing_status, CustomerPlan.INITIAL_INVOICE_TO_BE_SENT)
        self.assertEqual(annual_plan.billing_cycle_anchor, self.next_month)
        self.assertEqual(annual_plan.next_invoice_date, self.next_month)
        # With manual management the only ledger entry is the renewal at the
        # purchased license count.
        annual_ledger_entries = LicenseLedger.objects.filter(plan=annual_plan).order_by("id")
        self.assert_length(annual_ledger_entries, 1)
        self.assertEqual(annual_ledger_entries[0].is_renewal, True)
        self.assertEqual(
            annual_ledger_entries.values_list("licenses", "licenses_at_next_renewal")[0],
            (num_licenses, num_licenses),
        )
        self.assertEqual(annual_plan.invoiced_through, None)
        # First call of invoice_plans_as_needed creates the new plan. Second call
        # calls invoice_plan on the newly created plan.
        invoice_plans_as_needed(self.next_month + timedelta(days=1))
        annual_plan.refresh_from_db()
        self.assertEqual(annual_plan.invoiced_through, annual_ledger_entries[0])
        self.assertEqual(annual_plan.next_invoice_date, add_months(self.next_month, 12))
        self.assertEqual(annual_plan.invoicing_status, CustomerPlan.DONE)
        assert customer.stripe_customer_id
        [invoice0, invoice1] = stripe.Invoice.list(customer=customer.stripe_customer_id)
        [invoice_item] = invoice0.get("lines")
        annual_plan_invoice_item_params = {
            "amount": num_licenses * 80 * 100,
            "description": "Zulip Cloud Standard - renewal",
            "plan": None,
            "quantity": num_licenses,
            "subscription": None,
            "discountable": False,
            "period": {
                "start": datetime_to_timestamp(self.next_month),
                "end": datetime_to_timestamp(add_months(self.next_month, 12)),
            },
        }
        for key, value in annual_plan_invoice_item_params.items():
            self.assertEqual(invoice_item[key], value)
        # Nothing to invoice mid-cycle.
        with patch("corporate.lib.stripe.invoice_plan") as m:
            invoice_plans_as_needed(add_months(self.now, 2))
            m.assert_not_called()
        # One year after the switch the annual plan renews at the same count.
        invoice_plans_as_needed(add_months(self.now, 13))
        [invoice0, invoice1, invoice2] = stripe.Invoice.list(customer=customer.stripe_customer_id)
        [invoice_item] = invoice0.get("lines")
        annual_plan_invoice_item_params = {
            "amount": num_licenses * 80 * 100,
            "description": "Zulip Cloud Standard - renewal",
            "plan": None,
            "quantity": num_licenses,
            "subscription": None,
            "discountable": False,
            "period": {
                "start": datetime_to_timestamp(add_months(self.next_month, 12)),
                "end": datetime_to_timestamp(add_months(self.next_month, 24)),
            },
        }
        for key, value in annual_plan_invoice_item_params.items():
            self.assertEqual(invoice_item[key], value)
def test_reupgrade_after_plan_status_changed_to_downgrade_at_end_of_cycle(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
with self.assertLogs("corporate.stripe", "INFO") as m:
with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
response = self.client_patch(
"/json/billing/plan", {"status": CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE}
)
stripe_customer_id = Customer.objects.get(realm=user.realm).id
new_plan = get_current_plan_by_realm(user.realm)
assert new_plan is not None
expected_log = f"INFO:corporate.stripe:Change plan status: Customer.id: {stripe_customer_id}, CustomerPlan.id: {new_plan.id}, status: {CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE}"
self.assertEqual(m.output[0], expected_log)
self.assert_json_success(response)
plan = CustomerPlan.objects.first()
assert plan is not None
self.assertEqual(plan.status, CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE)
with self.assertLogs("corporate.stripe", "INFO") as m:
with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
response = self.client_patch("/json/billing/plan", {"status": CustomerPlan.ACTIVE})
expected_log = f"INFO:corporate.stripe:Change plan status: Customer.id: {stripe_customer_id}, CustomerPlan.id: {new_plan.id}, status: {CustomerPlan.ACTIVE}"
self.assertEqual(m.output[0], expected_log)
self.assert_json_success(response)
plan = CustomerPlan.objects.first()
assert plan is not None
self.assertEqual(plan.status, CustomerPlan.ACTIVE)
    @patch("stripe.Invoice.create")
    @patch("stripe.Invoice.finalize_invoice")
    @patch("stripe.InvoiceItem.create")
    def test_downgrade_during_invoicing(self, *mocks: Mock) -> None:
        """Verify that a plan still in DOWNGRADE_AT_END_OF_CYCLE when invoicing
        runs is downgraded by the invoicing process itself."""
        # The difference between this test and test_downgrade is that
        # CustomerPlan.status is DOWNGRADE_AT_END_OF_CYCLE rather than ENDED
        # when we call invoice_plans_as_needed
        # This test is essentially checking that we call make_end_of_cycle_updates_if_needed
        # during the invoicing process.
        user = self.example_user("hamlet")
        self.login_user(user)
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        with self.assertLogs("corporate.stripe", "INFO") as m:
            stripe_customer_id = Customer.objects.get(realm=user.realm).id
            new_plan = get_current_plan_by_realm(user.realm)
            assert new_plan is not None
            with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
                self.client_patch(
                    "/json/billing/plan", {"status": CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE}
                )
            expected_log = f"INFO:corporate.stripe:Change plan status: Customer.id: {stripe_customer_id}, CustomerPlan.id: {new_plan.id}, status: {CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE}"
            self.assertEqual(m.output[0], expected_log)
        plan = CustomerPlan.objects.first()
        assert plan is not None
        self.assertIsNotNone(plan.next_invoice_date)
        self.assertEqual(plan.status, CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE)
        # Invoicing past the cycle end must perform the downgrade: the plan
        # ends and no further invoice date is scheduled.
        invoice_plans_as_needed(self.next_year)
        plan = CustomerPlan.objects.first()
        assert plan is not None
        self.assertIsNone(plan.next_invoice_date)
        self.assertEqual(plan.status, CustomerPlan.ENDED)
    def test_downgrade_free_trial(self) -> None:
        """Downgrading during a free trial ends the plan immediately, returns
        the realm to the Limited plan, and never invoices the customer."""
        user = self.example_user("hamlet")
        free_trial_end_date = self.now + timedelta(days=60)
        with self.settings(FREE_TRIAL_DAYS=60):
            with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
                self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, True)
            plan = CustomerPlan.objects.get()
            self.assertEqual(plan.next_invoice_date, free_trial_end_date)
            self.assertEqual(get_realm("zulip").plan_type, Realm.PLAN_TYPE_STANDARD)
            self.assertEqual(plan.status, CustomerPlan.FREE_TRIAL)
            # Add some extra users before the realm is deactivated
            with patch("corporate.lib.stripe.get_latest_seat_count", return_value=21):
                update_license_ledger_if_needed(user.realm, self.now)
            last_ledger_entry = LicenseLedger.objects.order_by("id").last()
            assert last_ledger_entry is not None
            self.assertEqual(last_ledger_entry.licenses, 21)
            self.assertEqual(last_ledger_entry.licenses_at_next_renewal, 21)
            self.login_user(user)
            # Ending the plan during the trial takes effect immediately.
            with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
                self.client_patch("/json/billing/plan", {"status": CustomerPlan.ENDED})
            plan.refresh_from_db()
            self.assertEqual(get_realm("zulip").plan_type, Realm.PLAN_TYPE_LIMITED)
            self.assertEqual(plan.status, CustomerPlan.ENDED)
            self.assertEqual(plan.invoiced_through, last_ledger_entry)
            self.assertIsNone(plan.next_invoice_date)
            self.login_user(user)
            response = self.client_get("/billing/")
            self.assert_in_success_response(
                ["Your organization is on the <b>Zulip Free</b>"], response
            )
            # The extra users added in the final month are not charged
            with patch("corporate.lib.stripe.invoice_plan") as mocked:
                invoice_plans_as_needed(self.next_month)
            mocked.assert_not_called()
            # The plan is not renewed after a year
            with patch("corporate.lib.stripe.invoice_plan") as mocked:
                invoice_plans_as_needed(self.next_year)
            mocked.assert_not_called()
    def test_reupgrade_by_billing_admin_after_downgrade(self) -> None:
        """After a scheduled downgrade takes effect, a billing admin can
        upgrade again; re-upgrading while the old plan is still active must
        fail with a BillingError instead."""
        user = self.example_user("hamlet")
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        self.login_user(user)
        with self.assertLogs("corporate.stripe", "INFO") as m:
            with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
                self.client_patch(
                    "/json/billing/plan", {"status": CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE}
                )
            stripe_customer_id = Customer.objects.get(realm=user.realm).id
            new_plan = get_current_plan_by_realm(user.realm)
            assert new_plan is not None
            expected_log = f"INFO:corporate.stripe:Change plan status: Customer.id: {stripe_customer_id}, CustomerPlan.id: {new_plan.id}, status: {CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE}"
            self.assertEqual(m.output[0], expected_log)
        # Upgrading while the (downgrade-pending but still active) plan
        # exists must be rejected.
        with self.assertRaises(BillingError) as context, self.assertLogs(
            "corporate.stripe", "WARNING"
        ) as m:
            with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
                self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        self.assertEqual(
            m.output[0],
            "WARNING:corporate.stripe:Upgrade of zulip failed because of existing active plan.",
        )
        self.assertEqual(
            context.exception.error_description, "subscribing with existing subscription"
        )
        # Let the downgrade take effect at the end of the cycle.
        invoice_plans_as_needed(self.next_year)
        response = self.client_get("/billing/")
        self.assert_in_success_response(["Your organization is on the <b>Zulip Free</b>"], response)
        # Now a fresh upgrade succeeds, creating a second plan on the same
        # Customer; the old plan stays ENDED with no future invoice date.
        with patch("corporate.lib.stripe.timezone_now", return_value=self.next_year):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        self.assertEqual(Customer.objects.count(), 1)
        self.assertEqual(CustomerPlan.objects.count(), 2)
        current_plan = CustomerPlan.objects.all().order_by("id").last()
        assert current_plan is not None
        next_invoice_date = add_months(self.next_year, 1)
        self.assertEqual(current_plan.next_invoice_date, next_invoice_date)
        self.assertEqual(get_realm("zulip").plan_type, Realm.PLAN_TYPE_STANDARD)
        self.assertEqual(current_plan.status, CustomerPlan.ACTIVE)
        old_plan = CustomerPlan.objects.all().order_by("id").first()
        assert old_plan is not None
        self.assertEqual(old_plan.next_invoice_date, None)
        self.assertEqual(old_plan.status, CustomerPlan.ENDED)
    @mock_stripe()
    def test_update_licenses_of_manual_plan_from_billing_page(self, *mocks: Mock) -> None:
        """For a manually-managed plan, exercise the license-update endpoint's
        validation errors, then a successful mid-cycle increase and a renewal
        decrease, checking the Stripe invoices generated for each."""
        user = self.example_user("hamlet")
        self.login_user(user)
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.upgrade(invoice=True, licenses=100)
        # No-op update: already at 100 licenses this cycle.
        with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
            result = self.client_patch("/json/billing/plan", {"licenses": 100})
            self.assert_json_error_contains(
                result, "Your plan is already on 100 licenses in the current billing period."
            )
        # No-op update: already renewing with 100 licenses.
        with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
            result = self.client_patch("/json/billing/plan", {"licenses_at_next_renewal": 100})
            self.assert_json_error_contains(
                result, "Your plan is already scheduled to renew with 100 licenses."
            )
        # Licenses cannot be reduced mid-cycle.
        with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
            result = self.client_patch("/json/billing/plan", {"licenses": 50})
            self.assert_json_error_contains(
                result, "You cannot decrease the licenses in the current billing period."
            )
        # Renewal count below the current seat count is rejected.
        with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
            result = self.client_patch("/json/billing/plan", {"licenses_at_next_renewal": 25})
            self.assert_json_error_contains(result, "You must invoice for at least 30 users.")
        # Very large invoices cannot be self-serviced.
        with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
            result = self.client_patch("/json/billing/plan", {"licenses": 2000})
            self.assert_json_error_contains(
                result, "Invoices with more than 1000 licenses can't be processed from this page."
            )
        # Valid mid-cycle increase from 100 to 150 licenses.
        with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
            result = self.client_patch("/json/billing/plan", {"licenses": 150})
            self.assert_json_success(result)
        invoice_plans_as_needed(self.next_year)
        stripe_customer = stripe_get_customer(
            assert_is_not_none(Customer.objects.get(realm=user.realm).stripe_customer_id)
        )
        # Newest invoice: renewal at 150 plus the 50 added licenses.
        [invoice, _] = stripe.Invoice.list(customer=stripe_customer.id)
        invoice_params = {
            "amount_due": (8000 * 150 + 8000 * 50),
            "amount_paid": 0,
            "attempt_count": 0,
            "auto_advance": True,
            "collection_method": "send_invoice",
            "statement_descriptor": "Zulip Cloud Standard",
            "status": "open",
            "total": (8000 * 150 + 8000 * 50),
        }
        for key, value in invoice_params.items():
            self.assertEqual(invoice.get(key), value)
        [renewal_item, extra_license_item] = invoice.lines
        line_item_params = {
            "amount": 8000 * 150,
            "description": "Zulip Cloud Standard - renewal",
            "discountable": False,
            "period": {
                "end": datetime_to_timestamp(self.next_year + timedelta(days=365)),
                "start": datetime_to_timestamp(self.next_year),
            },
            "plan": None,
            "proration": False,
            "quantity": 150,
        }
        for key, value in line_item_params.items():
            self.assertEqual(renewal_item.get(key), value)
        line_item_params = {
            "amount": 8000 * 50,
            "description": "Additional license (Jan 2, 2012 - Jan 2, 2013)",
            "discountable": False,
            "period": {
                "end": datetime_to_timestamp(self.next_year),
                "start": datetime_to_timestamp(self.now),
            },
            "plan": None,
            "proration": False,
            "quantity": 50,
        }
        for key, value in line_item_params.items():
            self.assertEqual(extra_license_item.get(key), value)
        # Decreasing at renewal time (150 -> 120) is allowed; the following
        # year's renewal invoice reflects the lower count.
        with patch("corporate.views.billing_page.timezone_now", return_value=self.next_year):
            result = self.client_patch("/json/billing/plan", {"licenses_at_next_renewal": 120})
            self.assert_json_success(result)
        invoice_plans_as_needed(self.next_year + timedelta(days=365))
        stripe_customer = stripe_get_customer(
            assert_is_not_none(Customer.objects.get(realm=user.realm).stripe_customer_id)
        )
        [invoice, _, _] = stripe.Invoice.list(customer=stripe_customer.id)
        invoice_params = {
            "amount_due": 8000 * 120,
            "amount_paid": 0,
            "attempt_count": 0,
            "auto_advance": True,
            "collection_method": "send_invoice",
            "statement_descriptor": "Zulip Cloud Standard",
            "status": "open",
            "total": 8000 * 120,
        }
        for key, value in invoice_params.items():
            self.assertEqual(invoice.get(key), value)
        [renewal_item] = invoice.lines
        line_item_params = {
            "amount": 8000 * 120,
            "description": "Zulip Cloud Standard - renewal",
            "discountable": False,
            "period": {
                "end": datetime_to_timestamp(self.next_year + timedelta(days=2 * 365)),
                "start": datetime_to_timestamp(self.next_year + timedelta(days=365)),
            },
            "plan": None,
            "proration": False,
            "quantity": 120,
        }
        for key, value in line_item_params.items():
            self.assertEqual(renewal_item.get(key), value)
def test_update_licenses_of_automatic_plan_from_billing_page(self) -> None:
user = self.example_user("hamlet")
self.login_user(user)
with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
result = self.client_patch("/json/billing/plan", {"licenses": 100})
self.assert_json_error_contains(result, "Your plan is on automatic license management.")
with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
result = self.client_patch("/json/billing/plan", {"licenses_at_next_renewal": 100})
self.assert_json_error_contains(result, "Your plan is on automatic license management.")
def test_update_plan_with_invalid_status(self) -> None:
with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
self.login_user(self.example_user("hamlet"))
response = self.client_patch(
"/json/billing/plan",
{"status": CustomerPlan.NEVER_STARTED},
)
self.assert_json_error_contains(response, "Invalid status")
def test_update_plan_without_any_params(self) -> None:
with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
self.login_user(self.example_user("hamlet"))
with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
response = self.client_patch("/json/billing/plan", {})
self.assert_json_error_contains(response, "Nothing to change")
    def test_update_plan_that_which_is_due_for_expiry(self) -> None:
        """Once a downgrade-scheduled plan's billing cycle has ended, the
        plan can no longer be switched back to active."""
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        self.login_user(self.example_user("hamlet"))
        with self.assertLogs("corporate.stripe", "INFO") as m:
            with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
                # Schedule the downgrade while the plan is still active.
                result = self.client_patch(
                    "/json/billing/plan", {"status": CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE}
                )
                self.assert_json_success(result)
            # status: 2 in the log corresponds to the requested
            # DOWNGRADE_AT_END_OF_CYCLE status.
            self.assertRegex(
                m.output[0],
                r"INFO:corporate.stripe:Change plan status: Customer.id: \d*, CustomerPlan.id: \d*, status: 2",
            )
        # A year later, the annual plan has ended; attempting to reactivate
        # it is rejected.
        with patch("corporate.lib.stripe.timezone_now", return_value=self.next_year):
            result = self.client_patch("/json/billing/plan", {"status": CustomerPlan.ACTIVE})
            self.assert_json_error_contains(
                result, "Unable to update the plan. The plan has ended."
            )
    def test_update_plan_that_which_is_due_for_replacement(self) -> None:
        """Once a monthly plan has been replaced by its scheduled annual
        successor, the old plan can no longer be updated."""
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.MONTHLY, True, False)
        self.login_user(self.example_user("hamlet"))
        with self.assertLogs("corporate.stripe", "INFO") as m:
            with patch("corporate.views.billing_page.timezone_now", return_value=self.now):
                # Schedule a switch to annual billing at end of cycle.
                result = self.client_patch(
                    "/json/billing/plan", {"status": CustomerPlan.SWITCH_TO_ANNUAL_AT_END_OF_CYCLE}
                )
                self.assert_json_success(result)
            # status: 4 in the log corresponds to the requested
            # SWITCH_TO_ANNUAL_AT_END_OF_CYCLE status.
            self.assertRegex(
                m.output[0],
                r"INFO:corporate.stripe:Change plan status: Customer.id: \d*, CustomerPlan.id: \d*, status: 4",
            )
        # A month later, the monthly plan has been replaced; any further
        # update attempt against it is rejected.
        with patch("corporate.lib.stripe.timezone_now", return_value=self.next_month):
            result = self.client_patch("/json/billing/plan", {})
            self.assert_json_error_contains(
                result,
                "Unable to update the plan. The plan has been expired and replaced with a new plan.",
            )
    @patch("corporate.lib.stripe.billing_logger.info")
    def test_deactivate_realm(self, mock_: Mock) -> None:
        """Deactivating a realm ends its plan, downgrades it to the free
        plan, and stops all further invoicing for it."""
        user = self.example_user("hamlet")
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        plan = CustomerPlan.objects.get()
        self.assertEqual(plan.next_invoice_date, self.next_month)
        self.assertEqual(get_realm("zulip").plan_type, Realm.PLAN_TYPE_STANDARD)
        self.assertEqual(plan.status, CustomerPlan.ACTIVE)
        # Add some extra users before the realm is deactivated
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=20):
            update_license_ledger_if_needed(user.realm, self.now)
        last_ledger_entry = LicenseLedger.objects.order_by("id").last()
        assert last_ledger_entry is not None
        self.assertEqual(last_ledger_entry.licenses, 20)
        self.assertEqual(last_ledger_entry.licenses_at_next_renewal, 20)
        do_deactivate_realm(get_realm("zulip"), acting_user=None)
        plan.refresh_from_db()
        self.assertTrue(get_realm("zulip").deactivated)
        # Deactivation downgrades the realm to the limited (free) plan and
        # ends the paid plan, closing out its invoicing state.
        self.assertEqual(get_realm("zulip").plan_type, Realm.PLAN_TYPE_LIMITED)
        self.assertEqual(plan.status, CustomerPlan.ENDED)
        self.assertEqual(plan.invoiced_through, last_ledger_entry)
        self.assertIsNone(plan.next_invoice_date)
        do_reactivate_realm(get_realm("zulip"))
        self.login_user(user)
        response = self.client_get("/billing/")
        self.assert_in_success_response(["Your organization is on the <b>Zulip Free</b>"], response)
        # The extra users added in the final month are not charged
        with patch("corporate.lib.stripe.invoice_plan") as mocked:
            invoice_plans_as_needed(self.next_month)
        mocked.assert_not_called()
        # The plan is not renewed after an year
        with patch("corporate.lib.stripe.invoice_plan") as mocked:
            invoice_plans_as_needed(self.next_year)
        mocked.assert_not_called()
    def test_reupgrade_by_billing_admin_after_realm_deactivation(self) -> None:
        """After deactivation and reactivation, a billing admin can upgrade
        again; a second plan is created and the old one stays ended."""
        user = self.example_user("hamlet")
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        do_deactivate_realm(get_realm("zulip"), acting_user=None)
        self.assertTrue(get_realm("zulip").deactivated)
        do_reactivate_realm(get_realm("zulip"))
        self.login_user(user)
        response = self.client_get("/billing/")
        self.assert_in_success_response(["Your organization is on the <b>Zulip Free</b>"], response)
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        # The same Customer row is reused, but a second CustomerPlan is
        # created for the re-upgrade.
        self.assertEqual(Customer.objects.count(), 1)
        self.assertEqual(CustomerPlan.objects.count(), 2)
        current_plan = CustomerPlan.objects.all().order_by("id").last()
        assert current_plan is not None
        self.assertEqual(current_plan.next_invoice_date, self.next_month)
        self.assertEqual(get_realm("zulip").plan_type, Realm.PLAN_TYPE_STANDARD)
        self.assertEqual(current_plan.status, CustomerPlan.ACTIVE)
        # The original plan remains ended with no further invoicing.
        old_plan = CustomerPlan.objects.all().order_by("id").first()
        assert old_plan is not None
        self.assertEqual(old_plan.next_invoice_date, None)
        self.assertEqual(old_plan.status, CustomerPlan.ENDED)
    @mock_stripe()
    def test_void_all_open_invoices(self, *mock: Mock) -> None:
        """void_all_open_invoices voids every open Stripe invoice for a
        realm's customer and returns the number of invoices voided."""
        iago = self.example_user("iago")
        king = self.lear_user("king")
        # With no Customer at all, there is nothing to void.
        self.assertEqual(void_all_open_invoices(iago.realm), 0)
        zulip_customer = update_or_create_stripe_customer(iago)
        lear_customer = update_or_create_stripe_customer(king)
        # Create one finalized, unpaid ("open") invoice for the zulip realm.
        assert zulip_customer.stripe_customer_id
        stripe.InvoiceItem.create(
            currency="usd",
            customer=zulip_customer.stripe_customer_id,
            description="Zulip Cloud Standard upgrade",
            discountable=False,
            unit_amount=800,
            quantity=8,
        )
        stripe_invoice = stripe.Invoice.create(
            auto_advance=True,
            collection_method="send_invoice",
            customer=zulip_customer.stripe_customer_id,
            days_until_due=30,
            statement_descriptor="Zulip Cloud Standard",
        )
        stripe.Invoice.finalize_invoice(stripe_invoice)
        # ...and one for the lear realm.
        assert lear_customer.stripe_customer_id
        stripe.InvoiceItem.create(
            currency="usd",
            customer=lear_customer.stripe_customer_id,
            description="Zulip Cloud Standard upgrade",
            discountable=False,
            unit_amount=800,
            quantity=8,
        )
        stripe_invoice = stripe.Invoice.create(
            auto_advance=True,
            collection_method="send_invoice",
            customer=lear_customer.stripe_customer_id,
            days_until_due=30,
            statement_descriptor="Zulip Cloud Standard",
        )
        stripe.Invoice.finalize_invoice(stripe_invoice)
        # Voiding the zulip realm's invoice does not touch lear's.
        self.assertEqual(void_all_open_invoices(iago.realm), 1)
        invoices = stripe.Invoice.list(customer=zulip_customer.stripe_customer_id)
        self.assert_length(invoices, 1)
        for invoice in invoices:
            self.assertEqual(invoice.status, "void")
        # A customer whose stripe_customer_id has been cleared has nothing
        # to void; restoring the id makes the open invoice voidable again.
        lear_stripe_customer_id = lear_customer.stripe_customer_id
        lear_customer.stripe_customer_id = None
        lear_customer.save(update_fields=["stripe_customer_id"])
        self.assertEqual(void_all_open_invoices(king.realm), 0)
        lear_customer.stripe_customer_id = lear_stripe_customer_id
        lear_customer.save(update_fields=["stripe_customer_id"])
        self.assertEqual(void_all_open_invoices(king.realm), 1)
        invoices = stripe.Invoice.list(customer=lear_customer.stripe_customer_id)
        self.assert_length(invoices, 1)
        for invoice in invoices:
            self.assertEqual(invoice.status, "void")
def create_invoices(self, customer: Customer, num_invoices: int) -> List[stripe.Invoice]:
invoices = []
assert customer.stripe_customer_id is not None
for _ in range(num_invoices):
stripe.InvoiceItem.create(
amount=10000,
currency="usd",
customer=customer.stripe_customer_id,
description="Zulip Cloud Standard",
discountable=False,
)
invoice = stripe.Invoice.create(
auto_advance=True,
collection_method="send_invoice",
customer=customer.stripe_customer_id,
days_until_due=DEFAULT_INVOICE_DAYS_UNTIL_DUE,
statement_descriptor="Zulip Cloud Standard",
)
stripe.Invoice.finalize_invoice(invoice)
invoices.append(invoice)
return invoices
@mock_stripe()
def test_downgrade_small_realms_behind_on_payments_as_needed(self, *mock: Mock) -> None:
def create_realm(
users_to_create: int,
create_stripe_customer: bool,
create_plan: bool,
num_invoices: Optional[int] = None,
) -> Tuple[Realm, Optional[Customer], Optional[CustomerPlan], List[stripe.Invoice]]:
realm_string_id = "realm_" + str(random.randrange(1, 1000000))
realm = Realm.objects.create(string_id=realm_string_id)
users = []
for i in range(users_to_create):
user = UserProfile.objects.create(
delivery_email=f"user-{i}-{realm_string_id}@zulip.com",
email=f"user-{i}-{realm_string_id}@zulip.com",
realm=realm,
)
users.append(user)
customer = None
if create_stripe_customer:
customer = do_create_stripe_customer(users[0])
plan = None
if create_plan:
plan, _ = self.subscribe_realm_to_monthly_plan_on_manual_license_management(
realm, users_to_create, users_to_create
)
invoices = []
if num_invoices is not None:
assert customer is not None
invoices = self.create_invoices(customer, num_invoices)
return realm, customer, plan, invoices
@dataclass
class Row:
realm: Realm
expected_plan_type: int
plan: Optional[CustomerPlan]
expected_plan_status: Optional[int]
void_all_open_invoices_mock_called: bool
email_expected_to_be_sent: bool
rows: List[Row] = []
realm, _, _, _ = create_realm(
users_to_create=1, create_stripe_customer=False, create_plan=False
)
# To create local Customer object but no Stripe customer.
attach_discount_to_realm(realm, Decimal(20), acting_user=None)
rows.append(Row(realm, Realm.PLAN_TYPE_SELF_HOSTED, None, None, False, False))
realm, _, _, _ = create_realm(
users_to_create=1, create_stripe_customer=True, create_plan=False
)
rows.append(Row(realm, Realm.PLAN_TYPE_SELF_HOSTED, None, None, False, False))
realm, customer, _, _ = create_realm(
users_to_create=1, create_stripe_customer=True, create_plan=False, num_invoices=1
)
rows.append(Row(realm, Realm.PLAN_TYPE_SELF_HOSTED, None, None, True, False))
realm, _, plan, _ = create_realm(
users_to_create=1, create_stripe_customer=True, create_plan=True
)
rows.append(Row(realm, Realm.PLAN_TYPE_STANDARD, plan, CustomerPlan.ACTIVE, False, False))
realm, customer, plan, _ = create_realm(
users_to_create=1, create_stripe_customer=True, create_plan=True, num_invoices=1
)
rows.append(Row(realm, Realm.PLAN_TYPE_STANDARD, plan, CustomerPlan.ACTIVE, False, False))
realm, customer, plan, _ = create_realm(
users_to_create=3, create_stripe_customer=True, create_plan=True, num_invoices=2
)
rows.append(Row(realm, Realm.PLAN_TYPE_LIMITED, plan, CustomerPlan.ENDED, True, True))
realm, customer, plan, invoices = create_realm(
users_to_create=1, create_stripe_customer=True, create_plan=True, num_invoices=2
)
for invoice in invoices:
stripe.Invoice.pay(invoice, paid_out_of_band=True)
rows.append(Row(realm, Realm.PLAN_TYPE_STANDARD, plan, CustomerPlan.ACTIVE, False, False))
realm, customer, plan, _ = create_realm(
users_to_create=20, create_stripe_customer=True, create_plan=True, num_invoices=2
)
rows.append(Row(realm, Realm.PLAN_TYPE_STANDARD, plan, CustomerPlan.ACTIVE, False, False))
with patch("corporate.lib.stripe.void_all_open_invoices") as void_all_open_invoices_mock:
downgrade_small_realms_behind_on_payments_as_needed()
from django.core.mail import outbox
for row in rows:
row.realm.refresh_from_db()
self.assertEqual(row.realm.plan_type, row.expected_plan_type)
if row.plan is not None:
row.plan.refresh_from_db()
self.assertEqual(row.plan.status, row.expected_plan_status)
if row.void_all_open_invoices_mock_called:
void_all_open_invoices_mock.assert_any_call(row.realm)
else:
try:
void_all_open_invoices_mock.assert_any_call(row.realm)
except AssertionError:
pass
else: # nocoverage
raise AssertionError("void_all_open_invoices_mock should not be called")
email_found = False
for email in outbox:
recipient = UserProfile.objects.get(email=email.to[0])
if recipient.realm == row.realm:
self.assertIn(
f"Your organization, http://{row.realm.string_id}.testserver, has been downgraded",
outbox[0].body,
)
self.assert_length(email.to, 1)
self.assertTrue(recipient.is_billing_admin)
email_found = True
self.assertEqual(row.email_expected_to_be_sent, email_found)
    @mock_stripe()
    def test_switch_realm_from_standard_to_plus_plan(self, *mock: Mock) -> None:
        """Upgrading Standard -> Plus ends the old plan, starts a Plus plan,
        credits the unused Standard proration, and invoices the difference."""
        iago = self.example_user("iago")
        realm = iago.realm
        # Test upgrading to Plus when realm has no Standard subscription
        with self.assertRaises(BillingError) as billing_context:
            switch_realm_from_standard_to_plus_plan(realm)
        self.assertEqual(
            "Organization does not have an active Standard plan",
            billing_context.exception.error_description,
        )
        plan, ledger = self.subscribe_realm_to_manual_license_management_plan(
            realm, 9, 9, CustomerPlan.MONTHLY
        )
        # Test upgrading to Plus when realm has no stripe_customer_id
        with self.assertRaises(BillingError) as billing_context:
            switch_realm_from_standard_to_plus_plan(realm)
        self.assertEqual(
            "Organization missing Stripe customer.", billing_context.exception.error_description
        )
        # Now set up a realm with a real Stripe customer on a Standard
        # monthly plan with 9 licenses, already invoiced through its ledger.
        king = self.lear_user("king")
        realm = king.realm
        customer = update_or_create_stripe_customer(king)
        plan = CustomerPlan.objects.create(
            customer=customer,
            automanage_licenses=True,
            billing_cycle_anchor=timezone_now(),
            billing_schedule=CustomerPlan.MONTHLY,
            tier=CustomerPlan.STANDARD,
        )
        ledger = LicenseLedger.objects.create(
            plan=plan,
            is_renewal=True,
            event_time=timezone_now(),
            licenses=9,
            licenses_at_next_renewal=9,
        )
        realm.plan_type = Realm.PLAN_TYPE_STANDARD
        realm.save(update_fields=["plan_type"])
        plan.invoiced_through = ledger
        plan.price_per_license = get_price_per_license(CustomerPlan.STANDARD, CustomerPlan.MONTHLY)
        plan.save(update_fields=["invoiced_through", "price_per_license"])
        switch_realm_from_standard_to_plus_plan(realm)
        plan.refresh_from_db()
        self.assertEqual(plan.status, CustomerPlan.ENDED)
        plus_plan = get_current_plan_by_realm(realm)
        assert plus_plan is not None
        self.assertEqual(plus_plan.tier, CustomerPlan.PLUS)
        self.assertEqual(LicenseLedger.objects.filter(plan=plus_plan).count(), 1)
        # There are 9 licenses and the realm is on the Standard monthly plan.
        # Therefore, the customer has already paid 800 * 9 = 7200 = $72 for
        # the month. Once they upgrade to Plus, the new price for their 9
        # licenses will be 1600 * 9 = 14400 = $144. Since the customer has
        # already paid $72 for a month, -7200 = -$72 will be credited to the
        # customer's balance.
        stripe_customer_id = customer.stripe_customer_id
        assert stripe_customer_id is not None
        _, cb_txn = stripe.Customer.list_balance_transactions(stripe_customer_id)
        self.assertEqual(cb_txn.amount, -7200)
        self.assertEqual(
            cb_txn.description,
            "Credit from early termination of Standard plan",
        )
        self.assertEqual(cb_txn.type, "adjustment")
        # The customer now only pays the difference 14400 - 7200 = 7200 = $72,
        # since the unused proration is for the whole month.
        (invoice,) = stripe.Invoice.list(customer=stripe_customer_id)
        self.assertEqual(invoice.amount_due, 7200)
def test_update_billing_method_of_current_plan(self) -> None:
realm = get_realm("zulip")
customer = Customer.objects.create(realm=realm, stripe_customer_id="cus_12345")
plan = CustomerPlan.objects.create(
customer=customer,
status=CustomerPlan.ACTIVE,
billing_cycle_anchor=timezone_now(),
billing_schedule=CustomerPlan.ANNUAL,
tier=CustomerPlan.STANDARD,
)
self.assertEqual(plan.charge_automatically, False)
iago = self.example_user("iago")
update_billing_method_of_current_plan(realm, True, acting_user=iago)
plan.refresh_from_db()
self.assertEqual(plan.charge_automatically, True)
realm_audit_log = RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_BILLING_METHOD_CHANGED
).last()
assert realm_audit_log is not None
expected_extra_data = {"charge_automatically": plan.charge_automatically}
self.assertEqual(realm_audit_log.acting_user, iago)
self.assertEqual(realm_audit_log.extra_data, str(expected_extra_data))
update_billing_method_of_current_plan(realm, False, acting_user=iago)
plan.refresh_from_db()
self.assertEqual(plan.charge_automatically, False)
realm_audit_log = RealmAuditLog.objects.filter(
event_type=RealmAuditLog.REALM_BILLING_METHOD_CHANGED
).last()
assert realm_audit_log is not None
expected_extra_data = {"charge_automatically": plan.charge_automatically}
self.assertEqual(realm_audit_log.acting_user, iago)
self.assertEqual(realm_audit_log.extra_data, str(expected_extra_data))
    @mock_stripe()
    def test_customer_has_credit_card_as_default_payment_method(self, *mocks: Mock) -> None:
        """The helper returns False until a card is attached to the Stripe
        customer as its default payment method."""
        iago = self.example_user("iago")
        # Local Customer row without any Stripe customer behind it.
        customer = Customer.objects.create(realm=iago.realm)
        self.assertFalse(customer_has_credit_card_as_default_payment_method(customer))
        # Stripe customer exists but has no payment method attached.
        customer = do_create_stripe_customer(iago)
        self.assertFalse(customer_has_credit_card_as_default_payment_method(customer))
        # Stripe customer created with a Visa test card attached.
        customer = do_create_stripe_customer(
            iago,
            payment_method=create_payment_method(
                self.get_test_card_number(
                    attaches_to_customer=True, charge_succeeds=True, card_provider="visa"
                )
            ).id,
        )
        self.assertTrue(customer_has_credit_card_as_default_payment_method(customer))
class StripeWebhookEndpointTest(ZulipTestCase):
    """Tests for the /stripe/webhook/ endpoint: payload validation, API
    version checking, and idempotent dispatch of Stripe events to their
    handler functions."""

    def test_stripe_webhook_with_invalid_data(self) -> None:
        """A JSON body that is not a Stripe event object is rejected with 400."""
        result = self.client_post(
            "/stripe/webhook/",
            '["dsdsds"]',
            content_type="application/json",
        )
        self.assertEqual(result.status_code, 400)

    def test_stripe_webhook_endpoint_invalid_api_version(self) -> None:
        """Events sent with a Stripe API version other than the billing
        system's version are logged as errors."""
        event_data = {
            "id": "stripe_event_id",
            "api_version": "1991-02-20",
            "type": "event_type",
            "data": {"object": {"object": "checkout.session", "id": "stripe_session_id"}},
        }
        expected_error_message = rf"Mismatch between billing system Stripe API version({STRIPE_API_VERSION}) and Stripe webhook event API version(1991-02-20)."
        with self.assertLogs("corporate.stripe", "ERROR") as error_log:
            self.client_post(
                "/stripe/webhook/",
                event_data,
                content_type="application/json",
            )
        self.assertEqual(error_log.output, [f"ERROR:corporate.stripe:{expected_error_message}"])

    def test_stripe_webhook_for_session_completed_event(self) -> None:
        """checkout.session.completed events are dispatched only when a
        matching Session row exists, and each Stripe event id is processed
        at most once."""
        valid_session_event_data = {
            "id": "stripe_event_id",
            "api_version": STRIPE_API_VERSION,
            "type": "checkout.session.completed",
            "data": {"object": {"object": "checkout.session", "id": "stripe_session_id"}},
        }
        # No Session row yet: the event is acknowledged but not handled and
        # no Event row is recorded.
        with patch("corporate.views.webhook.handle_checkout_session_completed_event") as m:
            result = self.client_post(
                "/stripe/webhook/",
                valid_session_event_data,
                content_type="application/json",
            )
        self.assert_length(Event.objects.all(), 0)
        self.assertEqual(result.status_code, 200)
        m.assert_not_called()
        customer = Customer.objects.create(realm=get_realm("zulip"))
        Session.objects.create(
            stripe_session_id="stripe_session_id",
            customer=customer,
            type=Session.UPGRADE_FROM_BILLING_PAGE,
        )
        self.assert_length(Event.objects.all(), 0)
        # Matching Session row exists: an Event row is created and the
        # handler is invoked with the deserialized session object.
        with patch("corporate.views.webhook.handle_checkout_session_completed_event") as m:
            result = self.client_post(
                "/stripe/webhook/",
                valid_session_event_data,
                content_type="application/json",
            )
        [event] = Event.objects.all()
        self.assertEqual(result.status_code, 200)
        strip_event = stripe.Event.construct_from(valid_session_event_data, stripe.api_key)
        m.assert_called_once_with(strip_event.data.object, event)
        # Replaying the same Stripe event id is a no-op (idempotency).
        with patch("corporate.views.webhook.handle_checkout_session_completed_event") as m:
            result = self.client_post(
                "/stripe/webhook/",
                valid_session_event_data,
                content_type="application/json",
            )
        self.assert_length(Event.objects.all(), 1)
        self.assertEqual(result.status_code, 200)
        m.assert_not_called()

    def test_stripe_webhook_for_payment_intent_events(self) -> None:
        """payment_intent.succeeded / payment_failed events are dispatched
        only when a matching PaymentIntent row exists, and replays of the
        same Stripe event id are ignored."""
        customer = Customer.objects.create(realm=get_realm("zulip"))
        for index, event_type in enumerate(
            ["payment_intent.succeeded", "payment_intent.payment_failed"]
        ):
            # Each event type has a handler named after it in
            # corporate.views.webhook.
            handler_function_name = "handle_" + event_type.replace(".", "_") + "_event"
            handler_function_path = f"corporate.views.webhook.{handler_function_name}"
            stripe_event_id = f"stripe_event_id_{index}"
            stripe_payment_intent_id = f"stripe_payment_intent_id{index}"
            valid_session_event_data = {
                "id": stripe_event_id,
                "type": event_type,
                "api_version": STRIPE_API_VERSION,
                "data": {"object": {"object": "payment_intent", "id": stripe_payment_intent_id}},
            }
            # No PaymentIntent row yet: acknowledged but not handled.
            with patch(handler_function_path) as m:
                result = self.client_post(
                    "/stripe/webhook/",
                    valid_session_event_data,
                    content_type="application/json",
                )
            self.assert_length(Event.objects.filter(stripe_event_id=stripe_event_id), 0)
            self.assertEqual(result.status_code, 200)
            m.assert_not_called()
            PaymentIntent.objects.create(
                stripe_payment_intent_id=stripe_payment_intent_id,
                customer=customer,
                status=PaymentIntent.REQUIRES_PAYMENT_METHOD,
            )
            self.assert_length(Event.objects.filter(stripe_event_id=stripe_event_id), 0)
            # PaymentIntent row exists: an Event is recorded and the handler runs.
            with patch(handler_function_path) as m:
                result = self.client_post(
                    "/stripe/webhook/",
                    valid_session_event_data,
                    content_type="application/json",
                )
            [event] = Event.objects.filter(stripe_event_id=stripe_event_id)
            self.assertEqual(result.status_code, 200)
            strip_event = stripe.Event.construct_from(valid_session_event_data, stripe.api_key)
            m.assert_called_once_with(strip_event.data.object, event)
            # The same event id sent again is a no-op (idempotency).
            with patch(handler_function_path) as m:
                result = self.client_post(
                    "/stripe/webhook/",
                    valid_session_event_data,
                    content_type="application/json",
                )
            self.assert_length(Event.objects.filter(stripe_event_id=stripe_event_id), 1)
            self.assertEqual(result.status_code, 200)
            m.assert_not_called()
class EventStatusTest(StripeTestCase):
    """Tests for the billing event-status JSON endpoint and page."""

    def test_event_status_json_endpoint_errors(self) -> None:
        """Error cases: missing Customer, unknown session / payment-intent
        ids, and neither id parameter supplied."""
        self.login_user(self.example_user("iago"))
        response = self.client_get("/json/billing/event/status")
        self.assert_json_error_contains(response, "No customer for this organization!")
        Customer.objects.create(realm=get_realm("zulip"), stripe_customer_id="cus_123")
        response = self.client_get(
            "/json/billing/event/status", {"stripe_session_id": "invalid_session_id"}
        )
        self.assert_json_error_contains(response, "Session not found")
        response = self.client_get(
            "/json/billing/event/status", {"stripe_payment_intent_id": "invalid_payment_intent_id"}
        )
        self.assert_json_error_contains(response, "Payment intent not found")
        # At least one of the two id parameters must be supplied.
        response = self.client_get(
            "/json/billing/event/status",
        )
        self.assert_json_error_contains(
            response, "Pass stripe_session_id or stripe_payment_intent_id"
        )

    def test_event_status_page(self) -> None:
        """The event-status page embeds whichever id it was given as a data
        attribute for the frontend to poll with."""
        self.login_user(self.example_user("polonius"))
        stripe_session_id = "cs_test_9QCz62mPTJQUwvhcwZHBpJMHmMZiLU512AQHU9g5znkx6NweU3j7kJvY"
        response = self.client_get(
            "/billing/event_status/", {"stripe_session_id": stripe_session_id}
        )
        self.assert_in_success_response([f'data-stripe-session-id="{stripe_session_id}"'], response)
        stripe_payment_intent_id = "pi_1JGLpnA4KHR4JzRvUfkF9Tn7"
        response = self.client_get(
            "/billing/event_status/", {"stripe_payment_intent_id": stripe_payment_intent_id}
        )
        self.assert_in_success_response(
            [f'data-stripe-payment-intent-id="{stripe_payment_intent_id}"'], response
        )
class RequiresBillingAccessTest(StripeTestCase):
    """Verify which user roles may reach each billing endpoint and page."""

    def setUp(self, *mocks: Mock) -> None:
        # Make hamlet a billing admin and desdemona an organization owner so
        # the permission checks below cover each relevant role.
        super().setUp()
        hamlet = self.example_user("hamlet")
        hamlet.is_billing_admin = True
        hamlet.save(update_fields=["is_billing_admin"])
        desdemona = self.example_user("desdemona")
        desdemona.role = UserProfile.ROLE_REALM_OWNER
        desdemona.save(update_fields=["role"])

    def test_json_endpoints_permissions(self) -> None:
        """Each JSON billing endpoint rejects roles that lack access, and
        every corporate JSON endpoint is covered by this test."""
        guest = self.example_user("polonius")
        member = self.example_user("othello")
        realm_admin = self.example_user("iago")
        billing_admin = self.example_user("hamlet")
        billing_admin.is_billing_admin = True
        billing_admin.save(update_fields=["is_billing_admin"])
        # URLs exercised below, checked for completeness at the end.
        tested_endpoints = set()

        def check_users_cant_access(
            users: List[UserProfile],
            error_message: str,
            url: str,
            method: str,
            data: Dict[str, Any],
        ) -> None:
            """Assert each user in `users` gets `error_message` from `url`."""
            tested_endpoints.add(url)
            for user in users:
                self.login_user(user)
                if method == "POST":
                    client_func: Any = self.client_post
                elif method == "GET":
                    client_func = self.client_get
                else:
                    client_func = self.client_patch
                result = client_func(
                    url,
                    data,
                    content_type="application/json",
                )
                self.assert_json_error_contains(result, error_message)

        check_users_cant_access(
            [guest],
            "Must be an organization member",
            "/json/billing/upgrade",
            "POST",
            {},
        )
        check_users_cant_access(
            [guest],
            "Must be an organization member",
            "/json/billing/sponsorship",
            "POST",
            {},
        )
        check_users_cant_access(
            [guest, member, realm_admin],
            "Must be a billing administrator or an organization owner",
            "/json/billing/plan",
            "PATCH",
            {},
        )
        check_users_cant_access(
            [guest, member, realm_admin],
            "Must be a billing administrator or an organization owner",
            "/json/billing/session/start_card_update_session",
            "POST",
            {},
        )
        check_users_cant_access(
            [guest],
            "Must be an organization member",
            "/json/billing/session/start_retry_payment_intent_session",
            "POST",
            {},
        )
        check_users_cant_access(
            [guest],
            "Must be an organization member",
            "/json/billing/event/status",
            "GET",
            {},
        )
        # Make sure that we are testing all the JSON endpoints
        # Quite a hack, but probably fine for now
        reverse_dict = get_resolver("corporate.urls").reverse_dict
        json_endpoints = {
            pat
            for name in reverse_dict
            for matches, pat, defaults, converters in reverse_dict.getlist(name)
            if pat.startswith(re.escape("json/"))
        }
        self.assert_length(json_endpoints, len(tested_endpoints))

    @mock_stripe()
    def test_billing_page_permissions(self, *mocks: Mock) -> None:
        """Page-level access: guests get 404, members may upgrade and view
        billing, owners may view billing, other members may not."""
        # Guest users can't access /upgrade page
        self.login_user(self.example_user("polonius"))
        response = self.client_get("/upgrade/", follow=True)
        self.assertEqual(response.status_code, 404)
        # Check that non-admins can access /upgrade via /billing, when there is no Customer object
        self.login_user(self.example_user("hamlet"))
        response = self.client_get("/billing/")
        self.assertEqual(response.status_code, 302)
        self.assertEqual("/upgrade/", response["Location"])
        # Check that non-admins can sign up and pay
        self.upgrade()
        # Check that the non-admin hamlet can still access /billing
        response = self.client_get("/billing/")
        self.assert_in_success_response(["Your current plan is"], response)
        # Check realm owners can access billing, even though they are not a billing admin
        desdemona = self.example_user("desdemona")
        desdemona.role = UserProfile.ROLE_REALM_OWNER
        desdemona.save(update_fields=["role"])
        self.login_user(self.example_user("desdemona"))
        response = self.client_get("/billing/")
        self.assert_in_success_response(["Your current plan is"], response)
        # Check that member who is not a billing admin does not have access
        self.login_user(self.example_user("cordelia"))
        response = self.client_get("/billing/")
        self.assert_in_success_response(
            ["You must be an organization owner or a billing administrator"], response
        )
class BillingHelpersTest(ZulipTestCase):
def test_next_month(self) -> None:
anchor = datetime(2019, 12, 31, 1, 2, 3, tzinfo=timezone.utc)
period_boundaries = [
anchor,
datetime(2020, 1, 31, 1, 2, 3, tzinfo=timezone.utc),
# Test that this is the 28th even during leap years
datetime(2020, 2, 28, 1, 2, 3, tzinfo=timezone.utc),
datetime(2020, 3, 31, 1, 2, 3, tzinfo=timezone.utc),
datetime(2020, 4, 30, 1, 2, 3, tzinfo=timezone.utc),
datetime(2020, 5, 31, 1, 2, 3, tzinfo=timezone.utc),
datetime(2020, 6, 30, 1, 2, 3, tzinfo=timezone.utc),
datetime(2020, 7, 31, 1, 2, 3, tzinfo=timezone.utc),
datetime(2020, 8, 31, 1, 2, 3, tzinfo=timezone.utc),
datetime(2020, 9, 30, 1, 2, 3, tzinfo=timezone.utc),
datetime(2020, 10, 31, 1, 2, 3, tzinfo=timezone.utc),
datetime(2020, 11, 30, 1, 2, 3, tzinfo=timezone.utc),
datetime(2020, 12, 31, 1, 2, 3, tzinfo=timezone.utc),
datetime(2021, 1, 31, 1, 2, 3, tzinfo=timezone.utc),
datetime(2021, 2, 28, 1, 2, 3, tzinfo=timezone.utc),
]
with self.assertRaises(AssertionError):
add_months(anchor, -1)
# Explicitly test add_months for each value of MAX_DAY_FOR_MONTH and
# for crossing a year boundary
for i, boundary in enumerate(period_boundaries):
self.assertEqual(add_months(anchor, i), boundary)
# Test next_month for small values
for last, next_ in zip(period_boundaries[:-1], period_boundaries[1:]):
self.assertEqual(next_month(anchor, last), next_)
# Test next_month for large values
period_boundaries = [dt.replace(year=dt.year + 100) for dt in period_boundaries]
for last, next_ in zip(period_boundaries[:-1], period_boundaries[1:]):
self.assertEqual(next_month(anchor, last), next_)
    def test_compute_plan_parameters(self) -> None:
        """Table-test compute_plan_parameters over (tier, automanage_licenses,
        billing_schedule, discount) inputs; the expected tuple appears to be
        (billing cycle anchor, next date, period end, price per license) --
        confirm against compute_plan_parameters' definition."""
        # TODO: test rounding down microseconds
        anchor = datetime(2019, 12, 31, 1, 2, 3, tzinfo=timezone.utc)
        month_later = datetime(2020, 1, 31, 1, 2, 3, tzinfo=timezone.utc)
        year_later = datetime(2020, 12, 31, 1, 2, 3, tzinfo=timezone.utc)
        test_cases = [
            # test all possibilities, since there aren't that many
            (
                (CustomerPlan.STANDARD, True, CustomerPlan.ANNUAL, None),
                (anchor, month_later, year_later, 8000),
            ),
            (
                (CustomerPlan.STANDARD, True, CustomerPlan.ANNUAL, 85),
                (anchor, month_later, year_later, 1200),
            ),
            (
                (CustomerPlan.STANDARD, True, CustomerPlan.MONTHLY, None),
                (anchor, month_later, month_later, 800),
            ),
            (
                (CustomerPlan.STANDARD, True, CustomerPlan.MONTHLY, 85),
                (anchor, month_later, month_later, 120),
            ),
            (
                (CustomerPlan.STANDARD, False, CustomerPlan.ANNUAL, None),
                (anchor, year_later, year_later, 8000),
            ),
            (
                (CustomerPlan.STANDARD, False, CustomerPlan.ANNUAL, 85),
                (anchor, year_later, year_later, 1200),
            ),
            (
                (CustomerPlan.STANDARD, False, CustomerPlan.MONTHLY, None),
                (anchor, month_later, month_later, 800),
            ),
            (
                (CustomerPlan.STANDARD, False, CustomerPlan.MONTHLY, 85),
                (anchor, month_later, month_later, 120),
            ),
            # test exact math of Decimals; 800 * (1 - 87.25) = 101.9999999..
            (
                (CustomerPlan.STANDARD, False, CustomerPlan.MONTHLY, 87.25),
                (anchor, month_later, month_later, 102),
            ),
            # test dropping of fractional cents; without the int it's 102.8
            (
                (CustomerPlan.STANDARD, False, CustomerPlan.MONTHLY, 87.15),
                (anchor, month_later, month_later, 102),
            ),
        ]
        with patch("corporate.lib.stripe.timezone_now", return_value=anchor):
            for (tier, automanage_licenses, billing_schedule, discount), output in test_cases:
                output_ = compute_plan_parameters(
                    tier,
                    automanage_licenses,
                    billing_schedule,
                    # Discounts are passed as Decimal percentages.
                    None if discount is None else Decimal(discount),
                )
                self.assertEqual(output_, output)
def test_get_price_per_license(self) -> None:
self.assertEqual(get_price_per_license(CustomerPlan.STANDARD, CustomerPlan.ANNUAL), 8000)
self.assertEqual(get_price_per_license(CustomerPlan.STANDARD, CustomerPlan.MONTHLY), 800)
self.assertEqual(
get_price_per_license(
CustomerPlan.STANDARD, CustomerPlan.MONTHLY, discount=Decimal(50)
),
400,
)
self.assertEqual(get_price_per_license(CustomerPlan.PLUS, CustomerPlan.ANNUAL), 16000)
self.assertEqual(get_price_per_license(CustomerPlan.PLUS, CustomerPlan.MONTHLY), 1600)
self.assertEqual(
get_price_per_license(CustomerPlan.PLUS, CustomerPlan.MONTHLY, discount=Decimal(50)),
800,
)
with self.assertRaisesRegex(InvalidBillingSchedule, "Unknown billing_schedule: 1000"):
get_price_per_license(CustomerPlan.STANDARD, 1000)
with self.assertRaisesRegex(InvalidTier, "Unknown tier: 10"):
get_price_per_license(CustomerPlan.ENTERPRISE, CustomerPlan.ANNUAL)
    def test_get_plan_renewal_or_end_date(self) -> None:
        """get_plan_renewal_or_end_date normally returns the next cycle
        boundary, but returns plan.end_date when the plan ends sooner."""
        realm = get_realm("zulip")
        customer = Customer.objects.create(realm=realm, stripe_customer_id="cus_12345")
        billing_cycle_anchor = timezone_now()
        plan = CustomerPlan.objects.create(
            customer=customer,
            status=CustomerPlan.ACTIVE,
            billing_cycle_anchor=billing_cycle_anchor,
            billing_schedule=CustomerPlan.MONTHLY,
            tier=CustomerPlan.STANDARD,
        )
        # Monthly plan with no end_date: renewal is one month after the anchor.
        renewal_date = get_plan_renewal_or_end_date(plan, billing_cycle_anchor)
        self.assertEqual(renewal_date, add_months(billing_cycle_anchor, 1))
        # When the plan ends 2 days before the start of the next billing cycle,
        # the function should return the end_date.
        plan_end_date = add_months(billing_cycle_anchor, 1) - timedelta(days=2)
        plan.end_date = plan_end_date
        plan.save(update_fields=["end_date"])
        renewal_date = get_plan_renewal_or_end_date(plan, billing_cycle_anchor)
        self.assertEqual(renewal_date, plan_end_date)
    def test_update_or_create_stripe_customer_logic(self) -> None:
        """Exercise the four branches of update_or_create_stripe_customer:
        no Customer row yet, a Customer without a Stripe ID, replacing the
        payment method, and the no-op path when no payment method is given."""
        user = self.example_user("hamlet")
        # No existing Customer object
        with patch(
            "corporate.lib.stripe.do_create_stripe_customer", return_value="returned"
        ) as mocked1:
            returned = update_or_create_stripe_customer(user, payment_method="payment_method_id")
        mocked1.assert_called_once()
        self.assertEqual(returned, "returned")
        customer = Customer.objects.create(realm=get_realm("zulip"))
        # Customer exists but stripe_customer_id is None
        with patch(
            "corporate.lib.stripe.do_create_stripe_customer", return_value="returned"
        ) as mocked2:
            returned = update_or_create_stripe_customer(user, payment_method="payment_method_id")
        mocked2.assert_called_once()
        self.assertEqual(returned, "returned")
        customer.stripe_customer_id = "cus_12345"
        customer.save()
        # Customer exists, replace payment source
        with patch("corporate.lib.stripe.do_replace_payment_method") as mocked3:
            returned_customer = update_or_create_stripe_customer(
                self.example_user("hamlet"), "token"
            )
        mocked3.assert_called_once()
        self.assertEqual(returned_customer, customer)
        # Customer exists, do nothing
        with patch("corporate.lib.stripe.do_replace_payment_method") as mocked4:
            returned_customer = update_or_create_stripe_customer(self.example_user("hamlet"), None)
        mocked4.assert_not_called()
        self.assertEqual(returned_customer, customer)
def test_get_customer_by_realm(self) -> None:
realm = get_realm("zulip")
self.assertEqual(get_customer_by_realm(realm), None)
customer = Customer.objects.create(realm=realm, stripe_customer_id="cus_12345")
self.assertEqual(get_customer_by_realm(realm), customer)
    def test_get_current_plan_by_customer(self) -> None:
        """Only ACTIVE / DOWNGRADE_AT_END_OF_CYCLE plans count as "current";
        ENDED and NEVER_STARTED plans do not."""
        realm = get_realm("zulip")
        customer = Customer.objects.create(realm=realm, stripe_customer_id="cus_12345")
        self.assertEqual(get_current_plan_by_customer(customer), None)
        plan = CustomerPlan.objects.create(
            customer=customer,
            status=CustomerPlan.ACTIVE,
            billing_cycle_anchor=timezone_now(),
            billing_schedule=CustomerPlan.ANNUAL,
            tier=CustomerPlan.STANDARD,
        )
        self.assertEqual(get_current_plan_by_customer(customer), plan)
        # A plan scheduled to downgrade is still the current plan until it ends.
        plan.status = CustomerPlan.DOWNGRADE_AT_END_OF_CYCLE
        plan.save(update_fields=["status"])
        self.assertEqual(get_current_plan_by_customer(customer), plan)
        plan.status = CustomerPlan.ENDED
        plan.save(update_fields=["status"])
        self.assertEqual(get_current_plan_by_customer(customer), None)
        plan.status = CustomerPlan.NEVER_STARTED
        plan.save(update_fields=["status"])
        self.assertEqual(get_current_plan_by_customer(customer), None)
def test_get_current_plan_by_realm(self) -> None:
realm = get_realm("zulip")
self.assertEqual(get_current_plan_by_realm(realm), None)
customer = Customer.objects.create(realm=realm, stripe_customer_id="cus_12345")
self.assertEqual(get_current_plan_by_realm(realm), None)
plan = CustomerPlan.objects.create(
customer=customer,
status=CustomerPlan.ACTIVE,
billing_cycle_anchor=timezone_now(),
billing_schedule=CustomerPlan.ANNUAL,
tier=CustomerPlan.STANDARD,
)
self.assertEqual(get_current_plan_by_realm(realm), plan)
def test_get_realms_to_default_discount_dict(self) -> None:
Customer.objects.create(realm=get_realm("zulip"), stripe_customer_id="cus_1")
lear_customer = Customer.objects.create(realm=get_realm("lear"), stripe_customer_id="cus_2")
lear_customer.default_discount = Decimal(30)
lear_customer.save(update_fields=["default_discount"])
zephyr_customer = Customer.objects.create(
realm=get_realm("zephyr"), stripe_customer_id="cus_3"
)
zephyr_customer.default_discount = Decimal(0)
zephyr_customer.save(update_fields=["default_discount"])
self.assertEqual(
get_realms_to_default_discount_dict(),
{
"lear": Decimal("30.0000"),
},
)
    def test_is_realm_on_free_trial(self) -> None:
        """A realm is on a free trial only when its current plan has
        status FREE_TRIAL; an ACTIVE plan does not count."""
        realm = get_realm("zulip")
        self.assertFalse(is_realm_on_free_trial(realm))
        customer = Customer.objects.create(realm=realm, stripe_customer_id="cus_12345")
        plan = CustomerPlan.objects.create(
            customer=customer,
            status=CustomerPlan.ACTIVE,
            billing_cycle_anchor=timezone_now(),
            billing_schedule=CustomerPlan.ANNUAL,
            tier=CustomerPlan.STANDARD,
        )
        self.assertFalse(is_realm_on_free_trial(realm))
        plan.status = CustomerPlan.FREE_TRIAL
        plan.save(update_fields=["status"])
        self.assertTrue(is_realm_on_free_trial(realm))
def test_is_sponsored_realm(self) -> None:
realm = get_realm("zulip")
self.assertFalse(is_sponsored_realm(realm))
realm.plan_type = Realm.PLAN_TYPE_STANDARD_FREE
realm.save()
self.assertTrue(is_sponsored_realm(realm))
    def test_change_remote_server_plan_type(self) -> None:
        """do_change_remote_server_plan_type updates the server's plan_type
        and writes a REMOTE_SERVER_PLAN_TYPE_CHANGED audit-log row recording
        the old and new values."""
        server_uuid = str(uuid.uuid4())
        remote_server = RemoteZulipServer.objects.create(
            uuid=server_uuid,
            api_key="magic_secret_api_key",
            hostname="demo.example.com",
            contact_email="email@example.com",
        )
        self.assertEqual(remote_server.plan_type, RemoteZulipServer.PLAN_TYPE_SELF_HOSTED)
        do_change_remote_server_plan_type(remote_server, RemoteZulipServer.PLAN_TYPE_STANDARD)
        # Re-fetch to observe the persisted change rather than the stale object.
        remote_server = RemoteZulipServer.objects.get(uuid=server_uuid)
        remote_realm_audit_log = RemoteZulipServerAuditLog.objects.filter(
            event_type=RealmAuditLog.REMOTE_SERVER_PLAN_TYPE_CHANGED
        ).last()
        assert remote_realm_audit_log is not None
        expected_extra_data = {
            "old_value": RemoteZulipServer.PLAN_TYPE_SELF_HOSTED,
            "new_value": RemoteZulipServer.PLAN_TYPE_STANDARD,
        }
        # extra_data is stored as the str() of the dict in this schema version.
        self.assertEqual(remote_realm_audit_log.extra_data, str(expected_extra_data))
        self.assertEqual(remote_server.plan_type, RemoteZulipServer.PLAN_TYPE_STANDARD)
    def test_deactivate_remote_server(self) -> None:
        """do_deactivate_remote_server deactivates the server and logs it;
        a second deactivation is a no-op that only emits a warning."""
        server_uuid = str(uuid.uuid4())
        remote_server = RemoteZulipServer.objects.create(
            uuid=server_uuid,
            api_key="magic_secret_api_key",
            hostname="demo.example.com",
            contact_email="email@example.com",
        )
        self.assertFalse(remote_server.deactivated)
        do_deactivate_remote_server(remote_server)
        remote_server = RemoteZulipServer.objects.get(uuid=server_uuid)
        remote_realm_audit_log = RemoteZulipServerAuditLog.objects.filter(
            event_type=RealmAuditLog.REMOTE_SERVER_DEACTIVATED
        ).last()
        assert remote_realm_audit_log is not None
        self.assertTrue(remote_server.deactivated)
        # Try to deactivate a remote server that is already deactivated
        with self.assertLogs("corporate.stripe", "WARN") as warning_log:
            do_deactivate_remote_server(remote_server)
            self.assertEqual(
                warning_log.output,
                [
                    "WARNING:corporate.stripe:Cannot deactivate remote server with ID "
                    f"{remote_server.id}, server has already been deactivated."
                ],
            )
class LicenseLedgerTest(StripeTestCase):
    """Tests for LicenseLedger bookkeeping: renewal entries created at the
    end of a billing cycle, and ledger updates driven by seat-count changes
    on automanaged and manual-license plans."""
    def test_add_plan_renewal_if_needed(self) -> None:
        """A renewal ledger entry is added exactly once per billing cycle."""
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        self.assertEqual(LicenseLedger.objects.count(), 1)
        plan = CustomerPlan.objects.get()
        # Plan hasn't renewed yet
        make_end_of_cycle_updates_if_needed(plan, self.next_year - timedelta(days=1))
        self.assertEqual(LicenseLedger.objects.count(), 1)
        # Plan needs to renew
        # TODO: do_deactivate_user for a user, so that licenses_at_next_renewal != licenses
        new_plan, ledger_entry = make_end_of_cycle_updates_if_needed(plan, self.next_year)
        self.assertIsNone(new_plan)
        self.assertEqual(LicenseLedger.objects.count(), 2)
        ledger_params = {
            "plan": plan,
            "is_renewal": True,
            "event_time": self.next_year,
            "licenses": self.seat_count,
            "licenses_at_next_renewal": self.seat_count,
        }
        for key, value in ledger_params.items():
            self.assertEqual(getattr(ledger_entry, key), value)
        # Plan needs to renew, but we already added the plan_renewal ledger entry
        make_end_of_cycle_updates_if_needed(plan, self.next_year + timedelta(days=1))
        self.assertEqual(LicenseLedger.objects.count(), 2)
    def test_update_license_ledger_if_needed(self) -> None:
        """update_license_ledger_if_needed is a no-op without a Customer, for
        non-automanaged plans, and for ended plans; otherwise it appends."""
        realm = get_realm("zulip")
        # Test no Customer
        update_license_ledger_if_needed(realm, self.now)
        self.assertFalse(LicenseLedger.objects.exists())
        # Test plan not automanaged
        self.local_upgrade(self.seat_count + 1, False, CustomerPlan.ANNUAL, True, False)
        plan = CustomerPlan.objects.get()
        self.assertEqual(LicenseLedger.objects.count(), 1)
        self.assertEqual(plan.licenses(), self.seat_count + 1)
        self.assertEqual(plan.licenses_at_next_renewal(), self.seat_count + 1)
        update_license_ledger_if_needed(realm, self.now)
        self.assertEqual(LicenseLedger.objects.count(), 1)
        # Test no active plan
        plan.automanage_licenses = True
        plan.status = CustomerPlan.ENDED
        plan.save(update_fields=["automanage_licenses", "status"])
        update_license_ledger_if_needed(realm, self.now)
        self.assertEqual(LicenseLedger.objects.count(), 1)
        # Test update needed
        plan.status = CustomerPlan.ACTIVE
        plan.save(update_fields=["status"])
        update_license_ledger_if_needed(realm, self.now)
        self.assertEqual(LicenseLedger.objects.count(), 2)
    def test_update_license_ledger_for_automanaged_plan(self) -> None:
        """`licenses` acts as a high-water mark within a billing cycle, while
        `licenses_at_next_renewal` tracks the live seat count; the mark
        resets at renewal."""
        realm = get_realm("zulip")
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        plan = CustomerPlan.objects.first()
        assert plan is not None
        self.assertEqual(plan.licenses(), self.seat_count)
        self.assertEqual(plan.licenses_at_next_renewal(), self.seat_count)
        # Simple increase
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=23):
            update_license_ledger_for_automanaged_plan(realm, plan, self.now)
            self.assertEqual(plan.licenses(), 23)
            self.assertEqual(plan.licenses_at_next_renewal(), 23)
        # Decrease
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=20):
            update_license_ledger_for_automanaged_plan(realm, plan, self.now)
            self.assertEqual(plan.licenses(), 23)
            self.assertEqual(plan.licenses_at_next_renewal(), 20)
        # Increase, but not past high watermark
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=21):
            update_license_ledger_for_automanaged_plan(realm, plan, self.now)
            self.assertEqual(plan.licenses(), 23)
            self.assertEqual(plan.licenses_at_next_renewal(), 21)
        # Increase, but after renewal date, and below last year's high watermark
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=22):
            update_license_ledger_for_automanaged_plan(
                realm, plan, self.next_year + timedelta(seconds=1)
            )
            self.assertEqual(plan.licenses(), 22)
            self.assertEqual(plan.licenses_at_next_renewal(), 22)
        ledger_entries = list(
            LicenseLedger.objects.values_list(
                "is_renewal", "event_time", "licenses", "licenses_at_next_renewal"
            ).order_by("id")
        )
        self.assertEqual(
            ledger_entries,
            [
                (True, self.now, self.seat_count, self.seat_count),
                (False, self.now, 23, 23),
                (False, self.now, 23, 20),
                (False, self.now, 23, 21),
                (True, self.next_year, 21, 21),
                (False, self.next_year + timedelta(seconds=1), 22, 22),
            ],
        )
    def test_update_license_ledger_for_manual_plan(self) -> None:
        """Manual-license plans: `licenses` may only be raised, and
        `licenses_at_next_renewal` may not drop below the live seat count;
        violations raise AssertionError."""
        realm = get_realm("zulip")
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count + 1, False, CustomerPlan.ANNUAL, True, False)
        plan = get_current_plan_by_realm(realm)
        assert plan is not None
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=self.seat_count):
            update_license_ledger_for_manual_plan(plan, self.now, licenses=self.seat_count + 3)
            self.assertEqual(plan.licenses(), self.seat_count + 3)
            self.assertEqual(plan.licenses_at_next_renewal(), self.seat_count + 3)
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=self.seat_count):
            with self.assertRaises(AssertionError):
                update_license_ledger_for_manual_plan(plan, self.now, licenses=self.seat_count)
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=self.seat_count):
            update_license_ledger_for_manual_plan(
                plan, self.now, licenses_at_next_renewal=self.seat_count
            )
            self.assertEqual(plan.licenses(), self.seat_count + 3)
            self.assertEqual(plan.licenses_at_next_renewal(), self.seat_count)
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=self.seat_count):
            with self.assertRaises(AssertionError):
                update_license_ledger_for_manual_plan(
                    plan, self.now, licenses_at_next_renewal=self.seat_count - 1
                )
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=self.seat_count):
            update_license_ledger_for_manual_plan(plan, self.now, licenses=self.seat_count + 10)
            self.assertEqual(plan.licenses(), self.seat_count + 10)
            self.assertEqual(plan.licenses_at_next_renewal(), self.seat_count + 10)
        make_end_of_cycle_updates_if_needed(plan, self.next_year)
        self.assertEqual(plan.licenses(), self.seat_count + 10)
        ledger_entries = list(
            LicenseLedger.objects.values_list(
                "is_renewal", "event_time", "licenses", "licenses_at_next_renewal"
            ).order_by("id")
        )
        self.assertEqual(
            ledger_entries,
            [
                (True, self.now, self.seat_count + 1, self.seat_count + 1),
                (False, self.now, self.seat_count + 3, self.seat_count + 3),
                (False, self.now, self.seat_count + 3, self.seat_count),
                (False, self.now, self.seat_count + 10, self.seat_count + 10),
                (True, self.next_year, self.seat_count + 10, self.seat_count + 10),
            ],
        )
        # Calling with neither licenses nor licenses_at_next_renewal is invalid.
        with self.assertRaises(AssertionError):
            update_license_ledger_for_manual_plan(plan, self.now)
    def test_user_changes(self) -> None:
        """Each user create/deactivate/reactivate/activate event appends a
        ledger entry reflecting the new seat count."""
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        user = do_create_user("email", "password", get_realm("zulip"), "name", acting_user=None)
        do_deactivate_user(user, acting_user=None)
        do_reactivate_user(user, acting_user=None)
        # Not a proper use of do_activate_mirror_dummy_user, but fine for this test
        do_activate_mirror_dummy_user(user, acting_user=None)
        ledger_entries = list(
            LicenseLedger.objects.values_list(
                "is_renewal", "licenses", "licenses_at_next_renewal"
            ).order_by("id")
        )
        self.assertEqual(
            ledger_entries,
            [
                (True, self.seat_count, self.seat_count),
                (False, self.seat_count + 1, self.seat_count + 1),
                (False, self.seat_count + 1, self.seat_count),
                (False, self.seat_count + 1, self.seat_count + 1),
                (False, self.seat_count + 1, self.seat_count + 1),
            ],
        )
class InvoiceTest(StripeTestCase):
    """Tests for generating Stripe invoices from CustomerPlans."""
    def test_invoicing_status_is_started(self) -> None:
        """invoice_plan refuses to run when a previous invoicing pass was
        left half-done (invoicing_status == STARTED)."""
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        plan = CustomerPlan.objects.first()
        assert plan is not None
        plan.invoicing_status = CustomerPlan.STARTED
        plan.save(update_fields=["invoicing_status"])
        with self.assertRaises(NotImplementedError):
            invoice_plan(assert_is_not_none(CustomerPlan.objects.first()), self.now)
    def test_invoice_plan_without_stripe_customer(self) -> None:
        """A paid plan whose Customer has no Stripe ID raises BillingError."""
        self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, False, False)
        plan = get_current_plan_by_realm(get_realm("zulip"))
        assert plan and plan.customer
        plan.customer.stripe_customer_id = None
        plan.customer.save(update_fields=["stripe_customer_id"])
        with self.assertRaises(BillingError) as context:
            invoice_plan(plan, timezone_now())
        self.assertRegex(
            context.exception.error_description,
            "Realm zulip has a paid plan without a Stripe customer",
        )
    @mock_stripe()
    def test_invoice_plan(self, *mocks: Mock) -> None:
        """End-to-end invoicing after a year of seat-count churn: the new
        invoice carries a renewal line plus prorated lines for licenses
        added mid-cycle."""
        user = self.example_user("hamlet")
        self.login_user(user)
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.upgrade()
        # Increase
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=self.seat_count + 3):
            update_license_ledger_if_needed(get_realm("zulip"), self.now + timedelta(days=100))
        # Decrease
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=self.seat_count):
            update_license_ledger_if_needed(get_realm("zulip"), self.now + timedelta(days=200))
        # Increase, but not past high watermark
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=self.seat_count + 1):
            update_license_ledger_if_needed(get_realm("zulip"), self.now + timedelta(days=300))
        # Increase, but after renewal date, and below last year's high watermark
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=self.seat_count + 2):
            update_license_ledger_if_needed(get_realm("zulip"), self.now + timedelta(days=400))
        # Increase, but after event_time
        with patch("corporate.lib.stripe.get_latest_seat_count", return_value=self.seat_count + 3):
            update_license_ledger_if_needed(get_realm("zulip"), self.now + timedelta(days=500))
        plan = CustomerPlan.objects.first()
        assert plan is not None
        invoice_plan(plan, self.now + timedelta(days=400))
        # NOTE(review): "cutomer" is a typo for "customer"; harmless local name.
        stripe_cutomer_id = plan.customer.stripe_customer_id
        assert stripe_cutomer_id is not None
        [invoice0, invoice1] = stripe.Invoice.list(customer=stripe_cutomer_id)
        self.assertIsNotNone(invoice0.status_transitions.finalized_at)
        [item0, item1, item2] = invoice0.lines
        line_item_params = {
            "amount": int(8000 * (1 - ((400 - 366) / 365)) + 0.5),
            "description": "Additional license (Feb 5, 2013 - Jan 2, 2014)",
            "discountable": False,
            "period": {
                "start": datetime_to_timestamp(self.now + timedelta(days=400)),
                "end": datetime_to_timestamp(self.now + timedelta(days=2 * 365 + 1)),
            },
            "quantity": 1,
        }
        for key, value in line_item_params.items():
            self.assertEqual(item0.get(key), value)
        line_item_params = {
            "amount": 8000 * (self.seat_count + 1),
            "description": "Zulip Cloud Standard - renewal",
            "discountable": False,
            "period": {
                "start": datetime_to_timestamp(self.now + timedelta(days=366)),
                "end": datetime_to_timestamp(self.now + timedelta(days=2 * 365 + 1)),
            },
            "quantity": (self.seat_count + 1),
        }
        for key, value in line_item_params.items():
            self.assertEqual(item1.get(key), value)
        line_item_params = {
            "amount": 3 * int(8000 * (366 - 100) / 366 + 0.5),
            "description": "Additional license (Apr 11, 2012 - Jan 2, 2013)",
            "discountable": False,
            "period": {
                "start": datetime_to_timestamp(self.now + timedelta(days=100)),
                "end": datetime_to_timestamp(self.now + timedelta(days=366)),
            },
            "quantity": 3,
        }
        for key, value in line_item_params.items():
            self.assertEqual(item2.get(key), value)
    @mock_stripe()
    def test_fixed_price_plans(self, *mocks: Mock) -> None:
        """Fixed-price plans invoice the fixed amount with quantity 1 and
        are billed by invoice (send_invoice collection)."""
        # Also tests charge_automatically=False
        user = self.example_user("hamlet")
        self.login_user(user)
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.upgrade(invoice=True)
        plan = CustomerPlan.objects.first()
        assert plan is not None
        plan.fixed_price = 100
        plan.price_per_license = 0
        plan.save(update_fields=["fixed_price", "price_per_license"])
        invoice_plan(plan, self.next_year)
        stripe_customer_id = plan.customer.stripe_customer_id
        assert stripe_customer_id is not None
        [invoice0, invoice1] = stripe.Invoice.list(customer=stripe_customer_id)
        self.assertEqual(invoice0.collection_method, "send_invoice")
        [item] = invoice0.lines
        line_item_params = {
            "amount": 100,
            "description": "Zulip Cloud Standard - renewal",
            "discountable": False,
            "period": {
                "start": datetime_to_timestamp(self.next_year),
                "end": datetime_to_timestamp(self.next_year + timedelta(days=365)),
            },
            "quantity": 1,
        }
        for key, value in line_item_params.items():
            self.assertEqual(item.get(key), value)
    def test_no_invoice_needed(self) -> None:
        """When nothing changed, invoice_plan still advances
        next_invoice_date without creating Stripe objects."""
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        plan = CustomerPlan.objects.first()
        assert plan is not None
        self.assertEqual(plan.next_invoice_date, self.next_month)
        # Test this doesn't make any calls to stripe.Invoice or stripe.InvoiceItem
        invoice_plan(plan, self.next_month)
        plan = CustomerPlan.objects.first()
        # Test that we still update next_invoice_date
        assert plan is not None
        self.assertEqual(plan.next_invoice_date, self.next_month + timedelta(days=29))
    def test_invoice_plans_as_needed(self) -> None:
        """invoice_plans_as_needed only invoices plans whose
        next_invoice_date has arrived."""
        with patch("corporate.lib.stripe.timezone_now", return_value=self.now):
            self.local_upgrade(self.seat_count, True, CustomerPlan.ANNUAL, True, False)
        plan = CustomerPlan.objects.first()
        assert plan is not None
        self.assertEqual(plan.next_invoice_date, self.next_month)
        # Test nothing needed to be done
        with patch("corporate.lib.stripe.invoice_plan") as mocked:
            invoice_plans_as_needed(self.next_month - timedelta(days=1))
        mocked.assert_not_called()
        # Test something needing to be done
        invoice_plans_as_needed(self.next_month)
        plan = CustomerPlan.objects.first()
        assert plan is not None
        self.assertEqual(plan.next_invoice_date, self.next_month + timedelta(days=29))
class TestTestClasses(ZulipTestCase):
    """Sanity checks for the test-helper methods that subscribe a realm to
    a manual-license-management plan, verifying the plan, ledger, and realm
    plan_type they create."""
    def test_subscribe_realm_to_manual_license_management_plan(self) -> None:
        """The annual manual-management helper creates a non-automanaged
        STANDARD plan with the given license counts."""
        realm = get_realm("zulip")
        plan, ledger = self.subscribe_realm_to_manual_license_management_plan(
            realm, 50, 60, CustomerPlan.ANNUAL
        )
        plan.refresh_from_db()
        self.assertEqual(plan.automanage_licenses, False)
        self.assertEqual(plan.billing_schedule, CustomerPlan.ANNUAL)
        self.assertEqual(plan.tier, CustomerPlan.STANDARD)
        self.assertEqual(plan.licenses(), 50)
        self.assertEqual(plan.licenses_at_next_renewal(), 60)
        ledger.refresh_from_db()
        self.assertEqual(ledger.plan, plan)
        self.assertEqual(ledger.licenses, 50)
        self.assertEqual(ledger.licenses_at_next_renewal, 60)
        realm.refresh_from_db()
        self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
    def test_subscribe_realm_to_monthly_plan_on_manual_license_management(self) -> None:
        """The monthly variant behaves the same with a MONTHLY schedule."""
        realm = get_realm("zulip")
        plan, ledger = self.subscribe_realm_to_monthly_plan_on_manual_license_management(
            realm, 20, 30
        )
        plan.refresh_from_db()
        self.assertEqual(plan.automanage_licenses, False)
        self.assertEqual(plan.billing_schedule, CustomerPlan.MONTHLY)
        self.assertEqual(plan.tier, CustomerPlan.STANDARD)
        self.assertEqual(plan.licenses(), 20)
        self.assertEqual(plan.licenses_at_next_renewal(), 30)
        ledger.refresh_from_db()
        self.assertEqual(ledger.plan, plan)
        self.assertEqual(ledger.licenses, 20)
        self.assertEqual(ledger.licenses_at_next_renewal, 30)
        realm.refresh_from_db()
        self.assertEqual(realm.plan_type, Realm.PLAN_TYPE_STANDARD)
| 44.795008
| 198
| 0.632477
|
4a009fd6d4f35ce02e1377e83ed56537b0444846
| 2,463
|
py
|
Python
|
components/Calibrate/calibrateEXT.py
|
bforest-ariadne/Live-AV-Template
|
6b663c3757c15ab5695a22d42001b35f7c8db925
|
[
"MIT"
] | 2
|
2019-08-30T01:22:56.000Z
|
2019-11-02T02:36:00.000Z
|
components/Calibrate/calibrateEXT.py
|
bforest-ariadne/Live-AV-Template
|
6b663c3757c15ab5695a22d42001b35f7c8db925
|
[
"MIT"
] | null | null | null |
components/Calibrate/calibrateEXT.py
|
bforest-ariadne/Live-AV-Template
|
6b663c3757c15ab5695a22d42001b35f7c8db925
|
[
"MIT"
] | null | null | null |
# TouchDesigner injects `op` and `root` into the module namespace at runtime;
# these self-assignments exist only to silence pylint about the injected names.
op = op # pylint:disable=invalid-name,used-before-assignment
root = root # pylint:disable=invalid-name,used-before-assignment
# Pull the base extension classes from sibling DATs via TouchDesigner's
# mod() module-import mechanism.
PreShowExtension = mod('preShowEXT').PreShowExtension
ParSendModeExtension = mod('parSendModeEXT').ParSendModeExtension
class CalibrateExtension(PreShowExtension, ParSendModeExtension):
    """TouchDesigner extension for the Calibrate component.

    Keeps the stoner keystone table (`calibration/keyOffset`) and the
    key-control slider widget in sync in both directions, using the
    sliderChange/keyChange flags to break the feedback loop between the
    two bound parameter sets.
    """
    def __init__(self, my_op):
        PreShowExtension.__init__(self, my_op)
        ParSendModeExtension.__init__(self, my_op)
        # Re-entrancy guards: set while one side of the slider<->table
        # binding is being written so the other callback ignores the echo.
        self.sliderChange = False
        # Local parameter component backing the key-control sliders.
        self.controlIPar = self.Me.op('widget_keyControl/iparLocal')
        # Table DAT holding the keystone u/v offsets.
        self.keyDat = self.Me.op('calibration/keyOffset')
        self.keyChange = False
        return
    def Test(self):
        """Debug hook; chains to the parent extensions' Test()."""
        super().Test()
        # NOTE(review): "Calibreate" is a typo in this debug message only.
        self.print('test extension Calibreate Class')
        return
    def Reset(self):
        """Clear the calibration by clicking the stoner UI's reset button."""
        op('/Calibrate/stoner/settingsUI/reset/button1').click()
        return
    def onStonerKeyChange(self, dat, cells, prev):
        """Table-DAT change callback: copy the four keystone points' u/v
        cells into the slider parameters, scaled by 1000 (blank cells map
        to 0.0). Skipped while either side is mid-update."""
        # TODO fix glitchyness of key translation
        # this is due to the two bound parameters
        if not self.sliderChange and not self.keyChange:
            self.keyChange = True
            # self.print('onStonerKeyChange')
            for i in range(4):
                # Slider par names follow the pattern Key<n>1 (u) / Key<n>2 (v).
                parNameU = 'Key{}1'.format(i)
                parNameV = 'Key{}2'.format(i)
                # Row 0 is the header, so data rows start at index 1.
                cellu = dat[ i + 1, 'u']
                cellv = dat[ i + 1, 'v']
                # print(self.name, 'index', i, 'cellv.val: ', str(cellv.val), type(cellv.val) )
                if cellu.val != '':
                    self.controlIPar.pars(parNameU)[0].val = int(cellu*1000)
                else:
                    self.controlIPar.pars(parNameU)[0].val = 0.0
                if cellv.val != '':
                    self.controlIPar.pars(parNameV)[0].val = int(cellv*1000)
                else:
                    self.controlIPar.pars(parNameV)[0].val = 0.0
            self.keyChange = False
        return
    def OnsliderChange(self, channel, val ):
        """Slider callback: write the value back into the keyOffset table,
        undoing the x1000 scaling. The channel name encodes the row (its
        digits) and the column ('u'/'v' as its last character)."""
        if not self.keyChange:
            self.sliderChange = True
            # self.print('OnSliderChange')
            self.keyDat[ tdu.digits(channel.name), channel.name[-1:] ] = val/1000
            self.sliderChange = False
    def Showui(self):
        """Open the key-control widget's viewer window."""
        self.Me.op('widget_keyControl').openViewer()
        return
    def OnValueChange(self, par):
        """Parameter-change hook: chain to the base class, then push the
        changed parameter values out (see ParSendModeExtension)."""
        super().OnValueChange(par)
        self.sendApplyParVals()
        return
    def OnParsChange(self):
        """Batch parameter-change hook: re-send all parameters."""
        self.sendApplyPars()
        return
| 34.690141
| 95
| 0.587495
|
4a00a021ca3aa67503ecd517d437519b354c9c80
| 887
|
py
|
Python
|
ros/src/mux_demux/scripts/mock_propigate.py
|
purduerov/X12-Repo
|
33574a9a07c3512d6db3a513d13a5666f60fc1f7
|
[
"MIT"
] | 2
|
2020-01-13T17:28:59.000Z
|
2020-02-14T01:00:14.000Z
|
ros/src/mux_demux/scripts/mock_propigate.py
|
purduerov/X12-Repo
|
33574a9a07c3512d6db3a513d13a5666f60fc1f7
|
[
"MIT"
] | 2
|
2019-10-23T23:16:36.000Z
|
2020-10-10T17:52:27.000Z
|
ros/src/mux_demux/scripts/mock_propigate.py
|
purduerov/X12-Repo
|
33574a9a07c3512d6db3a513d13a5666f60fc1f7
|
[
"MIT"
] | 2
|
2020-02-15T19:00:38.000Z
|
2020-02-15T19:00:40.000Z
|
#! /usr/bin/python
import rospy
from shared_msgs.msg import can_msg, auto_command_msg, thrust_status_msg, thrust_command_msg, esc_single_msg
from sensor_msgs.msg import Imu, Temperature
from std_msgs.msg import Float32
import random
# Most recently received mocked thrust command; re-published at 50 Hz in main.
mock = thrust_command_msg()
def mock_catch(msg):
    """Subscriber callback: cache the latest mocked thrust command."""
    global mock
    mock = msg
if __name__ == "__main__":
    # Bridge node: forwards messages from the mock topic onto the real
    # thrust command topic at a fixed rate.
    rospy.init_node('mock_prop')
    ns = rospy.get_namespace() # This should return /surface
    status_sub = rospy.Subscriber(ns + 'thrust_mock',
                                  thrust_command_msg, mock_catch)
    # Publishers out onto the ROS System
    thrust_pub = rospy.Publisher(ns + 'thrust_command',
                                 thrust_command_msg, queue_size=10)
    rate = rospy.Rate(50) # 50hz
    # TODO: I2C related activities
    while not rospy.is_shutdown():
        # Republish the most recent mock command every cycle, even if unchanged.
        thrust_pub.publish(mock)
        rate.sleep()
| 26.878788
| 108
| 0.674183
|
4a00a120144bd4efec75bc485c163b16d276964a
| 63,189
|
py
|
Python
|
crits/emails/handlers.py
|
puhley/crits
|
9870f2d449295c272402af73b5b335e2494b61f3
|
[
"MIT"
] | null | null | null |
crits/emails/handlers.py
|
puhley/crits
|
9870f2d449295c272402af73b5b335e2494b61f3
|
[
"MIT"
] | null | null | null |
crits/emails/handlers.py
|
puhley/crits
|
9870f2d449295c272402af73b5b335e2494b61f3
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
import datetime
import email as eml
from email.parser import Parser
from email.utils import parseaddr, getaddresses, mktime_tz, parsedate_tz
import hashlib
import json
import magic
import re
import yaml
import StringIO
import sys
from dateutil.parser import parse as date_parser
from django.conf import settings
from crits.core.forms import DownloadFileForm
from crits.emails.forms import EmailYAMLForm
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
from crits.campaigns.forms import CampaignForm
from crits.config.config import CRITsConfig
from crits.core.crits_mongoengine import json_handler, create_embedded_source
from crits.core.crits_mongoengine import EmbeddedCampaign
from crits.core.data_tools import clean_dict
from crits.core.exceptions import ZipFileError
from crits.core.handlers import class_from_id
from crits.core.handlers import build_jtable, jtable_ajax_list, jtable_ajax_delete
from crits.core.handlers import csv_export
from crits.core.user_tools import user_sources, is_admin, is_user_favorite
from crits.core.user_tools import is_user_subscribed
from crits.domains.handlers import get_domain
from crits.emails import OleFileIO_PL
from crits.emails.email import Email
from crits.indicators.handlers import handle_indicator_ind
from crits.indicators.indicator import Indicator
from crits.notifications.handlers import remove_user_from_notification
from crits.samples.handlers import handle_file, handle_uploaded_file, mail_sample
from crits.services.handlers import run_triage
def create_email_field_dict(field_name,
                            field_type,
                            field_value,
                            field_displayed_text,
                            is_allow_create_indicator,
                            is_href,
                            is_editable,
                            is_email_list,
                            is_splunk,
                            href_search_field=None):
    """
    Bundle the metadata describing one e-mail detail field into a dict.

    Every key in the returned dictionary mirrors the parameter of the same
    name, so the template layer receives exactly what it was given: the
    field's name, type, value, display label, the rendering flags
    (indicator-creation link, hyperlink, editable, list-of-addresses,
    splunk link), and the optional search field used to build the href.

    Returns:
        A dictionary of all the input fields, with the input parameter names
        each as a key and its associated value as the value pair.
    """
    return dict(
        field_name=field_name,
        field_type=field_type,
        field_value=field_value,
        field_displayed_text=field_displayed_text,
        is_allow_create_indicator=is_allow_create_indicator,
        is_href=is_href,
        is_editable=is_editable,
        is_email_list=is_email_list,
        is_splunk=is_splunk,
        href_search_field=href_search_field,
    )
def generate_email_csv(request):
    """
    Generate a CSV file of the Email information.

    :param request: The request for this CSV.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    # Delegate straight to the generic CSV exporter for the Email collection.
    return csv_export(request, Email)
def get_email_formatted(email_id, analyst, data_format):
    """
    Format an email in YAML or JSON.

    :param email_id: The ObjectId of the email.
    :type email_id: str
    :param analyst: The user requesting the data.
    :type analyst: str
    :param data_format: The format you want the data in.
    :type data_format: "json" or "yaml"
    :returns: :class:`django.http.HttpResponse`
    """
    # Only emails visible to one of the analyst's sources may be exported.
    sources = user_sources(analyst)
    email = Email.objects(id=email_id, source__name__in=sources).first()
    if not email:
        return HttpResponse(json.dumps({}), mimetype="application/json")
    # CRITs bookkeeping fields that are not part of the email content itself.
    exclude = [
        "created",
        "source",
        "relationships",
        "schema_version",
        "campaign",
        "analysis",
        "bucket_list",
        "ticket",
        "releasability",
        "unsupported_attrs",
        "status",
        "objects",
        "modified",
        "analyst",
        "_id",
        "to",
        "cc",
        "raw_headers",
    ]
    # Dispatch on the requested format; anything unknown yields an empty body.
    formatters = {"yaml": email.to_yaml, "json": email.to_json}
    formatter = formatters.get(data_format)
    if formatter is not None:
        data = {"email_yaml": formatter(exclude=exclude)}
    else:
        data = {"email_yaml": {}}
    return HttpResponse(json.dumps(data), mimetype="application/json")
def get_email_detail(email_id, analyst):
    """
    Generate the email details page.

    :param email_id: The ObjectId of the email.
    :type email_id: str
    :param analyst: The user requesting the data.
    :type analyst: str
    :returns: tuple of (template, args); template is None on success.
    """
    sources = user_sources(analyst)
    email = Email.objects(id=email_id, source__name__in=sources).first()
    if not email:
        return "error.html", {'error': "ID does not exist or insufficient privs for source"}

    email.sanitize(username="%s" % analyst, sources=sources)
    update_data_form = EmailYAMLForm(analyst)
    campaign_form = CampaignForm()
    download_form = DownloadFileForm(initial={"obj_type": 'Email',
                                              "obj_id": email_id})

    # remove pending notifications for user
    remove_user_from_notification("%s" % analyst, email.id, 'Email')

    # subscription widget state
    subscription = {
        'type': 'Email',
        'id': email.id,
        'subscribed': is_user_subscribed("%s" % analyst, 'Email',
                                         email.id),
    }

    objects = email.sort_objects()
    relationships = email.sort_relationships("%s" % analyst, meta=True)
    relationship = {
        'type': 'Email',
        'value': email.id
    }
    comments = {'comments': email.get_comments(),
                'url_key': email.id}
    screenshots = email.get_screenshots(analyst)
    favorite = is_user_favorite("%s" % analyst, 'Email', email.id)

    # Header fields rendered on the page, in display order. Each row is:
    # (field_name, field_type, displayed_text,
    #  allow_create_indicator, is_href, is_editable, is_email_list,
    #  is_splunk, href_search_field)
    # The email attribute name matches field_name for every row.
    field_specs = (
        ("from_address", "Address - e-mail", "From",
         True, True, True, False, True, "from"),
        ("sender", "Address - e-mail", "Sender",
         True, True, True, False, True, "sender"),
        ("to", "String", "To",
         False, True, True, True, False, None),
        ("cc", "String", "CC",
         False, True, True, True, False, None),
        ("date", "String", "Date",
         False, False, True, False, False, None),
        ("isodate", "String", "ISODate",
         False, False, False, False, False, None),
        ("subject", "String", "Subject",
         True, True, True, False, False, "subject"),
        ("x_mailer", "String", "X-Mailer",
         True, True, True, False, False, "x_mailer"),
        ("reply_to", "Address - e-mail", "Reply To",
         True, True, True, False, False, "reply_to"),
        ("message_id", "String", "Message ID",
         True, False, True, False, False, None),
        ("helo", "String", "helo",
         True, True, True, False, False, "helo"),
        ("boundary", "String", "Boundary",
         True, False, True, False, False, None),
        ("originating_ip", "String", "Originating IP",
         True, True, True, False, True, "originating_ip"),
        ("x_originating_ip", "Address - ipv4-addr", "X-Originating IP",
         True, True, True, False, True, "x_originating_ip"),
    )
    email_fields = []
    for (fname, ftype, fdisplay, can_indicator, is_href, editable,
         email_list, splunk, search_field) in field_specs:
        email_fields.append(create_email_field_dict(
            fname,
            ftype,
            getattr(email, fname),
            fdisplay,
            can_indicator, is_href, editable, email_list, splunk,
            href_search_field=search_field
        ))

    # analysis results
    service_results = email.get_analysis_results()

    args = {'objects': objects,
            'email_fields': email_fields,
            'relationships': relationships,
            'comments': comments,
            'favorite': favorite,
            'relationship': relationship,
            'screenshots': screenshots,
            'subscription': subscription,
            'email': email,
            'campaign_form': campaign_form,
            'download_form': download_form,
            'update_data_form': update_data_form,
            'admin': is_admin(analyst),
            'service_results': service_results,
            'rt_url': settings.RT_URL}
    return None, args
def generate_email_jtable(request, option):
    """
    Generate email jtable.

    :param request: The request for this jtable.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    obj_type = Email
    type_ = "email"
    mapper = obj_type._meta['jtable_opts']

    if option == "jtlist":
        # AJAX listing: fetch records, then annotate each with a recipient
        # count derived from the comma-separated 'to' and 'cc' fields.
        response = jtable_ajax_list(obj_type,
                                    mapper['details_url'],
                                    mapper['details_url_key'],
                                    request,
                                    includes=mapper['fields'])
        for doc in response.get('Records', []):
            doc['recip'] = len(doc['to'].split(',')) if doc['to'] else 0
            if doc['cc']:
                doc['recip'] += len(doc['cc'].split(','))
        return HttpResponse(json.dumps(response, default=json_handler),
                            content_type="application/json")

    if option == "jtdelete":
        if jtable_ajax_delete(obj_type, request):
            response = {"Result": "OK"}
        else:
            response = {"Result": "ERROR"}
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")

    jtopts = {
        'title': "Emails",
        'default_sort': mapper['default_sort'],
        'listurl': reverse('crits.%ss.views.%ss_listing' % (type_,
                                                            type_), args=('jtlist',)),
        'deleteurl': reverse('crits.%ss.views.%ss_listing' % (type_,
                                                              type_), args=('jtdelete',)),
        'searchurl': reverse(mapper['searchurl']),
        'fields': mapper['jtopts_fields'],
        'hidden_fields': mapper['hidden_fields'],
        'linked_fields': mapper['linked_fields'],
        'details_link': mapper['details_link'],
        'no_sort': mapper['no_sort']
    }
    jtable = build_jtable(jtopts, request)

    # Toolbar: an "All" button, one filter button per workflow status,
    # and an "Add Email" button. The status buttons only differ in the
    # status value passed to the jtable load call.
    toolbar = [{
        'tooltip': "'All Emails'",
        'text': "'All'",
        'click': "function () {$('#email_listing').jtable('load', {'refresh': 'yes'});}",
        'cssClass': "'jtable-toolbar-center'",
    }]
    for status in ('New', 'In Progress', 'Analyzed', 'Deprecated'):
        toolbar.append({
            'tooltip': "'%s Emails'" % status,
            'text': "'%s'" % status,
            'click': "function () {$('#email_listing').jtable('load', {'refresh': 'yes', 'status': '%s'});}" % status,
            'cssClass': "'jtable-toolbar-center'",
        })
    toolbar.append({
        'tooltip': "'Add Email'",
        'text': "'Add Email'",
        'click': "function () {$('#new-email-fields').click()}",
    })
    jtable['toolbar'] = toolbar

    if option == "inline":
        template = "jtable.html"
        context = {'jtable': jtable,
                   'jtid': '%s_listing' % type_,
                   'button': '%ss_tab' % type_}
    else:
        template = "%s_listing.html" % type_
        context = {'jtable': jtable,
                   'jtid': '%s_listing' % type_}
    return render_to_response(template, context, RequestContext(request))
def handle_email_fields(data, analyst, method):
    """
    Take email fields and convert them into an email object.

    :param data: The fields to include in the email.
    :type data: dict
    :param analyst: The user creating this email object.
    :type analyst: str
    :param method: The method of acquiring this email.
    :type method: str
    :returns: dict with keys:
              "status" (boolean),
              "object" The email object if successful,
              "reason" (str).
    """
    result = {
        'status': False,
        'reason': "",
        'object': None,
        'data': None
    }

    # Date and source are the only required ones.
    # If there is no campaign confidence, default it to low.
    # Remove these items from data so they are not added when merged.
    # dict.pop() replaces the repeated "try: del ...; except: pass" blocks
    # and also avoids the KeyError the unguarded del data['source'] raised
    # when 'source' was absent.
    sourcename = data.pop('source', None)
    source_method = data.pop('source_method', None)
    if source_method:
        method = method + " - " + source_method
    reference = data.pop('source_reference', None)
    bucket_list = data.pop('bucket_list', None)
    ticket = data.pop('ticket', None)
    campaign = data.pop('campaign', None)
    confidence = data.pop('campaign_confidence', 'low')

    # Normalize 'to' and 'cc': a comma-separated string becomes a list of
    # stripped addresses; any falsy value becomes an empty list; an
    # existing non-empty list is left alone.
    for x in ('cc', 'to'):
        y = data.get(x, None)
        if isinstance(y, basestring):
            data[x] = [part.strip() for part in y.split(',')] if y else []
        elif not y:
            data[x] = []

    new_email = Email()
    new_email.merge(data)
    if bucket_list:
        new_email.add_bucket_list(bucket_list, analyst)
    if ticket:
        new_email.add_ticket(ticket, analyst)
    new_email.source = [create_embedded_source(sourcename,
                                               reference=reference,
                                               method=method,
                                               analyst=analyst)]
    if campaign:
        ec = EmbeddedCampaign(name=campaign,
                              confidence=confidence,
                              description="",
                              analyst=analyst,
                              date=datetime.datetime.now())
        new_email.add_campaign(ec)

    try:
        new_email.save(username=analyst)
        new_email.reload()
        run_triage(new_email, analyst)
        result['object'] = new_email
        result['status'] = True
    except Exception as e:
        result['reason'] = "Failed to save object.\n<br /><pre>%s</pre>" % str(e)

    return result
def handle_json(data, sourcename, reference, analyst, method,
                save_unsupported=True, campaign=None, confidence=None):
    """
    Take email in JSON and convert them into an email object.

    :param data: The data for the email.
    :type data: dict
    :param sourcename: The name of the source providing this email.
    :type sourcename: str
    :param reference: The reference to the data from the source.
    :type reference: str
    :param analyst: The user creating this email object.
    :type analyst: str
    :param method: The method of acquiring this email.
    :type method: str
    :param save_unsupported: Save any unsupported fields instead of ignoring.
    :type save_unsupported: boolean
    :param campaign: The campaign to attribute to this email.
    :type campaign: str
    :param confidence: Confidence level of the campaign.
    :type confidence: str
    :returns: dict with keys:
              "status" (boolean),
              "object" The email object if successful,
              "data" the converted email data.
              "reason" (str).
    """
    result = {
        'status': False,
        'reason': "",
        'object': None,
        'data': None
    }

    try:
        converted = json.loads(data)
        if not isinstance(converted, dict):
            # Raise an explicit error instead of a bare `raise` (which has
            # no active exception here and errors out on its own).
            raise ValueError("JSON did not parse to a dictionary.")
    except Exception as e:
        result["reason"] = "Cannot convert data to JSON.\n<br /><pre>%s</pre>" % str(e)
        return result

    result['data'] = converted
    new_email = dict_to_email(result['data'], save_unsupported=save_unsupported)
    if campaign:
        if not confidence:
            confidence = "low"
        ec = EmbeddedCampaign(name=campaign,
                              confidence=confidence,
                              description="",
                              analyst=analyst,
                              date=datetime.datetime.now())
        new_email.add_campaign(ec)
    result['object'] = new_email
    result['object'].source = [create_embedded_source(sourcename,
                                                      reference=reference,
                                                      method=method,
                                                      analyst=analyst)]
    try:
        result['object'].save(username=analyst)
        result['object'].reload()
        run_triage(result['object'], analyst)
    except Exception as e:
        # BUG FIX: previously this fell through and still set status True
        # even though the save failed; now report the failure.
        result['reason'] = "Failed to save object.\n<br /><pre>%s</pre>" % str(e)
        return result

    result['status'] = True
    return result
# if email_id is provided it is the existing email id to modify.
def handle_yaml(data, sourcename, reference, analyst, method, email_id=None,
                save_unsupported=True, campaign=None, confidence=None):
    """
    Take email in YAML and convert them into an email object.

    :param data: The data for the email.
    :type data: dict
    :param sourcename: The name of the source providing this email.
    :type sourcename: str
    :param reference: The reference to the data from the source.
    :type reference: str
    :param analyst: The user creating this email object.
    :type analyst: str
    :param method: The method of acquiring this email.
    :type method: str
    :param email_id: The ObjectId of the existing email to update.
    :type email_id: str
    :param save_unsupported: Save any unsupported fields instead of ignoring.
    :type save_unsupported: boolean
    :param campaign: The campaign to attribute to this email.
    :type campaign: str
    :param confidence: Confidence level of the campaign.
    :type confidence: str
    :returns: dict with keys:
              "status" (boolean),
              "object" The email object if successful,
              "data" the converted email data.
              "reason" (str).
    """
    result = {
        'status': False,
        'reason': "",
        'object': None,
        'data': None
    }

    try:
        # SECURITY NOTE: yaml.load() can instantiate arbitrary Python
        # objects from untrusted input; yaml.safe_load() is the safe
        # variant. Left as-is to preserve behavior -- review before changing.
        converted = yaml.load(data)
        if not isinstance(converted, dict):
            # Raise an explicit error instead of a bare `raise` (which has
            # no active exception here and errors out on its own).
            raise ValueError("YAML did not parse to a dictionary.")
    except Exception as e:
        result["reason"] = "Cannot convert data to YAML.\n<br /><pre>%s</pre>" % str(e)
        return result

    result['data'] = converted
    new_email = dict_to_email(result['data'], save_unsupported=save_unsupported)
    if campaign:
        if not confidence:
            confidence = "low"
        ec = EmbeddedCampaign(name=campaign,
                              confidence=confidence,
                              description="",
                              analyst=analyst,
                              date=datetime.datetime.now())
        new_email.add_campaign(ec)
    result['object'] = new_email

    if email_id:
        old_email = class_from_id('Email', email_id)
        if not old_email:
            result['reason'] = "Unknown email_id."
            return result
        # Can not merge with a source?
        # For now, just save the original source and put it back after merge.
        saved_source = old_email.source
        # XXX: If you use the "Edit YAML" button and edit the "from" field
        # it gets put into the new email object in dict_to_email() correctly
        # but calling to_dict() on that object results in a 'from' key being
        # put into the dictionary, so merging the raw to_dict() would stuff
        # the new 'from' field into unsupported_attrs. To work around this,
        # copy 'from' into the from_address property before merging.
        tmp = result['object'].to_dict()
        if 'from' in tmp:
            tmp['from_address'] = result['object'].from_address
        old_email.merge(tmp, True)
        old_email.source = saved_source
        try:
            old_email.save(username=analyst)
        except Exception as e:
            result['reason'] = "Failed to save object.\n<br /><pre>%s</pre>" % str(e)
            return result
    else:
        result['object'].source = [create_embedded_source(sourcename,
                                                          reference=reference,
                                                          method=method,
                                                          analyst=analyst)]
        try:
            result['object'].save(username=analyst)
            result['object'].reload()
            run_triage(result['object'], analyst)
        except Exception as e:
            result['reason'] = "Failed to save object.\n<br /><pre>%s</pre>" % str(e)
            return result

    result['status'] = True
    return result
def handle_msg(data, sourcename, reference, analyst, method, password='',
               campaign=None, confidence=None):
    """
    Take email in MSG and convert them into an email object.

    :param data: The data for the email.
    :type data: dict
    :param sourcename: The name of the source providing this email.
    :type sourcename: str
    :param reference: The reference to the data from the source.
    :type reference: str
    :param analyst: The user creating this email object.
    :type analyst: str
    :param method: The method of acquiring this email.
    :type method: str
    :param password: The password for the attachment.
    :type password: str
    :param campaign: The campaign to attribute to this email.
    :type campaign: str
    :param confidence: Confidence level of the campaign.
    :type confidence: str
    :returns: dict with keys:
              "status" (boolean),
              "obj_id" The email ObjectId if successful,
              "message" (str)
              "reason" (str).
    """
    response = {'status': False}
    result = parse_ole_file(data)
    if 'error' in result:  # has_key() is deprecated; 'in' works everywhere
        response['reason'] = result['error']
        return response

    result['email']['source'] = sourcename
    result['email']['source_reference'] = reference
    result['email']['campaign'] = campaign
    result['email']['campaign_confidence'] = confidence
    result['email']['bucket_list'] = ""
    result['email']['ticket'] = ""
    if 'date' in result['email']:
        result['email']['isodate'] = date_parser(result['email']['date'],
                                                 fuzzy=True)

    obj = handle_email_fields(result['email'], analyst, method)
    if not obj["status"]:
        response['reason'] = obj['reason']
        return response
    email = obj.get('object')

    # Process attachments and upload as samples
    attach_messages = []
    for attachment in result['attachments']:  # renamed: 'file' shadowed builtin
        type_ = attachment.get('type', '')
        if 'pkcs7' in type_:
            # Encrypted S/MIME blob -- cannot be decrypted, skip it.
            attach_messages.append('%s: Cannot decrypt attachment (pkcs7).' % attachment['name'])
            continue
        # Choose an upload format from the sniffed MIME type.
        mimetype = magic.from_buffer(attachment['data'], mime=True)
        if mimetype is not None and 'application/zip' in mimetype:
            file_format = 'zip'
        elif mimetype is not None and 'application/x-rar' in mimetype:
            file_format = 'rar'
        else:
            file_format = 'raw'
        try:
            cleaned_data = {'file_format': file_format,
                            'password': password}
            r = create_email_attachment(email, cleaned_data, analyst, sourcename,
                                        method, reference, campaign, confidence,
                                        "", "", attachment['data'], attachment['name'])
            if 'success' in r:
                if not r['success']:
                    attach_messages.append("%s: %s" % (attachment['name'],
                                                       r['message']))
                else:
                    attach_messages.append("%s: Added Successfully!" % attachment['name'])
        except BaseException:
            # NOTE(review): broad catch kept from original -- the email is
            # already saved at this point, so report and bail out.
            error_message = ('The email uploaded successfully, but there was '
                            'an error uploading the attachment ' +
                            attachment['name'] + '\n\n' + str(sys.exc_info()))
            response['reason'] = error_message
            return response
    if len(attach_messages):
        response['message'] = '<br/>'.join(attach_messages)
    response['status'] = True
    response['obj_id'] = obj['object'].id
    return response
def handle_pasted_eml(data, sourcename, reference, analyst, method,
                      parent_type=None, parent_id=None, campaign=None,
                      confidence=None):
    """
    Take email in EML and convert them into an email object.

    :param data: The data for the email.
    :type data: dict
    :param sourcename: The name of the source providing this email.
    :type sourcename: str
    :param reference: The reference to the data from the source.
    :type reference: str
    :param analyst: The user creating this email object.
    :type analyst: str
    :param method: The method of acquiring this email.
    :type method: str
    :param parent_type: The top-level object type of the parent.
    :type parent_type: str
    :param parent_id: The ObjectId of the parent.
    :type parent_id: str
    :param campaign: The campaign to attribute to this email.
    :type campaign: str
    :param confidence: Confidence level of the campaign.
    :type confidence: str
    :returns: dict with keys:
              "status" (boolean),
              "reason" (str),
              "object" The email object if successful,
              "data" the converted email data,
              "attachments" (dict).
    """
    # Pasted email headers often lose the leading whitespace on folded
    # (continuation) lines. Walk the text line by line: any line before
    # the body that does not look like a "Name: value" header gets a
    # space prepended so it re-attaches to the previous header.
    header_re = re.compile('^\S+:\s')
    boundary_re = re.compile('boundary="?([^\s"\']+)"?')
    fixed_lines = []
    content_boundary = None
    in_body = False
    for raw_line in data.split("\n"):
        # Remember the MIME boundary if this line declares one.
        boundary_match = boundary_re.search(raw_line)
        if boundary_match:
            content_boundary = boundary_match.group(1)
        # Once the boundary shows up again, we have reached the body.
        if content_boundary and content_boundary in raw_line:
            in_body = True
        if not in_body and header_re.match(raw_line) is None:
            raw_line = " %s" % raw_line
        fixed_lines.append(raw_line)
    return handle_eml("\n".join(fixed_lines), sourcename, reference, analyst,
                      method, parent_type, parent_id, campaign, confidence)
def handle_eml(data, sourcename, reference, analyst, method, parent_type=None,
               parent_id=None, campaign=None, confidence=None):
    """
    Take email in EML and convert them into an email object.

    :param data: The data for the email.
    :type data: dict
    :param sourcename: The name of the source providing this email.
    :type sourcename: str
    :param reference: The reference to the data from the source.
    :type reference: str
    :param analyst: The user creating this email object.
    :type analyst: str
    :param method: The method of acquiring this email.
    :type method: str
    :param parent_type: The top-level object type of the parent.
    :type parent_type: str
    :param parent_id: The ObjectId of the parent.
    :type parent_id: str
    :param campaign: The campaign to attribute to this email.
    :type campaign: str
    :param confidence: Confidence level of the campaign.
    :type confidence: str
    :returns: dict with keys:
              "status" (boolean),
              "reason" (str),
              "object" The email object if successful,
              "data" the converted email data,
              "attachments" (dict).
    """
    result = {
        'status': False,
        'reason': "",
        'object': None,
        'data': None,
        'attachments': {}
    }
    msg_import = {'raw_header': ''}
    # Matches a raw message wrapped in an IMAP FETCH dialog.
    reImap = re.compile(r"(\*\s\d+\sFETCH\s.+?\r\n)(.+)\).*?OK\s(UID\sFETCH\scompleted|Success)", re.M | re.S)

    # search for SMTP dialog
    start = data.find("DATA")
    end = data.find("\x0d\x0a\x2e\x0d\x0a")  # CRLF '.' CRLF end-of-data marker
    if start >= 0 and end >= 0:
        # Pull MAIL FROM / RCPT TO out of the pre-DATA SMTP commands.
        premail = data[:start]
        mailfrom = None
        rcptto = None
        for preheaders in premail.splitlines():
            mfpos = preheaders.find("MAIL FROM")
            if mfpos > -1:
                try:
                    mailfrom = unicode(preheaders[mfpos + 10:])
                except UnicodeDecodeError:
                    mailfrom = unicode(preheaders[mfpos + 10:], errors="replace")
            rcpos = preheaders.find("RCPT TO")
            if rcpos > -1:
                try:
                    rcptto = unicode(preheaders[rcpos + 9:])
                except UnicodeDecodeError:
                    rcptto = unicode(preheaders[rcpos + 9:], errors="replace")
        if mailfrom:
            msg_import['mailfrom'] = mailfrom
        if rcptto:
            msg_import['rcptto'] = rcptto
        mail1 = data[start + 6:end]
        stripped_mail = ""
        for line in mail1.splitlines(True):
            # Strip SMTP response codes. Some people like to grab a single
            # TCP session in wireshark and save it to disk and call it an EML.
            if line[:4] in ['200 ', '211 ', '214 ', '220 ', '221 ', '250 ',
                            '250-', '251 ', '354 ', '421 ', '450 ', '451 ',
                            '452 ', '500 ', '501 ', '502 ', '503 ', '504 ',
                            '521 ', '530 ', '550 ', '551 ', '552 ', '553 ',
                            '554 ']:
                continue
            stripped_mail += line
    else:
        # No SMTP dialog found, search for IMAP markers
        match = reImap.search(data)
        if match:
            stripped_mail = match.groups()[1]
        else:
            stripped_mail = data

    msg = eml.message_from_string(str(stripped_mail))
    if not msg.items():
        result['reason'] = "No items found."
        return result

    # Clean up headers: drop non-printable characters and normalize the
    # header names into valid document keys.
    for d in msg.items():
        cleand = ''.join([x for x in d[1] if (ord(x) < 127 and ord(x) >= 32)])
        msg_import[d[0].replace(".",
                                "").replace("$",
                                            "").replace("\x00",
                                                        "").replace("-",
                                                                    "_").lower()] = cleand
        msg_import['raw_header'] += d[0] + ": " + cleand + "\n"

    # Rip out anything that looks like an email address and store it,
    # de-duplicated while preserving order. (BUG FIX: the old dedup check
    # compared against an always-empty list and never removed duplicates.)
    if 'to' in msg_import:
        to_list = re.findall(r'[\w\-][\w\-\.]+@[\w\-][\w\-\.]+[a-zA-Z]{1,4}',
                             msg_import['to'])
        seen = set()
        msg_import['to'] = []
        for addr in to_list:
            if addr not in seen:
                seen.add(addr)
                msg_import['to'].append(addr)

    # Parse the body of the email
    msg_import["raw_body"] = ""
    for part in msg.walk():
        if part.get_content_maintype() == 'multipart':
            continue
        if part.get_content_maintype() == "text":
            content = part.get_payload(decode=True)
            if content:
                try:
                    message_part = unicode(content)
                except UnicodeDecodeError:
                    message_part = unicode(content, errors="replace")
                msg_import["raw_body"] = msg_import["raw_body"] + \
                    message_part + "\n"

        # Check for attachment in mail parts
        filename = part.get_filename()
        attach = part.get_payload(decode=True)
        if attach is not None and len(attach):
            md5 = hashlib.md5(attach).hexdigest()
            mtype = magic.from_buffer(attach)
            if filename is not None:
                try:
                    filename = unicode(filename)
                except UnicodeDecodeError:
                    filename = unicode(filename, errors="replace")
            else:
                filename = md5
            result['attachments'][md5] = {
                'filename': filename,
                'magic': mtype,
                'blob': attach
            }

    result['data'] = msg_import
    new_email = dict_to_email(result['data'])
    if campaign:
        if not confidence:
            confidence = "low"
        ec = EmbeddedCampaign(name=campaign,
                              confidence=confidence,
                              description="",
                              analyst=analyst,
                              date=datetime.datetime.now())
        new_email.add_campaign(ec)
    result['object'] = new_email
    result['object'].source = [create_embedded_source(sourcename,
                                                      reference=reference,
                                                      method=method,
                                                      analyst=analyst)]

    # Save the Email first, so we can have the id to use to create
    # relationships.
    if not result['object'].date:
        result['object'].date = None
    try:
        result['object'].save(username=analyst)
        result['object'].reload()
        run_triage(result['object'], analyst)
    except Exception as e:
        result['reason'] = "Failed to save email.\n<br /><pre>%s</pre>" % str(e)
        return result

    # Relate the email back to the pcap, if it came from PCAP.
    if parent_id and parent_type:
        ret = result['object'].add_relationship(rel_id=parent_id,
                                                type_=parent_type,
                                                rel_type='Extracted_From',
                                                analyst=analyst,
                                                get_rels=False)
        if not ret['success']:
            # BUG FIX: the detail used to live on a dangling "+ ..." line
            # (a separate unary-plus statement that raised at runtime) and
            # referenced result['message'], a key that does not exist; use
            # the message from the relationship call itself.
            result['reason'] = ("Failed to create relationship.\n<br /><pre>%s</pre>"
                                % ret.get('message', ''))
            return result

        # Save the email again since it now has a new relationship.
        try:
            result['object'].save(username=analyst)
        except Exception as e:
            # BUG FIX: same dangling "+ ..." continuation as above.
            result['reason'] = "Failed to save email.\n<br /><pre>%s</pre>" % str(e)
            return result

    for (md5_, attachment) in result['attachments'].items():
        if handle_file(attachment['filename'],
                       attachment['blob'],
                       sourcename,
                       method='eml_processor',
                       reference=reference,
                       related_id=result['object'].id,
                       user=analyst,
                       md5_digest=md5_,
                       related_type='Email',
                       campaign=campaign,
                       confidence=confidence,
                       relationship='Extracted_From') is None:
            # BUG FIX: same dangling "+ ..." continuation as above.
            result['reason'] = "Failed to save attachment.\n<br /><pre>%s</pre>" % md5_
            return result

    result['status'] = True
    return result
def dict_to_email(d, save_unsupported=True):
    """
    Convert a dictionary to an email.

    Standardize all key names:
        - Convert hyphens and whitespace to underscores
        - Remove all non-alphanumeric and non-underscore characters.
        - Combine multiple underscores.
        - convert alpha characters to lowercase.

    :param d: The dictionary to convert.
    :type d: dict
    :param save_unsupported: Whether or not to save unsupported fields.
    :type save_unsupported: boolean
    :returns: :class:`crits.emails.email.Email`
    """
    # Iterate over a snapshot of the keys: the original looped over the
    # live dict while inserting/deleting entries, which is undefined
    # behavior and can corrupt the iteration.
    for key in list(d):
        newkey = re.sub(r'[\s-]', '_', key)
        newkey = re.sub(r'[\W]', '', newkey)
        newkey = re.sub(r'_+', '_', newkey)
        newkey = newkey.lower()
        if key != newkey:
            d[newkey] = d[key]
            del d[key]

    # Remove keys which we don't want the user to modify via YAML.
    keys = ('schema_version', 'comments', 'objects', 'campaign',
            'relationships', 'source', 'releasability', 'analysis',
            'bucket_list', 'ticket', 'objects')
    clean_dict(d, keys)

    if 'x_originating_ip' in d and d['x_originating_ip']:
        # Keep only the first IPv4-looking token. Guard against no match:
        # the original indexed [0] unconditionally and raised IndexError
        # when the header contained no dotted-quad.
        ips = re.findall(r'[0-9]+(?:\.[0-9]+){3}', d['x_originating_ip'])
        if ips:
            d['x_originating_ip'] = ips[0]

    if 'date' in d and d['date']:
        if isinstance(d['date'], datetime.datetime):
            d['isodate'] = d['date']
            d['date'] = str(d['date'])
        else:
            d['isodate'] = date_parser(d['date'], fuzzy=True)

    # Wrap bare string to/cc values in a list.
    if 'to' in d and isinstance(d['to'], basestring) and len(d['to']) > 0:
        d['to'] = [d['to']]
    if 'cc' in d and isinstance(d['cc'], basestring) and len(d['cc']) > 0:
        d['cc'] = [d['cc']]

    # 'from' is a Python keyword, so the model stores it as from_address.
    if 'from' in d:
        d['from_address'] = d['from']
        del d['from']

    if save_unsupported:
        # Promote previously-unsupported attributes back to top level.
        for (k, v) in d.get('unsupported_attrs', {}).items():
            d[k] = v
    if 'unsupported_attrs' in d:
        del d['unsupported_attrs']

    crits_email = Email()
    crits_email.merge(d)
    return crits_email
def generate_email_cybox(email_id):
    """
    Generate Cybox for a given email.

    :param email_id: The ObjectId of the email.
    :returns: str, or None if the email does not exist.
    """
    email = Email.objects(id=email_id).first()
    if not email:
        return None
    return email.to_cybox_observable()[0][0]
def update_email_header_value(email_id, type_, value, analyst):
    """
    Update a header value for an email.

    :param email_id: The ObjectId of the email to update.
    :type email_id: str
    :param type_: The header type.
    :type type_: str
    :param value: The header value.
    :type value: str
    :param analyst: The user updating the header field.
    :type analyst: str
    :returns: dict with keys:
              "success" (boolean),
              "message" (str),
              "isodate" (datetime.datetime) if the header field was "date".
    """
    # Reject recipient lists containing characters that would break the
    # address parsing / target links below.
    if type_ in ('to', 'cc'):
        bad_chars = "<>^&(){}[]!#$%=+;:'/\|?~`"
        if any((bad_char in value) for bad_char in bad_chars):
            return {'success': False, 'message': "Invalid characters in list"}

    email = Email.objects(id=email_id).first()
    if not email:
        return {'success': False, 'message': "Could not find email"}

    try:
        if type_ in ('to', 'cc'):
            # Split the comma-separated list, dropping empty entries.
            value = [v.strip() for v in value.split(",") if v.strip()]
        setattr(email, type_, value)
        if type_ == 'date':
            # Keep the machine-readable isodate in sync with the raw date.
            email.isodate = date_parser(value, fuzzy=True)
        email.save(username=analyst)
        if type_ == 'date':
            result = {'success': True,
                      'message': "Successfully updated email",
                      'isodate': email.isodate.strftime("%Y-%m-%d %H:%M:%S.%f")}
        elif type_ in ('to', 'cc'):
            links = ""
            for v in value:
                # dirty ugly hack to "urlencode" the resulting URL
                url = reverse('crits.targets.views.target_info',
                              args=[v]).replace('@', '%40')
                links += '<a href="%s">%s</a>, ' % (url, v)
            result = {'success': True,
                      'message': "Successfully updated email",
                      'links': links}
        else:
            result = {'success': True,
                      'message': "Successfully updated email"}
    except Exception as e:
        # BUG FIX: store str(e), not the exception object, so the result
        # can be serialized for the response.
        result = {'success': False, 'message': str(e)}
    return result
def create_indicator_from_header_field(email, header_field, ind_type,
                                       analyst, request):
    """
    Create an indicator out of the header field.

    :param email: The email to get the header from.
    :type email: :class:`crits.emails.email.Email`
    :param header_field: The header type.
    :type header_field: str
    :param ind_type: The Indicator type to use.
    :type ind_type: str
    :param analyst: The user updating the header field.
    :type analyst: str
    :param request: The Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: dict with keys:
              "success" (boolean),
              "message" (str),
    """
    value = getattr(email, header_field)

    # Validate inputs before creating anything. Identity check ("is None")
    # replaces the original "== None" comparisons.
    if value is None or value.strip() == "":
        return {
            'success': False,
            'message': "Can't create indicator from email field [" +
                       str(header_field) + "] with an empty value field",
        }
    if ind_type is None or ind_type.strip() == "":
        return {
            'success': False,
            'message': "Can't create indicator from email field " +
                       "with an empty type field",
        }

    newindicator = handle_indicator_ind(value,
                                        email.source,
                                        '',
                                        ind_type,
                                        analyst=analyst)
    if not newindicator.get('objectid'):
        return {
            'success': False,
            'message': "Error adding relationship: Could not find email/indicator",
        }

    indicator = Indicator.objects(id=newindicator['objectid']).first()
    results = email.add_relationship(rel_item=indicator,
                                     rel_type="Related_To",
                                     analyst=analyst,
                                     get_rels=True)
    if not results['success']:
        return {
            'success': False,
            'message': "Error adding relationship: %s" % results['message']
        }

    email.save(username=analyst)
    indicator.save(username=analyst)
    relationship = {'type': 'Email', 'value': email.id}
    message = render_to_string('relationships_listing_widget.html',
                               {'relationship': relationship,
                                'relationships': results['message']},
                               RequestContext(request))
    return {'success': True, 'message': message}
def create_email_attachment(email, cleaned_data, analyst, source, method="Upload",
                            reference="", campaign=None, confidence='low',
                            bucket_list=None, ticket=None, filedata=None,
                            filename=None, md5=None, email_addr=None, inherit_sources=False):
    """
    Create an attachment for an email.

    Uploads the attachment as a Sample related to the email, either from raw
    file data (`filedata`) or by MD5 reference (`md5`), optionally emailing
    the resulting sample(s) to `email_addr`.

    :param email: The email to use.
    :type email: :class:`crits.emails.email.Email`
    :param cleaned_data: Cleaned form information about the email.
    :type cleaned_data: dict
    :param analyst: The user creating this attachment.
    :type analyst: str
    :param source: The name of the source.
    :type source: str
    :param method: The method for this file upload.
    :type method: str
    :param reference: The source reference.
    :type reference: str
    :param campaign: The campaign to attribute to this attachment.
    :type campaign: str
    :param confidence: The campaign confidence.
    :type confidence: str
    :param bucket_list: The list of buckets to assign to this attachment.
    :type bucket_list: str
    :param ticket: The ticket to assign to this attachment.
    :type ticket: str
    :param filedata: The attachment.
    :type filedata: request file data.
    :param filename: The name of the file.
    :type filename: str
    :param md5: The MD5 of the file.
    :type md5: str
    :param email_addr: Email address to which to email the sample
    :type email_addr: str
    :param inherit_sources: 'True' if attachment should inherit Email's Source(s)
    :type inherit_sources: bool
    :returns: dict with keys "success" (boolean) and "message" (str).
    """
    response = {'success': False,
                'message': 'Unknown error; unable to upload file.'}
    if filename:
        filename = filename.strip()

    # If selected, new sample inherits the campaigns of the related email.
    if cleaned_data.get('inherit_campaigns'):
        if campaign:
            email.campaign.append(EmbeddedCampaign(name=campaign, confidence=confidence, analyst=analyst))
        campaign = email.campaign

    inherited_source = email.source if inherit_sources else None

    try:
        if filedata:
            # Upload from raw file data.
            result = handle_uploaded_file(filedata,
                                          source,
                                          method,
                                          reference,
                                          cleaned_data['file_format'],
                                          cleaned_data['password'],
                                          analyst,
                                          campaign,
                                          confidence,
                                          related_id=email.id,
                                          related_type='Email',
                                          filename=filename,
                                          bucket_list=bucket_list,
                                          ticket=ticket,
                                          inherited_source=inherited_source)
        else:
            # No file data: register the sample by MD5 reference only.
            if md5:
                md5 = md5.strip().lower()
            result = handle_uploaded_file(None,
                                          source,
                                          method,
                                          reference,
                                          cleaned_data['file_format'],
                                          None,
                                          analyst,
                                          campaign,
                                          confidence,
                                          related_id=email.id,
                                          related_type='Email',
                                          filename=filename,
                                          md5=md5,
                                          bucket_list=bucket_list,
                                          ticket=ticket,
                                          inherited_source=inherited_source,
                                          is_return_only_md5=False)
    except ZipFileError, zfe:
        return {'success': False, 'message': zfe.value}
    else:
        # handle_uploaded_file returns a list: multiple entries means an
        # archive was expanded into several samples; a single entry is
        # either a result dict (md5-reference path) or a sample md5.
        if len(result) > 1:
            response = {'success': True, 'message': 'Files uploaded successfully. '}
        elif len(result) == 1:
            if not filedata:
                response['success'] = result[0].get('success', False)
                if(response['success'] == False):
                    response['message'] = result[0].get('message', response.get('message'))
                else:
                    # Normalize to a list of md5s for the emailing loop below.
                    result = [result[0].get('object').md5]
                    response['message'] = 'File uploaded successfully. '
            else:
                response = {'success': True, 'message': 'Files uploaded successfully. '}
        if not response['success']:
            return response
        else:
            if email_addr:
                # Best-effort notification: record per-sample failures in
                # the message but keep going through the list.
                for s in result:
                    email_errmsg = mail_sample(s, [email_addr])
                    if email_errmsg is not None:
                        response['success'] = False
                        msg = "<br>Error emailing sample %s: %s\n" % (s, email_errmsg)
                        response['message'] = response['message'] + msg
    return response
def parse_ole_file(file):
    """
    Parse an OLE2.0 file to obtain data inside an email including attachments.

    Returns a dict with keys 'email', 'attachments' and 'received_date', or a
    dict with an 'error' key if the upload is not a valid OLE2 (.msg) file.

    References:
    http://www.fileformat.info/format/outlookmsg/
    http://www.decalage.info/en/python/olefileio
    https://code.google.com/p/pyflag/source/browse/src/FileFormats/OLE2.py
    http://cpansearch.perl.org/src/MVZ/Email-Outlook-Message-0.912/lib/Email/Outlook/Message.pm
    """
    header = file.read(len(OleFileIO_PL.MAGIC))

    # Verify the file is in OLE2 format first
    if header != OleFileIO_PL.MAGIC:
        return {'error': 'The upload file is not a valid Outlook file. It must be in OLE2 format (.msg)'}

    # MAPI property tags for the streams of interest.
    msg = {'subject': '_0037',
           'body': '_1000',
           'header': '_007D',
           'message_class': '_001A',
           'recipient_email': '_39FE',
           'attachment_name': '_3707',
           'attachment_data': '_3701',
           'attachment_type': '_370E',
           }

    file.seek(0)
    data = file.read()
    msg_file = StringIO.StringIO(data)
    ole = OleFileIO_PL.OleFileIO(msg_file)

    # Helper function to grab data out of stream objects
    def get_stream_data(entry):
        stream = ole.openstream(entry)
        data = stream.read()
        stream.close()
        return data

    # Parse the OLE streams and get attachments, subject, body, headers, and class
    # The email dict is what will be put into MongoDB for CRITs
    attachments = {}
    email = {}
    email['to'] = []
    # BUGFIX: initialize message_class. The '_001A' stream is not guaranteed to
    # be present, and the variable is referenced unconditionally after the loop
    # (previously a NameError for messages without that stream).
    message_class = ''
    for entry in ole.listdir():
        if 'attach' in entry[0]:
            # Attachments are keyed by directory entry in the stream
            # e.g. '__attach_version1.0_#00000000'
            if entry[0] not in attachments:
                attachments[entry[0]] = {}
            if msg['attachment_name'] in entry[-1]:
                attachments[entry[0]].update({'name': get_stream_data(entry).decode('utf-16')})
            if msg['attachment_data'] in entry[-1]:
                attachments[entry[0]].update({'data': get_stream_data(entry)})
            if msg['attachment_type'] in entry[-1]:
                attachments[entry[0]].update({'type': get_stream_data(entry).decode('utf-16')})
        else:
            if msg['subject'] in entry[-1]:
                email['subject'] = get_stream_data(entry).decode('utf-16')
            if msg['body'] in entry[-1]:
                email['raw_body'] = get_stream_data(entry).decode('utf-16')
            if msg['header'] in entry[-1]:
                email['raw_header'] = get_stream_data(entry).decode('utf-16')
            if msg['recipient_email'] in entry[-1]:
                email['to'].append(get_stream_data(entry).decode('utf-16').lower())
            if msg['message_class'] in entry[-1]:
                message_class = get_stream_data(entry).decode('utf-16').lower()
    ole.close()

    # Process headers to extract data
    headers = Parser().parsestr(email.get('raw_header', ''), headersonly=True)
    email['from_address'] = headers.get('From', '')
    email['reply_to'] = headers.get('Reply-To', '')
    email['date'] = headers.get('Date', '')
    email['message_id'] = headers.get('Message-ID', '')
    email['x_mailer'] = headers.get('X-Mailer', '')
    email['x_originating_ip'] = headers.get('X-Originating-IP', '')
    email['sender'] = getaddresses(headers.get_all('Sender', '')) # getaddresses returns list [(name, email)]

    # If no sender, set the email address found in From:
    if not email['sender']:
        email['sender'] = getaddresses(headers.get_all('From', ''))
    if len(email['sender']) > 0:
        email['sender'] = email['sender'][0][1]
    else:
        email['sender'] = ''

    # Get list of recipients and add to email['to'] if not already there
    # Some emails do not have a stream for recipients (_39FE)
    to = headers.get_all('To', [])
    cc = headers.get_all('CC', [])
    resent_to = headers.get_all('Resent-To', [])
    resent_cc = headers.get_all('Resent-CC', [])
    recipients = getaddresses(to + cc + resent_to + resent_cc)
    for r in recipients:
        addr = r[1].lower()
        # If BCC then addr could be blank or set to undisclosed-recipients:
        if addr and addr not in email['to'] and not re.match(r'^undisclosed-recipients[:;]?(?::;)?$', addr):
            email['to'].append(addr)

    # Check for encrypted and signed messages. The body will be empty in this case
    # Message classes: http://msdn.microsoft.com/en-us/library/ee200767%28v=exchg.80%29.aspx
    if message_class == 'ipm.note.smime' and not email.has_key('raw_body'):
        email['raw_body'] = '<ENCRYPTED>'
    if message_class == 'ipm.note.smime.multipartsigned' and not email.has_key('raw_body'):
        email['raw_body'] = '<DIGITALLY SIGNED: body in smime.p7m>'

    # Parse Received headers to get Helo and X-Originating-IP
    # This can be unreliable since Received headers can be reordered by gateways
    # and the date may not be in sync between systems. This is best effort based
    # on the date as it appears in the Received header. In some cases there is no
    # Received header present
    #
    # Received: from __ by __ with __ id __ for __ ; date
    #
    # See helper functions _get_received_from, _get_received_by, _get_received_date
    current_datetime = datetime.datetime.now()
    earliest_helo_date = current_datetime
    earliest_ip_date = current_datetime
    email['helo'] = ''
    originating_ip = ''
    last_from = ''
    helo_for = ''
    all_received = headers.get_all('Received')
    crits_config = CRITsConfig.objects().first()
    if crits_config:
        email_domain = get_domain(crits_config.crits_email.split('@')[-1])[0]
    else:
        email_domain = ''
    if all_received:
        for received in all_received:
            received_from = _get_received_from(received).lower() # from __
            received_by = _get_received_by(received).lower() # by __ with __ id __
            received_for = _get_received_for(received).lower() # for <email>
            date = _get_received_date(received) # date
            try:
                current_date = datetime.datetime.fromtimestamp(mktime_tz(parsedate_tz(date))) # rfc2822 -> Time -> Datetime
            except:
                # Exception will occur if the date is not in the Received header. This could be
                # where the originating IP is. e.g. Received: from 11.12.13.14 by rms-us019 with HTTP
                current_date = datetime.datetime.min
            grp = re.search(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b', received_from)
            if grp and not _is_reserved_ip(grp.group()) and ' localhost ' not in received_from:
                if email_domain not in received_from and email_domain in received_by:
                    if(current_date < earliest_helo_date):
                        helo_for = parseaddr(received_for.strip())[1]
                        earliest_helo_date = current_date
                        email['helo'] = received_from
                else:
                    last_from = received_from
            if grp and not email['x_originating_ip'] and not _is_reserved_ip(grp.group()):
                if current_date < earliest_ip_date:
                    earliest_ip_date = current_date
                    originating_ip = grp.group()

    # If no proper Helo found, just use the last received_from without a reserved IP
    if not email['helo']:
        email['helo'] = last_from

    # Set the extracted originating ip. If not found, then just use the IP from Helo
    if not email['x_originating_ip']:
        if originating_ip:
            email['x_originating_ip'] = originating_ip
        else:
            grp = re.search(r'\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b', email['helo'])
            if grp:
                email['x_originating_ip'] = grp.group()

    # Add the email address found in Helo
    if helo_for and '@' in helo_for:
        if helo_for not in email['to']:
            email['to'].append(helo_for)

    # If no Helo date found, then try to use the Date field
    if earliest_helo_date == current_datetime and email['date']:
        earliest_helo_date = datetime.datetime.fromtimestamp(mktime_tz(parsedate_tz(email['date'])))

    return {'email': email, 'attachments': attachments.values(), 'received_date': earliest_helo_date}
def _get_received_from(received_header):
"""
Helper function to grab the 'from' part of a Received email header.
"""
received_header = received_header.replace('\r', '').replace('\n', '')
info = received_header.split('by ')
try:
return info[0]
except:
''
def _get_received_by(received_header):
"""
Helper function to grab the 'by' part of a Received email header.
"""
received_header = received_header.replace('\r', '').replace('\n', '')
info = received_header.split('by ')
try:
return info[-1].split('for ')[0]
except:
return ''
def _get_received_for(received_header):
"""
Helper function to grab the 'for' part of a Received email header
WARNING: If 'for' is not there, the entire Received header is returned.
"""
received_header = received_header.replace('\r', '').replace('\n', '')
info = received_header.split('for ')
try:
return info[-1].split(';')[0]
except:
return ''
def _get_received_date(received_header):
"""
Helper function to grab the date part of a Received email header.
"""
received_header = received_header.replace('\r', '').replace('\n', '')
date = received_header.split(';')
try:
return date[-1]
except:
''
def _is_reserved_ip(ip):
"""
Simple test to detect if an IP is private or loopback. Does not check
validity of the address.
"""
grp = re.match(r'127.\d{1,3}.\d{1,3}.\d{1,3}', ip) # 127.0.0.0/8
if grp:
return True
grp = re.match(r'10.\d{1,3}.\d{1,3}.\d{1,3}', ip) # 10.0.0.0/8
if grp:
return True
grp = re.match(r'192.168.\d{1,3}.\d{1,3}', ip) # 192.168.0.0/16
if grp:
return True
grp = re.match(r'172.(1[6-9]|2[0-9]|3[0-1]).\d{1,3}.\d{1,3}', ip) # 172.16.0.0/12
if grp:
return True
# No matches
return False
| 37.951351
| 123
| 0.539398
|
4a00a3c3d26a63ae9487fe3ae2f9e2ba7fd9aa9f
| 3,108
|
py
|
Python
|
che/che/spiders/artlist.py
|
worry1613/che-spider
|
32f75249db6bc15d4a795cb20aab428cf32f855d
|
[
"MIT"
] | null | null | null |
che/che/spiders/artlist.py
|
worry1613/che-spider
|
32f75249db6bc15d4a795cb20aab428cf32f855d
|
[
"MIT"
] | null | null | null |
che/che/spiders/artlist.py
|
worry1613/che-spider
|
32f75249db6bc15d4a795cb20aab428cf32f855d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @创建时间 : 10/12/2018
# @作者 : worry1613(549145583@qq.com)
# GitHub : https://github.com/worry1613
# @CSDN : http://blog.csdn.net/worryabout/
from bs4 import BeautifulSoup
from scrapy_redis_bloomfilter import bloomfilter
from scrapy_redis_bloomfilter.spiders import RedisSpider
import json
# from scrapy.spiders import Spider
from scrapy.http import Request
import redis
from che.util import get_js,payload_for_get
from che.settings import USERS,REDIS_HOST,REDIS_PORT,REDIS_DB,ARTICLES
# Module-level redis connection shared by all spider instances.
pool = redis.ConnectionPool(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)
rserver = redis.StrictRedis(connection_pool=pool)
# Pipeline used to batch article-URL pushes; executed in parse_artlist.
p = rserver.pipeline()
# Bloom filter used to de-duplicate article URLs across crawl runs.
bf = bloomfilter.BloomFilter(rserver,key='articles:bloomfilter')
class ArticlesSpider(RedisSpider):
    """Crawl a Toutiao user's profile page, then page through the user's
    article list and push each new article URL into redis."""
    # class ArticlesSpider(Spider):
    name = "userarts"
    redis_key = USERS
    # start_urls = ['http://www.toutiao.com/c/user/token/MS4wLjABAAAAz2T7HE2F-fbTc8WdKw_XLKnMdmzhfEGwuNkwbjluXdI/']
    def __init__(self, *args, **kwargs):
        # Dynamically define the allowed domains list.
        domain = kwargs.pop('domain', '')
        self.allowed_domains = filter(None, domain.split(','))
        super(ArticlesSpider, self).__init__(*args, **kwargs)
    def parse(self,response):
        """
        Do no data processing here; just extract the user id, build the
        article-list URL and hand it off for crawling.
        :param response: user profile page response
        :return: yields a Request for the first page of the article list
        """
        data = response.body
        soup = BeautifulSoup(data, "html5lib")
        # The user id is embedded in an inline <script>, as "id: <number>,".
        script = soup.find('script', {'type': 'text/javascript'}).get_text()
        start = script.find('id:') + 3
        end = script[start:].find(',')
        userid = script[start:start + end].strip()
        # NOTE(review): file opened without a context manager and appended to
        # per request — confirm this is intentional before changing.
        fi = open('data/userids.txt','a')
        fi.write(str(userid)+'\n')
        fi.close()
        # Anti-scraping signature parameters ('as', 'cp', '_signature')
        # computed by the project helper for page 1.
        Honey = payload_for_get(userid,1,'0')
        _as = Honey['as']
        _cp = Honey['cp']
        _sign = Honey['_signature']
        url = 'https://www.toutiao.com/c/user/article/?page_type=1' \
              '&user_id=%s&max_behot_time=%d&count=20' \
              '&as=%s&cp=%s&_signature=%s' % (userid, 0, _as, _cp,_sign)
        yield Request(url=url, callback=self.parse_artlist, dont_filter=True,meta={'userid':userid})
    def parse_artlist(self, response):
        """Parse one page of the article-list JSON API: queue new article
        URLs into redis and follow to the next page via max_behot_time."""
        bodyjson = json.loads(response._body)
        if not bodyjson['data']:
            return
        articles = ['https://www.toutiao.com' + b['source_url'] for b in bodyjson['data']]
        for a in articles:
            # Skip URLs already seen (bloom filter de-duplication).
            if not bf.exists(a):
                bf.insert(a)
                p.lpush(ARTICLES, a)
        p.execute()
        # Pagination cursor for the next API page.
        max_behot_time = bodyjson['next']['max_behot_time']
        userid = response.meta['userid']
        Honey = payload_for_get(userid, 1, str(max_behot_time))
        _as = Honey['as']
        _cp = Honey['cp']
        _sign = Honey['_signature']
        url = 'https://www.toutiao.com/c/user/article/?page_type=1' \
              '&user_id=%s&max_behot_time=%d&count=20' \
              '&as=%s&cp=%s&_signature=%s' % (userid, max_behot_time, _as, _cp, _sign)
        yield Request(url=url, callback=self.parse_artlist, dont_filter=True, meta={'userid': userid})
| 39.846154
| 115
| 0.631918
|
4a00a440138618ef34003276a834f3e75055a978
| 445
|
py
|
Python
|
GDAL/ReadHDF.py
|
xiaoke0O/Tests
|
ec8124879be31d64d526bf274dcd4edb4f85365c
|
[
"MIT"
] | null | null | null |
GDAL/ReadHDF.py
|
xiaoke0O/Tests
|
ec8124879be31d64d526bf274dcd4edb4f85365c
|
[
"MIT"
] | null | null | null |
GDAL/ReadHDF.py
|
xiaoke0O/Tests
|
ec8124879be31d64d526bf274dcd4edb4f85365c
|
[
"MIT"
] | null | null | null |
from osgeo import gdal
# Open the MODIS HDF file read-only and list its subdatasets (one per band).
datasets = gdal.Open("MOD13A3.A2020001.h21v03.006.2020034153005.hdf", gdal.GA_ReadOnly).GetMetadata('SUBDATASETS')
# Print the keys of each subdataset entry
print(datasets.keys())
# Open the first band
dataset1 = gdal.Open(datasets["SUBDATASET_1_NAME"])
dataset1_data = dataset1.ReadAsArray()
# Open the second band
dataset2 = gdal.Open(datasets["SUBDATASET_2_NAME"])
dataset2_data = dataset2.ReadAsArray()
print("dataset1\n", dataset1_data)
print("dataset2\n", dataset2_data)
| 29.666667
| 114
| 0.786517
|
4a00a458f11700c59130133fe6b65dbe4e91e351
| 555
|
py
|
Python
|
setup.py
|
h0rac/raiden-python
|
df722086759e111f0846a7c6fd5433febc6c3b42
|
[
"BSD-3-Clause"
] | 5
|
2020-10-17T18:02:59.000Z
|
2021-07-05T20:39:26.000Z
|
setup.py
|
h0rac/raiden-python
|
df722086759e111f0846a7c6fd5433febc6c3b42
|
[
"BSD-3-Clause"
] | 1
|
2021-03-22T04:48:33.000Z
|
2021-03-22T04:48:33.000Z
|
setup.py
|
h0rac/raiden-python
|
df722086759e111f0846a7c6fd5433febc6c3b42
|
[
"BSD-3-Clause"
] | 1
|
2020-10-19T06:29:47.000Z
|
2020-10-19T06:29:47.000Z
|
from setuptools import setup
# Packaging metadata for the raiden_python distribution.
setup(
    name='raiden_python',
    version='1.0.2',
    description='Raiden api',
    url='',
    author='Grzegorz Wypych (h0rac) & Adam Laurie (M@jor Malfunction)',
    author_email='',
    license='BSD 3-clause',
    packages=['raiden_python'],
    # Runtime dependency: serial-port access.
    install_requires=['pySerial',
                      ],
    classifiers=[
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3',
    ],
)
| 25.227273
| 71
| 0.540541
|
4a00a4c2fc1262f0f24b288e210ed8b046cbac44
| 5,045
|
py
|
Python
|
src/upax/importer.py
|
jddixon/upax
|
23037212570d35a6d5d01b9ebd9f60280e4eacc5
|
[
"MIT"
] | null | null | null |
src/upax/importer.py
|
jddixon/upax
|
23037212570d35a6d5d01b9ebd9f60280e4eacc5
|
[
"MIT"
] | null | null | null |
src/upax/importer.py
|
jddixon/upax
|
23037212570d35a6d5d01b9ebd9f60280e4eacc5
|
[
"MIT"
] | null | null | null |
# upax/__init__.py
import re
try:
from os.scandir import scandir
except BaseException:
from scandir import scandir
from xlattice import HashTypes
from upax.server import BlockingServer
__all__ = ['Importer', ]

# PATs AND REs ######################################################
# Two-hex-digit subdirectory names used by the content-keyed store layout.
DIR_NAME_PAT = '^[0-9a-fA-F]{2}$'
DIR_NAME_RE = re.compile(DIR_NAME_PAT)

# Leaf file names: 40 hex chars for SHA-1 content keys.
FILE_NAME_1_PAT = '^[0-9a-fA-F]{40}$'
FILE_NAME_1_RE = re.compile(FILE_NAME_1_PAT)

# Leaf file names: 64 hex chars for SHA-256 content keys.
FILE_NAME_2_PAT = '^[0-9a-fA-F]{64}$'
FILE_NAME_2_RE = re.compile(FILE_NAME_2_PAT)
# -- classes --------------------------------------------------------
class Importer(object):
    """
    Walks a content-keyed directory tree (two-hex-digit subdirectories,
    hash-named leaf files) and imports every leaf file into a destination
    store through a BlockingServer.
    """

    def __init__(self, src_dir, dest_dir, pgm_name_and_version,
                 hashtype=HashTypes.SHA2, verbose=False):
        self._src_dir = src_dir
        self._dest_dir = dest_dir
        # Recorded as the source of each import in the upax log.
        self._pgm_name_and_version = pgm_name_and_version
        self._server = None
        self._hashtype = hashtype
        self._verbose = verbose
        # Running total of leaf files imported.
        self._count = 0

    @property
    def src_dir(self):
        """
        Return the path to the source directory from which files are
        being loaded.
        """
        return self._src_dir

    # NOTE(review): unlike src_dir, the three accessors below lack the
    # @property decorator, so callers must invoke them as methods. Confirm
    # whether this asymmetry is intentional before changing it (adding the
    # decorator would break existing call sites).
    def dest_dir(self):
        """
        Return the path to the destination directory into which files
        will be copied.
        """
        return self._dest_dir

    def pgm_name_and_version(self):
        """ Return the name of the program loading the data. """
        return self._pgm_name_and_version

    def verbose(self):
        """ Return whether to be chatty. """
        return self._verbose

    @staticmethod
    def create_importer(args):
        """ Create an Importer given a set of command line options. """
        return Importer(args.src_dir, args.dest_dir,
                        args.pgm_name_and_version, args.hashtype,
                        args.verbose)

    def import_bottom_dir(self, bottom_dir):
        """
        Import the files in the bottom directory of a content-keyed store.
        """
        src = self._pgm_name_and_version
        # NOTE(review): 'string' actually holds the BlockingServer instance;
        # the name is misleading.
        string = self._server
        count = 0
        for entry in scandir(bottom_dir):
            ok_ = False
            if entry.is_file():
                ok_ = True
                name = entry.name
                # leaf name names should be the file's SHA hash, its content
                # key
                if self._hashtype == HashTypes.SHA1:
                    match = FILE_NAME_1_RE.match(name)
                else:
                    match = FILE_NAME_2_RE.match(name)
                if match is not None:
                    count += 1
                    if self._verbose:
                        print(' ' + entry.path)
                    (_, actual_hash) = string.put(entry.path, name, src)
                    # Mismatch means the file's content does not hash to its
                    # file name.
                    if actual_hash != name:
                        print(
                            "%s has content key %s" %
                            (entry.path, actual_hash))
                else:
                    ok_ = False
            if not ok_:
                print("not a proper leaf file: " + entry.path)
        self._count += count

    def import_sub_dir(self, sub_dir):
        """ Import the files in a subdirectory of a content-keyed store. """
        for entry in scandir(sub_dir):
            ok_ = False
            if entry.is_dir():
                ok_ = True
                # Bottom-level dirs must also be two-hex-digit names.
                if DIR_NAME_RE.match(entry.name):
                    if self._verbose:
                        print((' ' + entry.path))
                    self.import_bottom_dir(entry.path)
            if not ok_:
                print(("not a proper subsubdirectory: " + entry.path))

    def do_import_u_dir(self):
        """
        Import the files in the source directory, which is a content-keyed
        store.
        """
        src_dir = self._src_dir
        dest_dir = self._dest_dir
        verbose = self._verbose
        # os.umask(0o222) # CAN'T USE THIS
        self._server = BlockingServer(dest_dir, self._hashtype)
        log = self._server.log
        if verbose:
            print(("there were %7u files in %s at the beginning of the run" % (
                len(log), src_dir)))
        self._count = 0
        src_dir = self._src_dir
        if self._verbose:
            print(src_dir)
        try:
            for entry in scandir(src_dir):
                sub_dir = entry.name
                # Skip the store's bookkeeping directories.
                if sub_dir == 'L' or sub_dir == 'in' or \
                        sub_dir == 'node_id' or sub_dir == 'tmp':
                    continue
                ok_ = False
                if entry.is_dir():
                    if DIR_NAME_RE.match(sub_dir):
                        if self._verbose:
                            print((' ' + entry.name))
                        ok_ = True
                        self.import_sub_dir(entry.path)
                if not ok_:
                    print(("not a proper subdirectory: " + entry.name))
        finally:
            # Always release the server, even if the walk fails.
            self._server.close() # GEEP
| 32.973856
| 79
| 0.510406
|
4a00a4f5f22bdc7135eb57353401e37f1860977e
| 3,328
|
py
|
Python
|
contrib/opencensus-ext-azure/opencensus/ext/azure/metrics_exporter/standard_metrics/http_dependency.py
|
jtbeach/opencensus-python
|
2e396b063a238b3e823b6efc136b9a0405dd5565
|
[
"Apache-2.0"
] | 1
|
2020-10-19T07:46:42.000Z
|
2020-10-19T07:46:42.000Z
|
contrib/opencensus-ext-azure/opencensus/ext/azure/metrics_exporter/standard_metrics/http_dependency.py
|
dineshkrishnareddy/opencensus-python
|
e5e752ceab3371ec4b78cec23a717168e2ed9372
|
[
"Apache-2.0"
] | null | null | null |
contrib/opencensus-ext-azure/opencensus/ext/azure/metrics_exporter/standard_metrics/http_dependency.py
|
dineshkrishnareddy/opencensus-python
|
e5e752ceab3371ec4b78cec23a717168e2ed9372
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import threading
import time
from opencensus.metrics.export.gauge import DerivedDoubleGauge
from opencensus.trace import execution_context
# Shared state tracking the count of outgoing HTTP calls made via requests.
dependency_map = dict()
# Guards concurrent updates to dependency_map.
_dependency_lock = threading.Lock()
# Original, unpatched implementation saved so the wrapper can delegate to it.
ORIGINAL_REQUEST = requests.Session.request
def dependency_patch(*args, **kwargs):
    """Wrapper around requests.Session.request that counts outgoing calls.

    Delegates to the original implementation and increments the shared
    dependency counter, except for requests issued by the exporter itself.
    """
    response = ORIGINAL_REQUEST(*args, **kwargs)
    # Only collect request metric if sent from non-exporter thread
    if not execution_context.is_exporter():
        # We don't want multiple threads updating this at once
        with _dependency_lock:
            dependency_map['count'] = dependency_map.get('count', 0) + 1
    return response
def setup():
    """Monkey-patch requests.Session.request so outgoing calls are counted."""
    # Patch the requests library functions to track dependency information
    requests.Session.request = dependency_patch
class DependencyRateMetric(object):
    """Produces a derived gauge reporting outgoing HTTP requests per second."""
    # Dependency call metrics can be found under custom metrics
    NAME = "\\ApplicationInsights\\Dependency Calls/Sec"

    def __init__(self):
        # Install the requests patch so calls start being counted.
        setup()

    @staticmethod
    def get_value():
        """Return the request rate since the previous call.

        Rate = (calls since last sample) / (seconds since last sample);
        state is kept in the module-level dependency_map between calls.
        """
        current_count = dependency_map.get('count', 0)
        current_time = time.time()
        last_count = dependency_map.get('last_count', 0)
        last_time = dependency_map.get('last_time')
        last_result = dependency_map.get('last_result', 0)

        try:
            # last_time is None the very first time this function is called
            if last_time is not None:
                elapsed_seconds = current_time - last_time
                interval_count = current_count - last_count
                result = interval_count / elapsed_seconds
            else:
                result = 0
            dependency_map['last_time'] = current_time
            dependency_map['last_count'] = current_count
            dependency_map['last_result'] = result
            return result
        except ZeroDivisionError:
            # If elapsed_seconds is 0, exporter call made too close to previous
            # Return the previous result if this is the case
            # (state is intentionally left unchanged so the next sample still
            # measures from the previous timestamp).
            return last_result

    def __call__(self):
        """ Returns a derived gauge for outgoing requests per second

        Calculated by obtaining by getting the number of outgoing requests made
        using the requests library within an elapsed time and dividing that
        value over the elapsed time.

        :rtype: :class:`opencensus.metrics.export.gauge.DerivedLongGauge`
        :return: The gauge representing the outgoing requests metric
        """
        gauge = DerivedDoubleGauge(
            DependencyRateMetric.NAME,
            'Outgoing Requests per second',
            'rps',
            [])
        gauge.create_default_time_series(DependencyRateMetric.get_value)
        return gauge
| 36.173913
| 79
| 0.683894
|
4a00a557a46211486b0108d1576776f2a3422bb7
| 1,051
|
py
|
Python
|
pincer/middleware/message_delete_bulk.py
|
Kylianalex/Pincer
|
7ca530798a696c70e7d5c939902653575e3d8054
|
[
"MIT"
] | 1
|
2021-11-04T13:20:23.000Z
|
2021-11-04T13:20:23.000Z
|
pincer/middleware/message_delete_bulk.py
|
Kylianalex/Pincer
|
7ca530798a696c70e7d5c939902653575e3d8054
|
[
"MIT"
] | 1
|
2021-10-31T11:41:42.000Z
|
2021-10-31T11:41:42.000Z
|
pincer/middleware/message_delete_bulk.py
|
Kylianalex/Pincer
|
7ca530798a696c70e7d5c939902653575e3d8054
|
[
"MIT"
] | 1
|
2021-11-17T13:55:07.000Z
|
2021-11-17T13:55:07.000Z
|
# Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
"""sent when multiple messages are deleted at once"""
from ..core.dispatch import GatewayDispatch
from ..objects.events.message import MessageDeleteBulkEvent
from ..utils.conversion import construct_client_dict
from ..utils.types import Coro
async def message_delete_bulk_middleware(self, payload: GatewayDispatch):
    """|coro|

    Middleware for the ``on_message_delete_bulk`` event.

    Parameters
    ----------
    payload : :class:`~pincer.core.dispatch.GatewayDispatch`
        The data received from the message delete bulk event

    Returns
    -------
    Tuple[:class:`str`, :class:`~pincer.events.message.MessageDeleteBulkEvent`]
        ``on_message_delete_bulk`` and an ``MessageDeleteBulkEvent``
    """
    event = MessageDeleteBulkEvent.from_dict(
        construct_client_dict(self, payload.data)
    )
    return "on_message_delete_bulk", event
def export() -> Coro:
    """Return the middleware coroutine for registration by the client."""
    return message_delete_bulk_middleware
| 28.405405
| 79
| 0.706946
|
4a00a56dc7ed3d3d7ba203e1ad7c97a80b464737
| 4,201
|
py
|
Python
|
tic_tac_toe_5_4/network.py
|
alphagamatoe/AlphaToe
|
a7cd0969aa46dfd151a22ed8b9aec1a894747b17
|
[
"MIT"
] | 172
|
2016-09-27T12:23:10.000Z
|
2022-01-19T09:52:11.000Z
|
tic_tac_toe_5_4/network.py
|
afcarl/AlphaToe
|
1220f4f883dbbd7ac1d84092bdaf04ca18a4dbc2
|
[
"MIT"
] | 13
|
2018-07-19T09:42:28.000Z
|
2018-09-25T15:08:05.000Z
|
tic_tac_toe_5_4/network.py
|
afcarl/AlphaToe
|
1220f4f883dbbd7ac1d84092bdaf04ca18a4dbc2
|
[
"MIT"
] | 63
|
2016-09-27T13:00:51.000Z
|
2021-04-04T04:34:37.000Z
|
import tensorflow as tf
from common.benchmark import benchmark
from games.tic_tac_toe_x import TicTacToeXGameSpec
# Game specification: 5x5 board, 4 in a row wins.
tic_tac_toe_5_4_game_spec = TicTacToeXGameSpec(5, 4)
def create_convolutional_network():
    """
    Build a 4-layer convolutional policy network for 5x5 tic-tac-toe
    (4 in a row), ending in a softmax over board positions.

    Returns:
        tuple: (input_layer placeholder, softmax output_layer, list of the
        trainable tf.Variables in layer order). The interface matches the
        original implementation.
    """
    # BUGFIX: the original read "input_layer = tf.input_layer = tf.placeholder(...)",
    # which accidentally monkey-patched an `input_layer` attribute onto the
    # tensorflow module itself.
    input_layer = tf.placeholder(
        "float", (None,) + tic_tac_toe_5_4_game_spec.board_dimensions() + (1,))

    CONVOLUTIONS_LAYER_1 = 64
    CONVOLUTIONS_LAYER_2 = 64
    CONVOLUTIONS_LAYER_3 = 64
    CONVOLUTIONS_LAYER_4 = 64
    # The board is 5x5 and every conv uses padding="SAME" with stride 1, so
    # spatial dimensions stay 5x5 throughout.
    # BUGFIX: use the channel count of the final (4th) conv layer; the original
    # used CONVOLUTIONS_LAYER_2, which only worked because the values are equal.
    FLAT_SIZE = 5 * 5 * CONVOLUTIONS_LAYER_4

    convolution_weights_1 = tf.Variable(tf.truncated_normal([3, 3, 1, CONVOLUTIONS_LAYER_1], stddev=0.01))
    convolution_bias_1 = tf.Variable(tf.constant(0.01, shape=[CONVOLUTIONS_LAYER_1]))

    convolution_weights_2 = tf.Variable(
        tf.truncated_normal([3, 3, CONVOLUTIONS_LAYER_1, CONVOLUTIONS_LAYER_2], stddev=0.01))
    convolution_bias_2 = tf.Variable(tf.constant(0.01, shape=[CONVOLUTIONS_LAYER_2]))

    convolution_weights_3 = tf.Variable(
        tf.truncated_normal([3, 3, CONVOLUTIONS_LAYER_2, CONVOLUTIONS_LAYER_3], stddev=0.01))
    convolution_bias_3 = tf.Variable(tf.constant(0.01, shape=[CONVOLUTIONS_LAYER_3]))

    convolution_weights_4 = tf.Variable(
        tf.truncated_normal([3, 3, CONVOLUTIONS_LAYER_3, CONVOLUTIONS_LAYER_4], stddev=0.01))
    convolution_bias_4 = tf.Variable(tf.constant(0.01, shape=[CONVOLUTIONS_LAYER_4]))

    # Final fully connected layer mapping the flattened features to one logit
    # per board position.
    feed_forward_weights_2 = tf.Variable(
        tf.truncated_normal([FLAT_SIZE, tic_tac_toe_5_4_game_spec.outputs()], stddev=0.01))
    feed_forward_bias_2 = tf.Variable(tf.constant(0.01, shape=[tic_tac_toe_5_4_game_spec.outputs()]))

    hidden_convolutional_layer_1 = tf.nn.relu(
        tf.nn.conv2d(input_layer, convolution_weights_1, strides=[1, 1, 1, 1], padding="SAME") + convolution_bias_1)

    hidden_convolutional_layer_2 = tf.nn.relu(
        tf.nn.conv2d(hidden_convolutional_layer_1, convolution_weights_2, strides=[1, 1, 1, 1],
                     padding="SAME") + convolution_bias_2)

    hidden_convolutional_layer_3 = tf.nn.relu(
        tf.nn.conv2d(hidden_convolutional_layer_2, convolution_weights_3, strides=[1, 1, 1, 1],
                     padding="SAME") + convolution_bias_3)

    hidden_convolutional_layer_4 = tf.nn.relu(
        tf.nn.conv2d(hidden_convolutional_layer_3, convolution_weights_4, strides=[1, 1, 1, 1],
                     padding="SAME") + convolution_bias_4)

    hidden_convolutional_layer_4_flat = tf.reshape(hidden_convolutional_layer_4, [-1, FLAT_SIZE])

    output_layer = tf.nn.softmax(
        tf.matmul(hidden_convolutional_layer_4_flat, feed_forward_weights_2) + feed_forward_bias_2)

    return input_layer, output_layer, [convolution_weights_1, convolution_bias_1,
                                       convolution_weights_2, convolution_bias_2,
                                       convolution_weights_3, convolution_bias_3,
                                       convolution_weights_4, convolution_bias_4,
                                       feed_forward_weights_2, feed_forward_bias_2]
# Pickle file where the benchmark persists this network configuration.
file_path = 'convolutional_net_5_4_l_c_4_f_1_other_fresh.p'
benchmark(tic_tac_toe_5_4_game_spec, file_path, create_convolutional_network)
| 51.231707
| 124
| 0.702452
|
4a00a5a06daae6231674f188126a01fb6299c3d7
| 3,200
|
py
|
Python
|
ephios/core/migrations/0005_auto_20210106_2219.py
|
garinm90/ephios
|
7d04d3287ae16ee332e31add1f25829b199f29a5
|
[
"MIT"
] | 14
|
2021-01-13T12:15:03.000Z
|
2022-03-20T10:02:11.000Z
|
ephios/core/migrations/0005_auto_20210106_2219.py
|
garinm90/ephios
|
7d04d3287ae16ee332e31add1f25829b199f29a5
|
[
"MIT"
] | 570
|
2020-09-19T13:37:27.000Z
|
2022-03-31T09:14:37.000Z
|
ephios/core/migrations/0005_auto_20210106_2219.py
|
garinm90/ephios
|
7d04d3287ae16ee332e31add1f25829b199f29a5
|
[
"MIT"
] | 5
|
2021-01-08T16:52:31.000Z
|
2022-03-20T10:02:25.000Z
|
# Generated by Django 3.1.4 on 2021-01-06 21:19
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make (qualification, user) unique on QualificationGrant
    and add the WorkingHours and Consequence models."""

    dependencies = [
        ("core", "0004_auto_20201014_1648"),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name="qualificationgrant",
            unique_together={("qualification", "user")},
        ),
        migrations.CreateModel(
            name="WorkingHours",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                ("hours", models.DecimalField(decimal_places=2, max_digits=7)),
                ("reason", models.CharField(blank=True, default="", max_length=1024)),
                ("datetime", models.DateTimeField(blank=True, null=True)),
                (
                    "user",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name="Consequence",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                ("slug", models.CharField(max_length=255)),
                ("data", models.JSONField(default=dict)),
                (
                    "state",
                    models.TextField(
                        choices=[
                            ("needs_confirmation", "needs confirmation"),
                            ("executed", "executed"),
                            ("failed", "failed"),
                            ("denied", "denied"),
                        ],
                        default="needs_confirmation",
                        max_length=31,
                    ),
                ),
                ("executed_at", models.DateTimeField(blank=True, null=True)),
                ("fail_reason", models.TextField(blank=True, max_length=255)),
                (
                    "decided_by",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="confirmed_consequences",
                        to=settings.AUTH_USER_MODEL,
                        verbose_name="confirmed by",
                    ),
                ),
                (
                    "user",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="affecting_consequences",
                        to=settings.AUTH_USER_MODEL,
                        verbose_name="affected user",
                    ),
                ),
            ],
        ),
    ]
| 35.955056
| 96
| 0.425625
|
4a00a65d4d8ec24cf9f32230ecab303677cefd42
| 433
|
py
|
Python
|
find/views.py
|
WilliamTakeshi/eCommerce
|
26fd1b178aa117cb35a8112f8249280d0a68a0f7
|
[
"MIT"
] | null | null | null |
find/views.py
|
WilliamTakeshi/eCommerce
|
26fd1b178aa117cb35a8112f8249280d0a68a0f7
|
[
"MIT"
] | null | null | null |
find/views.py
|
WilliamTakeshi/eCommerce
|
26fd1b178aa117cb35a8112f8249280d0a68a0f7
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from products.models import Product
from django.views.generic import ListView
# Create your views here.
class FindProductView(ListView):
    """List view rendering products that match the ``q`` query parameter."""
    template_name = "find/view.html"

    def get_queryset(self, *args, **kwargs):
        """Return products matching the search term, or an empty queryset
        when no term was supplied."""
        search_term = self.request.GET.get('q')
        if not search_term:
            return Product.objects.none()
        return Product.objects.search(search_term)
| 24.055556
| 48
| 0.688222
|
4a00a76fcc9561c983c65a5b22ffe21c81d273d8
| 10,379
|
py
|
Python
|
src/silicone/database_crunchers/time_dep_ratio.py
|
gaurav-ganti/silicone
|
9d8a814a3518aa309f87a53bdc01e38d9de365c2
|
[
"BSD-3-Clause"
] | null | null | null |
src/silicone/database_crunchers/time_dep_ratio.py
|
gaurav-ganti/silicone
|
9d8a814a3518aa309f87a53bdc01e38d9de365c2
|
[
"BSD-3-Clause"
] | null | null | null |
src/silicone/database_crunchers/time_dep_ratio.py
|
gaurav-ganti/silicone
|
9d8a814a3518aa309f87a53bdc01e38d9de365c2
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Module for the database cruncher which uses the 'time-dependent ratio' technique.
"""
import logging
import warnings
import numpy as np
import pandas as pd
from pyam import IamDataFrame
from .base import _DatabaseCruncher
# Module-level logger for this cruncher.
logger = logging.getLogger(__name__)
class TimeDepRatio(_DatabaseCruncher):
    """
    Database cruncher which uses the 'time-dependent ratio' technique.

    This cruncher derives the relationship between two variables by simply assuming
    that the follower timeseries is equal to the lead timeseries multiplied by a
    time-dependent scaling factor. The scaling factor is the ratio of the
    follower variable to the lead variable. If the database contains many such pairs,
    the scaling factor is the ratio between the means of the values. By default, the
    calculation will include only values where the lead variable takes the same sign
    (+ or -) in the infilling database as in the case infilled. This prevents getting
    negative values of emissions that cannot be negative. To allow cases where we
    have no data of the correct sign, set `same_sign = False` in `derive_relationship`.

    Once the relationship is derived, the 'filler' function will infill following:

    .. math::
        E_f(t) = R(t) * E_l(t)

    where :math:`E_f(t)` is emissions of the follower variable and :math:`E_l(t)` is
    emissions of the lead variable.

    :math:`R(t)` is the scaling factor, calculated as the ratio of the means of
    the follower and the leader in the infiller database, denoted with
    lower case e. By default, we include only cases where `sign(e_l(t))` is the same in
    both databases). The cruncher will raise a warning if the lead data is ever
    negative, which can create complications for the use of this cruncher.

    .. math::
        R(t) = \\frac{mean( e_f(t) )}{mean( e_l(t) )})
    """

    @staticmethod
    def _sign_bucket(value):
        """Map a lead value to the scaling column it should use.

        Strictly positive values use the "pos" ratio; zero and negative values
        use the "neg" ratio. This mirrors exactly how the ratios are computed in
        :meth:`derive_relationship` (``data_leader[year].values > 0`` selects the
        "pos" group, everything else the "neg" group), so validation and
        application of the scaling factors treat zero-valued lead data
        identically.
        """
        return "pos" if value > 0 else "neg"

    def derive_relationship(
        self,
        variable_follower,
        variable_leaders,
        same_sign=True,
        only_consistent_cases=True,
    ):
        """
        Derive the relationship between two variables from the database.

        Parameters
        ----------
        variable_follower : str
            The variable for which we want to calculate timeseries (e.g.
            ``"Emissions|C5F12"``).

        variable_leaders : list[str]
            The variable we want to use in order to infer timeseries of
            ``variable_follower`` (e.g. ``["Emissions|CO2"]``).

        same_sign : bool
            Do we want to only use data where the leader has the same sign in the
            infiller and infillee data? If so, we have a potential error from
            not having data of the correct sign, but have more confidence in the
            sign of the follower data.

        only_consistent_cases : bool
            Do we want to only use model/scenario combinations where both lead and
            follow have data at all times? This will reduce the risk of
            inconsistencies or unevenness in the results, but will slightly
            decrease performance speed if you know the data is consistent.
            Scenario/model pairs where data is only returned at certain times
            will be removed, as will any scenarios not returning both lead and
            follow data.

        Returns
        -------
        :obj:`func`
            Function which takes a :obj:`pyam.IamDataFrame` containing
            ``variable_leaders`` timeseries and returns timeseries for
            ``variable_follower`` based on the derived relationship between the
            two. Please see the source code for the exact definition (and
            docstring) of the returned function.

        Raises
        ------
        ValueError
            ``variable_leaders`` contains more than one variable.

        ValueError
            There is no data for ``variable_leaders`` or ``variable_follower`` in
            the database.
        """
        if only_consistent_cases:
            # Keep only (model, scenario, region) combinations that report both
            # lead and follow data at every timestep: dropna() removes rows with
            # gaps, and the duplicated() filter keeps only index combinations
            # that appear for more than one variable.
            consistent_cases = (
                self._db.filter(variable=variable_leaders + [variable_follower])
                .timeseries()
                .dropna()
            )
            consistent_cases = consistent_cases.loc[
                consistent_cases.index.to_frame().duplicated(
                    ["model", "scenario", "region"], keep=False
                )
            ]
            self._filtered_db = IamDataFrame(consistent_cases)
        else:
            self._filtered_db = self._db
        iamdf_follower, data_follower = self._get_iamdf_followers(
            variable_follower, variable_leaders
        )
        data_follower_unit = np.unique(iamdf_follower.data["unit"].values)
        if data_follower_unit.size == 1:
            data_follower_unit = data_follower_unit[0]
        else:
            raise ValueError("There are multiple/no units in follower data")
        data_follower_time_col = iamdf_follower.time_col
        iamdf_leader = self._filtered_db.filter(variable=variable_leaders[0])
        data_leader = iamdf_leader.timeseries()
        if iamdf_leader["unit"].nunique() != 1:
            raise ValueError("There are multiple/no units for the leader data.")
        if data_follower.size != data_leader.size:
            error_msg = "The follower and leader data have different sizes"
            raise ValueError(error_msg)
        # Calculate the ratios to use
        all_times = np.unique(iamdf_leader.data[iamdf_leader.time_col])
        scaling = pd.DataFrame(index=all_times, columns=["pos", "neg"])
        if same_sign:
            # We want to have separate positive and negative answers, calculated
            # first for strictly positive and then for non-positive lead values.
            # nanmean of an empty selection emits a RuntimeWarning and returns
            # NaN, which is the desired "no data of this sign" marker - hence
            # the warning suppression.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                for year in all_times:
                    pos_inds = data_leader[year].values > 0
                    # .loc-based assignment avoids pandas chained-indexing
                    # (scaling["pos"][year] = ...), which is unreliable for
                    # writes and deprecated.
                    scaling.loc[year, "pos"] = np.nanmean(
                        data_follower[year].iloc[pos_inds].values
                    ) / np.nanmean(data_leader[year].iloc[pos_inds].values)
                    scaling.loc[year, "neg"] = np.nanmean(
                        data_follower[year].iloc[~pos_inds].values
                    ) / np.nanmean(data_leader[year].iloc[~pos_inds].values)
        else:
            # The ratio is the same for both signs in this case.
            for year in all_times:
                scaling.loc[year, "pos"] = np.mean(
                    data_follower[year].values
                ) / np.mean(data_leader[year].values)
            scaling["neg"] = scaling["pos"]

        def filler(in_iamdf):
            """
            Filler function derived from :obj:`TimeDepRatio`.

            Parameters
            ----------
            in_iamdf : :obj:`pyam.IamDataFrame`
                Input data to fill data in

            Returns
            -------
            :obj:`pyam.IamDataFrame`
                Filled-in data (without original source data)

            Raises
            ------
            ValueError
                The key year for filling is not in ``in_iamdf``.
            """
            lead_var = in_iamdf.filter(variable=variable_leaders)
            assert (
                lead_var["unit"].nunique() == 1
            ), "There are multiple units for the lead variable."
            if data_follower_time_col != in_iamdf.time_col:
                raise ValueError(
                    "`in_iamdf` time column must be the same as the time column used "
                    "to generate this filler function (`{}`)".format(
                        data_follower_time_col
                    )
                )
            if any(lead_var["value"] < 0):
                warn_str = (
                    "Note that the lead variable {} goes negative. The time dependent "
                    "ratio cruncher can produce unexpected results in this case.".format(
                        variable_leaders
                    )
                )
                logger.warning(warn_str)
                print(warn_str)
            times_needed = set(in_iamdf.data[in_iamdf.time_col])
            if any(
                [
                    k not in set(iamdf_follower[data_follower_time_col])
                    for k in times_needed
                ]
            ):
                error_msg = (
                    "Not all required timepoints are in the data for "
                    "the lead gas ({})".format(variable_leaders[0])
                )
                raise ValueError(error_msg)
            output_ts = lead_var.timeseries()
            for year in times_needed:
                # BUG FIX: use the same sign bucketing for the availability
                # check and for the multiplication. Previously two different
                # lambdas were used ("neg" if x < 0 else "pos" vs.
                # "pos" if x > 0 else "neg"), so a lead value of exactly zero
                # was validated against the "pos" ratio but scaled with the
                # "neg" ratio.
                sign_cols = output_ts[year].map(self._sign_bucket)
                year_scaling = scaling.loc[year][sign_cols]
                if year_scaling.isnull().values.any():
                    raise ValueError(
                        "Attempt to infill {} data using the time_dep_ratio cruncher "
                        "where the infillee data has a sign not seen in the infiller "
                        "database for year "
                        "{}.".format(variable_leaders, year)
                    )
                output_ts[year] = output_ts[year].values * year_scaling.values
            output_ts.reset_index(inplace=True)
            output_ts["variable"] = variable_follower
            output_ts["unit"] = data_follower_unit
            return IamDataFrame(output_ts)

        return filler

    def _get_iamdf_followers(self, variable_follower, variable_leaders):
        """Return the filtered follower IamDataFrame and its timeseries.

        Raises :class:`ValueError` if more than one leader is supplied or if no
        complete follower data is available after filtering.
        """
        if len(variable_leaders) > 1:
            raise ValueError(
                "For `TimeDepRatio`, ``variable_leaders`` should only "
                "contain one variable"
            )
        self._check_follower_and_leader_in_db(variable_follower, variable_leaders)
        iamdf_follower = self._filtered_db.filter(variable=variable_follower)
        if iamdf_follower.empty:
            raise ValueError(
                "No data is complete enough to use in the time-dependent ratio cruncher"
            )
        data_follower = iamdf_follower.timeseries()
        return iamdf_follower, data_follower
| 40.862205
| 89
| 0.579439
|
4a00a89c414fe1d1344c476b74fcb0191b23a70f
| 783
|
py
|
Python
|
aoj/2/AOJ0523.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | 1
|
2018-11-12T15:18:55.000Z
|
2018-11-12T15:18:55.000Z
|
aoj/2/AOJ0523.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
aoj/2/AOJ0523.py
|
knuu/competitive-programming
|
16bc68fdaedd6f96ae24310d697585ca8836ab6e
|
[
"MIT"
] | null | null | null |
def discard(c, cards):
    """Return the first card in *cards* strictly greater than *c*, or 0 if none.

    *cards* is expected to be sorted ascending by the caller, so the result is
    the smallest playable card.
    """
    return next((card for card in cards if card > c), 0)
# Main driver: repeatedly read a game instance from stdin until n == 0.
# Taro holds n cards (one per line); Hanako holds the remaining cards of 1..2n.
while True:
    n = int(input())
    if n == 0:
        break
    taro = [int(input()) for _ in range(n)]
    # Hanako gets every card from 1..2n that Taro does not hold.
    hanako = [x + 1 for x in range(2*n) if (x + 1) not in taro]
    taro.sort()
    hanako.sort()
    table = 0  # value of the card currently on the table; 0 means table is empty
    # Players alternate: each plays the smallest card beating the table, or
    # passes (discard returns 0, which also resets the table to "empty").
    while True:
        if taro:
            table = discard(table, taro)
            if table:
                taro.remove(table)
        if not taro:
            # Taro emptied his hand: print loser's (Hanako's) remaining count,
            # then winner's count (0).
            print(len(hanako))
            print(0)
            break
        if hanako:
            table = discard(table, hanako)
            if table:
                hanako.remove(table)
        if not hanako:
            # Hanako emptied her hand: winner's count first (0), then Taro's.
            print(0)
            print(len(taro))
            break
| 20.605263
| 63
| 0.439336
|
4a00a8d0679fc0d3f5ed722fc51b6088d8914270
| 5,176
|
py
|
Python
|
examples/contrib/webscanner_helper/proxyauth_selenium.py
|
KarlParkinson/mitmproxy
|
fd5caf40c75ca73c4b767170497abf6a5bf016a0
|
[
"MIT"
] | 24,939
|
2015-01-01T17:13:21.000Z
|
2022-03-31T17:50:04.000Z
|
examples/contrib/webscanner_helper/proxyauth_selenium.py
|
PeterDaveHello/mitmproxy
|
4bd7b6c4eadeaca712f63e0e73f20bcf6aadbffb
|
[
"MIT"
] | 3,655
|
2015-01-02T12:31:43.000Z
|
2022-03-31T20:24:57.000Z
|
examples/contrib/webscanner_helper/proxyauth_selenium.py
|
PeterDaveHello/mitmproxy
|
4bd7b6c4eadeaca712f63e0e73f20bcf6aadbffb
|
[
"MIT"
] | 3,712
|
2015-01-06T06:47:06.000Z
|
2022-03-31T10:33:27.000Z
|
import abc
import logging
import random
import string
import time
from typing import Dict, List, cast, Any
import mitmproxy.http
from mitmproxy import flowfilter
from mitmproxy import master
from mitmproxy.script import concurrent
from selenium import webdriver
# Module-level logger for this add-on.
logger = logging.getLogger(__name__)
# Maps Selenium cookie-dict keys to the corresponding Set-Cookie attribute
# names used when serializing cookies back into HTTP headers.
cookie_key_name = {
    "path": "Path",
    "expires": "Expires",
    "domain": "Domain",
    "is_http_only": "HttpOnly",
    "is_secure": "Secure"
}
def randomString(string_length=10):
    """Return a random string of *string_length* lowercase ASCII letters."""
    alphabet = string.ascii_lowercase
    picked = [random.choice(alphabet) for _ in range(string_length)]
    return "".join(picked)
class AuthorizationOracle(abc.ABC):
    """Abstract class for an authorization oracle which decides if a given request or response is authenticated."""
    @abc.abstractmethod
    def is_unauthorized_request(self, flow: mitmproxy.http.HTTPFlow) -> bool:
        """Return True if *flow*'s request is not authenticated."""
        pass
    @abc.abstractmethod
    def is_unauthorized_response(self, flow: mitmproxy.http.HTTPFlow) -> bool:
        """Return True if *flow*'s response indicates a failed authentication."""
        pass
class SeleniumAddon:
    """ This Addon can be used in combination with web application scanners in order to help them to authenticate
    against a web application.
    Since the authentication is highly dependant on the web application, this add-on includes the abstract method
    *login*. In order to use the add-on, a class for the web application inheriting from SeleniumAddon needs to be
    created. This class needs to include the concrete selenium actions necessary to authenticate against the web
    application. In addition, an authentication oracle which inherits from AuthorizationOracle should be created.

    NOTE(review): ``login`` is marked ``@abc.abstractmethod`` but this class does
    not inherit from ``abc.ABC``, so instantiating a subclass that forgot to
    override ``login`` is not prevented at construction time.
    """
    def __init__(self, fltr: str, domain: str,
                 auth_oracle: AuthorizationOracle):
        # Compile the mitmproxy filter expression; flows not matching it are
        # ignored by this add-on.
        self.filter = flowfilter.parse(fltr)
        self.auth_oracle = auth_oracle
        self.domain = domain
        self.browser = None
        # True while freshly obtained login cookies still need to be mirrored
        # into the next response's Set-Cookie header.
        self.set_cookies = False
        # Headless Firefox with proxying disabled (network.proxy.type = 0) so
        # the login traffic does not loop back through mitmproxy itself.
        options = webdriver.FirefoxOptions()
        options.headless = True
        profile = webdriver.FirefoxProfile()
        profile.set_preference('network.proxy.type', 0)
        self.browser = webdriver.Firefox(firefox_profile=profile,
                                         options=options)
        self.cookies: List[Dict[str, str]] = []

    def _login(self, flow):
        """Run the subclass login flow and mark its cookies for propagation."""
        self.cookies = self.login(flow)
        self.browser.get("about:blank")
        self._set_request_cookies(flow)
        self.set_cookies = True

    def request(self, flow: mitmproxy.http.HTTPFlow):
        """mitmproxy hook: re-login before forwarding unauthorized requests."""
        if flow.request.is_replay:
            logger.warning("Caught replayed request: " + str(flow))
        if (not self.filter or self.filter(flow)) and self.auth_oracle.is_unauthorized_request(flow):
            logger.debug("unauthorized request detected, perform login")
            self._login(flow)

    # has to be concurrent because replay.client is blocking and replayed flows
    # will also call response
    @concurrent
    def response(self, flow: mitmproxy.http.HTTPFlow):
        """mitmproxy hook: on an unauthorized response, log in and replay the flow."""
        if flow.response and (self.filter is None or self.filter(flow)):
            if self.auth_oracle.is_unauthorized_response(flow):
                self._login(flow)
                new_flow = flow.copy()
                if master and hasattr(master, 'commands'):
                    # cast necessary for mypy
                    cast(Any, master).commands.call("replay.client", [new_flow])
                    # Poll up to ~10s for the replayed flow to receive a response.
                    count = 0
                    while new_flow.response is None and count < 10:
                        logger.error("waiting since " + str(count) + " ...")
                        count = count + 1
                        time.sleep(1)
                    if new_flow.response:
                        flow.response = new_flow.response
                else:
                    logger.warning("Could not call 'replay.client' command since master was not initialized yet.")
        if self.set_cookies and flow.response:
            logger.debug("set set-cookie header for response")
            self._set_set_cookie_headers(flow)
            self.set_cookies = False

    def done(self):
        """mitmproxy shutdown hook: close the headless browser."""
        self.browser.close()

    def _set_set_cookie_headers(self, flow: mitmproxy.http.HTTPFlow):
        """Serialize the stored login cookies into the response's Set-Cookie header.

        Only the last cookie's header survives because the header key is
        overwritten on each iteration -- presumably intentional for a single
        session cookie; verify if multiple cookies must be forwarded.
        """
        if flow.response and self.cookies:
            for cookie in self.cookies:
                parts = [f"{cookie['name']}={cookie['value']}"]
                for k, v in cookie_key_name.items():
                    if k in cookie and isinstance(cookie[k], str):
                        parts.append(f"{v}={cookie[k]}")
                    elif k in cookie and isinstance(cookie[k], bool) and cookie[k]:
                        # BUG FIX: boolean cookie attributes (HttpOnly, Secure)
                        # are flags serialized by *name*. The original appended
                        # the boolean value itself (e.g. True), which made
                        # "; ".join() raise TypeError.
                        parts.append(v)
                encoded_c = "; ".join(parts)
                flow.response.headers["set-cookie"] = encoded_c

    def _set_request_cookies(self, flow: mitmproxy.http.HTTPFlow):
        """Attach the stored login cookies to the outgoing request."""
        if self.cookies:
            cookies = "; ".join(
                map(lambda c: f"{c['name']}={c['value']}", self.cookies))
            flow.request.headers["cookie"] = cookies

    @abc.abstractmethod
    def login(self, flow: mitmproxy.http.HTTPFlow) -> List[Dict[str, str]]:
        """Perform the web-application-specific Selenium login; return its cookies."""
        pass
| 38.917293
| 115
| 0.627512
|
4a00a8d898e4ddad2f548a9cbb763e331d9ac2be
| 805
|
py
|
Python
|
drf_tutorial/drf_tutorial/urls.py
|
Ryu0n/stock_prediction
|
f6d117768ff33f6de95d5b94ce46d2ca07964873
|
[
"Unlicense"
] | 6
|
2021-06-01T10:48:40.000Z
|
2021-09-10T04:51:07.000Z
|
drf_tutorial/drf_tutorial/urls.py
|
Ryu0n/stock_prediction
|
f6d117768ff33f6de95d5b94ce46d2ca07964873
|
[
"Unlicense"
] | null | null | null |
drf_tutorial/drf_tutorial/urls.py
|
Ryu0n/stock_prediction
|
f6d117768ff33f6de95d5b94ce46d2ca07964873
|
[
"Unlicense"
] | 1
|
2021-06-01T00:13:15.000Z
|
2021-06-01T00:13:15.000Z
|
"""drf_tutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# URL routes: Django admin plus the project's REST API under /api/.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include('api.urls')),  # delegates to the api app's URLconf
]
| 30.961538
| 77
| 0.700621
|
4a00a913f71e290ef7e079c3bccc4237b599001d
| 518
|
py
|
Python
|
ex86 matriz.py
|
joaoschweikart/python_projects
|
a30361551ec71ac3bef6d38e4b6ffc7bad21f1cc
|
[
"MIT"
] | null | null | null |
ex86 matriz.py
|
joaoschweikart/python_projects
|
a30361551ec71ac3bef6d38e4b6ffc7bad21f1cc
|
[
"MIT"
] | null | null | null |
ex86 matriz.py
|
joaoschweikart/python_projects
|
a30361551ec71ac3bef6d38e4b6ffc7bad21f1cc
|
[
"MIT"
] | null | null | null |
# Read a 3x3 matrix of integers from stdin, then print it centered in a grid.
matriz = [[], [], []]
for c in range(0, 3):
    for d in range(0, 3):
        matriz[c].append(int(input(f'Digite um valor para a posição [{c+1}, {d+1}] da matriz: ')))
print('=-'*30)
for c in range(0, 3):
    for d in range(0, 3):
        # :^5 centers each value in a 5-character cell
        print(f'[{matriz[c][d]:^5}]', end='')
    print()
print('=-'*30)
# NOTE: the triple-quoted string below is the author's earlier hard-coded
# printing approach, kept as dead code (it is an unused string expression).
'''print(f'[ {matriz[0][0]} ] [ {matriz[0][1]} ] [ {matriz[0][2]} ]'
      f'\n[ {matriz[1][0]} ] [ {matriz[1][1]} ] [ {matriz[1][2]} ]'
      f'\n[ {matriz[2][0]} ] [ {matriz[2][1]} ] [ {matriz[2][2]} ]')'''
| 28.777778
| 98
| 0.455598
|
4a00aacf7add960aaf84458e8e4a2f8f32fa6288
| 13,804
|
py
|
Python
|
nova/tests/test_service.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 1
|
2015-11-25T10:18:22.000Z
|
2015-11-25T10:18:22.000Z
|
nova/tests/test_service.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 9
|
2015-05-20T11:20:17.000Z
|
2017-07-27T08:21:33.000Z
|
nova/tests/test_service.py
|
vmthunder/nova
|
baf05caab705c5778348d9f275dc541747b7c2de
|
[
"Apache-2.0"
] | 13
|
2015-05-05T09:34:04.000Z
|
2017-11-08T02:03:46.000Z
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using queue
"""
import sys
import mock
import mox
from oslo.config import cfg
import testtools
from nova import context
from nova import db
from nova import exception
from nova import manager
from nova.openstack.common import processutils
from nova.openstack.common import service as _service
from nova import rpc
from nova import service
from nova import test
from nova.tests import utils
from nova import wsgi
# Config options used only by these tests: which manager class to load and
# where the test WSGI service should bind (port 0 = OS-assigned random port).
test_service_opts = [
    cfg.StrOpt("fake_manager",
               default="nova.tests.test_service.FakeManager",
               help="Manager for testing"),
    cfg.StrOpt("test_service_listen",
               default='127.0.0.1',
               help="Host to bind test service to"),
    cfg.IntOpt("test_service_listen_port",
               default=0,
               help="Port number to bind test service to"),
    ]
CONF = cfg.CONF
CONF.register_opts(test_service_opts)
class FakeManager(manager.Manager):
    """Fake manager for tests."""
    def test_method(self):
        # Returns a marker string so tests can tell manager dispatch from
        # service-level overrides (see ExtendedService).
        return 'manager'
class ExtendedService(service.Service):
    """Service subclass whose test_method shadows the manager's method."""
    def test_method(self):
        return 'service'
class ServiceManagerTestCase(test.TestCase):
    """Test cases for Services."""
    def test_message_gets_to_manager(self):
        # Calls not defined on Service are delegated to the manager.
        serv = service.Service('test',
                               'test',
                               'test',
                               'nova.tests.test_service.FakeManager')
        serv.start()
        self.assertEqual(serv.test_method(), 'manager')
    def test_override_manager_method(self):
        # A method defined on the Service subclass wins over the manager's.
        serv = ExtendedService('test',
                               'test',
                               'test',
                               'nova.tests.test_service.FakeManager')
        serv.start()
        self.assertEqual(serv.test_method(), 'service')
    def test_service_with_min_down_time(self):
        # With service_down_time (10) too small relative to report_interval
        # (10), starting the service bumps it — to 25 per this assertion.
        CONF.set_override('service_down_time', 10)
        CONF.set_override('report_interval', 10)
        serv = service.Service('test',
                               'test',
                               'test',
                               'nova.tests.test_service.FakeManager')
        serv.start()
        self.assertEqual(CONF.service_down_time, 25)
class ServiceFlagsTestCase(test.TestCase):
    """Tests that the enable_new_services flag controls the DB 'disabled' field."""
    def test_service_enabled_on_create_based_on_flag(self):
        self.flags(enable_new_services=True)
        host = 'foo'
        binary = 'nova-fake'
        app = service.Service.create(host=host, binary=binary)
        app.start()
        app.stop()
        # Fetch the DB record created by start(), then clean it up.
        ref = db.service_get(context.get_admin_context(), app.service_id)
        db.service_destroy(context.get_admin_context(), app.service_id)
        self.assertFalse(ref['disabled'])
    def test_service_disabled_on_create_based_on_flag(self):
        self.flags(enable_new_services=False)
        host = 'foo'
        binary = 'nova-fake'
        app = service.Service.create(host=host, binary=binary)
        app.start()
        app.stop()
        ref = db.service_get(context.get_admin_context(), app.service_id)
        db.service_destroy(context.get_admin_context(), app.service_id)
        self.assertTrue(ref['disabled'])
class ServiceTestCase(test.TestCase):
    """Test cases for Services.

    Uses mox record/replay: expectations on the stubbed ``db`` functions and
    manager mock are recorded first, then ``ReplayAll()`` switches to replay
    mode and the service under test must make exactly those calls.
    """
    def setUp(self):
        super(ServiceTestCase, self).setUp()
        self.host = 'foo'
        self.binary = 'nova-fake'
        self.topic = 'fake'
        # Stub the DB calls Service.start() makes to register itself.
        self.mox.StubOutWithMock(db, 'service_create')
        self.mox.StubOutWithMock(db, 'service_get_by_args')
        self.flags(use_local=True, group='conductor')
    def test_create(self):
        # NOTE(vish): Create was moved out of mox replay to make sure that
        # the looping calls are created in StartService.
        app = service.Service.create(host=self.host, binary=self.binary,
                                     topic=self.topic)
        self.assertTrue(app)
    def _service_start_mocks(self):
        # Records the expected DB interaction for a first-time service start:
        # lookup misses (NotFound), then a fresh record is created.
        service_create = {'host': self.host,
                          'binary': self.binary,
                          'topic': self.topic,
                          'report_count': 0}
        service_ref = {'host': self.host,
                       'binary': self.binary,
                       'topic': self.topic,
                       'report_count': 0,
                       'id': 1}
        db.service_get_by_args(mox.IgnoreArg(),
            self.host, self.binary).AndRaise(exception.NotFound())
        db.service_create(mox.IgnoreArg(),
            service_create).AndReturn(service_ref)
        return service_ref
    def test_init_and_start_hooks(self):
        self.manager_mock = self.mox.CreateMock(FakeManager)
        # Replace this module's FakeManager so Service instantiates the mock.
        self.mox.StubOutWithMock(sys.modules[__name__],
                                 'FakeManager', use_mock_anything=True)
        self.mox.StubOutWithMock(self.manager_mock, 'init_host')
        self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
        self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
        FakeManager(host=self.host).AndReturn(self.manager_mock)
        self.manager_mock.service_name = self.topic
        self.manager_mock.additional_endpoints = []
        # init_host is called before any service record is created
        self.manager_mock.init_host()
        self._service_start_mocks()
        # pre_start_hook is called after service record is created,
        # but before RPC consumer is created
        self.manager_mock.pre_start_hook()
        # post_start_hook is called after RPC consumer is created.
        self.manager_mock.post_start_hook()
        self.mox.ReplayAll()
        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.test_service.FakeManager')
        serv.start()
    def _test_service_check_create_race(self, ex):
        # Simulates a race: the create call fails with *ex* (another process
        # registered first), and the retry lookup raises TestException so we
        # can assert that start() re-queried the DB.
        self.manager_mock = self.mox.CreateMock(FakeManager)
        self.mox.StubOutWithMock(sys.modules[__name__], 'FakeManager',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(self.manager_mock, 'init_host')
        self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
        self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
        FakeManager(host=self.host).AndReturn(self.manager_mock)
        # init_host is called before any service record is created
        self.manager_mock.init_host()
        db.service_get_by_args(mox.IgnoreArg(), self.host, self.binary
                               ).AndRaise(exception.NotFound)
        db.service_create(mox.IgnoreArg(), mox.IgnoreArg()
                          ).AndRaise(ex)
        class TestException(Exception):
            pass
        db.service_get_by_args(mox.IgnoreArg(), self.host, self.binary
                               ).AndRaise(TestException)
        self.mox.ReplayAll()
        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.test_service.FakeManager')
        self.assertRaises(TestException, serv.start)
    def test_service_check_create_race_topic_exists(self):
        ex = exception.ServiceTopicExists(host='foo', topic='bar')
        self._test_service_check_create_race(ex)
    def test_service_check_create_race_binary_exists(self):
        ex = exception.ServiceBinaryExists(host='foo', binary='bar')
        self._test_service_check_create_race(ex)
    def test_parent_graceful_shutdown(self):
        # Same start sequence as test_init_and_start_hooks, plus an expectation
        # that stop() delegates to the parent _service.Service.stop.
        self.manager_mock = self.mox.CreateMock(FakeManager)
        self.mox.StubOutWithMock(sys.modules[__name__],
                                 'FakeManager', use_mock_anything=True)
        self.mox.StubOutWithMock(self.manager_mock, 'init_host')
        self.mox.StubOutWithMock(self.manager_mock, 'pre_start_hook')
        self.mox.StubOutWithMock(self.manager_mock, 'post_start_hook')
        self.mox.StubOutWithMock(_service.Service, 'stop')
        FakeManager(host=self.host).AndReturn(self.manager_mock)
        self.manager_mock.service_name = self.topic
        self.manager_mock.additional_endpoints = []
        # init_host is called before any service record is created
        self.manager_mock.init_host()
        self._service_start_mocks()
        # pre_start_hook is called after service record is created,
        # but before RPC consumer is created
        self.manager_mock.pre_start_hook()
        # post_start_hook is called after RPC consumer is created.
        self.manager_mock.post_start_hook()
        _service.Service.stop()
        self.mox.ReplayAll()
        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.test_service.FakeManager')
        serv.start()
        serv.stop()
    @mock.patch('nova.servicegroup.API')
    @mock.patch('nova.conductor.api.LocalAPI.service_get_by_args')
    def test_parent_graceful_shutdown_with_cleanup_host(self,
                                                        mock_svc_get_by_args,
                                                        mock_API):
        mock_svc_get_by_args.return_value = {'id': 'some_value'}
        mock_manager = mock.Mock()
        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.test_service.FakeManager')
        # Swap in a plain Mock manager to verify the lifecycle hook calls.
        serv.manager = mock_manager
        serv.manager.additional_endpoints = []
        serv.start()
        serv.manager.init_host.assert_called_with()
        serv.stop()
        serv.manager.cleanup_host.assert_called_with()
    @mock.patch('nova.servicegroup.API')
    @mock.patch('nova.conductor.api.LocalAPI.service_get_by_args')
    @mock.patch.object(rpc, 'get_server')
    def test_service_stop_waits_for_rpcserver(
            self, mock_rpc, mock_svc_get_by_args, mock_API):
        mock_svc_get_by_args.return_value = {'id': 'some_value'}
        serv = service.Service(self.host,
                               self.binary,
                               self.topic,
                               'nova.tests.test_service.FakeManager')
        serv.start()
        serv.stop()
        # stop() must both stop the RPC server and wait for it to drain.
        serv.rpcserver.start.assert_called_once_with()
        serv.rpcserver.stop.assert_called_once_with()
        serv.rpcserver.wait.assert_called_once_with()
class TestWSGIService(test.TestCase):
    """Tests for WSGIService: port binding, worker counts, pool sizing."""
    def setUp(self):
        super(TestWSGIService, self).setUp()
        # Stub out app loading so no real WSGI app/paste config is needed.
        self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
    def test_service_random_port(self):
        # test_service_listen_port defaults to 0, so the OS picks a free port.
        test_service = service.WSGIService("test_service")
        test_service.start()
        self.assertNotEqual(0, test_service.port)
        test_service.stop()
    def test_workers_set_default(self):
        test_service = service.WSGIService("osapi_compute")
        self.assertEqual(test_service.workers, processutils.get_worker_count())
    def test_workers_set_good_user_setting(self):
        CONF.set_override('osapi_compute_workers', 8)
        test_service = service.WSGIService("osapi_compute")
        self.assertEqual(test_service.workers, 8)
    def test_workers_set_zero_user_setting(self):
        CONF.set_override('osapi_compute_workers', 0)
        test_service = service.WSGIService("osapi_compute")
        # If a value less than 1 is used, defaults to number of procs available
        self.assertEqual(test_service.workers, processutils.get_worker_count())
    def test_service_start_with_illegal_workers(self):
        # Negative worker counts are rejected at construction time.
        CONF.set_override("osapi_compute_workers", -1)
        self.assertRaises(exception.InvalidInput,
                          service.WSGIService, "osapi_compute")
    @testtools.skipIf(not utils.is_ipv6_supported(), "no ipv6 support")
    def test_service_random_port_with_ipv6(self):
        CONF.set_default("test_service_listen", "::1")
        test_service = service.WSGIService("test_service")
        test_service.start()
        self.assertEqual("::1", test_service.host)
        self.assertNotEqual(0, test_service.port)
        test_service.stop()
    def test_reset_pool_size_to_default(self):
        test_service = service.WSGIService("test_service")
        test_service.start()
        # Stopping the service, which in turn sets pool size to 0
        test_service.stop()
        self.assertEqual(test_service.server._pool.size, 0)
        # Resetting pool size to default
        test_service.reset()
        test_service.start()
        self.assertEqual(test_service.server._pool.size,
                         CONF.wsgi_default_pool_size)
class TestLauncher(test.TestCase):
    """Tests for launching a WSGI service via service.serve()."""
    def setUp(self):
        super(TestLauncher, self).setUp()
        # Stub out app loading so no real WSGI app/paste config is needed.
        self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
        self.service = service.WSGIService("test_service")
    def test_launch_app(self):
        service.serve(self.service)
        # A nonzero port shows the launcher actually bound the service.
        self.assertNotEqual(0, self.service.port)
        service._launcher.stop()
| 37.207547
| 79
| 0.632643
|
4a00aacff52d774506c0b3e3f850871190151c85
| 263
|
py
|
Python
|
bugtests/test306.py
|
jeff5/jython-whinchat
|
65d8e5268189f8197295ff2d91be3decb1ee0081
|
[
"CNRI-Jython"
] | 577
|
2020-06-04T16:34:44.000Z
|
2022-03-31T11:46:07.000Z
|
bugtests/test306.py
|
jeff5/jython-whinchat
|
65d8e5268189f8197295ff2d91be3decb1ee0081
|
[
"CNRI-Jython"
] | 174
|
2015-01-08T20:37:09.000Z
|
2020-06-03T16:48:59.000Z
|
bugtests/test306.py
|
jeff5/jython-whinchat
|
65d8e5268189f8197295ff2d91be3decb1ee0081
|
[
"CNRI-Jython"
] | 162
|
2015-02-07T02:14:38.000Z
|
2020-05-30T16:42:03.000Z
|
"""
Test normcase.
"""
import support
import os
if os.sep == '\\': #only do this test on windows.
p1 = os.path.normpath('e:\\someDir\\packag/modul.py')
if p1 != 'e:\\someDir\\packag\\modul.py':
raise support.TestError('Wrong normpath %s' % p1)
| 20.230769
| 57
| 0.61597
|
4a00ab45a64e46c952e0330f4c127601af1400cc
| 372
|
py
|
Python
|
src/movieapi/migrations/0009_auto_20190715_1946.py
|
tabramczyk/moviebase_rest_api
|
c3b2f14bf7f7c2c7bfcb8e2eed0ff06e1acf7619
|
[
"MIT"
] | null | null | null |
src/movieapi/migrations/0009_auto_20190715_1946.py
|
tabramczyk/moviebase_rest_api
|
c3b2f14bf7f7c2c7bfcb8e2eed0ff06e1acf7619
|
[
"MIT"
] | 9
|
2019-12-04T23:46:01.000Z
|
2022-02-10T12:27:14.000Z
|
src/movieapi/migrations/0009_auto_20190715_1946.py
|
tabramczyk/moviebase_rest_api
|
c3b2f14bf7f7c2c7bfcb8e2eed0ff06e1acf7619
|
[
"MIT"
] | 1
|
2020-05-07T15:12:07.000Z
|
2020-05-07T15:12:07.000Z
|
# Generated by Django 2.2.3 on 2019-07-15 19:46
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Comment.commented_film to Comment.movie (schema unchanged otherwise)."""
    dependencies = [
        ('movieapi', '0008_auto_20190715_1937'),
    ]
    operations = [
        migrations.RenameField(
            model_name='comment',
            old_name='commented_film',
            new_name='movie',
        ),
    ]
| 19.578947
| 48
| 0.594086
|
4a00ab90632f1eae995dc04584c469b6db668987
| 335
|
py
|
Python
|
multi_layer_network/test/run_entity.py
|
shinyichen/gaia-clustering
|
4e91604085e2132b712eaba11527c51fc7ab4296
|
[
"MIT"
] | null | null | null |
multi_layer_network/test/run_entity.py
|
shinyichen/gaia-clustering
|
4e91604085e2132b712eaba11527c51fc7ab4296
|
[
"MIT"
] | 1
|
2018-09-19T18:49:06.000Z
|
2018-09-19T18:49:06.000Z
|
multi_layer_network/test/run_entity.py
|
shinyichen/gaia-clustering
|
4e91604085e2132b712eaba11527c51fc7ab4296
|
[
"MIT"
] | 3
|
2018-08-26T21:36:23.000Z
|
2018-09-16T22:11:02.000Z
|
from from_jsonhead2cluster import run_with_file_io
# output = "/Users/dongyuli/isi/jsonhead/1003r2nl/"
# output = "/Users/dongyuli/isi/jsonhead/1003r1wl/"
output = "/Users/dongyuli/isi/jsonhead/1003r4nl/"
entity_json = output + "entity.json"
entity_file = output + "cluster.json"
run_with_file_io(entity_json, entity_file, output)
| 27.916667
| 51
| 0.776119
|
4a00abcc657136ecd0b6a6ae40f5d16a0c6f3575
| 33,315
|
py
|
Python
|
tests/components/zwave_js/test_init.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/zwave_js/test_init.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/zwave_js/test_init.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Test the Z-Wave JS init module."""
from copy import deepcopy
from unittest.mock import call, patch
import pytest
from zwave_js_server.exceptions import BaseZwaveJSServerError, InvalidServerVersion
from zwave_js_server.model.node import Node
from openpeerpower.components.oppio.handler import OppioAPIError
from openpeerpower.components.zwave_js.const import DOMAIN
from openpeerpower.components.zwave_js.helpers import get_device_id
from openpeerpower.config_entries import DISABLED_USER, ConfigEntryState
from openpeerpower.const import STATE_UNAVAILABLE
from openpeerpower.helpers import device_registry as dr, entity_registry as er
from .common import (
AIR_TEMPERATURE_SENSOR,
EATON_RF9640_ENTITY,
NOTIFICATION_MOTION_BINARY_SENSOR,
)
from tests.common import MockConfigEntry
@pytest.fixture(name="connect_timeout")
def connect_timeout_fixture():
    """Mock the connect timeout."""
    # Patch the timeout to 0 so connection setup times out immediately in tests.
    with patch("openpeerpower.components.zwave_js.CONNECT_TIMEOUT", new=0) as timeout:
        yield timeout
async def test_entry_setup_unload(opp, client, integration):
    """Test the integration set up and unload."""
    entry = integration
    # Setup (done by the integration fixture) connects the client exactly once.
    assert client.connect.call_count == 1
    assert entry.state is ConfigEntryState.LOADED
    await opp.config_entries.async_unload(entry.entry_id)
    # Unloading must disconnect the client and clear the loaded state.
    assert client.disconnect.call_count == 1
    assert entry.state is ConfigEntryState.NOT_LOADED
async def test_open_peer_power_stop(opp, client, integration):
    """Test we clean up on open peer power stop."""
    await opp.async_stop()
    # Core shutdown must disconnect the websocket client.
    assert client.disconnect.call_count == 1
async def test_initialized_timeout(opp, client, connect_timeout):
    """Test we handle a timeout during client initialization."""
    # connect_timeout fixture forces CONNECT_TIMEOUT to 0, so setup times out.
    entry = MockConfigEntry(domain="zwave_js", data={"url": "ws://test.org"})
    entry.add_to_opp(opp)
    await opp.config_entries.async_setup(entry.entry_id)
    await opp.async_block_till_done()
    # A timeout should schedule a retry rather than fail permanently.
    assert entry.state is ConfigEntryState.SETUP_RETRY
async def test_enabled_statistics(opp, client):
    """Test that we enabled statistics if the entry is opted in."""
    entry = MockConfigEntry(
        domain="zwave_js",
        data={"url": "ws://test.org", "data_collection_opted_in": True},
    )
    entry.add_to_opp(opp)
    # Opt-in in the entry data must trigger the driver's enable call on setup.
    with patch(
        "zwave_js_server.model.driver.Driver.async_enable_statistics"
    ) as mock_cmd:
        await opp.config_entries.async_setup(entry.entry_id)
        await opp.async_block_till_done()
        assert mock_cmd.called
async def test_disabled_statistics(opp, client):
    """Test that we disabled statistics if the entry is opted out."""
    entry = MockConfigEntry(
        domain="zwave_js",
        data={"url": "ws://test.org", "data_collection_opted_in": False},
    )
    entry.add_to_opp(opp)
    # Explicit opt-out must trigger the driver's disable call on setup.
    with patch(
        "zwave_js_server.model.driver.Driver.async_disable_statistics"
    ) as mock_cmd:
        await opp.config_entries.async_setup(entry.entry_id)
        await opp.async_block_till_done()
        assert mock_cmd.called
async def test_noop_statistics(opp, client):
    """Verify no statistics call happens when the user gave no preference."""
    entry = MockConfigEntry(domain="zwave_js", data={"url": "ws://test.org"})
    entry.add_to_opp(opp)

    with patch(
        "zwave_js_server.model.driver.Driver.async_enable_statistics"
    ) as mock_enable, patch(
        "zwave_js_server.model.driver.Driver.async_disable_statistics"
    ) as mock_disable:
        await opp.config_entries.async_setup(entry.entry_id)
        await opp.async_block_till_done()

    assert not mock_enable.called
    assert not mock_disable.called
@pytest.mark.parametrize("error", [BaseZwaveJSServerError("Boom"), Exception("Boom")])
async def test_listen_failure(opp, client, error):
    """Verify a failure in the client listen task puts the entry in retry."""

    async def fake_listen(driver_ready):
        """Raise immediately, and make reconnects fail to stop the retry loop."""
        client.connect.side_effect = BaseZwaveJSServerError("Boom")
        raise error

    client.listen.side_effect = fake_listen
    entry = MockConfigEntry(domain="zwave_js", data={"url": "ws://test.org"})
    entry.add_to_opp(opp)

    await opp.config_entries.async_setup(entry.entry_id)
    await opp.async_block_till_done()

    assert entry.state is ConfigEntryState.SETUP_RETRY
async def test_on_node_added_ready(opp, multisensor_6_state, client, integration):
    """Verify a ready node added at runtime creates its device and entities."""
    dev_reg = dr.async_get(opp)
    node = Node(client, multisensor_6_state)
    device_id = f"{client.driver.controller.home_id}-{node.node_id}"

    # Nothing should exist before the controller reports the node.
    assert not opp.states.get(AIR_TEMPERATURE_SENSOR)
    assert not dev_reg.async_get_device(identifiers={(DOMAIN, device_id)})

    client.driver.controller.emit("node added", {"node": node})
    await opp.async_block_till_done()

    # Entity and device must both exist now, and the entity is usable.
    state = opp.states.get(AIR_TEMPERATURE_SENSOR)
    assert state
    assert state.state != STATE_UNAVAILABLE
    assert dev_reg.async_get_device(identifiers={(DOMAIN, device_id)})
async def test_unique_id_migration_dupes(opp, multisensor_6_state, client, integration):
    """Test duplicate entities in stale unique ID formats are cleaned up.

    Two registry entries exist for the same value, one in each legacy
    unique ID format; after the node is added both legacy IDs must be
    gone and the surviving entity must use the new format.
    """
    ent_reg = er.async_get(opp)
    entity_name = AIR_TEMPERATURE_SENSOR.split(".")[1]
    # Create entity RegistryEntry using old unique ID format
    old_unique_id_1 = (
        f"{client.driver.controller.home_id}.52.52-49-00-Air temperature-00"
    )
    entity_entry = ent_reg.async_get_or_create(
        "sensor",
        DOMAIN,
        old_unique_id_1,
        suggested_object_id=entity_name,
        config_entry=integration,
        original_name=entity_name,
    )
    assert entity_entry.entity_id == AIR_TEMPERATURE_SENSOR
    assert entity_entry.unique_id == old_unique_id_1
    # Create entity RegistryEntry using b0 unique ID format
    old_unique_id_2 = (
        f"{client.driver.controller.home_id}.52.52-49-0-Air temperature-00-00"
    )
    entity_entry = ent_reg.async_get_or_create(
        "sensor",
        DOMAIN,
        old_unique_id_2,
        suggested_object_id=f"{entity_name}_1",
        config_entry=integration,
        original_name=entity_name,
    )
    assert entity_entry.entity_id == f"{AIR_TEMPERATURE_SENSOR}_1"
    assert entity_entry.unique_id == old_unique_id_2
    # Add a ready node, unique ID should be migrated
    node = Node(client, multisensor_6_state)
    event = {"node": node}
    client.driver.controller.emit("node added", event)
    await opp.async_block_till_done()
    # Check that new RegistryEntry is using new unique ID format
    entity_entry = ent_reg.async_get(AIR_TEMPERATURE_SENSOR)
    new_unique_id = f"{client.driver.controller.home_id}.52-49-0-Air temperature"
    assert entity_entry.unique_id == new_unique_id
    # Neither legacy unique ID may still resolve to an entity.
    assert ent_reg.async_get_entity_id("sensor", DOMAIN, old_unique_id_1) is None
    assert ent_reg.async_get_entity_id("sensor", DOMAIN, old_unique_id_2) is None
@pytest.mark.parametrize(
    "id",
    [
        ("52.52-49-00-Air temperature-00"),
        ("52.52-49-0-Air temperature-00-00"),
        ("52-49-0-Air temperature-00-00"),
    ],
)
async def test_unique_id_migration(opp, multisensor_6_state, client, integration, id):
    """Test unique ID is migrated from old format to new.

    Parametrized over each legacy unique ID suffix layout; `id` is the
    part of the old unique ID after the home ID prefix.
    """
    ent_reg = er.async_get(opp)
    # Migrate version 1
    entity_name = AIR_TEMPERATURE_SENSOR.split(".")[1]
    # Create entity RegistryEntry using old unique ID format
    old_unique_id = f"{client.driver.controller.home_id}.{id}"
    entity_entry = ent_reg.async_get_or_create(
        "sensor",
        DOMAIN,
        old_unique_id,
        suggested_object_id=entity_name,
        config_entry=integration,
        original_name=entity_name,
    )
    assert entity_entry.entity_id == AIR_TEMPERATURE_SENSOR
    assert entity_entry.unique_id == old_unique_id
    # Add a ready node, unique ID should be migrated
    node = Node(client, multisensor_6_state)
    event = {"node": node}
    client.driver.controller.emit("node added", event)
    await opp.async_block_till_done()
    # Check that new RegistryEntry is using new unique ID format
    entity_entry = ent_reg.async_get(AIR_TEMPERATURE_SENSOR)
    new_unique_id = f"{client.driver.controller.home_id}.52-49-0-Air temperature"
    assert entity_entry.unique_id == new_unique_id
    assert ent_reg.async_get_entity_id("sensor", DOMAIN, old_unique_id) is None
@pytest.mark.parametrize(
    "id",
    [
        ("32.32-50-00-value-W_Consumed"),
        ("32.32-50-0-value-66049-W_Consumed"),
        ("32-50-0-value-66049-W_Consumed"),
    ],
)
async def test_unique_id_migration_property_key(
    opp, hank_binary_switch_state, client, integration, id
):
    """Test unique ID with property key is migrated from old format to new.

    Parametrized over each legacy suffix layout for a value with a
    property key (the `66049` segment kept in the new format).
    """
    ent_reg = er.async_get(opp)
    SENSOR_NAME = "sensor.smart_plug_with_two_usb_ports_value_electric_consumed"
    entity_name = SENSOR_NAME.split(".")[1]
    # Create entity RegistryEntry using old unique ID format
    old_unique_id = f"{client.driver.controller.home_id}.{id}"
    entity_entry = ent_reg.async_get_or_create(
        "sensor",
        DOMAIN,
        old_unique_id,
        suggested_object_id=entity_name,
        config_entry=integration,
        original_name=entity_name,
    )
    assert entity_entry.entity_id == SENSOR_NAME
    assert entity_entry.unique_id == old_unique_id
    # Add a ready node, unique ID should be migrated
    node = Node(client, hank_binary_switch_state)
    event = {"node": node}
    client.driver.controller.emit("node added", event)
    await opp.async_block_till_done()
    # Check that new RegistryEntry is using new unique ID format
    entity_entry = ent_reg.async_get(SENSOR_NAME)
    new_unique_id = f"{client.driver.controller.home_id}.32-50-0-value-66049"
    assert entity_entry.unique_id == new_unique_id
    assert ent_reg.async_get_entity_id("sensor", DOMAIN, old_unique_id) is None
async def test_unique_id_migration_notification_binary_sensor(
    opp, multisensor_6_state, client, integration
):
    """Test unique ID is migrated from old format to new for a notification binary sensor.

    The state-key suffix (".8") must be preserved across the migration.
    """
    ent_reg = er.async_get(opp)
    entity_name = NOTIFICATION_MOTION_BINARY_SENSOR.split(".")[1]
    # Create entity RegistryEntry using old unique ID format
    old_unique_id = f"{client.driver.controller.home_id}.52.52-113-00-Home Security-Motion sensor status.8"
    entity_entry = ent_reg.async_get_or_create(
        "binary_sensor",
        DOMAIN,
        old_unique_id,
        suggested_object_id=entity_name,
        config_entry=integration,
        original_name=entity_name,
    )
    assert entity_entry.entity_id == NOTIFICATION_MOTION_BINARY_SENSOR
    assert entity_entry.unique_id == old_unique_id
    # Add a ready node, unique ID should be migrated
    node = Node(client, multisensor_6_state)
    event = {"node": node}
    client.driver.controller.emit("node added", event)
    await opp.async_block_till_done()
    # Check that new RegistryEntry is using new unique ID format
    entity_entry = ent_reg.async_get(NOTIFICATION_MOTION_BINARY_SENSOR)
    new_unique_id = f"{client.driver.controller.home_id}.52-113-0-Home Security-Motion sensor status.8"
    assert entity_entry.unique_id == new_unique_id
    assert ent_reg.async_get_entity_id("binary_sensor", DOMAIN, old_unique_id) is None
async def test_old_entity_migration(opp, hank_binary_switch_state, client, integration):
    """Test old entity on a different endpoint is migrated to a new one.

    The stale registry entry claims endpoint 1; after the node is added
    the entity must carry the endpoint 0 unique ID and the old ID must
    no longer resolve.
    """
    node = Node(client, hank_binary_switch_state)
    ent_reg = er.async_get(opp)
    dev_reg = dr.async_get(opp)
    device = dev_reg.async_get_or_create(
        config_entry_id=integration.entry_id, identifiers={get_device_id(client, node)}
    )
    SENSOR_NAME = "sensor.smart_plug_with_two_usb_ports_value_electric_consumed"
    entity_name = SENSOR_NAME.split(".")[1]
    # Create entity RegistryEntry using fake endpoint
    old_unique_id = f"{client.driver.controller.home_id}.32-50-1-value-66049"
    entity_entry = ent_reg.async_get_or_create(
        "sensor",
        DOMAIN,
        old_unique_id,
        suggested_object_id=entity_name,
        config_entry=integration,
        original_name=entity_name,
        device_id=device.id,
    )
    assert entity_entry.entity_id == SENSOR_NAME
    assert entity_entry.unique_id == old_unique_id
    # Do this twice to make sure re-interview doesn't do anything weird
    for i in range(0, 2):
        # Add a ready node, unique ID should be migrated
        event = {"node": node}
        client.driver.controller.emit("node added", event)
        await opp.async_block_till_done()
        # Check that new RegistryEntry is using new unique ID format
        entity_entry = ent_reg.async_get(SENSOR_NAME)
        new_unique_id = f"{client.driver.controller.home_id}.32-50-0-value-66049"
        assert entity_entry.unique_id == new_unique_id
        assert ent_reg.async_get_entity_id("sensor", DOMAIN, old_unique_id) is None
async def test_skip_old_entity_migration_for_multiple(
    opp, hank_binary_switch_state, client, integration
):
    """Test that multiple entities of the same value but on a different endpoint get skipped.

    When two stale entries exist for the same value the migration is
    ambiguous, so both old entries are left in place and a fresh entity
    is created with the new unique ID instead.
    """
    node = Node(client, hank_binary_switch_state)
    ent_reg = er.async_get(opp)
    dev_reg = dr.async_get(opp)
    device = dev_reg.async_get_or_create(
        config_entry_id=integration.entry_id, identifiers={get_device_id(client, node)}
    )
    SENSOR_NAME = "sensor.smart_plug_with_two_usb_ports_value_electric_consumed"
    entity_name = SENSOR_NAME.split(".")[1]
    # Create the first registry entry, claiming endpoint 1
    old_unique_id_1 = f"{client.driver.controller.home_id}.32-50-1-value-66049"
    entity_entry = ent_reg.async_get_or_create(
        "sensor",
        DOMAIN,
        old_unique_id_1,
        suggested_object_id=f"{entity_name}_1",
        config_entry=integration,
        original_name=f"{entity_name}_1",
        device_id=device.id,
    )
    assert entity_entry.entity_id == f"{SENSOR_NAME}_1"
    assert entity_entry.unique_id == old_unique_id_1
    # Create the second registry entry, claiming endpoint 2
    old_unique_id_2 = f"{client.driver.controller.home_id}.32-50-2-value-66049"
    entity_entry = ent_reg.async_get_or_create(
        "sensor",
        DOMAIN,
        old_unique_id_2,
        suggested_object_id=f"{entity_name}_2",
        config_entry=integration,
        original_name=f"{entity_name}_2",
        device_id=device.id,
    )
    assert entity_entry.entity_id == f"{SENSOR_NAME}_2"
    assert entity_entry.unique_id == old_unique_id_2
    # Add a ready node, unique ID should be migrated
    event = {"node": node}
    client.driver.controller.emit("node added", event)
    await opp.async_block_till_done()
    # Check that new RegistryEntry is created using new unique ID format
    entity_entry = ent_reg.async_get(SENSOR_NAME)
    new_unique_id = f"{client.driver.controller.home_id}.32-50-0-value-66049"
    assert entity_entry.unique_id == new_unique_id
    # Check that the old entities stuck around because we skipped the migration step
    assert ent_reg.async_get_entity_id("sensor", DOMAIN, old_unique_id_1)
    assert ent_reg.async_get_entity_id("sensor", DOMAIN, old_unique_id_2)
async def test_old_entity_migration_notification_binary_sensor(
    opp, multisensor_6_state, client, integration
):
    """Test old entity on a different endpoint is migrated to a new one for a notification binary sensor.

    Same as test_old_entity_migration but for a Home Security state-key
    value; the ".8" state-key suffix must survive the migration.
    """
    node = Node(client, multisensor_6_state)
    ent_reg = er.async_get(opp)
    dev_reg = dr.async_get(opp)
    device = dev_reg.async_get_or_create(
        config_entry_id=integration.entry_id, identifiers={get_device_id(client, node)}
    )
    entity_name = NOTIFICATION_MOTION_BINARY_SENSOR.split(".")[1]
    # Create entity RegistryEntry using old unique ID format (fake endpoint 1)
    old_unique_id = f"{client.driver.controller.home_id}.52-113-1-Home Security-Motion sensor status.8"
    entity_entry = ent_reg.async_get_or_create(
        "binary_sensor",
        DOMAIN,
        old_unique_id,
        suggested_object_id=entity_name,
        config_entry=integration,
        original_name=entity_name,
        device_id=device.id,
    )
    assert entity_entry.entity_id == NOTIFICATION_MOTION_BINARY_SENSOR
    assert entity_entry.unique_id == old_unique_id
    # Do this twice to make sure re-interview doesn't do anything weird
    for _ in range(0, 2):
        # Add a ready node, unique ID should be migrated
        event = {"node": node}
        client.driver.controller.emit("node added", event)
        await opp.async_block_till_done()
        # Check that new RegistryEntry is using new unique ID format
        entity_entry = ent_reg.async_get(NOTIFICATION_MOTION_BINARY_SENSOR)
        new_unique_id = f"{client.driver.controller.home_id}.52-113-0-Home Security-Motion sensor status.8"
        assert entity_entry.unique_id == new_unique_id
        assert (
            ent_reg.async_get_entity_id("binary_sensor", DOMAIN, old_unique_id) is None
        )
async def test_on_node_added_not_ready(opp, multisensor_6_state, client, integration):
    """Verify a not-ready node only creates a device until it becomes ready."""
    dev_reg = dr.async_get(opp)
    node_state = deepcopy(multisensor_6_state)  # local copy we can mutate safely
    node = Node(client, node_state)
    node.data["ready"] = False
    device_id = f"{client.driver.controller.home_id}-{node.node_id}"

    # Nothing should exist before the controller reports the node.
    assert not opp.states.get(AIR_TEMPERATURE_SENSOR)
    assert not dev_reg.async_get_device(identifiers={(DOMAIN, device_id)})

    client.driver.controller.emit("node added", {"node": node})
    await opp.async_block_till_done()

    # The device is registered right away; entities wait for readiness.
    assert not opp.states.get(AIR_TEMPERATURE_SENSOR)
    assert dev_reg.async_get_device(identifiers={(DOMAIN, device_id)})

    node.data["ready"] = True
    node.emit("ready", {"node": node})
    await opp.async_block_till_done()

    state = opp.states.get(AIR_TEMPERATURE_SENSOR)
    assert state  # entity added once the node became ready
    assert state.state != STATE_UNAVAILABLE
async def test_existing_node_ready(opp, client, multisensor_6, integration):
    """Verify a node already ready at setup gets its device and entities."""
    dev_reg = dr.async_get(opp)
    device_id = f"{client.driver.controller.home_id}-{multisensor_6.node_id}"

    state = opp.states.get(AIR_TEMPERATURE_SENSOR)
    assert state  # entity and device were created during setup
    assert state.state != STATE_UNAVAILABLE
    assert dev_reg.async_get_device(identifiers={(DOMAIN, device_id)})
async def test_null_name(opp, client, null_name_check, integration):
    """Verify a node without a name is exposed under a generic node entity id."""
    assert opp.states.get(f"switch.node_{null_name_check.node_id}")
async def test_existing_node_not_ready(opp, client, multisensor_6):
    """Verify a not-ready node present at setup adds entities once ready."""
    dev_reg = dr.async_get(opp)
    node = multisensor_6
    node.data = deepcopy(node.data)  # don't mutate the shared fixture state
    node.data["ready"] = False
    device_id = f"{client.driver.controller.home_id}-{node.node_id}"

    entry = MockConfigEntry(domain="zwave_js", data={"url": "ws://test.org"})
    entry.add_to_opp(opp)
    await opp.config_entries.async_setup(entry.entry_id)
    await opp.async_block_till_done()

    # Only the device should exist until the node signals readiness.
    assert not opp.states.get(AIR_TEMPERATURE_SENSOR)
    assert dev_reg.async_get_device(identifiers={(DOMAIN, device_id)})

    node.data["ready"] = True
    node.emit("ready", {"node": node})
    await opp.async_block_till_done()

    state = opp.states.get(AIR_TEMPERATURE_SENSOR)
    assert state  # entity added once the node became ready
    assert state.state != STATE_UNAVAILABLE
    assert dev_reg.async_get_device(identifiers={(DOMAIN, device_id)})
async def test_start_addon(
    opp, addon_installed, install_addon, addon_options, set_addon_options, start_addon
):
    """Verify an installed add-on is configured and started during entry setup."""
    device = "/test"
    network_key = "abc123"
    expected_options = {"device": device, "network_key": network_key}
    entry = MockConfigEntry(
        domain=DOMAIN,
        title="Z-Wave JS",
        data={"use_addon": True, "usb_path": device, "network_key": network_key},
    )
    entry.add_to_opp(opp)

    await opp.config_entries.async_setup(entry.entry_id)
    await opp.async_block_till_done()

    # The server isn't reachable yet so the entry ends up retrying...
    assert entry.state is ConfigEntryState.SETUP_RETRY
    # ...but no install happens and the add-on is configured and started.
    assert install_addon.call_count == 0
    assert set_addon_options.call_count == 1
    assert set_addon_options.call_args == call(
        opp, "core_zwave_js", {"options": expected_options}
    )
    assert start_addon.call_count == 1
    assert start_addon.call_args == call(opp, "core_zwave_js")
async def test_install_addon(
    opp, addon_installed, install_addon, addon_options, set_addon_options, start_addon
):
    """Verify a missing add-on is installed, configured and started on setup."""
    addon_installed.return_value["version"] = None  # report the add-on as absent
    device = "/test"
    network_key = "abc123"
    expected_options = {"device": device, "network_key": network_key}
    entry = MockConfigEntry(
        domain=DOMAIN,
        title="Z-Wave JS",
        data={"use_addon": True, "usb_path": device, "network_key": network_key},
    )
    entry.add_to_opp(opp)

    await opp.config_entries.async_setup(entry.entry_id)
    await opp.async_block_till_done()

    # Server unreachable -> retry, but the full install flow must have run.
    assert entry.state is ConfigEntryState.SETUP_RETRY
    assert install_addon.call_count == 1
    assert install_addon.call_args == call(opp, "core_zwave_js")
    assert set_addon_options.call_count == 1
    assert set_addon_options.call_args == call(
        opp, "core_zwave_js", {"options": expected_options}
    )
    assert start_addon.call_count == 1
    assert start_addon.call_args == call(opp, "core_zwave_js")
@pytest.mark.parametrize("addon_info_side_effect", [OppioAPIError("Boom")])
async def test_addon_info_failure(
    opp,
    addon_installed,
    install_addon,
    addon_options,
    set_addon_options,
    start_addon,
):
    """Verify setup goes into retry when fetching add-on info fails."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        title="Z-Wave JS",
        data={"use_addon": True, "usb_path": "/test", "network_key": "abc123"},
    )
    entry.add_to_opp(opp)

    await opp.config_entries.async_setup(entry.entry_id)
    await opp.async_block_till_done()

    assert entry.state is ConfigEntryState.SETUP_RETRY
    # No further add-on management should happen after the info failure.
    assert install_addon.call_count == 0
    assert start_addon.call_count == 0
@pytest.mark.parametrize(
    "old_device, new_device, old_network_key, new_network_key",
    [("/old_test", "/new_test", "old123", "new123")],
)
async def test_addon_options_changed(
    opp,
    client,
    addon_installed,
    addon_running,
    install_addon,
    addon_options,
    start_addon,
    old_device,
    new_device,
    old_network_key,
    new_network_key,
):
    """Test update config entry data on entry setup if add-on options changed.

    The add-on reports different device/network key options than the
    entry has stored; setup must sync the entry data to the add-on.
    """
    # Make the (running) add-on report the new options.
    addon_options["device"] = new_device
    addon_options["network_key"] = new_network_key
    entry = MockConfigEntry(
        domain=DOMAIN,
        title="Z-Wave JS",
        data={
            "url": "ws://host1:3001",
            "use_addon": True,
            "usb_path": old_device,
            "network_key": old_network_key,
        },
    )
    entry.add_to_opp(opp)
    await opp.config_entries.async_setup(entry.entry_id)
    await opp.async_block_till_done()
    # Entry data must now mirror the add-on configuration; the add-on is
    # already running, so it is neither installed nor started again.
    assert entry.state == ConfigEntryState.LOADED
    assert entry.data["usb_path"] == new_device
    assert entry.data["network_key"] == new_network_key
    assert install_addon.call_count == 0
    assert start_addon.call_count == 0
@pytest.mark.parametrize(
    "addon_version, update_available, update_calls, snapshot_calls, "
    "update_addon_side_effect, create_shapshot_side_effect",
    [
        # (installed version, update available?, expected update calls,
        #  expected snapshot calls, update side effect, snapshot side effect)
        ("1.0", True, 1, 1, None, None),
        ("1.0", False, 0, 0, None, None),
        ("1.0", True, 1, 1, OppioAPIError("Boom"), None),
        ("1.0", True, 0, 1, None, OppioAPIError("Boom")),
    ],
)
async def test_update_addon(
    opp,
    client,
    addon_info,
    addon_installed,
    addon_running,
    create_shapshot,  # "shapshot" (sic) matches the fixture's spelling
    update_addon,
    addon_options,
    addon_version,
    update_available,
    update_calls,
    snapshot_calls,
    update_addon_side_effect,
    create_shapshot_side_effect,
):
    """Test update the Z-Wave JS add-on during entry setup.

    A snapshot is always attempted before an available update, and a
    failing snapshot prevents the update itself.
    """
    device = "/test"
    network_key = "abc123"
    addon_options["device"] = device
    addon_options["network_key"] = network_key
    addon_info.return_value["version"] = addon_version
    addon_info.return_value["update_available"] = update_available
    create_shapshot.side_effect = create_shapshot_side_effect
    update_addon.side_effect = update_addon_side_effect
    # Make the client connection fail so setup ends in retry after the
    # add-on handling has run.
    client.connect.side_effect = InvalidServerVersion("Invalid version")
    entry = MockConfigEntry(
        domain=DOMAIN,
        title="Z-Wave JS",
        data={
            "url": "ws://host1:3001",
            "use_addon": True,
            "usb_path": device,
            "network_key": network_key,
        },
    )
    entry.add_to_opp(opp)
    await opp.config_entries.async_setup(entry.entry_id)
    await opp.async_block_till_done()
    assert entry.state is ConfigEntryState.SETUP_RETRY
    assert create_shapshot.call_count == snapshot_calls
    assert update_addon.call_count == update_calls
@pytest.mark.parametrize(
    "stop_addon_side_effect, entry_state",
    [
        (None, ConfigEntryState.NOT_LOADED),
        (OppioAPIError("Boom"), ConfigEntryState.LOADED),
    ],
)
async def test_stop_addon(
    opp,
    client,
    addon_installed,
    addon_running,
    addon_options,
    stop_addon,
    stop_addon_side_effect,
    entry_state,
):
    """Verify disabling the entry stops the add-on; a stop failure keeps it loaded."""
    stop_addon.side_effect = stop_addon_side_effect
    addon_options["device"] = "/test"
    addon_options["network_key"] = "abc123"
    entry = MockConfigEntry(
        domain=DOMAIN,
        title="Z-Wave JS",
        data={
            "url": "ws://host1:3001",
            "use_addon": True,
            "usb_path": "/test",
            "network_key": "abc123",
        },
    )
    entry.add_to_opp(opp)

    await opp.config_entries.async_setup(entry.entry_id)
    await opp.async_block_till_done()
    assert entry.state is ConfigEntryState.LOADED

    # Disabling the entry should try to stop the managed add-on.
    await opp.config_entries.async_set_disabled_by(entry.entry_id, DISABLED_USER)
    await opp.async_block_till_done()

    assert entry.state == entry_state
    assert stop_addon.call_count == 1
    assert stop_addon.call_args == call(opp, "core_zwave_js")
async def test_remove_entry(
    opp, addon_installed, stop_addon, create_shapshot, uninstall_addon, caplog
):
    """Test remove the config entry.

    Exercises four removal scenarios in sequence: no integration-created
    add-on, a fully successful stop/snapshot/uninstall flow, and then a
    failure at each of the stop, snapshot and uninstall steps.  A failed
    step aborts the following steps but never blocks entry removal.
    """
    # test successful remove without created add-on
    entry = MockConfigEntry(
        domain=DOMAIN,
        title="Z-Wave JS",
        data={"integration_created_addon": False},
    )
    entry.add_to_opp(opp)
    assert entry.state is ConfigEntryState.NOT_LOADED
    assert len(opp.config_entries.async_entries(DOMAIN)) == 1
    await opp.config_entries.async_remove(entry.entry_id)
    assert entry.state is ConfigEntryState.NOT_LOADED
    assert len(opp.config_entries.async_entries(DOMAIN)) == 0
    # test successful remove with created add-on
    entry = MockConfigEntry(
        domain=DOMAIN,
        title="Z-Wave JS",
        data={"integration_created_addon": True},
    )
    entry.add_to_opp(opp)
    assert len(opp.config_entries.async_entries(DOMAIN)) == 1
    await opp.config_entries.async_remove(entry.entry_id)
    assert stop_addon.call_count == 1
    assert stop_addon.call_args == call(opp, "core_zwave_js")
    assert create_shapshot.call_count == 1
    assert create_shapshot.call_args == call(
        opp,
        {"name": "addon_core_zwave_js_1.0", "addons": ["core_zwave_js"]},
        partial=True,
    )
    assert uninstall_addon.call_count == 1
    assert uninstall_addon.call_args == call(opp, "core_zwave_js")
    assert entry.state is ConfigEntryState.NOT_LOADED
    assert len(opp.config_entries.async_entries(DOMAIN)) == 0
    stop_addon.reset_mock()
    create_shapshot.reset_mock()
    uninstall_addon.reset_mock()
    # test add-on stop failure: snapshot and uninstall are skipped
    entry.add_to_opp(opp)
    assert len(opp.config_entries.async_entries(DOMAIN)) == 1
    stop_addon.side_effect = OppioAPIError()
    await opp.config_entries.async_remove(entry.entry_id)
    assert stop_addon.call_count == 1
    assert stop_addon.call_args == call(opp, "core_zwave_js")
    assert create_shapshot.call_count == 0
    assert uninstall_addon.call_count == 0
    assert entry.state is ConfigEntryState.NOT_LOADED
    assert len(opp.config_entries.async_entries(DOMAIN)) == 0
    assert "Failed to stop the Z-Wave JS add-on" in caplog.text
    stop_addon.side_effect = None
    stop_addon.reset_mock()
    create_shapshot.reset_mock()
    uninstall_addon.reset_mock()
    # test create snapshot failure: uninstall is skipped
    entry.add_to_opp(opp)
    assert len(opp.config_entries.async_entries(DOMAIN)) == 1
    create_shapshot.side_effect = OppioAPIError()
    await opp.config_entries.async_remove(entry.entry_id)
    assert stop_addon.call_count == 1
    assert stop_addon.call_args == call(opp, "core_zwave_js")
    assert create_shapshot.call_count == 1
    assert create_shapshot.call_args == call(
        opp,
        {"name": "addon_core_zwave_js_1.0", "addons": ["core_zwave_js"]},
        partial=True,
    )
    assert uninstall_addon.call_count == 0
    assert entry.state is ConfigEntryState.NOT_LOADED
    assert len(opp.config_entries.async_entries(DOMAIN)) == 0
    assert "Failed to create a snapshot of the Z-Wave JS add-on" in caplog.text
    create_shapshot.side_effect = None
    stop_addon.reset_mock()
    create_shapshot.reset_mock()
    uninstall_addon.reset_mock()
    # test add-on uninstall failure: entry is still removed
    entry.add_to_opp(opp)
    assert len(opp.config_entries.async_entries(DOMAIN)) == 1
    uninstall_addon.side_effect = OppioAPIError()
    await opp.config_entries.async_remove(entry.entry_id)
    assert stop_addon.call_count == 1
    assert stop_addon.call_args == call(opp, "core_zwave_js")
    assert create_shapshot.call_count == 1
    assert create_shapshot.call_args == call(
        opp,
        {"name": "addon_core_zwave_js_1.0", "addons": ["core_zwave_js"]},
        partial=True,
    )
    assert uninstall_addon.call_count == 1
    assert uninstall_addon.call_args == call(opp, "core_zwave_js")
    assert entry.state is ConfigEntryState.NOT_LOADED
    assert len(opp.config_entries.async_entries(DOMAIN)) == 0
    assert "Failed to uninstall the Z-Wave JS add-on" in caplog.text
async def test_removed_device(opp, client, multiple_devices, integration):
    """Test that the device registry gets updated when a device gets removed."""
    nodes = multiple_devices
    # Verify how many nodes are available
    assert len(client.driver.controller.nodes) == 2
    # Make sure there are the same number of devices
    dev_reg = dr.async_get(opp)
    device_entries = dr.async_entries_for_config_entry(dev_reg, integration.entry_id)
    assert len(device_entries) == 2
    # Check how many entities there are
    ent_reg = er.async_get(opp)
    entity_entries = er.async_entries_for_config_entry(ent_reg, integration.entry_id)
    assert len(entity_entries) == 24
    # Remove a node (id 13) and reload the entry
    old_node = nodes.pop(13)
    await opp.config_entries.async_reload(integration.entry_id)
    await opp.async_block_till_done()
    # Assert that the node and all of its entities were removed from the device and
    # entity registry
    device_entries = dr.async_entries_for_config_entry(dev_reg, integration.entry_id)
    assert len(device_entries) == 1
    entity_entries = er.async_entries_for_config_entry(ent_reg, integration.entry_id)
    assert len(entity_entries) == 15
    assert dev_reg.async_get_device({get_device_id(client, old_node)}) is None
async def test_suggested_area(opp, client, eaton_rf9640_dimmer):
    """Verify the device registry entry ends up with an area assigned."""
    entry = MockConfigEntry(domain="zwave_js", data={"url": "ws://test.org"})
    entry.add_to_opp(opp)
    await opp.config_entries.async_setup(entry.entry_id)
    await opp.async_block_till_done()

    entity = er.async_get(opp).async_get(EATON_RF9640_ENTITY)
    assert dr.async_get(opp).async_get(entity.device_id).area_id is not None
| 35.328738
| 108
| 0.711691
|
4a00acddfd6c9bb409e4ca0e85951c48fc490956
| 116
|
py
|
Python
|
Lab_5/Task_3.py
|
spencerperley/CPE_101
|
9ae3c5a0042780f824de5edee275b35cdb0bbaec
|
[
"MIT"
] | 1
|
2022-01-12T21:48:23.000Z
|
2022-01-12T21:48:23.000Z
|
Lab_5/Task_3.py
|
spencerperley/CPE_101
|
9ae3c5a0042780f824de5edee275b35cdb0bbaec
|
[
"MIT"
] | null | null | null |
Lab_5/Task_3.py
|
spencerperley/CPE_101
|
9ae3c5a0042780f824de5edee275b35cdb0bbaec
|
[
"MIT"
] | null | null | null |
def filter_pallendromes(myList):
    """Return the items whose outer two element pairs mirror each other.

    An item is kept when item[0] + item[1] equals item[4] + item[3]; for
    five-character strings this is exactly the palindrome condition (the
    middle character is unconstrained).  Items must support indices 0-4.
    """
    kept = []
    for item in myList:
        if item[0] + item[1] == item[4] + item[3]:
            kept.append(item)
    return kept
| 38.666667
| 82
| 0.698276
|
4a00ad051c446dcc93590eafccde1f7fa63b723d
| 4,152
|
py
|
Python
|
run.py
|
tuzi3040/qwebirc
|
7a69c77d7325fa271f1cdf6e07755587eb35763d
|
[
"BSD-3-Clause"
] | 2
|
2021-06-10T19:13:54.000Z
|
2021-11-21T15:17:35.000Z
|
run.py
|
tuzi3040/qwebirc
|
7a69c77d7325fa271f1cdf6e07755587eb35763d
|
[
"BSD-3-Clause"
] | null | null | null |
run.py
|
tuzi3040/qwebirc
|
7a69c77d7325fa271f1cdf6e07755587eb35763d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# this entire thing is a hack and badly needs reimplementing
import bin.compile
# Pre-flight check from the build tooling (see bin.compile.vcheck) before
# anything else is imported or configured.
bin.compile.vcheck()
# Port the HTTP server listens on unless overridden with -p/--port.
DEFAULT_PORT = 9090
from optparse import OptionParser
import sys, os, config
def run_twistd(args1=None, args2=None):
    """Invoke twistd's entry point with a synthesized sys.argv.

    The argv is assembled as [script, *args1, "qwebirc", *args2], so
    args1 carries twistd options and args2 carries options for the
    qwebirc plugin.
    """
    from twisted.scripts.twistd import run

    argv = [sys.argv[0]]
    argv.extend(args1 or [])
    argv.append("qwebirc")
    argv.extend(args2 or [])
    sys.argv = argv
    run()
def help_reactors(*args):
    """Option callback: print twistd's reactor list, then exit non-zero."""
    run_twistd(["--help-reactors"])
    sys.exit(1)
# Pick the most capable reactor this platform's select module offers.
# (`from select import X` raises ImportError exactly when the attribute
# is missing, so hasattr checks are equivalent to the old try/except chain.)
import select as _select

if hasattr(_select, "epoll"):
    DEFAULT_REACTOR = "epoll"
elif hasattr(_select, "kqueue"):
    DEFAULT_REACTOR = "kqueue"
elif hasattr(_select, "poll"):
    DEFAULT_REACTOR = "poll"
else:
    DEFAULT_REACTOR = "select"
# Command line interface; option order here is the order shown in --help.
parser = OptionParser()
parser.add_option(
    "-n", "--no-daemon", help="Don't run in the background.",
    action="store_false", dest="daemonise", default=True,
)
parser.add_option(
    "--help-reactors", help="Display a list of reactor names.",
    action="callback", callback=help_reactors,
)
parser.add_option(
    "-b", "--debug", help="Run in the Python Debugger.",
    action="store_true", dest="debug", default=False,
)
parser.add_option(
    "-t", "--tracebacks",
    help="Display tracebacks in error pages (this reveals a LOT of information, do NOT use in production!)",
    action="store_true", dest="tracebacks", default=False,
)
parser.add_option(
    "-r", "--reactor",
    help="Which reactor to use (see --help-reactors for a list).",
    dest="reactor", default=DEFAULT_REACTOR,
)
parser.add_option(
    "-p", "--port", help="Port to start the server on.",
    type="int", dest="port", default=DEFAULT_PORT,
)
parser.add_option("-i", "--ip", help="IP address to listen on.", dest="ip", default="0.0.0.0")
parser.add_option("-l", "--logfile", help="Path to twisted log file.", dest="logfile")
parser.add_option("-c", "--clf", help="Path to web CLF (Combined Log Format) log file.", dest="clogfile")
parser.add_option("-C", "--certificate", help="Path to SSL certificate.", dest="sslcertificate")
parser.add_option("-k", "--key", help="Path to SSL key.", dest="sslkey")
parser.add_option("-H", "--certificate-chain", help="Path to SSL certificate chain file.", dest="sslchain")
parser.add_option("-P", "--pidfile", help="Path to store PID file", dest="pidfile")
parser.add_option("-s", "--syslog", help="Log to syslog", action="store_true", dest="syslog", default=False)
parser.add_option(
    "-f", "--flash-port",
    help="Port to listen for flash policy connections on.",
    type="int", dest="flashPort",
)
parser.add_option("--profile", help="Run in profile mode, dumping results to this file", dest="profile")
parser.add_option("--profiler", help="Name of profiler to use", dest="profiler")
parser.add_option("--syslog-prefix", help="Syslog prefix", dest="syslog_prefix", default="qwebirc")

# Arguments from config.ARGS (if defined) are prepended so the real
# command line can override them.
cli_args = sys.argv[1:]
if hasattr(config, "ARGS"):
    import shlex
    cli_args = shlex.split(config.ARGS) + cli_args

(options, args) = parser.parse_args(args=cli_args)
# Translate parsed options into twistd arguments (args1) and qwebirc
# plugin arguments (args2); run_twistd combines them into one argv.
args1, args2 = [], []

if not options.daemonise:
    args1.append("-n")
if options.debug:
    args1.append("-b")

if options.reactor != DEFAULT_REACTOR:
    # Install the requested reactor before twisted.internet.reactor is used.
    reactor_name = options.reactor + "reactor"
    getattr(__import__("twisted.internet", fromlist=[reactor_name]), reactor_name).install()

if options.logfile:
    args1.extend(["--logfile", options.logfile])
if options.pidfile:
    args1.extend(["--pidfile", options.pidfile])
if options.syslog:
    args1.append("--syslog")
if options.profile:
    args1.extend(["--profile", options.profile])
if options.profiler:
    args1.extend(["--profiler", options.profiler])

if options.syslog and options.syslog_prefix:
    import syslog
    syslog.openlog(options.syslog_prefix)

if not options.tracebacks:
    args2.append("-n")
if options.clogfile:
    args2.extend(["--logfile", options.clogfile])
if options.flashPort:
    args2.extend(["--flashPort", options.flashPort])

if options.sslcertificate and options.sslkey:
    args2.extend(["--certificate", options.sslcertificate, "--privkey", options.sslkey, "--https", options.port])
    if options.sslchain:
        args2.extend(["--certificate-chain", options.sslchain])
else:
    args2.extend(["--port", options.port])
args2.extend(["--ip", options.ip])

run_twistd(args1, args2)
| 37.745455
| 199
| 0.706166
|
4a00adb300244686e7b2b7fc1b770b1a1024e07d
| 11,885
|
py
|
Python
|
elasticsearch/_sync/client/utils.py
|
elastic/elasticsearch-py
|
c0d9bd1b0bd94f6819172199bebc338358410496
|
[
"Apache-2.0"
] | 3,353
|
2015-03-12T17:41:01.000Z
|
2022-03-31T05:03:02.000Z
|
elasticsearch/_sync/client/utils.py
|
elastic/elasticsearch-py
|
c0d9bd1b0bd94f6819172199bebc338358410496
|
[
"Apache-2.0"
] | 1,356
|
2015-03-11T14:27:35.000Z
|
2022-03-30T22:57:07.000Z
|
elasticsearch/_sync/client/utils.py
|
elastic/elasticsearch-py
|
c0d9bd1b0bd94f6819172199bebc338358410496
|
[
"Apache-2.0"
] | 1,193
|
2015-03-11T15:13:26.000Z
|
2022-03-29T02:45:55.000Z
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import warnings
from datetime import date, datetime
from functools import wraps
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
List,
Mapping,
MutableMapping,
Optional,
Tuple,
TypeVar,
Union,
)
from elastic_transport import NodeConfig
from elastic_transport.client_utils import (
DEFAULT,
client_meta_version,
parse_cloud_id,
url_to_node_config,
)
from ..._version import __versionstr__
from ...compat import quote, string_types, to_bytes, to_str
from ...serializer import Serializer
if TYPE_CHECKING:
from ... import Elasticsearch
# parts of URL to be omitted
SKIP_IN_PATH: Collection[Any] = (None, "", b"", [], ())

# To be passed to 'client_meta_service' on the Transport
CLIENT_META_SERVICE = ("es", client_meta_version(__versionstr__))

# Accepted shapes for the 'hosts' parameter: a single URL string, or a list
# of URL strings, old-style host dictionaries, or NodeConfig instances.
_TYPE_HOSTS = Union[str, List[Union[str, Mapping[str, Union[str, int]], NodeConfig]]]
def client_node_configs(
    hosts: _TYPE_HOSTS, cloud_id: str, **kwargs: Any
) -> List[NodeConfig]:
    """Resolve the user-supplied 'hosts'/'cloud_id' into a list of NodeConfigs,
    applying any extra node options passed via keyword arguments.
    """
    if cloud_id is None:
        base_configs = hosts_to_node_configs(hosts)
    else:
        if hosts is not None:
            raise ValueError(
                "The 'cloud_id' and 'hosts' parameters are mutually exclusive"
            )
        base_configs = cloud_id_to_node_configs(cloud_id)

    # Drop sentinel 'DEFAULT' values so the per-node defaults survive.
    overrides = {key: val for key, val in kwargs.items() if val is not DEFAULT}
    return [config.replace(**overrides) for config in base_configs]
def hosts_to_node_configs(hosts: _TYPE_HOSTS) -> List[NodeConfig]:
    """Transforms the many formats of 'hosts' into NodeConfigs"""
    # Normalize the single-host case so the loop below handles everything.
    if not isinstance(hosts, (tuple, list)):
        return hosts_to_node_configs([hosts])

    resolved: List[NodeConfig] = []
    for entry in hosts:
        if isinstance(entry, NodeConfig):
            resolved.append(entry)
        elif isinstance(entry, str):
            resolved.append(url_to_node_config(entry))
        elif isinstance(entry, Mapping):
            resolved.append(host_mapping_to_node_config(entry))
        else:
            raise ValueError(
                "'hosts' must be a list of URLs, NodeConfigs, or dictionaries"
            )
    return resolved
def host_mapping_to_node_config(host: Mapping[str, Union[str, int]]) -> NodeConfig:
    """Converts an old-style dictionary host specification to a NodeConfig"""
    recognized_keys = {
        "scheme",
        "host",
        "port",
        "path_prefix",
        "url_prefix",
        "use_ssl",
    }
    unknown_keys = set(host) - recognized_keys
    if unknown_keys:
        bad = "', '".join(sorted(unknown_keys))
        good = "', '".join(sorted(recognized_keys))
        raise ValueError(
            f"Can't specify the options '{bad}' via a "
            f"dictionary in 'hosts', only '{good}' options "
            "are allowed"
        )
    options = dict(host)

    # Handle the deprecated option 'use_ssl'
    if "use_ssl" in options:
        use_ssl = options.pop("use_ssl")
        if not isinstance(use_ssl, bool):
            raise TypeError("'use_ssl' must be of type 'bool'")

        # Ensure the user isn't specifying scheme=http use_ssl=True or vice-versa
        if "scheme" in options and (options["scheme"] == "https") != use_ssl:
            raise ValueError(
                f"Cannot specify conflicting options 'scheme={options['scheme']}' "
                f"and 'use_ssl={use_ssl}'. Use 'scheme' only instead"
            )

        warnings.warn(
            "The 'use_ssl' option is no longer needed as specifying a 'scheme' is now required",
            category=DeprecationWarning,
            stacklevel=3,
        )
        options.setdefault("scheme", "https" if use_ssl else "http")

    # Handle the deprecated option 'url_prefix'
    if "url_prefix" in options:
        if "path_prefix" in options:
            raise ValueError(
                "Cannot specify conflicting options 'url_prefix' and "
                "'path_prefix'. Use 'path_prefix' only instead"
            )
        warnings.warn(
            "The 'url_prefix' option is deprecated in favor of 'path_prefix'",
            category=DeprecationWarning,
            stacklevel=3,
        )
        options["path_prefix"] = options.pop("url_prefix")

    return NodeConfig(**options)  # type: ignore
def cloud_id_to_node_configs(cloud_id: str) -> List[NodeConfig]:
    """Transforms an Elastic Cloud ID into a NodeConfig"""
    address = parse_cloud_id(cloud_id).es_address
    # Both host and port must be present for a usable endpoint.
    if address is None or not all(address):
        raise ValueError("Cloud ID missing host and port information for Elasticsearch")
    es_host, es_port = address
    node = NodeConfig(
        scheme="https",
        host=es_host,
        port=es_port,
        http_compress=True,
        # TODO: Set TLSv1.2+
    )
    return [node]
def _escape(value: Any) -> Union[bytes, str]:
"""
Escape a single value of a URL string or a query parameter. If it is a list
or tuple, turn it into a comma-separated string first.
"""
# make sequences into comma-separated stings
if isinstance(value, (list, tuple)):
value = ",".join(value)
# dates and datetimes into isoformat
elif isinstance(value, (date, datetime)):
value = value.isoformat()
# make bools into true/false strings
elif isinstance(value, bool):
value = str(value).lower()
elif isinstance(value, bytes):
return value
# encode strings to utf-8
if not isinstance(value, str):
return str(value).encode("utf-8")
return value.encode("utf-8")
def _make_path(*parts: Any) -> str:
    """
    Create a URL string from parts, omit all `None` values and empty strings.
    Convert lists and tuples to comma separated values.
    """
    # TODO: maybe only allow some parts to be lists/tuples ?
    segments = [
        # preserve ',' and '*' in url for nicer URLs in logs
        quote(_escape(part), b",*")
        for part in parts
        if part not in SKIP_IN_PATH
    ]
    return "/" + "/".join(segments)
# parameters that apply to all methods
GLOBAL_PARAMS: Tuple[str, ...] = (
    "pretty",
    "human",
    "error_trace",
    "format",
    "filter_path",
)

# Generic type variable used to type the 'query_params' decorator.
T = TypeVar("T")
def query_params(
    *es_query_params: str,
) -> Callable[[T], T]:
    """
    Decorator that pops all accepted parameters from method's kwargs and puts
    them in the params argument.
    """

    def _wrapper(func: Any) -> Any:
        @wraps(func)
        def _wrapped(*args: Any, **kwargs: Any) -> Any:
            # Copy so the caller's dicts are never mutated.
            params = (kwargs.pop("params", None) or {}).copy()
            # HTTP header names are case-insensitive; normalize to lowercase.
            headers = {
                k.lower(): v
                for k, v in (kwargs.pop("headers", None) or {}).copy().items()
            }
            if "opaque_id" in kwargs:
                headers["x-opaque-id"] = kwargs.pop("opaque_id")
            http_auth = kwargs.pop("http_auth", None)
            api_key = kwargs.pop("api_key", None)
            if http_auth is not None and api_key is not None:
                raise ValueError(
                    "Only one of 'http_auth' and 'api_key' may be passed at a time"
                )
            elif http_auth is not None:
                headers["authorization"] = f"Basic {_base64_auth_header(http_auth)}"
            elif api_key is not None:
                headers["authorization"] = f"ApiKey {_base64_auth_header(api_key)}"
            # Move every accepted (plus globally-supported) parameter out of
            # kwargs and into the query params, URL-escaping its value.
            for p in es_query_params + GLOBAL_PARAMS:
                if p in kwargs:
                    v = kwargs.pop(p)
                    if v is not None:
                        params[p] = _escape(v)
            # don't treat ignore, request_timeout, and opaque_id as other params to avoid escaping
            for p in ("ignore", "request_timeout"):
                if p in kwargs:
                    params[p] = kwargs.pop(p)
            return func(*args, params=params, headers=headers, **kwargs)

        return _wrapped

    return _wrapper
def _bulk_body(
    serializer: Serializer, body: Union[str, bytes, Collection[Any]]
) -> Union[str, bytes]:
    """Serialize a bulk request body and guarantee it ends with a newline."""
    # Not already a string: serialize every item and join with newlines.
    if not isinstance(body, string_types):
        body = b"\n".join(map(serializer.dumps, body))

    # The Elasticsearch bulk API requires a trailing newline.
    if isinstance(body, bytes) and not body.endswith(b"\n"):
        body += b"\n"
    elif isinstance(body, str) and not body.endswith("\n"):
        body += "\n"
    return body
def _base64_auth_header(
    auth_value: Union[List[str], Tuple[str, ...], str, bytes]
) -> str:
    """Takes either a 2-tuple or a base64-encoded string
    and returns a base64-encoded string to be used
    as an HTTP authorization header.
    """
    if isinstance(auth_value, (list, tuple)):
        # e.g. ('user', 'pass') -> base64('user:pass')
        joined = ":".join(auth_value)
        auth_value = base64.b64encode(to_bytes(joined))
    return to_str(auth_value)
def _deprecated_options(
client: "Elasticsearch",
params: Optional[MutableMapping[str, Any]],
) -> Tuple["Elasticsearch", Optional[Mapping[str, Any]]]:
"""Applies the deprecated logic for per-request options. When passed deprecated options
this function will convert them into a Elasticsearch.options() or encoded params"""
if params:
options_kwargs = {}
opaque_id = params.pop("opaque_id", None)
api_key = params.pop("api_key", None)
http_auth = params.pop("http_auth", None)
headers = {}
if opaque_id is not None:
headers["x-opaque-id"] = opaque_id
if http_auth is not None and api_key is not None:
raise ValueError(
"Only one of 'http_auth' and 'api_key' may be passed at a time"
)
elif api_key is not None:
options_kwargs["api_key"] = api_key
elif http_auth is not None:
options_kwargs["basic_auth"] = http_auth
if headers:
options_kwargs["headers"] = headers
request_timeout = params.pop("request_timeout", None)
if request_timeout is not None:
options_kwargs["request_timeout"] = request_timeout
ignore = params.pop("ignore", None)
if ignore is not None:
options_kwargs["ignore_status"] = ignore
if options_kwargs:
warnings.warn(
"Passing transport options in the API method is deprecated. Use 'Elasticsearch.options()' instead.",
category=DeprecationWarning,
stacklevel=3,
)
client = client.options(**options_kwargs)
# If there are any query params left we warn about API parameters.
if params:
warnings.warn(
"Passing options via 'params' is deprecated, instead use API parameters directly.",
category=DeprecationWarning,
stacklevel=3,
)
return client, params or None
| 32.922438
| 116
| 0.621708
|
4a00add5cfc6b0fb864012326da9f7de8d73468c
| 21,812
|
py
|
Python
|
sdks/python/apache_beam/pvalue.py
|
shitanshu-google/beam
|
9cd959f61d377874ee1839c2de4bb8f65a948ecc
|
[
"Apache-2.0"
] | 3
|
2020-08-28T17:47:26.000Z
|
2021-08-17T06:38:58.000Z
|
sdks/python/apache_beam/pvalue.py
|
shitanshu-google/beam
|
9cd959f61d377874ee1839c2de4bb8f65a948ecc
|
[
"Apache-2.0"
] | 5
|
2020-11-13T19:06:10.000Z
|
2021-11-10T19:56:12.000Z
|
sdks/python/apache_beam/pvalue.py
|
shitanshu-google/beam
|
9cd959f61d377874ee1839c2de4bb8f65a948ecc
|
[
"Apache-2.0"
] | 1
|
2021-10-05T20:53:52.000Z
|
2021-10-05T20:53:52.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PValue, PCollection: one node of a dataflow graph.
A node of a dataflow processing graph is a PValue. Currently, there is only
one type: PCollection (a potentially very large set of arbitrary values).
Once created, a PValue belongs to a pipeline and has an associated
transform (of type PTransform), which describes how the value will be
produced when the pipeline gets executed.
"""
# pytype: skip-file
from __future__ import absolute_import
import collections
import itertools
from builtins import hex
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Generic
from typing import Iterator
from typing import Optional
from typing import Sequence
from typing import TypeVar
from typing import Union
from past.builtins import unicode
from apache_beam import coders
from apache_beam import typehints
from apache_beam.internal import pickler
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
if TYPE_CHECKING:
from apache_beam.transforms import sideinputs
from apache_beam.transforms.core import ParDo
from apache_beam.transforms.core import Windowing
from apache_beam.pipeline import AppliedPTransform
from apache_beam.pipeline import Pipeline
from apache_beam.runners.pipeline_context import PipelineContext
# Public API of this module.
__all__ = [
    'PCollection',
    'TaggedOutput',
    'AsSingleton',
    'AsIter',
    'AsList',
    'AsDict',
    'EmptySideInput',
]

# Element type variable for the generic PCollection[T].
T = TypeVar('T')
class PValue(object):
  """Base class for PCollection.

  Dataflow users should not construct PValue objects directly in their
  pipelines.

  A PValue has the following main characteristics:
    (1) Belongs to a pipeline. Added during object initialization.
    (2) Has a transform that can compute the value if executed.
    (3) Has a value which is meaningful if the transform was executed.
  """
  def __init__(self,
               pipeline,  # type: Pipeline
               tag=None,  # type: Optional[str]
               element_type=None,  # type: Optional[object]
               windowing=None,  # type: Optional[Windowing]
               is_bounded=True,
              ):
    """Initializes a PValue with all arguments hidden behind keyword arguments.

    Args:
      pipeline: Pipeline object for this PValue.
      tag: Tag of this PValue.
      element_type: The type of this PValue.
      windowing: Windowing strategy, if already known at construction time.
      is_bounded: Whether this value is bounded (batch) or unbounded (stream).
    """
    self.pipeline = pipeline
    self.tag = tag
    self.element_type = element_type
    # The AppliedPTransform instance for the application of the PTransform
    # generating this PValue. The field gets initialized when a transform
    # gets applied.
    self.producer = None  # type: Optional[AppliedPTransform]
    self.is_bounded = is_bounded
    # Only set when supplied; otherwise derived lazily from the producer
    # (see PCollection.windowing).
    if windowing:
      self._windowing = windowing

  def __str__(self):
    return self._str_internal()

  def __repr__(self):
    return '<%s at %s>' % (self._str_internal(), hex(id(self)))

  def _str_internal(self):
    # Human-readable identifier of the form ClassName[producer_label.tag].
    return "%s[%s.%s]" % (
        self.__class__.__name__,
        self.producer.full_label if self.producer else None,
        self.tag)

  def apply(self, *args, **kwargs):
    """Applies a transform or callable to a PValue.

    Args:
      *args: positional arguments.
      **kwargs: keyword arguments.

    The method will insert the pvalue as the next argument following an
    optional first label and a transform/callable object. It will call the
    pipeline.apply() method with this modified argument list.
    """
    arglist = list(args)
    arglist.insert(1, self)
    return self.pipeline.apply(*arglist, **kwargs)

  def __or__(self, ptransform):
    # Enables the 'pcoll | PTransform' syntax by delegating to Pipeline.apply.
    return self.pipeline.apply(ptransform, self)
class PCollection(PValue, Generic[T]):
  """A multiple values (potentially huge) container.

  Dataflow users should not construct PCollection objects directly in their
  pipelines.
  """
  def __eq__(self, other):
    if isinstance(other, PCollection):
      return self.tag == other.tag and self.producer == other.producer
    # Previously this fell off the end and returned None for non-PCollection
    # operands; returning NotImplemented lets Python fall back to the
    # reflected comparison (and ultimately identity), which yields the same
    # effective result while cooperating with other types' __eq__.
    return NotImplemented

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # Consistent with __eq__: equal PCollections hash equally.
    return hash((self.tag, self.producer))

  @property
  def windowing(self):
    # type: () -> Windowing
    # Lazily derived from the producing transform on first access.
    if not hasattr(self, '_windowing'):
      assert self.producer is not None and self.producer.transform is not None
      self._windowing = self.producer.transform.get_windowing(
          self.producer.inputs)
    return self._windowing

  def __reduce_ex__(self, unused_version):
    # Pickling a PCollection is almost always the wrong thing to do, but we
    # can't prohibit it as it often gets implicitly picked up (e.g. as part
    # of a closure).
    return _InvalidUnpickledPCollection, ()

  @staticmethod
  def from_(pcoll):
    # type: (PValue) -> PCollection
    """Create a PCollection, using another PCollection as a starting point.

    Transfers relevant attributes.
    """
    return PCollection(pcoll.pipeline, is_bounded=pcoll.is_bounded)

  def to_runner_api(self, context):
    # type: (PipelineContext) -> beam_runner_api_pb2.PCollection
    """Serialize this PCollection to its runner API proto representation."""
    return beam_runner_api_pb2.PCollection(
        unique_name=self._unique_name(),
        coder_id=context.coder_id_from_element_type(self.element_type),
        is_bounded=beam_runner_api_pb2.IsBounded.BOUNDED
        if self.is_bounded else beam_runner_api_pb2.IsBounded.UNBOUNDED,
        windowing_strategy_id=context.windowing_strategies.get_id(
            self.windowing))

  def _unique_name(self):
    # type: () -> str
    # Prefixing with the label length avoids collisions between labels that
    # are prefixes of one another.
    if self.producer:
      return '%d%s.%s' % (
          len(self.producer.full_label), self.producer.full_label, self.tag)
    else:
      return 'PCollection%s' % id(self)

  @staticmethod
  def from_runner_api(proto, context):
    # type: (beam_runner_api_pb2.PCollection, PipelineContext) -> PCollection
    # Producer and tag will be filled in later, the key point is that the same
    # object is returned for the same pcollection id.
    # We pass None for the PCollection's Pipeline to avoid a cycle during
    # deserialization. It will be populated soon after this call, in
    # Pipeline.from_runner_api(). This brief period is the only time that
    # PCollection.pipeline is allowed to be None.
    return PCollection(
        None,  # type: ignore[arg-type]
        element_type=context.element_type_from_coder_id(proto.coder_id),
        windowing=context.windowing_strategies.get_by_id(
            proto.windowing_strategy_id),
        is_bounded=proto.is_bounded == beam_runner_api_pb2.IsBounded.BOUNDED)
class _InvalidUnpickledPCollection(object):
  # Placeholder substituted for a PCollection during pickling (see
  # PCollection.__reduce_ex__); using the unpickled object signals misuse.
  pass
class PBegin(PValue):
  """A pipeline begin marker used as input to create/read transforms.

  The class is used internally to represent inputs to Create and Read
  transforms. This allows us to have transforms that uniformly take PValue(s)
  as inputs.
  """
  # Purely a marker type: all behavior comes from PValue.
  pass
class PDone(PValue):
  """PDone is the output of a transform that has a trivial result such as Write.
  """
  # Purely a marker type: all behavior comes from PValue.
  pass
class DoOutputsTuple(object):
  """An object grouping the multiple outputs of a ParDo or FlatMap transform."""
  def __init__(self,
               pipeline,  # type: Pipeline
               transform,  # type: ParDo
               tags,  # type: Sequence[str]
               main_tag  # type: Optional[str]
              ):
    self._pipeline = pipeline
    self._tags = tags
    self._main_tag = main_tag
    self._transform = transform
    # The ApplyPTransform instance for the application of the multi FlatMap
    # generating this value. The field gets initialized when a transform
    # gets applied.
    self.producer = None  # type: Optional[AppliedPTransform]
    # Dictionary of PCollections already associated with tags.
    self._pcolls = {}  # type: Dict[Optional[str], PCollection]

  def __str__(self):
    return '<%s>' % self._str_internal()

  def __repr__(self):
    return '<%s at %s>' % (self._str_internal(), hex(id(self)))

  def _str_internal(self):
    return '%s main_tag=%s tags=%s transform=%s' % (
        self.__class__.__name__, self._main_tag, self._tags, self._transform)

  def __iter__(self):
    # type: () -> Iterator[PCollection]
    """Iterates over tags returning for each call a (tag, pcollection) pair."""
    # The main output (if any) is yielded first, then the tagged outputs.
    if self._main_tag is not None:
      yield self[self._main_tag]
    for tag in self._tags:
      yield self[tag]

  def __getattr__(self, tag):
    # type: (str) -> PCollection
    # Special methods which may be accessed before the object is
    # fully constructed (e.g. in unpickling).
    if tag[:2] == tag[-2:] == '__':
      return object.__getattr__(self, tag)  # type: ignore
    return self[tag]

  def __getitem__(self, tag):
    # type: (Union[int, str, None]) -> PCollection
    # Accept int tags so that we can look at Partition tags with the
    # same ints that we used in the partition function.
    # TODO(gildea): Consider requiring string-based tags everywhere.
    # This will require a partition function that does not return ints.
    if isinstance(tag, int):
      tag = str(tag)
    if tag == self._main_tag:
      # The main output is stored internally under the None key.
      tag = None
    elif self._tags and tag not in self._tags:
      raise ValueError(
          "Tag '%s' is neither the main tag '%s' "
          "nor any of the tags %s" % (tag, self._main_tag, self._tags))
    # Check if we accessed this tag before.
    if tag in self._pcolls:
      return self._pcolls[tag]
    assert self.producer is not None
    if tag is not None:
      self._transform.output_tags.add(tag)
      pcoll = PCollection(self._pipeline, tag=tag, element_type=typehints.Any)
      # Transfer the producer from the DoOutputsTuple to the resulting
      # PCollection.
      pcoll.producer = self.producer.parts[0]
      # Add this as an output to both the inner ParDo and the outer _MultiParDo
      # PTransforms.
      if tag not in self.producer.parts[0].outputs:
        self.producer.parts[0].add_output(pcoll, tag)
      self.producer.add_output(pcoll, tag)
    else:
      # Main output is output of inner ParDo.
      pval = self.producer.parts[0].outputs[None]
      assert isinstance(pval,
                        PCollection), ("DoOutputsTuple should follow a ParDo.")
      pcoll = pval
    # Cache the result so repeated lookups return the same PCollection.
    self._pcolls[tag] = pcoll
    return pcoll
class TaggedOutput(object):
  """An object representing a tagged value.

  ParDo, Map, and FlatMap transforms can emit values on multiple outputs which
  are distinguished by string tags. The DoFn will return plain values
  if it wants to emit on the main output and TaggedOutput objects
  if it wants to emit a value on a specific tagged output.
  """
  def __init__(self, tag, value):
    # type: (str, Any) -> None
    # Validate the tag eagerly so a bad tag fails at construction time.
    if isinstance(tag, (str, unicode)):
      self.tag = tag
      self.value = value
    else:
      raise TypeError(
          'Attempting to create a TaggedOutput with non-string tag %s' %
          (tag, ))
class AsSideInput(object):
  """Marker specifying that a PCollection will be used as a side input.

  When a PCollection is supplied as a side input to a PTransform, it is
  necessary to indicate how the PCollection should be made available
  as a PTransform side argument (e.g. in the form of an iterable, mapping,
  or single value). This class is the superclass of all the various
  options, and should not be instantiated directly. (See instead AsSingleton,
  AsIter, etc.)
  """
  def __init__(self, pcoll):
    # type: (PCollection) -> None
    # Imported here to avoid a circular import at module load time.
    from apache_beam.transforms import sideinputs
    self.pvalue = pcoll
    # Maps each main-input window to the side-input window to read from.
    self._window_mapping_fn = sideinputs.default_window_mapping_fn(
        pcoll.windowing.windowfn)

  def _view_options(self):
    """Internal options corresponding to specific view.

    Intended for internal use by runner implementations.

    Returns:
      Tuple of options for the given view.
    """
    return {
        'window_mapping_fn': self._window_mapping_fn,
        'coder': self._windowed_coder(),
    }

  @property
  def element_type(self):
    # Subclasses with a more specific element type override this.
    return typehints.Any

  def _windowed_coder(self):
    # Coder for windowed values of this side input; falls back to the view's
    # declared element_type when the PCollection has none.
    return coders.WindowedValueCoder(
        coders.registry.get_coder(
            self.pvalue.element_type or self.element_type),
        self.pvalue.windowing.windowfn.get_window_coder())

  # TODO(robertwb): Get rid of _from_runtime_iterable and _view_options
  # in favor of _side_input_data().
  def _side_input_data(self):
    # type: () -> SideInputData
    view_options = self._view_options()
    from_runtime_iterable = type(self)._from_runtime_iterable
    return SideInputData(
        common_urns.side_inputs.ITERABLE.urn,
        self._window_mapping_fn,
        lambda iterable: from_runtime_iterable(iterable, view_options))

  def to_runner_api(self, context):
    # type: (PipelineContext) -> beam_runner_api_pb2.SideInput
    return self._side_input_data().to_runner_api(context)

  @staticmethod
  def from_runner_api(proto,  # type: beam_runner_api_pb2.SideInput
                      context  # type: PipelineContext
                     ):
    # type: (...) -> _UnpickledSideInput
    # Deserialization always yields a _UnpickledSideInput: the original
    # subclass cannot be recovered from the proto.
    return _UnpickledSideInput(SideInputData.from_runner_api(proto, context))

  @staticmethod
  def _from_runtime_iterable(it, options):
    # Subclasses convert the raw iterable into the view type (list, dict, ...).
    raise NotImplementedError

  def requires_keyed_input(self):
    return False
class _UnpickledSideInput(AsSideInput):
  """Side input reconstructed from deserialized SideInputData.

  Deliberately does not call AsSideInput.__init__, since the bound
  PCollection is not available after deserialization.
  """
  def __init__(self, side_input_data):
    # type: (SideInputData) -> None
    self._data = side_input_data
    self._window_mapping_fn = side_input_data.window_mapping_fn

  @staticmethod
  def _from_runtime_iterable(it, options):
    # The view function carried in the side input data does the conversion.
    return options['data'].view_fn(it)

  def _view_options(self):
    view_options = {'data': self._data}
    # For non-fn-api runners.
    view_options['window_mapping_fn'] = self._data.window_mapping_fn
    view_options['coder'] = self._windowed_coder()
    return view_options

  def _side_input_data(self):
    return self._data
class SideInputData(object):
  """All of the data about a side input except for the bound PCollection."""
  def __init__(self,
               access_pattern,  # type: str
               window_mapping_fn,  # type: sideinputs.WindowMappingFn
               view_fn
              ):
    # URN describing how the side input is accessed (iterable or multimap).
    self.access_pattern = access_pattern
    self.window_mapping_fn = window_mapping_fn
    # Callable converting the raw runtime iterable into the view value.
    self.view_fn = view_fn

  def to_runner_api(self, context):
    # type: (PipelineContext) -> beam_runner_api_pb2.SideInput
    # The view and window-mapping functions are Python-specific, so they are
    # serialized as pickled payloads under Python-only URNs.
    return beam_runner_api_pb2.SideInput(
        access_pattern=beam_runner_api_pb2.FunctionSpec(
            urn=self.access_pattern),
        view_fn=beam_runner_api_pb2.FunctionSpec(
            urn=python_urns.PICKLED_VIEWFN,
            payload=pickler.dumps(self.view_fn)),
        window_mapping_fn=beam_runner_api_pb2.FunctionSpec(
            urn=python_urns.PICKLED_WINDOW_MAPPING_FN,
            payload=pickler.dumps(self.window_mapping_fn)))

  @staticmethod
  def from_runner_api(proto, unused_context):
    # type: (beam_runner_api_pb2.SideInput, PipelineContext) -> SideInputData
    # Only pickled Python payloads can be deserialized here.
    assert proto.view_fn.urn == python_urns.PICKLED_VIEWFN
    assert (
        proto.window_mapping_fn.urn == python_urns.PICKLED_WINDOW_MAPPING_FN)
    return SideInputData(
        proto.access_pattern.urn,
        pickler.loads(proto.window_mapping_fn.payload),
        pickler.loads(proto.view_fn.payload))
class AsSingleton(AsSideInput):
  """Marker specifying that an entire PCollection is to be used as a side input.

  When a PCollection is supplied as a side input to a PTransform, it is
  necessary to indicate whether the entire PCollection should be made available
  as a PTransform side argument (in the form of an iterable), or whether just
  one value should be pulled from the PCollection and supplied as the side
  argument (as an ordinary value).

  Wrapping a PCollection side input argument to a PTransform in this container
  (e.g., data.apply('label', MyPTransform(), AsSingleton(my_side_input) )
  selects the latter behavior.

  The input PCollection must contain exactly one value per window, unless a
  default is given, in which case it may be empty.
  """
  # Sentinel distinguishing "no default supplied" from any user value.
  _NO_DEFAULT = object()

  def __init__(self, pcoll, default_value=_NO_DEFAULT):
    # type: (PCollection, Any) -> None
    super(AsSingleton, self).__init__(pcoll)
    self.default_value = default_value

  def __repr__(self):
    return 'AsSingleton(%s)' % self.pvalue

  def _view_options(self):
    base = super(AsSingleton, self)._view_options()
    if self.default_value != AsSingleton._NO_DEFAULT:
      base = dict(base, default=self.default_value)
    return base

  @staticmethod
  def _from_runtime_iterable(it, options):
    # Pull at most two elements to distinguish empty/singleton/multiple.
    sample = list(itertools.islice(it, 2))
    if len(sample) == 1:
      return sample[0]
    if not sample:
      return options.get('default', EmptySideInput())
    raise ValueError(
        'PCollection of size %d with more than one element accessed as a '
        'singleton view. First two elements encountered are "%s", "%s".' %
        (len(sample), str(sample[0]), str(sample[1])))

  @property
  def element_type(self):
    return self.pvalue.element_type
class AsIter(AsSideInput):
  """Marker specifying that an entire PCollection is to be used as a side input.

  When a PCollection is supplied as a side input to a PTransform, it is
  necessary to indicate whether the entire PCollection should be made available
  as a PTransform side argument (in the form of an iterable), or whether just
  one value should be pulled from the PCollection and supplied as the side
  argument (as an ordinary value).

  Wrapping a PCollection side input argument to a PTransform in this container
  (e.g., data.apply('label', MyPTransform(), AsIter(my_side_input) ) selects the
  former behavior.
  """
  @staticmethod
  def _from_runtime_iterable(it, options):
    # The raw iterable is handed to the DoFn unchanged (lazy access).
    return it

  def _side_input_data(self):
    # type: () -> SideInputData
    return SideInputData(
        common_urns.side_inputs.ITERABLE.urn,
        self._window_mapping_fn,
        lambda iterable: iterable)

  @property
  def element_type(self):
    return typehints.Iterable[self.pvalue.element_type]

  def __repr__(self):
    return 'AsIter(%s)' % self.pvalue
class AsList(AsSideInput):
  """Marker specifying that an entire PCollection is to be used as a side input.

  Intended for use in side-argument specification---the same places where
  AsSingleton and AsIter are used, but forces materialization of this
  PCollection as a list.

  Args:
    pcoll: Input pcollection.

  Returns:
    An AsList-wrapper around a PCollection whose one element is a list
      containing all elements in pcoll.
  """
  @staticmethod
  def _from_runtime_iterable(it, options):
    # Materialize the whole iterable eagerly.
    return list(it)

  def _side_input_data(self):
    # type: () -> SideInputData
    return SideInputData(
        common_urns.side_inputs.ITERABLE.urn,
        self._window_mapping_fn,
        list)
class AsDict(AsSideInput):
  """Marker specifying a PCollection to be used as an indexable side input.

  Intended for use in side-argument specification---the same places where
  AsSingleton and AsIter are used, but returns an interface that allows
  key lookup.

  Args:
    pcoll: Input pcollection. All elements should be key-value pairs (i.e.
       2-tuples) with unique keys.

  Returns:
    An AsDict-wrapper around a PCollection whose one element is a dict with
      entries for uniquely-keyed pairs in pcoll.
  """
  @staticmethod
  def _from_runtime_iterable(it, options):
    # Materialize the key-value pairs as a plain dict; a later duplicate key
    # overwrites an earlier one, matching dict() semantics.
    return dict(it)

  def _side_input_data(self):
    # type: () -> SideInputData
    return SideInputData(
        common_urns.side_inputs.ITERABLE.urn,
        self._window_mapping_fn,
        dict)
class AsMultiMap(AsSideInput):
  """Marker specifying a PCollection to be used as an indexable side input.

  Similar to AsDict, but multiple values may be associated per key, and
  the keys are fetched lazily rather than all having to fit in memory.

  Intended for use in side-argument specification---the same places where
  AsSingleton and AsIter are used, but returns an interface that allows
  key lookup.
  """
  @staticmethod
  def _from_runtime_iterable(it, options):
    # Legacy implementation.
    grouped = collections.defaultdict(list)
    for key, value in it:
      grouped[key].append(value)
    return grouped

  def _side_input_data(self):
    # type: () -> SideInputData
    return SideInputData(
        common_urns.side_inputs.MULTIMAP.urn,
        self._window_mapping_fn,
        lambda x: x)

  def requires_keyed_input(self):
    # Multimap side inputs are fetched by key, so the input must be keyed.
    return True
class EmptySideInput(object):
  """Value indicating when a singleton side input was empty.

  If a PCollection was furnished as a singleton side input to a PTransform, and
  that PCollection was empty, then this value is supplied to the DoFn in the
  place where a value from a non-empty PCollection would have gone. This alerts
  the DoFn that the side input PCollection was empty. Users may want to check
  whether side input values are EmptySideInput, but they will very likely never
  want to create new instances of this class themselves.
  """
  # Intentionally stateless: an isinstance check is the only supported use.
  pass
| 33.975078
| 80
| 0.710114
|
4a00aeadb08d02308019396b3b94b862c9c3b404
| 46,887
|
py
|
Python
|
src/module.py
|
DanielLin94144/End-to-End-jointCTC-Attention-ASR
|
2b8900f1f397d65d0e86972f7379bb3dfeb7c4ea
|
[
"MIT"
] | 3
|
2020-10-12T08:08:25.000Z
|
2021-11-15T01:02:11.000Z
|
src/module.py
|
DanielLin94144/End-to-End-jointCTC-Attention-ASR
|
2b8900f1f397d65d0e86972f7379bb3dfeb7c4ea
|
[
"MIT"
] | 1
|
2021-11-15T16:40:54.000Z
|
2021-11-15T16:40:54.000Z
|
src/module.py
|
DanielLin94144/End-to-End-jointCTC-Attention-ASR
|
2b8900f1f397d65d0e86972f7379bb3dfeb7c4ea
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence,pad_packed_sequence
from torch.autograd import Function
FBANK_SIZE = 80
''' one layer of liGRU using torchscript to accelrate training speed'''
class liGRU_layer(torch.jit.ScriptModule):
    """Single (optionally bidirectional) light-GRU layer compiled with TorchScript.

    The light GRU keeps only the update gate z and uses a ReLU candidate
    activation with batch norm on the feed-forward projections
    (Ravanelli et al., "Light Gated Recurrent Units for Speech Recognition").

    NOTE(review): this block contained unresolved git merge conflict markers
    (HEAD vs jit_ligru) that made the file un-importable.  The conflicts were
    resolved in favor of the `jit_ligru` branch: it matches the three
    positional arguments passed by RNNLayer (no `num_layers`), expects
    batch-first inputs, and produces the batch-first output via the final
    permute.  Device is hard-coded to CUDA, as in the original.
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        # num_layers,  # dropped on the jit_ligru branch: the layer is single-layer
        batch_size,
        dropout=0.0,
        nonlinearity="relu",
        bidirectional=True,
        device="cuda",
        do_fusion=False,
        fusion_layer_size=64,
        number_of_mic=1,
        act="relu",
        reduce="mean",
    ):
        super(liGRU_layer, self).__init__()
        self.hidden_size = int(hidden_size)
        self.input_size = int(input_size)
        self.batch_size = batch_size
        self.bidirectional = bidirectional
        self.dropout = dropout
        self.device = device
        self.do_fusion = do_fusion
        self.fusion_layer_size = fusion_layer_size
        self.number_of_mic = number_of_mic
        self.act = act
        self.reduce = reduce
        if self.do_fusion:
            # With multi-mic fusion the per-mic hidden size is a slice of the
            # fusion layer size.
            self.hidden_size = self.fusion_layer_size // self.number_of_mic
        if self.do_fusion:
            # NOTE(review): FusionLinearConv is defined elsewhere in the
            # project; it is only reachable when do_fusion=True.
            self.wz = FusionLinearConv(
                self.input_size, self.hidden_size, bias=True,
                number_of_mic=self.number_of_mic, act=self.act, reduce=self.reduce
            ).to(device)
            self.wh = FusionLinearConv(
                self.input_size, self.hidden_size, bias=True,
                number_of_mic=self.number_of_mic, act=self.act, reduce=self.reduce
            ).to(device)
        else:
            self.wz = nn.Linear(
                self.input_size, self.hidden_size, bias=True
            ).to(device)
            self.wh = nn.Linear(
                self.input_size, self.hidden_size, bias=True
            ).to(device)
        # Glorot init for the feed-forward projections, zero bias.
        self.wz.bias.data.fill_(0)
        torch.nn.init.xavier_normal_(self.wz.weight.data)
        self.wh.bias.data.fill_(0)
        torch.nn.init.xavier_normal_(self.wh.weight.data)
        # Recurrent projection computes both gates in one matmul (chunked later).
        self.u = nn.Linear(
            self.hidden_size, 2 * self.hidden_size, bias=False
        ).to(device)
        # Adding orthogonal initialization for recurrent connection
        nn.init.orthogonal_(self.u.weight)
        self.bn_wh = nn.BatchNorm1d(self.hidden_size, momentum=0.05).to(
            device
        )
        self.bn_wz = nn.BatchNorm1d(self.hidden_size, momentum=0.05).to(
            device
        )
        self.drop = torch.nn.Dropout(p=self.dropout, inplace=False).to(device)
        # Identity mask used at eval time (no dropout).
        self.drop_mask_te = torch.tensor([1.0], device=device).float()
        # A pool of pre-sampled recurrent dropout masks, cycled over steps.
        self.N_drop_masks = 100
        self.drop_mask_cnt = 0
        self.b_even = True  # whether the (halved) batch size is even; see ligru_cell
        # Setting the activation function
        self.act = torch.nn.ReLU().to(device)

    @torch.jit.script_method
    def forward(self, x):
        # type: (Tensor) -> Tensor
        if self.bidirectional:
            # Stack the time-reversed copy along the batch axis so both
            # directions share one recurrent pass.
            x_flip = x.flip(0)
            x = torch.cat([x, x_flip], dim=1)
        # Feed-forward affine transformations (all steps in parallel)
        wz = self.wz(x)
        wh = self.wh(x)
        # Apply batch normalization over the flattened (batch*time) axis.
        wz_bn = self.bn_wz(wz.view(wz.shape[0] * wz.shape[1], wz.shape[2]))
        wh_bn = self.bn_wh(wh.view(wh.shape[0] * wh.shape[1], wh.shape[2]))
        wz = wz_bn.view(wz.shape[0], wz.shape[1], wz.shape[2])
        wh = wh_bn.view(wh.shape[0], wh.shape[1], wh.shape[2])
        # Processing time steps
        h = self.ligru_cell(wz, wh)
        if self.bidirectional:
            # Undo the direction stacking: split, re-reverse, concat features.
            h_f, h_b = h.chunk(2, dim=1)
            h_b = h_b.flip(0)
            h = torch.cat([h_f, h_b], dim=2)
        return h

    @torch.jit.script_method
    def ligru_cell(self, wz, wh):
        # type: (Tensor, Tensor) -> Tensor
        # Recurrence over time given the pre-computed gate projections.
        # jit_ligru branch: infer the batch size from the (direction-stacked)
        # input; odd batches need +1 on the mask/state allocation.
        self.batch_size = wh.shape[0] // 2
        if self.batch_size % 2 == 0:
            self.b_even = True
        else:
            self.b_even = False
        if self.b_even:
            if self.bidirectional:
                h_init = torch.zeros(
                    2 * self.batch_size,
                    self.hidden_size,
                    device="cuda",
                )
                drop_masks_i = self.drop(
                    torch.ones(
                        self.N_drop_masks,
                        2 * self.batch_size,
                        self.hidden_size,
                        device="cuda",
                    )
                ).data
            else:
                h_init = torch.zeros(
                    self.batch_size,
                    self.hidden_size,
                    device="cuda",
                )
                drop_masks_i = self.drop(
                    torch.ones(
                        self.N_drop_masks,
                        self.batch_size,
                        self.hidden_size,
                        device="cuda",
                    )
                ).data
            hiddens = []
            ht = h_init
            if self.training:
                # Pick the next pre-sampled dropout mask; refill the pool
                # once it is exhausted.
                drop_mask = drop_masks_i[self.drop_mask_cnt]
                self.drop_mask_cnt = self.drop_mask_cnt + 1
                if self.drop_mask_cnt >= self.N_drop_masks:
                    self.drop_mask_cnt = 0
                    if self.bidirectional:
                        drop_masks_i = (
                            self.drop(
                                torch.ones(
                                    self.N_drop_masks,
                                    2 * self.batch_size + 1,
                                    self.hidden_size,
                                )
                            )
                            .to(self.device)
                            .data
                        )
                    else:
                        drop_masks_i = (
                            self.drop(
                                torch.ones(
                                    self.N_drop_masks,
                                    self.batch_size,
                                    self.hidden_size,
                                )
                            )
                            .to(self.device)
                            .data
                        )
            else:
                drop_mask = self.drop_mask_te
        else:
            # Odd (halved) batch size: allocate one extra row, as on the
            # jit_ligru branch.
            if self.bidirectional:
                h_init = torch.zeros(
                    2 * self.batch_size + 1,
                    self.hidden_size,
                    device="cuda",
                )
                drop_masks_i = self.drop(
                    torch.ones(
                        self.N_drop_masks,
                        2 * self.batch_size + 1,
                        self.hidden_size,
                        device="cuda",
                    )
                ).data
            else:
                h_init = torch.zeros(
                    self.batch_size,
                    self.hidden_size,
                    device="cuda",
                )
                drop_masks_i = self.drop(
                    torch.ones(
                        self.N_drop_masks,
                        self.batch_size,
                        self.hidden_size,
                        device="cuda",
                    )
                ).data
            hiddens = []
            ht = h_init
            if self.training:
                drop_mask = drop_masks_i[self.drop_mask_cnt]
                self.drop_mask_cnt = self.drop_mask_cnt + 1
                if self.drop_mask_cnt >= self.N_drop_masks:
                    self.drop_mask_cnt = 0
                    if self.bidirectional:
                        drop_masks_i = (
                            self.drop(
                                torch.ones(
                                    self.N_drop_masks,
                                    2 * self.batch_size + 1,
                                    self.hidden_size,
                                )
                            )
                            .to(self.device)
                            .data
                        )
                    else:
                        drop_masks_i = (
                            self.drop(
                                torch.ones(
                                    self.N_drop_masks,
                                    self.batch_size,
                                    self.hidden_size,
                                )
                            )
                            .to(self.device)
                            .data
                        )
            else:
                drop_mask = self.drop_mask_te
        # Batch-first time loop (jit_ligru branch): wh/wz are (B, T, D).
        for k in range(wh.shape[1]):
            uz, uh = self.u(ht).chunk(2, 1)
            at = wh[:, k, :] + uh
            zt = wz[:, k, :] + uz
            # ligru equation
            zt = torch.sigmoid(zt)
            hcand = self.act(at) * drop_mask
            ht = zt * ht + (1 - zt) * hcand
            hiddens.append(ht)
        # Stacking hidden states: (T, B, D) -> (B, T, D)
        h = torch.stack(hiddens)
        h = h.permute(1, 0, 2)
        return h
def flip(x, dim):
    """Return a copy of tensor *x* with the elements of dimension *dim* reversed.

    Replaces the original manual view/index-based reversal with `torch.flip`,
    which produces the same result while handling negative *dim* values and
    CPU/CUDA tensors uniformly, without the `getattr(..., ("cpu", "cuda")...)`
    device dance.
    """
    return torch.flip(x, [dim])
def act_fun(act_type):
    """Map an activation name to a freshly constructed torch.nn module.

    Args:
        act_type: one of "relu", "tanh", "sigmoid", "leaky_relu", "elu",
            "softmax" (LogSoftmax over dim 1), or "linear".

    Returns:
        The corresponding nn.Module instance.

    Raises:
        ValueError: for an unknown name.  (Bug fix: the original fell off the
        end and silently returned None, which only failed later at call time.)
    """
    factories = {
        "relu": nn.ReLU,
        "tanh": nn.Tanh,
        "sigmoid": nn.Sigmoid,
        "leaky_relu": lambda: nn.LeakyReLU(0.2),
        "elu": nn.ELU,
        "softmax": lambda: nn.LogSoftmax(dim=1),
        # initializzed like this, but not used in forward!
        "linear": lambda: nn.LeakyReLU(1),
    }
    try:
        return factories[act_type]()
    except KeyError:
        raise ValueError("Unknown activation type: %r" % (act_type,)) from None
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension.

    Computes gamma * (x - mean) / (std + eps) + beta, where gamma/beta are
    learnable per-feature scale and shift, and std is the (unbiased) sample
    standard deviation along the final axis.
    """

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(features))
        self.beta = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return normalized * self.gamma + self.beta
''' new liGRU '''
class liGRU(nn.Module):
    """Multi-layer light-GRU (liGRU) RNN in plain PyTorch.

    Each layer is a single-gate GRU variant (update gate only, ReLU candidate)
    with optional bidirectionality, batch norm on the feed-forward paths,
    layer norm on the hidden state, and recurrent dropout using one fixed mask
    per sequence.  Passing a scalar `ligru_lay` configures a single-layer
    (decoder) instance; a list configures a stack.

    Bug fix: the `proj` default was a shared mutable list; it is now a tuple
    (read-only in this class, so behavior is unchanged for all callers).

    NOTE(review): `use_cuda` is hard-coded True and `forward` calls `.cuda()`,
    so this module requires a CUDA device as written.
    """

    def __init__(self, inp_dim, ligru_lay, bidirection, dropout, layer_norm,
                 proj=(False, False, False, False), to_do='train'):
        super(liGRU, self).__init__()
        # Reading parameters
        self.input_dim = inp_dim
        self.ligru_lay = ligru_lay
        self.ligru_drop = dropout
        self.ligru_use_batchnorm = [True, True, True, True]
        self.ligru_use_laynorm = layer_norm
        self.ligru_use_laynorm_inp = False
        self.ligru_use_batchnorm_inp = False
        self.ligru_orthinit = True
        self.ligru_act = ["relu", "relu", "relu", "relu"]
        self.bidir = bidirection
        self.use_cuda = True
        self.to_do = to_do
        self.proj = proj

        if isinstance(self.ligru_lay, list):
            self.N_ligru_lay = len(self.ligru_lay)
        else:
            # Scalar hidden size -> single layer (decoder use case).
            self.N_ligru_lay = 1
            self.ligru_use_batchnorm = [False]  # [True]
            self.ligru_act = ["relu"]
            self.ligru_lay = [self.ligru_lay]
            self.proj = [False]  # for decoder

        if self.to_do == "train":
            self.test_flag = False
        else:
            self.test_flag = True

        # List initialization
        self.wh = nn.ModuleList([])
        self.uh = nn.ModuleList([])
        self.wz = nn.ModuleList([])  # Update Gate
        self.uz = nn.ModuleList([])  # Update Gate
        self.ln = nn.ModuleList([])  # Layer Norm
        self.bn_wh = nn.ModuleList([])  # Batch Norm
        self.bn_wz = nn.ModuleList([])  # Batch Norm
        self.act = nn.ModuleList([])  # Activations

        # Input layer normalization
        if self.ligru_use_laynorm_inp:
            self.ln0 = LayerNorm(self.input_dim)
        # Input batch normalization
        if self.ligru_use_batchnorm_inp:
            self.bn0 = nn.BatchNorm1d(self.input_dim, momentum=0.05)

        current_input = self.input_dim
        # Initialization of hidden layers
        for i in range(self.N_ligru_lay):
            # Activations
            self.act.append(act_fun(self.ligru_act[i]))
            # Normalization layers carry their own affine bias.
            add_bias = True
            if self.ligru_use_laynorm[i] or self.ligru_use_batchnorm[i]:
                add_bias = False
            # Feed-forward connections
            self.wh.append(nn.Linear(current_input, self.ligru_lay[i], bias=add_bias))
            self.wz.append(nn.Linear(current_input, self.ligru_lay[i], bias=add_bias))
            # Recurrent connections
            self.uh.append(nn.Linear(self.ligru_lay[i], self.ligru_lay[i], bias=False))
            self.uz.append(nn.Linear(self.ligru_lay[i], self.ligru_lay[i], bias=False))
            if self.ligru_orthinit:
                nn.init.orthogonal_(self.uh[i].weight)
                nn.init.orthogonal_(self.uz[i].weight)
            # Glorot init for feedforward weight
            nn.init.xavier_normal_(self.wh[i].weight)
            nn.init.xavier_normal_(self.wz[i].weight)
            # batch norm initialization
            self.bn_wh.append(nn.BatchNorm1d(self.ligru_lay[i], momentum=0.05))
            self.bn_wz.append(nn.BatchNorm1d(self.ligru_lay[i], momentum=0.05))
            self.ln.append(LayerNorm(self.ligru_lay[i]))
            # A bidirectional layer doubles the next layer's input width.
            if self.bidir:
                current_input = 2 * self.ligru_lay[i]
            else:
                current_input = self.ligru_lay[i]
        self.out_dim = self.ligru_lay[i] + self.bidir * self.ligru_lay[i]

        # for encoder: optional tanh projection on the final features
        self.pj = None
        if self.proj[0]:
            self.pj = nn.Linear(self.out_dim, self.out_dim)

    def forward(self, x, x_len):
        """Run the stack over x of shape (T, B, D); returns (features, x_len)."""
        # Applying Layer/Batch Norm on the raw input if enabled
        if bool(self.ligru_use_laynorm_inp):
            x = self.ln0((x))
        if bool(self.ligru_use_batchnorm_inp):
            x_bn = self.bn0(x.view(x.shape[0] * x.shape[1], x.shape[2]))
            x = x_bn.view(x.shape[0], x.shape[1], x.shape[2])

        for i in range(self.N_ligru_lay):
            # Initial state and concatenation: bidirectional layers stack the
            # time-reversed copy along the batch axis.
            if self.bidir:
                h_init = torch.zeros(2 * x.shape[1], self.ligru_lay[i])
                x = torch.cat([x, flip(x, 0)], 1)
            else:
                h_init = torch.zeros(x.shape[1], self.ligru_lay[i])

            # Drop mask initilization (same mask for all time steps)
            if self.test_flag == False:
                drop_mask = torch.bernoulli(
                    torch.Tensor(h_init.shape[0], h_init.shape[1]).fill_(1 - self.ligru_drop[i])
                )
            else:
                drop_mask = torch.FloatTensor([1 - self.ligru_drop[i]])

            if self.use_cuda:
                h_init = h_init.cuda()
                drop_mask = drop_mask.cuda()

            # Feed-forward affine transformations (all steps in parallel)
            wh_out = self.wh[i](x)
            wz_out = self.wz[i](x)

            # Apply batch norm if needed (all steos in parallel)
            if self.ligru_use_batchnorm[i]:
                wh_out_bn = self.bn_wh[i](wh_out.view(wh_out.shape[0] * wh_out.shape[1], wh_out.shape[2]))
                wh_out = wh_out_bn.view(wh_out.shape[0], wh_out.shape[1], wh_out.shape[2])
                wz_out_bn = self.bn_wz[i](wz_out.view(wz_out.shape[0] * wz_out.shape[1], wz_out.shape[2]))
                wz_out = wz_out_bn.view(wz_out.shape[0], wz_out.shape[1], wz_out.shape[2])

            # Processing time steps
            hiddens = []
            ht = h_init
            for k in range(x.shape[0]):
                # ligru equation
                zt = torch.sigmoid(wz_out[k] + self.uz[i](ht))
                at = wh_out[k] + self.uh[i](ht)
                hcand = self.act[i](at) * drop_mask
                ht = zt * ht + (1 - zt) * hcand
                if self.ligru_use_laynorm[i]:
                    ht = self.ln[i](ht)
                hiddens.append(ht)

            # Stacking hidden states
            h = torch.stack(hiddens)

            # Bidirectional concatenations: split directions, re-reverse the
            # backward half, concat along features.
            if self.bidir:
                h_f = h[:, 0: int(x.shape[1] / 2)]
                h_b = flip(h[:, int(x.shape[1] / 2): x.shape[1]].contiguous(), 0)
                h = torch.cat([h_f, h_b], 2)

            # Setup x for the next hidden layer
            x = h

        if self.proj[0]:
            x = torch.tanh(self.pj(x))

        return x, x_len
'''new function for layer normalization'''
class CNNLayerNorm(nn.Module):
    """Layer normalization built for cnns input.

    Thin wrapper around nn.LayerNorm applied directly over the trailing
    dimension of size `n_feats` (the transpose to/from a time-major layout
    present in earlier revisions was intentionally disabled).
    """

    def __init__(self, n_feats):
        super(CNNLayerNorm, self).__init__()
        self.layer_norm = nn.LayerNorm(n_feats)

    def forward(self, x):
        # x: (batch, channel, feature, time) with the last axis == n_feats
        return self.layer_norm(x)
class ResidualCNN(nn.Module):
    """Residual CNN inspired by https://arxiv.org/pdf/1603.05027.pdf
    except with layer norm instead of batch norm.

    Pre-activation ordering: each sub-path applies layer-norm -> GELU ->
    dropout -> conv, and the input is added back at the end (skip
    connection), so in_channels must equal out_channels and the convs use
    'same' padding (kernel // 2) to keep the spatial shape.
    """

    def __init__(self, in_channels, out_channels, kernel, stride, dropout, n_feats):
        super(ResidualCNN, self).__init__()
        self.cnn1 = nn.Conv2d(in_channels, out_channels, kernel, stride, padding=kernel // 2)
        self.cnn2 = nn.Conv2d(out_channels, out_channels, kernel, stride, padding=kernel // 2)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.layer_norm1 = CNNLayerNorm(n_feats)
        self.layer_norm2 = CNNLayerNorm(n_feats)

    def forward(self, x):
        # x: (batch, channel, feature, time)
        skip = x
        out = self.cnn1(self.dropout1(F.gelu(self.layer_norm1(x))))
        out = self.cnn2(self.dropout2(F.gelu(self.layer_norm2(out))))
        return out + skip  # (batch, channel, feature, time)
class VGGExtractor_LN(nn.Module):
    ''' VGG extractor for ASR described in https://arxiv.org/pdf/1706.02737.pdf'''
    # Variant with layer norm after each conv.  Downsamples time and frequency
    # by 4x via two MaxPool2d(2) stages and flattens (channels, freq) into one
    # feature axis of size out_dim.
    #
    # NOTE(review): CNNLayerNorm is built with `input_dim`, but after
    # view_input the trailing axis is `freq_dim` -- these only match when
    # in_channel == 1 (e.g. plain 80-dim fbank).  Confirm intended usage with
    # delta-stacked features.

    def __init__(self, input_dim):
        super(VGGExtractor_LN, self).__init__()
        self.init_dim = 64
        self.hide_dim = 128
        in_channel, freq_dim, out_dim = self.check_dim(input_dim)
        self.in_channel = in_channel
        self.freq_dim = freq_dim
        self.out_dim = out_dim

        self.extractor = nn.Sequential(
            nn.Conv2d(in_channel, self.init_dim, 3, stride=1, padding=1),
            CNNLayerNorm(input_dim),
            nn.ReLU(),
            nn.Conv2d(self.init_dim, self.init_dim, 3, stride=1, padding=1),
            CNNLayerNorm(input_dim),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),  # Half-time dimension
            # nn.Dropout2d(p=0.2),
            nn.Conv2d(self.init_dim, self.hide_dim, 3, stride=1, padding=1),
            CNNLayerNorm(input_dim // 2),
            nn.ReLU(),
            nn.Conv2d(self.hide_dim, self.hide_dim, 3, stride=1, padding=1),
            CNNLayerNorm(input_dim // 2),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            # nn.Dropout2d(p=0.2)
        )

    def check_dim(self, input_dim):
        """Infer (in_channel, freq_dim, out_dim) from the raw feature width."""
        # Check input dimension, delta feature should be stack over channel.
        if input_dim % 13 == 0:
            # MFCC feature
            return int(input_dim // 13), 13, (13 // 4) * self.hide_dim
        elif input_dim % FBANK_SIZE == 0:
            # Fbank feature
            return int(input_dim // FBANK_SIZE), FBANK_SIZE, (FBANK_SIZE // 4) * self.hide_dim
        else:
            # Bug fix: the original concatenated an undefined name `d`,
            # raising NameError instead of the intended ValueError.
            raise ValueError(
                'Acoustic feature dimension for VGG should be 13/26/39(MFCC)'
                ' or 40/80/120(Fbank) but got ' + str(input_dim))

    def view_input(self, feature, feat_len):
        """Reshape (B, T, D) -> (B, CH, T, freq) and downsample feat_len by 4."""
        # downsample time
        feat_len = feat_len // 4
        # crop sequence s.t. t%4==0
        if feature.shape[1] % 4 != 0:
            feature = feature[:, :-(feature.shape[1] % 4), :].contiguous()
        bs, ts, ds = feature.shape
        # stack feature according to result of check_dim
        feature = feature.view(bs, ts, self.in_channel, self.freq_dim)
        feature = feature.transpose(1, 2)
        return feature, feat_len

    def forward(self, feature, feat_len):
        # Feature shape BSxTxD -> BS x CH(num of delta) x T x D(acoustic feature dim)
        feature, feat_len = self.view_input(feature, feat_len)
        # Foward
        feature = self.extractor(feature)
        # BSx128xT/4xD/4 -> BSxT/4x128xD/4
        feature = feature.transpose(1, 2)
        # BS x T/4 x 128 x D/4 -> BS x T/4 x 32D
        feature = feature.contiguous().view(feature.shape[0], feature.shape[1], self.out_dim)
        return feature, feat_len
class VGGExtractor(nn.Module):
''' VGG extractor for ASR described in https://arxiv.org/pdf/1706.02737.pdf'''
def __init__(self,input_dim):
super(VGGExtractor, self).__init__()
self.init_dim = 64
self.hide_dim = 128
in_channel,freq_dim,out_dim = self.check_dim(input_dim)
self.in_channel = in_channel
self.freq_dim = freq_dim
self.out_dim = out_dim
self.extractor = nn.Sequential(
nn.Conv2d( in_channel, self.init_dim, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d( self.init_dim, self.init_dim, 3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(2, stride=2, ceil_mode=True), # Half-time dimension
nn.Conv2d( self.init_dim, self.hide_dim, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d( self.hide_dim, self.hide_dim, 3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(2, stride=2, ceil_mode=True) # Half-time dimension
)
def check_dim(self,input_dim):
# Check input dimension, delta feature should be stack over channel.
if input_dim % 13 == 0:
# MFCC feature
return int(input_dim // 13),13,(13 // 4)*self.hide_dim
elif input_dim % FBANK_SIZE == 0:
# Fbank feature
return int(input_dim // FBANK_SIZE),FBANK_SIZE,(FBANK_SIZE//4)*self.hide_dim
else:
raise ValueError('Acoustic feature dimension for VGG should be 13/26/39(MFCC) or 40/80/120(Fbank) but got '+d)
def view_input(self,feature,feat_len):
# downsample time
feat_len = feat_len//4 # ?
# crop sequence s.t. t%4==0
if feature.shape[1]%4 != 0:
feature = feature[:,:-(feature.shape[1]%4),:].contiguous()
bs,ts,ds = feature.shape
# stack feature according to result of check_dim
feature = feature.view(bs,ts,self.in_channel,self.freq_dim)
feature = feature.transpose(1,2)
return feature,feat_len
def forward(self,feature,feat_len):
# Feature shape BSxTxD -> BS x CH(num of delta) x T x D(acoustic feature dim)
feature, feat_len = self.view_input(feature,feat_len)
# Foward
feature = self.extractor(feature)
# BSx128xT/4xD/4 -> BSxT/4x128xD/4
feature = feature.transpose(1,2)
# BS x T/4 x 128 x D/4 -> BS x T/4 x 32D
feature = feature.contiguous().view(feature.shape[0],feature.shape[1],self.out_dim)
return feature,feat_len
class FreqVGGExtractor(nn.Module):
    ''' Frequency Modification VGG extractor for ASR '''
    # Splits the spectrogram at `split_freq`: the low band goes through a thin
    # conv stack (low_dim channels) and the high band through a wide one;
    # their flattened outputs are re-concatenated along the feature axis.
    # Time and frequency are each downsampled 4x by the two MaxPool2d(2).

    def __init__(self, input_dim, split_freq, low_dim=4):
        super(FreqVGGExtractor, self).__init__()
        self.split_freq = split_freq
        self.low_init_dim = low_dim
        self.low_hide_dim = low_dim * 2
        self.high_init_dim = 64 - low_dim
        self.high_hide_dim = 128 - low_dim * 2
        in_channel, freq_dim = self.check_dim(input_dim)
        self.in_channel = in_channel
        self.freq_dim = freq_dim
        self.low_out_dim = split_freq // 4 * self.low_hide_dim
        self.high_out_dim = (freq_dim - split_freq) // 4 * self.high_hide_dim
        self.out_dim = self.low_out_dim + self.high_out_dim

        self.low_extractor = nn.Sequential(
            nn.Conv2d(in_channel, self.low_init_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(self.low_init_dim, self.low_init_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),  # Half-time dimension
            nn.Conv2d(self.low_init_dim, self.low_hide_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(self.low_hide_dim, self.low_hide_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2)  # Half-time dimension
        )
        self.high_extractor = nn.Sequential(
            nn.Conv2d(in_channel, self.high_init_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(self.high_init_dim, self.high_init_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),  # Half-time dimension
            nn.Conv2d(self.high_init_dim, self.high_hide_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(self.high_hide_dim, self.high_hide_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2)  # Half-time dimension
        )
        # Split point must survive the two 2x frequency poolings and lie
        # strictly inside the band.
        assert (self.split_freq % 4 == 0)
        assert (self.split_freq > 0 and self.split_freq < self.freq_dim)

    def check_dim(self, input_dim):
        """Infer (in_channel, freq_dim) from the raw feature width."""
        # Check input dimension, delta feature should be stack over channel.
        if input_dim % 13 == 0:
            # MFCC feature
            return int(input_dim // 13), 13
        elif input_dim % FBANK_SIZE == 0:
            # Fbank feature
            return int(input_dim // FBANK_SIZE), FBANK_SIZE
        else:
            # Bug fix: the original concatenated an undefined name `d`,
            # raising NameError instead of the intended ValueError.
            raise ValueError(
                'Acoustic feature dimension for VGG should be 13/26/39(MFCC)'
                ' or 40/80/120(Fbank) but got ' + str(input_dim))

    def view_input(self, feature, feat_len):
        """Reshape (B, T, D) -> (B, CH, T, freq) and downsample feat_len by 4."""
        # downsample time
        feat_len = feat_len // 4
        # crop sequence s.t. t%4==0
        if feature.shape[1] % 4 != 0:
            feature = feature[:, :-(feature.shape[1] % 4), :].contiguous()
        bs, ts, ds = feature.shape
        # stack feature according to result of check_dim
        feature = feature.view(bs, ts, self.in_channel, self.freq_dim)
        feature = feature.transpose(1, 2)
        return feature, feat_len

    def forward(self, feature, feat_len):
        # Feature shape BSxTxD -> BS x CH(num of delta) x T x D(acoustic feature dim)
        feature, feat_len = self.view_input(feature, feat_len)
        # Foward: run each frequency band through its own extractor
        low_feature = self.low_extractor(feature[:, :, :, :self.split_freq])
        high_feature = self.high_extractor(feature[:, :, :, self.split_freq:])
        # BS x H x T/4 x D/4 -> BS x T/4 x H x D/4
        low_feature = low_feature.transpose(1, 2)
        high_feature = high_feature.transpose(1, 2)
        # BS x T/4 x H x D/4 -> BS x T/4 x HD/4
        low_feature = low_feature.contiguous().view(low_feature.shape[0], low_feature.shape[1], self.low_out_dim)
        high_feature = high_feature.contiguous().view(high_feature.shape[0], high_feature.shape[1], self.high_out_dim)
        feature = torch.cat((low_feature, high_feature), dim=-1)
        return feature, feat_len
class VGGExtractor2(nn.Module):
    ''' VGG extractor for ASR described in https://arxiv.org/pdf/1706.02737.pdf'''
    ''' Only downsample once '''
    # Like VGGExtractor, but the second pooling is (1, 2): time is halved only
    # once (2x total) while frequency is still reduced 4x.

    def __init__(self, input_dim):
        super(VGGExtractor2, self).__init__()
        self.init_dim = 64
        self.hide_dim = 128
        in_channel, freq_dim, out_dim = self.check_dim(input_dim)
        self.in_channel = in_channel
        self.freq_dim = freq_dim
        self.out_dim = out_dim

        self.extractor = nn.Sequential(
            nn.Conv2d(in_channel, self.init_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(self.init_dim, self.init_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),  # Half-time dimension
            nn.Conv2d(self.init_dim, self.hide_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(self.hide_dim, self.hide_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d((1, 2), stride=(1, 2))  # frequency-only pooling
        )

    def check_dim(self, input_dim):
        """Infer (in_channel, freq_dim, out_dim) from the raw feature width."""
        # Check input dimension, delta feature should be stack over channel.
        if input_dim % 13 == 0:
            # MFCC feature
            return int(input_dim // 13), 13, (13 // 4) * self.hide_dim
        elif input_dim % FBANK_SIZE == 0:
            # Fbank feature
            return int(input_dim // FBANK_SIZE), FBANK_SIZE, (FBANK_SIZE // 4) * self.hide_dim
        else:
            # Bug fix: the original concatenated an undefined name `d`,
            # raising NameError instead of the intended ValueError.
            raise ValueError(
                'Acoustic feature dimension for VGG should be 13/26/39(MFCC)'
                ' or 40/80/120(Fbank) but got ' + str(input_dim))

    def view_input(self, feature, feat_len):
        """Reshape (B, T, D) -> (B, CH, T, freq) and downsample feat_len by 2."""
        # downsample time (single 2x pooling in this variant)
        feat_len = feat_len // 2
        # crop sequence s.t. t%2==0
        if feature.shape[1] % 2 != 0:
            feature = feature[:, :-(feature.shape[1] % 2), :].contiguous()
        bs, ts, ds = feature.shape
        # stack feature according to result of check_dim
        feature = feature.view(bs, ts, self.in_channel, self.freq_dim)
        feature = feature.transpose(1, 2)
        return feature, feat_len

    def forward(self, feature, feat_len):
        # Feature shape BSxTxD -> BS x CH(num of delta) x T x D(acoustic feature dim)
        feature, feat_len = self.view_input(feature, feat_len)
        # Foward
        feature = self.extractor(feature)
        # BSx128xT/2xD/4 -> BSxT/2x128xD/4
        feature = feature.transpose(1, 2)
        # BS x T/2 x 128 x D/4 -> BS x T/2 x 32D
        feature = feature.contiguous().view(feature.shape[0], feature.shape[1], self.out_dim)
        return feature, feat_len
class FreqVGGExtractor2(nn.Module):
    ''' Frequency Modification VGG extractor for ASR '''
    # Frequency-split variant of VGGExtractor2: low/high bands have their own
    # conv stacks; time is halved once (first pooling), frequency 4x total
    # (second pooling is (1, 2)).  Earlier shared-first-stage experiments are
    # kept below as commented-out code, as in the original.

    def __init__(self, input_dim, split_freq, low_dim=4):
        super(FreqVGGExtractor2, self).__init__()
        self.split_freq = split_freq
        self.low_init_dim = low_dim
        self.low_hide_dim = low_dim * 2
        self.high_init_dim = 64 - low_dim
        self.high_hide_dim = 128 - low_dim * 2
        # self.init_dim = 64
        # self.low_hide_dim = 8
        # self.high_hide_dim = 120
        in_channel, freq_dim = self.check_dim(input_dim)
        self.in_channel = in_channel
        self.freq_dim = freq_dim
        self.low_out_dim = split_freq // 4 * self.low_hide_dim
        self.high_out_dim = (freq_dim - split_freq) // 4 * self.high_hide_dim
        self.out_dim = self.low_out_dim + self.high_out_dim
        # self.first_extractor = nn.Sequential(
        #     nn.Conv2d( in_channel, self.init_dim, 3, stride=1, padding=1),
        #     nn.ReLU(),
        #     nn.Conv2d( self.init_dim, self.init_dim, 3, stride=1, padding=1),
        #     nn.ReLU(),
        #     nn.MaxPool2d(2, stride=2), # Half-time dimension
        # )

        self.low_extractor = nn.Sequential(
            nn.Conv2d(in_channel, self.low_init_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(self.low_init_dim, self.low_init_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),  # Half-time dimension
            nn.Conv2d(self.low_init_dim, self.low_hide_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(self.low_hide_dim, self.low_hide_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d((1, 2), stride=(1, 2))  # frequency-only pooling
        )
        self.high_extractor = nn.Sequential(
            nn.Conv2d(in_channel, self.high_init_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(self.high_init_dim, self.high_init_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),  # Half-time dimension
            nn.Conv2d(self.high_init_dim, self.high_hide_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.Conv2d(self.high_hide_dim, self.high_hide_dim, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d((1, 2), stride=(1, 2))  # frequency-only pooling
        )
        # Split point must survive the 4x frequency pooling and lie strictly
        # inside the band.
        assert (self.split_freq % 4 == 0)
        assert (self.split_freq > 0 and self.split_freq < self.freq_dim)

    def check_dim(self, input_dim):
        """Infer (in_channel, freq_dim) from the raw feature width."""
        # Check input dimension, delta feature should be stack over channel.
        if input_dim % 13 == 0:
            # MFCC feature
            return int(input_dim // 13), 13
        elif input_dim % FBANK_SIZE == 0:
            # Fbank feature
            return int(input_dim // FBANK_SIZE), FBANK_SIZE
        else:
            # Bug fix: the original concatenated an undefined name `d`,
            # raising NameError instead of the intended ValueError.
            raise ValueError(
                'Acoustic feature dimension for VGG should be 13/26/39(MFCC)'
                ' or 40/80/120(Fbank) but got ' + str(input_dim))

    def view_input(self, feature, feat_len):
        """Reshape (B, T, D) -> (B, CH, T, freq) and downsample feat_len by 2."""
        # downsample time (single 2x pooling in this variant)
        feat_len = feat_len // 2
        # crop sequence s.t. t%2==0
        if feature.shape[1] % 2 != 0:
            feature = feature[:, :-(feature.shape[1] % 2), :].contiguous()
        bs, ts, ds = feature.shape
        # stack feature according to result of check_dim
        feature = feature.view(bs, ts, self.in_channel, self.freq_dim)
        feature = feature.transpose(1, 2)
        return feature, feat_len

    def forward(self, feature, feat_len):
        # Feature shape BSxTxD -> BS x CH(num of delta) x T x D(acoustic feature dim)
        feature, feat_len = self.view_input(feature, feat_len)
        # feature = self.first_extractor(feature) # new
        # Foward: run each frequency band through its own extractor
        low_feature = self.low_extractor(feature[:, :, :, :self.split_freq])
        high_feature = self.high_extractor(feature[:, :, :, self.split_freq:])
        # low_feature = self.low_extractor(feature[:,:,:,:self.split_freq//2])
        # high_feature = self.high_extractor(feature[:,:,:,self.split_freq//2:])
        # BS x H x T/2 x D/4 -> BS x T/2 x H x D/4
        low_feature = low_feature.transpose(1, 2)
        high_feature = high_feature.transpose(1, 2)
        # BS x T/2 x H x D/4 -> BS x T/2 x HD/4
        low_feature = low_feature.contiguous().view(low_feature.shape[0], low_feature.shape[1], self.low_out_dim)
        high_feature = high_feature.contiguous().view(high_feature.shape[0], high_feature.shape[1], self.high_out_dim)
        feature = torch.cat((low_feature, high_feature), dim=-1)
        return feature, feat_len
class RNNLayer(nn.Module):
    ''' RNN wrapper, includes time-downsampling'''
    # Wraps one recurrent layer -- torch.nn LSTM/GRU or the custom TorchScript
    # liGRU_layer -- with optional layer norm, dropout, time downsampling
    # ('drop' every Nth frame, or 'concat' N consecutive frames), and an
    # optional tanh projection.

    def __init__(self, input_dim, module, dim, bidirection, dropout, layer_norm, sample_rate, sample_style, proj, batch_size):
        super(RNNLayer, self).__init__()
        # Setup
        rnn_out_dim = 2*dim if bidirection else dim
        # 'concat' downsampling multiplies the feature width by sample_rate.
        self.out_dim = sample_rate*rnn_out_dim if sample_rate>1 and sample_style=='concat' else rnn_out_dim
        self.dropout = dropout
        self.layer_norm = layer_norm
        self.sample_rate = sample_rate
        self.sample_style = sample_style
        self.proj = proj

        if self.sample_style not in ['drop','concat']:
            raise ValueError('Unsupported Sample Style: '+self.sample_style)
        #print(input_dim) = 160
        #print(dim) = 320
        # Recurrent layer
        if module in ['LSTM','GRU']:
            self.layer = getattr(nn,module.upper())(input_dim, dim, bidirectional=bidirection, num_layers=1, batch_first=True)
            # NOTE(review): `gru` here means "torch.nn RNN that returns
            # (output, state)", not GRU specifically.
            self.gru = True
            ## get LSTM or GRU
        else: # liGRU
            self.layer = liGRU_layer(input_dim, dim, batch_size, bidirectional=bidirection)
            self.gru = False
        # Regularizations
        if self.layer_norm:
            self.ln = nn.LayerNorm(rnn_out_dim)
        if self.dropout>0:
            self.dp = nn.Dropout(p=dropout)
        # Additional projection layer
        if self.proj:
            self.pj = nn.Linear(rnn_out_dim,rnn_out_dim)

    def forward(self, input_x , x_len):
        """Run the RNN over (B, T, D) input; returns (output, downsampled x_len)."""
        # Forward RNN
        '''before using rnn to acclerate?'''
        #if not self.training:
            #self.layer.flatten_parameters()
        # ToDo: check time efficiency of pack/pad
        #input_x = pack_padded_sequence(input_x, x_len, batch_first=True, enforce_sorted=False)
        # torch.nn RNNs return (output, hidden); liGRU_layer returns output only.
        if self.gru:
            output,_ = self.layer(input_x)
        else:
            output = self.layer(input_x)
        #print('input:', input_x.shape)
        #print('output:', output.shape)
        #output,x_len = pad_packed_sequence(output,batch_first=True)
        # Normalizations
        if self.layer_norm:
            output = self.ln(output)
        if self.dropout>0:
            output = self.dp(output)
        # Perform Downsampling
        if self.sample_rate > 1:
            batch_size,timestep,feature_dim = output.shape
            '''output is not 1d'''
            x_len = x_len//self.sample_rate
            if self.sample_style =='drop':
                # Drop the unselected timesteps
                output = output[:,::self.sample_rate,:].contiguous()
            else:
                # Drop the redundant frames and concat the rest according to sample rate
                if timestep%self.sample_rate != 0:
                    output = output[:,:-(timestep%self.sample_rate),:]
                output = output.contiguous().view(batch_size,int(timestep/self.sample_rate),feature_dim*self.sample_rate)
        if self.proj:
            output = torch.tanh(self.pj(output))
        return output,x_len
class BaseAttention(nn.Module):
    """Base module for attention mechanisms.

    Holds the softmax temperature, the head count, and a boolean padding
    mask over the encoder time axis.  Subclasses compute an energy tensor
    and call `_attend` to turn it into attention weights plus a pooled
    context vector.
    """

    def __init__(self, temperature, num_head):
        super().__init__()
        self.temperature = temperature
        self.num_head = num_head
        self.softmax = nn.Softmax(dim=-1)
        self.reset_mem()

    def reset_mem(self):
        """Clear the padding mask and stored key lengths between utterances."""
        self.mask = None
        self.k_len = None

    def set_mem(self):
        pass

    def compute_mask(self, k, k_len):
        """Build a mask that is True on padded encoder positions.

        k     : (B, T, D) encoder states (only the shape is used)
        k_len : per-utterance valid lengths; everything past them is masked.
        """
        self.k_len = k_len
        batch, timesteps, _ = k.shape
        pad = np.zeros((batch, self.num_head, timesteps))
        for b, valid in enumerate(k_len):  # one row per utterance in the batch
            pad[b, :, valid:] = 1  # mark the frames beyond the true length
        self.mask = (
            torch.from_numpy(pad)
            .to(k_len.device, dtype=torch.bool)
            .view(-1, timesteps)
        )  # (B*N) x T

    ### important
    def _attend(self, energy, value):
        """Normalize `energy` into attention weights and pool `value` with them."""
        scaled = energy / self.temperature
        scaled = scaled.masked_fill(self.mask, -np.inf)  # padded frames get zero weight
        attn = self.softmax(scaled)  # (B*N) x T
        # (B*N, 1, T) x (B*N, T, D) -> (B*N, D): the context vector g.
        context = torch.bmm(attn.unsqueeze(1), value).squeeze(1)
        return context, attn
class ScaleDotAttention(BaseAttention):
    ''' Scaled Dot-Product Attention '''

    def __init__(self, temperature, num_head):
        super().__init__(temperature, num_head)

    def forward(self, q, k, v):
        """Dot-product energy between query and keys, then masked softmax pooling."""
        timesteps = k.shape[1]
        # (B*N, 1, D) x (B*N, D, T) -> (B*N, T)
        energy = torch.bmm(q.unsqueeze(1), k.transpose(1, 2)).squeeze(1)
        context, attn = self._attend(energy, v)
        attn = attn.view(-1, self.num_head, timesteps)  # (B*N, T) -> (B, N, T)
        return context, attn
class LocationAwareAttention(BaseAttention):
    ''' Location-Awared Attention '''
    # Chorowski-style attention: the previous step's attention weights are
    # convolved (loc_conv) and projected (loc_proj) into a per-timestep
    # location feature that is added to key + query before scoring.

    def __init__(self, kernel_size, kernel_num, dim, num_head, temperature):
        super().__init__(temperature, num_head)
        self.prev_att  = None  # attention weights from the previous decode step
        self.loc_conv = nn.Conv1d(num_head, kernel_num, kernel_size=2*kernel_size+1, padding=kernel_size, bias=False)
        self.loc_proj = nn.Linear(kernel_num, dim,bias=False)
        # Projects each timestep's combined feature to a scalar energy,
        # hence output dim 1.
        self.gen_energy = nn.Linear(dim, 1) # why output dim is 1?
        self.dim = dim

    def reset_mem(self):
        """Also clear the previous-step attention in addition to the base state."""
        super().reset_mem()
        self.prev_att = None

    def set_mem(self, prev_att):
        self.prev_att = prev_att

    def forward(self, q, k, v):
        """Score keys with query + location context; returns (context, attn)."""
        bs_nh,ts,_ = k.shape
        bs = bs_nh//self.num_head

        # Uniformly init prev_att over the valid (unpadded) frames on the
        # first decode step.
        if self.prev_att is None:
            self.prev_att = torch.zeros((bs,self.num_head,ts)).to(k.device)
            for idx,sl in enumerate(self.k_len):
                self.prev_att[idx,:,:sl] = 1.0/sl

        # Calculate location context
        loc_context = torch.tanh(self.loc_proj(self.loc_conv(self.prev_att).transpose(1,2))) # BxNxT->BxTxD
        loc_context = loc_context.unsqueeze(1).repeat(1,self.num_head,1,1).view(-1,ts,self.dim)   # BxNxTxD -> BNxTxD
        q = q.unsqueeze(1) # BNx1xD

        # Compute energy and context
        energy = self.gen_energy(torch.tanh( k+q+loc_context )).squeeze(2) # BNxTxD -> BNxT
        output, attn = self._attend(energy,v) # including softmax
        attn = attn.view(bs,self.num_head,ts) # BNxT -> BxNxT
        self.prev_att = attn  # remembered for the next decode step

        return output, attn
| 38.181596
| 130
| 0.517308
|
4a00aefa455fe9e71097c48983aa10e86b00b444
| 1,450
|
py
|
Python
|
synth.py
|
wyardley/nodejs-pubsub
|
40f315fe1203c441f22937ce4e5b518c32d0c214
|
[
"Apache-2.0"
] | null | null | null |
synth.py
|
wyardley/nodejs-pubsub
|
40f315fe1203c441f22937ce4e5b518c32d0c214
|
[
"Apache-2.0"
] | null | null | null |
synth.py
|
wyardley/nodejs-pubsub
|
40f315fe1203c441f22937ce4e5b518c32d0c214
|
[
"Apache-2.0"
] | null | null | null |
# Regenerate the pubsub Node.js client with synthtool and apply local patches.
import synthtool as s
import synthtool.gcp as gcp
import logging
import subprocess

logging.basicConfig(level=logging.DEBUG)

gapic = gcp.GAPICGenerator()
common_templates = gcp.CommonTemplates()

# tasks has two product names, and a poorly named artman yaml
version = 'v1'
library = gapic.node_library(
    'pubsub', version, config_path="/google/pubsub/artman_pubsub.yaml")

# skip index, protos, package.json, and README.md
s.copy(
    library,
    excludes=['package.json', 'README.md', 'src/index.js'])

templates = common_templates.node_library(source_location='build/src')
s.copy(templates)

# Inject a waitForReady helper into the generated subscriber client.
# NOTE(review): the pattern strings are not raw strings, so "\s" and "\*"
# rely on Python passing unknown escapes through -- consider r"..." literals.
# https://github.com/googleapis/gapic-generator/issues/2127
s.replace("src/v1/subscriber_client.js",
    " }\n\s*/\*\*\n\s+\* The DNS address for this API service.",
    "\n // note: editing generated code\n"
    " this.waitForReady = function(deadline, callback) {\n"
    " return subscriberStub.then(\n"
    " stub => stub.waitForReady(deadline, callback),\n"
    " callback\n"
    " );\n"
    " };\n"
    "\g<0>")

# Update path discovery due to build/ dir and TypeScript conversion.
s.replace("src/v1/publisher_client.js", "../../package.json", "../../../package.json")
s.replace("src/v1/subscriber_client.js", "../../package.json", "../../../package.json")

# Node.js specific cleanup
subprocess.run(['npm', 'install'])
subprocess.run(['npm', 'run', 'fix'])
| 33.72093
| 87
| 0.647586
|
4a00b0ab878801d0cac6ce7b68c9e08634fb2082
| 137
|
py
|
Python
|
denorm/agg_query.py
|
rivethealth/denorm
|
c9b9070730e3cc7fbe78927d34db7ffa384aed42
|
[
"MIT"
] | 11
|
2021-03-29T14:27:48.000Z
|
2022-01-01T00:31:40.000Z
|
denorm/agg_query.py
|
rivethealth/denorm
|
c9b9070730e3cc7fbe78927d34db7ffa384aed42
|
[
"MIT"
] | null | null | null |
denorm/agg_query.py
|
rivethealth/denorm
|
c9b9070730e3cc7fbe78927d34db7ffa384aed42
|
[
"MIT"
] | null | null | null |
def query():
    """Build the upsert SQL for aggregating ``source`` into ``target``.

    NOTE(review): this looks like template scaffolding -- the column lists in
    the INSERT/SELECT/SET clauses are empty, and ``target`` / ``source`` are
    free variables (calling this raises NameError unless both exist at module
    scope). Confirm the intended column lists and bindings before use.
    """
    return f"""
    INSERT INTO {target} ()
    SELECT
    FROM {source}
    GROUP BY 1, 2
    ORDER BY 1, 2
    ON CONFLICT () DO UPDATE
    SET
    """
| 12.454545
| 24
| 0.627737
|
4a00b1049898da39e0340d4ae0e9bf3fe560f2ab
| 11,874
|
py
|
Python
|
deepchem/utils/test/test_evaluate.py
|
hssinejihene/deepchem
|
7b5177240eb85a68819b4450635ec9df54afed23
|
[
"MIT"
] | 1
|
2020-09-14T02:34:40.000Z
|
2020-09-14T02:34:40.000Z
|
deepchem/utils/test/test_evaluate.py
|
cpfpengfei/deepchem
|
a3d827ddeaa181157237894abe5055e200cfd27e
|
[
"MIT"
] | null | null | null |
deepchem/utils/test/test_evaluate.py
|
cpfpengfei/deepchem
|
a3d827ddeaa181157237894abe5055e200cfd27e
|
[
"MIT"
] | null | null | null |
"""Unit tests for evaluators."""
import deepchem as dc
import numpy as np
import unittest
import sklearn
from deepchem.utils.evaluate import Evaluator
from deepchem.utils.evaluate import GeneratorEvaluator
def test_multiclass_threshold_predictions():
    """Thresholding a (10, 5) probability matrix yields the per-row argmax."""
    # Random rows normalized to sum to one -> a valid probability matrix.
    probs = np.random.rand(10, 5)
    probs = probs / probs.sum(axis=1, keepdims=True)
    predicted = dc.metrics.threshold_predictions(probs)
    assert predicted.shape == (10,)
    assert np.allclose(predicted, np.argmax(probs, axis=1))
def test_binary_threshold_predictions():
    """With threshold=0.3, class 1 is predicted exactly when p(1) >= 0.3."""
    probs = np.random.rand(10, 2)
    probs = probs / probs.sum(axis=1, keepdims=True)
    predicted = dc.metrics.threshold_predictions(probs, threshold=0.3)
    assert predicted.shape == (10,)
    expected = np.where(probs[:, 1] >= 0.3, np.ones(10), np.zeros(10))
    assert np.allclose(predicted, expected)
def test_evaluator_dc_metric():
    """Evaluator with a dc.metrics.Metric returns a {metric_name: score} dict."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 1)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(1, 5)
    evaluator = Evaluator(model, dataset, [])
    metric = dc.metrics.Metric(dc.metrics.mae_score)
    multitask_scores = evaluator.compute_model_performance(metric)
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores) == 1
    # MAE of an untrained model on random data is strictly positive.
    assert multitask_scores['mae_score'] > 0
def test_multiclass_classification_singletask():
    """Evaluator handles a 5-class singletask classifier with a bare metric callable."""
    X = np.random.rand(100, 5)
    y = np.random.randint(5, size=(100,))
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskClassifier(1, 5, n_classes=5)
    evaluator = Evaluator(model, dataset, [])
    # Unwrapped callables get positional keys ("metric-1").
    multitask_scores = evaluator.compute_model_performance(
        dc.metrics.roc_auc_score, n_classes=5)
    assert len(multitask_scores) == 1
    assert multitask_scores["metric-1"] >= 0


def test_sklearn_multiclass_classification_singletask():
    """Same multiclass evaluation, but through a fitted SklearnModel wrapper."""
    X = np.random.rand(100, 5)
    y = np.random.randint(5, size=(100,))
    dataset = dc.data.NumpyDataset(X, y)
    rf = sklearn.ensemble.RandomForestClassifier(50)
    model = dc.models.SklearnModel(rf)
    model.fit(dataset)
    evaluator = Evaluator(model, dataset, [])
    multitask_scores = evaluator.compute_model_performance(
        dc.metrics.roc_auc_score, n_classes=5)
    assert len(multitask_scores) == 1
    assert multitask_scores["metric-1"] >= 0


def test_evaluate_multiclass_classification_singletask():
    """Model.evaluate convenience wrapper also handles multiclass metrics."""
    X = np.random.rand(100, 5)
    y = np.random.randint(5, size=(100,))
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskClassifier(1, 5, n_classes=5)
    multitask_scores = model.evaluate(
        dataset, dc.metrics.roc_auc_score, n_classes=5)
    assert len(multitask_scores) == 1
    assert multitask_scores["metric-1"] >= 0
def test_multitask_evaluator():
    """Evaluator with per_task_metrics=True returns overall and per-task scores."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 2, 1)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(2, 5)
    evaluator = Evaluator(model, dataset, [])
    metric = dc.metrics.Metric(dc.metrics.mae_score)
    multitask_scores, all_task_scores = evaluator.compute_model_performance(
        metric, per_task_metrics=True)
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores) == 1
    assert multitask_scores['mae_score'] > 0
    assert isinstance(all_task_scores, dict)
    # Fixed: the original re-asserted len(multitask_scores) here; the
    # per-task dict (one entry for the single metric) is what this branch
    # is meant to verify. Also removed the unused local n_tasks.
    assert len(all_task_scores) == 1
def test_model_evaluate_dc_metric():
    """Model.evaluate with a dc.metrics.Metric returns {metric_name: score}."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 1)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(1, 5)
    metric = dc.metrics.Metric(dc.metrics.mae_score)
    multitask_scores = model.evaluate(dataset, metric, [])
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores) == 1
    # MAE of an untrained model on random data is strictly positive.
    assert multitask_scores['mae_score'] > 0
def test_multitask_model_evaluate_sklearn():
    """Evaluator returns overall and per-task scores for a 2-task regressor."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 2)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(2, 5)
    evaluator = Evaluator(model, dataset, [])
    multitask_scores, all_task_scores = evaluator.compute_model_performance(
        dc.metrics.mean_absolute_error, per_task_metrics=True)
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores) == 1
    assert multitask_scores['metric-1'] > 0
    assert isinstance(all_task_scores, dict)
    # Fixed: the original re-asserted len(multitask_scores) here instead of
    # checking the per-task dict. Also removed the unused local n_tasks.
    assert len(all_task_scores) == 1
def test_multitask_model_evaluate():
    """Model.evaluate with per_task_metrics=True returns both score dicts."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 2)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(2, 5)
    multitask_scores, all_task_scores = model.evaluate(
        dataset, dc.metrics.mean_absolute_error, per_task_metrics=True)
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores) == 1
    assert multitask_scores["metric-1"] > 0
    # (Removed the unused local n_tasks.)
    assert isinstance(all_task_scores, dict)
def test_evaluator_dc_multi_metric():
    """Evaluator accepts a list of Metric objects and scores each by name."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 1)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(1, 5)
    evaluator = Evaluator(model, dataset, [])
    # NOTE(review): n_tasks=2 while the model/dataset have 1 task -- confirm
    # this mismatch is intentional.
    metric1 = dc.metrics.Metric(dc.metrics.mae_score, n_tasks=2)
    metric2 = dc.metrics.Metric(dc.metrics.r2_score, n_tasks=2)
    multitask_scores = evaluator.compute_model_performance([metric1, metric2])
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores) == 2
    assert multitask_scores['mae_score'] > 0
    # r2 on random data can be negative, so only presence is checked.
    assert "r2_score" in multitask_scores


def test_model_evaluate_dc_multi_metric():
    """Model.evaluate accepts a list of Metric objects and scores each by name."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 1)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(1, 5)
    metric1 = dc.metrics.Metric(dc.metrics.mae_score)
    metric2 = dc.metrics.Metric(dc.metrics.r2_score)
    multitask_scores = model.evaluate(dataset, [metric1, metric2])
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores) == 2
    assert multitask_scores['mae_score'] > 0
    assert "r2_score" in multitask_scores
def test_generator_evaluator_dc_metric_multitask_single_point():
    """GeneratorEvaluator scores a model fed by its default generator."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 1)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(1, 5)
    generator = model.default_generator(dataset, pad_batches=False)
    evaluator = GeneratorEvaluator(model, generator, [])
    metric = dc.metrics.Metric(dc.metrics.mae_score)
    multitask_scores = evaluator.compute_model_performance(metric)
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores) == 1
    assert multitask_scores['mae_score'] > 0
    # Fixed: dropped a verbatim-duplicated `len(multitask_scores) == 1` assert.
    # NOTE(review): this test is otherwise identical to
    # test_generator_evaluator_dc_metric_multitask -- consider deduplicating.
def test_evaluator_sklearn_metric():
    """Evaluator accepts a bare sklearn-style callable as the metric."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 1)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(1, 5)
    evaluator = Evaluator(model, dataset, [])
    multitask_scores = evaluator.compute_model_performance(
        dc.metrics.mean_absolute_error)
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores) == 1
    # Since no name was provided, metrics are keyed by position ("metric-1").
    assert multitask_scores['metric-1'] > 0


def test_generator_evaluator_dc_metric_multitask():
    """GeneratorEvaluator scores a model fed by its default generator."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 1)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(1, 5)
    generator = model.default_generator(dataset, pad_batches=False)
    evaluator = GeneratorEvaluator(model, generator, [])
    metric = dc.metrics.Metric(dc.metrics.mae_score)
    multitask_scores = evaluator.compute_model_performance(metric)
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores) == 1
    assert multitask_scores['mae_score'] > 0


def test_model_evaluate_sklearn_metric():
    """Model.evaluate accepts a bare sklearn-style callable as the metric."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 1)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(1, 5)
    multitask_scores = model.evaluate(dataset, dc.metrics.mean_absolute_error)
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores) == 1
    # Since no name was provided, metrics are keyed by position ("metric-1").
    assert multitask_scores['metric-1'] > 0
def test_evaluator_sklearn_multi_metric():
    """Evaluator accepts a list of bare callables, keyed positionally."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 1)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(1, 5)
    evaluator = Evaluator(model, dataset, [])
    multitask_scores = evaluator.compute_model_performance(
        [dc.metrics.mean_absolute_error, dc.metrics.r2_score])
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores.keys()) == 2
    # Unnamed metrics are keyed by position: "metric-1", "metric-2".
    assert multitask_scores['metric-1'] > 0
    assert "metric-2" in multitask_scores


def test_model_evaluate_sklearn_multi_metric():
    """Model.evaluate accepts a list of bare callables, keyed positionally."""
    X = np.random.rand(10, 5)
    y = np.random.rand(10, 1)
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.MultitaskRegressor(1, 5)
    multitask_scores = model.evaluate(
        dataset, [dc.metrics.mean_absolute_error, dc.metrics.r2_score])
    assert isinstance(multitask_scores, dict)
    assert len(multitask_scores.keys()) == 2
    # Unnamed metrics are keyed by position: "metric-1", "metric-2".
    assert multitask_scores['metric-1'] > 0
    assert "metric-2" in multitask_scores
def test_gc_binary_classification():
    """GraphConv binary classifier can be scored with accuracy via Evaluator."""
    smiles = ["C", "CC"]
    featurizer = dc.feat.ConvMolFeaturizer()
    X = featurizer.featurize(smiles)
    y = np.random.randint(2, size=(len(smiles),))
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.GraphConvModel(1, mode="classification")
    # TODO: Fix this case with correct thresholding
    evaluator = Evaluator(model, dataset, [])
    multitask_scores = evaluator.compute_model_performance(
        dc.metrics.accuracy_score, n_classes=2)
    assert len(multitask_scores) == 1
    assert multitask_scores["metric-1"] >= 0


def test_gc_binary_kappa_classification():
    """Cohen's kappa on a GraphConv binary classifier stays within [-1, 1]."""
    np.random.seed(1234)  # deterministic labels for a stable kappa range
    smiles = ["C", "CC", "CO", "CCC", "CCCC"]
    featurizer = dc.feat.ConvMolFeaturizer()
    X = featurizer.featurize(smiles)
    y = np.random.randint(2, size=(len(smiles),))
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.GraphConvModel(1, mode="classification")
    # TODO: Fix this case with correct thresholding
    evaluator = Evaluator(model, dataset, [])
    multitask_scores = evaluator.compute_model_performance(
        dc.metrics.kappa_score, n_classes=2)
    assert len(multitask_scores) == 1
    assert multitask_scores["metric-1"] <= 1
    assert multitask_scores["metric-1"] >= -1


def test_gc_multiclass_classification():
    """GraphConv 5-class classifier can be scored with accuracy."""
    np.random.seed(1234)
    smiles = ["C", "CC"]
    featurizer = dc.feat.ConvMolFeaturizer()
    X = featurizer.featurize(smiles)
    y = np.random.randint(5, size=(len(smiles),))
    dataset = dc.data.NumpyDataset(X, y)
    model = dc.models.GraphConvModel(1, mode="classification", n_classes=5)
    evaluator = Evaluator(model, dataset, [])
    multitask_scores = evaluator.compute_model_performance(
        dc.metrics.accuracy_score, n_classes=5)
    assert len(multitask_scores) == 1
    assert multitask_scores["metric-1"] >= 0
| 36.875776
| 80
| 0.732862
|
4a00b1d13eb3925d7e5032d05f15107f379263e7
| 6,755
|
py
|
Python
|
model_zoo/official/recommend/wide_and_deep/train_and_eval_parameter_server_distribute.py
|
ATestGroup233/mindspore
|
5d81221b5896cf7d7c6adb44daef28d92cb43352
|
[
"Apache-2.0"
] | 1
|
2021-06-01T12:34:37.000Z
|
2021-06-01T12:34:37.000Z
|
model_zoo/official/recommend/wide_and_deep/train_and_eval_parameter_server_distribute.py
|
ATestGroup233/mindspore
|
5d81221b5896cf7d7c6adb44daef28d92cb43352
|
[
"Apache-2.0"
] | null | null | null |
model_zoo/official/recommend/wide_and_deep/train_and_eval_parameter_server_distribute.py
|
ATestGroup233/mindspore
|
5d81221b5896cf7d7c6adb44daef28d92cb43352
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train distribute on parameter server."""
import os
import sys
import mindspore.dataset as ds
from mindspore import Model, context
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor
from mindspore.context import ParallelMode
from mindspore.communication.management import get_rank, get_group_size, init
from mindspore.nn.wrap.cell_wrapper import VirtualDatasetCellTriple
from mindspore.common import set_seed
from src.wide_and_deep import PredictWithSigmoid, TrainStepWrap, NetWithLossClass, WideDeepModel
from src.callbacks import LossCallBack, EvalCallBack
from src.datasets import create_dataset, DataType
from src.metrics import AUCMetric
from src.config import WideDeepConfig
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
def get_wide_deep_net(config):
    """
    Get network of wide&deep model.

    Builds the loss-wrapped train network and the sigmoid eval network.
    Depends on the module-level ``cache_enable`` flag (set in __main__) to
    decide whether to wrap both nets in VirtualDatasetCellTriple for
    auto-parallel execution.
    """
    wide_deep_net = WideDeepModel(config)
    loss_net = NetWithLossClass(wide_deep_net, config)
    if cache_enable:
        loss_net = VirtualDatasetCellTriple(loss_net)
    train_net = TrainStepWrap(loss_net, parameter_server=bool(config.parameter_server),
                              sparse=config.sparse, cache_enable=(config.vocab_cache_size > 0))
    eval_net = PredictWithSigmoid(wide_deep_net)
    if cache_enable:
        eval_net = VirtualDatasetCellTriple(eval_net)
    return train_net, eval_net
class ModelBuilder():
    """
    ModelBuilder

    Factory wiring up the wide&deep train/eval networks and training hooks.
    """
    def __init__(self):
        pass

    def get_hook(self):
        # Placeholder for framework-specific hooks.
        pass

    def get_train_hook(self):
        """Return the list of training callbacks (currently loss logging)."""
        hooks = []
        callback = LossCallBack()
        hooks.append(callback)
        # Fixed: default to '0' so a missing DEVICE_ID env var no longer
        # raises TypeError from int(None).
        if int(os.getenv('DEVICE_ID', '0')) == 0:
            pass  # placeholder for device-0-only hooks
        return hooks

    def get_net(self, config):
        """Build (train_net, eval_net) for the given WideDeepConfig."""
        return get_wide_deep_net(config)
def train_and_eval(config):
    """
    test_train_eval

    Train wide&deep in parameter-server mode and evaluate via callback.
    Depends on the module-level ``cache_enable`` flag (set in __main__) for
    full-batch / strategy-checkpoint behavior.
    """
    set_seed(1000)
    data_path = config.data_path
    batch_size = config.batch_size
    epochs = config.epochs
    # Select the dataset backend (tfrecord / mindrecord / H5 fallback).
    if config.dataset_type == "tfrecord":
        dataset_type = DataType.TFRECORD
    elif config.dataset_type == "mindrecord":
        dataset_type = DataType.MINDRECORD
    else:
        dataset_type = DataType.H5
    parameter_server = bool(config.parameter_server)
    if cache_enable:
        # Embedding-cache mode forces full-batch semantics.
        config.full_batch = True
    print("epochs is {}".format(epochs))
    if config.full_batch:
        context.set_auto_parallel_context(full_batch=True)
        ds.config.set_seed(1)
        # Full-batch: every worker loads the whole group-scaled batch.
        ds_train = create_dataset(data_path, train_mode=True, epochs=1,
                                  batch_size=batch_size*get_group_size(), data_type=dataset_type)
        ds_eval = create_dataset(data_path, train_mode=False, epochs=1,
                                 batch_size=batch_size*get_group_size(), data_type=dataset_type)
    else:
        # Otherwise shard the dataset by rank.
        ds_train = create_dataset(data_path, train_mode=True, epochs=1,
                                  batch_size=batch_size, rank_id=get_rank(),
                                  rank_size=get_group_size(), data_type=dataset_type)
        ds_eval = create_dataset(data_path, train_mode=False, epochs=1,
                                 batch_size=batch_size, rank_id=get_rank(),
                                 rank_size=get_group_size(), data_type=dataset_type)
    print("ds_train.size: {}".format(ds_train.get_dataset_size()))
    print("ds_eval.size: {}".format(ds_eval.get_dataset_size()))
    net_builder = ModelBuilder()
    train_net, eval_net = net_builder.get_net(config)
    train_net.set_train()
    auc_metric = AUCMetric()
    model = Model(train_net, eval_network=eval_net, metrics={"auc": auc_metric})
    if cache_enable:
        # Save a per-rank auto-parallel strategy checkpoint.
        config.stra_ckpt = os.path.join(config.stra_ckpt + "-{}".format(get_rank()), "strategy.ckpt")
        context.set_auto_parallel_context(strategy_ckpt_save_file=config.stra_ckpt)
    eval_callback = EvalCallBack(model, ds_eval, auc_metric, config)
    callback = LossCallBack(config=config)
    ms_role = os.getenv("MS_ROLE")
    # Checkpoint cadence depends on the process role (worker vs server/scheduler).
    if ms_role == "MS_WORKER":
        if cache_enable:
            ckptconfig = CheckpointConfig(save_checkpoint_steps=ds_train.get_dataset_size()*epochs,
                                          keep_checkpoint_max=1, integrated_save=False)
        else:
            ckptconfig = CheckpointConfig(save_checkpoint_steps=ds_train.get_dataset_size(), keep_checkpoint_max=5)
    else:
        ckptconfig = CheckpointConfig(save_checkpoint_steps=1, keep_checkpoint_max=1)
    ckpoint_cb = ModelCheckpoint(prefix='widedeep_train',
                                 directory=config.ckpt_path + '/ckpt_' + str(get_rank()) + '/',
                                 config=ckptconfig)
    callback_list = [TimeMonitor(ds_train.get_dataset_size()), eval_callback, callback]
    # Only rank 0 writes model checkpoints.
    if get_rank() == 0:
        callback_list.append(ckpoint_cb)
    model.train(epochs, ds_train,
                callbacks=callback_list,
                dataset_sink_mode=(parameter_server and cache_enable))
if __name__ == "__main__":
    # Parse CLI args into the wide&deep configuration.
    wide_deep_config = WideDeepConfig()
    wide_deep_config.argparse_init()
    context.set_context(mode=context.GRAPH_MODE, device_target=wide_deep_config.device_target, save_graphs=True)
    # A positive vocab cache size enables the embedding-cache code path.
    cache_enable = wide_deep_config.vocab_cache_size > 0
    if cache_enable and wide_deep_config.device_target != "GPU":
        context.set_context(variable_memory_max_size="24GB")
    context.set_ps_context(enable_ps=True)
    init()  # initialize distributed communication
    context.set_context(save_graphs_path='./graphs_of_device_id_'+str(get_rank()))
    if cache_enable:
        context.set_auto_parallel_context(
            parallel_mode=ParallelMode.AUTO_PARALLEL, gradients_mean=True)
    else:
        context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                                          device_num=get_group_size())
        # NOTE(review): source indentation was mangled -- sparse appears to be
        # force-enabled only on this (non-cache) branch; confirm placement.
        wide_deep_config.sparse = True
    if wide_deep_config.sparse:
        context.set_context(enable_sparse=True)
    if wide_deep_config.device_target == "GPU":
        context.set_context(enable_graph_kernel=True)
    train_and_eval(wide_deep_config)
| 39.735294
| 115
| 0.687787
|
4a00b2bb59451d3fc227af526d4f8af185be2016
| 1,085
|
py
|
Python
|
tests/test_validate_meta_schema.py
|
berland/configsuite
|
9c1eaeed3610ffaa9e549a35dc2709da44633c75
|
[
"MIT"
] | 8
|
2019-08-12T08:16:12.000Z
|
2022-03-15T12:42:03.000Z
|
tests/test_validate_meta_schema.py
|
berland/configsuite
|
9c1eaeed3610ffaa9e549a35dc2709da44633c75
|
[
"MIT"
] | 95
|
2019-01-29T08:05:35.000Z
|
2022-01-06T07:42:59.000Z
|
tests/test_validate_meta_schema.py
|
berland/configsuite
|
9c1eaeed3610ffaa9e549a35dc2709da44633c75
|
[
"MIT"
] | 14
|
2019-02-06T08:15:10.000Z
|
2020-11-05T12:59:37.000Z
|
"""Copyright 2020 Equinor ASA and The Netherlands Organisation for
Applied Scientific Research TNO.
Licensed under the MIT license.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the conditions stated in the LICENSE file in the project root for
details.
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
"""
import configsuite
import unittest
class TestValidateMetaSchema(unittest.TestCase):
    # pylint: disable=no-self-use

    def test_validate_meta_schema(self):
        """The META_SCHEMA must itself pass the schema validator."""
        meta_schema = configsuite.schema.META_SCHEMA
        configsuite.schema.assert_valid_schema(
            meta_schema,
            allow_default=False,
            validate_named_keys=False,
        )
| 33.90625
| 71
| 0.770507
|
4a00b433c1610e008792f185a6c7b282d1c9638e
| 5,547
|
py
|
Python
|
others/GDAS/lib/nas_rnn/basemodel.py
|
shashank3959/NAS-Projects
|
2c0577231a52375de5ebd7a588750899a8c7bf1c
|
[
"MIT"
] | 20
|
2019-10-10T07:13:27.000Z
|
2022-03-25T11:33:16.000Z
|
lib/nas_rnn/basemodel.py
|
BaiYuYuan/GDAS
|
5eed8101a78d223a20a43494176051298b24ac3a
|
[
"MIT"
] | 1
|
2022-02-22T14:00:59.000Z
|
2022-02-25T08:57:29.000Z
|
lib/nas_rnn/basemodel.py
|
BaiYuYuan/GDAS
|
5eed8101a78d223a20a43494176051298b24ac3a
|
[
"MIT"
] | 6
|
2020-04-21T14:52:02.000Z
|
2021-08-05T15:00:22.000Z
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .genotypes import STEPS
from .utils import mask2d, LockedDropout, embedded_dropout
INITRANGE = 0.04
def none_func(x):
    """The 'none' candidate op: map any input to a zero of the same shape/type."""
    return 0 * x
class DARTSCell(nn.Module):
    """Recurrent cell whose internal wiring is given by a DARTS genotype.

    Each step i selects a predecessor state and an activation (from
    ``genotype.recurrent``) and mixes a candidate state into it via a learned
    sigmoid gate; the cell output averages the states in ``genotype.concat``.
    """

    def __init__(self, ninp, nhid, dropouth, dropoutx, genotype):
        # ninp / nhid -- input and hidden sizes
        # dropouth    -- mask rate on hidden-to-hidden connections
        # dropoutx    -- mask rate on the input connection
        # genotype    -- discovered architecture; None during search
        super(DARTSCell, self).__init__()
        self.nhid = nhid
        self.dropouth = dropouth
        self.dropoutx = dropoutx
        self.genotype = genotype
        # genotype is None when doing arch search
        steps = len(self.genotype.recurrent) if self.genotype is not None else STEPS
        # _W0 mixes [x; h_prev] into the initial gate/candidate pair;
        # _Ws[i] does the same for intermediate state i.
        self._W0 = nn.Parameter(torch.Tensor(ninp+nhid, 2*nhid).uniform_(-INITRANGE, INITRANGE))
        self._Ws = nn.ParameterList([
            nn.Parameter(torch.Tensor(nhid, 2*nhid).uniform_(-INITRANGE, INITRANGE)) for i in range(steps)
        ])

    def forward(self, inputs, hidden, arch_probs):
        """Run the cell over a (T, B, ninp) sequence.

        Returns (stacked hidden states over time, last hidden with leading 1-dim).
        """
        T, B = inputs.size(0), inputs.size(1)
        if self.training:
            # Variational dropout: one mask reused across all timesteps.
            x_mask = mask2d(B, inputs.size(2), keep_prob=1.-self.dropoutx)
            h_mask = mask2d(B, hidden.size(2), keep_prob=1.-self.dropouth)
        else:
            x_mask = h_mask = None
        hidden = hidden[0]
        hiddens = []
        for t in range(T):
            hidden = self.cell(inputs[t], hidden, x_mask, h_mask, arch_probs)
            hiddens.append(hidden)
        hiddens = torch.stack(hiddens)
        return hiddens, hiddens[-1].unsqueeze(0)

    def _compute_init_state(self, x, h_prev, x_mask, h_mask):
        # Highway-style initial state: s0 = h_prev + c0 * (h0 - h_prev).
        if self.training:
            xh_prev = torch.cat([x * x_mask, h_prev * h_mask], dim=-1)
        else:
            xh_prev = torch.cat([x, h_prev], dim=-1)
        c0, h0 = torch.split(xh_prev.mm(self._W0), self.nhid, dim=-1)
        c0 = c0.sigmoid()
        h0 = h0.tanh()
        s0 = h_prev + c0 * (h0-h_prev)
        return s0

    def _get_activation(self, name):
        # Map a genotype op name to its activation callable.
        if name == 'tanh':
            f = torch.tanh
        elif name == 'relu':
            f = torch.relu
        elif name == 'sigmoid':
            f = torch.sigmoid
        elif name == 'identity':
            f = lambda x: x
        elif name == 'none':
            f = none_func
        else:
            raise NotImplementedError
        return f

    def cell(self, x, h_prev, x_mask, h_mask, _):
        # One timestep following the fixed genotype (the arch_probs argument
        # is unused in this fixed-architecture cell).
        s0 = self._compute_init_state(x, h_prev, x_mask, h_mask)
        states = [s0]
        for i, (name, pred) in enumerate(self.genotype.recurrent):
            s_prev = states[pred]
            if self.training:
                ch = (s_prev * h_mask).mm(self._Ws[i])
            else:
                ch = s_prev.mm(self._Ws[i])
            c, h = torch.split(ch, self.nhid, dim=-1)
            c = c.sigmoid()
            fn = self._get_activation(name)
            h = fn(h)
            # Gated mix of candidate h into the predecessor state.
            s = s_prev + c * (h-s_prev)
            states += [s]
        # Output is the mean of the genotype's concat states.
        output = torch.mean(torch.stack([states[i] for i in self.genotype.concat], -1), -1)
        return output
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""

    def __init__(self, ntoken, ninp, nhid, nhidlast,
                 dropout=0.5, dropouth=0.5, dropoutx=0.5, dropouti=0.5, dropoute=0.1,
                 cell_cls=None, genotype=None):
        # ntoken   -- vocabulary size
        # dropout  -- locked dropout on the RNN output
        # dropouti -- locked dropout on the embedded input
        # dropoute -- dropout over whole embedding rows (training only)
        # cell_cls -- DARTSCell (fixed genotype) or a search-cell class
        super(RNNModel, self).__init__()
        self.lockdrop = LockedDropout()
        self.encoder = nn.Embedding(ntoken, ninp)
        # Weight tying below requires all three sizes to match.
        assert ninp == nhid == nhidlast
        if cell_cls == DARTSCell:
            assert genotype is not None
            rnns = [cell_cls(ninp, nhid, dropouth, dropoutx, genotype)]
        else:
            assert genotype is None
            rnns = [cell_cls(ninp, nhid, dropouth, dropoutx)]
        self.rnns = torch.nn.ModuleList(rnns)
        self.decoder = nn.Linear(ninp, ntoken)
        # Tie decoder and encoder weights.
        self.decoder.weight = self.encoder.weight
        self.init_weights()
        self.arch_weights = None
        self.ninp = ninp
        self.nhid = nhid
        self.nhidlast = nhidlast
        self.dropout = dropout
        self.dropouti = dropouti
        self.dropoute = dropoute
        self.ntoken = ntoken
        self.cell_cls = cell_cls
        # acceleration
        self.tau = None          # Gumbel-softmax temperature
        self.use_gumbel = False  # sample arch weights via Gumbel-softmax

    def set_gumbel(self, use_gumbel, set_check):
        self.use_gumbel = use_gumbel
        # NOTE(review): DARTSCell defines no set_check; presumably only the
        # search-cell class implements it -- confirm before calling this in
        # fixed-genotype mode.
        for i, rnn in enumerate(self.rnns):
            rnn.set_check(set_check)

    def set_tau(self, tau):
        self.tau = tau

    def get_tau(self):
        return self.tau

    def init_weights(self):
        # Small uniform init; decoder bias zeroed.
        self.encoder.weight.data.uniform_(-INITRANGE, INITRANGE)
        self.decoder.bias.data.fill_(0)
        self.decoder.weight.data.uniform_(-INITRANGE, INITRANGE)

    def forward(self, input, hidden, return_h=False):
        """Return log-probabilities shaped (T, B, ntoken) and new hidden state."""
        batch_size = input.size(1)
        # Embedding-row dropout applies only while training.
        emb = embedded_dropout(self.encoder, input, dropout=self.dropoute if self.training else 0)
        emb = self.lockdrop(emb, self.dropouti)
        raw_output = emb
        new_hidden = []
        raw_outputs = []
        outputs = []
        # arch_probs stays None in fixed-genotype mode (DARTSCell ignores it).
        if self.arch_weights is None:
            arch_probs = None
        else:
            if self.use_gumbel: arch_probs = F.gumbel_softmax(self.arch_weights, self.tau, False)
            else              : arch_probs = F.softmax(self.arch_weights, dim=-1)
        for l, rnn in enumerate(self.rnns):
            current_input = raw_output
            raw_output, new_h = rnn(raw_output, hidden[l], arch_probs)
            new_hidden.append(new_h)
            raw_outputs.append(raw_output)
        hidden = new_hidden
        output = self.lockdrop(raw_output, self.dropout)
        outputs.append(output)
        logit = self.decoder(output.view(-1, self.ninp))
        log_prob = nn.functional.log_softmax(logit, dim=-1)
        model_output = log_prob
        model_output = model_output.view(-1, batch_size, self.ntoken)
        if return_h: return model_output, hidden, raw_outputs, outputs
        else       : return model_output, hidden

    def init_hidden(self, bsz):
        # Fresh zero hidden state on the same device/dtype as the parameters.
        weight = next(self.parameters()).clone()
        return [weight.new(1, bsz, self.nhid).zero_()]
| 30.478022
| 102
| 0.650261
|
4a00b5e8fc47f5a63cdbf3000cdb40a59b42eace
| 5,211
|
py
|
Python
|
textrank4zh/Segmentation.py
|
yanzhelee/TextRank4ZH
|
e0336c8e0d53e1414394ce8fccda6ff9ecb1a720
|
[
"MIT"
] | null | null | null |
textrank4zh/Segmentation.py
|
yanzhelee/TextRank4ZH
|
e0336c8e0d53e1414394ce8fccda6ff9ecb1a720
|
[
"MIT"
] | null | null | null |
textrank4zh/Segmentation.py
|
yanzhelee/TextRank4ZH
|
e0336c8e0d53e1414394ce8fccda6ff9ecb1a720
|
[
"MIT"
] | null | null | null |
#-*- encoding:utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import jieba.posseg as pseg
import codecs
import os
from . import util
def get_default_stop_words_file():
    """Return the absolute path of the bundled default stop-words file."""
    package_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(package_dir, 'stopwords.txt')
class WordSegmentation(object):
    """Word segmentation (tokenization) built on jieba POS tagging."""

    def __init__(self, stop_words_file=None, allow_speech_tags=util.allow_speech_tags):
        """
        Keyword arguments:
        stop_words_file   -- path to a UTF-8 stop-words file, one word per line;
                             if not a str, the bundled default file is used
        allow_speech_tags -- list of POS tags used for filtering
        """
        allow_speech_tags = [util.as_text(item) for item in allow_speech_tags]
        self.default_speech_tag_filter = allow_speech_tags
        self.stop_words = set()
        self.stop_words_file = get_default_stop_words_file()
        if type(stop_words_file) is str:
            self.stop_words_file = stop_words_file
        # Load the stop-word set; undecodable bytes are ignored.
        for word in codecs.open(self.stop_words_file, 'r', 'utf-8', 'ignore'):
            self.stop_words.add(word.strip())

    def segment(self, text, lower=True, use_stop_words=True, use_speech_tags_filter=False):
        """Segment a piece of text; returns the tokens as a list.

        Keyword arguments:
        lower                  -- lowercase tokens (affects English text)
        use_stop_words         -- if True, drop tokens in the stop-word set
        use_speech_tags_filter -- if True, keep only tokens whose POS tag is
                                  in self.default_speech_tag_filter
        """
        text = util.as_text(text)
        jieba_result = pseg.cut(text)
        if use_speech_tags_filter == True:
            jieba_result = [w for w in jieba_result if w.flag in self.default_speech_tag_filter]
        else:
            jieba_result = [w for w in jieba_result]
        # Drop punctuation/symbols (jieba tags them 'x') and empty tokens.
        word_list = [w.word.strip() for w in jieba_result if w.flag != 'x']
        word_list = [word for word in word_list if len(word) > 0]
        if lower:
            word_list = [word.lower() for word in word_list]
        if use_stop_words:
            word_list = [word.strip() for word in word_list if word.strip() not in self.stop_words]
        return word_list

    def segment_sentences(self, sentences, lower=True, use_stop_words=True, use_speech_tags_filter=False):
        """Segment every sentence in `sentences`; returns a list of token lists.

        sentences -- list of sentences (strings)
        """
        res = []
        for sentence in sentences:
            res.append(self.segment(text=sentence,
                                    lower=lower,
                                    use_stop_words=use_stop_words,
                                    use_speech_tags_filter=use_speech_tags_filter))
        return res
class SentenceSegmentation(object):
    """Sentence segmentation: split text on a set of delimiter strings."""

    def __init__(self, delimiters=util.sentence_delimiters):
        """
        Keyword arguments:
        delimiters -- iterable of delimiter strings used to split sentences
        """
        self.delimiters = set([util.as_text(item) for item in delimiters])

    def segment(self, text):
        """Split `text` into sentences; returns a list of non-empty strings."""
        res = [util.as_text(text)]
        util.debug(res)
        util.debug(self.delimiters)
        # Split repeatedly: each delimiter further splits every current piece.
        for sep in self.delimiters:
            text, res = res, []
            for seq in text:
                res += seq.split(sep)
        res = [s.strip() for s in res if len(s.strip()) > 0]
        return res
class Segmentation(object):
    """Facade combining sentence splitting with word segmentation."""

    def __init__(self, stop_words_file=None,
                 allow_speech_tags=util.allow_speech_tags,
                 delimiters=util.sentence_delimiters):
        """
        Keyword arguments:
        stop_words_file   -- stop-words file path (None -> bundled default)
        allow_speech_tags -- POS tags kept by the speech-tag filter
        delimiters        -- delimiter strings used to split sentences
        """
        self.ws = WordSegmentation(stop_words_file=stop_words_file, allow_speech_tags=allow_speech_tags)
        self.ss = SentenceSegmentation(delimiters=delimiters)

    def segment(self, text, lower=False):
        """Return the sentences plus three tokenizations at increasing filter levels."""
        text = util.as_text(text)
        sentences = self.ss.segment(text)
        # No filtering at all.
        words_no_filter = self.ws.segment_sentences(sentences=sentences,
                                                    lower=lower,
                                                    use_stop_words=False,
                                                    use_speech_tags_filter=False)
        # Stop words removed.
        words_no_stop_words = self.ws.segment_sentences(sentences=sentences,
                                                        lower=lower,
                                                        use_stop_words=True,
                                                        use_speech_tags_filter=False)
        # Stop words removed and POS-tag filter applied.
        words_all_filters = self.ws.segment_sentences(sentences=sentences,
                                                      lower=lower,
                                                      use_stop_words=True,
                                                      use_speech_tags_filter=True)
        return util.AttrDict(
            sentences=sentences,
            words_no_filter=words_no_filter,
            words_no_stop_words=words_no_stop_words,
            words_all_filters=words_all_filters
        )
| 38.88806
| 106
| 0.551909
|
4a00b720beba669c8bd3cb8cc4f5a7a037a7be00
| 14,910
|
py
|
Python
|
pytest_django/fixtures.py
|
qaprosoft/pytest-django
|
b502af3dc3ed55b0ac67a1b357bf5d57f005619e
|
[
"BSD-3-Clause"
] | null | null | null |
pytest_django/fixtures.py
|
qaprosoft/pytest-django
|
b502af3dc3ed55b0ac67a1b357bf5d57f005619e
|
[
"BSD-3-Clause"
] | null | null | null |
pytest_django/fixtures.py
|
qaprosoft/pytest-django
|
b502af3dc3ed55b0ac67a1b357bf5d57f005619e
|
[
"BSD-3-Clause"
] | null | null | null |
"""All pytest-django fixtures"""
from __future__ import with_statement
import os
import warnings
from contextlib import contextmanager
from functools import partial
import pytest
from . import live_server_helper
from .django_compat import is_django_unittest
from .lazy_django import skip_if_no_django
__all__ = [
"django_db_setup",
"db",
"transactional_db",
"django_db_reset_sequences",
"admin_user",
"django_user_model",
"django_username_field",
"client",
"admin_client",
"rf",
"settings",
"live_server",
"_live_server_helper",
"django_assert_num_queries",
"django_assert_max_num_queries",
]
@pytest.fixture(scope="session")
def django_db_modify_db_settings_tox_suffix(request):
    """Suffix test database names with the tox parallel environment name."""
    skip_if_no_django()

    env_name = os.getenv("TOX_PARALLEL_ENV")
    if env_name:
        # Gives each tox worker (e.g. _py27-django21) its own database.
        _set_suffix_to_test_databases(suffix=env_name)
@pytest.fixture(scope="session")
def django_db_modify_db_settings_xdist_suffix(request):
    """Suffix test database names with the xdist worker id (_gw0, _gw1, ...)."""
    skip_if_no_django()

    worker_id = getattr(request.config, "slaveinput", {}).get("slaveid")
    if worker_id:
        _set_suffix_to_test_databases(suffix=worker_id)
@pytest.fixture(scope="session")
def django_db_modify_db_settings_parallel_suffix(
    django_db_modify_db_settings_tox_suffix,
    django_db_modify_db_settings_xdist_suffix,
):
    # Aggregation point: requesting this fixture applies both the tox and the
    # xdist database-name suffixes (all the work happens in the dependencies).
    skip_if_no_django()
@pytest.fixture(scope="session")
def django_db_modify_db_settings(django_db_modify_db_settings_parallel_suffix):
    # Override this fixture in a conftest to customize test DB settings;
    # by default it only pulls in the parallel-run suffixing.
    skip_if_no_django()
@pytest.fixture(scope="session")
def django_db_use_migrations(request):
    """Whether migrations run during test-DB creation (--nomigrations inverts)."""
    return not request.config.getvalue("nomigrations")


@pytest.fixture(scope="session")
def django_db_keepdb(request):
    """Whether an existing test database is reused (--reuse-db)."""
    return request.config.getvalue("reuse_db")


@pytest.fixture(scope="session")
def django_db_createdb(request):
    """Whether the test database is forcibly re-created (--create-db)."""
    return request.config.getvalue("create_db")
@pytest.fixture(scope="session")
def django_db_setup(
    request,
    django_test_environment,
    django_db_blocker,
    django_db_use_migrations,
    django_db_keepdb,
    django_db_createdb,
    django_db_modify_db_settings,
):
    """Top level fixture to ensure test databases are available"""
    from .compat import setup_databases, teardown_databases

    setup_databases_args = {}

    if not django_db_use_migrations:
        _disable_native_migrations()

    if django_db_keepdb and not django_db_createdb:
        # Reuse the existing test DB instead of dropping/recreating it.
        setup_databases_args["keepdb"] = True

    with django_db_blocker.unblock():
        db_cfg = setup_databases(
            verbosity=request.config.option.verbose,
            interactive=False,
            **setup_databases_args
        )

    def teardown_database():
        # Do not teardown db
        pass

    # NOTE(review): teardown is deliberately disabled in this fork — the
    # finalizer below is commented out so test databases survive the run.
    # teardown_database above is likewise a no-op and is never registered.
    #with django_db_blocker.unblock():
    #    try:
    #        teardown_databases(db_cfg, verbosity=request.config.option.verbose)
    #    except Exception as exc:
    #        request.node.warn(
    #            pytest.PytestWarning(
    #                "Error when trying to teardown test databases: %r" % exc
    #            )
    #        )

    #if not django_db_keepdb:
    #    request.addfinalizer(teardown_database)
def _django_db_fixture_helper(
    request, django_db_blocker, transactional=False, reset_sequences=False
):
    """Shared body of the db/transactional_db/django_db_reset_sequences fixtures.

    Unblocks database access for the test and performs the Django TestCase /
    TransactionTestCase pre-setup appropriate for the requested mode.
    """
    if is_django_unittest(request):
        # Django's own unittest machinery already manages the database.
        return

    if not transactional and "live_server" in request.fixturenames:
        # Do nothing, we get called with transactional=True, too.
        return

    django_db_blocker.unblock()
    request.addfinalizer(django_db_blocker.restore)

    if transactional:
        from django.test import TransactionTestCase as django_case

        if reset_sequences:

            class ResetSequenceTestCase(django_case):
                reset_sequences = True

            django_case = ResetSequenceTestCase
    else:
        from django.test import TestCase as django_case

    # methodName="__init__" instantiates the TestCase without needing a real
    # test method; only _pre_setup is used.
    test_case = django_case(methodName="__init__")
    test_case._pre_setup()
    # NOTE(review): post-test DB teardown is deliberately disabled in this
    # fork — _post_teardown is never registered as a finalizer.
    # Removing call to teardown db
    #request.addfinalizer(test_case._post_teardown)
def _disable_native_migrations():
    """Replace migrations with a no-op module map and silence ``migrate``."""
    from django.conf import settings
    from django.core.management.commands import migrate

    from .migrations import DisableMigrations

    settings.MIGRATION_MODULES = DisableMigrations()

    class _QuietMigrate(migrate.Command):
        def handle(self, *args, **kwargs):
            kwargs = dict(kwargs, verbosity=0)
            return super(_QuietMigrate, self).handle(*args, **kwargs)

    migrate.Command = _QuietMigrate
def _set_suffix_to_test_databases(suffix):
    """Append ``_<suffix>`` to every test database name in settings.DATABASES."""
    from django.conf import settings

    for db_config in settings.DATABASES.values():
        name = db_config.get("TEST", {}).get("NAME")

        if not name:
            # SQLite without an explicit TEST NAME uses an in-memory DB;
            # nothing to suffix there.
            if db_config["ENGINE"] == "django.db.backends.sqlite3":
                continue
            name = "test_{}".format(db_config["NAME"])

        if name == ":memory:":
            continue

        db_config.setdefault("TEST", {})
        db_config["TEST"]["NAME"] = "{}_{}".format(name, suffix)
# ############### User visible fixtures ################
@pytest.fixture(scope="function")
def db(request, django_db_setup, django_db_blocker):
    """Require a django test database.

    The database is set up with the default fixtures and transaction
    management disabled; the outer transaction wrapping the test is rolled
    back afterwards (where the backend supports transactions). Faster but
    more limited than ``transactional_db``.

    When several database fixtures are requested together they take
    precedence in this order (last wins): ``db``, ``transactional_db``,
    ``django_db_reset_sequences``.
    """
    requested = request.fixturenames
    if "django_db_reset_sequences" in requested:
        request.getfixturevalue("django_db_reset_sequences")
    if "transactional_db" in requested or "live_server" in requested:
        request.getfixturevalue("transactional_db")
    else:
        _django_db_fixture_helper(request, django_db_blocker, transactional=False)
@pytest.fixture(scope="function")
def transactional_db(request, django_db_setup, django_db_blocker):
    """Require a django test database with transaction support.

    This will re-initialise the django database for each test and is
    thus slower than the normal ``db`` fixture.

    If you want to use the database with transactions you must request
    this resource.

    If multiple database fixtures are requested, they take precedence
    over each other in the following order (the last one wins): ``db``,
    ``transactional_db``, ``django_db_reset_sequences``.
    """
    # Defer to the reset-sequences fixture when it is also requested.
    if "django_db_reset_sequences" in request.fixturenames:
        request.getfixturevalue("django_db_reset_sequences")
    _django_db_fixture_helper(request, django_db_blocker, transactional=True)
@pytest.fixture(scope="function")
def django_db_reset_sequences(request, django_db_setup, django_db_blocker):
    """Require a transactional test database with sequence reset support.

    This behaves like the ``transactional_db`` fixture, with the addition
    of enforcing a reset of all auto increment sequences. If the enquiring
    test relies on such values (e.g. ids as primary keys), you should
    request this resource to ensure they are consistent across tests.

    If multiple database fixtures are requested, they take precedence
    over each other in the following order (the last one wins): ``db``,
    ``transactional_db``, ``django_db_reset_sequences``.
    """
    _django_db_fixture_helper(
        request, django_db_blocker, transactional=True, reset_sequences=True
    )
@pytest.fixture()
def client():
    """A Django test client instance."""
    skip_if_no_django()

    # Imported lazily so merely collecting tests does not require Django.
    from django.test.client import Client

    return Client()


@pytest.fixture()
def django_user_model(db):
    """The class of Django's user model."""
    from django.contrib.auth import get_user_model

    return get_user_model()


@pytest.fixture()
def django_username_field(django_user_model):
    """The fieldname for the username used with Django's user model."""
    return django_user_model.USERNAME_FIELD
@pytest.fixture()
def admin_user(db, django_user_model, django_username_field):
    """A Django admin user.

    Reuses an existing user with username "admin" (or "admin@example.com"
    when the username field is the email), otherwise creates a superuser
    with password "password".
    """
    user_model = django_user_model
    field = django_username_field
    username = "admin@example.com" if field == "email" else "admin"

    try:
        # Prefer an already-existing admin user.
        user = user_model._default_manager.get(**{field: username})
    except user_model.DoesNotExist:
        extra_fields = {}
        if field not in ("username", "email"):
            # Custom username fields still need a value.
            extra_fields[field] = "admin"
        user = user_model._default_manager.create_superuser(
            username, "admin@example.com", "password", **extra_fields
        )
    return user
@pytest.fixture()
def admin_client(db, admin_user):
    """A Django test client logged in as an admin user."""
    from django.test.client import Client

    logged_in_client = Client()
    logged_in_client.login(username=admin_user.username, password="password")
    return logged_in_client
@pytest.fixture()
def rf():
    """RequestFactory instance"""
    skip_if_no_django()

    # Imported lazily so merely collecting tests does not require Django.
    from django.test.client import RequestFactory

    return RequestFactory()
class SettingsWrapper(object):
    """Proxy around ``django.conf.settings``.

    Attribute assignment/deletion is applied through
    ``django.test.override_settings`` and recorded so that ``finalize()``
    can undo every change, most recent first.
    """

    def __init__(self):
        # BUG FIX: ``_to_restore`` used to be a *class* attribute, so all
        # SettingsWrapper instances shared one list of pending overrides.
        # It must be created with object.__setattr__ because the normal
        # attribute protocol would hit the overridden __setattr__ below and
        # try to override a Django setting called "_to_restore".
        object.__setattr__(self, "_to_restore", [])

    def __delattr__(self, attr):
        from django.test import override_settings

        override = override_settings()
        override.enable()
        from django.conf import settings

        delattr(settings, attr)

        self._to_restore.append(override)

    def __setattr__(self, attr, value):
        from django.test import override_settings

        override = override_settings(**{attr: value})
        override.enable()
        self._to_restore.append(override)

    def __getattr__(self, item):
        # Only reached for attributes not set on the instance: fall through
        # to the real Django settings.
        from django.conf import settings

        return getattr(settings, item)

    def finalize(self):
        """Undo all recorded overrides, most recent first."""
        for override in reversed(self._to_restore):
            override.disable()

        del self._to_restore[:]
# NOTE(review): pytest.yield_fixture is deprecated in modern pytest (plain
# pytest.fixture supports yield) — confirm the minimum supported pytest
# version before switching.
@pytest.yield_fixture()
def settings():
    """A Django settings object which restores changes after the testrun"""
    skip_if_no_django()

    wrapper = SettingsWrapper()
    yield wrapper
    wrapper.finalize()
@pytest.fixture(scope="session")
def live_server(request):
    """Run a live Django server in the background during tests

    The address the server is started from is taken from the
    --liveserver command line option or if this is not provided from
    the DJANGO_LIVE_TEST_SERVER_ADDRESS environment variable.  If
    neither is provided ``localhost:8081,8100-8200`` is used.  See the
    Django documentation for its full syntax.

    NOTE: If the live server needs database access to handle a request
          your test will have to request database access.  Furthermore
          when the tests want to see data added by the live-server (or
          the other way around) transactional database access will be
          needed as data inside a transaction is not shared between
          the live server and test code.

    Static assets will be automatically served when
    ``django.contrib.staticfiles`` is available in INSTALLED_APPS.
    """
    skip_if_no_django()

    import django

    addr = request.config.getvalue("liveserver") or os.getenv(
        "DJANGO_LIVE_TEST_SERVER_ADDRESS"
    )

    if addr and ":" in addr:
        if django.VERSION >= (1, 11):
            # Django >= 1.11 binds a single port; port ranges/lists from the
            # old syntax are accepted but warned about.
            ports = addr.split(":")[1]
            if "-" in ports or "," in ports:
                warnings.warn(
                    "Specifying multiple live server ports is not supported "
                    "in Django 1.11. This will be an error in a future "
                    "pytest-django release."
                )

    if not addr:
        if django.VERSION < (1, 11):
            addr = "localhost:8081,8100-8200"
        else:
            addr = "localhost"

    server = live_server_helper.LiveServer(addr)
    request.addfinalizer(server.stop)
    return server
@pytest.fixture(autouse=True, scope="function")
def _live_server_helper(request):
    """Helper to make live_server work, internal to pytest-django.

    This helper will dynamically request the transactional_db fixture
    for a test which uses the live_server fixture.  This allows the
    server and test to access the database without having to mark
    this explicitly which is handy since it is usually required and
    matches the Django behaviour.

    The separate helper is required since live_server can not request
    transactional_db directly since it is session scoped instead of
    function-scoped.

    It will also override settings only for the duration of the test.
    """
    if "live_server" not in request.fixturenames:
        return

    # Order matters: the DB must be transactional before the server handles
    # requests from the test.
    request.getfixturevalue("transactional_db")

    live_server = request.getfixturevalue("live_server")
    live_server._live_server_modified_settings.enable()
    request.addfinalizer(live_server._live_server_modified_settings.disable)
@contextmanager
def _assert_num_queries(config, num, exact=True, connection=None, info=None):
    """Context manager asserting the number of DB queries executed inside it.

    config -- the pytest config (used for verbosity)
    num -- expected query count (exact, or an upper bound when exact=False)
    connection -- DB connection to capture; defaults to the default connection
    info -- optional extra text appended to the failure message
    """
    from django.test.utils import CaptureQueriesContext

    if connection is None:
        from django.db import connection

    verbose = config.getoption("verbose") > 0
    with CaptureQueriesContext(connection) as context:
        yield context
    num_performed = len(context)
    if exact:
        failed = num != num_performed
    else:
        failed = num_performed > num
    if failed:
        msg = "Expected to perform {} queries {}{}".format(
            num,
            "" if exact else "or less ",
            "but {} done".format(
                num_performed == 1 and "1 was" or "%d were" % (num_performed,)
            ),
        )
        if info:
            msg += "\n{}".format(info)
        if verbose:
            # Include the captured SQL to help diagnose the mismatch.
            sqls = (q["sql"] for q in context.captured_queries)
            msg += "\n\nQueries:\n========\n\n%s" % "\n\n".join(sqls)
        else:
            msg += " (add -v option to show queries)"
        pytest.fail(msg)
@pytest.fixture(scope="function")
def django_assert_num_queries(pytestconfig):
    """Context-manager factory asserting an exact number of DB queries."""
    return partial(_assert_num_queries, pytestconfig)


@pytest.fixture(scope="function")
def django_assert_max_num_queries(pytestconfig):
    """Context-manager factory asserting an upper bound on DB queries."""
    return partial(_assert_num_queries, pytestconfig, exact=False)
| 30.742268
| 84
| 0.687324
|
4a00b76ad404c0db4526afe6783576198aa493ad
| 4,750
|
py
|
Python
|
main.py
|
YRC99/CellMachineLearning
|
abdffcd10e313721180737fb306821d3d10f7cdb
|
[
"MIT"
] | null | null | null |
main.py
|
YRC99/CellMachineLearning
|
abdffcd10e313721180737fb306821d3d10f7cdb
|
[
"MIT"
] | null | null | null |
main.py
|
YRC99/CellMachineLearning
|
abdffcd10e313721180737fb306821d3d10f7cdb
|
[
"MIT"
] | 1
|
2022-01-09T10:58:21.000Z
|
2022-01-09T10:58:21.000Z
|
from __future__ import annotations
import os
from os import environ
from flask import Flask, request
import scanpy
import joblib
import pandas as pd
from pymongo import MongoClient
import boto3
# Feature columns the classifier expects; absent columns are filled with
# these defaults ('Phase_S' defaults to 1, everything else to 0).
requiredKeysDefault = {
    'nCount_ADT': 0, 'nFeature_ADT': 0, 'nCount_RNA': 0, 'nFeature_RNA': 0,
    'nCount_SCT': 0, 'nFeature_SCT': 0, 'Phase_G1': 0, 'Phase_G2M': 0, 'Phase_S': 1}
# Cell-type labels in the order of the classifier's output columns.
outputLabels = ['B', 'CD4 T', 'CD8', 'DC',
                'Mono', 'NK', 'other', 'other T']

app = Flask(__name__)

# S3-compatible object store, model and MongoDB configuration from the
# environment; all of these must be set for the service to start.
endpoint = os.getenv('ENDPOINT')
access_key = os.getenv('ACCESS_KEY')
secret_key = os.getenv(
    'SECRET_KEY')
modelname=os.getenv('MODEL_NAME')
database_uri = os.getenv(
    'DATABASE_URI')
bucket=os.getenv('BUCKET')
def dispose(filename):
    """Delete *filename* from disk if it exists; silently do nothing otherwise."""
    if not os.path.isfile(filename):
        return
    print(filename + " found, will be disposed")
    os.remove(filename)
# Startup: fetch the trained classifier from object storage, load it into
# memory, then remove the local copy and connect to MongoDB.
s3 = boto3.client('s3', endpoint_url=endpoint,
                  aws_access_key_id=access_key, aws_secret_access_key=secret_key)
print("starting download")
s3.download_file(bucket, modelname, modelname)
print("Download finished, loading model")
clf = joblib.load(modelname)
print("Model loaded, ready to dispose")
# The model now lives in memory; the on-disk copy is no longer needed.
dispose(modelname)
db = MongoClient(database_uri).get_default_database()
@app.route("/")
@app.route("/<path:path>")
def catch_all(path):
    # Fallback route for every unimplemented path, including the root.
    return 'You want path: %s, which is not yet implemented or does not exist' % path
@app.route("/run_classifier", methods=['POST'])
def classify():
    """Classify the cells of a previously uploaded project.

    Expects JSON with an ``uploadId`` key identifying a project document in
    MongoDB. Downloads the project's .h5ad file from the bucket, runs the
    classifier, uploads the resulting TSV and marks the project DONE.
    """
    data = request.get_json()
    for key in ["uploadId"]:
        if key not in data:
            return "Key \"{}\" missing in request json data!\nPlease check again if the request is correct!".format(key), 400
    uploadId = data['uploadId']
    project = db.projects.find_one({"uploadId": uploadId})
    if project is None:
        message = f"There exists no project with upload_id {uploadId}"
        print(message)
        return message, 400
    if (project['status']) == "ABORTED":
        # BUG FIX: a bare ``return`` makes the Flask view return None, which
        # raises a TypeError at request time; return an explicit response.
        print("Project has been aborted. Terminating.")
        return "Project has been aborted. Terminating.", 200
    print("Project found and not aborted")
    fileName = str(project['_id'])
    print("Starting download h5ad")
    s3.download_file(bucket, fileName, fileName + '.h5ad')
    print("Ready for prediction")
    result = predict(fileName + '.h5ad')
    uploadSize = upload(result)
    # Clean up the local working files once the result is stored.
    dispose(result)
    dispose(fileName + '.h5ad')
    db.projects.update_one({'uploadId': uploadId}, {
        "$set": {"status": "DONE", "resultSize": uploadSize, "resultName": result}})
    print("Classification has been computed")
    return "Classification has been computed", 200
def upload(filename):
    """Upload *filename* to the configured bucket; return its stored size in bytes."""
    s3.upload_file(filename, bucket, filename)
    head = s3.head_object(Bucket=bucket, Key=filename)
    return head['ContentLength']
def predict(filename):
    """Predict a cell type for every cell in the given .h5ad file.

    Writes a TSV with columns id, x, y, celltype (x/y are UMAP coordinates)
    and returns its file name.
    """
    # FIX: renamed the local ``input`` — it shadowed the builtin of the same name.
    adata = scanpy.read_h5ad(filename, backed='r+')
    cleanedDataset = adata.obs
    if not cleanedDataset.empty:
        # One-hot encode categoricals (e.g. Phase -> Phase_G1/G2M/S), then
        # align the columns to exactly what the classifier was trained on.
        cleanedDataset = pd.get_dummies(cleanedDataset)
        for key in cleanedDataset:
            if key not in requiredKeysDefault.keys():
                cleanedDataset.drop(key, inplace=True, axis=1)
        for key, default in requiredKeysDefault.items():
            if key not in cleanedDataset.keys():
                cleanedDataset[key] = default
        cleanedDataset = cleanedDataset[requiredKeysDefault.keys()]
        y_predict = clf.predict(cleanedDataset)
        # assumes clf outputs one score column per label in outputLabels order
        # — TODO confirm against the trained model.
        output = pd.DataFrame(
            data=y_predict, index=cleanedDataset.index, columns=outputLabels)
        output = output.idxmax(axis=1)
    # Compute a UMAP embedding only when the file does not already have one.
    if "X_umap" not in adata.obsm.keys():
        scanpy.pp.normalize_total(adata)
        scanpy.pp.log1p(adata)
        scanpy.pp.pca(adata)
        scanpy.pp.neighbors(adata)
        scanpy.tl.umap(adata)
    cleanedDataset['celltype'] = output
    cleanedDataset['x'] = list(
        map(lambda pair: pair[0], adata.obsm['X_umap']))
    cleanedDataset['y'] = list(
        map(lambda pair: pair[1], adata.obsm['X_umap']))
    resultname = 'result_' + filename.rsplit(".", 1)[0] + '.tsv'
    cleanedDataset.index.name = 'id'
    cleanedDataset.to_csv(resultname, columns=['x', 'y', 'celltype'], sep='\t')
    return resultname
def download_file(url):
    """Download *url* into the current directory, streaming in chunks.

    Returns the local file name (the last path segment of the URL).
    """
    # BUG FIX: the previous implementation used ``requests``, which is never
    # imported anywhere in this file and would raise NameError at call time.
    # The standard library covers this use case.
    import shutil
    import urllib.request

    print("Begin Download")
    local_filename = url.split('/')[-1]
    with urllib.request.urlopen(url) as response:
        with open(local_filename, 'wb') as f:
            # Stream in 8 KiB chunks so large files are not held in memory.
            shutil.copyfileobj(response, f, length=8192)
    return local_filename
if __name__ == "__main__":
    # Development entry point; PORT can be overridden via the environment.
    app.run(debug=True, host="0.0.0.0", port=int(os.environ.get("PORT", 8080)))
| 33.687943
| 125
| 0.663158
|
4a00b77bdca32486a6f85be27b21485474d2fb24
| 1,411
|
py
|
Python
|
generate.py
|
Dominique2509/generate-fsm
|
cb032ed310693f95048929d5840f5b5df3624a4b
|
[
"MIT"
] | null | null | null |
generate.py
|
Dominique2509/generate-fsm
|
cb032ed310693f95048929d5840f5b5df3624a4b
|
[
"MIT"
] | null | null | null |
generate.py
|
Dominique2509/generate-fsm
|
cb032ed310693f95048929d5840f5b5df3624a4b
|
[
"MIT"
] | null | null | null |
import random
import sys

# Number of states in the generated FSM and transitions leaving each state.
num_of_states = 5
transitions = 4

# Input alphabet: each symbol is a single character.
input_alphabet = {"A", "B", "C", "D"}

# Each state needs `transitions` distinct symbols, so the alphabet must be
# at least that large.
if transitions > len(input_alphabet):
    sys.exit("Input alphabet has to be at least as large as the amount of transitions.")

parts = ["#include <stdio.h> \n\nint currentState = 0;\n\n"]
parts.append("void makeTransition(char input) { \n")
parts.append("\tswitch (currentState) {\n")

# One C `case` per state: each state reacts to `transitions` randomly chosen
# symbols, each moving to a randomly chosen successor state.
for state in range(num_of_states):
    parts.append("\tcase " + str(state) + ":\n")
    # BUG FIX: random.sample() rejects sets since Python 3.11 — convert to a
    # sorted list first (sorting also makes the choice reproducible under a
    # fixed random seed).
    chosen_symbols = random.sample(sorted(input_alphabet), transitions)
    parts.append("\t\tswitch (input) { \n")
    for symbol in chosen_symbols:
        next_state = str(random.randint(0, num_of_states - 1))
        parts.append("\t\t\tcase \'" + symbol + "\':\n")
        parts.append("\t\t\t\tcurrentState = " + next_state + ";\n")
        parts.append("\t\t\t\treturn; \n")
    parts.append("\t\tbreak;\n\t\t}\n")

parts.append("\t} \n}\n\n")

# Generate the main function: endless read-input / transition / print loop.
parts.append("int main(int argc, char** charv) { \n")
parts.append("\tchar c = \'a\'; \n")
parts.append("\twhile(1) {\n")
parts.append("\t\t printf(\"Input: \");")
parts.append("\t\t scanf(\" %c\", &c); \n ")
parts.append("\t\t makeTransition(c); \n")
parts.append("\t\t printf(\"Current State: %d\\n\", currentState); \n")
parts.append("\t}\n")
parts.append("} \n")

res = "".join(parts)
print(res)
| 32.813953
| 79
| 0.595322
|
4a00b7bc97d1f9329a2ea9cd12963603477b40f5
| 984
|
py
|
Python
|
src/kd/cmd/linux/blockdev_hdlr.py
|
chiyukuan/ktest
|
05714b683940d88a8c0b3523a5245ca056a457da
|
[
"MIT"
] | null | null | null |
src/kd/cmd/linux/blockdev_hdlr.py
|
chiyukuan/ktest
|
05714b683940d88a8c0b3523a5245ca056a457da
|
[
"MIT"
] | null | null | null |
src/kd/cmd/linux/blockdev_hdlr.py
|
chiyukuan/ktest
|
05714b683940d88a8c0b3523a5245ca056a457da
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
''' ------------------------| Python SOURCE FILE |------------------------
The Description of this file.
@copyright: Copyright (c) by Kodiak Data, Inc. All rights reserved.
'''
from lcmd_hdlr import LcmdHdlr
from kd.ep.cmd_ctx import CmdCtx
from kd.util.rc_msg import RC
class BlockdevHdlr(LcmdHdlr):
    """Parses the output of ``blockdev --getsize`` / ``--getsize64`` commands."""

    def __init__(self):
        pass

    @staticmethod
    def canParseRslt(cmdCtx):
        # Both size queries are handled; str.startswith accepts a tuple.
        return cmdCtx.cmdMsg.startswith(
            ("blockdev --getsize ", "blockdev --getsize64 ")
        )

    @staticmethod
    def parseRslt(cmdCtx):
        # The size is the last non-blank line that is not the echoed command.
        size = None
        for line in cmdCtx.rspMsg.splitlines():
            if line == "" or line.isspace():
                continue
            if line.startswith("blockdev "):
                continue
            size = int(line)
        return size
if __name__ == '__main__':
    ''' Test this module here '''
    # Placeholder: no self-test is implemented yet.
|
4a00b82404a6c9cc5b17ff42ef4360e457dc4252
| 5,870
|
py
|
Python
|
material_models/plot.py
|
fnrizzi/ElasticShearWaves
|
b09cde0711562412c6bc24de0d18ad3a972b7289
|
[
"BSD-3-Clause"
] | 8
|
2021-12-06T16:17:17.000Z
|
2022-03-05T09:23:45.000Z
|
material_models/plot.py
|
fnrizzi/ElasticShearWaves
|
b09cde0711562412c6bc24de0d18ad3a972b7289
|
[
"BSD-3-Clause"
] | 6
|
2021-12-01T14:40:38.000Z
|
2022-01-13T07:44:41.000Z
|
material_models/plot.py
|
fnrizzi/ElasticShearWaves
|
b09cde0711562412c6bc24de0d18ad3a972b7289
|
[
"BSD-3-Clause"
] | 1
|
2021-11-02T02:06:41.000Z
|
2021-11-02T02:06:41.000Z
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import sys, re
from argparse import ArgumentParser
# radius of the earth
earthRadius = 6371. #km

# thickness of the mantle (depth of the core-mantle boundary)
cmbDepth = 2891. # km

# the radius of the core-mantle boundary
cmbRadius = earthRadius - cmbDepth
def ak135f(dv):
    """Evaluate the ak135-f reference Earth model at the given depths.

    dv -- array-like of depths in km measured down from the surface.
    Returns [rho, vs]: density (g/cm^3) and shear-wave speed (km/s) arrays,
    one entry per depth. vs is 0 in the fluid outer-core band.
    """
    n = len(dv)
    rho, vs = np.zeros(n), np.zeros(n)
    for i in range(n):
        d = dv[i]
        dSq = d*d
        dCu = d*d*d  # NOTE: unused by this model; kept parallel to prem()
        # Piecewise-polynomial fit per depth band (ocean, crust, mantle,
        # outer core, inner core).
        if(d >= 0 and d <= 3.):
            rho[i] = 1.02
            vs[i] = 1.
        elif(d > 3. and d <= 3.3):
            rho[i] = 2.
            vs[i] = 1.
        elif(d > 3.3 and d <= 10.):
            rho[i] = 2.6
            vs[i] = 3.2
        elif(d > 10. and d <= 18.):
            rho[i] = 2.92
            vs[i] = 3.9
        elif(d > 18. and d <= 80.):
            rho[i] = 3.688908 - 0.002755944*d + 5.244987e-6*dSq
            vs[i] = 4.479938 - 2.838134e-4*d - 3.537925e-6*dSq
        elif(d > 80. and d <= 120.):
            rho[i] = 3.6524 - 0.00188*d
            vs[i] = 4.47 + 0.00025*d
        elif(d > 120. and d <= 210.):
            rho[i] = 3.561983 - 0.001138889*d
            vs[i] = 4.4754 + 0.00020444*d
        elif(d > 210. and d <= 410.):
            rho[i] = 3.130252 + 0.0009128*d
            vs[i] = 4.151532 + 0.0017548*d
        elif(d > 410. and d <= 660.):
            rho[i] = 3.948468 - 4.548571e-5*d
            vs[i] = 4.211150 + 2.120343e-3*d
        elif(d > 660. and d <= 2740.):
            rho[i] = 3.789334 + 8.533642e-4*d - 9.455671e-8*dSq
            vs[i] = 5.530732 + 9.439965e-4*d - 1.202153e-7*dSq
        elif(d > 2740. and d <= 2891.5):
            rho[i] = 4.268296 + 0.0005202*d
            vs[i] = 6.648918 + 0.0002188*d
        elif(d > 2891.5 and d <= 5153.5):
            # fluid outer core: no shear-wave propagation
            rho[i] = 3.523060 + 2.916881e-3*d - 2.421664e-7*dSq
            vs[i] = 0.0
        elif(d > 5153.5 and d <= 6371):
            rho[i] = 4.565499 + 2.651449e-3*d - 2.080745e-7*dSq
            vs[i] = -0.8068098 + 1.405390e-3*d - 1.103537e-7*dSq
    return [rho, vs]
def prem(rv):
    """Evaluate the PREM reference Earth model at the given radii.

    rv -- array-like of radii in km measured from the Earth's center
    (note: radius-based, unlike ak135f() which is depth-based).
    Returns [rho, vs]: density (g/cm^3) and shear-wave speed (km/s) arrays.
    vs is 0 in the fluid outer core.
    """
    n = len(rv)
    rho, vs = np.zeros(n), np.zeros(n)
    for i in range(n):
        rKm = rv[i]
        # PREM polynomials are expressed in normalized radius x = r/R_earth.
        x = rKm/earthRadius;
        xSq = x*x;
        xCu = x*x*x;
        if(rKm >= 6356.0):
            rho[i] = 2.6
            vs[i] = 3.2
        elif(rKm >= 6346.6 and rKm < 6356.0):
            rho[i] = 2.9
            vs[i] = 3.9
        elif(rKm >= 6291.0 and rKm < 6346.6):
            rho[i] = 2.691 + 0.6924*x
            vs[i] = 2.1519 + 2.3481*x
        elif(rKm >= 6151.0 and rKm < 6291.0):
            rho[i] = 2.691 + 0.6924*x
            vs[i] = 2.1519 + 2.3481*x
        elif(rKm >= 5971.0 and rKm < 6151.0):
            rho[i] = 7.1089 - 3.8045*x
            vs[i] = 8.9496 - 4.4597*x
        elif(rKm >= 5771.0 and rKm < 5971.0):
            rho[i] = 11.2494 - 8.0298*x
            vs[i] = 22.3512 - 18.5856*x
        elif(rKm >= 5701.0 and rKm < 5771.0):
            rho[i] = 5.3197 - 1.4836*x
            vs[i] = 9.9839 - 4.9324*x
        elif(rKm >= 5600.0 and rKm < 5701.0):
            rho[i] = 7.9565 - 6.4761*x + 5.5283*xSq - 3.0807*xCu
            vs[i] = 22.3459 - 17.2473*x - 2.0834*xSq + 0.9783*xCu
        elif(rKm >= 3630.0 and rKm < 5600.0):
            rho[i] = 7.9565 - 6.4761*x + 5.5283*xSq - 3.0807*xCu
            vs[i] = 11.1671 - 13.7818*x + 17.4575*xSq - 9.2777*xCu
        elif(rKm >= 3480.0 and rKm < 3630.0):
            rho[i] = 7.9565 - 6.4761*x + 5.5283*xSq - 3.0807*xCu
            vs[i] = 6.9254 + 1.4672*x - 2.0834*xSq + 0.9783*xCu
        elif(rKm >= 1221.5 and rKm < 3480.0):
            # fluid outer core: no shear-wave propagation
            rho[i] = 12.5815 - 1.2638*x - 3.6426*xSq - 5.5281*xCu
            vs[i] = 0.0
        elif(rKm < 1221.5):
            rho[i] = 13.0885 - 8.8381*xSq
            vs[i] = 3.6678 - 4.4475*xSq
    return [rho, vs]
def getPrem():
    """Sample the PREM model densely from the CMB to the surface."""
    radii = np.linspace(cmbRadius, earthRadius, 50000)
    rho, vs = prem(radii)
    return [rho, vs, radii]
def getAk135f():
    """Sample the ak135-f model on a dense grid; radii are returned reversed."""
    # NOTE(review): ak135f() expects *depths*, but this grid holds radii in
    # [cmbRadius, earthRadius]; verify whether earthRadius - radii was
    # intended before relying on these values.
    radii = np.linspace(cmbRadius, earthRadius, 50000)
    rho, vs = ak135f(radii)
    return [rho, vs, np.flipud(radii)]
###############################
if __name__== "__main__":
###############################
    # Plot density and shear velocity vs radius for the chosen Earth model.
    parser = ArgumentParser()
    parser.add_argument("-model", "--model",
                        dest="model", default="empty",
                        help="Model: prem or ak135f. Must be set.")

    # parse args
    args = parser.parse_args()
    assert(args.model != "empty")

    if args.model == "prem":
        [rho, vs, rr] = getPrem()
    else:
        [rho, vs, rr] = getAk135f()

    fig = plt.figure(1)
    ax = fig.add_subplot(111)
    h1 = plt.plot(rho, rr, '-k', linewidth=1.5, label=r'$\rho$')
    h2 = plt.plot(vs, rr, '--k', linewidth=1.5, label=r'$v_s$')

    # Horizontal reference line at the core-mantle boundary.
    x = [0, 15]
    zcmb = [cmbRadius,cmbRadius]
    plt.text(7, 1600, "Core", fontsize=10)

    # Shade the mantle region (cmbRadius .. earthRadius).
    xfill = np.linspace(0, 15, 100)
    yfill1 = cmbRadius*np.ones(100)
    yfill2 = earthRadius*np.ones(100)
    ax.fill_between(xfill, yfill1, yfill2, facecolor='gray', alpha=0.4)

    #plt.text(7, cmbRadius-300, "Core-mantle boundary", fontsize=10)
    # plt.text(11.2, 500, "inner core", fontsize=10)
    # plt.text(11, 2200, "outer core", fontsize=10)

    # Model-specific annotations: the upper-mantle discontinuity radius
    # differs slightly between PREM (5701 km) and ak135-f (5711 km).
    if (args.model == 'prem'):
        zcore = [1221.5, 1221.5]
        plt.text(11, 4500, "Mantle", fontsize=10)
        plt.text(6.5, 5950, "Upper mantle and crust", fontsize=10)
        plt.plot(x, [5701, 5701], 'k--', lw=0.5)
    elif args.model == 'ak135f':
        zcore = [1217.5, 1217.5]
        plt.text(11, 4500, "Mantle", fontsize=10)
        plt.text(6.5, 5950, "Upper mantle and crust", fontsize=10)
        plt.plot(x, [5711, 5711], 'k--', lw=0.5)

    #plt.plot(x, zcore, 'k--', lw=1)
    plt.plot(x, zcmb, 'k--', lw=0.5)
    plt.plot(x, [earthRadius, earthRadius], 'k--', lw=0.5)

    ax.set_xlabel(r'$\rho~[g/cm^3]$, $v_s~[km/s]$', fontsize=15)
    ax.set_xlim(0,15)
    ax.set_xticks(np.linspace(0, 15, 16, endpoint=True))
    ax.set_ylabel(r'$r~[km]$', fontsize=15)
    ax.set_ylim(0,earthRadius+1000)
    ax.set_yticks(np.array([0, 500, 1000, 1500, 2000, 2500, 3000, cmbRadius,
                            4000, 4500, 5000, 5500, 6000, earthRadius]))
    ax.tick_params(axis='both', which='major', labelsize=11)
    plt.legend(loc="upper right", ncol=2)
    ax.set_aspect(0.0025)
    fig.savefig(args.model+".pdf", format="pdf", bbox_inches='tight', dpi=300)
    plt.show()
| 27.175926
| 76
| 0.540204
|
4a00b8cc6299d66fd36cf4132cc80486b0c90a6c
| 4,556
|
py
|
Python
|
IreneUtility/util/u_logging.py
|
MujyKun/IreneUtility
|
1790c80e220e2d8bf7901286177893e985ec4cf4
|
[
"MIT"
] | 1
|
2021-07-08T05:06:29.000Z
|
2021-07-08T05:06:29.000Z
|
IreneUtility/util/u_logging.py
|
mina9999/IreneUtility
|
bf3a4a379b6f0b27a2c9f1d7b72888beb90ba756
|
[
"MIT"
] | null | null | null |
IreneUtility/util/u_logging.py
|
mina9999/IreneUtility
|
bf3a4a379b6f0b27a2c9f1d7b72888beb90ba756
|
[
"MIT"
] | 3
|
2021-07-09T16:24:17.000Z
|
2021-11-11T02:06:51.000Z
|
from ..Base import Base
# noinspection PyBroadException,PyPep8
class Logging(Base):
def __init__(self, *args):
super().__init__(*args)
#################
# ## LOGGING ## #
#################
async def get_servers_logged(self):
"""Get the servers that are being logged."""
return [server_id for server_id in self.ex.cache.logged_channels]
async def get_channels_logged(self):
"""Get all the channels that are being logged."""
return self.ex.cache.list_of_logged_channels
async def add_to_logging(self, server_id, channel_id): # return true if status is on
"""Add a channel to be logged."""
if (self.ex.first_result(
await self.ex.conn.fetchrow("SELECT COUNT(*) FROM logging.servers WHERE serverid = $1", server_id))) == 0:
await self.ex.conn.execute(
"INSERT INTO logging.servers (serverid, channelid, status, sendall) VALUES ($1, $2, $3, $4)", server_id,
channel_id, 1, 1)
server = self.ex.cache.logged_channels.get(server_id)
if server is None:
self.ex.cache.logged_channels[server_id] = {"send_all": 1, "logging_channel": channel_id, "channels": []}
else:
self.ex.cache.list_of_logged_channels.append(channel_id)
server['channels'].append(channel_id)
else:
await self.set_logging_status(server_id, 1)
current_channel_id = self.ex.first_result(
await self.ex.conn.fetchrow("SELECT channelid FROM logging.servers WHERE serverid = $1", server_id))
if current_channel_id != channel_id:
await self.ex.conn.execute("UPDATE logging.servers SET channelid = $1 WHERE serverid = $2", channel_id,
server_id)
return True
async def check_if_logged(self, server_id=None, channel_id=None): # only one parameter should be passed in
"""Check if a server or channel is being logged."""
if channel_id:
return channel_id in self.ex.cache.list_of_logged_channels
elif server_id:
return server_id in self.ex.cache.logged_channels
async def get_send_all(self, server_id):
return (self.ex.cache.logged_channels.get(server_id))['send_all']
async def set_logging_status(self, server_id, status): # status can only be 0 or 1
"""Set a server's logging status."""
await self.ex.conn.execute("UPDATE logging.servers SET status = $1 WHERE serverid = $2", status, server_id)
if not status:
self.ex.cache.logged_channels.pop(server_id, None)
else:
logged_server = await self.ex.conn.fetchrow(
"SELECT id, serverid, channelid, sendall FROM logging.servers WHERE serverid = $1", server_id)
channels = await self.ex.conn.fetch("SELECT channelid FROM logging.channels WHERE server = $1", logged_server[0])
for channel in channels:
self.ex.cache.list_of_logged_channels.append(channel[0])
self.ex.cache.logged_channels[logged_server[1]] = {
"send_all": logged_server[3],
"logging_channel": logged_server[2],
"channels": [channel[0] for channel in channels]
}
async def get_logging_id(self, server_id):
"""Get the ID in the table of a server."""
return self.ex.first_result(
await self.ex.conn.fetchrow("SELECT id FROM logging.servers WHERE serverid = $1", server_id))
async def check_logging_requirements(self, message):
"""Check if a message meets all the logging requirements."""
try:
if not message.author.bot:
if await self.check_if_logged(server_id=message.guild.id):
if await self.check_if_logged(channel_id=message.channel.id):
return True
except:
pass
@staticmethod
async def get_attachments(message):
"""Get the attachments of a message."""
files = None
if message.attachments:
files = []
for attachment in message.attachments:
files.append(await attachment.to_file())
return files
async def get_log_channel_id(self, message):
    """Get the channel where logs are made on a server."""
    # Resolve the cached logging-channel id for this guild into a channel
    # object via the discord client.
    return self.ex.client.get_channel((self.ex.cache.logged_channels.get(message.guild.id))['logging_channel'])
# self.ex.u_logging = Logging()
| 45.56
| 125
| 0.620939
|
4a00b9ef2f10e09629ac406f42e5decdde33f18d
| 1,602
|
py
|
Python
|
src/python/grpcio_tests/tests/unit/_cython/test_utilities.py
|
4con/grpc-win-xp
|
26e73cad8721030ada9b5765bea627376ccaef9e
|
[
"Apache-2.0"
] | 91
|
2018-11-24T05:33:58.000Z
|
2022-03-16T05:58:05.000Z
|
src/python/grpcio_tests/tests/unit/_cython/test_utilities.py
|
4con/grpc-win-xp
|
26e73cad8721030ada9b5765bea627376ccaef9e
|
[
"Apache-2.0"
] | 11
|
2019-06-02T23:50:17.000Z
|
2022-02-04T23:58:56.000Z
|
src/python/grpcio_tests/tests/unit/_cython/test_utilities.py
|
4con/grpc-win-xp
|
26e73cad8721030ada9b5765bea627376ccaef9e
|
[
"Apache-2.0"
] | 18
|
2018-11-24T10:35:29.000Z
|
2021-04-22T07:22:10.000Z
|
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from grpc._cython import cygrpc
class SimpleFuture(object):
    """A minimal future: runs a callable on a background thread.

    ``result()`` blocks until completion and re-raises any exception the
    callable raised.
    """

    def __init__(self, function, *args, **kwargs):
        self._result = None
        self._error = None

        def run_target():
            try:
                self._result = function(*args, **kwargs)
            except Exception as error:  # broad capture; re-raised by result()
                self._error = error

        self._thread = threading.Thread(target=run_target)
        self._thread.start()

    def result(self):
        """Block until the callable finishes and return its value.

        Re-raises any exception raised by the callable.
        """
        self._thread.join()
        if self._error is not None:
            # TODO(atash): re-raise exceptions in a way that preserves tracebacks
            raise self._error
        return self._result
class CompletionQueuePollFuture(SimpleFuture):
    """Future that polls a gRPC completion queue until *deadline* on a thread."""

    def __init__(self, completion_queue, deadline):
        super(CompletionQueuePollFuture,
              self).__init__(lambda: completion_queue.poll(deadline=deadline))
| 30.226415
| 81
| 0.67603
|
4a00ba0f40b3fed6125d30cdb229094508293f17
| 378
|
py
|
Python
|
PythonExercicios/04-condicoes/ex041.py
|
mateusmarinho/python3-cursoemvideo
|
706d419865532e156fb80b8a873e18cb90d6e0da
|
[
"MIT"
] | null | null | null |
PythonExercicios/04-condicoes/ex041.py
|
mateusmarinho/python3-cursoemvideo
|
706d419865532e156fb80b8a873e18cb90d6e0da
|
[
"MIT"
] | null | null | null |
PythonExercicios/04-condicoes/ex041.py
|
mateusmarinho/python3-cursoemvideo
|
706d419865532e156fb80b8a873e18cb90d6e0da
|
[
"MIT"
] | null | null | null |
from datetime import date

# Read the athlete's birth year and classify them into an age category.
nasc = int(input('Digite o ano de nascimento do atleta: '))
atual = date.today().year
idade = atual - nasc
# Brackets: <=9 MIRIM, 10-14 INFANTIL, 15-19 JUNIOR, 20-25 SENIOR, >25 MASTER.
if idade <= 9:
    print('Categoria MIRIM.')
elif idade <= 14:
    print('Categoria INFANTIL.')
elif idade <= 19:
    print('Categoria JUNIOR.')
elif idade <= 25:
    print('Categoria SENIOR.')
else:
    print('Categoria MASTER.')
| 25.2
| 60
| 0.640212
|
4a00bcb30697ed50d014318a400f9d72b391c830
| 11,686
|
py
|
Python
|
cryptodoge/util/merkle_set.py
|
grayfallstown-cryptodoge/cryptodoge
|
ffeb5218ce184a56073a5dc0ac5acddba3728bd4
|
[
"Apache-2.0"
] | 10
|
2021-08-21T17:41:51.000Z
|
2022-02-09T04:28:12.000Z
|
cryptodoge/util/merkle_set.py
|
grayfallstown-cryptodoge/cryptodoge
|
ffeb5218ce184a56073a5dc0ac5acddba3728bd4
|
[
"Apache-2.0"
] | 1
|
2021-12-15T21:23:38.000Z
|
2021-12-15T21:23:38.000Z
|
cryptodoge/util/merkle_set.py
|
grayfallstown-cryptodoge/cryptodoge
|
ffeb5218ce184a56073a5dc0ac5acddba3728bd4
|
[
"Apache-2.0"
] | 2
|
2021-08-21T18:22:59.000Z
|
2021-12-10T07:12:18.000Z
|
from abc import ABCMeta, abstractmethod
from hashlib import sha256
from typing import Any, Dict, List, Tuple
from cryptodoge.types.blockchain_format.sized_bytes import bytes32
"""
A simple, confidence-inspiring Merkle Set standard
Advantages of this standard:
Low CPU requirements
Small proofs of inclusion/exclusion
Reasonably simple implementation
The main tricks in this standard are:
Skips repeated hashing of exactly two things even when they share prefix bits
Proofs support proving including/exclusion for a large number of values in
a single string. They're a serialization of a subset of the tree.
Proof format:
multiproof: subtree
subtree: middle or terminal or truncated or empty
middle: MIDDLE 1 subtree subtree
terminal: TERMINAL 1 hash 32
# If the sibling is empty truncated implies more than two children.
truncated: TRUNCATED 1 hash 32
empty: EMPTY 1
EMPTY: \x00
TERMINAL: \x01
MIDDLE: \x02
TRUNCATED: \x03
"""
EMPTY = bytes([0])
TERMINAL = bytes([1])
MIDDLE = bytes([2])
TRUNCATED = bytes([3])
BLANK = bytes([0] * 32)
prehashed: Dict[bytes, Any] = {}
def init_prehashed():
    """Precompute sha256 states for every (left-type, right-type) byte pair.

    hashdown() copies these states instead of re-hashing the 30 zero bytes
    and two type bytes on every call.
    """
    for left in [EMPTY, TERMINAL, MIDDLE]:
        for right in [EMPTY, TERMINAL, MIDDLE]:
            prehashed[left + right] = sha256(bytes(30) + left + right)


init_prehashed()
def hashdown(mystr: bytes) -> bytes:
    """Hash a middle node from its two (type byte + 32-byte hash) children."""
    assert len(mystr) == 66
    # Reuse the precomputed sha256 state for (30 zero bytes + the two type
    # bytes), then feed in the two child hashes.
    h = prehashed[bytes(mystr[0:1] + mystr[33:34])].copy()
    h.update(mystr[1:33] + mystr[34:])
    return h.digest()[:32]
def compress_root(mystr: bytes) -> bytes:
    """Collapse a 33-byte (type + hash) root into its canonical 32-byte form."""
    assert len(mystr) == 33
    if mystr[0:1] == MIDDLE:
        # Middle roots are already a plain hash.
        return mystr[1:]
    if mystr[0:1] == EMPTY:
        assert mystr[1:] == BLANK
        return BLANK
    # Terminal root: hash the full (type + hash) string.
    return sha256(mystr).digest()[:32]
def get_bit(mybytes: bytes, pos: int) -> int:
    """Return bit *pos* of a 32-byte hash (bit 0 = MSB of the first byte)."""
    assert len(mybytes) == 32
    byte_index, bit_index = divmod(pos, 8)
    return (mybytes[byte_index] >> (7 - bit_index)) & 1
class Node(metaclass=ABCMeta):
    """Abstract Merkle-set tree node (empty, terminal, middle or truncated)."""

    # 32-byte hash of the subtree rooted at this node.
    hash: bytes

    @abstractmethod
    def get_hash(self) -> bytes:
        """Return the 1-byte node-type prefix followed by the 32-byte hash."""
        pass

    @abstractmethod
    def is_empty(self) -> bool:
        pass

    @abstractmethod
    def is_terminal(self) -> bool:
        pass

    @abstractmethod
    def is_double(self) -> bool:
        """Whether this subtree contains exactly two terminal leaves."""
        pass

    @abstractmethod
    def add(self, toadd: bytes, depth: int) -> "Node":
        """Return a subtree with *toadd* inserted (persistent-style update)."""
        pass

    @abstractmethod
    def remove(self, toremove: bytes, depth: int):
        """Return a subtree with *toremove* deleted (no-op if absent)."""
        pass

    @abstractmethod
    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        """Append proof bytes to *p* and return whether *tocheck* is present."""
        pass

    @abstractmethod
    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        """Append this sibling subtree's contribution to the proof in *p*."""
        pass

    @abstractmethod
    def _audit(self, hashes: List[bytes], bits: List[int]):
        pass
class MerkleSet:
    """Mutable Merkle set wrapping a root Node; supports add/remove/prove."""

    root: Node

    def __init__(self, root: Node = None):
        # Default to the shared empty-node singleton.
        if root is None:
            self.root = _empty
        else:
            self.root = root

    def get_hash(self): pass  # placeholder removed; see methods below

    def get_root(self) -> bytes:
        """Return the canonical 32-byte root hash."""
        return compress_root(self.root.get_hash())

    def add_already_hashed(self, toadd: bytes):
        """Insert a 32-byte pre-hashed value."""
        self.root = self.root.add(toadd, 0)

    def remove_already_hashed(self, toremove: bytes):
        """Remove a 32-byte pre-hashed value (no-op if absent)."""
        self.root = self.root.remove(toremove, 0)

    def is_included_already_hashed(self, tocheck: bytes) -> Tuple[bool, bytes]:
        """Return (is_included, serialized inclusion/exclusion proof)."""
        proof: List = []
        r = self.root.is_included(tocheck, 0, proof)
        return r, b"".join(proof)

    def _audit(self, hashes: List[bytes]):
        # Sanity check: leaves must enumerate in sorted order.
        # NOTE(review): the *hashes* argument is unused -- pre-existing.
        newhashes: List = []
        self.root._audit(newhashes, [])
        assert newhashes == sorted(newhashes)
class EmptyNode(Node):
    """Node representing an empty subtree (shared via the _empty singleton)."""

    def __init__(self):
        self.hash = BLANK

    def get_hash(self) -> bytes:
        return EMPTY + BLANK

    def is_empty(self) -> bool:
        return True

    def is_terminal(self) -> bool:
        return False

    def is_double(self) -> bool:
        # An empty node should never be asked this; malformed tree.
        raise SetError()

    def add(self, toadd: bytes, depth: int) -> Node:
        # Adding to empty yields a terminal leaf.
        return TerminalNode(toadd)

    def remove(self, toremove: bytes, depth: int) -> Node:
        return self

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        p.append(EMPTY)
        return False

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        p.append(EMPTY)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        pass
_empty = EmptyNode()
class TerminalNode(Node):
    """Leaf node holding a single 32-byte value."""

    def __init__(self, hash: bytes, bits: List[int] = None):
        assert len(hash) == 32
        self.hash = hash
        # When deserializing a proof, verify the leaf sits on the path its
        # prefix bits claim.
        if bits is not None:
            self._audit([], bits)

    def get_hash(self) -> bytes:
        return TERMINAL + self.hash

    def is_empty(self) -> bool:
        return False

    def is_terminal(self) -> bool:
        return True

    def is_double(self) -> bool:
        # A lone leaf should never be asked this; malformed tree.
        raise SetError()

    def add(self, toadd: bytes, depth: int) -> Node:
        if toadd == self.hash:
            return self
        # Leaves inside a middle node must be sorted ascending.
        if toadd > self.hash:
            return self._make_middle([self, TerminalNode(toadd)], depth)
        else:
            return self._make_middle([TerminalNode(toadd), self], depth)

    def _make_middle(self, children: Any, depth: int) -> Node:
        """Build the minimal middle-node chain separating two leaves."""
        cbits = [get_bit(child.hash, depth) for child in children]
        if cbits[0] != cbits[1]:
            # Bits diverge here: one child on each side.
            return MiddleNode(children)
        # Both leaves share this bit: recurse one level deeper on that side.
        nextvals: List[Node] = [_empty, _empty]
        nextvals[cbits[0] ^ 1] = _empty  # type: ignore
        nextvals[cbits[0]] = self._make_middle(children, depth + 1)
        return MiddleNode(nextvals)

    def remove(self, toremove: bytes, depth: int) -> Node:
        if toremove == self.hash:
            return _empty
        return self

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        p.append(TERMINAL + self.hash)
        return tocheck == self.hash

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        p.append(TERMINAL + self.hash)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        hashes.append(self.hash)
        # The leaf's hash must match every prefix bit of its position.
        for pos, v in enumerate(bits):
            assert get_bit(self.hash, pos) == v
class MiddleNode(Node):
    """Interior node with exactly two children (left = bit 0, right = bit 1)."""

    def __init__(self, children: List[Node]):
        self.children = children
        # "Double" subtrees (exactly two leaves) collapse upward: skip the
        # repeated hashing and reuse the child's hash directly.
        if children[0].is_empty() and children[1].is_double():
            self.hash = children[1].hash
        elif children[1].is_empty() and children[0].is_double():
            self.hash = children[0].hash
        else:
            # Reject shapes the canonical encoding forbids.
            if children[0].is_empty() and (children[1].is_empty() or children[1].is_terminal()):
                raise SetError()
            if children[1].is_empty() and children[0].is_terminal():
                raise SetError
            if children[0].is_terminal() and children[1].is_terminal() and children[0].hash >= children[1].hash:
                raise SetError
            self.hash = hashdown(children[0].get_hash() + children[1].get_hash())

    def get_hash(self) -> bytes:
        return MIDDLE + self.hash

    def is_empty(self) -> bool:
        return False

    def is_terminal(self) -> bool:
        return False

    def is_double(self) -> bool:
        # Double means exactly two leaves somewhere below, possibly through
        # a chain of one-sided middles.
        if self.children[0].is_empty():
            return self.children[1].is_double()
        if self.children[1].is_empty():
            return self.children[0].is_double()
        return self.children[0].is_terminal() and self.children[1].is_terminal()

    def add(self, toadd: bytes, depth: int) -> Node:
        bit = get_bit(toadd, depth)
        child = self.children[bit]
        newchild = child.add(toadd, depth + 1)
        if newchild is child:
            # Value already present: reuse this node unchanged.
            return self
        newvals = [x for x in self.children]
        newvals[bit] = newchild
        return MiddleNode(newvals)

    def remove(self, toremove: bytes, depth: int) -> Node:
        bit = get_bit(toremove, depth)
        child = self.children[bit]
        newchild = child.remove(toremove, depth + 1)
        if newchild is child:
            return self
        # Collapse a middle that is left with a single leaf.
        otherchild = self.children[bit ^ 1]
        if newchild.is_empty() and otherchild.is_terminal():
            return otherchild
        if newchild.is_terminal() and otherchild.is_empty():
            return newchild
        newvals = [x for x in self.children]
        newvals[bit] = newchild
        return MiddleNode(newvals)

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        p.append(MIDDLE)
        # Descend on tocheck's side; the sibling contributes its (possibly
        # truncated) hash to the proof.
        if get_bit(tocheck, depth) == 0:
            r = self.children[0].is_included(tocheck, depth + 1, p)
            self.children[1].other_included(tocheck, depth + 1, p, not self.children[0].is_empty())
            return r
        else:
            self.children[0].other_included(tocheck, depth + 1, p, not self.children[1].is_empty())
            return self.children[1].is_included(tocheck, depth + 1, p)

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        if collapse or not self.is_double():
            p.append(TRUNCATED + self.hash)
        else:
            self.is_included(tocheck, depth, p)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        self.children[0]._audit(hashes, bits + [0])
        self.children[1]._audit(hashes, bits + [1])
class TruncatedNode(Node):
    """Proof-only node: a subtree represented solely by its hash."""

    def __init__(self, hash: bytes):
        self.hash = hash

    def get_hash(self) -> bytes:
        return MIDDLE + self.hash

    def is_empty(self) -> bool:
        return False

    def is_terminal(self) -> bool:
        return False

    def is_double(self) -> bool:
        return False

    def add(self, toadd: bytes, depth: int) -> Node:
        return self

    def remove(self, toremove: bytes, depth: int) -> Node:
        return self

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        # Cannot answer membership through a truncated branch: the proof
        # descended into data it does not contain.
        raise SetError()

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        p.append(TRUNCATED + self.hash)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        pass
class SetError(Exception):
    """Raised on malformed proofs or invalid Merkle-set tree operations."""
    pass
def confirm_included(root: Node, val: bytes, proof: bytes32) -> bool:
    """Verify that *proof* shows sha256(val) IS included under *root*.

    Bug fix: the original delegated to confirm_not_included_already_hashed,
    which verifies the opposite (exclusion) claim; the symmetric wrapper
    confirm_not_included below shows the intended pairing.
    """
    return confirm_included_already_hashed(root, sha256(val).digest(), proof)
def confirm_included_already_hashed(root: Node, val: bytes, proof: bytes32) -> bool:
    """Verify inclusion of an already-hashed 32-byte value."""
    return _confirm(root, val, proof, True)
def confirm_not_included(root: Node, val: bytes, proof: bytes32) -> bool:
    """Verify that *proof* shows sha256(val) is NOT included under *root*."""
    return confirm_not_included_already_hashed(root, sha256(val).digest(), proof)
def confirm_not_included_already_hashed(root: Node, val: bytes, proof: bytes32) -> bool:
    """Verify exclusion of an already-hashed 32-byte value."""
    return _confirm(root, val, proof, False)
def _confirm(root: Node, val: bytes, proof: bytes32, expected: bool) -> bool:
    """Shared verifier: rebuild the tree from *proof*, check its root matches
    *root*, and check that *val*'s inclusion status equals *expected*."""
    try:
        p = deserialize_proof(proof)
        if p.get_root() != root:
            return False
        r, junk = p.is_included_already_hashed(val)
        return r == expected
    except SetError:
        # Malformed proof counts as a failed verification.
        return False
def deserialize_proof(proof: bytes32) -> MerkleSet:
    """Parse a serialized proof into a (partial) MerkleSet; raise SetError if malformed."""
    try:
        r, pos = _deserialize(proof, 0, [])
        # Trailing bytes mean the proof is malformed.
        if pos != len(proof):
            raise SetError()
        return MerkleSet(r)
    except IndexError:
        raise SetError()
def _deserialize(proof: bytes32, pos: int, bits: List[int]) -> Tuple[Node, int]:
    """Recursively parse one subtree at *pos*; return (node, next position).

    *bits* is the path prefix so far, used to validate terminal leaves.
    """
    t = proof[pos : pos + 1]  # flake8: noqa
    if t == EMPTY:
        return _empty, pos + 1
    if t == TERMINAL:
        return TerminalNode(proof[pos + 1 : pos + 33], bits), pos + 33  # flake8: noqa
    if t == TRUNCATED:
        return TruncatedNode(proof[pos + 1 : pos + 33]), pos + 33  # flake8: noqa
    if t != MIDDLE:
        raise SetError()
    # MIDDLE: parse the two children in order.
    v0, pos = _deserialize(proof, pos + 1, bits + [0])
    v1, pos = _deserialize(proof, pos, bits + [1])
    return MiddleNode([v0, v1]), pos
| 29.069652
| 112
| 0.617919
|
4a00bdfd052e88877ee6d3fbf47663ec9c6c9f5b
| 1,032
|
py
|
Python
|
utils.py
|
subramanya1997/Low-Light-Single-Image-Depth-Estimation-via-Transfer-Learning
|
2a7d9c7599fc8d748339818e761f5aae19d15dc6
|
[
"MIT"
] | 1
|
2021-11-16T03:35:18.000Z
|
2021-11-16T03:35:18.000Z
|
utils.py
|
subramanya1997/Low-Light-Single-Image-Depth-Estimation-via-Transfer-Learning
|
2a7d9c7599fc8d748339818e761f5aae19d15dc6
|
[
"MIT"
] | null | null | null |
utils.py
|
subramanya1997/Low-Light-Single-Image-Depth-Estimation-via-Transfer-Learning
|
2a7d9c7599fc8d748339818e761f5aae19d15dc6
|
[
"MIT"
] | null | null | null |
import matplotlib
import matplotlib.cm
import numpy as np
def DepthNorm(depth, maxDepth=1000.0):
    """Return the reciprocal-normalized depth, i.e. maxDepth / depth."""
    normalized = maxDepth / depth
    return normalized
class AverageMeter(object):
    """Tracks the most recent value and the running mean of a series."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def colorize(value, vmin=10, vmax=1000, cmap='plasma'):
    """Map a single-channel tensor to an RGB image via a matplotlib colormap.

    value: assumed to be a torch tensor of shape (1, H, W) -- the leading
    channel is stripped by the [0, :, :] index; TODO confirm against callers.
    Returns a uint8 numpy array of shape (3, H, W).
    """
    value = value.cpu().numpy()[0, :, :]
    # normalize to 0..1 using the given (or observed) range
    vmin = value.min() if vmin is None else vmin
    vmax = value.max() if vmax is None else vmax
    if vmin != vmax:
        value = (value - vmin) / (vmax - vmin)  # vmin..vmax
    else:
        # Avoid 0-division
        value = value * 0.
    # squeeze last dim if it exists
    # value = value.squeeze(axis=0)
    cmapper = matplotlib.cm.get_cmap(cmap)
    value = cmapper(value, bytes=True)  # (n x m x 4) RGBA, uint8
    img = value[:, :, :3]
    # Drop alpha and move channels first.
    return img.transpose((2, 0, 1))
| 24
| 59
| 0.583333
|
4a00c0caf9d180ee7fc3fd97c2f303192b2e6937
| 467
|
py
|
Python
|
bindings/python/pydeck/pydeck/bindings/view.py
|
shalevy1/deck.gl
|
7b57f0c0a02a44821a3f18ba91f1786c8c166b2b
|
[
"MIT"
] | null | null | null |
bindings/python/pydeck/pydeck/bindings/view.py
|
shalevy1/deck.gl
|
7b57f0c0a02a44821a3f18ba91f1786c8c166b2b
|
[
"MIT"
] | 4
|
2021-05-09T12:05:30.000Z
|
2022-02-13T22:38:00.000Z
|
bindings/python/pydeck/pydeck/bindings/view.py
|
shalevy1/deck.gl
|
7b57f0c0a02a44821a3f18ba91f1786c8c166b2b
|
[
"MIT"
] | null | null | null |
from .json_tools import JSONMixin
class View(JSONMixin):
    """
    Represents a "hard configuration" of a camera location

    Parameters
    ---------
    type : str, default None
        deck.gl view to display, e.g., MapView
    controller : bool, default None
        If enabled, camera becomes interactive.
    """

    def __init__(self, type=None, controller=None):
        self.type = type
        self.controller = controller
| 22.238095
| 58
| 0.59743
|
4a00c0dde2de1cf3d91f828f254a96a9a8bf8dd2
| 1,625
|
py
|
Python
|
jeu_educatif/python/test_copy.py
|
Charles-Svg/Projet_L3_jeu_educatif
|
841f70f1368117288128342258f5832ca9028161
|
[
"MIT"
] | null | null | null |
jeu_educatif/python/test_copy.py
|
Charles-Svg/Projet_L3_jeu_educatif
|
841f70f1368117288128342258f5832ca9028161
|
[
"MIT"
] | null | null | null |
jeu_educatif/python/test_copy.py
|
Charles-Svg/Projet_L3_jeu_educatif
|
841f70f1368117288128342258f5832ca9028161
|
[
"MIT"
] | null | null | null |
import temp
def test():
    """Validate the student's temp.deepCopy implementation for Dossier objects.

    Checks: a non-None return, preserved element count, files copied deeply
    (no shared Fichier objects), and independence after mutating the source.
    Returns True on success, False otherwise (printing a French diagnostic).
    """
    if temp.deepCopy(temp.files.Dossier()) is None:
        print("Incorrect : aucun retour")
        return False
    fichier1 = temp.files.Fichier(1)
    fichier2 = temp.files.Fichier(2)
    fichier3 = temp.files.Fichier(3)
    fichier4 = temp.files.Fichier(4)
    fichier5 = temp.files.Fichier(5)
    repertoire = temp.files.Dossier([fichier1, fichier2, fichier3, fichier4, fichier5])
    rep2 = temp.deepCopy(repertoire)
    # Check that the copy has the right number of elements
    if rep2.nbElem() != repertoire.nbElem():
        print("Incorrect : erreur de copie")
        return False
    # Check that the contained files are distinct objects (deep, not shallow)
    for i in range(len(repertoire.fichiers)):
        if rep2.fichiers[i] == repertoire.fichiers[i]:
            print("Incorrect : erreur de copie, fichiers copiés en surface et non en profondeur")
            return False
    repertoire.supprFichier()
    # Check that deleting from the original does not affect the copy
    if rep2.nbElem() == repertoire.nbElem():
        print("Incorrect : copie en surface et non en profondeur")
        return False
    print("Copie effectuée avec succès, vous pouvez fermer cette fenêtre")
    return True
try:
    # Make sure the student actually defined a callable deepCopy(dossier)
    # before running the checks.
    if not ("deepCopy" in dir(temp) and callable(getattr(temp, 'deepCopy'))):
        print("Incorrect : fonction deepCopy(dossier) non déclarée")
        answer = False
    else:
        answer = test()
except Exception as e:
    # Surface any student-code exception as a diagnostic instead of crashing.
    print(type(e).__name__, ":", e)
    answer = False
if answer:
    # Record the exercise as completed in the save file.
    import save
    save.valider(1, save.Sauvegarde.COPIE)
| 30.092593
| 97
| 0.666462
|
4a00c2ba4813f1ec7fbe5ac7894679697159f518
| 3,668
|
py
|
Python
|
examples/zhihu/zhihu_spider.py
|
lmeetr1984/haipproxy
|
b071ea7c347ef3c72f81fc1e92f227bed0e1f8cc
|
[
"MIT"
] | null | null | null |
examples/zhihu/zhihu_spider.py
|
lmeetr1984/haipproxy
|
b071ea7c347ef3c72f81fc1e92f227bed0e1f8cc
|
[
"MIT"
] | 3
|
2021-03-31T18:48:49.000Z
|
2022-02-11T03:39:55.000Z
|
examples/zhihu/zhihu_spider.py
|
lmeetr1984/haipproxy
|
b071ea7c347ef3c72f81fc1e92f227bed0e1f8cc
|
[
"MIT"
] | null | null | null |
# the code is partially copied from https://github.com/windcode/zhihu-crawler-people
import json
import time
from multiprocessing import Pool
from bs4 import BeautifulSoup as BS
from examples.zhihu.crawler import Crawler
from haipproxy.utils import get_redis_conn
per_page = 20
info_max_process_num = 50
list_max_process_num = 10
host = 'https://www.zhihu.com'
waiting_set = 'zhihu:seeds:to_crawl'
seeds_all = 'zhihu:seeds:all'
info_set = 'zhihu:info:user'
# Not considering concurrent security
common_crawler = Crawler()
def init_db():
    """Open a Redis connection on db 1 (the crawler's state store)."""
    redis_client = get_redis_conn(db=1)
    return redis_client
def get_info(url_token):
    """Fetch a Zhihu user's profile dict, or None on any failure.

    Scrapes the embedded JSON state blob from the user's answers page and
    filters out non-'people' accounts.
    """
    url = '%s/people/%s/answers' % (host, url_token)
    html = common_crawler.get(url)
    print("parsing page's HTML……")
    if not html:
        return
    s = BS(html, 'html.parser')
    try:
        data = s.find('div', attrs={'id': 'data'})['data-state']
        data = json.loads(data)
        data = data['entities']['users'][url_token]
    except Exception:
        # Page layout changed or user missing from the blob.
        return None
    # filter data according to userType
    if data['userType'] != 'people':
        return None
    return data
def get_per_followers(url_token, page, sum_page):
    """Crawl one page of a user's follower list; return a list of url tokens."""
    print('crawling page %d/%d ……' % (page, sum_page))
    followers = list()
    url = '%s/people/%s/followers?page=%d' % (host, url_token, page)
    html = common_crawler.get(url)
    s = BS(html, 'html.parser')
    try:
        data = s.find('div', attrs={'id': 'data'})['data-state']
        data = json.loads(data)
        items = data['people']['followersByUser'][url_token]['ids']
    except (AttributeError, TypeError):
        # Blob missing or shaped differently: treat as an empty page.
        return list()
    for item in items:
        # Skip placeholder entries (None / booleans) and the anonymous user.
        if item is not None and item is not False and item is not True and item != '知乎用户':
            print(item)
            followers.append(item)
    return followers
def get_followers(url_token, follower_count):
    # get all the followers of the specified url_token
    # return [] if user has no followers
    if follower_count == 0:
        return []
    # One task per follower-list page, fetched in parallel by a process pool.
    sum_page = int((follower_count - 1) / per_page) + 1
    pool = Pool(processes=list_max_process_num)
    results = []
    for page in range(1, sum_page + 1):
        results.append(pool.apply_async(get_per_followers, (url_token, page, sum_page)))
    pool.close()
    pool.join()
    follower_list = []
    for result in results:
        follower_list += result.get()
    return follower_list
def start():
    """Process one seed from the waiting set: crawl its info and enqueue
    any followers not yet seen.
    """
    redis_client = init_db()
    while not redis_client.scard(waiting_set):
        # block if there is no seed in waiting_set
        print('no seeds in waiting set {}'.format(waiting_set))
        time.sleep(0.1)
    # fetch seeds from waiting_set
    url_token = redis_client.spop(waiting_set).decode()
    print("crawling %s's user info……" % url_token)
    user = get_info(url_token)
    # NOTE(review): get_info may return None, and sadd of a dict is not a
    # valid Redis member -- confirm intended behavior here.
    redis_client.sadd(info_set, user)
    print("crawling %s's followers list……" % url_token)
    try:
        follower_list = get_followers(url_token, user['followerCount'])
    except (TypeError, AttributeError):
        # user is None or lacks followerCount: drop this seed.
        return
    for follower in follower_list:
        if not redis_client.sismember(seeds_all, follower):
            # New user: enqueue for crawling and mark as seen atomically.
            pipe = redis_client.pipeline(False)
            pipe.sadd(waiting_set, follower)
            pipe.sadd(seeds_all, follower)
            pipe.execute()
    print("user {}'s info has being crawled".format(url_token))
if __name__ == '__main__':
    # Seed the crawl frontier with two well-known accounts, then loop forever.
    init_seeds = ['resolvewang', 'excited-vczh']
    redis_conn = init_db()
    redis_conn.sadd(waiting_set, *init_seeds)
    redis_conn.sadd(seeds_all, *init_seeds)
    while True:
        start()
| 28
| 90
| 0.652126
|
4a00c312accd12d8d5fe3c86e31339cf1863eaed
| 317
|
py
|
Python
|
flask_app/tests/system/home_test.py
|
MDRCS/Automation_testing
|
609ac8d295de86ffdd237afb407418b198576b9a
|
[
"MIT"
] | null | null | null |
flask_app/tests/system/home_test.py
|
MDRCS/Automation_testing
|
609ac8d295de86ffdd237afb407418b198576b9a
|
[
"MIT"
] | null | null | null |
flask_app/tests/system/home_test.py
|
MDRCS/Automation_testing
|
609ac8d295de86ffdd237afb407418b198576b9a
|
[
"MIT"
] | null | null | null |
from flask_app.tests.system.test_base import BaseTest
import json
class HomeTest(BaseTest):
    """System test for the root endpoint of the Flask app."""

    def test_home(self):
        # GET / must return 200 with the hello-world JSON payload.
        with self.app() as c:
            req = c.get('/')
            self.assertEqual(req.status_code, 200)
            self.assertEqual(json.loads(req.get_data()), {'message': 'Hello, world!'})
| 26.416667
| 86
| 0.62776
|
4a00c452cca927361f3450c7b5d2d72764c59b08
| 137,427
|
py
|
Python
|
destiny2data.py
|
movsesyanpv/clanBot
|
5d46ee7ee685b9606a1627c46944ee27cdd275da
|
[
"BSD-3-Clause"
] | null | null | null |
destiny2data.py
|
movsesyanpv/clanBot
|
5d46ee7ee685b9606a1627c46944ee27cdd275da
|
[
"BSD-3-Clause"
] | 245
|
2019-11-20T15:18:37.000Z
|
2022-02-07T20:57:16.000Z
|
destiny2data.py
|
movsesyanpv/clanBot
|
5d46ee7ee685b9606a1627c46944ee27cdd275da
|
[
"BSD-3-Clause"
] | 1
|
2020-09-21T17:07:56.000Z
|
2020-09-21T17:07:56.000Z
|
import json
import time
from urllib.parse import quote
import pydest
from bs4 import BeautifulSoup
from bungied2auth import BungieOAuth
from datetime import datetime, timezone, timedelta
from dateutil.parser import *
import aiohttp
import aiosqlite
import matplotlib.pyplot as plt
import csv
import codecs
import mariadb
import asyncio
import tracemalloc
class D2data:
    """Destiny 2 API client/cache used by the bot (class continues below)."""

    # Bungie API credentials and DB config loaded once from api.json.
    api_data_file = open('api.json', 'r')
    api_data = json.loads(api_data_file.read())
    destiny = ''          # pydest client, set after OAuth
    cache_db = ''         # aiosqlite connection, set in set_up_cache
    data_db = ''
    icon_prefix = "https://www.bungie.net"
    token = {}
    headers = {}          # auth headers for Bungie requests
    data = {}             # per-locale rendered embed data
    wait_codes = [1672]   # Bungie error codes worth retrying
    max_retries = 10
    # Component sets requested from the Bungie API per endpoint kind.
    vendor_params = {
        'components': '400,401,402,302,304,306,310,305'
    }
    activities_params = {
        'components': '204'
    }
    record_params = {
        "components": "900,700"
    }
    metric_params = {
        "components": "1100"
    }
    is_oauth = False
    char_info = {}        # cached membership/platform/character ids
    oauth = ''
def __init__(self, translations, lang, is_oauth, prod, context, **options):
    """Build per-locale embed data, start OAuth, and open cache/DB pools.

    translations: per-locale message strings; lang: iterable of locale codes;
    prod selects the externally reachable OAuth host.
    """
    super().__init__(**options)
    self.translations = translations
    self.is_oauth = is_oauth
    for locale in lang:
        # Base embed templates plus canned "API down/maintenance" embeds.
        self.data[locale] = json.loads(open('d2data.json', 'r').read())
        self.data[locale]['api_is_down'] = {
            'fields': [{
                'inline': True,
                'name': translations[locale]['msg']['noapi'],
                'value': translations[locale]['msg']['later']
            }],
            'color': 0xff0000,
            'type': "rich",
            'title': translations[locale]['msg']['error'],
        }
        self.data[locale]['api_maintenance'] = {
            'fields': [{
                'inline': True,
                'name': translations[locale]['msg']['maintenance'],
                'value': translations[locale]['msg']['later']
            }],
            'color': 0xff0000,
            'type': "rich",
            'title': translations[locale]['msg']['error'],
        }
    if prod:
        self.oauth = BungieOAuth(self.api_data['id'], self.api_data['secret'], context=context, host='0.0.0.0',
                                 port='4200')
    else:
        self.oauth = BungieOAuth(self.api_data['id'], self.api_data['secret'], host='localhost', port='4200')
    self.session = aiohttp.ClientSession()
    asyncio.run(self.set_up_cache())
    # MariaDB pools; ProgrammingError is swallowed when the pool already exists.
    try:
        self.cache_pool = mariadb.ConnectionPool(pool_name='cache', pool_size=10, pool_reset_connection=False,
                                                 host=self.api_data['db_host'], user=self.api_data['cache_login'],
                                                 password=self.api_data['pass'], port=self.api_data['db_port'],
                                                 database=self.api_data['cache_name'])
        # self.cache_pool.pool_reset_connection = True
    except mariadb.ProgrammingError:
        pass
    # self.cache_db.auto_reconnect = True
    try:
        self.data_pool = mariadb.ConnectionPool(pool_name='data', pool_size=10, pool_reset_connection=False,
                                                host=self.api_data['db_host'], user=self.api_data['cache_login'],
                                                password=self.api_data['pass'], port=self.api_data['db_port'],
                                                database=self.api_data['data_db'])
        # self.data_pool.pool_reset_connection = True
    except mariadb.ProgrammingError:
        pass
    # self.data_db.auto_reconnect = True
async def set_up_cache(self):
    """Open the local sqlite response cache and create its table/index once."""
    self.cache_db = await aiosqlite.connect('cache.db')
    cache_cursor = await self.cache_db.cursor()
    try:
        await cache_cursor.execute(
            '''CREATE TABLE cache (id text, expires integer, json text, timestamp text);''')
        await cache_cursor.execute('''CREATE UNIQUE INDEX cache_id ON cache(id)''')
        await self.cache_db.commit()
        await cache_cursor.close()
    except aiosqlite.OperationalError:
        # Table/index already exist on subsequent runs.
        pass
async def get_chars(self):
    """Load cached character info from char.json, or fetch it from Bungie.

    Populates self.char_info with membershipid, platform and the sorted
    character id list, and writes the result back to char.json.
    """
    platform = 0
    membership_id = ''
    try:
        char_file = open('char.json', 'r')
        self.char_info = json.loads(char_file.read())
    except FileNotFoundError:
        # No cache yet: resolve the primary membership first.
        membership_url = 'https://www.bungie.net/platform/User/GetMembershipsForCurrentUser/'
        search_resp = await self.session.get(url=membership_url, headers=self.headers)
        search_json = await search_resp.json()
        self.char_info['membershipid'] = search_json['Response']['primaryMembershipId']
        membership_id = search_json['Response']['primaryMembershipId']
        for membership in search_json['Response']['destinyMemberships']:
            if membership['membershipId'] == self.char_info['membershipid']:
                platform = membership['membershipType']
        self.char_info['platform'] = platform
        # Then fetch the profile's characters (component 200).
        char_search_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/'.format(platform, membership_id)
        char_search_params = {
            'components': '200'
        }
        char_search_resp = await self.session.get(char_search_url, params=char_search_params, headers=self.headers)
        char_search_json = await char_search_resp.json()
        chars = char_search_json['Response']['characters']['data']
        char_ids = []
        for key in sorted(chars.keys()):
            char_ids.append(chars[key]['characterId'])
        self.char_info['charid'] = char_ids
        char_file = open('char.json', 'w')
        char_file.write(json.dumps(self.char_info))
async def refresh_token(self, re_token):
    """Exchange a Bungie OAuth refresh token for a new access token.

    On success, persists the new refresh token to token.json, updates
    self.headers with the bearer token, and (re)creates the pydest client.
    """
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    params = {
        'grant_type': 'refresh_token',
        'refresh_token': re_token,
        'client_id': self.api_data['id'],
        'client_secret': self.api_data['secret']
    }
    r = await self.session.post('https://www.bungie.net/platform/app/oauth/token/', data=params, headers=headers)
    # Retry while the response status is an error, backing off 5s per try.
    while not r:
        # NOTE(review): r.json() here is an un-awaited coroutine; json.dumps
        # on it will raise TypeError -- pre-existing bug, confirm and fix.
        print("re_token get error", json.dumps(r.json(), indent=4, sort_keys=True) + "\n")
        r = await self.session.post('https://www.bungie.net/platform/app/oauth/token/', data=params,
                                    headers=headers)
        if not r:
            r_json = await r.json()
            # Only keep retrying on game-server throttling.
            if not r_json['error_description'] == 'DestinyThrottledByGameServer':
                break
        await asyncio.sleep(5)
    if not r:
        r_json = await r.json()
        print("re_token get error", json.dumps(r_json, indent=4, sort_keys=True) + "\n")
        return
    resp = await r.json()
    try:
        token = {
            'refresh': resp['refresh_token'],
            'expires': time.time() + resp['refresh_expires_in']
        }
        token_file = open('token.json', 'w')
        token_file.write(json.dumps(token))
        self.headers = {
            'X-API-Key': self.api_data['key'],
            'Authorization': 'Bearer ' + resp['access_token']
        }
    except KeyError:
        # Response did not carry tokens; keep the previous headers.
        pass
    self.destiny = pydest.Pydest(self.api_data['key'])
async def get_bungie_json(self, name, url, params=None, lang=None, string=None, change_msg=True, is_get=True, body=None):
    """Fetch a Bungie API endpoint with retries and error-embed fallbacks.

    Returns the parsed JSON on success, False on failure. On failure, when
    change_msg is True, replaces self.data[locale][name] with the canned
    'api_is_down' / 'api_maintenance' embed for every requested locale.
    """
    if lang is None:
        lang = list(self.data.keys())
        lang_str = ''
    else:
        lang_str = lang
    if string is None:
        string = str(name)
    # Initial request (GET or POST with a JSON body).
    try:
        if is_get:
            resp = await self.session.get(url, params=params, headers=self.headers)
        else:
            resp = await self.session.post(url, params=params, headers=self.headers, json=body)
    except:
        if change_msg:
            for locale in lang:
                self.data[locale][name] = self.data[locale]['api_is_down']
        return False
    # Extract Bungie's ErrorCode; tolerate non-JSON bodies.
    try:
        resp_code = await resp.json()
        resp_code = resp_code['ErrorCode']
    except KeyError:
        resp_code = 1
    except json.decoder.JSONDecodeError:
        if change_msg:
            for locale in lang:
                self.data[locale][name] = self.data[locale]['api_is_down']
        resp.close()
        return False
    except aiohttp.ContentTypeError:
        if change_msg:
            for locale in lang:
                self.data[locale][name] = self.data[locale]['api_is_down']
        resp.close()
        return False
    print('getting {} {}'.format(string, lang_str))
    # Retry loop for throttle-class error codes (self.wait_codes).
    curr_try = 2
    while resp_code in self.wait_codes and curr_try <= self.max_retries:
        print('{}, attempt {}'.format(resp_code, curr_try))
        resp = await self.session.get(url, params=params, headers=self.headers)
        try:
            resp_code = await resp.json()
            resp_code = resp_code['ErrorCode']
        except aiohttp.ContentTypeError:
            resp_code = 1672
        if resp_code == 5:
            # Maintenance: show the maintenance embed and keep retrying.
            if change_msg:
                for locale in lang:
                    self.data[locale][name] = self.data[locale]['api_maintenance']
            curr_try -= 1
        curr_try += 1
        await asyncio.sleep(5)
    if not resp:
        # HTTP-level failure: classify by ErrorCode where possible.
        try:
            resp_code = await resp.json()
        except aiohttp.ContentTypeError:
            if change_msg:
                for locale in lang:
                    self.data[locale][name] = self.data[locale]['api_is_down']
            resp.close()
            return False
        resp_code = resp_code['ErrorCode']
        if resp_code in [5, 1618]:
            if change_msg:
                for locale in lang:
                    self.data[locale][name] = self.data[locale]['api_maintenance']
            resp.close()
            return False
        # NOTE(review): resp.json() below is an un-awaited coroutine; the
        # json.dumps call will raise TypeError -- pre-existing bug.
        print("{} get error".format(name), json.dumps(resp.json(), indent=4, sort_keys=True) + "\n")
        if change_msg:
            for locale in lang:
                self.data[locale][name] = self.data[locale]['api_is_down']
        resp.close()
        return False
    else:
        try:
            resp_code = await resp.json()
        except aiohttp.ContentTypeError:
            if change_msg:
                for locale in lang:
                    self.data[locale][name] = self.data[locale]['api_is_down']
            resp.close()
            return False
        if 'ErrorCode' in resp_code.keys():
            resp_code = resp_code['ErrorCode']
            if resp_code == 5:
                if change_msg:
                    for locale in lang:
                        self.data[locale][name] = self.data[locale]['api_maintenance']
                resp.close()
                return False
        else:
            # Some endpoints (season manifests) lack ErrorCode; return raw.
            for suspected_season in resp_code:
                if 'seasonNumber' in resp_code[suspected_season].keys():
                    resp.close()
                    return resp_code
    resp_code = await resp.json()
    if 'Response' not in resp_code.keys():
        if change_msg:
            for locale in lang:
                self.data[locale][name] = self.data[locale]['api_is_down']
        resp.close()
        return False
    resp_json = await resp.json()
    resp.close()
    return resp_json
async def get_vendor_sales(self, lang, vendor_resp, cats, exceptions=[]):
embed_sales = []
data_sales = []
vendor_json = vendor_resp
tess_sales = vendor_json['Response']['sales']['data']
n_order = 0
for key in cats:
item = tess_sales[str(key)]
item_hash = item['itemHash']
if item_hash not in exceptions:
definition = 'DestinyInventoryItemDefinition'
item_resp = await self.destiny.decode_hash(item_hash, definition, language=lang)
item_name_list = item_resp['displayProperties']['name'].split()
item_name = ' '.join(item_name_list)
costs = []
if len(item['costs']) > 0:
cost_line = '{}: '.format(self.translations[lang]['msg']['cost'])
for cost in item['costs']:
currency = cost
currency_resp = await self.destiny.decode_hash(currency['itemHash'], definition, language=lang)
currency_cost = str(currency['quantity'])
currency_item = currency_resp['displayProperties']['name']
currency_icon = currency_resp['displayProperties']['icon']
cost_line = '{}{} {}\n'.format(cost_line, currency_cost, currency_item.capitalize())
costs.append({
'currency_name': currency_item,
'currency_icon': currency_icon,
'cost': currency_cost
})
else:
currency_cost = 'N/A\n'
currency_item = ''
currency_icon = ''
cost_line = currency_cost
costs.append({
'currency_name': currency_item,
'currency_icon': currency_icon,
'cost': currency_cost
})
if 'screenshot' in item_resp.keys():
screenshot = '<img alt="Screenshot" class="screenshot_hover" src="https://bungie.net{}" ' \
'loading="lazy">'.format(item_resp['screenshot'])
else:
screenshot = ''
stats = []
perks = []
if 'itemComponents' in vendor_json['Response']:
if str(item['vendorItemIndex']) in vendor_json['Response']['itemComponents']['stats']['data'].keys():
stats_json = \
vendor_json['Response']['itemComponents']['stats']['data'][str(item['vendorItemIndex'])]['stats']
for stat in stats_json:
value = stats_json[stat]['value']
if value == 0:
continue
stat_def = await self.destiny.decode_hash(stats_json[stat]['statHash'], 'DestinyStatDefinition',
language=lang)
stats.append({
'name': stat_def['displayProperties']['name'],
'value': stats_json[stat]['value']
})
if str(item['vendorItemIndex']) in vendor_json['Response']['itemComponents']['perks']['data'].keys():
try:
plugs_json = vendor_json['Response']['itemComponents']['reusablePlugs']['data'][
str(item['vendorItemIndex'])]['plugs']
plug_str = 'plugItemHash'
except KeyError:
plugs_json = \
vendor_json['Response']['itemComponents']['sockets']['data'][str(item['vendorItemIndex'])][
'sockets']
plug_str = 'plugHash'
for perk in plugs_json:
plug = []
if type(perk) == str:
perk_list = plugs_json[perk]
elif type(perk) == dict:
perk_list = [perk]
else:
raise TypeError
for perk_dict in perk_list:
if plug_str in perk_dict.keys():
perk_def = await self.destiny.decode_hash(perk_dict[plug_str],
'DestinyInventoryItemDefinition',
language=lang)
if 'name' in perk_def['displayProperties'].keys() and 'icon' in perk_def[
'displayProperties'].keys():
plug.append({
'name': perk_def['displayProperties']['name'],
'icon': 'https://bungie.net{}'.format(perk_def['displayProperties']['icon'])
})
perks.append(plug)
cost_line = cost_line[:-1]
item_data = {
'inline': True,
'name': item_name.capitalize(),
'value': cost_line
}
data_sales.append({
'id': '{}_{}_{}'.format(item['itemHash'], key, n_order),
'icon': item_resp['displayProperties']['icon'],
'name': item_name.capitalize(),
'description': "{}: {} {}".format('Цена', currency_cost,
currency_item.capitalize()),
'tooltip_id': '{}_{}_{}_tooltip'.format(item['itemHash'], key, n_order),
'hash': item['itemHash'],
'screenshot': screenshot,
'costs': costs,
'stats': stats,
'perks': perks
})
embed_sales.append(item_data)
n_order += 1
return [embed_sales, data_sales]
async def get_featured_bd(self, langs, forceget=False):
tess_resp = []
for char in self.char_info['charid']:
tess_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/Vendors/3361454721/'. \
format(self.char_info['platform'], self.char_info['membershipid'], char)
resp = await self.get_cached_json('eververse_{}'.format(char), 'featured_bd', tess_url, self.vendor_params,
string='featured bright dust for {}'.format(char), force=forceget)
if not resp:
return
tess_resp.append(resp)
resp_time = resp['timestamp']
for lang in langs:
tess_def = await self.destiny.decode_hash(3361454721, 'DestinyVendorDefinition', language=lang)
self.data[lang]['featured_bd'] = {
'thumbnail': {
'url': self.icon_prefix + '/common/destiny2_content/icons/30c6cc828d7753bcca72748ba2aa83d6.png'
},
'fields': [],
'color': 0x38479F,
'type': "rich",
'title': self.translations[lang]['msg']['featured_bd'],
'footer': {'text': self.translations[lang]['msg']['resp_time']}
}
tmp_fields = []
for resp in tess_resp:
resp_json = resp
tess_cats = resp_json['Response']['categories']['data']['categories']
items_to_get = tess_cats[3]['itemIndexes']
sales = await self.get_vendor_sales(lang, resp, items_to_get,
[353932628, 3260482534, 3536420626, 3187955025,
2638689062])
tmp_fields = tmp_fields + sales[0]
await self.write_to_db(lang, 'featured_bright_dust_items', sales[1])
for i in range(0, len(tmp_fields)):
if tmp_fields[i] not in tmp_fields[i + 1:]:
self.data[lang]['featured_bd']['fields'].append(tmp_fields[i])
self.data[lang]['featured_bd']['timestamp'] = resp_time
async def get_bd(self, langs, forceget=False):
tess_resp = []
for char in self.char_info['charid']:
tess_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/Vendors/3361454721/'. \
format(self.char_info['platform'], self.char_info['membershipid'], char)
resp = await self.get_cached_json('eververse_{}'.format(char), 'bd', tess_url, self.vendor_params,
string='bright dust for {}'.format(char), force=forceget)
if not resp:
return
tess_resp.append(resp)
resp_time = resp['timestamp']
for lang in langs:
tess_def = await self.destiny.decode_hash(3361454721, 'DestinyVendorDefinition', language=lang)
self.data[lang]['bd'] = {
'thumbnail': {
'url': self.icon_prefix + '/common/destiny2_content/icons/30c6cc828d7753bcca72748ba2aa83d6.png'
},
'fields': [],
'color': 0x38479F,
'type': "rich",
'title': self.translations[lang]['msg']['bd'],
'footer': {'text': self.translations[lang]['msg']['resp_time']}
}
tmp_fields = []
for resp in tess_resp:
resp_json = resp
tess_cats = resp_json['Response']['categories']['data']['categories']
items_to_get = tess_cats[8]['itemIndexes'] + tess_cats[10]['itemIndexes']
sales = await self.get_vendor_sales(lang, resp, items_to_get,
[353932628, 3260482534, 3536420626, 3187955025,
2638689062])
tmp_fields = tmp_fields + sales[0]
await self.write_to_db(lang, 'bright_dust_items', sales[1])
for i in range(0, len(tmp_fields)):
if tmp_fields[i] not in tmp_fields[i + 1:]:
self.data[lang]['bd']['fields'].append(tmp_fields[i])
self.data[lang]['bd']['timestamp'] = resp_time
async def get_featured_silver(self, langs, forceget=False):
tess_resp = []
for char in self.char_info['charid']:
tess_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/Vendors/3361454721/'. \
format(self.char_info['platform'], self.char_info['membershipid'], char)
resp = await self.get_cached_json('eververse_{}'.format(char), 'silver', tess_url, self.vendor_params,
string='silver for {}'.format(char), force=forceget)
if not resp:
return
tess_resp.append(resp)
resp_time = resp['timestamp']
for lang in langs:
tess_def = await self.destiny.decode_hash(3361454721, 'DestinyVendorDefinition', language=lang)
self.data[lang]['silver'] = {
'thumbnail': {
'url': self.icon_prefix + '/common/destiny2_content/icons/30c6cc828d7753bcca72748ba2aa83d6.png'
},
'fields': [],
'color': 0x38479F,
'type': "rich",
'title': self.translations[lang]['msg']['silver'],
'footer': {'text': self.translations[lang]['msg']['resp_time']}
}
tmp_fields = []
for resp in tess_resp:
resp_json = resp
tess_cats = resp_json['Response']['categories']['data']['categories']
items_to_get = tess_cats[2]['itemIndexes']
sales = await self.get_vendor_sales(lang, resp, items_to_get, [827183327])
tmp_fields = tmp_fields + sales[0]
await self.write_to_db(lang, 'featured_silver', sales[1])
for i in range(0, len(tmp_fields)):
if tmp_fields[i] not in tmp_fields[i + 1:]:
self.data[lang]['silver']['fields'].append(tmp_fields[i])
self.data[lang]['silver']['timestamp'] = resp_time
async def get_global_alerts(self, langs, forceget=False):
alert_url = 'https://www.bungie.net/Platform/GlobalAlerts/'
alert_json = await self.get_bungie_json('alerts', alert_url, {}, '')
if not alert_json:
return
for lang in langs:
self.data[lang]['alerts'].clear()
for alert in alert_json['Response']:
alert_embed = {
'color': 0xff0000,
'type': "rich",
'description': alert['AlertHtml'],
'timestamp': '{}+00:00'.format(alert['AlertTimestamp'][:-1]),
'author': {
'name': 'Bungie Help',
'url': alert['AlertLink'],
'icon_url': 'https://pbs.twimg.com/profile_images/887332604143312896/ydVDSfjE_400x400.jpg'
}
}
self.data[lang]['alerts'].append(alert_embed)
async def get_season_start(self):
manifest_url = 'https://www.bungie.net/Platform/Destiny2/Manifest/'
manifest_json = await self.get_bungie_json('default', manifest_url, {}, '')
season_url = 'https://www.bungie.net{}'.format(
manifest_json['Response']['jsonWorldComponentContentPaths']['en']['DestinySeasonDefinition'])
season_json = await self.get_bungie_json('default', season_url, {}, '')
for season in season_json:
try:
start = isoparse(season_json[season]['startDate'])
end = isoparse(season_json[season]['endDate'])
if start <= datetime.now(tz=timezone.utc) <= end:
current_season = season
return start
except KeyError:
if 'startDate' in season_json[season].keys() and 'endDate' not in season_json[season].keys():
return isoparse(season_json[season]['startDate'])
pass
    async def get_seasonal_featured_bd(self, langs, start):
        """Split Eververse's manifest item list into weekly batches of
        featured bright-dust offers (display category 2).

        A new week is cut once n_items >= 4 and the class-adjusted count
        n_items - class_items / 3 * 2 >= 4 (class-specific items are
        discounted, since one armor piece exists per class).

        Args:
            langs: locale codes; NOTE(review) — ``bd`` is not reset per
                locale, so with multiple langs all locales' weeks land in one
                list. get_weekly_eververse calls this with a single locale.
            start: season start; unused here, kept for a uniform signature.

        Returns:
            list of per-week item-dict lists for the site templates.
            NOTE(review): a trailing partial week is never appended.
        """
        # 3361454721 — the same Eververse vendor hash used by the live getters
        tess_def = await self.destiny.decode_hash(3361454721, 'DestinyVendorDefinition')
        bd = []
        for lang in langs:
            classnames = self.translations[lang]['classnames']
            n_items = 0
            curr_week = []
            i_week = 1
            class_items = 0
            n_order = 0
            for i, item in enumerate(tess_def['itemList']):
                # week boundary: flush the accumulated items
                if n_items >= 4 and n_items - class_items / 3 * 2 >= 4:
                    i_week = i_week + 1
                    bd.append(list.copy(curr_week))
                    n_items = 0
                    curr_week = []
                    class_items = 0
                # same currency/filler exclusion list as get_featured_bd
                if item['displayCategoryIndex'] == 2 and item['itemHash'] not in [353932628, 3260482534, 3536420626,
                                                                                  3187955025, 2638689062]:
                    definition = 'DestinyInventoryItemDefinition'
                    item_def = await self.destiny.decode_hash(item['itemHash'], definition, language=lang)
                    if len(item['currencies']) > 0:
                        currency_resp = await self.destiny.decode_hash(item['currencies'][0]['itemHash'], definition,
                                                                       language=lang)
                    else:
                        # no cost listed: substitute empty placeholders
                        currency_resp = {'displayProperties': {'icon': '', 'name': ''}}
                        item['currencies'] = [{'quantity': ''}]
                    cat_number = 2
                    if 'screenshot' in item_def.keys():
                        screenshot = '<img alt="Screenshot" class="screenshot_hover" src="https://bungie.net{}"' \
                                     'loading="lazy">'.format(item_def['screenshot'])
                    else:
                        screenshot = ''
                    curr_week.append({
                        'id': '{}_{}_{}'.format(item['itemHash'], cat_number, n_order),
                        'icon': item_def['displayProperties']['icon'],
                        'tooltip_id': '{}_{}_{}_tooltip'.format(item['itemHash'], cat_number, n_order),
                        'hash': item['itemHash'],
                        'name': item_def['displayProperties']['name'],
                        'screenshot': screenshot,
                        'costs': [
                            {
                                'currency_icon': currency_resp['displayProperties']['icon'],
                                'cost': item['currencies'][0]['quantity'],
                                'currency_name': currency_resp['displayProperties']['name']
                            }]
                    })
                    n_order += 1
                    n_items = n_items + 1
                    # count class-locked items (classType < 3) or items whose
                    # type name mentions a class, for the week-size adjustment
                    if item_def['classType'] < 3 or any(
                            class_name in item_def['itemTypeDisplayName'].lower() for class_name in classnames):
                        class_items = class_items + 1
        return bd
    async def get_seasonal_bd(self, langs, start):
        """Split Eververse's manifest item list into weekly batches of
        bright-dust offers (display category 8); weeks of 7 with the same
        class-item adjustment as get_seasonal_featured_bd.

        Args:
            langs: locale codes; NOTE(review) — ``bd`` is not reset per
                locale (see get_seasonal_featured_bd); callers pass one locale.
            start: season start; unused here, kept for a uniform signature.

        Returns:
            list of per-week item-dict lists for the site templates.
            NOTE(review): a trailing partial week is never appended.
        """
        tess_def = await self.destiny.decode_hash(3361454721, 'DestinyVendorDefinition')
        bd = []
        for lang in langs:
            classnames = self.translations[lang]['classnames']
            n_items = 0
            curr_week = []
            i_week = 1
            class_items = 0
            n_order = 0
            for i, item in enumerate(tess_def['itemList']):
                # week boundary: flush the accumulated items
                if n_items >= 7 and n_items - class_items / 3 * 2 >= 7:
                    i_week = i_week + 1
                    bd.append(list.copy(curr_week))
                    n_items = 0
                    curr_week = []
                    class_items = 0
                # same currency/filler exclusion list as the live bd getter
                if item['displayCategoryIndex'] == 8 and item['itemHash'] not in [353932628, 3260482534, 3536420626,
                                                                                  3187955025, 2638689062]:
                    definition = 'DestinyInventoryItemDefinition'
                    item_def = await self.destiny.decode_hash(item['itemHash'], definition, language=lang)
                    if len(item['currencies']) > 0:
                        currency_resp = await self.destiny.decode_hash(item['currencies'][0]['itemHash'], definition,
                                                                       language=lang)
                    else:
                        # no cost listed: substitute empty placeholders
                        currency_resp = {'displayProperties': {'icon': '', 'name': ''}}
                        item['currencies'] = [{'quantity': ''}]
                    cat_number = 8
                    if 'screenshot' in item_def.keys():
                        screenshot = '<img alt="Screenshot" class="screenshot_hover" src="https://bungie.net{}" ' \
                                     'loading="lazy">'.format(item_def['screenshot'])
                    else:
                        screenshot = ''
                    curr_week.append({
                        'id': '{}_{}_{}'.format(item['itemHash'], cat_number, n_order),
                        'icon': item_def['displayProperties']['icon'],
                        'tooltip_id': '{}_{}_{}_tooltip'.format(item['itemHash'], cat_number, n_order),
                        'hash': item['itemHash'],
                        'name': item_def['displayProperties']['name'],
                        'screenshot': screenshot,
                        'costs': [
                            {
                                'currency_icon': currency_resp['displayProperties']['icon'],
                                'cost': item['currencies'][0]['quantity'],
                                'currency_name': currency_resp['displayProperties']['name']
                            }]
                    })
                    n_order += 1
                    n_items = n_items + 1
                    # class-locked or class-named items feed the adjustment
                    if item_def['classType'] < 3 or any(
                            class_name in item_def['itemTypeDisplayName'].lower() for class_name in classnames):
                        class_items = class_items + 1
        return bd
    async def get_seasonal_featured_silver(self, langs, start):
        """Split Eververse's manifest item list into weekly batches of
        featured silver offers (display category 1, excluding categoryIndex
        37); weeks of 5 with the same class-item adjustment as the other
        seasonal getters.

        Args:
            langs: locale codes; NOTE(review) — ``bd`` is not reset per
                locale (see get_seasonal_featured_bd); callers pass one locale.
            start: season start; unused here, kept for a uniform signature.

        Returns:
            list of per-week item-dict lists for the site templates.
            NOTE(review): a trailing partial week is never appended.
        """
        tess_def = await self.destiny.decode_hash(3361454721, 'DestinyVendorDefinition')
        bd = []
        for lang in langs:
            classnames = self.translations[lang]['classnames']
            n_items = 0
            curr_week = []
            i_week = 1
            class_items = 0
            n_order = 0
            for i, item in enumerate(tess_def['itemList']):
                # week boundary: flush the accumulated items
                if n_items >= 5 and n_items - class_items / 3 * 2 >= 5:
                    i_week = i_week + 1
                    bd.append(list.copy(curr_week))
                    n_items = 0
                    curr_week = []
                    class_items = 0
                if item['displayCategoryIndex'] == 1 and item['categoryIndex'] != 37:
                    definition = 'DestinyInventoryItemDefinition'
                    item_def = await self.destiny.decode_hash(item['itemHash'], definition, language=lang)
                    if len(item['currencies']) > 0:
                        currency_resp = await self.destiny.decode_hash(item['currencies'][0]['itemHash'], definition,
                                                                       language=lang)
                    else:
                        # no cost listed: substitute empty placeholders
                        currency_resp = {'displayProperties': {'icon': '', 'name': ''}}
                        item['currencies'] = [{'quantity': ''}]
                    # NOTE(review): cat_number is 2 here although the filter is
                    # display category 1 — looks copied from the featured-bd
                    # variant; ids/tooltips still stay unique per item, confirm
                    cat_number = 2
                    if 'screenshot' in item_def.keys():
                        screenshot = '<img alt="Screenshot" class="screenshot_hover" src="https://bungie.net{}"' \
                                     'loading="lazy">'.format(item_def['screenshot'])
                    else:
                        screenshot = ''
                    curr_week.append({
                        'id': '{}_{}_{}'.format(item['itemHash'], cat_number, n_order),
                        'icon': item_def['displayProperties']['icon'],
                        'tooltip_id': '{}_{}_{}_tooltip'.format(item['itemHash'], cat_number, n_order),
                        'hash': item['itemHash'],
                        'name': item_def['displayProperties']['name'],
                        'screenshot': screenshot,
                        'costs': [
                            {
                                'currency_icon': currency_resp['displayProperties']['icon'],
                                'cost': item['currencies'][0]['quantity'],
                                'currency_name': currency_resp['displayProperties']['name']
                            }]
                    })
                    n_order += 1
                    n_items = n_items + 1
                    # class-locked or class-named items feed the adjustment
                    if item_def['classType'] < 3 or any(
                            class_name in item_def['itemTypeDisplayName'].lower() for class_name in classnames):
                        class_items = class_items + 1
        return bd
async def get_weekly_eververse(self, langs):
data = []
start = await self.get_season_start()
week_n = datetime.now(tz=timezone.utc) - await self.get_season_start()
week_n = int(week_n.days / 7) - 15
for lang in langs:
data.clear()
bd = await self.get_seasonal_bd([lang], start)
featured_bd = await self.get_seasonal_featured_bd([lang], start)
# await self.get_seasonal_consumables(langs, start)
silver = await self.get_seasonal_featured_silver([lang], start)
for i in range(0, len(bd)):
data.append({
'items': [*bd[i]]
})
if len(bd) == len(featured_bd):
for i in range(0, len(bd)):
data[i]['items'] = [*data[i]['items'], *featured_bd[i]]
if len(bd) == len(silver):
for i in range(0, len(bd)):
data[i]['items'] = [*data[i]['items'], *silver[i]]
await self.write_to_db(lang, 'weekly_eververse', data[week_n]['items'],
name=self.translations[lang]['site']['bd'],
template='hover_items.html', order=0, type='weekly', size='tall')
async def write_to_db(self, lang, id, response, size='', name='', template='table_items.html', order=0,
type='daily', annotations=[]):
while True:
try:
data_db = self.data_pool.get_connection()
data_db.auto_reconnect = True
break
except mariadb.PoolError:
try:
self.data_pool.add_connection()
except mariadb.PoolError:
pass
await asyncio.sleep(0.125)
data_cursor = data_db.cursor()
try:
data_cursor.execute('''CREATE TABLE `{}` (id text, timestamp_int integer, json json, timestamp text, size text, name text, template text, place integer, type text, annotations text)'''.format(lang))
data_cursor.execute('''CREATE UNIQUE INDEX `data_id_{}` ON `{}`(id(256))'''.format(lang, lang))
except mariadb.Error:
pass
try:
data_cursor.execute('''INSERT IGNORE INTO `{}` VALUES (?,?,?,?,?,?,?,?,?,?)'''.format(lang),
(id, datetime.utcnow().timestamp(), json.dumps({'data': response}),
datetime.utcnow().isoformat(), size, name, template, order, type, str(annotations)))
data_db.commit()
except mariadb.Error:
pass
try:
data_cursor.execute('''UPDATE `{}` SET timestamp_int=?, json=?, timestamp=?, name=?, size=?, template=?, place=?, type=?, annotations=? WHERE id=?'''.format(lang),
(datetime.utcnow().timestamp(), json.dumps({'data': response}),
datetime.utcnow().isoformat(), name, size, template, order, type, str(annotations), id))
data_db.commit()
except mariadb.Error:
pass
data_cursor.close()
data_db.close()
    async def get_spider(self, lang, forceget=False):
        """Build Spider's (vendor 863940356) material-exchange embed and site
        record for every requested locale.

        Args:
            lang: iterable of locale codes (despite the singular name).
            forceget: bypass the response cache when True.

        Returns:
            False on API failure (a fallback record is written), else None.
        """
        char_info = self.char_info
        spider_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/Vendors/863940356/'. \
            format(char_info['platform'], char_info['membershipid'], char_info['charid'][0])
        spider_resp = await self.get_cached_json('spider', 'spider', spider_url, self.vendor_params, force=forceget)
        if not spider_resp:
            for locale in lang:
                # NOTE(review): reuses the previous embed's first field as the
                # description — raises if self.data was never populated; confirm
                db_data = {
                    'name': self.translations[locale]['msg']['spider'],
                    'description': self.data[locale]['spider']['fields'][0]['value']
                }
                await self.write_to_db(locale, 'spider_mats', [db_data],
                                       name=self.translations[locale]['site']['spider'])
            return False
        spider_json = spider_resp
        spider_cats = spider_json['Response']['categories']['data']['categories']
        resp_time = spider_json['timestamp']
        for locale in lang:
            spider_def = await self.destiny.decode_hash(863940356, 'DestinyVendorDefinition', language=locale)
            self.data[locale]['spider'] = {
                'thumbnail': {
                    'url': self.icon_prefix + spider_def['displayProperties']['smallTransparentIcon']
                },
                'fields': [],
                'color': 7102001,
                'type': "rich",
                'title': self.translations[locale]['msg']['spider'],
                'footer': {'text': self.translations[locale]['msg']['resp_time']},
                'timestamp': resp_time
            }
            # category 0 holds the material exchange; 1812969468 is excluded
            items_to_get = spider_cats[0]['itemIndexes']
            spider_sales = await self.get_vendor_sales(locale, spider_resp, items_to_get, [1812969468])
            self.data[locale]['spider']['fields'] = self.data[locale]['spider']['fields'] + spider_sales[0]
            data = spider_sales[1]
            await self.write_to_db(locale, 'spider_mats', data, name=self.translations[locale]['site']['spider'],
                                   order=0, size='tall')
async def get_banshee(self, lang, forceget=False):
char_info = self.char_info
cat_templates = {
'6': 'contract_item.html',
'0': 'weapon_item.html',
'4': 'armor_item.html'
}
banshee_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/Vendors/672118013/'. \
format(char_info['platform'], char_info['membershipid'], char_info['charid'][0])
banshee_resp = await self.get_cached_json('banshee', 'banshee', banshee_url, self.vendor_params, force=forceget)
if not banshee_resp:
for locale in lang:
banshee_def = await self.destiny.decode_hash(672118013, 'DestinyVendorDefinition', language=locale)
db_data = {
'name': self.translations[locale]['msg']['error'],
'description': self.translations[locale]['msg']['noapi']
}
await self.write_to_db(locale, 'spider_mats', [db_data], name=banshee_def['displayProperties']['name'])
return False
banshee_json = banshee_resp
banshee_cats = banshee_json['Response']['categories']['data']['categories']
resp_time = banshee_json['timestamp']
for locale in lang:
banshee_def = await self.destiny.decode_hash(672118013, 'DestinyVendorDefinition', language=locale)
# self.data[locale]['spider'] = {
# 'thumbnail': {
# 'url': self.icon_prefix + banshee_def['displayProperties']['smallTransparentIcon']
# },
# 'fields': [],
# 'color': 7102001,
# 'type': "rich",
# 'title': self.translations[locale]['msg']['spider'],
# 'footer': {'text': self.translations[locale]['msg']['resp_time']},
# 'timestamp': resp_time
# }
items_to_get = banshee_cats[3]['itemIndexes']
sales = []
banshee_sales = await self.get_vendor_sales(locale, banshee_resp, items_to_get, [1812969468])
# self.data[locale]['spider']['fields'] = self.data[locale]['spider']['fields'] + banshee_sales[0]
sales.append({'name': "", "items": banshee_sales[1], "template": cat_templates['6']})
items_to_get = banshee_cats[4]['itemIndexes']
banshee_sales = await self.get_vendor_sales(locale, banshee_resp, items_to_get, [1812969468])
sales.append({'name': "", "items": banshee_sales[1], "template": cat_templates['0']})
await self.write_to_db(locale, 'banshee_mods', sales, name=banshee_def['displayProperties']['name'], order=5,
template='vendor_items.html', annotations=[])
# size='tall')
async def get_ada(self, lang, forceget=False):
char_info = self.char_info
cat_templates = {
'6': 'contract_item.html',
'0': 'weapon_item.html',
'4': 'armor_item.html'
}
ada_resps = []
for char in char_info['charid']:
ada_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/Vendors/350061650/'. \
format(char_info['platform'], char_info['membershipid'], char)
ada_resps.append(await self.get_cached_json('ada_{}'.format(char), 'ada', ada_url, self.vendor_params, force=forceget))
for ada_resp in ada_resps:
if not ada_resp:
for locale in lang:
ada_def = await self.destiny.decode_hash(350061650, 'DestinyVendorDefinition', language=locale)
db_data = {
'name': self.translations[locale]['msg']['error'],
'description': self.translations[locale]['msg']['noapi']
}
await self.write_to_db(locale, 'spider_mats', [db_data], name=ada_def['displayProperties']['name'])
return False
ada_json = ada_resps[0]
ada_cats = ada_json['Response']['categories']['data']['categories']
resp_time = ada_json['timestamp']
for locale in lang:
ada_def = await self.destiny.decode_hash(350061650, 'DestinyVendorDefinition', language=locale)
# self.data[locale]['spider'] = {
# 'thumbnail': {
# 'url': self.icon_prefix + banshee_def['displayProperties']['smallTransparentIcon']
# },
# 'fields': [],
# 'color': 7102001,
# 'type': "rich",
# 'title': self.translations[locale]['msg']['spider'],
# 'footer': {'text': self.translations[locale]['msg']['resp_time']},
# 'timestamp': resp_time
# }
items_to_get = ada_cats[1]['itemIndexes']
sales = []
ada_sales = await self.get_vendor_sales(locale, ada_resps[0], items_to_get, [1812969468])
# self.data[locale]['spider']['fields'] = self.data[locale]['spider']['fields'] + banshee_sales[0]
sales.append({'name': "", "items": ada_sales[1], "template": cat_templates['6']})
items_to_get = ada_cats[2]['itemIndexes']
for ada_resp in ada_resps:
items_to_get = ada_resp['Response']['categories']['data']['categories'][2]['itemIndexes']
ada_sales = await self.get_vendor_sales(locale, ada_resp, items_to_get, [1812969468])
sales.append({'name': "", "items": ada_sales[1], "template": cat_templates['4']})
await self.write_to_db(locale, 'ada_mods', sales, name=ada_def['displayProperties']['name'], order=5,
template='vendor_items.html', annotations=[], size='tall')
    async def get_daily_mods(self, langs, forceget=False):
        """Build the daily-mods embed from Ada-1 and Banshee-44 stock.

        Fetches both vendors once, then per locale filters their sales down to
        items with itemType 19 (mods) into ``self.data[lang]['daily_mods']``.

        Args:
            langs: locale codes to build data for.
            forceget: bypass the response cache when True.

        Returns:
            False when either vendor fetch fails, else None.
        """
        char_info = self.char_info
        ada_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/Vendors/350061650/'.\
            format(char_info['platform'], char_info['membershipid'], char_info['charid'][0])
        banshee_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/Vendors/672118013/'. \
            format(char_info['platform'], char_info['membershipid'], char_info['charid'][0])
        ada_resp = await self.get_cached_json('ada_{}'.format(char_info['charid'][0]), 'ada', ada_url, self.vendor_params, force=forceget)
        banshee_resp = await self.get_cached_json('banshee', 'banshee', banshee_url, self.vendor_params, force=forceget)
        if not(ada_resp and banshee_resp):
            return False
        ada_cats = ada_resp['Response']['categories']['data']['categories']
        banshee_cats = banshee_resp['Response']['categories']['data']['categories']
        resp_time = banshee_resp['timestamp']
        for lang in langs:
            self.data[lang]['daily_mods'] = {
                'fields': [],
                'color': 0x4c3461,
                'type': "rich",
                'title': self.translations[lang]['msg']['daily_mods'],
                'footer': {'text': self.translations[lang]['msg']['resp_time']},
                'timestamp': resp_time
            }
            # NOTE(review): ada_def and banshee_def are decoded but never used
            # in this method — likely leftovers; confirm before removing
            ada_def = await self.destiny.decode_hash(350061650, 'DestinyVendorDefinition', language=lang)
            banshee_def = await self.destiny.decode_hash(672118013, 'DestinyVendorDefinition', language=lang)
            mods = []
            items_to_get = ada_cats[1]['itemIndexes']
            ada_sales = await self.get_vendor_sales(lang, ada_resp, items_to_get, [1812969468])
            for item in ada_sales[1]:
                item_def = await self.destiny.decode_hash(item['hash'], 'DestinyInventoryItemDefinition', language=lang)
                # keep only mods (itemType 19)
                if item_def['itemType'] == 19:
                    mods.append({'inline': True, 'name': item_def['displayProperties']['name'], 'value': item_def['itemTypeDisplayName']})
            items_to_get = banshee_cats[3]['itemIndexes']
            banshee_sales = await self.get_vendor_sales(lang, banshee_resp, items_to_get, [1812969468, 2731650749])
            for item in banshee_sales[1]:
                item_def = await self.destiny.decode_hash(item['hash'], 'DestinyInventoryItemDefinition', language=lang)
                # keep only mods (itemType 19)
                if item_def['itemType'] == 19:
                    mods.append({'inline': True, 'name': item_def['displayProperties']['name'], 'value': item_def['itemTypeDisplayName']})
            self.data[lang]['daily_mods']['fields'] = mods
async def get_xur_loc(self):
url = 'https://paracausal.science/xur/current.json'
r = await self.session.get(url)
r_json = await r.json()
return r_json
    async def get_xur(self, langs, forceget=False):
        """Build Xur's (vendor 2190858386) embed and site record per locale.

        Combines the Bungie vendor response with the community location feed
        (get_xur_loc). ErrorCode 1627 from the vendor endpoint is treated as
        "Xur is not present".

        Args:
            langs: locale codes to build data for.
            forceget: bypass the response cache when True.

        Returns:
            False on API failure (a fallback record is written), else None.
        """
        char_info = self.char_info
        # web templates per vendor display category
        cat_templates = {
            '6': 'contract_item.html',
            '0': 'weapon_item.html',
            '4': 'armor_item.html'
        }
        # destinationHash -> placeHash lookup for the location line
        place_hashes = {
            '1737926756': 3747705955,
            '3607432451': 3607432451,
            '697502628': 3747705955
        }
        xur_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/Vendors/2190858386/'. \
            format(char_info['platform'], char_info['membershipid'], char_info['charid'][0])
        xur_resp = await self.get_cached_json('xur', 'xur', xur_url, self.vendor_params, force=forceget)
        if not xur_resp:
            for lang in langs:
                # NOTE(review): reuses the previous embed's first field; raises
                # if self.data was never populated for this locale — confirm
                db_data = {
                    'name': self.translations[lang]['msg']['xur'],
                    'description': self.data[lang]['xur']['fields'][0]['value']
                }
                await self.write_to_db(lang, 'xur', [db_data],
                                       name=self.translations[lang]['msg']['xur'])
            return False
        resp_time = xur_resp['timestamp']
        xur_loc = await self.get_xur_loc()
        for lang in langs:
            xur_def = await self.destiny.decode_hash(2190858386, 'DestinyVendorDefinition', language=lang)
            self.data[lang]['xur'] = {
                'thumbnail': {
                    'url': self.icon_prefix + xur_def['displayProperties']['smallTransparentIcon']
                },
                'fields': [],
                'color': 0x3DD5D6,
                'type': "rich",
                'title': self.translations[lang]['msg']['xurtitle'],
                'footer': {'text': self.translations[lang]['msg']['resp_time']},
                'timestamp': resp_time
            }
            xur_json = xur_resp
            # 1627: vendor not available, i.e. Xur is absent this week
            if not xur_json['ErrorCode'] == 1627:
                loc_field = {
                    "inline": False,
                    "name": self.translations[lang]['msg']['xurloc'],
                    "value": self.translations[lang]['xur']['NULL']
                }
                weapon = {
                    'inline': False,
                    'name': self.translations[lang]['msg']['weapon'],
                    'value': ''
                }
                annotations = []
                if xur_loc:
                    # community feed available: credit it in the footer
                    self.data[lang]['xur']['footer']['text'] = self.translations[lang]['xur']['copyright']
                    annotations = [self.translations[lang]['xur']['copyright']]
                else:
                    # fall back to the vendor's own location index
                    xur_loc = {}
                    # NOTE(review): compares the location *entry* (a dict) to
                    # 2961497387 — likely meant its 'destinationHash'; as
                    # written the comparison is always unequal — confirm
                    if xur_def['locations'][xur_json['Response']['vendor']['data']['vendorLocationIndex']] != 2961497387:
                        xur_loc['destinationHash'] = xur_def['locations'][xur_json['Response']['vendor']['data']['vendorLocationIndex']]['destinationHash']
                        xur_loc['placeHash'] = place_hashes[str(xur_loc['destinationHash'])]
                    self.data[lang]['xur'].pop('footer')
                    annotations = []
                sales = []
                if xur_loc:
                    xur_place_name = await self.destiny.decode_hash(xur_loc['placeHash'], 'DestinyPlaceDefinition', language=lang)
                    xur_destination_name = await self.destiny.decode_hash(xur_loc['destinationHash'], 'DestinyDestinationDefinition', language=lang)
                    loc_field['value'] = '{}, {}'.format(xur_place_name['displayProperties']['name'], xur_destination_name['displayProperties']['name'])
                    self.data[lang]['xur']['fields'].append(loc_field)
                    sales = [{'name': '{}, {}'.format(xur_place_name['displayProperties']['name'],
                                                      xur_destination_name['displayProperties']['name']),
                              'items': [], 'template': cat_templates['6']},
                             {'name': 'Оружие', 'items': [], 'template': cat_templates['0']},
                             {'name': 'Броня', 'items': [], 'template': cat_templates['4']}]
                    xur_cats = xur_resp['Response']['categories']['data']['categories']
                    cat_sales = await self.get_vendor_sales(lang, xur_resp, xur_cats[0]['itemIndexes'], [3875551374])
                    xur_sales = xur_json['Response']['sales']['data']
                    self.data[lang]['xur']['fields'].append(weapon)
                    for key in sorted(xur_sales.keys()):
                        item_hash = xur_sales[key]['itemHash']
                        # skip engram/currency placeholder hashes
                        if item_hash not in [4285666432, 2293314698, 2125848607, 3875551374]:
                            definition = 'DestinyInventoryItemDefinition'
                            item_resp = await self.destiny.decode_hash(item_hash, definition, language=lang)
                            item_name = item_resp['displayProperties']['name']
                            # itemType 2 — armor branch
                            if item_resp['itemType'] == 2:
                                item_sockets = item_resp['sockets']['socketEntries']
                                plugs = []
                                for s in item_sockets:
                                    if len(s['reusablePlugItems']) > 0 and s['plugSources'] == 2:
                                        plugs.append(s['reusablePlugItems'][0]['plugItemHash'])
                                exotic = {
                                    'inline': True,
                                    'name': '',
                                    'value': item_name
                                }
                                # summaryItemHash filter keeps only exotics
                                if item_resp['summaryItemHash'] in [715326750, 2673424576]:
                                    if item_resp['classType'] == 0:
                                        exotic['name'] = self.translations[lang]['Titan']
                                    elif item_resp['classType'] == 1:
                                        exotic['name'] = self.translations[lang]['Hunter']
                                    elif item_resp['classType'] == 2:
                                        exotic['name'] = self.translations[lang]['Warlock']
                                    self.data[lang]['xur']['fields'].append(exotic)
                                    for item in cat_sales[1]:
                                        if item['hash'] == item_hash:
                                            sales[2]['items'].append(item)
                            else:
                                # non-armor: fill the weapon field's value
                                if item_resp['summaryItemHash'] in [715326750, 2673424576]:
                                    i = 0
                                    for item in self.data[lang]['xur']['fields']:
                                        if item['name'] == self.translations[lang]['msg']['weapon']:
                                            self.data[lang]['xur']['fields'][i]['value'] = item_name
                                        i += 1
                                    for item in cat_sales[1]:
                                        if item['hash'] == item_hash:
                                            sales[1]['items'].append(item)
                else:
                    # Xur present but location unknown: items are skipped too
                    loc_field = {
                        "inline": False,
                        "name": self.translations[lang]['msg']['xurloc'],
                        "value": self.translations[lang]['xur']['noxur']
                    }
                    self.data[lang]['xur']['fields'].append(loc_field)
                    sales = [{'name': self.translations[lang]['xur']['noxur'],
                              'items': [], 'template': cat_templates['6']}]
            else:
                # Xur absent this week
                loc_field = {
                    "inline": False,
                    "name": self.translations[lang]['msg']['xurloc'],
                    "value": self.translations[lang]['xur']['noxur']
                }
                self.data[lang]['xur']['fields'].append(loc_field)
                sales = [{'name': self.translations[lang]['xur']['noxur'],
                          'items': [], 'template': cat_templates['6']}]
            # NOTE(review): in the absent-Xur branch 'annotations' is never
            # assigned this iteration and may be unbound here — confirm
            await self.write_to_db(lang, 'xur', sales, template='vendor_items.html', order=7,
                                   name=xur_def['displayProperties']['name'],
                                   annotations=annotations)
    async def get_heroic_story(self, langs, forceget=False):
        """Build the weekly heroic story mission embed for each language and persist it.

        langs: iterable of language codes to render.
        forceget: when True, bypass the cached activities response.
        Returns False when no activities response is available (a fallback row
        built from the previously rendered embed is written instead).
        """
        activities_resp = await self.get_activities_response('heroicstory', string='heroic story missions',
                                                             force=forceget)
        if not activities_resp:
            # No fresh data: reuse the first field of the embed left over from a
            # previous run. NOTE(review): assumes self.data[lang]['heroicstory']
            # was populated earlier — a cold start would raise KeyError here.
            for lang in langs:
                db_data = {
                    'name': self.data[lang]['heroicstory']['fields'][0]['name'],
                    'description': self.data[lang]['heroicstory']['fields'][0]['value']
                }
                await self.write_to_db(lang, 'heroic_story_missions', [db_data],
                                       name=self.translations[lang]['site']['heroicstory'])
            return False
        resp_time = activities_resp['timestamp']
        for lang in langs:
            local_types = self.translations[lang]
            # Discord-style embed skeleton; 'fields' is filled from the activity list below.
            self.data[lang]['heroicstory'] = {
                'thumbnail': {
                    'url': "https://www.bungie.net/common/destiny2_content/icons/DestinyActivityModeDefinition_"
                           "5f8a923a0d0ac1e4289ae3be03f94aa2.png"
                },
                'fields': [],
                'color': 10070709,
                'type': 'rich',
                'title': self.translations[lang]['msg']['heroicstory'],
                'footer': {'text': self.translations[lang]['msg']['resp_time']},
                'timestamp': resp_time
            }
            db_data = []
            activities_json = activities_resp
            for key in activities_json['Response']['activities']['data']['availableActivities']:
                item_hash = key['activityHash']
                definition = 'DestinyActivityDefinition'
                r_json = await self.destiny.decode_hash(item_hash, definition, language=lang)
                # Heroic story missions are matched by localized name substring.
                if local_types['heroicstory'] in r_json['displayProperties']['name']:
                    info = {
                        'inline': True,
                        "name": r_json['selectionScreenDisplayProperties']['name'],
                        "value": r_json['selectionScreenDisplayProperties']['description']
                    }
                    db_data.append({
                        "name": r_json['selectionScreenDisplayProperties']['name'],
                        "description": r_json['selectionScreenDisplayProperties']['description']
                    })
                    self.data[lang]['heroicstory']['fields'].append(info)
            await self.write_to_db(lang, 'heroic_story_missions', db_data, name=self.translations[lang]['site']['heroicstory'],
                                   size='tall', order=3)
async def get_forge(self, langs, forceget=False):
activities_resp = await self.get_activities_response('forge', force=forceget)
if not activities_resp:
for lang in langs:
local_types = self.translations[lang]
db_data = {
'name': self.data[lang]['forge']['fields'][0]['name'],
'description': self.data[lang]['forge']['fields'][0]['value']
}
await self.write_to_db(lang, 'forge', [db_data], name=self.translations[lang]['site']['forge'],
template='table_items.html')
return False
resp_time = activities_resp['timestamp']
for lang in langs:
local_types = self.translations[lang]
self.data[lang]['forge'] = {
'thumbnail': {
'url': ''
},
'fields': [],
'color': 3678761,
'type': 'rich',
'title': self.translations[lang]['msg']['forge'],
'footer': {'text': self.translations[lang]['msg']['resp_time']},
'timestamp': resp_time
}
db_data = []
activities_json = activities_resp
for key in activities_json['Response']['activities']['data']['availableActivities']:
item_hash = key['activityHash']
definition = 'DestinyActivityDefinition'
r_json = await self.destiny.decode_hash(item_hash, definition, language=lang)
if local_types['forge'] in r_json['displayProperties']['name']:
forge_def = 'DestinyDestinationDefinition'
place = await self.destiny.decode_hash(r_json['destinationHash'], forge_def, language=lang)
self.data[lang]['forge']['thumbnail']['url'] = self.icon_prefix + r_json['displayProperties'][
'icon']
info = {
"inline": True,
"name": r_json['displayProperties']['name'],
"value": place['displayProperties']['name']
}
db_data.append({
"name": r_json['displayProperties']['name'],
"description": place['displayProperties']['name'],
"icon": r_json['displayProperties']['icon']
})
self.data[lang]['forge']['fields'].append(info)
await self.write_to_db(lang, 'forge', db_data, name=self.translations[lang]['site']['forge'],
template='table_items.html', order=4)
async def get_strike_modifiers(self, langs, forceget=False):
activities_resp = await self.get_activities_response('vanguardstrikes', string='strike modifiers',
force=forceget)
if not activities_resp:
for lang in langs:
db_data = {
'name': self.data[lang]['vanguardstrikes']['fields'][0]['name'],
'description': self.data[lang]['vanguardstrikes']['fields'][0]['value']
}
await self.write_to_db(lang, 'strike_modifiers', [db_data],
name=self.translations[lang]['msg']['strikesmods'])
return False
resp_time = activities_resp['timestamp']
for lang in langs:
local_types = self.translations[lang]
self.data[lang]['vanguardstrikes'] = {
'thumbnail': {
'url': ''
},
'fields': [],
'color': 7506394,
'type': 'rich',
'title': self.translations[lang]['msg']['strikesmods'],
'footer': {'text': self.translations[lang]['msg']['resp_time']},
'timestamp': resp_time
}
db_data = []
activities_json = activities_resp
strikes = await self.destiny.decode_hash(743628305, 'DestinyActivityDefinition', language=lang)
for key in activities_json['Response']['activities']['data']['availableActivities']:
item_hash = key['activityHash']
definition = 'DestinyActivityDefinition'
r_json = await self.destiny.decode_hash(item_hash, definition, language=lang)
if item_hash == 743628305:
mods = await self.decode_modifiers(key, lang)
self.data[lang]['vanguardstrikes']['fields'] = mods[0]
db_data = mods[1]
if self.translations[lang]['strikes'] in r_json['displayProperties']['name']:
self.data[lang]['vanguardstrikes']['thumbnail']['url'] = self.icon_prefix + \
r_json['displayProperties']['icon']
await self.write_to_db(lang, 'strike_modifiers', db_data, size='wide',
name=self.translations[lang]['msg']['strikesmods'], order=1)
async def get_reckoning_boss(self, lang):
first_reset_time = 1539709200
seconds_since_first = time.time() - first_reset_time
weeks_since_first = seconds_since_first // 604800
reckoning_bosses = ['swords', 'oryx']
self.data[lang]['reckoningboss'] = {
"thumbnail": {
"url": "https://www.bungie.net/common/destiny2_content/icons/DestinyActivityModeDefinition_"
"e74b3385c5269da226372df8ae7f500d.png"
},
'fields': [
{
'inline': True,
"name": self.translations[lang][reckoning_bosses[int(weeks_since_first % 2)]],
"value": self.translations[lang]['r_desc']
}
],
"color": 1332799,
"type": "rich",
"title": self.translations[lang]['msg']['reckoningboss'],
}
def add_reckoning_boss(self, lang):
first_reset_time = 1539709200
seconds_since_first = time.time() - first_reset_time
weeks_since_first = seconds_since_first // 604800
reckoning_bosses = ['swords', 'oryx']
data = [{
'inline': False,
'name': self.translations[lang]['msg']['reckoningboss'],
'value': self.translations[lang][reckoning_bosses[int(weeks_since_first % 2)]],
}]
db_data = [{
'name': self.translations[lang]['site']['reckoningboss'],
'description': self.translations[lang][reckoning_bosses[int(weeks_since_first % 2)]],
'icon': "/common/destiny2_content/icons/DestinyActivityModeDefinition_e74b3385c5269da226372df8ae7f500d.png"
}]
return [data, db_data]
async def get_reckoning_modifiers(self, langs, forceget=False):
activities_resp = await self.get_activities_response('reckoning', string='reckoning modifiers', force=forceget)
if not activities_resp:
for lang in langs:
db_data = {
'name': self.data[lang]['reckoning']['fields'][0]['name'],
'description': self.data[lang]['reckoning']['fields'][0]['value']
}
await self.write_to_db(lang, 'reckoning', [db_data],
name=self.translations[lang]['msg']['reckoningmods'])
return False
resp_time = activities_resp['timestamp']
for lang in langs:
local_types = self.translations[lang]
self.data[lang]['reckoning'] = {
'thumbnail': {
'url': "https://www.bungie.net/common/destiny2_content/icons/DestinyActivityModeDefinition_"
"e74b3385c5269da226372df8ae7f500d.png"
},
'fields': [],
'color': 1332799,
'type': 'rich',
'title': self.translations[lang]['msg']['reckoningmods'],
'footer': {'text': self.translations[lang]['msg']['resp_time']},
'timestamp': resp_time
}
self.data[lang]['reckoning']['fields'] = self.add_reckoning_boss(lang)[0]
db_data = self.add_reckoning_boss(lang)[1]
activities_json = activities_resp
for key in activities_json['Response']['activities']['data']['availableActivities']:
item_hash = key['activityHash']
definition = 'DestinyActivityDefinition'
r_json = await self.destiny.decode_hash(item_hash, definition, language=lang)
if self.translations[lang]['reckoning'] in r_json['displayProperties']['name']:
mods = await self.decode_modifiers(key, lang)
db_data = [*db_data, *mods[1]]
self.data[lang]['reckoning']['fields'] = [*self.data[lang]['reckoning']['fields'], *mods[0]]
await self.write_to_db(lang, 'reckoning', db_data, 'wide', self.translations[lang]['msg']['reckoningmods'],
order=2)
async def get_nightfall820(self, langs, forceget=False):
activities_resp = await self.get_activities_response('nightfalls820', string='820 nightfalls', force=forceget)
if not activities_resp:
for lang in langs:
db_data = {
'name': self.data[lang]['nightfalls820']['fields'][0]['name'],
'description': self.data[lang]['nightfalls820']['fields'][0]['value']
}
await self.write_to_db(lang, '820_nightfalls', [db_data],
name=self.translations[lang]['site']['nightfalls820'], type='weekly')
return False
resp_time = activities_resp['timestamp']
for lang in langs:
local_types = self.translations[lang]
self.data[lang]['nightfalls820'] = {
'thumbnail': {
'url': ''
},
'fields': [],
'color': 7506394,
'type': 'rich',
'title': self.translations[lang]['msg']['nightfalls820'],
'footer': {'text': self.translations[lang]['msg']['resp_time']},
'timestamp': resp_time
}
db_data = []
activities_json = activities_resp
for key in activities_json['Response']['activities']['data']['availableActivities']:
item_hash = key['activityHash']
definition = 'DestinyActivityDefinition'
r_json = await self.destiny.decode_hash(item_hash, definition, language=lang)
try:
recommended_light = key['recommendedLight']
if recommended_light == 820:
self.data[lang]['nightfalls820']['thumbnail']['url'] = self.icon_prefix + \
r_json['displayProperties']['icon']
if r_json['matchmaking']['requiresGuardianOath']:
info = {
'inline': True,
'name': self.translations[lang]['msg']['guidedgamenightfall'],
'value': r_json['selectionScreenDisplayProperties']['name']
}
db_data.append({
'name': self.translations[lang]['msg']['guidedgamenightfall'],
'description': r_json['selectionScreenDisplayProperties']['name']
})
else:
info = {
'inline': True,
'name': r_json['selectionScreenDisplayProperties']['name'],
'value': r_json['selectionScreenDisplayProperties']['description']
}
db_data.append({
'name': r_json['selectionScreenDisplayProperties']['name'],
'description': r_json['selectionScreenDisplayProperties']['description']
})
self.data[lang]['nightfalls820']['fields'].append(info)
except KeyError:
pass
await self.write_to_db(lang, '820_nightfalls', db_data,
name=self.translations[lang]['site']['nightfalls820'], order=0, type='weekly')
async def get_modifiers(self, lang, act_hash):
url = 'https://www.bungie.net/{}/Explore/Detail/DestinyActivityDefinition/{}'.format(lang, act_hash)
r = await self.session.get(url)
r = await r.text()
soup = BeautifulSoup(r, features="html.parser")
modifier_list = soup.find_all('div', {'data-identifier': 'modifier-information'})
modifiers = []
for item in modifier_list:
modifier = item.find('div', {'class': 'text-content'})
modifier_title = modifier.find('div', {'class': 'title'})
modifier_subtitle = modifier.find('div', {'class': 'subtitle'})
mod = {
"name": modifier_title.text,
"description": modifier_subtitle.text
}
modifiers.append(mod)
if r:
return modifiers
else:
return False
async def get_raids(self, langs, forceget=False):
activities_resp = await self.get_activities_response('raids', force=forceget)
if not activities_resp:
for lang in langs:
db_data = {
'name': self.data[lang]['raids']['fields'][0]['name'],
'description': self.data[lang]['raids']['fields'][0]['value']
}
await self.write_to_db(lang, 'raid_challenges', [db_data], self.translations[lang]['msg']['raids'],
type='weekly')
return False
resp_time = activities_resp['timestamp']
hawthorne_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/Vendors/3347378076/'. \
format(self.char_info['platform'], self.char_info['membershipid'], self.char_info['charid'][0])
hawthorne_resp = await self.get_cached_json('hawthorne', 'hawthorne', hawthorne_url, self.vendor_params,
force=forceget)
for lang in langs:
local_types = self.translations[lang]
self.data[lang]['raids'] = {
'thumbnail': {
'url': 'https://www.bungie.net/common/destiny2_content/icons/8b1bfd1c1ce1cab51d23c78235a6e067.png'
},
'fields': [],
'color': 0xF1C40F,
'type': 'rich',
'title': self.translations[lang]['msg']['raids'],
'footer': {'text': self.translations[lang]['msg']['resp_time']},
'timestamp': resp_time
}
first_reset_time = 1580230800
seconds_since_first = time.time() - first_reset_time
weeks_since_first = seconds_since_first // 604800
eow_loadout = int(weeks_since_first % 6)
last_wish_challenges = [1250327262, 3871581136, 1568895666, 4007940282, 2836954349]
sotp_challenges = [1348944144, 3415614992, 1381881897]
cos_challenges = [2459033425, 2459033426, 2459033427]
lw_ch = 0
sotp_ch = 0
cos_ch = 0
hawthorne_json = hawthorne_resp
if hawthorne_resp:
resp_time = hawthorne_json['timestamp']
for cat in hawthorne_json['Response']['sales']['data']:
if hawthorne_json['Response']['sales']['data'][cat]['itemHash'] in last_wish_challenges:
lw_ch = hawthorne_json['Response']['sales']['data'][cat]['itemHash']
elif hawthorne_json['Response']['sales']['data'][cat]['itemHash'] in sotp_challenges:
sotp_ch = hawthorne_json['Response']['sales']['data'][cat]['itemHash']
elif hawthorne_json['Response']['sales']['data'][cat]['itemHash'] in cos_challenges:
cos_ch = hawthorne_json['Response']['sales']['data'][cat]['itemHash']
db_data = []
activities_json = activities_resp
for key in activities_json['Response']['activities']['data']['availableActivities']:
item_hash = key['activityHash']
definition = 'DestinyActivityDefinition'
r_json = await self.destiny.decode_hash(item_hash, definition, language=lang)
i = 1
# if str(r_json['hash']) in self.translations[lang]['levi_order'] and \
# not r_json['matchmaking']['requiresGuardianOath']:
# challenges = await self.get_modifiers(lang, item_hash)
# if challenges:
# challenge = set(challenges[0]['name'].lower().replace('"', '').split(' '))
# challenge.discard('the')
# order_strings = self.translations[lang]['levi_order'][str(r_json['hash'])].splitlines()
# levi_str = ''
# for string in order_strings:
# intersection = challenge.intersection(set(string.lower().split(' ')))
# if intersection:
# levi_str = '{}<b>{}</b>\n'.format(levi_str, string)
# else:
# levi_str = '{}{}\n'.format(levi_str, string)
# levi_str = levi_str[:-1]
# else:
# levi_str = self.translations[lang]['levi_order'][str(r_json['hash'])]
# info = {
# 'inline': True,
# 'name': r_json['originalDisplayProperties']['name'],
# 'value': levi_str.replace('<b>', '**').replace('</b>', '**')
# }
# db_data.append({
# 'name': info['name'],
# 'description': levi_str.replace('\n', '<br>')
# })
# self.data[lang]['raids']['fields'].append(info)
# if self.translations[lang]["EoW"] in r_json['displayProperties']['name'] and \
# not r_json['matchmaking']['requiresGuardianOath']:
# info = {
# 'inline': False,
# 'name': self.translations[lang]['lairs'],
# 'value': u"\u2063"
# }
# mods = await self.get_modifiers(lang, r_json['hash'])
# resp_time = datetime.utcnow().isoformat()
# if mods:
# loadout = '{}\n{}\n{}'.format(self.translations[lang]['armsmaster'][eow_loadout*3],
# self.translations[lang]['armsmaster'][eow_loadout*3+1],
# self.translations[lang]['armsmaster'][eow_loadout*3+2])
# info['value'] = '{}: {}\n\n{}:\n{}'.format(mods[0]['name'], mods[0]['description'],
# mods[1]['name'], loadout)
# else:
# info['value'] = self.data[lang]['api_is_down']['fields'][0]['name']
# db_data.append({
# 'name': info['name'],
# 'description': info['value'].replace('\n\n', '<br>').replace('\n', '<br>')
# })
# self.data[lang]['raids']['fields'].append(info)
if self.translations[lang]['LW'] in r_json['displayProperties']['name'] and \
not r_json['matchmaking']['requiresGuardianOath'] and lw_ch != 0 and hawthorne_resp:
info = {
'inline': True,
'name': r_json['originalDisplayProperties']['name'],
'value': u"\u2063"
}
curr_challenge = lw_ch
curr_challenge = await self.destiny.decode_hash(curr_challenge, 'DestinyInventoryItemDefinition',
language=lang)
info['value'] = curr_challenge['displayProperties']['name']
db_data.append({
'name': info['name'],
'description': info['value'].replace('\n', '<br>')
})
self.data[lang]['raids']['fields'].append(info)
# if self.translations[lang]['SotP'] in r_json['displayProperties']['name'] and \
# not r_json['matchmaking']['requiresGuardianOath'] and sotp_ch != 0 and hawthorne_resp:
# info = {
# 'inline': True,
# 'name': r_json['originalDisplayProperties']['name'],
# 'value': u"\u2063"
# }
# curr_challenge = sotp_ch
# curr_challenge = await self.destiny.decode_hash(curr_challenge, 'DestinyInventoryItemDefinition',
# language=lang)
# info['value'] = curr_challenge['displayProperties']['name']
# db_data.append({
# 'name': info['name'],
# 'description': info['value'].replace('\n', '<br>')
# })
# self.data[lang]['raids']['fields'].append(info)
# if self.translations[lang]['CoS'] in r_json['displayProperties']['name'] and \
# not r_json['matchmaking']['requiresGuardianOath'] and cos_ch != 0 and hawthorne_resp:
# info = {
# 'inline': True,
# 'name': r_json['originalDisplayProperties']['name'],
# 'value': u"\u2063"
# }
# curr_challenge = cos_ch
# curr_challenge = await self.destiny.decode_hash(curr_challenge, 'DestinyInventoryItemDefinition',
# language=lang)
# info['value'] = curr_challenge['displayProperties']['name']
# db_data.append({
# 'name': info['name'],
# 'description': info['value'].replace('\n', '<br>')
# })
# self.data[lang]['raids']['fields'].append(info)
if self.translations[lang]['GoS'] in r_json['displayProperties']['name'] and \
not r_json['matchmaking']['requiresGuardianOath'] and 'modifierHashes' in key.keys():
info = {
'inline': True,
'name': r_json['originalDisplayProperties']['name'],
'value': u"\u2063"
}
# mods = await self.get_modifiers(lang, r_json['hash'])
mods = await self.destiny.decode_hash(key['modifierHashes'][0], 'DestinyActivityModifierDefinition', lang)
resp_time = datetime.utcnow().isoformat()
if mods:
info['value'] = mods['displayProperties']['name']
else:
info['value'] = self.data[lang]['api_is_down']['fields'][0]['name']
db_data.append({
'name': info['name'],
'description': info['value'].replace('\n', '<br>')
})
self.data[lang]['raids']['fields'].append(info)
if r_json['hash'] in [910380154, 3881495763] and 'modifierHashes' in key.keys():
info = {
'inline': True,
'name': r_json['originalDisplayProperties']['name'],
'value': u"\u2063"
}
mods = await self.destiny.decode_hash(key['modifierHashes'][0], 'DestinyActivityModifierDefinition', lang)
resp_time = datetime.utcnow().isoformat()
if mods:
info['value'] = mods['displayProperties']['name']
else:
info['value'] = self.data[lang]['api_is_down']['fields'][0]['name']
db_data.append({
'name': info['name'],
'description': info['value'].replace('\n', '<br>')
})
self.data[lang]['raids']['fields'].append(info)
self.data[lang]['raids']['timestamp'] = resp_time
await self.write_to_db(lang, 'raid_challenges', db_data, '',
self.translations[lang]['msg']['raids'], order=1, type='weekly')
    async def get_ordeal(self, langs, forceget=False):
        """Build the weekly Nightfall: The Ordeal embed for each language and persist it.

        langs: iterable of language codes to render.
        forceget: when True, bypass the cached activities response.
        Returns False when no activities response is available (a fallback row
        built from the previously rendered embed is written instead).
        """
        activities_resp = await self.get_activities_response('ordeal', force=forceget)
        if not activities_resp:
            for lang in langs:
                db_data = {
                    'name': self.data[lang]['ordeal']['fields'][0]['name'],
                    'description': self.data[lang]['ordeal']['fields'][0]['value']
                }
                await self.write_to_db(lang, 'ordeal', [db_data], name=self.translations[lang]['msg']['ordeal'],
                                       type='weekly')
            return False
        resp_time = activities_resp['timestamp']
        for lang in langs:
            local_types = self.translations[lang]
            self.data[lang]['ordeal'] = {
                'thumbnail': {
                    'url': 'https://www.bungie.net/common/destiny2_content/icons/DestinyMilestoneDefinition'
                           '_a72e5ce5c66e21f34a420271a30d7ec3.png'
                },
                'fields': [],
                'color': 5331575,
                'type': 'rich',
                'title': self.translations[lang]['msg']['ordeal'],
                'footer': {'text': self.translations[lang]['msg']['resp_time']},
                'timestamp': resp_time
            }
            strikes = []
            db_data = []
            activities_json = activities_resp
            for key in activities_json['Response']['activities']['data']['availableActivities']:
                item_hash = key['activityHash']
                definition = 'DestinyActivityDefinition'
                r_json = await self.destiny.decode_hash(item_hash, definition, language=lang)
                # 4110605575: regular strike — collected so its description can
                # be matched against the Ordeal entry below.
                if r_json['activityTypeHash'] == 4110605575:
                    strikes.append({"name": r_json['displayProperties']['name'],
                                    "description": r_json['displayProperties']['description']})
                # 575572995: the Ordeal itself; only the localized 'adept' tier
                # entry is used (one per week).
                if r_json['activityTypeHash'] == 575572995 and \
                        local_types['adept'] in r_json['displayProperties']['name']:
                    info = {
                        'inline': True,
                        'name': r_json['originalDisplayProperties']['description'],
                        'value': u"\u2063"
                    }
                    self.data[lang]['ordeal']['fields'].append(info)
                    db_data.append({
                        'name': info['name'],
                        'description': info['value']
                    })
            # Replace the invisible-separator placeholder with the description of
            # the strike whose name appears in the Ordeal field's name.
            if len(self.data[lang]['ordeal']['fields']) > 0:
                for strike in strikes:
                    if strike['name'] in self.data[lang]['ordeal']['fields'][0]['name']:
                        self.data[lang]['ordeal']['fields'][0]['value'] = strike['description']
                        db_data[0]['description'] = strike['description']
                        break
            await self.write_to_db(lang, 'ordeal', db_data, name=self.translations[lang]['msg']['ordeal'], order=3,
                                   type='weekly')
    async def get_nightmares(self, langs, forceget=False):
        """Build the weekly Nightmare Hunts embed for each language and persist it.

        langs: iterable of language codes to render.
        forceget: when True, bypass the cached activities response.
        Returns False when no activities response is available (a fallback row
        built from the previously rendered embed is written instead).
        """
        activities_resp = await self.get_activities_response('nightmares', force=forceget)
        if not activities_resp:
            for lang in langs:
                db_data = {
                    'name': self.data[lang]['nightmares']['fields'][0]['name'],
                    'description': self.data[lang]['nightmares']['fields'][0]['value']
                }
                await self.write_to_db(lang, 'nightmare_hunts', [db_data],
                                       name=self.translations[lang]['site']['nightmares'], type='weekly')
            return False
        resp_time = activities_resp['timestamp']
        for lang in langs:
            local_types = self.translations[lang]
            self.data[lang]['nightmares'] = {
                'thumbnail': {
                    'url': 'https://www.bungie.net/common/destiny2_content/icons/DestinyActivityModeDefinition_'
                           '48ad57129cd0c46a355ef8bcaa1acd04.png'
                },
                'fields': [],
                'color': 6037023,
                'type': 'rich',
                'title': self.translations[lang]['msg']['nightmares'],
                'footer': {'text': self.translations[lang]['msg']['resp_time']},
                'timestamp': resp_time
            }
            db_data = []
            activities_json = activities_resp
            for key in activities_json['Response']['activities']['data']['availableActivities']:
                item_hash = key['activityHash']
                definition = 'DestinyActivityDefinition'
                r_json = await self.destiny.decode_hash(item_hash, definition, language=lang)
                # Keep only the localized 'adept' tier of each nightmare hunt;
                # the embed name drops the tier suffix, the site row also drops
                # the 'nightmare' prefix and stray quotes.
                if local_types['nightmare'] in r_json['displayProperties']['name'] and \
                        local_types['adept'] in r_json['displayProperties']['name']:
                    info = {
                        'inline': True,
                        'name': r_json['displayProperties']['name'].replace(local_types['adept'], ""),
                        'value': r_json['displayProperties']['description']
                    }
                    db_data.append({
                        'name': info['name'].replace(local_types['nightmare'], '').replace('\"', ''),
                        'description': info['value']
                    })
                    self.data[lang]['nightmares']['fields'].append(info)
            await self.write_to_db(lang, 'nightmare_hunts', db_data, name=self.translations[lang]['site']['nightmares'],
                                   order=2, type='weekly')
    async def get_empire_hunt(self, langs, forceget=False):
        """Build the weekly empire hunt embed for each language and persist it.

        langs: iterable of language codes to render.
        forceget: when True, bypass the cached activities response.
        Returns False when no activities response is available (a fallback row
        built from the previously rendered embed is written instead).
        """
        activities_resp = await self.get_activities_response('empire_hunts', force=forceget)
        if not activities_resp:
            for lang in langs:
                db_data = {
                    'name': self.data[lang]['empire_hunts']['fields'][0]['name'],
                    'description': self.data[lang]['empire_hunts']['fields'][0]['value']
                }
                await self.write_to_db(lang, 'empire_hunts', [db_data],
                                       name=self.translations[lang]['site']['empire_hunts'], type='weekly')
            return False
        resp_time = activities_resp['timestamp']
        for lang in langs:
            local_types = self.translations[lang]
            self.data[lang]['empire_hunts'] = {
                'thumbnail': {
                    'url': 'https://www.bungie.net/common/destiny2_content/icons/64ea61b26a2cba84954b4b73960bef7e.jpg'
                },
                'fields': [],
                'color': 0x0a2b4c,
                'type': 'rich',
                'title': self.translations[lang]['msg']['empire_hunts'],
                'footer': {'text': self.translations[lang]['msg']['resp_time']},
                'timestamp': resp_time
            }
            db_data = []
            for key in activities_resp['Response']['activities']['data']['availableActivities']:
                item_hash = key['activityHash']
                definition = 'DestinyActivityDefinition'
                r_json = await self.destiny.decode_hash(item_hash, definition, language=lang)
                # 494260690 is the empire hunt activity type; only the localized
                # 'adept' tier entry is kept, and the tier / 'empire hunt'
                # prefixes are stripped from the display name.
                if r_json['activityTypeHash'] == 494260690 and \
                        local_types['adept'] in r_json['displayProperties']['name']:
                    info = {
                        'inline': True,
                        'name': r_json['displayProperties']['name'].replace(local_types['adept'], "").
                            replace(local_types['empire_hunt'], ""),
                        'value': r_json['displayProperties']['description']
                    }
                    db_data.append({
                        'name': info['name'].replace(local_types['empire_hunt'], '').replace('\"', ''),
                        'description': info['value']
                    })
                    self.data[lang]['empire_hunts']['fields'].append(info)
            await self.write_to_db(lang, 'empire_hunts', db_data, name=self.translations[lang]['site']['empire_hunts'],
                                   order=5, type='weekly')
async def get_crucible_rotators(self, langs, forceget=False):
activities_resp = await self.get_activities_response('cruciblerotators', string='crucible rotators',
force=forceget)
if not activities_resp:
for lang in langs:
local_types = self.translations[lang]
db_data = {
'name': self.data[lang]['cruciblerotators']['fields'][0]['name'],
'description': self.data[lang]['cruciblerotators']['fields'][0]['value']
}
await self.write_to_db(lang, 'crucible_rotators', [db_data],
name=self.translations[lang]['msg']['cruciblerotators'],
template='table_items.html', type='weekly')
return False
resp_time = activities_resp['timestamp']
for lang in langs:
local_types = self.translations[lang]
self.data[lang]['cruciblerotators'] = {
'thumbnail': {
'url': self.icon_prefix + '/common/destiny2_content/icons/cc8e6eea2300a1e27832d52e9453a227.png'
},
'fields': [],
'color': 6629649,
'type': 'rich',
'title': self.translations[lang]['msg']['cruciblerotators'],
'footer': {'text': self.translations[lang]['msg']['resp_time']},
'timestamp': resp_time
}
db_data = []
activities_json = activities_resp
for key in activities_json['Response']['activities']['data']['availableActivities']:
item_hash = key['activityHash']
definition = 'DestinyActivityDefinition'
r_json = await self.destiny.decode_hash(item_hash, definition, language=lang)
if r_json['destinationHash'] == 4088006058:
if len(r_json['challenges']) > 0:
obj_def = 'DestinyObjectiveDefinition'
objective = await self.destiny.decode_hash(r_json['challenges'][0]['objectiveHash'], obj_def,
lang)
if item_hash in [540869524, 3847433434, 142028034, 1683791010, 3787302650, 935998519]:
if not self.data[lang]['cruciblerotators']['thumbnail']['url']:
if 'icon' in r_json['displayProperties']:
self.data[lang]['cruciblerotators']['thumbnail']['url'] = self.icon_prefix + \
r_json[
'displayProperties'][
'icon']
else:
self.data[lang]['cruciblerotators']['thumbnail']['url'] = self.icon_prefix + \
'/common/destiny2_content/icons/' \
'cc8e6eea2300a1e27832d52e9453a227.png'
if 'icon' in r_json['displayProperties']:
icon = r_json['displayProperties']['icon']
else:
icon = '/common/destiny2_content/icons/cc8e6eea2300a1e27832d52e9453a227.png'
info = {
'inline': True,
"name": r_json['displayProperties']['name'],
"value": r_json['displayProperties']['description']
}
db_data.append({
'name': info['name'],
'description': info['value'].replace('\n\n', '<br>'),
'icon': icon
})
self.data[lang]['cruciblerotators']['fields'].append(info)
if len(db_data) >= 3:
style = 'wide tall'
else:
style = 'wide'
await self.write_to_db(lang, 'crucible_rotators', db_data,
name=self.translations[lang]['msg']['cruciblerotators'], size=style, order=4,
type='weekly')
    async def get_the_lie_progress(self, langs, forceget=True):
        """Track community progress on 'The Lie' quest (item hash 1797229574).

        Reads the three quest objectives from the signed-in character's
        uninstanced objectives, appends a progress row to thelie.csv, and
        renders a progress-over-time plot per language into thelie-<lang>.png.
        NOTE(review): default forceget=True differs from the other getters
        (False) — confirm this is intentional.
        """
        url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/'.format(self.char_info['platform'],
                                                                                            self.char_info[
                                                                                                'membershipid'],
                                                                                            self.char_info['charid'][0])
        progression_json = await self.get_cached_json('objectives_{}'.format(self.char_info['charid'][0]),
                                                      'progressions', url, {'components': 301}, force=forceget)
        # NOTE(review): a falsy progression_json (failed fetch) would raise on
        # the subscript below — confirm upstream guarantees a response here.
        resp_time = progression_json['timestamp']
        progress = []
        if '1797229574' in progression_json['Response']['uninstancedItemComponents']['objectives']['data']:
            for lang in langs:
                quest_def = await self.destiny.decode_hash(1797229574, 'DestinyInventoryItemDefinition', language=lang)
                self.data[lang]['thelie'] = {
                    'thumbnail': {
                        'url': self.icon_prefix + quest_def['displayProperties']['icon']
                    },
                    'fields': [],
                    'color': 0x226197,
                    'type': 'rich',
                    'title': quest_def['displayProperties']['name'],
                    'footer': {'text': self.translations[lang]['msg']['resp_time']},
                    'timestamp': resp_time
                }
                # CSV row: [timestamp, pct_1851115127, pct_1851115126, pct_1851115125];
                # column order matches the objective-hash branches below.
                newrow = [resp_time, 0, 0, 0]
                names = ['', '', '']
                for place in \
                        progression_json['Response']['uninstancedItemComponents']['objectives']['data']['1797229574'][
                            'objectives']:
                    objective_def = await self.destiny.decode_hash(place['objectiveHash'], 'DestinyObjectiveDefinition',
                                                                   language=lang)
                    if place['complete']:
                        self.data[lang]['thelie']['fields'].append({
                            'inline': True,
                            'name': objective_def['progressDescription'],
                            'value': self.translations[lang]['msg']['complete']
                        })
                        if place['objectiveHash'] == 1851115127:
                            newrow[1] = 100
                            names[0] = objective_def['progressDescription']
                        elif place['objectiveHash'] == 1851115126:
                            newrow[2] = 100
                            names[1] = objective_def['progressDescription']
                        elif place['objectiveHash'] == 1851115125:
                            newrow[3] = 100
                            names[2] = objective_def['progressDescription']
                    else:
                        self.data[lang]['thelie']['fields'].append({
                            'inline': True,
                            'name': objective_def['progressDescription'],
                            'value': '{} ({:.2f}%)'.format(place['progress'],
                                                           place['progress'] / place['completionValue'] * 100)
                        })
                        if place['objectiveHash'] == 1851115127:
                            newrow[1] = place['progress'] / place['completionValue'] * 100
                            names[0] = objective_def['progressDescription']
                        elif place['objectiveHash'] == 1851115126:
                            newrow[2] = place['progress'] / place['completionValue'] * 100
                            names[1] = objective_def['progressDescription']
                        elif place['objectiveHash'] == 1851115125:
                            newrow[3] = place['progress'] / place['completionValue'] * 100
                            names[2] = objective_def['progressDescription']
                # Variable names suggest the three columns are EDZ / Moon / Io —
                # TODO confirm against the objective definitions.
                date = []
                edz = []
                moon = []
                io = []
                # X axis: days since the quest launch timestamp below.
                with open('thelie.csv', 'r') as csvfile:
                    plots = csv.reader(csvfile, delimiter=',')
                    for row in plots:
                        if len(row) < 4:
                            continue
                        diff = datetime.fromisoformat(row[0]) - datetime.fromisoformat('2020-05-12T17:00:00')
                        date.append(diff.total_seconds() / 86400)
                        edz.append(float(row[1]))
                        moon.append(float(row[2]))
                        io.append(float(row[3]))
                    # NOTE(review): close() inside `with` is redundant; the
                    # context manager already closes the file.
                    csvfile.close()
                diff = datetime.fromisoformat(newrow[0]) - datetime.fromisoformat('2020-05-12T17:00:00')
                date.append(diff.total_seconds() / 86400)
                edz.append(float(newrow[1]))
                moon.append(float(newrow[2]))
                io.append(float(newrow[3]))
                # NOTE(review): this append runs once per language, so the same
                # row is written len(langs) times per call — confirm intended.
                with open('thelie.csv', 'a') as csvfile:
                    writer = csv.writer(csvfile, delimiter=',')
                    writer.writerow(newrow)
                    csvfile.close()
                fig = plt.figure()
                ax = plt.axes()
                for spine in ax.spines.values():
                    spine.set_visible(False)
                plt.plot(date, edz, label=names[0])
                plt.plot(date, moon, label=names[1])
                plt.plot(date, io, label=names[2])
                ax.set_xlabel(self.translations[lang]['graph']['datefromstart'], color='#226197')
                ax.set_ylabel(self.translations[lang]['graph']['percentage'], color='#226197')
                ax.tick_params(colors='#bdbdff', direction='out')
                for tick in ax.get_xticklabels():
                    tick.set_color('#226197')
                for tick in ax.get_yticklabels():
                    tick.set_color('#226197')
                plt.grid(color='#bdbdff', linestyle='solid', axis='y')
                plt.legend()
                # Saved with a transparent background for embedding in Discord.
                plt.savefig('thelie-{}.png'.format(lang), format='png', transparent=True)
                plt.close(fig)
                self.data[lang]['thelie']['image'] = {
                    'url': 'attachment://thelie-{}.png'.format(lang)
                }
async def decode_modifiers(self, key, lang):
data = []
db_data = []
for mod_key in key['modifierHashes']:
mod_def = 'DestinyActivityModifierDefinition'
mod_json = await self.destiny.decode_hash(mod_key, mod_def, lang)
mod = {
'inline': True,
"name": mod_json['displayProperties']['name'],
"value": mod_json['displayProperties']['description']
}
data.append(mod)
db_data.append({
"name": mod_json['displayProperties']['name'],
"description": mod_json['displayProperties']['description'],
"icon": mod_json['displayProperties']['icon']
})
return [data, db_data]
    async def get_activities_response(self, name, lang=None, string=None, force=False):
        """Collect available activities for every character on the account.

        Fetches each character's activity component, then merges the
        per-character ``availableActivities`` lists (deduplicated by
        ``activityHash``) into one response dict.  Returns the merged JSON
        dict, or False when no character data could be fetched.
        """
        char_info = self.char_info

        activities = []
        hashes = set()  # activityHash values already merged, to avoid duplicates
        for char in char_info['charid']:
            activities_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/'. \
                format(char_info['platform'], char_info['membershipid'], char)
            activities_resp = await self.get_cached_json('activities_{}'.format(char), name, activities_url,
                                                         self.activities_params, lang, string, force=force)
            if activities_resp:
                activities.append(activities_resp)
        # Re-fetch the last character's payload to use as the merge target.
        # NOTE(review): this reuses `activities_url` from the final loop
        # iteration; it raises NameError if char_info['charid'] is empty.
        activities_json = await self.get_cached_json('activities_{}'.format(char_info['charid'][-1]), name,
                                                     activities_url, self.activities_params, lang, string, force=force)
        if activities_json:
            # start from an empty list; per-character entries are merged below
            activities_json['Response']['activities']['data']['availableActivities'].clear()
        if len(activities) == 0:
            return False
        else:
            if len(activities) > 0:  # always true on this branch
                for char_activities in activities:
                    for activity in char_activities['Response']['activities']['data']['availableActivities']:
                        if activity['activityHash'] not in hashes and activities_json:
                            activities_json['Response']['activities']['data']['availableActivities'].append(activity)
                            hashes.add(activity['activityHash'])
        return activities_json
async def get_player_metric(self, membership_type, membership_id, metric, is_global=False):
url = 'https://www.bungie.net/Platform/Destiny2/{}/Profile/{}/'.format(membership_type, membership_id)
metric_resp = await self.get_cached_json('playermetrics_{}'.format(membership_id),
'metric {} for {}'.format(metric, membership_id), url,
params=self.metric_params, change_msg=False, cache_only=is_global)
if metric_resp:
metric_json = metric_resp
try:
return metric_json['Response']['metrics']['data']['metrics'][str(metric)]['objectiveProgress'][
'progress']
except KeyError:
return -1
else:
return -1
    async def get_member_metric_wrapper(self, member, metric, is_global=False, tag=''):
        """Resolve a clan member's display name and fetch one metric for them.

        Returns [display_name, metric_value]; the value is -1 when unavailable
        (see get_player_metric).  For global leaderboards the clan tag is
        appended to disambiguate members from different clans.
        """
        member_id = member['destinyUserInfo']['membershipId']
        member_type = member['destinyUserInfo']['membershipType']
        # NOTE(review): `and False` deliberately disables the Bungie global
        # display-name branch, so LastSeenDisplayName is always used —
        # confirm whether this is still intended before removing it.
        if member['destinyUserInfo']['bungieGlobalDisplayName'] != '' and False:
            name = '{}#{}'.format(member['destinyUserInfo']['bungieGlobalDisplayName'], member['destinyUserInfo']['bungieGlobalDisplayNameCode'])
        else:
            name = member['destinyUserInfo']['LastSeenDisplayName']
        if is_global:
            player = '{} [{}]'.format(name, tag)
        else:
            player = name
        return [player, await self.get_player_metric(member_type, member_id, metric, is_global)]
    async def get_osiris_predictions(self, langs, forceget=False, force_info = None):
        """Build the Trials of Osiris embed for every requested language.

        Looks up Saint-14's vendor sales to find the adept ("flawless")
        reward, optionally overridden by *force_info* = [map, flawless, ...],
        then writes the embed to self.data[lang]['osiris'] and the site DB.
        """
        # NOTE(review): the rotation lists below are currently unused by the
        # visible code — presumably kept for a prediction feature; confirm.
        win3_rotation = ['?', '?', 'gloves', '?', '?', 'chest', '?', '?', 'boots', '?', '?', 'helmet', '?', '?', 'class']
        # win3_rotation = ['?', '?', '?']
        win5_rotation = ['?', 'gloves', '?', '?', 'chest', '?', '?', 'boots', '?', '?', 'helmet', '?', '?', 'class', '?']
        # win5_rotation = ['?', '?', '?']
        win7_rotation = ['gloves', '?', 'chest', '?', 'boots', '?', 'helmet', '?', 'class', '?']
        # win7_rotation = ['?', '?', '?']
        # flawless_rotation = ['gloves', 'chest', 'class', 'helmet', 'boots']
        flawless_rotation = ['?', '?', '?']
        mod_rotation = ['?', '?', '?']

        def find_adept(saint_resp):
            # The adept reward is the sale item costing exactly 50000
            # (of whatever currency); '?' when no such sale is found.
            flawless = '?'
            for item in saint_resp['Response']['sales']['data']:
                for cost in saint_resp['Response']['sales']['data'][item]['costs']:
                    if cost['quantity'] == 50000:
                        flawless = saint_resp['Response']['sales']['data'][item]['itemHash']
            return flawless

        # week number within the current season (unused below — see NOTE above)
        week_n = datetime.now(tz=timezone.utc) - await self.get_season_start()
        week_n = int(week_n.days / 7)

        saint_url = 'https://www.bungie.net/platform/Destiny2/{}/Profile/{}/Character/{}/Vendors/765357505/'.\
            format(self.char_info['platform'], self.char_info['membershipid'], self.char_info['charid'][0])
        saint_resp = await self.get_cached_json('saint', 'saint', saint_url, self.vendor_params, force=forceget)

        # force_info[1] may pin the flawless reward; '?' means "auto-detect"
        if force_info is not None:
            if force_info[1] != '?':
                flawless = force_info[1]
            else:
                flawless = find_adept(saint_resp)
        else:
            flawless = find_adept(saint_resp)

        for lang in langs:
            db_data = []
            self.data[lang]['osiris'] = {
                'thumbnail': {
                    'url': self.icon_prefix + '/common/destiny2_content/icons/DestinyActivityModeDefinition_'
                                              'e35792b49b249ca5dcdb1e7657ca42b6.png'
                },
                'fields': [],
                'color': 0xb69460,
                'type': "rich",
                'title': self.translations[lang]['msg']['osiris'],
                'footer': {'text': self.translations[lang]['msg']['resp_time']},
                'timestamp': datetime.utcnow().isoformat()
            }

            locale = self.translations[lang]['osiris']

            if flawless != '?':
                flawless_def = await self.destiny.decode_hash(flawless, 'DestinyInventoryItemDefinition', language=lang)
            else:
                # placeholder definition when the reward could not be resolved
                flawless_def = {
                    'displayProperties': {'name': '?'},
                    'itemTypeDisplayName': '?'
                }

            if force_info is None:
                self.data[lang]['osiris']['fields'] = [
                    {
                        'name': locale['map'],
                        'value': locale['?']
                    },
                    {
                        'name': locale['flawless'],
                        'value': '{} ({})'.format(flawless_def['displayProperties']['name'], flawless_def['itemTypeDisplayName'])
                    }
                ]
            else:
                # Translate each forced parameter: integers are manifest hashes
                # (activity, then perk, then inventory item as fallbacks),
                # known locale keys are translated, everything else is literal.
                info = []
                for parameter in force_info:
                    if isinstance(parameter, int):
                        try:
                            definition = await self.destiny.decode_hash(parameter, 'DestinyActivityDefinition', lang)
                            info.append(definition['displayProperties']['name'])
                        except pydest.PydestException:
                            try:
                                definition = await self.destiny.decode_hash(parameter, 'DestinySandboxPerkDefinition',
                                                                            lang)
                                info.append(definition['displayProperties']['name'])
                            except pydest.PydestException:
                                definition = await self.destiny.decode_hash(parameter, 'DestinyInventoryItemDefinition',
                                                                            lang)
                                info.append('{} ({})'.format(definition['displayProperties']['name'],
                                                             definition['itemTypeDisplayName']))
                    elif parameter in locale.keys():
                        info.append(locale[parameter])
                    else:
                        info.append(parameter)
                self.data[lang]['osiris']['fields'] = [
                    {
                        'name': locale['map'],
                        'value': info[0]
                    },
                    {
                        'name': locale['flawless'],
                        'value': '{} ({})'.format(flawless_def['displayProperties']['name'], flawless_def['itemTypeDisplayName'])
                    }
                ]

            # mirror the embed fields into the website database
            for field in self.data[lang]['osiris']['fields']:
                db_data.append({
                    'name': field['name'],
                    'description': field['value']
                })
            await self.write_to_db(lang, 'trials_of_osiris', db_data, order=6,
                                   name=self.translations[lang]['site']['osiris'])
async def drop_weekend_info(self, langs):
while True:
try:
data_db = self.data_pool.get_connection()
data_db.auto_reconnect = True
break
except mariadb.PoolError:
try:
self.data_pool.add_connection()
except mariadb.PoolError:
pass
await asyncio.sleep(0.125)
data_cursor = data_db.cursor()
for lang in langs:
data_cursor.execute('''DELETE FROM `{}` WHERE id=?'''.format(lang), ('trials_of_osiris',))
data_cursor.execute('''DELETE FROM `{}` WHERE id=?'''.format(lang), ('xur',))
data_db.commit()
data_cursor.close()
data_db.close()
    async def get_cached_json(self, cache_id, name, url, params=None, lang=None, string=None, change_msg=True,
                              force=False, cache_only=False, expires_in=1800):
        """Fetch a Bungie API response through the local aiosqlite cache.

        Returns the cached/refreshed JSON dict with an added 'timestamp' key,
        or False when nothing usable is available.  With cache_only=True only
        an unexpired cache entry would be used (currently it always returns
        False before reading the entry — see NOTE below).  force=True always
        re-fetches; expires_in is the cache TTL in seconds.
        """
        # The commented-out code throughout is the previous mariadb pool
        # implementation, kept during the migration to aiosqlite.
        # while True:
        #     try:
        #         cache_connection = self.cache_pool.get_connection()
        #         cache_connection.auto_reconnect = True
        #         break
        #     except mariadb.PoolError:
        #         try:
        #             self.cache_pool.add_connection()
        #         except mariadb.PoolError:
        #             pass
        #         await asyncio.sleep(0.125)
        # cache_cursor = cache_connection.cursor()
        cache_connection = self.cache_db
        cache_cursor = await cache_connection.cursor()
        try:
            await cache_cursor.execute('''SELECT json, expires, timestamp from cache WHERE id=?''', (cache_id,))
            cached_entry = await cache_cursor.fetchone()
            if cached_entry is not None:
                expired = datetime.now().timestamp() > cached_entry[1]
            else:
                expired = True
        except aiosqlite.OperationalError:
        # except mariadb.Error:
            # table does not exist yet — treat as a cache miss
            expired = True
        # NOTE(review): this early return makes cache_only always yield False,
        # even when a valid cached_entry was just read — confirm intent.
        if cache_only:
            await cache_cursor.close()
            # await cache_connection.close()
            return False
        if (expired or force) and not cache_only:
            response = await self.get_bungie_json(name, url, params, lang, string, change_msg)
            timestamp = datetime.utcnow().isoformat()
            if response:
                response_json = response
                # First run: create the table/index, then upsert the entry.
                try:
                    await cache_cursor.execute(
                        '''CREATE TABLE cache (id text, expires integer, json text, timestamp text);''')
                    await cache_cursor.execute('''CREATE UNIQUE INDEX cache_id ON cache(id(256))''')
                    await cache_cursor.execute('''INSERT IGNORE INTO cache VALUES (?,?,?,?)''',
                                               (cache_id, int(datetime.now().timestamp() + expires_in), json.dumps(response_json),
                                                timestamp))
                except aiosqlite.OperationalError:
                # except mariadb.Error:
                    # table exists — try the legacy-schema migration path
                    try:
                        await cache_cursor.execute('''ALTER TABLE cache ADD COLUMN timestamp text''')
                        await cache_cursor.execute('''INSERT IGNORE INTO cache VALUES (?,?,?,?)''',
                                                   (cache_id, int(datetime.now().timestamp() + expires_in),
                                                    json.dumps(response_json), timestamp))
                    # except mariadb.Error:
                    except aiosqlite.OperationalError:
                        pass
                # try:
                # insert-or-ignore followed by update == upsert of the entry
                await cache_cursor.execute('''INSERT OR IGNORE INTO cache VALUES (?,?,?,?)''',
                                           (cache_id, int(datetime.now().timestamp() + expires_in), json.dumps(response_json),
                                            timestamp))
                # except mariadb.Error:
                #     pass
                # try:
                await cache_cursor.execute('''UPDATE cache SET expires=?, json=?, timestamp=? WHERE id=?''',
                                           (int(datetime.now().timestamp() + expires_in), json.dumps(response_json), timestamp,
                                            cache_id))
                # except mariadb.Error:
                #     pass
            else:
                # network fetch failed and cache is stale/missing
                await cache_cursor.close()
                # await cache_connection.close()
                return False
        else:
            # cache hit: serve the stored payload and its original timestamp
            if cached_entry is not None:
                timestamp = cached_entry[2]
                response_json = json.loads(cached_entry[0])
            else:
                await cache_cursor.close()
                # await cache_connection.close()
                return False
        await cache_cursor.close()
        await cache_connection.commit()
        # await cache_connection.close()
        response_json['timestamp'] = timestamp
        return response_json
    async def get_clan_leaderboard(self, clan_ids, metric, number, is_time=False, is_kda=False, is_global=False):
        """Build a leaderboard for one metric across one or more clans.

        Gathers [name, value] pairs for every member, drops non-positive
        values, merges tied entries into one multi-line row, then prefixes
        each row with its rank and truncates to *number* places.  is_time
        formats values (ms) as H:MM:SS; is_kda divides by 100.
        Returns a list of [rank, names, value] rows (or the raw pair list
        when nothing qualified).
        """
        metric_list = []
        for clan_id in clan_ids:
            url = 'https://www.bungie.net/Platform/GroupV2/{}/Members/'.format(clan_id)
            clan_members_resp = await self.get_cached_json('clanmembers_{}'.format(clan_id), 'clan members', url, change_msg=False,
                                                           cache_only=is_global)
            url = 'https://www.bungie.net/Platform/GroupV2/{}/'.format(clan_id)
            clan_resp = await self.get_cached_json('clan_{}'.format(clan_id), 'clan info', url)
            clan_json = clan_resp
            try:
                code = clan_json['ErrorCode']
            except KeyError:
                code = 0
            except TypeError:
                # get_cached_json returned False
                code = 0
            if code == 1:
                tag = clan_json['Response']['detail']['clanInfo']['clanCallsign']
            else:
                tag = ''
            if clan_members_resp and type(clan_json) == dict:
                clan_json = clan_members_resp
                try:
                    # fetch every member's metric concurrently
                    tasks = []
                    for member in clan_json['Response']['results']:
                        task = asyncio.ensure_future(self.get_member_metric_wrapper(member, metric, is_global, tag))
                        tasks.append(task)
                    results = await asyncio.gather(*tasks)
                    metric_list = [*metric_list, *results]
                except KeyError:
                    pass

        if len(metric_list) > 0:
            try:
                if is_time:
                    # ascending for times; strip zero/negative (missing) values
                    metric_list.sort(reverse=False, key=lambda x: x[1])
                    while metric_list[0][1] <= 0:
                        metric_list.pop(0)
                else:
                    metric_list.sort(reverse=True, key=lambda x: x[1])
                    while metric_list[-1][1] <= 0:
                        metric_list.pop(-1)
            except IndexError:
                # every entry was <= 0
                return []
            # Merge ties: equal adjacent values are collapsed into one row
            # whose name cell holds both names, newline-separated.
            # NOTE(review): this mutates metric_list while iterating a slice
            # copy and relies on list.index() of (possibly duplicated) rows —
            # fragile; `delta` is never used.
            for place in metric_list[1:]:
                delta = 0
                try:
                    index = metric_list.index(place)
                except ValueError:
                    continue
                if metric_list[index][1] == metric_list[index - 1][1]:
                    metric_list[index][0] = '{}\n{}'.format(metric_list[index - 1][0], metric_list[index][0])
                    metric_list.pop(index - 1)
            # Prefix each row with its competition rank; merged rows advance
            # the rank by the number of names they contain.
            indexed_list = metric_list.copy()
            i = 1
            for place in indexed_list:
                old_i = i
                index = indexed_list.index(place)
                indexed_list[index] = [i, *indexed_list[index]]
                i = i + len(indexed_list[index][1].splitlines())
            while indexed_list[-1][0] > number:
                indexed_list.pop(-1)
            if is_time:
                for place in indexed_list:
                    index = indexed_list.index(place)
                    indexed_list[index][2] = str(timedelta(minutes=(indexed_list[index][2] / 60000))).split('.')[0]
            if is_kda:
                for place in indexed_list:
                    index = indexed_list.index(place)
                    indexed_list[index][2] = indexed_list[index][2] / 100
            return indexed_list[:old_i]
        else:
            return metric_list
    async def get_last_activity(self, member, lang):
        """Describe what a clan member is currently doing in-game.

        Returns [display_name, activity_string]; the activity string is
        localized via the Destiny manifest and suffixed with the elapsed
        time, or empty when no character activity data is available.
        """
        status_change = datetime.fromtimestamp(float(member['lastOnlineStatusChange']))
        now = datetime.utcnow()
        membership_id = member['destinyUserInfo']['membershipId']
        membership_type = member['destinyUserInfo']['membershipType']
        url = 'https://www.bungie.net/Platform/Destiny2/{}/Profile/{}/'.format(membership_type,
                                                                               membership_id)
        # component 204 = CharacterActivities
        profile_resp = await self.get_bungie_json('playeractivity_{}'.format(membership_id),
                                                  url, params={'components': 204}, change_msg=False)
        activity_string = ''
        if profile_resp:
            try:
                test = profile_resp['Response']['characterActivities']['data']
            except KeyError:
                # privacy settings can hide the activity component
                return [member['destinyUserInfo']['LastSeenDisplayName'], '']
            for char in profile_resp['Response']['characterActivities']['data']:
                char_resp = profile_resp['Response']['characterActivities']['data'][char]
                # currentActivityHash == 0 means the character is not in an activity
                if char_resp['currentActivityHash'] != 0:
                    activity = await self.destiny.decode_hash(char_resp['currentActivityHash'],
                                                              'DestinyActivityDefinition', language=lang)
                    try:
                        activity_mode = await self.destiny.decode_hash(char_resp['currentActivityModeHash'],
                                                                       'DestinyActivityModeDefinition', language=lang)
                    except pydest.PydestException:
                        activity_mode = {'displayProperties': {'name': ''}}
                    activity_type = await self.destiny.decode_hash(activity['activityTypeHash'],
                                                                   'DestinyActivityTypeDefinition', language=lang)
                    place = await self.destiny.decode_hash(activity['placeHash'], 'DestinyPlaceDefinition',
                                                           language=lang)
                    # The hard-coded hashes below pick a formatting per
                    # activity category (orbit, PvP modes, strikes, patrols…).
                    # NOTE(review): their exact meanings aren't derivable from
                    # this code — verify against the Destiny manifest.
                    if activity['activityTypeHash'] in [332181804] and char_resp['currentActivityHash'] not in [
                            82913930]:
                        activity_string = activity['displayProperties']['name']
                    elif char_resp['currentActivityHash'] in [82913930]:
                        activity_string = place['displayProperties']['name']
                    elif activity['activityTypeHash'] in [4088006058, 2371050408]:
                        activity_string = '{}: {}: {}'.format(activity_type['displayProperties']['name'],
                                                              activity_mode['displayProperties']['name'],
                                                              activity['displayProperties']['name'])
                    elif activity['activityTypeHash'] in [4110605575, 1686739444, 248695599, 2043403989, 2112637710] \
                            and char_resp['currentActivityModeHash'] not in [2166136261]:
                        activity_string = '{}: {}'.format(activity_mode['displayProperties']['name'],
                                                          activity['displayProperties']['name'])
                    elif activity['activityTypeHash'] in [3497767639]:
                        activity_string = '{}: {}'.format(activity_mode['displayProperties']['name'],
                                                          place['displayProperties']['name'])
                    else:
                        activity_string = '{}'.format(activity['displayProperties']['name'])
                    break
            # append elapsed time since the activity started
            length = now - datetime.fromisoformat(char_resp['dateActivityStarted'].replace('Z', ''))
            activity_string = '{} ({})'.format(activity_string, str(timedelta(seconds=length.seconds)))
        return [member['destinyUserInfo']['LastSeenDisplayName'], activity_string]
async def get_online_clan_members(self, clan_id, lang):
url = 'https://www.bungie.net/Platform/GroupV2/{}/Members/'.format(clan_id)
clan_members_resp = await self.get_cached_json('clanmembers_{}'.format(clan_id), 'clan members', url,
change_msg=False, force=True)
header = [[self.translations[lang]['online']['nick'], self.translations[lang]['online']['since']]]
if clan_members_resp:
tasks = []
for member in clan_members_resp['Response']['results']:
if member['isOnline']:
task = asyncio.ensure_future(self.get_last_activity(member, lang))
tasks.append(task)
online_members = await asyncio.gather(*tasks)
online_members = [*header, *online_members]
else:
online_members = [[self.translations[lang]['online']['error'], self.translations[lang]['online']['error_t']]]
return online_members
    async def iterate_clans(self, max_id):
        """Crawl Bungie group ids sequentially and store matching clans.

        Resumes after the highest id already stored in the ``clans`` table
        and walks up to *max_id*, inserting every clan whose capabilities
        flag bit 16 is set.  Returns 'Finished', a Bungie error code, or an
        error payload string on unexpected responses.
        """
        # acquire a pooled mariadb connection, growing the pool on demand
        while True:
            try:
                cache_connection = self.cache_pool.get_connection()
                cache_connection.auto_reconnect = True
                break
            except mariadb.PoolError:
                try:
                    self.cache_pool.add_connection()
                except mariadb.PoolError:
                    pass
                await asyncio.sleep(0.125)
        clan_cursor = cache_connection.cursor()
        min_id = 1
        try:
            clan_cursor.execute('''CREATE TABLE clans (id INTEGER, json JSON)''')
            # clan_db.commit()
        except mariadb.Error:
            # table already exists: resume from the highest stored id + 1
            # clan_cursor = clan_db.cursor()
            clan_cursor.execute('''SELECT id FROM clans ORDER by id DESC''')
            min_id_tuple = clan_cursor.fetchall()
            if min_id_tuple is not None:
                min_id = min_id_tuple[0][0] + 1
        for clan_id in range(min_id, max_id+1):
            url = 'https://www.bungie.net/Platform/GroupV2/{}/'.format(clan_id)
            clan_resp = await self.get_cached_json('clan_{}'.format(clan_id), '{} clan info'.format(clan_id), url,
                                                   expires_in=86400)
            clan_json = clan_resp
            if not clan_json:
                continue
                # clan_cursor.close()
                # cache_connection.close()
                # return 'unable to fetch clan {}'.format(clan_id)
            try:
                code = clan_json['ErrorCode']
                # print('{} ec {}'.format(clan_id, clan_json['ErrorCode']))
            except KeyError:
                # unexpected payload shape: abort and surface it for debugging
                code = 0
                clan_cursor.close()
                cache_connection.close()
                return '```{}```'.format(json.dumps(clan_json))
            # 621/622/686: group not found / disabled — skip silently
            if code in [621, 622, 686]:
                continue
            if code != 1:
                clan_cursor.close()
                cache_connection.close()
                return code
            # print('{} {}'.format(clan_id, clan_json['Response']['detail']['features']['capabilities'] & 16))
            # capability bit 16 marks the clans this crawler cares about
            if clan_json['Response']['detail']['features']['capabilities'] & 16:
                clan_cursor.execute('''INSERT INTO clans VALUES (?,?)''', (clan_id, json.dumps(clan_json)))
                # clan_db.commit()
        clan_cursor.close()
        cache_connection.close()
        return 'Finished'
    async def iterate_clans_new(self, max_id):
        """Crawl Bungie group ids in concurrent batches of 1000 and store
        matching clans (capabilities bit 16) in the ``clans`` table.

        Async/aiosqlite successor to iterate_clans.  Returns 'Finished', a
        Bungie error code, or an error payload string.
        """
        # tracemalloc.start()
        # snapshot1 = tracemalloc.take_snapshot()
        # The commented-out code is the previous mariadb pool implementation.
        # while True:
        #     try:
        #         cache_connection = self.cache_pool.get_connection()
        #         cache_connection.auto_reconnect = True
        #         break
        #     except mariadb.PoolError:
        #         try:
        #             self.cache_pool.add_connection()
        #         except mariadb.PoolError:
        #             pass
        #         await asyncio.sleep(0.125)
        # clan_cursor = cache_connection.cursor()
        cache_connection = self.cache_db
        clan_cursor = await cache_connection.cursor()
        min_id = 1
        try:
            await clan_cursor.execute('''CREATE TABLE clans (id INTEGER, json JSON)''')
            # clan_db.commit()
        # except mariadb.Error:
        except aiosqlite.OperationalError:
            # table already exists: resume from the highest stored id + 1
            # clan_cursor = clan_db.cursor()
            await clan_cursor.execute('''SELECT id FROM clans ORDER by id DESC''')
            min_id_tuple = await clan_cursor.fetchall()
            if min_id_tuple is not None:
                min_id = min_id_tuple[0][0] + 1
        # split [min_id, max_id] into ~1000-id batches
        # NOTE(review): max(ranges) raises ValueError when min_id >= max_id,
        # and the batch endpoints overlap by one id — confirm intended.
        ranges = list(range(min_id, max_id, 1000))
        if max(ranges) != max_id:
            ranges.append(max_id)
        for max_id_ranged in ranges[1:]:
            min_id = ranges[ranges.index(max_id_ranged) - 1]
            max_id = max_id_ranged
            # fetch the whole batch concurrently
            tasks = []
            for clan_id in range(min_id, max_id+1):
                task = asyncio.ensure_future(self.get_cached_json('clan_{}'.format(clan_id), '{} clan info'.format(clan_id),
                                                                  'https://www.bungie.net/Platform/GroupV2/{}/'.
                                                                  format(clan_id), expires_in=86400))
                tasks.append(task)
            responses = await asyncio.gather(*tasks)
            a = ''
            for clan_json in responses:
                if not clan_json:
                    continue
                    # clan_cursor.close()
                    # cache_connection.close()
                    # return 'unable to fetch clan {}'.format(clan_id)
                try:
                    code = clan_json['ErrorCode']
                    # print('{} ec {}'.format(clan_id, clan_json['ErrorCode']))
                except KeyError:
                    # unexpected payload shape: abort and surface it for debugging
                    await clan_cursor.close()
                    # await cache_connection.close()
                    return '```{}```'.format(json.dumps(clan_json))
                # 621/622/686: group not found / disabled — skip silently
                if code in [621, 622, 686]:
                    continue
                if code != 1:
                    await clan_cursor.close()
                    # await cache_connection.close()
                    return code
                # print('{} {}'.format(clan_id, clan_json['Response']['detail']['features']['capabilities'] & 16))
                # capability bit 16 marks the clans this crawler cares about
                if clan_json['Response']['detail']['features']['capabilities'] & 16:
                    clan_id = clan_json['Response']['detail']['groupId']
                    await clan_cursor.execute('''INSERT INTO clans VALUES (?,?)''', (clan_id, json.dumps(clan_json)))
            await cache_connection.commit()
            # clan_db.commit()
        await clan_cursor.close()
        # await cache_connection.close()
        # snapshot2 = tracemalloc.take_snapshot()
        # top_stats = snapshot2.compare_to(snapshot1, 'lineno')
        # print(top_stats)
        return 'Finished'
async def fetch_players(self):
clan_db = mariadb.connect(host=self.api_data['db_host'], user=self.api_data['cache_login'],
password=self.api_data['pass'], port=self.api_data['db_port'],
database=self.api_data['cache_name'])
clan_cursor = clan_db.cursor()
try:
clan_cursor.execute('''CREATE TABLE clans (id INTEGER, json JSON)''')
# clan_db.commit()
except mariadb.Error:
clan_cursor.execute('''SELECT id FROM clans ORDER by id DESC''')
min_id_tuple = clan_cursor.fetchall()
if min_id_tuple is not None:
min_id = min_id_tuple[0][0] + 1
for clan_id_tuple in min_id_tuple:
clan_id = clan_id_tuple[0]
url = 'https://www.bungie.net/Platform/GroupV2/{}/'.format(clan_id)
clan_resp = await self.get_cached_json('clan_{}'.format(clan_id), '{} clan check'.format(clan_id), url,
expires_in=86400)
clan_json = clan_resp
try:
code = clan_json['ErrorCode']
except KeyError:
continue
if code in [622, 621]:
try:
clan_cursor.execute('''DELETE FROM clans WHERE id=?''', (clan_id,))
# clan_db.commit()
except mariadb.Error:
pass
clan_db.close()
return 'Finished'
async def token_update(self):
# check to see if token.json exists, if not we have to start with oauth
try:
f = open('token.json', 'r')
except FileNotFoundError:
if self.is_oauth:
self.oauth.get_oauth()
else:
print('token file not found! run the script with --oauth or add a valid token.js file!')
return False
try:
f = open('token.json', 'r')
self.token = json.loads(f.read())
except json.decoder.JSONDecodeError:
if self.is_oauth:
self.oauth.get_oauth()
else:
print('token file invalid! run the script with --oauth or add a valid token.js file!')
return False
# check if token has expired, if so we have to oauth, if not just refresh the token
if self.token['expires'] < time.time():
if self.is_oauth:
self.oauth.get_oauth()
else:
print('refresh token expired! run the script with --oauth or add a valid token.js file!')
return False
else:
await self.refresh_token(self.token['refresh'])
| 51.470787
| 210
| 0.495172
|
4a00c5766513e41ae3b3d5d456b8b258f6bb47c6
| 7,120
|
py
|
Python
|
Lib/copy_reg.py
|
Whosemario/stackless-python
|
437aca219bbb82fb7f01d05a22d061cdb6f28ea1
|
[
"PSF-2.0"
] | 1
|
2019-05-14T05:05:42.000Z
|
2019-05-14T05:05:42.000Z
|
Lib/copy_reg.py
|
Whosemario/stackless-python
|
437aca219bbb82fb7f01d05a22d061cdb6f28ea1
|
[
"PSF-2.0"
] | 4
|
2020-03-17T03:29:59.000Z
|
2021-06-10T21:01:47.000Z
|
venv/Lib/copy_reg.py
|
wenyueFan/devopsOfBk
|
ab5f53f2296101ecb40f8f1b3eead7aa736d12fa
|
[
"Apache-2.0"
] | 2
|
2018-07-15T06:35:21.000Z
|
2019-05-14T05:05:31.000Z
|
"""Helper to provide extensibility for pickle/cPickle.
This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""
from types import ClassType as _ClassType
__all__ = ["pickle", "constructor",
"add_extension", "remove_extension", "clear_extension_cache"]
dispatch_table = {}  # maps type -> reduction function; consulted by pickle/cPickle

def pickle(ob_type, pickle_function, constructor_ob=None):
    """Register *pickle_function* as the reduction function for *ob_type*.

    Only extension (built-in) types belong here; old-style classes are
    rejected because pickle handles them natively.
    """
    if type(ob_type) is _ClassType:
        raise TypeError("copy_reg is not intended for use with classes")

    if not hasattr(pickle_function, '__call__'):
        raise TypeError("reduction functions must be callable")
    dispatch_table[ob_type] = pickle_function

    # The constructor_ob function is a vestige of safe for unpickling.
    # There is no reason for the caller to pass it anymore.
    if constructor_ob is not None:
        constructor(constructor_ob)
def constructor(object):
    """Sanity-check a registered constructor: it must be callable."""
    if hasattr(object, '__call__'):
        return
    raise TypeError("constructors must be callable")
# Example: provide pickling support for complex numbers.

try:
    complex
except NameError:
    # built without complex support; nothing to register
    pass

else:

    def pickle_complex(c):
        # reduce a complex value to (constructor, (real, imag))
        return complex, (c.real, c.imag)

    pickle(complex, pickle_complex, complex)
# Support for pickling new-style objects
def _reconstructor(cls, base, state):
if base is object:
obj = object.__new__(cls)
else:
obj = base.__new__(cls, state)
if base.__init__ != object.__init__:
base.__init__(obj, state)
return obj
_HEAPTYPE = 1<<9  # Py_TPFLAGS_HEAPTYPE: set on types created at runtime

# Python code for object.__reduce_ex__ for protocols 0 and 1

def _reduce_ex(self, proto):
    """Reduce a new-style instance for pickle protocols 0 and 1.

    Finds the closest built-in (non-heap) base in the MRO, derives the
    construction state from it, and returns (_reconstructor, args[, dict])
    where dict is __getstate__()'s result or the instance __dict__.
    (Python 2 code: note the old raise-with-comma syntax below.)
    """
    assert proto < 2
    # walk the MRO for the first statically-allocated (built-in) base
    for base in self.__class__.__mro__:
        if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
            break
    else:
        base = object # not really reachable
    if base is object:
        state = None
    else:
        if base is self.__class__:
            raise TypeError, "can't pickle %s objects" % base.__name__
        ## Stackless addition BEGIN
        # if base is only supported by our shadow types in copy_reg,
        # we need to substitute here:
        reducer = dispatch_table.get(base)
        if reducer and reducer.__module__ == "stackless._wrap":
            base = reducer(self)[0]
        ## Stackless addition END
        state = base(self)
    args = (self.__class__, base, state)
    try:
        getstate = self.__getstate__
    except AttributeError:
        # no __getstate__: refuse slot-only classes, else use __dict__
        if getattr(self, "__slots__", None):
            raise TypeError("a class that defines __slots__ without "
                            "defining __getstate__ cannot be pickled")
        try:
            dict = self.__dict__
        except AttributeError:
            dict = None
    else:
        dict = getstate()
    if dict:
        return _reconstructor, args, dict
    else:
        return _reconstructor, args
# Helper for __reduce_ex__ protocol 2
def __newobj__(cls, *args):
    """Helper for pickle protocol 2: recreate an object via cls.__new__."""
    return cls.__new__(cls, *args)
def _slotnames(cls):
    """Return a list of slot names for a given class.

    This needs to find slots defined by the class and its bases, so we
    can't simply return the __slots__ attribute.  We must walk down
    the Method Resolution Order and concatenate the __slots__ of each
    class found there.  (This assumes classes don't modify their
    __slots__ attribute to misrepresent their slots after the class is
    defined.)
    """

    # Get the value from a cache in the class if possible
    names = cls.__dict__.get("__slotnames__")
    if names is not None:
        return names

    # Not cached -- calculate the value
    names = []
    if not hasattr(cls, "__slots__"):
        # This class has no slots
        pass
    else:
        # Slots found -- gather slot names from all base classes
        for c in cls.__mro__:
            if "__slots__" in c.__dict__:
                slots = c.__dict__['__slots__']
                # if class has a single slot, it can be given as a string
                if isinstance(slots, basestring):
                    slots = (slots,)
                for name in slots:
                    # special descriptors
                    if name in ("__dict__", "__weakref__"):
                        continue
                    # mangled names
                    elif name.startswith('__') and not name.endswith('__'):
                        # private slots are stored name-mangled: _Class__name
                        names.append('_%s%s' % (c.__name__, name))
                    else:
                        names.append(name)

    # Cache the outcome in the class if at all possible
    try:
        cls.__slotnames__ = names
    except:
        pass # But don't die if we can't

    return names
# A registry of extension codes. This is an ad-hoc compression
# mechanism. Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it. Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context. (There are also some codes reserved for local use that
# don't have this restriction.) Codes are positive ints; 0 is
# reserved.
_extension_registry = {}                # key -> code
_inverted_registry = {}                 # code -> key
_extension_cache = {}                   # code -> object
# Don't ever rebind those names:  cPickle grabs a reference to them when
# it's initialized, and won't see a rebinding.

def add_extension(module, name, code):
    """Register an extension code.

    Keys are (module, name) tuples; codes are ints in [1, 0x7fffffff].
    Re-registering the identical mapping is a no-op; any conflicting
    registration raises ValueError.  (Python 2 raise syntax below.)
    """
    code = int(code)
    if not 1 <= code <= 0x7fffffff:
        raise ValueError, "code out of range"
    key = (module, name)
    if (_extension_registry.get(key) == code and
        _inverted_registry.get(code) == key):
        return # Redundant registrations are benign
    if key in _extension_registry:
        raise ValueError("key %s is already registered with code %s" %
                         (key, _extension_registry[key]))
    if code in _inverted_registry:
        raise ValueError("code %s is already in use for key %s" %
                         (code, _inverted_registry[code]))
    # keep the forward and inverse maps in sync
    _extension_registry[key] = code
    _inverted_registry[code] = key
def remove_extension(module, name, code):
    """Unregister an extension code.  For testing only."""
    key = (module, name)
    registered = (_extension_registry.get(key) == code and
                  _inverted_registry.get(code) == key)
    if not registered:
        raise ValueError("key %s is not registered with code %s" %
                         (key, code))
    del _extension_registry[key]
    del _inverted_registry[code]
    # drop any cached object for this code as well
    _extension_cache.pop(code, None)
def clear_extension_cache():
    # Drop all cached extension objects; the registry mappings are untouched.
    _extension_cache.clear()
# Standard extension code assignments
# Reserved ranges
# First Last Count Purpose
# 1 127 127 Reserved for Python standard library
# 128 191 64 Reserved for Zope
# 192 239 48 Reserved for 3rd parties
# 240 255 16 Reserved for private use (will never be assigned)
# 256 Inf Inf Reserved for future assignment
# Extension codes are assigned by the Python Software Foundation.
| 34.066986
| 75
| 0.638062
|
4a00c5b387e91f7ca5bab68aa58527658f59d4a1
| 14,554
|
py
|
Python
|
Agent_PPO.py
|
supersglzc/Social-Learning
|
cea6551a9c587a1f839f7122e8e05d680bfad2e6
|
[
"MIT"
] | null | null | null |
Agent_PPO.py
|
supersglzc/Social-Learning
|
cea6551a9c587a1f839f7122e8e05d680bfad2e6
|
[
"MIT"
] | null | null | null |
Agent_PPO.py
|
supersglzc/Social-Learning
|
cea6551a9c587a1f839f7122e8e05d680bfad2e6
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
from collections import deque
import numpy.random as rd
from env import IntersectionEnv
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from utils import MovingAverage, RewardTracker
# Experiment hyper-parameters
SEED = 42                # RNG seed (NOTE(review): not applied to torch/numpy in this chunk — confirm)
IMAGE = True             # use image observations from the environment
GAMMA = 0.99             # discount factor
BATCH_SIZE = 64          # minibatch size for updates
MEAN_REWARD_EVERY = 300  # Episodes
FRAME_STACK_SIZE = 3     # consecutive frames stacked into one observation
N_PREDATOR = 2           # number of predator agents in the environment
N_agents = 2             # total learning agents

# shared environment instance used by every agent
env = IntersectionEnv(n_predator=N_PREDATOR, image=IMAGE)
class PPOReplayBuffer:
def __init__(self, max_len, state_dim, action_dim, if_discrete):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.max_len = max_len
self.now_len = 0
self.next_idx = 0
self.if_full = False
self.action_dim = 1 if if_discrete else action_dim
other_dim = 1 + 1 + self.action_dim + action_dim
self.buf_other = np.empty((max_len, other_dim), dtype=np.float32)
self.buf_state = np.empty((max_len, 5, 5, 9), dtype=np.float32)
def append_buffer(self, state, other):
self.buf_state[self.next_idx] = state
self.buf_other[self.next_idx] = other
self.next_idx += 1
if self.next_idx >= self.max_len:
self.if_full = True
self.next_idx = 0
def extend_buffer(self, state, other): # CPU array to CPU array
size = len(other)
next_idx = self.next_idx + size
if next_idx > self.max_len:
if next_idx > self.max_len:
self.buf_state[self.next_idx:self.max_len] = state[:self.max_len - self.next_idx]
self.buf_other[self.next_idx:self.max_len] = other[:self.max_len - self.next_idx]
self.if_full = True
next_idx = next_idx - self.max_len
self.buf_state[0:next_idx] = state[-next_idx:]
self.buf_other[0:next_idx] = other[-next_idx:]
else:
self.buf_state[self.next_idx:next_idx] = state
self.buf_other[self.next_idx:next_idx] = other
self.next_idx = next_idx
def extend_buffer_from_list(self, trajectory_list):
state_ary = np.array([item[0] for item in trajectory_list], dtype=np.float32)
other_ary = np.array([item[1] for item in trajectory_list], dtype=np.float32)
self.extend_buffer(state_ary, other_ary)
def sample_batch(self, batch_size):
indices = rd.randint(self.now_len - 1, size=batch_size)
r_m_a = self.buf_other[indices]
return (r_m_a[:, 0:1], # reward
r_m_a[:, 1:2], # mask = 0.0 if done else gamma
r_m_a[:, 2:], # action
self.buf_state[indices], # state
self.buf_state[indices + 1]) # next_state
def sample_all(self):
all_other = torch.as_tensor(self.buf_other[:self.now_len], device=self.device)
return (all_other[:, 0], # reward
all_other[:, 1], # mask = 0.0 if done else gamma
all_other[:, 2:2 + self.action_dim], # action
all_other[:, 2 + self.action_dim:], # noise
self.buf_state[:self.now_len]) # state
def update_now_len(self):
self.now_len = self.max_len if self.if_full else self.next_idx
def empty_buffer(self):
self.next_idx = 0
self.now_len = 0
self.if_full = False
class ActorDiscretePPO(nn.Module):
    """Discrete-action PPO policy network.

    A small conv + MLP stack maps a (state_dim, 5, 5) observation to action
    logits; categorical sampling and log-probability helpers support the
    PPO update.
    """

    def __init__(self, state_dim, action_dim):
        super().__init__()
        self.net = nn.Sequential(nn.Conv2d(state_dim, 32, kernel_size=(5, 5), stride=(1, 1)), nn.Tanh(),
                                 nn.Dropout(0.2),
                                 nn.Flatten(),
                                 nn.Linear(32, 16), nn.Tanh(),
                                 nn.Linear(16, action_dim), )
        layer_norm(self.net[-1], std=0.1)  # output layer for action

        # NOTE(review): a_logstd / sqrt_2pi_log look like leftovers from a
        # continuous-action actor — they are unused by the methods below.
        self.a_logstd = nn.Parameter(torch.zeros((1, action_dim)) - 0.5, requires_grad=True)
        self.sqrt_2pi_log = np.log(np.sqrt(2 * np.pi))
        self.soft_max = nn.Softmax(dim=1)
        self.Categorical = torch.distributions.Categorical

    def forward(self, state):
        # raw (unnormalised) action logits
        return self.net(state)

    def get_action_prob(self, state):
        """Sample one action per batch row; returns (action indices, probabilities)."""
        action_prob = self.soft_max(self.net(state))
        action_int = torch.multinomial(action_prob, 1, True)
        return action_int.squeeze(1), action_prob

    def get_new_logprob_entropy(self, state, action):
        """Log-prob of *action* under the current policy, plus mean entropy."""
        a_prob = self.soft_max(self.net(state))
        dist = self.Categorical(a_prob)
        a_int = action.squeeze(1).long()
        return dist.log_prob(a_int), dist.entropy().mean()

    def get_old_logprob(self, action, a_prob):
        """Log-prob of *action* under previously stored probabilities."""
        dist = self.Categorical(a_prob)
        return dist.log_prob(action.long().squeeze(1))
class CriticAdv(nn.Module):
    """State-value network (critic) used for PPO advantage estimation.

    Mirrors the actor's CNN trunk but ends in a single value head.
    """

    def __init__(self, state_dim):
        super().__init__()
        # state_dim = input channels; same 5x5 spatial-input assumption as the
        # actor (Linear(32, 16) after Flatten) — TODO confirm against the env.
        self.net = nn.Sequential(nn.Conv2d(state_dim, 32, kernel_size=(5, 5), stride=(1, 1)), nn.Tanh(),
                                 nn.Dropout(0.2),
                                 nn.Flatten(),
                                 nn.Linear(32, 16), nn.Tanh(),
                                 nn.Linear(16, 1), )
        layer_norm(self.net[-1], std=0.5)  # output layer for Q value

    def forward(self, state):
        """Return the estimated state value for a batch of states."""
        return self.net(state)  # Q value
def layer_norm(layer, std=1.0, bias_const=1e-6):
    """Initialise a layer in place: orthogonal weights (gain `std`), constant bias."""
    init = torch.nn.init
    init.orthogonal_(layer.weight, std)
    init.constant_(layer.bias, bias_const)
class AgentPPO:
    """PPO agent bundling a discrete actor, a critic and an on-policy buffer.

    NOTE(review): __init__ reads module-level globals `env`, FRAME_STACK_SIZE,
    MEAN_REWARD_EVERY, RewardTracker, PPOReplayBuffer and MovingAverage.
    """

    def __init__(self, agent_id, player, role, punishment, if_per_or_gae=False):
        super().__init__()
        # PPO hyper-parameters.
        self.ratio_clip = 0.20  # ratio.clamp(1 - clip, 1 + clip)
        self.lambda_entropy = 0.02  # entropy bonus weight
        self.lambda_gae_adv = 0.98  # GAE lambda (used only when if_per_or_gae)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Choose the return/advantage estimator once, at construction time.
        self.compute_reward = self.compute_reward_gae if if_per_or_gae else self.compute_reward_raw
        self.agent_id = agent_id
        self.role = role
        self.player = player
        self.punishment = punishment
        action_dim = env.action_space.n  # 2
        state_dim = 3 * FRAME_STACK_SIZE
        self.act = ActorDiscretePPO(state_dim, action_dim).to(self.device)
        self.cri = CriticAdv(state_dim).to(self.device)
        self.criterion = torch.nn.SmoothL1Loss()
        # Single optimizer covering both actor and critic parameters.
        self.optimizer = torch.optim.Adam([{'params': self.act.parameters(), 'lr': 0.001},
                                           {'params': self.cri.parameters(), 'lr': 0.001}])
        self.reward_tracker = RewardTracker(MEAN_REWARD_EVERY)
        self.replay_buffer = PPOReplayBuffer(max_len=500, state_dim=9, action_dim=action_dim,
                                             if_discrete=True)
        self.learning_plot_initialised = False

    def select_action(self, state) -> tuple:
        """Sample an action for one stacked observation.

        Returns (action, action_prob) as numpy values. Assumes the flat state
        reshapes to (9, 5, 5) — TODO confirm against env observation shape.
        """
        state = torch.tensor(state)
        state = torch.reshape(state, (9, 5, 5)).unsqueeze(0)
        actions, action_prob = self.act.get_action_prob(state)
        return actions[0].detach().cpu().numpy(), action_prob[0].detach().cpu().numpy()

    def update_net(self, batch_size=64, repeat_times=1):
        """One PPO update sweep over the whole buffer.

        Returns (critic_loss, actor_loss, mean_new_logprob) for logging.
        """
        self.replay_buffer.update_now_len()
        buf_len = self.replay_buffer.now_len
        '''compute reverse reward'''
        with torch.no_grad():
            buf_reward, buf_mask, buf_action, buf_a_prob, buf_state = self.replay_buffer.sample_all()
            buf_state = torch.tensor(buf_state)
            buf_state = torch.reshape(buf_state, (buf_len, 9, 5, 5))
            bs = 2 ** 3  # critic evaluated in small chunks to bound memory
            buf_value = torch.cat([self.cri(buf_state[i:i + bs]) for i in range(0, buf_state.shape[0], bs)], dim=0)
            buf_logprob = self.act.get_old_logprob(buf_action, buf_a_prob)
            # NOTE(review): compute_reward_* are @staticmethods that still take
            # `self`, hence the explicit self argument here.
            buf_r_sum, buf_advantage = self.compute_reward(self, buf_len, buf_reward, buf_mask, buf_value)
            del buf_reward, buf_mask, buf_a_prob
        '''PPO: Surrogate objective of Trust Region'''
        obj_critic = obj_actor = new_logprob = None
        for update_c in range(int(buf_len / batch_size * repeat_times)):
            indices = torch.randint(buf_len, size=(batch_size,), requires_grad=False, device=self.device)
            state = buf_state[indices]
            action = buf_action[indices]
            r_sum = buf_r_sum[indices]
            logprob = buf_logprob[indices]
            advantage = buf_advantage[indices]
            new_logprob, obj_entropy = self.act.get_new_logprob_entropy(state, action)
            # Clipped surrogate objective (PPO).
            ratio = (new_logprob - logprob.detach()).exp()
            obj_surrogate1 = advantage * ratio
            obj_surrogate2 = advantage * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip)
            obj_surrogate = -torch.min(obj_surrogate1, obj_surrogate2).mean()
            obj_actor = obj_surrogate + obj_entropy * self.lambda_entropy
            value = self.cri(state).squeeze(1)
            obj_critic = self.criterion(value, r_sum)
            # Critic loss scaled by return std to balance the two objectives.
            obj_united = obj_actor + obj_critic / (r_sum.std() + 1e-6)
            self.optimizer.zero_grad()
            obj_united.backward()
            self.optimizer.step()
        logging_tuple = (obj_critic.item(), obj_actor.item(), new_logprob.mean().item())
        return logging_tuple

    @staticmethod
    def compute_reward_raw(self, buf_len, buf_reward, buf_mask, buf_value) -> (torch.Tensor, torch.Tensor):
        """Backward-accumulated discounted returns plus normalised advantages (no GAE)."""
        buf_r_sum = torch.empty(buf_len, dtype=torch.float32, device=self.device)  # reward sum
        pre_r_sum = 0  # reward sum of previous step
        for i in range(buf_len - 1, -1, -1):
            buf_r_sum[i] = buf_reward[i] + buf_mask[i] * pre_r_sum
            pre_r_sum = buf_r_sum[i]
        buf_advantage = buf_r_sum - (buf_mask * buf_value.squeeze(1))
        buf_advantage = (buf_advantage - buf_advantage.mean()) / (buf_advantage.std() + 1e-5)
        return buf_r_sum, buf_advantage

    @staticmethod
    def compute_reward_gae(self, buf_len, buf_reward, buf_mask, buf_value) -> (torch.Tensor, torch.Tensor):
        """Backward-accumulated returns plus normalised GAE advantages."""
        buf_r_sum = torch.empty(buf_len, dtype=torch.float32, device=self.device)  # old policy value
        buf_advantage = torch.empty(buf_len, dtype=torch.float32, device=self.device)  # advantage value
        pre_r_sum = 0  # reward sum of previous step
        pre_advantage = 0  # advantage value of previous step
        for i in range(buf_len - 1, -1, -1):
            buf_r_sum[i] = buf_reward[i] + buf_mask[i] * pre_r_sum
            pre_r_sum = buf_r_sum[i]
            buf_advantage[i] = buf_reward[i] + buf_mask[i] * (pre_advantage - buf_value[i])  # fix a bug here
            pre_advantage = buf_value[i] + buf_advantage[i] * self.lambda_gae_adv
        buf_advantage = (buf_advantage - buf_advantage.mean()) / (buf_advantage.std() + 1e-5)
        return buf_r_sum, buf_advantage

    def plot_learning_curve(self, image_path=None, csv_path=None):
        """Plot per-episode rewards and their moving average; optionally save PNG/CSV."""
        colour_palette = get_cmap(name='Set1').colors
        if not self.learning_plot_initialised:
            self.fig, self.ax = plt.subplots()
            self.learning_plot_initialised = True
        self.ax.clear()
        reward_data = self.reward_tracker.get_reward_data()
        x = reward_data[:, 0]
        y = reward_data[:, 1]
        # Save raw reward data
        if csv_path:
            np.savetxt(csv_path, reward_data, delimiter=",")
        # Compute moving average
        tracker = MovingAverage(maxlen=MEAN_REWARD_EVERY)
        mean_rewards = np.zeros(len(reward_data))
        for i, (_, reward) in enumerate(reward_data):
            tracker.append(reward)
            mean_rewards[i] = tracker.mean()
        # Create plot
        self.ax.plot(x, y, alpha=0.2, c=colour_palette[0])
        self.ax.plot(x[MEAN_REWARD_EVERY // 2:], mean_rewards[MEAN_REWARD_EVERY // 2:],
                     c=colour_palette[0])
        self.ax.set_xlabel('episode')
        self.ax.set_ylabel('reward per episode')
        self.ax.grid(True, ls=':')
        # Save plot
        if image_path:
            self.fig.savefig(image_path)
def explore_env(prediction_1, prediction_2):
    """Roll out one full episode with two PPO agents in the shared environment.

    Returns (trajectory_list1, trajectory_list2, episode_reward1, episode_reward2).
    Each trajectory entry is (stacked_state, (reward, mask, action, *action_prob)),
    matching the layout PPOReplayBuffer expects.

    NOTE(review): relies on module-level globals `env`, N_PREDATOR,
    FRAME_STACK_SIZE and GAMMA.
    """
    trajectory_list1 = list()
    trajectory_list2 = list()
    episode_reward = np.zeros(N_PREDATOR)
    # Reset env
    [observations_row, observations_column, _] = env.reset(prediction_1.role, prediction_2.role, 3, 3,
                                                           prediction_1.punishment, prediction_2.punishment)
    step = 0
    pred1_state = observations_row
    pred2_state = observations_column
    # Initialise each agent's frame stack by repeating the first observation.
    pred1_initial_stack = [pred1_state for _ in range(FRAME_STACK_SIZE)]
    pred1_frame_stack = deque(pred1_initial_stack, maxlen=FRAME_STACK_SIZE)
    pred1_state = np.concatenate(pred1_frame_stack, axis=2)
    pred2_initial_stack = [pred2_state for _ in range(FRAME_STACK_SIZE)]
    pred2_frame_stack = deque(pred2_initial_stack, maxlen=FRAME_STACK_SIZE)
    pred2_state = np.concatenate(pred2_frame_stack, axis=2)
    # sample one trajectory
    while True:
        # Get actions
        pred1_action, pred1_prob = prediction_1.select_action(pred1_state)
        pred2_action, pred2_prob = prediction_2.select_action(pred2_state)
        pred1_action = int(pred1_action)
        pred2_action = int(pred2_action)
        actions = [pred1_action, pred2_action]
        # Take actions, observe next states and rewards
        [next_observations_row, next_observations_column, next_observations], reward_vectors, done, _ = env.step(
            actions)
        next_pred1_state = next_observations_row
        next_pred2_state = next_observations_column
        pred1_reward, pred2_reward = reward_vectors
        rewards = [pred1_reward, pred2_reward]
        # Store in replay buffers; mask = 0.0 stops bootstrapping at episode end.
        other1 = (pred1_reward, 0.0 if done else GAMMA, pred1_action, *pred1_prob)
        trajectory_list1.append((pred1_state, other1))
        other2 = (pred2_reward, 0.0 if done else GAMMA, pred2_action, *pred2_prob)
        trajectory_list2.append((pred2_state, other2))
        # Slide the frame stacks forward and rebuild the stacked observations.
        pred1_frame_stack.append(next_pred1_state)
        next_pred1_state = np.concatenate(pred1_frame_stack, axis=2)
        pred2_frame_stack.append(next_pred2_state)
        next_pred2_state = np.concatenate(pred2_frame_stack, axis=2)
        # Assign next state to current state !!
        pred1_state = next_pred1_state
        pred2_state = next_pred2_state
        step += 1
        episode_reward += np.array(rewards)
        if done:
            break
    prediction_1.reward_tracker.append(episode_reward[0])
    prediction_2.reward_tracker.append(episode_reward[1])
    return trajectory_list1, trajectory_list2, episode_reward[0], episode_reward[1]
| 41.346591
| 115
| 0.634533
|
4a00c60e05bc5ac78f86b8f7643aaa6ba24a1951
| 3,158
|
py
|
Python
|
examples/example_routing.py
|
dwaardenburg/gds_tools
|
7faf42070890924add9c2c5a7937ec474b78bc4d
|
[
"MIT"
] | null | null | null |
examples/example_routing.py
|
dwaardenburg/gds_tools
|
7faf42070890924add9c2c5a7937ec474b78bc4d
|
[
"MIT"
] | null | null | null |
examples/example_routing.py
|
dwaardenburg/gds_tools
|
7faf42070890924add9c2c5a7937ec474b78bc4d
|
[
"MIT"
] | null | null | null |
import sys, os
import numpy as np
import gdspy as gp
import gds_tools as gdst

file_authors = "Daan Waardenburg"
last_author_email = "daanwaardenburg@gmail.com"
created_on_date = "14 November 2019"

# Resolve the directory this script lives in so relative paths work
# regardless of the current working directory.
filename = os.path.basename(__file__)
filepath = __file__.replace(filename, "")

source_file = "example_design.gds"
top_cell_name = 'TOP'

print("-- Importing file --")
imported_library = gp.GdsLibrary()
imported_library.read_gds(filepath + source_file)
# Flatten the top cell and strip pre-existing paths/labels before routing.
cell = imported_library.cells[top_cell_name].flatten()
cell.remove_paths(lambda x: True)
cell.remove_labels(lambda x: True)

# Obstacle-map parameters used by the router.
grid_size = 20
polygon_buffer = grid_size / 2
mapped_layers = [13, 14, 20]
map_precision = 0.001
GdsMap = gdst.GdsMap(
    cell,
    grid_size = grid_size,
    buffer = polygon_buffer,
    layers = mapped_layers,
    precision = map_precision
)

# (start, end) coordinate pairs to route, with matching end directions (rad).
routing_pairs = [
    [(-200,1903.57),(-1000,1000)],
    [(-400,2308.36),(-1000,1000)],
    [(-650,1903.57),(-1000,500)],
    [(-850,2308.36),(-1000,500)],
    [(-1300,2308.36),(-1000,0)],
    [(-1100,1903.57),(-1000,0)],
    [(-1750,2308.36),(-1000,-500)],
    [(-1550,1903.57),(-1000,-500)],
    [(-2200,2308.36),(-1000,-1000)],
    [(-2000,1903.57),(-1000,-1000)],
    [(50,2308.36),(-500,1000)],
    [(250,1903.57),(-500,1000)],
    [(500,2308.36),(-500,500)],
    [(700,1903.57),(-500,500)],
    [(950,2308.36),(-500,0)],
    [(1150,1903.57),(-500,0)],
    [(1400,2308.36),(-500,-500)],
    [(1600,1903.57),(-500,-500)]
]
routing_directions = [
    [3 * np.pi / 2, 0],
    [3 * np.pi / 2, np.pi],
    [3 * np.pi / 2, 0],
    [3 * np.pi / 2, np.pi],
    [3 * np.pi / 2, np.pi],
    [3 * np.pi / 2, 0],
    [3 * np.pi / 2, np.pi],
    [3 * np.pi / 2, 0],
    [3 * np.pi / 2, np.pi],
    [3 * np.pi / 2, 0],
    [3 * np.pi / 2, np.pi],
    [3 * np.pi / 2, 0],
    [3 * np.pi / 2, np.pi],
    [3 * np.pi / 2, 0],
    [3 * np.pi / 2, np.pi],
    [3 * np.pi / 2, 0],
    [3 * np.pi / 2, np.pi],
    [3 * np.pi / 2, 0]
]
# routing_pairs, routing_directions = gdst.routing.suggest_order(routing_pairs, routing_directions)

# Per-segment route parameters: width, distance from endpoint, clearance, layer.
segment_widths = [3, 2, 1]
segment_distances = [1000, 500, 0]
segment_buffers = [2, 2, 2]
segment_layers = [3, 2, 1]

for i, pair in enumerate(routing_pairs):
    route_segments = gdst.router(
        GdsMap,
        end_points = pair,
        end_directions = routing_directions[i],
        segment_widths = segment_widths,
        segment_distances = segment_distances,
        segment_buffers = segment_buffers)
    path_list = []
    try:
        for j, segment in enumerate(route_segments):
            path_list.append(
                gp.FlexPath(
                    points = segment,
                    width = segment_widths[j],
                    layer = segment_layers[j],
                    bend_radius = segment_widths[j]
                )
            )
        cell.add(path_list)
    except Exception as error:
        # An unroutable pair is skipped, but the failure is now reported.
        # (The original used a bare `except: pass`, which also silently
        # swallowed KeyboardInterrupt and programming errors.)
        print('-- Routing failed for pair {}: {} --'.format(pair, error))

cell.add(GdsMap.draw_map())

print('-- Writing resulting paths to file --')
gdst.save(cell, filepath + 'routing_results.gds')
| 27.46087
| 100
| 0.550348
|
4a00c6e1f93455dcd730cd8c43d3c2b8e6e4bee0
| 199
|
py
|
Python
|
src/user/urls.py
|
SUNGOD3/solution-challenge-backend
|
b89399f4f344f9b5ddbd2a0a437f230c6c51a994
|
[
"MIT"
] | null | null | null |
src/user/urls.py
|
SUNGOD3/solution-challenge-backend
|
b89399f4f344f9b5ddbd2a0a437f230c6c51a994
|
[
"MIT"
] | 4
|
2021-03-03T12:22:28.000Z
|
2021-03-21T05:02:38.000Z
|
src/user/urls.py
|
SUNGOD3/solution-challenge-backend
|
b89399f4f344f9b5ddbd2a0a437f230c6c51a994
|
[
"MIT"
] | 2
|
2021-02-03T12:12:38.000Z
|
2021-03-06T17:53:24.000Z
|
from django.urls import path
from . import views

# URL routes for the user app (mounted by the project-level urls.py).
urlpatterns = [
    path('', views.UserList.as_view()),               # user collection endpoint
    path('<str:email>', views.UserDetail.as_view()),  # single user, keyed by email
    path('login/', views.user_login),                 # login view
]
| 22.111111
| 52
| 0.663317
|
4a00c71916e3304b9c52172fd3301ec7a66d6cdc
| 229
|
py
|
Python
|
data/spiders/basic.py
|
unreal-estate/chicago
|
8cacef1ce883217939f2b97d4d0dd8a88ed43a1c
|
[
"MIT"
] | null | null | null |
data/spiders/basic.py
|
unreal-estate/chicago
|
8cacef1ce883217939f2b97d4d0dd8a88ed43a1c
|
[
"MIT"
] | null | null | null |
data/spiders/basic.py
|
unreal-estate/chicago
|
8cacef1ce883217939f2b97d4d0dd8a88ed43a1c
|
[
"MIT"
] | 1
|
2019-06-17T15:11:30.000Z
|
2019-06-17T15:11:30.000Z
|
# -*- coding: utf-8 -*-
import scrapy
class BasicSpider(scrapy.Spider):
    """Minimal Scrapy spider skeleton for cityofchicago.org.

    `parse` is intentionally a stub — no items or follow-up requests yet.
    """
    name = 'basic'
    allowed_domains = ['cityofchicago.org']
    start_urls = ['http://cityofchicago.org/']

    def parse(self, response):
        # Scrapy invokes this callback with each downloaded response.
        pass
| 19.083333
| 46
| 0.628821
|
4a00c72e71b30c1e5230090b08a8a08edc11d260
| 537
|
py
|
Python
|
orchestration/celery.py
|
spectrum-dev/django-orchestration
|
3b3257a00cbb086bc6a3b367e76de1081f751cc9
|
[
"MIT"
] | null | null | null |
orchestration/celery.py
|
spectrum-dev/django-orchestration
|
3b3257a00cbb086bc6a3b367e76de1081f751cc9
|
[
"MIT"
] | null | null | null |
orchestration/celery.py
|
spectrum-dev/django-orchestration
|
3b3257a00cbb086bc6a3b367e76de1081f751cc9
|
[
"MIT"
] | null | null | null |
import os

from celery import Celery

# this code copied from manage.py
# set the default Django settings module for the 'celery' app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "orchestration.settings")

# you can change the app name here
app = Celery("orchestration")

# read config from Django settings, the CELERY namespace would make celery
# config keys has `CELERY` prefix
app.config_from_object("django.conf:settings", namespace="CELERY")

# load tasks.py in django apps
app.autodiscover_tasks()

# periodic task schedule (empty for now; populate via settings or here)
app.conf.beat_schedule = {}
| 26.85
| 74
| 0.778399
|
4a00c8445301c5f7db68a7746b379f2955427bad
| 7,892
|
py
|
Python
|
nanotune/fit/coulomboscillationfit.py
|
theatlasroom/nanotune
|
444edb47b34739db82e1c58a6c963cb14b223398
|
[
"MIT"
] | 1
|
2021-07-03T11:58:52.000Z
|
2021-07-03T11:58:52.000Z
|
nanotune/fit/coulomboscillationfit.py
|
Ayushparikh-code/nanotune
|
6d63adc64c89aa38592cf732345d38f7c18f05e1
|
[
"MIT"
] | null | null | null |
nanotune/fit/coulomboscillationfit.py
|
Ayushparikh-code/nanotune
|
6d63adc64c89aa38592cf732345d38f7c18f05e1
|
[
"MIT"
] | null | null | null |
import copy
import logging
import os
from typing import Dict, List, Optional, Tuple
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy as sc
import nanotune as nt
from nanotune.data.plotting import (colors_dict, default_plot_params,
plot_params_type)
from nanotune.fit.datafit import DataFit
from nanotune.utils import format_axes
logger = logging.getLogger(__name__)
AxesTuple = Tuple[matplotlib.axes.Axes, matplotlib.colorbar.Colorbar]
class CoulombOscillationFit(DataFit):
    """Fit Coulomb oscillations: detect peaks in a 1D gate sweep and extract
    the voltage spacing between them, per readout method."""

    def __init__(
        self,
        qc_run_id: int,
        db_name: str,
        db_folder: Optional[str] = None,
        relative_height_threshold: float = 0.5,
        sigma_dV: float = 0.005,
    ) -> None:
        """Load the dataset via DataFit.

        Args:
            qc_run_id: QCoDeS run id of the dataset to fit.
            db_name: name of the database holding the run.
            db_folder: database folder; defaults to nt.config["db_folder"].
            relative_height_threshold: peak cutoff as a fraction of the
                filtered signal's maximum.
            sigma_dV: smoothing width in volts — presumably consumed by
                DataFit's filtering; TODO confirm.
        """
        if db_folder is None:
            db_folder = nt.config["db_folder"]
        DataFit.__init__(
            self,
            qc_run_id,
            db_name,
            db_folder=db_folder,
        )
        self.relative_height_threshold = relative_height_threshold
        self.sigma_dV = sigma_dV
        # Per-readout-method results, populated by find_fit().
        self.peak_indx: Dict[str, List[int]] = {}
        self.peak_distances: Dict[str, List[float]] = {}

    @property
    def range_update_directives(self) -> List[str]:
        """Not meaningful for this fit class."""
        raise NotImplementedError

    def find_fit(self) -> None:
        """
        Find peaks and extract distances between them (in voltage space)
        """
        self.peak_indx = self.find_peaks()
        self.peak_distances = self.calculate_peak_distances(self.peak_indx)
        self.peak_locations = self.get_peak_locations()
        self._retain_fit_result()
        self.save_features()

    def _retain_fit_result(self):
        """Copy peak indices, locations and distances into the features dict."""
        self._features = {}
        for read_meth in self.readout_methods.keys():
            self._features[read_meth] = {
                "peak_indx": self.peak_indx[read_meth],
                "peak_locations": self.peak_locations[read_meth],
                "peak_distances": self.peak_distances[read_meth],
            }

    def calculate_voltage_distances(self) -> Dict[str, float]:
        """
        Get voltage spacing between Coulomb peaks: the largest peak-to-peak
        distance per readout method.
        """
        voltage_distances = {}
        for read_meth in self.readout_methods.keys():
            voltage_distances[read_meth] = np.max(self.peak_distances[read_meth])
        return voltage_distances

    def get_peak_locations(self) -> Dict[str, List[float]]:
        """Map detected peak indices to gate-voltage values."""
        peak_locations = {}
        for read_meth in self.readout_methods.keys():
            v_x = self.data[read_meth].voltage_x.values
            peak_idx = self.peak_indx[read_meth]
            peak_locations[read_meth] = v_x[peak_idx].tolist()
        return peak_locations

    def find_peaks(
        self,
        absolute_height_threshold: Optional[float] = None,
        minimal_index_distance: int = 3,
    ) -> Dict[str, List[int]]:
        """
        wrapper around scipy.signal.find_peaks

        The threshold is relative to each signal's maximum:
        height = height_threshold * np.max(filtered_signal).
        If absolute_height_threshold is None, self.relative_height_threshold
        is used as the relative factor.
        """
        peaks = {}
        # Bug fix: keep the *relative* factor in its own variable. Previously
        # the (already absolute) cutoff from the first readout method was
        # re-multiplied by the next method's maximum on every iteration.
        if absolute_height_threshold is None:
            relative_threshold = self.relative_height_threshold
        else:
            relative_threshold = absolute_height_threshold
        for read_meth in self.readout_methods.keys():
            smooth_curr = self.filtered_data[read_meth].values
            # Retained attribute: holds the last computed absolute cutoff,
            # which plot_fit() uses to draw the threshold line.
            self.absolute_height_threshold = relative_threshold * np.max(smooth_curr)
            found_peaks, _ = sc.signal.find_peaks(
                smooth_curr,
                height=[self.absolute_height_threshold, None],
                distance=minimal_index_distance,
            )
            peaks[read_meth] = found_peaks.tolist()
        return peaks

    def calculate_peak_distances(
        self,
        peak_indx: Dict[str, List[int]],
    ) -> Dict[str, List[float]]:
        """Absolute voltage distance between each pair of consecutive peaks."""
        peak_distances: Dict[str, List[float]] = {}
        for read_meth in self.readout_methods.keys():
            voltage = self.data[read_meth].voltage_x.values
            peak_distances[read_meth] = []
            peaks = peak_indx[read_meth]
            if len(peaks) > 1:
                for ip in range(len(peaks) - 1):
                    peak = peaks[ip]
                    next_peak = peaks[ip + 1]
                    d = voltage[peak] - voltage[next_peak]
                    peak_distances[read_meth].append(abs(d))
        return peak_distances

    def plot_fit(
        self,
        ax: Optional[matplotlib.axes.Axes] = None,
        save_figures: bool = True,
        filename: Optional[str] = None,
        file_location: Optional[str] = None,
        plot_params: Optional[plot_params_type] = None,
    ) -> AxesTuple:
        """Plot signal, smoothed signal, detected peaks and the threshold line.

        Returns (axes, None) — the colorbar slot of AxesTuple is unused here.
        """
        if plot_params is None:
            plot_params = default_plot_params
        matplotlib.rcParams.update(plot_params)
        fig_title = f"Coulomboscillation fit {self.guid}"

        if not self.peak_indx:
            self.find_fit()

        if ax is None:
            fig_size = copy.deepcopy(plot_params["figure.figsize"])
            fig_size[1] *= len(self.data) * 0.8  # type: ignore
            fig, ax = plt.subplots(len(self.data), 1, squeeze=False, figsize=fig_size)
        else:
            # Bug fix: `fig` was only defined when ax is None, so a caller
            # passing axes hit a NameError at fig.tight_layout() below.
            fig = np.ravel(ax)[0].figure

        for r_i, read_meth in enumerate(self.readout_methods.keys()):
            voltage = self.data[read_meth]["voltage_x"].values
            signal = self.data[read_meth].values
            smooth_sig = self.filtered_data[read_meth].values

            ax[r_i, 0].plot(
                voltage,
                signal,
                color=colors_dict["blue"],
                label="signal",
                zorder=6,
            )
            ax[r_i, 0].set_xlabel(self.get_plot_label(read_meth, 0))
            ax[r_i, 0].set_ylabel(self.get_plot_label(read_meth, 1))
            ax[r_i, 0].set_title(fig_title)

            ax[r_i, 0].plot(
                voltage,
                smooth_sig,
                color=colors_dict["orange"],
                label="smooth",
                zorder=2,
            )
            ax[r_i, 0].plot(
                voltage[self.peak_indx[read_meth]],
                smooth_sig[self.peak_indx[read_meth]],
                "x",
                color=colors_dict["teal"],
                label="peaks",
            )
            ax[r_i, 0].vlines(
                x=voltage[self.peak_indx[read_meth]],
                ymin=0,
                ymax=smooth_sig[self.peak_indx[read_meth]],
                color=colors_dict["teal"],
                linestyles="dashed",
            )
            # NOTE(review): this is the absolute cutoff of the *last* readout
            # method processed by find_peaks().
            height = self.absolute_height_threshold
            ax[r_i, 0].plot(
                voltage,
                np.zeros_like(smooth_sig) + height,
                "--",
                color="gray",
                label="threshold",
            )
            ax[r_i, 0].legend(
                loc="upper right",
                bbox_to_anchor=(1, 1),
                frameon=False,
            )
            ax[r_i, 0].set_ylabel("normalized signal")
            ax[r_i, 0].set_aspect("auto")
            ax[r_i, 0].figure.tight_layout()

        fig.tight_layout()

        if save_figures:
            if file_location is None:
                file_location = os.path.join(
                    nt.config["db_folder"], "tuning_results", self.device_name
                )
            if not os.path.exists(file_location):
                os.makedirs(file_location)
            if filename is None:
                filename = f"coulomboscillationfit_{self.guid}"
            else:
                filename = os.path.splitext(filename)[0]
            path = os.path.join(file_location, filename + ".png")
            plt.savefig(path, format="png", dpi=600, bbox_inches="tight")
        return ax, None
| 33.299578
| 86
| 0.565763
|
4a00c97f233b5fe5f48a15db238e0d0c20f7d4e5
| 1,042
|
py
|
Python
|
oauth/models.py
|
wjzhangcsu/MyDjangoBlog
|
6f1a1d9205ad84b38ba1cbc1bf3bdba46eaaa9d7
|
[
"MIT"
] | null | null | null |
oauth/models.py
|
wjzhangcsu/MyDjangoBlog
|
6f1a1d9205ad84b38ba1cbc1bf3bdba46eaaa9d7
|
[
"MIT"
] | 15
|
2020-02-11T21:37:20.000Z
|
2022-03-11T23:12:25.000Z
|
oauth/models.py
|
wjzhangcsu/MyDjangoBlog
|
6f1a1d9205ad84b38ba1cbc1bf3bdba46eaaa9d7
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
from django.conf import settings
from django.utils.timezone import now
class OAuthUser(models.Model):
    """Third-party (OAuth) account linked to an optional local user.

    NOTE(review): `nikename` appears to be a typo for "nickname"; kept as-is
    because renaming would change the database column and any callers.
    """
    # Local account this OAuth identity is bound to; nullable until linked.
    author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='用户', blank=True, null=True,
                               on_delete=models.CASCADE)
    # Provider-side unique user id.
    openid = models.CharField(max_length=50)
    nikename = models.CharField(max_length=50, verbose_name='昵称')
    # Provider access token, avatar URL and provider name (e.g. platform type).
    token = models.CharField(max_length=150, null=True, blank=True)
    picture = models.CharField(max_length=350, blank=True, null=True)
    type = models.CharField(blank=False, null=False, max_length=50)
    email = models.CharField(max_length=50, null=True, blank=True)
    # Raw provider payload, stored as text.
    matedata = models.CharField(max_length=2000, null=True, blank=True)
    created_time = models.DateTimeField('创建时间', default=now)
    last_mod_time = models.DateTimeField('修改时间', default=now)

    def __str__(self):
        return self.nikename

    class Meta:
        verbose_name = 'oauth用户'
        verbose_name_plural = verbose_name
| 38.592593
| 98
| 0.715931
|
4a00c98302d126729e22203a9697556eaa3cb81e
| 380
|
py
|
Python
|
other/dingding/dingtalk/api/rest/OapiCrmObjectdataListRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
other/dingding/dingtalk/api/rest/OapiCrmObjectdataListRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
other/dingding/dingtalk/api/rest/OapiCrmObjectdataListRequest.py
|
hth945/pytest
|
83e2aada82a2c6a0fdd1721320e5bf8b8fd59abc
|
[
"Apache-2.0"
] | null | null | null |
'''
Created by auto_sdk on 2021.01.28
'''
from dingtalk.api.base import RestApi
class OapiCrmObjectdataListRequest(RestApi):
    """Auto-generated DingTalk request: dingtalk.oapi.crm.objectdata.list (POST)."""

    def __init__(self, url=None):
        RestApi.__init__(self, url)
        # Request parameters; populate before sending.
        self.current_operator_userid = None
        self.data_id_list = None
        self.name = None

    def getHttpMethod(self):
        """HTTP verb used by this API call."""
        return 'POST'

    def getapiname(self):
        """Dotted API name understood by the DingTalk gateway."""
        return 'dingtalk.oapi.crm.objectdata.list'
| 22.352941
| 44
| 0.757895
|
4a00ca08aa54cdd19ac20d49a3ca8ec922cb3a80
| 31,390
|
py
|
Python
|
src/viewer/app/plots.py
|
mappin/asxtrade
|
2b97ffcdefae642a49ce5bfcc131db17796f1691
|
[
"Apache-2.0"
] | null | null | null |
src/viewer/app/plots.py
|
mappin/asxtrade
|
2b97ffcdefae642a49ce5bfcc131db17796f1691
|
[
"Apache-2.0"
] | 1
|
2021-04-13T05:00:40.000Z
|
2021-04-13T05:00:40.000Z
|
src/viewer/app/plots.py
|
mappin/asxtrade
|
2b97ffcdefae642a49ce5bfcc131db17796f1691
|
[
"Apache-2.0"
] | null | null | null |
"""
Responsible for production of data visualisations and rendering this data as inline
base64 data for various django templates to use.
"""
import base64
import io
from datetime import datetime
from collections import Counter, defaultdict
import numpy as np
import pandas as pd
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import plotnine as p9
from app.models import stocks_by_sector, day_low_high, Timeframe, valid_quotes_only
def price_change_bins():
    """
    Return the bins and their label as a tuple for heatmap_market() to use and
    the plotting code. Labels correspond to the upper edge of each bin.
    """
    edges = [
        -1000.0, -100.0, -10.0, -5.0, -3.0, -2.0, -1.0, -1e-6,
        0.0, 1e-6, 1.0, 2.0, 3.0, 5.0, 10.0, 25.0, 100.0, 1000.0,
    ]
    labels = [str(edge) for edge in edges[1:]]
    return edges, labels
def plot_as_base64(fig, charset='utf-8'):
    """
    Convert supplied figure into string buffer and then return as base64-encoded
    string (in the specified charset) for insertion into a page as a context
    attribute. Note: the 200 KiB pre-allocated buffer is returned whole, so the
    encoded data includes any trailing zero padding.
    """
    assert fig is not None
    with io.BytesIO(bytearray(200 * 1024)) as buffer:
        fig.savefig(buffer, format="png")
        buffer.seek(0)
        raw = buffer.read()
    encoded = base64.b64encode(raw)
    return encoded.decode(charset)
def make_sentiment_plot(sentiment_df, exclude_zero_bin=True, plot_text_labels=True):
    """Render a date-vs-bin heatmap of sentiment counts as inline base64 PNG.

    Expects `sentiment_df` to carry one `bin_YYYY-MM-DD` column per date whose
    values are bin labels; counts per (date, bin) become the tile values.
    """
    rows = []
    print(
        "Sentiment plot: exclude zero bins? {} show text? {}".format(
            exclude_zero_bin, plot_text_labels
        )
    )
    for column in filter(lambda c: c.startswith("bin_"), sentiment_df.columns):
        c = Counter(sentiment_df[column])
        date = column[4:]  # strip the "bin_" prefix to recover the date string
        for bin_name, val in c.items():
            if exclude_zero_bin and (bin_name == "0.0" or not isinstance(bin_name, str)):
                continue
            bin_name = str(bin_name)
            assert isinstance(bin_name, str)
            val = int(val)
            rows.append(
                {
                    "date": datetime.strptime(date, "%Y-%m-%d"),
                    "bin": bin_name,
                    "value": val,
                }
            )
    df = pd.DataFrame.from_records(rows)
    # print(df['bin'].unique())
    # HACK TODO FIXME: should get from price_change_bins()...
    order = [
        "-1000.0",
        "-100.0",
        "-10.0",
        "-5.0",
        "-3.0",
        "-2.0",
        "-1.0",
        "-1e-06",
        "1e-06",
        "1.0",
        "2.0",
        "3.0",
        "5.0",
        "10.0",
        "25.0",
        "100.0",
        "1000.0",
    ]
    # Categorical ordering keeps the y-axis in numeric bin order.
    df["bin_ordered"] = pd.Categorical(df["bin"], categories=order)

    plot = (
        p9.ggplot(df, p9.aes("date", "bin_ordered", fill="value"))
        + p9.geom_tile(show_legend=False)
        + p9.theme_bw()
        + p9.xlab("")
        + p9.ylab("Percentage daily change")
        + p9.theme(axis_text_x=p9.element_text(angle=30, size=7), figure_size=(10, 5))
    )
    if plot_text_labels:
        plot = plot + p9.geom_text(p9.aes(label="value"), size=8, color="white")
    return plot_as_inline_html_data(plot)
def plot_fundamentals(df, stock) -> str:
    """Plot key fundamentals over time as stacked facets; returns inline base64 PNG.

    NOTE(review): mutates `df` in place (volume/market_cap/number_of_shares are
    rescaled to $M and a fetch_date column is added) — pass a copy if the
    caller reuses the frame. `stock` is currently unused beyond the signature.
    """
    assert isinstance(df, pd.DataFrame)
    columns_to_report = ["pe", "eps", "annual_dividend_yield", "volume", \
                        "last_price", "change_in_percent_cumulative", \
                        "change_price", "market_cap", "number_of_shares"]
    colnames = df.columns
    for column in columns_to_report:
        assert column in colnames

    df["volume"] = df["last_price"] * df["volume"] / 1000000  # again, express as $(M)
    df["market_cap"] /= 1000 * 1000
    df["number_of_shares"] /= 1000 * 1000
    df["fetch_date"] = df.index
    # Long format: one row per (date, indicator) for faceting.
    plot_df = pd.melt(
        df,
        id_vars="fetch_date",
        value_vars=columns_to_report,
        var_name="indicator",
        value_name="value",
    )
    plot_df["value"] = pd.to_numeric(plot_df["value"])
    plot_df["fetch_date"] = pd.to_datetime(plot_df["fetch_date"])

    plot = (
        p9.ggplot(plot_df, p9.aes("fetch_date", "value", color="indicator"))
        + p9.geom_line(size=1.5, show_legend=False)
        + p9.facet_wrap("~ indicator", nrow=len(columns_to_report), ncol=1, scales="free_y")
        + p9.theme(axis_text_x=p9.element_text(angle=30, size=7),
                   axis_text_y=p9.element_text(size=7),
                   figure_size=(8, len(columns_to_report)))
        # + p9.aes(ymin=0)
        + p9.xlab("")
        + p9.ylab("")
    )
    return plot_as_inline_html_data(plot)
def plot_as_inline_html_data(plot, charset="utf-8") -> str:
    """
    Return utf-8 encoded base64 image data for inline insertion into HTML content
    using the template engine. Plot must be a valid plotnine ggplot instance (or compatible)
    This function performs all required cleanup of the figure state, so callers can be clean.
    """
    assert plot is not None
    fig = plot.draw()
    data = plot_as_base64(fig, charset=charset)
    plt.close(fig)  # release the matplotlib figure so repeated calls don't leak
    return data
def plot_portfolio(portfolio_df, figure_size=(12, 4), line_size=1.5, date_text_size=7):
    """
    Given a daily snapshot of virtual purchases plot both overall and per-stock
    performance. Return a tuple of figures representing the performance as inline data:
    (overall_figure, stock_figure, profit_contributors).

    NOTE(review): `line_size` is currently unused (plots hard-code their sizes);
    `portfolio_df` has its "date" column converted in place.
    """
    assert portfolio_df is not None
    #print(portfolio_df)
    portfolio_df["date"] = pd.to_datetime(portfolio_df["date"])
    # Classify each stock as a positive/negative contributor by its mean profit.
    avg_profit_over_period = (
        portfolio_df.filter(items=["stock", "stock_profit"]).groupby("stock").mean()
    )
    avg_profit_over_period["contribution"] = [
        "positive" if profit >= 0.0 else "negative"
        for profit in avg_profit_over_period.stock_profit
    ]
    # dont want to override actual profit with average
    avg_profit_over_period = avg_profit_over_period.drop("stock_profit", axis="columns")
    portfolio_df = portfolio_df.merge(
        avg_profit_over_period, left_on="stock", right_index=True, how="inner"
    )
    # print(portfolio_df)

    # 1. overall performance
    df = portfolio_df.filter(
        items=["portfolio_cost", "portfolio_worth", "portfolio_profit", "date"]
    )
    df = df.melt(id_vars=["date"], var_name="field")
    plot = (
        p9.ggplot(df, p9.aes("date", "value", group="field", color="field"))
        + p9.labs(x="", y="$ AUD")
        + p9.geom_line(size=1.5)
        + p9.facet_wrap("~ field", nrow=3, ncol=1, scales="free_y")
        + p9.theme(
            axis_text_x=p9.element_text(angle=30, size=date_text_size),
            figure_size=figure_size,
            legend_position="none",
        )
    )
    overall_figure = plot_as_inline_html_data(plot)

    df = portfolio_df.filter(
        items=["stock", "date", "stock_profit", "stock_worth", "contribution"]
    )
    melted_df = df.melt(id_vars=["date", "stock", "contribution"], var_name="field")
    all_dates = sorted(melted_df["date"].unique())
    df = melted_df[melted_df["date"] == all_dates[-1]]
    df = df[df["field"] == "stock_profit"]  # only latest profit is plotted
    df["contribution"] = [
        "positive" if profit >= 0.0 else "negative" for profit in df["value"]
    ]

    # 2. plot contributors ie. winners and losers
    plot = (
        p9.ggplot(df, p9.aes("stock", "value", fill="stock"))
        + p9.geom_bar(stat="identity")
        + p9.labs(x="", y="$ AUD")
        + p9.facet_grid("contribution ~ field", scales="free_y")
        + p9.theme(legend_position="none", figure_size=figure_size)
    )
    profit_contributors = plot_as_inline_html_data(plot)

    # 3. per purchased stock performance
    plot = (
        p9.ggplot(melted_df, p9.aes("date", "value", group="stock", colour="stock"))
        + p9.xlab("")
        + p9.geom_line(size=1.0)
        + p9.facet_grid("field ~ contribution", scales="free_y")
        + p9.theme(
            axis_text_x=p9.element_text(angle=30, size=date_text_size),
            figure_size=figure_size,
            panel_spacing=0.5,  # more space between plots to avoid tick mark overlap
            subplots_adjust={"right": 0.8},
        )
    )
    stock_figure = plot_as_inline_html_data(plot)
    return overall_figure, stock_figure, profit_contributors
def plot_company_rank(df: pd.DataFrame):
    """Plot smoothed per-company rank over time, one facet per bin;
    returns inline base64 PNG. Expects columns: date, rank, asx_code, bin, x, y."""
    # assert 'sector' in df.columns
    n_bin = len(df["bin"].unique())
    #print(df)
    plot = (
        p9.ggplot(df, p9.aes("date", "rank", group="asx_code", color="asx_code"))
        + p9.geom_smooth(span=0.3, se=False)
        + p9.geom_text(
            p9.aes(label="asx_code", x="x", y="y"),
            nudge_x=1.2,
            size=6,
            show_legend=False,
        )
        + p9.xlab("")
        + p9.facet_wrap("~bin", nrow=n_bin, ncol=1, scales="free_y")
        + p9.theme(
            axis_text_x=p9.element_text(angle=30, size=7),
            figure_size=(8, 20),
            subplots_adjust={"right": 0.8},
        )
    )
    return plot_as_inline_html_data(plot)
def plot_company_versus_sector(df, stock, sector):
    """Line plot comparing a company against its sector over time;
    returns inline base64 PNG.

    NOTE(review): `stock` and `sector` are only validated, not plotted — the
    grouping comes from df's "group" column; confirm callers pre-build it.
    """
    assert isinstance(df, pd.DataFrame)
    assert isinstance(stock, str)
    assert isinstance(sector, str)
    df["date"] = pd.to_datetime(df["date"])
    # print(df)
    plot = (
        p9.ggplot(
            df, p9.aes("date", "value", group="group", color="group", fill="group")
        )
        + p9.geom_line(size=1.5)
        + p9.xlab("")
        + p9.ylab("Percentage change since start")
        + p9.theme(
            axis_text_x=p9.element_text(angle=30, size=7),
            figure_size=(8, 4),
            subplots_adjust={"right": 0.8},
        )
    )
    return plot_as_inline_html_data(plot)
def plot_market_wide_sector_performance(all_stocks_cip: pd.DataFrame):
    """
    Display specified dates for average sector performance. Each company is assumed to have at zero
    at the start of the observation period. A plot as base64 data is returned.
    """
    n_stocks = len(all_stocks_cip)
    # merge in sector information for each company
    code_and_sector = stocks_by_sector()
    n_unique_sectors = len(code_and_sector["sector_name"].unique())
    print("Found {} unique sectors".format(n_unique_sectors))
    #print(df)
    #print(code_and_sector)
    df = all_stocks_cip.merge(code_and_sector, left_index=True, right_on="asx_code")
    print(
        "Found {} stocks, {} sectors and merged total: {}".format(
            n_stocks, len(code_and_sector), len(df)
        )
    )
    # compute average change in percent of each unique sector over each day and sum over the dates
    # NOTE(review): expanding over columns also touches the merged asx_code /
    # sector_name columns — presumably they sort last or are ignored; verify.
    cumulative_pct_change = df.expanding(axis="columns").sum()
    # merge date-wise into df
    for date in cumulative_pct_change.columns:
        df[date] = cumulative_pct_change[date]
    # df.to_csv('/tmp/crap.csv')
    grouped_df = df.groupby("sector_name").mean()
    # grouped_df.to_csv('/tmp/crap.csv')

    # ready the dataframe for plotting
    grouped_df = pd.melt(
        grouped_df,
        ignore_index=False,
        var_name="date",
        value_name="cumulative_change_percent",
    )
    grouped_df["sector"] = grouped_df.index
    grouped_df["date"] = pd.to_datetime(grouped_df["date"])
    n_col = 3
    plot = (
        p9.ggplot(
            grouped_df, p9.aes("date", "cumulative_change_percent", color="sector")
        )
        + p9.geom_line(size=1.0)
        + p9.facet_wrap(
            "~sector", nrow=n_unique_sectors // n_col + 1, ncol=n_col, scales="free_y"
        )
        + p9.xlab("")
        + p9.ylab("Average sector change (%)")
        + p9.theme(
            axis_text_x=p9.element_text(angle=30, size=6),
            axis_text_y=p9.element_text(size=6),
            figure_size=(12, 6),
            panel_spacing=0.3,
            legend_position="none",
        )
    )
    return plot_as_inline_html_data(plot)
def plot_series(
    df,
    x=None,
    y=None,
    tick_text_size=6,
    line_size=1.5,
    y_axis_label="Point score",
    x_axis_label="",
    color="stock",
    use_smooth_line=False
):
    """
    Render df[y] against df[x] as a single line (or LOESS-smoothed line) plot.

    color names the column used to colour each series; pass a falsy value to
    disable colouring. Returns inline base64-encoded HTML image data.
    """
    assert len(df) > 0
    assert len(x) > 0 and len(y) > 0
    assert line_size > 0.0
    assert isinstance(tick_text_size, int) and tick_text_size > 0
    assert y_axis_label is not None
    assert x_axis_label is not None

    aes_kwargs = dict(x=x, y=y)
    if color:
        aes_kwargs["color"] = color

    # shared scaffolding: labels, compact tick text, no legend
    plot = (
        p9.ggplot(df, p9.aes(**aes_kwargs))
        + p9.labs(x=x_axis_label, y=y_axis_label)
        + p9.theme(
            axis_text_x=p9.element_text(angle=30, size=tick_text_size),
            axis_text_y=p9.element_text(size=tick_text_size),
            legend_position="none",
        )
    )
    # pick the geom last so both styles share the scaffolding above
    geom = p9.geom_smooth if use_smooth_line else p9.geom_line
    plot += geom(size=line_size)
    return plot_as_inline_html_data(plot)
def bin_market_cap(row):
    """
    Classify a company into a market-capitalisation bin.

    row[0] is the market cap, NB: expressed in millions $AUD already (see
    plot_market_cap_distribution() below). Returns one of "small" (< $2B),
    "med" ($2B..$10B), "large" (> $10B) or "NA" when the cap is missing.
    """
    mc = row[0]
    # BUGFIX: the None check must come first -- in Python 3, None < 2000
    # raises TypeError, so the original trailing "NA" branch was unreachable.
    if mc is None:
        return "NA"
    elif mc < 2000:
        return "small"
    elif mc > 10000:
        return "large"
    else:
        return "med"
def make_quote_df(quotes, asx_codes, prefix):
    """
    Build a dataframe (indexed by ASX code) of market cap in millions $AUD,
    last price and share count for the given quotes, then tag each row with
    its market-cap bin and the supplied market label (prefix).
    """
    wanted = {}
    for q in quotes:
        # skip quotes with no market cap or outside the requested watch set
        if q.market_cap is None or q.asx_code not in asx_codes:
            continue
        wanted[q.asx_code] = (q.market_cap / (1000 * 1000), q.last_price, q.number_of_shares)
    df = pd.DataFrame.from_dict(wanted, orient="index",
                                columns=["market_cap", "last_price", "shares"])
    df['bin'] = df.apply(bin_market_cap, axis=1)
    df['market'] = prefix
    return df
def plot_market_cap_distribution(stocks, ymd: str, ymd_start_of_timeframe: str):
    """
    Box plots of market capitalisation, one panel per cap bin, comparing the
    distribution at the start of the timeframe against the latest day.

    stocks: iterable of ASX codes to consider
    ymd / ymd_start_of_timeframe: latest and earliest quote dates
    Returns the plot as inline base64-encoded HTML image data.
    """
    #print(ymd)
    latest_quotes = valid_quotes_only(ymd)
    earliest_quotes = valid_quotes_only(ymd_start_of_timeframe)
    asx_codes = set(stocks)
    latest_df = make_quote_df(latest_quotes, asx_codes, ymd)
    earliest_df = make_quote_df(earliest_quotes, asx_codes, ymd_start_of_timeframe)
    # BUGFIX: DataFrame.append() was deprecated and removed in pandas 2.x --
    # stack the two snapshots with pd.concat() instead (same result)
    df = pd.concat([latest_df, earliest_df])
    #print(df)
    small_text = p9.element_text(size=7)
    plot = p9.ggplot(df) + \
           p9.geom_boxplot(p9.aes(x='market', y='market_cap')) + \
           p9.facet_wrap("bin", scales="free_y") + \
           p9.labs(x='', y='Market cap. ($AUD Millions)') + \
           p9.theme(subplots_adjust={'wspace': 0.30},
                    axis_text_x=small_text,
                    axis_text_y=small_text)
    return plot_as_inline_html_data(plot)
def plot_breakdown(cip_df: pd.DataFrame):
    """
    Stacked bar plot of increasing and decreasing stocks per sector in the specified df.

    cip_df: change-in-percent matrix (rows = ASX codes, columns = dates).
    Returns inline base64-encoded HTML image data, or None if no stock in
    cip_df could be matched to a sector.
    """
    # drop heatmap binning columns if present: only the raw date columns are wanted
    cols_to_drop = [colname for colname in cip_df.columns if colname.startswith('bin_')]
    df = cip_df.drop(columns=cols_to_drop)
    # net percentage move per stock over the whole timeframe
    df = pd.DataFrame(df.sum(axis='columns'), columns=['sum'])
    df = df.merge(stocks_by_sector(), left_index=True, right_on='asx_code')

    if len(df) == 0: # no stock in cip_df have a sector? ie. ETF?
        return None

    assert set(df.columns) == set(['sum', 'asx_code', 'sector_name'])
    df['increasing'] = df.apply(lambda row: 'up' if row['sum'] >= 0.0 else 'down', axis=1)
    sector_names = df['sector_name'].value_counts().index.tolist() # sort bars by value count (ascending)
    sector_names_cat = pd.Categorical(df['sector_name'], categories=sector_names)
    df = df.assign(sector_name_cat=sector_names_cat)

    #print(df)
    plot = (
        p9.ggplot(df, p9.aes(x='factor(sector_name_cat)', fill='factor(increasing)'))
        + p9.geom_bar()
        + p9.labs(x="Sector", y="Number of stocks")
        + p9.theme(axis_text_y=p9.element_text(size=7),
                   subplots_adjust={"left": 0.2, 'right': 0.85},
                   legend_title=p9.element_blank()
                   )
        + p9.coord_flip()
    )
    return plot_as_inline_html_data(plot)
def plot_heatmap(
    df: pd.DataFrame,
    timeframe: Timeframe,
    bin_cb=price_change_bins,
    n_top_bottom=10,
):
    """
    Plot the specified data matrix as binned values (heatmap) with X axis being dates over the specified timeframe and Y axis being
    the percentage change on the specified date (other metrics may also be used, but you will likely need to adjust the bins)
    Also computes top/bottom performers and returns a (plot, top10, bottom10)
    tuple; top10/bottom10 each contain n_top_bottom stocks. Returns
    (None, None, None) when no prices are available.
    """
    bins, labels = bin_cb()
    # compute totals across all dates for the specified companies to look at top10/bottom10 in the timeframe
    sum_by_company = df.sum(axis=1)
    top10 = sum_by_company.nlargest(n_top_bottom)
    bottom10 = sum_by_company.nsmallest(n_top_bottom)
    # print(df.columns)
    # print(bins)
    try:
        # NB: this may fail if no prices are available so we catch that error and handle accordingly...
        for date in df.columns:
            # adds a bin_<date> column holding each stock's bin label for that day
            df["bin_{}".format(date)] = pd.cut(df[date], bins, labels=labels)
        sentiment_plot = make_sentiment_plot(df, plot_text_labels=timeframe.n_days <= 30) # show counts per bin iff not too many bins
        return (sentiment_plot, top10, bottom10)
    except KeyError:
        return (None, None, None)
def plot_sector_performance(dataframe, descriptor, window_size=14):
    """
    Three stacked panels for a sector: count of stocks up >5%, down >5%,
    and the remainder, each smoothed with a rolling mean of window_size days.

    dataframe must have columns: date, n_pos, n_neg, n_unchanged.
    descriptor labels the panel titles. Returns base64-encoded image data.
    """
    assert len(descriptor) > 0
    assert len(dataframe) > 0

    fig, axes = plt.subplots(3, 1, figsize=(6, 5), sharex=True)
    timeline = pd.to_datetime(dataframe["date"])
    locator, formatter = auto_dates()
    # now do the plot
    for name, ax, linecolour, title in zip(
        ["n_pos", "n_neg", "n_unchanged"],
        axes,
        ["darkgreen", "red", "grey"],
        [
            "{} stocks up >5%".format(descriptor),
            "{} stocks down >5%".format(descriptor),
            "Remaining stocks",
        ],
    ):
        # use a moving average to smooth out 5-day trading weeks and see the trend
        series = dataframe[name].rolling(window_size).mean()
        ax.plot(timeline, series, color=linecolour)
        ax.set_ylabel("", fontsize=8)
        ax.set_ylim(0, max(series.fillna(0)) + 10)  # headroom above the peak
        ax.set_title(title, fontsize=8)
        ax.xaxis.set_major_locator(locator)
        ax.xaxis.set_major_formatter(formatter)

        # Remove the automatic x-axis label from all but the bottom subplot
        if ax != axes[-1]:
            ax.set_xlabel("")

    plt.plot()
    ret = plt.gcf()
    data = plot_as_base64(ret)
    plt.close(fig)
    return data
def auto_dates():
    """
    Build a matplotlib (locator, formatter) pair for readable date axes.

    The ConciseDateFormatter is configured so each tick level (year, month,
    day, hour, minute, second) shows just enough context, with "zero" ticks
    (eg. the first tick of a month) formatted one level up.
    """
    locator = mdates.AutoDateLocator()
    formatter = mdates.ConciseDateFormatter(locator)
    # per-level tick formats: years, months, days, hours, minutes, seconds
    formatter.formats = ["%y", "%b", "%d", "%H:%M", "%H:%M", "%S.%f"]
    # zero ticks mostly show the level above...
    formatter.zero_formats = [""] + formatter.formats[:-1]
    # ...except day-level zero ticks, where a month-day label is nicer
    formatter.zero_formats[3] = "%d-%b"
    formatter.offset_formats = [
        "",
        "%Y",
        "%b %Y",
        "%d %b %Y",
        "%d %b %Y",
        "%d %b %Y %H:%M",
    ]
    return (locator, formatter)
def relative_strength(prices, n=14):
    """
    Compute the RSI (relative strength index) of a price series using an
    exponentially-weighted moving average of gains versus losses.

    See https://stackoverflow.com/questions/20526414/relative-strength-index-in-python-pandas
    A trailing NaN entry keyed by today's date is appended so the series
    length matches what downstream matplotlib code expects.
    """
    assert n > 0
    assert prices is not None

    # day-over-day moves; drop the first row (no previous day, so it is NaN)
    moves = prices.diff()[1:]

    # split the moves into gain and loss components
    gains = moves.copy()
    losses = moves.copy()
    gains[gains < 0] = 0
    losses[losses > 0] = 0

    # EWMA of gains and (absolute) losses, then the classic RSI formula
    avg_gain = gains.ewm(span=n).mean()
    avg_loss = losses.abs().ewm(span=n).mean()
    strength = avg_gain / avg_loss
    rsi = 100.0 - (100.0 / (1.0 + strength))

    # NB: format is carefully handled here, so downstream code doesnt break
    new_date = datetime.strftime(
        datetime.now(), "%Y-%m-%d "
    )  # make sure it is not an existing date
    rsi.at[new_date] = np.nan  # ensure data series are the same length for matplotlib
    return rsi
def make_rsi_plot(stock: str, stock_df: pd.DataFrame):
    """
    Produce a three-panel technical chart for a stock: RSI(14) on top, price
    range bars with MA(20)/MA(200) and dollar-volume overlay in the middle,
    and MACD (12, 26, 9) at the bottom.

    stock_df must have columns: last_price, volume, day_low_price,
    day_high_price, indexed by date. Returns the chart as base64 image data.
    """
    assert len(stock) > 0

    # print(last_price)
    # print(volume)
    # print(day_low_price)
    # print(day_high_price)
    last_price = stock_df["last_price"]
    volume = stock_df["volume"]
    day_low_price = stock_df["day_low_price"]
    day_high_price = stock_df["day_high_price"]

    plt.rc("axes", grid=True)
    plt.rc("grid", color="0.75", linestyle="-", linewidth=0.5)

    textsize = 8
    left, width = 0.1, 0.8
    # manual layout: [left, bottom, width, height] for the three stacked panels
    rect1 = [left, 0.7, width, 0.2]
    rect2 = [left, 0.3, width, 0.4]
    rect3 = [left, 0.1, width, 0.2]

    fig = plt.figure(facecolor="white", figsize=(12, 6))
    axescolor = "#f6f6f6"  # the axes background color

    ax1 = fig.add_axes(rect1, facecolor=axescolor)  # left, bottom, width, height
    ax2 = fig.add_axes(rect2, facecolor=axescolor, sharex=ax1)
    ax2t = ax2.twinx()  # second y-axis so volume can overlay the price panel
    ax3 = fig.add_axes(rect3, facecolor=axescolor, sharex=ax1)
    fig.autofmt_xdate()

    # plot the relative strength indicator
    rsi = relative_strength(last_price)
    # print(len(rsi))
    fillcolor = "darkgoldenrod"

    timeline = pd.to_datetime(last_price.index)
    # print(values)
    ax1.plot(timeline, rsi, color=fillcolor)
    ax1.axhline(70, color="darkgreen")
    ax1.axhline(30, color="darkgreen")
    # shade the overbought (>=70) and oversold (<=30) regions
    ax1.fill_between(
        timeline, rsi, 70, where=(rsi >= 70), facecolor=fillcolor, edgecolor=fillcolor
    )
    ax1.fill_between(
        timeline, rsi, 30, where=(rsi <= 30), facecolor=fillcolor, edgecolor=fillcolor
    )
    ax1.text(
        0.6,
        0.9,
        ">70 = overbought",
        va="top",
        transform=ax1.transAxes,
        fontsize=textsize,
    )
    ax1.text(0.6, 0.1, "<30 = oversold", transform=ax1.transAxes, fontsize=textsize)
    ax1.set_ylim(0, 100)
    ax1.set_yticks([30, 70])
    ax1.text(
        0.025, 0.95, "RSI (14)", va="top", transform=ax1.transAxes, fontsize=textsize
    )
    # ax1.set_title('{} daily'.format(stock))

    # plot the price and volume data
    dx = 0.0
    low = day_low_price + dx
    high = day_high_price + dx

    deltas = np.zeros_like(last_price)
    deltas[1:] = np.diff(last_price)
    up = deltas > 0
    # daily low/high range bars, drawn black for both up and down days
    ax2.vlines(timeline[up], low[up], high[up], color="black", label="_nolegend_")
    ax2.vlines(timeline[~up], low[~up], high[~up], color="black", label="_nolegend_")
    ma20 = last_price.rolling(window=20).mean()
    ma200 = last_price.rolling(window=200).mean()

    # timeline = timeline.to_list()
    (linema20,) = ax2.plot(timeline, ma20, color="blue", lw=2, label="MA (20)")
    (linema200,) = ax2.plot(timeline, ma200, color="red", lw=2, label="MA (200)")
    assert linema20 is not None
    assert linema200 is not None

    # last = dataframe[-1]
    # s = '%s O:%1.2f H:%1.2f L:%1.2f C:%1.2f, V:%1.1fM Chg:%+1.2f' % (
    #     today.strftime('%d-%b-%Y'),
    #     last.open, last.high,
    #     last.low, last.close,
    #     last.volume*1e-6,
    #     last.close - last.open)
    # t4 = ax2.text(0.3, 0.9, s, transform=ax2.transAxes, fontsize=textsize)

    props = font_manager.FontProperties(size=10)
    leg = ax2.legend(loc="center left", shadow=True, fancybox=True, prop=props)
    leg.get_frame().set_alpha(0.5)

    volume = (last_price * volume) / 1e6  # dollar volume in millions
    # print(volume)
    vmax = max(volume)
    poly = ax2t.fill_between(
        timeline,
        volume.to_list(),
        0,
        alpha=0.5,
        label="Volume",
        facecolor=fillcolor,
        edgecolor=fillcolor,
    )
    assert poly is not None  # avoid unused variable from pylint
    ax2t.set_ylim(0, 5 * vmax)  # scale so the volume hugs the panel bottom
    ax2t.set_yticks([])

    # compute the MACD indicator
    fillcolor = "darkslategrey"

    n_fast = 12
    n_slow = 26
    n_ema = 9
    emafast = last_price.ewm(span=n_fast, adjust=False).mean()
    emaslow = last_price.ewm(span=n_slow, adjust=False).mean()
    macd = emafast - emaslow
    nema = macd.ewm(span=n_ema, adjust=False).mean()
    ax3.plot(timeline, macd, color="black", lw=2)
    ax3.plot(timeline, nema, color="blue", lw=1)
    # shade the MACD histogram (signal-line divergence)
    ax3.fill_between(
        timeline, macd - nema, 0, alpha=0.3, facecolor=fillcolor, edgecolor=fillcolor
    )
    ax3.text(
        0.025,
        0.95,
        "MACD ({}, {}, {})".format(n_fast, n_slow, n_ema),
        va="top",
        transform=ax3.transAxes,
        fontsize=textsize,
    )
    ax3.set_yticks([])

    # apply the shared date locator/formatter to all panels
    locator, formatter = auto_dates()
    for ax in ax1, ax2, ax2t, ax3:
        ax.xaxis.set_major_locator(locator)
        ax.xaxis.set_major_formatter(formatter)

    plt.xticks(fontsize=8)
    try:
        # skip the warm-up window so MA(200)/MACD have data on screen
        plt.xlim(left=timeline[200])
    except IndexError:
        print("WARNING: 200 datapoints not available - some momentum data not available")
    fig = plt.gcf()
    rsi_data = plot_as_base64(fig)
    plt.close(fig)
    return rsi_data
def plot_trend(dataframe, sample_period="M"):
    """
    Given a dataframe of a single stock from company_prices() this plots the highest price
    in each month over the time period of the dataframe.

    sample_period is any pandas resample rule (default "M", ie. monthly).
    Returns the plot as inline base64-encoded HTML image data.
    """
    assert dataframe is not None

    dataframe = dataframe.transpose()
    dataframe.index = pd.to_datetime(dataframe.index)
    # take the highest price observed within each sampling period
    dataframe = dataframe.resample(sample_period, kind="period").max()
    plot = (
        # NOTE(review): "dataframe.index" is resolved via the calling
        # environment, not a dataframe column -- confirm on plotnine upgrades
        p9.ggplot(dataframe, p9.aes(x="dataframe.index", y=dataframe.columns[0]))
        + p9.geom_bar(stat="identity", fill="#880000", alpha=0.5)
        + p9.labs(x="", y="$AUD")
        + p9.theme(axis_text_x=p9.element_text(angle=30, size=7))
    )
    return plot_as_inline_html_data(plot)
def plot_point_scores(stock: str, sector_companies, all_stocks_cip: pd.DataFrame, rules):
    """
    Visualise the stock in terms of point scores as described on the stock view page. Rules to apply
    can be specified by rules (default rules are provided by rule_*())

    Points are lost for equivalent downturns and the result plotted. All rows in all_stocks_cip will be
    used to calculate the market average on a given trading day, whilst only sector_companies will
    be used to calculate the sector average. A utf-8 base64 encoded plot image is returned

    Returns a (point_score_plot, net_rule_contributors_plot) tuple of
    encoded images.
    """
    assert len(stock) >= 3
    assert all_stocks_cip is not None
    assert rules is not None and len(rules) > 0

    rows = []
    points = 0
    day_low_high_df = day_low_high(stock, all_dates=all_stocks_cip.columns)
    state = {
        "day_low_high_df": day_low_high_df,  # never changes each day, so we init it here
        "all_stocks_change_in_percent_df": all_stocks_cip,
        "stock": stock,
        "daily_range_threshold": 0.20,  # 20% at either end of the daily range gets a point
    }
    net_points_by_rule = defaultdict(int)
    for date in all_stocks_cip.columns:
        market_avg = all_stocks_cip[date].mean()
        sector_avg = all_stocks_cip[date].filter(items=sector_companies).mean()
        stock_move = all_stocks_cip.at[stock, date]
        state.update(
            {
                "market_avg": market_avg,
                "sector_avg": sector_avg,
                "stock_move": stock_move,
                "date": date,
            }
        )
        # NOTE(review): each rule is evaluated twice per date (once for the
        # running total, once per-rule below) -- harmless only if rules are
        # pure functions of state; confirm before collapsing to one call
        points += sum(map(lambda r: r(state), rules))
        for r in rules:
            k = r.__name__
            # strip the conventional rule_ prefix for display purposes
            if k.startswith("rule_"):
                k = k[5:]
            net_points_by_rule[k] += r(state)
        rows.append({"points": points, "stock": stock, "date": date})

    # cumulative point score over time
    df = pd.DataFrame.from_records(rows)
    df["date"] = pd.to_datetime(df["date"])
    point_score_plot = plot_series(df, x="date", y="points")

    # horizontal bar chart of the net points contributed by each rule
    rows = []
    for k, v in net_points_by_rule.items():
        rows.append({"rule": str(k), "net_points": v})
    df = pd.DataFrame.from_records(rows)
    net_rule_contributors_plot = (
        p9.ggplot(df, p9.aes(x="rule", y="net_points"))
        + p9.labs(x="Rule", y="Contribution to points by rule")
        + p9.geom_bar(stat="identity")
        + p9.theme(axis_text_y=p9.element_text(size=7), subplots_adjust={"left": 0.2})
        + p9.coord_flip()
    )
    return point_score_plot, plot_as_inline_html_data(net_rule_contributors_plot)
def plot_boxplot_series(df, normalisation_method=None):
    """
    Treating each column as a separate boxplot and each row as an independent observation
    (ie. different company)
    render a series of box plots to identify a shift in performance from the observations.
    normalisation_method should be one of the values present in
    SectorSentimentSearchForm.normalisation_choices

    Returns a (plot, winners) tuple: the plot as inline HTML image data and
    a list of (asx_code, n_wins, total_change) tuples, best performer first.
    """
    # compute star performers: those who are above the mean on a given day counted over all days
    count = defaultdict(int)
    # PERF: the per-column means never change inside the loop, so compute them
    # once up-front (previously df.mean() was recomputed for every column)
    avg = df.mean(axis=0)
    for col in df.columns:
        winners = df[df[col] > avg[col]][col]
        for winner in winners.index:
            count[winner] += 1

    winner_results = []
    for asx_code, n_wins in count.items():
        x = df.loc[asx_code].sum()
        # avoid "dead cat bounce" stocks which fall spectacularly and then post major increases in percentage terms
        if x > 0.0:
            winner_results.append((asx_code, n_wins, x))

    # and plot the normalised data
    if normalisation_method is None or normalisation_method == "1":
        normalized_df = df
        y_label = "Percentage change"
    elif normalisation_method == "2":
        normalized_df = (df - df.min()) / (df.max() - df.min())
        y_label = "Percentage change (min/max. scaled)"
    else:
        normalized_df = df / df.max(axis=0)  # div by max if all else fails...
        y_label = "Percentage change (normalised by dividing by max)"

    n_inches = len(df.columns) / 5  # plot height scales with the number of dates
    melted = normalized_df.melt(ignore_index=False).dropna()
    plot = (
        p9.ggplot(melted, p9.aes(x="fetch_date", y="value"))
        + p9.geom_boxplot(outlier_colour="blue")
        + p9.theme(
            axis_text_x=p9.element_text(size=7),
            axis_text_y=p9.element_text(size=7),
            figure_size=(12, n_inches),
        )
        + p9.labs(x="Date (YYYY-MM-DD)", y=y_label)
        + p9.coord_flip()
    )
    return (
        plot_as_inline_html_data(plot),
        list(reversed(sorted(winner_results, key=lambda t: t[2]))),
    )
def plot_sector_field(df: pd.DataFrame, field, n_col=3):
    """
    Plot the named sector-wide metric (field, eg. mean_pe) over time, with
    one facet per sector arranged into n_col columns.

    df must have at least 'sector' and 'date' columns plus the field column.
    Returns the plot as inline base64-encoded HTML image data.
    """
    print(df.columns)
    #assert set(df.columns) == set(['sector', 'date', 'mean_pe', 'sum_pe', 'sum_eps', 'mean_eps', 'n_stocks'])
    n_unique_sectors = df['sector'].nunique()
    df['date'] = pd.to_datetime(df['date'])  # NB: mutates the caller's frame
    plot = (
        p9.ggplot(df, p9.aes("date", field, group="sector", color="sector"))
        + p9.geom_line(size=1.0)
        + p9.facet_wrap("~sector", nrow=n_unique_sectors // n_col + 1, ncol=n_col, scales="free_y")
        + p9.xlab("")
        + p9.ylab(f"Sector-wide {field}")
        + p9.theme(
            axis_text_x=p9.element_text(angle=30, size=6),
            axis_text_y=p9.element_text(size=6),
            figure_size=(12, 6),
            panel_spacing=0.3,
            legend_position="none",
        )
    )
    return plot_as_inline_html_data(plot)
| 35.309336
| 135
| 0.616311
|
4a00caa79e7feacc4e9cbd2c2e1cea085171854e
| 130
|
py
|
Python
|
sqlite_file_index/__init__.py
|
0xf0f/sqlite-file-index
|
8037ef6e86b91db130ecc01fd181140c024acd5e
|
[
"MIT"
] | 2
|
2020-01-27T00:26:16.000Z
|
2021-01-14T21:00:14.000Z
|
sqlite_file_index/__init__.py
|
0xf0f/sqlite-file-index
|
8037ef6e86b91db130ecc01fd181140c024acd5e
|
[
"MIT"
] | null | null | null |
sqlite_file_index/__init__.py
|
0xf0f/sqlite-file-index
|
8037ef6e86b91db130ecc01fd181140c024acd5e
|
[
"MIT"
] | null | null | null |
from .file_index import FileIndex
from .file_index_task import FileIndexTask

# Public API of the sqlite_file_index package.
__all__ = [
    'FileIndex',
    'FileIndexTask',
]
| 16.25
| 42
| 0.738462
|
4a00cb9ef40e2ae8cdf91c47a47afac57b929326
| 11,045
|
py
|
Python
|
platformio/commands/debug/client.py
|
adlerweb/platformio-core
|
cd8dc24454176d05ab1360cb51a32b40f1fa7e7f
|
[
"Apache-2.0"
] | null | null | null |
platformio/commands/debug/client.py
|
adlerweb/platformio-core
|
cd8dc24454176d05ab1360cb51a32b40f1fa7e7f
|
[
"Apache-2.0"
] | null | null | null |
platformio/commands/debug/client.py
|
adlerweb/platformio-core
|
cd8dc24454176d05ab1360cb51a32b40f1fa7e7f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
import signal
import time
from hashlib import sha1
from os.path import abspath, basename, dirname, isdir, join, splitext
from tempfile import mkdtemp
from twisted.internet import protocol # pylint: disable=import-error
from twisted.internet import reactor # pylint: disable=import-error
from twisted.internet import stdio # pylint: disable=import-error
from twisted.internet import task # pylint: disable=import-error
from platformio import app, exception, fs, proc, util
from platformio.commands.debug import helpers, initcfgs
from platformio.commands.debug.process import BaseProcess
from platformio.commands.debug.server import DebugServer
from platformio.compat import hashlib_encode_data
from platformio.project.helpers import get_project_cache_dir
from platformio.telemetry import MeasurementProtocol
LOG_FILE = None
class GDBClient(BaseProcess):  # pylint: disable=too-many-instance-attributes
    """Twisted process protocol that drives a GDB client session.

    Spawns the matching debug server, generates the .pioinit GDB script,
    proxies stdin/stdout between the user and GDB, and manages single-session
    locking via the PlatformIO content cache.
    """

    PIO_SRC_NAME = ".pioinit"
    INIT_COMPLETED_BANNER = "PlatformIO: Initialization completed"

    def __init__(self, project_dir, args, debug_options, env_options):
        self.project_dir = project_dir
        self.args = list(args)
        self.debug_options = debug_options
        self.env_options = env_options

        self._debug_server = DebugServer(debug_options, env_options)
        self._session_id = None

        if not isdir(get_project_cache_dir()):
            os.makedirs(get_project_cache_dir())
        # per-session scratch dir holding the generated .pioinit script
        self._gdbsrc_dir = mkdtemp(dir=get_project_cache_dir(),
                                   prefix=".piodebug-")

        self._target_is_run = False
        self._last_server_activity = 0
        self._auto_continue_timer = None

    def spawn(self, gdb_path, prog_path):
        """Start the debug server and then the GDB client for prog_path."""
        session_hash = gdb_path + prog_path
        self._session_id = sha1(hashlib_encode_data(session_hash)).hexdigest()
        self._kill_previous_session()

        # substitution values injected into the generated GDB init script
        patterns = {
            "PROJECT_DIR": self.project_dir,
            "PROG_PATH": prog_path,
            "PROG_DIR": dirname(prog_path),
            "PROG_NAME": basename(splitext(prog_path)[0]),
            "DEBUG_PORT": self.debug_options['port'],
            "UPLOAD_PROTOCOL": self.debug_options['upload_protocol'],
            "INIT_BREAK": self.debug_options['init_break'] or "",
            "LOAD_CMDS": "\n".join(self.debug_options['load_cmds'] or []),
        }

        self._debug_server.spawn(patterns)
        if not patterns['DEBUG_PORT']:
            patterns['DEBUG_PORT'] = self._debug_server.get_debug_port()

        self.generate_pioinit(self._gdbsrc_dir, patterns)

        # start GDB client
        args = [
            "piogdb",
            "-q",
            "--directory", self._gdbsrc_dir,
            "--directory", self.project_dir,
            "-l", "10"
        ]  # yapf: disable
        args.extend(self.args)
        if not gdb_path:
            raise exception.DebugInvalidOptions("GDB client is not configured")
        gdb_data_dir = self._get_data_dir(gdb_path)
        if gdb_data_dir:
            args.extend(["--data-directory", gdb_data_dir])
        args.append(patterns['PROG_PATH'])

        return reactor.spawnProcess(self,
                                    gdb_path,
                                    args,
                                    path=self.project_dir,
                                    env=os.environ)

    @staticmethod
    def _get_data_dir(gdb_path):
        """Locate GDB's share/gdb data dir relative to the binary, if any."""
        if "msp430" in gdb_path:
            return None
        gdb_data_dir = abspath(join(dirname(gdb_path), "..", "share", "gdb"))
        return gdb_data_dir if isdir(gdb_data_dir) else None

    def generate_pioinit(self, dst_dir, patterns):
        """Write the .pioinit GDB script, picking a template per server type."""
        server_exe = (self.debug_options.get("server")
                      or {}).get("executable", "").lower()
        if "jlink" in server_exe:
            cfg = initcfgs.GDB_JLINK_INIT_CONFIG
        elif "st-util" in server_exe:
            cfg = initcfgs.GDB_STUTIL_INIT_CONFIG
        elif "mspdebug" in server_exe:
            cfg = initcfgs.GDB_MSPDEBUG_INIT_CONFIG
        elif "qemu" in server_exe:
            cfg = initcfgs.GDB_QEMU_INIT_CONFIG
        elif self.debug_options['require_debug_port']:
            cfg = initcfgs.GDB_BLACKMAGIC_INIT_CONFIG
        else:
            cfg = initcfgs.GDB_DEFAULT_INIT_CONFIG
        commands = cfg.split("\n")

        # user-supplied init commands fully replace the template, if given
        if self.debug_options['init_cmds']:
            commands = self.debug_options['init_cmds']
        commands.extend(self.debug_options['extra_cmds'])

        # ensure the pio_* helper commands exist even if the user overrode them
        if not any("define pio_reset_target" in cmd for cmd in commands):
            commands = [
                "define pio_reset_target",
                " echo Warning! Undefined pio_reset_target command\\n",
                " mon reset",
                "end"
            ] + commands  # yapf: disable
        if not any("define pio_reset_halt_target" in cmd for cmd in commands):
            commands = [
                "define pio_reset_halt_target",
                " echo Warning! Undefined pio_reset_halt_target command\\n",
                " mon reset halt",
                "end"
            ] + commands  # yapf: disable
        if not any("define pio_restart_target" in cmd for cmd in commands):
            commands += [
                "define pio_restart_target",
                " pio_reset_halt_target",
                " $INIT_BREAK",
                " %s" % ("continue" if patterns['INIT_BREAK'] else "next"),
                "end"
            ]  # yapf: disable

        banner = [
            "echo PlatformIO Unified Debugger -> http://bit.ly/pio-debug\\n",
            "echo PlatformIO: debug_tool = %s\\n" % self.debug_options['tool'],
            "echo PlatformIO: Initializing remote target...\\n"
        ]
        footer = ["echo %s\\n" % self.INIT_COMPLETED_BANNER]
        commands = banner + commands + footer

        with open(join(dst_dir, self.PIO_SRC_NAME), "w") as fp:
            fp.write("\n".join(self.apply_patterns(commands, patterns)))

    def connectionMade(self):
        """Record our PID as session owner and start proxying stdin to GDB."""
        self._lock_session(self.transport.pid)

        p = protocol.Protocol()
        p.dataReceived = self.onStdInData
        stdio.StandardIO(p)

    def onStdInData(self, data):
        """Forward user input to GDB, rewriting run/exit commands on the way."""
        if LOG_FILE:
            with open(LOG_FILE, "ab") as fp:
                fp.write(data)

        self._last_server_activity = time.time()

        if b"-exec-run" in data:
            if self._target_is_run:
                # target already running: fake the MI response instead of re-running
                token, _ = data.split(b"-", 1)
                self.outReceived(token + b"^running\n")
                return
            data = data.replace(b"-exec-run", b"-exec-continue")

        if b"-exec-continue" in data:
            self._target_is_run = True
        if b"-gdb-exit" in data or data.strip() in (b"q", b"quit"):
            # Allow terminating via SIGINT/CTRL+C
            signal.signal(signal.SIGINT, signal.default_int_handler)
            self.transport.write(b"pio_reset_target\n")
        self.transport.write(data)

    def processEnded(self, reason):  # pylint: disable=unused-argument
        """Clean up the session lock, scratch dir and debug server."""
        self._unlock_session()
        if self._gdbsrc_dir and isdir(self._gdbsrc_dir):
            fs.rmtree(self._gdbsrc_dir)
        if self._debug_server:
            self._debug_server.terminate()

        reactor.stop()

    def outReceived(self, data):
        """Relay GDB stdout to the user and watch for the init banner."""
        if LOG_FILE:
            with open(LOG_FILE, "ab") as fp:
                fp.write(data)

        self._last_server_activity = time.time()
        super(GDBClient, self).outReceived(data)
        self._handle_error(data)
        # go to init break automatically
        if self.INIT_COMPLETED_BANNER.encode() in data:
            self._auto_continue_timer = task.LoopingCall(
                self._auto_exec_continue)
            self._auto_continue_timer.start(0.1)

    def errReceived(self, data):
        super(GDBClient, self).errReceived(data)
        self._handle_error(data)

    def console_log(self, msg):
        """Emit a console message, wrapped for MI mode when applicable."""
        if helpers.is_mi_mode(self.args):
            self.outReceived(('~"%s\\n"\n' % msg).encode())
        else:
            self.outReceived(("%s\n" % msg).encode())

    def _auto_exec_continue(self):
        """Resume execution to the init break once GDB output goes quiet."""
        auto_exec_delay = 0.5  # in seconds
        if self._last_server_activity > (time.time() - auto_exec_delay):
            return
        if self._auto_continue_timer:
            self._auto_continue_timer.stop()
        self._auto_continue_timer = None

        if not self.debug_options['init_break'] or self._target_is_run:
            return

        self.console_log(
            "PlatformIO: Resume the execution to `debug_init_break = %s`" %
            self.debug_options['init_break'])
        self.console_log("PlatformIO: More configuration options -> "
                         "http://bit.ly/pio-debug")
        self.transport.write(b"0-exec-continue\n" if helpers.
                             is_mi_mode(self.args) else b"continue\n")
        self._target_is_run = True

    def _handle_error(self, data):
        """Report a .pioinit sourcing error via telemetry and disconnect."""
        if (self.PIO_SRC_NAME.encode() not in data
                or b"Error in sourced" not in data):
            return

        configuration = {"debug": self.debug_options, "env": self.env_options}
        exd = re.sub(r'\\(?!")', "/", json.dumps(configuration))
        # BUGFIX: re.sub()'s 4th positional argument is `count`, not `flags`;
        # passing re.I | re.M positionally (== 10) silently capped the number
        # of substitutions instead of enabling the flags. Pass flags= keyword.
        exd = re.sub(r'"(?:[a-z]\:)?((/[^"/]+)+)"',
                     lambda m: '"%s"' % join(*m.group(1).split("/")[-2:]), exd,
                     flags=re.I | re.M)

        mp = MeasurementProtocol()
        mp['exd'] = "DebugGDBPioInitError: %s" % exd
        mp['exf'] = 1
        mp.send("exception")

        self.transport.loseConnection()

    def _kill_previous_session(self):
        """Kill any stale GDB session recorded under this session id."""
        assert self._session_id
        pid = None
        with app.ContentCache() as cc:
            pid = cc.get(self._session_id)
            cc.delete(self._session_id)
        if not pid:
            return
        if "windows" in util.get_systype():
            kill = ["Taskkill", "/PID", pid, "/F"]
        else:
            kill = ["kill", pid]
        try:
            proc.exec_command(kill)
        except:  # pylint: disable=bare-except
            pass

    def _lock_session(self, pid):
        # record our PID for one hour so a later session can kill us if stale
        if not self._session_id:
            return
        with app.ContentCache() as cc:
            cc.set(self._session_id, str(pid), "1h")

    def _unlock_session(self):
        if not self._session_id:
            return
        with app.ContentCache() as cc:
            cc.delete(self._session_id)
| 37.696246
| 79
| 0.601811
|
4a00cbf03a3bf122dc286d5d67b3fc5045d4c93d
| 1,898
|
py
|
Python
|
dis_snek/event_processors/user_events.py
|
Catalyst4222/Dis-Snek
|
1578733acb204f7de45a22b132cd7e8a430e7ace
|
[
"MIT"
] | null | null | null |
dis_snek/event_processors/user_events.py
|
Catalyst4222/Dis-Snek
|
1578733acb204f7de45a22b132cd7e8a430e7ace
|
[
"MIT"
] | null | null | null |
dis_snek/event_processors/user_events.py
|
Catalyst4222/Dis-Snek
|
1578733acb204f7de45a22b132cd7e8a430e7ace
|
[
"MIT"
] | null | null | null |
import logging
from typing import Union
from dis_snek.const import logger_name
from dis_snek.event_processors._template import EventMixinTemplate
from dis_snek.models import listen, events, User, Member, BaseChannel, Timestamp, to_snowflake, Activity
from dis_snek.models.enums import Status
from dis_snek.models.events import RawGatewayEvent
log = logging.getLogger(logger_name)
class UserEvents(EventMixinTemplate):
    """Processes raw user-related gateway events (typing, presence)."""

    @listen()
    async def _on_raw_typing_start(self, event: RawGatewayEvent) -> None:
        """
        Process raw typing start and dispatch a processed typing event.

        Args:
            event: raw typing start event
        """
        author: Union[User, Member]
        channel: BaseChannel
        guild = None

        if member := event.data.get("member"):
            # typing inside a guild: resolve the member and the guild itself
            author = self.cache.place_member_data(event.data.get("guild_id"), member)
            guild = await self.cache.get_guild(event.data.get("guild_id"))
        else:
            # DM typing: only a bare user id is available
            author = await self.cache.get_user(event.data.get("user_id"))

        channel = await self.cache.get_channel(event.data.get("channel_id"))

        self.dispatch(
            events.TypingStart(
                author=author,
                channel=channel,
                guild=guild,
                timestamp=Timestamp.utcfromtimestamp(event.data.get("timestamp")),
            )
        )

    @listen()
    async def _on_raw_presence_update(self, event: RawGatewayEvent) -> None:
        """Dispatch a PresenceUpdate event for users already present in the cache."""
        g_id = to_snowflake(event.data["guild_id"])
        # request_fallback=False: only dispatch for users we already know about
        user = await self.cache.get_user(event.data["user"]["id"], request_fallback=False)
        if user:
            status = Status[event.data["status"].upper()]
            # NOTE(review): assumes "activities" is always present in the payload;
            # a missing key would make this comprehension raise -- confirm upstream
            activities = [Activity.from_dict(a) for a in event.data.get("activities")]
            self.dispatch(events.PresenceUpdate(user, status, activities, event.data.get("client_status", None), g_id))
| 36.5
| 119
| 0.655954
|
4a00cc5488ea0e31763fa394e0eeb4e2a18b910c
| 16,043
|
py
|
Python
|
tests/ut/python/dataset/test_cache_nomap.py
|
i4oolish/mindspore
|
dac3be31d0f2c0a3516200f47af30980e566601b
|
[
"Apache-2.0"
] | 2
|
2020-08-12T16:14:40.000Z
|
2020-12-04T03:05:57.000Z
|
tests/ut/python/dataset/test_cache_nomap.py
|
dilingsong/mindspore
|
4276050f2494cfbf8682560a1647576f859991e8
|
[
"Apache-2.0"
] | null | null | null |
tests/ut/python/dataset/test_cache_nomap.py
|
dilingsong/mindspore
|
4276050f2494cfbf8682560a1647576f859991e8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing cache operator with non-mappable datasets
"""
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.vision.c_transforms as c_vision
from mindspore import log as logger
# TFRecord fixture used by the tests below: 3 images plus the JSON schema describing them
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
# NOTE(review): not referenced by the tests visible in this file
GENERATE_GOLDEN = False
def test_cache_nomap_basic1():
    """
    A random dataset (a non mappable dataset) with a cache over it just after the leaf
    """
    logger.info("Test cache nomap basic 1")

    schema = ds.Schema()
    # 640x480 RGB image: 921600 bytes (a bit less than 1 MB per image)
    schema.add_column('image', de_type=mstype.uint8, shape=[640, 480, 3])
    schema.add_column('label', de_type=mstype.uint8, shape=[1])

    # create a cache. arbitrary session_id for now
    some_cache = ds.DatasetCache(session_id=1, size=0, spilling=True)

    # User-created sampler here
    ds1 = ds.RandomDataset(schema=schema, total_rows=10, num_parallel_workers=4, cache=some_cache)
    ds1 = ds1.repeat(4)

    row_count = 0
    for row in ds1.create_dict_iterator():
        logger.info("printing the label: {}".format(row["label"]))
        row_count += 1
    logger.info("Number of data in ds1: {} ".format(row_count))

    # 10 rows x 4 repeats
    assert row_count == 40
    logger.info("test_cache_nomap_basic1 Ended.\n")
def test_cache_nomap_basic2():
    """
    Cache placed directly over a RandomDataset leaf, with the sampler picked
    implicitly via the num_samples argument.
    """
    logger.info("Test cache nomap basic 2")
    rand_schema = ds.Schema()
    rand_schema.add_column('image', de_type=mstype.uint8,
                           shape=[640, 480, 3])  # 921600 bytes per image
    rand_schema.add_column('label', de_type=mstype.uint8, shape=[1])
    # Arbitrary session id for now.
    cache = ds.DatasetCache(session_id=1, size=0, spilling=True)
    # No sampler given directly; any of num_samples/shuffle/num_shards/shard_id
    # auto-generates one.  Here the presence of num_samples chooses it.
    pipe = ds.RandomDataset(schema=rand_schema, total_rows=20, num_samples=20,
                            num_parallel_workers=4, cache=cache).repeat(2)
    row_count = 0
    for row in pipe.create_dict_iterator():
        logger.info("printing the label: {}".format(row["label"]))
        row_count += 1
    logger.info("Number of data in ds1: {} ".format(row_count))
    assert row_count == 40
    logger.info("test_cache_nomap_basic2 Ended.\n")
def test_cache_nomap_basic3():
    """
    Cache directly over a TFRecord leaf (non-mappable), decode above it:

        Repeat -> Map(decode) -> Cache -> TFReader
    """
    logger.info("Test cache nomap basic 3")
    cache = ds.DatasetCache(session_id=1, size=0, spilling=True)
    pipe = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                              shuffle=False, cache=cache)
    pipe = pipe.map(input_columns=["image"], operations=c_vision.Decode())
    pipe = pipe.repeat(4)
    row_count = sum(1 for _ in pipe.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(row_count))
    assert row_count == 12
    logger.info("test_cache_nomap_basic3 Ended.\n")
def test_cache_nomap_basic4():
    """
    TFRecord leaf with global shuffle and a cache over Map(decode).
    The global shuffle would normally inject a shuffle op over the reader,
    but a cache above makes that shuffle invalid, so tree prepare removes it:

        Repeat -> Cache -> Map(decode) -> TFReader
    """
    logger.info("Test cache nomap basic 4")
    # The fixture holds only 3 records.
    cache = ds.DatasetCache(session_id=1, size=0, spilling=True)
    # "Global" is already the python default shuffle; it is spelled out here
    # for clarity.  With a cache in the ascendant tree, global shuffling is
    # done through the sampler over the cache, and the injected shuffle op
    # is removed during tree prepare.
    pipe = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                              shuffle=ds.Shuffle.GLOBAL)
    pipe = pipe.map(input_columns=["image"], operations=c_vision.Decode(),
                    cache=cache)
    pipe = pipe.repeat(4)
    row_count = sum(1 for _ in pipe.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(row_count))
    assert row_count == 12
    logger.info("test_cache_nomap_basic4 Ended.\n")
def test_cache_nomap_basic5():
    """
    Cache directly over a TFRecord leaf, with no shuffle argument given.
    TF would default to a global shuffle and inject a shuffle op, but the
    cache makes that unnecessary, so the shuffle is never built.  The final
    tree matches basic 3, reached through a different code path:

        Repeat -> Map(decode) -> Cache -> TFReader
    """
    logger.info("Test cache nomap basic 5")
    # The fixture holds only 3 records.
    cache = ds.DatasetCache(session_id=1, size=0, spilling=True)
    pipe = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                              cache=cache)
    pipe = pipe.map(input_columns=["image"], operations=c_vision.Decode())
    pipe = pipe.repeat(4)
    row_count = sum(1 for _ in pipe.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(row_count))
    assert row_count == 12
    logger.info("test_cache_nomap_basic5 Ended.\n")
def test_cache_nomap_basic6():
    """
    Cache directly over a sharded TFRecord leaf.  With a cache present, tree
    prepare undoes the leaf's sharding configuration and substitutes a
    distributed sampler with the same shard config:

        Repeat -> Map(decode) -> Cache -> TFReader
    """
    logger.info("Test cache nomap basic 6")
    # The fixture holds only 3 records.
    cache = ds.DatasetCache(session_id=1, size=0, spilling=True)
    # 3 records split into 3 shards -> expect 1 record for this shard.
    # Sharding is row-based and done by the sampler, not file-based by the
    # tf record leaf node as it would be without a cache.
    pipe = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                              num_shards=3, shard_id=1, cache=cache)
    pipe = pipe.map(input_columns=["image"], operations=c_vision.Decode())
    pipe = pipe.repeat(4)
    row_count = sum(1 for _ in pipe.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(row_count))
    assert row_count == 4
    logger.info("test_cache_nomap_basic6 Ended.\n")
def test_cache_nomap_basic7():
    """
    Globally-shuffled TFRecord leaf cached directly, then mapped.  The global
    shuffle might want to inject a shuffle op over the reader, but with a
    cache given it chooses not to:

        Repeat -> Map(decode) -> cache -> TFReader
    """
    logger.info("Test cache nomap basic 7")
    # The fixture holds only 3 records.
    cache = ds.DatasetCache(session_id=1, size=0, spilling=True)
    pipe = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                              shuffle=ds.Shuffle.GLOBAL, cache=cache)
    pipe = pipe.map(input_columns=["image"], operations=c_vision.Decode())
    pipe = pipe.repeat(4)
    row_count = sum(1 for _ in pipe.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(row_count))
    assert row_count == 12
    logger.info("test_cache_nomap_basic7 Ended.\n")
def test_cache_nomap_allowed_share1():
    """
    One cache may be shared by two trees over identical TFRecord leaves:

        Repeat        Shuffle
          |              |
        Cache          Cache
          |              |
        TFReader       TFReader
    """
    logger.info("Test cache nomap allowed share 1")
    ds.config.set_seed(1)
    # The fixture holds only 3 records.
    cache = ds.DatasetCache(session_id=1, size=0, spilling=True)
    pipe1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                               shuffle=False, cache=cache).repeat(4)
    pipe2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                               shuffle=False, cache=cache).shuffle(buffer_size=2)
    row_count = sum(1 for _ in pipe1.create_dict_iterator())
    assert row_count == 12
    logger.info("Number of data in ds1: {} ".format(row_count))
    row_count = sum(1 for _ in pipe2.create_dict_iterator())
    assert row_count == 3
    logger.info("test_cache_nomap_allowed_share1 Ended.\n")
def test_cache_nomap_allowed_share2():
    """
    One cache may be shared by two trees that both decode before caching:

        Repeat          Shuffle
          |                |
        Cache            Cache
          |                |
        Map(decode)      Map(decode)
          |                |
        TFReader         TFReader
    """
    logger.info("Test cache nomap allowed share 2")
    ds.config.set_seed(1)
    # The fixture holds only 3 records.
    cache = ds.DatasetCache(session_id=2, size=0, spilling=True)
    decode = c_vision.Decode()
    pipe1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                               shuffle=False)
    pipe1 = pipe1.map(input_columns=["image"], operations=decode,
                      cache=cache).repeat(4)
    pipe2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                               shuffle=False)
    pipe2 = pipe2.map(input_columns=["image"], operations=decode,
                      cache=cache).shuffle(buffer_size=2)
    row_count = sum(1 for _ in pipe1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(row_count))
    assert row_count == 12
    row_count = sum(1 for _ in pipe2.create_dict_iterator())
    assert row_count == 3
    logger.info("test_cache_nomap_allowed_share2 Ended.\n")
def test_cache_nomap_allowed_share3():
    """
    One cache may be shared by two trees reading different shards:

        Repeat                    Repeat
          |                         |
        Cache                     Cache
          |                         |
        TFReader(shard_id=0)      TFReader(shard_id=1)
    """
    logger.info("Test cache nomap allowed share 3")
    cache = ds.DatasetCache(session_id=1, size=0, spilling=True)
    tf_files = ["../data/dataset/tf_file_dataset/test1.data", "../data/dataset/tf_file_dataset/test2.data"]
    pipe1 = ds.TFRecordDataset(tf_files, num_shards=2, shard_id=0, num_samples=3,
                               shuffle=False, cache=cache).repeat(4)
    pipe2 = ds.TFRecordDataset(tf_files, num_shards=2, shard_id=1, num_samples=3,
                               shuffle=False, cache=cache).repeat(4)
    row_count = sum(1 for _ in pipe1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(row_count))
    assert row_count == 12
    row_count = sum(1 for _ in pipe2.create_dict_iterator())
    assert row_count == 12
    logger.info("test_cache_nomap_allowed_share3 Ended.\n")
def test_cache_nomap_allowed_share4():
    """
    One cache may be shared by two trees whose maps differ only in
    num_parallel_workers:

        Cache                                  Cache
          |                                      |
        Map(decode, num_parallel_workers=1)    Map(decode, num_parallel_workers=2)
          |                                      |
        TFReader                               TFReader
    """
    logger.info("Test cache nomap allowed share 4")
    # The fixture holds only 3 records.
    cache = ds.DatasetCache(session_id=2, size=0, spilling=True)
    decode = c_vision.Decode()
    pipe1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                               shuffle=False)
    pipe1 = pipe1.map(input_columns=["image"], operations=decode,
                      cache=cache, num_parallel_workers=1)
    pipe2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                               shuffle=False)
    pipe2 = pipe2.map(input_columns=["image"], operations=decode,
                      cache=cache, num_parallel_workers=2)
    row_count = sum(1 for _ in pipe1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(row_count))
    assert row_count == 3
    row_count = sum(1 for _ in pipe2.create_dict_iterator())
    logger.info("Number of data in ds2: {} ".format(row_count))
    assert row_count == 3
    logger.info("test_cache_nomap_allowed_share4 Ended.\n")
def test_cache_nomap_disallowed_share1():
    """
    Sharing one cache between trees with different map ops is rejected:

        Cache            Cache
          |                |
        Map(decode)      Map(rescale)
          |                |
        TFReader         TFReader
    """
    logger.info("Test cache nomap disallowed share1")
    # The fixture holds only 3 records.
    cache = ds.DatasetCache(session_id=1, size=0, spilling=True)
    decode = c_vision.Decode()
    rescale = c_vision.Rescale(1.0 / 255.0, -1.0)
    pipe1 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                               shuffle=False)
    pipe1 = pipe1.map(input_columns=["image"], operations=decode, cache=cache)
    pipe2 = ds.TFRecordDataset(DATA_DIR, SCHEMA_DIR, columns_list=["image"],
                               shuffle=False)
    pipe2 = pipe2.map(input_columns=["image"], operations=rescale, cache=cache)
    row_count = sum(1 for _ in pipe1.create_dict_iterator())
    logger.info("Number of data in ds1: {} ".format(row_count))
    assert row_count == 3
    # The second tree must be refused when it tries to reuse the same cache.
    try:
        sum([1 for _ in pipe2])
    except RuntimeError as err:
        logger.info("Got an exception in DE: {}".format(str(err)))
        assert "Attempt to re-use a cache for a different tree!" in str(err)
    logger.info("test_cache_nomap_disallowed_share1 Ended.\n")
if __name__ == '__main__':
    # Run every cache/non-mappable scenario in order.
    for case in (test_cache_nomap_basic1,
                 test_cache_nomap_basic2,
                 test_cache_nomap_basic3,
                 test_cache_nomap_basic4,
                 test_cache_nomap_basic5,
                 test_cache_nomap_basic6,
                 test_cache_nomap_basic7,
                 test_cache_nomap_allowed_share1,
                 test_cache_nomap_allowed_share2,
                 test_cache_nomap_allowed_share3,
                 test_cache_nomap_allowed_share4,
                 test_cache_nomap_disallowed_share1):
        case()
| 34.206823
| 119
| 0.660724
|
4a00cc7abecae35f7db67142617b521ce3d03ee4
| 1,008
|
py
|
Python
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/streambus/models/AddTopic.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 14
|
2018-04-19T09:53:56.000Z
|
2022-01-27T06:05:48.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/streambus/models/AddTopic.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 15
|
2018-09-11T05:39:54.000Z
|
2021-07-02T12:38:02.000Z
|
python_code/vnev/Lib/site-packages/jdcloud_sdk/services/streambus/models/AddTopic.py
|
Ureimu/weather-robot
|
7634195af388538a566ccea9f8a8534c5fb0f4b6
|
[
"MIT"
] | 33
|
2018-04-20T05:29:16.000Z
|
2022-02-17T09:10:05.000Z
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class AddTopic(object):
    """Request model describing a topic to add (auto-generated, see module header)."""

    def __init__(self, topic=None, target=None, parameterList=None):
        """
        :param topic: (Optional)
        :param target: (Optional)
        :param parameterList: (Optional) Concrete parameters related to archiving
        """
        self.topic = topic
        self.target = target
        self.parameterList = parameterList
| 31.5
| 75
| 0.705357
|
4a00cd2b9984c265db424e1f3e8f72fa416de7d6
| 794
|
py
|
Python
|
scripts/python/grafana.py
|
tesla-cm/tulong
|
f0dcddb5dbfa8a99a29ceea76ca2d68ebfd0c8cd
|
[
"Apache-2.0"
] | null | null | null |
scripts/python/grafana.py
|
tesla-cm/tulong
|
f0dcddb5dbfa8a99a29ceea76ca2d68ebfd0c8cd
|
[
"Apache-2.0"
] | null | null | null |
scripts/python/grafana.py
|
tesla-cm/tulong
|
f0dcddb5dbfa8a99a29ceea76ca2d68ebfd0c8cd
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
# coding=UTF-8
import urllib
import urllib2
import json
def _get_metric_sum():
    """Query OpenTSDB for hourly throughput sums and print their total.

    Fetches a fixed one-day window of the ``durian.adx.bidding.throughput``
    metric from the hard-coded OpenTSDB endpoint, prints every
    timestamp/value pair and then the grand total.  Purely diagnostic:
    everything goes to stdout and nothing is returned.
    """
    api_url = u"http://10.3.204.91:4242/api/query?start=1508342400&end=1508428800&m=sum:1h-sum:durian.adx.bidding.throughput"
    req = urllib2.Request(url=api_url)
    resp = urllib2.urlopen(req)
    try:
        resp_data = json.loads(resp.read())
    finally:
        # Fix: the HTTP response was previously never closed (socket leak).
        resp.close()
    print("--> %s" % resp_data)
    dps_data = resp_data[0]['dps']
    for key, value in dps_data.items():
        print("--> %s, %s" % (key, value))
    vs = tuple(dps_data.values())
    print("values: type: %s, value: %s" % (type(vs), vs))
    # Fix: sum() replaces the hand-rolled reduce(lambda x, y: x + y, ...)
    # fold, which also breaks on Python 3 where reduce is not a builtin.
    dps_total = sum(vs)
    print("--> %s" % dps_total)
if __name__ == "__main__":
    # Fix: the previous body instantiated an undefined `Person` class, which
    # raised NameError on every run.  Invoke the module's real entry point
    # (it was present but commented out).
    _get_metric_sum()
| 25.612903
| 125
| 0.610831
|
4a00cd5eed082cf7ab4bef0c6e2f0806e8669e4e
| 648
|
py
|
Python
|
setup.py
|
nmgcfyxl/apollo_config
|
936ec200a92f17acf7fa9a59033bd2b315aaee5a
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
nmgcfyxl/apollo_config
|
936ec200a92f17acf7fa9a59033bd2b315aaee5a
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
nmgcfyxl/apollo_config
|
936ec200a92f17acf7fa9a59033bd2b315aaee5a
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
"""
apollo 连接工具包
"""
from setuptools import setup, find_packages
import apollo_config
# Short description string, reused by setup() below.
SHORT = u'apollo_config'
setup(
    name='apollo_config',
    # Version/author metadata is sourced from the package itself.
    version=apollo_config.__version__,
    packages=find_packages(),
    install_requires=[
        'requests'
    ],
    author=apollo_config.__author__,
    author_email=apollo_config.__email__,
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
    ],
    include_package_data=True,
    package_data={'': ['*.py', '*.pyc']},
    zip_safe=False,
    platforms='any',
    description=SHORT,
    # The module docstring doubles as the long description.
    long_description=__doc__,
)
| 20.25
| 48
| 0.660494
|
4a00ce460b1ee2e5a69c1f090eea249211bca857
| 51,553
|
py
|
Python
|
statsmodels/genmod/families/family.py
|
varunjha089/statsmodels
|
98f5db981be17aab3801f7266ee8810cef8ee18a
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/genmod/families/family.py
|
varunjha089/statsmodels
|
98f5db981be17aab3801f7266ee8810cef8ee18a
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/genmod/families/family.py
|
varunjha089/statsmodels
|
98f5db981be17aab3801f7266ee8810cef8ee18a
|
[
"BSD-3-Clause"
] | null | null | null |
'''
The one parameter exponential family distributions used by GLM.
'''
# TODO: quasi, quasibinomial, quasipoisson
# see http://www.biostat.jhsph.edu/~qli/biostatistics_r_doc/library/stats/html/family.html
# for comparison to R, and McCullagh and Nelder
import warnings
import inspect
import numpy as np
from scipy import special
from . import links as L
from . import varfuncs as V
FLOAT_EPS = np.finfo(float).eps
class Family(object):
    """
    The parent class for one-parameter exponential families.

    Parameters
    ----------
    link : a link function instance
        Link is the linear transformation function.
        See the individual families for available links.
    variance : a variance function
        Measures the variance as a function of the mean probabilities.
        See the individual families for the default variance function.

    See Also
    --------
    :ref:`links`
    """
    # TODO: change these class attributes, use valid somewhere...
    valid = [-np.inf, np.inf]
    links = []

    def _setlink(self, link):
        """
        Helper method to set the link for a family.

        Raises a ValueError exception if the link is not available. Note that
        the error message might not be that informative because it tells you
        that the link should be in the base class for the link function.

        See glm.GLM for a list of appropriate links for each family but note
        that not all of these are currently available.
        """
        # TODO: change the links class attribute in the families to hold
        # meaningful information instead of a list of links instances such as
        # [<statsmodels.family.links.Log object at 0x9a4240c>,
        # <statsmodels.family.links.Power object at 0x9a423ec>,
        # <statsmodels.family.links.Power object at 0x9a4236c>]
        # for Poisson...
        self._link = link
        if not isinstance(link, L.Link):
            raise TypeError("The input should be a valid Link object.")
        if hasattr(self, "links"):
            # A link is valid when it is an instance of one of the link
            # classes listed for this family.  (Fix: the previous code first
            # assigned ``link in self.links`` and then immediately overwrote
            # the result with the isinstance check, leaving a dead statement;
            # ``any`` also avoids max() raising on an empty links list.)
            validlink = any(isinstance(link, cls) for cls in self.links)
            if not validlink:
                errmsg = "Invalid link for family, should be in %s. (got %s)"
                raise ValueError(errmsg % (repr(self.links), link))

    def _getlink(self):
        """
        Helper method to get the link for a family.
        """
        return self._link

    # link property for each family is a pointer to link instance
    link = property(_getlink, _setlink, doc="Link function for family")

    def __init__(self, link, variance):
        # Accept a link class for backwards compatibility, but warn: callers
        # should pass an instance.
        if inspect.isclass(link):
            warnmssg = "Calling Family(..) with a link class as argument "
            warnmssg += "is deprecated.\n"
            warnmssg += "Use an instance of a link class instead."
            warnings.warn(warnmssg, category=DeprecationWarning)
            self.link = link()
        else:
            self.link = link
        self.variance = variance

    def starting_mu(self, y):
        r"""
        Starting value for mu in the IRLS algorithm.

        Parameters
        ----------
        y : array
            The untransformed response variable.

        Returns
        -------
        mu_0 : array
            The first guess on the transformed response variable.

        Notes
        -----
        .. math::

           \mu_0 = (Y + \overline{Y})/2

        Only the Binomial family takes a different initial value.
        """
        return (y + y.mean())/2.

    def weights(self, mu):
        r"""
        Weights for IRLS steps

        Parameters
        ----------
        mu : array-like
            The transformed mean response variable in the exponential family

        Returns
        -------
        w : array
            The weights for the IRLS steps

        Notes
        -----
        .. math::

           w = 1 / (g'(\mu)^2  *  Var(\mu))
        """
        return 1. / (self.link.deriv(mu)**2 * self.variance(mu))

    def deviance(self, endog, mu, iweights=1., scale=1.):
        r"""
        The deviance function evaluated at (endog,mu,iweights,mu).

        Deviance is usually defined as twice the loglikelihood ratio.

        Parameters
        ----------
        endog : array-like
            The endogenous response variable
        mu : array-like
            The inverse of the link function at the linear predicted values.
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            An optional scale argument. The default is 1.

        Returns
        -------
        Deviance : array
            The value of deviance function defined below.

        Notes
        -----
        Deviance is defined

        .. math::

           D = 2\sum_i (iweights_i * (llf(Y_i, Y_i) - llf(Y_i, \mu_i)))

        where y is the endogenous variable. The deviance functions are
        analytically defined for each family.
        """
        # Abstract: each concrete family defines its own deviance.
        raise NotImplementedError

    def resid_dev(self, endog, mu, scale=1.):
        r"""
        The deviance residuals

        Parameters
        ----------
        endog : array
            The endogenous response variable
        mu : array
            The inverse of the link function at the linear predicted values.
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.

        Returns
        -------
        Deviance residuals.

        Notes
        -----
        The deviance residuals are defined by the contribution D_i of
        observation i to the deviance as

        .. math::

           resid\_dev_i = sign(y_i-\mu_i) \sqrt{D_i}
        """
        raise NotImplementedError

    def fitted(self, lin_pred):
        """
        Fitted values based on linear predictors lin_pred.

        Parameters
        ----------
        lin_pred : array
            Values of the linear predictor of the model.
            dot(X,beta) in a classical linear model.

        Returns
        -------
        mu : array
            The mean response variables given by the inverse of the link
            function.
        """
        fits = self.link.inverse(lin_pred)
        return fits

    def predict(self, mu):
        """
        Linear predictors based on given mu values.

        Parameters
        ----------
        mu : array
            The mean response variables

        Returns
        -------
        lin_pred : array
            Linear predictors based on the mean response variables. The value
            of the link function at the given mu.
        """
        return self.link(mu)

    def loglike(self, endog, mu, iweights=1., scale=1.):
        """
        The log-likelihood function in terms of the fitted mean response.

        Parameters
        ----------
        endog : array
            Usually the endogenous response variable.
        mu : array
            Usually but not always the fitted mean response variable.
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float
            The scale parameter. The default is 1.

        Returns
        -------
        llf : float
            The value of the loglikelihood evaluated at
            (endog,mu,iweights,scale) as defined below.

        Notes
        -----
        This is defined for each family. endog and mu are not restricted to
        `endog` and `mu` respectively.  For instance, the deviance function
        calls both loglike(endog,endog) and loglike(endog,mu) to get the
        likelihood ratio.
        """
        raise NotImplementedError

    def resid_anscombe(self, endog, mu, iweights=1., scale=1.):
        r"""
        The Anscombe residuals

        Parameters
        ----------
        endog : array
            The endogenous response variable
        mu : array
            The inverse of the link function at the linear predicted values.
        iweights : array-like
            1d array of frequency weights. The default is 1.
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.

        See Also
        --------
        statsmodels.genmod.families.family.Family : `resid_anscombe` for the
          individual families for more information

        Notes
        -----
        Anscombe residuals are defined by

        .. math::

           resid\_anscombe_i = \frac{A(y)-A(\mu)}{A'(\mu)\sqrt{Var[\mu]}}

        where :math:`A'(y)=v(y)^{-\frac{1}{3}}` and :math:`v(\mu)` is the
        variance function :math:`Var[y]=\frac{\phi}{w}v(mu)`.

        The transformation :math:`A(y)` makes the residuals more normal
        distributed.
        """
        raise NotImplementedError

    def _clean(self, x):
        """
        Helper function to trim the data so that it is in (0,inf)

        Notes
        -----
        The need for this function was discovered through usage and its
        possible that other families might need a check for validity of the
        domain.
        """
        return np.clip(x, FLOAT_EPS, np.inf)
class Poisson(Family):
    """
    Poisson exponential family.

    Parameters
    ----------
    link : a link instance, optional
        The default link for the Poisson family is the log link. Available
        links are log, identity, and sqrt. See statsmodels.family.links for
        more information.

    Attributes
    ----------
    Poisson.link : a link instance
        The link function of the Poisson instance.
    Poisson.variance : varfuncs instance
        `variance` is an instance of
        statsmodels.genmod.families.family.varfuncs.mu

    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`
    """
    links = [L.log, L.identity, L.sqrt]
    variance = V.mu
    # Valid domain of the response: non-negative counts.
    valid = [0, np.inf]
    safe_links = [L.Log, ]

    def __init__(self, link=None):
        # Default to the canonical log link when none is supplied.
        if link is None:
            link = L.log()
        super(Poisson, self).__init__(link=link, variance=Poisson.variance)

    def resid_dev(self, endog, mu, scale=1.):
        r"""Poisson deviance residual

        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.

        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below

        Notes
        -----
        .. math::

           resid\_dev_i = sign(Y_i - \mu_i) * \sqrt{2 *
                          (Y_i * \log(Y_i / \mu_i) - (Y_i - \mu_i)) / scale}
        """
        # Clip the ratio away from zero so the log below stays finite.
        endog_mu = self._clean(endog / mu)
        return (np.sign(endog - mu) *
                np.sqrt(2 * (endog * np.log(endog_mu) - (endog - mu)) / scale))

    def deviance(self, endog, mu, iweights=1., scale=1.):
        r'''
        Poisson deviance function

        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            An optional scale argument. The default is 1.

        Returns
        -------
        deviance : float
            The deviance function at (endog,mu,iweights,scale) as defined
            below.

        Notes
        -----
        If a constant term is included it is defined as

        .. math::

           D = 2 * \sum_i (iweights_i *
               (Y_i * \log(Y_i / \mu_i) - (Y_i - \mu_i))) / scale
        '''
        # Clip the ratio away from zero so the log below stays finite.
        endog_mu = self._clean(endog / mu)
        return 2 * np.sum(iweights * (endog * np.log(endog_mu) -
                          (endog - mu))) / scale

    def loglike(self, endog, mu, iweights=1., scale=1.):
        r"""
        The log-likelihood function in terms of the fitted mean response.

        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            Not used in the Poisson loglike.

        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,iweights,scale) as defined below.

        Notes
        -----
        .. math::

           llf = \sum_i iweights_i * (Y_i * \log(\mu_i) - \mu_i -
                 \ln \Gamma(Y_i + 1))
        """
        # `scale` is intentionally ignored here (see Parameters above); the
        # formula is the weighted Poisson log-pmf summed over observations.
        return np.sum(iweights * (endog * np.log(mu) - mu -
                      special.gammaln(endog + 1)))

    def resid_anscombe(self, endog, mu, scale=1.):
        r"""
        Anscombe residuals for the Poisson distribution

        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.

        Returns
        -------
        resid_anscombe : array
            The Anscome residuals for the Poisson family defined below

        Notes
        -----
        .. math::

           resid\_anscombe_i = (3/2) * (Y_i^{2/3} - \mu_i^{2/3}) / \mu_i^{1/6}
        """
        return ( (3 / 2.) * (endog**(2/3.) - mu**(2 / 3.)) /
                 (mu**(1 / 6.) * scale**(0.5)) )
class Gaussian(Family):
    """
    Gaussian exponential family distribution.

    Parameters
    ----------
    link : a link instance, optional
        The default link for the Gaussian family is the identity link.
        Available links are log, identity, and inverse.
        See statsmodels.family.links for more information.

    Attributes
    ----------
    Gaussian.link : a link instance
        The link function of the Gaussian instance
    Gaussian.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.constant

    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`
    """
    links = [L.log, L.identity, L.inverse_power]
    variance = V.constant
    # Every available Gaussian link is considered safe.
    safe_links = links

    def __init__(self, link=None):
        # Default to the canonical identity link when none is supplied.
        if link is None:
            link = L.identity()
        super(Gaussian, self).__init__(link=link, variance=Gaussian.variance)

    def resid_dev(self, endog, mu, scale=1.):
        r"""
        Gaussian deviance residuals

        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by scale. The default
            is 1.

        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below

        Notes
        -----
        .. math::

           resid\_dev_i = (Y_i - \mu_i) / \sqrt{scale}
        """
        return (endog - mu) / scale**(0.5)

    def deviance(self, endog, mu, iweights=1., scale=1.):
        r"""
        Gaussian deviance function

        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            An optional scale argument. The default is 1.

        Returns
        -------
        deviance : float
            The deviance function at (endog,mu,iweights,scale)
            as defined below.

        Notes
        -----
        .. math::

           D = \sum_i iweights_i * (Y_i - \mu_i)^2 / scale
        """
        return np.sum((iweights * (endog - mu) ** 2)) / scale

    def loglike(self, endog, mu, iweights=1., scale=1.):
        r"""
        The log-likelihood in terms of the fitted mean response.

        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            Scales the loglikelihood function. The default is 1.

        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,iweights,scale) as defined below.

        Notes
        -----
        If the link is the identity link function then the
        loglikelihood function is the same as the classical OLS model.

        .. math::

           llf = -nobs / 2 * (\log(SSR) + (1 + \log(2 \pi / nobs)))

        where

        .. math::

           SSR = \sum_i (Y_i - g^{-1}(\mu_i))^2

        If the links is not the identity link then the loglikelihood
        function is defined as

        .. math::

           llf = -1 / 2 \sum_i * iweights_i * ((Y_i - mu_i)^2 / scale +
                 \log(2 * \pi * scale))
        """
        if isinstance(self.link, L.Power) and self.link.power == 1:
            # This is just the loglikelihood for classical OLS
            nobs2 = np.sum(iweights, axis=0) / 2.
            # NOTE(review): in this branch `mu` is passed through
            # self.fitted(), i.e. treated as the linear predictor — confirm
            # against callers.
            SSR = np.sum((endog-self.fitted(mu))**2, axis=0)
            llf = -np.log(SSR) * nobs2
            llf -= (1+np.log(np.pi/nobs2))*nobs2
            return llf
        else:
            # General case: sum of weighted normal log-densities.
            return np.sum(-0.5 * iweights * ((endog - mu) ** 2 / scale +
                          np.log(2 * np.pi * scale)))

    def resid_anscombe(self, endog, mu, scale=1.):
        r"""
        The Anscombe residuals for the Gaussian distribution

        Parameters
        ----------
        endog : array
            Endogenous response variable
        mu : array
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.

        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals for the Gaussian family defined below

        Notes
        -----
        For the Gaussian distribution, Anscombe residuals are the same as
        deviance residuals.

        .. math::

           resid\_anscombe_i = (Y_i - \mu_i) / \sqrt{scale}
        """
        return (endog - mu) / scale**(0.5)
class Gamma(Family):
    """
    Gamma exponential family distribution.
    Parameters
    ----------
    link : a link instance, optional
        The default link for the Gamma family is the inverse link.
        Available links are log, identity, and inverse.
        See statsmodels.family.links for more information.
    Attributes
    ----------
    Gamma.link : a link instance
        The link function of the Gamma instance
    Gamma.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.mu_squared
    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`
    """
    # Link functions accepted by this family; `safe_links` flags the link
    # *classes* (not instances) treated as numerically safe for Gamma.
    links = [L.log, L.identity, L.inverse_power]
    variance = V.mu_squared
    safe_links = [L.Log, ]
    def __init__(self, link=None):
        # Default to the canonical inverse-power link when none is supplied.
        if link is None:
            link = L.inverse_power()
        super(Gamma, self).__init__(link=link, variance=Gamma.variance)
    def deviance(self, endog, mu, iweights=1., scale=1.):
        r"""
        Gamma deviance function
        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            An optional scale argument. The default is 1.
        Returns
        -------
        deviance : float
            Deviance function as defined below
        Notes
        -----
        .. math::
           D = 2 * \sum_i iweights_i *
               ((Y_i - \mu_i)/\mu_i - \log(Y_i / \mu_i)) / scale
        """
        # _clean presumably clips the ratio away from zero so np.log below
        # stays finite (inherited from Family) — TODO confirm.
        endog_mu = self._clean(endog / mu)
        return 2*np.sum(iweights*((endog-mu)/mu-np.log(endog_mu)))/scale
    def resid_dev(self, endog, mu, scale=1.):
        r"""
        Gamma deviance residuals
        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.
        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below
        Notes
        -----
        .. math::
           resid\_dev_i = sign(Y_i - \mu_i) \sqrt{2 *
                          ((Y_i - \mu_i) / \mu_i - \log(Y_i / \mu_i))/scale}
        """
        endog_mu = self._clean(endog / mu)
        # Per-observation signed square root of the deviance contribution.
        return np.sign(endog - mu) * np.sqrt(2 *
                        ((endog - mu)/mu - np.log(endog_mu))/scale)
    def loglike(self, endog, mu, iweights=1., scale=1.):
        r"""
        The log-likelihood function in terms of the fitted mean response.
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            The default is 1.
        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,iweights,scale) as defined below.
        Notes
        -----
        .. math::
           llf = -1 / scale * \sum_i  *(Y_i / \mu_i+ \log(\mu_i)+
                 (scale -1) * \log(Y) + \log(scale) + scale *
                 \ln \Gamma(1 / scale))
        """
        endog_mu = self._clean(endog / mu)
        return - np.sum((endog_mu - np.log(endog_mu) + scale *
                         np.log(endog) + np.log(scale) + scale *
                         special.gammaln(1./scale)) * iweights) / scale
    # in Stata scale is set to equal 1 for reporting llf
    # in R it's the dispersion, though there is a loss of precision vs.
    # our results due to an assumed difference in implementation
    def resid_anscombe(self, endog, mu, scale=1.):
        r"""
        The Anscombe residuals for Gamma distribution
        Parameters
        ----------
        endog : array
            Endogenous response variable
        mu : array
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.
        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals for the Gamma family defined below
        Notes
        -----
        .. math::
            resid\_anscombe_i = 3 * (Y_i^{1/3} - \mu_i^{1/3}) / \mu_i^{1/3}
            / \sqrt{scale}
        """
        return 3 * (endog**(1/3.) - mu**(1/3.)) / mu**(1/3.) / scale**(0.5)
class Binomial(Family):
    """
    Binomial exponential family distribution.
    Parameters
    ----------
    link : a link instance, optional
        The default link for the Binomial family is the logit link.
        Available links are logit, probit, cauchy, log, and cloglog.
        See statsmodels.family.links for more information.
    Attributes
    ----------
    Binomial.link : a link instance
        The link function of the Binomial instance
    Binomial.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.binary
    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`
    Notes
    -----
    endog for Binomial can be specified in one of three ways.
    """
    links = [L.logit, L.probit, L.cauchy, L.log, L.cloglog, L.identity]
    variance = V.binary  # this is not used below in an effort to include n
    # Other safe links, e.g. cloglog and probit are subclasses
    safe_links = [L.Logit, L.CDFLink]
    def __init__(self, link=None):  # , n=1.):
        if link is None:
            link = L.logit()
        # TODO: it *should* work for a constant n>1 actually, if freq_weights
        # is equal to n
        # n defaults to 1 (Bernoulli); initialize() overwrites it with the
        # per-observation trial counts when endog is given as 2d
        # (successes, failures).
        self.n = 1
        # overwritten by initialize if needed but always used to initialize
        # variance since endog is assumed/forced to be (0,1)
        super(Binomial, self).__init__(link=link, variance=V.Binomial(n=self.n))
    def starting_mu(self, y):
        r"""
        The starting values for the IRLS algorithm for the Binomial family.
        A good choice for the binomial family is :math:`\mu_0 = (Y_i + 0.5)/2`
        """
        return (y + .5)/2
    def initialize(self, endog, freq_weights):
        '''
        Initialize the response variable.
        Parameters
        ----------
        endog : array
            Endogenous response variable
        freq_weights : array
            1d array of frequency weights
        Returns
        --------
        If `endog` is binary, returns `endog`
        If `endog` is a 2d array, then the input is assumed to be in the format
        (successes, failures) and
        successes/(success + failures) is returned.  And n is set to
        successes + failures.
        '''
        # if not np.all(np.asarray(freq_weights) == 1):
        #     self.variance = V.Binomial(n=freq_weights)
        if (endog.ndim > 1 and endog.shape[1] > 1):
            y = endog[:, 0]
            # overwrite self.freq_weights for deviance below
            self.n = endog.sum(1)
            # Proportion of successes plus the trial counts; mu/endog are
            # kept on the (0, 1) scale from here on.
            return y*1./self.n, self.n
        else:
            return endog, np.ones(endog.shape[0])
    def deviance(self, endog, mu, iweights=1, scale=1.):
        r'''
        Deviance function for either Bernoulli or Binomial data.
        Parameters
        ----------
        endog : array-like
            Endogenous response variable (already transformed to a probability
            if appropriate).
        mu : array
            Fitted mean response variable
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            An optional scale argument. The default is 1.
        Returns
        --------
        deviance : float
            The deviance function as defined below
        Notes
        -----
        Binomial in general:
        .. math::
           D = 2 * \sum_i iweights * (Y_i * \log(Y_i / \mu_i)
               + (n_i - Y_i) * \log((n_i - Y_i) / (n_i - \mu_i))) / scale
        Since :math:`Y_i` and :math:`\mu_i` are transformed to :math:`[0,1]`
        in Binomial.initialize, the following version is implemented:
        .. math::
           D = 2 * \sum_i iweights n_i * (Y_i * \log(Y_i / \mu_i)
               + (1 - Y_i) * \log((1 - Y_i) / (1 - \mu_i))) / scale
        '''
        # _clean presumably guards both ratios away from zero so the logs
        # stay finite (inherited from Family) — TODO confirm.
        endog_mu = self._clean(endog / mu)
        n_endog_mu = self._clean((1. - endog) / (1. - mu))
        return 2 * np.sum(iweights * self.n *
                          (endog * np.log(endog_mu) +
                           (1. - endog) * np.log(n_endog_mu))) / scale
    def resid_dev(self, endog, mu, scale=1.):
        r"""
        Binomial deviance residuals
        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.
        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below
        Notes
        -----
        Binomial in general:
        .. math::
           resid\_dev_i = sign(Y_i - \mu_i) \sqrt{2 *
                          (Y_i * \log(Y_i / \mu_i) + (n_i - Y_i) *
                          \log(n_i - Y_i)/(n_i - \mu_i))/scale}
        Since :math:`Y_i` and :math:`\mu_i` are transformed to :math:`[0,1]`
        in Binomial.initialize, the following version is implemented:
        .. math::
           resid\_dev_i = sign(Y_i - \mu_i) \sqrt{ 2 *
                          n_i * (Y_i * \log(Y_i / \mu_i) + (1 - Y_i) *
                          \log((1 - Y_i) / (1 - \mu_i)))/scale}
        """
        endog_mu = self._clean(endog / mu)
        n_endog_mu = self._clean((1. - endog) / (1. - mu))
        return (np.sign(endog - mu) *
                np.sqrt(2 * self.n * (endog * np.log(endog_mu) +
                        (1. - endog) * np.log(n_endog_mu))/scale))
    def loglike(self, endog, mu, iweights=1, scale=1.):
        r"""
        The log-likelihood function in terms of the fitted mean response.
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            Not used in the Binomial loglike.
        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,iweights,scale) as defined below.
        Notes
        -----
        If the endogenous variable is binary:
        .. math::
         llf = \sum_i (y_i * \log(\mu_i/(1-\mu_i)) + \log(1-\mu_i)) *
               iweights_i
        If the endogenous variable is binomial:
        .. math::
           llf = \sum_i iweights_i * (\ln \Gamma(n+1) -
                 \ln \Gamma(y_i + 1) - \ln \Gamma(n_i - y_i +1) + y_i *
                 \log(\mu_i / (n_i - \mu_i)) + n * \log(1 - \mu_i/n_i))
        where :math:`y_i = Y_i * n_i` with :math:`Y_i` and :math:`n_i` as
        defined in Binomial initialize.  This simply makes :math:`y_i` the
        original number of successes.
        """
        # Scalar n == 1 means Bernoulli data; otherwise use the full
        # binomial likelihood with the per-observation trial counts.
        if np.shape(self.n) == () and self.n == 1:
            return np.sum((endog * np.log(mu/(1 - mu)) +
                           np.log(1 - mu)) * iweights)
        else:
            y = endog * self.n  # convert back to successes
            # note that mu is still in (0,1), i.e. not convertet back
            return np.sum((special.gammaln(self.n + 1) -
                           special.gammaln(y + 1) -
                           special.gammaln(self.n - y + 1) + y *
                           np.log(mu/(1 - mu)) + self.n *
                           np.log(1 - mu)) * iweights)
    def resid_anscombe(self, endog, mu, scale=1.):
        r'''
        The Anscombe residuals
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.
        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals as defined below.
        Notes
        -----
        .. math::
            n^{2/3}*(cox_snell(endog)-cox_snell(mu))
            / (mu*(1-mu/n)*scale^3)^{1/6}
        where cox_snell is defined as
        cox_snell(x) = betainc(2/3., 2/3., x)*betainc(2/3.,2/3.)
        where betainc is the incomplete beta function as defined in scipy,
        which uses a regularized version (with the unregularized version, one
        would just have :math:`cox_snell(x) = Betainc(2/3., 2/3., x)`).
        The name 'cox_snell' is idiosyncratic and is simply used for
        convenience following the approach suggested in Cox and Snell (1968).
        Further note that
        :math:`cox_snell(x) = \frac{3}{2}*x^{2/3} *
        hyp2f1(2/3.,1/3.,5/3.,x)`
        where hyp2f1 is the hypergeometric 2f1 function.  The Anscombe
        residuals are sometimes defined in the literature using the
        hyp2f1 formulation.  Both betainc and hyp2f1 can be found in scipy.
        References
        ----------
        Anscombe, FJ. (1953) "Contribution to the discussion of H. Hotelling's
        paper." Journal of the Royal Statistical Society B. 15, 229-30.
        Cox, DR and Snell, EJ. (1968) "A General Definition of Residuals."
        Journal of the Royal Statistical Society B. 30, 248-75.
        '''
        endog = endog * self.n  # convert back to successes
        mu = mu * self.n  # convert back to successes
        # Unregularized incomplete beta via scipy's regularized betainc.
        cox_snell = lambda x: (special.betainc(2/3., 2/3., x)
                               * special.beta(2/3., 2/3.))
        return self.n**(2/3.) * (cox_snell(endog*1./self.n) \
                - cox_snell(mu*1./self.n)) \
                / (mu * (1 - mu*1./self.n) * scale**3)**(1/6.)
class InverseGaussian(Family):
    """
    InverseGaussian exponential family.
    Parameters
    ----------
    link : a link instance, optional
        The default link for the inverse Gaussian family is the
        inverse squared link.
        Available links are inverse_squared, inverse, log, and identity.
        See statsmodels.family.links for more information.
    Attributes
    ----------
    InverseGaussian.link : a link instance
        The link function of the inverse Gaussian instance
    InverseGaussian.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.mu_cubed
    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`
    Notes
    -----
    The inverse Guassian distribution is sometimes referred to in the
    literature as the Wald distribution.
    """
    links = [L.inverse_squared, L.inverse_power, L.identity, L.log]
    variance = V.mu_cubed
    safe_links = [L.inverse_squared, L.Log, ]
    def __init__(self, link=None):
        # Default to the canonical inverse-squared link when none is given.
        if link is None:
            link = L.inverse_squared()
        super(InverseGaussian, self).__init__(
            link=link, variance=InverseGaussian.variance)
    def resid_dev(self, endog, mu, scale=1.):
        r"""
        Returns the deviance residuals for the inverse Gaussian family.
        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.
        Returns
        -------
        resid_dev : array
            Deviance residuals as defined below
        Notes
        -----
        .. math::
           resid\_dev_i = sign(Y_i - \mu_i) *
                          \sqrt{(Y_i - \mu_i)^2 / (Y_i * \mu_i^2) / scale}
        """
        return np.sign(endog-mu) * np.sqrt((endog-mu)**2/(endog*mu**2)/scale)
    def deviance(self, endog, mu, iweights=1., scale=1.):
        r"""
        Inverse Gaussian deviance function
        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            An optional scale argument. The default is 1.
        Returns
        -------
        deviance : float
            Deviance function as defined below
        Notes
        -----
        .. math::
           D = \sum_i iweights_i * ((Y_i - \mu_i)^2 / (Y_i *\mu_i^2)) /
               scale
        """
        # Parameter renamed from the misspelled `iweighs` to `iweights` so
        # keyword calls match the docstring and every sibling family class.
        return np.sum(iweights*(endog-mu)**2/(endog*mu**2))/scale
    def loglike(self, endog, mu, iweights=1., scale=1.):
        r"""
        The log-likelihood function in terms of the fitted mean response.
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            The default is 1.
        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,iweights,scale) as defined below.
        Notes
        -----
        .. math::
           llf = -1/2 * \sum_i iweights_i * ((Y_i - \mu_i)^2 / (Y_i *
                 \mu_i^2 * scale) + \log(scale * Y_i^3) + \log(2 * \pi))
        """
        return -.5 * np.sum(((endog - mu)**2/(endog * mu**2 * scale) +
                             np.log(scale * endog**3) + np.log(2 * np.pi)) *
                            iweights)
    def resid_anscombe(self, endog, mu, scale=1.):
        r"""
        The Anscombe residuals for the inverse Gaussian distribution
        Parameters
        ----------
        endog : array
            Endogenous response variable
        mu : array
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.
        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals for the inverse Gaussian distribution  as
            defined below
        Notes
        -----
        .. math::
           resid\_anscombe_i = \log(Y_i / \mu_i) / \sqrt{\mu_i * scale}
        """
        return np.log(endog / mu) / np.sqrt(mu * scale)
class NegativeBinomial(Family):
    r"""
    Negative Binomial exponential family.
    Parameters
    ----------
    link : a link instance, optional
        The default link for the negative binomial family is the log link.
        Available links are log, cloglog, identity, nbinom and power.
        See statsmodels.family.links for more information.
    alpha : float, optional
        The ancillary parameter for the negative binomial distribution.
        For now `alpha` is assumed to be nonstochastic.  The default value
        is 1.  Permissible values are usually assumed to be between .01 and 2.
    Attributes
    ----------
    NegativeBinomial.link : a link instance
        The link function of the negative binomial instance
    NegativeBinomial.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.nbinom
    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`
    Notes
    -----
    Power link functions are not yet supported.
    Parameterization for :math:`y=0,1,2,\ldots` is
    :math:`f(y) = \frac{\Gamma(y+\frac{1}{\alpha})}{y!\Gamma(\frac{1}{\alpha})}
    \left(\frac{1}{1+\alpha\mu}\right)^{\frac{1}{\alpha}}
    \left(\frac{\alpha\mu}{1+\alpha\mu}\right)^y`
    with :math:`E[Y]=\mu\,` and :math:`Var[Y]=\mu+\alpha\mu^2`.
    """
    links = [L.log, L.cloglog, L.identity, L.nbinom, L.Power]
    # TODO: add the ability to use the power links with an if test
    # similar to below
    variance = V.nbinom
    safe_links = [L.Log, ]
    def __init__(self, link=None, alpha=1.):
        self.alpha = 1. * alpha  # make it at least float
        if link is None:
            link = L.log()
        super(NegativeBinomial, self).__init__(
            link=link, variance=V.NegativeBinomial(alpha=self.alpha))
    def deviance(self, endog, mu, iweights=1., scale=1.):
        r"""
        Returns the value of the deviance function.
        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            An optional scale argument. The default is 1.
        Returns
        -------
        deviance : float
            Deviance function as defined below
        Notes
        -----
        .. math:
           D = 2 * Y_i * \log(Y_i / \mu_i) - (2 / \alpha) *
           (1 + \alpha * Y_i) * \ln(1 + \alpha * Y_i) / (1 + \alpha * \mu_i)
        """
        # _clean presumably clips the ratios away from zero so the logs
        # below stay finite (inherited from Family) — TODO confirm.
        endog_mu = self._clean(endog / mu)
        tmp = self._clean((1 + self.alpha * endog) / (1 + self.alpha * mu))
        return np.sum(iweights * (2 * endog * np.log(endog_mu) -
                      2 / self.alpha * (1 + self.alpha * endog) *
                      np.log(tmp))) / scale
    def resid_dev(self, endog, mu, scale=1.):
        r"""
        Negative Binomial Deviance Residual
        Parameters
        ----------
        endog : array-like
            `endog` is the response variable
        mu : array-like
            `mu` is the fitted value of the model
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.
        Returns
        --------
        resid_dev : array
            The array of deviance residuals
        Notes
        -----
        .. math::
           resid_dev_i = sign(Y_i-\mu_i) * \sqrt{(2 * Y_i * \log(Y_i / \mu_i)
           - (2 / \alpha) * (1 + \alpha * Y_i)
           * \log((1 + \alpha * Y_i) / (1 + \alpha * \mu_i)))/scale}
        """
        endog_mu = self._clean(endog / mu)
        tmp = self._clean((1 + self.alpha * endog) / (1 + self.alpha * mu))
        # Signed square root of each observation's deviance contribution.
        return (np.sign(endog - mu) *
                np.sqrt((2 * endog * np.log(endog_mu) -
                        2 / self.alpha * (1 + self.alpha * endog) *
                        np.log(tmp)) / scale))
    def loglike(self, endog, mu, iweights=1., scale=1.):
        r"""
        The log-likelihood function in terms of the fitted mean response.
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            The fitted mean response values
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float
            The scale parameter. The default is 1.
        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,iweights,scale) as defined below.
        Notes
        -----
        Defined as:
        .. math::
           llf = \sum_i iweights_i * (Y_i * \log{(\alpha * \mu_i /
                 (1 + \alpha * \mu_i))} - \log{(1 + \alpha * \mu_i)}/
                 \alpha + Constant)
        where :math:`Constant` is defined as:
        .. math::
           Constant = \ln \Gamma{(Y_i + 1/ \alpha )} - \ln \Gamma(Y_i + 1) -
                      \ln \Gamma{(1/ \alpha )}
        """
        # The endog-only gamma-function terms do not depend on mu.
        constant = (special.gammaln(endog + 1 / self.alpha) -
                    special.gammaln(endog+1)-special.gammaln(1/self.alpha))
        return np.sum((endog * np.log(self.alpha * mu /
                                      (1 + self.alpha * mu)) -
                       np.log(1 + self.alpha * mu) / self.alpha +
                       constant) * iweights)
    def resid_anscombe(self, endog, mu, scale=1.):
        r"""
        The Anscombe residuals for the negative binomial family
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.
        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals as defined below.
        Notes
        -----
        Anscombe residuals for Negative Binomial are the same as for Binomial
        upon setting :math:`n=-\frac{1}{\alpha}`. Due to the negative value of
        :math:`-\alpha*Y` the representation with the hypergeometric function
        :math:`H2F1(x) =  hyp2f1(2/3.,1/3.,5/3.,x)` is advantageous
        .. math::
            resid_anscombe_i = \frac{3}{2} *
            (Y_i^(2/3)*H2F1(-\alpha*Y_i) - \mu_i^(2/3)*H2F1(-\alpha*\mu_i))
            / (\mu_i * (1+\alpha*\mu_i) * scale^3)^(1/6)
        Note that for the (unregularized) Beta function, one has
        :math:`Beta(z,a,b) = z^a/a * H2F1(a,1-b,a+1,z)`
        """
        hyp2f1 = lambda x: special.hyp2f1(2 / 3., 1 / 3., 5 / 3., x)
        return 3/2. * (endog**(2/3.) * hyp2f1(-self.alpha * endog) -
                       mu**(2/3.) * hyp2f1(-self.alpha * mu)) \
            / (mu * (1+self.alpha * mu)*scale**3)**(1/6)
class Tweedie(Family):
    """
    Tweedie family.
    Parameters
    ----------
    link : a link instance, optional
        The default link for the Tweedie family is the log link.
        Available links are log and Power.
        See statsmodels.family.links for more information.
    var_power : float, optional
        The variance power. The default is 1.
    Attributes
    ----------
    Tweedie.link : a link instance
        The link function of the Tweedie instance
    Tweedie.variance : varfunc instance
        `variance` is an instance of statsmodels.family.varfuncs.Power
    Tweedie.var_power : float
        The power of the variance function.
    See also
    --------
    statsmodels.genmod.families.family.Family
    :ref:`links`
    Notes
    -----
    Logliklihood function not implemented because of the complexity of
    calculating an infinite series of summations. The variance power can be
    estimated using the `estimate_tweedie_power` function that is part of the
    `GLM` class.
    """
    links = [L.log, L.Power]
    variance = V.Power
    safe_links = [L.log, L.Power]
    def __init__(self, link=None, var_power=1.):
        self.var_power = var_power
        if link is None:
            link = L.log()
        super(Tweedie, self).__init__(
            link=link, variance=V.Power(power=var_power * 1.))
    def deviance(self, endog, mu, iweights=1., scale=1.):
        r"""
        Returns the value of the deviance function.
        Parameters
        -----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float, optional
            An optional scale argument. The default is 1.
        Returns
        -------
        deviance : float
            Deviance function as defined below
        Notes
        -----
        When :math:`p = 1`,
        .. math::
            dev_i = \mu
        when :math:`Y_i = 0` and
        .. math::
            dev_i = Y_i * \log(Y_i / \mu_i) + (\mu_i - Y_i)
        otherwise.
        When :math:`p = 2`,
        .. math::
            dev_i =  (Y_i - \mu_i) / \mu_i - \log(Y_i / \mu_i)
        For all other p,
        .. math::
            dev_i = Y_i^{2 - p} / ((1 - p) * (2 - p)) -
                    Y_i * \mu_i^{1 - p} / (1 - p) + \mu_i^{2 - p} /
                    (2 - p)
        Once :math:`dev_i` is calculated, then deviance is calculated as
        .. math::
            D = \sum{2 * iweights * dev_i / scale}
        """
        # p = 1 (Poisson-like) and p = 2 (Gamma-like) need dedicated
        # branches; the general formula divides by zero at those powers.
        p = self.var_power
        if p == 1:
            dev = np.where(endog == 0,
                           mu,
                           endog * np.log(endog / mu) + (mu - endog))
        elif p == 2:
            # Clip endog away from zero so the log stays finite.
            endog1 = np.clip(endog, FLOAT_EPS, np.inf)
            dev = ((endog - mu) / mu) - np.log(endog1 / mu)
        else:
            dev = (endog ** (2 - p) / ((1 - p) * (2 - p)) -
                   endog * mu ** (1 - p) / (1 - p) + mu ** (2 - p) / (2 - p))
        return np.sum(2 * iweights * dev / scale)
    def resid_dev(self, endog, mu, scale=1.):
        r"""
        Tweedie Deviance Residual
        Parameters
        ----------
        endog : array-like
            `endog` is the response variable
        mu : array-like
            `mu` is the fitted value of the model
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.
        Returns
        --------
        resid_dev : array
            The array of deviance residuals
        Notes
        -----
        When :math:`p = 1`,
        .. math::
            dev_i = \mu_i
        when :math:`Y_i = 0` and
        .. math::
            dev_i = Y_i * \log(Y_i / \mu_i) + (\mu_i - Y_i)
        otherwise.
        When :math:`p = 2`,
        .. math::
            dev_i =  (Y_i - \mu_i) / \mu_i - \log(Y_i / \mu_i)
        For all other p,
        .. math::
            dev_i = Y_i^{2 - p} / ((1 - p) * (2 - p)) -
                    Y_i * \mu_i^{1 - p} / (1 - p) + \mu_i^{2 - p} /
                    (2 - p)
        The deviance residual is then
        .. math::
            resid\_dev_i = sign(Y_i-\mu_i) * \sqrt{2 * dev_i / scale}
        """
        p = self.var_power
        if p == 1:
            dev = np.where(endog == 0,
                           mu,
                           endog * np.log(endog / mu) + (mu - endog))
        elif p == 2:
            # NOTE(review): deviance() guards endog with
            # np.clip(endog, FLOAT_EPS, np.inf) here, while this method uses
            # self._clean — confirm both guards are equivalent.
            endog1 = self._clean(endog)
            dev = ((endog - mu) / mu) - np.log(endog1 / mu)
        else:
            dev = (endog ** (2 - p) / ((1 - p) * (2 - p)) -
                   endog * mu ** (1-p) / (1 - p) + mu ** (2 - p) / (2 - p))
        return np.sign(endog - mu) * np.sqrt(2 * dev / scale)
    def loglike(self, endog, mu, iweights=1., scale=1.):
        r"""
        The log-likelihood function in terms of the fitted mean response.
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            The fitted mean response values
        iweights : array-like
            1d array of weights. The default is 1.
        scale : float
            The scale parameter. The default is 1.
        Returns
        -------
        llf : float
            The value of the loglikelihood function evaluated at
            (endog,mu,iweights,scale) as defined below.
        Notes
        -----
        This is not implemented because of the complexity of calculating an
        infinite series of sums.
        """
        # Deliberately unimplemented: the Tweedie likelihood has no closed
        # form, so callers receive NaN rather than an approximation.
        return np.nan
    def resid_anscombe(self, endog, mu, scale=1.):
        r"""
        The Anscombe residuals for the Tweedie family
        Parameters
        ----------
        endog : array-like
            Endogenous response variable
        mu : array-like
            Fitted mean response variable
        scale : float, optional
            An optional argument to divide the residuals by sqrt(scale).
            The default is 1.
        Returns
        -------
        resid_anscombe : array
            The Anscombe residuals as defined below.
        Notes
        -----
        When :math:`p = 3`, then
        .. math::
            resid\_anscombe_i = \log(Y_i / \mu_i) / \sqrt{\mu_i * scale}
        Otherwise,
        .. math::
            c = (3 - p) / 3
        .. math::
            resid\_anscombe_i = (1 / c) * (Y_i^c - \mu_i^c) / \mu_i^{p / 6}
            / \sqrt{scale}
        """
        # p = 3 (inverse Gaussian) is a special case of the general formula.
        if self.var_power == 3:
            return np.log(endog / mu) / np.sqrt(mu * scale)
        else:
            c = (3. - self.var_power) / 3.
            return ((1. / c) * (endog ** c - mu ** c) /
                    mu ** (self.var_power / 6.)) / scale**(0.5)
| 30.025044
| 90
| 0.529979
|
4a00ce47f63a1ead42fc9c72e6648584279bb9b8
| 789
|
py
|
Python
|
contact/models.py
|
Cifram/shadowcon
|
46ff5921390743d2b64af7e15c08b86470e070ac
|
[
"MIT"
] | null | null | null |
contact/models.py
|
Cifram/shadowcon
|
46ff5921390743d2b64af7e15c08b86470e070ac
|
[
"MIT"
] | null | null | null |
contact/models.py
|
Cifram/shadowcon
|
46ff5921390743d2b64af7e15c08b86470e070ac
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import Group, User
class EmailList(models.Model):
    # A named mailing list that contact submissions can be routed to.
    name = models.CharField(max_length=256)
    def __str__(self):
        return self.name
class GroupEmailEntry(models.Model):
    # Membership row linking an auth Group to an EmailList.
    # NOTE: the field name `list` shadows the builtin, but renaming it would
    # change the database column and any code using the attribute.
    list = models.ForeignKey(EmailList, on_delete=models.CASCADE)
    group = models.ForeignKey(Group, on_delete=models.CASCADE)
class UserEmailEntry(models.Model):
    # Membership row linking an individual User to an EmailList.
    # NOTE: the field name `list` shadows the builtin, but renaming it would
    # change the database column and any code using the attribute.
    list = models.ForeignKey(EmailList, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
class ContactReason(models.Model):
    # A user-selectable reason for contacting the site; each reason routes
    # mail to one EmailList. Names are unique across reasons.
    name = models.CharField(max_length=256, unique=True)
    list = models.ForeignKey(EmailList, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
| 26.3
| 65
| 0.752852
|
4a00ce94bc1df00fb99cea493a26983a68fca841
| 6,382
|
py
|
Python
|
nncf/torch/dynamic_graph/graph_tracer.py
|
MaximProshin/nncf
|
2290d2f4cebcf6749e419dc76850e7bd8b7d8da1
|
[
"Apache-2.0"
] | null | null | null |
nncf/torch/dynamic_graph/graph_tracer.py
|
MaximProshin/nncf
|
2290d2f4cebcf6749e419dc76850e7bd8b7d8da1
|
[
"Apache-2.0"
] | null | null | null |
nncf/torch/dynamic_graph/graph_tracer.py
|
MaximProshin/nncf
|
2290d2f4cebcf6749e419dc76850e7bd8b7d8da1
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2019-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from typing import Callable, Any, List, Optional
from copy import deepcopy
import torch
from nncf.torch.dynamic_graph.graph import DynamicGraph
from nncf.torch.utils import get_model_device
class ModelInputInfo:
    """Description of one model input: shape, torch dtype, optional keyword
    argument name, and the fill mode used when generating mock tensors."""
    FILLER_TYPE_ONES = "ones"
    FILLER_TYPE_ZEROS = "zeros"
    FILLER_TYPE_RANDOM = "random"
    FILLER_TYPES = [FILLER_TYPE_ONES, FILLER_TYPE_ZEROS, FILLER_TYPE_RANDOM]
    def __init__(self, shape: List[int], type_str: str = "float", keyword=None, filler=None):
        self.shape = shape
        self.type = self._string_to_torch_type(type_str)
        self.keyword = keyword
        # Default fill mode is all-ones; anything outside FILLER_TYPES is
        # rejected up front.
        self.filler = self.FILLER_TYPE_ONES if filler is None else filler
        if self.filler not in self.FILLER_TYPES:
            raise RuntimeError("Unknown input filler type: {}".format(filler))
    @staticmethod
    def _string_to_torch_type(string):
        # Only "long" is recognized explicitly; everything else maps to float32.
        return torch.long if string == "long" else torch.float32
    @staticmethod
    def torch_type_to_string(dtype: torch.dtype):
        # Inverse of _string_to_torch_type for the two supported dtypes.
        return "long" if dtype is torch.long else "float"
    def is_integer_input(self):
        return self.type != torch.float32
    def __eq__(self, other):
        # Equality intentionally ignores shape and filler — only dtype and
        # keyword participate, matching the original contract.
        return (self.type, self.keyword) == (other.type, other.keyword)
def create_input_infos(config) -> List[ModelInputInfo]:
    """Build ModelInputInfo descriptors from the config's 'input_info'
    section, which may be a single dict, a list of dicts, or absent."""
    raw = config.get("input_info", [])
    if isinstance(raw, dict):
        # A single dict describes exactly one input.
        raw = [raw]
    elif isinstance(raw, list):
        if not raw:
            # No input description at all: fall back to a single
            # standard image-like input.
            return [ModelInputInfo([1, 3, 224, 224])]
    else:
        raise RuntimeError("Invalid input_infos specified in config - should be either dict or list of dicts")
    return [ModelInputInfo(entry.get("sample_size"),
                           entry.get("type"),
                           entry.get("keyword"),
                           entry.get("filler")) for entry in raw]
def create_mock_tensor(input_info: ModelInputInfo, device: str):
    """Create a dummy tensor matching `input_info`'s shape/dtype on `device`,
    filled according to the info's filler mode (zeros, ones, or random).

    :param input_info: descriptor providing shape, dtype and filler mode
    :param device: torch device string to allocate the tensor on
    :raises RuntimeError: if the filler mode is not one of the known types
    """
    args = {"size": input_info.shape, "dtype": input_info.type, "device": device}
    if input_info.filler == ModelInputInfo.FILLER_TYPE_ZEROS:
        return torch.zeros(**args)
    if input_info.filler == ModelInputInfo.FILLER_TYPE_ONES:
        return torch.ones(**args)
    if input_info.filler == ModelInputInfo.FILLER_TYPE_RANDOM:
        return torch.rand(**args)
    # Previously raised a bare RuntimeError with no message; include the
    # offending filler for easier debugging (ModelInputInfo.__init__ should
    # have rejected it already, so this is a defensive check).
    raise RuntimeError("Unknown input filler type: {}".format(input_info.filler))
class GraphTracer:
    # Runs a user-supplied forward function under a TracingContext to record
    # the model's dynamic execution graph.
    def __init__(self, custom_forward_fn: Callable[[torch.nn.Module], Any]):
        # Callable that performs one forward pass of the model; called under
        # tracing so every op is recorded.
        self.custom_forward_fn = custom_forward_fn
    def trace_graph(self, model: torch.nn.Module, context_to_use: Optional['TracingContext'] = None,
                    as_eval: bool = False) -> DynamicGraph:
        # Snapshot the parameters so they can be restored after tracing,
        # in case the forward pass modifies them.
        sd = deepcopy(model.state_dict())
        from nncf.torch.dynamic_graph.context import TracingContext
        if context_to_use is None:
            context_to_use = TracingContext()
        context_to_use.enable_trace_dynamic_graph()
        from nncf.torch.utils import training_mode_switcher
        with context_to_use as _ctx:
            _ctx.base_module_thread_local_replica = model
            # Trace without gradients; optionally force eval mode for the
            # duration of the forward pass.
            with torch.no_grad():
                if as_eval:
                    with training_mode_switcher(model, is_training=False):
                        self.custom_forward_fn(model)
                else:
                    self.custom_forward_fn(model)
        # Restore the pre-trace parameter state.
        model.load_state_dict(sd)
        if isinstance(model, PostGraphBuildActing):
            model.post_build_graph_actions()
        context_to_use.disable_trace_dynamic_graph()
        return context_to_use.graph
class PostGraphBuildActing:
    """Mixin marking models that need a hook run after graph tracing.

    GraphTracer.trace_graph calls ``post_build_graph_actions`` on any model
    that is an instance of this class; the default implementation is a no-op.
    """
    def post_build_graph_actions(self):
        """Override to perform work after the dynamic graph is built."""
def create_dummy_forward_fn(input_infos: List[ModelInputInfo], with_input_tracing=False,
                            wrap_inputs_fn=None,
                            wrap_outputs_fn=None,
                            with_output_tracing=False):
    # Build a forward function that feeds mock tensors (one per input_info)
    # to the model, optionally wrapping inputs/outputs for graph tracing.
    # The returned closure captures all four configuration flags.
    def default_dummy_forward_fn(model):
        from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_inputs_with_objwalk
        from nncf.torch.dynamic_graph.io_handling import wrap_nncf_model_outputs_with_objwalk
        from nncf.torch.dynamic_graph.io_handling import replicate_same_tensors
        device = get_model_device(model)
        # Infos without a keyword become positional args; the rest become
        # keyword args under their declared keyword.
        args_list = [create_mock_tensor(info, device) for info in input_infos if info.keyword is None]
        kwargs = OrderedDict()
        for info in input_infos:
            if info.keyword is not None:
                kwargs[info.keyword] = create_mock_tensor(info, device)
        args = tuple(args_list)
        if with_input_tracing:
            if wrap_inputs_fn is None:
                # We control the input argument structure w.r.t. tensors
                # - a simple objwalk application should be sufficient in this simple case.
                # For more control, wrap_inputs_fn is used when this is used in NNCFNetwork
                # which is guaranteed to be the same as during the actual NNCFNetwork.forward
                args, kwargs = wrap_nncf_model_inputs_with_objwalk(args, kwargs)
            else:
                args, kwargs = wrap_inputs_fn(args, kwargs)
        retval = model(*args, **kwargs)
        if with_output_tracing:
            retval = replicate_same_tensors(retval)
            if wrap_outputs_fn is not None:
                return wrap_outputs_fn(retval)
            return wrap_nncf_model_outputs_with_objwalk(retval)
        return retval
    return default_dummy_forward_fn
| 40.649682
| 106
| 0.660138
|
4a00cebd61f86590cabea70f3d60977eedb18847
| 6,108
|
py
|
Python
|
keystone/endpoint_policy/backends/sql.py
|
ISCAS-VDI/keystone
|
11af181c06d78026c89a873f62931558e80f3192
|
[
"Apache-2.0"
] | null | null | null |
keystone/endpoint_policy/backends/sql.py
|
ISCAS-VDI/keystone
|
11af181c06d78026c89a873f62931558e80f3192
|
[
"Apache-2.0"
] | null | null | null |
keystone/endpoint_policy/backends/sql.py
|
ISCAS-VDI/keystone
|
11af181c06d78026c89a873f62931558e80f3192
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import sqlalchemy
from keystone.common import sql
from keystone import exception
class PolicyAssociation(sql.ModelBase, sql.ModelDictMixin):
    """Table mapping a policy to an (endpoint, service, region) target.

    Each of endpoint_id/service_id/region_id may be NULL, so a surrogate
    primary key is used; the triple itself is unique.
    """
    __tablename__ = 'policy_association'
    attributes = ['policy_id', 'endpoint_id', 'region_id', 'service_id']
    # The id column is never exposed outside this module. It only exists to
    # provide a primary key, given that the real columns we would like to use
    # (endpoint_id, service_id, region_id) can be null
    id = sql.Column(sql.String(64), primary_key=True)
    policy_id = sql.Column(sql.String(64), nullable=False)
    endpoint_id = sql.Column(sql.String(64), nullable=True)
    service_id = sql.Column(sql.String(64), nullable=True)
    region_id = sql.Column(sql.String(64), nullable=True)
    __table_args__ = (sql.UniqueConstraint('endpoint_id', 'service_id',
                                           'region_id'),)
    def to_dict(self):
        """Return the model's attributes as a dictionary.
        We override the standard method in order to hide the id column,
        since this only exists to provide the table with a primary key.
        """
        d = {}
        for attr in self.__class__.attributes:
            d[attr] = getattr(self, attr)
        return d
class EndpointPolicy(object):
    """SQL backend for policy <-> endpoint/service/region associations."""

    def create_policy_association(self, policy_id, endpoint_id=None,
                                  service_id=None, region_id=None):
        """Create or update the association for the given target.

        The unique constraint on (endpoint_id, service_id, region_id) allows
        at most one row per target, so an existing row is simply repointed
        at the new policy_id.
        """
        with sql.session_for_write() as session:
            try:
                # See if there is already a row for this association, and if
                # so, update it with the new policy_id
                query = session.query(PolicyAssociation)
                query = query.filter_by(endpoint_id=endpoint_id)
                query = query.filter_by(service_id=service_id)
                query = query.filter_by(region_id=region_id)
                association = query.one()
                association.policy_id = policy_id
            except sql.NotFound:
                association = PolicyAssociation(id=uuid.uuid4().hex,
                                                policy_id=policy_id,
                                                endpoint_id=endpoint_id,
                                                service_id=service_id,
                                                region_id=region_id)
                session.add(association)

    def check_policy_association(self, policy_id, endpoint_id=None,
                                 service_id=None, region_id=None):
        """Raise PolicyAssociationNotFound unless the exact association exists."""
        sql_constraints = sqlalchemy.and_(
            PolicyAssociation.policy_id == policy_id,
            PolicyAssociation.endpoint_id == endpoint_id,
            PolicyAssociation.service_id == service_id,
            PolicyAssociation.region_id == region_id)
        # NOTE(henry-nash): Getting a single value to save object
        # management overhead.
        with sql.session_for_read() as session:
            if session.query(PolicyAssociation.id).filter(
                    sql_constraints).distinct().count() == 0:
                raise exception.PolicyAssociationNotFound()

    def delete_policy_association(self, policy_id, endpoint_id=None,
                                  service_id=None, region_id=None):
        """Delete the association matching all four values (no-op if absent)."""
        with sql.session_for_write() as session:
            query = session.query(PolicyAssociation)
            query = query.filter_by(policy_id=policy_id)
            query = query.filter_by(endpoint_id=endpoint_id)
            query = query.filter_by(service_id=service_id)
            query = query.filter_by(region_id=region_id)
            query.delete()

    def get_policy_association(self, endpoint_id=None,
                               service_id=None, region_id=None):
        """Return {'policy_id': id} for the target, or raise if not found."""
        sql_constraints = sqlalchemy.and_(
            PolicyAssociation.endpoint_id == endpoint_id,
            PolicyAssociation.service_id == service_id,
            PolicyAssociation.region_id == region_id)
        try:
            with sql.session_for_read() as session:
                policy_id = session.query(PolicyAssociation.policy_id).filter(
                    sql_constraints).distinct().one()
                # BUG FIX: query(<single column>).one() returns a one-element
                # row tuple, not a scalar; index it so callers receive the
                # bare policy id string (matches the upstream keystone fix).
                return {'policy_id': policy_id[0]}
        except sql.NotFound:
            raise exception.PolicyAssociationNotFound()

    def list_associations_for_policy(self, policy_id):
        """Return all association rows for policy_id as plain dicts."""
        with sql.session_for_read() as session:
            query = session.query(PolicyAssociation)
            query = query.filter_by(policy_id=policy_id)
            return [ref.to_dict() for ref in query.all()]

    def delete_association_by_endpoint(self, endpoint_id):
        """Remove every association referencing endpoint_id."""
        with sql.session_for_write() as session:
            query = session.query(PolicyAssociation)
            query = query.filter_by(endpoint_id=endpoint_id)
            query.delete()

    def delete_association_by_service(self, service_id):
        """Remove every association referencing service_id."""
        with sql.session_for_write() as session:
            query = session.query(PolicyAssociation)
            query = query.filter_by(service_id=service_id)
            query.delete()

    def delete_association_by_region(self, region_id):
        """Remove every association referencing region_id."""
        with sql.session_for_write() as session:
            query = session.query(PolicyAssociation)
            query = query.filter_by(region_id=region_id)
            query.delete()

    def delete_association_by_policy(self, policy_id):
        """Remove every association referencing policy_id."""
        with sql.session_for_write() as session:
            query = session.query(PolicyAssociation)
            query = query.filter_by(policy_id=policy_id)
            query.delete()
| 43.319149
| 78
| 0.632286
|
4a00cee2129dd09de714eabcdcdb4c9450cb33f1
| 7,746
|
py
|
Python
|
V2RaycSpider1225/src/BusinessLogicLayer/cluster/cook.py
|
codemonkeyBeginner/V2RayCloudSpider
|
9cb8acc0bab3c81168256e9498f5a6a926396646
|
[
"MIT"
] | 1
|
2021-12-10T14:28:14.000Z
|
2021-12-10T14:28:14.000Z
|
V2RaycSpider1225/src/BusinessLogicLayer/cluster/cook.py
|
codemonkeyBeginner/V2RayCloudSpider
|
9cb8acc0bab3c81168256e9498f5a6a926396646
|
[
"MIT"
] | null | null | null |
V2RaycSpider1225/src/BusinessLogicLayer/cluster/cook.py
|
codemonkeyBeginner/V2RayCloudSpider
|
9cb8acc0bab3c81168256e9498f5a6a926396646
|
[
"MIT"
] | null | null | null |
# TODO :demand: replenish links of a given subscription type by instantiating
# the abstract "airport" (provider) profiles:
# 1. Iterate over all "available" airport instances.
# 2. Audit their authorization:
#    if the airport lacks collection permission for this link type, drop it;
#    elif it also holds permissions for other types, narrow (rewrite) its
#         permissions to this type only and enqueue the instance;
#    else it only holds permission for this task type — enqueue it directly.
__all__ = ["ActionShunt", "devil_king_armed", "reset_task", "DevilKingArmed"]
import os
from loguru import logger
from requests import HTTPError, ConnectionError
from selenium.common.exceptions import (
TimeoutException,
WebDriverException,
StaleElementReferenceException,
)
from BusinessCentralLayer.setting import CRAWLER_SEQUENCE, CHROMEDRIVER_PATH
from .master import ActionMasterGeneral
from .slavers import __entropy__
class ActionShunt:
    """Filter airport (provider) gait profiles down to those permitted to
    collect one subscription type, and build a runnable crawler per profile."""
    def __init__(self, class_, silence=True, beat_sync=True):
        """
        :param class_: subscription type to collect (e.g. 'ssr', 'v2ray')
        :param silence: run the crawler browser headlessly
        :param beat_sync: synchronous-beat flag forwarded to the crawler
        """
        self.class_ = class_
        self.work_seq = CRAWLER_SEQUENCE
        self.silence, self.beat_sync = silence, beat_sync
        # output
        self.shunt_seq = []   # runnable crawler callables (result of shunt())
        self.atomic_seq = []  # gait profiles that passed the permission audit
    # -----------------------------------------
    # public
    # -----------------------------------------
    @staticmethod
    def generate_entity(atomic: dict, silence=True, beat_sync=True, assault=False):
        # Build a ready-to-run crawler callable from one gait profile dict.
        return ActionMasterGeneral(
            silence=silence,
            beat_sync=beat_sync,
            action_name=atomic["name"],
            register_url=atomic["register_url"],
            anti_slider=atomic["anti_slider"],
            life_cycle=atomic["life_cycle"],
            email=atomic["email"],
            hyper_params=atomic["hyper_params"],
            assault=assault,
        ).run
    def shunt(self):
        """Audit permissions, instantiate tasks and return the runnable list."""
        self._shunt_action()
        self._pop_atomic()
        return self.shunt_seq
    # -----------------------------------------
    # private
    # -----------------------------------------
    def _shunt_action(self):
        action_list = __entropy__.copy()
        for action_tag in action_list:
            action_entropy = action_tag.get("hyper_params")
            # If this source lacks collection permission for the requested
            # link type, drop it.
            if not action_entropy.get(self.class_):
                continue
            self.atomic_seq.append(action_tag)
    def _pop_atomic(self):
        while True:
            # Stop iterating once no gait profiles remain.
            if self.atomic_seq.__len__() < 1:
                break
            # Take one airport gait profile (atomic description).
            atomic = self.atomic_seq.pop()
            # Narrow permissions to the requested type only ('ssr', 'v2ray', ...).
            for passable_trace in self.work_seq:
                if passable_trace != self.class_:
                    atomic["hyper_params"][passable_trace] = False
            # Instantiate the task from the gait profile.
            entity_ = self.generate_entity(
                atomic=atomic, silence=self.silence, beat_sync=self.beat_sync
            )
            # Queue the instantiated task for execution.
            self.shunt_seq.append(entity_)
class DevilKingArmed(ActionMasterGeneral):
    """ActionMasterGeneral specialization driven via ``synergy`` — it creates
    (or reuses) a selenium driver, signs up on the target site and waits for
    the page's core element before quitting the driver."""
    def __init__(
        self,
        register_url,
        chromedriver_path,
        silence: bool = True,
        assault: bool = False,
        beat_sync: bool = True,
        email: str = None,
        life_cycle: int = None,
        anti_slider: bool = False,
        hyper_params: dict = None,
        action_name: str = None,
        debug: bool = False,
    ):
        # All parameters are forwarded unchanged to ActionMasterGeneral.
        super(DevilKingArmed, self).__init__(
            register_url=register_url,
            chromedriver_path=chromedriver_path,
            silence=silence,
            assault=assault,
            beat_sync=beat_sync,
            email=email,
            life_cycle=life_cycle,
            anti_slider=anti_slider,
            hyper_params=hyper_params,
            action_name=action_name,
            debug=debug,
        )
    def synergy(self, api=None):
        """Run one sign-up pass.

        :param api: an existing selenium driver to reuse; when None a new one
            is created via ``set_spider_option`` (proxy taken from
            hyper_params, if present).
        """
        api = (
            self.set_spider_option(guise=self.hyper_params.get("proxy"))
            if api is None
            else api
        )
        # Driver creation can fail (falsy api) — bail out silently.
        if not api:
            return
        try:
            logger.debug(self.runtime_flag({"session_id": api.session_id}, "SYNERGY"))
            self.get_html_handle(api=api, url=self.register_url, wait_seconds=45)
            self.sign_up(api)
            # Enter the site and wait for the core element to finish rendering.
            self.wait(api, 40, "//div[@class='card-body']")
        except TimeoutException:
            # Normalized so callers can catch a builtin instead.
            raise TimeoutError
        except StaleElementReferenceException as e:
            logger.exception(e)
        except WebDriverException as e:
            logger.warning(e)
        except (HTTPError, ConnectionError, ConnectionRefusedError, ConnectionResetError):
            raise ConnectionError
        except Exception as e:
            logger.warning(f">>> Exception <{self.action_name}> -- {e}")
        finally:
            # Always release the browser, even on failure.
            api.quit()
def devil_king_armed(atomic: dict, silence=True, beat_sync=True, assault=False):
    """Instantiate a :class:`DevilKingArmed` crawler from one airport profile.

    :param atomic: gait-profile dict (name, register_url, email, ...)
    :param silence: run the browser headlessly
    :param beat_sync: synchronous-beat flag forwarded to the crawler
    :param assault: aggressive-mode flag forwarded to the crawler
    """
    # The driver path is project-wide; everything else comes from ``atomic``.
    return DevilKingArmed(
        register_url=atomic["register_url"],
        action_name=atomic["name"],
        email=atomic["email"],
        life_cycle=atomic["life_cycle"],
        anti_slider=atomic["anti_slider"],
        hyper_params=atomic["hyper_params"],
        chromedriver_path=CHROMEDRIVER_PATH,
        silence=silence,
        beat_sync=beat_sync,
        assault=assault,
    )
def reset_task() -> list:
    """
    Provide the task contents for deploy.collector.
    :return: a shuffled, capacity-limited list of gait-profile dicts
    """
    import random
    from BusinessCentralLayer.middleware.redis_io import RedisClient, EntropyHeap
    from BusinessCentralLayer.setting import SINGLE_TASK_CAP, REDIS_SECRET_KEY
    rc = RedisClient()
    eh = EntropyHeap()
    # Depending on the scaffold.deploy ``beat_sync`` parameter, use either the
    # local task queue (source) or the shared (remote) task queue.
    beat_attitude = os.getenv("beat_dance", "")
    if beat_attitude == "remote":
        pending_entropy = eh.sync()
        action_list = pending_entropy if pending_entropy else __entropy__
    else:
        action_list = __entropy__.copy()
    # TODO add log-tracing to replace the random scheme.
    # Idea, when reset_task() is called:
    # 1. Get the current system time (Asia/Shanghai).
    # 2. Get yesterday's link demand for the corresponding time window (±1h).
    # 3. Convert the records into a {demand: relative-occupancy} mapping.
    # 4. Turn relative occupancy into a task-preference probability (used
    #    when instances must be dropped).
    # 5. Prefer high-demand instances for the current window and silence/drop
    #    low-demand ones.
    # 6. Fall back to random only when the scheme's preconditions fail.
    random.shuffle(action_list)
    qsize = len(action_list)
    # running_state={"v2ray":[], "ssr":[], "xray":[], ...}
    running_state = dict(
        zip(CRAWLER_SEQUENCE, [[] for _ in range(len(CRAWLER_SEQUENCE))])
    )
    try:
        # --------------------------
        # Classify the entity tasks by subscription type
        # --------------------------
        for task_name in CRAWLER_SEQUENCE:
            # Remaining link count of this type already stored in the pool.
            storage_remain: int = rc.get_len(REDIS_SECRET_KEY.format(f"{task_name}"))
            # --------------------------
            # Collect the profiles permitted for this type
            # --------------------------
            for atomic in action_list:
                permission = (
                    {}
                    if atomic.get("hyper_params") is None
                    else atomic.get("hyper_params")
                )
                if permission.get(task_name) is True:
                    running_state[task_name].append(atomic)
            # --------------------------
            # Pool overflow: return an empty execution queue for this type
            # --------------------------
            if storage_remain >= SINGLE_TASK_CAP:
                running_state[task_name] = []
            # Cached + stored data exceeds the risk threshold — shed tasks.
            while storage_remain + qsize > int(SINGLE_TASK_CAP * 0.8):
                if len(running_state[task_name]) < 1:
                    break
                running_state[task_name].pop()
                qsize -= 1
        # Merge runnable entities of all types into one flat list.
        instances = [atomic for i in list(running_state.values()) if i for atomic in i]
        return instances
    # Network failure: proactively catch RedisClient() connection errors.
    except ConnectionError:
        return []
| 32.008264
| 90
| 0.562742
|
4a00cf06bceed7c6dc2eeb84ed95bfa040815943
| 7,481
|
py
|
Python
|
rqalpha/data/base_data_source/storages.py
|
lucifersteph/rqalpha
|
34b2404872203486e19d18b0d4b64e2e589ba414
|
[
"Apache-2.0"
] | null | null | null |
rqalpha/data/base_data_source/storages.py
|
lucifersteph/rqalpha
|
34b2404872203486e19d18b0d4b64e2e589ba414
|
[
"Apache-2.0"
] | null | null | null |
rqalpha/data/base_data_source/storages.py
|
lucifersteph/rqalpha
|
34b2404872203486e19d18b0d4b64e2e589ba414
|
[
"Apache-2.0"
] | 1
|
2021-09-04T01:05:11.000Z
|
2021-09-04T01:05:11.000Z
|
# -*- coding: utf-8 -*-
# 版权所有 2020 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),
# 您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、
# 本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,
# 否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 public@ricequant.com 获取。
import os
import sys
import locale
import codecs
import pickle
from copy import copy
from rqalpha.utils.functools import lru_cache
import json
import h5py
import pandas
import numpy as np
from rqalpha.utils.datetime_func import convert_date_to_date_int
from rqalpha.utils.i18n import gettext as _
from rqalpha.const import COMMISSION_TYPE, INSTRUMENT_TYPE
from rqalpha.model.instrument import Instrument
from .storage_interface import AbstractCalendarStore, AbstractInstrumentStore, AbstractDayBarStore, AbstractDateSet
class ExchangeTradingCalendarStore(AbstractCalendarStore):
    """Trading calendar loaded from a numpy (.npy) file of date values."""
    def __init__(self, f):
        # f: path to the .npy file; loading is deferred until first use.
        self._f = f
    def get_trading_calendar(self):
        # type: () -> pandas.DatetimeIndex
        """Load the stored dates and convert them to a DatetimeIndex."""
        return pandas.to_datetime([str(d) for d in np.load(self._f, allow_pickle=False)])
class FutureInfoStore(object):
    """Futures commission/margin info, merged from a bundled JSON file and
    user-supplied overrides, cached per order_book_id."""
    # Maps the JSON string values onto the COMMISSION_TYPE enum.
    COMMISSION_TYPE_MAP = {
        "by_volume": COMMISSION_TYPE.BY_VOLUME,
        "by_money": COMMISSION_TYPE.BY_MONEY
    }
    def __init__(self, f, custom_future_info):
        # Keyed by order_book_id when present, else by underlying_symbol.
        with open(f, "r") as json_file:
            self._default_data = {
                item.get("order_book_id") or item.get("underlying_symbol"): self._process_future_info_item(
                    item
                ) for item in json.load(json_file)
            }
        self._custom_data = custom_future_info  # user overrides, same keying
        self._future_info = {}                  # per-id merged-result cache
    @classmethod
    def _process_future_info_item(cls, item):
        # Replace the JSON string with the enum value, in place.
        item["commission_type"] = cls.COMMISSION_TYPE_MAP[item["commission_type"]]
        return item
    def get_future_info(self, instrument):
        """Return the merged info dict for *instrument* (cached).

        Custom overrides win over bundled defaults; raises
        NotImplementedError when neither source knows the instrument.
        """
        order_book_id = instrument.order_book_id
        try:
            return self._future_info[order_book_id]
        except KeyError:
            custom_info = self._custom_data.get(order_book_id) or self._custom_data.get(instrument.underlying_symbol)
            info = self._default_data.get(order_book_id) or self._default_data.get(instrument.underlying_symbol)
            if custom_info:
                # Copy before mutating so the shared default entry stays intact.
                info = copy(info) or {}
                info.update(custom_info)
            elif not info:
                raise NotImplementedError(_("unsupported future instrument {}").format(order_book_id))
            return self._future_info.setdefault(order_book_id, info)
class InstrumentStore(AbstractInstrumentStore):
    """Instrument list loaded from a pickled bundle file, filtered to the
    instrument types this data source supports."""
    SUPPORTED_TYPES = (
        INSTRUMENT_TYPE.CS, INSTRUMENT_TYPE.FUTURE, INSTRUMENT_TYPE.ETF, INSTRUMENT_TYPE.LOF, INSTRUMENT_TYPE.INDX,
        INSTRUMENT_TYPE.PUBLIC_FUND,
    )
    def __init__(self, f, future_info_store):
        # type: (str, FutureInfoStore) -> None
        # NOTE: pickle.load executes arbitrary code from the file — only load
        # trusted, locally generated bundles.
        with open(f, 'rb') as store:
            d = pickle.load(store)
        self._instruments = []
        for i in d:
            ins = Instrument(i, future_info_store)
            if ins.type in self.SUPPORTED_TYPES:
                self._instruments.append(ins)
    def get_all_instruments(self):
        """Return the (already filtered) Instrument objects."""
        return self._instruments
class ShareTransformationStore(object):
    """Lookup table for share transformations, loaded from a JSON file."""

    def __init__(self, f):
        # The mapping is read once from a UTF-8 encoded JSON file.
        with codecs.open(f, 'r', encoding="utf-8") as store:
            self._share_transformation = json.load(store)

    def get_share_transformation(self, order_book_id):
        """Return (successor, share_conversion_ratio) or None when unknown."""
        if order_book_id not in self._share_transformation:
            return None
        record = self._share_transformation[order_book_id]
        return record["successor"], record["share_conversion_ratio"]
def open_h5(path, *args, **kwargs):
    """Open an HDF5 file, working around non-ASCII paths on Windows and
    wrapping OSError in a RuntimeError with a bundle-regeneration hint."""
    # why do this? non-ascii path in windows!!
    if sys.platform == "win32":
        try:
            # Second element of getlocale() is the encoding name.
            l = locale.getlocale(locale.LC_ALL)[1]
        except TypeError:
            l = None
        if l and l.lower() == "utf-8":
            # Pass the path as UTF-8 bytes so h5py/HDF5 can open it.
            path = path.encode("utf-8")
    try:
        return h5py.File(path, *args, **kwargs)
    except OSError as e:
        raise RuntimeError(_(
            "open data bundle failed, you can remove {} and try to regenerate bundle: {}"
        ).format(path, e))
class DayBarStore(AbstractDayBarStore):
    """Day-level bar storage backed by an HDF5 bundle file."""

    # Record layout used for the empty result when an id is missing.
    # BUG FIX: ``np.float`` was a deprecated alias of builtin float and was
    # removed in NumPy 1.24; ``np.float64`` is the equivalent concrete dtype.
    DEFAULT_DTYPE = np.dtype([
        ('datetime', np.uint64),
        ('open', np.float64),
        ('close', np.float64),
        ('high', np.float64),
        ('low', np.float64),
        ('volume', np.float64),
    ])

    def __init__(self, path):
        # NOTE(review): FileExistsError for a *missing* file reads backwards
        # (FileNotFoundError would fit better), but the type is kept because
        # existing callers may already catch it.
        if not os.path.exists(path):
            raise FileExistsError("File {} not exist,please update bundle.".format(path))
        self._h5 = open_h5(path, mode="r")

    def get_bars(self, order_book_id):
        """Return all bars for order_book_id, or an empty typed array."""
        try:
            return self._h5[order_book_id][:]
        except KeyError:
            return np.empty(0, dtype=self.DEFAULT_DTYPE)

    def get_date_range(self, order_book_id):
        """Return (first, last) datetime ints; a fixed fallback when absent."""
        try:
            data = self._h5[order_book_id]
            return data[0]['datetime'], data[-1]['datetime']
        except KeyError:
            return 20050104, 20050104
class DividendStore:
    """Per-instrument dividend records stored in an HDF5 file."""

    def __init__(self, path):
        self._h5 = open_h5(path, mode="r")

    def get_dividend(self, order_book_id):
        """Return the dividend array for *order_book_id*, or None if absent."""
        dataset = self._h5.get(order_book_id)
        if dataset is None:
            return None
        return dataset[:]
class YieldCurveStore:
    """Yield-curve records loaded eagerly from the 'data' dataset of an
    HDF5 file; rows carry an integer 'date' column plus tenor columns."""
    def __init__(self, path):
        # Materialize the whole dataset into memory once.
        self._data = open_h5(path, mode="r")["data"][:]
    def get_yield_curve(self, start_date, end_date, tenor):
        """Return the curve rows in [start_date, end_date] as a DataFrame.

        :param tenor: column name (or list) to select; None returns all.
        :returns: DataFrame indexed by date, or None for an empty range.
        """
        d1 = convert_date_to_date_int(start_date)
        d2 = convert_date_to_date_int(end_date)
        s = self._data['date'].searchsorted(d1)
        e = self._data['date'].searchsorted(d2, side='right')
        # Clamp to the last row, then re-extend by one if that row is
        # exactly the end date (keeps the slice end-inclusive).
        if e == len(self._data):
            e -= 1
        if self._data[e]['date'] == d2:
            e += 1
        if e < s:
            return None
        df = pandas.DataFrame(self._data[s:e])
        df.index = pandas.to_datetime([str(d) for d in df['date']])
        del df['date']
        if tenor is not None:
            return df[tenor]
        return df
class SimpleFactorStore:
    """Generic per-instrument factor arrays stored in an HDF5 file."""

    def __init__(self, path):
        self._h5 = open_h5(path, mode="r")

    def get_factors(self, order_book_id):
        """Return the factor array for *order_book_id*, or None if absent."""
        if order_book_id not in self._h5:
            return None
        return self._h5[order_book_id][:]
class DateSet(AbstractDateSet):
    """Per-instrument sets of date ints stored in an HDF5 file."""
    def __init__(self, f):
        self._h5 = open_h5(f, mode="r")
    # NOTE(review): an unbounded lru_cache on an instance method keys on
    # ``self`` and keeps every DateSet instance (and its sets) alive for the
    # process lifetime — acceptable only if few instances exist; confirm.
    @lru_cache(None)
    def get_days(self, order_book_id):
        """Return the stored date ints as a set (empty when id is absent)."""
        try:
            days = self._h5[order_book_id][:]
            return set(days.tolist())
        except KeyError:
            return set()
    def contains(self, order_book_id, dates):
        """For each date, whether it is in the stored set.

        :returns: list of bools, or None when no dates are stored at all
            (callers must distinguish None from [False, ...]).
        """
        date_set = self.get_days(order_book_id)
        if not date_set:
            return None
        def _to_dt_int(d):
            # Ints above 1e8 are datetime ints (YYYYMMDDHHMMSS); strip the
            # time part. Smaller ints are already date ints.
            if isinstance(d, (int, np.int64, np.uint64)):
                return int(d // 1000000) if d > 100000000 else int(d)
            else:
                # date/datetime-like objects -> YYYYMMDD int
                return d.year * 10000 + d.month * 100 + d.day
        return [(_to_dt_int(d) in date_set) for d in dates]
| 31.432773
| 117
| 0.627991
|
4a00cf9fe9613137445da56347de4866b1e0f596
| 4,335
|
py
|
Python
|
scripts/accuracy_bandwidth/post_process/filterforward.py
|
viscloud/ff
|
480eb87c6832b3f0bc72a87771abf801c340cab4
|
[
"Apache-2.0"
] | 20
|
2019-06-05T02:32:14.000Z
|
2022-01-15T15:35:00.000Z
|
scripts/accuracy_bandwidth/post_process/filterforward.py
|
viscloud/filterforward
|
480eb87c6832b3f0bc72a87771abf801c340cab4
|
[
"Apache-2.0"
] | null | null | null |
scripts/accuracy_bandwidth/post_process/filterforward.py
|
viscloud/filterforward
|
480eb87c6832b3f0bc72a87771abf801c340cab4
|
[
"Apache-2.0"
] | 5
|
2019-06-13T08:56:16.000Z
|
2020-08-03T02:48:45.000Z
|
# Copyright 2016 The FilterForward Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import h5py
import numpy as np
import sys
import argparse
import math
import os
import event_metrics
import datetime
import uuid
import time
import skvideo.io
import skimage
import math
def iff(mc_preds, iff_preds, selectivity, buflen):
    """Threshold microclassifier scores and enforce a per-buffer frame cap.

    Frames with mc_preds > 0.5 are selected (1), then, buffer by buffer,
    selections are randomly dropped at positions where iff_preds is 0 until
    at most ceil(selectivity * buflen) frames per buffer remain.
    Returns a numpy array of 0/1 selections.
    """
    ff_max_frames_per_buffer = int(math.ceil(selectivity * float(buflen)))
    extra_count = 0
    ff_confs = np.array([1 if score > 0.5 else 0 for score in mc_preds])
    for start in range(0, len(ff_confs), buflen):
        # The last buffer may have fewer than buflen frames in it
        true_len = buflen  # if start + buflen < len(ff_confs) else len(ff_confs) - start
        iff_slice = iff_preds[start:start + true_len]
        while np.count_nonzero(ff_confs[start:start + true_len]) > ff_max_frames_per_buffer:
            # Candidate drop positions: iff says 0 but the frame is selected.
            candidates = np.argwhere(iff_slice == 0).flatten()
            candidates = [idx for idx in candidates if ff_confs[start + idx] > 0]
            if not candidates:
                # Over budget but nothing droppable — give up on this buffer.
                extra_count += 1
                break
            ff_confs[start + np.random.choice(candidates)] = 0
    print("EXTRA: {}".format(extra_count))
    print("num selected frames: {}".format(np.count_nonzero(ff_confs)))
    return ff_confs
def k_voting(mc_confs, k=5, pessimistic=False):
    """Smooth thresholded detections with a sliding majority vote of width k.

    Scores >= 0.5 become 1, then each position from k//2+1 onward is set to
    the majority of its (already partially smoothed) k-window. Note the vote
    reads values updated earlier in the same pass. ``pessimistic`` is
    currently unused; kept for interface compatibility.
    Returns a numpy int array of 0/1 predictions.
    """
    if k % 2 == 0:
        print("Cannot k-vote on an even-length interval because Thomas was too lazy to implement this case")
    half = k // 2
    preds = np.asarray(np.asarray(mc_confs) >= 0.5).astype(int)
    for idx in range(half + 1, len(preds)):
        window = preds[idx - half:idx + half + 1]
        preds[idx] = 1 if np.count_nonzero(window) > half else 0
    return preds
# Frame rate used to convert frame indices into timestamps (see cut_video).
fps = 15.0
def cut_video(source_video, start, end, event_number, output_dir):
    """Extract frames [start, end) of *source_video* into
    <output_dir>/<event_number>.mp4 using a stream-copy ffmpeg cut.

    ``start`` and ``end`` are frame indices, converted to timestamps via the
    module-level ``fps``.
    """
    duration = str(datetime.timedelta(seconds = float(end - start) / fps))
    start_time = str(datetime.timedelta(seconds = float(start) / fps))
    output_path = os.path.join(output_dir, "{}.mp4".format(event_number))
    # BUG FIX: ffmpeg's -ss expects a timestamp; the original passed the raw
    # frame index ``start`` and left the computed ``start_time`` unused.
    cmd = "ffmpeg -ss {} -i {} -c copy -t {} {}".format(start_time, source_video, duration, output_path)
    os.system(cmd)
def smooth_confs(confs, k=10, pessimistic=False):
    """Smooth raw confidences via a k-window majority vote (see k_voting)."""
    return k_voting(confs, k=k, pessimistic=pessimistic)
def cut_events(confs, labels, source_video, output_dir, iff_labels = None):
    """Re-encode *source_video* keeping only frames whose conf > 0.5, writing
    <output_dir>/vid.mp4 at a fixed bitrate.

    NOTE(review): ``labels`` and ``iff_labels`` are never used in this body —
    presumably leftovers from an earlier version; confirm before removing.
    """
    out_bitrate = 2248  # target bitrate passed to libx264
    reader = skvideo.io.vreader(source_video)
    writer = skvideo.io.FFmpegWriter(os.path.join(output_dir, "vid.mp4"), outputdict={
        '-vcodec': 'libx264', '-b': str(out_bitrate)
    })
    # Reader and confs are walked in lockstep: frame i pairs with confs[i].
    for i in range(len(confs)):
        if i % 10000 == 0:
            print(i)
        frame = next(reader)
        if(confs[i] > 0.5):
            writer.writeFrame(frame)
    writer.close()
if __name__ == '__main__':
    # CLI entry point: load confidences and labels, smooth detections, and
    # compute the event-detection metric before/after smoothing.
    parser = argparse.ArgumentParser(description='FilterForward')
    parser.add_argument('--confidences', type=str, required=False, help='Path to the confidences.npy file.')
    parser.add_argument('--labels', type=str, required=True, help='Path to the labels.h5 file.')
    parser.add_argument('--video', type=str, required=False, help='Path to the video file.')
    parser.add_argument('--outdir', type=str, required=False, help='output dir')
    args = parser.parse_args(sys.argv[1:])
    mc_confs = np.load(args.confidences)
    # BUG FIX: the original called the undefined ``get_confs`` (NameError at
    # runtime); ``smooth_confs`` is the smoothing helper defined above and
    # k=10 matches its default.
    confs = smooth_confs(mc_confs, k=10)
    labels = h5py.File(args.labels)['labels'][0:]
    orig_event_recall = event_metrics.compute_event_detection_metric(mc_confs, labels, existence_coeff=0.9, overlap_coeff=0.1)
    new_event_recall = event_metrics.compute_event_detection_metric(confs, labels, existence_coeff=0.9, overlap_coeff=0.1)
    #source_video_path = args.video
    # Smooth the detections
    # encode a video
| 39.409091
| 126
| 0.699654
|
4a00cfd969ffba99c0a8040c6b439d690d6a7d87
| 2,307
|
py
|
Python
|
ironic/tests/unit/drivers/modules/oneview/test_inspect.py
|
lenovo-lxca-ci/ironic
|
333f983198360a791bc0ecf8ac2c4036729f7d3a
|
[
"Apache-2.0"
] | null | null | null |
ironic/tests/unit/drivers/modules/oneview/test_inspect.py
|
lenovo-lxca-ci/ironic
|
333f983198360a791bc0ecf8ac2c4036729f7d3a
|
[
"Apache-2.0"
] | 5
|
2018-03-28T07:52:38.000Z
|
2020-05-15T09:35:46.000Z
|
ironic/tests/unit/drivers/modules/oneview/test_inspect.py
|
lenovo-lxca-ci/ironic
|
333f983198360a791bc0ecf8ac2c4036729f7d3a
|
[
"Apache-2.0"
] | 1
|
2019-05-08T14:20:54.000Z
|
2019-05-08T14:20:54.000Z
|
# Copyright (2015-2017) Hewlett Packard Enterprise Development LP
# Copyright (2015-2017) Universidade Federal de Campina Grande
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from ironic.conductor import task_manager
from ironic.drivers.modules.oneview import common as ov_common
from ironic.drivers.modules.oneview import deploy_utils
from ironic.tests.unit.drivers.modules.oneview import test_common
class OneViewInspectTestCase(test_common.BaseOneViewTest):
    """Tests for the OneView driver's inspect interface."""

    def setUp(self):
        super(OneViewInspectTestCase, self).setUp()
        self.config(enabled=True, group='inspector')
        self.config(manager_url='https://1.2.3.4', group='oneview')

    def test_get_properties(self):
        """inspect.get_properties exposes the deploy_utils properties."""
        expected = deploy_utils.get_properties()
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertEqual(expected, task.driver.inspect.get_properties())

    # BUG FIX: the keyword is ``autospec``; the misspelled ``autospect`` was
    # passed through to the Mock constructor and silently ignored, so no
    # autospeccing (signature checking) actually happened.
    @mock.patch.object(ov_common, 'validate_oneview_resources_compatibility',
                       autospec=True)
    def test_validate(self, mock_validate):
        """validate() delegates to the OneView compatibility check."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.inspect.validate(task)
            self.assertTrue(mock_validate.called)

    @mock.patch.object(deploy_utils, 'allocate_server_hardware_to_ironic',
                       autospec=True)
    def test_inspect_hardware(self, mock_allocate_server_hardware_to_ironic):
        """inspect_hardware() allocates the server hardware to ironic."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.inspect.inspect_hardware(task)
            self.assertTrue(mock_allocate_server_hardware_to_ironic.called)
| 44.365385
| 78
| 0.70091
|
4a00d1208911097554af8469b865bc935ab37dfe
| 5,302
|
py
|
Python
|
bigml/tests/create_anomaly_steps.py
|
devs-cloud/python_ml
|
05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c
|
[
"Apache-2.0"
] | null | null | null |
bigml/tests/create_anomaly_steps.py
|
devs-cloud/python_ml
|
05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c
|
[
"Apache-2.0"
] | null | null | null |
bigml/tests/create_anomaly_steps.py
|
devs-cloud/python_ml
|
05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2014-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
import os
from datetime import datetime, timedelta
from nose.tools import eq_, ok_, assert_less
from world import world, res_filename
from read_anomaly_steps import i_get_the_anomaly
from bigml.api import HTTP_CREATED
from bigml.api import HTTP_ACCEPTED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from bigml.anomaly import Anomaly
#@step(r'I check the anomaly detector stems from the original dataset list')
def i_check_anomaly_datasets_and_datasets_ids(step):
    """Assert the anomaly detector was built from the stored dataset id list."""
    anomaly = world.anomaly
    ok_('datasets' in anomaly and anomaly['datasets'] == world.dataset_ids,
        ("The anomaly detector contains only %s and the dataset ids are %s" %
         (",".join(anomaly['datasets']), ",".join(world.dataset_ids))))
#@step(r'I check the anomaly detector stems from the original dataset')
def i_check_anomaly_dataset_and_datasets_ids(step):
    """Assert the anomaly detector was built from the stored single dataset."""
    anomaly = world.anomaly
    ok_('dataset' in anomaly and anomaly['dataset'] == world.dataset['resource'],
        ("The anomaly detector contains only %s and the dataset id is %s" %
         (anomaly['dataset'], world.dataset['resource'])))
#@step(r'I create an anomaly detector$')
def i_create_an_anomaly(step):
    """Create an anomaly detector from the current dataset (alias step)."""
    i_create_an_anomaly_from_dataset(step)
#@step(r'I create an anomaly detector from a dataset$')
def i_create_an_anomaly_from_dataset(step):
    """Create an anomaly detector from the current dataset and store it in world."""
    dataset = world.dataset.get('resource')
    # Fixed seed keeps the build deterministic across test runs.
    resource = world.api.create_anomaly(dataset, {'seed': 'BigML'})
    world.status = resource['code']
    eq_(world.status, HTTP_CREATED)
    world.location = resource['location']
    world.anomaly = resource['object']
    world.anomalies.append(resource['resource'])
#@step(r'I create an anomaly detector with (\d+) anomalies from a dataset$')
def i_create_an_anomaly_with_top_n_from_dataset(step, top_n):
    """Create an anomaly detector limited to top_n anomalies and store it."""
    dataset = world.dataset.get('resource')
    resource = world.api.create_anomaly(
        dataset, {'seed': 'BigML', 'top_n': int(top_n)})
    world.status = resource['code']
    eq_(world.status, HTTP_CREATED,
        "Expected: %s, found: %s" % (HTTP_CREATED, world.status))
    world.location = resource['location']
    world.anomaly = resource['object']
    world.anomalies.append(resource['resource'])
#@step(r'I create an anomaly detector from a dataset list$')
def i_create_an_anomaly_from_dataset_list(step):
    """Create an anomaly detector from the stored list of dataset ids."""
    resource = world.api.create_anomaly(world.dataset_ids, {'seed': 'BigML'})
    world.status = resource['code']
    eq_(world.status, HTTP_CREATED)
    world.location = resource['location']
    world.anomaly = resource['object']
    world.anomalies.append(resource['resource'])
#@step(r'I wait until the anomaly detector status code is either (\d) or (-\d) less than (\d+)')
def wait_until_anomaly_status_code_is(step, code1, code2, secs):
    """Poll the anomaly detector until its status is code1 or code2.

    Fails (via assert_less) when more than ``secs * world.delta`` seconds
    elapse, and finally asserts the terminal status equals code1.
    """
    start = datetime.utcnow()
    delta = int(secs) * world.delta
    i_get_the_anomaly(step, world.anomaly['resource'])
    status = get_status(world.anomaly)
    while (status['code'] != int(code1) and
           status['code'] != int(code2)):
        time.sleep(3)
        # FIX: parenthesized print calls — the original Python-2-only print
        # statements made the module unparseable under Python 3; a single
        # parenthesized argument behaves identically on both versions.
        if (datetime.utcnow() - start).seconds % 60 == 3:
            print("Waiting for anomaly for %s seconds" %
                  (datetime.utcnow() - start).seconds)
        assert_less((datetime.utcnow() - start).seconds, delta)
        i_get_the_anomaly(step, world.anomaly['resource'])
        status = get_status(world.anomaly)
    print("Anomaly created.")
    eq_(status['code'], int(code1))
#@step(r'I wait until the anomaly detector is ready less than (\d+)')
def the_anomaly_is_finished_in_less_than(step, secs):
    """Wait until the anomaly detector reaches FINISHED (or FAULTY) within secs."""
    wait_until_anomaly_status_code_is(step, FINISHED, FAULTY, secs)
#@step(r'I create a dataset with only the anomalies')
def create_dataset_with_anomalies(step):
    """Create a dataset filtered down to only the detected anomalous rows."""
    local_anomalies = Anomaly(world.anomaly['resource'])
    # anomalies_filter() yields a lisp expression selecting the anomaly rows.
    world.dataset = world.api.create_dataset(
        world.dataset['resource'],
        {"lisp_filter": local_anomalies.anomalies_filter()})
    world.datasets.append(world.dataset['resource'])
#@step(r'I check that the dataset has (\d+) rows')
def the_dataset_has_n_rows(step, rows):
    """Assert the current dataset has exactly *rows* rows."""
    eq_(world.dataset['rows'], int(rows))
#@step(r'I export the anomaly$')
def i_export_anomaly(step, filename):
    """Export the current anomaly detector to a local file."""
    world.api.export(world.anomaly.get('resource'),
                     filename=res_filename(filename))
#@step(r'I create a local anomaly from file "(.*)"')
def i_create_local_anomaly_from_file(step, export_file):
    """Build a local Anomaly object from a previously exported file."""
    world.local_anomaly = Anomaly(res_filename(export_file))
#@step(r'the anomaly ID and the local anomaly ID match')
def check_anomaly_id_local_id(step):
    """Assert the local anomaly's resource id matches the remote one."""
    eq_(world.local_anomaly.resource_id, world.anomaly["resource"])
| 40.166667
| 96
| 0.713127
|
4a00d2946413b38ab89de3447182c01a77b696b9
| 2,261
|
py
|
Python
|
stream.py
|
Watemlifts/spark-kinesis-redshift
|
f979954e982865966e20403dbe9b0857df18d7ea
|
[
"MIT"
] | 8
|
2019-07-29T05:58:37.000Z
|
2022-01-03T11:03:32.000Z
|
stream.py
|
Watemlifts/spark-kinesis-redshift
|
f979954e982865966e20403dbe9b0857df18d7ea
|
[
"MIT"
] | null | null | null |
stream.py
|
Watemlifts/spark-kinesis-redshift
|
f979954e982865966e20403dbe9b0857df18d7ea
|
[
"MIT"
] | 3
|
2018-12-12T01:18:40.000Z
|
2021-04-21T06:25:25.000Z
|
from __future__ import print_function
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream
import datetime
import json
from pyspark.sql import SQLContext, Row
from pyspark.sql.types import *
# Kinesis / Spark Streaming configuration.
aws_region = 'us-east-1'
kinesis_stream = 'stream_name'  # name of the Kinesis stream to consume
kinesis_endpoint = 'https://kinesis.us-east-1.amazonaws.com/'
kinesis_app_name = 'app_name'
kinesis_initial_position = InitialPositionInStream.LATEST  # start at the stream tip
kinesis_checkpoint_interval = 5  # presumably seconds — TODO confirm units
spark_batch_interval = 5  # micro-batch length in seconds
if __name__ == "__main__":
    # Consume JSON records from Kinesis and append each micro-batch to a
    # Redshift table via the spark-redshift connector.
    spark_context = SparkContext(appName=kinesis_app_name)
    spark_streaming_context = StreamingContext(spark_context, spark_batch_interval)
    sql_context = SQLContext(spark_context)
    # Renamed from ``kinesis_stream`` to stop shadowing the module-level
    # stream-name constant used as an argument below.
    kinesis_dstream = KinesisUtils.createStream(
        spark_streaming_context, kinesis_app_name, kinesis_stream, kinesis_endpoint,
        aws_region, kinesis_initial_position, kinesis_checkpoint_interval)
    kinesis_dstream.pprint()
    py_rdd = kinesis_dstream.map(lambda x: json.loads(x))
    def process(time, rdd):
        """Write one micro-batch RDD of user records to Redshift."""
        print("========= %s =========" % str(time))
        try:
            # BUG FIX: the original called the undefined ``getSqlContextInstance``
            # — the resulting NameError was silently swallowed by the broad
            # except, so no batch was ever written. Reuse the SQLContext
            # created above instead.
            schema = StructType([
                StructField('user_id', IntegerType(), True),
                StructField('username', StringType(), True),
                StructField('first_name', StringType(), True),
                StructField('surname', StringType(), True),
                StructField('age', IntegerType(), True),
            ])
            df = sql_context.createDataFrame(rdd, schema)
            df.registerTempTable("activity_log")
            df.write \
                .format("com.databricks.spark.redshift") \
                .option("url", "jdbc:redshiftURL.com:5439/database?user=USERNAME&password=PASSWORD") \
                .option("dbtable", "activity_log") \
                .option("tempdir", "s3n://spark-temp-data/") \
                .mode("append") \
                .save()
        except Exception as e:
            # Best-effort: log and keep the stream alive on bad batches.
            print(e)
            pass
    py_rdd.foreachRDD(process)
    spark_streaming_context.start()
    spark_streaming_context.awaitTermination()
    spark_streaming_context.stop()
| 36.467742
| 102
| 0.666962
|
4a00d2bb9b49bae360bcbed183183d14aa3bb41b
| 3,014
|
py
|
Python
|
projects/graph/test_graph.py
|
Nolanole/Graphs
|
fedfb61d81794c5a0e10e59ae67f447947c8d43c
|
[
"MIT"
] | null | null | null |
projects/graph/test_graph.py
|
Nolanole/Graphs
|
fedfb61d81794c5a0e10e59ae67f447947c8d43c
|
[
"MIT"
] | 8
|
2020-03-24T17:47:23.000Z
|
2022-03-12T00:33:21.000Z
|
cs/lambda_cs/06_graphs/projects/graph/test_graph.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
import unittest
import sys
import io
from graph import Graph
class Test(unittest.TestCase):
    """Exercise Graph traversal/search methods against a fixed 7-node graph."""

    def setUp(self):
        """Build the sample graph every test runs against."""
        self.graph = Graph()
        for vertex in range(1, 8):
            self.graph.add_vertex(vertex)
        # Directed edges, added in the same order as the original exercise.
        for start, end in [
            (5, 3), (6, 3), (7, 1), (4, 7), (1, 2),
            (7, 6), (2, 4), (3, 5), (2, 3), (4, 6),
        ]:
            self.graph.add_edge(start, end)

    def test_vertices(self):
        """Adjacency sets must match exactly the edges added in setUp."""
        expected = {
            1: {2},
            2: {3, 4},
            3: {5},
            4: {6, 7},
            5: {3},
            6: {3},
            7: {1, 6}
        }
        self.assertDictEqual(self.graph.vertices, expected)

    def test_bft(self):
        """Breadth-first traversal from 1 prints one of the valid orderings."""
        valid_orders = [
            "1\n2\n3\n4\n5\n6\n7\n",
            "1\n2\n3\n4\n5\n7\n6\n",
            "1\n2\n3\n4\n6\n7\n5\n",
            "1\n2\n3\n4\n6\n5\n7\n",
            "1\n2\n3\n4\n7\n6\n5\n",
            "1\n2\n3\n4\n7\n5\n6\n",
            "1\n2\n4\n3\n5\n6\n7\n",
            "1\n2\n4\n3\n5\n7\n6\n",
            "1\n2\n4\n3\n6\n7\n5\n",
            "1\n2\n4\n3\n6\n5\n7\n",
            "1\n2\n4\n3\n7\n6\n5\n",
            "1\n2\n4\n3\n7\n5\n6\n"
        ]
        saved_stdout = sys.stdout
        sys.stdout = io.StringIO()
        self.graph.bft(1)
        printed = sys.stdout.getvalue()
        self.assertIn(printed, valid_orders)
        sys.stdout = saved_stdout  # Restore stdout

    def test_dft(self):
        """Depth-first traversal from 1 prints one of the valid orderings."""
        valid_orders = [
            "1\n2\n3\n5\n4\n6\n7\n",
            "1\n2\n3\n5\n4\n7\n6\n",
            "1\n2\n4\n7\n6\n3\n5\n",
            "1\n2\n4\n6\n3\n5\n7\n"
        ]
        saved_stdout = sys.stdout
        sys.stdout = io.StringIO()
        self.graph.dft(1)
        printed = sys.stdout.getvalue()
        self.assertIn(printed, valid_orders)
        sys.stdout = saved_stdout  # Restore stdout

    def test_dft_recursive(self):
        """Recursive DFT must print the same valid orderings as the iterative one."""
        valid_orders = [
            "1\n2\n3\n5\n4\n6\n7\n",
            "1\n2\n3\n5\n4\n7\n6\n",
            "1\n2\n4\n7\n6\n3\n5\n",
            "1\n2\n4\n6\n3\n5\n7\n"
        ]
        saved_stdout = sys.stdout
        sys.stdout = io.StringIO()
        self.graph.dft_recursive(1)
        printed = sys.stdout.getvalue()
        self.assertIn(printed, valid_orders)
        sys.stdout = saved_stdout  # Restore stdout

    def test_bfs(self):
        """BFS returns the (unique) shortest path from 1 to 6."""
        self.assertListEqual(self.graph.bfs(1, 6), [1, 2, 4, 6])

    def test_dfs(self):
        """DFS returns one of the two possible paths from 1 to 6."""
        possible_paths = [
            [1, 2, 4, 6],
            [1, 2, 4, 7, 6]
        ]
        self.assertIn(self.graph.dfs(1, 6), possible_paths)

    def test_dfs_recursive(self):
        """Recursive DFS returns one of the two possible paths from 1 to 6."""
        possible_paths = [
            [1, 2, 4, 6],
            [1, 2, 4, 7, 6]
        ]
        self.assertIn(self.graph.dfs_recursive(1, 6), possible_paths)
if __name__ == '__main__':
    # Discover and run the test suite when executed as a script.
    unittest.main()
| 25.116667
| 59
| 0.483743
|
4a00d2bcd4e7b011a25c43eb625ff87b09a4c88e
| 2,036
|
py
|
Python
|
app.py
|
ManualDoCodigo/pyhexeditor
|
211cc360d468de98367cfd5b4972e7fa3da46712
|
[
"MIT"
] | null | null | null |
app.py
|
ManualDoCodigo/pyhexeditor
|
211cc360d468de98367cfd5b4972e7fa3da46712
|
[
"MIT"
] | null | null | null |
app.py
|
ManualDoCodigo/pyhexeditor
|
211cc360d468de98367cfd5b4972e7fa3da46712
|
[
"MIT"
] | null | null | null |
# 2021 - Douglas Diniz - www.manualdocodigo.com.br
import sys
from PyQt5 import QtCore, QtWidgets, uic
from PyQt5.QtCore import QFile, QTextStream
class MainWindow(QtWidgets.QMainWindow):
    """Main hex-editor window: wires the menu actions and the address box
    to open/save/seek operations on the hex widget defined in the .ui file."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.filename = ""  # path of the currently loaded file ("" = none yet)

        # Load the UI Page
        uic.loadUi("mainwindow.ui", self)

        self.actionopen.triggered.connect(self.open)
        self.actionsave.triggered.connect(self.save)
        self.actionsave_as.triggered.connect(self.saveAs)
        # NOTE: the method name keeps the original "serCursorPosition" typo
        # (presumably "setCursorPosition") so the public interface is unchanged.
        self.lineEditAddress.textChanged.connect(self.serCursorPosition)

    def open(self):
        """Ask the user for a file and load its bytes into the hex widget."""
        fName, filter = QtWidgets.QFileDialog.getOpenFileName(self, "OpenFile")
        if not fName:
            # Dialog was cancelled; keep the current document untouched.
            return
        f = QtCore.QFile(fName)
        if not f.open(QtCore.QFile.ReadOnly):
            print("Could not open file")
            return
        data = f.readAll()
        f.close()  # release the handle (previously leaked)
        self.hexwidget.setData(data)
        self.filename = fName

    def save(self):
        """Write the hex widget's current bytes back to the loaded file."""
        if self.filename:
            data = self.hexwidget.getData()
            # `with` guarantees the file is closed even if write() raises.
            with open(self.filename, "wb") as f:
                f.write(data)
            print("Saved successfully...")
        else:
            print("No file to save")

    def saveAs(self):
        """Ask for a destination path, remember it, then delegate to save()."""
        fName, _ = QtWidgets.QFileDialog.getSaveFileName(self, "Save File")
        if fName:
            self.filename = fName
            self.save()
        else:
            print("Invalid File")

    def serCursorPosition(self):
        """Move the hex widget's cursor to the hex address typed by the user."""
        try:
            address = int(self.lineEditAddress.text(), 16)
        except ValueError:
            # Narrowed from a bare `except:` so unrelated errors propagate.
            print("Invalid hexadecimal number")
            return
        self.hexwidget.setCursorPosition(address)
def main():
    """Create the Qt application, show the main window, and run the event loop."""
    app = QtWidgets.QApplication(sys.argv)

    # Theme test from:
    # https://github.com/Alexhuszagh/BreezeStyleSheets
    if False:  # disabled: flip to True to try the dark stylesheet
        qss_file = QFile("./dark.qss")
        qss_file.open(QFile.ReadOnly | QFile.Text)
        app.setStyleSheet(QTextStream(qss_file).readAll())

    window = MainWindow()
    window.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
| 25.772152
| 79
| 0.608055
|
4a00d317e66325f91110d1977a1b846c595d9953
| 3,995
|
py
|
Python
|
src/PAWS/paws_train.py
|
annilea/potholes_detection_sys
|
6604225c2db867ffebbef42395251af7ec9ad53b
|
[
"MIT"
] | 1
|
2021-09-11T11:34:42.000Z
|
2021-09-11T11:34:42.000Z
|
src/PAWS/paws_train.py
|
annilea/potholes_detection_sys
|
6604225c2db867ffebbef42395251af7ec9ad53b
|
[
"MIT"
] | 1
|
2021-08-13T15:57:07.000Z
|
2021-08-13T15:57:07.000Z
|
src/PAWS/paws_train.py
|
annilea/potholes_detection_sys
|
6604225c2db867ffebbef42395251af7ec9ad53b
|
[
"MIT"
] | 1
|
2021-07-30T16:31:42.000Z
|
2021-07-30T16:31:42.000Z
|
# Imports
from utils import (
multicrop_loader,
labeled_loader,
paws_trainer,
config,
lr_scheduler,
lars_optimizer,
data_loader
)
from models import wide_resnet
import matplotlib.pyplot as plt
import tensorflow as tf
import time
# Load dataset
# (y_train is unpacked but never referenced below; only images are used here.)
x_train,_,y_train,_ = data_loader.get_train_test_ds()

# Constants
AUTO = tf.data.AUTOTUNE
STEPS_PER_EPOCH = int(len(x_train) // config.MULTICROP_BS)
WARMUP_EPOCHS = 2
WARMUP_STEPS = int(WARMUP_EPOCHS * STEPS_PER_EPOCH)

# Prepare Dataset object for multicrop
train_ds = tf.data.Dataset.from_tensor_slices(x_train)
multicrop_ds = multicrop_loader.get_multicrop_loader(train_ds)
multicrop_ds = (
    multicrop_ds.shuffle(config.MULTICROP_BS * 10)  # shuffle buffer = 10 batches
    .batch(config.MULTICROP_BS)
    .prefetch(AUTO)
)

# Prepare dataset object for the support samples
support_ds = labeled_loader.get_support_ds(config.SUPPORT_BS)
print("Data loaders prepared.")
# Initialize encoder and optimizer
wide_resnet_enc = wide_resnet.get_network()
# Cosine LR schedule with a linear warmup over WARMUP_STEPS steps.
scheduled_lrs = lr_scheduler.WarmUpCosine(
    learning_rate_base=config.WARMUP_LR,
    total_steps=config.PRETRAINING_EPOCHS * STEPS_PER_EPOCH,
    warmup_learning_rate=config.START_LR,
    warmup_steps=WARMUP_STEPS,
)
# LARS optimizer; batch-norm and bias parameters are excluded from both
# weight decay and layer-wise adaptation.
optimizer = lars_optimizer.LARS(
    learning_rate=scheduled_lrs,
    momentum=0.9,
    exclude_from_weight_decay=["batch_normalization", "bias"],
    exclude_from_layer_adaptation=["batch_normalization", "bias"],
)
print("Model and optimizer initialized.")
# Loss trackers
epoch_ce_losses = []
epoch_me_losses = []

############## Training ##############
for e in range(config.PRETRAINING_EPOCHS):
    print(f"=======Starting epoch: {e}=======")
    start_time = time.time()
    epoch_ce_loss_avg = tf.keras.metrics.Mean()
    epoch_me_loss_avg = tf.keras.metrics.Mean()

    for i, unsup_imgs in enumerate(multicrop_ds):
        # Sample support images, concat the images and labels, and
        # then apply label-smoothing.
        global iter_supervised
        try:
            sdata = next(iter_supervised)
        except Exception:
            # Cycle the (shorter) support dataset: recreate the iterator when
            # it is exhausted or not yet defined (NameError on first pass).
            iter_supervised = iter(support_ds)
            sdata = next(iter_supervised)
        support_images_one, support_images_two = sdata
        # Each element is a (images, labels) pair; stack the two views along
        # the batch axis so both are classified in one forward pass.
        support_images = tf.concat(
            [support_images_one[0], support_images_two[0]], axis=0
        )
        support_labels = tf.concat(
            [support_images_one[1], support_images_two[1]], axis=0
        )
        support_labels = labeled_loader.onehot_encode(
            support_labels, config.LABEL_SMOOTHING
        )

        # Perform training step
        batch_ce_loss, batch_me_loss, gradients = paws_trainer.train_step(
            unsup_imgs, (support_images, support_labels), wide_resnet_enc
        )
        # Update the parameters of the encoder
        optimizer.apply_gradients(zip(gradients, wide_resnet_enc.trainable_variables))

        # Periodic progress line: total loss plus its two components.
        if (i % 50) == 0:
            print(
                "[%d, %5d] loss: %.3f (%.3f %.3f)"
                % (
                    e,
                    i,
                    batch_ce_loss.numpy() + batch_me_loss.numpy(),
                    batch_ce_loss.numpy(),
                    batch_me_loss.numpy(),
                )
            )
        epoch_ce_loss_avg.update_state(batch_ce_loss)
        epoch_me_loss_avg.update_state(batch_me_loss)

    print(
        f"Epoch: {e} CE Loss: {epoch_ce_loss_avg.result():.3f}"
        f" ME-MAX Loss: {epoch_me_loss_avg.result():.3f}"
        f" Time elapsed: {time.time()-start_time:.2f} secs"
    )
    print("")
    epoch_ce_losses.append(epoch_ce_loss_avg.result())
    epoch_me_losses.append(epoch_me_loss_avg.result())

# Create a plot to see the cross-entropy losses
plt.figure(figsize=(8, 8))
plt.plot(epoch_ce_losses)
plt.title("PAWS Training Cross-Entropy Loss", fontsize=12)
plt.grid()
plt.savefig(config.PRETRAINING_PLOT, dpi=300)

# Serialize model
wide_resnet_enc.save(config.PRETRAINED_MODEL)
print(f"Encoder serialized to : {config.PRETRAINED_MODEL}")
| 31.456693
| 86
| 0.677096
|
4a00d355e534ff70ab1801e908c00a49a45bd817
| 4,806
|
py
|
Python
|
test/test_ssl.py
|
PleasantMachine9/urllib3
|
a1fd6bc5c196b8cbc10c67ddd64643492ac09205
|
[
"MIT"
] | 1
|
2020-12-10T06:47:40.000Z
|
2020-12-10T06:47:40.000Z
|
test/test_ssl.py
|
PleasantMachine9/urllib3
|
a1fd6bc5c196b8cbc10c67ddd64643492ac09205
|
[
"MIT"
] | 1
|
2020-10-01T13:47:23.000Z
|
2020-10-01T13:47:23.000Z
|
test/test_ssl.py
|
PleasantMachine9/urllib3
|
a1fd6bc5c196b8cbc10c67ddd64643492ac09205
|
[
"MIT"
] | null | null | null |
from test import notPyPy2
import mock
import pytest
from urllib3.exceptions import SNIMissingWarning
from urllib3.util import ssl_
@pytest.mark.parametrize(
    "addr",
    [
        # IPv6
        "::1",
        "::",
        "FE80::8939:7684:D84b:a5A4%251",
        # IPv4
        "127.0.0.1",
        "8.8.8.8",
        b"127.0.0.1",
        # IPv6 w/ Zone IDs
        "FE80::8939:7684:D84b:a5A4%251",
        b"FE80::8939:7684:D84b:a5A4%251",
        "FE80::8939:7684:D84b:a5A4%19",
        b"FE80::8939:7684:D84b:a5A4%19",
    ],
)
def test_is_ipaddress_true(addr):
    """is_ipaddress must accept IPv4/IPv6 literals given as str or bytes."""
    assert ssl_.is_ipaddress(addr)
@pytest.mark.parametrize(
    "addr",
    [
        "www.python.org",
        b"www.python.org",
        "v2.sg.media-imdb.com",
        b"v2.sg.media-imdb.com",
    ],
)
def test_is_ipaddress_false(addr):
    """is_ipaddress must reject DNS hostnames given as str or bytes."""
    assert not ssl_.is_ipaddress(addr)
@pytest.mark.parametrize(
    ["has_sni", "server_hostname", "uses_sni"],
    [
        (True, "127.0.0.1", False),
        (False, "www.python.org", False),
        (False, "0.0.0.0", False),
        (True, "www.google.com", True),
        (True, None, False),
        (False, None, False),
    ],
)
def test_context_sni_with_ip_address(monkeypatch, has_sni, server_hostname, uses_sni):
    """server_hostname is forwarded to wrap_socket only for a real hostname
    and only when the ssl module reports SNI support (HAS_SNI)."""
    monkeypatch.setattr(ssl_, "HAS_SNI", has_sni)

    sock = mock.Mock()
    context = mock.create_autospec(ssl_.SSLContext)

    ssl_.ssl_wrap_socket(sock, server_hostname=server_hostname, ssl_context=context)

    if uses_sni:
        context.wrap_socket.assert_called_with(sock, server_hostname=server_hostname)
    else:
        context.wrap_socket.assert_called_with(sock)
@pytest.mark.parametrize(
    ["has_sni", "server_hostname", "should_warn"],
    [
        (True, "www.google.com", False),
        (True, "127.0.0.1", False),
        (False, "127.0.0.1", False),
        (False, "www.google.com", True),
        (True, None, False),
        (False, None, False),
    ],
)
def test_sni_missing_warning_with_ip_addresses(
    monkeypatch, has_sni, server_hostname, should_warn
):
    """SNIMissingWarning must be emitted only when a hostname (not an IP)
    is given while SNI support is absent."""
    monkeypatch.setattr(ssl_, "HAS_SNI", has_sni)

    sock = mock.Mock()
    context = mock.create_autospec(ssl_.SSLContext)

    with mock.patch("warnings.warn") as warn:
        ssl_.ssl_wrap_socket(sock, server_hostname=server_hostname, ssl_context=context)

    if should_warn:
        assert warn.call_count >= 1
        # call[0][1] is the second positional argument: the warning category.
        warnings = [call[0][1] for call in warn.call_args_list]
        assert SNIMissingWarning in warnings
    else:
        assert warn.call_count == 0
@pytest.mark.parametrize(
    ["ciphers", "expected_ciphers"],
    [
        (None, ssl_.DEFAULT_CIPHERS),
        ("ECDH+AESGCM:ECDH+CHACHA20", "ECDH+AESGCM:ECDH+CHACHA20"),
    ],
)
def test_create_urllib3_context_set_ciphers(monkeypatch, ciphers, expected_ciphers):
    """create_urllib3_context must set the given cipher list exactly once,
    falling back to DEFAULT_CIPHERS when none is provided."""
    context = mock.create_autospec(ssl_.SSLContext)
    context.set_ciphers = mock.Mock()
    context.options = 0
    # Make the factory return our autospec'd context instead of a real one.
    monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)

    assert ssl_.create_urllib3_context(ciphers=ciphers) is context

    assert context.set_ciphers.call_count == 1
    assert context.set_ciphers.call_args == mock.call(expected_ciphers)
def test_wrap_socket_given_context_no_load_default_certs():
    """An explicitly supplied ssl_context must not be mutated with the
    system default CA certificates."""
    context = mock.create_autospec(ssl_.SSLContext)
    context.load_default_certs = mock.Mock()

    sock = mock.Mock()
    ssl_.ssl_wrap_socket(sock, ssl_context=context)

    context.load_default_certs.assert_not_called()
@notPyPy2
def test_wrap_socket_given_ca_certs_no_load_default_certs(monkeypatch):
    """Passing ca_certs must load that bundle via load_verify_locations and
    skip the system default CA store."""
    context = mock.create_autospec(ssl_.SSLContext)
    context.load_default_certs = mock.Mock()
    context.options = 0
    monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)

    sock = mock.Mock()
    ssl_.ssl_wrap_socket(sock, ca_certs="/tmp/fake-file")

    context.load_default_certs.assert_not_called()
    context.load_verify_locations.assert_called_with("/tmp/fake-file", None, None)
def test_wrap_socket_default_loads_default_certs(monkeypatch):
    """With no certificates supplied at all, ssl_wrap_socket must fall back
    to the system default CA store."""
    context = mock.create_autospec(ssl_.SSLContext)
    context.load_default_certs = mock.Mock()
    context.options = 0
    monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)

    sock = mock.Mock()
    ssl_.ssl_wrap_socket(sock)

    context.load_default_certs.assert_called_with()
@pytest.mark.parametrize(
    ["pha", "expected_pha"], [(None, None), (False, True), (True, True)]
)
def test_create_urllib3_context_pha(monkeypatch, pha, expected_pha):
    """post_handshake_auth is enabled (set True) whenever the context exposes
    the attribute with a boolean value, and left as None otherwise."""
    context = mock.create_autospec(ssl_.SSLContext)
    context.set_ciphers = mock.Mock()
    context.options = 0
    context.post_handshake_auth = pha
    monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)

    assert ssl_.create_urllib3_context() is context

    assert context.post_handshake_auth == expected_pha
| 28.105263
| 88
| 0.677486
|
4a00d6f7d6f04680dc02730c4975ea4e6f75173f
| 5,238
|
py
|
Python
|
rpython/jit/metainterp/optimizeopt/rawbuffer.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 381
|
2018-08-18T03:37:22.000Z
|
2022-02-06T23:57:36.000Z
|
rpython/jit/metainterp/optimizeopt/rawbuffer.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 16
|
2018-09-22T18:12:47.000Z
|
2022-02-22T20:03:59.000Z
|
rpython/jit/metainterp/optimizeopt/rawbuffer.py
|
nanjekyejoannah/pypy
|
e80079fe13c29eda7b2a6b4cd4557051f975a2d9
|
[
"Apache-2.0",
"OpenSSL"
] | 55
|
2015-08-16T02:41:30.000Z
|
2022-03-20T20:33:35.000Z
|
from rpython.rlib.debug import debug_start, debug_stop, debug_print
from rpython.rlib.objectmodel import compute_unique_id, we_are_translated
class InvalidRawOperation(Exception):
    """Base class for invalid operations on a RawBuffer."""
    pass
class InvalidRawWrite(InvalidRawOperation):
    """Raised when a write overlaps or conflicts with a recorded write."""
    pass
class InvalidRawRead(InvalidRawOperation):
    """Raised when a read cannot be matched to a recorded write."""
    pass
class RawBuffer(object):
    """Model of a raw memory buffer whose writes are kept virtual.

    Recorded write i is the tuple <offsets[i], lengths[i], descrs[i],
    values[i]>, kept sorted by offset. Operations that would overlap
    inconsistently or read never-written memory raise
    InvalidRawWrite/InvalidRawRead, which aborts the optimization.
    """

    def __init__(self, cpu, logops=None):
        # the following lists represents the writes in the buffer: values[i]
        # is the value of length lengths[i] stored at offset[i].
        #
        # the invariant is that they are ordered by offset, and that
        # offset[i]+length[i] <= offset[i+1], i.e. that the writes never
        # overlaps
        self.cpu = cpu
        self.logops = logops  # optional pretty-printer used for debug logging
        self.offsets = []
        self.lengths = []
        self.descrs = []
        self.values = []

    def _get_memory(self):
        """
        NOT_RPYTHON
        for testing only
        """
        return zip(self.offsets, self.lengths, self.descrs, self.values)

    def _repr_of_descr(self, descr):
        # Debug helper: readable representation of a descr, with its id.
        if self.logops:
            s = self.logops.repr_of_descr(descr)
        else:
            s = str(descr)
        s += " at %d" % compute_unique_id(descr)
        return s

    def _repr_of_value(self, value):
        # Debug helper: readable representation of a stored value.
        if not we_are_translated() and isinstance(value, str):
            return value  # for tests
        if self.logops:
            s = self.logops.repr_of_arg(value)
        else:
            s = str(value)
        s += " at %d" % compute_unique_id(value)
        return s

    def _dump_to_log(self):
        # Dump the entire recorded state to the JIT debug log.
        debug_print("RawBuffer state")
        debug_print("offset, length, descr, box")
        debug_print("(box == None means that the value is still virtual)")
        for i in range(len(self.offsets)):
            descr = self._repr_of_descr(self.descrs[i])
            box = self._repr_of_value(self.values[i])
            debug_print("%d, %d, %s, %s" % (self.offsets[i], self.lengths[i], descr, box))

    def _invalid_write(self, message, offset, length, descr, value):
        # Log full diagnostics, then abort via InvalidRawWrite.
        debug_start('jit-log-rawbuffer')
        debug_print('Invalid write: %s' % message)
        debug_print(" offset: %d" % offset)
        debug_print(" length: %d" % length)
        debug_print(" descr: %s" % self._repr_of_descr(descr))
        debug_print(" value: %s" % self._repr_of_value(value))
        self._dump_to_log()
        debug_stop('jit-log-rawbuffer')
        raise InvalidRawWrite

    def _invalid_read(self, message, offset, length, descr):
        # Log full diagnostics, then abort via InvalidRawRead.
        debug_start('jit-log-rawbuffer')
        debug_print('Invalid read: %s' % message)
        debug_print(" offset: %d" % offset)
        debug_print(" length: %d" % length)
        debug_print(" descr: %s" % self._repr_of_descr(descr))
        self._dump_to_log()
        debug_stop('jit-log-rawbuffer')
        raise InvalidRawRead

    def _descrs_are_compatible(self, d1, d2):
        # two arraydescrs are compatible if they have the same basesize,
        # itemsize and sign, even if they are not identical
        unpack = self.cpu.unpack_arraydescr_size
        return unpack(d1) == unpack(d2)

    def write_value(self, offset, length, descr, value):
        """Record a write of *value* (*length* bytes) at *offset*.

        An existing entry with the same offset/length/compatible descr is
        updated in place; otherwise a new entry is inserted, refusing any
        overlap with its neighbours (raises InvalidRawWrite).
        """
        i = 0
        N = len(self.offsets)
        # Linear scan for an existing entry at this offset (lists are sorted).
        while i < N:
            if self.offsets[i] == offset:
                if (length != self.lengths[i] or not
                    self._descrs_are_compatible(descr, self.descrs[i])):
                    # in theory we could add support for the cases in which
                    # the length or descr is different, but I don't think we
                    # need it in practice
                    self._invalid_write('length or descr not compatible',
                                        offset, length, descr, value)
                # update the value at this offset
                self.values[i] = value
                return
            elif self.offsets[i] > offset:
                break
            i += 1
        #
        # i is now the insertion point; reject any overlap on either side.
        if i < len(self.offsets) and offset+length > self.offsets[i]:
            self._invalid_write("overlap with next bytes",
                                offset, length, descr, value)
        if i > 0 and self.offsets[i-1]+self.lengths[i-1] > offset:
            self._invalid_write("overlap with previous bytes",
                                offset, length, descr, value)
        # insert a new value at offset
        self.offsets.insert(i, offset)
        self.lengths.insert(i, length)
        self.descrs.insert(i, descr)
        self.values.insert(i, value)

    def read_value(self, offset, length, descr):
        """Return the value previously written at exactly (offset, length).

        Raises InvalidRawRead for a mismatched length/descr or for a read
        of memory that was never written (uninitialized).
        """
        i = 0
        N = len(self.offsets)
        while i < N:
            if self.offsets[i] == offset:
                if (length != self.lengths[i] or
                    not self._descrs_are_compatible(descr, self.descrs[i])):
                    self._invalid_read('length or descr not compatible',
                                       offset, length, descr)
                return self.values[i]
            i += 1
        # memory location not found: this means we are reading from
        # uninitialized memory, give up the optimization
        self._invalid_read('uninitialized memory',
                           offset, length, descr)
| 38.8
| 90
| 0.57331
|
4a00d80afd4aec8043d4ffcee1f2a0c6416e9225
| 5,334
|
py
|
Python
|
src/m2_todo_and_commit_push.py
|
landod/01-IntroductionToPython
|
c7d2e8a3514fd52f39c2e254edce386ee8e3c46b
|
[
"MIT"
] | null | null | null |
src/m2_todo_and_commit_push.py
|
landod/01-IntroductionToPython
|
c7d2e8a3514fd52f39c2e254edce386ee8e3c46b
|
[
"MIT"
] | null | null | null |
src/m2_todo_and_commit_push.py
|
landod/01-IntroductionToPython
|
c7d2e8a3514fd52f39c2e254edce386ee8e3c46b
|
[
"MIT"
] | null | null | null |
# Exercise output: a greeting, an introduction, and a computed product.
greeting = 'Hello, World'
introduction = 'My name is Owen Land'
print(greeting)
print(introduction)
print(3607 * 34227)  # let Python do the arithmetic (see TO-DO 4 below)
###############################################################################
#
# This line is a COMMENT -- a note to human readers of this file.
# When a program runs, it ignores everything from a # (hash) mark
# to the end of the line with the # mark.
#
# We call files that have Python code in them MODULES.
# Line 1 of this module (look at it now) prints (displays) the STRING
# Hello, World
# on the Run window (to the right).
#
# Anything surrounded by quote marks (single or double) is a STRING.
#
###############################################################################
###############################################################################
#
# DONE: 1.
# (Yes, that means for YOU to DO things per these instructions:)
#
# Run this module by right clicking anywhere in this window and selecting
# Run 'name of file'
# Or, use the keyboard shortcut: Control + Shift + Function-F10
#
# After running, find the Run window (to the right) and confirm that
# Hello, World
# did indeed get printed (displayed) on that window.
#
###############################################################################
###############################################################################
#
# DONE 2.
# Notice the small horizontal BLUE bars on the scrollbar-like thing
# on the right. Each blue bar indicates a thing TO-DO in this module.
#
# a. You can use the blue bars to go from one TO-DO to the next
# by clicking on the blue bars. ** Try that now. **
#
# b. When you have completed a TO-DO, you should change the word
# _TODO_ (ignore the underscores on this line)
# to
# DONE
# Try it now on line 20 above, and note that its blue bar on
# the scrollbar-like thing to the right has gone away.
#
# If you change TODOs to DONEs like this, you can tell when you have
# finished all the exercises in a module -- there will be no blue bars
# left on the scrollbar-like thing to the right.
#
# You have now completed TO-DO #2, so change its TO-DO on line 35 to DONE
# (and proceed similarly for all forthcoming TODOs in this course).
#
###############################################################################
###############################################################################
#
# DONE: 3.
# Add another print statement below the current Line 1 above.
# It should print any string that you want (but keep it G-rated!)
#
# Test your code by re-running this module, either by proceeding as you did
# when you ran this module the first time, or by pressing the green-triangle
# "Play" button that is on the toolbar at the top of this window.
# Look at the Run window to be sure that your string printed as expected.
#
###############################################################################
###############################################################################
#
# DONE: 4.
# Add yet another print statement.
# This one should print the *product* of 3,607 and 34,227.
# Let the computer do the arithmetic for you (no calculators!).
# You do NOT have to use strings for this, so no quotation marks!
#
# TEST your code by re-running this module, then asking someone
# whom you trust: What number did your print display for this TO-DO?
# (HINT: It is an INTERESTING number.) Get help if your value is wrong.
#
###############################################################################
###############################################################################
#
# DONE: 5.
# Look at the list of files in the Project window (to the left). Note that
# this file (m2_todo_and_commit_push.py) is now displayed in BLUE.
#
# The BLUE font color means that you have made changes to this file which
# have not yet been COMMITTED to version control and PUSHED to the cloud.
#
# COMMIT and PUSH your work by:
# 1. Select VCS from the menu bar (above).
# 2. Choose Commit from the pull-down menu that appears.
# 3. In the Commit Changes window that pops up:
# - HOVER over the Commit button
# (in the lower-right corner of the window)
# - CLICK on Commit and Push.
#
# COMMIT adds the changed work to the version control on your computer
# and PUSH adds the changed work into your Github repository in the "cloud".
#
# PyCharm forces you to add a Commit message intended to describe
# your work to teammates, but until your team project use Done
# or whatever you want for the Commit message.
#
# Always PUSH (in addition to the COMMIT) so that your work is backed-up
# in the cloud, and also so that your instructor can access it as needed.
# If you COMMIT but forget to PUSH, you can subsequently do the PUSH by:
# VCS ~ Git ~ Push...
#
# Oh, one more thing:
# Do you have any blue bars on the scrollbar-like thing to the right?
# If so, click on each blue bar and change its TO-DO to DONE,
# then run the file (to make sure you didn't break anything)
# and COMMIT-and-PUSH again.
#
# You can COMMIT-and-PUSH as often as you like. DO IT FREQUENTLY.
#
###############################################################################
| 42.672
| 79
| 0.55943
|
4a00d91981464013a3ca42b44af826a6a6fa5e81
| 1,312
|
py
|
Python
|
vdf.py
|
CaptainZidgel/StraightToVDM
|
66086063eeac0edae1b818d116dc2d10aa8d1a26
|
[
"CNRI-Python"
] | 1
|
2020-10-30T07:29:51.000Z
|
2020-10-30T07:29:51.000Z
|
vdf.py
|
CaptainZidgel/StraightToVDM
|
66086063eeac0edae1b818d116dc2d10aa8d1a26
|
[
"CNRI-Python"
] | null | null | null |
vdf.py
|
CaptainZidgel/StraightToVDM
|
66086063eeac0edae1b818d116dc2d10aa8d1a26
|
[
"CNRI-Python"
] | null | null | null |
'''
VDM appears to be VDF (Valve Demo Format).
There are other modules that already implement read/write for VDF
But I decided to write my own for 2 reasons:
0 - I like writing code :)
1 - I didn't want to have to learn an external module
2 - VDM files are extremely simple anyway and I don't feature any nesting or reading functions (to reiterate: This does not support nesting)
'''
class VDF:
    """Minimal writer for Valve's flat (non-nested) VDF/VDM key-value format.

    Entries are accumulated in an in-memory buffer via commit() and flushed
    to disk, wrapped in a single top-level header block, by write().
    """

    def __init__(self, path, header="demoactions"):
        self.path = path          # destination file path
        self.header = header      # name of the top-level block
        self.buffer = ''          # serialized entries committed so far
        self.elements = 0         # running entry counter (used as entry name)

    def commit(self, dictionary, indent=1):
        """Append *dictionary* as the next numbered entry at *indent* tabs."""
        self.elements += 1
        outer = "\t" * indent
        inner = outer + "\t"
        pieces = ['{}"{}"\n{}{{\n'.format(outer, self.elements, outer)]
        pieces.extend(
            inner + '{} "{}"\n'.format(key, value)
            for key, value in dictionary.items()
        )
        pieces.append(outer + '}\n')
        self.buffer += ''.join(pieces)

    def write(self):
        """Write header + committed entries to self.path, replacing the file."""
        document = "{}\n{{\n{}}}\n".format(self.header, self.buffer)
        with open(self.path, "w+") as f:
            f.write(document)

    def __str__(self):
        return self.buffer
def Test():
    """Smoke test: serialize two sample demo actions to testvdf.vdm."""
    demo = VDF("testvdf.vdm")
    sample_entries = (
        {"factory": "SkipAhead", "name": "skip", "starttick": 1, "skiptotick": "19490"},
        {"factory": "PlayCommands", "name": "startec", "starttick": 26261, "commands": "startrecording"},
    )
    for entry in sample_entries:
        demo.commit(entry)
    demo.write()
| 32.8
| 141
| 0.653963
|
4a00d9eecdab208e207885a452d700ea8c1ee991
| 2,075
|
py
|
Python
|
exe0900car.py
|
juniorpedroso/Curso-Intensivo-de-Python
|
4357e313660e77f5e43bb516c1b19fd979de5070
|
[
"MIT"
] | null | null | null |
exe0900car.py
|
juniorpedroso/Curso-Intensivo-de-Python
|
4357e313660e77f5e43bb516c1b19fd979de5070
|
[
"MIT"
] | null | null | null |
exe0900car.py
|
juniorpedroso/Curso-Intensivo-de-Python
|
4357e313660e77f5e43bb516c1b19fd979de5070
|
[
"MIT"
] | null | null | null |
"""[Uma Classe que pode ser usada para representar um carro]"""
class Car():
    """A simple representation of a car with an odometer."""

    def __init__(self, make, model, year):
        """Store the attributes that describe the car."""
        self.make = make
        self.model = model
        self.year = year
        # Mileage in kilometres; every new car starts at zero.
        self.odometer_reading = 0

    def get_descriptive_name(self):
        """Return a neatly formatted, title-cased descriptive name."""
        return f'{self.year} {self.make} {self.model}'.title()

    def read_odometer(self):
        """Print a sentence showing the car's current mileage."""
        print(f'Este carro está com {self.odometer_reading} Km.')

    def update_odometer(self, kilometragem):
        """Set the odometer to *kilometragem*, refusing any roll-back."""
        if kilometragem < self.odometer_reading:
            print('Você não pode voltar o hodômetro!')
        else:
            self.odometer_reading = kilometragem

    def increment_odometer(self, kilometragem):
        """Add *kilometragem* to the odometer; reject non-positive amounts."""
        if kilometragem <= 0:
            print('Você não pode voltar o hodômetro!')
        else:
            self.odometer_reading += kilometragem
# Demo: build three cars, set their mileage, then exercise the odometer rules.
my_new_car = Car('audi', 'a4', 2016)
meu_jeep = Car('jeep', 'renegade', 2018)
carro_zé = Car('toyota', 'corolla', 2020)

# Valid updates print nothing, so setting mileage here keeps output identical.
for _car, _km in ((my_new_car, 50000), (meu_jeep, 30000), (carro_zé, 5000)):
    _car.update_odometer(_km)

carros = [my_new_car, meu_jeep, carro_zé]
for carro in carros:
    print()
    print(carro.get_descriptive_name())
    carro.read_odometer()

print()
print('Tentando voltar o hodômetro do Audi.')
my_new_car.update_odometer(10000)

print()
print('Adicionando 500 km no Jeep')
meu_jeep.increment_odometer(500)
meu_jeep.read_odometer()

print()
print('Tentando diminuir 300 km no Toyota')
carro_zé.increment_odometer(-300)
carro_zé.read_odometer()
print()
| 30.072464
| 75
| 0.66988
|
4a00da8a5b5f4ca04bb5802cf13d9c49eb1fb63c
| 4,073
|
py
|
Python
|
rlkit/rlkit/envs/goal_generation/pickup_goal_dataset.py
|
mihdalal/raps
|
4818769adc7496f60f819c875a9995950bd5ed19
|
[
"MIT"
] | 36
|
2021-10-29T21:23:11.000Z
|
2022-03-30T15:38:13.000Z
|
rlkit/rlkit/envs/goal_generation/pickup_goal_dataset.py
|
mihdalal/raps
|
4818769adc7496f60f819c875a9995950bd5ed19
|
[
"MIT"
] | null | null | null |
rlkit/rlkit/envs/goal_generation/pickup_goal_dataset.py
|
mihdalal/raps
|
4818769adc7496f60f819c875a9995950bd5ed19
|
[
"MIT"
] | 4
|
2021-10-31T16:32:53.000Z
|
2022-02-11T11:09:03.000Z
|
import os.path as osp
import random
import cv2
import numpy as np
from multiworld.envs.mujoco.sawyer_xyz.sawyer_pick_and_place import (
get_image_presampled_goals,
)
from rlkit.util.io import local_path_from_s3_or_local_path
def setup_pickup_image_env(image_env, num_presampled_goals):
    """
    Image env and pickup env will have presampled goals. VAE wrapper should
    encode whatever presampled goal is sampled.
    """
    goals = get_image_presampled_goals(image_env, num_presampled_goals)
    image_env._presampled_goals = goals
    # Count the goals via an arbitrary entry's first dimension.
    some_key = random.choice(list(goals))
    image_env.num_goals_presampled = goals[some_key].shape[0]
def get_image_presampled_goals_from_vae_env(env, num_presampled_goals, env_id=None):
    """Presample image goals from the image env wrapped inside a VAE env.

    env_id is accepted for interface uniformity with the sibling helper
    but is unused here.
    """
    image_env = env.wrapped_env
    return get_image_presampled_goals(image_env, num_presampled_goals)
def get_image_presampled_goals_from_image_env(env, num_presampled_goals, env_id=None):
    """Presample image goals directly from an image env (env_id unused)."""
    return get_image_presampled_goals(env, num_presampled_goals)
def generate_vae_dataset(variant):
    """Unpack a variant dict into generate_vae_dataset_from_params."""
    return generate_vae_dataset_from_params(**variant)
def generate_vae_dataset_from_params(
    env_class=None,
    env_kwargs=None,
    env_id=None,
    N=10000,
    test_p=0.9,
    use_cached=True,
    imsize=84,
    num_channels=1,
    show=False,
    init_camera=None,
    dataset_path=None,
    oracle_dataset=False,
    n_random_steps=100,
    vae_dataset_specific_env_kwargs=None,
    save_file_prefix=None,
):
    """Build (train, test) uint8 image datasets of presampled pickup goals.

    Loads from dataset_path if given, else from a /tmp cache file, else
    renders N goal images from a freshly constructed (image-wrapped) env and
    caches them. Returns (train_dataset, test_dataset, info) where the split
    is test_p train / rest test and info may carry the constructed env.
    Only oracle_dataset=True is supported (asserted below); n_random_steps
    is accepted but never used in this implementation.
    """
    import time

    from multiworld.core.image_env import ImageEnv, unormalize_image

    assert oracle_dataset == True

    if env_kwargs is None:
        env_kwargs = {}
    # Cache-file prefix: env_id if given, otherwise the env class name.
    if save_file_prefix is None:
        save_file_prefix = env_id
    if save_file_prefix is None:
        save_file_prefix = env_class.__name__
    filename = "/tmp/{}_N{}_{}_imsize{}_oracle{}.npy".format(
        save_file_prefix,
        str(N),
        init_camera.__name__ if init_camera else "",
        imsize,
        oracle_dataset,
    )
    info = {}
    if dataset_path is not None:
        # Explicit dataset file takes precedence over the /tmp cache.
        filename = local_path_from_s3_or_local_path(dataset_path)
        dataset = np.load(filename)
        np.random.shuffle(dataset)
        N = dataset.shape[0]
    elif use_cached and osp.isfile(filename):
        dataset = np.load(filename)
        np.random.shuffle(dataset)
        print("loaded data from saved file", filename)
    else:
        # Cache miss: construct the env and render the goals from scratch.
        now = time.time()
        if env_id is not None:
            import gym

            import multiworld

            multiworld.register_all_envs()
            env = gym.make(env_id)
        else:
            # Merge env_kwargs into the dataset-specific kwargs (which win).
            if vae_dataset_specific_env_kwargs is None:
                vae_dataset_specific_env_kwargs = {}
            for key, val in env_kwargs.items():
                if key not in vae_dataset_specific_env_kwargs:
                    vae_dataset_specific_env_kwargs[key] = val
            env = env_class(**vae_dataset_specific_env_kwargs)
        if not isinstance(env, ImageEnv):
            env = ImageEnv(
                env,
                imsize,
                init_camera=init_camera,
                transpose=True,
                normalize=True,
            )
        setup_pickup_image_env(env, num_presampled_goals=N)
        env.reset()
        info["env"] = env

        # One flattened uint8 image per row.
        dataset = np.zeros((N, imsize * imsize * num_channels), dtype=np.uint8)
        for i in range(N):
            img = env._presampled_goals["image_desired_goal"][i]
            dataset[i, :] = unormalize_image(img)
            if show:
                # Debug visualization; assumes CHW layout with 3 channels.
                img = img.reshape(3, imsize, imsize).transpose()
                img = img[::-1, :, ::-1]
                cv2.imshow("img", img)
                cv2.waitKey(1)
                time.sleep(0.2)
                # radius = input('waiting...')
        print("done making training data", filename, time.time() - now)
        np.random.shuffle(dataset)
        np.save(filename, dataset)

    n = int(N * test_p)
    train_dataset = dataset[:n, :]
    test_dataset = dataset[n:, :]
    return train_dataset, test_dataset, info
| 31.091603
| 86
| 0.646207
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.