Column schema (ranges as reported by the dataset viewer; ⌀ marks nullable columns):

| Column | Type | Range |
|---|---|---|
| blob_id | string | length 40 – 40 |
| directory_id | string | length 40 – 40 |
| path | string | length 4 – 721 |
| content_id | string | length 40 – 40 |
| detected_licenses | list | length 0 – 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 – 91 |
| snapshot_id | string | length 40 – 40 |
| revision_id | string | length 40 – 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns], nullable (⌀) | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable (⌀) | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 – 113 |
| content | string | length 6 – 10.2M |
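
The schema above matches the Hugging Face `datasets` viewer output. As a consumption sketch (the dataset identifier and split name below are hypothetical placeholders, since the source does not name the dataset):

```python
# Sketch only: stream rows of a large code dataset without downloading all of it.
# "org/code-dataset" and the split name are placeholders, not real identifiers.
from datasets import load_dataset

ds = load_dataset("org/code-dataset", split="train", streaming=True)
for row in ds:
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    break  # inspect just the first record
```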

path: /examples/python/hellostreamingworld/hellostreamingworld_pb2_grpc.py
repo_name: grpc/grpc | branch_name: refs/heads/master | filename: hellostreamingworld_pb2_grpc.py | extension: py | length_bytes: 2,455
blob_id: 7e0357f2ae4526370c589848cf13f75364459aa9 | content_id: b0ba42dfc229098504726e6cf9ab2b6ec7757e75
directory_id: 7d232f51e2330a4f537c50ede9c6bc023d656fd4 | snapshot_id: 6975af3ba6f07a6fe965b875a0c09abf18999a52 | revision_id: e4d598ab64aa54f1da78c6ed6133b741742d11d4
detected_licenses: ["BSD-3-Clause", "MPL-2.0", "Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-31T01:10:22.666618 | revision_date: 2023-08-30T22:35:17 | committer_date: 2023-08-30T22:35:17
github_id: 27,729,880 | star_events_count: 42,330 | fork_events_count: 13,022
gha_created_at: 2014-12-08T18:58:53 | gha_event_created_at: 2023-09-14T21:54:19 | gha_language: C++
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import hellostreamingworld_pb2 as hellostreamingworld__pb2
class MultiGreeterStub(object):
"""The greeting service definition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.sayHello = channel.unary_stream(
'/hellostreamingworld.MultiGreeter/sayHello',
request_serializer=hellostreamingworld__pb2.HelloRequest.SerializeToString,
response_deserializer=hellostreamingworld__pb2.HelloReply.FromString,
)
class MultiGreeterServicer(object):
"""The greeting service definition.
"""
def sayHello(self, request, context):
"""Sends multiple greetings
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_MultiGreeterServicer_to_server(servicer, server):
rpc_method_handlers = {
'sayHello': grpc.unary_stream_rpc_method_handler(
servicer.sayHello,
request_deserializer=hellostreamingworld__pb2.HelloRequest.FromString,
response_serializer=hellostreamingworld__pb2.HelloReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'hellostreamingworld.MultiGreeter', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class MultiGreeter(object):
"""The greeting service definition.
"""
@staticmethod
def sayHello(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/hellostreamingworld.MultiGreeter/sayHello',
hellostreamingworld__pb2.HelloRequest.SerializeToString,
hellostreamingworld__pb2.HelloReply.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
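
As a usage sketch for the generated stub above, a client for the server-streaming `sayHello` RPC might look as follows; the address is illustrative, and the `HelloRequest` fields live in the (not shown) `hellostreamingworld_pb2` module, so `name` is an assumption:

```python
# Editor's sketch, not part of the generated file.
import grpc
import hellostreamingworld_pb2
import hellostreamingworld_pb2_grpc

with grpc.insecure_channel("localhost:50051") as channel:
    stub = hellostreamingworld_pb2_grpc.MultiGreeterStub(channel)
    # sayHello is unary-stream: one request in, an iterator of HelloReply out.
    for reply in stub.sayHello(hellostreamingworld_pb2.HelloRequest(name="world")):
        print(reply)
```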

path: /cli/__init__.py
repo_name: algolia/docsearch-scraper | branch_name: refs/heads/master | filename: __init__.py | extension: py | length_bytes: 102
blob_id: 6a88b57c2fccc8fbdb4aef20a3ecb324d4340cfd | content_id: 6b35179571762b935bf4ddb1a131638b1b62f7dd
directory_id: 74fa258218004bcdb3f33459800f82c9d7097afb | snapshot_id: 1f1524e67585454c0d9e101fadf5aaaf429781e4 | revision_id: 70509a564fe76b34ab28a81189ee5abd99b1a440
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-08-10T15:26:07.762797 | revision_date: 2022-01-31T08:29:21 | committer_date: 2022-01-31T09:29:12
github_id: 43,568,573 | star_events_count: 293 | fork_events_count: 117
gha_created_at: 2015-10-02T18:14:25 | gha_event_created_at: 2023-07-20T00:41:27 | gha_language: Python
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
from sys import path
from os.path import dirname, join
path.insert(0, join(dirname(__file__), ".."))

path: /L1TriggerConfig/L1GtConfigProducers/test/L1GtTester_cfg.py
repo_name: cms-sw/cmssw | branch_name: refs/heads/master | filename: L1GtTester_cfg.py | extension: py | length_bytes: 6,152
blob_id: 3ca6c969c488ce8be6574970f8c1d9e02657e20f | content_id: 96f9d2474b13d22a22f8c45358a9ff134a8a9bd1
directory_id: a5a99f646e371b45974a6fb6ccc06b0a674818f2 | snapshot_id: 4ecd2c1105d59c66d385551230542c6615b9ab58 | revision_id: 19c178740257eb48367778593da55dcad08b7a4f
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-23T21:57:42.491143 | revision_date: 2023-08-22T20:22:40 | committer_date: 2023-08-22T20:22:40
github_id: 10,969,551 | star_events_count: 1,006 | fork_events_count: 3,696
gha_created_at: 2013-06-26T14:09:07 | gha_event_created_at: 2023-09-14T19:14:28 | gha_language: C++
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
from __future__ import print_function
# cfg file to test L1 GT records
#
# V M Ghete 2008 - 2010 - 2012
import FWCore.ParameterSet.Config as cms
# choose a valid global tag for the release you are using
#
# 5_2_X
#useGlobalTag='GR_R_52_V9'
useGlobalTag='START52_V10'
# run number to retrieve the records - irrelevant if records are overwritten or
# the global tag is a MC global tag, with infinite IoV
useRunNumber = 194251
# print L1 GT prescale factors and trigger mask
printPrescaleFactorsAndMasks = True
#printPrescaleFactorsAndMasks = False
# print L1 GT board maps
printBoardMaps = True
#printBoardMaps = False
# print L1 GT stable parameters
printStableParameters = True
#printStableParameters = False
# print L1 GT parameters
printParameters = True
#printParameters = False
# print L1 GT PSB setup
printPsbSetup = True
#printPsbSetup = False
##########################################################################################
# process
processName = "L1GtTester"
process = cms.Process(processName)
# number of events and source
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptyIOVSource",
timetype = cms.string('runnumber'),
firstValue = cms.uint64(useRunNumber),
lastValue = cms.uint64(useRunNumber),
interval = cms.uint64(1)
)
# import standard configurations, load and configure modules via Global Tag
# https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideFrontierConditions
# also retrieve the HLT menu
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = useGlobalTag+'::All'
# records to be printed
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTester_cff')
# prescale factors and masks
#process.l1GtPrescaleFactorsAndMasksTester.TesterPrescaleFactors = True
#process.l1GtPrescaleFactorsAndMasksTester.TesterTriggerMask = True
#process.l1GtPrescaleFactorsAndMasksTester.TesterTriggerVetoMask = True
#process.l1GtPrescaleFactorsAndMasksTester.RetrieveInBeginRun = True
#process.l1GtPrescaleFactorsAndMasksTester.RetrieveInBeginLuminosityBlock = False
#process.l1GtPrescaleFactorsAndMasksTester.RetrieveInAnalyze = False
#process.l1GtPrescaleFactorsAndMasksTester.PrintInBeginRun = True
#process.l1GtPrescaleFactorsAndMasksTester.PrintInBeginLuminosityBlock = False
#process.l1GtPrescaleFactorsAndMasksTester.PrintInAnalyze = False
#process.l1GtPrescaleFactorsAndMasksTester.PrintOutput = 0
# Path definitions
process.pathL1GtStableParameters = cms.Path(process.seqL1GtStableParameters)
process.pathL1GtParameters = cms.Path(process.seqL1GtParameters)
process.pathL1GtBoardMaps = cms.Path(process.seqL1GtBoardMaps)
process.pathL1GtPsbSetup = cms.Path(process.seqL1GtPsbSetup)
process.pathL1GtPrescaleFactorsAndMasks = cms.Path(process.seqL1GtPrescaleFactorsAndMasks)
# Schedule definition
process.schedule = cms.Schedule()
print('')
if printStableParameters == True :
process.schedule.extend([process.pathL1GtStableParameters])
print("Printing L1 GT stable parameters from global tag ", useGlobalTag)
else :
print("L1 GT stable parameters from ", useGlobalTag, " not requested to be printed")
if printParameters == True :
process.schedule.extend([process.pathL1GtParameters])
print("Printing L1 GT parameters from global tag ", useGlobalTag)
else :
print("L1 GT parameters from ", useGlobalTag, " not requested to be printed")
if printBoardMaps == True :
process.schedule.extend([process.pathL1GtBoardMaps])
print("Printing L1 GT board maps from global tag ", useGlobalTag)
else :
print("L1 GT board maps from ", useGlobalTag, " not requested to be printed")
if printPsbSetup == True :
process.schedule.extend([process.pathL1GtPsbSetup])
print("Printing L1 GT PSB setup from global tag ", useGlobalTag)
else :
print("L1 GT PSB setup from ", useGlobalTag, " not requested to be printed")
if printPrescaleFactorsAndMasks == True :
process.schedule.extend([process.pathL1GtPrescaleFactorsAndMasks])
print("Printing L1 GT prescale factors and masks from global tag ", useGlobalTag)
else :
print("L1 GT prescale factors and masks from ", useGlobalTag, " not requested to be printed")
# services
# Message Logger
process.MessageLogger.cerr.enable = False
process.MessageLogger.files.L1GtTester_errors = cms.untracked.PSet(
threshold = cms.untracked.string('ERROR'),
ERROR = cms.untracked.PSet( limit = cms.untracked.int32(-1) ),
L1GtPrescaleFactorsAndMasksTester = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)
process.MessageLogger.files.L1GtTester_warnings = cms.untracked.PSet(
threshold = cms.untracked.string('WARNING'),
WARNING = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
ERROR = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
L1GtPrescaleFactorsAndMasksTester = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)
process.MessageLogger.files.L1GtTester_info = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
INFO = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
WARNING = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
ERROR = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
L1GtPrescaleFactorsAndMasksTester = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)
process.MessageLogger.files.L1GtTester_debug = cms.untracked.PSet(
threshold = cms.untracked.string('DEBUG'),
DEBUG = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
INFO = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
WARNING = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
ERROR = cms.untracked.PSet( limit = cms.untracked.int32(0) ),
L1GtPrescaleFactorsAndMasksTester = cms.untracked.PSet( limit = cms.untracked.int32(-1) )
)

path: /pipelines/skylab/slideseq/test_inputs/Plumbing/slideseq_downsample.py
repo_name: broadinstitute/warp | branch_name: refs/heads/develop | filename: slideseq_downsample.py | extension: py | length_bytes: 4,771
blob_id: 11ccacd0e9b298d583c74febbe2fcb0a5b3768e6 | content_id: 1443730e65902957bdce9005b3082de9fd5b0858
directory_id: e40ae10cf7e780a38105c02560985450fb4e41fd | snapshot_id: 4deed8cb985ed48490fafe2431fe04cf19efdd4b | revision_id: 9d0097a1ac17cd97dc58aab52ba8d7d42062b212
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-08-16T19:19:20.421128 | revision_date: 2023-08-15T15:42:52 | committer_date: 2023-08-15T15:42:52
github_id: 271,609,826 | star_events_count: 165 | fork_events_count: 83
gha_created_at: 2020-06-11T17:42:11 | gha_event_created_at: 2023-09-12T12:57:56 | gha_language: WDL
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
#!/usr/bin/env python3
import argparse
import re
import pandas as pd
import numpy as np
def main():
description = """This script downsamples data for the SlideSeq pipeline for use in testing."""
parser = argparse.ArgumentParser(description=description)
# Input file arguments
parser.add_argument("--xy_coords", dest="xy_coords", required=True, help="Path to bead xy-coordinates file",
type=str)
parser.add_argument("--reads_umi", dest="reads_umi", required=True, help="Path to reads per UMI file", type=str)
parser.add_argument("--reads_cbc", dest="reads_cbc", required=True, help="Path to reads per cell barcode", type=str)
parser.add_argument("--dexpr_sum", dest="dexpr_sum", required=True, help="Path to digital expression summary file",
type=str)
parser.add_argument("--raw_cbcs", dest="raw_cbcs", required=True, help="Path to raw cell barcodes file", type=str)
# Cropping and sampling arguments
# Test sample cropped from X: 2500 to 4000, Y: 1000 to 4200, sampling 10 cells per chunk in a 30 by 30 chunk region
parser.add_argument("--xcoor_min", dest="xcoor_min", required=True, help="Minimum x-coordinate for sample cropping",
type=int)
parser.add_argument("--xcoor_max", dest="xcoor_max", required=True, help="Maximum x-coordinate for sample cropping",
type=int)
parser.add_argument("--ycoor_min", dest="ycoor_min", required=True, help="Minimum y-coordinate for sample cropping",
type=int)
parser.add_argument("--ycoor_max", dest="ycoor_max", required=True, help="Maximum y-coordinate for sample cropping",
type=int)
parser.add_argument("--cells_per_chunk", dest="cells_per_chunk", required=True, help="Cells per chunk to sample",
type=int)
parser.add_argument("--nx_slices", dest="nx_slices", required=True,
help="Number of slices to sample along the x-axis", type=int)
parser.add_argument("--ny_slices", dest="ny_slices", required=True,
help="Number of slices to sample along the y-axis", type=int)
# Parse and store arguments
args = parser.parse_args()
# Create dataframes for input files
xy_coord = pd.read_csv(args.xy_coords, sep="\t", header=None)
reads_umi = pd.read_csv(args.reads_umi, sep="\t")
nReads_CB = pd.read_csv(args.reads_cbc, sep="\t")
expr_summary = pd.read_csv(args.dexpr_sum, sep="\t", skiprows=6)
raw_barcodes = pd.read_csv(args.raw_cbcs, sep="\t", header=None, skiprows=1)
# Clean up and merge dataframes
xy_coord.columns = ["CELL_BARCODE", "x_coor", "y_coor"]
expr_coor = expr_summary.merge(xy_coord)
expr_coor['log_NUM_TRANSCRIPTS'] = np.log10(1 + expr_coor.NUM_TRANSCRIPTS)
raw_barcodes.columns = ["CELL_BARCODE_RAW", "x_coor", "y_coor"]
    CB_counts = nReads_CB[nReads_CB.columns[0:2]].copy()  # copy so the column assignments below do not hit SettingWithCopyWarning
CB_counts.columns = ["nReads", "CELL_BARCODE"]
CB_counts["log_nReads"] = np.log10(1 + CB_counts.nReads)
raw_counts = xy_coord.merge(CB_counts)
# Crop an interesting region from the sample
filtered_counts = raw_counts.loc[(raw_counts.x_coor > args.xcoor_min) &
(raw_counts.x_coor < args.xcoor_max) &
(raw_counts.y_coor > args.ycoor_min) &
(raw_counts.y_coor < args.ycoor_max)]
# Downsample the cropped region by sampling chunks and write to a TSV file
sample_set = filtered_counts.sample(1)
xsteps = (args.xcoor_max - args.xcoor_min) / args.nx_slices
ysteps = (args.ycoor_max - args.ycoor_min) / args.ny_slices
for i in range(args.nx_slices):
xlow = args.xcoor_min + xsteps * i
xhigh = args.xcoor_min + xsteps * (i + 1)
for j in range(args.ny_slices):
ylow = args.ycoor_min + ysteps * j
yhigh = args.ycoor_min + ysteps * (j + 1)
subset_counts = filtered_counts.loc[(filtered_counts.x_coor > xlow) & (filtered_counts.x_coor < xhigh) &
(filtered_counts.y_coor > ylow) & (filtered_counts.y_coor < yhigh)]
            sample_set = pd.concat([sample_set, subset_counts.sample(args.cells_per_chunk)])  # DataFrame.append was removed in pandas 2.0
sample_set.to_csv("sample.tsv", index=False, sep="\t")
# Add raw barcodes to sample set and write to a TSV file
barcodes_raw = []
for x in sample_set.CELL_BARCODE:
barcodes_raw.append(re.sub(r'-\d+$', '', x))
sample_set['CELL_BARCODE_RAW'] = barcodes_raw
matched_sample = sample_set.merge(raw_barcodes)
matched_sample.to_csv("sample_with_barcodes.tsv", index=False, sep="\t")
if __name__ == "__main__":
main()
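
The nested loop above is the core of the downsampling: crop a region, walk an nx-by-ny grid of chunks, and sample a fixed number of cells from each chunk. A self-contained sketch of the same pattern on synthetic coordinates (all names and sizes below are illustrative, not taken from the pipeline):

```python
# Editor's sketch: grid-chunk downsampling on toy data, using pd.concat
# (DataFrame.append was removed in pandas 2.0).
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({"x": rng.uniform(0, 100, 10_000), "y": rng.uniform(0, 100, 10_000)})

nx, ny, per_chunk = 10, 10, 5
xstep, ystep = 100 / nx, 100 / ny
chunks = []
for i in range(nx):
    for j in range(ny):
        cell = df[(df.x >= xstep * i) & (df.x < xstep * (i + 1)) &
                  (df.y >= ystep * j) & (df.y < ystep * (j + 1))]
        chunks.append(cell.sample(min(per_chunk, len(cell)), random_state=0))
sample = pd.concat(chunks, ignore_index=True)
```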

path: /tests/test_facilities.py
repo_name: BioSTEAMDevelopmentGroup/biosteam | branch_name: refs/heads/master | filename: test_facilities.py | extension: py | length_bytes: 2,659
blob_id: ef307b08e15bf158cfcaf4a9f48ab0557431502e | content_id: 7ae68caa19e89baf8fd0e115da541f67c3cb2fce
directory_id: 4d330238c7eb97fac95f3674ab4ddb5114fdf3d7 | snapshot_id: d064c7d5a16d79a966caa68ed3f4cca089f9c49c | revision_id: 0501214b7e7fb16b89d1e45c94938b0e08b1331f
detected_licenses: ["LicenseRef-scancode-unknown-license-reference", "NCSA", "MIT"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-08-20T04:47:10.816994 | revision_date: 2023-08-12T19:22:20 | committer_date: 2023-08-12T19:22:20
github_id: 164,639,830 | star_events_count: 115 | fork_events_count: 29
gha_created_at: 2019-01-08T12:02:16 | gha_event_created_at: 2023-06-10T15:56:37 | gha_language: Python
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2023, Yoel Cortes-Pena <yoelcortes@gmail.com>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
import pytest
import biosteam as bst
from numpy.testing import assert_allclose
from biorefineries import cane
def test_facility_inheritance():
with pytest.raises(bst.exceptions.UnitInheritanceError):
class NewFacility(bst.Facility): pass
class NewFacility(bst.Facility):
network_priority = 2
def test_boiler_turbogenerator():
chemicals = cane.create_sugarcane_chemicals()
chemicals.define_group(
name='Fiber',
IDs=['Cellulose', 'Hemicellulose', 'Lignin'],
composition=[0.4704 , 0.2775, 0.2520],
wt=True, # Composition is given as weight
)
bst.settings.set_thermo(chemicals)
dilute_ethanol = bst.Stream('dilute_ethanol', Water=1390, Ethanol=590)
bagasse = bst.Stream('bagasse', Water=0.4, Fiber=0.6, total_flow=8e4, units='kg/hr')
with bst.System('sys') as sys:
D1 = bst.BinaryDistillation('D1', ins=dilute_ethanol, Lr=0.999, Hr=0.89, k=1.25, LHK=('Ethanol', 'Water'))
BT = bst.BoilerTurbogenerator('BT')
BT.ins[0] = bagasse
# Make sure no natural gas is consumed when excess electricity is produced
for BT.satisfy_system_electricity_demand in (False, True):
sys.simulate()
assert_allclose(
-BT.results().loc['Low pressure steam', 'Duty']['BT'],
D1.results().loc['Low pressure steam', 'Duty']['D1'],
)
assert BT.natural_gas.isempty()
assert sys.power_utility.rate < 0.
# Natural gas should meet electricity demand
bagasse.empty()
sys.simulate()
assert_allclose(
-BT.results().loc['Low pressure steam', 'Duty']['BT'],
D1.results().loc['Low pressure steam', 'Duty']['D1'],
)
assert_allclose(sys.power_utility.rate, 0., atol=1e-6)
# No natural gas should be used to satisfy electricity demand (only for steam utilities)
BT.satisfy_system_electricity_demand = False
sys.simulate()
assert_allclose(
-BT.results().loc['Low pressure steam', 'Duty']['BT'],
D1.results().loc['Low pressure steam', 'Duty']['D1'],
) # Steam utility should always be satisfied
assert_allclose(sys.power_utility.production, 0., atol=1e-6)
assert sys.power_utility.consumption > 0
if __name__ == '__main__':
test_facility_inheritance()
test_boiler_turbogenerator()

path: /colour/recovery/datasets/otsu2018.py
repo_name: colour-science/colour | branch_name: refs/heads/develop | filename: otsu2018.py | extension: py | length_bytes: 47,600
blob_id: a48ec74cb1529c42bf162502f93437c85981fd8d | content_id: 791c16ab8a75316efc1105bb3ca2c4579b833242
directory_id: df1254b56f35b24644e00493c50d4b6eb3c15b7b | snapshot_id: 908400b227cf81668675e41099256ce50b23ae4b | revision_id: 1fdf3b3042922e8d4f86b989b00a06e7e5d81102
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-09-01T23:17:07.186869 | revision_date: 2023-08-26T09:40:45 | committer_date: 2023-08-26T09:40:45
github_id: 17,114,363 | star_events_count: 1,756 | fork_events_count: 301
gha_created_at: 2014-02-23T18:55:40 | gha_event_created_at: 2023-09-14T10:24:37 | gha_language: Python
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
"""
Otsu et al. (2018) - Reflectance Recovery
=========================================
Defines the datasets for reflectance recovery using *Otsu et al. (2018)*
method.
References
----------
- :cite:`Otsu2018` : Otsu, H., Yamamoto, M., & Hachisuka, T. (2018).
Reproducing Spectral Reflectances From Tristimulus Colours. Computer
Graphics Forum, 37(6), 370-381. doi:10.1111/cgf.13332
"""
from __future__ import annotations
import numpy as np
from colour.colorimetry import SpectralShape
from colour.hints import NDArrayFloat
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"SPECTRAL_SHAPE_OTSU2018",
"BASIS_FUNCTIONS_OTSU2018",
"CLUSTER_MEANS_OTSU2018",
"SELECTOR_ARRAY_OTSU2018",
]
SPECTRAL_SHAPE_OTSU2018: SpectralShape = SpectralShape(380, 730, 10)
SPECTRAL_SHAPE_OTSU2018.__doc__ = """
The spectral shape of *Otsu et al. (2018)* basis functions and means.
References
----------
:cite:`Otsu2018`
"""
BASIS_FUNCTIONS_OTSU2018: NDArrayFloat = np.array(
[
[
[
0.033359794,
0.069816766,
0.145858662,
0.208587748,
0.225045781,
0.230260467,
0.234686490,
0.237479132,
0.237922746,
0.235743337,
0.230636966,
0.224868019,
0.220538851,
0.214245998,
0.205011936,
0.194449589,
0.182172137,
0.166753810,
0.149673107,
0.135884660,
0.127383558,
0.121562634,
0.115070543,
0.110303774,
0.108658862,
0.110404095,
0.114133490,
0.118971322,
0.123157804,
0.123054245,
0.119622289,
0.116466163,
0.114699356,
0.115051812,
0.118079600,
0.125700322,
],
[
-0.015712659,
-0.036497435,
-0.083631104,
-0.123972215,
-0.129432141,
-0.113757935,
-0.074978412,
-0.023837819,
0.051049315,
0.138878712,
0.218967535,
0.278285048,
0.297475087,
0.286292061,
0.257830950,
0.206696101,
0.144324772,
0.085519078,
0.038286146,
0.001619797,
-0.035117557,
-0.072027247,
-0.096533704,
-0.103087741,
-0.102368364,
-0.114391855,
-0.139967122,
-0.171162687,
-0.196658516,
-0.207454048,
-0.204801733,
-0.203879004,
-0.213328289,
-0.228958920,
-0.241698598,
-0.251464790,
],
[
-0.020337631,
-0.053622913,
-0.122405706,
-0.179462494,
-0.200564443,
-0.232332240,
-0.283247005,
-0.304278573,
-0.276059936,
-0.220589316,
-0.139053387,
-0.046042855,
0.031991873,
0.103372660,
0.170370407,
0.214905881,
0.240971387,
0.254238542,
0.255631209,
0.238008218,
0.209534818,
0.177808852,
0.153095310,
0.139956117,
0.131477876,
0.116280805,
0.095933679,
0.075698905,
0.065977800,
0.069857488,
0.082988360,
0.090329558,
0.082929925,
0.067734041,
0.053242860,
0.041115251,
],
],
[
[
-0.02257427717386271300,
-0.04580454714541371000,
-0.08954033658115419200,
-0.12005530771055017000,
-0.12563459701293636000,
-0.12595333439645837000,
-0.12670444234219508000,
-0.12715705435395483000,
-0.12913057707506276000,
-0.13354123002336557000,
-0.13855666514343304000,
-0.14308799991330715000,
-0.14951791383958515000,
-0.15845145652482151000,
-0.16495787527646999000,
-0.16781427384520564000,
-0.17170232277018521000,
-0.18042142403466258000,
-0.18962359564086589000,
-0.19407674590552579000,
-0.19587634795792669000,
-0.19670397522552724000,
-0.19619975275648865000,
-0.19518165653001343000,
-0.19403913994258670000,
-0.19368980706846228000,
-0.19277035941743720000,
-0.19234347800755142000,
-0.19207998789953343000,
-0.19183499892170250000,
-0.19191989052413799000,
-0.19216208684132857000,
-0.19273004434759020000,
-0.19273134081681903000,
-0.19197345910434815000,
-0.19195334021916627000,
],
[
-0.02190524719876366800,
-0.04789585619670318200,
-0.10618234949979000000,
-0.15900405113964777000,
-0.17702417653425023000,
-0.18536131560391853000,
-0.19315079255300505000,
-0.20224319049559464000,
-0.21765026428606477000,
-0.24359160942259997000,
-0.27299579285349473000,
-0.29049279880094941000,
-0.28346659709985672000,
-0.24283365807615256000,
-0.19188028232791318000,
-0.15475085829598653000,
-0.12084943547312378000,
-0.07129023972829869000,
-0.01545410159994778000,
0.02855797827859832300,
0.06332656026225293200,
0.09556345974296330500,
0.12304807393347031000,
0.14150866887002808000,
0.15188057104091590000,
0.15797342142055862000,
0.16213144452203984000,
0.16563351215078684000,
0.16485760626112306000,
0.15991188547906210000,
0.15381118424011236000,
0.14657408643195061000,
0.14252150862995686000,
0.14149924680385922000,
0.14426259164189553000,
0.14568116947965443000,
],
[
0.03862192593327843500,
0.08607977892966384200,
0.18758937472970783000,
0.26380287344461750000,
0.27264656532342524000,
0.26415033785924025000,
0.25431550822206783000,
0.24061274238989599000,
0.21646081169191303000,
0.16375249929345426000,
0.06540594390641826800,
-0.05748957147471461100,
-0.16090875920779929000,
-0.22967562960590965000,
-0.28665433153182635000,
-0.32394421018597008000,
-0.31885416680273615000,
-0.26443039994661971000,
-0.19096541067598802000,
-0.13794182744763242000,
-0.09283207180313692000,
-0.04863827809054062400,
-0.00705701194648979300,
0.02590864491028844700,
0.04516812589249209800,
0.05543340685273387400,
0.06264866705777927300,
0.06999797364036092200,
0.07285747008320048200,
0.06852130459855278200,
0.05932205006228336000,
0.04734424846797920200,
0.03456970594041587600,
0.02854412320123144900,
0.03157197229148767800,
0.03359122371504807300,
],
],
[
[
-0.02709280096187565900,
-0.05686223110461408000,
-0.10035667073983964000,
-0.11655686561575981000,
-0.11422957067657943000,
-0.11025418465506862000,
-0.10767542796968975000,
-0.10388290237276118000,
-0.10158225848918870000,
-0.10042808265232403000,
-0.09444391157646114800,
-0.08539718671110264900,
-0.08140351716465142600,
-0.07899836366862676700,
-0.07092361786822773400,
-0.06431797115426903000,
-0.06551130033522271900,
-0.07449341517075493100,
-0.08108361889808800300,
-0.09089442129206014300,
-0.12404701968906916000,
-0.16786332728947920000,
-0.20250158743930527000,
-0.22377717318551535000,
-0.23406228761612988000,
-0.24023286395208010000,
-0.24230652318518972000,
-0.24409552662937148000,
-0.24451095295280248000,
-0.24396897026804176000,
-0.24334193238409188000,
-0.24303731113979424000,
-0.24384555693930871000,
-0.24439637959579152000,
-0.24397837395728775000,
-0.24435930927502705000,
],
[
0.02001148601020115100,
0.04899841658049408600,
0.09110231699495907600,
0.10753208990080826000,
0.09847067341821445800,
0.09702556509879775600,
0.10316156120973860000,
0.10877098463664207000,
0.11985177404764036000,
0.13576834462543336000,
0.14149065311281014000,
0.13685946675439886000,
0.13973613295200196000,
0.14909977184067982000,
0.14654199509091873000,
0.14130994015312165000,
0.14757252813883043000,
0.18077817300610374000,
0.21009387986393199000,
0.23369907594299305000,
0.29227081282780004000,
0.31933306627506691000,
0.26565290061291041000,
0.17228196686094716000,
0.07639027259115091500,
-0.00292291840580726410,
-0.06142800318223973200,
-0.10401144539762601000,
-0.13589472821785523000,
-0.15816114518317095000,
-0.17335642699648807000,
-0.18902786670833094000,
-0.20610333417513085000,
-0.22242224746425526000,
-0.23664147979214531000,
-0.24998288325425144000,
],
[
-0.01860488852357716800,
-0.06851902564207997000,
-0.21326717698986006000,
-0.30102165576283019000,
-0.31649926306090304000,
-0.30060756567987956000,
-0.27084117252238304000,
-0.23425912778142560000,
-0.20338041193211276000,
-0.17461428657405265000,
-0.14854465986513404000,
-0.12781379545378718000,
-0.11230428006257434000,
-0.08797584482037290600,
-0.05537699619205544600,
-0.03089875066975936200,
-0.02743502714477403700,
-0.01578802100640397700,
-0.00205272748385193100,
0.01009006831808588100,
0.08098296928681815400,
0.21731539216862897000,
0.31846566331995801000,
0.32348510302323596000,
0.26108337233732459000,
0.18705889148548510000,
0.11973584972940020000,
0.06706555745724066000,
0.03124326526269944500,
0.00792265230294038160,
-0.00379962533995196120,
-0.01980851148987728500,
-0.03821643563126132800,
-0.06166276636610668700,
-0.08003315946555962500,
-0.09697997451524445500,
],
],
[
[
0.02676824304188611300,
0.05946407475965782100,
0.12900702308575138000,
0.17092577830538522000,
0.17433273953929146000,
0.16835540326878368000,
0.15826424805464265000,
0.14647224313786575000,
0.13460263040798950000,
0.12159264960543423000,
0.10837126623117754000,
0.09545938036189148500,
0.08454657372677823500,
0.07503059715637522300,
0.06496740345758979200,
0.05638854309961657100,
0.05408497845708886600,
0.05543735257043472300,
0.05555858715239536000,
0.05759583879709017500,
0.06345289056498749900,
0.07321840953073149900,
0.09394017460665737800,
0.12959719899113231000,
0.17348415950453580000,
0.21285437318563405000,
0.23979589047651886000,
0.25571215090479932000,
0.26147369410179094000,
0.26060735062830592000,
0.25695755826517525000,
0.25495084519503508000,
0.25508140335236734000,
0.25382248374599431000,
0.25315232973604851000,
0.25321847259615987000,
],
[
-0.01873744847940216800,
-0.05990899326442069400,
-0.13587937041765039000,
-0.19250932779711649000,
-0.21685711379342484000,
-0.23833290753023634000,
-0.25940907727956292000,
-0.27129217353501439000,
-0.27152046701232635000,
-0.25903073272885363000,
-0.23448070616812938000,
-0.19749538078381751000,
-0.16551798851884006000,
-0.14072688005214101000,
-0.11914770422601269000,
-0.11063268885678886000,
-0.10948247432054088000,
-0.10943336389685750000,
-0.10553405777913306000,
-0.10897919714631589000,
-0.11556837510924602000,
-0.12678816534296078000,
-0.12909988927091548000,
-0.09285888955799707900,
-0.00949866580609674310,
0.06301250034760524900,
0.10646985745101370000,
0.12256333095253447000,
0.13523848767144181000,
0.14386501113297953000,
0.15441065150742014000,
0.16682910934595863000,
0.17982703518357265000,
0.19629872698899911000,
0.20993169814916909000,
0.22283191255022117000,
],
[
0.00687799096172833900,
0.00632982236190463440,
-0.01630024725336848600,
-0.05415778649574832400,
-0.07289153491194883000,
-0.08545489000605219200,
-0.09046789436531833800,
-0.08072015476785890400,
-0.07025393562283961700,
-0.05667429771892082400,
-0.04043014967453874300,
-0.01876275509068558700,
-0.00248907267489111750,
0.00499985179874099400,
0.00719666613671487470,
0.00141123759501328960,
0.00138660492172908220,
0.00412730855466753730,
0.00492544274519941470,
0.00863927652266950270,
0.01768264714224524300,
0.02700767827843724200,
0.05358757306348460600,
0.12515958723848053000,
0.24428784772611917000,
0.34242457928205289000,
0.38094858322069408000,
0.35070697286866737000,
0.25959544573108218000,
0.13920978087352909000,
0.00655712404118758820,
-0.11671906463074669000,
-0.21483468555348154000,
-0.28912185847014771000,
-0.34279486721072366000,
-0.38307271805096821000,
],
],
[
[
-0.02119674836738755600,
-0.04591441602701418200,
-0.10481010401668686000,
-0.16258395638429893000,
-0.17889545337044299000,
-0.18072228226884079000,
-0.18112664009792892000,
-0.18032434720631510000,
-0.17971618615047016000,
-0.17871153535087211000,
-0.17563285652502023000,
-0.17119808391063290000,
-0.16853689988040146000,
-0.16561345617512713000,
-0.16019419889858222000,
-0.15573208563516067000,
-0.15491795255226662000,
-0.15656042280911464000,
-0.15661531826131711000,
-0.15764891623922786000,
-0.16300024791746612000,
-0.16902407524171240000,
-0.17285036097124651000,
-0.17473874057599853000,
-0.17535320823704051000,
-0.17679028377314515000,
-0.17819373966686389000,
-0.18018711283913955000,
-0.18137104389357972000,
-0.18095677225518114000,
-0.18017559421686788000,
-0.17972235196479699000,
-0.17972279615663084000,
-0.17977929580104804000,
-0.17996014494612850000,
-0.18116676892356900000,
],
[
0.00361061838890439770,
0.00403832244766750420,
0.00890962969648889660,
0.03524815707980788500,
0.05694248722530626000,
0.07284328219269969900,
0.08929685703045094100,
0.10884229731927056000,
0.12746765305849969000,
0.14422870726029338000,
0.16634052273914524000,
0.18968300551257519000,
0.20235756903744331000,
0.21187377992143250000,
0.23160746600613533000,
0.24467573264157011000,
0.23387224613948968000,
0.20772756148326568000,
0.18558478756428221000,
0.15537257935462540000,
0.08870855418533021800,
0.01177297349282160400,
-0.05488997670980050300,
-0.10700795151145540000,
-0.14115288511720003000,
-0.16117542399322698000,
-0.17218230632555048000,
-0.18073298721042264000,
-0.18906539182769619000,
-0.20004311114038048000,
-0.21240795927734304000,
-0.22370131442259125000,
-0.22996533792354246000,
-0.23257699977177257000,
-0.23162059375757346000,
-0.22709792076398444000,
],
[
0.02648512192955599500,
0.06258688757724231700,
0.14234632723786383000,
0.21547368046916726000,
0.24347885216491971000,
0.24958734564816068000,
0.24081448101006372000,
0.22418376109960433000,
0.19465144188536176000,
0.15445250087507756000,
0.11514475901976122000,
0.07351250327715834500,
0.02535169523252229400,
-0.02399041639806054300,
-0.05483824103107217500,
-0.06922149547192454900,
-0.09331536945909434100,
-0.14508948941010041000,
-0.19321726430075248000,
-0.22742341157660259000,
-0.27490607958077012000,
-0.29854536089141326000,
-0.28507620653536159000,
-0.25817167239338712000,
-0.23466391476319501000,
-0.19576301983826125000,
-0.13733990379390060000,
-0.07219380245437045900,
-0.01490762225910524300,
0.02545250361032095800,
0.04811592885693794000,
0.06542845020422376200,
0.08455968994091871100,
0.10445141018651627000,
0.12195299963801620000,
0.14191426767047791000,
],
],
[
[
0.02949905076348713400,
0.05380114492830617700,
0.08079555140864556900,
0.08673461177222968600,
0.08228344477860428900,
0.07805333953848162000,
0.07658066145809679100,
0.07426332465408649900,
0.07368750818425572100,
0.07497674474470043800,
0.07186747732133795500,
0.06522606040102924000,
0.06403448204208103700,
0.06747959315712472000,
0.06420025096863182800,
0.05857099606594507600,
0.06154483368131926500,
0.08432979913189758700,
0.12590749478955982000,
0.16082638200698035000,
0.19171530712010598000,
0.21543531303958838000,
0.22759452497177649000,
0.23276904041357468000,
0.23427367585435840000,
0.23511801139892841000,
0.23630540140724776000,
0.23765549341378889000,
0.23815849278526871000,
0.23735655259306632000,
0.23776190401926256000,
0.23901602361789720000,
0.23966837305669569000,
0.23992004299222611000,
0.23963837239716360000,
0.24055974593482360000,
],
[
-0.05749317690390509700,
-0.10889049665477113000,
-0.16808106208696935000,
-0.18188449289155134000,
-0.17213928808785378000,
-0.16248122054252026000,
-0.15940007865141995000,
-0.15446135954382603000,
-0.15394359073226441000,
-0.15955177134499235000,
-0.15476803378874412000,
-0.14270086430739456000,
-0.14150192246537266000,
-0.15299122361377698000,
-0.14659024215186020000,
-0.13280364521210400000,
-0.13654216732616947000,
-0.18711606479886939000,
-0.27646809095728458000,
-0.33376341079739913000,
-0.31149240794382088000,
-0.21605192415199054000,
-0.08872261441759031500,
0.01501716445322282900,
0.07459564202244076100,
0.10463473412925806000,
0.12001248564282355000,
0.12979112919629543000,
0.13792378105731262000,
0.14268404174686664000,
0.14677817922155648000,
0.15510473763112062000,
0.16050632288055705000,
0.16731616487345491000,
0.17324998571439973000,
0.17991527864215207000,
],
[
-0.08244436089607173900,
-0.15336030301728437000,
-0.24426622711321333000,
-0.26384931585866828000,
-0.24892857366446980000,
-0.23077245427661355000,
-0.22241952671928703000,
-0.20354553247501470000,
-0.19616122335466987000,
-0.19079938068965510000,
-0.17229835840027194000,
-0.14208870523710324000,
-0.12630253767591793000,
-0.10363604991038586000,
-0.07201968219532281500,
-0.05312886843809411200,
-0.04004075771120371000,
-0.00053440568762490803,
0.16020422722625777000,
0.40718001627624578000,
0.41639470243864790000,
0.27850647135244561000,
0.14470374762313470000,
0.04281535650889119900,
-0.01040587995462491900,
-0.03113779622382366000,
-0.03588005442805115200,
-0.03833924733056591500,
-0.03518356199038780200,
-0.02313756381588575800,
-0.02203870658163908600,
-0.02967782008259734300,
-0.02175113614544999000,
-0.01613469018407163300,
-0.01367555588545224900,
-0.01158219640370229700,
],
],
[
[
0.01870695442590027500,
0.03569161579037078600,
0.04809268054013512300,
0.04888694654858050800,
0.04578808196032359300,
0.04344586049591366000,
0.04325685570994483600,
0.04222189771723057500,
0.04055681087575285100,
0.04028180207781234500,
0.03760574100567435500,
0.03341449865332147400,
0.03052287976995381500,
0.03007254225309254400,
0.02810981770811881200,
0.02523222177779721500,
0.02538661338781571200,
0.02866947214354276900,
0.03807571631578088300,
0.05772133246350924500,
0.10170369006245189000,
0.16294646278543309000,
0.20902545675000525000,
0.23926676595591634000,
0.25599658226130739000,
0.26216124571088945000,
0.26294121758247357000,
0.26471231240392479000,
0.26430141917656441000,
0.26240021240987121000,
0.26487052223495711000,
0.26522291338114623000,
0.26660542213939931000,
0.26541584484337871000,
0.26390547737136760000,
0.26275220730492782000,
],
[
-0.05925964858380286500,
-0.09295695535933538900,
-0.11130621819722603000,
-0.10992697128373242000,
-0.10115389795872447000,
-0.09595515343251508100,
-0.09677723424692862000,
-0.09606038254348969700,
-0.09736670241206574200,
-0.09992202944918748800,
-0.09704383093180786500,
-0.09002079222538757200,
-0.08673352046691186500,
-0.08557770147265368600,
-0.08064000246969554900,
-0.07913645121578150000,
-0.08271807685423399100,
-0.09807361937813430200,
-0.11457149326572891000,
-0.14765147379188692000,
-0.23874210320885786000,
-0.36798022743083686000,
-0.43048231090135342000,
-0.35215701510596309000,
-0.19641466614055952000,
-0.05913838831484832100,
0.02635518902441718500,
0.07834688271614538600,
0.11384481744795055000,
0.13409102253165275000,
0.14769925378529281000,
0.16322353175604573000,
0.18252922264663107000,
0.20305737442069113000,
0.22128357114965899000,
0.23621196298998062000,
],
[
0.11512389978331515000,
0.22413006962808238000,
0.31954389282025103000,
0.32586304644263525000,
0.30397127558670756000,
0.28352212433907487000,
0.26492834051113151000,
0.24455825069511528000,
0.23050435252217924000,
0.21847071243995733000,
0.19137596848708158000,
0.16019742328929820000,
0.13917066569959388000,
0.11837326624126067000,
0.09003968113096123800,
0.07053180362500022200,
0.06633604481687069300,
0.05751726174543667800,
0.01088835472471969500,
-0.03853843360012083900,
-0.08254453556203576700,
-0.10202561217467844000,
-0.14265191289777529000,
-0.20811311672905380000,
-0.22597980923018762000,
-0.16968023714155689000,
-0.09188792560239997200,
-0.03476358458153469600,
0.00581952483262489300,
0.02773258068377005800,
0.01150936667521866600,
0.01175763239248749100,
0.03672792702540145900,
0.07368275531823710600,
0.10450314791523628000,
0.13170821119526419000,
],
],
[
[
-0.01391512397332992000,
-0.02355616580370156800,
-0.03217974689527575100,
-0.03389713623044773900,
-0.03301036281363833200,
-0.03285354371423714800,
-0.03377540548706994800,
-0.03489680631234950300,
-0.03685746938516745000,
-0.04079745564524299400,
-0.04592855526325046600,
-0.05252409573372079900,
-0.06485881641780162600,
-0.08872150998392475100,
-0.12062905249920253000,
-0.14663172901574814000,
-0.16374285973859237000,
-0.17854545670524613000,
-0.19285090669470276000,
-0.20310119935107607000,
-0.20944935602779791000,
-0.21419896526525989000,
-0.21710861272899887000,
-0.21862667803330973000,
-0.22006016766927428000,
-0.22215077344601830000,
-0.22323849303277751000,
-0.22463343282950998000,
-0.22589338966445646000,
-0.22594179947730525000,
-0.22663921592189393000,
-0.22783675444090351000,
-0.22865193500586228000,
-0.22945714446182908000,
-0.22963380305061912000,
-0.23154386034935404000,
],
[
-0.02372876400796839600,
-0.03805126826063243200,
-0.04915951914715022100,
-0.04774626206533361100,
-0.04297236442671773000,
-0.03848115601428303600,
-0.03397642565799539900,
-0.02760803110730725800,
-0.02042716476955505300,
-0.00852478908021793260,
0.01408937245236512900,
0.04675928332177815100,
0.09005268401626519100,
0.15996855175372468000,
0.27983350884234798000,
0.40454760673506523000,
0.46557425740279146000,
0.42988255821256582000,
0.30040479908227047000,
0.16016147242639570000,
0.06193514273781868000,
-0.00634158525039273980,
-0.05591728808815835100,
-0.08801614369298063600,
-0.10645480712256676000,
-0.11439528123368227000,
-0.12148762118556097000,
-0.12853160184622442000,
-0.13211749271913961000,
-0.13124291058777687000,
-0.12758412629342486000,
-0.12246334197729329000,
-0.11696553437732297000,
-0.11538122270705850000,
-0.11848205894639263000,
-0.12091688597538751000,
],
[
0.13614121087578832000,
0.22163172747838700000,
0.29458621263180562000,
0.30561224752530813000,
0.29501884928368216000,
0.28598447595430682000,
0.28333441324709346000,
0.27912705630178736000,
0.27528060133650206000,
0.27037172224265682000,
0.25595129646913195000,
0.23112118692222730000,
0.20491022536680245000,
0.18294409783499188000,
0.09167575728467461400,
-0.03963638360275884000,
-0.11054285410554264000,
-0.07373458712121995500,
0.02683953896149155600,
0.06870196457024961600,
0.05013136970896505500,
0.02356262876335300100,
0.00258884870471166790,
-0.00534038674246432950,
-0.01181644034067877400,
-0.01890402997198788200,
-0.02401552609276516200,
-0.03039072884274394300,
-0.03697543824746714300,
-0.04372079068719348500,
-0.05526250446342715500,
-0.06518385184466982100,
-0.07978737224572156300,
-0.09035377751454824700,
-0.09485714223592185700,
-0.09799962802996180200,
],
],
]
)
"""
Basis functions for *Otsu et al. (2018)*. This is a list of eight arrays, with
one entry for each cluster. Each array contains three basis functions,
quantised in accordance with the
:attr:`colour.recovery.SPECTRAL_SHAPE_OTSU2018` attribute.
References
----------
:cite:`Otsu2018`
"""
CLUSTER_MEANS_OTSU2018: NDArrayFloat = np.array(
[
[
0.10085069182389943000,
0.14557836477987415000,
0.21618955974842774000,
0.26241761006289305000,
0.27539660377358477000,
0.28531383647798736000,
0.29863773584905656000,
0.30855169811320765000,
0.31710716981132059000,
0.32332276729559734000,
0.32056880503144641000,
0.30730465408805036000,
0.29085635220125783000,
0.26974641509433944000,
0.24537761006289302000,
0.22229106918238989000,
0.20037320754716983000,
0.17840641509433955000,
0.15444679245283027000,
0.14225157232704402000,
0.13367911949685529000,
0.12840981132075477000,
0.12297345911949682000,
0.11905270440251571000,
0.11778465408805038000,
0.11974377358490564000,
0.12402981132075469000,
0.12976943396226415000,
0.13483974842767291000,
0.13579132075471703000,
0.13499685534591194000,
0.13219408805031446000,
0.13386704402515726000,
0.13686591194968545000,
0.14213257861635226000,
0.15195597484276738000,
],
[
0.09973996241240880300,
0.13832746329562026000,
0.19263781475547445000,
0.22097216947810219000,
0.22491500950364970000,
0.22584267932481750000,
0.22858438229562053000,
0.23144331213138686000,
0.23944196714598540000,
0.25520659802554735000,
0.27474755001824847000,
0.29176247448175174000,
0.30942757119707986000,
0.32833185188321135000,
0.34152471104014598000,
0.34492205800000048000,
0.34524719568978096000,
0.35006971621532840000,
0.35226373910948888000,
0.35257366194525530000,
0.34635072814963563000,
0.33788450155109495000,
0.32730242764963524000,
0.31817969820072978000,
0.31196273630291999000,
0.30864278988686139000,
0.30511056795255503000,
0.30259412779197087000,
0.30141088878467137000,
0.30166522981386845000,
0.30447054271897822000,
0.30566773899999983000,
0.30979884475912473000,
0.31087716667883208000,
0.30918824107664239000,
0.30983656317883246000,
],
[
0.10163148148148149000,
0.13874148148148155000,
0.18048629629629634000,
0.19058296296296293000,
0.18498296296296293000,
0.17870703703703697000,
0.17455444444444446000,
0.16798407407407415000,
0.16265481481481481000,
0.15838185185185186000,
0.14838851851851853000,
0.13549851851851852000,
0.12967518518518517000,
0.12491777777777781000,
0.11482962962962959000,
0.10857518518518521000,
0.11038555555555557000,
0.11997444444444444000,
0.12557666666666670000,
0.13899777777777780000,
0.18298407407407410000,
0.24777518518518515000,
0.30390333333333330000,
0.34408296296296298000,
0.37026074074074061000,
0.38781962962962946000,
0.39744703703703704000,
0.40430592592592590000,
0.40855074074074083000,
0.41022444444444450000,
0.41229259259259260000,
0.41340555555555564000,
0.41682222222222221000,
0.41964370370370369000,
0.42174666666666671000,
0.42481148148148135000,
],
[
0.10518588235294116000,
0.15928000000000003000,
0.23761058823529413000,
0.27525764705882355000,
0.27820000000000000000,
0.26746470588235294000,
0.24924588235294115000,
0.22593176470588236000,
0.20151176470588236000,
0.17872352941176470000,
0.15638352941176470000,
0.13379058823529413000,
0.11768588235294120000,
0.10634235294117647000,
0.09573411764705883100,
0.08831411764705882100,
0.08577882352941176800,
0.08672941176470587400,
0.08687764705882353200,
0.08794588235294117900,
0.09118941176470588000,
0.09866235294117646500,
0.11569764705882354000,
0.14323176470588234000,
0.17372588235294120000,
0.20689176470588236000,
0.24427647058823532000,
0.28684470588235295000,
0.33063882352941182000,
0.37028352941176473000,
0.40439058823529417000,
0.43227882352941183000,
0.45577764705882357000,
0.47360352941176476000,
0.48674117647058823000,
0.49979882352941163000,
],
[
0.10779898591549292000,
0.15870338028169012000,
0.24335419718309842000,
0.30154478873239432000,
0.31312512676056359000,
0.31187740845070405000,
0.30963605633802815000,
0.30475785915492964000,
0.30064743661971804000,
0.29648360563380305000,
0.28867476056338026000,
0.27758146478873252000,
0.27091515492957735000,
0.26409166197183093000,
0.25295690140845062000,
0.24406754929577457000,
0.24216214084507048000,
0.24517898591549292000,
0.24370647887323940000,
0.24752726760563384000,
0.25809814084507027000,
0.27048856338028177000,
0.27946467605633790000,
0.28564805633802820000,
0.28950569014084515000,
0.29394118309859157000,
0.29796377464788726000,
0.30271183098591542000,
0.30628552112676050000,
0.30746732394366194000,
0.30875188732394343000,
0.30856918309859155000,
0.31044997183098588000,
0.31218912676056354000,
0.31383464788732385000,
0.31724743661971833000,
],
[
0.08891588235294117800,
0.11170882352941176000,
0.13155529411764705000,
0.13345352941176472000,
0.12773352941176472000,
0.12348058823529412000,
0.12199294117647061000,
0.11907411764705882000,
0.11792117647058822000,
0.11871352941176466000,
0.11512823529411763000,
0.10845000000000000000,
0.10843000000000001000,
0.11113294117647059000,
0.10788647058823528000,
0.10542588235294116000,
0.11212294117647059000,
0.13771529411764705000,
0.18127705882352937000,
0.22836529411764706000,
0.28692647058823539000,
0.35075411764705883000,
0.39709058823529408000,
0.42251705882352941000,
0.43389176470588225000,
0.43937470588235289000,
0.44075882352941176000,
0.44195411764705883000,
0.44239529411764700000,
0.44206588235294109000,
0.44273058823529410000,
0.44242411764705880000,
0.44468470588235293000,
0.44592764705882354000,
0.44652235294117637000,
0.44930823529411767000,
],
[
0.08701444444444445000,
0.10270333333333331000,
0.11151777777777777000,
0.10953777777777778000,
0.10569999999999999000,
0.10276222222222224000,
0.10133111111111109000,
0.09741555555555554700,
0.09392333333333334500,
0.09028111111111110400,
0.08486222222222221400,
0.07873666666666664900,
0.07606111111111110700,
0.07359000000000001700,
0.07044000000000000300,
0.06970444444444444400,
0.07159777777777778800,
0.07601444444444444000,
0.07899666666666667300,
0.09216222222222221500,
0.12827333333333332000,
0.20277777777777778000,
0.30198333333333338000,
0.40135111111111121000,
0.48075777777777773000,
0.53332888888888896000,
0.56130111111111125000,
0.57818000000000003000,
0.58851111111111121000,
0.59322111111111120000,
0.59782555555555550000,
0.60259555555555566000,
0.61115000000000008000,
0.61758111111111114000,
0.62303444444444445000,
0.62989222222222208000,
],
[
0.07023857142857142800,
0.08111642857142858900,
0.08923309523809523600,
0.09028619047619047800,
0.08905285714285708600,
0.08834642857142854800,
0.08952357142857139700,
0.09056880952380949600,
0.09293809523809525000,
0.09693595238095237300,
0.10173238095238095000,
0.10764285714285717000,
0.12443142857142857000,
0.16565285714285716000,
0.22232071428571437000,
0.26490809523809533000,
0.29603785714285719000,
0.33891190476190480000,
0.39374523809523820000,
0.44306047619047628000,
0.46590380952380966000,
0.47824261904761900000,
0.48273738095238095000,
0.48290047619047627000,
0.48206547619047607000,
0.48232547619047622000,
0.48180452380952365000,
0.48200666666666664000,
0.48249571428571431000,
0.48179785714285728000,
0.48414190476190488000,
0.48353404761904767000,
0.48836785714285708000,
0.49024928571428561000,
0.49035238095238093000,
0.49339000000000011000,
],
]
)
"""
Cluster means for *Otsu et al. (2018)*. This is a list of eight arrays, with
one entry for each cluster. Each array is the mean of all the spectral
distributions used to create the particular cluster, quantised in accordance
with the :attr:`colour.recovery.SPECTRAL_SHAPE_OTSU2018` attribute.
References
----------
:cite:`Otsu2018`
"""
SELECTOR_ARRAY_OTSU2018: NDArrayFloat = np.array(
[
[0.333444973048471, 1, -3, -1],
[0.428556829741043, 0, 1, -2],
[0.368343583792887, 1, 5, 7],
[0.389059234962091, 0, -5, -4],
[0.464102042665547, 0, 2, 6],
[0.288243127874986, 0, 0, -6],
[0.247072787814766, 1, 3, 4],
]
)
"""
Array describing how to select the appropriate cluster for given *CIE xy*
chromaticity coordinates.
"""
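
Editorially, the three datasets above fit together as a per-cluster PCA model: a recovered reflectance is the cluster mean plus a weighted combination of that cluster's three basis functions, with the cluster chosen from the *CIE xy* coordinates via the selector array. A rough sketch of the reconstruction step only; the weights are arbitrary illustration values (in `colour.recovery` they are solved from the target tristimulus values):

```python
# Editor's sketch: combining the datasets above. Weights are illustrative only.
import numpy as np

cluster = 0
weights = np.array([0.5, -0.1, 0.2])  # in practice solved from the target colour

# (3,) @ (3, 36) -> (36,) samples over SPECTRAL_SHAPE_OTSU2018 (380-730 nm, 10 nm steps)
reflectance = CLUSTER_MEANS_OTSU2018[cluster] + weights @ BASIS_FUNCTIONS_OTSU2018[cluster]
```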

path: /yt_dlp/extractor/seeker.py
repo_name: yt-dlp/yt-dlp | branch_name: refs/heads/master | filename: seeker.py | extension: py | length_bytes: 2,240
blob_id: 8f2ff713b4cab0ca0a2ba2e36193625b9fba8e74 | content_id: 65eb16a09d7b98f5f0918e94829de2db80c4adb4
directory_id: 6c37d1d2437a08e43b13d621d4a8da4da7135b3a | snapshot_id: be040bde10cc40258c879c75ab30215686352824 | revision_id: d3d81cc98f554d0adb87d24bfd6fabaaa803944d
detected_licenses: ["Unlicense", "GPL-2.0-or-later", "MPL-2.0", "BSD-3-Clause", "GPL-3.0-or-later", "LGPL-2.1-only", "BSD-2-Clause", "MIT"] | license_type: permissive | gha_license_id: Unlicense
visit_date: 2023-09-05T21:15:21.050538 | revision_date: 2023-09-05T20:35:23 | committer_date: 2023-09-05T20:35:23
github_id: 307,260,205 | star_events_count: 52,742 | fork_events_count: 5,376
gha_created_at: 2020-10-26T04:22:55 | gha_event_created_at: 2023-09-14T05:22:08 | gha_language: Python
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
import re
from .common import InfoExtractor
from ..utils import (
get_element_by_class,
strip_or_none,
)
class SeekerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?seeker\.com/(?P<display_id>.*)-(?P<article_id>\d+)\.html'
_TESTS = [{
'url': 'http://www.seeker.com/should-trump-be-required-to-release-his-tax-returns-1833805621.html',
'md5': '897d44bbe0d8986a2ead96de565a92db',
'info_dict': {
'id': 'Elrn3gnY',
'ext': 'mp4',
'title': 'Should Trump Be Required To Release His Tax Returns?',
'description': 'md5:41efa8cfa8d627841045eec7b018eb45',
'timestamp': 1490090165,
'upload_date': '20170321',
}
}, {
'url': 'http://www.seeker.com/changes-expected-at-zoos-following-recent-gorilla-lion-shootings-1834116536.html',
'playlist': [
{
'md5': '0497b9f20495174be73ae136949707d2',
'info_dict': {
'id': 'FihYQ8AE',
'ext': 'mp4',
'title': 'The Pros & Cons Of Zoos',
'description': 'md5:d88f99a8ea8e7d25e6ff77f271b1271c',
'timestamp': 1490039133,
'upload_date': '20170320',
},
}
],
'info_dict': {
'id': '1834116536',
'title': 'After Gorilla Killing, Changes Ahead for Zoos',
'description': 'The largest association of zoos and others are hoping to learn from recent incidents that led to the shooting deaths of a gorilla and two lions.',
},
}]
def _real_extract(self, url):
display_id, article_id = self._match_valid_url(url).groups()
webpage = self._download_webpage(url, display_id)
entries = []
for jwp_id in re.findall(r'data-video-id="([a-zA-Z0-9]{8})"', webpage):
entries.append(self.url_result(
'jwplatform:' + jwp_id, 'JWPlatform', jwp_id))
return self.playlist_result(
entries, article_id,
self._og_search_title(webpage),
strip_or_none(get_element_by_class('subtitle__text', webpage)) or self._og_search_description(webpage))

path: /gammapy/stats/tests/test_variability.py
repo_name: gammapy/gammapy | branch_name: refs/heads/main | filename: test_variability.py | extension: py | length_bytes: 2,704
blob_id: a2bec0dbacdaf9e60cd163c1fb78727e49140382 | content_id: 31e59a2053fc9dedc14030d1d1e97e817e2d3e24
directory_id: c1b8b6080f29c8037100080298b897618a826475 | snapshot_id: a5d7acbdde848e92e124fefbce9716faa296f572 | revision_id: 60f03adb8fc7851b9f3ca039512c03a669e3fe10
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-08-16T21:19:06.624561 | revision_date: 2023-08-04T12:13:08 | committer_date: 2023-08-04T12:13:08
github_id: 10,073,640 | star_events_count: 204 | fork_events_count: 184
gha_created_at: 2013-05-15T07:50:40 | gha_event_created_at: 2023-09-14T15:26:05 | gha_language: Python
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.table import Column, Table
from astropy.time import Time
from gammapy.estimators import FluxPoints
from gammapy.stats.variability import compute_chisq, compute_fvar
from gammapy.utils.testing import assert_quantity_allclose
@pytest.fixture(scope="session")
def lc_table():
meta = dict(TIMESYS="utc")
table = Table(
meta=meta,
data=[
Column(Time(["2010-01-01", "2010-01-03"]).mjd, "time_min"),
Column(Time(["2010-01-03", "2010-01-10"]).mjd, "time_max"),
Column([1e-11, 3e-11], "flux", unit="cm-2 s-1"),
Column([0.1e-11, 0.3e-11], "flux_err", unit="cm-2 s-1"),
Column([np.nan, 3.6e-11], "flux_ul", unit="cm-2 s-1"),
Column([False, True], "is_ul"),
],
)
return table
@pytest.fixture(scope="session")
def lc():
meta = dict(TIMESYS="utc", SED_TYPE="flux")
table = Table(
meta=meta,
data=[
Column(Time(["2010-01-01", "2010-01-03", "2010-01-07"]).mjd, "time_min"),
Column(Time(["2010-01-03", "2010-01-07", "2010-01-10"]).mjd, "time_max"),
Column([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]], "e_min", unit="TeV"),
Column([[2.0, 5.0], [2.0, 5.0], [2.0, 5.0]], "e_max", unit="TeV"),
Column(
[[1e-11, 4e-12], [3e-11, np.nan], [1e-11, 1e-12]],
"flux",
unit="cm-2 s-1",
),
Column(
[[0.1e-11, 0.4e-12], [0.3e-11, np.nan], [0.1e-11, 0.1e-12]],
"flux_err",
unit="cm-2 s-1",
),
Column(
[[np.nan, np.nan], [3.6e-11, 1e-11], [1e-11, 1e-12]],
"flux_ul",
unit="cm-2 s-1",
),
Column([[False, False], [True, True], [True, True]], "is_ul"),
Column([[True, True], [True, True], [True, True]], "success"),
],
)
return FluxPoints.from_table(table=table, format="lightcurve")
def test_lightcurve_fvar():
flux = np.array([[1e-11, 4e-12], [3e-11, np.nan], [1e-11, 1e-12]])
flux_err = np.array([[0.1e-11, 0.4e-12], [0.3e-11, np.nan], [0.1e-11, 0.1e-12]])
time_id = 0
fvar, fvar_err = compute_fvar(flux, flux_err, axis=time_id)
assert_allclose(fvar, [0.68322763, 0.84047606])
assert_allclose(fvar_err, [0.06679978, 0.08285806])
def test_lightcurve_chisq(lc_table):
flux = lc_table["flux"].astype("float64")
chi2, pval = compute_chisq(flux)
assert_quantity_allclose(chi2, 1e-11)
assert_quantity_allclose(pval, 0.999997476867478)
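
For context, `compute_fvar` implements the fractional variability amplitude, the square root of the normalised excess variance of a light curve. Below is a minimal, NaN-aware sketch of the point estimate under the standard definition; on the arrays in `test_lightcurve_fvar` it reproduces the asserted values (0.68322763, 0.84047606), while gammapy additionally propagates the uncertainty returned above as `fvar_err`:

```python
# Editor's sketch: F_var = sqrt((S^2 - <sigma_err^2>) / mean^2), the usual
# excess-variance estimator (see e.g. Vaughan et al. 2003).
import numpy as np

def fvar_point_estimate(flux, flux_err, axis=0):
    mean = np.nanmean(flux, axis=axis)
    n = np.sum(~np.isnan(flux), axis=axis)
    s_square = np.nansum((flux - mean) ** 2, axis=axis) / (n - 1)
    sigma_err_sq = np.nanmean(flux_err**2, axis=axis)
    return np.sqrt(s_square - sigma_err_sq) / mean
```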

path: /tests/urlconfs/additional_fields_checks.py
repo_name: axnsan12/drf-yasg | branch_name: refs/heads/master | filename: additional_fields_checks.py | extension: py | length_bytes: 1,655
blob_id: 021ed57f240d9239f72a48d07c98edd3acffd3ce | content_id: c7cea8e6b33191c0b36535095f6668974548753c
directory_id: dc1f176cdb1d6658430582c3b8dbeec2559ce074 | snapshot_id: 662b5c4b079c44107d46fee7c88eb9d21f14f171 | revision_id: 78031f0c189585c30fccb5005a6899f2d34289a9
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: NOASSERTION
visit_date: 2023-08-31T02:20:29.467209 | revision_date: 2023-07-20T14:29:45 | committer_date: 2023-07-20T14:29:45
github_id: 112,461,400 | star_events_count: 3,320 | fork_events_count: 518
gha_created_at: 2017-11-29T10:30:22 | gha_event_created_at: 2023-09-07T09:51:28 | gha_language: Python
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
from django.urls import re_path
from rest_framework import serializers
from testproj.urls import required_urlpatterns
from .url_versioning import SnippetList, SnippetSerializer, VersionedSchemaView, VERSION_PREFIX_URL
class SnippetsSerializer(serializers.HyperlinkedModelSerializer, SnippetSerializer):
ipv4 = serializers.IPAddressField(required=False)
uri = serializers.URLField(required=False)
tracks = serializers.RelatedField(
read_only=True,
allow_null=True,
allow_empty=True,
many=True,
)
class Meta:
fields = tuple(SnippetSerializer().fields.keys()) + ('ipv4', 'uri', 'tracks', 'url',)
model = SnippetList.queryset.model
class SnippetsV2Serializer(SnippetSerializer):
url = serializers.HyperlinkedRelatedField(view_name='snippets-detail', source='*', read_only=True)
other_owner_snippets = serializers.PrimaryKeyRelatedField(
read_only=True,
source='owner.snippets',
many=True
)
owner_snippets = serializers.PrimaryKeyRelatedField(
read_only=True,
many=True
)
class SnippetsV1(SnippetList):
serializer_class = SnippetsSerializer
def get_serializer_class(self):
return self.serializer_class
class SnippetsV2(SnippetsV1):
serializer_class = SnippetsV2Serializer
urlpatterns = required_urlpatterns + [
re_path(VERSION_PREFIX_URL + r"snippets/$", SnippetsV1.as_view()),
re_path(VERSION_PREFIX_URL + r"other_snippets/$", SnippetsV2.as_view()),
re_path(VERSION_PREFIX_URL + r'swagger(?P<format>.json|.yaml)$', VersionedSchemaView.without_ui(),
name='vschema-json'),
]

path: /tests/api/test_controller.py
repo_name: getsentry/freight | branch_name: refs/heads/master | filename: test_controller.py | extension: py | length_bytes: 386
blob_id: 4410b3ee592087d34b5fd5ea2d029d611550ba2a | content_id: 08a0f3f5a8ee3b7e1c683c4856275c4da0c5e84e
directory_id: 29c88ae9d8ac5e498c28f747f54d265408011857 | snapshot_id: 049878d4fdfc1ec7ba35caca094cd3f8c78f364a | revision_id: 17a9c819504f25f1e01178a43018dd660be1e879
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-22T06:01:00.378812 | revision_date: 2023-02-03T03:07:48 | committer_date: 2023-02-03T03:07:48
github_id: 29,835,895 | star_events_count: 571 | fork_events_count: 55
gha_created_at: 2015-01-25T23:17:09 | gha_event_created_at: 2023-05-01T21:03:31 | gha_language: Python
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
from freight.testutils import TestCase
class CatchallTest(TestCase):
def test_simple(self):
path = "/api/0/not-a-real-endpoint"
for method in ("get", "post", "put", "delete", "patch"):
resp = getattr(self.client, method)(path)
assert resp.status_code == 404
assert b"".join(resp.data.splitlines()) == b'{"error": "Not Found"}'

path: /BOJ/3040.백설 공주와 일곱 난쟁이/6047198844.py
repo_name: DKU-STUDY/Algorithm | branch_name: refs/heads/master | filename: 6047198844.py | extension: py | length_bytes: 744
blob_id: 5b2b977287bbf116fbc87159b6d51242ce746e96 | content_id: 3b7121936e8557f66c39cafada4eede6435e53df
directory_id: 3ca67d69abd4e74b7145b340cdda65532f90053b | snapshot_id: 19549516984b52a1c5cd73e1ed1e58f774d6d30e | revision_id: 6f78efdbefd8eedab24e43d74c7dae7f95c2893b
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-02-18T06:48:39.309641 | revision_date: 2023-02-09T07:16:14 | committer_date: 2023-02-09T07:16:14
github_id: 258,455,710 | star_events_count: 175 | fork_events_count: 49
gha_created_at: 2020-04-24T08:42:27 | gha_event_created_at: 2023-02-09T07:16:16 | gha_language: Python
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false
content:
# Problem
# Given the numbers written on the hats of the nine dwarfs, write a program that finds the seven dwarfs. (Among the nine numbers, find the seven whose sum is 100.)
#
# Input
# Nine lines each contain a natural number between 1 and 99 inclusive. All numbers are distinct, and only inputs with a unique answer are given.
#
# Output
# Print the numbers written on the seven dwarfs' hats, one per line.
from itertools import combinations
dwarfs = [int(input()) for _ in range(9)]
for combi in combinations(dwarfs, 7):
    if sum(combi) == 100:
        for dwarf in combi:
            print(dwarf)
break
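# Illustrative alternative (not part of the original submission): exactly two
# of the nine numbers are excluded, so one can instead search for the pair
# summing to sum(dwarfs) - 100 and print the remaining seven, e.g.:
#
#   total = sum(dwarfs)
#   for i in range(9):
#       for j in range(i + 1, 9):
#           if dwarfs[i] + dwarfs[j] == total - 100:
#               print(*(x for k, x in enumerate(dwarfs) if k not in (i, j)), sep="\n")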
|
3d38f1483a58285dc179f31c93c98e3d9814d9d8
|
b74320ad439e37dfa48cd8db38dab3b7a20a36ff
|
/examples/unconditional_image_generation/train_unconditional.py
|
bfa48269026a9f555c3e2e34aeac8dab7e4f53a1
|
[
"Apache-2.0"
] |
permissive
|
huggingface/diffusers
|
c82beba1ec5f0aba01b6744040a5accc41ec2493
|
5eeedd9e3336882d598091e191559f67433b6427
|
refs/heads/main
| 2023-08-29T01:22:52.237910
| 2023-08-28T18:16:27
| 2023-08-28T18:16:27
| 498,011,141
| 17,308
| 3,158
|
Apache-2.0
| 2023-09-14T20:57:44
| 2022-05-30T16:04:02
|
Python
|
UTF-8
|
Python
| false
| false
| 30,039
|
py
|
train_unconditional.py
|
import argparse
import inspect
import logging
import math
import os
import shutil
from datetime import timedelta
from pathlib import Path
from typing import Optional
import accelerate
import datasets
import torch
import torch.nn.functional as F
from accelerate import Accelerator, InitProcessGroupKwargs
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration
from datasets import load_dataset
from huggingface_hub import HfFolder, Repository, create_repo, whoami
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
import diffusers
from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel
from diffusers.optimization import get_scheduler
from diffusers.training_utils import EMAModel
from diffusers.utils import check_min_version, is_accelerate_version, is_tensorboard_available, is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
# Will raise an error if the minimal version of diffusers is not installed. Remove at your own risk.
check_min_version("0.21.0.dev0")
logger = get_logger(__name__, log_level="INFO")
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
if not isinstance(arr, torch.Tensor):
arr = torch.from_numpy(arr)
res = arr[timesteps].float().to(timesteps.device)
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
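# Illustrative example of the shapes involved: with arr of shape (1000,),
# timesteps of shape (4,) and broadcast_shape (4, 1, 1, 1), the returned
# tensor has shape (4, 1, 1, 1), ready to broadcast against a batch of images.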
def parse_args():
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help=(
"The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
" dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
" or to a folder containing files that HF Datasets can understand."
),
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The config of the Dataset, leave as None if there's only one config.",
)
parser.add_argument(
"--model_config_name_or_path",
type=str,
default=None,
help="The config of the UNet model to train, leave as None to use standard DDPM configuration.",
)
parser.add_argument(
"--train_data_dir",
type=str,
default=None,
help=(
"A folder containing the training data. Folder contents must follow the structure described in"
" https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
" must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
),
)
parser.add_argument(
"--output_dir",
type=str,
default="ddpm-model-64",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--overwrite_output_dir", action="store_true")
parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="The directory where the downloaded models and datasets will be stored.",
)
parser.add_argument(
"--resolution",
type=int,
default=64,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop",
default=False,
action="store_true",
help=(
"Whether to center crop the input images to the resolution. If not set, the images will be randomly"
" cropped. The images will be resized to the resolution first before cropping."
),
)
parser.add_argument(
"--random_flip",
default=False,
action="store_true",
help="whether to randomly flip images horizontally",
)
parser.add_argument(
"--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
)
parser.add_argument(
"--eval_batch_size", type=int, default=16, help="The number of images to generate for evaluation."
)
parser.add_argument(
"--dataloader_num_workers",
type=int,
default=0,
help=(
"The number of subprocesses to use for data loading. 0 means that the data will be loaded in the main"
" process."
),
)
parser.add_argument("--num_epochs", type=int, default=100)
parser.add_argument("--save_images_epochs", type=int, default=10, help="How often to save images during training.")
parser.add_argument(
"--save_model_epochs", type=int, default=10, help="How often to save the model during training."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="cosine",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--adam_beta1", type=float, default=0.95, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument(
"--adam_weight_decay", type=float, default=1e-6, help="Weight decay magnitude for the Adam optimizer."
)
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer.")
parser.add_argument(
"--use_ema",
action="store_true",
help="Whether to use Exponential Moving Average for the final model weights.",
)
parser.add_argument("--ema_inv_gamma", type=float, default=1.0, help="The inverse gamma value for the EMA decay.")
parser.add_argument("--ema_power", type=float, default=3 / 4, help="The power value for the EMA decay.")
parser.add_argument("--ema_max_decay", type=float, default=0.9999, help="The maximum decay magnitude for EMA.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--hub_private_repo", action="store_true", help="Whether or not to create a private repository."
)
parser.add_argument(
"--logger",
type=str,
default="tensorboard",
choices=["tensorboard", "wandb"],
help=(
"Whether to use [tensorboard](https://www.tensorflow.org/tensorboard) or [wandb](https://www.wandb.ai)"
" for experiment tracking and logging of model metrics and model checkpoints"
),
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--mixed_precision",
type=str,
default="no",
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU."
),
)
parser.add_argument(
"--prediction_type",
type=str,
default="epsilon",
choices=["epsilon", "sample"],
help="Whether the model should predict the 'epsilon'/noise error or directly the reconstructed image 'x0'.",
)
parser.add_argument("--ddpm_num_steps", type=int, default=1000)
parser.add_argument("--ddpm_num_inference_steps", type=int, default=1000)
parser.add_argument("--ddpm_beta_schedule", type=str, default="linear")
parser.add_argument(
"--checkpointing_steps",
type=int,
default=500,
help=(
"Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming"
" training using `--resume_from_checkpoint`."
),
)
parser.add_argument(
"--checkpoints_total_limit",
type=int,
default=None,
help=("Max number of checkpoints to store."),
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help=(
"Whether training should be resumed from a previous checkpoint. Use a path saved by"
' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
),
)
parser.add_argument(
"--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
)
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
if args.dataset_name is None and args.train_data_dir is None:
raise ValueError("You must specify either a dataset name from the hub or a train data directory.")
return args
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
if token is None:
token = HfFolder.get_token()
if organization is None:
username = whoami(token)["name"]
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def main(args):
logging_dir = os.path.join(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
    kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=7200))  # a large timeout, for high-resolution images or big datasets
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with=args.logger,
project_config=accelerator_project_config,
kwargs_handlers=[kwargs],
)
if args.logger == "tensorboard":
if not is_tensorboard_available():
raise ImportError("Make sure to install tensorboard if you want to use it for logging during training.")
elif args.logger == "wandb":
if not is_wandb_available():
raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
import wandb
# `accelerate` 0.16.0 will have better support for customized saving
if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
# create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
def save_model_hook(models, weights, output_dir):
if args.use_ema:
ema_model.save_pretrained(os.path.join(output_dir, "unet_ema"))
for i, model in enumerate(models):
model.save_pretrained(os.path.join(output_dir, "unet"))
# make sure to pop weight so that corresponding model is not saved again
weights.pop()
def load_model_hook(models, input_dir):
if args.use_ema:
load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DModel)
ema_model.load_state_dict(load_model.state_dict())
ema_model.to(accelerator.device)
del load_model
for i in range(len(models)):
# pop models so that they are not loaded again
model = models.pop()
# load diffusers style into model
load_model = UNet2DModel.from_pretrained(input_dir, subfolder="unet")
model.register_to_config(**load_model.config)
model.load_state_dict(load_model.state_dict())
del load_model
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
create_repo(repo_name, exist_ok=True, token=args.hub_token)
repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token)
            # "w+" truncates the file, so the membership checks on the file
            # object could never match; just (re)write the ignore patterns.
            with open(os.path.join(args.output_dir, ".gitignore"), "w") as gitignore:
                gitignore.write("step_*\n")
                gitignore.write("epoch_*\n")
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
# Initialize the model
if args.model_config_name_or_path is None:
model = UNet2DModel(
sample_size=args.resolution,
in_channels=3,
out_channels=3,
layers_per_block=2,
block_out_channels=(128, 128, 256, 256, 512, 512),
down_block_types=(
"DownBlock2D",
"DownBlock2D",
"DownBlock2D",
"DownBlock2D",
"AttnDownBlock2D",
"DownBlock2D",
),
up_block_types=(
"UpBlock2D",
"AttnUpBlock2D",
"UpBlock2D",
"UpBlock2D",
"UpBlock2D",
"UpBlock2D",
),
)
else:
config = UNet2DModel.load_config(args.model_config_name_or_path)
model = UNet2DModel.from_config(config)
# Create EMA for the model.
if args.use_ema:
ema_model = EMAModel(
model.parameters(),
decay=args.ema_max_decay,
use_ema_warmup=True,
inv_gamma=args.ema_inv_gamma,
power=args.ema_power,
model_cls=UNet2DModel,
model_config=model.config,
)
if args.enable_xformers_memory_efficient_attention:
if is_xformers_available():
import xformers
xformers_version = version.parse(xformers.__version__)
if xformers_version == version.parse("0.0.16"):
                logger.warning(
"xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
)
model.enable_xformers_memory_efficient_attention()
else:
raise ValueError("xformers is not available. Make sure it is installed correctly")
# Initialize the scheduler
accepts_prediction_type = "prediction_type" in set(inspect.signature(DDPMScheduler.__init__).parameters.keys())
if accepts_prediction_type:
noise_scheduler = DDPMScheduler(
num_train_timesteps=args.ddpm_num_steps,
beta_schedule=args.ddpm_beta_schedule,
prediction_type=args.prediction_type,
)
else:
noise_scheduler = DDPMScheduler(num_train_timesteps=args.ddpm_num_steps, beta_schedule=args.ddpm_beta_schedule)
# Initialize the optimizer
optimizer = torch.optim.AdamW(
model.parameters(),
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
# Get the datasets: you can either provide your own training and evaluation files (see below)
# or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
dataset = load_dataset(
args.dataset_name,
args.dataset_config_name,
cache_dir=args.cache_dir,
split="train",
)
else:
dataset = load_dataset("imagefolder", data_dir=args.train_data_dir, cache_dir=args.cache_dir, split="train")
# See more about loading custom images at
# https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder
# Preprocessing the datasets and DataLoaders creation.
augmentations = transforms.Compose(
[
transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def transform_images(examples):
images = [augmentations(image.convert("RGB")) for image in examples["image"]]
return {"input": images}
logger.info(f"Dataset size: {len(dataset)}")
dataset.set_transform(transform_images)
train_dataloader = torch.utils.data.DataLoader(
dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers
)
# Initialize the learning rate scheduler
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
num_training_steps=(len(train_dataloader) * args.num_epochs),
)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, lr_scheduler
)
if args.use_ema:
ema_model.to(accelerator.device)
# We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
if accelerator.is_main_process:
run = os.path.split(__file__)[-1].split(".")[0]
accelerator.init_trackers(run)
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
max_train_steps = args.num_epochs * num_update_steps_per_epoch
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(dataset)}")
logger.info(f" Num Epochs = {args.num_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_train_steps}")
global_step = 0
first_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint != "latest":
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = os.listdir(args.output_dir)
dirs = [d for d in dirs if d.startswith("checkpoint")]
dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
path = dirs[-1] if len(dirs) > 0 else None
if path is None:
accelerator.print(
f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
)
args.resume_from_checkpoint = None
else:
accelerator.print(f"Resuming from checkpoint {path}")
accelerator.load_state(os.path.join(args.output_dir, path))
global_step = int(path.split("-")[1])
resume_global_step = global_step * args.gradient_accumulation_steps
first_epoch = global_step // num_update_steps_per_epoch
resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
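            # Worked example: with 100 optimizer updates per epoch and
            # gradient_accumulation_steps=2, resuming from checkpoint-250
            # gives first_epoch=2 and resume_step=100, i.e. the first 100
            # dataloader batches of epoch 2 are skipped below.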
# Train!
for epoch in range(first_epoch, args.num_epochs):
model.train()
progress_bar = tqdm(total=num_update_steps_per_epoch, disable=not accelerator.is_local_main_process)
progress_bar.set_description(f"Epoch {epoch}")
for step, batch in enumerate(train_dataloader):
# Skip steps until we reach the resumed step
if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
if step % args.gradient_accumulation_steps == 0:
progress_bar.update(1)
continue
clean_images = batch["input"]
# Sample noise that we'll add to the images
noise = torch.randn(
clean_images.shape, dtype=(torch.float32 if args.mixed_precision == "no" else torch.float16)
).to(clean_images.device)
bsz = clean_images.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(
0, noise_scheduler.config.num_train_timesteps, (bsz,), device=clean_images.device
).long()
# Add noise to the clean images according to the noise magnitude at each timestep
# (this is the forward diffusion process)
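            # For the standard DDPM scheduler this computes
            # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
            # with alpha_bar_t the cumulative product of alphas at timestep t.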
noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps)
with accelerator.accumulate(model):
# Predict the noise residual
model_output = model(noisy_images, timesteps).sample
if args.prediction_type == "epsilon":
loss = F.mse_loss(model_output, noise) # this could have different weights!
elif args.prediction_type == "sample":
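                    # Sketch of the rationale: given the forward process
                    # above, the signal-to-noise ratio of x_t is
                    # alpha_bar_t / (1 - alpha_bar_t), which is used below to
                    # weight the per-pixel MSE against the clean images.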
alpha_t = _extract_into_tensor(
noise_scheduler.alphas_cumprod, timesteps, (clean_images.shape[0], 1, 1, 1)
)
snr_weights = alpha_t / (1 - alpha_t)
loss = snr_weights * F.mse_loss(
model_output, clean_images, reduction="none"
) # use SNR weighting from distillation paper
loss = loss.mean()
else:
raise ValueError(f"Unsupported prediction type: {args.prediction_type}")
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
if args.use_ema:
ema_model.step(model.parameters())
progress_bar.update(1)
global_step += 1
if global_step % args.checkpointing_steps == 0:
# _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
if args.checkpoints_total_limit is not None:
checkpoints = os.listdir(args.output_dir)
checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
# before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
if len(checkpoints) >= args.checkpoints_total_limit:
num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
removing_checkpoints = checkpoints[0:num_to_remove]
logger.info(
f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
)
logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
for removing_checkpoint in removing_checkpoints:
removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
shutil.rmtree(removing_checkpoint)
if accelerator.is_main_process:
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
accelerator.save_state(save_path)
logger.info(f"Saved state to {save_path}")
logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0], "step": global_step}
if args.use_ema:
logs["ema_decay"] = ema_model.cur_decay_value
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
progress_bar.close()
accelerator.wait_for_everyone()
# Generate sample images for visual inspection
if accelerator.is_main_process:
if epoch % args.save_images_epochs == 0 or epoch == args.num_epochs - 1:
unet = accelerator.unwrap_model(model)
if args.use_ema:
ema_model.store(unet.parameters())
ema_model.copy_to(unet.parameters())
pipeline = DDPMPipeline(
unet=unet,
scheduler=noise_scheduler,
)
generator = torch.Generator(device=pipeline.device).manual_seed(0)
# run pipeline in inference (sample random noise and denoise)
images = pipeline(
generator=generator,
batch_size=args.eval_batch_size,
num_inference_steps=args.ddpm_num_inference_steps,
output_type="numpy",
).images
if args.use_ema:
ema_model.restore(unet.parameters())
# denormalize the images and save to tensorboard
images_processed = (images * 255).round().astype("uint8")
if args.logger == "tensorboard":
if is_accelerate_version(">=", "0.17.0.dev0"):
tracker = accelerator.get_tracker("tensorboard", unwrap=True)
else:
tracker = accelerator.get_tracker("tensorboard")
tracker.add_images("test_samples", images_processed.transpose(0, 3, 1, 2), epoch)
elif args.logger == "wandb":
# Upcoming `log_images` helper coming in https://github.com/huggingface/accelerate/pull/962/files
accelerator.get_tracker("wandb").log(
{"test_samples": [wandb.Image(img) for img in images_processed], "epoch": epoch},
step=global_step,
)
if epoch % args.save_model_epochs == 0 or epoch == args.num_epochs - 1:
# save the model
unet = accelerator.unwrap_model(model)
if args.use_ema:
ema_model.store(unet.parameters())
ema_model.copy_to(unet.parameters())
pipeline = DDPMPipeline(
unet=unet,
scheduler=noise_scheduler,
)
pipeline.save_pretrained(args.output_dir)
if args.use_ema:
ema_model.restore(unet.parameters())
if args.push_to_hub:
repo.push_to_hub(commit_message=f"Epoch {epoch}", blocking=False)
accelerator.end_training()
if __name__ == "__main__":
args = parse_args()
main(args)
|
6150fd8893ef2b8cc9184adcfc52b7a99858566d
|
e22eeb5256e17a96a98b3ff25433aec2d641cd2c
|
/openstack/clustering/v1/cluster.py
|
618d7af3bd8e9f9ac9af880a13a51028a6dc0e76
|
[
"Apache-2.0"
] |
permissive
|
openstack/openstacksdk
|
b4b95fd7869653feea5a3b783e9a5c588235c039
|
d474eb84c605c429bb9cccb166cabbdd1654d73c
|
refs/heads/master
| 2023-09-03T22:50:03.398512
| 2023-07-27T14:09:35
| 2023-08-29T16:28:46
| 16,223,378
| 124
| 130
|
Apache-2.0
| 2023-09-06T02:52:47
| 2014-01-25T02:48:00
|
Python
|
UTF-8
|
Python
| false
| false
| 6,608
|
py
|
cluster.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.clustering.v1 import _async_resource
from openstack.common import metadata
from openstack import resource
from openstack import utils
class Cluster(_async_resource.AsyncResource, metadata.MetadataMixin):
resource_key = 'cluster'
resources_key = 'clusters'
base_path = '/clusters'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
commit_method = 'PATCH'
_query_mapping = resource.QueryParameters(
'name', 'status', 'sort', 'global_project'
)
# Properties
#: The name of the cluster.
name = resource.Body('name')
#: The ID of the profile used by this cluster.
profile_id = resource.Body('profile_id')
#: The ID of the user who created this cluster, thus the owner of it.
user_id = resource.Body('user')
#: The ID of the project this cluster belongs to.
project_id = resource.Body('project')
#: The domain ID of the cluster owner.
domain_id = resource.Body('domain')
#: Timestamp of when the cluster was initialized.
#: *Type: datetime object parsed from ISO 8601 formatted string*
init_at = resource.Body('init_at')
#: Timestamp of when the cluster was created.
#: *Type: datetime object parsed from ISO 8601 formatted string*
created_at = resource.Body('created_at')
#: Timestamp of when the cluster was last updated.
#: *Type: datetime object parsed from ISO 8601 formatted string*
updated_at = resource.Body('updated_at')
#: Lower bound (inclusive) for the size of the cluster.
min_size = resource.Body('min_size', type=int)
    #: Upper bound (inclusive) for the size of the cluster. A value of
    #: -1 indicates that there is no upper limit on the cluster size.
max_size = resource.Body('max_size', type=int)
#: Desired capacity for the cluster. A cluster would be created at the
#: scale specified by this value.
desired_capacity = resource.Body('desired_capacity', type=int)
#: Default timeout (in seconds) for cluster operations.
timeout = resource.Body('timeout')
#: A string representation of the cluster status.
status = resource.Body('status')
    #: A string describing the reason why the cluster is in its current status.
    status_reason = resource.Body('status_reason')
    #: A dictionary of configuration data for the cluster.
config = resource.Body('config', type=dict)
#: A collection of key-value pairs that are attached to the cluster.
metadata = resource.Body('metadata', type=dict)
#: A dictionary with some runtime data associated with the cluster.
data = resource.Body('data', type=dict)
    #: A list of IDs of the nodes that are members of the cluster.
node_ids = resource.Body('nodes')
#: Name of the profile used by the cluster.
profile_name = resource.Body('profile_name')
#: Specify whether the cluster update should only pertain to the profile.
is_profile_only = resource.Body('profile_only', type=bool)
#: A dictionary with dependency information of the cluster
dependents = resource.Body('dependents', type=dict)
def action(self, session, body):
url = utils.urljoin(self.base_path, self._get_id(self), 'actions')
resp = session.post(url, json=body)
return resp.json()
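    # Illustrative request shape: add_nodes below ends up POSTing
    # {"add_nodes": {"nodes": [...]}} to /clusters/<cluster-id>/actions and
    # returning the service's JSON response (typically the triggered action).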
def add_nodes(self, session, nodes):
body = {
'add_nodes': {
'nodes': nodes,
}
}
return self.action(session, body)
def del_nodes(self, session, nodes, **params):
data = {'nodes': nodes}
data.update(params)
body = {'del_nodes': data}
return self.action(session, body)
def replace_nodes(self, session, nodes):
body = {
'replace_nodes': {
'nodes': nodes,
}
}
return self.action(session, body)
def scale_out(self, session, count=None):
body = {
'scale_out': {
'count': count,
}
}
return self.action(session, body)
def scale_in(self, session, count=None):
body = {
'scale_in': {
'count': count,
}
}
return self.action(session, body)
def resize(self, session, **params):
body = {'resize': params}
return self.action(session, body)
def policy_attach(self, session, policy_id, **params):
data = {'policy_id': policy_id}
data.update(params)
body = {'policy_attach': data}
return self.action(session, body)
def policy_detach(self, session, policy_id):
body = {
'policy_detach': {
'policy_id': policy_id,
}
}
return self.action(session, body)
def policy_update(self, session, policy_id, **params):
data = {'policy_id': policy_id}
data.update(params)
body = {'policy_update': data}
return self.action(session, body)
def check(self, session, **params):
body = {'check': params}
return self.action(session, body)
def recover(self, session, **params):
body = {'recover': params}
return self.action(session, body)
def op(self, session, operation, **params):
"""Perform an operation on the cluster.
:param session: A session object used for sending request.
:param operation: A string representing the operation to be performed.
:param dict params: An optional dict providing the parameters for the
operation.
:returns: A dictionary containing the action ID.
"""
url = utils.urljoin(self.base_path, self.id, 'ops')
resp = session.post(url, json={operation: params})
return resp.json()
def force_delete(self, session):
"""Force delete a cluster."""
body = {'force': True}
url = utils.urljoin(self.base_path, self.id)
response = session.delete(url, json=body)
return self._delete_response(response)
|
501d83860f75406871835865bddccfc022750268
|
9b0160854984c06c76697e41c8912ea0507776d0
|
/scripts/ml_transferKeytimes.py
|
b86ca2eee84ff73424d504672ac96739e1e709f4
|
[
"MIT"
] |
permissive
|
morganloomis/ml_tools
|
1909ac410b91b76e4892bc21d13f70bb23faf972
|
347e28eeb7d99f6e8cd478a7d70a6644c5238e0b
|
refs/heads/master
| 2023-07-06T09:02:05.883016
| 2023-04-21T23:05:52
| 2023-04-21T23:05:52
| 63,684,121
| 163
| 60
|
MIT
| 2020-04-30T09:58:44
| 2016-07-19T10:20:38
|
Python
|
UTF-8
|
Python
| false
| false
| 5,280
|
py
|
ml_transferKeytimes.py
|
# -= ml_transferKeytimes.py =-
# __ by Morgan Loomis
# ____ ___ / / http://morganloomis.com
# / __ `__ \/ / Revision 3
# / / / / / / / 2018-05-13
# /_/ /_/ /_/_/ _________
# /_________/
#
# ______________
# - -/__ License __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copyright 2018 Morgan Loomis
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# ___________________
# - -/__ Installation __/- - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copy this file into your maya scripts directory, for example:
# C:/Documents and Settings/user/My Documents/maya/scripts/ml_transferKeytimes.py
#
# Run the tool in a python shell or shelf button by importing the module,
# and then calling the primary function:
#
# import ml_transferKeytimes
# ml_transferKeytimes.main()
#
#
# __________________
# - -/__ Description __/- - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copy keytimes from one node to another. Animation isn't fundamentally changed,
# but keys will be added or deleted.
#
# ____________
# - -/__ Usage __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Select the source and destination nodes, keytimes will be transferred from
# first to second selection. Run the command.
#
#
# ___________________
# - -/__ Requirements __/- - - - - - - - - - - - - - - - - - - - - - - - - -
#
# This script requires the ml_utilities module, which can be downloaded here:
# https://raw.githubusercontent.com/morganloomis/ml_tools/master/ml_utilities.py
#
# __________
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /_ Enjoy! _/- - -
__author__ = 'Morgan Loomis'
__license__ = 'MIT'
__revision__ = 3
__category__ = 'animation'
shelfButton = {'annotation': 'Select two nodes to transfer the keytimes from the first to the second.',
'imageOverlayLabel': 'xfer',
'order': 9}
import maya.cmds as mc
from maya import OpenMaya
try:
import ml_utilities as utl
utl.upToDateCheck(32)
except ImportError:
result = mc.confirmDialog( title='Module Not Found',
message='This tool requires the ml_utilities module. Once downloaded you will need to restart Maya.',
button=['Download Module','Cancel'],
defaultButton='Cancel', cancelButton='Cancel', dismissString='Cancel' )
if result == 'Download Module':
mc.showHelp('http://morganloomis.com/tool/ml_utilities/',absolute=True)
def main():
sel = mc.ls(sl=True)
if len(sel) < 2:
OpenMaya.MGlobal.displayWarning('Select 2 or more objects.')
return
transferKeytimes(sel[0], sel[1:])
mc.select(sel)
def transferKeytimes(source, destinations):
if not isinstance(destinations, (list, tuple)):
destinations = [destinations]
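    # Approach: bake the destinations densely over the source's key range,
    # euler-filter the resulting curves, then cut every whole-frame key the
    # source does not have, leaving the destinations keyed (roughly) at the
    # source's keytimes only.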
attributes = mc.listAttr(source, keyable=True, unlocked=True)
keytimes = dict()
start = None
end = None
for a in attributes:
currKeytimes = mc.keyframe(source, attribute=a, query=True, timeChange=True)
if not currKeytimes:
continue
        if start is None or currKeytimes[0] < start:
            start = currKeytimes[0]
        if end is None or currKeytimes[-1] > end:
            end = currKeytimes[-1]
keytimes[a] = currKeytimes
if not keytimes:
return
with utl.IsolateViews():
mc.bakeResults(destinations, time=(start,end), sampleBy=1, preserveOutsideKeys=True, simulation=True)
#euler filter
mc.filterCurve(mc.listConnections(destinations,type='animCurve'))
#go through all keys and delete
for k in keytimes:
for f in range(int(start), int(end)):
                if f not in keytimes[k]:
mc.cutKey(destinations, attribute=k, time=(f,))
if __name__ == '__main__': main()
# ______________________
# - -/__ Revision History __/- - - - - - - - - - - - - - - - - - - - - - - -
#
# Revision 1: 2014-03-02 : First publish.
#
# Revision 2: 2018-02-17 : Updating license to MIT.
#
# Revision 3: 2018-05-13 : shelf support
|
132b20af8c2799e683c321b035d4639fd839bb7d
|
1bc67a91d85a7106106ca31307ef9ee93f1d1a20
|
/src/py/flwr/common/typing.py
|
4257bf8e3279fee21560a1ec9287d05f2bd9697a
|
[
"Apache-2.0"
] |
permissive
|
adap/flower
|
4915d143c674eb675504d585e1e90ed06833812f
|
55be690535e5f3feb33c888c3e4a586b7bdbf489
|
refs/heads/main
| 2023-08-17T01:18:12.168723
| 2023-08-16T17:17:48
| 2023-08-16T17:17:48
| 241,095,326
| 2,999
| 658
|
Apache-2.0
| 2023-09-14T15:43:22
| 2020-02-17T11:51:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,952
|
py
|
typing.py
|
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Flower type definitions."""
from dataclasses import dataclass
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import numpy.typing as npt
NDArray = npt.NDArray[Any]
NDArrayInt = npt.NDArray[np.int_]
NDArrayFloat = npt.NDArray[np.float_]
NDArrays = List[NDArray]
# The following union type contains Python types corresponding to ProtoBuf types that
# ProtoBuf considers to be "Scalar Value Types", even though some of them arguably do
# not conform to other definitions of what a scalar is. Source:
# https://developers.google.com/protocol-buffers/docs/overview#scalar
Scalar = Union[bool, bytes, float, int, str]
Value = Union[
bool,
bytes,
float,
int,
str,
List[bool],
List[bytes],
List[float],
List[int],
List[str],
]
Metrics = Dict[str, Scalar]
MetricsAggregationFn = Callable[[List[Tuple[int, Metrics]]], Metrics]
Config = Dict[str, Scalar]
Properties = Dict[str, Scalar]
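# Illustrative example: {"accuracy": 0.91, "round": 3, "strategy": "fedavg"}
# is a valid Metrics/Config/Properties value, since every value is one of the
# Scalar member types (the keys shown here are made up for illustration).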
class Code(Enum):
"""Client status codes."""
OK = 0
GET_PROPERTIES_NOT_IMPLEMENTED = 1
GET_PARAMETERS_NOT_IMPLEMENTED = 2
FIT_NOT_IMPLEMENTED = 3
EVALUATE_NOT_IMPLEMENTED = 4
@dataclass
class Status:
"""Client status."""
code: Code
message: str
@dataclass
class Parameters:
"""Model parameters."""
tensors: List[bytes]
tensor_type: str
@dataclass
class GetParametersIns:
"""Parameters request for a client."""
config: Config
@dataclass
class GetParametersRes:
"""Response when asked to return parameters."""
status: Status
parameters: Parameters
@dataclass
class FitIns:
"""Fit instructions for a client."""
parameters: Parameters
config: Dict[str, Scalar]
@dataclass
class FitRes:
"""Fit response from a client."""
status: Status
parameters: Parameters
num_examples: int
metrics: Dict[str, Scalar]
@dataclass
class EvaluateIns:
"""Evaluate instructions for a client."""
parameters: Parameters
config: Dict[str, Scalar]
@dataclass
class EvaluateRes:
"""Evaluate response from a client."""
status: Status
loss: float
num_examples: int
metrics: Dict[str, Scalar]
@dataclass
class GetPropertiesIns:
"""Properties request for a client."""
config: Config
@dataclass
class GetPropertiesRes:
"""Properties response from a client."""
status: Status
properties: Properties
@dataclass
class ReconnectIns:
"""ReconnectIns message from server to client."""
seconds: Optional[int]
@dataclass
class DisconnectRes:
"""DisconnectRes message from client to server."""
reason: str
@dataclass
class ServerMessage:
"""ServerMessage is a container used to hold one instruction message."""
get_properties_ins: Optional[GetPropertiesIns] = None
get_parameters_ins: Optional[GetParametersIns] = None
fit_ins: Optional[FitIns] = None
evaluate_ins: Optional[EvaluateIns] = None
@dataclass
class ClientMessage:
"""ClientMessage is a container used to hold one result message."""
get_properties_res: Optional[GetPropertiesRes] = None
get_parameters_res: Optional[GetParametersRes] = None
fit_res: Optional[FitRes] = None
evaluate_res: Optional[EvaluateRes] = None
|
32b959db57e2f921017ff2bdddeb4ca48b58529c
|
bd2dce4de0473673b8b553b31963ae34c123b94c
|
/pecan/commands/base.py
|
1cb92c82423af486c9973ac095d9248a80758b43
|
[
"BSD-3-Clause"
] |
permissive
|
pecan/pecan
|
538d3f39acc4041ee2346fd2e06ae6cd6fe7947c
|
606ff430696f493bd00fc3532c3997d4753c3564
|
refs/heads/master
| 2023-08-21T10:03:24.017804
| 2023-07-15T15:26:32
| 2023-07-15T15:26:45
| 39,793,378
| 119
| 49
|
BSD-3-Clause
| 2023-07-14T16:09:07
| 2015-07-27T19:24:58
|
Python
|
UTF-8
|
Python
| false
| false
| 4,783
|
py
|
base.py
|
import pkg_resources
import argparse
import logging
import sys
from warnings import warn
log = logging.getLogger(__name__)
class HelpfulArgumentParser(argparse.ArgumentParser):
def error(self, message): # pragma: nocover
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_help(sys.stderr)
self._print_message('\n')
self.exit(2, '%s: %s\n' % (self.prog, message))
class CommandManager(object):
""" Used to discover `pecan.command` entry points. """
def __init__(self):
self.commands = {}
self.load_commands()
def load_commands(self):
for ep in pkg_resources.iter_entry_points('pecan.command'):
log.debug('%s loading plugin %s', self.__class__.__name__, ep)
if ep.name in self.commands:
warn(
"Duplicate entry points found on `%s` - ignoring %s" % (
ep.name,
ep
),
RuntimeWarning
)
continue
try:
cmd = ep.load()
                cmd.run  # ensure existence; catch AttributeError otherwise
except Exception as e: # pragma: nocover
warn("Unable to load plugin %s: %s" % (ep, e), RuntimeWarning)
continue
self.add({ep.name: cmd})
def add(self, cmd):
self.commands.update(cmd)
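# Illustrative registration (names are hypothetical): a third-party package
# exposes a command through the 'pecan.command' entry-point group, e.g. in
# its setup.py:
#
#   entry_points={
#       'pecan.command': ['my-cmd = mypkg.commands:MyCommand'],
#   }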
class CommandRunner(object):
""" Dispatches `pecan` command execution requests. """
def __init__(self):
self.manager = CommandManager()
self.parser = HelpfulArgumentParser(add_help=True)
self.parser.add_argument(
'--version',
action='version',
version='Pecan %s' % self.version
)
self.parse_sub_commands()
def parse_sub_commands(self):
subparsers = self.parser.add_subparsers(
dest='command_name',
metavar='command'
)
for name, cmd in self.commands.items():
sub = subparsers.add_parser(
name,
help=cmd.summary
)
for arg in getattr(cmd, 'arguments', tuple()):
arg = arg.copy()
if isinstance(arg.get('name'), str):
sub.add_argument(arg.pop('name'), **arg)
elif isinstance(arg.get('name'), list):
sub.add_argument(*arg.pop('name'), **arg)
def run(self, args):
ns = self.parser.parse_args(args)
if ns.command_name is None:
self.run(['--help'])
return
self.commands[ns.command_name]().run(ns)
@classmethod
def handle_command_line(cls): # pragma: nocover
runner = CommandRunner()
runner.run(sys.argv[1:])
@property
def version(self):
return pkg_resources.get_distribution('pecan').version
@property
def commands(self):
return self.manager.commands
class BaseCommandMeta(type):
@property
def summary(cls):
"""
This is used to populate the --help argument on the command line.
This provides a default behavior which takes the first sentence of the
command's docstring and uses it.
"""
return cls.__doc__.strip().splitlines()[0].rstrip('.')
class BaseCommandParent(object):
"""
A base interface for Pecan commands.
Can be extended to support ``pecan`` command extensions in individual Pecan
projects, e.g.,
$ ``pecan my-custom-command config.py``
::
# myapp/myapp/custom_command.py
class CustomCommand(pecan.commands.base.BaseCommand):
'''
(First) line of the docstring is used to summarize the command.
'''
arguments = ({
'name': '--extra_arg',
'help': 'an extra command line argument',
'optional': True
            },)
def run(self, args):
                super(CustomCommand, self).run(args)
if args.extra_arg:
pass
"""
arguments = ({
'name': 'config_file',
'help': 'a Pecan configuration file',
'nargs': '?',
'default': None,
},)
def run(self, args):
"""To be implemented by subclasses."""
self.args = args
def load_app(self):
from pecan import load_app
return load_app(self.args.config_file)
BaseCommand = BaseCommandMeta('BaseCommand', (BaseCommandParent,), {
'__doc__': BaseCommandParent.__doc__
})
|
dc8e365f27018e8e4d8a83070848f903fd097cc8
|
d87acfc6fa8dcf71ac26eebbd6069a938222efc3
|
/captum/insights/attr_vis/widget/__init__.py
|
82f0af8d40acba40a629f0030ca1346af22fd364
|
[
"BSD-3-Clause"
] |
permissive
|
pytorch/captum
|
aedeec58d34c7611ae8928144e9f2314f820c1ca
|
945c582cc0b08885c4e2bfecb020abdfac0122f3
|
refs/heads/master
| 2023-09-04T08:49:54.120380
| 2023-07-08T00:30:37
| 2023-07-08T00:30:37
| 204,734,444
| 4,230
| 491
|
BSD-3-Clause
| 2023-09-08T17:58:15
| 2019-08-27T15:34:41
|
Python
|
UTF-8
|
Python
| false
| false
| 394
|
py
|
__init__.py
|
from captum.insights.attr_vis.widget._version import __version__, version_info # noqa
from captum.insights.attr_vis.widget.widget import * # noqa
def _jupyter_nbextension_paths():
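    # A sketch of what this tells Jupyter: serve the bundled assets from the
    # package's "static" directory under "jupyter-captum-insights", and load
    # its "extension" module when the notebook front-end starts.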
return [
{
"section": "notebook",
"src": "static",
"dest": "jupyter-captum-insights",
"require": "jupyter-captum-insights/extension",
}
]
|
0de5c69de292c1f33c8b77aa5310efac4b200c4e
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/icloud/const.py
|
231f2cc1d0a0a1529026ba0474621e2fb064d4ae
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,972
|
py
|
const.py
|
"""iCloud component constants."""
from homeassistant.const import Platform
DOMAIN = "icloud"
CONF_WITH_FAMILY = "with_family"
CONF_MAX_INTERVAL = "max_interval"
CONF_GPS_ACCURACY_THRESHOLD = "gps_accuracy_threshold"
DEFAULT_WITH_FAMILY = False
DEFAULT_MAX_INTERVAL = 30 # min
DEFAULT_GPS_ACCURACY_THRESHOLD = 500 # meters
# to store the cookie
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 2
PLATFORMS = [Platform.DEVICE_TRACKER, Platform.SENSOR]
# pyicloud.AppleDevice status
DEVICE_BATTERY_LEVEL = "batteryLevel"
DEVICE_BATTERY_STATUS = "batteryStatus"
DEVICE_CLASS = "deviceClass"
DEVICE_DISPLAY_NAME = "deviceDisplayName"
DEVICE_ID = "id"
DEVICE_LOCATION = "location"
DEVICE_LOCATION_HORIZONTAL_ACCURACY = "horizontalAccuracy"
DEVICE_LOCATION_LATITUDE = "latitude"
DEVICE_LOCATION_LONGITUDE = "longitude"
DEVICE_LOST_MODE_CAPABLE = "lostModeCapable"
DEVICE_LOW_POWER_MODE = "lowPowerMode"
DEVICE_NAME = "name"
DEVICE_PERSON_ID = "prsId"
DEVICE_RAW_DEVICE_MODEL = "rawDeviceModel"
DEVICE_STATUS = "deviceStatus"
DEVICE_STATUS_SET = [
"features",
"maxMsgChar",
"darkWake",
"fmlyShare",
DEVICE_STATUS,
"remoteLock",
"activationLocked",
DEVICE_CLASS,
DEVICE_ID,
"deviceModel",
DEVICE_RAW_DEVICE_MODEL,
"passcodeLength",
"canWipeAfterLock",
"trackingInfo",
DEVICE_LOCATION,
"msg",
DEVICE_BATTERY_LEVEL,
"remoteWipe",
"thisDevice",
"snd",
DEVICE_PERSON_ID,
"wipeInProgress",
DEVICE_LOW_POWER_MODE,
"lostModeEnabled",
"isLocating",
DEVICE_LOST_MODE_CAPABLE,
"mesg",
DEVICE_NAME,
DEVICE_BATTERY_STATUS,
"lockedTimestamp",
"lostTimestamp",
"locationCapable",
DEVICE_DISPLAY_NAME,
"lostDevice",
"deviceColor",
"wipedTimestamp",
"modelDisplayName",
"locationEnabled",
"isMac",
"locFoundEnabled",
]
DEVICE_STATUS_CODES = {
"200": "online",
"201": "offline",
"203": "pending",
"204": "unregistered",
}
|
a79da4a67d52a2b92022af25dd85781a9763d0e5
|
270dec496b274c98dcc6b1b6d163c8f09166ad15
|
/lib/tests/py_test/test_protofile.py
|
887b0c9005327c1236b84d7bc10c8d0fd6295dd4
|
[
"MIT"
] |
permissive
|
nccgroup/blackboxprotobuf
|
c4b96d2939a723e81774e17d654756d50e51c25c
|
c5fdc580735cead4b17dbf13a4481360b14247e6
|
refs/heads/master
| 2023-07-08T04:11:47.929673
| 2023-05-08T20:49:54
| 2023-05-08T20:49:54
| 133,435,465
| 396
| 61
|
MIT
| 2023-08-22T16:22:11
| 2018-05-15T00:07:03
|
Python
|
UTF-8
|
Python
| false
| false
| 12,063
|
py
|
test_protofile.py
|
# Copyright (c) 2018-2022 NCC Group Plc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import six
import math
import glob
import shutil
import base64
import struct
import pytest
import logging
import tempfile
import subprocess
import hypothesis
from hypothesis import given, assume, note, settings, HealthCheck
import hypothesis.strategies as st
import google.protobuf.json_format
import strategies
import blackboxprotobuf.lib
import blackboxprotobuf.lib.protofile as protofile
from blackboxprotobuf.lib.types import length_delim
from blackboxprotobuf.lib.config import Config
to_suppress = []
if six.PY3:
to_suppress = (HealthCheck.function_scoped_fixture,)
@given(
typedef=strategies.message_typedef_gen(named_fields=False),
name=st.from_regex(protofile.NAME_REGEX),
)
@settings(suppress_health_check=to_suppress)
def test_proto_export(tmp_path, typedef, name):
"""Check to make sure our generated protofiles don't throw an error"""
with tempfile.NamedTemporaryFile(
mode="w", dir=str(tmp_path), suffix=".proto", delete=True
) as outfile:
typedef_map = {name: typedef}
note(typedef_map)
protofile.export_proto(typedef_map, output_file=outfile)
py_out = str(tmp_path / "py_out")
if os.path.exists(py_out):
shutil.rmtree(py_out)
os.mkdir(py_out)
outfile.flush()
subprocess.check_call(
"/usr/bin/protoc --python_out ./py_out %s" % os.path.basename(outfile.name),
shell=True,
cwd=str(tmp_path),
)
@given(
x=strategies.gen_message(named_fields=False),
name=st.from_regex(protofile.NAME_REGEX),
)
@settings(suppress_health_check=to_suppress)
def test_proto_export_inverse(tmp_path, x, name):
"""Generate a proto file and try to re-import it. This does not cover all
possible proto files we want to try importing"""
config = Config()
typedef, message = x
with tempfile.NamedTemporaryFile(
mode="r+", dir=str(tmp_path), suffix=".proto", delete=True
) as outfile:
typedef_map = {name: typedef}
protofile.export_proto(typedef_map, output_file=outfile)
outfile.flush()
outfile.seek(0)
new_typedef_map = protofile.import_proto(config, input_file=outfile)
config.known_types.update(new_typedef_map)
# validate
for name, typedef in new_typedef_map.items():
blackboxprotobuf.validate_typedef(typedef, config=config)
def _check_field_types(typedef1, typedef2):
for field_num in typedef1.keys():
# make sure we don't drop keys
assert field_num in typedef2
assert typedef1[field_num]["type"] == typedef2[field_num]["type"]
if typedef1[field_num]["type"] == "message":
message_typedef1 = None
message_typedef2 = None
if "message_typedef" in typedef1[field_num]:
message_typedef1 = typedef1[field_num]["message_typedef"]
elif "message_type_name" in typedef1[field_num]:
assert typedef1[field_num]["message_type_name"] in typedef_map
message_typedef1 = typedef_map[
typedef1[field_num]["message_type_name"]
]
if "message_typedef" in typedef2[field_num]:
message_typedef2 = typedef2[field_num]["message_typedef"]
elif "message_type_name" in typedef2[field_num]:
assert (
typedef2[field_num]["message_type_name"] in new_typedef_map
)
message_typedef2 = new_typedef_map[
typedef2[field_num]["message_type_name"]
]
_check_field_types(message_typedef1, message_typedef2)
note(typedef_map)
note(new_typedef_map)
for name, typedef in typedef_map.items():
_check_field_types(typedef, new_typedef_map[name])
note(new_typedef_map[name])
# try to actually encode a message with the typedef
encode_forward = length_delim.encode_message(message, config, typedef_map[name])
config.known_types = new_typedef_map
encode_backward = length_delim.encode_message(
message, config, new_typedef_map[name]
)
decode_forward, _, _, _ = length_delim.decode_message(
encode_forward, config, new_typedef_map[name]
)
decode_backward, _, _, _ = length_delim.decode_message(
encode_backward, config, typedef_map[name]
)
@pytest.mark.filterwarnings("ignore:Call to deprecated create function.*")
def test_proto_import_examples():
config = Config()
# try importing all the examples pulled from protobuf repo
protofiles = glob.glob("../burp/deps/protobuf/src/google/protobuf/*.proto")
# These files have some mechanism we don't support, mostly imports
unsupported_files = {
"../burp/deps/protobuf/src/google/protobuf/api.proto", # import
"../burp/deps/protobuf/src/google/protobuf/unittest_optimize_for.proto", # import
"../burp/deps/protobuf/src/google/protobuf/type.proto", # import
"../burp/deps/protobuf/src/google/protobuf/unittest_lite_imports_nonlite.proto", # import
"../burp/deps/protobuf/src/google/protobuf/unittest_lite.proto", # group type not supported
"../burp/deps/protobuf/src/google/protobuf/unittest_embed_optimize_for.proto", # import
"../burp/deps/protobuf/src/google/protobuf/unittest.proto", # group
"../burp/deps/protobuf/src/google/protobuf/unittest_lazy_dependencies.proto", # import
}
assert len(protofiles) != 0
for target_file in protofiles:
if target_file in unsupported_files:
print("Skipping file: %s" % target_file)
continue
print("Testing file: %s" % target_file)
typedef_map_out = protofile.import_proto(config, input_filename=target_file)
config.known_types = typedef_map_out
for name, typedef in typedef_map_out.items():
logging.debug("known messages: %s" % config.known_types)
blackboxprotobuf.lib.validate_typedef(typedef, config=config)
@given(
x=strategies.gen_message(named_fields=False),
name=st.from_regex(protofile.NAME_REGEX),
)
@settings(suppress_health_check=to_suppress)
@pytest.mark.filterwarnings("ignore:Call to deprecated create function.*")
def test_proto_decode(tmp_path, x, name):
    """Export to protobuf and try to decode a message we encoded with it."""
    config = Config()
    typedef, message = x
with tempfile.NamedTemporaryFile(
mode="w", dir=str(tmp_path), suffix=".proto", delete=True
) as outfile:
typedef_map = {name: typedef}
encoded_message = length_delim.encode_message(message, config, typedef)
note(typedef_map)
basename = os.path.basename(outfile.name)
# Export the protobuf file and compile it
protofile.export_proto(typedef_map, output_file=outfile, package=basename[:-6])
py_out = str(tmp_path / "py_out")
if os.path.exists(py_out):
shutil.rmtree(py_out)
os.mkdir(py_out)
outfile.flush()
subprocess.check_call(
"/usr/bin/protoc --python_out ./py_out %s" % basename,
shell=True,
cwd=str(tmp_path),
)
# Try to import the file
sys.path.insert(0, str(tmp_path) + "/py_out/")
# Trim off .proto
try:
proto_module = __import__(basename[:-6] + "_pb2")
del sys.path[0]
except SyntaxError:
logging.debug("Caught syntax error in protoc import")
return
message_class = getattr(proto_module, name)
note(encoded_message)
my_message = message_class()
my_message.ParseFromString(encoded_message)
decoded_message = google.protobuf.json_format.MessageToDict(
my_message, including_default_value_fields=True
)
note(message)
note(decoded_message)
note(
google.protobuf.json_format.MessageToJson(
my_message, including_default_value_fields=True
)
)
def _check_field_match(orig_value, new_value):
note(type(new_value))
note(type(orig_value))
if isinstance(orig_value, six.integer_types) and isinstance(new_value, str):
assert str(orig_value) == new_value
elif isinstance(orig_value, bytes):
assert orig_value == base64.b64decode(new_value)
elif isinstance(new_value, dict):
_check_message_match(orig_value, new_value)
elif isinstance(orig_value, float):
# normalize floats
if isinstance(new_value, str):
if "Infinity" in new_value:
assert math.isinf(orig_value)
else:
assert new_value == "NaN"
                assert math.isnan(orig_value)
else:
# pack and unpack floats to try and normalize them
try:
orig_value_packed = struct.pack("<f", orig_value)
(orig_value,) = struct.unpack("<f", orig_value_packed)
                    new_value_packed = struct.pack("<f", new_value)
                    (new_value,) = struct.unpack("<f", new_value_packed)
assert orig_value == new_value
except OverflowError:
orig_value_packed = struct.pack("<d", orig_value)
(orig_value,) = struct.unpack("<d", orig_value_packed)
new_value_packed = struct.pack("<d", new_value)
(new_value,) = struct.unpack("<d", new_value_packed)
assert orig_value == new_value
else:
assert orig_value == new_value
def _check_message_match(message_orig, message_new):
for field_key, field_value in message_new.items():
if field_key.startswith("field"):
field_key = field_key[5:]
orig_value = message_orig[field_key]
if isinstance(field_value, list):
if not isinstance(orig_value, list):
orig_value = [orig_value]
assert len(orig_value) == len(field_value)
for orig_value, new_value in zip(orig_value, field_value):
_check_field_match(orig_value, new_value)
else:
_check_field_match(orig_value, field_value)
# Check all the fields match each other
_check_message_match(message, decoded_message)
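# Illustrative sketch (not part of the original tests): the pack/unpack dance
# above normalizes floats by casting both sides through the same fixed-width
# IEEE-754 type, discarding precision that could never survive a protobuf
# round trip. A minimal standalone helper, assuming only the stdlib:
def _normalize_float32(value):
    # Round-trip through single precision to drop excess double precision.
    (normalized,) = struct.unpack("<f", struct.pack("<f", value))
    return normalized
# e.g. _normalize_float32(0.1) == _normalize_float32(0.10000000000000002)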
|
b977a54bc2968fe949364c1d0e115a72832271c9
|
e61e664d95af3b93150cda5b92695be6551d2a7c
|
/vega/networks/tensorflow/necks/mask_rcnn_box.py
|
b4a9be84b5e3d8825fcd75e91172434914c32c52
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
huawei-noah/vega
|
44aaf8bb28b45f707ed6cd4e871ba70fc0c04846
|
12e37a1991eb6771a2999fe0a46ddda920c47948
|
refs/heads/master
| 2023-09-01T20:16:28.746745
| 2023-02-15T09:36:59
| 2023-02-15T09:36:59
| 273,667,533
| 850
| 184
|
NOASSERTION
| 2023-02-15T09:37:01
| 2020-06-20T08:20:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,430
|
py
|
mask_rcnn_box.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defined faster rcnn detector."""
from object_detection.predictors.heads import box_head
from object_detection.predictors import mask_rcnn_box_predictor
from object_detection.predictors.heads import class_head
from vega.common import ClassType, ClassFactory
from vega.networks.tensorflow.utils.hyperparams import scope_generator
@ClassFactory.register(ClassType.NETWORK)
class MaskRCNNBox(object):
"""Mask RCNN Box."""
def __init__(self, desc):
"""Init MaskRCNNBox.
:param desc: config dict
"""
self.model = None
self.num_classes = desc.num_classes
self.add_background_class = desc.add_background_class if 'add_background_class' in desc else True
        self.num_class_slots = (self.num_classes + 1
                                if self.add_background_class else self.num_classes)
self.use_dropout = desc.use_dropout if 'use_dropout' in desc else False
self.dropout_keep_prob = desc.dropout_keep_prob if 'dropout_keep_prob' in desc else 1.0
self.box_code_size = desc.box_code_size if 'box_code_size' in desc else 4
self.share_box_across_classes = desc.share_box_across_classes if 'share_box_across_classes' in desc else False
self.fc_hyperparams = scope_generator.get_hyper_params_scope(
desc.fc_hyperparams)
def get_real_model(self, training):
"""Get real model of maskRcnnBox."""
if self.model:
return self.model
else:
self.box_prediction_head = box_head.MaskRCNNBoxHead(
is_training=training,
num_classes=self.num_classes,
fc_hyperparams_fn=self.fc_hyperparams,
use_dropout=self.use_dropout,
dropout_keep_prob=self.dropout_keep_prob,
box_code_size=self.box_code_size,
share_box_across_classes=self.share_box_across_classes)
self.class_prediction_head = class_head.MaskRCNNClassHead(
is_training=training,
num_class_slots=self.num_class_slots,
fc_hyperparams_fn=self.fc_hyperparams,
use_dropout=self.use_dropout,
dropout_keep_prob=self.dropout_keep_prob)
third_stage_heads = {}
self.model = mask_rcnn_box_predictor.MaskRCNNBoxPredictor(
is_training=training,
num_classes=self.num_classes,
box_prediction_head=self.box_prediction_head,
class_prediction_head=self.class_prediction_head,
third_stage_heads=third_stage_heads)
return self.model
def __call__(self, features, labels, training):
"""Forward function of maskRcnnBox."""
return self.get_real_model(training).predict(features, labels)
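# Illustrative sketch (not from the original repo): `desc` must support both
# attribute access (desc.num_classes) and `in` checks, so a thin dict
# subclass is enough for experimentation; the config values below are
# placeholder assumptions.
class _Desc(dict):
    """Minimal stand-in for a vega config object."""
    __getattr__ = dict.__getitem__
# desc = _Desc(num_classes=80, use_dropout=True, dropout_keep_prob=0.8,
#              fc_hyperparams={})  # placeholder; real values come from vega configs
# box = MaskRCNNBox(desc)
# model = box.get_real_model(training=True)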
|
5258f9a571708d49bbc50b4442a145082cbf7084
|
dd221d1ab80a49190a0c93277e2471debaa2db95
|
/hanlp/components/mtl/tasks/ner/biaffine_ner.py
|
f463d8d0d138f6f443fa815d4daff58acff0df14
|
[
"Apache-2.0",
"CC-BY-NC-SA-4.0"
] |
permissive
|
hankcs/HanLP
|
29a22d4e240617e4dc67929c2f9760a822402cf7
|
be2f04905a12990a527417bd47b79b851874a201
|
refs/heads/doc-zh
| 2023-08-18T12:48:43.533453
| 2020-02-15T17:19:28
| 2023-03-14T02:46:03
| 24,976,755
| 32,454
| 9,770
|
Apache-2.0
| 2023-08-13T03:11:39
| 2014-10-09T06:36:16
|
Python
|
UTF-8
|
Python
| false
| false
| 5,782
|
py
|
biaffine_ner.py
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-08-05 01:49
import logging
from copy import copy
from typing import Dict, Any, Union, Iterable, List
import torch
from torch.utils.data import DataLoader
from hanlp.common.dataset import SamplerBuilder, PadSequenceDataLoader
from hanlp.common.transform import VocabDict, TransformList
from hanlp.components.mtl.tasks import Task
from hanlp.components.ner.biaffine_ner.biaffine_ner import BiaffineNamedEntityRecognizer
from hanlp.components.ner.biaffine_ner.biaffine_ner_model import BiaffineNamedEntityRecognitionDecoder
from hanlp.datasets.ner.loaders.json_ner import unpack_ner
from hanlp.layers.scalar_mix import ScalarMixWithDropoutBuilder
from hanlp.metrics.metric import Metric
from hanlp.metrics.mtl import MetricDict
from hanlp_common.util import merge_locals_kwargs
class BiaffineNamedEntityRecognition(Task, BiaffineNamedEntityRecognizer):
def __init__(self, trn: str = None, dev: str = None, tst: str = None, sampler_builder: SamplerBuilder = None,
dependencies: str = None, scalar_mix: ScalarMixWithDropoutBuilder = None, use_raw_hidden_states=False,
lr=None, separate_optimizer=False,
doc_level_offset=True, is_flat_ner=True, tagset=None, ret_tokens=' ',
ffnn_size=150, loss_reduction='mean', **kwargs) -> None:
"""An implementation of Named Entity Recognition as Dependency Parsing (:cite:`yu-etal-2020-named`). It treats
every possible span as a candidate of entity and predicts its entity label. Non-entity spans are assigned NULL
label to be excluded. The label prediction is done with a biaffine layer (:cite:`dozat:17a`). As it makes no
assumption about the spans, it naturally supports flat NER and nested NER.
Args:
trn: Path to training set.
dev: Path to dev set.
tst: Path to test set.
sampler_builder: A builder which builds a sampler.
dependencies: Its dependencies on other tasks.
scalar_mix: A builder which builds a `ScalarMixWithDropout` object.
use_raw_hidden_states: Whether to use raw hidden states from transformer without any pooling.
lr: Learning rate for this task.
separate_optimizer: Use customized separate optimizer for this task.
doc_level_offset: ``True`` to indicate the offsets in ``jsonlines`` are of document level.
is_flat_ner: ``True`` for flat NER, otherwise nested NER.
tagset: Optional tagset to prune entities outside of this tagset from datasets.
ret_tokens: A delimiter between tokens in entities so that the surface form of an entity can be rebuilt.
ffnn_size: Feedforward size for MLPs extracting the head/tail representations.
loss_reduction: The loss reduction used in aggregating losses.
**kwargs: Not used.
"""
super().__init__(**merge_locals_kwargs(locals(), kwargs))
self.vocabs = VocabDict()
def update_metrics(self, batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
prediction: Dict[str, Any], metric: Union[MetricDict, Metric]):
BiaffineNamedEntityRecognizer.update_metrics(self, batch, prediction, metric)
def decode_output(self,
output: Dict[str, Any],
mask: torch.BoolTensor,
batch: Dict[str, Any],
decoder,
**kwargs) -> Union[Dict[str, Any], Any]:
return self.get_pred_ner(batch['token'], output['candidate_ner_scores'])
def compute_loss(self, batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any], criterion) -> \
Union[torch.FloatTensor, Dict[str, torch.FloatTensor]]:
return output['loss']
def build_dataloader(self, data,
transform: TransformList = None,
training=False,
device=None,
logger: logging.Logger = None,
gradient_accumulation=1,
**kwargs) -> DataLoader:
transform = copy(transform)
transform.append(unpack_ner)
dataset = BiaffineNamedEntityRecognizer.build_dataset(self, data, self.vocabs, transform)
dataset.purge_cache()
if self.vocabs.mutable:
BiaffineNamedEntityRecognizer.build_vocabs(self, dataset, logger, self.vocabs)
return PadSequenceDataLoader(
batch_sampler=self.sampler_builder.build(self.compute_lens(data, dataset), shuffle=training,
gradient_accumulation=gradient_accumulation),
device=device,
dataset=dataset)
def build_model(self, encoder_size, training=True, **kwargs) -> torch.nn.Module:
return BiaffineNamedEntityRecognitionDecoder(encoder_size, self.config.ffnn_size, len(self.vocabs.label),
self.config.loss_reduction)
def build_metric(self, **kwargs):
return BiaffineNamedEntityRecognizer.build_metric(self, **kwargs)
def input_is_flat(self, data) -> bool:
return BiaffineNamedEntityRecognizer.input_is_flat(data)
def prediction_to_result(self, prediction: Dict[str, Any], batch: Dict[str, Any]) -> List:
results = []
BiaffineNamedEntityRecognizer.prediction_to_result(batch['token'], prediction, results,
ret_tokens=self.config.get('ret_tokens', ' '))
return results
|
5765457f9c1667f21ac2879fa324a103b396dc39
|
db64e76033c80eaa0ad12d4877d808d60a62b57d
|
/tests/test_symspellpy_word_segmentation.py
|
2322d05abb3b3fe45c1ec11f28e06130a58792bc
|
[
"MIT"
] |
permissive
|
mammothb/symspellpy
|
3389006d8b9af8510bc1fba5c5830e37e9c6c550
|
142c2c70255701e37d92d661c8779b2629d6f2a6
|
refs/heads/master
| 2023-07-07T03:02:37.550709
| 2023-04-14T01:06:59
| 2023-04-14T01:06:59
| 144,558,951
| 712
| 132
|
MIT
| 2023-06-30T02:30:49
| 2018-08-13T09:37:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,675
|
py
|
test_symspellpy_word_segmentation.py
|
import pytest
from symspellpy import SymSpell
@pytest.fixture
def symspell_edit_distance_load(dictionary_path, request):
sym_spell = SymSpell(request.param)
sym_spell.load_dictionary(dictionary_path, 0, 1)
return sym_spell, request.param
class TestSymSpellPyWordSegmentation:
@pytest.mark.parametrize("symspell_default_load", ["unigram"], indirect=True)
def test_word_segmentation_ignore_token(self, symspell_default_load):
sym_spell, _ = symspell_default_load
typo = "24th december"
result = sym_spell.word_segmentation(typo, ignore_token=r"\d{2}\w*\b")
assert typo == result.corrected_string
@pytest.mark.parametrize(
"symspell_edit_distance_load, get_fortests_data, with_arguments, capitalize",
[
(0, "word_segmentation_data.json", False, False),
(0, "word_segmentation_data.json", True, False),
(0, "word_segmentation_data.json", False, True),
],
indirect=["symspell_edit_distance_load", "get_fortests_data"],
)
def test_word_segmentation(
self,
symspell_edit_distance_load,
get_fortests_data,
with_arguments,
capitalize,
):
sym_spell, edit_distance = symspell_edit_distance_load
for entry in get_fortests_data:
if capitalize:
typo = entry["typo"].capitalize()
correction = entry[str(edit_distance)]["term"].capitalize()
else:
typo = entry["typo"]
correction = entry[str(edit_distance)]["term"]
if with_arguments:
result = sym_spell.word_segmentation(typo, edit_distance, 11)
else:
result = sym_spell.word_segmentation(typo)
assert correction == result.corrected_string
@pytest.mark.parametrize("symspell_edit_distance_load", [0], indirect=True)
def test_word_segmentation_apostrophe(self, symspell_edit_distance_load):
sym_spell, _ = symspell_edit_distance_load
typo = "There'resomewords"
correction = "There' re some words"
result = sym_spell.word_segmentation(typo)
assert correction == result[1]
@pytest.mark.parametrize("symspell_edit_distance_load", [0], indirect=True)
def test_word_segmentation_ligature(self, symspell_edit_distance_load):
sym_spell, _ = symspell_edit_distance_load
typo = "Therearesomescientificwords"
correction = "There are some scientific words"
result = sym_spell.word_segmentation(typo)
assert correction == result[1]
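# Illustrative sketch (not part of the test suite): the fixtures above wrap
# this direct usage of the word segmentation API; the dictionary path is a
# placeholder.
#
#   sym_spell = SymSpell()
#   sym_spell.load_dictionary("frequency_dictionary_en_82_765.txt", 0, 1)
#   result = sym_spell.word_segmentation("thequickbrownfox")
#   print(result.corrected_string)  # -> "the quick brown fox"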
|
923b3a300a8f04ae2741de5d7acecf5c7f3dbad0
|
6bf3efa384abc11398ab9c6cc902c6415bf7e478
|
/example/driver.py
|
c304e72035b518e18b1bedbe1ee7036862120279
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
dropbox/pyannotate
|
d9ab4919672e69853c6aa47f6b8d4ba85696de98
|
a7a46f394f0ba91a1b5fbf657e2393af542969ae
|
refs/heads/master
| 2022-08-14T03:55:19.082430
| 2021-10-12T20:53:49
| 2021-10-12T20:53:49
| 110,597,280
| 1,482
| 73
|
Apache-2.0
| 2023-02-01T22:35:54
| 2017-11-13T20:18:08
|
Python
|
UTF-8
|
Python
| false
| false
| 232
|
py
|
driver.py
|
from gcd import main
from pyannotate_runtime import collect_types
if __name__ == '__main__':
collect_types.init_types_collection()
with collect_types.collect():
main()
collect_types.dump_stats('type_info.json')
|
d0af92f19a548b1e174388150731b9675d9921ef
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/third_party/gsutil/gslib/utils/encryption_helper.py
|
b6e984420c7623d541277b101c3192201772ec5d
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 7,178
|
py
|
encryption_helper.py
|
# -*- coding: utf-8 -*-
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for dealing with encryption keys used with cloud APIs."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import base64
import binascii
from hashlib import sha256
import re
import sys
import six
from gslib.exception import CommandException
from gslib.lazy_wrapper import LazyWrapper
MAX_DECRYPTION_KEYS = 100
VALID_CMEK_RE = LazyWrapper(
lambda: re.compile('projects/([^/]+)/'
'locations/([a-zA-Z0-9_-]{1,63})/'
'keyRings/([a-zA-Z0-9_-]{1,63})/'
'cryptoKeys/([a-zA-Z0-9_-]{1,63})$'))
class CryptoKeyType(object):
"""Enum of valid types of encryption keys used with cloud API requests."""
CSEK = 'CSEK'
CMEK = 'CMEK'
class CryptoKeyWrapper(object):
"""Class describing a crypto key used with cloud API requests.
This class should be instantiated via the `CryptoKeyWrapperFromKey` method.
"""
def __init__(self, crypto_key):
"""Initialize the CryptoKeyWrapper.
Args:
crypto_key: Base64-encoded string of a CSEK, or the name of a Cloud KMS
CMEK.
Raises:
CommandException: The specified crypto key was neither a CMEK key name nor
a valid base64-encoded string of a CSEK.
"""
self.crypto_key = crypto_key
# Base64-encoded CSEKs always have a length of 44 characters, whereas
# fully-qualified CMEK names are guaranteed to be longer than 45 characters.
if len(crypto_key) == 44:
self.crypto_type = CryptoKeyType.CSEK
self.crypto_alg = 'AES256' # Only currently supported algorithm for CSEK.
try:
self.crypto_key_sha256 = Base64Sha256FromBase64EncryptionKey(crypto_key)
except:
raise CommandException(
'Configured encryption_key or decryption_key looked like a CSEK, '
'but it was not a valid 44-character base64 string. Please '
'double-check your configuration and ensure the key is correct.')
else: # CMEK
try:
ValidateCMEK(crypto_key)
except CommandException as e:
raise CommandException(
'Configured encryption_key or decryption_key looked like a CMEK, '
'but the key failed validation:\n%s' % e.reason)
self.crypto_type = CryptoKeyType.CMEK
self.crypto_alg = None
self.crypto_key_sha256 = None
def CryptoKeyWrapperFromKey(crypto_key):
"""Returns a CryptoKeyWrapper for crypto_key, or None for no key."""
return CryptoKeyWrapper(crypto_key) if crypto_key else None
def FindMatchingCSEKInBotoConfig(key_sha256, boto_config):
"""Searches boto_config for a CSEK with the given base64-encoded SHA256 hash.
Args:
key_sha256: (str) Base64-encoded SHA256 hash of the AES256 encryption key.
boto_config: (boto.pyami.config.Config) The boto config in which to check
for a matching encryption key.
Returns:
(str) Base64-encoded encryption key string if a match is found, None
otherwise.
"""
if six.PY3:
if not isinstance(key_sha256, bytes):
key_sha256 = key_sha256.encode('ascii')
keywrapper = CryptoKeyWrapperFromKey(
boto_config.get('GSUtil', 'encryption_key', None))
if (keywrapper is not None and
keywrapper.crypto_type == CryptoKeyType.CSEK and
keywrapper.crypto_key_sha256 == key_sha256):
return keywrapper.crypto_key
for i in range(MAX_DECRYPTION_KEYS):
key_number = i + 1
keywrapper = CryptoKeyWrapperFromKey(
boto_config.get('GSUtil', 'decryption_key%s' % str(key_number), None))
if keywrapper is None:
# Reading 100 config values can take ~1ms in testing. To avoid adding
# this tax, stop reading keys as soon as we encounter a non-existent
# entry (in lexicographic order).
break
elif (keywrapper.crypto_type == CryptoKeyType.CSEK and
keywrapper.crypto_key_sha256 == key_sha256):
return keywrapper.crypto_key
def GetEncryptionKeyWrapper(boto_config):
"""Returns a CryptoKeyWrapper for the configured encryption key.
Reads in the value of the "encryption_key" attribute in boto_config, and if
present, verifies it is a valid base64-encoded string and returns a
CryptoKeyWrapper for it.
Args:
boto_config: (boto.pyami.config.Config) The boto config in which to check
for a matching encryption key.
Returns:
CryptoKeyWrapper for the specified encryption key, or None if no encryption
key was specified in boto_config.
"""
encryption_key = boto_config.get('GSUtil', 'encryption_key', None)
return CryptoKeyWrapper(encryption_key) if encryption_key else None
def Base64Sha256FromBase64EncryptionKey(csek_encryption_key):
if six.PY3:
if not isinstance(csek_encryption_key, bytes):
csek_encryption_key = csek_encryption_key.encode('ascii')
decoded_bytes = base64.decodestring(csek_encryption_key)
key_sha256 = _CalculateSha256FromString(decoded_bytes)
sha256_bytes = binascii.unhexlify(key_sha256)
sha256_base64 = base64.encodestring(sha256_bytes)
return sha256_base64.replace(b'\n', b'')
def ValidateCMEK(key):
if not key:
raise CommandException('KMS key is empty.')
if key.startswith('/'):
raise CommandException(
'KMS key should not start with leading slash (/): "%s"' % key)
if not VALID_CMEK_RE().match(key):
raise CommandException(
'Invalid KMS key name: "%s".\nKMS keys should follow the format '
'"projects/<project-id>/locations/<location>/keyRings/<keyring>/'
'cryptoKeys/<key-name>"' % key)
def _CalculateSha256FromString(input_string):
sha256_hash = sha256()
sha256_hash.update(input_string)
return sha256_hash.hexdigest()
def _GetAndVerifyBase64EncryptionKey(boto_config):
"""Reads the encryption key from boto_config and ensures it is base64-encoded.
Args:
boto_config: (boto.pyami.config.Config) The boto config in which to check
for a matching encryption key.
Returns:
(str) Base64-encoded encryption key string, or None if no encryption key
exists in configuration.
"""
encryption_key = boto_config.get('GSUtil', 'encryption_key', None)
if encryption_key:
# Ensure the key has a valid encoding.
try:
base64.decodestring(encryption_key)
except:
raise CommandException(
'Configured encryption_key is not a valid base64 string. Please '
'double-check your configuration and ensure the key is valid and in '
'base64 format.')
return encryption_key
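# Illustrative sketch (not part of gsutil): how a CSEK relates to the hash
# sent alongside it. A CSEK is 32 random bytes, base64-encoded into a
# 44-character string; its companion hash is the base64 of the SHA256 digest
# of the raw key bytes. Uses only modules already imported above.
def _example_csek_pair():
  import os
  raw_key = os.urandom(32)
  csek = base64.b64encode(raw_key)  # always 44 characters long
  key_sha256 = base64.b64encode(sha256(raw_key).digest())
  return csek, key_sha256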
|
548b91984a98a1483f6ebaeee52f5f608d5182d7
|
533fe9cb56a370acf7600d538495a615efbc561c
|
/minibatch/contrib/mongodb.py
|
fc27fb2bdee082e94a92a57046626e67366acb8c
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"AGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"MPL-1.0",
"SSPL-1.0"
] |
permissive
|
omegaml/minibatch
|
38c6fbc99a6205b11e35c159958023abeae74622
|
a45f477136c62f1e1ad6d745b621fe1083fcc95a
|
refs/heads/master
| 2023-06-07T06:17:40.363182
| 2023-06-06T18:13:29
| 2023-06-06T18:13:29
| 237,193,338
| 192
| 14
|
Apache-2.0
| 2023-06-06T18:13:30
| 2020-01-30T10:52:40
|
Python
|
UTF-8
|
Python
| false
| false
| 3,926
|
py
|
mongodb.py
|
import pymongo
from time import sleep
from minibatch import logger
class MongoSource:
"""
A mongodb collection source
Usage:
# start consuming from mongo collection
stream = mb.stream('test')
source = MongoSource(collection)
stream.attach(source)
# stream to a python callable
streaming('test')(lambda v: print(v))
Args:
collection (pymongo.Collection): a mongo collection
        size (int): the number of new documents to fetch for each stream.append,
           defaults to 1
        idcol (str): the name of the id column, defaults to _id
        delay (float): the wait time in seconds between change queries, defaults
           to .1
    Notes:
        * the collection must have a key column that is naturally ordered (ascending),
          that is, a new record's key must compare greater than any previous key
        * by default MongoSource uses the object id (_id column) as the sort order,
          because it increases naturally for each newly inserted object. Override
          the default with idcol='column name'
        * MongoSource implements a polling change observer that runs once every
          delay seconds; each query retrieves at most N=size messages, and all
          messages are appended to the stream one by one.
          If you know how frequently new messages arrive, change either size or
          delay to optimize polling behavior. For example, if messages arrive
          more often than every .1 seconds but processing them in steps of
          > .1 seconds is acceptable, set size to the number of messages expected
          in each interval. If messages arrive less often than every .1 seconds,
          consider specifying a delay > .1 seconds to reduce the polling load
          on the database.
* For a Mongo replicaset, use the MongoReplicasetSource instead. It uses
the MongoDB native change stream instead of a polling change observer
which is more efficient.
"""
def __init__(self, collection, size=1, idcol=None, delay=.1):
self.collection = collection
self._cancel = None
self._lastid = None
self._size = size
        self._idcol = idcol or '_id'
self._delay = delay
def changes(self, N=1):
latest_id = None
while not self._cancel:
sortkey = {
                'sort': [(self._idcol, pymongo.ASCENDING)],
}
query = {}
if latest_id is not None:
query[self._idcol] = {
'$gt': latest_id
}
docs = self.collection.find(query, **sortkey).limit(N)
for doc in docs:
latest_id = doc[self._idcol]
yield doc
sleep(self._delay)
def stream(self, stream):
self._cancel = False
while not self._cancel:
for doc in self.changes(N=self._size):
if self._cancel:
break
stream.append(doc)
logger.debug("stream done")
def cancel(self):
self._cancel = True
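# Illustrative sketch (not part of minibatch): tuning the polling observer
# described in the Notes above. With bursts of ~50 inserts per second, a
# size of 50 and the default .1s delay drain each burst in a single query.
# The database and collection names are placeholders.
#
#   import pymongo
#   coll = pymongo.MongoClient().mydb.events
#   source = MongoSource(coll, size=50, delay=.1)
#   stream.attach(source)  # `stream` created via minibatch, e.g. mb.stream('events')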
class MongoSink:
"""
A mongodb collection sink
"""
def __init__(self, collection):
self.collection = collection
def put(self, messages):
if isinstance(messages, dict):
messages = [messages]
return self.collection.insert_many(messages)
class MongoReplicasetSource(MongoSource):
def changes(self, N=1):
criteria = [
{'$match': {
'operationType': 'insert'
}}
]
self._cancel = False
docs = []
with self.collection.watch(criteria) as changes:
for doc in changes:
if self._cancel:
break
docs.append(doc)
if len(docs) >= N:
yield docs
docs = []
|
06258bd9f7b11b1164ed56add74031a5ea9e7034
|
751fe2de18f00596e4f1ed342b56bd6f38ee2053
|
/wisdem/test/test_ccblade/test_ccblade.py
|
3048051ce966b6b05ed7ea28d0d62f5ffdd08138
|
[
"Apache-2.0"
] |
permissive
|
WISDEM/WISDEM
|
42fa780915d62fd4e4203050e886093ecc806c8a
|
d7270ebe1c554293a9d36730d67ab555c071cb17
|
refs/heads/master
| 2023-08-04T01:22:43.215105
| 2023-06-22T23:36:07
| 2023-06-22T23:36:07
| 23,678,280
| 120
| 86
|
Apache-2.0
| 2023-06-22T19:26:34
| 2014-09-04T20:30:24
|
Python
|
UTF-8
|
Python
| false
| false
| 7,775
|
py
|
test_ccblade.py
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import unittest
from os import path
import numpy as np
from wisdem.ccblade.ccblade import CCBlade, CCAirfoil
class TestNREL5MW(unittest.TestCase):
def setUp(self):
# geometry
Rhub = 1.5
Rtip = 63.0
r = np.array(
[
2.8667,
5.6000,
8.3333,
11.7500,
15.8500,
19.9500,
24.0500,
28.1500,
32.2500,
36.3500,
40.4500,
44.5500,
48.6500,
52.7500,
56.1667,
58.9000,
61.6333,
]
)
chord = np.array(
[
3.542,
3.854,
4.167,
4.557,
4.652,
4.458,
4.249,
4.007,
3.748,
3.502,
3.256,
3.010,
2.764,
2.518,
2.313,
2.086,
1.419,
]
)
theta = np.array(
[
13.308,
13.308,
13.308,
13.308,
11.480,
10.162,
9.011,
7.795,
6.544,
5.361,
4.188,
3.125,
2.319,
1.526,
0.863,
0.370,
0.106,
]
)
B = 3 # number of blades
# atmosphere
rho = 1.225
mu = 1.81206e-5
afinit = CCAirfoil.initFromAerodynFile # just for shorthand
basepath = path.join(path.dirname(path.realpath(__file__)), "../../../examples/_airfoil_files")
# load all airfoils
airfoil_types = [0] * 8
airfoil_types[0] = afinit(path.join(basepath, "Cylinder1.dat"))
airfoil_types[1] = afinit(path.join(basepath, "Cylinder2.dat"))
airfoil_types[2] = afinit(path.join(basepath, "DU40_A17.dat"))
airfoil_types[3] = afinit(path.join(basepath, "DU35_A17.dat"))
airfoil_types[4] = afinit(path.join(basepath, "DU30_A17.dat"))
airfoil_types[5] = afinit(path.join(basepath, "DU25_A17.dat"))
airfoil_types[6] = afinit(path.join(basepath, "DU21_A17.dat"))
airfoil_types[7] = afinit(path.join(basepath, "NACA64_A17.dat"))
# place at appropriate radial stations
af_idx = [0, 0, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 7, 7, 7, 7]
af = [0] * len(r)
for i in range(len(r)):
af[i] = airfoil_types[af_idx[i]]
tilt = -5.0
precone = 2.5
yaw = 0.0
# create CCBlade object
self.rotor = CCBlade(r, chord, theta, af, Rhub, Rtip, B, rho, mu, precone, tilt, yaw, shearExp=0.2, hubHt=90.0)
def test_thrust_torque(self):
Uinf = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25])
Omega = np.array(
[
6.972,
7.183,
7.506,
7.942,
8.469,
9.156,
10.296,
11.431,
11.890,
12.100,
12.100,
12.100,
12.100,
12.100,
12.100,
12.100,
12.100,
12.100,
12.100,
12.100,
12.100,
12.100,
12.100,
]
)
pitch = np.array(
[
0.000,
0.000,
0.000,
0.000,
0.000,
0.000,
0.000,
0.000,
0.000,
3.823,
6.602,
8.668,
10.450,
12.055,
13.536,
14.920,
16.226,
17.473,
18.699,
19.941,
21.177,
22.347,
23.469,
]
)
Pref = np.array(
[
42.9,
188.2,
427.9,
781.3,
1257.6,
1876.2,
2668.0,
3653.0,
4833.2,
5296.6,
5296.6,
5296.6,
5296.6,
5296.6,
5296.6,
5296.6,
5296.6,
5296.7,
5296.6,
5296.7,
5296.6,
5296.6,
5296.7,
]
)
Tref = np.array(
[
171.7,
215.9,
268.9,
330.3,
398.6,
478.0,
579.2,
691.5,
790.6,
690.0,
608.4,
557.9,
520.5,
491.2,
467.7,
448.4,
432.3,
418.8,
406.7,
395.3,
385.1,
376.7,
369.3,
]
)
Qref = np.array(
[
58.8,
250.2,
544.3,
939.5,
1418.1,
1956.9,
2474.5,
3051.1,
3881.3,
4180.1,
4180.1,
4180.1,
4180.1,
4180.1,
4180.1,
4180.1,
4180.1,
4180.1,
4180.1,
4180.1,
4180.1,
4180.1,
4180.1,
]
)
m_rotor = 110.0 # kg
g = 9.81
tilt = 5 * math.pi / 180.0
Tref -= m_rotor * g * math.sin(tilt) # remove weight of rotor that is included in reported results
outputs, derivs = self.rotor.evaluate(Uinf, Omega, pitch)
P, T, Q = [outputs[key] for key in ("P", "T", "Q")]
# import matplotlib.pyplot as plt
# plt.plot(Uinf, P/1e6)
# plt.plot(Uinf, Pref/1e3)
# plt.figure()
# plt.plot(Uinf, T/1e6)
# plt.plot(Uinf, Tref/1e3)
# plt.show()
idx = Uinf < 15
np.testing.assert_allclose(Q[idx] / 1e6, Qref[idx] / 1e3, atol=0.15)
        np.testing.assert_allclose(P[idx] / 1e6, Pref[idx] / 1e3, atol=0.2)  # within 0.2 MW
np.testing.assert_allclose(T[idx] / 1e6, Tref[idx] / 1e3, atol=0.15)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestNREL5MW))
return suite
if __name__ == "__main__":
result = unittest.TextTestRunner().run(suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
|
9727cebd393cddc24120426ea4c4f4de6de033eb
|
8b4d84fcf32c03783dcaca0dd470dc60ed829282
|
/Chap18ProjAutoFormFiller.py
|
ff6417806caf568f5d1e43e19bc5fca211080800
|
[
"MIT"
] |
permissive
|
lotspaih/automateBoringstuffPython
|
1776c1054e3d3e6babf76f64e9147c32f6295e1a
|
51a402e31df01c36ae45dccad662d04b30378fea
|
refs/heads/master
| 2020-04-29T00:36:05.267875
| 2019-03-14T21:18:20
| 2019-03-14T21:18:20
| 60,571,508
| 107
| 51
|
CC0-1.0
| 2018-01-10T06:54:00
| 2016-06-07T01:11:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,111
|
py
|
Chap18ProjAutoFormFiller.py
|
#! python3
# Chapter 18 Project Automatic Form Filler
# Automatically fills in the form (http://autbor.com/form)
import pyautogui
import time
nameField = (648, 319)
submitButton = (651, 817)
submitButtonColor = (75, 141, 249)
submitAnotherLink = (760, 224)
formData = [{'name': 'Alice', 'fear': 'eavesdroppers', 'source': 'wand',
'robocop': 4, 'comments': 'Tell Bob I said hi.'},
{'name': 'Bob', 'fear': 'bees', 'source': 'amulet', 'robocop': 4,
'comments': 'n/a'}]
pyautogui.PAUSE = 0.5
for person in formData:
print('>>> 5 SECOND PAUSE TO LET USER PRESS CTRL-C <<<')
time.sleep(5)
while not pyautogui.pixelMatchesColor(submitButton[0], submitButton[1],
submitButtonColor):
time.sleep(0.5)
print('Entering %s info...' % (person['name']))
pyautogui.click(nameField[0], nameField[1])
pyautogui.typewrite(person['name'] + '\t')
pyautogui.typewrite(person['fear'] + '\t')
if person['source'] == 'wand':
pyautogui.typewrite(['down', '\t'])
elif person['source'] == 'amulet':
pyautogui.typewrite(['down', 'down', '\t'])
elif person['source'] == 'crystal ball':
pyautogui.typewrite(['down', 'down', 'down', '\t'])
elif person['source'] == 'money':
pyautogui.typewrite(['down', 'down', 'down', 'down', '\t'])
if person['robocop'] == 1:
pyautogui.typewrite([' ', '\t'])
elif person['robocop'] == 2:
pyautogui.typewrite(['right', '\t'])
elif person['robocop'] == 3:
pyautogui.typewrite(['right', 'right', '\t'])
elif person['robocop'] == 4:
pyautogui.typewrite(['right', 'right', 'right', '\t'])
elif person['robocop'] == 5:
pyautogui.typewrite(['right', 'right', 'right', 'right', '\t'])
    pyautogui.typewrite(person['comments'] + '\t')
pyautogui.press('enter')
print('Clicked Submit.')
time.sleep(5)
pyautogui.click(submitAnotherLink[0], submitAnotherLink[1])
|
e4c46a788f1eff89cb7991383320724c392b0a6d
|
9d8948ba3c48ad50db0b33e4db69722516ff2451
|
/kubernetes_asyncio/stream/ws_client.py
|
5337d5c0024909220578e3d75d588a3cb4c1f459
|
[
"Apache-2.0"
] |
permissive
|
tomplus/kubernetes_asyncio
|
f8d38b4f4546868499a19f5e2338f0c0cff0c68a
|
9a41443088b999c00b6fbc7a88dcfbb86bf5c1c5
|
refs/heads/master
| 2023-08-31T07:45:33.728246
| 2023-08-21T11:58:21
| 2023-08-21T11:58:21
| 128,578,499
| 296
| 64
|
Apache-2.0
| 2023-09-05T06:10:05
| 2018-04-07T23:55:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,369
|
py
|
ws_client.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves.urllib.parse import urlencode, urlparse, urlunparse
from kubernetes_asyncio.client import ApiClient
from kubernetes_asyncio.client.rest import RESTResponse
STDIN_CHANNEL = 0
STDOUT_CHANNEL = 1
STDERR_CHANNEL = 2
ERROR_CHANNEL = 3
RESIZE_CHANNEL = 4
def get_websocket_url(url):
parsed_url = urlparse(url)
parts = list(parsed_url)
if parsed_url.scheme == 'http':
parts[0] = 'ws'
elif parsed_url.scheme == 'https':
parts[0] = 'wss'
return urlunparse(parts)
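# Illustrative examples (not part of the original module): get_websocket_url
# only swaps the scheme; host, path and query string pass through unchanged.
def _websocket_url_examples():
    assert get_websocket_url('http://localhost:8080/exec') == 'ws://localhost:8080/exec'
    assert (get_websocket_url('https://10.0.0.1:6443/api/v1/pods?watch=1')
            == 'wss://10.0.0.1:6443/api/v1/pods?watch=1')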
class WsResponse(RESTResponse):
def __init__(self, status, data):
self.status = status
self.data = data
self.headers = {}
self.reason = None
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
class WsApiClient(ApiClient):
def __init__(self, configuration=None, header_name=None, header_value=None,
cookie=None, pool_threads=1, heartbeat=None):
super().__init__(configuration, header_name, header_value, cookie, pool_threads)
self.heartbeat = heartbeat
async def request(self, method, url, query_params=None, headers=None,
post_params=None, body=None, _preload_content=True,
_request_timeout=None):
        # Expand command parameter list to individual command params
if query_params:
new_query_params = []
for key, value in query_params:
if key == 'command' and isinstance(value, list):
for command in value:
new_query_params.append((key, command))
else:
new_query_params.append((key, value))
query_params = new_query_params
if headers is None:
headers = {}
if 'sec-websocket-protocol' not in headers:
headers['sec-websocket-protocol'] = 'v4.channel.k8s.io'
if query_params:
url += '?' + urlencode(query_params)
url = get_websocket_url(url)
if _preload_content:
resp_all = ''
async with self.rest_client.pool_manager.ws_connect(url, headers=headers, heartbeat=self.heartbeat) as ws:
async for msg in ws:
msg = msg.data.decode('utf-8')
if len(msg) > 1:
channel = ord(msg[0])
data = msg[1:]
if data:
if channel in [STDOUT_CHANNEL, STDERR_CHANNEL]:
resp_all += data
return WsResponse(200, resp_all.encode('utf-8'))
else:
return await self.rest_client.pool_manager.ws_connect(url, headers=headers, heartbeat=self.heartbeat)
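# Illustrative sketch (not part of the original module): in the
# v4.channel.k8s.io protocol each WebSocket frame carries a single channel
# byte followed by the payload, which is what the ord(msg[0]) / msg[1:]
# split in `request` above unpacks.
def _channel_frame_example():
    frame = b'\x01hello'
    assert frame[0] == STDOUT_CHANNEL  # first byte selects the channel
    assert frame[1:] == b'hello'       # remainder is the payload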
|
71b7d6f95d3aacd6ac72225e1e99f8b61cf0e84d
|
fb369693686cbd93799f68bcd0b4fdcf4c65d49a
|
/zavod/zavod/helpers/dates.py
|
7032549de2351267be6df48fece32ad162ac273a
|
[
"MIT",
"CC-BY-NC-4.0"
] |
permissive
|
opensanctions/opensanctions
|
8a43c173bd9c1422b5ca3e2ec35bcac70f8f1573
|
229b59247e67ad0661abb0a6f7155a61042a32ea
|
refs/heads/main
| 2023-09-03T23:59:34.785846
| 2023-09-03T08:46:14
| 2023-09-03T08:46:14
| 47,451,451
| 155
| 32
|
MIT
| 2023-09-14T05:46:11
| 2015-12-05T10:19:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,996
|
py
|
dates.py
|
import re
from typing import Iterable, Set, Optional, List
from prefixdate import parse_formats
from datetime import datetime, timedelta
from functools import cache
NUMBERS = re.compile(r"\d+")
__all__ = ["parse_date", "check_no_year", "parse_formats", "extract_years"]
def extract_years(text: str) -> List[str]:
"""Try to locate year numbers in a string such as 'circa 1990'. This will fail if
any numbers that don't look like years are found in the string, a strong indicator
that a more precise date is encoded (e.g. '1990 Mar 03').
This is bounded to years between 1800 and 2100.
Args:
text: a string to extract years from.
Returns:
a set of year strings.
"""
years: Set[str] = set()
for match in NUMBERS.finditer(text):
year = match.group()
number = int(year)
if number < 1800 or number > 2100:
            return []
years.add(year)
return list(years)
def check_no_year(text: Optional[str]) -> bool:
"""Check for a few formats in which dates are given as day/month, with no year
specified."""
if text is None:
return True
return len(extract_years(text)) == 0
def parse_date(
text: Optional[str], formats: Iterable[str], default: Optional[str] = None
) -> List[str]:
"""Parse a date two ways: first, try and apply a set of structured formats and
return a partial date if any of them parse correctly. Otherwise, apply
`extract_years` on the remaining string."""
if text is None:
return [default] if default is not None else []
parsed = parse_formats(text, formats)
if parsed.text is not None:
return [parsed.text]
years = extract_years(text)
if len(years):
return years
return [default or text]
@cache
def backdate(date: datetime, days: int) -> str:
"""Return a partial ISO8601 date string backdated by the number of days provided"""
dt = date - timedelta(days=days)
return dt.isoformat()[:10]
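# Illustrative examples (not part of the original module), exercising the
# helpers defined above:
def _dates_examples():
    assert extract_years("circa 1990") == ["1990"]
    assert extract_years("1990 Mar 03") == []  # "03" is not a year, so bail out
    assert check_no_year("24 March") is True
    assert parse_date("2021-05-01", ["%Y-%m-%d"]) == ["2021-05-01"]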
|
b9f6cca6f3cbbc29ac1e372adf23bac784d88a3f
|
b3950a2a6912c9b494d22b9353322c3357df0110
|
/tock/organizations/admin.py
|
9208e96de58775d86df781339c6d312007632bfd
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
18F/tock
|
df1fa5e817e690ce0bff315a15799e2f78915882
|
99005d8f6c4605a69fbb620c41f38447ecbee459
|
refs/heads/main
| 2023-08-31T01:34:55.299577
| 2023-08-23T18:49:10
| 2023-08-23T18:49:10
| 30,162,008
| 135
| 50
|
NOASSERTION
| 2023-09-07T18:40:30
| 2015-02-01T22:19:32
|
Python
|
UTF-8
|
Python
| false
| false
| 478
|
py
|
admin.py
|
from django.contrib import admin
from .models import Organization, Unit
class OrganizationAdmin(admin.ModelAdmin):
list_display = ('name', 'description', 'active',)
list_filter = ('active',)
search_fields = ('name',)
class UnitAdmin(admin.ModelAdmin):
list_display = ('name', 'description', 'org',)
list_filter = ('org', 'active',)
search_fields = ('name',)
admin.site.register(Organization, OrganizationAdmin)
admin.site.register(Unit, UnitAdmin)
|
47ac46862c1f3ec3ab34d2c21ad3d2cba21eaf4f
|
de5a4d1d7f49d22b62e26d9d38db5a7c49a6ccb9
|
/tests/test_api.py
|
23c18e1751623260881ff8c8d03a5a2aa36c07ca
|
[
"LicenseRef-scancode-proprietary-license",
"MIT-0"
] |
permissive
|
aws-samples/aws-aurora-serverless-data-api-sam
|
77dc858de6d01395d78f70053193c4f114e02a71
|
d80d6a2e6c767b4ce11eabf6e4ff746ec2995a79
|
refs/heads/master
| 2022-09-04T04:54:42.215880
| 2022-08-05T15:28:55
| 2022-08-05T15:28:55
| 177,176,982
| 112
| 35
|
MIT-0
| 2022-08-05T15:29:40
| 2019-03-22T16:44:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,001
|
py
|
test_api.py
|
'''
* Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this
* software and associated documentation files (the "Software"), to deal in the Software
* without restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
* INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os
import requests
import boto3
import pytest
import uuid
from http import HTTPStatus
def get_cfn_output(key, outputs):
result = [ v['OutputValue'] for v in outputs if v['OutputKey'] == key ]
return result[0] if len(result) > 0 else ''
@pytest.fixture(scope="module")
def api_endpoint():
cloudformation = boto3.resource('cloudformation')
api_stack_name = os.getenv('api_stack_name')
stack = cloudformation.Stack(api_stack_name)
return get_cfn_output('ApiEndpoint', stack.outputs)
@pytest.fixture()
def ec2_input_data():
return {
'instance_id': uuid.uuid4(),
'input_data': {
"aws_region": "us-east-1",
"aws_account": "123456789012",
"packages": [
{"package_name": "package-1", "package_version": "v1"},
{"package_name": "package-1", "package_version": "v2"},
{"package_name": "package-2", "package_version": "v1"}
]
}
}
# TODO: add_ec2* tests have side effects (they create DB records but do not delete them)
# TODO: Warm up Aurora Serverless with an initial request + sleep
def test_add_ec2_info_returns_expected_attributes(api_endpoint, ec2_input_data):
r = requests.post(f'{api_endpoint}/ec2/{ec2_input_data["instance_id"]}', json = ec2_input_data['input_data'])
assert HTTPStatus.OK == r.status_code
response = r.json()
assert 'new_record' in response
assert ec2_input_data['input_data']['aws_region'] == response['new_record']['aws_region']
assert ec2_input_data['input_data']['aws_account'] == response['new_record']['aws_account']
assert ec2_input_data['input_data']['packages'] == response['new_record']['packages']
def test_add_ec2_info_error_duplicate(api_endpoint, ec2_input_data):
r = requests.post(f'{api_endpoint}/ec2/{ec2_input_data["instance_id"]}', json = ec2_input_data['input_data'])
assert HTTPStatus.OK == r.status_code
r = requests.post(f'{api_endpoint}/ec2/{ec2_input_data["instance_id"]}', json = ec2_input_data['input_data'])
response = r.json()
    assert HTTPStatus.BAD_REQUEST == r.status_code
def test_add_ec2_info_invalid_input_field(api_endpoint):
r = requests.post(f'{api_endpoint}/ec2/{uuid.uuid4()}', json = {'invalid_field_name': 'any-value'})
    assert HTTPStatus.BAD_REQUEST == r.status_code
def test_get_ec2_info_record_found(api_endpoint, ec2_input_data):
r = requests.post(f'{api_endpoint}/ec2/{ec2_input_data["instance_id"]}', json = ec2_input_data['input_data'])
assert HTTPStatus.OK == r.status_code
r = requests.get(f'{api_endpoint}/ec2/{ec2_input_data["instance_id"]}')
assert r.status_code == HTTPStatus.OK
response = r.json()
assert True == response['record_found']
def test_get_ec2_info_record_not_found(api_endpoint):
instance_id = uuid.uuid4()
r = requests.get(f'{api_endpoint}/ec2/{instance_id}')
assert r.status_code == HTTPStatus.OK
response = r.json()
assert False == response['record_found']
|
d854e714c097c665d4082e7e6566d0113a9df949
|
5b0ff689a3e14f42bdf688864cae40c931a5f685
|
/msa/voto/controllers/voto.py
|
a9479d8645dc32d8badf443a2d1d52891311b967
|
[] |
no_license
|
prometheus-ar/vot.ar
|
cd7012f2792a2504fb7f0ee43796a197fc82bd28
|
72d8fa1ea08fe417b64340b98dff68df8364afdf
|
refs/heads/2017-ago-salta
| 2021-01-02T22:19:41.591077
| 2017-08-25T11:55:49
| 2017-08-25T11:55:49
| 37,735,555
| 171
| 110
| null | 2020-06-30T13:33:49
| 2015-06-19T17:15:52
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 26,862
|
py
|
voto.py
|
# -*- coding: utf-8 -*-
import os
from urllib2 import quote
from random import shuffle
from time import sleep
from zaguan.actions import BaseActionController
from zaguan import WebContainerController
from msa.constants import COD_LISTA_BLANCO
from msa.core import get_tipo_elec, get_config
from msa.core.clases import Seleccion
from msa.core.data import TemplateImpresion, TemplateMap
from msa.core.data.candidaturas import Categoria, Candidato, Lista, Partido, \
Alianza
from msa.core.data.settings import JUEGO_DE_DATOS
from msa.core.settings import USA_ARMVE, USAR_BUFFER_IMPRESION
from msa.helpers import cambiar_locale
from msa.voto.constants import BOTON_VOTAR_POR_CATEGORIAS, \
PANTALLA_CONSULTA, BOTON_LISTA_COMPLETA, NUMEROS_TEMPLATES, E_VOTANDO
from msa.voto.controllers.helpers import _image_name
from msa.voto.sesion import get_sesion
from msa.voto.settings import BOTONES_SELECCION_MODO, MOSTRAR_CURSOR, \
PATH_TEMPLATES_VOTO, EXT_IMG_VOTO, MEZCLAR_CANDIDATOS, MEZCLAR_LISTAS, \
MEZCLAR_INTERNAS, IDIOMAS_DISPONIBLES, EFECTOS_VOTO, FLAVOR, \
MEZCLAR_CONSULTA, AGRUPAR_POR_PARTIDO, BARRA_SELECCION, \
AGRUPAR_POR_ALIANZA
def solo_votando(func):
def _inner(self, *args, **kwargs):
if self.parent.estado == E_VOTANDO:
return func(self, *args, **kwargs)
else:
self.parent.rampa.maestro()
return _inner
class Actions(BaseActionController):
def document_ready(self, data):
self.controller.parent.rampa.expulsar_boleta()
self.controller.mostrar_loader()
def cargar_cache(self, data):
self.controller._precache_categorias()
self.controller._precache_generacion_img()
self.controller.ocultar_loader()
def inicializar_interfaz(self, data):
parent = self.controller.parent
parent.pantalla_insercion()
def cargar_categorias(self, data):
self.controller.cargar_categorias(force=True, force_cat=data[0])
def get_pantalla_voto(self, data):
self.controller.parent.get_pantalla_inicial_voto()
def get_partidos(self, data):
self.controller.set_pantalla_partidos()
def reiniciar_seleccion(self, data):
self.reiniciar_seleccion()
def prepara_impresion(self, data):
self.controller.prepara_impresion()
def imagen_consulta(self, data):
self.controller.imagen_consulta()
def dialogo(self, data):
self.controller.procesar_dialogo(data)
def log(self, data):
self.sesion.logger.debug("LOG >>> %s" % data)
class ControllerVoto(WebContainerController):
"""Controller para la interfaz web de voto."""
def __init__(self, parent):
super(ControllerVoto, self).__init__()
self.sesion = get_sesion()
self.parent = parent
self.agrupador = Alianza if AGRUPAR_POR_ALIANZA else Partido
self._cache_categorias = {}
self.agrupacion = None
self.precache_data()
self.add_processor("voto", Actions(self))
def mostrar_loader(self):
self.send_constants()
self.send_command("mostrar_loader")
def ocultar_loader(self):
self.send_command("ocultar_loader")
def precache_data(self):
classes = (Candidato, Categoria, Partido, Alianza, Lista,
TemplateImpresion, TemplateMap)
for class_ in classes:
class_.all()
@solo_votando
def confirmar_seleccion(self, data):
self.parent._guardar_voto()
self.reiniciar_seleccion()
def _get_categorias(self, consulta_popular=False, todas=False):
"""Devuelve las categorias para esta mesa y para esta partido en caso
de que sea una interna no PASO."""
if not get_tipo_elec("interna"):
filter = {'sorted': "posicion",
'consulta_popular': consulta_popular}
if not todas:
filter['adhiere'] = None
categorias = Categoria.many(**filter)
else:
candidatos = Candidato.many(cod_partido=self.agrupacion.codigo)
cod_cats = set([candidato.categoria.codigo for candidato in
candidatos])
filter = {'sorted': "posicion",
'consulta_popular': consulta_popular,
'codigo__in': cod_cats}
if not todas:
filter['adhiere'] = None
categorias = Categoria.all(**filter)
return categorias
def get_data_categorias(self, consulta_popular=False, todas=False):
"""Devuelve la informacion de las categorias y los candidatos elegidos
para cada una de ellas en caso de que los haya."""
categorias = self._get_categorias(consulta_popular, todas)
cat_list = []
for categoria in categorias:
candidatos = self.parent.seleccion.candidato_categoria(
categoria.codigo)
categoria_dict = categoria.to_dict()
            # if the category has at least one candidate selected
if candidatos is not None:
for candidato in candidatos:
                    # return one category per candidate; the JS side knows
                    # how to handle it from there.
candidato_dict = candidato.full_dict(_image_name)
cat_dict = {'categoria': categoria_dict,
'candidato': candidato_dict}
cat_list.append(cat_dict)
else:
cat_dict = {'categoria': categoria_dict,
'candidato': None}
cat_list.append(cat_dict)
return cat_list
@solo_votando
def cargar_categorias(self, force=False, force_cat=None):
"""Envia el comando de cargar categorias o el comando de mostrar
confirmacion dependiendo del contexto.
En caso de que haya una categoria que no se voto o se este modificando
una categoria envia el comando "cargar_categorias" en caso de que las
categorias ya esten llenas envia el comando de mostrar confirmacion.
"""
cat_list = self.get_data_categorias()
next_cat = self.get_next_cat()
run_command = True
if next_cat is None:
            # this is the case where no category is missing selected
            # candidates.
if force:
if force_cat is None:
                    # if everything is already filled and no category is
                    # forced, go to the first one
next_cat = self._get_categorias()[0].codigo
else:
                    # this is the case where we force a category
categoria = Categoria.one(force_cat)
if not categoria.consulta_popular:
                        # check whether the forced category adheres to
                        # another category and, if so, switch to the parent
                        # category.
madre = categoria.get_madre()
if madre is not None:
next_cat = madre.codigo
else:
next_cat = force_cat
else:
                        # if the forced category is a consulta_popular, load
                        # it in consulta_popular mode
self.mostrar_consulta_popular(force_cat)
run_command = False
else:
            # if there is no next category, we need to show the
            # confirmation screen
self.mostrar_confirmacion()
run_command = False
if run_command:
            # only reached when there is a next category that is not a
            # consulta popular
self.send_command("cargar_categorias", [cat_list, next_cat])
@solo_votando
def get_next_cat(self, consulta_popular=False):
"""Devuelve el codigo de la proxima categoria sin votos."""
ret = None
categorias = self._get_categorias(consulta_popular)
for categoria in categorias:
candidato = self.parent.seleccion.candidato_categoria(
categoria.codigo)
if candidato is None:
ret = categoria.codigo
break
return ret
def _precache_categorias(self):
sleep(0.1)
for categoria in Categoria.all():
self._get_candidatos_categoria(categoria.codigo, None)
def _precache_generacion_img(self):
        # Dummy image to pull in everything related to image generation
test_seleccion = Seleccion(self.sesion.mesa)
test_seleccion.rellenar_de_blanco()
test_seleccion.a_imagen(svg=True)
del test_seleccion
def _get_candidatos_categoria(self, cod_categoria, cod_partido):
key = (cod_categoria, cod_partido)
if key in self._cache_categorias:
cand_list = self._cache_categorias[key]
else:
categoria = Categoria.one(cod_categoria)
candidatos = categoria.candidatos(cod_partido, self.agrupador)
cand_list = [candidato.full_dict(_image_name) for candidato in
candidatos]
self._cache_categorias[key] = cand_list
return cand_list
@solo_votando
def cargar_candidatos(self, cod_categoria, cod_partido=None):
""""Envia los candidatos a la interfaz web."""
if self.agrupacion is not None and cod_partido is None:
cod_partido = self.agrupacion.codigo
cand_list = self._get_candidatos_categoria(cod_categoria, cod_partido)
if MEZCLAR_CANDIDATOS:
shuffle(cand_list)
        # if this is a PASO and there are more lists than allowed, group by
        # Partido or Alianza as appropriate
if cod_partido is None and get_tipo_elec("paso") and len(cand_list) > \
get_tipo_elec("colapsar_listas"):
partidos = self._get_partidos()
self.send_command("cargar_partido_categorias",
{'candidatos': cand_list,
'cod_categoria': cod_categoria,
'partidos': partidos,
'agrupador': self.agrupador.__name__.lower()})
else:
            # If there is a single candidate, select it and move on to the
            # next category; this can happen when the political organization
            # has only one list
if len(cand_list) == 1:
self.seleccionar_candidatos([cod_categoria,
[cand_list[0]['codigo']]])
else:
self.send_command("cargar_candidatos",
{'candidatos': cand_list,
'cod_categoria': cod_categoria})
@solo_votando
def mostrar_consulta_popular(self, cod_categoria):
candidatos = Candidato.principales(cod_categoria)
candidatos_dict = candidatos.full_dict(_image_name)
if MEZCLAR_CONSULTA:
shuffle(candidatos_dict)
self.send_command("cargar_consulta_popular", [candidatos_dict,
cod_categoria])
@solo_votando
def mostrar_confirmacion(self):
"""Envia el comando para mostrar confirmacion. En caso de haber
Consultas Populares disponibles en las que no hayamos votadonos la
va a mostrar.
"""
next_cat_consulta = None
consultas = self.get_data_categorias(consulta_popular=True)
if len(consultas):
next_cat_consulta = self.get_next_cat(consulta_popular=True)
if next_cat_consulta is not None:
self.mostrar_consulta_popular(next_cat_consulta)
else:
cat_list = self.get_data_categorias(todas=True)
cat_list += consultas
self.send_command("mostrar_confirmacion", cat_list)
def previsualizar_voto(self, data):
imagen = self.parent.seleccion.a_imagen(verificador=False,
solo_mostrar=True, svg=True)
image_data = quote(imagen.encode("utf-8"))
self.send_command("mostrar_voto", image_data)
@solo_votando
def prepara_impresion(self):
self.parent.registrador._prepara_impresion(self.parent.seleccion)
@solo_votando
def seleccionar_candidatos(self, data):
"""Selecciona el candidato y envia el comando para cargar las
categorias.
"""
cod_categoria, cod_candidatos = data
muchos_candidatos = len(cod_candidatos) > 1
if muchos_candidatos:
self.parent.seleccion.borrar_categoria(cod_categoria)
for elem in data[1]:
candidato = Candidato.one(elem)
self.parent.seleccion.elegir_candidato(candidato,
not muchos_candidatos)
categoria = Categoria.one(cod_categoria)
if categoria.consulta_popular:
self.mostrar_confirmacion()
else:
self.cargar_categorias()
@solo_votando
def seleccionar_partido(self, data):
"""Selecciona el partido y envia el comando para ver la pantalla de
modos.
"""
if data[1] is None:
self.agrupacion = self.agrupador.one(data[0])
if get_tipo_elec("paso"):
if data[1] is None:
self.cargar_listas()
else:
self.cargar_candidatos(data[1], data[0])
else:
self.get_pantalla_modos()
@solo_votando
def seleccionar_lista(self, data):
"""Selecciona la lista y envia el comando para ver la pantalla de
confirmacion.
"""
cod_lista, categoria_adhesion, cod_candidatos, es_ultima = data
if es_ultima or cod_lista == COD_LISTA_BLANCO:
lista = Lista.one(cod_lista)
for candidato in lista.candidatos:
if not candidato.categoria.consulta_popular:
self.parent.seleccion.elegir_candidato(candidato)
self.parent.seleccion.rellenar_de_blanco()
categorias = self.get_data_categorias()
self.send_command("actualizar_categorias", categorias)
self.mostrar_confirmacion()
else:
if cod_candidatos is None:
cod_candidatos = []
self.cargar_listas(cod_candidatos + [cod_lista],
categoria_adhesion)
@solo_votando
def seleccionar_modo(self, modo):
"""Envia el comando de cargar categorias o el de cargar listas
dependiendo del modo de votacion elegido.
"""
self.reiniciar_seleccion()
if get_tipo_elec("paso"):
self.agrupacion = None
if modo == BOTON_VOTAR_POR_CATEGORIAS:
self.cargar_categorias()
elif modo == BOTON_LISTA_COMPLETA:
if get_tipo_elec("paso"):
self.set_pantalla_partidos()
else:
self.cargar_listas()
@solo_votando
def seleccionar_idioma(self, idioma):
cambiar_locale(idioma)
self.send_constants()
if get_tipo_elec("interna"):
self.set_pantalla_partidos()
else:
self.get_pantalla_modos()
def _contiene(self, hash_lista, candidatos):
contiene = True
for i in range(len(candidatos)):
if candidatos[i] != hash_lista[i]:
contiene = False
break
return contiene
def _matchea_adhesiones(self, cod_candidatos, listas, cat):
lis_con_cand = []
for lista in listas:
if self._contiene(lista['hash'], cod_candidatos):
lis_con_cand.append(lista)
return lis_con_cand
def _cat_con_adh(self, listas, cat, search_cat, cod_candidatos,
repite=False):
if cat is not None and len(cod_candidatos):
lis_con_cand = self._matchea_adhesiones(cod_candidatos, listas,
cat)
listas = lis_con_cand
ids_lista = set()
listas_filtradas = []
for lista in listas:
for candidato in lista['candidatos']:
if candidato['cod_categoria'] == search_cat.codigo:
if candidato['codigo'] in ids_lista:
repite = True
else:
ids_lista.add(candidato['codigo'])
listas_filtradas.append(candidato)
next_search_cat = search_cat.next(consulta_popular=False)
if (len(listas_filtradas) == len(listas) or
len(listas_filtradas) < 2 or
get_tipo_elec("adh_segmentada_nivel") <= len(cod_candidatos)) and \
next_search_cat is not None:
repite, listas_filtradas, next_search_cat = \
self._cat_con_adh(listas, cat, next_search_cat, cod_candidatos,
repite)
return repite, listas_filtradas, next_search_cat
@solo_votando
def cargar_listas(self, cod_candidatos=None, cat=None):
"""Envia el comando para cargar las listas."""
if cod_candidatos is None:
cod_candidatos = []
cod_partido = self.agrupacion.codigo if self.agrupacion is not None \
else None
if cod_partido is None:
listas = Lista.all()
else:
listas = self.agrupador.one(cod_partido).listas
listas_dict = []
if MEZCLAR_LISTAS:
listas.shuffle()
for lista in listas:
candidatos = lista.candidatos
if len(candidatos):
hash_lista = [candidato.codigo for candidato in candidatos]
lista_dict = lista.to_dict()
lista_dict['hash'] = hash_lista
lista_dict['imagen'] = _image_name(lista.codigo)
lista_dict['candidatos'] = []
for candidato in candidatos:
candidato_dict = candidato.full_dict(_image_name)
candidato_dict['hash_lista'] = hash_lista
candidato_dict['categoria'] = candidato.categoria.nombre
lista_dict['candidatos'].append(candidato_dict)
listas_dict.append(lista_dict)
if get_tipo_elec("adh_segmentada"):
if cat is None:
search_cat = Categoria.one(sorted="posicion")
else:
cat = Categoria.one(cat)
search_cat = cat.next(consulta_popular=False)
repite, listas_filtradas, next_search_cat = \
self._cat_con_adh(listas_dict, cat, search_cat, cod_candidatos)
if repite and next_search_cat is not None:
listas_dict = listas_filtradas
ultima_cat = False
else:
ultima_cat = True
listas_finales = []
for lista in listas_dict:
for candidato in listas_filtradas:
if lista['hash'] == candidato['hash_lista']:
listas_finales.append(lista)
listas_dict = listas_finales
if len(listas_dict) > 1:
if get_tipo_elec("adh_segmentada") and repite and \
get_tipo_elec("adh_segmentada_nivel") > len(cod_candidatos):
self.send_command("cargar_adhesiones",
[listas_dict, search_cat.codigo,
cod_candidatos, ultima_cat])
else:
self.send_command("cargar_listas_params", [listas_dict, None,
cod_candidatos])
else:
self.seleccionar_lista([listas_dict[0]['codigo'], None, None,
True])
def consulta(self, seleccion_tag):
try:
self._datos_verificacion = seleccion_tag
imagen = self._datos_verificacion.a_imagen(verificador=False,
solo_mostrar=True,
svg=True)
self._imagen_verificacion = imagen
self.set_screen(PANTALLA_CONSULTA)
except AttributeError:
self.parent.rampa.expulsar_boleta()
def imagen_consulta(self):
        # Dear developer, resist the temptation to send this base64 encoded:
        # plain SVG is faster.
image_data = quote(self._imagen_verificacion.encode("utf-8"))
self.send_command("imagen_consulta", image_data)
self._imagen_verificacion = None
self._datos_verificacion = None
@solo_votando
def get_candidatos(self, data):
"""Devuelve los candidatos para la proxima categoria vacia. En caso de
estar llenos los candidatos muestra la confimacion.
"""
cod_categoria, revisando, partido = data
if not revisando:
if cod_categoria is None:
cod_categoria = self.get_next_cat()
if cod_categoria is None:
self.mostrar_confirmacion()
else:
self.cargar_candidatos(cod_categoria, partido)
else:
if cod_categoria is None:
cod_categoria = self._get_categorias()[0].codigo
self.cargar_candidatos(cod_categoria, partido)
def _get_partidos(self):
"""Devuelve las partidos."""
partidos = [agr.full_dict(_image_name) for agr in self.agrupador.all()
if not agr.es_blanco()]
if MEZCLAR_INTERNAS:
shuffle(partidos)
return partidos
@solo_votando
def get_pantalla_modos(self):
"""Devuelve la pantalla de modos. En caso de que las listas sean para
una sola categoria se saltea la pantalla de modos y directo va a
lista completa.
"""
if len(BOTONES_SELECCION_MODO) > 1 and len(self._get_categorias()) > 1:
self.send_command("pantalla_modos", BOTONES_SELECCION_MODO)
elif len(BOTONES_SELECCION_MODO) == 1 and \
BOTONES_SELECCION_MODO[0] == BOTON_LISTA_COMPLETA:
self.send_command("guardar_modo", BOTON_LISTA_COMPLETA)
self.send_command("set_unico_modo", True)
self.seleccionar_modo(BOTON_LISTA_COMPLETA)
else:
self.send_command("guardar_modo", BOTON_VOTAR_POR_CATEGORIAS)
self.send_command("set_unico_modo", True)
self.seleccionar_modo(BOTON_VOTAR_POR_CATEGORIAS)
@solo_votando
def set_pantalla_partidos(self):
"""Envia el comando para mostrar los botones para seleccionar la
partido.
"""
partidos = self._get_partidos()
listas = Lista.all()
if not get_tipo_elec("interna") and len(partidos) == len(listas):
self.cargar_listas()
else:
self.send_command("seleccion_partido", partidos)
@solo_votando
def set_pantalla_idiomas(self):
self.send_command("pantalla_idiomas", IDIOMAS_DISPONIBLES)
def procesar_dialogo(self, respuesta):
        if respuesta:
if self.callback_aceptar is not None:
self.callback_aceptar()
else:
if self.callback_cancelar is not None:
self.callback_cancelar()
def show_dialogo(self, mensaje, callback_cancelar=None,
callback_aceptar=None, btn_cancelar=False,
btn_aceptar=False):
self.callback_aceptar = callback_aceptar
self.callback_cancelar = callback_cancelar
dialogo = {"mensaje": mensaje,
"btn_aceptar": btn_aceptar,
"btn_cancelar": btn_cancelar}
self.send_command("show_dialogo", dialogo)
def hide_dialogo(self):
self.send_command("hide_dialogo")
def reiniciar_seleccion(self):
"""Resetea la seleccion. Elimina lo que el usuario eligió."""
self.parent.seleccion = Seleccion(self.sesion.mesa)
def send_constants(self):
"""Envia todas las constantes de la eleccion."""
constants_dict = get_constants(self.sesion.mesa.codigo)
self.send_command("set_constants", constants_dict)
def get_constants(ubicacion=None):
translations = (
"conformar_voto", "si",
"votar_por_categorias", "votar_lista_completa", "su_seleccion",
"votar_en_blanco", "confirmar_voto", "alto_contraste",
"introduzca_boleta", "si_tiene_dudas", "su_voto_impreso", "no",
"muchas_gracias", "puede_retirar_boleta", "si_desea_verificarlo",
"imprimiendo_voto", "no_retirar_boleta", "agradecimiento",
"este_es_su_voto", "volver_al_inicio", "aguarde_unos_minutos",
"seleccionar_idioma", "aceptar", "cancelar", "confirmar_seleccion",
"cargando_interfaz", "espere_por_favor", "verificando_voto")
encabezado = get_config('datos_eleccion')
constants_dict = {
"juego_de_datos": JUEGO_DE_DATOS,
"ubicacion": ubicacion,
"cod_lista_blanco": COD_LISTA_BLANCO,
"elecciones_internas": get_tipo_elec("interna"),
"elecciones_paso": get_tipo_elec("paso"),
"agrupar_por_partido": AGRUPAR_POR_PARTIDO,
"mostrar_cursor": MOSTRAR_CURSOR,
"encabezado": [(texto, encabezado[texto]) for texto in encabezado],
"i18n": [(trans, _(trans)) for trans in translations],
"palabra_lista": _("lista"),
"sus_candidatos": _("sus_candidatos"),
"candidato_no_seleccionado": _("candidato_no_seleccionado"),
"usa_armve": USA_ARMVE,
"ext_img_voto": EXT_IMG_VOTO,
"effects": EFECTOS_VOTO,
"flavor": FLAVOR,
"templates": get_templates(),
"numeros_templates": NUMEROS_TEMPLATES[FLAVOR],
"PATH_TEMPLATES_VOTO": "file:///%s/" % PATH_TEMPLATES_VOTO,
"ADHESION_SEGMENTADA": get_tipo_elec("adh_segmentada"),
"USAR_BUFFER_IMPRESION": USAR_BUFFER_IMPRESION,
"COLAPSAR_LISTAS_PASO": get_tipo_elec("colapsar_listas"),
"COLAPSAR_INTERNAS_PASO": get_tipo_elec("colapsar_partidos"),
"BARRA_SELECCION": BARRA_SELECCION,
"asistida": False,
}
return constants_dict
def get_templates():
templates = {}
template_names = ("candidato", "candidato_confirmacion", "categoria",
"lista", "partido")
for template in template_names:
file_name = "%s.html" % template
template_file = os.path.join("flavors", FLAVOR, file_name)
templates[template] = template_file
return templates
|
d0177891c27bae512256e1a1211a2a8d69e9e176
|
27b86f422246a78704e0e84983b2630533a47db6
|
/src/ezdxf/explode.py
|
0e57542d71da832527bd6489882ea42ed13ce4a7
|
[
"MIT"
] |
permissive
|
mozman/ezdxf
|
7512decd600896960660f0f580cab815bf0d7a51
|
ba6ab0264dcb6833173042a37b1b5ae878d75113
|
refs/heads/master
| 2023-09-01T11:55:13.462105
| 2023-08-15T11:50:05
| 2023-08-15T12:00:04
| 79,697,117
| 750
| 194
|
MIT
| 2023-09-14T09:40:41
| 2017-01-22T05:55:55
|
Python
|
UTF-8
|
Python
| false
| false
| 13,554
|
py
|
explode.py
|
# Copyright (c) 2020-2022, Manfred Moitzi
# License: MIT License
from __future__ import annotations
import logging
from typing import (
TYPE_CHECKING,
Iterable,
Callable,
Optional,
cast,
)
from ezdxf.lldxf import const
from ezdxf.entities import factory
from ezdxf.entities.boundary_paths import (
PolylinePath,
EdgePath,
LineEdge,
ArcEdge,
EllipseEdge,
SplineEdge,
)
from ezdxf.math import OCS, Vec3, ABS_TOL
from ezdxf.math.transformtools import (
NonUniformScalingError,
InsertTransformationError,
)
from ezdxf.query import EntityQuery
if TYPE_CHECKING:
from ezdxf.entities import (
DXFGraphic,
Insert,
Attrib,
Text,
LWPolyline,
)
from ezdxf.entities.polygon import DXFPolygon
from ezdxf.layouts import BaseLayout
logger = logging.getLogger("ezdxf")
__all__ = [
"virtual_block_reference_entities",
"virtual_boundary_path_entities",
"explode_block_reference",
"explode_entity",
"attrib_to_text",
]
def default_logging_callback(entity, reason):
logger.debug(
f'(Virtual Block Reference Entities) Ignoring {str(entity)}: "{reason}"'
)
def explode_block_reference(
block_ref: Insert,
target_layout: BaseLayout,
*,
redraw_order=False,
) -> EntityQuery:
"""Explode a block reference into DXF primitives.
Transforms the block entities into the required WCS location by applying the
block reference attributes `insert`, `extrusion`, `rotation` and the scaling
values `xscale`, `yscale` and `zscale`.
Returns an EntityQuery() container with all exploded DXF entities.
    Attached ATTRIB entities are converted to TEXT entities; this is the
    behavior of the BURST command of the AutoCAD Express Tools.
Args:
block_ref: Block reference entity (INSERT)
target_layout: explicit target layout for exploded DXF entities
redraw_order: create entities in ascending redraw order if ``True``
.. warning::
        **Non-uniform scaling** may lead to incorrect results for text entities
(TEXT, MTEXT, ATTRIB) and maybe some other entities.
(internal API)
"""
if target_layout is None:
raise const.DXFStructureError("Target layout is None.")
if block_ref.doc is None:
raise const.DXFStructureError(
"Block reference has to be assigned to a DXF document."
)
def _explode_single_block_ref(block_ref):
for entity in virtual_block_reference_entities(
block_ref, redraw_order=redraw_order
):
dxftype = entity.dxftype()
target_layout.add_entity(entity)
if dxftype == "DIMENSION":
# Render a graphical representation for each exploded DIMENSION
# entity as anonymous block.
cast("Dimension", entity).render()
entities.append(entity)
# Convert attached ATTRIB entities to TEXT entities:
# This is the behavior of the BURST command of the AutoCAD Express Tools
for attrib in block_ref.attribs:
# Attached ATTRIB entities are already located in the WCS
text = attrib_to_text(attrib)
target_layout.add_entity(text)
entities.append(text)
entitydb = block_ref.doc.entitydb
assert (
entitydb is not None
), "Exploding a block reference requires an entity database."
entities: list[DXFGraphic] = []
if block_ref.mcount > 1:
for virtual_insert in block_ref.multi_insert():
_explode_single_block_ref(virtual_insert)
else:
_explode_single_block_ref(block_ref)
source_layout = block_ref.get_layout()
if source_layout is not None:
# Remove and destroy exploded INSERT if assigned to a layout
source_layout.delete_entity(block_ref)
else:
entitydb.delete_entity(block_ref)
return EntityQuery(entities)
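# A minimal usage sketch (illustrative only, not part of this module; the
# input file name is an assumption):
#
#   import ezdxf
#   doc = ezdxf.readfile("blocks.dxf")
#   msp = doc.modelspace()
#   for insert in msp.query("INSERT"):
#       explode_block_reference(insert, target_layout=msp)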
IGNORE_FROM_ATTRIB = {
"version",
"prompt",
"tag",
"flags",
"field_length",
"lock_position",
"attribute_type",
}
def attrib_to_text(attrib: Attrib) -> Text:
dxfattribs = attrib.dxfattribs(drop=IGNORE_FROM_ATTRIB)
# ATTRIB has same owner as INSERT but does not reside in any EntitySpace()
    # and must not be deleted from any layout.
    # The new TEXT entity has the same handle as the replaced ATTRIB entity and replaces
# the ATTRIB entity in the database.
text = factory.new("TEXT", dxfattribs=dxfattribs)
if attrib.doc:
factory.bind(text, attrib.doc)
return cast("Text", text)
def virtual_block_reference_entities(
block_ref: Insert,
*,
skipped_entity_callback: Optional[Callable[[DXFGraphic, str], None]] = None,
redraw_order=False,
) -> Iterable[DXFGraphic]:
"""Yields 'virtual' parts of block reference `block_ref`. This method is meant
to examine the block reference entities without the need to explode the
block reference. The `skipped_entity_callback()` will be called for all
entities which are not processed, signature:
:code:`skipped_entity_callback(entity: DXFGraphic, reason: str)`,
`entity` is the original (untransformed) DXF entity of the block definition,
the `reason` string is an explanation why the entity was skipped.
These entities are located at the 'exploded' positions, but are not stored in
the entity database, have no handle and are not assigned to any layout.
Args:
block_ref: Block reference entity (INSERT)
skipped_entity_callback: called whenever the transformation of an entity
is not supported and so was skipped.
redraw_order: yield entities in ascending redraw order if ``True``
.. warning::
        **Non-uniform scaling** may lead to incorrect results for text entities
(TEXT, MTEXT, ATTRIB) and maybe some other entities.
(internal API)
"""
assert block_ref.dxftype() == "INSERT"
from ezdxf.entities import Ellipse
skipped_entity_callback = (
skipped_entity_callback or default_logging_callback
)
def disassemble(layout) -> Iterable[DXFGraphic]:
for entity in (
layout.entities_in_redraw_order() if redraw_order else layout
):
# Do not explode ATTDEF entities. Already available in Insert.attribs
if entity.dxftype() == "ATTDEF":
continue
try:
copy = entity.copy()
except const.DXFTypeError:
if hasattr(entity, "virtual_entities"):
yield from entity.virtual_entities()
else:
skipped_entity_callback(entity, "non copyable") # type: ignore
else:
if hasattr(copy, "remove_association"):
copy.remove_association()
yield copy
def transform(entities):
for entity in entities:
try:
entity.transform(m)
except NotImplementedError:
skipped_entity_callback(entity, "non transformable")
except NonUniformScalingError:
dxftype = entity.dxftype()
if dxftype in {"ARC", "CIRCLE"}:
if abs(entity.dxf.radius) > ABS_TOL:
yield Ellipse.from_arc(entity).transform(m)
else:
skipped_entity_callback(
entity, f"Invalid radius in entity {str(entity)}."
)
elif dxftype in {"LWPOLYLINE", "POLYLINE"}: # has arcs
yield from transform(entity.virtual_entities())
else:
skipped_entity_callback(
entity, "unsupported non-uniform scaling"
)
except InsertTransformationError:
# INSERT entity can not be represented in the target coordinate
# system defined by transformation matrix `m`.
# Yield transformed sub-entities of the INSERT entity:
yield from transform(
virtual_block_reference_entities(
entity, skipped_entity_callback=skipped_entity_callback
)
)
else:
yield entity
m = block_ref.matrix44()
block_layout = block_ref.block()
if block_layout is None:
raise const.DXFStructureError(
f'Required block definition for "{block_ref.dxf.name}" does not exist.'
)
yield from transform(disassemble(block_layout))
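# Sketch of a non-destructive inspection built on the generator above
# (``insert`` is an assumed INSERT entity; the virtual entities have no
# handle and belong to no layout, so the source document is left untouched):
#
#   def count_explodable_parts(insert):
#       return sum(1 for _ in virtual_block_reference_entities(insert))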
EXCLUDE_FROM_EXPLODE = {"POINT"}
def explode_entity(
entity: DXFGraphic, target_layout: Optional[BaseLayout] = None
) -> EntityQuery:
"""Explode parts of an entity as primitives into target layout, if target
layout is ``None``, the target layout is the layout of the source entity.
Returns an :class:`~ezdxf.query.EntityQuery` container with all DXF parts.
Args:
entity: DXF entity to explode, has to have a :meth:`virtual_entities()`
method
target_layout: target layout for DXF parts, ``None`` for same layout as
source entity
(internal API)
"""
dxftype = entity.dxftype()
virtual_entities = getattr(entity, "virtual_entities")
if virtual_entities is None or dxftype in EXCLUDE_FROM_EXPLODE:
raise const.DXFTypeError(f"Can not explode entity {dxftype}.")
if entity.doc is None:
raise const.DXFStructureError(
f"{dxftype} has to be assigned to a DXF document."
)
entitydb = entity.doc.entitydb
if entitydb is None:
raise const.DXFStructureError(
f"Exploding {dxftype} requires an entity database."
)
if target_layout is None:
target_layout = entity.get_layout()
if target_layout is None:
raise const.DXFStructureError(
f"{dxftype} without layout assignment, specify target layout."
)
entities = []
for e in virtual_entities():
target_layout.add_entity(e)
entities.append(e)
source_layout = entity.get_layout()
if source_layout is not None:
source_layout.delete_entity(entity)
else:
entitydb.delete_entity(entity)
return EntityQuery(entities)
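# Hedged example for explode_entity() (assumes ``pline`` is an LWPOLYLINE
# with bulge values that already resides in a layout):
#
#   parts = explode_entity(pline)  # pline is removed from its layout afterwards
#   lines, arcs = parts.query("LINE"), parts.query("ARC")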
def virtual_boundary_path_entities(
polygon: DXFPolygon,
) -> list[list[DXFGraphic]]:
from ezdxf.entities import LWPolyline
def polyline():
p = LWPolyline.new(dxfattribs=dict(graphic_attribs))
p.append_formatted_vertices(path.vertices, format="xyb")
p.dxf.extrusion = ocs.uz
p.dxf.elevation = elevation
p.closed = path.is_closed
return p
graphic_attribs = polygon.graphic_properties()
elevation = float(polygon.dxf.elevation.z)
ocs = polygon.ocs()
entities = []
for path in polygon.paths:
if isinstance(path, PolylinePath):
entities.append([polyline()])
elif isinstance(path, EdgePath):
entities.append(
_virtual_edge_path(path, dict(graphic_attribs), ocs, elevation)
)
return entities
def _virtual_edge_path(
path: EdgePath, dxfattribs, ocs: OCS, elevation: float
) -> list[DXFGraphic]:
from ezdxf.entities import Line, Arc, Ellipse, Spline
def pnt_to_wcs(v):
return ocs.to_wcs(Vec3(v).replace(z=elevation))
def dir_to_wcs(v):
return ocs.to_wcs(v)
edges: list[DXFGraphic] = []
for edge in path.edges:
attribs = dict(dxfattribs)
if isinstance(edge, LineEdge):
attribs["start"] = pnt_to_wcs(edge.start)
attribs["end"] = pnt_to_wcs(edge.end)
edges.append(Line.new(dxfattribs=attribs))
elif isinstance(edge, ArcEdge):
attribs["center"] = edge.center
attribs["radius"] = edge.radius
attribs["elevation"] = elevation
# Arcs angles are always stored in counter-clockwise orientation
# around the extrusion vector!
attribs["start_angle"] = edge.start_angle
attribs["end_angle"] = edge.end_angle
attribs["extrusion"] = ocs.uz
edges.append(Arc.new(dxfattribs=attribs))
elif isinstance(edge, EllipseEdge):
attribs["center"] = pnt_to_wcs(edge.center)
attribs["major_axis"] = dir_to_wcs(edge.major_axis)
attribs["ratio"] = edge.ratio
# Ellipse angles are always stored in counter-clockwise orientation
# around the extrusion vector!
attribs["start_param"] = edge.start_param
attribs["end_param"] = edge.end_param
attribs["extrusion"] = ocs.uz
edges.append(Ellipse.new(dxfattribs=attribs))
elif isinstance(edge, SplineEdge):
spline = Spline.new(dxfattribs=attribs)
spline.dxf.degree = edge.degree
spline.knots = edge.knot_values
spline.control_points = [pnt_to_wcs(v) for v in edge.control_points]
if edge.weights:
spline.weights = edge.weights
if edge.fit_points:
spline.fit_points = [pnt_to_wcs(v) for v in edge.fit_points]
if edge.start_tangent is not None:
spline.dxf.start_tangent = dir_to_wcs(edge.start_tangent)
if edge.end_tangent is not None:
spline.dxf.end_tangent = dir_to_wcs(edge.end_tangent)
edges.append(spline)
return edges
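# Sketch for virtual_boundary_path_entities() (``hatch`` is an assumed
# DXFPolygon/HATCH and ``msp`` a target layout): every boundary path yields a
# list of LWPOLYLINE or LINE/ARC/ELLIPSE/SPLINE entities located in the WCS.
#
#   for path_entities in virtual_boundary_path_entities(hatch):
#       for e in path_entities:
#           msp.add_entity(e)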
|
67b23a234e5c270b539a1870cfe26ecdd058396f
|
e78c8e9aa0468e5d0cd8f33fbc2f09f7a580feeb
|
/tasks/multi_length_sequences.py
|
12b54e6b64bdf30fa3e84acced6d03260ef79d61
|
[
"MIT"
] |
permissive
|
philipperemy/keras-tcn
|
213b3219d765490a141bb490cc671b06fcf045bf
|
f612f6e4a4320d168655c5c6c437d666aa4695a1
|
refs/heads/master
| 2023-09-03T10:09:28.794148
| 2023-08-08T04:48:33
| 2023-08-08T04:48:33
| 126,269,318
| 1,792
| 465
|
MIT
| 2023-08-07T08:32:29
| 2018-03-22T02:40:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,416
|
py
|
multi_length_sequences.py
|
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tcn import TCN
# If you increase the sequence length, make sure the receptive field of the TCN is big enough.
MAX_TIME_STEP = 30
"""
Input: sequence of length 7
Input: sequence of length 25
Input: sequence of length 29
Input: sequence of length 21
Input: sequence of length 20
Input: sequence of length 13
Input: sequence of length 9
Input: sequence of length 7
Input: sequence of length 4
Input: sequence of length 14
Input: sequence of length 10
Input: sequence of length 11
...
"""
def get_x_y(max_time_steps):
for k in range(int(1e9)):
time_steps = np.random.choice(range(1, max_time_steps), size=1)[0]
if k % 2 == 0:
x_train = np.expand_dims([np.insert(np.zeros(shape=(time_steps, 1)), 0, 1)], axis=-1)
y_train = [1]
else:
x_train = np.array([np.zeros(shape=(time_steps, 1))])
y_train = [0]
if k % 100 == 0:
print(f'({k}) Input: sequence of length {time_steps}.')
yield x_train, np.expand_dims(y_train, axis=-1)
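# Shape check for one generated pair (a sketch; the exact time_steps value is
# random): positive samples come out as (1, time_steps + 1, 1) because
# np.insert flattens the zeros and prepends the 1-marker, negative samples as
# (1, time_steps, 1), and the label as (1, 1).
#
#   x, y = next(get_x_y(MAX_TIME_STEP))
#   print(x.shape, y.shape)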
m = Sequential([
TCN(input_shape=(None, 1)),
Dense(1, activation='sigmoid')
])
m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
gen = get_x_y(max_time_steps=MAX_TIME_STEP)
m.fit(gen, epochs=1, steps_per_epoch=1000, max_queue_size=1, verbose=2)
|
6312bef192c427cae15926d64697a5016f7852fd
|
7343ece3b82ac87a594865c4074623b45b0297b4
|
/synapse/storage/databases/main/transactions.py
|
efd21b5bfceb32ba182eb029c7ad0e13071abe34
|
[
"Apache-2.0"
] |
permissive
|
matrix-org/synapse
|
a00111f83310783b78e2996557f8bbae4d9fb229
|
d35bed8369514fe727b4fe1afb68f48cc8b2655a
|
refs/heads/develop
| 2023-09-05T05:24:20.808942
| 2023-09-04T16:14:09
| 2023-09-04T16:14:09
| 22,844,864
| 12,215
| 2,869
|
Apache-2.0
| 2023-09-14T15:20:48
| 2014-08-11T15:51:42
|
Python
|
UTF-8
|
Python
| false
| false
| 20,420
|
py
|
transactions.py
|
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from enum import Enum
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, cast
import attr
from canonicaljson import encode_canonical_json
from synapse.api.constants import Direction
from synapse.metrics.background_process_metrics import wrap_as_background_process
from synapse.storage._base import db_to_json
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
)
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
from synapse.types import JsonDict, StrCollection
from synapse.util.caches.descriptors import cached, cachedList
if TYPE_CHECKING:
from synapse.server import HomeServer
db_binary_type = memoryview
logger = logging.getLogger(__name__)
class DestinationSortOrder(Enum):
"""Enum to define the sorting method used when returning destinations."""
DESTINATION = "destination"
RETRY_LAST_TS = "retry_last_ts"
RETTRY_INTERVAL = "retry_interval"
FAILURE_TS = "failure_ts"
LAST_SUCCESSFUL_STREAM_ORDERING = "last_successful_stream_ordering"
@attr.s(slots=True, frozen=True, auto_attribs=True)
class DestinationRetryTimings:
"""The current destination retry timing info for a remote server."""
# The first time we tried and failed to reach the remote server, in ms.
failure_ts: int
# The last time we tried and failed to reach the remote server, in ms.
retry_last_ts: int
# How long since the last time we tried to reach the remote server before
# trying again, in ms.
retry_interval: int
class TransactionWorkerStore(CacheInvalidationWorkerStore):
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
if hs.config.worker.run_background_tasks:
self._clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000)
@wrap_as_background_process("cleanup_transactions")
async def _cleanup_transactions(self) -> None:
now = self._clock.time_msec()
month_ago = now - 30 * 24 * 60 * 60 * 1000
def _cleanup_transactions_txn(txn: LoggingTransaction) -> None:
txn.execute("DELETE FROM received_transactions WHERE ts < ?", (month_ago,))
await self.db_pool.runInteraction(
"_cleanup_transactions", _cleanup_transactions_txn
)
async def get_received_txn_response(
self, transaction_id: str, origin: str
) -> Optional[Tuple[int, JsonDict]]:
"""For an incoming transaction from a given origin, check if we have
already responded to it. If so, return the response code and response
body (as a dict).
Args:
transaction_id
origin
Returns:
None if we have not previously responded to this transaction or a
2-tuple of (int, dict)
"""
return await self.db_pool.runInteraction(
"get_received_txn_response",
self._get_received_txn_response,
transaction_id,
origin,
)
def _get_received_txn_response(
self, txn: LoggingTransaction, transaction_id: str, origin: str
) -> Optional[Tuple[int, JsonDict]]:
result = self.db_pool.simple_select_one_txn(
txn,
table="received_transactions",
keyvalues={"transaction_id": transaction_id, "origin": origin},
retcols=(
"transaction_id",
"origin",
"ts",
"response_code",
"response_json",
"has_been_referenced",
),
allow_none=True,
)
if result and result["response_code"]:
return result["response_code"], db_to_json(result["response_json"])
else:
return None
async def set_received_txn_response(
self, transaction_id: str, origin: str, code: int, response_dict: JsonDict
) -> None:
"""Persist the response we returned for an incoming transaction, and
should return for subsequent transactions with the same transaction_id
and origin.
Args:
transaction_id: The incoming transaction ID.
origin: The origin server.
code: The response code.
response_dict: The response, to be encoded into JSON.
"""
await self.db_pool.simple_upsert(
table="received_transactions",
keyvalues={
"transaction_id": transaction_id,
"origin": origin,
},
values={},
insertion_values={
"response_code": code,
"response_json": db_binary_type(encode_canonical_json(response_dict)),
"ts": self._clock.time_msec(),
},
desc="set_received_txn_response",
)
@cached(max_entries=10000)
async def get_destination_retry_timings(
self,
destination: str,
) -> Optional[DestinationRetryTimings]:
"""Gets the current retry timings (if any) for a given destination.
Args:
destination (str)
Returns:
None if not retrying
            Otherwise a DestinationRetryTimings with the retry scheme
"""
result = await self.db_pool.runInteraction(
"get_destination_retry_timings",
self._get_destination_retry_timings,
destination,
)
return result
def _get_destination_retry_timings(
self, txn: LoggingTransaction, destination: str
) -> Optional[DestinationRetryTimings]:
result = self.db_pool.simple_select_one_txn(
txn,
table="destinations",
keyvalues={"destination": destination},
retcols=("failure_ts", "retry_last_ts", "retry_interval"),
allow_none=True,
)
# check we have a row and retry_last_ts is not null or zero
# (retry_last_ts can't be negative)
if result and result["retry_last_ts"]:
return DestinationRetryTimings(**result)
else:
return None
@cachedList(
cached_method_name="get_destination_retry_timings", list_name="destinations"
)
async def get_destination_retry_timings_batch(
self, destinations: StrCollection
) -> Dict[str, Optional[DestinationRetryTimings]]:
rows = await self.db_pool.simple_select_many_batch(
table="destinations",
iterable=destinations,
column="destination",
retcols=("destination", "failure_ts", "retry_last_ts", "retry_interval"),
desc="get_destination_retry_timings_batch",
)
return {
row.pop("destination"): DestinationRetryTimings(**row)
for row in rows
if row["retry_last_ts"] and row["failure_ts"] and row["retry_interval"]
}
async def set_destination_retry_timings(
self,
destination: str,
failure_ts: Optional[int],
retry_last_ts: int,
retry_interval: int,
) -> None:
"""Sets the current retry timings for a given destination.
Both timings should be zero if retrying is no longer occurring.
Args:
destination
failure_ts: when the server started failing (ms since epoch)
retry_last_ts: time of last retry attempt in unix epoch ms
retry_interval: how long until next retry in ms
"""
await self.db_pool.runInteraction(
"set_destination_retry_timings",
self._set_destination_retry_timings_txn,
destination,
failure_ts,
retry_last_ts,
retry_interval,
db_autocommit=True, # Safe as it's a single upsert
)
def _set_destination_retry_timings_txn(
self,
txn: LoggingTransaction,
destination: str,
failure_ts: Optional[int],
retry_last_ts: int,
retry_interval: int,
) -> None:
# Upsert retry time interval if retry_interval is zero (i.e. we're
# resetting it) or greater than the existing retry interval.
#
# WARNING: This is executed in autocommit, so we shouldn't add any more
# SQL calls in here (without being very careful).
sql = """
INSERT INTO destinations (
destination, failure_ts, retry_last_ts, retry_interval
)
VALUES (?, ?, ?, ?)
ON CONFLICT (destination) DO UPDATE SET
failure_ts = EXCLUDED.failure_ts,
retry_last_ts = EXCLUDED.retry_last_ts,
retry_interval = EXCLUDED.retry_interval
WHERE
EXCLUDED.retry_interval = 0
OR EXCLUDED.retry_last_ts = 0
OR destinations.retry_interval IS NULL
OR destinations.retry_interval < EXCLUDED.retry_interval
OR destinations.retry_last_ts < EXCLUDED.retry_last_ts
"""
txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval))
self._invalidate_cache_and_stream(
txn, self.get_destination_retry_timings, (destination,)
)
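    # Worked example of the conditional upsert above (illustrative numbers):
    # stored retry_interval = 60_000, incoming retry_interval = 120_000 with a
    # newer retry_last_ts -> the WHERE clause matches and the row is updated;
    # incoming retry_interval = 30_000 with an older retry_last_ts -> no
    # condition matches, so a stale writer can never shrink the backoff.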
async def store_destination_rooms_entries(
self,
destinations: Iterable[str],
room_id: str,
stream_ordering: int,
) -> None:
"""
Updates or creates `destination_rooms` entries in batch for a single event.
Args:
destinations: list of destinations
room_id: the room_id of the event
stream_ordering: the stream_ordering of the event
"""
await self.db_pool.simple_upsert_many(
table="destinations",
key_names=("destination",),
key_values=[(d,) for d in destinations],
value_names=[],
value_values=[],
desc="store_destination_rooms_entries_dests",
)
rows = [(destination, room_id) for destination in destinations]
await self.db_pool.simple_upsert_many(
table="destination_rooms",
key_names=("destination", "room_id"),
key_values=rows,
value_names=["stream_ordering"],
value_values=[(stream_ordering,)] * len(rows),
desc="store_destination_rooms_entries_rooms",
)
async def get_destination_last_successful_stream_ordering(
self, destination: str
) -> Optional[int]:
"""
Gets the stream ordering of the PDU most-recently successfully sent
to the specified destination, or None if this information has not been
tracked yet.
Args:
destination: the destination to query
"""
return await self.db_pool.simple_select_one_onecol(
"destinations",
{"destination": destination},
"last_successful_stream_ordering",
allow_none=True,
desc="get_last_successful_stream_ordering",
)
async def set_destination_last_successful_stream_ordering(
self, destination: str, last_successful_stream_ordering: int
) -> None:
"""
Marks that we have successfully sent the PDUs up to and including the
one specified.
Args:
destination: the destination we have successfully sent to
last_successful_stream_ordering: the stream_ordering of the most
recent successfully-sent PDU
"""
await self.db_pool.simple_upsert(
"destinations",
keyvalues={"destination": destination},
values={"last_successful_stream_ordering": last_successful_stream_ordering},
desc="set_last_successful_stream_ordering",
)
async def get_catch_up_room_event_ids(
self,
destination: str,
last_successful_stream_ordering: int,
) -> List[str]:
"""
Returns at most 50 event IDs and their corresponding stream_orderings
that correspond to the oldest events that have not yet been sent to
the destination.
Args:
destination: the destination in question
last_successful_stream_ordering: the stream_ordering of the
most-recently successfully-transmitted event to the destination
Returns:
list of event_ids
"""
return await self.db_pool.runInteraction(
"get_catch_up_room_event_ids",
self._get_catch_up_room_event_ids_txn,
destination,
last_successful_stream_ordering,
)
@staticmethod
def _get_catch_up_room_event_ids_txn(
txn: LoggingTransaction,
destination: str,
last_successful_stream_ordering: int,
) -> List[str]:
q = """
SELECT event_id FROM destination_rooms
JOIN events USING (stream_ordering)
WHERE destination = ?
AND stream_ordering > ?
ORDER BY stream_ordering
LIMIT 50
"""
txn.execute(
q,
(destination, last_successful_stream_ordering),
)
event_ids = [row[0] for row in txn]
return event_ids
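    # Hedged usage sketch (``store`` is an assumed TransactionWorkerStore):
    #
    #   event_ids = await store.get_catch_up_room_event_ids(
    #       "remote.example.org", last_successful_stream_ordering=1234
    #   )
    #   # At most 50 of the oldest unsent events, in stream order.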
async def get_catch_up_outstanding_destinations(
self, after_destination: Optional[str]
) -> List[str]:
"""
Gets at most 25 destinations which have outstanding PDUs to be caught up,
and are not being backed off from
Args:
after_destination:
If provided, all destinations must be lexicographically greater
than this one.
Returns:
list of up to 25 destinations with outstanding catch-up.
These are the lexicographically first destinations which are
lexicographically greater than after_destination (if provided).
"""
time = self.hs.get_clock().time_msec()
return await self.db_pool.runInteraction(
"get_catch_up_outstanding_destinations",
self._get_catch_up_outstanding_destinations_txn,
time,
after_destination,
)
@staticmethod
def _get_catch_up_outstanding_destinations_txn(
txn: LoggingTransaction, now_time_ms: int, after_destination: Optional[str]
) -> List[str]:
q = """
SELECT DISTINCT destination FROM destinations
INNER JOIN destination_rooms USING (destination)
WHERE
stream_ordering > last_successful_stream_ordering
AND destination > ?
AND (
retry_last_ts IS NULL OR
retry_last_ts + retry_interval < ?
)
ORDER BY destination
LIMIT 25
"""
txn.execute(
q,
(
# everything is lexicographically greater than "" so this gives
# us the first batch of up to 25.
after_destination or "",
now_time_ms,
),
)
destinations = [row[0] for row in txn]
return destinations
async def get_destinations_paginate(
self,
start: int,
limit: int,
destination: Optional[str] = None,
order_by: str = DestinationSortOrder.DESTINATION.value,
direction: Direction = Direction.FORWARDS,
) -> Tuple[List[JsonDict], int]:
"""Function to retrieve a paginated list of destinations.
This will return a json list of destinations and the
total number of destinations matching the filter criteria.
Args:
start: start number to begin the query from
limit: number of rows to retrieve
destination: search string in destination
order_by: the sort order of the returned list
direction: sort ascending or descending
Returns:
A tuple of a list of mappings from destination to information
and a count of total destinations.
"""
def get_destinations_paginate_txn(
txn: LoggingTransaction,
) -> Tuple[List[JsonDict], int]:
order_by_column = DestinationSortOrder(order_by).value
if direction == Direction.BACKWARDS:
order = "DESC"
else:
order = "ASC"
args: List[object] = []
where_statement = ""
if destination:
args.extend(["%" + destination.lower() + "%"])
where_statement = "WHERE LOWER(destination) LIKE ?"
sql_base = f"FROM destinations {where_statement} "
sql = f"SELECT COUNT(*) as total_destinations {sql_base}"
txn.execute(sql, args)
count = cast(Tuple[int], txn.fetchone())[0]
sql = f"""
SELECT destination, retry_last_ts, retry_interval, failure_ts,
last_successful_stream_ordering
{sql_base}
ORDER BY {order_by_column} {order}, destination ASC
LIMIT ? OFFSET ?
"""
txn.execute(sql, args + [limit, start])
destinations = self.db_pool.cursor_to_dict(txn)
return destinations, count
return await self.db_pool.runInteraction(
"get_destinations_paginate_txn", get_destinations_paginate_txn
)
async def get_destination_rooms_paginate(
self,
destination: str,
start: int,
limit: int,
direction: Direction = Direction.FORWARDS,
) -> Tuple[List[JsonDict], int]:
"""Function to retrieve a paginated list of destination's rooms.
This will return a json list of rooms and the
total number of rooms.
Args:
destination: the destination to query
start: start number to begin the query from
limit: number of rows to retrieve
direction: sort ascending or descending by room_id
Returns:
A tuple of a dict of rooms and a count of total rooms.
"""
def get_destination_rooms_paginate_txn(
txn: LoggingTransaction,
) -> Tuple[List[JsonDict], int]:
if direction == Direction.BACKWARDS:
order = "DESC"
else:
order = "ASC"
sql = """
SELECT COUNT(*) as total_rooms
FROM destination_rooms
WHERE destination = ?
"""
txn.execute(sql, [destination])
count = cast(Tuple[int], txn.fetchone())[0]
rooms = self.db_pool.simple_select_list_paginate_txn(
txn=txn,
table="destination_rooms",
orderby="room_id",
start=start,
limit=limit,
retcols=("room_id", "stream_ordering"),
order_direction=order,
)
return rooms, count
return await self.db_pool.runInteraction(
"get_destination_rooms_paginate_txn", get_destination_rooms_paginate_txn
)
async def is_destination_known(self, destination: str) -> bool:
"""Check if a destination is known to the server."""
result = await self.db_pool.simple_select_one_onecol(
table="destinations",
keyvalues={"destination": destination},
retcol="1",
allow_none=True,
desc="is_destination_known",
)
return bool(result)
|
083479bbc1849f0663934bbfe1e03af69dd0fb8c
|
6647c484a6601f70dd348076c484843807238ddf
|
/solutionbox/ml_workbench/test_tensorflow/test_training.py
|
976d4fae35fe1e8b1d5a53e54f541013a8a35a6a
|
[
"Apache-2.0"
] |
permissive
|
googledatalab/pydatalab
|
43624c271e25edfd97ac0ecf39ec4f55e9ad27b2
|
8bf007da3e43096aa3a3dca158fc56b286ba6f5c
|
refs/heads/master
| 2022-09-13T10:23:59.112507
| 2022-09-02T21:16:23
| 2022-09-02T21:16:23
| 58,776,721
| 200
| 91
|
Apache-2.0
| 2023-03-28T20:55:15
| 2016-05-13T22:42:57
|
Python
|
UTF-8
|
Python
| false
| false
| 38,306
|
py
|
test_training.py
|
from __future__ import absolute_import
import base64
import glob
import json
import logging
import os
import pandas as pd
from PIL import Image
import random
import shutil
from six.moves.urllib.request import urlopen
import subprocess
import sys
import tempfile
import unittest
import tensorflow as tf
from tensorflow.python.lib.io import file_io
CODE_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', 'tensorflow'))
def run_exported_model(model_path, csv_data):
"""Runs an exported model.
Model should have one placeholder of csv data.
Args:
model_path: path to the saved_model.pb
csv_data: list of csv strings
Return:
The result of session.run
"""
with tf.Graph().as_default(), tf.Session() as sess:
meta_graph_pb = tf.saved_model.loader.load(
sess=sess,
tags=[tf.saved_model.tag_constants.SERVING],
export_dir=model_path)
signature = meta_graph_pb.signature_def['serving_default']
input_alias_map = {
friendly_name: tensor_info_proto.name
for (friendly_name, tensor_info_proto) in signature.inputs.items()}
output_alias_map = {
friendly_name: tensor_info_proto.name
for (friendly_name, tensor_info_proto) in signature.outputs.items()}
    _, csv_tensor_name = next(iter(input_alias_map.items()))
result = sess.run(fetches=output_alias_map,
feed_dict={csv_tensor_name: csv_data})
return result
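# Hedged usage sketch for run_exported_model (the path and csv row are
# assumptions; the csv columns must match the exported model's input spec):
#
#   result = run_exported_model(
#       model_path='/tmp/training/model',
#       csv_data=['20,hello moon'])
#   print(result['predicted'])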
class TestSpecialCharacters(unittest.TestCase):
"""Test special characters are supported."""
def testCommaQuote(self):
"""Test when csv input data has quotes and commas."""
output_dir = tempfile.mkdtemp()
try:
features = {
'target': {'transform': 'target'},
'cat': {'transform': 'one_hot'},
'text': {'transform': 'bag_of_words'}}
schema = [
{'name': 'target', 'type': 'string'},
{'name': 'cat', 'type': 'string'},
{'name': 'text', 'type': 'string'}]
# Target column = cat column
data = [{'cat': 'red,', 'text': 'one, two, three', 'target': 'red,'},
{'cat': 'blue"', 'text': 'one, "two"', 'target': 'blue"'},
{'cat': '"green"', 'text': '"two', 'target': '"green"'},
{'cat': "yellow, 'brown", 'text': "'one, two'", 'target': "yellow, 'brown"}]
file_io.recursive_create_dir(output_dir)
file_io.write_string_to_file(os.path.join(output_dir, 'schema.json'),
json.dumps(schema, indent=2))
file_io.write_string_to_file(os.path.join(output_dir, 'features.json'),
json.dumps(features, indent=2))
file_io.write_string_to_file(
os.path.join(output_dir, 'data.csv'),
pd.DataFrame(data, columns=['target', 'cat', 'text']).to_csv(index=False, header=False))
# Run analysis and check the output vocabs are correctly encoded in csv
cmd = ['python %s' % os.path.join(CODE_PATH, 'analyze.py'),
'--output=' + os.path.join(output_dir, 'analysis'),
'--csv=' + os.path.join(output_dir, 'data.csv'),
'--schema=' + os.path.join(output_dir, 'schema.json'),
'--features=' + os.path.join(output_dir, 'features.json')]
subprocess.check_call(' '.join(cmd), shell=True)
df_vocab_cat = pd.read_csv(
os.path.join(output_dir, 'analysis', 'vocab_cat.csv'),
header=None,
names=['label', 'count'],
dtype=str,
na_filter=False)
self.assertEqual(df_vocab_cat['count'].tolist(), ['1', '1', '1', '1'])
self.assertItemsEqual(
df_vocab_cat['label'].tolist(),
['blue"', '"green"', "yellow, 'brown", 'red,'])
df_vocab_target = pd.read_csv(
os.path.join(output_dir, 'analysis', 'vocab_target.csv'),
header=None,
names=['label', 'count'],
dtype=str,
na_filter=False)
self.assertEqual(df_vocab_target['count'].tolist(), ['1', '1', '1', '1'])
self.assertItemsEqual(
df_vocab_target['label'].tolist(),
['blue"', '"green"', "yellow, 'brown", 'red,'])
df_vocab_text = pd.read_csv(
os.path.join(output_dir, 'analysis', 'vocab_text.csv'),
header=None,
names=['label', 'count'],
dtype=str,
na_filter=False)
vocab_text = df_vocab_text['label'].tolist()
self.assertEqual(vocab_text[0], 'one,')
self.assertItemsEqual(vocab_text[1:], ['two,', '"two"', "'one,", '"two', "two'", 'three'])
vocab_count = df_vocab_text['count'].tolist()
self.assertEqual(vocab_count[0], '2')
self.assertEqual(vocab_count[1:], ['1', '1', '1', '1', '1', '1'])
# Run transform, and check there are no reported errors.
cmd = ['python %s' % os.path.join(CODE_PATH, 'transform.py'),
'--csv=' + os.path.join(output_dir, 'data.csv'),
'--analysis=' + os.path.join(output_dir, 'analysis'),
'--prefix=features_train',
'--output=' + os.path.join(output_dir, 'transform')]
subprocess.check_call(' '.join(cmd), shell=True)
error_files = glob.glob(os.path.join(output_dir, 'transform', 'error*'))
self.assertEqual(1, len(error_files))
self.assertEqual(0, os.path.getsize(error_files[0]))
# Run training
cmd = ['cd %s && ' % CODE_PATH,
'python -m trainer.task',
'--train=' + os.path.join(output_dir, 'data.csv'),
'--eval=' + os.path.join(output_dir, 'data.csv'),
'--job-dir=' + os.path.join(output_dir, 'training'),
'--analysis=' + os.path.join(output_dir, 'analysis'),
'--model=linear_classification',
'--train-batch-size=4',
'--eval-batch-size=4',
'--max-steps=500',
'--learning-rate=1.0',
'--transform']
subprocess.check_call(' '.join(cmd), shell=True)
result = run_exported_model(
model_path=os.path.join(output_dir, 'training', 'model'),
csv_data=['"red,","one, two, three"'])
      # The prediction data is a training row. As the data is small, the model
# should have near 100% accuracy. Check it made the correct prediction.
self.assertEqual(result['predicted'], 'red,')
finally:
shutil.rmtree(output_dir)
class TestClassificationTopN(unittest.TestCase):
"""Test top_n works."""
def testTopNZero(self):
"""Test top_n=0 gives all the classes."""
output_dir = tempfile.mkdtemp()
try:
features = {
'num': {'transform': 'identity'},
'target': {'transform': 'target'}}
schema = [
{'name': 'num', 'type': 'integer'},
{'name': 'target', 'type': 'string'}]
data = ['1,1\n', '4,2\n', '5,3\n', '11,1\n']
file_io.recursive_create_dir(output_dir)
file_io.write_string_to_file(os.path.join(output_dir, 'schema.json'),
json.dumps(schema, indent=2))
file_io.write_string_to_file(os.path.join(output_dir, 'features.json'),
json.dumps(features, indent=2))
file_io.write_string_to_file(os.path.join(output_dir, 'data.csv'),
''.join(data))
cmd = ['python %s' % os.path.join(CODE_PATH, 'analyze.py'),
'--output=' + os.path.join(output_dir, 'analysis'),
'--csv=' + os.path.join(output_dir, 'data.csv'),
'--schema=' + os.path.join(output_dir, 'schema.json'),
'--features=' + os.path.join(output_dir, 'features.json')]
subprocess.check_call(' '.join(cmd), shell=True)
cmd = ['cd %s && ' % CODE_PATH,
'python -m trainer.task',
'--train=' + os.path.join(output_dir, 'data.csv'),
'--eval=' + os.path.join(output_dir, 'data.csv'),
'--job-dir=' + os.path.join(output_dir, 'training'),
'--analysis=' + os.path.join(output_dir, 'analysis'),
'--model=linear_classification',
'--train-batch-size=4',
'--eval-batch-size=4',
'--max-steps=1',
'--top-n=0', # This parameter is tested in this test!
'--learning-rate=0.1',
'--transform']
subprocess.check_call(' '.join(cmd), shell=True)
result = run_exported_model(
model_path=os.path.join(output_dir, 'training', 'model'),
csv_data=['20'])
keys = result.keys()
self.assertIn('predicted', keys)
self.assertIn('1', keys)
self.assertIn('2', keys)
self.assertIn('3', keys)
finally:
shutil.rmtree(output_dir)
class TestMultipleFeatures(unittest.TestCase):
"""Test one source column can be used in many features."""
def testMultipleColumnsRaw(self):
"""Test training starting from raw csv."""
output_dir = tempfile.mkdtemp()
try:
features = {
'num': {'transform': 'identity'},
'num2': {'transform': 'key', 'source_column': 'num'},
'target': {'transform': 'target'},
'text': {'transform': 'bag_of_words'},
'text2': {'transform': 'multi_hot', 'source_column': 'text'},
'text3': {'transform': 'tfidf', 'source_column': 'text'},
'text4': {'transform': 'key', 'source_column': 'text'}}
schema = [
{'name': 'num', 'type': 'integer'},
{'name': 'target', 'type': 'float'},
{'name': 'text', 'type': 'string'}]
data = ['1,2,hello world\n', '4,8,bye moon\n', '5,10,hello moon\n', '11,22,moon moon\n']
file_io.recursive_create_dir(output_dir)
file_io.write_string_to_file(os.path.join(output_dir, 'schema.json'),
json.dumps(schema, indent=2))
file_io.write_string_to_file(os.path.join(output_dir, 'features.json'),
json.dumps(features, indent=2))
file_io.write_string_to_file(os.path.join(output_dir, 'data.csv'),
''.join(data))
cmd = ['python %s' % os.path.join(CODE_PATH, 'analyze.py'),
'--output=' + os.path.join(output_dir, 'analysis'),
'--csv=' + os.path.join(output_dir, 'data.csv'),
'--schema=' + os.path.join(output_dir, 'schema.json'),
'--features=' + os.path.join(output_dir, 'features.json')]
subprocess.check_call(' '.join(cmd), shell=True)
cmd = ['cd %s && ' % CODE_PATH,
'python -m trainer.task',
'--train=' + os.path.join(output_dir, 'data.csv'),
'--eval=' + os.path.join(output_dir, 'data.csv'),
'--job-dir=' + os.path.join(output_dir, 'training'),
'--analysis=' + os.path.join(output_dir, 'analysis'),
'--model=linear_regression',
'--train-batch-size=4',
'--eval-batch-size=4',
'--max-steps=200',
'--learning-rate=0.1',
'--transform']
subprocess.check_call(' '.join(cmd), shell=True)
result = run_exported_model(
model_path=os.path.join(output_dir, 'training', 'model'),
csv_data=['20,hello moon'])
# check keys were made
self.assertEqual(20, result['num2'])
self.assertEqual('hello moon', result['text4'])
finally:
shutil.rmtree(output_dir)
def testMultipleColumnsTransformed(self):
"""Test training starting from tf.example."""
output_dir = tempfile.mkdtemp()
try:
features = {
'num': {'transform': 'identity'},
'num2': {'transform': 'key', 'source_column': 'num'},
'target': {'transform': 'target'},
'text': {'transform': 'bag_of_words'},
'text2': {'transform': 'multi_hot', 'source_column': 'text'},
'text3': {'transform': 'tfidf', 'source_column': 'text'},
'text4': {'transform': 'key', 'source_column': 'text'}}
schema = [
{'name': 'num', 'type': 'integer'},
{'name': 'target', 'type': 'float'},
{'name': 'text', 'type': 'string'}]
data = ['1,2,hello world\n', '4,8,bye moon\n', '5,10,hello moon\n', '11,22,moon moon\n']
file_io.recursive_create_dir(output_dir)
file_io.write_string_to_file(os.path.join(output_dir, 'schema.json'),
json.dumps(schema, indent=2))
file_io.write_string_to_file(os.path.join(output_dir, 'features.json'),
json.dumps(features, indent=2))
file_io.write_string_to_file(os.path.join(output_dir, 'data.csv'),
''.join(data))
cmd = ['python %s' % os.path.join(CODE_PATH, 'analyze.py'),
'--output=' + os.path.join(output_dir, 'analysis'),
'--csv=' + os.path.join(output_dir, 'data.csv'),
'--schema=' + os.path.join(output_dir, 'schema.json'),
'--features=' + os.path.join(output_dir, 'features.json')]
subprocess.check_call(' '.join(cmd), shell=True)
cmd = ['python %s' % os.path.join(CODE_PATH, 'transform.py'),
'--output=' + os.path.join(output_dir, 'transform'),
'--csv=' + os.path.join(output_dir, 'data.csv'),
'--analysis=' + os.path.join(output_dir, 'analysis'),
'--prefix=features']
subprocess.check_call(' '.join(cmd), shell=True)
# Check tf.example file has the expected features
file_list = file_io.get_matching_files(os.path.join(output_dir, 'transform', 'features*'))
options = tf.python_io.TFRecordOptions(
compression_type=tf.python_io.TFRecordCompressionType.GZIP)
record_iter = tf.python_io.tf_record_iterator(path=file_list[0], options=options)
tf_example = tf.train.Example()
tf_example.ParseFromString(next(record_iter))
self.assertEqual(1, len(tf_example.features.feature['num'].int64_list.value))
self.assertEqual(1, len(tf_example.features.feature['num2'].int64_list.value))
self.assertEqual(1, len(tf_example.features.feature['target'].float_list.value))
self.assertEqual(2, len(tf_example.features.feature['text_ids'].int64_list.value))
self.assertEqual(2, len(tf_example.features.feature['text_weights'].float_list.value))
self.assertEqual(2, len(tf_example.features.feature['text2'].int64_list.value))
self.assertEqual(2, len(tf_example.features.feature['text3_ids'].int64_list.value))
self.assertEqual(2, len(tf_example.features.feature['text3_weights'].float_list.value))
self.assertEqual(1, len(tf_example.features.feature['text4'].bytes_list.value))
cmd = ['cd %s && ' % CODE_PATH,
'python -m trainer.task',
'--train=' + os.path.join(output_dir, 'transform', 'features*'),
'--eval=' + os.path.join(output_dir, 'transform', 'features*'),
'--job-dir=' + os.path.join(output_dir, 'training'),
'--analysis=' + os.path.join(output_dir, 'analysis'),
'--model=linear_regression',
'--train-batch-size=4',
'--eval-batch-size=4',
'--max-steps=200',
'--learning-rate=0.1']
subprocess.check_call(' '.join(cmd), shell=True)
result = run_exported_model(
model_path=os.path.join(output_dir, 'training', 'model'),
csv_data=['20,hello moon'])
# check keys were made
self.assertEqual(20, result['num2'])
self.assertEqual('hello moon', result['text4'])
finally:
shutil.rmtree(output_dir)
class TestOptionalKeys(unittest.TestCase):
def testNoKeys(self):
output_dir = tempfile.mkdtemp()
try:
features = {
'num': {'transform': 'identity'},
'target': {'transform': 'target'}}
schema = [
{'name': 'num', 'type': 'integer'},
{'name': 'target', 'type': 'float'}]
data = ['1,2\n', '4,8\n', '5,10\n', '11,22\n']
file_io.recursive_create_dir(output_dir)
file_io.write_string_to_file(os.path.join(output_dir, 'schema.json'),
json.dumps(schema, indent=2))
file_io.write_string_to_file(os.path.join(output_dir, 'features.json'),
json.dumps(features, indent=2))
file_io.write_string_to_file(os.path.join(output_dir, 'data.csv'),
''.join(data))
cmd = ['python %s' % os.path.join(CODE_PATH, 'analyze.py'),
'--output=' + os.path.join(output_dir, 'analysis'),
'--csv=' + os.path.join(output_dir, 'data.csv'),
'--schema=' + os.path.join(output_dir, 'schema.json'),
'--features=' + os.path.join(output_dir, 'features.json')]
subprocess.check_call(' '.join(cmd), shell=True)
cmd = ['cd %s && ' % CODE_PATH,
'python -m trainer.task',
'--train=' + os.path.join(output_dir, 'data.csv'),
'--eval=' + os.path.join(output_dir, 'data.csv'),
'--job-dir=' + os.path.join(output_dir, 'training'),
'--analysis=' + os.path.join(output_dir, 'analysis'),
'--model=linear_regression',
'--train-batch-size=4',
'--eval-batch-size=4',
'--max-steps=2000',
'--learning-rate=0.1',
'--transform']
subprocess.check_call(' '.join(cmd), shell=True)
result = run_exported_model(
model_path=os.path.join(output_dir, 'training', 'model'),
csv_data=['20'])
self.assertTrue(abs(40 - result['predicted']) < 5)
finally:
shutil.rmtree(output_dir)
def testManyKeys(self):
output_dir = tempfile.mkdtemp()
try:
features = {
'keyint': {'transform': 'key'},
'keyfloat': {'transform': 'key'},
'keystr': {'transform': 'key'},
'num': {'transform': 'identity'},
'target': {'transform': 'target'}}
schema = [
{'name': 'keyint', 'type': 'integer'},
{'name': 'keyfloat', 'type': 'float'},
{'name': 'keystr', 'type': 'string'},
{'name': 'num', 'type': 'integer'},
{'name': 'target', 'type': 'float'}]
data = ['1,1.5,one,1,2\n', '2,2.5,two,4,8\n', '3,3.5,three,5,10\n']
file_io.recursive_create_dir(output_dir)
file_io.write_string_to_file(os.path.join(output_dir, 'schema.json'),
json.dumps(schema, indent=2))
file_io.write_string_to_file(os.path.join(output_dir, 'features.json'),
json.dumps(features, indent=2))
file_io.write_string_to_file(os.path.join(output_dir, 'data.csv'),
''.join(data))
cmd = ['python %s' % os.path.join(CODE_PATH, 'analyze.py'),
'--output=' + os.path.join(output_dir, 'analysis'),
'--csv=' + os.path.join(output_dir, 'data.csv'),
'--schema=' + os.path.join(output_dir, 'schema.json'),
'--features=' + os.path.join(output_dir, 'features.json')]
subprocess.check_call(' '.join(cmd), shell=True)
cmd = ['cd %s && ' % CODE_PATH,
'python -m trainer.task',
'--train=' + os.path.join(output_dir, 'data.csv'),
'--eval=' + os.path.join(output_dir, 'data.csv'),
'--job-dir=' + os.path.join(output_dir, 'training'),
'--analysis=' + os.path.join(output_dir, 'analysis'),
'--model=linear_regression',
'--train-batch-size=4',
'--eval-batch-size=4',
'--max-steps=2000',
'--transform']
subprocess.check_call(' '.join(cmd), shell=True)
result = run_exported_model(
model_path=os.path.join(output_dir, 'training', 'model'),
csv_data=['7,4.5,hello,1'])
self.assertEqual(7, result['keyint'])
self.assertAlmostEqual(4.5, result['keyfloat'])
self.assertEqual('hello', result['keystr'])
finally:
shutil.rmtree(output_dir)
class TestTrainer(unittest.TestCase):
"""Tests training.
Runs analyze.py and transform.py on generated test data. Also loads
the exported graphs and checks they run. No validation of the test results is
done (i.e., the training loss is not checked).
"""
def __init__(self, *args, **kwargs):
super(TestTrainer, self).__init__(*args, **kwargs)
# Allow this class to be subclassed for quick tests that only care about
# training working, not model loss/accuracy.
self._max_steps = 2000
self._check_model_fit = True
# Log everything
self._logger = logging.getLogger('TestStructuredDataLogger')
self._logger.setLevel(logging.DEBUG)
if not self._logger.handlers:
self._logger.addHandler(logging.StreamHandler(stream=sys.stdout))
def setUp(self):
self._test_dir = tempfile.mkdtemp()
self._analysis_output = os.path.join(self._test_dir, 'analysis_output')
self._transform_output = os.path.join(self._test_dir, 'transform_output')
self._train_output = os.path.join(self._test_dir, 'train_output')
file_io.recursive_create_dir(self._analysis_output)
file_io.recursive_create_dir(self._transform_output)
file_io.recursive_create_dir(self._train_output)
self._csv_train_filename = os.path.join(self._test_dir, 'train_csv_data.csv')
self._csv_eval_filename = os.path.join(self._test_dir, 'eval_csv_data.csv')
self._csv_predict_filename = os.path.join(self._test_dir, 'predict_csv_data.csv')
self._schema_filename = os.path.join(self._test_dir, 'schema_file.json')
self._features_filename = os.path.join(self._test_dir, 'features_file.json')
def tearDown(self):
self._logger.debug('TestTrainer: removing test dir ' + self._test_dir)
shutil.rmtree(self._test_dir)
def make_image_files(self):
img1_file = os.path.join(self._test_dir, 'img1.jpg')
image1 = Image.new('RGB', size=(300, 300), color=(155, 0, 0))
image1.save(img1_file)
img2_file = os.path.join(self._test_dir, 'img2.jpg')
image2 = Image.new('RGB', size=(50, 50), color=(125, 240, 0))
image2.save(img2_file)
img3_file = os.path.join(self._test_dir, 'img3.jpg')
image3 = Image.new('RGB', size=(800, 600), color=(33, 55, 77))
image3.save(img3_file)
self._image_files = [img1_file, img2_file, img3_file]
def make_csv_data(self, filename, num_rows, problem_type, keep_target=True, with_image=False):
"""Writes csv data for preprocessing and training.
There is one csv column for each supported transform.
Args:
filename: writes data to local csv file.
num_rows: how many rows of data will be generated.
problem_type: 'classification' or 'regression'. Changes the target value.
      keep_target: if false, the csv file will have an empty column ',,' for the
          target.
      with_image: if true, a column with a local image file path is appended
          to each row.
    """
random.seed(12321)
def _drop_out(x):
# Make 5% of the data missing
if random.uniform(0, 1) < 0.05:
return ''
return x
with open(filename, 'w') as f:
for i in range(num_rows):
num_id = random.randint(0, 20)
num_scale = random.uniform(0, 30)
str_one_hot = random.choice(['red', 'blue', 'green', 'pink', 'yellow',
'brown', 'black'])
str_embedding = random.choice(['abc', 'def', 'ghi', 'jkl', 'mno', 'pqr'])
def _word_fn():
return random.choice(['car', 'truck', 'van', 'bike', 'train', 'drone'])
str_bow = [_word_fn() for _ in range(random.randint(1, 4))]
str_tfidf = [_word_fn() for _ in range(random.randint(1, 4))]
color_map = {'red': 2, 'blue': 6, 'green': 4, 'pink': -5, 'yellow': -6,
'brown': -1, 'black': -7}
abc_map = {'abc': -1, 'def': -1, 'ghi': 1, 'jkl': 1, 'mno': 2, 'pqr': 1}
transport_map = {'car': 5, 'truck': 10, 'van': 15, 'bike': 20,
'train': -25, 'drone': -30}
        # Build some model: t is the dependent variable
t = 0.5 + 0.5 * num_id - 2.5 * num_scale
t += color_map[str_one_hot]
t += abc_map[str_embedding]
t += sum([transport_map[x] for x in str_bow])
t += sum([transport_map[x] * 0.5 for x in str_tfidf])
if problem_type == 'classification':
          # If you change the weights above or add more columns, look at the new
# distribution of t values and try to divide them into 3 buckets.
if t < -40:
t = 100
elif t < 0:
t = 101
else:
t = 102
str_bow = ' '.join(str_bow)
str_tfidf = ' '.join(str_tfidf)
if with_image:
img_url = random.choice(self._image_files)
img_url = _drop_out(img_url)
num_id = _drop_out(num_id)
num_scale = _drop_out(num_scale)
str_one_hot = _drop_out(str_one_hot)
str_embedding = _drop_out(str_embedding)
str_bow = _drop_out(str_bow)
str_tfidf = _drop_out(str_tfidf)
if keep_target:
if with_image:
csv_line = "{key},{target},{num_id},{num_scale},{str_one_hot},{str_embedding},{str_bow},{str_tfidf},{img_url}\n".format( # noqa
key=i,
target=t,
num_id=num_id,
num_scale=num_scale,
str_one_hot=str_one_hot,
str_embedding=str_embedding,
str_bow=str_bow,
str_tfidf=str_tfidf,
img_url=img_url)
else:
csv_line = "{key},{target},{num_id},{num_scale},{str_one_hot},{str_embedding},{str_bow},{str_tfidf}\n".format( # noqa
key=i,
target=t,
num_id=num_id,
num_scale=num_scale,
str_one_hot=str_one_hot,
str_embedding=str_embedding,
str_bow=str_bow,
str_tfidf=str_tfidf)
else:
if with_image:
csv_line = "{key},{num_id},{num_scale},{str_one_hot},{str_embedding},{str_bow},{str_tfidf},{img_url}\n".format( # noqa
key=i,
num_id=num_id,
num_scale=num_scale,
str_one_hot=str_one_hot,
str_embedding=str_embedding,
str_bow=str_bow,
str_tfidf=str_tfidf,
img_url=img_url)
else:
csv_line = "{key},{num_id},{num_scale},{str_one_hot},{str_embedding},{str_bow},{str_tfidf}\n".format( # noqa
key=i,
num_id=num_id,
num_scale=num_scale,
str_one_hot=str_one_hot,
str_embedding=str_embedding,
str_bow=str_bow,
str_tfidf=str_tfidf)
f.write(csv_line)
def _create_schema_features(self, problem_type, with_image=False):
features = {
'num_id': {'transform': 'identity'},
'num_scale': {'transform': 'scale', 'value': 4},
'str_one_hot': {'transform': 'one_hot'},
'str_embedding': {'transform': 'embedding', 'embedding_dim': 3},
'str_bow': {'transform': 'bag_of_words'},
'str_tfidf': {'transform': 'tfidf'},
'target': {'transform': 'target'},
'key': {'transform': 'key'}}
if with_image:
# Download the inception checkpoint. Note that a gs:// url doesn't work
# because gcloud may not be signed in when running the test.
url = ('https://storage.googleapis.com/cloud-ml-data/img/' +
'flower_photos/inception_v3_2016_08_28.ckpt')
checkpoint_path = os.path.join(self._test_dir, "checkpoint")
response = urlopen(url)
with open(checkpoint_path, 'wb') as f:
f.write(response.read())
features['image'] = {'transform': 'image_to_vec', 'checkpoint': checkpoint_path}
schema = [
{'name': 'key', 'type': 'integer'},
{'name': 'target', 'type': 'string' if problem_type == 'classification' else 'float'},
{'name': 'num_id', 'type': 'integer'},
{'name': 'num_scale', 'type': 'float'},
{'name': 'str_one_hot', 'type': 'string'},
{'name': 'str_embedding', 'type': 'string'},
{'name': 'str_bow', 'type': 'string'},
{'name': 'str_tfidf', 'type': 'string'}]
if with_image:
schema.append({'name': 'image', 'type': 'string'})
self._schema = schema
file_io.write_string_to_file(self._schema_filename, json.dumps(schema, indent=2))
file_io.write_string_to_file(self._features_filename, json.dumps(features, indent=2))
if with_image:
self.make_image_files()
self.make_csv_data(self._csv_train_filename, 50, problem_type, True, with_image)
self.make_csv_data(self._csv_eval_filename, 30, problem_type, True, with_image)
self.make_csv_data(self._csv_predict_filename, 10, problem_type, False, with_image)
def _run_analyze(self, problem_type, with_image=False):
self._create_schema_features(problem_type, with_image=with_image)
cmd = ['python %s' % os.path.join(CODE_PATH, 'analyze.py'),
'--output=' + self._analysis_output,
'--csv=' + self._csv_train_filename,
'--schema=' + self._schema_filename,
'--features=' + self._features_filename]
subprocess.check_call(' '.join(cmd), shell=True)
def _run_transform(self):
cmd = ['python %s' % os.path.join(CODE_PATH, 'transform.py'),
'--csv=' + self._csv_train_filename,
'--analysis=' + self._analysis_output,
'--prefix=features_train',
'--output=' + self._transform_output,
'--shuffle']
self._logger.debug('Running subprocess: %s \n\n' % ' '.join(cmd))
subprocess.check_call(' '.join(cmd), shell=True)
cmd = ['python %s' % os.path.join(CODE_PATH, 'transform.py'),
'--csv=' + self._csv_eval_filename,
'--analysis=' + self._analysis_output,
'--prefix=features_eval',
'--output=' + self._transform_output]
self._logger.debug('Running subprocess: %s \n\n' % ' '.join(cmd))
subprocess.check_call(' '.join(cmd), shell=True)
def _run_training_transform(self, problem_type, model_type, extra_args=[]):
"""Runs training starting with transformed tf.example files.
Args:
problem_type: 'regression' or 'classification'
model_type: 'linear' or 'dnn'
extra_args: list of strings to pass to the trainer.
"""
cmd = ['cd %s && ' % CODE_PATH,
'python -m trainer.task',
'--train=' + os.path.join(self._transform_output, 'features_train*'),
'--eval=' + os.path.join(self._transform_output, 'features_eval*'),
'--job-dir=' + self._train_output,
'--analysis=' + self._analysis_output,
'--model=%s_%s' % (model_type, problem_type),
'--train-batch-size=100',
'--eval-batch-size=50',
'--max-steps=' + str(self._max_steps)] + extra_args
self._logger.debug('Running subprocess: %s \n\n' % ' '.join(cmd))
subprocess.check_call(' '.join(cmd), shell=True)
def _run_training_raw(self, problem_type, model_type, extra_args=[]):
"""Runs training starting from raw csv data.
Args:
problem_type: 'regression' or 'classification'
model_type: 'linear' or 'dnn'
extra_args: list of strings to pass to the trainer.
"""
cmd = ['cd %s && ' % CODE_PATH,
'python -m trainer.task',
'--train=' + self._csv_train_filename,
'--eval=' + self._csv_eval_filename,
'--job-dir=' + self._train_output,
'--analysis=' + self._analysis_output,
'--model=%s_%s' % (model_type, problem_type),
'--train-batch-size=100',
'--eval-batch-size=50',
'--max-steps=' + str(self._max_steps),
'--transform'] + extra_args
self._logger.debug('Running subprocess: %s \n\n' % ' '.join(cmd))
subprocess.check_call(' '.join(cmd), shell=True)
def _run_training_with_analysis(self, problem_type, model_type, extra_args=[]):
"""Runs training starting from raw csv data.
Args:
problem_type: 'regression' or 'classification'
model_type: 'linear' or 'dnn'
extra_args: list of strings to pass to the trainer.
"""
cmd = ['cd %s && ' % CODE_PATH,
'python -m trainer.task',
'--train=' + self._csv_train_filename,
'--eval=' + self._csv_eval_filename,
'--job-dir=' + self._train_output,
'--model=%s_%s' % (model_type, problem_type),
'--train-batch-size=100',
'--eval-batch-size=50',
'--max-steps=' + str(self._max_steps),
'--features=' + self._features_filename,
'--schema=' + self._schema_filename,
'--transform'] + extra_args
self._logger.debug('Running subprocess: %s \n\n' % ' '.join(cmd))
subprocess.check_call(' '.join(cmd), shell=True)
def _check_model(self, problem_type, model_type, with_image=False):
"""Checks that both exported prediction graphs work."""
for has_target in [True, False]:
if has_target:
model_path = os.path.join(self._train_output, 'evaluation_model')
else:
model_path = os.path.join(self._train_output, 'model')
self._logger.debug('Checking model %s %s at %s' % (problem_type, model_type, model_path))
# Check there is a saved model.
self.assertTrue(os.path.isfile(os.path.join(model_path, 'saved_model.pb')))
# Must create new graphs as multiple graphs are loaded into memory.
with tf.Graph().as_default(), tf.Session() as sess:
meta_graph_pb = tf.saved_model.loader.load(
sess=sess,
tags=[tf.saved_model.tag_constants.SERVING],
export_dir=model_path)
signature = meta_graph_pb.signature_def['serving_default']
input_alias_map = {
friendly_name: tensor_info_proto.name
for (friendly_name, tensor_info_proto) in signature.inputs.items()}
output_alias_map = {
friendly_name: tensor_info_proto.name
for (friendly_name, tensor_info_proto) in signature.outputs.items()}
prediction_data = {
'key': [12, 11],
'target': [-49, -9] if problem_type == 'regression' else ['100', '101'],
'num_id': [11, 10],
'num_scale': [22.29, 5.20],
'str_one_hot': ['brown', 'brown'],
'str_embedding': ['def', 'def'],
'str_bow': ['drone', 'drone truck bike truck'],
'str_tfidf': ['bike train train car', 'train']}
if with_image:
image_bytes = []
for image_file in [self._image_files[0], self._image_files[2]]:
with file_io.FileIO(image_file, 'rb') as ff:
image_bytes.append(base64.urlsafe_b64encode(ff.read()))
prediction_data.update({'image': image_bytes})
# Convert the prediction data to csv.
csv_header = [col['name']
for col in self._schema
if (has_target or col['name'] != 'target')]
if not has_target:
del prediction_data['target']
csv_data = []
for i in range(2):
data = [str(prediction_data[name][i]) for name in csv_header]
csv_data.append(','.join(data))
# Test the *_alias_maps have the expected keys
expected_output_keys = ['predicted', 'key']
if has_target:
expected_output_keys.append('target')
if problem_type == 'classification':
expected_output_keys.extend(
['probability', 'probability_2', 'probability_3', 'predicted_2', 'predicted_3'])
self.assertEqual(1, len(input_alias_map.keys()))
self.assertItemsEqual(expected_output_keys, output_alias_map.keys())
_, csv_tensor_name = input_alias_map.items()[0]
result = sess.run(fetches=output_alias_map,
feed_dict={csv_tensor_name: csv_data})
self.assertItemsEqual(expected_output_keys, result.keys())
self.assertEqual([12, 11], result['key'].flatten().tolist())
def testClassificationLinear(self):
self._logger.debug('\n\nTesting Classification Linear')
problem_type = 'classification'
model_type = 'linear'
self._run_analyze(problem_type)
self._run_training_raw(
problem_type=problem_type,
model_type=model_type,
extra_args=['--top-n=3'])
self._check_model(
problem_type=problem_type,
model_type=model_type)
def testRegressionLinear(self):
self._logger.debug('\n\nTesting Regression Linear')
problem_type = 'regression'
model_type = 'linear'
self._run_analyze(problem_type)
self._run_transform()
self._run_training_transform(
problem_type=problem_type,
model_type=model_type)
self._check_model(
problem_type=problem_type,
model_type=model_type)
def testRegressionDNN(self):
self._logger.debug('\n\nTesting Regression DNN')
problem_type = 'regression'
model_type = 'dnn'
self._run_analyze(problem_type)
self._run_training_raw(
problem_type=problem_type,
model_type=model_type,
extra_args=['--top-n=3',
'--hidden-layer-size1=10',
'--hidden-layer-size2=2'])
self._check_model(
problem_type=problem_type,
model_type=model_type)
def testClassificationDNNWithImage(self):
self._logger.debug('\n\nTesting Classification DNN With Image')
problem_type = 'classification'
model_type = 'dnn'
self._run_analyze(problem_type, with_image=True)
self._run_transform()
self._run_training_transform(
problem_type=problem_type,
model_type=model_type,
extra_args=['--top-n=3',
'--hidden-layer-size1=10'])
self._check_model(
problem_type=problem_type,
model_type=model_type,
with_image=True)
def testTrainingWithAnalysis(self):
self._logger.debug('\n\nTesting Training with Analysis')
self._create_schema_features('classification')
self._run_training_with_analysis(
problem_type='classification',
model_type='linear',
extra_args=['--top-n=3'])
self._check_model(
problem_type='classification',
model_type='linear')
if __name__ == '__main__':
unittest.main()
|
94cc40e21a79530d355de72cb7eb5fa2321c79ce
|
18e7a4e8005cfd514b6c624133db172172857713
|
/im2im_pred/model_segnet_mtan.py
|
a565f911ea0f986b7f00bc781cf190f5522215a0
|
[
"MIT"
] |
permissive
|
lorenmt/mtan
|
4eb3be0b5bfbf1c303f6c85eabacba6dd1bd09ed
|
c36c30baa18968dec74fe9039abcfd4f132edfa1
|
refs/heads/master
| 2022-07-29T04:57:48.453497
| 2022-02-07T21:46:43
| 2022-02-07T21:46:43
| 164,354,872
| 623
| 120
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,114
|
py
|
model_segnet_mtan.py
|
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import argparse
import torch.utils.data.sampler as sampler
from create_dataset import *
from utils import *
parser = argparse.ArgumentParser(description='Multi-task: Attention Network')
parser.add_argument('--weight', default='equal', type=str, help='multi-task weighting: equal, uncert, dwa')
parser.add_argument('--dataroot', default='nyuv2', type=str, help='dataset root')
parser.add_argument('--temp', default=2.0, type=float, help='temperature for DWA (must be positive)')
parser.add_argument('--apply_augmentation', action='store_true', help='toggle to apply data augmentation on NYUv2')
opt = parser.parse_args()
class SegNet(nn.Module):
def __init__(self):
super(SegNet, self).__init__()
# initialise network parameters
filter = [64, 128, 256, 512, 512]
self.class_nb = 13
# define encoder decoder layers
self.encoder_block = nn.ModuleList([self.conv_layer([3, filter[0]])])
self.decoder_block = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
self.encoder_block.append(self.conv_layer([filter[i], filter[i + 1]]))
self.decoder_block.append(self.conv_layer([filter[i + 1], filter[i]]))
# define convolution layer
self.conv_block_enc = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
self.conv_block_dec = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for i in range(4):
if i == 0:
self.conv_block_enc.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.conv_block_dec.append(self.conv_layer([filter[i], filter[i]]))
else:
self.conv_block_enc.append(nn.Sequential(self.conv_layer([filter[i + 1], filter[i + 1]]),
self.conv_layer([filter[i + 1], filter[i + 1]])))
self.conv_block_dec.append(nn.Sequential(self.conv_layer([filter[i], filter[i]]),
self.conv_layer([filter[i], filter[i]])))
# define task attention layers
self.encoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])])])
self.decoder_att = nn.ModuleList([nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])])])
self.encoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[1]])])
self.decoder_block_att = nn.ModuleList([self.conv_layer([filter[0], filter[0]])])
for j in range(3):
if j < 2:
self.encoder_att.append(nn.ModuleList([self.att_layer([filter[0], filter[0], filter[0]])]))
self.decoder_att.append(nn.ModuleList([self.att_layer([2 * filter[0], filter[0], filter[0]])]))
for i in range(4):
self.encoder_att[j].append(self.att_layer([2 * filter[i + 1], filter[i + 1], filter[i + 1]]))
self.decoder_att[j].append(self.att_layer([filter[i + 1] + filter[i], filter[i], filter[i]]))
for i in range(4):
if i < 3:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 2]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i]]))
else:
self.encoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.decoder_block_att.append(self.conv_layer([filter[i + 1], filter[i + 1]]))
self.pred_task1 = self.conv_layer([filter[0], self.class_nb], pred=True)
self.pred_task2 = self.conv_layer([filter[0], 1], pred=True)
self.pred_task3 = self.conv_layer([filter[0], 3], pred=True)
# define pooling and unpooling functions
self.down_sampling = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.up_sampling = nn.MaxUnpool2d(kernel_size=2, stride=2)
self.logsigma = nn.Parameter(torch.FloatTensor([-0.5, -0.5, -0.5]))
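# logsigma holds one learnable log-variance per task; presumably it is
# consumed by multi_task_trainer (imported from utils) when the 'uncert'
# weighting scheme is selected, following the homoscedastic-uncertainty
# weighting of Kendall et al.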
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.xavier_normal_(m.weight)
nn.init.constant_(m.bias, 0)
def conv_layer(self, channel, pred=False):
if not pred:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=3, padding=1),
nn.BatchNorm2d(num_features=channel[1]),
nn.ReLU(inplace=True),
)
else:
conv_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[0], kernel_size=3, padding=1),
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
)
return conv_block
def att_layer(self, channel):
att_block = nn.Sequential(
nn.Conv2d(in_channels=channel[0], out_channels=channel[1], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[1]),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=channel[1], out_channels=channel[2], kernel_size=1, padding=0),
nn.BatchNorm2d(channel[2]),
nn.Sigmoid(),
)
return att_block
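# Each attention block is a 1x1-conv bottleneck ending in a sigmoid, so it
# emits a soft mask in [0, 1] with the same spatial size as its input. The
# forward pass below multiplies these masks element-wise with the shared
# encoder/decoder features to pick out task-specific information.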
def forward(self, x):
g_encoder, g_decoder, g_maxpool, g_upsampl, indices = ([0] * 5 for _ in range(5))
for i in range(5):
g_encoder[i], g_decoder[-i - 1] = ([0] * 2 for _ in range(2))
# define attention list for tasks
atten_encoder, atten_decoder = ([0] * 3 for _ in range(2))
for i in range(3):
atten_encoder[i], atten_decoder[i] = ([0] * 5 for _ in range(2))
for i in range(3):
for j in range(5):
atten_encoder[i][j], atten_decoder[i][j] = ([0] * 3 for _ in range(2))
# define global shared network
for i in range(5):
if i == 0:
g_encoder[i][0] = self.encoder_block[i](x)
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
else:
g_encoder[i][0] = self.encoder_block[i](g_maxpool[i - 1])
g_encoder[i][1] = self.conv_block_enc[i](g_encoder[i][0])
g_maxpool[i], indices[i] = self.down_sampling(g_encoder[i][1])
for i in range(5):
if i == 0:
g_upsampl[i] = self.up_sampling(g_maxpool[-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
else:
g_upsampl[i] = self.up_sampling(g_decoder[i - 1][-1], indices[-i - 1])
g_decoder[i][0] = self.decoder_block[-i - 1](g_upsampl[i])
g_decoder[i][1] = self.conv_block_dec[-i - 1](g_decoder[i][0])
# define task dependent attention module
for i in range(3):
for j in range(5):
if j == 0:
atten_encoder[i][j][0] = self.encoder_att[i][j](g_encoder[j][0])
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
else:
atten_encoder[i][j][0] = self.encoder_att[i][j](torch.cat((g_encoder[j][0], atten_encoder[i][j - 1][2]), dim=1))
atten_encoder[i][j][1] = (atten_encoder[i][j][0]) * g_encoder[j][1]
atten_encoder[i][j][2] = self.encoder_block_att[j](atten_encoder[i][j][1])
atten_encoder[i][j][2] = F.max_pool2d(atten_encoder[i][j][2], kernel_size=2, stride=2)
for j in range(5):
if j == 0:
atten_decoder[i][j][0] = F.interpolate(atten_encoder[i][-1][-1], scale_factor=2, mode='bilinear', align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat((g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
else:
atten_decoder[i][j][0] = F.interpolate(atten_decoder[i][j - 1][2], scale_factor=2, mode='bilinear', align_corners=True)
atten_decoder[i][j][0] = self.decoder_block_att[-j - 1](atten_decoder[i][j][0])
atten_decoder[i][j][1] = self.decoder_att[i][-j - 1](torch.cat((g_upsampl[j], atten_decoder[i][j][0]), dim=1))
atten_decoder[i][j][2] = (atten_decoder[i][j][1]) * g_decoder[j][-1]
# define task prediction layers
t1_pred = F.log_softmax(self.pred_task1(atten_decoder[0][-1][-1]), dim=1)
t2_pred = self.pred_task2(atten_decoder[1][-1][-1])
t3_pred = self.pred_task3(atten_decoder[2][-1][-1])
t3_pred = t3_pred / torch.norm(t3_pred, p=2, dim=1, keepdim=True)
return [t1_pred, t2_pred, t3_pred], self.logsigma
# define model, optimiser and scheduler
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
SegNet_MTAN = SegNet().to(device)
optimizer = optim.Adam(SegNet_MTAN.parameters(), lr=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
print('Parameter Space: ABS: {:.1f}, REL: {:.4f}'.format(count_parameters(SegNet_MTAN),
count_parameters(SegNet_MTAN) / 24981069))
print('LOSS FORMAT: SEMANTIC_LOSS MEAN_IOU PIX_ACC | DEPTH_LOSS ABS_ERR REL_ERR | NORMAL_LOSS MEAN MED <11.25 <22.5 <30')
# define dataset
dataset_path = opt.dataroot
if opt.apply_augmentation:
nyuv2_train_set = NYUv2(root=dataset_path, train=True, augmentation=True)
print('Applying data augmentation on NYUv2.')
else:
nyuv2_train_set = NYUv2(root=dataset_path, train=True)
print('Standard training strategy without data augmentation.')
nyuv2_test_set = NYUv2(root=dataset_path, train=False)
batch_size = 2
nyuv2_train_loader = torch.utils.data.DataLoader(
dataset=nyuv2_train_set,
batch_size=batch_size,
shuffle=True)
nyuv2_test_loader = torch.utils.data.DataLoader(
dataset=nyuv2_test_set,
batch_size=batch_size,
shuffle=False)
# Train and evaluate multi-task network
multi_task_trainer(nyuv2_train_loader,
nyuv2_test_loader,
SegNet_MTAN,
device,
optimizer,
scheduler,
opt,
200)
|
9a542b798a5b9aeed132b1235e51db64ccc111dc
|
462b8a2326486dd41bf0d1ddbb19bbcee9532411
|
/blender/arm/logicnode/miscellaneous/LN_set_time_scale.py
|
77bc3f5932472a8a992f67c6028c3ff72c39bc2a
|
[
"Zlib",
"GPL-2.0-only"
] |
permissive
|
armory3d/armory
|
b751fb23d6590f2ca421ace7cf7cbeaef91f472c
|
511657981bd2716eddcee8dff26820d27f2bc610
|
refs/heads/main
| 2023-08-12T02:57:02.898742
| 2023-08-04T18:55:45
| 2023-08-04T18:55:45
| 45,202,654
| 3,077
| 530
|
Zlib
| 2023-09-12T11:24:38
| 2015-10-29T18:27:56
|
Python
|
UTF-8
|
Python
| false
| false
| 418
|
py
|
LN_set_time_scale.py
|
from arm.logicnode.arm_nodes import *
class SetTimeScaleNode(ArmLogicTreeNode):
"""Sets the global time scale."""
bl_idname = 'LNSetTimeScaleNode'
bl_label = 'Set Time Scale'
arm_version = 1
def arm_init(self, context):
self.add_input('ArmNodeSocketAction', 'In')
self.add_input('ArmFloatSocket', 'Scale', default_value=1.0)
self.add_output('ArmNodeSocketAction', 'Out')
|
e5f6ce977d0593ac8c9a907ac1ff64ccda43e226
|
13a9fba2e0b8edbb4af771f3fb0ee2ec905ea067
|
/torchprofile/handlers.py
|
cded54a94399a33b06000627906a074f1d21de37
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
zhijian-liu/torchprofile
|
81f17f3a649e6fcd021080b61f1635d1d3926823
|
6d80fe57bb8c6bc9f789da7925fac6547fa9502b
|
refs/heads/master
| 2023-02-26T06:21:05.684585
| 2023-02-21T22:22:04
| 2023-02-21T22:22:04
| 205,957,008
| 258
| 26
|
MIT
| 2023-02-21T22:22:05
| 2019-09-03T00:30:33
|
Python
|
UTF-8
|
Python
| false
| false
| 5,152
|
py
|
handlers.py
|
from .utils import math
__all__ = ['handlers']
def addmm(node):
# [n, p] = aten::addmm([n, p], [n, m], [m, p], *, *)
n, m = node.inputs[1].shape
m, p = node.inputs[2].shape
return n * m * p
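# (The bias addition of addmm is not counted; as with the other handlers
# below, only the multiply-accumulates of the matrix product are reported.)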
def addmv(node):
# [n] = aten::addmv([n], [n, m], [m], *, *)
n, m = node.inputs[1].shape
return n * m
def bmm(node):
# [b, n, p] = aten::bmm([b, n, m], [b, m, p])
b, n, m = node.inputs[0].shape
b, m, p = node.inputs[1].shape
return b * n * m * p
def baddbmm(node):
# [b, n, p] = aten::baddbmm([b, n, p], [b, n, m], [b, m, p])
b, n, p = node.inputs[0].shape
b, n1, m = node.inputs[1].shape
b, m1, p1 = node.inputs[2].shape
assert n == n1 and m == m1 and p == p1
return b * n * m * p
def matmul(node):
if node.inputs[0].ndim == 1 and node.inputs[1].ndim == 1:
# [] = aten::matmul([n], [n])
n = node.inputs[0].shape[0]
return n
elif node.inputs[0].ndim == 1 and node.inputs[1].ndim == 2:
# [m] = aten::matmul([n], [n, m])
n, m = node.inputs[1].shape
return n * m
elif node.inputs[0].ndim == 2 and node.inputs[1].ndim == 1:
# [n] = aten::matmul([n, m], [m])
n, m = node.inputs[0].shape
return n * m
elif node.inputs[0].ndim == 2 and node.inputs[1].ndim == 2:
# [n, p] = aten::matmul([n, m], [m, p])
n, m = node.inputs[0].shape
m, p = node.inputs[1].shape
return n * m * p
elif node.inputs[0].ndim == 1:
# [..., m] = aten::matmul([n], [..., n, m])
*b, n, m = node.inputs[1].shape
return math.prod(b) * n * m
elif node.inputs[1].ndim == 1:
# [..., n] = aten::matmul([..., n, m], [m])
*b, n, m = node.inputs[0].shape
return math.prod(b) * n * m
else:
# [..., n, p] = aten::matmul([..., n, m], [..., m, p])
*b, n, p = node.outputs[0].shape
*_, n, m = node.inputs[0].shape
*_, m, p = node.inputs[1].shape
return math.prod(b) * n * m * p
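# Worked example (hypothetical shapes): aten::matmul([2, 3], [3, 4]) hits the
# 2-D/2-D branch above and returns 2 * 3 * 4 = 24 multiply-accumulates, one
# per (row, column, inner-dimension) triple of the product.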
def mul(node):
os = node.outputs[0].shape
return math.prod(os)
def convolution(node):
if node.outputs[0].shape[1] == node.inputs[1].shape[0]:
oc, ic, *ks = node.inputs[1].shape
else:
ic, oc, *ks = node.inputs[1].shape
os = node.outputs[0].shape
return math.prod(os) * ic * math.prod(ks)
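# The shape check above tells regular convolutions (weights [oc, ic, *ks])
# apart from transposed ones (weights [ic, oc, *ks]). Either way, each output
# element costs one inner product of size ic * prod(ks): e.g. a 3x3 kernel
# with ic=16 and a [1, 32, 8, 8] output gives 2048 * 16 * 9 = 294912 MACs
# (hypothetical shapes).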
def norm(node):
if node.operator in ['aten::batch_norm', 'aten::instance_norm']:
affine = node.inputs[1].shape is not None
elif node.operator in ['aten::layer_norm', 'aten::group_norm']:
affine = node.inputs[2].shape is not None
else:
raise ValueError(node.operator)
os = node.outputs[0].shape
return math.prod(os) if affine else 0
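# Affine normalizations cost one scale-and-shift per output element; when the
# weight tensor is absent (its shape is None) the handler counts zero.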
def avg_pool_or_mean(node):
os = node.outputs[0].shape
return math.prod(os)
def leaky_relu(node):
os = node.outputs[0].shape
return math.prod(os)
def upsample_bilinear2d(node):
os = node.outputs[0].shape
return math.prod(os) * 4
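# Bilinear upsampling blends the 4 nearest input pixels for every output
# element, hence the factor of 4 on the output size.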
handlers = (
('aten::addmm', addmm),
('aten::addmv', addmv),
('aten::bmm', bmm),
('aten::baddbmm', baddbmm),
(('aten::linear', 'aten::matmul'), matmul),
(('aten::mul', 'aten::mul_'), mul),
('aten::_convolution', convolution),
(('aten::batch_norm', 'aten::instance_norm', 'aten::layer_norm',
'aten::group_norm'), norm),
(('aten::adaptive_avg_pool1d', 'aten::adaptive_avg_pool2d',
'aten::adaptive_avg_pool3d', 'aten::avg_pool1d', 'aten::avg_pool2d',
'aten::avg_pool3d', 'aten::mean'), avg_pool_or_mean),
('aten::leaky_relu', leaky_relu),
('aten::upsample_bilinear2d', upsample_bilinear2d),
(('aten::adaptive_max_pool1d', 'aten::adaptive_max_pool2d',
'aten::adaptive_max_pool3d', 'aten::add', 'aten::add_',
'aten::alpha_dropout', 'aten::cat', 'aten::chunk', 'aten::clamp',
'aten::clone', 'aten::constant_pad_nd', 'aten::contiguous',
'aten::detach', 'aten::div', 'aten::div_', 'aten::dropout',
'aten::dropout_', 'aten::embedding', 'aten::eq', 'aten::feature_dropout',
'aten::flatten', 'aten::floor', 'aten::floor_divide', 'aten::gt',
'aten::hardtanh_', 'aten::hardtanh', 'aten::index', 'aten::int', 'aten::log_softmax',
'aten::lt', 'aten::max_pool1d', 'aten::max_pool1d_with_indices',
'aten::max_pool2d', 'aten::max_pool2d_with_indices', 'aten::max_pool3d',
'aten::max_pool3d_with_indices', 'aten::max_unpool1d',
'aten::max_unpool2d', 'aten::max_unpool3d', 'aten::ne',
'aten::reflection_pad1d', 'aten::reflection_pad2d',
'aten::reflection_pad3d', 'aten::relu', 'aten::relu_',
'aten::replication_pad1d', 'aten::replication_pad2d',
'aten::replication_pad3d', 'aten::rsub', 'aten::select', 'aten::sigmoid',
'aten::size', 'aten::slice', 'aten::softmax', 'aten::softshrink',
'aten::squeeze', 'aten::stack', 'aten::sub', 'aten::sum', 'aten::t',
'aten::tanh', 'aten::threshold', 'aten::to', 'aten::transpose',
'aten::upsample_nearest2d', 'aten::view', 'aten::zeros',
'prim::constant', 'prim::listconstruct', 'prim::listunpack',
'prim::numtotensor', 'prim::tupleconstruct'), None),
)
|
33472e59d82d5548c37aede5151939b2d96f1c01
|
de527d4dcbad261226e4bdc2742ef2a901fa119d
|
/tf_verify/__main__.py
|
82c19fe0538b5623c743d7059bad6a7750dbc458
|
[
"Apache-2.0"
] |
permissive
|
eth-sri/eran
|
6c131079686f87b83d771b20a53a500e480a57d6
|
8771d3158b2c64a360d5bdfd4433490863257dd6
|
refs/heads/master
| 2023-02-07T06:29:26.827401
| 2022-05-30T11:08:38
| 2022-05-30T11:08:38
| 157,565,932
| 306
| 128
|
Apache-2.0
| 2023-01-27T01:14:21
| 2018-11-14T15:03:42
|
Python
|
UTF-8
|
Python
| false
| false
| 76,847
|
py
|
__main__.py
|
"""
Copyright 2020 ETH Zurich, Secure, Reliable, and Intelligent Systems Lab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
cpu_affinity = os.sched_getaffinity(0)
sys.path.insert(0, '../ELINA/python_interface/')
sys.path.insert(0, '../deepg/code/')
import torch
import numpy as np
from eran import ERAN
from read_net_file import *
from read_zonotope_file import read_zonotope
import tensorflow as tf
import csv
import time
from tqdm import tqdm
from ai_milp import *
import argparse
from config import config
from constraint_utils import *
import re
import itertools
from multiprocessing import Pool, Value
import onnxruntime.backend as rt
import logging
import spatial
from copy import deepcopy
from tensorflow_translator import *
from onnx_translator import *
from optimizer import *
from analyzer import *
from pprint import pprint
# if config.domain=='gpupoly' or config.domain=='refinegpupoly':
from refine_gpupoly import *
from utils import parse_vnn_lib_prop, translate_output_constraints, translate_input_to_box, negate_cstr_or_list_old
#ZONOTOPE_EXTENSION = '.zt'
EPS = 10**(-9)
is_tf_version_2=tf.__version__[0]=='2'
if is_tf_version_2:
tf= tf.compat.v1
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def isnetworkfile(fname):
_, ext = os.path.splitext(fname)
if ext not in ['.pyt', '.meta', '.tf','.onnx', '.pb']:
raise argparse.ArgumentTypeError('only .pyt, .tf, .onnx, .pb, and .meta formats supported')
return fname
def parse_input_box(text):
intervals_list = []
for line in text.split('\n'):
if line!="":
interval_strings = re.findall(r"\[-?\d*\.?\d+, *-?\d*\.?\d+\]", line)
intervals = []
for interval in interval_strings:
interval = interval.replace('[', '')
interval = interval.replace(']', '')
[lb,ub] = interval.split(",")
intervals.append((np.double(lb), np.double(ub)))
intervals_list.append(intervals)
# return every combination
boxes = itertools.product(*intervals_list)
return list(boxes)
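# Minimal sketch of the expected input-box format (hypothetical values): each
# line lists the candidate intervals for one input dimension, and the boxes
# returned are the cartesian product across lines, e.g.
# parse_input_box("[0, 1] [2, 3]\n[-1, 1]")
# == [((0.0, 1.0), (-1.0, 1.0)), ((2.0, 3.0), (-1.0, 1.0))]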
def show_ascii_spec(lb, ub, n_rows, n_cols, n_channels):
print('==================================================================')
for i in range(n_rows):
print(' ', end='')
for j in range(n_cols):
print('#' if lb[n_cols*n_channels*i+j*n_channels] >= 0.5 else ' ', end='')
print(' | ', end='')
for j in range(n_cols):
print('#' if ub[n_cols*n_channels*i+j*n_channels] >= 0.5 else ' ', end='')
print(' | ')
print('==================================================================')
def normalize(image, means, stds, dataset):
# normalization taken out of the network
if len(means) == len(image):
for i in range(len(image)):
image[i] -= means[i]
if stds is not None:
image[i] /= stds[i]
elif dataset == 'mnist' or dataset == 'fashion':
for i in range(len(image)):
image[i] = (image[i] - means[0])/stds[0]
elif(dataset=='cifar10'):
count = 0
tmp = np.zeros(3072)
for i in range(1024):
tmp[count] = (image[count] - means[0])/stds[0]
count = count + 1
tmp[count] = (image[count] - means[1])/stds[1]
count = count + 1
tmp[count] = (image[count] - means[2])/stds[2]
count = count + 1
is_gpupoly = (domain=='gpupoly' or domain=='refinegpupoly')
if is_conv and not is_gpupoly:
for i in range(3072):
image[i] = tmp[i]
#for i in range(1024):
# image[i*3] = tmp[i]
# image[i*3+1] = tmp[i+1024]
# image[i*3+2] = tmp[i+2048]
else:
count = 0
for i in range(1024):
image[i] = tmp[count]
count = count+1
image[i+1024] = tmp[count]
count = count+1
image[i+2048] = tmp[count]
count = count+1
def normalize_plane(plane, mean, std, channel, is_constant):
plane_ = plane.clone()
if is_constant:
plane_ -= mean[channel]
plane_ /= std[channel]
return plane_
def normalize_poly(num_params, lexpr_cst, lexpr_weights, lexpr_dim, uexpr_cst, uexpr_weights, uexpr_dim, means, stds, dataset):
# normalization taken out of the network
if dataset == 'mnist' or dataset == 'fashion':
for i in range(len(lexpr_cst)):
lexpr_cst[i] = (lexpr_cst[i] - means[0]) / stds[0]
uexpr_cst[i] = (uexpr_cst[i] - means[0]) / stds[0]
for i in range(len(lexpr_weights)):
lexpr_weights[i] /= stds[0]
uexpr_weights[i] /= stds[0]
else:
for i in range(len(lexpr_cst)):
lexpr_cst[i] = (lexpr_cst[i] - means[i % 3]) / stds[i % 3]
uexpr_cst[i] = (uexpr_cst[i] - means[i % 3]) / stds[i % 3]
for i in range(len(lexpr_weights)):
lexpr_weights[i] /= stds[(i // num_params) % 3]
uexpr_weights[i] /= stds[(i // num_params) % 3]
def denormalize(image, means, stds, dataset):
if dataset == 'mnist' or dataset == 'fashion':
for i in range(len(image)):
image[i] = image[i]*stds[0] + means[0]
elif(dataset=='cifar10'):
count = 0
tmp = np.zeros(3072)
for i in range(1024):
tmp[count] = image[count]*stds[0] + means[0]
count = count + 1
tmp[count] = image[count]*stds[1] + means[1]
count = count + 1
tmp[count] = image[count]*stds[2] + means[2]
count = count + 1
for i in range(3072):
image[i] = tmp[i]
def model_predict(base, input):
if is_onnx:
pred = base.run(input)
else:
pred = base.run(base.graph.get_operation_by_name(model.op.name).outputs[0], {base.graph.get_operations()[0].name + ':0': input})
return pred
def estimate_grads(specLB, specUB, dim_samples=3, input_shape=[1]):
# Estimate gradients with a central difference quotient, averaged over
# dim_samples + 1 sample points spanning the input bounds.
# Very computationally costly.
specLB = np.array(specLB, dtype=np.float32)
specUB = np.array(specUB, dtype=np.float32)
inputs = [(((dim_samples - i) * specLB + i * specUB) / dim_samples).reshape(*input_shape) for i in range(dim_samples + 1)]
diffs = np.zeros(len(specLB))
# refactor this out of this method
if is_onnx:
runnable = rt.prepare(model, 'CPU')
elif sess is None:
config = tf.ConfigProto(device_count={'GPU': 0})
runnable = tf.Session(config=config)
else:
runnable = sess
for sample in range(dim_samples + 1):
pred = model_predict(runnable, inputs[sample])
for index in range(len(specLB)):
if sample < dim_samples:
l_input = [m if i != index else u for i, m, u in zip(range(len(specLB)), inputs[sample], inputs[sample+1])]
l_input = np.array(l_input, dtype=np.float32)
l_i_pred = model_predict(runnable, l_input)
else:
l_i_pred = pred
if sample > 0:
u_input = [m if i != index else l for i, m, l in zip(range(len(specLB)), inputs[sample], inputs[sample-1])]
u_input = np.array(u_input, dtype=np.float32)
u_i_pred = model_predict(runnable, u_input)
else:
u_i_pred = pred
diff = np.sum([abs(li - m) + abs(ui - m) for li, m, ui in zip(l_i_pred, pred, u_i_pred)])
diffs[index] += diff
return diffs / dim_samples
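# Minimal sketch of the central-difference quotient applied above, for a 1-D
# f(x) = x**2 at x = 1 (hypothetical function):
# f = lambda x: x ** 2
# h = 1e-3
# grad = (f(1 + h) - f(1 - h)) / (2 * h) # ~= 2.0, the true derivative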
progress = 0.0
def print_progress(depth):
if config.debug:
global progress, rec_start
progress += np.power(2.,-depth)
sys.stdout.write('\r%.10f percent, %.02f s\n' % (100 * progress, time.time()-rec_start))
def acasxu_recursive(specLB, specUB, max_depth=10, depth=0):
hold,nn,nlb,nub,_,_ = eran.analyze_box(specLB, specUB, domain, config.timeout_lp, config.timeout_milp, config.use_default_heuristic, constraints)
global failed_already
if hold:
print_progress(depth)
return hold, None
elif depth >= max_depth:
if failed_already.value and config.complete:
try:
verified_flag, adv_examples, _ = verify_network_with_milp(nn, specLB, specUB, nlb, nub, constraints)
except Exception as ex:
print(f"{ex}Exception occured for the following inputs:")
print(specLB, specUB, max_depth, depth)
#verified_flag, adv_examples, _ = verify_network_with_milp(nn, specLB, specUB, nlb, nub, constraints)
raise ex
print_progress(depth)
found_adex = False
if verified_flag == False:
if adv_examples!=None:
#print("adv image ", adv_image)
for adv_image in adv_examples:
for or_list in constraints:
if found_adex: break
negated_cstr = negate_cstr_or_list_old(or_list)
hold_adex,_,nlb,nub,_,_ = eran.analyze_box(adv_image, adv_image, domain, config.timeout_lp, config.timeout_milp, config.use_default_heuristic, negated_cstr)
found_adex = hold_adex or found_adex
#print("hold ", hold, "domain", domain)
if found_adex:
print("property violated at ", adv_image, "output_score", nlb[-1])
failed_already.value = 0
break
return verified_flag, None if not found_adex else adv_image
else:
return False, None
else:
# grads = estimate_grads(specLB, specUB, input_shape=eran.input_shape)
# # grads + small epsilon so if gradient estimation becomes 0 it will divide the biggest interval.
# smears = np.multiply(grads + 0.00001, [u-l for u, l in zip(specUB, specLB)])
#start = time.time()
nn.set_last_weights(constraints)
grads_lower, grads_upper = nn.back_propagate_gradient(nlb, nub)
smears = [max(-grad_l, grad_u) * (u-l) for grad_l, grad_u, l, u in zip(grads_lower, grads_upper, specLB, specUB)]
index = np.argmax(smears)
m = (specLB[index]+specUB[index])/2
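# Bisect the input dimension with the largest smear (gradient bound times
# interval width), i.e. the one expected to move the output most, and verify
# both halves recursively.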
result_a, adex_a = acasxu_recursive(specLB, [ub if i != index else m for i, ub in enumerate(specUB)], max_depth, depth + 1)
if adex_a is None:
result_b, adex_b = acasxu_recursive([lb if i != index else m for i, lb in enumerate(specLB)], specUB, max_depth, depth + 1)
else:
adex_b = None
result_b = False
adex = adex_a if adex_a is not None else (adex_b if adex_b is not None else None)
return failed_already.value and result_a and result_b, adex
def get_tests(dataset, geometric):
if geometric:
csvfile = open('../deepg/code/datasets/{}_test.csv'.format(dataset), 'r')
else:
if config.subset == None:
try:
csvfile = open('../data/{}_test_full.csv'.format(dataset), 'r')
except:
csvfile = open('../data/{}_test.csv'.format(dataset), 'r')
print("Only the first 100 samples are available.")
else:
filename = '../data/'+ dataset+ '_test_' + config.subset + '.csv'
csvfile = open(filename, 'r')
tests = csv.reader(csvfile, delimiter=',')
return tests
def init_domain(d):
if d == 'refinezono':
return 'deepzono'
elif d == 'refinepoly':
return 'deeppoly'
else:
return d
parser = argparse.ArgumentParser(description='ERAN Example', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--netname', type=isnetworkfile, default=config.netname, help='the network name, the extension can be only .pb, .pyt, .tf, .meta, and .onnx')
parser.add_argument('--epsilon', type=float, default=config.epsilon, help='the epsilon for L_infinity perturbation')
parser.add_argument('--zonotope', type=str, default=config.zonotope, help='file to specify the zonotope matrix')
parser.add_argument('--subset', type=str, default=config.subset, help='suffix of the file to specify the subset of the test dataset to use')
parser.add_argument('--target', type=str, default=config.target, help='file specify the targets for the attack')
parser.add_argument('--epsfile', type=str, default=config.epsfile, help='file specify the epsilons for the L_oo attack')
parser.add_argument('--vnn_lib_spec', type=str, default=config.vnn_lib_spec, help='VNN_LIB spec file, defining input and output constraints')
parser.add_argument('--specnumber', type=int, default=config.specnumber, help='the property number for the acasxu networks')
parser.add_argument('--domain', type=str, default=config.domain, help='the domain name can be either deepzono, refinezono, deeppoly, refinepoly, gpupoly, refinegpupoly')
parser.add_argument('--dataset', type=str, default=config.dataset, help='the dataset, can be either mnist, cifar10, acasxu, or fashion')
parser.add_argument('--complete', type=str2bool, default=config.complete, help='flag specifying where to use complete verification or not')
parser.add_argument('--timeout_lp', type=float, default=config.timeout_lp, help='timeout for the LP solver')
parser.add_argument('--timeout_final_lp', type=float, default=config.timeout_final_lp, help='timeout for the final LP solver')
parser.add_argument('--timeout_milp', type=float, default=config.timeout_milp, help='timeout for the MILP solver')
parser.add_argument('--timeout_final_milp', type=float, default=config.timeout_final_lp, help='timeout for the final MILP solver')
parser.add_argument('--timeout_complete', type=float, default=None, help='Cumulative timeout for the complete verifier, supersedes timeout_final_milp if set')
parser.add_argument('--max_milp_neurons', type=int, default=config.max_milp_neurons, help='number of layers to encode using MILP.')
parser.add_argument('--partial_milp', type=int, default=config.partial_milp, help='Maximum number of neurons to use for partial MILP encoding')
parser.add_argument('--numproc', type=int, default=config.numproc, help='number of processes for MILP / LP / k-ReLU')
parser.add_argument('--sparse_n', type=int, default=config.sparse_n, help='Number of variables to group by k-ReLU')
parser.add_argument('--use_default_heuristic', type=str2bool, default=config.use_default_heuristic, help='whether to use the area heuristic for the DeepPoly ReLU approximation or to always create new noise symbols per relu for the DeepZono ReLU approximation')
parser.add_argument('--use_milp', type=str2bool, default=config.use_milp, help='whether to use milp or not')
parser.add_argument('--refine_neurons', action='store_true', default=config.refine_neurons, help='whether to refine intermediate neurons')
parser.add_argument('--n_milp_refine', type=int, default=config.n_milp_refine, help='Number of milp refined layers')
parser.add_argument('--mean', nargs='+', type=float, default=config.mean, help='the mean used to normalize the data with')
parser.add_argument('--std', nargs='+', type=float, default=config.std, help='the standard deviation used to normalize the data with')
parser.add_argument('--data_dir', type=str, default=config.data_dir, help='data location')
parser.add_argument('--geometric_config', type=str, default=config.geometric_config, help='config location')
parser.add_argument('--num_params', type=int, default=config.num_params, help='Number of transformation parameters')
parser.add_argument('--num_tests', type=int, default=config.num_tests, help='Number of images to test')
parser.add_argument('--from_test', type=int, default=config.from_test, help='Number of images to test')
parser.add_argument('--debug', type=str2bool, default=config.debug, help='Whether to display debug info')
parser.add_argument('--attack', action='store_true', default=config.attack, help='Whether to attack')
parser.add_argument('--geometric', '-g', dest='geometric', default=config.geometric, action='store_true', help='Whether to do geometric analysis')
parser.add_argument('--input_box', default=config.input_box, help='input box to use')
parser.add_argument('--output_constraints', default=config.output_constraints, help='custom output constraints to check')
parser.add_argument('--normalized_region', type=str2bool, default=config.normalized_region, help='Whether to normalize the adversarial region')
parser.add_argument('--spatial', action='store_true', default=config.spatial, help='whether to do vector field analysis')
parser.add_argument('--t-norm', type=str, default=config.t_norm, help='vector field norm (1, 2, or inf)')
parser.add_argument('--delta', type=float, default=config.delta, help='vector field displacement magnitude')
parser.add_argument('--gamma', type=float, default=config.gamma, help='vector field smoothness constraint')
parser.add_argument('--k', type=int, default=config.k, help='refine group size')
parser.add_argument('--s', type=int, default=config.s, help='refine group sparsity parameter')
parser.add_argument('--quant_step', type=float, default=config.quant_step, help='Quantization step for quantized networks')
parser.add_argument("--approx_k", type=str2bool, default=config.approx_k, help="Use approximate fast k neuron constraints")
# Logging options
parser.add_argument('--logdir', type=str, default=None, help='Location to save logs to. If not specified, logs are not saved and emitted to stdout')
parser.add_argument('--logname', type=str, default=None, help='Directory of log files in `logdir`, if not specified timestamp is used')
args = parser.parse_args()
for k, v in vars(args).items():
setattr(config, k, v)
# if args.timeout_complete is not None:
# raise DeprecationWarning("'--timeout_complete' is deprecated. Use '--timeout_final_milp' instead")
config.json = vars(args)
pprint(config.json)
if config.specnumber and not config.input_box and not config.output_constraints:
config.input_box = '../data/acasxu/specs/acasxu_prop_' + str(config.specnumber) + '_input_prenormalized.txt'
config.output_constraints = '../data/acasxu/specs/acasxu_prop_' + str(config.specnumber) + '_constraints.txt'
assert config.netname, 'a network has to be provided for analysis.'
#if len(sys.argv) < 4 or len(sys.argv) > 5:
# print('usage: python3.6 netname epsilon domain dataset')
# exit(1)
netname = config.netname
assert os.path.isfile(netname), f"Model file not found. Please check \"{netname}\" is correct."
filename, file_extension = os.path.splitext(netname)
is_trained_with_pytorch = file_extension==".pyt"
is_saved_tf_model = file_extension==".meta"
is_pb_file = file_extension==".pb"
is_tensorflow = file_extension== ".tf"
is_onnx = file_extension == ".onnx"
assert is_trained_with_pytorch or is_saved_tf_model or is_pb_file or is_tensorflow or is_onnx, "file extension not supported"
epsilon = config.epsilon
#assert (epsilon >= 0) and (epsilon <= 1), "epsilon can only be between 0 and 1"
zonotope_file = config.zonotope
zonotope = None
zonotope_bool = (zonotope_file!=None)
if zonotope_bool:
zonotope = read_zonotope(zonotope_file)
domain = config.domain
if zonotope_bool:
assert domain in ['deepzono', 'refinezono'], "domain name can be either deepzono or refinezono"
elif not config.geometric:
assert domain in ['deepzono', 'refinezono', 'deeppoly', 'refinepoly', 'gpupoly', 'refinegpupoly'], "domain name can be either deepzono, refinezono, deeppoly, refinepoly, gpupoly, refinegpupoly"
dataset = config.dataset
if zonotope_bool==False:
assert dataset in ['mnist', 'cifar10', 'acasxu', 'fashion'], "only mnist, cifar10, acasxu, and fashion datasets are supported"
mean = 0
std = 0
complete = (config.complete==True)
if(dataset=='acasxu'):
print("netname ", netname, " specnumber ", config.specnumber, " domain ", domain, " dataset ", dataset, "args complete ", config.complete, " complete ",complete, " timeout_lp ",config.timeout_lp)
else:
print("netname ", netname, " epsilon ", epsilon, " domain ", domain, " dataset ", dataset, "args complete ", config.complete, " complete ",complete, " timeout_lp ",config.timeout_lp)
non_layer_operation_types = ['NoOp', 'Assign', 'Const', 'RestoreV2', 'SaveV2', 'PlaceholderWithDefault', 'IsVariableInitialized', 'Placeholder', 'Identity']
sess = None
if is_saved_tf_model or is_pb_file:
netfolder = os.path.dirname(netname)
tf.logging.set_verbosity(tf.logging.ERROR)
sess = tf.Session()
if is_saved_tf_model:
saver = tf.train.import_meta_graph(netname)
saver.restore(sess, tf.train.latest_checkpoint(netfolder+'/'))
else:
with tf.gfile.GFile(netname, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
tf.graph_util.import_graph_def(graph_def, name='')
ops = sess.graph.get_operations()
last_layer_index = -1
while ops[last_layer_index].type in non_layer_operation_types:
last_layer_index -= 1
model = sess.graph.get_tensor_by_name(ops[last_layer_index].name + ':0')
eran = ERAN(model, sess)
else:
if(zonotope_bool==True):
num_pixels = len(zonotope)
elif(dataset=='mnist'):
num_pixels = 784
elif (dataset=='cifar10'):
num_pixels = 3072
elif(dataset=='acasxu'):
num_pixels = 5
if is_onnx:
model, is_conv = read_onnx_net(netname)
else:
model, is_conv, means, stds = read_tensorflow_net(netname, num_pixels, is_trained_with_pytorch, (domain == 'gpupoly' or domain == 'refinegpupoly'))
if domain == 'gpupoly' or domain == 'refinegpupoly':
if is_onnx:
translator = ONNXTranslator(model, True)
else:
translator = TFTranslator(model)
operations, resources = translator.translate()
optimizer = Optimizer(operations, resources)
nn = layers()
network, relu_layers, num_gpu_layers = optimizer.get_gpupoly(nn)
else:
eran = ERAN(model, is_onnx=is_onnx)
if not is_trained_with_pytorch:
if dataset == 'mnist' and not config.geometric:
means = [0]
stds = [1]
elif dataset == 'acasxu':
means = [1.9791091e+04, 0.0, 0.0, 650.0, 600.0]
stds = [60261.0, 6.28318530718, 6.28318530718, 1100.0, 1200.0]
elif dataset == "cifar10":
means = [0.4914, 0.4822, 0.4465]
stds = [0.2023, 0.1994, 0.2010]
else:
means = [0.5, 0.5, 0.5]
stds = [1, 1, 1]
is_trained_with_pytorch = is_trained_with_pytorch or is_onnx
if config.mean is not None:
means = config.mean
stds = config.std
os.sched_setaffinity(0,cpu_affinity)
correctly_classified_images = 0
verified_images = 0
unsafe_images = 0
cum_time = 0
if config.vnn_lib_spec is not None:
# input and output constraints in homogenized representation x >= C_lb * [x_0, eps, 1]; C_out [y, 1] >= 0
C_lb, C_ub, C_out = parse_vnn_lib_prop(config.vnn_lib_spec)
constraints = translate_output_constraints(C_out)
boxes = translate_input_to_box(C_lb, C_ub, x_0=None, eps=None, domain_bounds=None)
else:
if config.output_constraints:
constraints = get_constraints_from_file(config.output_constraints)
else:
constraints = None
if dataset and config.input_box is None:
tests = get_tests(dataset, config.geometric)
else:
tests = open(config.input_box, 'r').read()
boxes = parse_input_box(tests)
def init(args):
global failed_already
failed_already = args
if dataset=='acasxu':
use_parallel_solve = True
failed_already = Value('i', 1)
if config.debug:
print('Constraints: ', constraints)
total_start = time.time()
for box_index, box in enumerate(boxes):
specLB = [interval[0] for interval in box]
specUB = [interval[1] for interval in box]
normalize(specLB, means, stds, dataset)
normalize(specUB, means, stds, dataset)
e = None
holds = True
x_adex = None
found_adex = False
rec_start = time.time()
# start = time.time()
verified_flag, nn, nlb, nub, _ , x_adex = eran.analyze_box(specLB, specUB, init_domain(domain), config.timeout_lp, config.timeout_milp, config.use_default_heuristic, constraints)
if not verified_flag and x_adex is not None:
for or_list in constraints:
if found_adex: break
negated_cstr = negate_cstr_or_list_old(or_list)
hold_adex, _, nlb, nub, _, _ = eran.analyze_box(x_adex, x_adex, "deeppoly", config.timeout_lp,
config.timeout_milp, config.use_default_heuristic,
negated_cstr)
found_adex = hold_adex or found_adex
# adex_holds, _, _, _, _, _ = eran.analyze_box(x_adex, x_adex, "deeppoly", config.timeout_lp, config.timeout_milp, config.use_default_heuristic, constraints)
if (not verified_flag) and (not found_adex):
# expensive min/max gradient calculation
verified_flag = True
nn.set_last_weights(constraints)
grads_lower, grads_upper = nn.back_propagate_gradient(nlb, nub)
smears = [max(-grad_l, grad_u) * (u-l) for grad_l, grad_u, l, u in zip(grads_lower, grads_upper, specLB, specUB)]
split_multiple = 20 / np.sum(smears)
num_splits = [int(np.ceil(smear * split_multiple)) for smear in smears]
step_size = []
for i in range(5):
if num_splits[i]==0:
num_splits[i] = 1
step_size.append((specUB[i]-specLB[i])/num_splits[i])
#sorted_indices = np.argsort(widths)
#input_to_split = sorted_indices[0]
#print("input to split ", input_to_split)
#step_size = widths/num_splits
#print("step size", step_size,num_splits)
start_val = np.copy(specLB)
end_val = np.copy(specUB)
# _,nn,_,_,_,_ = eran.analyze_box(specLB, specUB, init_domain(domain), config.timeout_lp, config.timeout_milp, config.use_default_heuristic, constraints)
#complete_list = []
multi_bounds = []
for i in range(num_splits[0]):
if not holds: break
specLB[0] = start_val[0] + i*step_size[0]
specUB[0] = np.fmin(end_val[0],start_val[0]+ (i+1)*step_size[0])
for j in range(num_splits[1]):
if not holds: break
specLB[1] = start_val[1] + j*step_size[1]
specUB[1] = np.fmin(end_val[1],start_val[1]+ (j+1)*step_size[1])
for k in range(num_splits[2]):
if not holds: break
specLB[2] = start_val[2] + k*step_size[2]
specUB[2] = np.fmin(end_val[2],start_val[2]+ (k+1)*step_size[2])
for l in range(num_splits[3]):
if not holds: break
specLB[3] = start_val[3] + l*step_size[3]
specUB[3] = np.fmin(end_val[3],start_val[3]+ (l+1)*step_size[3])
for m in range(num_splits[4]):
specLB[4] = start_val[4] + m*step_size[4]
specUB[4] = np.fmin(end_val[4],start_val[4]+ (m+1)*step_size[4])
if use_parallel_solve:
# add bounds to input for multiprocessing map
multi_bounds.append((specLB.copy(), specUB.copy()))
else:
res = acasxu_recursive(specLB.copy(),specUB.copy())
#print("RES ", res, res[0]==True,type(res)==tuple)
if type(res)==tuple and res[0]==False:
verified_flag = False
break
elif res==False:
verified_flag = False
break
#print("verified flag", verified_flag)
# --- VERSION WITHOUT MULTIPROCESSING ---
#holds, _, nlb, nub, _, x_adex = eran.analyze_box(specLB, specUB, domain, config.timeout_lp, config.timeout_milp, config.use_default_heuristic, constraints)
#if not holds:
# if x_adex is not None:
# adex_holds, _, _, _, _, _ = eran.analyze_box(x_adex, x_adex, "deeppoly", config.timeout_lp, config.timeout_milp, config.use_default_heuristic, constraints)
# if not adex_holds:
# verified_flag = False
# break
# if complete:
# holds, adv_image, adv_val = verify_network_with_milp(nn, specLB, specUB, nlb, nub, constraints)
#complete_list.append((i,j,k,l,m))
# if not holds:
# verified_flag = False
# break
# else:
# verified_flag = False
# break
#if config.debug:
# sys.stdout.write('\rsplit %i, %i, %i, %i, %i %.02f sec\n' % (i, j, k, l, m, time.time()-rec_start))
#print(time.time() - rec_start, "seconds")
#print("LENGTH ", len(multi_bounds))
if use_parallel_solve:
failed_already = Value('i', 1)
try:
with Pool(processes=10, initializer=init, initargs=(failed_already,)) as pool:
pool_return = pool.starmap(acasxu_recursive, multi_bounds)
res = [x[0] for x in pool_return]
adex = [x[1] for x in pool_return if x[1] is not None]
for x_adex in adex: # Should be redundant as only confirmed counterexamples should be returned.
for or_list in constraints:
if found_adex: break
negated_cstr = negate_cstr_or_list_old(or_list)
hold_adex,_,nlb,nub,_,_ = eran.analyze_box(x_adex, x_adex, "deeppoly", config.timeout_lp, config.timeout_milp, config.use_default_heuristic, negated_cstr)
found_adex = hold_adex or found_adex
# adex_holds, _, _, _, _, _ = eran.analyze_box(x_adex, x_adex, "deeppoly", config.timeout_lp,
# config.timeout_milp, config.use_default_heuristic,
# constraints)
if found_adex:
break
else:
assert False, "This should not be reachable"
if all(res):
verified_flag = True
else:
verified_flag = False
except Exception as ex:
verified_flag = False
e = ex
ver_str = "Verified correct" if verified_flag else "Failed"
if found_adex:
ver_str = "Verified unsound (with adex)"
if e is None:
print("AcasXu property", config.specnumber, f"{ver_str} for Box", box_index, "out of", len(boxes))
else:
print("AcasXu property", config.specnumber, "Failed for Box", box_index, "out of", len(boxes), "because of an exception ", e)
print(time.time() - rec_start, "seconds")
print("Total time needed:", time.time() - total_start, "seconds")
elif zonotope_bool:
perturbed_label, nn, nlb, nub,_,_ = eran.analyze_zonotope(zonotope, domain, config.timeout_lp, config.timeout_milp, config.use_default_heuristic)
print("nlb ",nlb[-1])
print("nub ",nub[-1])
if(perturbed_label!=-1):
print("Verified")
elif(complete==True):
constraints = get_constraints_for_dominant_label(perturbed_label, 10)
verified_flag, adv_image, _ = verify_network_with_milp(nn, zonotope, [], nlb, nub, constraints)
if(verified_flag==True):
print("Verified")
else:
print("Failed")
else:
print("Failed")
elif config.geometric:
from geometric_constraints import *
total, attacked, standard_correct, tot_time = 0, 0, 0, 0
correct_box, correct_poly = 0, 0
cver_box, cver_poly = [], []
if config.geometric_config:
transform_attack_container = get_transform_attack_container(config.geometric_config)
for i, test in enumerate(tests):
if config.from_test and i < config.from_test:
continue
if config.num_tests is not None and i >= config.num_tests:
break
set_transform_attack_for(transform_attack_container, i, config.attack, config.debug)
attack_params = get_attack_params(transform_attack_container)
attack_images = get_attack_images(transform_attack_container)
print('Test {}:'.format(i))
image = np.float64(test[1:])
if config.dataset == 'mnist' or config.dataset == 'fashion':
n_rows, n_cols, n_channels = 28, 28, 1
else:
n_rows, n_cols, n_channels = 32, 32, 3
spec_lb = np.copy(image)
spec_ub = np.copy(image)
normalize(spec_lb, means, stds, config.dataset)
normalize(spec_ub, means, stds, config.dataset)
label, nn, nlb, nub,_,_ = eran.analyze_box(spec_lb, spec_ub, 'deeppoly', config.timeout_lp, config.timeout_milp,
config.use_default_heuristic)
print('Label: ', label)
begtime = time.time()
if label != int(test[0]):
print('Label {}, but true label is {}, skipping...'.format(label, int(test[0])))
print('Standard accuracy: {} percent'.format(standard_correct / float(i + 1) * 100))
continue
else:
standard_correct += 1
print('Standard accuracy: {} percent'.format(standard_correct / float(i + 1) * 100))
dim = n_rows * n_cols * n_channels
ok_box, ok_poly = True, True
k = config.num_params + 1 + 1 + dim
attack_imgs, checked, attack_pass = [], [], 0
cex_found = False
if config.attack:
for j in tqdm(range(0, len(attack_params))):
params = attack_params[j]
values = np.array(attack_images[j])
attack_lb = values[::2]
attack_ub = values[1::2]
normalize(attack_lb, means, stds, config.dataset)
normalize(attack_ub, means, stds, config.dataset)
attack_imgs.append((params, attack_lb, attack_ub))
checked.append(False)
predict_label, _, _, _, _, _ = eran.analyze_box(
attack_lb[:dim], attack_ub[:dim], 'deeppoly',
config.timeout_lp, config.timeout_milp, config.use_default_heuristic)
if predict_label != int(test[0]):
print('counter-example, params: ', params, ', predicted label: ', predict_label)
cex_found = True
break
else:
attack_pass += 1
print('tot attacks: ', len(attack_imgs))
lines = get_transformations(transform_attack_container)
print('Number of lines: ', len(lines))
assert len(lines) % k == 0
spec_lb = np.zeros(config.num_params + dim)
spec_ub = np.zeros(config.num_params + dim)
expr_size = config.num_params
lexpr_cst, uexpr_cst = [], []
lexpr_weights, uexpr_weights = [], []
lexpr_dim, uexpr_dim = [], []
ver_chunks_box, ver_chunks_poly, tot_chunks = 0, 0, 0
for i, line in enumerate(lines):
if i % k < config.num_params:
# read specs for the parameters
values = line
assert len(values) == 2
param_idx = i % k
spec_lb[dim + param_idx] = values[0]
spec_ub[dim + param_idx] = values[1]
if config.debug:
print('parameter %d: [%.4f, %.4f]' % (param_idx, values[0], values[1]))
elif i % k == config.num_params:
# read interval bounds for image pixels
values = line
spec_lb[:dim] = values[::2]
spec_ub[:dim] = values[1::2]
# if config.debug:
# show_ascii_spec(spec_lb, spec_ub)
elif i % k < k - 1:
# read polyhedra constraints for image pixels
tokens = line
assert len(tokens) == 2 + 2 * config.num_params
bias_lower, weights_lower = tokens[0], tokens[1:1 + config.num_params]
bias_upper, weights_upper = tokens[config.num_params + 1], tokens[2 + config.num_params:]
assert len(weights_lower) == config.num_params
assert len(weights_upper) == config.num_params
lexpr_cst.append(bias_lower)
uexpr_cst.append(bias_upper)
for j in range(config.num_params):
lexpr_dim.append(dim + j)
uexpr_dim.append(dim + j)
lexpr_weights.append(weights_lower[j])
uexpr_weights.append(weights_upper[j])
else:
assert (len(line) == 0)
for p_idx in range(config.num_params):
lexpr_cst.append(spec_lb[dim + p_idx])
for l in range(config.num_params):
lexpr_weights.append(0)
lexpr_dim.append(dim + l)
uexpr_cst.append(spec_ub[dim + p_idx])
for l in range(config.num_params):
uexpr_weights.append(0)
uexpr_dim.append(dim + l)
normalize(spec_lb[:dim], means, stds, config.dataset)
normalize(spec_ub[:dim], means, stds, config.dataset)
normalize_poly(config.num_params, lexpr_cst, lexpr_weights, lexpr_dim, uexpr_cst, uexpr_weights,
uexpr_dim, means, stds, config.dataset)
for attack_idx, (attack_params, attack_lb, attack_ub) in enumerate(attack_imgs):
ok_attack = True
for j in range(num_pixels):
low, up = lexpr_cst[j], uexpr_cst[j]
for idx in range(config.num_params):
low += lexpr_weights[j * config.num_params + idx] * attack_params[idx]
up += uexpr_weights[j * config.num_params + idx] * attack_params[idx]
if low > attack_lb[j] + EPS or attack_ub[j] > up + EPS:
ok_attack = False
if ok_attack:
checked[attack_idx] = True
# print('checked ', attack_idx)
if config.debug:
print('Running the analysis...')
t_begin = time.time()
perturbed_label_poly, _, _, _, _, _ = eran.analyze_box(
spec_lb, spec_ub, 'deeppoly',
config.timeout_lp, config.timeout_milp, config.use_default_heuristic, None,
lexpr_weights, lexpr_cst, lexpr_dim,
uexpr_weights, uexpr_cst, uexpr_dim,
expr_size)
perturbed_label_box, _, _, _, _, _ = eran.analyze_box(
spec_lb[:dim], spec_ub[:dim], 'deeppoly',
config.timeout_lp, config.timeout_milp, config.use_default_heuristic)
t_end = time.time()
print('DeepG: ', perturbed_label_poly, '\tInterval: ', perturbed_label_box, '\tlabel: ', label,
'[Time: %.4f]' % (t_end - t_begin))
tot_chunks += 1
if perturbed_label_box != label:
ok_box = False
else:
ver_chunks_box += 1
if perturbed_label_poly != label:
ok_poly = False
else:
ver_chunks_poly += 1
lexpr_cst, uexpr_cst = [], []
lexpr_weights, uexpr_weights = [], []
lexpr_dim, uexpr_dim = [], []
total += 1
if ok_box:
correct_box += 1
if ok_poly:
correct_poly += 1
if cex_found:
assert (not ok_box) and (not ok_poly)
attacked += 1
cver_poly.append(ver_chunks_poly / float(tot_chunks))
cver_box.append(ver_chunks_box / float(tot_chunks))
tot_time += time.time() - begtime
print('Verified[box]: {}, Verified[poly]: {}, CEX found: {}'.format(ok_box, ok_poly, cex_found))
assert not cex_found or not ok_box, 'ERROR! Found counter-example, but image was verified with box!'
assert not cex_found or not ok_poly, 'ERROR! Found counter-example, but image was verified with poly!'
else:
for i, test in enumerate(tests):
if config.from_test and i < config.from_test:
continue
if config.num_tests is not None and i >= config.num_tests:
break
attacks_file = os.path.join(config.data_dir, 'attack_{}.csv'.format(i))
print('Test {}:'.format(i))
image = np.float64(test[1:])
if config.dataset == 'mnist' or config.dataset == 'fashion':
n_rows, n_cols, n_channels = 28, 28, 1
else:
n_rows, n_cols, n_channels = 32, 32, 3
spec_lb = np.copy(image)
spec_ub = np.copy(image)
normalize(spec_lb, means, stds, config.dataset)
normalize(spec_ub, means, stds, config.dataset)
label, nn, nlb, nub, _, _ = eran.analyze_box(spec_lb, spec_ub, 'deeppoly', config.timeout_lp, config.timeout_milp,
config.use_default_heuristic)
print('Label: ', label)
begtime = time.time()
if label != int(test[0]):
print('Label {}, but true label is {}, skipping...'.format(label, int(test[0])))
print('Standard accuracy: {} percent'.format(standard_correct / float(i + 1) * 100))
continue
else:
standard_correct += 1
print('Standard accuracy: {} percent'.format(standard_correct / float(i + 1) * 100))
dim = n_rows * n_cols * n_channels
ok_box, ok_poly = True, True
k = config.num_params + 1 + 1 + dim
attack_imgs, checked, attack_pass = [], [], 0
cex_found = False
if config.attack:
with open(attacks_file, 'r') as fin:
lines = fin.readlines()
for j in tqdm(range(0, len(lines), config.num_params + 1)):
params = [float(line[:-1]) for line in lines[j:j + config.num_params]]
tokens = lines[j + config.num_params].split(',')
values = np.array(list(map(float, tokens)))
attack_lb = values[::2]
attack_ub = values[1::2]
normalize(attack_lb, means, stds, config.dataset)
normalize(attack_ub, means, stds, config.dataset)
attack_imgs.append((params, attack_lb, attack_ub))
checked.append(False)
predict_label, _, _, _, _, _ = eran.analyze_box(
attack_lb[:dim], attack_ub[:dim], 'deeppoly',
config.timeout_lp, config.timeout_milp, config.use_default_heuristic)
if predict_label != int(test[0]):
print('counter-example, params: ', params, ', predicted label: ', predict_label)
cex_found = True
break
else:
attack_pass += 1
print('tot attacks: ', len(attack_imgs))
specs_file = os.path.join(config.data_dir, '{}.csv'.format(i))
with open(specs_file, 'r') as fin:
lines = fin.readlines()
print('Number of lines: ', len(lines))
assert len(lines) % k == 0
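            # Layout sketch, inferred from the parsing below: the spec file is a
            # sequence of chunks of k = num_params + 1 + 1 + dim lines each:
            #   * num_params lines: "lo hi" interval for each transform parameter
            #   * 1 line: comma-separated lo,hi pairs for all dim pixels
            #   * dim lines: "bias w_1 .. w_p | bias w_1 .. w_p", the lower and
            #     upper linear constraints on one pixel over the parameters
            #   * 1 line: the literal terminator 'SPEC_FINISHED'
            # For example, with num_params = 1 and dim = 2 (so k = 5), one chunk
            # could be:
            #   0.35 0.40
            #   0.50,0.60,0.20,0.30
            #   0.50 0.01 | 0.60 0.02
            #   0.20 0.00 | 0.30 0.01
            #   SPEC_FINISHED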
spec_lb = np.zeros(config.num_params + dim)
spec_ub = np.zeros(config.num_params + dim)
expr_size = config.num_params
lexpr_cst, uexpr_cst = [], []
lexpr_weights, uexpr_weights = [], []
lexpr_dim, uexpr_dim = [], []
ver_chunks_box, ver_chunks_poly, tot_chunks = 0, 0, 0
for i, line in enumerate(lines):
if i % k < config.num_params:
# read specs for the parameters
values = np.array(list(map(float, line[:-1].split(' '))))
assert values.shape[0] == 2
param_idx = i % k
spec_lb[dim + param_idx] = values[0]
spec_ub[dim + param_idx] = values[1]
if config.debug:
print('parameter %d: [%.4f, %.4f]' % (param_idx, values[0], values[1]))
elif i % k == config.num_params:
# read interval bounds for image pixels
values = np.array(list(map(float, line[:-1].split(','))))
spec_lb[:dim] = values[::2]
spec_ub[:dim] = values[1::2]
# if config.debug:
# show_ascii_spec(spec_lb, spec_ub)
elif i % k < k - 1:
# read polyhedra constraints for image pixels
tokens = line[:-1].split(' ')
assert len(tokens) == 2 + 2 * config.num_params + 1
bias_lower, weights_lower = float(tokens[0]), list(map(float, tokens[1:1 + config.num_params]))
assert tokens[config.num_params + 1] == '|'
bias_upper, weights_upper = float(tokens[config.num_params + 2]), list(
map(float, tokens[3 + config.num_params:]))
assert len(weights_lower) == config.num_params
assert len(weights_upper) == config.num_params
lexpr_cst.append(bias_lower)
uexpr_cst.append(bias_upper)
for j in range(config.num_params):
lexpr_dim.append(dim + j)
uexpr_dim.append(dim + j)
lexpr_weights.append(weights_lower[j])
uexpr_weights.append(weights_upper[j])
else:
assert (line == 'SPEC_FINISHED\n')
for p_idx in range(config.num_params):
lexpr_cst.append(spec_lb[dim + p_idx])
for l in range(config.num_params):
lexpr_weights.append(0)
lexpr_dim.append(dim + l)
uexpr_cst.append(spec_ub[dim + p_idx])
for l in range(config.num_params):
uexpr_weights.append(0)
uexpr_dim.append(dim + l)
normalize(spec_lb[:dim], means, stds, config.dataset)
normalize(spec_ub[:dim], means, stds, config.dataset)
normalize_poly(config.num_params, lexpr_cst, lexpr_weights, lexpr_dim, uexpr_cst, uexpr_weights,
uexpr_dim, means, stds, config.dataset)
for attack_idx, (attack_params, attack_lb, attack_ub) in enumerate(attack_imgs):
ok_attack = True
for j in range(num_pixels):
low, up = lexpr_cst[j], uexpr_cst[j]
for idx in range(config.num_params):
low += lexpr_weights[j * config.num_params + idx] * attack_params[idx]
up += uexpr_weights[j * config.num_params + idx] * attack_params[idx]
if low > attack_lb[j] + EPS or attack_ub[j] > up + EPS:
ok_attack = False
if ok_attack:
checked[attack_idx] = True
# print('checked ', attack_idx)
if config.debug:
print('Running the analysis...')
t_begin = time.time()
perturbed_label_poly, _, _, _ , _, _ = eran.analyze_box(
spec_lb, spec_ub, 'deeppoly',
config.timeout_lp, config.timeout_milp, config.use_default_heuristic, None,
lexpr_weights, lexpr_cst, lexpr_dim,
uexpr_weights, uexpr_cst, uexpr_dim,
expr_size)
perturbed_label_box, _, _, _, _, _ = eran.analyze_box(
spec_lb[:dim], spec_ub[:dim], 'deeppoly',
config.timeout_lp, config.timeout_milp, config.use_default_heuristic)
t_end = time.time()
print('DeepG: ', perturbed_label_poly, '\tInterval: ', perturbed_label_box, '\tlabel: ', label,
'[Time: %.4f]' % (t_end - t_begin))
tot_chunks += 1
if perturbed_label_box != label:
ok_box = False
else:
ver_chunks_box += 1
if perturbed_label_poly != label:
ok_poly = False
else:
ver_chunks_poly += 1
lexpr_cst, uexpr_cst = [], []
lexpr_weights, uexpr_weights = [], []
lexpr_dim, uexpr_dim = [], []
total += 1
if ok_box:
correct_box += 1
if ok_poly:
correct_poly += 1
if cex_found:
assert (not ok_box) and (not ok_poly)
attacked += 1
cver_poly.append(ver_chunks_poly / float(tot_chunks))
cver_box.append(ver_chunks_box / float(tot_chunks))
tot_time += time.time() - begtime
print('Verified[box]: {}, Verified[poly]: {}, CEX found: {}'.format(ok_box, ok_poly, cex_found))
assert not cex_found or not ok_box, 'ERROR! Found counter-example, but image was verified with box!'
assert not cex_found or not ok_poly, 'ERROR! Found counter-example, but image was verified with poly!'
print('Attacks found: %.2f percent, %d/%d' % (100.0 * attacked / total, attacked, total))
print('[Box] Provably robust: %.2f percent, %d/%d' % (100.0 * correct_box / total, correct_box, total))
print('[Poly] Provably robust: %.2f percent, %d/%d' % (100.0 * correct_poly / total, correct_poly, total))
print('Empirically robust: %.2f percent, %d/%d' % (100.0 * (total - attacked) / total, total - attacked, total))
print('[Box] Average chunks verified: %.2f percent' % (100.0 * np.mean(cver_box)))
print('[Poly] Average chunks verified: %.2f percent' % (100.0 * np.mean(cver_poly)))
print('Average time: ', tot_time / total)
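    # Hedged sketch of the attack-containment check used in the loops above: a
    # concrete attack (params, attack_lb, attack_ub) counts as covered by the
    # polyhedral spec iff, for every pixel j (with p = num_params),
    #   lexpr_cst[j] + sum_i lexpr_weights[j*p + i] * params[i] <= attack_lb[j] + EPS
    #   uexpr_cst[j] + sum_i uexpr_weights[j*p + i] * params[i] >= attack_ub[j] - EPS
    # i.e. the linear lower/upper planes evaluated at the attack parameters must
    # enclose the attack's own interval; only then is checked[attack_idx] set.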
elif config.input_box is not None:
boxes = parse_input_box(tests)
index = 1
correct = 0
for box in boxes:
specLB = [interval[0] for interval in box]
specUB = [interval[1] for interval in box]
normalize(specLB, means, stds, dataset)
normalize(specUB, means, stds, dataset)
        hold, nn, nlb, nub, _, _ = eran.analyze_box(specLB, specUB, domain, config.timeout_lp, config.timeout_milp, config.use_default_heuristic, constraints)
        if hold:
            print('constraints hold for box ' + str(index) + ' out of ' + str(len(boxes)))
            correct += 1
        else:
            print('constraints do NOT hold for box ' + str(index) + ' out of ' + str(len(boxes)))
        index += 1
    print('constraints hold for ' + str(correct) + ' out of ' + str(len(boxes)) + ' boxes')
elif config.spatial:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if config.dataset in ['mnist', 'fashion']:
height, width, channels = 28, 28, 1
else:
height, width, channels = 32, 32, 3
for idx, test in enumerate(tests):
if idx < config.from_test:
continue
if (config.num_tests is not None) and (config.from_test + config.num_tests == idx):
break
image = torch.from_numpy(np.float64(test[1:len(test)]) / np.float64(255)).reshape(1, height, width, channels).permute(0, 3, 1, 2).to(device)
        label = int(test[0])  # np.int was removed in NumPy 1.24; the builtin is equivalent here
specLB = image.clone().permute(0, 2, 3, 1).flatten().cpu()
specUB = image.clone().permute(0, 2, 3, 1).flatten().cpu()
normalize(specLB, means, stds, dataset)
normalize(specUB, means, stds, dataset)
predicted_label, nn, nlb, nub, _, _ = eran.analyze_box(
specLB=specLB, specUB=specUB, domain=init_domain(domain),
timeout_lp=config.timeout_lp, timeout_milp=config.timeout_milp,
use_default_heuristic=config.use_default_heuristic
)
print(f'concrete {nlb[-1]}')
if label != predicted_label:
print(f'img {idx} not considered, correct_label {label}, classified label {predicted_label}')
continue
correctly_classified_images += 1
start = time.time()
transformer = getattr(
spatial, f'T{config.t_norm.capitalize()}NormTransformer'
)(image, config.delta)
box_lb, box_ub = transformer.box_constraints()
lower_bounds = box_lb.permute(0, 2, 3, 1).flatten()
upper_bounds = box_ub.permute(0, 2, 3, 1).flatten()
normalize(lower_bounds, means, stds, dataset)
normalize(upper_bounds, means, stds, dataset)
specLB, specUB = lower_bounds.clone(), upper_bounds.clone()
LB_N0, UB_N0 = lower_bounds.clone(), upper_bounds.clone()
expr_size = 0
lexpr_weights = lexpr_cst = lexpr_dim = None
uexpr_weights = uexpr_cst = uexpr_dim = None
lower_planes = upper_planes = None
deeppoly_spatial_constraints = milp_spatial_constraints = None
if config.gamma < float('inf'):
expr_size = 2
lower_planes, upper_planes = list(), list()
lexpr_weights, lexpr_cst, lexpr_dim = list(), list(), list()
uexpr_weights, uexpr_cst, uexpr_dim = list(), list(), list()
linear_lb, linear_ub = transformer.linear_constraints()
for channel in range(image.shape[1]):
lb_a, lb_b, lb_c = linear_lb[channel]
ub_a, ub_b, ub_c = linear_ub[channel]
linear_lb[channel][0] = normalize_plane(
lb_a, means, stds, channel, is_constant=True
)
linear_lb[channel][1] = normalize_plane(
lb_b, means, stds, channel, is_constant=False
)
linear_lb[channel][2] = normalize_plane(
lb_c, means, stds, channel, is_constant=False
)
linear_ub[channel][0] = normalize_plane(
ub_a, means, stds, channel, is_constant=True
)
linear_ub[channel][1] = normalize_plane(
ub_b, means, stds, channel, is_constant=False
)
linear_ub[channel][2] = normalize_plane(
ub_c, means, stds, channel, is_constant=False
)
for i in range(3):
lower_planes.append(
torch.cat(
[
linear_lb[channel][i].unsqueeze(-1)
for channel in range(image.shape[1])
], dim=-1
).flatten().tolist()
)
upper_planes.append(
torch.cat(
[
linear_ub[channel][i].unsqueeze(-1)
for channel in range(image.shape[1])
], dim=-1
).flatten().tolist()
)
deeppoly_spatial_constraints = {'gamma': config.gamma}
for key, val in transformer.flow_constraint_pairs.items():
deeppoly_spatial_constraints[key] = val.cpu()
milp_spatial_constraints = {
'delta': config.delta, 'gamma': config.gamma,
'channels': image.shape[1], 'lower_planes': lower_planes,
'upper_planes': upper_planes,
'add_norm_constraints': transformer.add_norm_constraints,
'neighboring_indices': transformer.flow_constraint_pairs
}
num_pixels = image.flatten().shape[0]
num_flows = 2 * num_pixels
flows_LB = torch.full((num_flows,), -config.delta).to(device)
flows_UB = torch.full((num_flows,), config.delta).to(device)
specLB = torch.cat((specLB, flows_LB))
specUB = torch.cat((specUB, flows_UB))
lexpr_cst = deepcopy(lower_planes[0]) + flows_LB.tolist()
uexpr_cst = deepcopy(upper_planes[0]) + flows_UB.tolist()
lexpr_weights = [
v for p in zip(lower_planes[1], lower_planes[2]) for v in p
] + torch.zeros(2 * num_flows).tolist()
uexpr_weights = [
v for p in zip(upper_planes[1], upper_planes[2]) for v in p
] + torch.zeros(2 * num_flows).tolist()
lexpr_dim = torch.cat([
num_pixels + torch.arange(num_flows),
torch.zeros(2 * num_flows).long()
]).tolist()
uexpr_dim = torch.cat([
num_pixels + torch.arange(num_flows),
torch.zeros(2 * num_flows).long()
]).tolist()
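            # Hedged reading of the encoding just built: the analysis box is
            # [pixels ++ flow variables]. Each pixel value is bounded by affine
            # planes a + b*u + c*v in its two flow components (lower_planes /
            # upper_planes hold a, b, c per pixel), while the flow variables
            # themselves are plain intervals [-delta, delta], encoded with
            # zero-weight expressions.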
perturbed_label, _, nlb, nub, failed_labels, _ = eran.analyze_box(
specLB=specLB.cpu(), specUB=specUB.cpu(), domain=domain,
timeout_lp=config.timeout_lp, timeout_milp=config.timeout_milp,
use_default_heuristic=config.use_default_heuristic,
label=label, lexpr_weights=lexpr_weights, lexpr_cst=lexpr_cst,
lexpr_dim=lexpr_dim, uexpr_weights=uexpr_weights,
uexpr_cst=uexpr_cst, uexpr_dim=uexpr_dim, expr_size=expr_size,
spatial_constraints=deeppoly_spatial_constraints
)
end = time.time()
print(f'nlb {nlb[-1]} nub {nub[-1]} adv labels {failed_labels}')
if perturbed_label == label:
print(f'img {idx} verified {label}')
verified_images += 1
print(end - start, "seconds")
continue
if (not complete) or (domain not in ['deeppoly', 'deepzono']):
print(f'img {idx} Failed')
print(end - start, "seconds")
continue
verified_flag, adv_image, _ = verify_network_with_milp(
nn=nn, LB_N0=LB_N0, UB_N0=UB_N0, nlb=nlb, nub=nub,
constraints=get_constraints_for_dominant_label(
predicted_label, failed_labels=failed_labels
), spatial_constraints=milp_spatial_constraints
)
if verified_flag:
print(f'img {idx} Verified as Safe {label}')
verified_images += 1
else:
print(f'img {idx} Failed')
end = time.time()
print(end - start, "seconds")
print(f'analysis precision {verified_images} / {correctly_classified_images}')
else:
target = []
    if config.target is not None:
        with open(config.target, 'r') as targetfile:
            for val in csv.reader(targetfile, delimiter=','):
                target = val
    if config.epsfile is not None:
        with open(config.epsfile, 'r') as epsfile:
            for val in csv.reader(epsfile, delimiter=','):
                eps_array = val
for i, test in enumerate(tests):
if config.from_test and i < config.from_test:
continue
if config.num_tests is not None and i >= config.from_test + config.num_tests:
break
image= np.float64(test[1:len(test)])/np.float64(255)
specLB = np.copy(image)
specUB = np.copy(image)
        if config.quant_step:
            specLB = np.round(specLB / config.quant_step)
            specUB = np.round(specUB / config.quant_step)
        normalize(specLB, means, stds, dataset)
        normalize(specUB, means, stds, dataset)
is_correctly_classified = False
start = time.time()
        if domain == 'gpupoly' or domain == 'refinegpupoly':
            is_correctly_classified = network.test(specLB, specUB, int(test[0]), True)
        else:
            label, nn, nlb, nub, _, _ = eran.analyze_box(specLB, specUB, init_domain(domain), config.timeout_lp, config.timeout_milp, config.use_default_heuristic)
            print("concrete ", nlb[-1])
            if label == int(test[0]):
                is_correctly_classified = True
        if config.epsfile is not None:
            epsilon = np.float64(eps_array[i])
        if is_correctly_classified:
label = int(test[0])
perturbed_label = None
correctly_classified_images +=1
            if config.normalized_region:
specLB = np.clip(image - epsilon,0,1)
specUB = np.clip(image + epsilon,0,1)
normalize(specLB, means, stds, dataset)
normalize(specUB, means, stds, dataset)
else:
specLB = specLB - epsilon
specUB = specUB + epsilon
if config.quant_step:
specLB = np.round(specLB/config.quant_step)
specUB = np.round(specUB/config.quant_step)
            if config.target is None:
                prop = -1
            else:
                prop = int(target[i])
if domain == 'gpupoly' or domain =='refinegpupoly':
is_verified = network.test(specLB, specUB, int(test[0]))
#print("res ", res)
if is_verified:
print("img", i, "Verified", int(test[0]))
verified_images+=1
elif domain == 'refinegpupoly':
num_outputs = len(nn.weights[-1])
# Matrix that computes the difference with the expected layer.
diffMatrix = np.delete(-np.eye(num_outputs), int(test[0]), 0)
diffMatrix[:, label] = 1
diffMatrix = diffMatrix.astype(np.float64)
# gets the values from GPUPoly.
res = network.evalAffineExpr(diffMatrix, back_substitute=network.BACKSUBSTITUTION_WHILE_CONTAINS_ZERO)
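                    # Worked example (illustrative): with 3 outputs and true label 1,
                    # diffMatrix = [[-1, 1, 0], [0, 1, -1]], so row r encodes the
                    # affine expression out[label] - out[other_r]; a row whose bound
                    # is provably positive rules that competitor label out without
                    # falling back to the refinement step below.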
labels_to_be_verified = []
var = 0
nn.specLB = specLB
nn.specUB = specUB
nn.predecessors = []
                    for pred in range(0, nn.numlayer + 1):
                        predecessor = np.zeros(1, dtype=int)  # np.int was removed in NumPy 1.24
                        predecessor[0] = int(pred - 1)
                        nn.predecessors.append(predecessor)
                    for labels in range(num_outputs):
                        if labels != int(test[0]):
                            if res[var][0] < 0:
                                labels_to_be_verified.append(labels)
                            var = var + 1
is_verified, x = refine_gpupoly_results(nn, network, num_gpu_layers, relu_layers, int(test[0]),
labels_to_be_verified, K=config.k, s=config.s,
complete=config.complete,
timeout_final_lp=config.timeout_final_lp,
timeout_final_milp=config.timeout_final_milp,
timeout_lp=config.timeout_lp,
timeout_milp=config.timeout_milp,
use_milp=config.use_milp,
partial_milp=config.partial_milp,
max_milp_neurons=config.max_milp_neurons,
approx=config.approx_k)
if is_verified:
print("img", i, "Verified", int(test[0]))
verified_images += 1
else:
                        if x is not None:
                            adv_image = np.array(x)
                            res = np.argmax((network.eval(adv_image))[:, 0])
                            if res != int(test[0]):
                                denormalize(x, means, stds, dataset)
                                print("img", i, "Verified unsafe against label ", res, "correct label ", int(test[0]))
                                unsafe_images += 1
                            else:
                                print("img", i, "Failed")
                        else:
                            print("img", i, "Failed")
else:
print("img", i, "Failed")
else:
if domain.endswith("poly"):
perturbed_label, _, nlb, nub, failed_labels, x = eran.analyze_box(specLB, specUB, "deeppoly",
config.timeout_lp,
config.timeout_milp,
config.use_default_heuristic,
label=label, prop=prop, K=0, s=0,
timeout_final_lp=config.timeout_final_lp,
timeout_final_milp=config.timeout_final_milp,
use_milp=False,
complete=False,
terminate_on_failure=not config.complete,
partial_milp=0,
max_milp_neurons=0,
approx_k=0)
print("nlb ", nlb[-1], " nub ", nub[-1],"adv labels ", failed_labels)
if not domain.endswith("poly") or not (perturbed_label==label):
perturbed_label, _, nlb, nub, failed_labels, x = eran.analyze_box(specLB, specUB, domain,
config.timeout_lp,
config.timeout_milp,
config.use_default_heuristic,
label=label, prop=prop,
K=config.k, s=config.s,
timeout_final_lp=config.timeout_final_lp,
timeout_final_milp=config.timeout_final_milp,
use_milp=config.use_milp,
complete=config.complete,
terminate_on_failure=not config.complete,
partial_milp=config.partial_milp,
max_milp_neurons=config.max_milp_neurons,
approx_k=config.approx_k)
print("nlb ", nlb[-1], " nub ", nub[-1], "adv labels ", failed_labels)
                if perturbed_label == label:
                    print("img", i, "Verified", label)
                    verified_images += 1
                else:
                    if complete and failed_labels is not None:
                        failed_labels = list(set(failed_labels))
                        constraints = get_constraints_for_dominant_label(label, failed_labels)
                        verified_flag, adv_image, adv_val = verify_network_with_milp(nn, specLB, specUB, nlb, nub, constraints)
                        if verified_flag:
                            print("img", i, "Verified as Safe using MILP", label)
                            verified_images += 1
                        else:
                            if adv_image is not None:
                                cex_label, _, _, _, _, _ = eran.analyze_box(adv_image[0], adv_image[0], 'deepzono', config.timeout_lp, config.timeout_milp, config.use_default_heuristic, approx_k=config.approx_k)
                                if cex_label != label:
                                    denormalize(adv_image[0], means, stds, dataset)
                                    print("img", i, "Verified unsafe against label ", cex_label, "correct label ", label)
                                    unsafe_images += 1
                                else:
                                    print("img", i, "Failed with MILP, without an adversarial example")
                            else:
                                print("img", i, "Failed with MILP")
                    else:
                        if x is not None:
                            cex_label, _, _, _, _, _ = eran.analyze_box(x, x, 'deepzono', config.timeout_lp, config.timeout_milp, config.use_default_heuristic, approx_k=config.approx_k)
                            print("cex label ", cex_label, "label ", label)
                            if cex_label != label:
                                denormalize(x, means, stds, dataset)
                                print("img", i, "Verified unsafe against label ", cex_label, "correct label ", label)
                                unsafe_images += 1
                            else:
                                print("img", i, "Failed, without an adversarial example")
                        else:
                            print("img", i, "Failed")
end = time.time()
cum_time += end - start # only count samples where we did try to certify
else:
print("img",i,"not considered, incorrectly classified")
end = time.time()
print(f"progress: {1 + i - config.from_test}/{config.num_tests}, "
f"correct: {correctly_classified_images}/{1 + i - config.from_test}, "
f"verified: {verified_images}/{correctly_classified_images}, "
f"unsafe: {unsafe_images}/{correctly_classified_images}, ",
f"time: {end - start:.3f}; {0 if cum_time==0 else cum_time / correctly_classified_images:.3f}; {cum_time:.3f}")
print('analysis precision ',verified_images,'/ ', correctly_classified_images)
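# Illustrative invocation (a sketch only; flag names are inferred from the
# config fields referenced above, such as config.epsfile, config.num_tests and
# config.from_test, and may differ from the actual argument parser):
#
#   python3 . --netname nets/mnist_relu.onnx --dataset mnist \
#       --domain deeppoly --epsilon 0.03 --num_tests 100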
|
fa9bf707da56183fcff92b1e1b39f94249925b9d
|
74eec44040b840653b8c1fbdb208f373bdadbbde
|
/scripts/release/utils/http_helper.py
|
cd993453b2e4223cfdc719b0904b9ed5c00dcc4f
|
[
"Apache-2.0"
] |
permissive
|
jbosstm/narayana
|
a2296b0043ac9358255861d2bdcc98ab2a724569
|
d7abcb8009f3fac5e8cad1816659ac680be1a409
|
refs/heads/main
| 2023-09-03T05:01:52.562054
| 2023-07-10T14:56:05
| 2023-09-01T11:02:18
| 3,010,630
| 204
| 158
|
Apache-2.0
| 2023-09-14T20:22:31
| 2011-12-19T08:13:03
|
Java
|
UTF-8
|
Python
| false
| false
| 674
|
py
|
http_helper.py
|
import http.client
def request(method, host, path, body=None, headers={}):
"""
Generic HTTP request method
"""
connection = http.client.HTTPConnection(host)
connection.request(method, path, body, headers)
return connection.getresponse()
def get(host, path, body=None, headers={}):
"""
HTTP GET request
"""
return request('GET', host, path, body, headers)
def post(host, path, body=None, headers={}):
"""
HTTP POST request
"""
return request('POST', host, path, body, headers)
def put(host, path, body=None, headers={}):
"""
HTTP PUT request
"""
return request('PUT', host, path, body, headers)
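# Minimal usage sketch (not part of the release tooling); 'example.org' is a
# placeholder host, and running this performs a real network request.
if __name__ == '__main__':
    response = get('example.org', '/')
    print(response.status, response.reason)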
|
8b85ba36b88b94e727c6663210f6db355acba4f4
|
6f247f5400c6a840b6dfcb12388116dc3bb7bd49
|
/rnn/lookup.py
|
f071213003968aa4b5c277ec423837289b66eba5
|
[
"MIT"
] |
permissive
|
envytools/envytools
|
c062fbc3b8af90d3df9c6e0f57e9abbfc5690d01
|
e11d670a70ae0455261ead53cdd09c321974cc64
|
refs/heads/master
| 2023-08-26T23:44:47.131591
| 2022-04-30T21:15:56
| 2022-04-30T21:15:56
| 11,620,001
| 402
| 103
|
MIT
| 2022-12-07T01:35:18
| 2013-07-23T21:43:43
|
C
|
UTF-8
|
Python
| false
| false
| 3,203
|
py
|
lookup.py
|
#!/usr/bin/env python3
# Copyright (C) 2010 Marcelina Kościelnicka <mwk@0x04.net>
# Copyright (C) 2011 Martin Peres <martin.peres@ensi-bourges.fr>
# Copyright (C) 2011 Witold Waligóra <witold.waligora@gmail.com>
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import argparse
import sys
import rnn
import rnndec
import colors
def aint(x):
return int(x, 0)
parser = argparse.ArgumentParser(description="rnn database lookup.")
parser.add_argument('-f', '--file', default='root.xml')
parser.add_argument('-a', '--chipset')
parser.add_argument('-c', '--nocolor', action='store_const', const=colors.null, dest='colors', default=colors.term)
select = parser.add_mutually_exclusive_group()
select.add_argument('-d', '--domain', default='NV_MMIO')
select.add_argument('-e', '--enum')
select.add_argument('-b', '--bitset')
parser.add_argument('-w', '--write', action='store_true', default=False)
parser.add_argument('-v', '--variant', nargs=2, action='append', default=[])
parser.add_argument('address', type=aint, help="Address to be looked up")
parser.add_argument('value', type=aint, nargs='?', help="Value to be looked up")
args = parser.parse_args()
db = rnn.Database()
rnn.parsefile(db, args.file)
db.prep()
vc = rnndec.Context(db, args.colors)
if args.chipset is not None:
vc.varadd('chipset', args.chipset)
for varset, variant in args.variant:
vc.varadd(varset, variant)
if args.enum:
en = db.findenum(args.enum)
if not en:
sys.stderr.write("Not an enum: '{}'\n".format(args.enum))
sys.exit(1)
print(vc.decodeval(en, args.address, None))
elif args.bitset:
bs = db.findbitset(args.bitset)
if not bs:
sys.stderr.write("Not a bitset: '{}'\n".format(args.bitset))
sys.exit(1)
print(vc.decodeval(bs, args.address, None))
else:
dom = db.finddomain(args.domain)
if not dom:
sys.stderr.write("Not a domain: '{}'\n".format(args.domain))
sys.exit(1)
name, typeinfo, width = vc.decodeaddr(dom, args.address, args.write)
if typeinfo is None:
print(name)
else:
print(name, '=>', vc.decodeval(typeinfo, args.value, width))
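# Illustrative invocations (file, chipset, and register values are placeholders):
#   ./lookup.py -f root.xml -a NV98 0x610010              # decode an MMIO address
#   ./lookup.py -f root.xml -a NV98 0x610010 0xcafe0001   # ... and a value written to it
#   ./lookup.py -e some_enum_name 0x2                     # decode a value of an enum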
|
056efe073ae3c5ba57b7598d7d492e5d7756d529
|
44d1936bbc8e256534f3946f100bb0028e92fee5
|
/src/hatch/project/utils.py
|
641c4edfdea91f97ba9bbb467f1a8972a723a3b7
|
[
"MIT"
] |
permissive
|
pypa/hatch
|
aeb72e6a465a39073a020f63a931def16ce90ce8
|
7dac9856d2545393f7dd96d31fc8620dde0dc12d
|
refs/heads/master
| 2023-09-04T04:04:25.079348
| 2023-09-03T23:48:21
| 2023-09-03T23:48:21
| 92,997,800
| 1,869
| 125
|
MIT
| 2023-09-13T19:39:25
| 2017-05-31T23:37:53
|
Python
|
UTF-8
|
Python
| false
| false
| 576
|
py
|
utils.py
|
def parse_script_command(command):
possible_script, _, args = command.partition(' ')
if possible_script == '-':
ignore_exit_code = True
possible_script, _, args = args.partition(' ')
else:
ignore_exit_code = False
return possible_script, args, ignore_exit_code
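# Behavior sketch: a leading "- " marks a command as ignore-exit-code, e.g.
#   parse_script_command('pytest -x')    -> ('pytest', '-x', False)
#   parse_script_command('- pytest -x')  -> ('pytest', '-x', True)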
def format_script_commands(commands, args, ignore_exit_code):
for command in commands:
if args:
command = f'{command} {args}'
if ignore_exit_code and not command.startswith('- '):
command = f'- {command}'
yield command
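# Round-trip sketch for the formatter above:
#   list(format_script_commands(['pytest'], '-x', True))  -> ['- pytest -x']
#   list(format_script_commands(['lint'], '', False))     -> ['lint']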
|
6dc31df39f61f45a50bfe62428b981524897e16c
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/gsoap/all/conanfile.py
|
38b4629ae4211b4ee673c8a7850a6ae0d193f625
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,854
|
py
|
conanfile.py
|
from conan import ConanFile
from conan.tools.build import cross_building
from conan.tools.cmake import CMakeToolchain, CMakeDeps, CMake, cmake_layout
from conan.tools.env import VirtualBuildEnv
from conan.tools.files import copy, get
import os
required_conan_version = ">=1.52.0"
class GsoapConan(ConanFile):
name = "gsoap"
description = "The gSOAP toolkit is a C and C++ software development toolkit for SOAP and " \
"REST XML Web services and generic C/C++ XML data bindings."
license = ("gSOAP-1.3b", "GPL-2.0-or-later")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://sourceforge.net/projects/gsoap2"
topics = ("logging",)
package_type = "static-library"
settings = "os", "arch", "compiler", "build_type"
options = {
"fPIC": [True, False],
"with_openssl": [True, False],
"with_ipv6": [True, False],
"with_cookies": [True, False],
"with_c_locale": [True, False],
}
default_options = {
"fPIC": True,
"with_openssl": True,
"with_ipv6": True,
"with_cookies": True,
"with_c_locale": True,
}
exports_sources = "CMakeLists.txt", "cmake/*.cmake"
short_paths = True
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
if self.options.with_openssl:
self.requires("openssl/[>=1.1 <4]", transitive_headers=True)
self.requires("zlib/1.2.13")
def build_requirements(self):
if cross_building(self, skip_x64_x86=True) and hasattr(self, "settings_build"):
self.tool_requires(f"gsoap/{self.version}")
if self._settings_build.os == "Windows":
self.tool_requires("winflexbison/2.5.24")
else:
self.tool_requires("bison/3.8.2")
self.tool_requires("flex/2.6.4")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
toolchain = CMakeToolchain(self)
toolchain.variables["GSOAP_PATH"] = self.source_folder.replace("\\", "/")
toolchain.variables["BUILD_TOOLS"] = True
toolchain.variables["WITH_OPENSSL"] = self.options.with_openssl
toolchain.variables["WITH_IPV6"] = self.options.with_ipv6
toolchain.variables["WITH_COOKIES"] = self.options.with_cookies
toolchain.variables["WITH_C_LOCALE"] = self.options.with_c_locale
toolchain.generate()
deps = CMakeDeps(self)
deps.generate()
ms = VirtualBuildEnv(self)
ms.generate()
def build(self):
cmake = CMake(self)
cmake.configure(build_script_folder=os.path.join(self.source_folder, os.pardir))
cmake.build()
def package(self):
copy(self, "GPLv2_license.txt", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
copy(self, "LICENSE.txt", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
def package_info(self):
defines = []
if self.options.with_openssl:
libs = ["gsoapssl++", ]
defines.append("WITH_OPENSSL")
defines.append("WITH_GZIP")
else:
libs = ["gsoap++", ]
self.cpp_info.libs = libs
if self.options.with_ipv6:
defines.append("WITH_IPV6")
if self.options.with_cookies:
defines.append("WITH_COOKIES")
if self.options.with_c_locale:
defines.append("WITH_C_LOCALE")
self.cpp_info.defines = defines
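# Consumption sketch (assumed generator output; Conan's CMakeDeps derives the
# target name from the package name by default, so this is illustrative only):
#   find_package(gsoap REQUIRED)
#   target_link_libraries(app PRIVATE gsoap::gsoap)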
|
c897bfd4d5fcf8050e63251c32f5c4e8c0261ebf
|
ed576926efb18a7383e84327b4c626b8c9482a0f
|
/tests/test_binding.py
|
ce53cf1b2126112dc88a197b9288d10b673e4d28
|
[
"Apache-2.0"
] |
permissive
|
edmBernard/pybind11_opencv_numpy
|
8c29503ec6f992994f5f0b220234f8e84a7f6e21
|
ad25cbacca30b31e249884dfb0ed3f99ea2cb5d4
|
refs/heads/master
| 2022-02-05T05:20:36.898223
| 2022-01-17T07:34:07
| 2022-01-17T07:35:26
| 75,200,590
| 193
| 47
|
Apache-2.0
| 2021-09-22T12:16:45
| 2016-11-30T15:33:52
|
C++
|
UTF-8
|
Python
| false
| false
| 1,887
|
py
|
test_binding.py
|
import numpy as np
from tests import test_module as tm
from tests.utils import generate_matrix, check_matrix_content
import copy
def test_pass_py2cpp():
mat = generate_matrix()
assert(mat.shape == (10, 12, 3))
assert(mat.flags['C_CONTIGUOUS'])
assert(mat.dtype == np.uint16)
assert(tm.check_matrix_content(mat))
assert(tm.get_shape(mat) == (10, 12, 3))
def test_pass_cpp2py():
mat = tm.generate_matrix()
assert(mat.shape == (10, 12, 3))
assert(mat.flags['C_CONTIGUOUS'])
assert(mat.dtype == np.uint16)
assert(check_matrix_content(mat))
def test_passthough_cpp2cpp():
mat = tm.generate_matrix()
assert(mat.shape == (10, 12, 3))
assert(mat.flags['C_CONTIGUOUS'])
assert(check_matrix_content(mat))
assert(tm.check_matrix_content(mat))
def test_passthough_py2py():
mat = generate_matrix()
returned_mat = tm.passthru(mat)
assert(returned_mat.flags['C_CONTIGUOUS'])
assert(returned_mat.shape == (10, 12, 3))
assert(check_matrix_content(returned_mat))
def test_pointer():
pass
def test_class_member():
pass
# # Read from c++
# a = tm.read_image("tests/images/tm.png")
# print('init a: 0x%x' % id(a))
# tm.show_image(a) # work
# # Check the contiguity problem from the old version
# b = a[:, :, 0]
# tm.show_image(b)  # works, no more contiguity problem
# print('diff b: 0x%x' % id(b))
# c = copy.deepcopy(b)
# tm.show_image(c) # still works
# print('diff c: 0x%x' % id(c))
# # Proves that it's still the same thing
# d = tm.passthru(a)
# print('same d: 0x%x' % id(d))
# # Make a copy
# e = tm.clone(d)
# print('diff e: 0x%x' % id(e))
# # different allocator
# f = np.zeros(shape=(100, 100), dtype=np.uint8)
# print('\ninit e: 0x%x' % id(f))
# g = tm.passthru(f)
# print('same f: 0x%x' % id(g))
# # example of class
# my_class = tm.AddClass(1)
# h = my_class.add(f)
# print(f[0, 0]) # expected 0
# print(h[0, 0]) # expected 1
|
77d2bd61017842ac0cba2ead083ce7e63bf48485
|
916c1313c623c799e98d1bd897b3aef510172639
|
/py/phl/phlsys_pid.py
|
2a4059af99891b0f3e724d1c0ef5651bdeeb54e8
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
bloomberg/phabricator-tools
|
377ba3dba299c5d21a015bb039ae920fae5478ef
|
09bd1587fe8945d93a891162fd4c89640c6fada7
|
refs/heads/master
| 2021-01-02T19:43:48.274684
| 2019-01-11T13:34:55
| 2019-01-11T13:34:55
| 8,464,182
| 154
| 40
|
Apache-2.0
| 2022-02-14T09:57:48
| 2013-02-27T20:02:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,539
|
py
|
phlsys_pid.py
|
"""Work with process ids easily."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# phlsys_pid
#
# Public Functions:
# get
# is_running
# request_terminate
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import errno
import os
import signal
def get():
"""Return the integer pid of the current process.
:returns: int
"""
return os.getpid()
def is_running(pid):
"""Return True if 'pid' is a running process."""
# XXX: make sure we're UNIX not Windows
if pid <= 0:
# this is not a valid pid
return False
try:
os.kill(pid, 0) # signal 0 doesn't kill, it just checks
except OSError as err:
if err.errno == errno.ESRCH:
# The pid or process group does not exist. Note that an existing
# process might be a zombie, a process which already committed
# termination, but has not yet been wait(2)ed for.
return False
elif err.errno == errno.EPERM:
# The process does not have permission to send the signal to any
# of the target processes.
return True
else:
raise
return True
def request_terminate(pid):
"""Signal the specified 'pid' to terminate.
The process may ignore this signal.
:pid: integer pid, as returned by get()
"""
os.kill(pid, signal.SIGTERM)
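# Usage sketch: signal 0 is an existence probe, not an actual kill, so the
# current process always reports as running.
#   assert is_running(get())
#   request_terminate(get())  # would politely ask this very process to exit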
# -----------------------------------------------------------------------------
# Copyright (C) 2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
|
d9ab5d8c967e16c274cd7674f27461fec0cf927f
|
72f6d3ad72b2a4a9b6c5f93c5d1b744e2940b884
|
/deps/quicly/misc/find-cids.py
|
35f5441ee28710a79d3a95cd1ee9a8cc163f9cac
|
[
"MIT"
] |
permissive
|
h2o/h2o
|
70012b6527ceb54e9e2819c9c75242b18e381485
|
b165770ce704c782ddee7428ea4a0b23c8bb7894
|
refs/heads/master
| 2023-08-16T13:16:35.018003
| 2023-08-16T03:56:28
| 2023-08-16T03:56:28
| 23,029,617
| 9,377
| 983
|
MIT
| 2023-09-12T04:49:14
| 2014-08-16T23:59:03
|
C
|
UTF-8
|
Python
| false
| false
| 354
|
py
|
find-cids.py
|
#!/usr/bin/env python3
import sys
import json
if len(sys.argv) != 2:
    print("Usage: find-cids.py inTrace.jsonl")
    sys.exit(1)
cids = {}
with open(sys.argv[1], 'r') as f:
    for line in f:
        event = json.loads(line)
        if event["type"] == "accept":
            cids[event["conn"]] = None
print("Connection IDs:", list(cids.keys()))
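# Usage sketch, assuming a quicly event trace in JSON-lines form:
#   python3 find-cids.py trace.jsonl
# Each input line is one JSON event; 'accept' events carry the connection id
# in their "conn" field, which is what gets collected above.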
|
41635d714f783bf5c868ad5ddc8d19d47470621d
|
39568e19301a7a112398be542154950af25591de
|
/util/py/packages/lib/register_usage_report.py
|
eecf6e604b97a6994b7a1203c77f65d470248eb0
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lowRISC/opentitan
|
493995bc7cf7cb3aee486a5203af3fd62bba3bfc
|
51f6017b8425b14d5a4aa9abace8fe5a25ef08c8
|
refs/heads/master
| 2023-08-31T22:05:09.425796
| 2023-08-14T14:52:15
| 2023-08-31T20:31:13
| 204,516,692
| 2,077
| 634
|
Apache-2.0
| 2023-09-14T21:16:21
| 2019-08-26T16:30:16
|
SystemVerilog
|
UTF-8
|
Python
| false
| false
| 10,612
|
py
|
register_usage_report.py
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import collections
import dataclasses
import functools
import pickle
from typing import Optional
import clang.cindex
from util.py.packages.lib import ot_logging
log = ot_logging.log
@dataclasses.dataclass
class SingleFileSourceRange:
"""Represents a range of lines in a source file.
It's like clang.cindex.SourceRange, but hashable and serializable.
Critically, we can be certain it does not contain any pointers to libclang
objects.
"""
path: str
line_begin: int
line_end: int
@staticmethod
def from_source_range(
extent: clang.cindex.SourceRange) -> 'SingleFileSourceRange':
start: clang.cindex.SourceLocation = extent.start
end: clang.cindex.SourceLocation = extent.end
if start.file.name != end.file.name:
# It should be impossible for libclang to return an extent that
# spans files.
raise Exception(
"SingleFileSourceRange cannot span files: {}, {}".format(
start.file.name, end.file.name))
return SingleFileSourceRange(start.file.name, start.line, end.line)
def __hash__(self):
return hash((self.path, self.line_begin, self.line_end))
def preview(self) -> str:
"""Construct a multi-line string showing context around this source range."""
NUM_CONTEXT_LINES = 2
with open(self.path) as f:
lines = f.readlines()
line_begin = max(0, self.line_begin - NUM_CONTEXT_LINES)
lines = lines[line_begin:self.line_end + NUM_CONTEXT_LINES + 1]
preview_lines = [
f"{self.path}:{line_begin + i + 1} {s.rstrip()}"
for i, s in enumerate(lines)
]
return "\n".join(preview_lines)
@dataclasses.dataclass
class RegisterUsageReport:
"""A report that says which registers are passed to the named function.
Fields:
function_name: The function this report is about.
registers_to_callsites: Maps register tokens to callsites.
unparsed_callsites: Callsites where a register could not be automatically
detected. These are included because they require human review.
"""
function_name: str
registers_to_callsites: dict[str, set[SingleFileSourceRange]]
unparsed_callsites: set[SingleFileSourceRange]
@staticmethod
def merge_reports(
reports: list['RegisterUsageReport']
) -> Optional['RegisterUsageReport']:
if len(reports) == 0:
return None
functions = set(r.function_name for r in reports)
if len(functions) != 1:
raise Exception(
f"Reports unexpectly cover {len(functions)} functions: " +
f"{sorted(list(functions))}")
function_name = functions.pop()
registers_to_callsites: dict[
str, set[SingleFileSourceRange]] = collections.defaultdict(set)
unparsed_callsites = set()
for report in reports:
for register, callsites in report.registers_to_callsites.items():
registers_to_callsites[register] |= callsites
unparsed_callsites |= report.unparsed_callsites
return RegisterUsageReport(function_name, registers_to_callsites,
unparsed_callsites)
@dataclasses.dataclass
class RegisterUsageReportGroup:
reports: dict[str, RegisterUsageReport]
@staticmethod
def merge(
groups: list['RegisterUsageReportGroup']
) -> Optional['RegisterUsageReportGroup']:
if len(groups) == 0:
return None
reports_by_function = collections.defaultdict(list)
for group in groups:
for function, report in group.reports.items():
reports_by_function[function].append(report)
return RegisterUsageReportGroup(
reports={
function: RegisterUsageReport.merge_reports(reports)
for function, reports in reports_by_function.items()
})
@staticmethod
def deserialize(data: bytes) -> Optional['RegisterUsageReportGroup']:
try:
out = pickle.loads(data)
except Exception as e:
log.info(f"Failed to deserialize pickle: {e}")
return None
if out is None:
return None
if not isinstance(out, RegisterUsageReportGroup):
raise Exception(
f"Unpickled object has unexpected type: {type(out)}")
return out
def serialize(self) -> bytes:
return pickle.dumps(self)
class RegisterTokenPattern:
def __init__(self, pattern: list[str]):
"""Construct a TokenPattern from the given `pattern`.
Args:
pattern: A list of token values. Wildcards are represented by None.
"""
self._pattern = pattern
def count_wildcards(self):
return len([w for w in self._pattern if w is None])
def find_matches(self, tokens: list[str]) -> Optional[list[str]]:
"""Process the given tokens and return all matches.
Args:
tokens: A non-empty list of token values. No wildcards are allowed.
"""
assert [t for t in tokens if t is not None]
if len(tokens) != len(self._pattern):
return None
out = []
for t, p in zip(tokens, self._pattern):
if p is None: # The wildcard matches any token.
out.append(t)
elif t != p: # Non-wildcards must be exact matches.
return None
return out
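    # Worked example (token values are illustrative): the pattern
    # ['mmio_read', '(', None, ')'] matched against the call-site tokens
    # ['mmio_read', '(', 'REG_OFFSET', ')'] yields ['REG_OFFSET'], while
    # ['mmio_read', '(', 'a', '+', 'b', ')'] returns None (lengths differ).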
@staticmethod
def find_first_match(
patterns: list['RegisterTokenPattern'], tokens: list[str],
function_name: str,
call_site: clang.cindex.Cursor) -> RegisterUsageReport:
for pattern in patterns:
report = RegisterUsageReport(
function_name=function_name,
registers_to_callsites=collections.defaultdict(set),
unparsed_callsites=set())
matches = pattern.find_matches(tokens)
if matches is None:
continue
assert len(matches) == pattern.count_wildcards()
extent = SingleFileSourceRange.from_source_range(call_site.extent)
print("--", function_name)
print(extent.preview())
if len(matches) > 0:
for match in matches:
report.registers_to_callsites[match].add(extent)
elif pattern.count_wildcards() == 0:
report.unparsed_callsites.add(extent)
return report
raise Exception("No pattern matched tokens at call-site for " +
f"{call_site.displayname}: {tokens}")
@functools.cache
def _walk_callsites(cursor: clang.cindex.Cursor,
function_name: str) -> list[clang.cindex.Cursor]:
"""Preorder walk over `cursor` that selects call-sites of `function-name`.
This is likely the most expensive operation in a program that uses
CallSiteAnalyzer, so it's worth memoizing.
"""
out = []
for cursor in cursor.walk_preorder():
if cursor.kind != clang.cindex.CursorKind.CALL_EXPR:
continue
if cursor.displayname != function_name:
continue
print("Function call to '{}' found at {}:{}:{}".format(
cursor.spelling, cursor.location.file, cursor.location.line,
cursor.location.column))
out.append(cursor)
return out
class CallSiteAnalyzer:
def __init__(self, function_name: str, arg_index: int,
reg_token_patterns: list[RegisterTokenPattern]):
"""Create a call-site analyzer for a given function.
Token semantics:
The list of tokens patterns is intended to be complete. If we ever
hit a call site for the given function that does not match any of the
token patterns, we will raise an exception.
None values in a token pattern are interpreted as wildcards. They
should only be used to match register offsets. If there is no
wildcard, call sites will be filed in the report as items for human
review.
Args:
function_name: The function whose call sites we wish to analyze.
arg_index: The index of the argument we wish to pattern-match.
            reg_token_patterns: A list of token patterns to match with, where
              None is a stand-in for a register offset. If the list of token
              patterns is incomplete, `CallSiteAnalyzer.run()` will raise an
              uncaught exception, intentionally failing the Bazel build.
        """
self.function_name = function_name
self._patterns = reg_token_patterns
self._arg_index = arg_index
def run(self,
cursor: clang.cindex.Cursor) -> Optional[RegisterUsageReport]:
"""Analyze all relevant call-sites under `cursor`.
Returns the result of merging the reports generated for each call-site.
If no reports were generated, returns None.
"""
reports: list[RegisterUsageReport] = []
for cursor in _walk_callsites(cursor, self.function_name):
args_tokens: list[list[str]] = []
arg: clang.cindex.Cursor
for i, arg in enumerate(cursor.get_arguments()):
tokens = [t.spelling for t in arg.get_tokens()]
args_tokens.append(tokens)
assert self._arg_index < len(args_tokens)
tokens = args_tokens[self._arg_index]
# It would be surprising if any tokens produced by libclang were
# None or empty strings.
assert all(tokens), f"Each token should be truthy: {tokens}"
# It would be nice to assert that `len(tokens) > 0` since we are
# only analyzing functions that have parameters, but this property
# doesn't seem to hold when the function's call site is lost by a
# macro, e.g. `OT_DISCARD(foo(1))` would give us `tokens == [[]]`.
if len(tokens) == 0:
continue
report = RegisterTokenPattern.find_first_match(
self._patterns, tokens, cursor.displayname, cursor)
reports.append(report)
return RegisterUsageReport.merge_reports(reports)
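# Usage sketch (file names, compiler args, and patterns are illustrative, not
# taken from the OpenTitan tree): parse a translation unit with libclang, then
# run the analyzer over its root cursor.
#
#   import clang.cindex
#   index = clang.cindex.Index.create()
#   tu = index.parse('main.c', args=['-Iinclude'])
#   analyzer = CallSiteAnalyzer(
#       function_name='read_reg(unsigned int)', arg_index=0,
#       reg_token_patterns=[RegisterTokenPattern(['REG_BASE', '+', None])])
#   report = analyzer.run(tu.cursor)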
|
2ae03e5c9820d7a30a2f0dbb373ba75cd4179760
|
407d194b52fe9cf75cca9d6f3c162a565549a1ae
|
/TestHandlerLinux/bin/disable.py
|
5fe7ac60e7ac7848efc4bb52fefe487771f2635e
|
[
"Apache-2.0"
] |
permissive
|
Azure/azure-linux-extensions
|
808761f927045f00548aa68e38d4bec8651c0eba
|
3cea1567fc4f4eb5beea9884153e92d70610394d
|
refs/heads/master
| 2023-08-27T14:06:05.775617
| 2023-08-23T01:56:05
| 2023-08-23T01:56:05
| 19,841,123
| 300
| 314
|
Apache-2.0
| 2023-09-14T04:21:26
| 2014-05-16T01:38:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,241
|
py
|
disable.py
|
#!/usr/bin/env python
"""
Example Azure Handler script for Linux IaaS
Disable example
"""
import os
import imp
import time
import json
waagent=imp.load_source('waagent','/usr/sbin/waagent')
from waagent import LoggerInit
hutil=imp.load_source('HandlerUtil','./resources/HandlerUtil.py')
LoggerInit('/var/log/waagent.log','/dev/stdout')
waagent.Log("disable.py starting.")
logfile=waagent.Log
name,seqNo,version,config_dir,log_dir,settings_file,status_file,heartbeat_file,config=hutil.doParse(logfile,'Disable')
LoggerInit('/var/log/'+name+'_Disable.log','/dev/stdout')
waagent.Log(name+" - disable.py starting.")
logfile=waagent.Log
hutil.doStatusReport(name,seqNo,version,status_file,time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),name,
'Disable', 'transitioning', '0', 'Disabling', 'Process Config', 'transitioning', '0', 'Parsing ' + settings_file)
hutil.doHealthReport(heartbeat_file,'NotReady','0','Processing Settings')
error_string=''
pid=None
pidfile='./service_pid.txt'
if not os.path.isfile(pidfile):
error_string += pidfile +" is missing."
error_string = "Error: " + error_string
waagent.Error(error_string)
hutil.doStatusReport(name,seqNo,version,status_file,time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),name,
'Disable', 'transitioning', '0', 'Disabling', 'Process Config', 'transitioning', '0', 'Parsing ' + settings_file)
else:
pid = waagent.GetFileContents(pidfile)
#stop service.py
try:
os.kill(int(pid),7)
except Exception as e:
pass
    # remove pidfile
try:
os.unlink(pidfile)
except Exception as e:
pass
#Kill heartbeat.py if required.
manifest = waagent.GetFileContents('./HandlerManifest.json')
try:
s=json.loads(manifest)
except:
    waagent.Error('Error parsing HandlerManifest.json. Health report will not be available.')
    hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Disable','NotReady','0', 'Disable service.py succeeded.' + str(pid) + ' created.', 'Exit Successful', 'success', '0', 'Disable Completed.','NotReady','0',name+' disabled.')
if s[0]['handlerManifest']['reportHeartbeat'] != True:
    hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Disable','NotReady','0', 'Disable service.py succeeded.' + str(pid) + ' created.', 'Exit Successful', 'success', '0', 'Disable Completed.','Ready','0',name+' disabled.')
try:
pid = waagent.GetFileContents('./heartbeat.pid')
except:
waagent.Error('Error reading ./heartbeat.pid.')
    hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Disable','NotReady','0', 'Disable service.py succeeded.' + str(pid) + ' created.', 'Exit Successful', 'success', '0', 'Disable Completed.','NotReady','0',name+' disabled.')
if waagent.Run('kill '+pid)==0:
waagent.Log(name+" disabled.")
hutil.doExit(name,seqNo,version,0,status_file,heartbeat_file,'Disable','NotReady','0', 'Disable service succeeded. Health reporting stopped.', 'Exit Successful', 'success', '0', 'Disable Completed.','NotReady','0',name+' disabled.')
|
73f93eff2eaa6ecd99f15e540263e0c411dbd20e
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/scaleform/daapi/view/meta/cybersportunitslistmeta.py
|
5bf8b21d065e67710badf4905a27a6d63d6e37fc
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,342
|
py
|
cybersportunitslistmeta.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/CyberSportUnitsListMeta.py
from gui.Scaleform.daapi.view.lobby.rally.BaseRallyListView import BaseRallyListView
class CyberSportUnitsListMeta(BaseRallyListView):
def getTeamData(self, index):
self._printOverrideError('getTeamData')
def refreshTeams(self):
self._printOverrideError('refreshTeams')
def filterVehicles(self):
self._printOverrideError('filterVehicles')
def loadPrevious(self):
self._printOverrideError('loadPrevious')
def loadNext(self):
self._printOverrideError('loadNext')
def as_setDummyS(self, data):
return self.flashObject.as_setDummy(data) if self._isDAAPIInited() else None
def as_setDummyVisibleS(self, visible):
return self.flashObject.as_setDummyVisible(visible) if self._isDAAPIInited() else None
def as_setHeaderS(self, data):
return self.flashObject.as_setHeader(data) if self._isDAAPIInited() else None
def as_updateNavigationBlockS(self, data):
return self.flashObject.as_updateNavigationBlock(data) if self._isDAAPIInited() else None
def as_updateRallyIconS(self, iconPath):
return self.flashObject.as_updateRallyIcon(iconPath) if self._isDAAPIInited() else None
|
5888dd0ec6a856fad3e806b72f7ef69dcce68118
|
dd40ae522aee10d0e9e6ddacda0b86502512cbf3
|
/src/models/multi_stage.py
|
a8b2bc3e4fe0e70a39ac5be304dd75e01ba77dd2
|
[
"MIT"
] |
permissive
|
weecology/DeepTreeAttention
|
b6900f74e435f8267d3dd19715d7dcd25c7798d5
|
cae13f1e4271b5386e2379068f8239de3033ec40
|
refs/heads/main
| 2023-09-01T13:36:43.905518
| 2023-03-13T18:49:18
| 2023-03-13T18:49:18
| 268,541,551
| 101
| 38
|
MIT
| 2022-04-04T20:25:56
| 2020-06-01T14:14:29
|
Python
|
UTF-8
|
Python
| false
| false
| 24,465
|
py
|
multi_stage.py
|
#Multiple stage model
from functools import reduce
from src.models.year import learned_ensemble
from src.data import TreeDataset
from src import utils
from pytorch_lightning import LightningModule
import pandas as pd
import math
import numpy as np
from torch.nn import Module
from torch.nn import functional as F
from torch import nn
import torchmetrics
import torch
class base_model(Module):
def __init__(self, years, classes, config):
super().__init__()
#Load from state dict of previous run
self.model = learned_ensemble(classes=classes, years=years, config=config)
micro_recall = torchmetrics.Accuracy(average="micro")
macro_recall = torchmetrics.Accuracy(average="macro", num_classes=classes)
self.metrics = torchmetrics.MetricCollection(
{"Micro Accuracy":micro_recall,
"Macro Accuracy":macro_recall,
})
def forward(self,x):
score = self.model(x)
return score
class MultiStage(LightningModule):
def __init__(self, train_df, test_df, crowns, config, train_mode=True):
super().__init__()
# Generate each model
self.years = train_df.tile_year.unique()
self.config = config
self.models = nn.ModuleList()
self.species_label_dict = train_df[["taxonID","label"]].drop_duplicates().set_index("taxonID").to_dict()["label"]
self.index_to_label = {v:k for k,v in self.species_label_dict.items()}
self.crowns = crowns
self.level_label_dicts = []
self.label_to_taxonIDs = []
self.train_df = train_df
self.test_df = test_df
#hotfix for old naming schema
try:
self.test_df["individual"] = self.test_df["individualID"]
self.train_df["individual"] = self.train_df["individualID"]
        except KeyError:
            pass
if train_mode:
self.train_datasets, self.test_datasets = self.create_datasets()
self.levels = len(self.train_datasets)
self.classes = len(self.train_df.label.unique())
for index, ds in enumerate([self.level_0_train, self.level_1_train, self.level_2_train, self.level_3_train, self.level_4_train]):
labels = ds.label
classes = self.num_classes[index]
base = base_model(classes=classes, years=len(self.years), config=self.config)
self.models.append(base)
loss_weight = []
            for x in range(classes):
                # a zero count makes 1/np.sum(...) evaluate to inf (numpy warns
                # instead of raising), so guard the empty-class case explicitly
                count = np.sum(labels == x)
                w = 1 / count if count > 0 else 1
                loss_weight.append(w)
loss_weight = np.array(loss_weight/np.max(loss_weight))
loss_weight[loss_weight < self.config["min_loss_weight"]] = self.config["min_loss_weight"]
loss_weight = torch.tensor(loss_weight, dtype=torch.float)
pname = 'loss_weight_{}'.format(index)
self.register_buffer(pname, loss_weight)
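        # Worked example of the weighting above (illustrative numbers): class
        # counts [100, 10, 1] give raw weights [0.01, 0.1, 1.0]; dividing by the
        # max pins the rarest class at 1.0, and any weight that falls below
        # config["min_loss_weight"] is clamped up so frequent classes still
        # contribute to the loss.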
self.save_hyperparameters()
def create_datasets(self):
#Create levels for each year
## Level 0
train_datasets = []
test_datasets = []
self.num_classes = []
self.level_id = []
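        # Hierarchy assembled below: level 0 separates PIPA2 from OTHER; level 1
        # splits the remainder into CONIFER vs BROADLEAF; level 2 classifies the
        # broadleaf species with all oaks collapsed to OAK; level 3 resolves the
        # conifer species; level 4 resolves the oak species.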
self.level_label_dicts.append({"PIPA2":0,"OTHER":1})
self.label_to_taxonIDs.append({v: k for k, v in self.level_label_dicts[0].items()})
self.level_0_train = self.train_df.copy()
PIPA2 = self.level_0_train[self.level_0_train.taxonID=="PIPA2"]
nonPIPA2 = self.level_0_train[~(self.level_0_train.taxonID=="PIPA2")]
nonPIPA2ids = nonPIPA2.groupby("individual").apply(lambda x: x.head(1)).groupby("taxonID").apply(lambda x: x.head(self.config["other_sampling_ceiling"])).individual
nonPIPA2 = nonPIPA2[nonPIPA2.individual.isin(nonPIPA2ids)]
self.level_0_train = pd.concat([PIPA2, nonPIPA2])
self.level_0_train.loc[~(self.level_0_train.taxonID == "PIPA2"),"taxonID"] = "OTHER"
self.level_0_train["label"] = [self.level_label_dicts[0][x] for x in self.level_0_train.taxonID]
self.level_0_train_ds = TreeDataset(df=self.level_0_train, config=self.config)
train_datasets.append(self.level_0_train_ds)
self.num_classes.append(len(self.level_0_train.taxonID.unique()))
self.level_0_test = self.test_df.copy()
self.level_0_test.loc[~(self.level_0_test.taxonID == "PIPA2"),"taxonID"] = "OTHER"
self.level_0_test["label"]= [self.level_label_dicts[0][x] for x in self.level_0_test.taxonID]
self.level_0_test_ds = TreeDataset(df=self.level_0_test, config=self.config)
test_datasets.append(self.level_0_test_ds)
self.level_id.append(0)
## Level 1
self.level_label_dicts.append({"CONIFER":0,"BROADLEAF":1})
self.label_to_taxonIDs.append({v: k for k, v in self.level_label_dicts[1].items()})
self.level_1_train = self.train_df.copy()
self.level_1_train = self.level_1_train[~(self.level_1_train.taxonID=="PIPA2")]
self.level_1_train.loc[~self.level_1_train.taxonID.isin(["PICL","PIEL","PITA"]),"taxonID"] = "BROADLEAF"
self.level_1_train.loc[self.level_1_train.taxonID.isin(["PICL","PIEL","PITA"]),"taxonID"] = "CONIFER"
#subsample broadleaf, labels have not been converted, relate to original taxonID
conifer_ids = self.level_1_train[self.level_1_train.taxonID=="CONIFER"].individual
broadleaf_ids = self.level_1_train[self.level_1_train.taxonID=="BROADLEAF"].groupby("label").apply(
lambda x: x.sample(frac=1).groupby(
"individual").apply(lambda x: x.head(1)).head(
math.ceil(len(conifer_ids)/11)
)).individual
ids_to_keep = np.concatenate([broadleaf_ids, conifer_ids])
self.level_1_train = self.level_1_train[self.level_1_train.individual.isin(ids_to_keep)].reset_index(drop=True)
self.level_1_train["label"] = [self.level_label_dicts[1][x] for x in self.level_1_train.taxonID]
self.level_1_train_ds = TreeDataset(df=self.level_1_train, config=self.config)
train_datasets.append(self.level_1_train_ds)
self.num_classes.append(len(self.level_1_train.taxonID.unique()))
self.level_1_test = self.test_df.copy()
self.level_1_test = self.level_1_test[~(self.level_1_test.taxonID=="PIPA2")].reset_index(drop=True)
self.level_1_test.loc[~self.level_1_test.taxonID.isin(["PICL","PIEL","PITA"]),"taxonID"] = "BROADLEAF"
self.level_1_test.loc[self.level_1_test.taxonID.isin(["PICL","PIEL","PITA"]),"taxonID"] = "CONIFER"
self.level_1_test["label"] = [self.level_label_dicts[1][x] for x in self.level_1_test.taxonID]
self.level_1_test_ds = TreeDataset(df=self.level_1_test, config=self.config)
test_datasets.append(self.level_1_test_ds)
self.level_id.append(1)
## Level 2
broadleaf = [x for x in list(self.species_label_dict.keys()) if (not x in ["PICL","PIEL","PITA","PIPA2"]) & (not "QU" in x)]
broadleaf = {v:k for k, v in enumerate(broadleaf)}
broadleaf["OAK"] = len(broadleaf)
self.level_label_dicts.append(broadleaf)
self.label_to_taxonIDs.append({v: k for k, v in broadleaf.items()})
self.level_2_train = self.train_df.copy()
self.level_2_train = self.level_2_train[~self.level_2_train.taxonID.isin(["PICL","PIEL","PITA","PIPA2"])].reset_index(drop=True)
self.level_2_train.loc[self.level_2_train.taxonID.str.contains("QU"),"taxonID"] = "OAK"
non_oakid = self.level_2_train[~(self.level_2_train.taxonID=="OAK")].individual
oak_ids = self.level_2_train[self.level_2_train.taxonID=="OAK"].groupby("label").apply(lambda x: x.sample(frac=1).head(
int(len(non_oakid)/5))
).individual
ids_to_keep = np.concatenate([oak_ids, non_oakid])
self.level_2_train = self.level_2_train[self.level_2_train.individual.isin(ids_to_keep)].reset_index(drop=True)
self.level_2_train["label"] = [self.level_label_dicts[2][x] for x in self.level_2_train.taxonID]
self.level_2_train_ds = TreeDataset(df=self.level_2_train, config=self.config)
train_datasets.append(self.level_2_train_ds)
self.num_classes.append(len(self.level_2_train.taxonID.unique()))
self.level_2_test = self.test_df.copy()
self.level_2_test = self.level_2_test[~self.level_2_test.taxonID.isin(["PICL","PIEL","PITA","PIPA2"])].reset_index(drop=True)
self.level_2_test.loc[self.level_2_test.taxonID.str.contains("QU"),"taxonID"] = "OAK"
self.level_2_test["label"] = [self.level_label_dicts[2][x] for x in self.level_2_test.taxonID]
self.level_2_test_ds = TreeDataset(df=self.level_2_test, config=self.config)
test_datasets.append(self.level_2_test_ds)
self.level_id.append(2)
## Level 3
evergreen = [x for x in list(self.species_label_dict.keys()) if x in ["PICL","PIEL","PITA"]]
evergreen = {v:k for k, v in enumerate(evergreen)}
self.level_label_dicts.append(evergreen)
self.label_to_taxonIDs.append({v: k for k, v in self.level_label_dicts[3].items()})
self.level_3_train = self.train_df.copy()
self.level_3_train = self.level_3_train[self.level_3_train.taxonID.isin(["PICL","PIEL","PITA"])].reset_index(drop=True)
self.level_3_train = self.level_3_train.groupby("taxonID").apply(lambda x: x.head(self.config["evergreen_ceiling"])).reset_index(drop=True)
self.level_3_train["label"] = [self.level_label_dicts[3][x] for x in self.level_3_train.taxonID]
self.level_3_train_ds = TreeDataset(df=self.level_3_train, config=self.config)
train_datasets.append(self.level_3_train_ds)
self.num_classes.append(len(self.level_3_train.taxonID.unique()))
self.level_3_test = self.test_df.copy()
self.level_3_test = self.level_3_test[self.level_3_test.taxonID.isin(["PICL","PIEL","PITA"])].reset_index(drop=True)
self.level_3_test["label"] = [self.level_label_dicts[3][x] for x in self.level_3_test.taxonID]
self.level_3_test_ds = TreeDataset(df=self.level_3_test, config=self.config)
test_datasets.append(self.level_3_test_ds)
self.level_id.append(3)
## Level 4
oak = [x for x in list(self.species_label_dict.keys()) if "QU" in x]
self.level_label_dicts.append({v:k for k, v in enumerate(oak)})
self.label_to_taxonIDs.append({v: k for k, v in self.level_label_dicts[4].items()})
#Balance the train in OAKs
self.level_4_train = self.train_df.copy()
self.level_4_train = self.level_4_train[self.level_4_train.taxonID.str.contains("QU")].reset_index(drop=True)
self.level_4_train["label"] = [self.level_label_dicts[4][x] for x in self.level_4_train.taxonID]
ids_to_keep = self.level_4_train.groupby("taxonID").apply(
lambda x: x.sample(frac=1).groupby("individual").apply(
lambda x: x.head(1)).head(
self.config["oaks_sampling_ceiling"])).individual
self.level_4_train = self.level_4_train[self.level_4_train.individual.isin(ids_to_keep)].reset_index(drop=True)
self.level_4_train_ds = TreeDataset(df=self.level_4_train, config=self.config)
train_datasets.append(self.level_4_train_ds)
self.num_classes.append(len(self.level_4_train.taxonID.unique()))
self.level_4_test = self.test_df.copy()
self.level_4_test = self.level_4_test[self.level_4_test.taxonID.str.contains("QU")].reset_index(drop=True)
self.level_4_test["label"] = [self.level_label_dicts[4][x] for x in self.level_4_test.taxonID]
self.level_4_test_ds = TreeDataset(df=self.level_4_test, config=self.config)
test_datasets.append(self.level_4_test_ds)
self.level_id.append(4)
return train_datasets, test_datasets
def train_dataloader(self):
data_loaders = []
for ds in self.train_datasets:
data_loader = torch.utils.data.DataLoader(
ds,
batch_size=self.config["batch_size"],
shuffle=True,
num_workers=self.config["workers"],
)
data_loaders.append(data_loader)
return data_loaders
def val_dataloader(self):
## Validation loaders are a list https://github.com/PyTorchLightning/pytorch-lightning/issues/10809
data_loaders = []
for ds in self.test_datasets:
data_loader = torch.utils.data.DataLoader(
ds,
batch_size=self.config["batch_size"],
shuffle=False,
num_workers=self.config["workers"],
)
data_loaders.append(data_loader)
return data_loaders
def predict_dataloader(self, ds):
data_loader = torch.utils.data.DataLoader(
ds,
batch_size=self.config["predict_batch_size"],
shuffle=False,
num_workers=self.config["workers"]
)
return data_loader
def configure_optimizers(self):
"""Create a optimizer for each level"""
optimizers = []
for x, ds in enumerate(self.train_datasets):
optimizer = torch.optim.Adam(self.models[x].parameters(), lr=self.config["lr_{}".format(x)])
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='min',
factor=0.75,
patience=8,
verbose=True,
threshold=0.0001,
threshold_mode='rel',
cooldown=0,
eps=1e-08)
optimizers.append({'optimizer':optimizer, 'lr_scheduler': {"scheduler":scheduler, "monitor":'val_loss/dataloader_idx_{}'.format(x)}})
return optimizers
    def training_step(self, batch, batch_idx, optimizer_idx):
        """Calculate the training loss for the level matching optimizer_idx
        """
#get loss weight
loss_weights = self.__getattr__('loss_weight_'+str(optimizer_idx))
individual, inputs, y = batch[optimizer_idx]
images = inputs["HSI"]
y_hat = self.models[optimizer_idx].forward(images)
loss = F.cross_entropy(y_hat, y, weight=loss_weights)
self.log("train_loss_{}".format(optimizer_idx),loss, on_epoch=True, on_step=False)
return loss
def validation_step(self, batch, batch_idx, dataloader_idx):
"""Calculate val loss
"""
loss_weight = self.__getattr__('loss_weight_'+str(dataloader_idx))
individual, inputs, y = batch
images = inputs["HSI"]
y_hat = self.models[dataloader_idx].forward(images)
loss = F.cross_entropy(y_hat, y, weight=loss_weight)
self.log("val_loss",loss)
metric_dict = self.models[dataloader_idx].metrics(y_hat, y)
self.log_dict(metric_dict, on_epoch=True, on_step=False)
y_hat = F.softmax(y_hat, dim=1)
return {"individual":individual, "yhat":y_hat, "label":y}
def predict_step(self, batch, batch_idx):
"""Calculate predictions
"""
individual, inputs = batch
images = inputs["HSI"]
y_hats = []
for model in self.models:
y_hat = model.forward(images)
y_hat = F.softmax(y_hat, dim=1)
y_hats.append(y_hat)
return individual, y_hats
def on_predict_epoch_end(self, outputs):
outputs = self.all_gather(outputs)
def validation_epoch_end(self, validation_step_outputs):
for level, results in enumerate(validation_step_outputs):
yhat = torch.cat([x["yhat"] for x in results]).cpu().numpy()
labels = torch.cat([x["label"] for x in results]).cpu().numpy()
yhat = np.argmax(yhat, 1)
            epoch_micro = torchmetrics.functional.accuracy(
                preds=torch.tensor(yhat),
                target=torch.tensor(labels),
                average="micro")
            epoch_macro = torchmetrics.functional.accuracy(
                preds=torch.tensor(yhat),
                target=torch.tensor(labels),
                average="macro",
                num_classes=len(self.level_label_dicts[level])
            )
self.log("Epoch Micro Accuracy level {}".format(level), epoch_micro)
self.log("Epoch Macro Accuracy level {}".format(level), epoch_macro)
# Log results by species
taxon_accuracy = torchmetrics.functional.accuracy(
preds=torch.tensor(yhat),
target=torch.tensor(labels),
average="none",
num_classes=len(self.level_label_dicts[level])
)
taxon_precision = torchmetrics.functional.precision(
preds=torch.tensor(yhat),
target=torch.tensor(labels),
average="none",
num_classes=len(self.level_label_dicts[level])
)
species_table = pd.DataFrame(
{"taxonID":self.level_label_dicts[level].keys(),
"accuracy":taxon_accuracy,
"precision":taxon_precision
})
for key, value in species_table.set_index("taxonID").accuracy.to_dict().items():
self.log("Epoch_{}_accuracy".format(key), value)
for key, value in species_table.set_index("taxonID").precision.to_dict().items():
self.log("Epoch_{}_precision".format(key), value)
def gather_predictions(self, predict_df):
"""Post-process the predict method to create metrics"""
individuals = []
yhats = []
levels = []
for output in predict_df:
for index, level_results in enumerate(output[1]):
batch_individuals = np.stack(output[0])
for individual, yhat in zip(batch_individuals, level_results):
individuals.append(individual)
yhats.append(yhat)
levels.append(index)
temporal_average = pd.DataFrame({"individual":individuals,"level":levels,"yhat":yhats})
#Argmax and score for each level
predicted_label = temporal_average.groupby(["individual","level"]).yhat.apply(
lambda x: np.argmax(np.vstack(x))).reset_index().pivot(
index=["individual"],columns="level",values="yhat").reset_index()
predicted_label.columns = ["individual","pred_label_top1_level_0","pred_label_top1_level_1",
"pred_label_top1_level_2","pred_label_top1_level_3","pred_label_top1_level_4"]
predicted_score = temporal_average.groupby(["individual","level"]).yhat.apply(
lambda x: np.vstack(x).max()).reset_index().pivot(
index=["individual"],columns="level",values="yhat").reset_index()
predicted_score.columns = ["individual","top1_score_level_0","top1_score_level_1",
"top1_score_level_2","top1_score_level_3","top1_score_level_4"]
results = pd.merge(predicted_label,predicted_score)
#Label taxa
for level, label_dict in enumerate(self.label_to_taxonIDs):
results["pred_taxa_top1_level_{}".format(level)] = results["pred_label_top1_level_{}".format(level)].apply(lambda x: label_dict[x])
return results
def ensemble(self, results):
"""Given a multi-level model, create a final output prediction and score"""
ensemble_taxonID = []
ensemble_label = []
ensemble_score = []
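        # Routing below mirrors the level hierarchy: PIPA2 is decided at level 0,
        # conifers at level 3, broadleaf at level 2, and an OAK hit at level 2 is
        # refined into an oak species at level 4.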
for index,row in results.iterrows():
if row["pred_taxa_top1_level_0"] == "PIPA2":
ensemble_taxonID.append("PIPA2")
ensemble_label.append(self.species_label_dict["PIPA2"])
ensemble_score.append(row["top1_score_level_0"])
else:
if row["pred_taxa_top1_level_1"] == "BROADLEAF":
if row["pred_taxa_top1_level_2"] == "OAK":
ensemble_taxonID.append(row["pred_taxa_top1_level_4"])
ensemble_label.append(self.species_label_dict[row["pred_taxa_top1_level_4"]])
ensemble_score.append(row["top1_score_level_4"])
else:
ensemble_taxonID.append(row["pred_taxa_top1_level_2"])
ensemble_label.append(self.species_label_dict[row["pred_taxa_top1_level_2"]])
ensemble_score.append(row["top1_score_level_2"])
else:
ensemble_taxonID.append(row["pred_taxa_top1_level_3"])
ensemble_label.append(self.species_label_dict[row["pred_taxa_top1_level_3"]])
ensemble_score.append(row["top1_score_level_3"])
results["ensembleTaxonID"] = ensemble_taxonID
results["ens_score"] = ensemble_score
results["ens_label"] = ensemble_label
return results
def evaluation_scores(self, ensemble_df, experiment):
ensemble_df = ensemble_df.groupby("individual").apply(lambda x: x.head(1))
taxon_accuracy = torchmetrics.functional.accuracy(
preds=torch.tensor(ensemble_df.ens_label.values),
target=torch.tensor(ensemble_df.label.values),
average="none",
num_classes=len(self.species_label_dict)
)
taxon_precision = torchmetrics.functional.precision(
preds=torch.tensor(ensemble_df.ens_label.values),
target=torch.tensor(ensemble_df.label.values),
average="none",
num_classes=len(self.species_label_dict)
)
taxon_labels = list(self.species_label_dict)
taxon_labels.sort()
species_table = pd.DataFrame(
{"taxonID":taxon_labels,
"accuracy":taxon_accuracy,
"precision":taxon_precision
})
if experiment:
experiment.log_metrics(species_table.set_index("taxonID").accuracy.to_dict(),prefix="accuracy")
experiment.log_metrics(species_table.set_index("taxonID").precision.to_dict(),prefix="precision")
# Log result by site
if experiment:
site_data_frame =[]
for name, group in ensemble_df.groupby("siteID"):
site_micro = np.sum(group.ens_label.values == group.label.values)/len(group.ens_label.values)
site_macro = torchmetrics.functional.accuracy(
preds=torch.tensor(group.ens_label.values),
target=torch.tensor(group.label.values),
average="macro",
num_classes=len(self.species_label_dict))
experiment.log_metric("{}_macro".format(name), site_macro)
experiment.log_metric("{}_micro".format(name), site_micro)
row = pd.DataFrame({"Site":[name], "Micro Recall": [site_micro], "Macro Recall": [site_macro]})
site_data_frame.append(row)
site_data_frame = pd.concat(site_data_frame)
experiment.log_table("site_results.csv", site_data_frame)
return ensemble_df
|
a0975bf4e5b5eb3e8e10c993551abbc3beab3ff8
|
263170e7dca79883314273bb35aef1449e018361
|
/tests/fixtures/examples/nested.py
|
68b03c2b79bdbd6a76a24a687edd107351dda3a2
|
[
"CC-BY-4.0",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
se2p/pynguin
|
029cfd9c43c08a2f687a816749828054e409646e
|
cc083252c7054824bfaf200533a8b7ad45f7c4fb
|
refs/heads/main
| 2023-08-23T16:58:04.568755
| 2023-08-18T13:11:44
| 2023-08-18T13:11:44
| 282,944,472
| 1,223
| 65
|
MIT
| 2023-08-18T13:12:29
| 2020-07-27T15:50:19
|
Python
|
UTF-8
|
Python
| false
| false
| 304
|
py
|
nested.py
|
# This file is part of Pynguin.
#
# SPDX-FileCopyrightText: 2019–2023 Pynguin Contributors
#
# SPDX-License-Identifier: MIT
#
def test_me(x, y):
if x <= y:
if x == y:
print("Some output")
if x > 0:
if y == 17:
return True
return False
|
e37aeb46f99437f4d7cfbd1693ef8a446a6c10f3
|
9cdd1751bc27310f486427aaaae901ca06b79003
|
/bin/jenkins/critique-gerrit-review.py
|
c165017c4c6abb73aa02c1d2a87276c39b4cc3d6
|
[
"Apache-2.0",
"OpenSSL",
"bzip2-1.0.6",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"LicenseRef-scancode-google-patent-license-webrtc",
"PSF-2.0",
"BSD-3-Clause",
"dtoa",
"MIT",
"LicenseRef-scancode-mit-modification-obligations",
"Minpack",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
apache/impala
|
bb9970c64a43824950ec5b69f2ef4b50158a1e8a
|
b718d63860356a04814e07d91711c3c748b3e769
|
refs/heads/master
| 2023-09-03T04:29:12.639452
| 2023-06-07T23:51:15
| 2023-08-30T04:56:51
| 56,128,733
| 985
| 475
|
Apache-2.0
| 2023-08-31T14:15:44
| 2016-04-13T07:00:08
|
C++
|
UTF-8
|
Python
| false
| false
| 9,658
|
py
|
critique-gerrit-review.py
|
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Usage: critique-gerrit-review.py [--dryrun]
#
# This script is meant to run on a jenkins.impala.io build slave and post back comments
# to a code review. It does not need to run on all supported platforms so we use system
# python instead of the full Impala virtualenv.
#
# This script runs in the context of a source checkout. It posts comments for issues
# introduced between HEAD^ and HEAD. It picks up metadata from environment variables
# set by the jenkins gerrit trigger: GERRIT_CHANGE_NUMBER, GERRIT_PATCHSET_NUMBER, etc.
#
# It uses the gerrit ssh interface to post the review, connecting as
# impala-public-jenkins.
# Ref: https://gerrit-review.googlesource.com/Documentation/cmd-review.html
#
# Dependencies:
# ssh, pip, virtualenv
#
# TODO: generalise to other warnings
# * clang-tidy
from __future__ import absolute_import, division, print_function
from argparse import ArgumentParser
from collections import defaultdict
import json
import os
from os import environ
import os.path
import re
from subprocess import check_call, check_output, Popen, PIPE
import sys
import virtualenv
FLAKE8_VERSION = "3.9.2"
FLAKE8_DIFF_VERSION = "0.2.2"
VENV_PATH = "gerrit_critic_venv"
VENV_BIN = os.path.join(VENV_PATH, "bin")
PIP_PATH = os.path.join(VENV_BIN, "pip")
FLAKE8_DIFF_PATH = os.path.join(VENV_BIN, "flake8-diff")
# Limit on length of lines in source files.
LINE_LIMIT = 90
# Source file extensions that we should apply our line limit and whitespace rules to.
SOURCE_EXTENSIONS = set([".cc", ".h", ".java", ".py", ".sh", ".thrift"])
# Source file patterns that we exclude from our checks.
EXCLUDE_FILE_PATTERNS = [
re.compile(r".*be/src/kudu.*"), # Kudu source code may have different rules.
re.compile(r".*-benchmark.cc"), # Benchmark files tend to have long lines.
re.compile(r".*/function-registry/impala_functions.py"), # Many long strings.
re.compile(r".*/catalog/BuiltinsDb.java"), # Many long strings.
re.compile(r".*/codegen/gen_ir_descriptions.py"), # Many long strings.
re.compile(r".*shell/ext-py/.*"), # Third-party code.
re.compile(r".*be/src/thirdparty/.*"), # Third-party code.
re.compile(r".*/.*\.xml\.py") # Long lines in config template files.
]
def setup_virtualenv():
"""Set up virtualenv with flake8-diff."""
virtualenv.cli_run([VENV_PATH])
check_call([PIP_PATH, "install",
"flake8=={0}".format(FLAKE8_VERSION),
"flake8-diff=={0}".format(FLAKE8_DIFF_VERSION)])
def get_flake8_comments(revision):
"""Get flake8 warnings for code changes made in the git commit 'revision'.
Returns a dict with file path as keys and a list of CommentInput objects. See
https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#review-input
for information on the format."""
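    # Illustrative shape of the returned mapping (hypothetical path and line):
    # {"bin/foo.py": [{"message": "flake8: F401 'json' imported but unused",
    #                  "line": 25}]}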
comments = defaultdict(lambda: [])
# flake8 needs to be on the path.
flake8_env = os.environ.copy()
flake8_env["PATH"] = "{0}:{1}".format(VENV_BIN, flake8_env["PATH"])
base_revision = "{0}^".format(revision)
flake8_diff_proc = Popen(
[FLAKE8_DIFF_PATH, "--standard-flake8-output", "--color", "off", base_revision,
revision],
stdin=PIPE, stdout=PIPE, stderr=PIPE, env=flake8_env)
stdout, stderr = flake8_diff_proc.communicate()
# Ignore the return code since it will be non-zero if any violations are found. We want
# to continue in that case. Instead check stderr for any errors.
if stderr:
raise Exception("Did not expect flake8-diff to write to stderr:\n{0}".format(stderr))
# Match output lines like:
# bin/jenkins/flake8-gerrit-review.py:25:1: F401 'json' imported but unused
VIOLATION_RE = re.compile(r"^([^:]*):([0-9]*):([0-9]*): (.*)$")
for line in stdout.splitlines():
match = VIOLATION_RE.match(line)
if not match:
raise Exception("Pattern did not match line:\n{0}".format(line))
file, line, col, details = match.groups()
line = int(line)
col = int(col)
skip_file = False
for pattern in EXCLUDE_FILE_PATTERNS:
if pattern.match(file):
skip_file = True
break
if skip_file:
continue
comments_for_file = comments[file]
comment = {"message": "flake8: {0}".format(details)}
# Heuristic: if the error is on the first column, assume it applies to the whole line.
if col == 1:
comment["line"] = line
else:
comment["range"] = {"start_line": line, "end_line": line,
"start_character": col - 1, "end_character": col}
comments_for_file.append(comment)
return comments
def get_misc_comments(revision):
"""Get miscellaneous warnings for code changes made in the git commit 'revision', e.g.
long lines and trailing whitespace. These warnings are produced by directly parsing the
diff output."""
comments = defaultdict(lambda: [])
# Matches range information like:
# @@ -128 +133,2 @@ if __name__ == "__main__":
RANGE_RE = re.compile(r"^@@ -[0-9,]* \+([0-9]*).*$")
diff = check_output(["git", "diff", "-U0", "{0}^..{0}".format(revision)],
universal_newlines=True)
curr_file = None
check_source_file = False
curr_line_num = 0
for diff_line in diff.splitlines():
if diff_line.startswith("+++ "):
# Start of diff for a file. Strip off "+++ b/" to get the file path.
curr_file = diff_line[6:]
check_source_file = os.path.splitext(curr_file)[1] in SOURCE_EXTENSIONS
if check_source_file:
for pattern in EXCLUDE_FILE_PATTERNS:
if pattern.match(curr_file):
check_source_file = False
break
elif diff_line.startswith("@@ "):
# Figure out the starting line of the hunk. Format of unified diff is:
# @@ -128 +133,2 @@ if __name__ == "__main__":
# We want to extract the start line for the added lines
match = RANGE_RE.match(diff_line)
if not match:
raise Exception("Pattern did not match diff line:\n{0}".format(diff_line))
curr_line_num = int(match.group(1))
elif diff_line.startswith("+") and check_source_file:
# An added or modified line - check it to see if we should generate warnings.
add_misc_comments_for_line(comments, diff_line[1:], curr_file, curr_line_num)
curr_line_num += 1
return comments
def add_misc_comments_for_line(comments, line, curr_file, curr_line_num):
"""Helper for get_misc_comments to generate comments for 'line' at 'curr_line_num' in
'curr_file' and append them to 'comments'."""
# Check for trailing whitespace.
if line.rstrip() != line:
comments[curr_file].append(
{"message": "line has trailing whitespace", "line": curr_line_num})
# Check for long lines. Skip .py files since flake8 already flags long lines.
if len(line) > LINE_LIMIT and os.path.splitext(curr_file)[1] != ".py":
msg = "line too long ({0} > {1})".format(len(line), LINE_LIMIT)
comments[curr_file].append(
{"message": msg, "line": curr_line_num})
if '\t' in line:
comments[curr_file].append(
{"message": "tab used for whitespace", "line": curr_line_num})
if 'ThriftDebugString' in line:
comments[curr_file].append(
{"message": ("Please make sure you don't output sensitive data with "
"ThriftDebugString(). If so, use impala::RedactedDebugString() "
"instead."),
"line": curr_line_num })
def post_review_to_gerrit(review_input):
"""Post a review to the gerrit patchset. 'review_input' is a ReviewInput JSON object
containing the review comments. The gerrit change and patchset are picked up from
environment variables set by the gerrit jenkins trigger."""
change_num = environ["GERRIT_CHANGE_NUMBER"]
patch_num = environ["GERRIT_PATCHSET_NUMBER"]
proc = Popen(["ssh", "-p", environ["GERRIT_PORT"],
"impala-public-jenkins@" + environ["GERRIT_HOST"], "gerrit", "review",
"--project", environ["GERRIT_PROJECT"], "--json",
"{0},{1}".format(change_num, patch_num)], stdin=PIPE)
proc.communicate(json.dumps(review_input))
if proc.returncode != 0:
raise Exception("Error posting review to gerrit.")
def merge_comments(a, b):
for k, v in b.items():
a[k].extend(v)
if __name__ == "__main__":
parser = ArgumentParser(description="Generate and post gerrit comments")
parser.add_argument("--dryrun", action='store_true',
help="Don't post comments back to gerrit")
args = parser.parse_args()
setup_virtualenv()
# flake8-diff only actually works correctly on HEAD, so this is the only revision
# we can correctly handle.
revision = 'HEAD'
comments = get_flake8_comments(revision)
merge_comments(comments, get_misc_comments(revision))
review_input = {"comments": comments}
print(json.dumps(review_input, indent=True))
if not args.dryrun:
post_review_to_gerrit(review_input)
|
91559679671202d5199e5a549a865f4a64552160
|
6be59c81f3f6a17c14b812be0de3346a82eb33dd
|
/network/logic_gates.py
|
2a33d164f9a894095ecfb855d554e458676c1004
|
[] |
no_license
|
chunhuizhang/bilibili_vlogs
|
6851fdcd43f08fcf7195e345b0bc85d99c0b9128
|
0efd921b24f2af43f5972ea6909deb2fc069d305
|
refs/heads/master
| 2023-08-17T15:47:04.299072
| 2023-08-14T13:46:31
| 2023-08-14T13:46:31
| 220,612,967
| 170
| 70
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
logic_gates.py
|
import torch
def and_gate(X):
    # perceptron weights for x1, x2 and the bias input: x1 + x2 - 2 >= 0
    # holds only when both inputs are 1
    and_weights = torch.tensor([1., 1., -2.], dtype=torch.double)
    return (X @ and_weights >= 0).int()
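# A sketch in the same spirit (hypothetical extension, not in the original
# file): an OR gate fires whenever x1 + x2 - 1 >= 0, i.e. at least one input is 1.
def or_gate(X):
    or_weights = torch.tensor([1., 1., -1.], dtype=torch.double)
    return (X @ or_weights >= 0).int()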
if __name__ == '__main__':
# x1, x2, b
X = torch.tensor([(0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)], dtype=torch.double)
print(X)
    print(and_gate(X))  # expected: tensor([0, 0, 0, 1], dtype=torch.int32)
|
743816c82442619c8f662d8cdc321a16f9a4da45
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/11_动态规划/经典题/index+remain(k)/2463. 最小移动总距离.py
|
b06d68cfa12817fa503c99339de3dcbe868f5393
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,877
|
py
|
2463. 最小移动总距离.py
|
from functools import lru_cache
from typing import List
# 1 <= robot.length, factory.length <= 100
# factory[j].length == 2
# -10^9 <= robot[i], position_j <= 10^9
# 0 <= limit_j <= robot.length
# !hints at an O(n*m*k) DP, roughly 1e6 operations
# mice-into-holes model / mailbox placement model / minimum total moving distance
INF = int(1e18)
class Solution:
    def minimumTotalDistance3(self, robot: List[int], factory: List[List[int]]) -> int:
        """Heap + regret greedy, O(nlogn)"""
...
    def minimumTotalDistance(self, robot: List[int], factory: List[List[int]]) -> int:
        """dp[i][j] is the minimum total distance when the first i factories handle the first j robots
        Args:
            robot (List[int]): positions of the robots
            factory (List[List[int]]): (position, repair capacity) of each factory
        Returns:
            int: the minimum total distance moved by all robots. The test data guarantees every robot can be repaired.
        """
n, m = len(robot), len(factory)
robot.sort()
factory.sort(key=lambda x: x[0])
dp = [INF] * (n + 1)
dp[0] = 0
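        # Transition sketch: for factory i, either assign it no robots (ndp
        # starts as a copy of dp) or a contiguous block robot[j..j+k], giving
        # ndp[j+k+1] = min(ndp[j+k+1], dp[j] + sum_t |robot[j+t] - factory[i][0]|).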
for i in range(m):
ndp = dp[:]
for j in range(n):
if dp[j] == INF:
break
dist = 0
for k in range(factory[i][1]):
if j + k >= n:
break
dist += abs(robot[j + k] - factory[i][0])
ndp[j + k + 1] = min(ndp[j + k + 1], dp[j] + dist)
dp = ndp
return dp[n]
    def minimumTotalDistance2(self, robot: List[int], factory: List[List[int]]) -> int:
        """dp[i][j][k] is the minimum total distance when the first i factories handle the first j robots and the last factory takes k of them"""
@lru_cache(None)
        def dfs(fi: int, ri: int, count: int) -> int:
            """Factory-centric view: current factory fi, currently assigning robot ri, with count robots already repaired at factory fi"""
if ri == R:
return 0
if fi == F:
return INF
            res = dfs(fi + 1, ri, 0)  # !assign no more robots to this factory
if count < factory[fi][1]:
cand = abs(robot[ri] - factory[fi][0]) + dfs(fi, ri + 1, count + 1)
res = cand if cand < res else res
return res
        # !after sorting, each factory necessarily repairs a contiguous block of robots (greedy; provable by swapping adjacent assignments)
F, R = len(factory), len(robot)
robot.sort()
factory.sort(key=lambda x: x[0])
res = dfs(0, 0, 0)
dfs.cache_clear()
return res
print(Solution().minimumTotalDistance([1, 2, 3, 4, 5], [[1, 2], [3, 2], [4, 1]]))
# https://leetcode.cn/problems/minimum-total-distance-traveled/solution/-by-meyi-vbl2/
# !O(nlogn) solution
|
e73b80581e0efc94bc8cf3a5b3aa1f55cac9e146
|
4207698ab3d15c6d81bb205264b7de8297ba47e3
|
/deep_learning/seq2seq/translation_mt5/seq2seq_eval.py
|
abe913c6b384d064af8e8a90890a6c19f5caee14
|
[
"MIT",
"Python-2.0"
] |
permissive
|
ethen8181/machine-learning
|
f86c52389ea41d3da2f66720dc3459eee4aebcf6
|
efbc9b8d5c56b2ce4780d8b10ab62cb884352122
|
refs/heads/master
| 2023-08-31T00:35:57.532536
| 2023-08-23T22:19:48
| 2023-08-23T22:19:48
| 39,610,055
| 2,704
| 657
|
MIT
| 2023-07-10T04:39:36
| 2015-07-24T03:35:49
|
HTML
|
UTF-8
|
Python
| false
| false
| 5,512
|
py
|
seq2seq_eval.py
|
"""
Run a seq2seq Marian translation model evaluation on wmt16 dataset.
"""
import os
import torch
import random
import evaluate
import numpy as np
from datasets import load_dataset
from dataclasses import dataclass
from transformers import (
AutoTokenizer,
AutoModelForSeq2SeqLM,
Seq2SeqTrainingArguments,
Seq2SeqTrainer,
DataCollatorForSeq2Seq
)
from translation_utils import download_file, create_translation_data
@dataclass
class Config:
cache_dir: str = "./translation"
data_dir: str = os.path.join(cache_dir, "wmt16")
source_lang: str = 'de'
target_lang: str = 'en'
batch_size: int = 16
num_workers: int = 4
seed: int = 42
max_source_length: int = 128
max_target_length: int = 128
device: torch.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_checkpoint: str = "Helsinki-NLP/opus-mt-de-en"
def __post_init__(self):
random.seed(self.seed)
np.random.seed(self.seed)
torch.manual_seed(self.seed)
torch.cuda.manual_seed_all(self.seed)
self.tokenizer = AutoTokenizer.from_pretrained(
self.model_checkpoint,
cache_dir=self.cache_dir
)
self.model = AutoModelForSeq2SeqLM.from_pretrained(
self.model_checkpoint,
cache_dir=self.cache_dir
)
print('# of parameters: ', self.model.num_parameters())
def batch_tokenize_fn(examples):
"""
Generate the input_ids and labels field for huggingface dataset/dataset dict.
Truncation is enabled where we cap the sentence to the max length. Padding will be done later
in a data collator, so we pad examples to the longest length within a mini-batch and not
the whole dataset.
"""
sources = examples[config.source_lang]
targets = examples[config.target_lang]
model_inputs = config.tokenizer(sources, max_length=config.max_source_length, truncation=True)
# setup the tokenizer for targets,
# huggingface expects the target tokenized ids to be stored in the labels field
with config.tokenizer.as_target_tokenizer():
labels = config.tokenizer(targets, max_length=config.max_target_length, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def compute_metrics(eval_pred):
"""
note: we can run trainer.predict on our eval/test dataset to see what a sample
    eval_pred object would look like when implementing a custom compute_metrics function
"""
predictions, labels = eval_pred
    # Decode generated translations into text
decoded_preds = config.tokenizer.batch_decode(predictions, skip_special_tokens=True)
# Replace -100 in the labels as we can't decode them
labels = np.where(labels != -100, labels, config.tokenizer.pad_token_id)
    # Decode reference translations into text
decoded_labels = config.tokenizer.batch_decode(labels, skip_special_tokens=True)
result = rouge_score.compute(
predictions=decoded_preds,
references=decoded_labels,
rouge_types=["rouge1", "rouge2", "rougeL"]
)
score = sacrebleu_score.compute(
predictions=decoded_preds,
references=decoded_labels
)
result["sacrebleu"] = score["score"]
return {k: round(v, 4) for k, v in result.items()}
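# Illustrative return shape of compute_metrics (hypothetical values):
#   {"rouge1": 0.61, "rouge2": 0.38, "rougeL": 0.55, "sacrebleu": 28.4}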
def create_wmt16_data_files(config: Config):
# files are downloaded from
# http://www.statmt.org/wmt16/multimodal-task.html
urls = [
'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz',
'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz',
'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/mmt16_task1_test.tar.gz'
]
for url in urls:
download_file(url, config.data_dir)
data_files = {}
for split in ["train", "val", "test"]:
source_input_path = os.path.join(config.data_dir, f"{split}.{config.source_lang}")
target_input_path = os.path.join(config.data_dir, f"{split}.{config.target_lang}")
output_path = os.path.join(config.cache_dir, f"{split}.tsv")
create_translation_data(source_input_path, target_input_path, output_path)
data_files[split] = [output_path]
return data_files
if __name__ == "__main__":
config = Config()
data_files = create_wmt16_data_files(config)
dataset_dict = load_dataset(
'csv',
delimiter='\t',
column_names=[config.source_lang, config.target_lang],
data_files=data_files
)
dataset_dict_tokenized = dataset_dict.map(
batch_tokenize_fn,
batched=True
)
model_name = config.model_checkpoint.split("/")[-1]
output_dir = os.path.join(
config.cache_dir,
f"{model_name}_{config.source_lang}-{config.target_lang}"
)
args = Seq2SeqTrainingArguments(
output_dir=output_dir,
per_device_eval_batch_size=config.batch_size,
predict_with_generate=True
)
data_collator = DataCollatorForSeq2Seq(config.tokenizer, model=config.model)
rouge_score = evaluate.load("rouge", cache_dir=config.cache_dir)
sacrebleu_score = evaluate.load("sacrebleu", cache_dir=config.cache_dir)
trainer = Seq2SeqTrainer(
config.model,
args,
train_dataset=dataset_dict_tokenized["train"],
eval_dataset=dataset_dict_tokenized["val"],
data_collator=data_collator,
tokenizer=config.tokenizer,
compute_metrics=compute_metrics,
)
print(trainer.evaluate())
|
a0a50d7e278bc056adbac469c244751b53ae93c9
|
652d0577d5f9716423cf7f1aae116a7bfd190c6f
|
/LASS-Simulator/test/test.py
|
9a0aa9fb16ed693719d8cb666334a830ab7da4bd
|
[
"MIT"
] |
permissive
|
LinkItONEDevGroup/LASS
|
714cf74a07840161ce6a3cd9a9ac525a3d5d88d3
|
f06bd202f37f2a8fafe932feabcb119a292f016e
|
refs/heads/master
| 2023-04-30T09:03:36.609950
| 2023-04-27T00:03:26
| 2023-04-27T00:03:26
| 38,099,296
| 174
| 108
|
MIT
| 2021-02-05T10:09:21
| 2015-06-26T08:14:22
|
C
|
UTF-8
|
Python
| false
| false
| 848
|
py
|
test.py
|
import urllib.request
import simplejson
import wget
def test_json():
response = urllib.request.urlopen("http://nrl.iis.sinica.edu.tw/LASS/last-all-lass.json")
    #print(response.read())
    data = simplejson.load(response)
    print(data)
#print(data["version"])
#print(data["feeds"])
#print(len(data["feeds"]))
#print(data["feeds"][0]['device_id'])
def test_vincenty():
from vincenty import vincenty
    boston = (42.3541165, -71.0693514)  # latitude, longitude
newyork = (40.7791472, -73.9680804)
print(vincenty(boston, newyork))
p1=(34,117)
p2=(32,108)
print(vincenty(p1, p2))
#test_vincenty()
def test_urldownload():
url= 'http://opendata.cwb.gov.tw/datadownload?dataid=O-A0001-001'
out_file = "../output/O-A0001-001"
wget.download(url,out = out_file)
test_urldownload()
|
2093f4664a0f590efc1270e2b160e55982347b5a
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/PhysicsTools/PatExamples/test/analyzePatMuons_edm_cfg.py
|
a6b9382960aed8f61fb32fdd7c459213e49b43c3
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 1,182
|
py
|
analyzePatMuons_edm_cfg.py
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("Test")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
"file:patTuple_standard.root"
)
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )
process.MessageLogger = cms.Service("MessageLogger")
## ---
## This is an example of the use of the BasicAnalyzer concept used to exploit C++ classes to do analysis
## in full framework or FWLite using the same class. You can find the implementation of this module in
## PhysicsTools/PatExamples/plugins/PatMuonEDAnlyzer.cc. You can find the EDAnalyzerWrapper.h class in
## PhysicsTools/UtilAlgos/interface/EDAnalyzerWrapper.h. You can find the implementation of the
## PatMuonAnalyzer class in PhysicsTools/PatExamples/interface/PatMuonAnlyzer.h. There you will also
## find the input parameters to the module.
process.patMuonAnalyzer = cms.EDAnalyzer("PatMuonEDAnalyzer",
muons = cms.InputTag("selectedPatMuons"),
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('analyzePatMuons.root')
)
process.p = cms.Path(process.patMuonAnalyzer)
|
e920e7dfdf7026c83202a3367196b2ff9649d966
|
5475ca8d4ad2b0b68135d3ea70f32c1c55d21b65
|
/lore/pipelines/__init__.py
|
529548bce4ed65310ef0bc79437e8601e644bae9
|
[
"MIT"
] |
permissive
|
instacart/lore
|
e5c9aa49439a635ba80d66ecf5d76dc763e88308
|
a14f65a96d0ea2513a35e424b4e16d948115b89c
|
refs/heads/master
| 2023-05-25T08:09:53.463945
| 2022-09-27T19:41:48
| 2022-09-27T19:41:48
| 107,602,547
| 1,578
| 139
|
MIT
| 2023-05-13T02:26:19
| 2017-10-19T21:51:45
|
Python
|
UTF-8
|
Python
| false
| false
| 126
|
py
|
__init__.py
|
from __future__ import absolute_import
from collections import namedtuple
Observations = namedtuple('Observations', 'x y')
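# Illustrative usage (not part of the original module):
#   observations = Observations(x=features, y=labels)
#   observations.x, observations.y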
|
b5b30acba10d82a7d81d4ca1de64c533edff6e29
|
1194fe679b552cf1c8b63282cf59ffefa3923288
|
/app/api/cms/log.py
|
8da23436902dbe3fcc8089a76c74ef7ce0a08391
|
[
"MIT",
"ISC"
] |
permissive
|
TaleLin/lin-cms-flask
|
531b0a32b925bbfc2c24dc1d57564cd514084917
|
ae4a649a678e9e57d537d92c9a634648d6985e2d
|
refs/heads/master
| 2023-09-05T17:48:21.866364
| 2023-07-04T07:11:03
| 2023-07-04T07:11:03
| 165,864,588
| 881
| 260
|
NOASSERTION
| 2023-06-14T02:39:47
| 2019-01-15T14:25:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,634
|
py
|
log.py
|
import math
from flask import Blueprint, g
from lin import DocResponse, Log, db, group_required, permission_meta
from sqlalchemy import text
from app.api import AuthorizationBearerSecurity, api
from app.api.cms.schema.log import LogPageSchema, LogQuerySearchSchema, UsernameListSchema
log_api = Blueprint("log", __name__)
@log_api.route("")
@permission_meta(name="查询日志", module="日志")
@group_required
@api.validate(
resp=DocResponse(r=LogPageSchema),
before=LogQuerySearchSchema.offset_handler,
security=[AuthorizationBearerSecurity],
tags=["日志"],
)
def get_logs(query: LogQuerySearchSchema):
    """
    Browse and query logs (by user, time, keyword), shown with pagination
    """
logs = Log.query.filter()
total = logs.count()
items = logs.order_by(text("create_time desc")).offset(g.offset).limit(g.count).all()
total_page = math.ceil(total / g.count)
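    # e.g. total=101 logs with count=10 per page gives total_page = ceil(10.1) = 11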
return LogPageSchema(
page=g.page,
count=g.count,
total=total,
items=items,
total_page=total_page,
)
@log_api.route("/search")
@permission_meta(name="搜索日志", module="日志")
@group_required
@api.validate(
resp=DocResponse(r=LogPageSchema),
security=[AuthorizationBearerSecurity],
before=LogQuerySearchSchema.offset_handler,
tags=["日志"],
)
def search_logs(query: LogQuerySearchSchema):
    """
    Search logs (by user, time, keyword), shown with pagination
    """
if g.keyword:
logs = Log.query.filter(Log.message.like(f"%{g.keyword}%"))
else:
logs = Log.query.filter()
if g.name:
logs = logs.filter(Log.username == g.name)
if g.start and g.end:
logs = logs.filter(Log.create_time.between(g.start, g.end))
total = logs.count()
items = logs.order_by(text("create_time desc")).offset(g.offset).limit(g.count).all()
total_page = math.ceil(total / g.count)
return LogPageSchema(
page=g.page,
count=g.count,
total=total,
items=items,
total_page=total_page,
)
@log_api.route("/users")
@permission_meta(name="查询日志记录的用户", module="日志")
@group_required
@api.validate(
resp=DocResponse(r=UsernameListSchema),
security=[AuthorizationBearerSecurity],
tags=["日志"],
)
def get_users_for_log():
    """
    Get the usernames of all users who have recorded action logs
    """
usernames = (
db.session.query(Log.username)
.filter_by(soft=False)
.group_by(text("username"))
.having(text("count(username) > 0"))
.all()
)
return UsernameListSchema(items=[u.username for u in usernames])
|
58b3b642d496d42ef62800327ab1a897d68fecb3
|
6f2fef1b207299681f8d67d3831c400bb91de04b
|
/data_collection/gazette/spiders/pb_associacao_municipios.py
|
3356ee0eabe96efc1a458c7fb7e730e34db37299
|
[
"MIT"
] |
permissive
|
okfn-brasil/querido-diario
|
76177747aa5ad47e99514f38402e6bc747b9a715
|
548a9b1b2718dc78ba8ccb06b36cf337543ad71d
|
refs/heads/main
| 2023-08-22T04:26:30.798196
| 2023-08-18T14:12:37
| 2023-08-18T14:12:37
| 127,598,755
| 402
| 233
|
MIT
| 2023-09-14T18:56:02
| 2018-04-01T05:01:21
|
Python
|
UTF-8
|
Python
| false
| false
| 248
|
py
|
pb_associacao_municipios.py
|
from gazette.spiders.base.sigpub import SigpubGazetteSpider
class PbAssociacaoMunicipiosSpider(SigpubGazetteSpider):
name = "pb_associacao_municipios"
TERRITORY_ID = "2500000"
CALENDAR_URL = "https://www.diariomunicipal.com.br/famup"
|
a4ff7c29bbce35556c2fa88659c960668e03d74d
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/AlipayCommerceDataHotelServiceSyncResponse.py
|
accd576946880cd6db447a35ab9c76f378da2912
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 3,131
|
py
|
AlipayCommerceDataHotelServiceSyncResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceDataHotelServiceSyncResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceDataHotelServiceSyncResponse, self).__init__()
self._audit_msg = None
self._audit_status = None
self._hotel_app_id = None
self._hotel_id = None
self._outer_hotel_id = None
self._service_id = None
self._service_name = None
self._service_status = None
self._service_url = None
@property
def audit_msg(self):
return self._audit_msg
@audit_msg.setter
def audit_msg(self, value):
self._audit_msg = value
@property
def audit_status(self):
return self._audit_status
@audit_status.setter
def audit_status(self, value):
self._audit_status = value
@property
def hotel_app_id(self):
return self._hotel_app_id
@hotel_app_id.setter
def hotel_app_id(self, value):
self._hotel_app_id = value
@property
def hotel_id(self):
return self._hotel_id
@hotel_id.setter
def hotel_id(self, value):
self._hotel_id = value
@property
def outer_hotel_id(self):
return self._outer_hotel_id
@outer_hotel_id.setter
def outer_hotel_id(self, value):
self._outer_hotel_id = value
@property
def service_id(self):
return self._service_id
@service_id.setter
def service_id(self, value):
self._service_id = value
@property
def service_name(self):
return self._service_name
@service_name.setter
def service_name(self, value):
self._service_name = value
@property
def service_status(self):
return self._service_status
@service_status.setter
def service_status(self, value):
self._service_status = value
@property
def service_url(self):
return self._service_url
@service_url.setter
def service_url(self, value):
self._service_url = value
def parse_response_content(self, response_content):
response = super(AlipayCommerceDataHotelServiceSyncResponse, self).parse_response_content(response_content)
if 'audit_msg' in response:
self.audit_msg = response['audit_msg']
if 'audit_status' in response:
self.audit_status = response['audit_status']
if 'hotel_app_id' in response:
self.hotel_app_id = response['hotel_app_id']
if 'hotel_id' in response:
self.hotel_id = response['hotel_id']
if 'outer_hotel_id' in response:
self.outer_hotel_id = response['outer_hotel_id']
if 'service_id' in response:
self.service_id = response['service_id']
if 'service_name' in response:
self.service_name = response['service_name']
if 'service_status' in response:
self.service_status = response['service_status']
if 'service_url' in response:
self.service_url = response['service_url']
|
786d06927420b3ded7f282b2d0511d0547d634cb
|
2b0c04c439f5130bab3d1d983018c9f06947f296
|
/helper.py
|
b6d63dd2cd7d060719df405ec526b647ead5ba41
|
[] |
no_license
|
swordest/mec_drl
|
1c5677fc8c694895146274a17abd0f0d617d62c1
|
10d03702080bba6f4e4c4d0b4be0a7ba249eaaca
|
refs/heads/master
| 2022-10-06T09:48:03.571529
| 2022-09-02T04:05:52
| 2022-09-02T04:05:52
| 166,520,752
| 290
| 115
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,997
|
py
|
helper.py
|
import os
import numpy as np
import tensorflow as tf
from ddpg_lib import *
import ipdb as pdb
import matplotlib.pyplot as plt
state_avg = np.array([0.031,0.153,0.399,0.772,1.274,1.911,2.694,3.630,4.730,6.021,7.902])
trans_p = np.array([[0.514,0.514,1.000,],
[0.513,0.696,1.000,],
[0.513,0.745,1.000,],
[0.515,0.776,1.000,],
[0.513,0.799,1.000,],
[0.514,0.821,1.000,],
[0.516,0.842,1.000,],
[0.511,0.858,1.000,],
[0.516,0.880,1.000,],
[0.512,0.897,1.000,],
[0.671,1.000,1.000,],]) # stay, lower, higher
alpha = 3.0
ref_loss = 0.001
class MarkovModel(object):
    """Finite-state Markov fading channel model with distance-based path loss"""
def __init__(self, dis, seed=123):
# self.f_m = f_m
# self.t_p = t_p
self.dis = dis
self.path_loss = ref_loss*np.power(1./dis, alpha)
np.random.seed([seed])
# calculate the transition prob
self.trans_p = trans_p
self.state_avg = state_avg
        self.state = np.random.randint(0, 11) # states in {0,...,10}
def getCh(self):
return np.array([np.sqrt(self.path_loss*self.state_avg[self.state])])
def sampleCh(self):
temp = np.random.random()
if temp >= trans_p[self.state, 1]:
self.state += 1
elif temp >= trans_p[self.state, 0]:
self.state -= 1
        self.state = np.fmax(np.fmin(self.state, 10), 0)
return self.getCh()
def complexGaussian(row=1, col=1, amp=1.0):
real = np.random.normal(size=[row,col])[0]*np.sqrt(0.5)
img = np.random.normal(size=[row,col])[0]*np.sqrt(0.5)
return amp*(real + 1j*img)
class ARModel(object):
    """First-order autoregressive (Gauss-Markov) fading channel model"""
def __init__(self, dis, n_t=1, n_r=1, rho=0.95, seed=123):
self.dis = dis
self.n_t = n_t
self.n_r = n_r
self.path_loss = ref_loss*np.power(1./dis, alpha)
np.random.seed([seed])
self.rho = rho
self.H = complexGaussian(self.n_t, self.n_r)
def getCh(self):
return self.H*np.sqrt(self.path_loss)
def sampleCh(self):
self.H = self.rho*self.H + complexGaussian(self.n_t, self.n_r, np.sqrt(1-self.rho*self.rho))
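        # AR(1)/Gauss-Markov update: H_t = rho*H_{t-1} + sqrt(1-rho^2)*w_t,
        # which keeps the average channel power E[|H|^2] constant over time.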
return self.getCh()
class DDPGAgentLD(object):
"""agent initialization from saved model"""
def __init__(self, sess, user_config):
self.sess = sess
self.user_id = user_config['id']
self.state_dim = user_config['state_dim']
self.action_dim = user_config['action_dim']
# restore the input and output for the actor network
self.actor = ActorNetworkLD(sess, self.user_id)
def predict(self, s):
# pdb.set_trace()
return self.actor.predict(np.reshape(s, (1, self.state_dim)))[0]
def init_target_network(self):
pass
class DQNAgent(object):
    """DQN agent with epsilon-greedy exploration and a target network"""
def __init__(self, sess, user_config, train_config):
self.sess = sess
self.user_id = user_config['id']
self.state_dim = user_config['state_dim']
self.action_dim = user_config['action_dim']
self.action_bound = user_config['action_bound']
self.action_level = user_config['action_level']
self.minibatch_size = int(train_config['minibatch_size'])
self.epsilon = float(train_config['epsilon'])
self.action_nums = 1
for i in range(self.action_dim):
self.action_nums *= self.action_level
self.max_step = 100000
self.pre_train_steps = 5000
self.total_step = 0
self.DQN = DeepQNetwork(sess, self.state_dim, self.action_nums, float(train_config['critic_lr']), float(train_config['tau']), float(train_config['gamma']), self.user_id)
self.replay_buffer = ReplayBuffer(int(train_config['buffer_size']), int(train_config['random_seed']))
def init_target_network(self):
self.DQN.update_target_network()
def predict(self, s):
if self.total_step <= self.max_step:
self.epsilon *= 0.999976
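            # per-step decay: 0.999976**100000 is about 0.09, so epsilon shrinks
            # to roughly 9% of its initial value by max_step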
if np.random.rand(1) < self.epsilon or self.total_step < self.pre_train_steps:
action = np.random.randint(0, self.action_nums)
else:
action, _ = self.DQN.predict(np.reshape(s, (1, self.state_dim)))
self.total_step += 1
return action, np.zeros([1])
def update(self, s, a, r, t, s2):
self.replay_buffer.add(np.reshape(s, (self.state_dim,)), a, r,
t, np.reshape(s2, (self.state_dim,)))
if self.replay_buffer.size() > self.minibatch_size:
s_batch, a_batch, r_batch, t_batch, s2_batch = \
self.replay_buffer.sample_batch(self.minibatch_size)
# calculate targets
_, q_out = self.DQN.predict(s_batch)
target_prediction, target_q_out = self.DQN.predict_target(s2_batch)
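            # Bellman targets below: y = r for terminal transitions, otherwise
            # y = r + gamma * max_a Q_target(s', a), where both the argmax and
            # the value come from the slowly-updated target network.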
for k in range(self.minibatch_size):
if t_batch[k]:
q_out[k][a_batch[k]] = r_batch[k]
else:
q_out[k][a_batch[k]] = r_batch[k] + self.DQN.gamma * target_q_out[k][target_prediction[k]]
# Update the critic given the targets
q_loss, _ = self.DQN.train(
s_batch, q_out)
# losses.append(q_loss)
# Update target networks
self.DQN.update_target_network()
class DDPGAgent(object):
    """DDPG agent: actor-critic with target networks and exploration noise"""
def __init__(self, sess, user_config, train_config):
self.sess = sess
self.user_id = user_config['id']
self.state_dim = user_config['state_dim']
self.action_dim = user_config['action_dim']
self.action_bound = user_config['action_bound']
self.init_path = user_config['init_path'] if 'init_path' in user_config else ''
self.minibatch_size = int(train_config['minibatch_size'])
self.noise_sigma = float(train_config['noise_sigma'])
        # initialize the required modules: actor, critic and replay buffer
self.actor = ActorNetwork(sess, self.state_dim, self.action_dim, self.action_bound, float(train_config['actor_lr']), float(train_config['tau']), self.minibatch_size, self.user_id)
self.critic = CriticNetwork(sess, self.state_dim, self.action_dim, float(train_config['critic_lr']), float(train_config['tau']), float(train_config['gamma']), self.actor.get_num_trainable_vars())
self.replay_buffer = ReplayBuffer(int(train_config['buffer_size']), int(train_config['random_seed']))
# mu, sigma=0.12, theta=.15, dt=1e-2,
self.actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(self.action_dim),sigma=self.noise_sigma)
# self.actor_noise = GaussianNoise(0.1, 0.01, size=np.array([self.action_dim]))
def init_target_network(self):
# Initialize the original network and target network with pre-trained model
if len(self.init_path) == 0:
self.actor.update_target_network()
else:
self.actor.init_target_network(self.init_path)
self.critic.update_target_network()
# input current state and then return the next action
def predict(self, s, isUpdateActor):
if isUpdateActor:
noise = self.actor_noise()
else:
noise = np.zeros(self.action_dim)
return self.actor.predict(np.reshape(s, (1, self.actor.s_dim)))[0] + noise, noise
# return self.actor.predict(np.reshape(s, (1, self.actor.s_dim))) + np.random.normal(0.0,0.1,[self.action_dim])
def update(self, s, a, r, t, s2, isUpdateActor):
self.replay_buffer.add(np.reshape(s, (self.actor.s_dim,)), np.reshape(a, (self.actor.a_dim,)), r,
t, np.reshape(s2, (self.actor.s_dim,)))
if self.replay_buffer.size() > self.minibatch_size:
s_batch, a_batch, r_batch, t_batch, s2_batch = \
self.replay_buffer.sample_batch(self.minibatch_size)
# calculate targets
target_q = self.critic.predict_target(
s2_batch, self.actor.predict_target(s2_batch))
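            # Standard DDPG bootstrap: y = r + gamma * Q'(s', mu'(s')), with
            # target critic Q' and target actor mu'.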
y_i = []
for k in range(self.minibatch_size):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + self.critic.gamma * target_q[k])
# Update the critic given the targets
predicted_q_value, _ = self.critic.train(
s_batch, a_batch, np.reshape(y_i, (self.minibatch_size, 1)))
if isUpdateActor:
# Update the actor policy using the sampled gradient
a_outs = self.actor.predict(s_batch)
grads = self.critic.action_gradients(s_batch, a_outs)
self.actor.train(s_batch, grads[0])
# Update target networks
self.actor.update_target_network()
self.critic.update_target_network()
def test_helper(env, num_steps):
cur_init_ds_ep = env.reset()
user_list = env.user_list
cur_r_ep = np.zeros(len(user_list))
cur_p_ep = np.zeros(len(user_list))
cur_ts_ep = np.zeros(len(user_list))
cur_ps_ep = np.zeros(len(user_list))
cur_rs_ep = np.zeros(len(user_list))
cur_ds_ep = np.zeros(len(user_list))
cur_ch_ep = np.zeros(len(user_list))
for j in range(num_steps):
# first try to transmit from current state
[cur_r, done, cur_p, cur_n, cur_ts, cur_ps, cur_rs, cur_ds, cur_ch] = env.step_transmit()
cur_r_ep += cur_r
cur_p_ep += cur_p
cur_ts_ep += cur_ts
cur_rs_ep += cur_rs
cur_ds_ep += cur_ds
cur_ch_ep += cur_ch
if cur_r <= -1000:
print("<-----!!!----->")
print('%d:r:%f,p:%s,n:%s,tr:%s,ps:%s, rev:%s,dbuf:%s,ch:%s,ibuf:%s' % (j, cur_r, cur_p, cur_n, cur_ts, cur_ps, cur_rs, cur_ds, cur_ch, cur_init_ds_ep))
    print('r:%.4f,p:%.4f,tr:%.4f,pr:%.4f,rev:%.4f,dbuf:%.4f,ch:%.8f,ibuf:%d' % (cur_r_ep/num_steps, cur_p_ep/num_steps, cur_ts_ep/num_steps, cur_ps_ep/num_steps, cur_rs_ep/num_steps, cur_ds_ep/num_steps, cur_ch_ep/num_steps, cur_init_ds_ep[0]))
def plot_everything(res, win=10):
length = len(res)
temp = np.array(res)
rewards = temp[:,:,0]
avg_r = np.sum(rewards, axis=1)/rewards.shape[1]
plt.plot(range(avg_r.shape[0]), avg_r)
avg_r_sm = moving_average(avg_r, win)
plt.plot(range(avg_r_sm.shape[0]), avg_r_sm)
plt.xlabel('step')
plt.ylabel('Total moving reward')
plt.show()
powers = temp[:,:,2]
avg_p = np.sum(powers, axis=1)/powers.shape[1]
plt.plot(range(avg_p.shape[0]), avg_p)
avg_p_sm = moving_average(avg_p, win)
plt.plot(range(avg_p_sm.shape[0]), avg_p_sm)
plt.xlabel('step')
plt.ylabel('power')
plt.show()
bufs = temp[:,:,7]
avg_b = np.sum(bufs, axis=1)/bufs.shape[1]
plt.plot(range(avg_b.shape[0]), avg_b)
avg_b_sm = moving_average(avg_b, win)
plt.plot(range(avg_b_sm.shape[0]), avg_b_sm)
plt.xlabel('step')
plt.ylabel('buffer length')
plt.show()
ofs = temp[:,:,9]
avg_o = np.sum(ofs, axis=1)/ofs.shape[1]
plt.plot(range(avg_o.shape[0]), avg_o)
avg_o_sm = moving_average(avg_o, win)
plt.plot(range(avg_o_sm.shape[0]), avg_o_sm)
plt.xlabel('step')
plt.ylabel('overflow probability')
plt.show()
return avg_r, avg_p, avg_b, avg_o
def read_log(dir_path, user_idx=0):
fileList = os.listdir(dir_path)
fileList = [name for name in fileList if '.npz' in name]
avg_rs = []
avg_ps = []
avg_bs = []
avg_os = []
for name in fileList:
path = dir_path + name
res = np.load(path)
temp_rs = np.array(res['arr_0'])
avg_rs.append(temp_rs[:, user_idx])
temp_ps = np.array(res['arr_1'])
avg_ps.append(temp_ps[:, user_idx])
temp_bs = np.array(res['arr_2'])
avg_bs.append(temp_bs[:, user_idx])
temp_os = np.array(res['arr_3'])
avg_os.append(temp_os[:, user_idx])
avg_rs = np.array(avg_rs)
avg_ps = np.array(avg_ps)
avg_bs = np.array(avg_bs)
avg_os = np.array(avg_os)
return avg_rs, avg_ps, avg_bs, avg_os
def plot_curve(rs, ps, bs, ovs, win=10):  # 'os' renamed to 'ovs' to avoid shadowing the os module
for avg_r in rs:
avg_r_sm = moving_average(avg_r, win)
plt.plot(range(avg_r.shape[0]), avg_r)
plt.plot(range(avg_r_sm.shape[0]), avg_r_sm)
plt.xlabel('step')
plt.ylabel('Total moving reward')
plt.show()
for avg_p in ps:
avg_p_sm = moving_average(avg_p, win)
plt.plot(range(avg_p.shape[0]), avg_p)
plt.plot(range(avg_p_sm.shape[0]), avg_p_sm)
plt.xlabel('step')
plt.ylabel('power')
plt.show()
for avg_b in bs:
avg_b_sm = moving_average(avg_b, win)
plt.plot(range(avg_b.shape[0]), avg_b)
plt.plot(range(avg_b_sm.shape[0]), avg_b_sm)
plt.xlabel('step')
plt.ylabel('buffer length')
plt.show()
for avg_o in ovs:
avg_o_sm = moving_average(avg_o, win)
plt.plot(range(avg_o.shape[0]), avg_o)
plt.plot(range(avg_o_sm.shape[0]), avg_o_sm)
plt.xlabel('step')
plt.ylabel('overflow probability')
plt.show()
def moving_average(a, n=3) :
ret = np.cumsum(a, dtype=float, axis=0)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
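# Worked example (not in the original file): with a window of n=3,
#   moving_average(np.array([1, 2, 3, 4, 5]), n=3) -> array([2., 3., 4.])
# i.e. the means of the sliding windows [1,2,3], [2,3,4], [3,4,5].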
# import matplotlib.pyplot as plt
# N = 8
# y = np.zeros(N)
# x1 = np.linspace(0, 10, N, endpoint=True)
# x2 = np.linspace(0, 10, N, endpoint=False)
# plt.plot(x1, y, 'o')
# plt.plot(x2, y + 0.5, 'o')
# plt.ylim([-0.5, 1])
# plt.savefig('ex.eps', format='eps', dpi=1000)
# plt.show()
|
76fa6eefb4c028f7e5c4dc3641611a3333c4d49d
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/terraform/checks/resource/azure/AzureBatchAccountUsesKeyVaultEncryption.py
|
5b67c3c733b9397d2d5ece61bbf0726e74f730d9
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 810
|
py
|
AzureBatchAccountUsesKeyVaultEncryption.py
|
from checkov.common.models.consts import ANY_VALUE
from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class AzureBatchAccountUsesKeyVaultEncryption(BaseResourceValueCheck):
def __init__(self):
name = "Ensure that Azure Batch account uses key vault to encrypt data"
id = "CKV_AZURE_76"
supported_resources = ['azurerm_batch_account']
categories = [CheckCategories.ENCRYPTION]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return 'key_vault_reference/[0]/id'
def get_expected_value(self):
return ANY_VALUE
check = AzureBatchAccountUsesKeyVaultEncryption()
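# Illustrative sketch only (not part of checkov): the inspected key
# 'key_vault_reference/[0]/id' reads as a walk through the parsed resource
# configuration -- attribute name, list index, nested attribute. A minimal
# resolver over a plain dict could look like this; `conf` below is hypothetical.
def _resolve(conf, path):
    node = conf
    for part in path.split('/'):
        if part.startswith('[') and part.endswith(']'):
            node = node[int(part[1:-1])]  # list index segment such as [0]
        else:
            node = node[part]             # attribute lookup
    return node
# _resolve({'key_vault_reference': [{'id': 'kv-id'}]},
#          'key_vault_reference/[0]/id') -> 'kv-id'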
|
e252f3aae3086f6d4bc373b05e46fa8ef02af837
|
8bd04e1685be72706b3c28a159cc8f744a4a5f65
|
/tests/test_compression.py
|
845e7734414be71fe116538bdf5a580a8a57c9cf
|
[
"MIT"
] |
permissive
|
theopolis/uefi-firmware-parser
|
e55384b638026d79c03f51e2760ada6047db3269
|
f05ed14eaf4013f62aa19e74c434a6a465583423
|
refs/heads/master
| 2023-08-24T03:14:54.480442
| 2023-08-12T15:59:06
| 2023-08-12T15:59:06
| 16,303,018
| 656
| 171
|
NOASSERTION
| 2023-08-12T17:12:01
| 2014-01-28T05:25:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,367
|
py
|
test_compression.py
|
import unittest
import struct
from uefi_firmware import efi_compressor
class CompressionTest(unittest.TestCase):
def _test_compress(self, compress_algorithm):
default_buffer = b"AAAAAAAA" * 90
compressed_buffer = compress_algorithm(
default_buffer, len(default_buffer))
self.assertTrue(compressed_buffer is not None)
self.assertGreater(len(compressed_buffer), 8)
compressed_size, uncompressed_size = struct.unpack(
"<II", compressed_buffer[:8])
self.assertEqual(len(compressed_buffer) - 8, compressed_size)
def _test_decompress(self, compress_algorithm, decompress_algorithm):
default_buffer = b"AAAAAAAA" * 90
compressed_buffer = compress_algorithm(
default_buffer, len(default_buffer))
decompressed_buffer = decompress_algorithm(
compressed_buffer, len(compressed_buffer))
self.assertTrue(decompressed_buffer is not None)
self.assertEqual(len(decompressed_buffer), len(default_buffer))
self.assertEqual(decompressed_buffer, default_buffer)
def test_efi_compress(self):
self._test_compress(efi_compressor.EfiCompress)
def test_efi_decompress(self):
self._test_decompress(
efi_compressor.EfiCompress, efi_compressor.EfiDecompress)
def test_tiano_compress(self):
self._test_compress(efi_compressor.TianoCompress)
def test_tiano_decompress(self):
self._test_decompress(
efi_compressor.TianoCompress, efi_compressor.TianoDecompress)
def test_lzma_compress(self):
default_buffer = b"AAAAAAAA" * 90
compressed_buffer = efi_compressor.LzmaCompress(
default_buffer, len(default_buffer))
self.assertTrue(compressed_buffer is not None)
def test_lzma_decompress(self):
default_buffer = b"AAAAAAAA" * 90
compressed_buffer = efi_compressor.LzmaCompress(
default_buffer, len(default_buffer))
decompressed_buffer = efi_compressor.LzmaDecompress(
compressed_buffer,
len(compressed_buffer)
)
self.assertTrue(decompressed_buffer is not None)
self.assertEqual(len(decompressed_buffer), len(default_buffer))
self.assertEqual(decompressed_buffer, default_buffer)
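# A small illustrative sketch (not part of the original suite): the EFI and
# Tiano buffers checked above begin with an 8-byte header of two little-endian
# uint32 values -- compressed size, then uncompressed size -- before the payload.
def _header_layout_example():
    payload = b"\x00" * 16                          # stand-in compressed bytes
    header = struct.pack("<II", len(payload), 720)  # 720 == len(b"AAAAAAAA" * 90)
    buf = header + payload
    assert struct.unpack("<II", buf[:8]) == (16, 720)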
if __name__ == '__main__':
unittest.main()
|
2bf271353b8b0e2dca381d7a39cd725916b7ba06
|
6630694f401f6f475dd81bb01ff9368db844ccff
|
/configs/repvgg/repvgg-D2se_8xb32_in1k.py
|
f532dcd79686a119e1bed528a1e7c36195e70857
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmpretrain
|
98a4d6b3bb747efc3d50decebf84fc3ffa41076a
|
d2ccc44a2c8e5d49bb26187aff42f2abc90aee28
|
refs/heads/main
| 2023-08-30T19:11:24.771498
| 2023-08-23T02:45:18
| 2023-08-23T02:45:18
| 278,415,292
| 652
| 186
|
Apache-2.0
| 2023-09-08T08:01:40
| 2020-07-09T16:25:04
|
Python
|
UTF-8
|
Python
| false
| false
| 675
|
py
|
repvgg-D2se_8xb32_in1k.py
|
_base_ = './repvgg-B3_8xb32_in1k.py'
model = dict(backbone=dict(arch='D2se'), head=dict(in_channels=2560))
param_scheduler = [
# warm up learning rate scheduler
dict(
type='LinearLR',
start_factor=0.0001,
by_epoch=True,
begin=0,
end=5,
# update by iter
convert_to_iter_based=True),
# main learning rate scheduler
dict(
type='CosineAnnealingLR',
T_max=295,
eta_min=1.0e-6,
by_epoch=True,
begin=5,
end=300)
]
train_cfg = dict(by_epoch=True, max_epochs=300)
default_hooks = dict(
checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3))
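# Illustrative sketch (not part of the config): the two schedulers above give a
# 5-epoch linear warm-up followed by cosine decay to eta_min over epochs 5-300.
# `base_lr` below is hypothetical; the real value comes from the inherited
# _base_ config.
#
#   import math
#   def approx_lr(epoch, base_lr=0.1, warmup=5, total=300,
#                 start_factor=1e-4, eta_min=1e-6):
#       if epoch < warmup:                       # LinearLR phase
#           return base_lr * (start_factor + (1.0 - start_factor) * epoch / warmup)
#       t = (epoch - warmup) / (total - warmup)  # CosineAnnealingLR phase
#       return eta_min + (base_lr - eta_min) * (1.0 + math.cos(math.pi * t)) / 2.0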
|
6c436f0d63c82a0c80b2f9cc2d27d1d478b43055
|
42e62cdf4280d70e62522afa4597e9f84b9ce4d6
|
/tractseg/experiments/pretrained_models/DmReg_All_xtract_PeakRot4.py
|
adac1723ad21087afbf042e72b018f5348e68ea8
|
[
"Apache-2.0"
] |
permissive
|
MIC-DKFZ/TractSeg
|
7f8c224662b7d55cf7668cb3014e777b29caacc7
|
4098bc264bb454431a4a87c76ee1d70f531f3287
|
refs/heads/master
| 2023-06-25T15:46:01.804643
| 2023-04-14T07:02:25
| 2023-04-14T07:02:25
| 109,972,425
| 211
| 74
|
Apache-2.0
| 2022-10-11T13:45:26
| 2017-11-08T12:23:08
|
Python
|
UTF-8
|
Python
| false
| false
| 884
|
py
|
DmReg_All_xtract_PeakRot4.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from tractseg.data import dataset_specific_utils
from tractseg.experiments.dm_reg import Config as DmRegConfig
class Config(DmRegConfig):
EXP_NAME = os.path.basename(__file__).split(".")[0]
DATASET = "HCP_all"
DATASET_FOLDER = "HCP_preproc_all"
FEATURES_FILENAME = "32g90g270g_CSD_BX"
CLASSES = "xtract"
NR_OF_CLASSES = len(dataset_specific_utils.get_bundle_names(CLASSES)[1:])
RESOLUTION = "1.25mm"
LABELS_FILENAME = "bundle_masks_xtract_dm"
# Final DM will be thresholded at this value
THRESHOLD = 0.0001 # use lower value so user has more choice
NUM_EPOCHS = 300
EPOCH_MULTIPLIER = 0.5
DAUG_ROTATE = True
SPATIAL_TRANSFORM = "SpatialTransformPeaks"
# rotation: 2*np.pi = 360 degrees (-> 0.8 ~ 45 degrees, 0.4 ~ 22 degrees)
DAUG_ROTATE_ANGLE = (-0.4, 0.4)
|
c201242bc598ee671d780e0c5ef2e2085a71a09b
|
be7a79f3c590f0923f1e793c6a36cfebd9ca4d01
|
/brocolli/converter/onnx_layers/gelu_layer.py
|
e533a6de583b81e3c2aaf5359acfeea3bdfdbe27
|
[
"MIT"
] |
permissive
|
inisis/brocolli
|
f255d44dc9148fd2b3bc82f6a21e429a579399b4
|
46a3d8c5e19e481746a9c8a85c5e9a71a49b846c
|
refs/heads/master
| 2023-07-22T09:37:19.480983
| 2023-07-17T14:25:35
| 2023-07-17T14:25:35
| 168,733,444
| 326
| 72
|
MIT
| 2023-06-04T17:03:43
| 2019-02-01T17:17:22
|
Python
|
UTF-8
|
Python
| false
| false
| 875
|
py
|
gelu_layer.py
|
import torch
import torch.nn.functional as F
from loguru import logger
from onnx import helper
from onnxruntime_extensions import onnx_op, PyOp
from .base_layer import BaseLayer
class GELULayer(BaseLayer):
def __init__(self, source_node, module=None, auto_gen=True):
super(GELULayer, self).__init__(source_node, module, auto_gen)
def generate_node(self, name=None, params=None, attr_dict=None):
node = helper.make_node(
"GELU",
self._in_names,
self._out_names,
self._name,
domain="ai.onnx.contrib",
)
logger.info(f"{self.__class__.__name__}: {self._name} created")
self._node.append(node)
@onnx_op(
op_type="GELU",
inputs=[PyOp.dt_float],
outputs=[PyOp.dt_float],
)
def GELU(x):
x = torch.from_numpy(x)
output = F.gelu(x)
return output
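# Hedged numerical reference (not part of the exporter): the exact GELU that
# F.gelu computes by default, handy for sanity-checking the custom op above.
# GELU(x) = x * Phi(x), where Phi is the standard normal CDF.
import math
def gelu_scalar(x: float) -> float:
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))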
|
14ca33ec822a6eb8f49685fd6c3aba8fe9b211a5
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/psi/assignment/TuplePack.py
|
e1b4b0e34d0eb56135f0a76cdd2178d0cc59fb17
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
TuplePack.py
|
<dst>some_tuple = <src1>1, <src2>2
|
6ebab22687dee893894deafef2ea41e435629e2c
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/ios/tests/ShowLldpNeighborsDetail/cli/equal/golden_output_5_expected.py
|
f2aaa679c885a13b6f55e6922f767cf8b65a3009
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
golden_output_5_expected.py
|
expected_output = {
"interfaces": {
"GigabitEthernet1/0/19": {
"if_name": "GigabitEthernet1/0/19",
"port_id": {
"6400.33ff.4444": {
"neighbors": {
"not advertised": {
"neighbor_id": "not advertised",
"chassis_id": "6400.33ff.4444",
"port_id": "6400.33ff.4444",
"system_name": "not advertised",
"time_remaining": 3284,
"management_address": "not advertised",
"auto_negotiation": "supported, enabled",
"physical_media_capabilities": ["1000baseT(FD)"],
}
}
}
},
}
},
"med_information": {
"device_type": "Endpoint Class I",
"location": "not advertised",
},
"total_entries": 1,
}
|
b90a31562dbf98acd41ee128eb7abb7b160c66d2
|
7bc1d8634529eac952490399fb71f10bcedf05cc
|
/tests/scripts/thread-cert/border_router/test_multi_thread_networks.py
|
4e0823b0aa3c3817a8110794e198065c51cda92e
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause"
] |
permissive
|
openthread/openthread
|
6a9e25d1cd224bde9796d9616f04f423dba27d77
|
102a631cb3f8938389d0d10199a14c59184039cd
|
refs/heads/main
| 2023-08-18T10:46:03.820124
| 2023-08-17T22:20:55
| 2023-08-17T22:20:55
| 55,808,787
| 3,485
| 1,296
|
BSD-3-Clause
| 2023-09-14T15:50:53
| 2016-04-08T20:47:41
|
C++
|
UTF-8
|
Python
| false
| false
| 6,384
|
py
|
test_multi_thread_networks.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import logging
import unittest
import config
import thread_cert
# Test description:
# This test verifies bi-directional connectivity across multiple Thread networks.
#
# Topology:
# -------------(eth)----------------
# | |
# BR1 BR2
# | |
# ROUTER1 ROUTER2
#
# Thread Net1 Thread Net2
#
BR1 = 1
ROUTER1 = 2
BR2 = 3
ROUTER2 = 4
CHANNEL1 = 18
CHANNEL2 = 19
class MultiThreadNetworks(thread_cert.TestCase):
USE_MESSAGE_FACTORY = False
TOPOLOGY = {
BR1: {
'name': 'BR_1',
'allowlist': [ROUTER1],
'is_otbr': True,
'version': '1.2',
'channel': CHANNEL1,
},
ROUTER1: {
'name': 'Router_1',
'allowlist': [BR1],
'version': '1.2',
'channel': CHANNEL1,
},
BR2: {
'name': 'BR_2',
'allowlist': [ROUTER2],
'is_otbr': True,
'version': '1.2',
'channel': CHANNEL2,
},
ROUTER2: {
'name': 'Router_2',
'allowlist': [BR2],
'version': '1.2',
'channel': CHANNEL2,
},
}
def test(self):
br1 = self.nodes[BR1]
router1 = self.nodes[ROUTER1]
br2 = self.nodes[BR2]
router2 = self.nodes[ROUTER2]
br1.start()
self.simulator.go(config.LEADER_STARTUP_DELAY)
self.assertEqual('leader', br1.get_state())
router1.start()
self.simulator.go(config.ROUTER_STARTUP_DELAY)
self.assertEqual('router', router1.get_state())
br2.start()
self.simulator.go(config.LEADER_STARTUP_DELAY)
self.assertEqual('leader', br2.get_state())
router2.start()
self.simulator.go(config.ROUTER_STARTUP_DELAY)
self.assertEqual('router', router2.get_state())
# Wait for network to stabilize
self.simulator.go(15)
self.collect_ipaddrs()
logging.info("BR1 addrs: %r", br1.get_addrs())
logging.info("ROUTER1 addrs: %r", router1.get_addrs())
logging.info("BR2 addrs: %r", br2.get_addrs())
logging.info("ROUTER2 addrs: %r", router2.get_addrs())
self.assertTrue(len(br1.get_netdata_omr_prefixes()) == 1)
self.assertTrue(len(router1.get_netdata_omr_prefixes()) == 1)
self.assertTrue(len(br2.get_netdata_omr_prefixes()) == 1)
self.assertTrue(len(router2.get_netdata_omr_prefixes()) == 1)
br1_omr_prefix = br1.get_br_omr_prefix()
br2_omr_prefix = br2.get_br_omr_prefix()
self.assertNotEqual(br1_omr_prefix, br2_omr_prefix)
# Each BR should independently register an external route for the on-link prefix
# and OMR prefix in another Thread Network.
self.assertTrue(len(br1.get_netdata_non_nat64_prefixes()) == 1)
self.assertTrue(len(router1.get_netdata_non_nat64_prefixes()) == 1)
self.assertTrue(len(br2.get_netdata_non_nat64_prefixes()) == 1)
self.assertTrue(len(router2.get_netdata_non_nat64_prefixes()) == 1)
br1_external_routes = br1.get_routes()
br2_external_routes = br2.get_routes()
br1_external_routes.sort()
br2_external_routes.sort()
self.assertNotEqual(br1_external_routes, br2_external_routes)
self.assertTrue(len(router1.get_ip6_address(config.ADDRESS_TYPE.OMR)) == 1)
self.assertTrue(len(router2.get_ip6_address(config.ADDRESS_TYPE.OMR)) == 1)
self.assertTrue(router1.ping(router2.get_ip6_address(config.ADDRESS_TYPE.OMR)[0]))
self.verify_border_routing_counters(br1, {'inbound_unicast': 1, 'outbound_unicast': 1})
self.verify_border_routing_counters(br2, {'inbound_unicast': 1, 'outbound_unicast': 1})
self.assertTrue(router2.ping(router1.get_ip6_address(config.ADDRESS_TYPE.OMR)[0]))
self.verify_border_routing_counters(br1, {'inbound_unicast': 1, 'outbound_unicast': 1})
self.verify_border_routing_counters(br2, {'inbound_unicast': 1, 'outbound_unicast': 1})
self.assertGreater(br1.get_border_routing_counters()['ra_rx'], 0)
self.assertGreater(br1.get_border_routing_counters()['ra_tx_success'], 0)
self.assertGreater(br1.get_border_routing_counters()['rs_tx_success'], 0)
def verify_border_routing_counters(self, br, expect_delta):
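# Each delta entry appears to be a pair per counter: index 0 (packet count)
# must match the expected delta exactly, while index 1 (byte count) only
# needs to be positive.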
delta_counters = br.read_border_routing_counters_delta()
self.assertEqual(set(delta_counters.keys()), set(expect_delta.keys()))
for key in delta_counters:
self.assertEqual(delta_counters[key][0], expect_delta[key])
self.assertGreater(delta_counters[key][1], 0)
if __name__ == '__main__':
unittest.main()
|
cd21bc8588a1c879b3abfe49a20c7ae8c52f2c87
|
85ccd32aa73eecf274a937f1fc3b6f4d484b77da
|
/mesonbuild/dependencies/dev.py
|
ec7015103b7ae8147bd2421319ef5b51122702e5
|
[
"Apache-2.0"
] |
permissive
|
mesonbuild/meson
|
48321cf4235dfcc0194fed90ff43a57367592bf7
|
cf5adf0c646474f0259d123fad60ca5ed38ec891
|
refs/heads/master
| 2023-09-01T05:58:50.807952
| 2023-03-17T20:27:37
| 2023-08-31T11:52:41
| 19,784,232
| 5,122
| 1,848
|
Apache-2.0
| 2023-09-14T15:47:23
| 2014-05-14T15:08:16
|
Python
|
UTF-8
|
Python
| false
| false
| 30,147
|
py
|
dev.py
|
# Copyright 2013-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for external dependencies useful for
# development purposes, such as testing, debugging, etc..
from __future__ import annotations
import glob
import os
import re
import pathlib
import shutil
import subprocess
import typing as T
import functools
from mesonbuild.interpreterbase.decorators import FeatureDeprecated
from .. import mesonlib, mlog
from ..environment import get_llvm_tool_names
from ..mesonlib import version_compare, version_compare_many, search_version, stringlistify, extract_as_list
from .base import DependencyException, DependencyMethods, detect_compiler, strip_system_includedirs, strip_system_libdirs, SystemDependency, ExternalDependency, DependencyTypeName
from .cmake import CMakeDependency
from .configtool import ConfigToolDependency
from .detect import packages
from .factory import DependencyFactory
from .misc import threads_factory
from .pkgconfig import PkgConfigDependency
if T.TYPE_CHECKING:
from ..envconfig import MachineInfo
from ..environment import Environment
from ..mesonlib import MachineChoice
from typing_extensions import TypedDict
class JNISystemDependencyKW(TypedDict):
modules: T.List[str]
# FIXME: When dependency() moves to typed Kwargs, this should inherit
# from its TypedDict type.
version: T.Optional[str]
def get_shared_library_suffix(environment: 'Environment', for_machine: MachineChoice) -> str:
"""This is only guaranteed to work for languages that compile to machine
code, not for languages like C# that use a bytecode and always end in .dll
"""
m = environment.machines[for_machine]
if m.is_windows():
return '.dll'
elif m.is_darwin():
return '.dylib'
return '.so'
class GTestDependencySystem(SystemDependency):
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]) -> None:
super().__init__(name, environment, kwargs, language='cpp')
self.main = kwargs.get('main', False)
self.src_dirs = ['/usr/src/gtest/src', '/usr/src/googletest/googletest/src']
if not self._add_sub_dependency(threads_factory(environment, self.for_machine, {})):
self.is_found = False
return
self.detect()
def detect(self) -> None:
gtest_detect = self.clib_compiler.find_library("gtest", self.env, [])
gtest_main_detect = self.clib_compiler.find_library("gtest_main", self.env, [])
if gtest_detect and (not self.main or gtest_main_detect):
self.is_found = True
self.compile_args = []
self.link_args = gtest_detect
if self.main:
self.link_args += gtest_main_detect
self.sources = []
self.prebuilt = True
elif self.detect_srcdir():
self.is_found = True
self.compile_args = ['-I' + d for d in self.src_include_dirs]
self.link_args = []
if self.main:
self.sources = [self.all_src, self.main_src]
else:
self.sources = [self.all_src]
self.prebuilt = False
else:
self.is_found = False
def detect_srcdir(self) -> bool:
for s in self.src_dirs:
if os.path.exists(s):
self.src_dir = s
self.all_src = mesonlib.File.from_absolute_file(
os.path.join(self.src_dir, 'gtest-all.cc'))
self.main_src = mesonlib.File.from_absolute_file(
os.path.join(self.src_dir, 'gtest_main.cc'))
self.src_include_dirs = [os.path.normpath(os.path.join(self.src_dir, '..')),
os.path.normpath(os.path.join(self.src_dir, '../include')),
]
return True
return False
def log_info(self) -> str:
if self.prebuilt:
return 'prebuilt'
else:
return 'building self'
class GTestDependencyPC(PkgConfigDependency):
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
assert name == 'gtest'
if kwargs.get('main'):
name = 'gtest_main'
super().__init__(name, environment, kwargs)
class GMockDependencySystem(SystemDependency):
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]) -> None:
super().__init__(name, environment, kwargs, language='cpp')
self.main = kwargs.get('main', False)
if not self._add_sub_dependency(threads_factory(environment, self.for_machine, {})):
self.is_found = False
return
# If we are getting main() from GMock, we definitely
# want to avoid linking in main() from GTest
gtest_kwargs = kwargs.copy()
if self.main:
gtest_kwargs['main'] = False
# GMock without GTest is pretty much useless
# this also mimics the structure given in WrapDB,
# where GMock always pulls in GTest
found = self._add_sub_dependency(gtest_factory(environment, self.for_machine, gtest_kwargs))
if not found:
self.is_found = False
return
# GMock may be a library or just source.
# Work with both.
gmock_detect = self.clib_compiler.find_library("gmock", self.env, [])
gmock_main_detect = self.clib_compiler.find_library("gmock_main", self.env, [])
if gmock_detect and (not self.main or gmock_main_detect):
self.is_found = True
self.link_args += gmock_detect
if self.main:
self.link_args += gmock_main_detect
self.prebuilt = True
return
for d in ['/usr/src/googletest/googlemock/src', '/usr/src/gmock/src', '/usr/src/gmock']:
if os.path.exists(d):
self.is_found = True
# Yes, we need both because there are multiple
# versions of gmock that do different things.
d2 = os.path.normpath(os.path.join(d, '..'))
self.compile_args += ['-I' + d, '-I' + d2, '-I' + os.path.join(d2, 'include')]
all_src = mesonlib.File.from_absolute_file(os.path.join(d, 'gmock-all.cc'))
main_src = mesonlib.File.from_absolute_file(os.path.join(d, 'gmock_main.cc'))
if self.main:
self.sources += [all_src, main_src]
else:
self.sources += [all_src]
self.prebuilt = False
return
self.is_found = False
def log_info(self) -> str:
if self.prebuilt:
return 'prebuilt'
else:
return 'building self'
class GMockDependencyPC(PkgConfigDependency):
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
assert name == 'gmock'
if kwargs.get('main'):
name = 'gmock_main'
super().__init__(name, environment, kwargs)
class LLVMDependencyConfigTool(ConfigToolDependency):
"""
LLVM uses a special tool, llvm-config, which has arguments for getting
c args, cxx args, and ldargs as well as version.
"""
tool_name = 'llvm-config'
__cpp_blacklist = {'-DNDEBUG'}
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
self.tools = get_llvm_tool_names('llvm-config')
# Fedora starting with Fedora 30 adds a suffix of the number
# of bits in the isa that llvm targets, for example, on x86_64
# and aarch64 the name will be llvm-config-64, on x86 and arm
# it will be llvm-config-32.
if environment.machines[self.get_for_machine_from_kwargs(kwargs)].is_64_bit:
self.tools.append('llvm-config-64')
else:
self.tools.append('llvm-config-32')
# It's necessary for LLVM <= 3.8 to use the C++ linker. For 3.9 and 4.0
# the C linker works fine if only using the C API.
super().__init__(name, environment, kwargs, language='cpp')
self.provided_modules: T.List[str] = []
self.required_modules: mesonlib.OrderedSet[str] = mesonlib.OrderedSet()
self.module_details: T.List[str] = []
if not self.is_found:
return
self.provided_modules = self.get_config_value(['--components'], 'modules')
modules = stringlistify(extract_as_list(kwargs, 'modules'))
self.check_components(modules)
opt_modules = stringlistify(extract_as_list(kwargs, 'optional_modules'))
self.check_components(opt_modules, required=False)
cargs = mesonlib.OrderedSet(self.get_config_value(['--cppflags'], 'compile_args'))
self.compile_args = list(cargs.difference(self.__cpp_blacklist))
if version_compare(self.version, '>= 3.9'):
self._set_new_link_args(environment)
else:
self._set_old_link_args()
self.link_args = strip_system_libdirs(environment, self.for_machine, self.link_args)
self.link_args = self.__fix_bogus_link_args(self.link_args)
if not self._add_sub_dependency(threads_factory(environment, self.for_machine, {})):
self.is_found = False
return
def __fix_bogus_link_args(self, args: T.List[str]) -> T.List[str]:
"""This function attempts to fix bogus link arguments that llvm-config
generates.
Currently it works around the following:
- FreeBSD: when statically linking, -l/usr/lib/libexecinfo.so will
be generated; strip the -l in cases like this.
- Windows: We may get -LIBPATH:..., which is later interpreted as
"-L IBPATH:...". If we're using an msvc-like compiler, convert
that to "/LIBPATH"; otherwise to "-L ..."
"""
new_args = []
for arg in args:
if arg.startswith('-l') and arg.endswith('.so'):
new_args.append(arg.lstrip('-l'))
elif arg.startswith('-LIBPATH:'):
cpp = self.env.coredata.compilers[self.for_machine]['cpp']
new_args.extend(cpp.get_linker_search_args(arg.lstrip('-LIBPATH:')))
else:
new_args.append(arg)
return new_args
def __check_libfiles(self, shared: bool) -> None:
"""Use llvm-config's --libfiles to check if libraries exist."""
mode = '--link-shared' if shared else '--link-static'
# Set self.required to true to force an exception in get_config_value
# if the returncode != 0
restore = self.required
self.required = True
try:
# It doesn't matter what the stage is, the caller needs to catch
# the exception anyway.
self.link_args = self.get_config_value(['--libfiles', mode], '')
finally:
self.required = restore
def _set_new_link_args(self, environment: 'Environment') -> None:
"""How to set linker args for LLVM versions >= 3.9"""
try:
mode = self.get_config_value(['--shared-mode'], 'link_args')[0]
except IndexError:
mlog.debug('llvm-config --shared-mode returned an error')
self.is_found = False
return
if not self.static and mode == 'static':
# If llvm is configured with LLVM_BUILD_LLVM_DYLIB but not with
# LLVM_LINK_LLVM_DYLIB and not LLVM_BUILD_SHARED_LIBS (which
# upstream doesn't recommend using), then llvm-config will lie to
# you about how to do shared-linking. It wants to link to a bunch
# of individual shared libs (which don't exist because llvm wasn't
# built with LLVM_BUILD_SHARED_LIBS).
#
# Therefore, we'll try to get the libfiles, if the return code is 0
# or we get an empty list, then we'll try to build a working
# configuration by hand.
try:
self.__check_libfiles(True)
except DependencyException:
lib_ext = get_shared_library_suffix(environment, self.for_machine)
libdir = self.get_config_value(['--libdir'], 'link_args')[0]
# Sort for reproducibility
matches = sorted(glob.iglob(os.path.join(libdir, f'libLLVM*{lib_ext}')))
if not matches:
if self.required:
raise
self.is_found = False
return
self.link_args = self.get_config_value(['--ldflags'], 'link_args')
libname = os.path.basename(matches[0]).rstrip(lib_ext).lstrip('lib')
self.link_args.append(f'-l{libname}')
return
elif self.static and mode == 'shared':
# If, however, LLVM_BUILD_SHARED_LIBS is true (*cough* gentoo *cough*)
# then this is correct. Building with LLVM_BUILD_SHARED_LIBS has a side
# effect, it stops the generation of static archives. Therefore we need
# to check for that and error out on static if this is the case
try:
self.__check_libfiles(False)
except DependencyException:
if self.required:
raise
self.is_found = False
return
link_args = ['--link-static', '--system-libs'] if self.static else ['--link-shared']
self.link_args = self.get_config_value(
['--libs', '--ldflags'] + link_args + list(self.required_modules),
'link_args')
def _set_old_link_args(self) -> None:
"""Setting linker args for older versions of llvm.
Old versions of LLVM bring an extra level of insanity with them.
llvm-config will provide the correct arguments for static linking, but
not for shared-linking; we have to figure those out ourselves, because
of course we do.
"""
if self.static:
self.link_args = self.get_config_value(
['--libs', '--ldflags', '--system-libs'] + list(self.required_modules),
'link_args')
else:
# llvm-config will provide arguments for static linking, so we get
# to figure out for ourselves what to link with. We'll do that by
# checking in the directory provided by --libdir for a library
# called libLLVM-<ver>.(so|dylib|dll)
libdir = self.get_config_value(['--libdir'], 'link_args')[0]
expected_name = f'libLLVM-{self.version}'
re_name = re.compile(fr'{expected_name}.(so|dll|dylib)$')
for file_ in os.listdir(libdir):
if re_name.match(file_):
self.link_args = [f'-L{libdir}',
'-l{}'.format(os.path.splitext(file_.lstrip('lib'))[0])]
break
else:
raise DependencyException(
'Could not find a dynamically linkable library for LLVM.')
def check_components(self, modules: T.List[str], required: bool = True) -> None:
"""Check for llvm components (modules in meson terms).
The required option is whether the module is required, not whether LLVM
is required.
"""
for mod in sorted(set(modules)):
status = ''
if mod not in self.provided_modules:
if required:
self.is_found = False
if self.required:
raise DependencyException(
f'Could not find required LLVM Component: {mod}')
status = '(missing)'
else:
status = '(missing but optional)'
else:
self.required_modules.add(mod)
self.module_details.append(mod + status)
def log_details(self) -> str:
if self.module_details:
return 'modules: ' + ', '.join(self.module_details)
return ''
class LLVMDependencyCMake(CMakeDependency):
def __init__(self, name: str, env: 'Environment', kwargs: T.Dict[str, T.Any]) -> None:
self.llvm_modules = stringlistify(extract_as_list(kwargs, 'modules'))
self.llvm_opt_modules = stringlistify(extract_as_list(kwargs, 'optional_modules'))
compilers = None
if kwargs.get('native', False):
compilers = env.coredata.compilers.build
else:
compilers = env.coredata.compilers.host
if not compilers or not all(x in compilers for x in ('c', 'cpp')):
# Initialize basic variables
ExternalDependency.__init__(self, DependencyTypeName('cmake'), env, kwargs)
# Initialize CMake specific variables
self.found_modules: T.List[str] = []
self.name = name
# Warn and return
mlog.warning('The LLVM dependency was not found via CMake since both a C and C++ compiler are required.')
return
super().__init__(name, env, kwargs, language='cpp', force_use_global_compilers=True)
if self.traceparser is None:
return
if not self.is_found:
return
# CMake will report "not found" when LLVM_DYLIB_COMPONENTS is not defined
if not self.static and version_compare(self.version, '< 7.0') and self.llvm_modules:
mlog.warning('Before version 7.0 cmake does not export modules for dynamic linking, cannot check required modules')
return
# Extract extra include directories and definitions
inc_dirs = self.traceparser.get_cmake_var('PACKAGE_INCLUDE_DIRS')
defs = self.traceparser.get_cmake_var('PACKAGE_DEFINITIONS')
# LLVM explicitly uses space-separated variables rather than semicolon lists
if len(defs) == 1:
defs = defs[0].split(' ')
temp = ['-I' + x for x in inc_dirs] + defs
self.compile_args += [x for x in temp if x not in self.compile_args]
self.compile_args = strip_system_includedirs(env, self.for_machine, self.compile_args)
if not self._add_sub_dependency(threads_factory(env, self.for_machine, {})):
self.is_found = False
return
def _main_cmake_file(self) -> str:
# Use a custom CMakeLists.txt for LLVM
return 'CMakeListsLLVM.txt'
# Check version in CMake to return the exact version, like the config tool does (latest allowed)
# It is safe to add .0 to the latest argument; it will be discarded if we use search_version
def llvm_cmake_versions(self) -> T.List[str]:
def ver_from_suf(req: str) -> str:
return search_version(req.strip('-')+'.0')
def version_sorter(a: str, b: str) -> int:
if version_compare(a, "="+b):
return 0
if version_compare(a, "<"+b):
return 1
return -1
llvm_requested_versions = [ver_from_suf(x) for x in get_llvm_tool_names('') if version_compare(ver_from_suf(x), '>=0')]
if self.version_reqs:
llvm_requested_versions = [ver_from_suf(x) for x in get_llvm_tool_names('') if version_compare_many(ver_from_suf(x), self.version_reqs)]
# CMake sorting before 3.18 is incorrect, sort it here instead
return sorted(llvm_requested_versions, key=functools.cmp_to_key(version_sorter))
# Split required and optional modules to distinguish it in CMake
def _extra_cmake_opts(self) -> T.List[str]:
return ['-DLLVM_MESON_REQUIRED_MODULES={}'.format(';'.join(self.llvm_modules)),
'-DLLVM_MESON_OPTIONAL_MODULES={}'.format(';'.join(self.llvm_opt_modules)),
'-DLLVM_MESON_PACKAGE_NAMES={}'.format(';'.join(get_llvm_tool_names(self.name))),
'-DLLVM_MESON_VERSIONS={}'.format(';'.join(self.llvm_cmake_versions())),
'-DLLVM_MESON_DYLIB={}'.format('OFF' if self.static else 'ON')]
def _map_module_list(self, modules: T.List[T.Tuple[str, bool]], components: T.List[T.Tuple[str, bool]]) -> T.List[T.Tuple[str, bool]]:
res = []
for mod, required in modules:
cm_targets = self.traceparser.get_cmake_var(f'MESON_LLVM_TARGETS_{mod}')
if not cm_targets:
if required:
raise self._gen_exception(f'LLVM module {mod} was not found')
else:
mlog.warning('Optional LLVM module', mlog.bold(mod), 'was not found', fatal=False)
continue
for i in cm_targets:
res += [(i, required)]
return res
def _original_module_name(self, module: str) -> str:
orig_name = self.traceparser.get_cmake_var(f'MESON_TARGET_TO_LLVM_{module}')
if orig_name:
return orig_name[0]
return module
class ValgrindDependency(PkgConfigDependency):
'''
Consumers of Valgrind usually only need the compile args and do not want to
link to its (static) libraries.
'''
def __init__(self, env: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__('valgrind', env, kwargs)
def get_link_args(self, language: T.Optional[str] = None, raw: bool = False) -> T.List[str]:
return []
packages['valgrind'] = ValgrindDependency
class ZlibSystemDependency(SystemDependency):
def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
super().__init__(name, environment, kwargs)
from ..compilers.c import AppleClangCCompiler
from ..compilers.cpp import AppleClangCPPCompiler
m = self.env.machines[self.for_machine]
# I'm not sure this is entirely correct. What if we're cross compiling
# from something to macOS?
if ((m.is_darwin() and isinstance(self.clib_compiler, (AppleClangCCompiler, AppleClangCPPCompiler))) or
m.is_freebsd() or m.is_dragonflybsd() or m.is_android()):
# No need to set includes,
# on macos xcode/clang will do that for us.
# on freebsd zlib.h is in /usr/include
self.is_found = True
self.link_args = ['-lz']
else:
if self.clib_compiler.get_argument_syntax() == 'msvc':
libs = ['zlib1', 'zlib']
else:
libs = ['z']
for lib in libs:
l = self.clib_compiler.find_library(lib, environment, [])
h = self.clib_compiler.has_header('zlib.h', '', environment, dependencies=[self])
if l and h[0]:
self.is_found = True
self.link_args = l
break
else:
return
v, _ = self.clib_compiler.get_define('ZLIB_VERSION', '#include <zlib.h>', self.env, [], [self])
self.version = v.strip('"')
class JNISystemDependency(SystemDependency):
def __init__(self, environment: 'Environment', kwargs: JNISystemDependencyKW):
super().__init__('jni', environment, T.cast('T.Dict[str, T.Any]', kwargs))
self.feature_since = ('0.62.0', '')
m = self.env.machines[self.for_machine]
if 'java' not in environment.coredata.compilers[self.for_machine]:
detect_compiler(self.name, environment, self.for_machine, 'java')
self.javac = environment.coredata.compilers[self.for_machine]['java']
self.version = self.javac.version
modules: T.List[str] = mesonlib.listify(kwargs.get('modules', []))
for module in modules:
if module not in {'jvm', 'awt'}:
msg = f'Unknown JNI module ({module})'
if self.required:
mlog.error(msg)
else:
mlog.debug(msg)
self.is_found = False
return
if 'version' in kwargs and not version_compare(self.version, kwargs['version']):
mlog.error(f'Incorrect JDK version found ({self.version}), wanted {kwargs["version"]}')
self.is_found = False
return
self.java_home = environment.properties[self.for_machine].get_java_home()
if not self.java_home:
self.java_home = pathlib.Path(shutil.which(self.javac.exelist[0])).resolve().parents[1]
if m.is_darwin():
problem_java_prefix = pathlib.Path('/System/Library/Frameworks/JavaVM.framework/Versions')
if problem_java_prefix in self.java_home.parents:
res = subprocess.run(['/usr/libexec/java_home', '--failfast', '--arch', m.cpu_family],
stdout=subprocess.PIPE)
if res.returncode != 0:
msg = 'JAVA_HOME could not be discovered on the system. Please set it explicitly.'
if self.required:
mlog.error(msg)
else:
mlog.debug(msg)
self.is_found = False
return
self.java_home = pathlib.Path(res.stdout.decode().strip())
platform_include_dir = self.__machine_info_to_platform_include_dir(m)
if platform_include_dir is None:
mlog.error("Could not find a JDK platform include directory for your OS, please open an issue or provide a pull request.")
self.is_found = False
return
java_home_include = self.java_home / 'include'
self.compile_args.append(f'-I{java_home_include}')
self.compile_args.append(f'-I{java_home_include / platform_include_dir}')
if modules:
if m.is_windows():
java_home_lib = self.java_home / 'lib'
java_home_lib_server = java_home_lib
else:
if version_compare(self.version, '<= 1.8.0'):
java_home_lib = self.java_home / 'jre' / 'lib' / self.__cpu_translate(m.cpu_family)
else:
java_home_lib = self.java_home / 'lib'
java_home_lib_server = java_home_lib / 'server'
if 'jvm' in modules:
jvm = self.clib_compiler.find_library('jvm', environment, extra_dirs=[str(java_home_lib_server)])
if jvm is None:
mlog.debug('jvm library not found.')
self.is_found = False
else:
self.link_args.extend(jvm)
if 'awt' in modules:
jawt = self.clib_compiler.find_library('jawt', environment, extra_dirs=[str(java_home_lib)])
if jawt is None:
mlog.debug('jawt library not found.')
self.is_found = False
else:
self.link_args.extend(jawt)
self.is_found = True
@staticmethod
def __cpu_translate(cpu: str) -> str:
'''
The JDK and Meson have a disagreement here, so translate it over. In the event more
translation needs to be done, add to the following dict.
'''
java_cpus = {
'x86_64': 'amd64',
}
return java_cpus.get(cpu, cpu)
@staticmethod
def __machine_info_to_platform_include_dir(m: 'MachineInfo') -> T.Optional[str]:
'''Translates the machine information to the platform-dependent include directory
When inspecting a JDK release tarball or $JAVA_HOME, inside the `include/` directory is a
platform-dependent directory that must be on the target's include path in addition to the
parent `include/` directory.
'''
if m.is_linux():
return 'linux'
elif m.is_windows():
return 'win32'
elif m.is_darwin():
return 'darwin'
elif m.is_sunos():
return 'solaris'
elif m.is_freebsd():
return 'freebsd'
elif m.is_netbsd():
return 'netbsd'
elif m.is_openbsd():
return 'openbsd'
elif m.is_dragonflybsd():
return 'dragonfly'
return None
packages['jni'] = JNISystemDependency
class JDKSystemDependency(JNISystemDependency):
def __init__(self, environment: 'Environment', kwargs: JNISystemDependencyKW):
super().__init__(environment, kwargs)
self.feature_since = ('0.59.0', '')
self.featurechecks.append(FeatureDeprecated(
'jdk system dependency',
'0.62.0',
'Use the jni system dependency instead'
))
packages['jdk'] = JDKSystemDependency
packages['llvm'] = llvm_factory = DependencyFactory(
'LLVM',
[DependencyMethods.CMAKE, DependencyMethods.CONFIG_TOOL],
cmake_class=LLVMDependencyCMake,
configtool_class=LLVMDependencyConfigTool,
)
packages['gtest'] = gtest_factory = DependencyFactory(
'gtest',
[DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],
pkgconfig_class=GTestDependencyPC,
system_class=GTestDependencySystem,
)
packages['gmock'] = gmock_factory = DependencyFactory(
'gmock',
[DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],
pkgconfig_class=GMockDependencyPC,
system_class=GMockDependencySystem,
)
packages['zlib'] = zlib_factory = DependencyFactory(
'zlib',
[DependencyMethods.PKGCONFIG, DependencyMethods.CMAKE, DependencyMethods.SYSTEM],
cmake_name='ZLIB',
system_class=ZlibSystemDependency,
)
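# Typical consumption from a meson.build file (shown as a comment since this
# module is Python); the factories registered above back calls such as:
#   dependency('llvm', modules: ['x86'], required: false)
#   dependency('gtest', main: true)
#   dependency('jni', modules: ['jvm'])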
|
c7e7d0447318172a5c68e3614462fe57db27dc6e
|
749af8e81d5ccd2d8714a34434a9c77772df551b
|
/statsmodels/stats/libqsturng/make_tbls.py
|
33bc88c05f3d4d0818495bc10b5a5c76e134d6cc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
statsmodels/statsmodels
|
98ca67192c08bcc611ed3a75edaded2c7181ab98
|
01b19d7d111b29c183f620ff0a949ef6391ff8ee
|
refs/heads/main
| 2023-09-05T13:05:49.497076
| 2023-09-01T10:54:50
| 2023-09-01T10:54:50
| 1,885,237
| 8,666
| 3,023
|
BSD-3-Clause
| 2023-09-13T17:51:48
| 2011-06-12T17:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 63,583
|
py
|
make_tbls.py
|
"""this script builds the T table and A table for the upper
quantile stundentized range algorithm"""
from statsmodels.compat.python import lrange, lmap
import math
import scipy.stats
from scipy.optimize import leastsq
import numpy as np
from numpy.random import random
# The values for p in [.5, .75, .9, .95, .975, .99, .995, .999]
# were pulled from:
# http://www.stata.com/stb/stb46/dm64/sturng.pdf
#
# Values for p in [.1, .675, .8, .85] were calculated using R's qtukey function
#
# The table was programmed by Gleason and extends Harter's (1960) table
# using the Copenhaver & Holland (1988) algorithm (C&H). Gleason found
# that the 4th significant digit of the C&H values differed from Harter's
# tables on about 20% of the values. Gleason states this was due to
# conservative rounding by Harter. In those cases the table reflects
# Harter's original approximations.
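# Hedged cross-check (not used by this script): SciPy >= 1.7 ships
# scipy.stats.studentized_range, whose ppf can reproduce individual entries,
# e.g. the p = 0.5 quantile for k groups at v degrees of freedom:
#   from scipy.stats import studentized_range
#   studentized_range.ppf(0.5, k=3, df=10)  # compare with the q0500 row for v=10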
q0100 = """\
2 0.2010022 0.6351172 0.9504689 1.179321 1.354691 1.495126 1.611354 1.709984 1.795325 1.87032 1.937057 1.997068 2.051505 2.101256 2.147016 2.189342 2.228683 2.265408 2.299823 2.558612 2.729718 2.95625 3.184742938 3.398609188
3 0.193179 0.6294481 0.9564746 1.19723 1.383028 1.532369 1.656225 1.761451 1.852559 1.93265 2.003933 2.068034 2.126178 2.179312 2.228177 2.273367 2.315364 2.354561 2.391287 2.667213 2.849389 3.009265469 3.237758406 3.451624656
4 0.1892648 0.6266441 0.9606115 1.2089 1.401557 1.55691 1.686009 1.795829 1.890994 1.974697 2.049222 2.116253 2.177065 2.232641 2.283754 2.331023 2.37495 2.415949 2.454361 2.742846 2.933173 3.062280938 3.290773875 3.504640125
5 0.1869239 0.6249713 0.963532 1.217021 1.414548 1.574255 1.707205 1.820437 1.91864 2.005066 2.082048 2.151312 2.214162 2.271609 2.324449 2.37332 2.418737 2.461128 2.500844 2.7991 2.9958 3.115296406 3.343789344 3.557655594
6 0.185369 0.6238602 0.9656833 1.22298 1.424151 1.587166 1.723076 1.838955 1.939532 2.028098 2.107021 2.178053 2.242524 2.301465 2.355686 2.40584 2.452454 2.495964 2.536731 2.842892 3.027993254 3.168311875 3.396804813 3.610671063
7 0.1842618 0.6230685 0.9673274 1.227534 1.431536 1.597154 1.735417 1.853415 1.955904 2.046203 2.126704 2.19918 2.264979 2.325144 2.380502 2.431713 2.479315 2.52375 2.565387 2.878126 3.060186557 3.221327344 3.449820281 3.663686531
8 0.1834338 0.6224757 0.9686225 1.231126 1.437392 1.605113 1.745294 1.86503 1.969097 2.060832 2.142645 2.216325 2.283234 2.344427 2.400739 2.45284 2.501275 2.546491 2.588864 2.861237 3.092379859 3.274342813 3.50283575 3.716702
9 0.1827912 0.6220153 0.969668 1.23403 1.442149 1.611608 1.753382 1.874572 1.979964 2.07291 2.155833 2.230535 2.298388 2.360458 2.417585 2.470448 2.519597 2.565484 2.608488 2.871492 3.1631665 3.362394 3.613696 3.7452106
10 0.1822783 0.6216474 0.9705293 1.236426 1.446091 1.617009 1.76013 1.882554 1.989077 2.083059 2.166935 2.242517 2.311185 2.374011 2.431845 2.485368 2.535137 2.581607 2.625162 2.898717 3.1760535 3.45339 3.6807265 3.7737192
11 0.1818593 0.6213467 0.9712507 1.238437 1.449411 1.621571 1.765847 1.889333 1.996831 2.091711 2.176415 2.252763 2.322141 2.38563 2.444082 2.498185 2.548497 2.59548 2.639519 2.923558 3.19971275 3.4758675 3.70202225 3.8022278
12 0.1815106 0.6210962 0.9718637 1.240149 1.452244 1.625478 1.770753 1.895163 2.003512 2.099178 2.184609 2.26163 2.331635 2.395707 2.454706 2.509321 2.560115 2.607554 2.652022 2.94649 3.2224175 3.498345 3.7242725 3.8307364
13 0.181216 0.6208845 0.9723908 1.241623 1.454692 1.62886 1.77501 1.900231 2.009331 2.10569 2.191763 2.269382 2.339943 2.404535 2.46402 2.519093 2.570318 2.618162 2.663015 2.967868 3.242456 3.517044 3.741632 3.859245
14 0.1809637 0.620703 0.972849 1.242906 1.456827 1.631817 1.778739 1.904678 2.014444 2.11142 2.198067 2.276219 2.347278 2.412335 2.472257 2.52774 2.579352 2.627563 2.672763 2.987963 3.245505708 3.503048417 3.748851667 3.878849
15 0.1807453 0.6205458 0.9732508 1.244033 1.458706 1.634424 1.782034 1.908613 2.018974 2.116503 2.203664 2.282296 2.353802 2.41928 2.479596 2.53545 2.587413 2.635954 2.681468 3.006982 3.275778125 3.489052833 3.758247778 3.896564
16 0.1805544 0.6204084 0.973606 1.24503 1.460373 1.636741 1.784965 1.912119 2.023015 2.121043 2.208668 2.287733 2.359646 2.425503 2.486177 2.542369 2.59465 2.643493 2.689292 3.025091 3.281966688 3.49479125 3.763291 3.9179256
17 0.1803861 0.6202871 0.9739223 1.245919 1.461861 1.638813 1.78759 1.915263 2.026643 2.125123 2.21317 2.292628 2.36491 2.431114 2.492114 2.548614 2.601186 2.650304 2.696365 3.032426 3.272395 3.50687125 3.77704 3.9392872
18 0.1802366 0.6201793 0.9742057 1.246717 1.463198 1.640677 1.789955 1.918099 2.029919 2.128809 2.21724 2.297059 2.369678 2.436199 2.497498 2.55428 2.607118 2.656491 2.702792 3.039093 3.282026 3.586406 3.789645 3.9606488
19 0.1801029 0.620083 0.9744612 1.247436 1.464405 1.642363 1.792096 1.920669 2.032891 2.132158 2.22094 2.301089 2.374017 2.44083 2.502404 2.559444 2.612529 2.662134 2.708658 3.045182 3.296680985 3.603883649 3.801248 3.974888443
20 0.1799827 0.6199962 0.9746925 1.248088 1.465502 1.643895 1.794045 1.923011 2.035601 2.135212 2.224318 2.30477 2.377983 2.445065 2.506892 2.564173 2.617485 2.667306 2.714034 3.050762 3.311335993 3.621361325 3.788047375 3.989130221
24 0.1796023 0.6197217 0.9754345 1.250184 1.469033 1.648844 1.800353 1.930604 2.044404 2.145153 2.235327 2.316784 2.390945 2.45892 2.521592 2.579673 2.633745 2.684289 2.731705 3.089271 3.325991 3.638839 3.77484675 4.003372
30 0.1792227 0.6194474 0.9761909 1.252326 1.47266 1.653946 1.806877 1.938484 2.053567 2.155527 2.246844 2.329381 2.404562 2.473504 2.537093 2.596046 2.650947 2.702281 2.750452 3.088623 3.365526818 3.632653344 3.810852875 4.0480485
40 0.1788439 0.6191733 0.976962 1.254517 1.476384 1.659208 1.813634 1.946672 2.063118 2.166372 2.258917 2.342618 2.418905 2.488898 2.553488 2.613394 2.669206 2.731569 2.770415 3.109261 3.405062637 3.629560516 3.846859 4.092725
60 0.1784658 0.6188994 0.9777482 1.256759 1.480212 1.664639 1.820636 1.955191 2.07309 2.177731 2.271599 2.356562 2.434053 2.505196 2.570884 2.631843 2.688663 2.745483 2.802303 3.159123 3.406221516 3.626467688 3.90118925 4.1497775
120 0.1780885 0.6186256 0.9785495 1.259052 1.484147 1.67025 1.827902 1.964066 2.083518 2.189653 2.284954 2.371292 2.450102 2.522511 2.589417 2.651546 2.71243375 2.763748 2.8156585 3.201588 3.459897 3.67055425 3.9555195 4.20683
1e38 0.177712 0.6183521 0.9793662 1.261398 1.488195 1.676051 1.835449 1.973327 2.094446 2.202195 2.299057 2.386902 2.467168 2.540983 2.609248 2.677513 2.745778 2.787396 2.829014 3.236691 3.487830797 3.721309063 4.01874075 4.279424"""
q0300 = """\
2 0.6289521 1.248281 1.638496 1.916298 2.129504 2.301246 2.444313 2.566465 2.672747 2.766604 2.850494 2.926224 2.995161 3.05836 3.116655 3.170712 3.221076 3.268192 3.312433 3.647666 3.871606 4.170521 4.372227 4.52341
3 0.5999117 1.209786 1.598235 1.875707 2.088948 2.260822 2.404037 2.52633 2.632732 2.726691 2.810665 2.886463 2.955453 3.018695 3.077022 3.131103 3.181483 3.228609 3.272854 3.607961 3.831649 4.130021 4.331231 4.48198
4 0.5857155 1.19124 1.579749 1.858059 2.072245 2.245015 2.389043 2.512062 2.619115 2.713659 2.798159 2.874434 2.943859 3.007497 3.066189 3.120607 3.171298 3.218713 3.263228 3.600301 3.825208 4.125075 4.327208 4.478602
5 0.5773226 1.18033 1.569213 1.84843 2.063579 2.237255 2.382108 2.505872 2.613597 2.708749 2.793803 2.870583 2.94047 3.004536 3.063622 3.118406 3.169439 3.217173 3.261987 3.601296 3.827646 4.129353 4.332665 4.48491
6 0.5717854 1.173145 1.562427 1.842442 2.058433 2.232905 2.378487 2.502915 2.611243 2.706943 2.792499 2.869739 2.940051 3.004509 3.06396 3.119085 3.170435 3.218468 3.263562 3.604991 3.832733 4.136241 4.340726 4.493824
7 0.5678608 1.168056 1.5577 1.838389 2.055092 2.230242 2.376451 2.501451 2.610302 2.706482 2.792477 2.870123 2.94081 3.005616 3.065391 3.120818 3.172464 3.220752 3.266098 3.609448 3.838467 4.143645 4.349225 4.503125
8 0.5649349 1.164263 1.55422 1.835477 2.052781 2.228508 2.375251 2.500743 2.610046 2.706641 2.793019 2.871019 2.942034 3.007146 3.067207 3.122901 3.174787 3.223323 3.268893 3.61396 3.844133 4.150832 4.357416 4.512054
9 0.56267 1.161326 1.551554 1.833289 2.051105 2.227323 2.374526 2.500442 2.610136 2.707092 2.793803 2.872112 2.943416 3.008796 3.069108 3.125039 3.177147 3.225893 3.27166 3.618262 3.849473 4.157551 4.365051 4.520364
10 0.5608651 1.158985 1.549445 1.831589 2.049842 2.226484 2.374084 2.500369 2.610403 2.707674 2.794677 2.873258 2.944815 3.010432 3.070966 3.127106 3.179411 3.228343 3.274287 3.62226 3.854408 4.16298725 4.372072 4.51335807
11 0.5593931 1.157076 1.547737 1.830233 2.048863 2.225872 2.373818 2.500424 2.610757 2.708305 2.795566 2.874386 2.946167 3.011994 3.072726 3.129052 3.181532 3.23063 3.276732 3.625936 3.8583805 4.1684235 4.378495 4.527370035
12 0.5581697 1.155489 1.546325 1.829125 2.048085 2.225415 2.373664 2.500554 2.61115 2.708943 2.796432 2.875466 2.947446 3.01346 3.074368 3.13086 3.183497 3.232744 3.278987 3.629302 3.862353 4.17385975 4.384365 4.541382
13 0.557137 1.154148 1.545138 1.828206 2.047454 2.225068 2.373584 2.500725 2.611555 2.709566 2.797258 2.876482 2.948641 3.014823 3.075889 3.13253 3.185309 3.23469 3.28106 3.63238 3.8663255 4.179296 4.3895105 4.547231
14 0.5562535 1.153001 1.544127 1.82743 2.046934 2.224798 2.373553 2.500918 2.611956 2.710162 2.798038 2.877433 2.949752 3.016085 3.077294 3.134069 3.186976 3.236478 3.282963 3.635197 3.870298 4.183625 4.394656 4.552594
15 0.5554892 1.152009 1.543255 1.826767 2.046499 2.224587 2.373555 2.50112 2.612347 2.710728 2.798768 2.878317 2.950781 3.017251 3.078589 3.135487 3.188509 3.238121 3.284712 3.637674 3.8733385 4.187597 4.399177 4.557525
16 0.5548214 1.151142 1.542495 1.826194 2.04613 2.224419 2.373579 2.501325 2.612721 2.711261 2.799449 2.879139 2.951735 3.018329 3.079784 3.136793 3.189921 3.239634 3.28632 3.640151 3.876379 4.191252 4.40334 4.56207
17 0.554233 1.150377 1.541828 1.825694 2.045813 2.224284 2.373618 2.501527 2.613077 2.711761 2.800085 2.879902 2.952618 3.019325 3.080888 3.137998 3.191223 3.241028 3.287802 3.642251 3.878959 4.1944965 4.407184 4.56627
18 0.5537107 1.149699 1.541236 1.825254 2.04554 2.224175 2.373667 2.501725 2.613414 2.71223 2.800678 2.880611 2.953437 3.020248 3.081909 3.139113 3.192426 3.242315 3.28917 3.644351 3.881539 4.197741 4.410743 4.570162
19 0.5532438 1.149092 1.540709 1.824864 2.045301 2.224086 2.373722 2.501915 2.613733 2.71267 2.801231 2.881271 2.954198 3.021104 3.082855 3.140145 3.19354 3.243507 3.290436 3.641213698 3.877674372 4.200632 4.414046 4.573776
20 0.552824 1.148546 1.540235 1.824516 2.045091 2.224013 2.37378 2.502099 2.614034 2.713082 2.801747 2.881885 2.954905 3.0219 3.083734 3.141103 3.194574 3.244613 3.291611 3.647488349 3.885403686 4.203319 4.4207865 4.577142
24 0.5514973 1.146821 1.538745 1.823433 2.044456 2.223827 2.374026 2.502754 2.615077 2.71449 2.8035 2.883965 2.957293 3.02458 3.086693 3.144327 3.198049 3.248329 3.295558 3.653763 3.893133 4.212396 4.427527 4.588561
30 0.5501747 1.1451 1.537267 1.822378 2.04387 2.223711 2.374365 2.503527 2.616262 2.716064 2.805443 2.886257 2.959917 3.027519 3.089932 3.147852 3.201849 3.252391 3.29987 3.646771403 3.901009 4.201241112 4.423813037 4.57425581
40 0.5488561 1.143384 1.535803 1.821351 2.043333 2.223667 2.3748 2.504422 2.617594 2.717812 2.807585 2.888774 2.962791 3.030734 3.093472 3.151703 3.205997 3.256824 3.304577 3.660754702 3.910106 4.223551056 4.431241019 4.602866405
60 0.5475416 1.141671 1.53435 1.820353 2.042846 2.223698 2.375337 2.505445 2.619082 2.719743 2.809939 2.891531 2.965932 3.034243 3.097332 3.1559 3.210518 3.261657 3.30971 3.674738 3.919203 4.245861 4.438669 4.631477
120 0.5462314 1.139963 1.532911 1.819385 2.04241 2.223806 2.375978 2.506602 2.620733 2.721868 2.812516 2.894541 2.969356 3.038064 3.101534 3.160468 3.215439 3.39400025 3.5725615 3.75112275 3.929684 4.256342 4.44915 4.641958
1e38 0.5449254 1.138259 1.531485 1.818447 2.042028 2.223993 2.376728 2.507898 2.622556 2.724195 2.815328 2.897817 2.973079 3.042215 3.106097 3.165428 3.220399 3.39896025 3.5775215 3.75608275 3.934644 4.261302 4.45411 4.646918"""
q0500 = """\
2 1.155 1.908 2.377 2.713 2.973 3.184 3.361 3.513 3.645 3.762 3.867 3.963 4.049 4.129 4.203 4.271 4.335 4.394 4.451 4.878 5.165 5.549 5.810 6.006
3 1.082 1.791 2.230 2.545 2.789 2.986 3.152 3.294 3.418 3.528 3.626 3.715 3.796 3.871 3.940 4.004 4.064 4.120 4.172 4.573 4.842 5.202 5.447 5.630
4 1.048 1.736 2.163 2.468 2.704 2.895 3.055 3.193 3.313 3.419 3.515 3.601 3.680 3.752 3.819 3.881 3.939 3.993 4.044 4.432 4.693 5.043 5.279 5.457
5 1.028 1.705 2.124 2.423 2.655 2.843 3.000 3.135 3.253 3.357 3.451 3.535 3.613 3.684 3.749 3.810 3.867 3.920 3.970 4.351 4.608 4.951 5.184 5.358
6 1.015 1.684 2.098 2.394 2.623 2.808 2.964 3.097 3.213 3.317 3.409 3.493 3.569 3.639 3.704 3.764 3.820 3.873 3.922 4.299 4.552 4.891 5.121 5.294
7 1.006 1.670 2.080 2.374 2.601 2.784 2.938 3.070 3.186 3.288 3.380 3.463 3.538 3.608 3.672 3.732 3.787 3.840 3.889 4.262 4.513 4.849 5.077 5.249
8 .9990 1.659 2.067 2.359 2.584 2.767 2.919 3.051 3.165 3.267 3.358 3.440 3.515 3.584 3.648 3.708 3.763 3.815 3.863 4.234 4.484 4.818 5.045 5.215
9 .9938 1.651 2.057 2.347 2.571 2.753 2.905 3.035 3.149 3.250 3.341 3.423 3.498 3.566 3.630 3.689 3.744 3.796 3.844 4.213 4.461 4.794 5.020 5.189
10 .9897 1.645 2.049 2.338 2.561 2.742 2.893 3.023 3.137 3.237 3.328 3.409 3.484 3.552 3.615 3.674 3.729 3.780 3.829 4.196 4.443 4.775 5.000 5.168
11 .9863 1.639 2.042 2.330 2.553 2.733 2.884 3.013 3.127 3.227 3.317 3.398 3.472 3.540 3.603 3.662 3.717 3.768 3.816 4.182 4.429 4.759 4.983 5.152
12 .9836 1.635 2.037 2.324 2.546 2.726 2.876 3.005 3.118 3.218 3.308 3.389 3.463 3.531 3.594 3.652 3.706 3.757 3.805 4.171 4.417 4.746 4.970 5.138
13 .9812 1.631 2.032 2.319 2.540 2.719 2.869 2.998 3.111 3.210 3.300 3.381 3.455 3.522 3.585 3.643 3.698 3.749 3.796 4.161 4.406 4.735 4.958 5.126
14 .9792 1.628 2.028 2.314 2.535 2.714 2.864 2.992 3.105 3.204 3.293 3.374 3.448 3.515 3.578 3.636 3.690 3.741 3.789 4.153 4.397 4.726 4.948 5.115
15 .9775 1.625 2.025 2.310 2.531 2.709 2.859 2.987 3.099 3.199 3.288 3.368 3.442 3.509 3.572 3.630 3.684 3.735 3.782 4.145 4.390 4.718 4.940 5.107
16 .9760 1.623 2.022 2.307 2.527 2.705 2.855 2.983 3.095 3.194 3.283 3.363 3.436 3.504 3.566 3.624 3.678 3.729 3.776 4.139 4.383 4.710 4.932 5.099
17 .9747 1.621 2.019 2.304 2.524 2.702 2.851 2.979 3.090 3.189 3.278 3.359 3.432 3.499 3.561 3.619 3.673 3.724 3.771 4.133 4.377 4.704 4.926 5.092
18 .9735 1.619 2.017 2.301 2.521 2.699 2.848 2.975 3.087 3.186 3.274 3.354 3.428 3.495 3.557 3.615 3.669 3.719 3.767 4.128 4.372 4.698 4.920 5.086
19 .9724 1.617 2.015 2.299 2.518 2.696 2.845 2.972 3.084 3.182 3.271 3.351 3.424 3.491 3.553 3.611 3.665 3.715 3.763 4.124 4.367 4.693 4.914 5.080
20 .9715 1.616 2.013 2.297 2.516 2.693 2.842 2.969 3.081 3.179 3.268 3.348 3.421 3.488 3.550 3.607 3.661 3.712 3.759 4.120 4.363 4.688 4.909 5.075
24 .9685 1.611 2.007 2.290 2.508 2.685 2.833 2.960 3.071 3.170 3.258 3.337 3.410 3.477 3.539 3.596 3.650 3.700 3.747 4.107 4.349 4.674 4.894 5.060
30 .9656 1.606 2.001 2.283 2.501 2.677 2.825 2.951 3.062 3.160 3.248 3.327 3.400 3.466 3.528 3.585 3.638 3.688 3.735 4.094 4.335 4.659 4.878 5.043
40 .9626 1.602 1.996 2.277 2.494 2.669 2.816 2.942 3.053 3.150 3.238 3.317 3.389 3.455 3.517 3.574 3.627 3.677 3.723 4.080 4.321 4.643 4.862 5.027
60 .9597 1.597 1.990 2.270 2.486 2.661 2.808 2.933 3.043 3.140 3.227 3.306 3.378 3.444 3.505 3.562 3.615 3.665 3.711 4.067 4.306 4.627 4.845 5.009
120 .9568 1.592 1.984 2.263 2.479 2.653 2.799 2.924 3.034 3.130 3.217 3.296 3.367 3.433 3.494 3.550 3.603 3.652 3.699 4.052 4.290 4.610 4.827 4.990
1e38 .9539 1.588 1.978 2.257 2.472 2.645 2.791 2.915 3.024 3.121 3.207 3.285 3.356 3.422 3.482 3.538 3.591 3.640 3.686 4.037 4.274 4.591 4.806 4.968"""
q0675 = """\
2 1.829602 2.751705 3.332700 3.754119 4.082579 4.350351 4.575528 4.769258 4.938876 5.089456 5.22465 5.347168 5.459072 5.56197 5.657136 5.745596 5.828188 5.905606 5.978428 6.534036 6.908522 7.411898 7.753537 8.010516
3 1.660743 2.469725 2.973973 3.338757 3.622958 3.854718 4.049715 4.217574 4.364624 4.495236 4.612559 4.718926 4.816117 4.905518 4.988228 5.065133 5.136955 5.204295 5.267653 5.751485 6.07799 6.517299 6.815682 7.040219
4 1.585479 2.344680 2.814410 3.153343 3.417165 3.632254 3.813232 3.96905 4.105579 4.226877 4.335857 4.434684 4.525004 4.608102 4.684995 4.756504 4.823298 4.885932 4.944872 5.395226 5.699385 6.108899 6.387203 6.596702
5 1.543029 2.27426 2.72431 3.048331 3.300303 3.505645 3.678397 3.827131 3.957462 4.073264 4.177319 4.27169 4.35795 4.437321 4.510774 4.579091 4.642911 4.702763 4.759089 5.189651 5.480611 5.872552 6.139026 6.339673
6 1.515809 2.229127 2.666435 2.980707 3.224876 3.423769 3.591058 3.735076 3.861273 3.973403 4.074164 4.165554 4.249094 4.325968 4.397115 4.463293 4.525119 4.583105 4.637678 5.054965 5.337079 5.717251 5.975813 6.170546
7 1.496881 2.197746 2.626119 2.933501 3.17212 3.366405 3.529778 3.670406 3.793624 3.903106 4.001488 4.090723 4.172295 4.247362 4.31684 4.381468 4.441849 4.498482 4.551786 4.959448 5.235148 5.606794 5.859629 6.050083
8 1.482962 2.174667 2.596423 2.898666 3.133126 3.323942 3.484357 3.622418 3.743376 3.850846 3.947419 4.035012 4.115085 4.188774 4.256978 4.320423 4.379701 4.435301 4.487633 4.887938 5.158734 5.523864 5.772325 5.959514
9 1.472298 2.156982 2.573637 2.871897 3.103116 3.29122 3.449316 3.585359 3.704538 3.810421 3.905564 3.991859 4.070746 4.143343 4.210539 4.273046 4.331448 4.386229 4.437792 4.832253 5.099153 5.459107 5.704096 5.888693
10 1.463868 2.142999 2.555601 2.85068 3.079300 3.265222 3.421445 3.555857 3.673594 3.778189 3.872171 3.957411 4.035332 4.107041 4.173413 4.235156 4.292845 4.346957 4.39789 4.787576 5.05129 5.407011 5.649161 5.831642
11 1.457037 2.131666 2.540969 2.833447 3.059936 3.244061 3.398740 3.531802 3.648345 3.751871 3.844888 3.929251 4.006369 4.077337 4.143024 4.204129 4.261223 4.314777 4.365186 4.750882 5.01193 5.36411 5.603886 5.784574
12 1.451389 2.122295 2.528860 2.819172 3.043878 3.226497 3.379879 3.511805 3.627341 3.729965 3.822167 3.905787 3.982224 4.052565 4.117671 4.178235 4.234823 4.287903 4.337867 4.720168 4.978946 5.32811 5.565864 5.745066
13 1.446642 2.114418 2.518673 2.807152 3.030346 3.211683 3.363958 3.494914 3.609588 3.71144 3.802942 3.885925 3.961777 4.031579 4.096185 4.156283 4.212436 4.265108 4.314686 4.694058 4.950875 5.297431 5.533435 5.711335
14 1.442597 2.107703 2.509984 2.796892 3.018785 3.199019 3.350338 3.480454 3.594382 3.695564 3.786459 3.868888 3.944232 4.013564 4.077734 4.137427 4.1932 4.245515 4.294759 4.671571 4.926672 5.270944 5.505418 5.682176
15 1.439108 2.101911 2.502485 2.788030 3.008793 3.188066 3.338551 3.467934 3.581209 3.681803 3.772166 3.854108 3.929005 3.997925 4.061712 4.121047 4.176486 4.228488 4.277436 4.651989 4.905573 5.247825 5.480945 5.656694
16 1.436068 2.096865 2.495948 2.7803 3.000071 3.178498 3.328250 3.456985 3.569684 3.669759 3.75965 3.841162 3.915663 3.984216 4.047663 4.106681 4.161824 4.213546 4.262231 4.634774 4.887015 5.227455 5.459365 5.634213
17 1.433397 2.092428 2.490198 2.773497 2.992391 3.170069 3.319169 3.447329 3.559514 3.659127 3.748598 3.829725 3.903873 3.972099 4.035242 4.093976 4.148852 4.200325 4.248775 4.619514 4.870529 5.209359 5.440181 5.614218
18 1.431030 2.088497 2.485101 2.767464 2.985576 3.162585 3.311102 3.438749 3.550474 3.649671 3.738765 3.819547 3.893377 3.961308 4.024178 4.082656 4.137293 4.188541 4.236778 4.605888 4.855804 5.193166 5.423003 5.596306
19 1.428918 2.08499 2.480552 2.762076 2.979488 3.155896 3.30389 3.431072 3.542383 3.641206 3.729960 3.810429 3.883971 3.951636 4.014258 4.072505 4.126925 4.177969 4.226014 4.593643 4.84256 5.178585 5.407524 5.580158
20 1.427023 2.081842 2.476467 2.757236 2.974016 3.149882 3.297401 3.424164 3.535099 3.633583 3.722027 3.802213 3.875493 3.942916 4.005312 4.063349 4.117571 4.168429 4.216298 4.582577 4.830579 5.16538 5.393498 5.565519
24 1.421053 2.071924 2.463589 2.741964 2.956732 3.130867 3.276871 3.402288 3.512015 3.609405 3.696852 3.776122 3.848556 3.915194 3.976858 4.03421 4.087789 4.138042 4.185340 4.547205 4.792208 5.122986 5.348394 5.518394
30 1.415131 2.062082 2.450796 2.726770 2.939512 3.111895 3.256356 3.380399 3.488888 3.585153 3.67157 3.749892 3.821449 3.887270 3.948172 4.00481 4.057717 4.107336 4.154034 4.511241 4.753052 5.079524 5.302021 5.469846
40 1.409257 2.052316 2.438086 2.711654 2.922351 3.092956 3.235846 3.358481 3.465697 3.5608 3.646150 3.723487 3.794128 3.859096 3.919198 3.975085 4.027284 4.076232 4.122296 4.474532 4.712898 5.034679 5.253982 5.419412
60 1.40343 2.042626 2.425459 2.696611 2.905244 3.074043 3.215327 3.336516 3.442417 3.536316 3.620555 3.696861 3.766541 3.830609 3.889866 3.944956 3.996401 4.044636 4.09002 4.436878 4.671454 4.987998 5.203693 5.366394
120 1.397651 2.033010 2.412913 2.681639 2.888185 3.055146 3.194785 3.314484 3.419022 3.511665 3.59474 3.66996 3.738623 3.801735 3.86009 3.914325 3.96496 4.012423 4.057072 4.398008 4.628308 4.938805 5.150236 5.309666
1e38 1.391918 2.023469 2.400447 2.666735 2.871167 3.036254 3.174203 3.292360 3.395479 3.486805 3.568651 3.642718 3.710296 3.772381 3.829761 3.883069 3.93282 3.979437 4.023276 4.357546 4.582861 4.886029 5.092081 5.247256"""
q0750 = """\
2 2.267583 3.308014 3.969236 4.451126 4.82785 5.13561 5.394819 5.618097 5.813776 5.987632 6.143829 6.285461 6.41489 6.533954 6.644113 6.746546 6.842214 6.931913 7.01631 7.660853 8.09584 8.68119 9.0788 9.377929
3 2.011896 2.883775 3.431223 3.829258 4.140443 4.394852 4.609323 4.794233 4.956425 5.10064 5.230299 5.347941 5.455509 5.554514 5.646158 5.73141 5.811064 5.885775 5.956093 6.493827 6.857365 7.3472 7.680302 7.931152
4 1.901267 2.701018 3.198596 3.559322 3.841087 4.071417 4.265624 4.433118 4.580085 4.710812 4.828384 4.935098 5.032703 5.122566 5.205771 5.283192 5.355547 5.423427 5.48733 5.976418 6.307462 6.753955 7.057827 7.286775
5 1.839820 2.599651 3.069171 3.40865 3.673526 3.889955 4.072422 4.229795 4.367901 4.490764 4.601285 4.701617 4.793402 4.877922 4.956192 5.029034 5.09712 5.161005 5.221154 5.681792 5.993844 6.415033 6.70187 6.918073
6 1.800788 2.535293 2.986795 3.312495 3.566338 3.773641 3.948369 4.099056 4.231292 4.348941 4.45478 4.550869 4.638783 4.719746 4.794731 4.864523 4.929764 4.990987 5.048635 5.490302 5.789693 6.194025 6.469523 6.677248
7 1.773818 2.490830 2.929770 3.245783 3.491823 3.692639 3.86185 4.007755 4.135786 4.249692 4.352165 4.445203 4.530329 4.608729 4.681345 4.748937 4.812126 4.871427 4.927269 5.35523 5.64547 6.037624 6.304935 6.506543
8 1.754075 2.458283 2.887956 3.196776 3.436989 3.632943 3.798002 3.940301 4.065154 4.176226 4.276148 4.366871 4.44988 4.526332 4.597147 4.663065 4.724692 4.782528 4.836995 5.254505 5.537762 5.920623 6.18169 6.378634
9 1.739001 2.433431 2.855984 3.159245 3.394934 3.587097 3.748912 3.888384 4.010742 4.119586 4.2175 4.306396 4.387735 4.462649 4.532041 4.596635 4.657026 4.713704 4.767081 5.176307 5.454025 5.829512 6.085625 6.27887
10 1.727116 2.413835 2.830746 3.129578 3.361648 3.550769 3.709972 3.847165 3.967506 4.074547 4.170832 4.258248 4.33823 4.411895 4.480128 4.543645 4.60303 4.658765 4.711254 5.113722 5.386914 5.756374 6.008439 6.198662
11 1.717506 2.397989 2.810315 3.105535 3.334641 3.521265 3.678317 3.813629 3.932304 4.037852 4.132788 4.218976 4.297831 4.370457 4.437728 4.500349 4.558895 4.613844 4.665594 5.062423 5.331834 5.696254 5.944934 6.13263
12 1.709576 2.384911 2.793439 3.085654 3.312288 3.496821 3.65207 3.785802 3.903075 4.007365 4.101163 4.186312 4.264215 4.335963 4.402419 4.46428 4.522117 4.5764 4.627523 5.019561 5.285754 5.645883 5.891679 6.077222
13 1.70292 2.373934 2.779263 3.068939 3.293478 3.476235 3.62995 3.762334 3.878409 3.981623 4.074447 4.158707 4.235793 4.306786 4.372542 4.433751 4.490977 4.544687 4.595269 4.983178 5.246593 5.603014 5.846316 6.029999
14 1.697255 2.364590 2.767188 3.05469 3.27743 3.458659 3.611050 3.742271 3.85731 3.959593 4.051572 4.13506 4.211437 4.281774 4.346922 4.407563 4.464258 4.517468 4.567581 4.951886 5.212872 5.566049 5.807169 5.989222
15 1.692374 2.356539 2.756778 3.042397 3.263576 3.443476 3.594714 3.724919 3.839053 3.940521 4.031759 4.114571 4.190326 4.260088 4.324701 4.384844 4.441071 4.493842 4.54354 4.92467 5.183511 5.533819 5.773008 5.95362
16 1.688126 2.349531 2.747712 3.031684 3.251494 3.430227 3.580451 3.709761 3.823097 3.923845 4.01443 4.096644 4.171848 4.2411 4.305239 4.364939 4.420752 4.473133 4.522464 4.900769 5.1577 5.505449 5.742915 5.92224
17 1.684395 2.343375 2.739744 3.022264 3.240865 3.418565 3.567889 3.696405 3.809031 3.909139 3.999142 4.080823 4.155535 4.224333 4.288048 4.347353 4.402795 4.454828 4.50383 4.879604 5.134819 5.48027 5.716186 5.894353
18 1.681092 2.337926 2.732687 3.013916 3.23144 3.408218 3.55674 3.684546 3.796536 3.896071 3.985552 4.066754 4.141026 4.209415 4.27275 4.331699 4.386808 4.438527 4.487232 4.860723 5.114389 5.457759 5.692272 5.86939
19 1.678147 2.333067 2.726393 3.006467 3.223026 3.398978 3.546777 3.673945 3.785363 3.884381 3.973391 4.054162 4.128035 4.196054 4.259045 4.317673 4.37248 4.423914 4.472351 4.843772 5.096029 5.437505 5.67074 5.846902
20 1.675506 2.328708 2.720745 2.999780 3.215469 3.390674 3.537821 3.664411 3.775312 3.873861 3.962444 4.042823 4.116334 4.184018 4.246696 4.305031 4.359563 4.410739 4.458932 4.828464 5.079434 5.419178 5.651242 5.82653
24 1.667194 2.314991 2.702957 2.978701 3.191627 3.364455 3.50952 3.63426 3.7435 3.840544 3.927753 4.006868 4.079211 4.14581 4.207477 4.264864 4.318505 4.368841 4.416241 4.779619 5.026378 5.360437 5.58865 5.761054
30 1.658964 2.301406 2.68532 2.957771 3.167919 3.338345 3.481298 3.604155 3.711699 3.8072 3.892996 3.970809 4.041947 4.107423 4.168039 4.224442 4.277156 4.326617 4.373187 4.730101 4.9724 5.300398 5.524486 5.693793
40 1.650814 2.28795 2.667830 2.936984 3.144337 3.312335 3.453142 3.574077 3.679882 3.773798 3.858136 3.934602 4.004488 4.068796 4.128318 4.183691 4.235434 4.283976 4.329675 4.679735 4.917252 5.238689 5.458277 5.62419
60 1.642744 2.274622 2.650486 2.916339 3.120874 3.286413 3.425034 3.544004 3.648023 3.740302 3.823131 3.898196 3.966776 4.029861 4.088234 4.142523 4.19324 4.24081 4.285584 4.628295 4.860604 5.174794 5.389348 5.551435
120 1.634753 2.261421 2.633285 2.895829 3.097525 3.260567 3.396959 3.513912 3.616089 3.706672 3.787929 3.861531 3.92874 3.990536 4.047692 4.10083 4.150455 4.196985 4.240768 4.575490 4.802013 5.107977 5.316696 5.474283
1e38 1.626840 2.248346 2.616224 2.875451 3.074279 3.234786 3.368898 3.483775 3.584045 3.672862 3.752475 3.824535 3.890294 3.950721 4.006580 4.058483 4.106932 4.152338 4.195044 4.520933 4.740866 5.037152 5.238766 5.390726"""
q0800 = """\
2 2.666345 3.820436 4.558532 5.098158 5.520848 5.866626 6.158145 6.409446 6.62982 6.825717 7.001791 7.161505 7.307502 7.441845 7.566171 7.681802 7.789818 7.891113 7.986436 8.714887 9.206808 9.868718 10.31830 10.65683
3 2.316120 3.245426 3.832597 4.261107 4.596942 4.871989 5.104169 5.304561 5.480484 5.637021 5.777843 5.905682 6.022626 6.130305 6.230013 6.322797 6.409513 6.49087 6.567462 7.153711 7.55053 8.085698 8.449862 8.724212
4 2.168283 3.003795 3.52645 3.90676 4.204595 4.44853 4.654519 4.832388 4.988615 5.127694 5.25287 5.366554 5.470593 5.566425 5.655195 5.737827 5.815079 5.887577 5.955847 6.47896 6.833568 7.31242 7.638648 7.884592
5 2.087215 2.871505 3.358337 3.711564 3.987876 4.214094 4.405111 4.57007 4.714986 4.844026 4.960193 5.065723 5.162321 5.25132 5.333779 5.410553 5.482342 5.549725 5.613191 6.099852 6.430105 6.876484 7.180827 7.410389
6 2.036122 2.788188 3.252203 3.588013 3.850385 4.06507 4.246305 4.402806 4.540296 4.662734 4.772969 4.873124 4.964814 5.049304 5.127595 5.200498 5.268677 5.33268 5.392969 5.855517 6.169658 6.594568 6.884463 7.103222
7 2.001005 2.730943 3.179141 3.502777 3.755348 3.961886 4.136188 4.286677 4.418877 4.536604 4.642603 4.738913 4.82709 4.908349 4.983652 5.05378 5.119368 5.180945 5.238953 5.684175 5.986732 6.39621 6.675724 6.886723
8 1.975395 2.689205 3.125785 3.440421 3.685706 3.886163 4.055272 4.201248 4.329470 4.443647 4.546448 4.639854 4.725375 4.804189 4.87723 4.945254 5.008879 5.068616 5.124894 5.556957 5.850708 6.248453 6.520077 6.725179
9 1.955898 2.657432 3.085114 3.392817 3.632464 3.828199 3.993263 4.135716 4.260824 4.37222 4.472512 4.563637 4.64707 4.723960 4.79522 4.861587 4.923664 4.981949 5.036862 5.458529 5.745314 6.133773 6.399153 6.59959
10 1.940561 2.632439 3.053086 3.355281 3.590431 3.782386 3.944205 4.083824 4.206424 4.315576 4.413841 4.503121 4.584862 4.660193 4.730009 4.795032 4.855852 4.912959 4.966762 5.37997 5.661078 6.041965 6.302252 6.498885
11 1.928182 2.612267 3.027211 3.324922 3.556399 3.745257 3.904411 4.041698 4.16223 4.269529 4.366119 4.453871 4.534212 4.608251 4.676869 4.740775 4.800552 4.85668 4.909561 5.315725 5.5921 5.966667 6.222702 6.41616
12 1.917981 2.595645 3.005872 3.299861 3.528278 3.714552 3.871475 4.006806 4.125603 4.231343 4.326522 4.412988 4.492148 4.565096 4.632701 4.695665 4.754559 4.809858 4.861959 5.262152 5.534506 5.9037 6.156119 6.346875
13 1.909431 2.581711 2.987972 3.278821 3.504650 3.688731 3.843759 3.977426 4.094742 4.199153 4.293127 4.378492 4.45664 4.528654 4.595391 4.657546 4.715682 4.770269 4.8217 5.216755 5.485643 5.850201 6.099497 6.287919
14 1.90216 2.569864 2.972742 3.260906 3.484516 3.666713 3.82011 3.952342 4.068380 4.171642 4.264573 4.348985 4.426256 4.497459 4.563444 4.624895 4.682373 4.736342 4.787189 5.177769 5.443632 5.804138 6.050703 6.237084
15 1.895903 2.559666 2.959626 3.245467 3.467154 3.647714 3.799691 3.930673 4.045596 4.147854 4.239872 4.32345 4.399954 4.470446 4.53577 4.596605 4.653505 4.706931 4.757265 5.143906 5.4071 5.764029 6.00818 6.192757
16 1.89046 2.550797 2.948212 3.232024 3.452027 3.631152 3.781882 3.911763 4.025705 4.127077 4.218291 4.301132 4.376957 4.446821 4.511561 4.571849 4.628237 4.681181 4.731062 5.114203 5.375025 5.728765 5.970765 6.153733
17 1.885683 2.543012 2.93819 3.220213 3.438729 3.616585 3.766210 3.895117 4.008187 4.108772 4.19927 4.281455 4.356676 4.42598 4.490198 4.549999 4.605930 4.658444 4.707919 5.08793 5.346623 5.697501 5.937567 6.119088
18 1.881457 2.536125 2.929319 3.209754 3.426947 3.603672 3.752313 3.880348 3.992639 4.09252 4.182377 4.263975 4.338653 4.407454 4.471204 4.530568 4.586089 4.638215 4.687324 5.064516 5.321288 5.66958 5.907896 6.088107
19 1.877691 2.529988 2.921412 3.200427 3.416435 3.592147 3.739903 3.867156 3.978745 4.077993 4.167272 4.24834 4.322529 4.390877 4.454204 4.513172 4.568321 4.620098 4.668877 5.043513 5.298541 5.644481 5.881205 6.060224
20 1.874315 2.524485 2.914320 3.192058 3.407000 3.581797 3.728755 3.855300 3.966255 4.064929 4.153685 4.234272 4.308018 4.375954 4.438897 4.497506 4.552317 4.603776 4.652254 5.024562 5.277998 5.621789 5.857057 6.034984
24 1.863701 2.507187 2.89201 3.165709 3.377268 3.549158 3.69357 3.817854 3.92678 4.023614 4.110690 4.189731 4.262048 4.328656 4.390359 4.447807 4.501527 4.551956 4.599461 4.964202 5.212441 5.549191 5.779675 5.954012
30 1.853207 2.490080 2.869925 3.139592 3.347756 3.516716 3.658554 3.780544 3.887403 3.982357 4.067712 4.145167 4.216014 4.281252 4.341675 4.397921 4.450509 4.49987 4.546362 4.903188 5.145946 5.47522 5.700597 5.871091
40 1.842829 2.473164 2.848060 3.113699 3.318456 3.484461 3.62369 3.743347 3.848096 3.941126 4.024712 4.100533 4.16986 4.233681 4.292775 4.34777 4.399178 4.447421 4.492854 4.841333 5.078248 5.399473 5.619306 5.785608
60 1.832568 2.456435 2.826413 3.088026 3.289358 3.452379 3.588962 3.70624 3.808829 3.899882 3.981645 4.055774 4.123524 4.185868 4.243575 4.297261 4.34743 4.394499 4.438814 4.778404 5.009002 5.321406 5.535087 5.696698
120 1.822478 2.439890 2.804980 3.062567 3.260456 3.420458 3.55435 3.669198 3.769570 3.858583 3.938458 4.010829 4.076934 4.137731 4.193979 4.246285 4.295145 4.340968 4.384094 4.714106 4.937761 5.24027 5.446912 5.603078
1e38 1.812388 2.423529 2.783758 3.037317 3.231739 3.388684 3.519834 3.632192 3.73028 3.817183 3.895093 3.965627 4.030005 4.089173 4.143877 4.194716 4.242179 4.286668 4.328517 4.648069 4.863937 5.155024 5.353283 5.50281"""
q0850 = """\
2 3.226562 4.548022 5.398759 6.022701 6.512387 6.913502 7.251997 7.54401 7.800236 8.028116 8.233021 8.418953 8.588968 8.74545 8.890294 9.02503 9.150913 9.268977 9.380094 10.22972 10.80450 11.58094 12.11086 12.51097
3 2.721399 3.731515 4.374509 4.845675 5.215912 5.5197 5.776502 5.998388 6.193356 6.366968 6.523249 6.665198 6.795111 6.914781 7.025634 7.128823 7.225292 7.315823 7.401073 8.054202 8.496827 9.094477 9.501702 9.808753
4 2.514747 3.399285 3.956491 4.363675 4.68348 4.945965 5.16798 5.359938 5.52872 5.679113 5.814574 5.937683 6.050411 6.154302 6.25058 6.34024 6.424092 6.502812 6.576963 7.145835 7.532079 8.054293 8.410406 8.679063
5 2.403262 3.220436 3.730867 4.102766 4.394545 4.633955 4.836465 5.011596 5.165628 5.302922 5.426626 5.539086 5.642096 5.737057 5.825086 5.907085 5.983793 6.055822 6.123687 6.644817 6.999123 7.478735 7.806159 8.05333
6 2.333697 3.108965 3.589945 3.939419 4.213263 4.437836 4.627757 4.791998 4.936465 5.06525 5.181307 5.286833 5.38351 5.47265 5.555297 5.632296 5.704339 5.771998 5.835756 6.325681 6.659107 7.110867 7.419518 7.652631
7 2.286206 3.032919 3.493643 3.827573 4.088914 4.303095 4.484169 4.640738 4.77845 4.901217 5.011858 5.112468 5.20465 5.289655 5.368478 5.441922 5.510646 5.575196 5.63603 6.103717 6.422253 6.85415 7.149428 7.372543
8 2.251741 2.977758 3.423692 3.746199 3.998303 4.204778 4.379271 4.530117 4.662781 4.781044 4.887624 4.984545 5.073351 5.155248 5.231193 5.30196 5.368185 5.430392 5.489022 5.939926 6.24721 6.664096 6.949269 7.16483
9 2.225598 2.935932 3.370588 3.684337 3.92933 4.12985 4.299243 4.445643 4.57438 4.68913 4.792542 4.88658 4.972745 5.052208 5.125899 5.194568 5.258832 5.319201 5.376101 5.81381 6.112237 6.517297 6.794508 7.004117
10 2.205093 2.903132 3.328904 3.635722 3.875064 4.070838 4.236155 4.378996 4.504581 4.61651 4.717372 4.809088 4.893124 4.970624 5.042494 5.109469 5.172148 5.231029 5.28653 5.713546 6.004781 6.400234 6.670974 6.875744
11 2.18858 2.876725 3.295316 3.596509 3.831250 4.023149 4.18513 4.325052 4.448048 4.557656 4.656418 4.74622 4.828499 4.904377 4.974743 5.040315 5.101683 5.159333 5.213674 5.631818 5.917077 6.304535 6.569888 6.770631
12 2.174999 2.855009 3.267675 3.564211 3.795132 3.983803 4.143002 4.280484 4.401312 4.508975 4.605974 4.694168 4.77497 4.849483 4.918582 4.982973 5.043235 5.099847 5.153209 5.563854 5.84405 6.22473 6.485512 6.682838
13 2.163633 2.836837 3.244531 3.537147 3.764842 3.950784 4.107624 4.243034 4.36202 4.468027 4.563524 4.650346 4.729887 4.803233 4.871249 4.93463 4.993946 5.049668 5.102191 5.5064 5.782244 6.157087 6.413931 6.608311
14 2.153982 2.821408 3.224869 3.514139 3.739075 3.922677 4.077491 4.21112 4.328519 4.433097 4.527298 4.612934 4.691385 4.763723 4.830801 4.893306 4.951801 5.006752 5.058548 5.457164 5.729217 6.09897 6.352377 6.544185
15 2.145684 2.808145 3.207959 3.494339 3.716887 3.898461 4.051515 4.183594 4.299611 4.402944 4.496014 4.580615 4.658112 4.729568 4.795825 4.857564 4.91534 4.969615 5.020773 5.414476 5.683193 6.048461 6.298836 6.488374
16 2.138475 2.796621 3.193261 3.47712 3.697581 3.877377 4.028889 4.159607 4.274409 4.376647 4.468721 4.55241 4.629066 4.699743 4.765275 4.826337 4.883478 4.937154 4.987748 5.377097 5.642851 6.004129 6.251805 6.439321
17 2.132153 2.786517 3.180367 3.462007 3.680628 3.858856 4.009003 4.138517 4.252242 4.353508 4.444698 4.527576 4.603485 4.673469 4.738356 4.798814 4.855389 4.908532 4.958622 5.344083 5.607184 5.964886 6.21014 6.395841
18 2.126564 2.777584 3.168965 3.448637 3.665623 3.842455 3.991387 4.119827 4.232591 4.332989 4.423388 4.505541 4.58078 4.650143 4.714452 4.774369 4.830436 4.883101 4.932739 5.314701 5.575413 5.929888 6.172953 6.357013
19 2.121587 2.769631 3.158810 3.436724 3.652248 3.82783 3.975673 4.103149 4.21505 4.314667 4.404354 4.485854 4.560491 4.629294 4.693081 4.752509 4.808118 4.860351 4.909581 5.288379 5.546924 5.898469 6.139544 6.322112
20 2.117128 2.762505 3.149708 3.426043 3.640251 3.814707 3.961568 4.088173 4.199295 4.298206 4.387249 4.468158 4.542248 4.610545 4.673858 4.732844 4.788036 4.839877 4.888736 5.264656 5.521227 5.870097 6.109355 6.290558
24 2.103128 2.740133 3.121118 3.392466 3.60251 3.773393 3.917129 4.040961 4.149593 4.246248 4.33323 4.412242 4.484578 4.551244 4.613036 4.670595 4.724445 4.775019 4.822681 5.189274 5.439419 5.779554 6.012859 6.189586
30 2.089309 2.718054 3.092876 3.359261 3.56514 3.732436 3.873024 3.994053 4.10016 4.19452 4.2794 4.356475 4.427015 4.492009 4.552236 4.608325 4.660792 4.710058 4.756481 5.113372 5.356776 5.687685 5.914664 6.08662
40 2.07567 2.696264 3.064978 3.326418 3.52813 3.691822 3.829233 3.947423 4.050965 4.142986 4.225718 4.300807 4.369502 4.432773 4.491385 4.545955 4.596987 4.644896 4.69003 5.036754 5.27302 5.594062 5.814221 5.981005
60 2.062208 2.674759 3.037417 3.293931 3.491470 3.651535 3.785736 3.901046 4.001976 4.091607 4.172136 4.245183 4.311975 4.373463 4.430401 4.483391 4.532928 4.579419 4.623205 4.95919 5.187807 5.498134 5.710792 5.871841
120 2.048920 2.653534 3.010189 3.261791 3.455148 3.611561 3.742514 3.854896 3.953159 4.040341 4.118605 4.189543 4.254363 4.314 4.369191 4.42053 4.4685 4.513501 4.555864 4.880396 5.100706 5.399172 5.60337 5.757859
1e38 2.035805 2.632586 2.983286 3.229990 3.419154 3.571884 3.699544 3.808945 3.904479 3.989143 4.065068 4.133821 4.19659 4.254292 4.307653 4.357255 4.403572 4.446994 4.487848 4.800043 5.011193 5.296241 5.4906 5.637297"""
q0900 = """\
1 8.929 13.44 16.36 18.49 20.15 21.51 22.64 23.62 24.48 25.24 25.92 26.54 27.10 27.62 28.10 28.54 28.96 29.35 29.71 32.50 34.38 36.91 38.62 39.91
2 4.129 5.733 6.773 7.538 8.139 8.633 9.049 9.409 9.725 10.01 10.26 10.49 10.70 10.89 11.07 11.24 11.39 11.54 11.68 12.73 13.44 14.40 15.04 15.54
3 3.328 4.467 5.199 5.738 6.162 6.511 6.806 7.062 7.287 7.487 7.667 7.831 7.982 8.120 8.248 8.368 8.479 8.584 8.683 9.440 9.954 10.65 11.12 11.48
4 3.015 3.976 4.586 5.035 5.388 5.679 5.926 6.139 6.327 6.494 6.645 6.783 6.909 7.025 7.132 7.233 7.326 7.414 7.497 8.135 8.569 9.156 9.558 9.861
5 2.850 3.717 4.264 4.664 4.979 5.238 5.458 5.648 5.816 5.965 6.100 6.223 6.336 6.439 6.536 6.626 6.710 6.788 6.863 7.435 7.824 8.353 8.714 8.987
6 2.748 3.558 4.065 4.435 4.726 4.966 5.168 5.344 5.499 5.637 5.762 5.875 5.979 6.075 6.164 6.247 6.325 6.398 6.466 6.996 7.358 7.848 8.184 8.438
7 2.679 3.451 3.931 4.280 4.555 4.780 4.971 5.137 5.283 5.413 5.530 5.637 5.735 5.826 5.910 5.988 6.061 6.130 6.195 6.695 7.036 7.500 7.818 8.059
8 2.630 3.374 3.834 4.169 4.431 4.646 4.829 4.987 5.126 5.250 5.362 5.464 5.558 5.644 5.724 5.799 5.869 5.935 5.997 6.475 6.801 7.245 7.550 7.780
9 2.592 3.316 3.761 4.084 4.337 4.545 4.721 4.873 5.007 5.126 5.234 5.333 5.423 5.506 5.583 5.655 5.722 5.786 5.845 6.306 6.621 7.049 7.343 7.566
10 2.563 3.270 3.704 4.018 4.264 4.465 4.636 4.783 4.913 5.029 5.134 5.229 5.316 5.397 5.472 5.542 5.607 5.668 5.726 6.173 6.478 6.894 7.180 7.396
11 2.540 3.234 3.658 3.965 4.205 4.401 4.567 4.711 4.838 4.951 5.053 5.145 5.231 5.309 5.382 5.450 5.514 5.573 5.630 6.065 6.363 6.768 7.046 7.257
12 2.521 3.204 3.621 3.921 4.156 4.349 4.511 4.652 4.776 4.886 4.986 5.076 5.160 5.236 5.308 5.374 5.436 5.495 5.550 5.975 6.267 6.663 6.936 7.142
13 2.504 3.179 3.589 3.885 4.116 4.304 4.464 4.602 4.724 4.832 4.930 5.019 5.100 5.175 5.245 5.310 5.371 5.429 5.483 5.900 6.186 6.575 6.842 7.045
14 2.491 3.158 3.563 3.854 4.081 4.267 4.424 4.560 4.679 4.786 4.882 4.969 5.050 5.124 5.192 5.256 5.316 5.372 5.426 5.836 6.116 6.499 6.762 6.961
15 2.479 3.140 3.540 3.828 4.052 4.235 4.390 4.524 4.641 4.746 4.841 4.927 5.006 5.079 5.146 5.209 5.268 5.324 5.376 5.780 6.056 6.433 6.692 6.888
16 2.469 3.124 3.520 3.804 4.026 4.207 4.360 4.492 4.608 4.712 4.805 4.890 4.968 5.040 5.106 5.169 5.227 5.282 5.333 5.731 6.004 6.376 6.631 6.825
17 2.460 3.110 3.503 3.784 4.003 4.182 4.334 4.464 4.579 4.681 4.774 4.857 4.934 5.005 5.071 5.133 5.190 5.244 5.295 5.688 5.958 6.325 6.577 6.769
18 2.452 3.098 3.487 3.766 3.984 4.161 4.310 4.440 4.553 4.654 4.746 4.829 4.905 4.975 5.040 5.101 5.158 5.211 5.262 5.650 5.917 6.280 6.529 6.718
19 2.445 3.087 3.474 3.751 3.966 4.142 4.290 4.418 4.530 4.630 4.721 4.803 4.878 4.948 5.012 5.072 5.129 5.182 5.232 5.616 5.880 6.239 6.486 6.673
20 2.439 3.077 3.462 3.736 3.950 4.124 4.271 4.398 4.510 4.609 4.699 4.780 4.855 4.923 4.987 5.047 5.103 5.155 5.205 5.586 5.847 6.202 6.447 6.633
24 2.420 3.047 3.423 3.692 3.900 4.070 4.213 4.336 4.445 4.541 4.628 4.707 4.780 4.847 4.909 4.966 5.020 5.071 5.119 5.489 5.741 6.086 6.323 6.503
30 2.400 3.017 3.386 3.648 3.851 4.016 4.155 4.275 4.381 4.474 4.559 4.635 4.706 4.770 4.830 4.886 4.939 4.988 5.034 5.391 5.636 5.969 6.198 6.372
40 2.381 2.988 3.348 3.605 3.802 3.963 4.099 4.215 4.317 4.408 4.490 4.564 4.632 4.694 4.752 4.806 4.857 4.904 4.949 5.294 5.529 5.850 6.071 6.238
60 2.363 2.959 3.312 3.562 3.755 3.911 4.042 4.155 4.254 4.342 4.421 4.493 4.558 4.619 4.675 4.727 4.775 4.821 4.864 5.196 5.422 5.730 5.941 6.101
120 2.344 2.930 3.276 3.520 3.707 3.859 3.986 4.096 4.191 4.276 4.353 4.422 4.485 4.543 4.597 4.647 4.694 4.738 4.779 5.097 5.313 5.606 5.808 5.960
1e38 2.326 2.902 3.240 3.478 3.661 3.808 3.931 4.037 4.129 4.211 4.285 4.351 4.412 4.468 4.519 4.568 4.612 4.654 4.694 4.997 5.202 5.480 5.669 5.812"""
q0950 = """\
1 17.97 26.98 32.82 37.08 40.41 43.12 45.40 47.36 49.07 50.59 51.96 53.20 54.33 55.36 56.32 57.22 58.04 58.83 59.56 65.15 68.92 73.97 77.40 79.98
2 6.085 8.331 9.799 10.88 11.73 12.43 13.03 13.54 13.99 14.40 14.76 15.09 15.39 15.65 15.92 16.14 16.38 16.57 16.78 18.27 19.28 20.66 21.59 22.29
3 4.501 5.910 6.825 7.502 8.037 8.478 8.852 9.177 9.462 9.717 9.946 10.15 10.35 10.52 10.69 10.84 10.98 11.11 11.24 12.21 12.86 13.76 14.36 14.82
4 3.926 5.040 5.757 6.287 6.706 7.053 7.347 7.602 7.826 8.027 8.208 8.373 8.524 8.664 8.793 8.914 9.027 9.133 9.233 10.00 10.53 11.24 11.73 12.10
5 3.635 4.602 5.218 5.673 6.033 6.330 6.582 6.801 6.995 7.167 7.323 7.466 7.596 7.716 7.828 7.932 8.030 8.122 8.208 8.875 9.330 9.949 10.37 10.69
6 3.460 4.339 4.896 5.305 5.628 5.895 6.122 6.319 6.493 6.649 6.789 6.917 7.034 7.143 7.244 7.338 7.426 7.508 7.586 8.189 8.601 9.162 9.547 9.839
7 3.344 4.165 4.681 5.060 5.359 5.606 5.815 5.997 6.158 6.302 6.431 6.550 6.658 6.759 6.852 6.939 7.020 7.097 7.169 7.727 8.110 8.631 8.989 9.260
8 3.261 4.041 4.529 4.886 5.167 5.399 5.596 5.767 5.918 6.053 6.175 6.287 6.389 6.483 6.571 6.653 6.729 6.801 6.869 7.395 7.756 8.248 8.586 8.843
9 3.199 3.948 4.415 4.755 5.024 5.244 5.432 5.595 5.738 5.867 5.983 6.089 6.186 6.276 6.359 6.437 6.510 6.579 6.643 7.144 7.488 7.958 8.281 8.526
10 3.151 3.877 4.327 4.654 4.912 5.124 5.304 5.460 5.598 5.722 5.833 5.935 6.028 6.114 6.194 6.269 6.339 6.405 6.467 6.948 7.278 7.730 8.041 8.276
11 3.113 3.820 4.256 4.574 4.823 5.028 5.202 5.353 5.486 5.605 5.713 5.811 5.901 5.984 6.062 6.134 6.202 6.265 6.325 6.790 7.109 7.546 7.847 8.075
12 3.081 3.773 4.199 4.508 4.750 4.950 5.119 5.265 5.395 5.510 5.615 5.710 5.797 5.878 5.953 6.023 6.089 6.151 6.209 6.660 6.970 7.394 7.687 7.908
13 3.055 3.734 4.151 4.453 4.690 4.884 5.049 5.192 5.318 5.431 5.533 5.625 5.711 5.789 5.862 5.931 5.995 6.055 6.112 6.551 6.853 7.267 7.552 7.769
14 3.033 3.701 4.111 4.407 4.639 4.829 4.990 5.130 5.253 5.364 5.463 5.554 5.637 5.714 5.785 5.852 5.915 5.973 6.029 6.459 6.754 7.159 7.437 7.649
15 3.014 3.673 4.076 4.367 4.595 4.782 4.940 5.077 5.198 5.306 5.403 5.492 5.574 5.649 5.719 5.785 5.846 5.904 5.958 6.379 6.669 7.065 7.338 7.546
16 2.998 3.649 4.046 4.333 4.557 4.741 4.896 5.031 5.150 5.256 5.352 5.439 5.519 5.593 5.662 5.726 5.786 5.843 5.896 6.310 6.594 6.983 7.252 7.456
17 2.984 3.628 4.020 4.303 4.524 4.705 4.858 4.991 5.108 5.212 5.306 5.392 5.471 5.544 5.612 5.675 5.734 5.790 5.842 6.249 6.529 6.912 7.176 7.377
18 2.971 3.609 3.997 4.276 4.494 4.673 4.824 4.955 5.071 5.173 5.266 5.351 5.429 5.501 5.567 5.629 5.688 5.743 5.794 6.195 6.471 6.848 7.108 7.307
19 2.960 3.593 3.977 4.253 4.468 4.645 4.794 4.924 5.037 5.139 5.231 5.314 5.391 5.462 5.528 5.589 5.647 5.701 5.752 6.147 6.419 6.791 7.048 7.244
20 2.950 3.578 3.958 4.232 4.445 4.620 4.768 4.895 5.008 5.108 5.199 5.282 5.357 5.427 5.492 5.553 5.610 5.663 5.714 6.104 6.372 6.740 6.994 7.187
24 2.919 3.532 3.901 4.166 4.373 4.541 4.684 4.807 4.915 5.012 5.099 5.179 5.251 5.319 5.381 5.439 5.494 5.545 5.594 5.968 6.226 6.578 6.822 7.007
30 2.888 3.486 3.845 4.102 4.301 4.464 4.601 4.720 4.824 4.917 5.001 5.077 5.147 5.211 5.271 5.327 5.379 5.429 5.475 5.833 6.080 6.417 6.650 6.827
40 2.858 3.442 3.791 4.039 4.232 4.388 4.521 4.634 4.735 4.824 4.904 4.977 5.044 5.106 5.163 5.216 5.266 5.313 5.358 5.700 5.934 6.255 6.477 6.645
60 2.829 3.399 3.737 3.977 4.163 4.314 4.441 4.550 4.646 4.732 4.808 4.878 4.942 5.001 5.056 5.107 5.154 5.199 5.241 5.566 5.789 6.093 6.302 6.462
120 2.800 3.356 3.685 3.917 4.096 4.241 4.363 4.468 4.560 4.641 4.714 4.781 4.842 4.898 4.950 4.998 5.043 5.086 5.126 5.434 5.644 5.929 6.126 6.275
1e38 2.772 3.314 3.633 3.858 4.030 4.170 4.286 4.387 4.474 4.552 4.622 4.685 4.743 4.796 4.845 4.891 4.934 4.974 5.012 5.301 5.498 5.764 5.947 6.085"""
q0975 = """\
1 35.99 54.00 65.69 74.22 80.87 86.29 90.85 94.77 98.20 101.3 104.0 106.5 108.8 110.8 112.7 114.5 116.2 117.7 119.2 130.4 137.9 148.1 154.9 160.0
2 8.776 11.94 14.02 15.54 16.75 17.74 18.58 19.31 19.95 20.52 21.03 21.49 21.91 22.30 22.67 23.01 23.32 23.62 23.89 26.03 27.47 29.42 30.74 31.74
3 5.907 7.661 8.808 9.659 10.33 10.89 11.36 11.77 12.14 12.46 12.75 13.01 13.25 13.47 13.68 13.87 14.05 14.22 14.38 15.62 16.46 17.58 18.37 18.95
4 4.943 6.244 7.088 7.715 8.213 8.625 8.975 9.279 9.548 9.788 10.00 10.20 10.38 10.55 10.71 10.85 10.99 11.11 11.23 12.16 12.78 13.65 14.23 14.68
5 4.474 5.558 6.257 6.775 7.186 7.526 7.816 8.068 8.291 8.490 8.670 8.834 8.984 9.124 9.253 9.373 9.486 9.592 9.693 10.47 11.00 11.72 12.21 12.59
6 4.198 5.158 5.772 6.226 6.586 6.884 7.138 7.359 7.554 7.729 7.887 8.031 8.163 8.285 8.399 8.505 8.605 8.698 8.787 9.469 9.937 10.57 11.01 11.34
7 4.018 4.897 5.455 5.867 6.194 6.464 6.694 6.894 7.071 7.230 7.373 7.504 7.624 7.735 7.838 7.935 8.025 8.110 8.191 8.812 9.239 9.822 10.22 10.53
8 3.891 4.714 5.233 5.616 5.919 6.169 6.382 6.567 6.731 6.878 7.011 7.132 7.244 7.347 7.442 7.532 7.616 7.694 7.769 8.346 8.743 9.286 9.660 9.944
9 3.797 4.578 5.069 5.430 5.715 5.950 6.151 6.325 6.479 6.617 6.742 6.856 6.961 7.057 7.148 7.232 7.311 7.385 7.455 7.999 8.373 8.885 9.238 9.506
10 3.725 4.474 4.943 5.286 5.558 5.782 5.972 6.138 6.284 6.415 6.534 6.642 6.742 6.834 6.920 7.000 7.075 7.145 7.212 7.729 8.085 8.574 8.910 9.166
11 3.667 4.391 4.843 5.173 5.433 5.648 5.830 5.989 6.130 6.255 6.369 6.473 6.568 6.656 6.738 6.815 6.887 6.955 7.018 7.514 7.856 8.324 8.648 8.894
12 3.620 4.324 4.761 5.080 5.332 5.539 5.715 5.868 6.004 6.125 6.235 6.335 6.426 6.511 6.591 6.664 6.734 6.799 6.860 7.338 7.668 8.120 8.433 8.670
13 3.582 4.269 4.694 5.004 5.248 5.449 5.620 5.768 5.899 6.017 6.123 6.220 6.309 6.391 6.468 6.539 6.606 6.670 6.729 7.192 7.511 7.950 8.253 8.484
14 3.549 4.222 4.638 4.940 5.178 5.373 5.540 5.684 5.812 5.926 6.029 6.123 6.210 6.290 6.364 6.434 6.499 6.560 6.618 7.068 7.379 7.806 8.100 8.325
15 3.521 4.182 4.589 4.885 5.117 5.309 5.471 5.612 5.736 5.848 5.948 6.040 6.125 6.205 6.275 6.343 6.407 6.467 6.523 6.962 7.265 7.682 7.969 8.189
16 3.497 4.148 4.548 4.838 5.065 5.253 5.412 5.550 5.671 5.780 5.879 5.969 6.051 6.128 6.199 6.265 6.327 6.386 6.441 6.870 7.167 7.574 7.856 8.070
17 3.476 4.118 4.511 4.797 5.020 5.204 5.360 5.495 5.615 5.722 5.818 5.906 5.987 6.062 6.132 6.197 6.258 6.315 6.369 6.790 7.080 7.479 7.756 7.966
18 3.458 4.091 4.479 4.760 4.980 5.161 5.315 5.448 5.565 5.670 5.765 5.851 5.931 6.004 6.073 6.137 6.196 6.253 6.306 6.719 7.004 7.396 7.667 7.874
19 3.441 4.068 4.451 4.728 4.945 5.123 5.274 5.405 5.521 5.624 5.717 5.803 5.881 5.953 6.020 6.083 6.142 6.197 6.250 6.656 6.936 7.322 7.589 7.792
20 3.427 4.047 4.426 4.699 4.914 5.089 5.238 5.367 5.481 5.583 5.675 5.759 5.836 5.907 5.974 6.035 6.093 6.148 6.199 6.599 6.876 7.255 7.518 7.718
24 3.381 3.982 4.347 4.610 4.816 4.984 5.126 5.250 5.358 5.455 5.543 5.623 5.696 5.764 5.827 5.886 5.941 5.993 6.042 6.422 6.685 7.046 7.295 7.486
30 3.337 3.919 4.271 4.523 4.720 4.881 5.017 5.134 5.238 5.330 5.414 5.490 5.560 5.624 5.684 5.740 5.792 5.841 5.888 6.248 6.497 6.838 7.075 7.255
40 3.294 3.858 4.196 4.439 4.627 4.780 4.910 5.022 5.120 5.208 5.287 5.360 5.426 5.487 5.543 5.596 5.646 5.692 5.736 6.077 6.311 6.633 6.855 7.025
60 3.251 3.798 4.124 4.356 4.536 4.682 4.806 4.912 5.006 5.089 5.164 5.232 5.295 5.352 5.406 5.456 5.502 5.546 5.588 5.908 6.127 6.428 6.636 6.795
120 3.210 3.739 4.053 4.275 4.447 4.587 4.704 4.805 4.894 4.972 5.043 5.107 5.166 5.221 5.271 5.318 5.362 5.403 5.442 5.741 5.946 6.225 6.418 6.564
1e38 3.170 3.682 3.984 4.197 4.361 4.494 4.605 4.700 4.784 4.858 4.925 4.985 5.041 5.092 5.139 5.183 5.224 5.262 5.299 5.577 5.766 6.023 6.199 6.333"""
q0990 = """\
1 90.02 135.0 164.3 185.6 202.2 215.8 227.2 237.0 245.6 253.2 260.0 266.2 271.8 277.0 281.8 286.3 290.4 294.3 298.0 326.0 344.8 370.1 387.3 400.1
2 14.04 19.02 22.29 24.72 26.63 28.20 29.53 30.68 31.69 32.59 33.40 34.13 34.81 35.43 36.00 36.53 37.03 37.50 37.95 41.32 43.61 46.70 48.80 50.38
3 8.260 10.62 12.17 13.32 14.24 15.00 15.65 16.21 16.69 17.13 17.53 17.89 18.22 18.52 18.81 19.07 19.32 19.55 19.77 21.44 22.59 24.13 25.19 25.99
4 6.511 8.120 9.173 9.958 10.58 11.10 11.54 11.92 12.26 12.57 12.84 13.09 13.32 13.53 13.72 13.91 14.08 14.24 14.39 15.57 16.38 17.46 18.20 18.77
5 5.702 6.976 7.804 8.421 8.913 9.321 9.669 9.971 10.24 10.48 10.70 10.89 11.08 11.24 11.40 11.55 11.68 11.81 11.93 12.87 13.51 14.39 14.99 15.45
6 5.243 6.331 7.033 7.556 7.972 8.318 8.612 8.869 9.097 9.300 9.485 9.653 9.808 9.951 10.08 10.21 10.32 10.43 10.54 11.34 11.89 12.65 13.17 13.55
7 4.949 5.919 6.542 7.005 7.373 7.678 7.939 8.166 8.367 8.548 8.711 8.860 8.997 9.124 9.242 9.353 9.456 9.553 9.645 10.36 10.85 11.52 11.98 12.34
8 4.745 5.635 6.204 6.625 6.959 7.237 7.474 7.680 7.863 8.027 8.176 8.311 8.436 8.552 8.659 8.760 8.854 8.943 9.027 9.677 10.13 10.74 11.17 11.49
9 4.596 5.428 5.957 6.347 6.657 6.915 7.134 7.325 7.494 7.646 7.784 7.910 8.025 8.132 8.232 8.325 8.412 8.495 8.573 9.177 9.594 10.17 10.56 10.86
10 4.482 5.270 5.769 6.136 6.428 6.669 6.875 7.054 7.213 7.356 7.485 7.603 7.712 7.812 7.906 7.993 8.075 8.153 8.226 8.794 9.186 9.726 10.10 10.38
11 4.392 5.146 5.621 5.970 6.247 6.476 6.671 6.841 6.992 7.127 7.250 7.362 7.464 7.560 7.648 7.731 7.809 7.883 7.952 8.491 8.864 9.377 9.732 10.00
12 4.320 5.046 5.502 5.836 6.101 6.320 6.507 6.670 6.814 6.943 7.060 7.166 7.265 7.356 7.441 7.520 7.594 7.664 7.730 8.246 8.602 9.093 9.433 9.693
13 4.260 4.964 5.404 5.726 5.981 6.192 6.372 6.528 6.666 6.791 6.903 7.006 7.100 7.188 7.269 7.345 7.417 7.484 7.548 8.043 8.386 8.859 9.186 9.436
14 4.210 4.895 5.322 5.634 5.881 6.085 6.258 6.409 6.543 6.663 6.772 6.871 6.962 7.047 7.125 7.199 7.268 7.333 7.394 7.873 8.204 8.661 8.978 9.219
15 4.167 4.836 5.252 5.556 5.796 5.994 6.162 6.309 6.438 6.555 6.660 6.756 6.845 6.927 7.003 7.074 7.141 7.204 7.264 7.727 8.049 8.492 8.800 9.034
16 4.131 4.786 5.192 5.489 5.722 5.915 6.079 6.222 6.348 6.461 6.564 6.658 6.744 6.823 6.897 6.967 7.032 7.093 7.151 7.602 7.915 8.346 8.646 8.874
17 4.099 4.742 5.140 5.430 5.659 5.847 6.007 6.147 6.270 6.380 6.480 6.572 6.656 6.733 6.806 6.873 6.937 6.997 7.053 7.493 7.798 8.219 8.511 8.734
18 4.071 4.703 5.094 5.379 5.603 5.787 5.944 6.081 6.201 6.309 6.407 6.496 6.579 6.655 6.725 6.791 6.854 6.912 6.967 7.397 7.696 8.107 8.393 8.611
19 4.046 4.669 5.054 5.334 5.553 5.735 5.889 6.022 6.141 6.246 6.342 6.430 6.510 6.585 6.654 6.719 6.780 6.837 6.891 7.312 7.605 8.008 8.288 8.501
20 4.024 4.639 5.018 5.293 5.510 5.688 5.839 5.970 6.086 6.190 6.285 6.370 6.449 6.523 6.591 6.654 6.714 6.770 6.823 7.237 7.523 7.919 8.194 8.404
24 3.955 4.546 4.907 5.168 5.373 5.542 5.685 5.809 5.919 6.017 6.105 6.186 6.261 6.330 6.394 6.453 6.510 6.562 6.612 7.001 7.270 7.641 7.900 8.097
30 3.889 4.455 4.799 5.048 5.242 5.401 5.536 5.653 5.756 5.848 5.932 6.008 6.078 6.142 6.202 6.258 6.311 6.361 6.407 6.771 7.023 7.370 7.611 7.796
40 3.825 4.367 4.695 4.931 5.114 5.265 5.392 5.502 5.599 5.685 5.764 5.835 5.900 5.961 6.017 6.069 6.118 6.165 6.208 6.547 6.781 7.104 7.328 7.499
60 3.762 4.282 4.594 4.818 4.991 5.133 5.253 5.356 5.447 5.528 5.601 5.667 5.728 5.784 5.837 5.886 5.931 5.974 6.015 6.329 6.546 6.843 7.049 7.207
120 3.702 4.200 4.497 4.708 4.872 5.005 5.118 5.214 5.299 5.375 5.443 5.505 5.561 5.614 5.662 5.708 5.750 5.790 5.827 6.117 6.316 6.588 6.776 6.919
1e38 3.643 4.120 4.403 4.603 4.757 4.882 4.987 5.078 5.157 5.227 5.290 5.348 5.400 5.448 5.493 5.535 5.574 5.611 5.645 5.911 6.092 6.338 6.507 6.636"""
q0995 = """\
1 180.1 270.1 328.5 371.2 404.4 431.6 454.4 474.0 491.1 506.3 520.0 532.4 543.6 554.0 563.6 572.5 580.9 588.7 596.0 652.0 689.6 740.2 774.5 800.3
2 19.92 26.97 31.60 35.02 37.73 39.95 41.83 43.46 44.89 46.16 47.31 48.35 49.30 50.17 50.99 51.74 52.45 53.12 53.74 58.52 61.76 66.13 69.10 71.35
3 10.54 13.51 15.45 16.91 18.06 19.01 19.83 20.53 21.15 21.70 22.20 22.66 23.08 23.46 23.82 24.15 24.46 24.76 25.03 27.15 28.60 30.55 31.88 32.90
4 7.916 9.813 11.06 11.99 12.74 13.35 13.88 14.33 14.74 15.10 15.42 15.72 15.99 16.24 16.47 16.70 16.90 17.09 17.28 18.68 19.63 20.93 21.83 22.50
5 6.751 8.195 9.140 9.846 10.41 10.88 11.28 11.62 11.93 12.21 12.46 12.69 12.90 13.09 13.27 13.44 13.60 13.74 13.89 14.96 15.71 16.72 17.41 17.94
6 6.105 7.306 8.087 8.670 9.135 9.522 9.852 10.14 10.39 10.62 10.83 11.02 11.19 11.35 11.50 11.64 11.78 11.90 12.02 12.92 13.54 14.40 14.98 15.43
7 5.698 6.750 7.429 7.935 8.339 8.674 8.961 9.211 9.433 9.632 9.812 9.976 10.13 10.27 10.40 10.52 10.64 10.74 10.84 11.64 12.18 12.93 13.45 13.85
8 5.420 6.370 6.981 7.435 7.796 8.097 8.354 8.578 8.777 8.955 9.117 9.265 9.401 9.527 9.644 9.754 9.856 9.953 10.04 10.76 11.25 11.92 12.39 12.75
9 5.218 6.096 6.657 7.073 7.405 7.680 7.915 8.120 8.302 8.466 8.614 8.749 8.874 8.989 9.097 9.197 9.292 9.381 9.465 10.12 10.57 11.19 11.62 11.95
10 5.065 5.888 6.412 6.800 7.109 7.365 7.584 7.775 7.944 8.096 8.233 8.359 8.475 8.583 8.683 8.777 8.864 8.947 9.025 9.635 10.06 10.64 11.04 11.35
11 4.945 5.726 6.221 6.587 6.878 7.119 7.325 7.504 7.664 7.807 7.936 8.055 8.164 8.265 8.359 8.447 8.530 8.608 8.681 9.255 9.653 10.20 10.58 10.87
12 4.849 5.596 6.068 6.416 6.693 6.922 7.117 7.288 7.439 7.574 7.697 7.810 7.913 8.009 8.099 8.182 8.261 8.335 8.405 8.950 9.328 9.850 10.21 10.49
13 4.769 5.489 5.943 6.276 6.541 6.760 6.947 7.110 7.254 7.384 7.502 7.609 7.708 7.800 7.885 7.965 8.040 8.111 8.178 8.699 9.061 9.560 9.907 10.17
14 4.703 5.401 5.838 6.160 6.414 6.625 6.805 6.962 7.101 7.225 7.338 7.442 7.537 7.625 7.707 7.784 7.856 7.924 7.988 8.489 8.837 9.317 9.651 9.906
15 4.647 5.325 5.750 6.061 6.307 6.511 6.685 6.836 6.970 7.091 7.200 7.300 7.391 7.476 7.556 7.630 7.699 7.765 7.827 8.310 8.647 9.111 9.434 9.680
16 4.599 5.261 5.674 5.976 6.216 6.413 6.582 6.729 6.859 6.975 7.081 7.178 7.267 7.349 7.426 7.498 7.565 7.629 7.689 8.157 8.483 8.933 9.246 9.486
17 4.557 5.205 5.608 5.903 6.136 6.329 6.493 6.636 6.762 6.876 6.978 7.072 7.159 7.239 7.313 7.383 7.449 7.510 7.569 8.024 8.341 8.779 9.083 9.316
18 4.521 5.156 5.550 5.839 6.067 6.255 6.415 6.554 6.677 6.788 6.888 6.980 7.064 7.142 7.215 7.283 7.347 7.407 7.464 7.908 8.216 8.643 8.940 9.167
19 4.488 5.112 5.500 5.782 6.005 6.189 6.346 6.482 6.603 6.711 6.809 6.898 6.981 7.057 7.128 7.194 7.257 7.316 7.371 7.805 8.106 8.523 8.813 9.035
20 4.460 5.074 5.455 5.732 5.951 6.131 6.285 6.418 6.536 6.642 6.738 6.826 6.906 6.981 7.051 7.116 7.177 7.234 7.289 7.713 8.008 8.416 8.700 8.917
24 4.371 4.955 5.315 5.577 5.783 5.952 6.096 6.221 6.331 6.430 6.520 6.602 6.677 6.747 6.812 6.872 6.929 6.983 7.034 7.429 7.703 8.083 8.348 8.551
30 4.285 4.841 5.181 5.428 5.621 5.779 5.914 6.031 6.134 6.226 6.310 6.386 6.456 6.521 6.581 6.638 6.691 6.740 6.787 7.154 7.409 7.760 8.005 8.193
40 4.202 4.731 5.052 5.284 5.465 5.614 5.739 5.848 5.944 6.030 6.108 6.178 6.243 6.303 6.359 6.411 6.460 6.507 6.550 6.888 7.123 7.447 7.672 7.844
60 4.122 4.625 4.928 5.146 5.316 5.454 5.571 5.673 5.762 5.841 5.913 5.979 6.039 6.094 6.146 6.194 6.239 6.281 6.321 6.632 6.846 7.142 7.347 7.504
120 4.044 4.523 4.809 5.013 5.172 5.301 5.410 5.504 5.586 5.660 5.726 5.786 5.842 5.893 5.940 5.984 6.025 6.064 6.101 6.384 6.579 6.846 7.031 7.172
1e38 3.970 4.424 4.694 4.886 5.033 5.154 5.255 5.341 5.418 5.485 5.546 5.602 5.652 5.699 5.742 5.783 5.820 5.856 5.889 6.146 6.322 6.561 6.725 6.850"""
q0999 = """\
1 900.3 1351. 1643. 1856. 2022. 2158. 2272. 2370. 2455. 2532. 2600. 2662. 2718. 2770. 2818. 2863. 2904. 2943. 2980. 3260. 3448. 3701. 3873. 4002.
2 44.69 60.42 70.77 78.43 84.49 89.46 93.67 97.30 100.5 103.3 105.9 108.2 110.4 112.3 114.2 115.9 117.4 118.9 120.3 131.0 138.3 148.0 154.7 159.7
3 18.28 23.32 26.65 29.13 31.11 32.74 34.12 35.33 36.39 37.34 38.20 38.98 39.69 40.35 40.97 41.54 42.07 42.58 43.05 46.68 49.16 52.51 54.81 56.53
4 12.18 14.98 16.84 18.23 19.34 20.26 21.04 21.73 22.33 22.87 23.36 23.81 24.21 24.59 24.94 25.27 25.58 25.87 26.14 28.24 29.68 31.65 32.98 34.00
5 9.714 11.67 12.96 13.93 14.71 15.35 15.91 16.39 16.82 17.18 17.53 17.85 18.13 18.41 18.66 18.89 19.10 19.31 19.51 21.01 22.03 23.45 24.41 25.15
6 8.427 9.960 10.97 11.72 12.32 12.82 13.25 13.63 13.96 14.26 14.53 14.78 15.00 15.21 15.41 15.59 15.78 15.94 16.09 17.28 18.10 19.22 20.00 20.58
7 7.648 8.930 9.768 10.40 10.90 11.32 11.67 11.99 12.27 12.52 12.74 12.95 13.14 13.32 13.48 13.64 13.78 13.92 14.05 15.06 15.74 16.69 17.35 17.85
8 7.129 8.250 8.977 9.522 9.958 10.32 10.63 10.90 11.15 11.36 11.56 11.74 11.91 12.06 12.20 12.34 12.46 12.58 12.69 13.57 14.17 15.01 15.59 16.02
9 6.761 7.768 8.419 8.906 9.295 9.619 9.896 10.14 10.35 10.55 10.72 10.89 11.03 11.17 11.30 11.42 11.53 11.64 11.74 12.52 13.07 13.82 14.34 14.74
10 6.487 7.411 8.006 8.449 8.804 9.099 9.352 9.573 9.769 9.946 10.11 10.25 10.39 10.51 10.63 10.74 10.84 10.94 11.03 11.75 12.25 12.94 13.42 13.79
11 6.275 7.135 7.687 8.098 8.426 8.699 8.933 9.137 9.319 9.482 9.630 9.766 9.891 10.01 10.12 10.22 10.31 10.40 10.49 11.15 11.61 12.25 12.70 13.03
12 6.106 6.917 7.435 7.820 8.127 8.382 8.601 8.792 8.962 9.115 9.253 9.380 9.497 9.606 9.707 9.802 9.891 9.975 10.05 10.68 11.11 11.71 12.12 12.44
13 5.969 6.740 7.231 7.595 7.885 8.126 8.332 8.513 8.673 8.817 8.948 9.068 9.178 9.280 9.376 9.465 9.549 9.629 9.704 10.29 10.70 11.27 11.66 11.96
14 5.855 6.593 7.062 7.409 7.685 7.914 8.110 8.282 8.434 8.571 8.695 8.809 8.914 9.011 9.102 9.187 9.267 9.342 9.414 9.972 10.36 10.90 11.28 11.57
15 5.760 6.470 6.920 7.252 7.517 7.736 7.924 8.088 8.234 8.364 8.483 8.592 8.692 8.785 8.872 8.953 9.030 9.102 9.170 9.703 10.08 10.59 10.95 11.23
16 5.678 6.365 6.799 7.119 7.374 7.585 7.765 7.923 8.063 8.189 8.303 8.407 8.504 8.593 8.676 8.754 8.828 8.897 8.962 9.475 9.832 10.33 10.68 10.94
17 5.608 6.274 6.695 7.004 7.250 7.454 7.629 7.781 7.916 8.037 8.147 8.248 8.341 8.427 8.507 8.583 8.653 8.720 8.783 9.277 9.623 10.10 10.44 10.69
18 5.546 6.195 6.604 6.905 7.143 7.341 7.510 7.657 7.788 7.905 8.012 8.109 8.199 8.283 8.361 8.433 8.502 8.566 8.627 9.106 9.440 9.904 10.23 10.48
19 5.492 6.126 6.524 6.817 7.049 7.241 7.405 7.549 7.676 7.790 7.893 7.988 8.075 8.156 8.232 8.302 8.369 8.431 8.491 8.955 9.279 9.729 10.04 10.29
20 5.444 6.065 6.454 6.740 6.966 7.153 7.313 7.453 7.576 7.687 7.788 7.880 7.965 8.044 8.118 8.186 8.251 8.312 8.370 8.821 9.136 9.575 9.881 10.12
24 5.297 5.877 6.238 6.502 6.711 6.884 7.031 7.159 7.272 7.374 7.467 7.551 7.629 7.701 7.768 7.831 7.890 7.946 7.999 8.411 8.699 9.100 9.380 9.595
30 5.156 5.698 6.033 6.277 6.469 6.628 6.763 6.880 6.984 7.077 7.161 7.239 7.310 7.375 7.437 7.494 7.548 7.598 7.646 8.021 8.283 8.646 8.901 9.096
40 5.022 5.527 5.838 6.063 6.240 6.385 6.509 6.616 6.710 6.795 6.872 6.942 7.007 7.066 7.122 7.174 7.223 7.268 7.312 7.651 7.887 8.214 8.442 8.618
60 4.893 5.365 5.653 5.860 6.022 6.155 6.268 6.365 6.451 6.528 6.598 6.661 6.720 6.773 6.824 6.870 6.914 6.956 6.995 7.299 7.510 7.802 8.005 8.161
120 4.771 5.211 5.476 5.667 5.815 5.937 6.039 6.128 6.206 6.275 6.338 6.395 6.448 6.496 6.541 6.583 6.623 6.660 6.695 6.966 7.153 7.410 7.589 7.726
1e38 4.654 5.063 5.309 5.484 5.619 5.730 5.823 5.903 5.973 6.036 6.092 6.144 6.191 6.234 6.274 6.312 6.347 6.380 6.411 6.651 6.816 7.041 7.196 7.314"""
# Build the T 'matrix': a dict of dicts of lists, indexed as
#   T[alpha][v] -> the table row for that (alpha, v) as a list of floats
T = dict([(0.100, dict([(float(L.split()[0]),
lmap(float, L.split()[1:])) for L in q0100.split('\n')])),
(0.500, dict([(float(L.split()[0]),
lmap(float, L.split()[1:])) for L in q0500.split('\n')])),
(0.675, dict([(float(L.split()[0]),
lmap(float, L.split()[1:])) for L in q0675.split('\n')])),
(0.750, dict([(float(L.split()[0]),
lmap(float, L.split()[1:])) for L in q0750.split('\n')])),
(0.800, dict([(float(L.split()[0]),
lmap(float, L.split()[1:])) for L in q0800.split('\n')])),
(0.850, dict([(float(L.split()[0]),
lmap(float, L.split()[1:])) for L in q0850.split('\n')])),
(0.900, dict([(float(L.split()[0]),
lmap(float, L.split()[1:])) for L in q0900.split('\n')])),
(0.950, dict([(float(L.split()[0]),
lmap(float, L.split()[1:])) for L in q0950.split('\n')])),
(0.975, dict([(float(L.split()[0]),
lmap(float, L.split()[1:])) for L in q0975.split('\n')])),
(0.990, dict([(float(L.split()[0]),
lmap(float, L.split()[1:])) for L in q0990.split('\n')])),
(0.995, dict([(float(L.split()[0]),
lmap(float, L.split()[1:])) for L in q0995.split('\n')])),
(0.999, dict([(float(L.split()[0]),
lmap(float, L.split()[1:])) for L in q0999.split('\n')]))])
# This dict maps r values to the correct list index
R = dict(zip([2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,
17,18,19,20,30,40,60,80,100], lrange(24)))
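# Example lookup (illustrative): the 0.95 studentized-range quantile for r = 3
# groups and v = 10 degrees of freedom is T[0.950][10.][R[3]]; R[3] == 1 picks
# the second entry of the v = 10 row parsed from q0950 (about 3.877).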
inf = np.inf
# inverse survival functions for Student's t and the standard normal
_tinv = scipy.stats.t.isf
_phi = scipy.stats.norm.isf
# Now we can build the A 'matrix' of fit coefficients.
# qhat below is the model used for the least squares fitting.
def qhat(a, p, r, v):
# eq. 2.3
p_ = (1. + p) /2.
f = a[0]*np.log(r-1.) + \
a[1]*np.log(r-1.)**2 + \
a[2]*np.log(r-1.)**3 + \
a[3]*np.log(r-1.)**4
    # eq. 2.7 and 2.8 corrections; both apply only in the r == 3 case
    for i, r_ in enumerate(r):
        if r_ == 3:
            f[i] += -0.002 / (1. + 12. * _phi(p)**2)
            if v <= 4.364:
                f[i] += 1./517. - 1./(312.*v)
            else:
                f[i] += 1./(191.*v)
return math.sqrt(2) * (f - 1.) * _tinv(p_, v)
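# errfunc is the residual for the least squares fit below: the fitted
# approximation minus the tabled quantile, evaluated over all r at a fixed (p, v)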
errfunc = lambda a, p, r, v, q: qhat(a, p, r, v) - q
A = {}  # maps (p, v) -> the four fitted coefficients from the least squares fit
for p in T:
for v in T[p]:
#eq. 2.4
a0 = random(4)
a1, success = leastsq(errfunc, a0,
args=(p, np.array(list(R.keys())),
v, np.array(T[p][v])))
if v == 1e38:
A[(p,inf)] = list(a1)
else:
A[(p,v)] = list(a1)
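# Guard against accidental import: this file is a one-off generator for the
# coefficient table, not a library module, so importing it is made to fail.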
raise ImportError("we do not want to import this")
# uncomment the lines below to repr-ize A
##import pprint
##pprint.pprint(A, width=160)
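## quick sanity check of one fit (uncomment these lines and comment out the
## raise above before running):
##p, v = 0.950, 10.
##rs = np.array(list(R.keys()))
##print(np.round(qhat(np.array(A[(p, v)]), p, rs, v), 3))
##print(T[p][v]) # should closely match the line above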
# === /e2e_tests/tests/cluster/test_job_queue.py (determined-ai/determined, Apache-2.0) ===
import subprocess
from time import sleep
from typing import Dict, List, Tuple
import pytest
# from determined.experimental import Determined, ModelSortBy
from tests import config as conf
from tests import experiment as exp
@pytest.mark.e2e_cpu
def test_job_queue_adjust_weight() -> None:
config = conf.tutorials_path("mnist_pytorch/const.yaml")
model = conf.tutorials_path("mnist_pytorch")
for _ in range(2):
exp.create_experiment(config, model)
jobs = JobInfo()
ok = jobs.refresh_until_populated()
assert ok
ordered_ids = jobs.get_ids()
subprocess.run(["det", "job", "update", ordered_ids[0], "--weight", "10"])
sleep(2)
jobs.refresh()
new_weight = jobs.get_job_weight(ordered_ids[0])
assert new_weight == "10"
subprocess.run(["det", "job", "update-batch", f"{ordered_ids[1]}.weight=10"])
sleep(2)
jobs.refresh()
new_weight = jobs.get_job_weight(ordered_ids[1])
assert new_weight == "10"
def get_raw_data() -> Tuple[List[Dict[str, str]], List[str]]:
data = []
ordered_ids = []
output = subprocess.check_output(["det", "job", "list"]).decode("utf-8")
lines = output.split("\n")
keys = [line.strip() for line in lines[0].split("|")]
    for line in lines[2:]:
        if not line.strip():
            continue  # skip the trailing blank line left by split("\n")
line_dict = {}
for i, field in enumerate(line.split("|")):
if keys[i] == "ID":
ordered_ids.append(field.strip())
line_dict[keys[i]] = field.strip()
data.append(line_dict)
return data, ordered_ids
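# get_raw_data assumes `det job list` prints a pipe-separated table along the
# lines of (column names and widths are illustrative):
#   ID           | Type  | ... | Weight
#   -------------+-------+-----+-------
#   0d714127-... | TRIAL | ... | 1
# i.e. row 0 carries the headers and data rows start at index 2, after the
# separator row.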
class JobInfo:
def __init__(self) -> None:
self.values, self.ids = get_raw_data()
def refresh(self) -> None:
self.values, self.ids = get_raw_data()
def refresh_until_populated(self, retries: int = 10) -> bool:
while retries > 0:
retries -= 1
if len(self.ids) > 0:
return True
sleep(0.5)
self.refresh()
print("self.ids remains empty")
return False
    def get_ids(self) -> List[str]:
return self.ids
def get_job_weight(self, jobID: str) -> str:
for value_dict in self.values:
if value_dict["ID"] != jobID:
continue
return value_dict["Weight"]
return ""
# === /pacu/modules/route53__enum/main.py (RhinoSecurityLabs/pacu, BSD-3-Clause) ===
#!/usr/bin/env python3
import argparse
from botocore.exceptions import ClientError
module_info = {
# Name of the module (should be the same as the filename).
"name": "route53__enum",
# Name and any other notes about the author.
"author": "Aaron Rea - Scalesec",
# Category of the module. Make sure the name matches an existing category.
"category": "ENUM",
# One liner description of the module functionality. This shows up when a
# user searches for modules.
"one_liner": "Enumerates Route53 hosted zones and query logging configurations",
# Full description about what the module does and how it works.
"description": "This module enumerates Route53 hosted zones across an account and correlates them with query logging configs for later use.",
# A list of AWS services that the module utilizes during its execution.
"services": ["Route53"],
# For prerequisite modules, try and see if any existing modules return the
# data that is required for your module before writing that code yourself;
# that way, session data can stay separated and modular.
"prerequisite_modules": [],
# External resources that the module depends on. Valid options are either
# a GitHub URL (must end in .git), or a single file URL.
"external_dependencies": [],
# Module arguments to autocomplete when the user hits tab.
"arguments_to_autocomplete": ["--get_query_logging_config"],
}
parser = argparse.ArgumentParser(add_help=False, description=module_info["description"])
def get_hosted_zones(client):
hosted_zones = []
paginator = client.get_paginator("list_hosted_zones")
for hosted_zone in paginator.paginate():
hosted_zones += hosted_zone["HostedZones"]
zones = {}
if len(hosted_zones) > 0:
for zone in hosted_zones:
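            # zone["Id"] has the form "/hostedzone/ZABC123", so element 2 of
            # the "/" split is the bare zone ID used as the dict key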
zid = zone["Id"].split("/")[2]
print(
f"ZoneID: {zid} Name: {zone['Name']} Private: {zone['Config']['PrivateZone']} "
)
zones[zid] = zone
else:
print("No HostedZones found")
return zones
def get_resource_record_sets_for_zone_id(client, hosted_zone_id):
record_sets = {}
all_records_for_zone = []
paginator = client.get_paginator("list_resource_record_sets")
for resource_records in paginator.paginate(HostedZoneId=hosted_zone_id):
all_records_for_zone += resource_records["ResourceRecordSets"]
record_sets[hosted_zone_id] = {"ResourceRecordSets": all_records_for_zone}
    # check the record list itself; record_sets[hosted_zone_id] is a one-key
    # dict, so its len() is always 1 and would make this branch unconditional
    if all_records_for_zone:
print(f"ResourceRecordSets for {hosted_zone_id}:")
for record in record_sets[hosted_zone_id]["ResourceRecordSets"]:
print(f"Name: {record['Name']} Type: {record['Type']}")
else:
print("No ResourceRecordSets found")
return record_sets
def get_query_logging_config(client):
configs = client.list_query_logging_configs()["QueryLoggingConfigs"]
if len(configs) > 0:
print("QueryLoggingConfigs:")
for con in configs:
print(
f"ZoneID: {con['HostedZoneId']} :: CloudWatchLogsLogGroupArn: {con['CloudWatchLogsLogGroupArn']}"
)
else:
print("No QueryLoggingConfigs found")
return configs
def zones_plus_config_and_records(zones, configs, records):
for con in configs:
if con["HostedZoneId"] in zones.keys():
zones[con["HostedZoneId"]].update(
{"CloudWatchLogsLogGroupArn": con["CloudWatchLogsLogGroupArn"]}
)
zones[con["HostedZoneId"]].update({"QueryLoggingConfigId": con["Id"]})
for zone_id in records.keys():
zones[zone_id].update(records[zone_id])
return zones
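# Illustrative shape of the merged result (field values are made up):
# {
#     "ZABC123": {
#         "Id": "/hostedzone/ZABC123", "Name": "example.com.", ...,
#         "CloudWatchLogsLogGroupArn": "arn:aws:logs:...",  # only when logging is configured
#         "QueryLoggingConfigId": "...",
#         "ResourceRecordSets": [{"Name": "example.com.", "Type": "NS", ...}],
#     },
# }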
def main(args, pacu_main):
session = pacu_main.get_active_session()
print = pacu_main.print
args = parser.parse_args(args)
try:
client = pacu_main.get_boto3_client("route53")
except ClientError as error:
print(f"Failed to initialize boto client for route53: {error}")
data = {}
try:
zones = get_hosted_zones(client=client)
except ClientError as error:
print(f"Failed to list R53 Hosted Zones: {error}")
return
try:
confs = get_query_logging_config(client=client)
except ClientError as error:
print(f"Failed to list R53 Hosted Zone Query Logging Configurations: {error}")
return
records = {}
for hosted_zone_id in zones.keys():
try:
records_for_zone = get_resource_record_sets_for_zone_id(
client=client, hosted_zone_id=hosted_zone_id
)
except ClientError as error:
print(f"Failed to list R53 Resource Record Sets: {error}")
continue
records.update(records_for_zone)
data = zones_plus_config_and_records(zones=zones, configs=confs, records=records)
session.update(pacu_main.database, Route53=data)
return data
def summary(data, pacu_main):
if len(data) > 0:
hosted_zone_count = len(data)
total_records = 0
for zone_id in data.keys():
total_records += data[zone_id]["ResourceRecordSetCount"]
return f"Found {hosted_zone_count} hosted zones.\nFound {total_records} resource records."
else:
return "No hosted zones found."
|
72c3e00ca60b0874870786b121d3658c6a881d32
|
84724b34b3f1e84dc53cbca5f3660590dbc34a9f
|
/nova/tests/functional/db/test_quotas.py
|
a6ad5af517ff9cfaceeccacd6841b99cf788de59
|
[
"Apache-2.0"
] |
permissive
|
openstack/nova
|
2c24b64e3677595611715bae6dda14edd3f90a24
|
065c5906d2da3e2bb6eeb3a7a15d4cd8d98b35e9
|
refs/heads/master
| 2023-08-28T15:10:05.126314
| 2023-08-25T20:31:27
| 2023-08-25T20:31:27
| 790,031
| 2,287
| 2,320
|
Apache-2.0
| 2023-07-08T02:10:29
| 2010-07-22T02:04:27
|
Python
|
UTF-8
|
Python
| false
| false
| 16,573
|
py
|
test_quotas.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import context
from nova.db.main import api as db_api
from nova import exception
from nova.objects import quotas
from nova import test
from nova.tests.unit.db.main import test_api as test_db_api
class QuotasObjectTestCase(test.TestCase,
test_db_api.ModelsObjectComparatorMixin):
def setUp(self):
super(QuotasObjectTestCase, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
def test_create_class(self):
created = quotas.Quotas._create_class_in_db(self.context, 'foo',
'cores', 10)
db_class = quotas.Quotas._get_class_from_db(self.context, 'foo',
'cores')
self._assertEqualObjects(created, db_class)
def test_create_class_exists(self):
quotas.Quotas._create_class_in_db(self.context, 'foo', 'cores', 10)
self.assertRaises(exception.QuotaClassExists,
quotas.Quotas._create_class_in_db, self.context,
'foo', 'cores', 10)
def test_update_class(self):
created = quotas.Quotas._create_class_in_db(self.context, 'foo',
'cores', 10)
quotas.Quotas._update_class_in_db(self.context, 'foo', 'cores', 20)
db_class = quotas.Quotas._get_class_from_db(self.context, 'foo',
'cores')
# Should have a limit of 20 now
created['hard_limit'] = 20
self._assertEqualObjects(created, db_class, ignored_keys='updated_at')
def test_update_class_not_found(self):
self.assertRaises(exception.QuotaClassNotFound,
quotas.Quotas._update_class_in_db, self.context,
'foo', 'cores', 20)
def test_create_per_project_limit(self):
created = quotas.Quotas._create_limit_in_db(self.context,
'fake-project',
'fixed_ips', 10)
db_limit = quotas.Quotas._get_from_db(self.context, 'fake-project',
'fixed_ips')
self._assertEqualObjects(created, db_limit)
def test_create_per_user_limit(self):
created = quotas.Quotas._create_limit_in_db(self.context,
'fake-project', 'cores',
10, user_id='fake-user')
db_limit = quotas.Quotas._get_from_db(self.context, 'fake-project',
'cores', user_id='fake-user')
self._assertEqualObjects(created, db_limit)
def test_create_limit_duplicate(self):
quotas.Quotas._create_limit_in_db(self.context, 'fake-project',
'cores', 10)
self.assertRaises(exception.QuotaExists,
quotas.Quotas._create_limit_in_db, self.context,
'fake-project', 'cores', 20)
def test_update_per_project_limit(self):
created = quotas.Quotas._create_limit_in_db(self.context,
'fake-project',
'fixed_ips', 10)
quotas.Quotas._update_limit_in_db(self.context, 'fake-project',
'fixed_ips', 20)
db_limit = quotas.Quotas._get_from_db(self.context, 'fake-project',
'fixed_ips')
# Should have a limit of 20 now
created['hard_limit'] = 20
self._assertEqualObjects(created, db_limit, ignored_keys='updated_at')
def test_update_per_project_limit_not_found(self):
self.assertRaises(exception.ProjectQuotaNotFound,
quotas.Quotas._update_limit_in_db, self.context,
'fake-project', 'fixed_ips', 20)
def test_update_per_user_limit(self):
created = quotas.Quotas._create_limit_in_db(self.context,
'fake-project', 'cores',
10, user_id='fake-user')
quotas.Quotas._update_limit_in_db(self.context, 'fake-project',
'cores', 20, user_id='fake-user')
db_limit = quotas.Quotas._get_from_db(self.context, 'fake-project',
'cores', user_id='fake-user')
# Should have a limit of 20 now
created['hard_limit'] = 20
self._assertEqualObjects(created, db_limit, ignored_keys='updated_at')
def test_update_per_user_limit_not_found(self):
self.assertRaises(exception.ProjectUserQuotaNotFound,
quotas.Quotas._update_limit_in_db, self.context,
'fake-project', 'cores', 20, user_id='fake-user')
def test_get_per_project_limit_not_found(self):
self.assertRaises(exception.ProjectQuotaNotFound,
quotas.Quotas._get_from_db, self.context,
'fake-project', 'fixed_ips')
def test_get_per_user_limit_not_found(self):
self.assertRaises(exception.ProjectUserQuotaNotFound,
quotas.Quotas._get_from_db, self.context,
'fake-project', 'cores', user_id='fake-user')
def test_get_all_per_user_limits(self):
created = []
created.append(quotas.Quotas._create_limit_in_db(self.context,
'fake-project',
'cores', 10,
user_id='fake-user'))
created.append(quotas.Quotas._create_limit_in_db(self.context,
'fake-project', 'ram',
8192,
user_id='fake-user'))
db_limits = quotas.Quotas._get_all_from_db(self.context,
'fake-project')
for i, db_limit in enumerate(db_limits):
self._assertEqualObjects(created[i], db_limit)
def test_get_all_per_project_limits_by_project(self):
quotas.Quotas._create_limit_in_db(self.context, 'fake-project',
'fixed_ips', 20)
quotas.Quotas._create_limit_in_db(self.context, 'fake-project',
'floating_ips', 10)
limits_dict = quotas.Quotas._get_all_from_db_by_project(self.context,
'fake-project')
self.assertEqual('fake-project', limits_dict['project_id'])
self.assertEqual(20, limits_dict['fixed_ips'])
self.assertEqual(10, limits_dict['floating_ips'])
def test_get_all_per_user_limits_by_project_and_user(self):
quotas.Quotas._create_limit_in_db(self.context, 'fake-project',
'instances', 5, user_id='fake-user')
quotas.Quotas._create_limit_in_db(self.context, 'fake-project',
'cores', 10, user_id='fake-user')
limits_dict = quotas.Quotas._get_all_from_db_by_project_and_user(
self.context, 'fake-project', 'fake-user')
self.assertEqual('fake-project', limits_dict['project_id'])
self.assertEqual('fake-user', limits_dict['user_id'])
self.assertEqual(5, limits_dict['instances'])
self.assertEqual(10, limits_dict['cores'])
def test_destroy_per_project_and_per_user_limits(self):
# per user limit
quotas.Quotas._create_limit_in_db(self.context, 'fake-project',
'instances', 5, user_id='fake-user')
# per project limit
quotas.Quotas._create_limit_in_db(self.context, 'fake-project',
'fixed_ips', 10)
quotas.Quotas._destroy_all_in_db_by_project(self.context,
'fake-project')
self.assertRaises(exception.ProjectUserQuotaNotFound,
quotas.Quotas._get_from_db, self.context,
'fake-project', 'instances', user_id='fake-user')
self.assertRaises(exception.ProjectQuotaNotFound,
quotas.Quotas._get_from_db, self.context,
'fake-project', 'fixed_ips')
def test_destroy_per_project_and_per_user_limits_not_found(self):
self.assertRaises(exception.ProjectQuotaNotFound,
quotas.Quotas._destroy_all_in_db_by_project,
self.context, 'fake-project')
def test_destroy_per_user_limits(self):
quotas.Quotas._create_limit_in_db(self.context, 'fake-project',
'instances', 5, user_id='fake-user')
quotas.Quotas._destroy_all_in_db_by_project_and_user(self.context,
'fake-project',
'fake-user')
self.assertRaises(exception.ProjectUserQuotaNotFound,
quotas.Quotas._get_from_db, self.context,
'fake-project', 'instances', user_id='fake-user')
def test_destroy_per_user_limits_not_found(self):
self.assertRaises(
exception.ProjectUserQuotaNotFound,
quotas.Quotas._destroy_all_in_db_by_project_and_user,
self.context, 'fake-project', 'fake-user')
def test_get_class_not_found(self):
self.assertRaises(exception.QuotaClassNotFound,
quotas.Quotas._get_class_from_db, self.context,
'foo', 'cores')
def test_get_all_class_by_name(self):
quotas.Quotas._create_class_in_db(self.context, 'foo', 'instances', 5)
quotas.Quotas._create_class_in_db(self.context, 'foo', 'cores', 10)
limits_dict = quotas.Quotas._get_all_class_from_db_by_name(
self.context, 'foo')
self.assertEqual('foo', limits_dict['class_name'])
self.assertEqual(5, limits_dict['instances'])
self.assertEqual(10, limits_dict['cores'])
def test_migrate_quota_limits(self):
# Create a limit in api db
quotas.Quotas._create_limit_in_db(self.context, 'fake-project',
'instances', 5, user_id='fake-user')
# Create 4 limits in main db
db_api.quota_create(self.context, 'fake-project', 'cores', 10,
user_id='fake-user')
db_api.quota_create(self.context, 'fake-project', 'ram', 8192,
user_id='fake-user')
db_api.quota_create(self.context, 'fake-project', 'fixed_ips', 10)
db_api.quota_create(self.context, 'fake-project', 'floating_ips', 10)
# Migrate with a count/limit of 3
total, done = quotas.migrate_quota_limits_to_api_db(self.context, 3)
self.assertEqual(3, total)
self.assertEqual(3, done)
# This only fetches from the api db. There should now be 4 limits.
api_user_limits = quotas.Quotas._get_all_from_db(self.context,
'fake-project')
api_proj_limits_dict = quotas.Quotas._get_all_from_db_by_project(
self.context, 'fake-project')
api_proj_limits_dict.pop('project_id', None)
self.assertEqual(4,
len(api_user_limits) + len(api_proj_limits_dict))
# This only fetches from the main db. There should be one left.
main_user_limits = db_api.quota_get_all(self.context, 'fake-project')
main_proj_limits_dict = db_api.quota_get_all_by_project(self.context,
'fake-project')
main_proj_limits_dict.pop('project_id', None)
self.assertEqual(1, len(main_user_limits) + len(main_proj_limits_dict))
self.assertEqual((1, 1),
quotas.migrate_quota_limits_to_api_db(
self.context, 100))
self.assertEqual((0, 0),
quotas.migrate_quota_limits_to_api_db(
self.context, 100))
def test_migrate_quota_limits_skips_existing(self):
quotas.Quotas._create_limit_in_db(self.context, 'fake-project',
'instances', 5, user_id='fake-user')
db_api.quota_create(self.context, 'fake-project', 'instances', 5,
user_id='fake-user')
total, done = quotas.migrate_quota_limits_to_api_db(
self.context, 100)
self.assertEqual(1, total)
self.assertEqual(1, done)
total, done = quotas.migrate_quota_limits_to_api_db(
self.context, 100)
self.assertEqual(0, total)
self.assertEqual(0, done)
self.assertEqual(1, len(quotas.Quotas._get_all_from_db(
self.context, 'fake-project')))
def test_migrate_quota_classes(self):
# Create a class in api db
quotas.Quotas._create_class_in_db(self.context, 'foo', 'instances', 5)
# Create 3 classes in main db
db_api.quota_class_create(self.context, 'foo', 'cores', 10)
db_api.quota_class_create(self.context, db_api._DEFAULT_QUOTA_NAME,
'instances', 10)
db_api.quota_class_create(self.context, 'foo', 'ram', 8192)
total, done = quotas.migrate_quota_classes_to_api_db(self.context, 2)
self.assertEqual(2, total)
self.assertEqual(2, done)
# This only fetches from the api db
api_foo_dict = quotas.Quotas._get_all_class_from_db_by_name(
self.context, 'foo')
api_foo_dict.pop('class_name', None)
api_default_dict = quotas.Quotas._get_all_class_from_db_by_name(
self.context, db_api._DEFAULT_QUOTA_NAME)
api_default_dict.pop('class_name', None)
self.assertEqual(3,
len(api_foo_dict) + len(api_default_dict))
# This only fetches from the main db
main_foo_dict = db_api.quota_class_get_all_by_name(self.context, 'foo')
main_foo_dict.pop('class_name', None)
main_default_dict = db_api.quota_class_get_default(self.context)
main_default_dict.pop('class_name', None)
self.assertEqual(1, len(main_foo_dict) + len(main_default_dict))
self.assertEqual((1, 1),
quotas.migrate_quota_classes_to_api_db(
self.context, 100))
self.assertEqual((0, 0),
quotas.migrate_quota_classes_to_api_db(
self.context, 100))
def test_migrate_quota_classes_skips_existing(self):
quotas.Quotas._create_class_in_db(self.context, 'foo-class',
'instances', 5)
db_api.quota_class_create(self.context, 'foo-class', 'instances', 7)
total, done = quotas.migrate_quota_classes_to_api_db(
self.context, 100)
self.assertEqual(1, total)
self.assertEqual(1, done)
total, done = quotas.migrate_quota_classes_to_api_db(
self.context, 100)
self.assertEqual(0, total)
self.assertEqual(0, done)
# Existing class should not be overwritten in the result
db_class = quotas.Quotas._get_all_class_from_db_by_name(
self.context, 'foo-class')
self.assertEqual(5, db_class['instances'])
|
bb511785f3010863589739d8e6bb608dd265772a
|
017090be7ab186cb6b47f49e1066ac5cfec3a542
|
/tests/unit/neptune/new/attributes/series/test_series.py
|
5ae07204c7c3a69f53816ad7c3f4d4bcbd92b92b
|
[
"Apache-2.0"
] |
permissive
|
neptune-ai/neptune-client
|
9a79f9d93c84b3a20114e6e49a80652930399ece
|
9b697ce548634c30dbc5881d4a0b223c8987515d
|
refs/heads/master
| 2023-08-18T01:48:22.634432
| 2023-08-17T11:55:57
| 2023-08-17T11:55:57
| 170,117,229
| 408
| 55
|
Apache-2.0
| 2023-09-13T12:51:03
| 2019-02-11T11:25:57
|
Python
|
UTF-8
|
Python
| false
| false
| 7,467
|
py
|
test_series.py
|
#
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mock import (
MagicMock,
call,
patch,
)
from neptune.attributes.series.float_series import (
FloatSeries,
FloatSeriesVal,
)
from neptune.attributes.series.string_series import (
StringSeries,
StringSeriesVal,
)
from neptune.internal.operation import (
ClearFloatLog,
ClearStringLog,
ConfigFloatSeries,
LogFloats,
)
from tests.unit.neptune.new.attributes.test_attribute_base import TestAttributeBase
@patch("time.time", new=TestAttributeBase._now)
class TestSeries(TestAttributeBase):
@patch("neptune.metadata_containers.metadata_container.get_operation_processor")
def test_assign(self, get_operation_processor):
value = FloatSeriesVal([17, 3.6], min=0, max=100, unit="%")
expected = [
LogFloats.ValueType(17, None, self._now()),
LogFloats.ValueType(3.6, None, self._now()),
]
processor = MagicMock()
get_operation_processor.return_value = processor
path, wait = (
self._random_path(),
self._random_wait(),
)
with self._exp() as exp:
var = FloatSeries(exp, path)
var.assign(value, wait=wait)
processor.enqueue_operation.assert_has_calls(
[
call(ConfigFloatSeries(path, min=0, max=100, unit="%"), wait=False),
call(ClearFloatLog(path), wait=False),
call(LogFloats(path, expected), wait=wait),
]
)
@patch("neptune.metadata_containers.metadata_container.get_operation_processor")
def test_assign_empty(self, get_operation_processor):
processor = MagicMock()
get_operation_processor.return_value = processor
with self._exp() as exp:
path, wait = (
self._random_path(),
self._random_wait(),
)
var = StringSeries(exp, path)
var.assign(StringSeriesVal([]), wait=wait)
processor.enqueue_operation.assert_called_with(ClearStringLog(path), wait=wait)
@patch("neptune.metadata_containers.metadata_container.get_operation_processor")
def test_log(self, get_operation_processor):
value_and_expected = [
(13, [LogFloats.ValueType(13, None, self._now())]),
(15.3, [LogFloats.ValueType(15.3, None, self._now())]),
(
[1, 9, 7],
[
LogFloats.ValueType(1, None, self._now()),
LogFloats.ValueType(9, None, self._now()),
LogFloats.ValueType(7, None, self._now()),
],
),
(
(1, 9, 7),
[
LogFloats.ValueType(1, None, self._now()),
LogFloats.ValueType(9, None, self._now()),
LogFloats.ValueType(7, None, self._now()),
],
),
(
{1, 9, 7},
[
LogFloats.ValueType(1, None, self._now()),
LogFloats.ValueType(9, None, self._now()),
LogFloats.ValueType(7, None, self._now()),
],
),
]
for value, expected in value_and_expected:
processor = MagicMock()
get_operation_processor.return_value = processor
with self._exp() as exp:
path, wait = (
self._random_path(),
self._random_wait(),
)
var = FloatSeries(exp, path)
var.log(value, wait=wait)
processor.enqueue_operation.assert_called_with(LogFloats(path, expected), wait=wait)
@patch("neptune.metadata_containers.metadata_container.get_operation_processor")
def test_log_with_step(self, get_operation_processor):
value_step_and_expected = [
(13, 5.3, LogFloats.ValueType(13, 5.3, self._now())),
(15.3, 10, LogFloats.ValueType(15.3, 10, self._now())),
([13], 5.3, LogFloats.ValueType(13, 5.3, self._now())),
((13,), 5.3, LogFloats.ValueType(13, 5.3, self._now())),
({13}, 5.3, LogFloats.ValueType(13, 5.3, self._now())),
]
for value, step, expected in value_step_and_expected:
processor = MagicMock()
get_operation_processor.return_value = processor
with self._exp() as exp:
path, wait = (
self._random_path(),
self._random_wait(),
)
var = FloatSeries(exp, path)
var.log(value, step=step, wait=wait)
processor.enqueue_operation.assert_called_with(LogFloats(path, [expected]), wait=wait)
@patch("neptune.metadata_containers.metadata_container.get_operation_processor")
def test_log_with_timestamp(self, get_operation_processor):
value_step_and_expected = [
(13, 5.3, LogFloats.ValueType(13, None, 5.3)),
(15.3, 10, LogFloats.ValueType(15.3, None, 10)),
]
for value, ts, expected in value_step_and_expected:
processor = MagicMock()
get_operation_processor.return_value = processor
with self._exp() as exp:
path, wait = (
self._random_path(),
self._random_wait(),
)
var = FloatSeries(exp, path)
var.log(value, timestamp=ts, wait=wait)
processor.enqueue_operation.assert_called_with(LogFloats(path, [expected]), wait=wait)
@patch("neptune.metadata_containers.metadata_container.get_operation_processor")
def test_log_value_errors(self, get_operation_processor):
processor = MagicMock()
get_operation_processor.return_value = processor
with self._exp() as exp:
attr = FloatSeries(exp, self._random_path())
with self.assertRaises(ValueError):
attr.log(["str", 5])
with self.assertRaises(ValueError):
attr.log([5, 10], step=10)
with self.assertRaises(TypeError):
attr.log(5, step="str")
with self.assertRaises(TypeError):
attr.log(5, timestamp="str")
@patch("neptune.metadata_containers.metadata_container.get_operation_processor")
def test_clear(self, get_operation_processor):
processor = MagicMock()
get_operation_processor.return_value = processor
with self._exp() as exp:
path, wait = (
self._random_path(),
self._random_wait(),
)
var = FloatSeries(exp, path)
var.clear(wait=wait)
processor.enqueue_operation.assert_called_with(ClearFloatLog(path), wait=wait)
|
d470d7814b94cec8d13ccd79c03fa23300f2fd08
|
8b6810b3aef58cf2e2d02c95b343472453f1c2a3
|
/tests/test_messages.py
|
4f99b16119d7c912ff1646cec7cc3ac902b77948
|
[
"Apache-2.0"
] |
permissive
|
jodal/pykka
|
477ed953685b439c97888066bea6c19034d9360f
|
06537a5535b1c9eddfe3535f42ac081f1874bcd1
|
refs/heads/main
| 2023-08-30T00:45:16.096965
| 2023-08-03T21:21:35
| 2023-08-03T21:21:35
| 1,079,822
| 955
| 119
|
Apache-2.0
| 2023-09-14T21:47:57
| 2010-11-14T23:45:50
|
Python
|
UTF-8
|
Python
| false
| false
| 903
|
py
|
test_messages.py
|
from pykka.messages import ProxyCall, ProxyGetAttr, ProxySetAttr, _ActorStop
def test_actor_stop() -> None:
message = _ActorStop()
assert isinstance(message, _ActorStop)
def test_proxy_call() -> None:
message = ProxyCall(attr_path=("nested", "method"), args=(1,), kwargs={"a": "b"})
assert isinstance(message, ProxyCall)
assert message.attr_path == ("nested", "method")
assert message.args == (1,)
assert message.kwargs == {"a": "b"}
def test_proxy_get_attr() -> None:
message = ProxyGetAttr(attr_path=("nested", "attr"))
assert isinstance(message, ProxyGetAttr)
assert message.attr_path == ("nested", "attr")
def test_proxy_set_attr() -> None:
message = ProxySetAttr(attr_path=("nested", "attr"), value="abcdef")
assert isinstance(message, ProxySetAttr)
assert message.attr_path == ("nested", "attr")
assert message.value == "abcdef"
|
b3b09c5010cc1eeb63b1c001ec1fa0fda57f3a10
|
391fb5b11425d59ea917c6fed51fe1fa9c672764
|
/opytimizer/optimizers/swarm/fso.py
|
013ccd3e5e12dbade027fb908a3fbe27b7524705
|
[
"Apache-2.0"
] |
permissive
|
gugarosa/opytimizer
|
89e60d582dee9e31b1723e35d08103d7f8f5d3e1
|
7326a887ed8e3858bc99c8815048d56d02edf88c
|
refs/heads/master
| 2023-08-01T08:09:12.055317
| 2023-05-11T15:21:58
| 2023-05-11T15:21:58
| 109,152,650
| 602
| 45
|
Apache-2.0
| 2023-09-07T14:26:13
| 2017-11-01T16:04:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,198
|
py
|
fso.py
|
"""Flying Squirrel Optimizer.
"""
import copy
from typing import Any, Dict, Optional
import numpy as np
import opytimizer.math.distribution as d
import opytimizer.math.random as r
import opytimizer.utils.exception as e
from opytimizer.core import Optimizer
from opytimizer.core.function import Function
from opytimizer.core.space import Space
from opytimizer.utils import logging
logger = logging.get_logger(__name__)
class FSO(Optimizer):
"""A FSO class, inherited from Optimizer.
This is the designed class to define FSO-related
variables and methods.
References:
G. Azizyan et al.
Flying Squirrel Optimizer (FSO): A novel SI-based optimization algorithm for engineering problems.
Iranian Journal of Optimization (2019).
"""
def __init__(self, params: Optional[Dict[str, Any]] = None) -> None:
"""Initialization method.
Args:
params: Contains key-value parameters to the meta-heuristics.
"""
logger.info("Overriding class: Optimizer -> FSO.")
super(FSO, self).__init__()
self.beta = 0.5
self.build(params)
logger.info("Class overrided.")
@property
def beta(self) -> float:
"""Lévy distribution parameter."""
return self._beta
@beta.setter
def beta(self, beta: float) -> None:
if not isinstance(beta, (float, int)):
raise e.TypeError("`beta` should be a float or integer")
if beta <= 0 or beta > 2:
raise e.ValueError("`beta` should be between 0 and 2")
self._beta = beta
def update(
self, space: Space, function: Function, iteration: int, n_iterations: int
) -> None:
"""Wraps Flying Squirrel Optimizer over all agents and variables.
Args:
space: Space containing agents and update-related information.
function: A Function object that will be used as the objective function.
iteration: Current iteration.
n_iterations: Maximum number of iterations.
"""
mean_position = np.mean([agent.position for agent in space.agents], axis=0)
# Calculates the Sigma Reduction Factor (eq. 5)
SRF = (-np.log(1 - (1 / np.sqrt(iteration + 2)))) ** 2
# Calculates the Beta Expansion Factor
BEF = self.beta + (2 - self.beta) * ((iteration + 1) / n_iterations)
for agent in space.agents:
a = copy.deepcopy(agent)
for j in range(agent.n_variables):
# Calculates the random walk (eq. 2 and 3)
random_step = r.generate_gaussian_random_number(mean_position[j], SRF)
# Calculates the Lévy flight (eq. 6 to 18)
levy_step = d.generate_levy_distribution(BEF)
a.position[j] += (
random_step
* levy_step
* (agent.position[j] - space.best_agent.position[j])
)
a.clip_by_bound()
a.fit = function(a.position)
if a.fit < agent.fit:
agent.position = copy.deepcopy(a.position)
agent.fit = copy.deepcopy(a.fit)
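if __name__ == "__main__":
    # Editor's illustrative sketch (not part of the original library): print
    # the two scaling factors `update` computes above -- the Sigma Reduction
    # Factor (eq. 5) shrinks the gaussian walk over iterations, while the
    # Beta Expansion Factor grows the Levy exponent from `beta` toward 2.
    beta, n_iterations = 0.5, 10
    for iteration in range(n_iterations):
        srf = (-np.log(1 - (1 / np.sqrt(iteration + 2)))) ** 2
        bef = beta + (2 - beta) * ((iteration + 1) / n_iterations)
        print(f"iteration={iteration}: SRF={srf:.4f}, BEF={bef:.4f}")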
|
52dc98c401029e67d784195f5d946868bae9c3d4
|
cceaa3bd21e0402a3cf619ec746e2eba39c0905d
|
/ma_gym/envs/checkers/__init__.py
|
18a93edbaf6251300320059d372a30bd8c4c64f5
|
[
"Apache-2.0"
] |
permissive
|
koulanurag/ma-gym
|
f3785906f57a097752cf36341d98fc60e381626a
|
70d3b4d194c7e792bb1baef57d542b2acba2b71d
|
refs/heads/master
| 2023-09-02T15:09:52.841704
| 2023-08-16T16:46:24
| 2023-08-16T16:46:24
| 191,194,671
| 460
| 90
|
Apache-2.0
| 2023-05-29T16:28:07
| 2019-06-10T15:28:41
|
Python
|
UTF-8
|
Python
| false
| false
| 30
|
py
|
__init__.py
|
from .checkers import Checkers
|
75ed31d8625d27fb669d3b7d415752ace8dc537f
|
07434513334237d453faae8972b136f28b8d1e2c
|
/remme/rpc_api/atomic_swap.py
|
80152c48c2a5c01f6e0c87aa034a882582e30426
|
[
"Apache-2.0"
] |
permissive
|
Remmeauth/remme-core
|
5a595391659e60ce82fbe73883be13df8a52dad9
|
3a8ac8d8f6ba1a1126c028c81d350c9475fe9834
|
refs/heads/master
| 2021-03-27T14:17:19.966578
| 2019-05-21T17:00:15
| 2019-05-21T17:00:15
| 121,400,220
| 132
| 30
|
Apache-2.0
| 2021-12-29T20:13:14
| 2018-02-13T15:36:38
|
Python
|
UTF-8
|
Python
| false
| false
| 1,811
|
py
|
atomic_swap.py
|
# Copyright 2018 REMME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
import json
import logging
from google.protobuf.json_format import MessageToJson
from remme.clients.atomic_swap import AtomicSwapClient
from remme.shared.forms import ProtoForm, AtomicSwapForm
from remme.shared.exceptions import KeyNotFound
from .utils import validate_params
__all__ = (
'get_atomic_swap_info',
'get_atomic_swap_public_key',
)
LOGGER = logging.getLogger(__name__)
@validate_params(AtomicSwapForm)
async def get_atomic_swap_info(request):
client = AtomicSwapClient()
swap_id = request.params['swap_id']
try:
swap_info = await client.swap_get(swap_id)
    except KeyNotFound as e:
        raise KeyNotFound(f'Atomic swap with id "{swap_id}" not found') from e
LOGGER.info(f'Get swap info {swap_info}')
data = MessageToJson(
swap_info, preserving_proto_field_name=True,
including_default_value_fields=True
)
return json.loads(data)
@validate_params(ProtoForm)
async def get_atomic_swap_public_key(request):
client = AtomicSwapClient()
try:
return await client.get_pub_key_encryption()
except KeyNotFound:
raise KeyNotFound('Public key for atomic swap not set')
|
c65d44f99380065820894d5f4340a6fb3975d1fb
|
f729993e43a8f2031a4ad5c766e63117588b4312
|
/tests/test_registry/test_default_scope.py
|
0798f4a2c75831dd67559f3df5614fd6431dba3d
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmengine
|
d28a41c4b3dac47c58ee19b241c4b963eb14ddb6
|
170758aefe6cb05f61bf9353d03af1b8c1f4f73a
|
refs/heads/main
| 2023-08-29T18:12:33.261889
| 2023-08-28T08:15:00
| 2023-08-28T08:15:00
| 456,857,425
| 708
| 279
|
Apache-2.0
| 2023-09-14T09:23:21
| 2022-02-08T09:05:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,976
|
py
|
test_default_scope.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
import pytest
from mmengine.registry import DefaultScope
class TestDefaultScope:
def test_scope(self):
default_scope = DefaultScope.get_instance('name1', scope_name='mmdet')
assert default_scope.scope_name == 'mmdet'
# `DefaultScope.get_instance` must have `scope_name` argument.
with pytest.raises(TypeError):
DefaultScope.get_instance('name2')
def test_get_current_instance(self):
DefaultScope._instance_dict = OrderedDict()
assert DefaultScope.get_current_instance() is None
DefaultScope.get_instance('instance_name', scope_name='mmengine')
default_scope = DefaultScope.get_current_instance()
assert default_scope.scope_name == 'mmengine'
def test_overwrite_default_scope(self):
origin_scope = DefaultScope.get_instance(
'test_overwrite_default_scope', scope_name='origin_scope')
with DefaultScope.overwrite_default_scope(scope_name=None):
assert DefaultScope.get_current_instance(
).scope_name == 'origin_scope'
with DefaultScope.overwrite_default_scope(scope_name='test_overwrite'):
assert DefaultScope.get_current_instance(
).scope_name == 'test_overwrite'
assert DefaultScope.get_current_instance(
).scope_name == origin_scope.scope_name == 'origin_scope'
        # Test overwriting the default scope immediately.
        # Test sequential overwrites.
with DefaultScope.overwrite_default_scope(scope_name='test_overwrite'):
pass
with DefaultScope.overwrite_default_scope(scope_name='test_overwrite'):
pass
# Test nested overwrite.
with DefaultScope.overwrite_default_scope(scope_name='test_overwrite'):
with DefaultScope.overwrite_default_scope(
scope_name='test_overwrite'):
pass
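    def test_scope_name_roundtrip(self):
        # Editor's illustrative addition (assumed usage, mirroring the API
        # exercised above): the scope_name given at creation is what
        # get_current_instance reports afterwards.
        DefaultScope.get_instance('roundtrip_name', scope_name='mmcls')
        assert DefaultScope.get_current_instance().scope_name == 'mmcls'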
|
7f1bb946446feb6e7369868872e0c205e074ee11
|
7a5b1039d9dd2e488c361b3141226f6ae5bf76f3
|
/rsb_depth_check/plot.py
|
dfb4d6db33248cfe1af3fe8302f2ae5fa7265b85
|
[] |
no_license
|
comsec-group/retbleed
|
39066af14fb1adf7ef82754c36002f2bf1cf871f
|
32e1eecd7c7a9cb9174c1ae7ce8614a60d39b11d
|
refs/heads/master
| 2023-03-07T11:40:42.759042
| 2022-08-25T22:47:01
| 2022-08-25T22:47:01
| 505,872,974
| 112
| 10
| null | 2022-07-27T18:41:03
| 2022-06-21T14:15:23
|
C
|
UTF-8
|
Python
| false
| false
| 1,484
|
py
|
plot.py
|
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import sys
if len(sys.argv) < 3:
print(f"useage: {sys.argv[0]} <ret> <jmp>")
exit(1)
file_ret = sys.argv[1]
file_jmp = sys.argv[2]
# matplotlib.rcParams['font.family'] = 'serif'
# sb.set()
fig, ax = plt.subplots()
fig.set_figwidth(5)
fig.set_figheight(1.75)
with open(file_ret) as f:
with open(file_jmp) as f2:
lines_ret = f.readlines()[1:]
lines_jmp = f2.readlines()[1:]
nx = min(len(lines_ret), len(lines_jmp))
xs = []
ys = []
for i in range(nx):
x = i
        y = float(lines_ret[i])
#[x, y] = [int(n) for n in lines_ret[i].split(";")]
xs.append(x)
ys.append(y)
ax.plot(xs, ys, label="return")
ax.set_xlim(left=0, right=35)
xs = []
ys = []
for i in range(nx):
#[x, y] = [int(n) for n in lines_jmp[i].split(";")]
x = i
        y = float(lines_jmp[i])
xs.append(x)
ys.append(y)
ax.plot(xs, ys, label="indirect branch")
ax.set_xlim(left=0, right=35)
ax.grid()
    ax.axvline(x=16, color='0.3', linestyle='--', label="Intel RSB capacity")
    ax.legend()
    xlabel = ax.set_xlabel('# branches')
ylabel = ax.set_ylabel("mispredictions")
ax.xaxis.set_major_locator(ticker.MultipleLocator(4))
ax.yaxis.set_major_locator(ticker.MultipleLocator(14))
fig.tight_layout()
fig.savefig("ret-btb-miss.pdf")
|
6f0984a74e586fe9dbcd96c39c7ecd3340c4315e
|
03b97977ca4d2e91cb19c9c43385037eff3731ce
|
/test/TestUtilsHelpers.py
|
80658d82e0abee7f4bce07340dd677523f368d32
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
debbiemarkslab/EVcouplings
|
17f601e7498fe3e9283b2b729fab7696311575c5
|
af27842c5fc72c291831261c9a19849a3e313efd
|
refs/heads/develop
| 2023-08-03T21:29:37.533602
| 2023-03-16T15:02:13
| 2023-03-16T15:02:13
| 63,804,056
| 184
| 74
|
NOASSERTION
| 2023-08-02T14:29:38
| 2016-07-20T18:15:45
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,018
|
py
|
TestUtilsHelpers.py
|
import unittest
from unittest import TestCase
from evcouplings.utils import *
import os
import tempfile
class TestUtilsHelpers(TestCase):
def test_wrap(self):
"""
        Test whether a string is correctly wrapped
"""
out = wrap("Test", width=2)
self.assertEqual("Te\nst", out)
def test_range_overlap_noOverlapPosNumber(self):
"""
Test whether range overlaps are correctly calculated
"""
overlap = range_overlap((1,2), (3,4))
self.assertEqual(overlap, 0)
def test_range_overlap_overlapPosNumber(self):
"""
Test whether range overlaps are correctly calculated
"""
overlap = range_overlap((1, 3), (2, 4))
self.assertEqual(overlap, 1)
def test_range_overlap_start_greater_end(self):
"""
Test whether range overlaps are correctly calculated
"""
self.assertRaises(InvalidParameterError, range_overlap, (-2, -4), (-3, -1))
class TestUtilsProgressbar(TestCase):
def test_initiation(self):
p = Progressbar(10, 10)
def test_update(self):
p = Progressbar(5, 5)
for i in range(5):
p.update(i)
class TestDefaultOrderdDict(TestCase):
def test_defaultOrderedDict(self):
"""
test if order is maintained
"""
d = DefaultOrderedDict()
d["one"] = 1
d["a"] = 3
d["two"] = 2
self.assertEqual("DefaultOrderedDict([('one', 1), ('a', 3), ('two', 2)])", str(d))
class TestPersistentDict(TestCase):
def setUp(self):
self.tmp_db = tempfile.NamedTemporaryFile(delete=False)
def test_add_element(self):
"""
        Tests that adding an element does not sync the dict to the file system
"""
d = PersistentDict(self.tmp_db.name)
d["test"] = "insert"
self.assertFalse(valid_file(self.tmp_db.name))
def test_get_element(self):
"""
        Tests that an added element can be read back from the dict
"""
d = PersistentDict(self.tmp_db.name)
d["test"] = "insert"
self.assertEqual(d["test"], "insert")
def test_sync(self):
"""
        Tests that sync() writes the dict to the file system
"""
d = PersistentDict(self.tmp_db.name)
d["test"] = "insert"
d.sync()
self.assertTrue(valid_file(self.tmp_db.name))
def test_sync_empty(self):
"""
        Tests that sync() on an empty dict does not write to the file system
"""
d = PersistentDict(self.tmp_db.name)
d.sync()
self.assertFalse(valid_file(self.tmp_db.name))
def test_dump(self):
"""
        Tests that dump() writes the dict to the given file
"""
d = PersistentDict(self.tmp_db.name)
tmp2 = tempfile.NamedTemporaryFile(mode="w", delete=False)
d["test"] = "insert"
d.dump(tmp2)
tmp2.close()
self.assertTrue(valid_file(tmp2.name))
os.unlink(tmp2.name)
def test_dump_empty(self):
"""
        Tests that dump() on an empty dict does not write to the given file
"""
d = PersistentDict(self.tmp_db.name)
tmp2 = tempfile.NamedTemporaryFile(mode="w", delete=False)
d.dump(tmp2)
tmp2.close()
self.assertFalse(valid_file(tmp2.name))
os.unlink(tmp2.name)
def test_load(self):
"""
        Tests that a synced dict can be loaded back from the file system
"""
d = PersistentDict(self.tmp_db.name)
d["test"] = "insert"
d.sync()
d.close()
d2 = PersistentDict(self.tmp_db.name)
d2.load(self.tmp_db)
def tearDown(self):
os.unlink(self.tmp_db.name)
if __name__ == '__main__':
unittest.main()
|
fc8a90092fb6040203ade9f5996a15d971fff379
|
bdf0d4d3aac186af3ad0ad6ac9f380f9a0573fba
|
/aries_cloudagent/protocols/discovery/v1_0/messages/tests/test_query.py
|
18af3b6ba7f82acac2ca9acdb5365b52782bb1e4
|
[
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] |
permissive
|
hyperledger/aries-cloudagent-python
|
f25d961e0717a4d703bf43df3e4b4bc8ec07b908
|
39cac36d8937ce84a9307ce100aaefb8bc05ec04
|
refs/heads/main
| 2023-09-01T15:37:05.353674
| 2023-08-31T14:13:06
| 2023-08-31T14:13:06
| 193,556,007
| 370
| 530
|
Apache-2.0
| 2023-09-14T17:59:34
| 2019-06-24T18:12:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,536
|
py
|
test_query.py
|
from unittest import mock, TestCase
from .....didcomm_prefix import DIDCommPrefix
from ...message_types import QUERY, PROTOCOL_PACKAGE
from ..query import Query
class TestQuery(TestCase):
test_query = "*"
test_comment = "comment"
def test_init(self):
query = Query(query=self.test_query, comment=self.test_comment)
assert query.query == self.test_query
assert query.comment == self.test_comment
def test_type(self):
query = Query(query=self.test_query, comment=self.test_comment)
assert query._type == DIDCommPrefix.qualify_current(QUERY)
@mock.patch(f"{PROTOCOL_PACKAGE}.messages.query.QuerySchema.load")
def test_deserialize(self, mock_query_schema_load):
obj = {"obj": "obj"}
query = Query.deserialize(obj)
mock_query_schema_load.assert_called_once_with(obj)
assert query is mock_query_schema_load.return_value
@mock.patch(f"{PROTOCOL_PACKAGE}.messages.query.QuerySchema.dump")
def test_serialize(self, mock_query_schema_dump):
query = Query(query=self.test_query, comment=self.test_comment)
query_dict = query.serialize()
mock_query_schema_dump.assert_called_once_with(query)
assert query_dict is mock_query_schema_dump.return_value
class TestQuerySchema(TestCase):
query = Query(query="*", comment="comment")
def test_make_model(self):
data = self.query.serialize()
model_instance = Query.deserialize(data)
assert isinstance(model_instance, Query)
|
0359c94b2877805866efa681dec11e680102c4f5
|
f54739ec8ca9a9012eefda5c4759a62db3fef3c2
|
/src/api-engine/api/routes/node/serializers.py
|
de7dc9d151ade386cdda7eb2fa1a63beeadf6187
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
hyperledger/cello
|
6e615ab0df9724262ef6c2028d45f2f642254fe2
|
cb4d24347228ad9d1ae24cd0d6188bf29b1b8cbe
|
refs/heads/main
| 2023-09-03T15:33:35.844553
| 2023-08-29T03:47:41
| 2023-08-29T03:47:41
| 78,610,786
| 957
| 559
|
Apache-2.0
| 2023-09-12T00:53:55
| 2017-01-11T06:47:08
|
Python
|
UTF-8
|
Python
| false
| false
| 11,404
|
py
|
serializers.py
|
#
# SPDX-License-Identifier: Apache-2.0
#
import logging
from rest_framework import serializers
from api.common.enums import (
Operation,
)
from api.common.serializers import PageQuerySerializer
from api.models import (
Node,
Port,
FabricCA,
NodeUser,
FabricPeer,
PeerCa,
PeerCaUser,
)
LOG = logging.getLogger(__name__)
class PortSerializer(serializers.ModelSerializer):
class Meta:
model = Port
fields = ("external", "internal")
extra_kwargs = {
"external": {"required": True},
"internal": {"required": True},
}
class NodeQuery(PageQuerySerializer, serializers.ModelSerializer):
agent_id = serializers.UUIDField(
help_text="Agent ID, only operator can use this field",
required=False,
allow_null=True,
)
class Meta:
model = Node
fields = (
"page",
"per_page",
"type",
"name",
"agent_id",
)
extra_kwargs = {"type": {"required": False}}
class NodeIDSerializer(serializers.Serializer):
id = serializers.UUIDField(help_text="ID of node")
class NodeCIDSerializer(serializers.Serializer):
id = serializers.CharField(help_text="containter ID of node")
class FabricCASerializer(serializers.ModelSerializer):
hosts = serializers.ListField(
help_text="Hosts for ca support",
child=serializers.CharField(help_text="Host name", max_length=64),
required=False,
allow_empty=True,
)
class Meta:
model = FabricCA
fields = ("admin_name", "admin_password", "hosts", "type")
class PeerCaUserSerializer(serializers.ModelSerializer):
class Meta:
model = PeerCaUser
fields = ("user", "username", "password", "type")
def validate(self, attrs):
user = attrs.get("user")
username = attrs.get("username")
password = attrs.get("password")
user_type = attrs.get("type")
if user is None and (
username is None or password is None or user_type is None
):
raise serializers.ValidationError(
"Input user or username,password,type"
)
if user is not None and (
username is not None
or password is not None
or user_type is not None
):
raise serializers.ValidationError(
"Input user or username,password,type"
)
return attrs
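    # Editor's illustrative note (assumed example payloads) for the check
    # above -- exactly one of the two styles must be used:
    #   accepted: {"user": <existing-user reference>}
    #   accepted: {"username": "u", "password": "p", "type": "admin"}
    #   rejected: {"user": <ref>, "username": "u"}  (mixes both styles)
    #   rejected: {"username": "u"}                 (incomplete triple)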
class PeerCaSerializer(serializers.ModelSerializer):
users = PeerCaUserSerializer(
help_text="Users of ca node, "
"can only set user or set username,password,type together",
many=True,
)
class Meta:
model = PeerCa
fields = ("node", "address", "certificate", "type", "users")
def validate(self, attrs):
node = attrs.get("node")
address = attrs.get("address")
certificate = attrs.get("certificate")
ca_type = attrs.get("type")
        # check whether node is set, or address, certificate and type are set together
if (
node is None
and (address is None or certificate is None or ca_type is None)
) or (
node is not None
and (
address is not None
or certificate is not None
or ca_type is not None
)
):
raise serializers.ValidationError(
"Input node or address,certificate"
)
return attrs
class FabricPeerSerializer(serializers.ModelSerializer):
ca_nodes = PeerCaSerializer(
help_text="CA nodes for peer node, "
"can only set node or set address,certificate,type together",
many=True,
)
class Meta:
model = FabricPeer
fields = (
"name",
"gossip_use_leader_reflection",
"gossip_org_leader",
"gossip_skip_handshake",
"local_msp_id",
"ca_nodes",
)
extra_kwargs = {
"name": {"required": True},
"local_msp_id": {"required": True},
"ca_nodes": {"required": True},
"gossip_use_leader_reflection": {"default": True},
"gossip_skip_handshake": {"default": True},
"gossip_org_leader": {"default": False},
}
class NodeInListSerializer(NodeIDSerializer, serializers.ModelSerializer):
# agent_id = serializers.UUIDField(
# help_text="Agent ID", required=False, allow_null=True
# )
ports = PortSerializer(
help_text="Port mapping for node", many=True, required=False
)
network_id = serializers.UUIDField(
help_text="Network ID", required=False, allow_null=True
)
class Meta:
model = Node
fields = (
"id",
"type",
"name",
"urls",
"created_at",
"status",
"network_id",
"organization",
"cid",
"ports",
)
extra_kwargs = {
"id": {"required": True, "read_only": False},
"created_at": {"required": True, "read_only": False},
# "ca": {"required": False, "allow_null": True},
}
class NodeListSerializer(serializers.Serializer):
data = NodeInListSerializer(many=True, help_text="Nodes list")
total = serializers.IntegerField(
help_text="Total number of node", min_value=0
)
class NodeUrlSerializer(serializers.Serializer):
internal_port = serializers.IntegerField(
min_value=1,
max_value=65535,
required=True,
help_text="Port number of node service",
)
url = serializers.CharField(help_text="Url of node service", required=True)
class NodeInfoSerializer(NodeIDSerializer, serializers.ModelSerializer):
# ca = FabricCASerializer(
# help_text="CA configuration for node", required=False, allow_null=True
# )
# file = serializers.URLField(help_text="File url of node", required=False)
# links = NodeUrlSerializer(help_text="Links of node service", many=True)
agent_id = serializers.UUIDField(
help_text="Agent ID", required=False, allow_null=True
)
class Meta:
model = Node
fields = (
"id",
"type",
"name",
# "network_type",
# "network_version",
"created_at",
"agent_id",
# "network_id",
"status",
# "ca",
# "file",
# "links",
)
extra_kwargs = {
"id": {"required": True, "read_only": False},
"created_at": {"required": True, "read_only": False},
}
class NodeStatusSerializer(NodeIDSerializer, serializers.ModelSerializer):
class Meta:
model = Node
fields = (
"status",
)
extra_kwargs = {
"id": {"required": True, "read_only": False},
"created_at": {"required": True, "read_only": False},
}
class NodeCreateBody(serializers.ModelSerializer):
    num = serializers.IntegerField(help_text="number of nodes")
class Meta:
model = Node
fields = (
"name",
"type",
"num",
)
extra_kwargs = {
"name": {"required": True},
"type": {"required": True},
}
def validate(self, attrs):
# network_type = attrs.get("network_type")
# node_type = attrs.get("type")
# network_version = attrs.get("network_version")
# agent_type = attrs.get("agent_type")
# agent = attrs.get("agent")
# ca = attrs.get("ca")
# peer = attrs.get("peer")
# if network_type == NetworkType.Fabric.value:
# if network_version not in FabricVersions.values():
# raise serializers.ValidationError("Not valid fabric version")
# if node_type not in FabricNodeType.names():
# raise serializers.ValidationError(
# "Not valid node type for %s" % network_type
# )
# if node_type == FabricNodeType.Ca.name.lower() and ca is None:
# raise serializers.ValidationError(
# "Please input ca configuration for ca node"
# )
# elif (
# node_type == FabricNodeType.Peer.name.lower() and peer is None
# ):
# raise serializers.ValidationError(
# "Please input peer configuration for peer node"
# )
#
# if agent_type is None and agent is None:
# raise serializers.ValidationError("Please set agent_type or agent")
#
# if agent_type and agent:
# if agent_type != agent.type:
# raise serializers.ValidationError(
# "agent type not equal to agent"
# )
return attrs
class NodeUpdateBody(serializers.ModelSerializer):
ports = PortSerializer(
help_text="Port mapping for node", many=True, required=False
)
class Meta:
model = Node
fields = ("status", "ports")
class NodeOperationSerializer(serializers.Serializer):
action = serializers.ChoiceField(
help_text=Operation.get_info("Operation for node:", list_str=True),
choices=Operation.to_choices(True),
)
class NodeConfigFileSerializer(serializers.ModelSerializer):
files = serializers.FileField()
# class NodeFileCreateSerializer(serializers.ModelSerializer):
# def to_form_paras(self):
# custom_paras = to_form_paras(self)
# return custom_paras
# class Meta:
# model = Node
# fields = ("file",)
# extra_kwargs = {
# "file": {
# "required": True,
# "validators": [
# FileExtensionValidator(
# allowed_extensions=["tar.gz", "tgz"]
# ),
# validate_file,
# ],
# }
# }
class NodeUserCreateSerializer(serializers.ModelSerializer):
class Meta:
model = NodeUser
fields = ("name", "user_type", "secret", "attrs")
extra_kwargs = {
"name": {"required": True},
"user_type": {"required": True},
"secret": {"required": True},
}
class NodeUserQuerySerializer(
PageQuerySerializer, serializers.ModelSerializer
):
class Meta:
model = NodeUser
fields = ("name", "user_type", "page", "per_page", "status")
class UserInListSerializer(serializers.ModelSerializer):
class Meta:
model = NodeUser
fields = ("id", "name", "user_type", "status")
class NodeUserListSerializer(serializers.Serializer):
data = UserInListSerializer(many=True, help_text="Users list")
total = serializers.IntegerField(
help_text="Total number of node", min_value=0
)
class NodeUserIDSerializer(serializers.ModelSerializer):
class Meta:
model = NodeUser
fields = ("id",)
class NodeUserPatchSerializer(serializers.ModelSerializer):
class Meta:
model = NodeUser
fields = ("status",)
extra_kwargs = {"status": {"required": True}}
|
7e37777a44921b638c0bcebd5dd2004849280748
|
ab5d1586266e525eb32823851dd3c774b60fabec
|
/core/logger.py
|
5ac1b9bca952fd7dc71060f204adc35621f5f833
|
[
"Apache-2.0"
] |
permissive
|
yeti-platform/yeti
|
d31fa464582b6d6731a437d7ceab9e730ce09c75
|
dccc691d48177f921ef1134c8fd22f064dc085a2
|
refs/heads/master
| 2023-09-01T13:54:19.258408
| 2023-06-29T12:44:55
| 2023-06-29T12:44:55
| 47,927,876
| 1,444
| 321
|
Apache-2.0
| 2023-09-12T14:49:55
| 2015-12-13T16:54:26
|
Python
|
UTF-8
|
Python
| false
| false
| 756
|
py
|
logger.py
|
"""Set up logging for Yeti."""
import logging
import os
from logging import FileHandler
from logging import Formatter
from core.config.config import yeti_config
LOG_FORMAT = "%(asctime)s [%(levelname)s]: %(message)s"
LOG_LEVEL = logging.INFO
# user logger
USER_LOG_FILE = yeti_config.get("logging", "filename")
# Fall back to tmp if the logging directory does not exist
if not os.path.isdir(os.path.dirname(USER_LOG_FILE)):
USER_LOG_FILE = "/tmp/yeti.log"
userLogger = logging.getLogger("userLogger.messaging")
userLogger.setLevel(LOG_LEVEL)
userLogger_file_handler = FileHandler(USER_LOG_FILE)
userLogger_file_handler.setLevel(LOG_LEVEL)
userLogger_file_handler.setFormatter(Formatter(LOG_FORMAT))
userLogger.addHandler(userLogger_file_handler)
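if __name__ == "__main__":
    # Minimal usage sketch (editor's addition): messages logged through the
    # handler configured above land in USER_LOG_FILE.
    userLogger.info("logger configured, writing to %s", USER_LOG_FILE)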
|
bd7359f157c2f9a5e44397f96140951619d50fba
|
0841643267b9fc1478f6e3d21bfccb17aba67af6
|
/gs_quant/api/gs/screens.py
|
59408dd61e36f519bc4ed6cb9c7f74bcb42d9bfa
|
[
"Apache-2.0"
] |
permissive
|
goldmansachs/gs-quant
|
55618e0e4e961d4ee50b7393f27c258e2647a957
|
4cf8ec75c4d85b16ec08371c46cc1a9ede9d72a2
|
refs/heads/master
| 2023-08-20T00:55:43.324547
| 2023-08-16T16:55:22
| 2023-08-16T16:55:22
| 161,840,815
| 2,088
| 596
|
Apache-2.0
| 2023-08-16T16:55:23
| 2018-12-14T21:10:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,098
|
py
|
screens.py
|
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import logging
from typing import Tuple, List
from gs_quant.session import GsSession
from gs_quant.target.screens import Screen
from gs_quant.target.assets_screener import AssetScreenerRequest
_logger = logging.getLogger(__name__)
class GsScreenApi:
@classmethod
def get_screens(cls, screen_ids: List[str] = None, screen_names: List[str] = None, limit: int = 100) \
-> Tuple[Screen]:
url = '/screens?'
if screen_ids:
url += f'&id={"&id=".join(screen_ids)}'
if screen_names:
url += f'&name={"&name=".join(screen_names)}'
return GsSession.current._get(f'{url}&limit={limit}', cls=Screen)['results']
@classmethod
def get_screen(cls, screen_id: str) -> Screen:
return GsSession.current._get(f'/screens/{screen_id}', cls=Screen)
@classmethod
def create_screen(cls, screen: Screen) -> Screen:
return GsSession.current._post('/screens', screen, cls=Screen)
@classmethod
def update_screen(cls, screen: Screen) -> Screen:
return GsSession.current._put(f'/screens/{screen.id}', screen, cls=Screen)
@classmethod
def delete_screen(cls, screen_id: str) -> str:
return GsSession.current._delete(f'/screens/{screen_id}')
@classmethod
def get_filter_options(cls) -> dict:
return GsSession.current._get('/assets/screener/options')
@classmethod
def calculate(cls, payload: AssetScreenerRequest) -> dict:
return GsSession.current._post('/assets/screener', payload=payload)
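if __name__ == '__main__':
    # Editor's illustrative sketch (hypothetical ids/names): the query string
    # get_screens builds, reproduced from the method above without touching
    # GsSession; note it keeps the method's leading '&' right after '?'.
    screen_ids, screen_names, limit = ['s1', 's2'], ['alpha'], 100
    url = '/screens?'
    if screen_ids:
        url += f'&id={"&id=".join(screen_ids)}'
    if screen_names:
        url += f'&name={"&name=".join(screen_names)}'
    print(f'{url}&limit={limit}')  # /screens?&id=s1&id=s2&name=alpha&limit=100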
|
ffaa61ee25273ca6b55cd1c977cf070a182b4ccd
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/parties/DistributedPartyCannonActivity.py
|
a2ee0d2a0d44f303a6a6bb4abadb96525cb2b902
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 40,354
|
py
|
DistributedPartyCannonActivity.py
|
import math
from panda3d.core import *
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from otp.otpbase.PythonUtil import quantizeVec
from direct.task.Task import Task
from toontown.toontowngui import TTDialog
from toontown.toonbase.ToonBaseGlobal import *
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.effects import Splash, DustCloud, Wake
from toontown.minigame import Trajectory
from toontown.minigame import CannonGameGlobals
from toontown.parties import PartyGlobals
from toontown.parties.PartyGlobals import ActivityIds
from toontown.parties.PartyGlobals import ActivityTypes
from toontown.parties.PartyGlobals import FireworksStartedEvent
from toontown.parties.PartyGlobals import FireworksFinishedEvent
from toontown.parties.PartyGlobals import PartyCannonCollisions
from toontown.parties.DistributedPartyActivity import DistributedPartyActivity
from toontown.parties.CannonGui import CannonGui
from toontown.parties.PartyUtils import toRadians, toDegrees
CANNON_ROTATION_VEL = 15.0
CANNON_ANGLE_VEL = 15.0
GROUND_PLANE_MIN = -15
SHADOW_Z_OFFSET = 0.5
INITIAL_VELOCITY = 90.0
WHISTLE_SPEED = INITIAL_VELOCITY * 0.35
class DistributedPartyCannonActivity(DistributedPartyActivity):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPartyCannonActivity')
HIT_GROUND = 0
HIT_TOWER = 1
HIT_WATER = 2
REACTIVATE_CLOUD_TASK = 'PartyActivity_ReactivateLastCloud'
RULES_DONE_EVENT = 'DistributedPartyCannonActivity_RULES_DONE_EVENT'
LOCAL_TOON_LANDED_EVENT = 'DistributedPartyCannonActivity_LOCAL_TOON_LANDED_EVENT'
NetDivisor = 100
TimeFactor = 0.75
BroadcastPeriod = 0.2
def __init__(self, cr):
DistributedPartyActivity.__init__(self, cr, ActivityIds.PartyCannon, ActivityTypes.Continuous, wantRewardGui=True)
self.gui = None
self.firingCannon = None
self.shadowNode = None
self.partyDoId = None
self.splash = None
self.dustCloud = None
self.lastWakeTime = 0
self.localFlyingDropShadow = None
self.localFlyingToon = None
self.localFlyingToonId = 0
self._lastBroadcastTime = -self.BroadcastPeriod
self._dirtyNewVel = None
self.hitBumper = 0
self.hitCloud = 0
self.lastPos = Vec3(0, 0, 0)
self.lastVel = Vec3(0, 0, 0)
self.vel = Vec3(0, 0, 0)
self.landingPos = Vec3(0, 0, 0)
self.t = 0
self.lastT = 0
self.deltaT = 0
self._lastCloudHit = None
self.cameraPos = Vec3(0, -15.0, -25.0)
self.cameraSpeed = 5.0
self.camNode = None
self.flyingToonOffsetRotation = 0
self.flyingToonOffsetAngle = 0
self.flyingToonOffsetX = 0
self.flyingToonOffsetY = 0
self.flyingToonCloudsHit = 0
self.initialFlyVel = 0
self._localPlayedBefore = False
self.hitTrack = None
self.cTrav = None
self.flyColNode = None
self.flyColNodePath = None
self._flyingCollisionTaskName = None
return
def generateInit(self):
DistributedPartyActivity.generateInit(self)
self.taskNameFireCannon = self.taskName('fireCannon')
self.taskNameShoot = self.taskName('shootTask')
self.taskNameFly = self.taskName('flyTask')
self.gui = CannonGui()
def load(self):
self.notify.debug('load')
DistributedPartyActivity.load(self)
base.cr.playGame.hood.loader.loadClouds()
base.cr.playGame.hood.loader.setCloudSwitch(1)
self.shadow = loader.loadModel('phase_3/models/props/drop_shadow')
self.shadowNode = hidden.attachNewNode('dropShadow')
self.shadow.copyTo(self.shadowNode)
self.shadowNode.setColor(0, 0, 0, 0.5)
self.shadowNode.setBin('fixed', 0, 1)
self.splash = Splash.Splash(render)
self.dustCloud = DustCloud.DustCloud(render)
self.dustCloud.setBillboardPointEye()
self.sndHitGround = base.loader.loadSfx('phase_4/audio/sfx/MG_cannon_hit_dirt.ogg')
self.sndHitWater = base.loader.loadSfx('phase_4/audio/sfx/MG_cannon_splash.ogg')
self.sndHitHouse = base.loader.loadSfx('phase_5/audio/sfx/AA_drop_sandbag.ogg')
self.sndBounce1 = base.loader.loadSfx('phase_13/audio/sfx/bounce1.ogg')
self.sndBounce2 = base.loader.loadSfx('phase_13/audio/sfx/bounce2.ogg')
self.sndBounce3 = base.loader.loadSfx('phase_13/audio/sfx/bounce3.ogg')
self.onstage()
self.sign.reparentTo(hidden)
self.sign.setPos(-6.0, 10.0, 0.0)
self.accept(FireworksStartedEvent, self.__handleFireworksStarted)
self.accept(FireworksFinishedEvent, self.__handleFireworksFinished)
def generate(self):
DistributedPartyActivity.generate(self)
self._doneCannons = False
self._avId2trajectoryInfo = {}
self._remoteToonFlyTaskName = 'remoteToonFlyTask-%s' % self.doId
taskMgr.add(self._remoteToonFlyTask, self._remoteToonFlyTaskName, priority=45)
self.d_cloudsColorRequest()
def unload(self):
self.notify.debug('unload')
DistributedPartyActivity.unload(self)
if self.shadowNode is not None:
self.shadowNode.removeNode()
del self.shadowNode
if self.splash is not None:
self.splash.destroy()
del self.splash
if self.dustCloud is not None:
self.dustCloud.destroy()
del self.dustCloud
del self.sndHitHouse
del self.sndHitGround
del self.sndHitWater
del self.sndBounce1
del self.sndBounce2
del self.sndBounce3
if self.localFlyingToon:
self.__resetToon(self.localFlyingToon)
self.localFlyingToon.loop('neutral')
self.localFlyingToon.setPlayRate(1.0, 'run')
self.localFlyingToon = None
self.ignoreAll()
return
def onstage(self):
self.notify.debug('onstage')
self.splash.reparentTo(render)
self.dustCloud.reparentTo(render)
def offstage(self):
self.notify.debug('offstage')
if self.splash is not None:
self.splash.reparentTo(hidden)
self.splash.stop()
if self.dustCloud is not None:
self.dustCloud.reparentTo(hidden)
self.dustCloud.stop()
return
def disable(self):
taskMgr.remove(self._remoteToonFlyTaskName)
if self._flyingCollisionTaskName:
taskMgr.remove(self._flyingCollisionTaskName)
taskMgr.remove(self.taskNameFireCannon)
taskMgr.remove(self.taskNameShoot)
taskMgr.remove(self.taskNameFly)
taskMgr.remove(DistributedPartyCannonActivity.REACTIVATE_CLOUD_TASK)
self.ignoreAll()
if self.localFlyingToonId:
self.__stopCollisionHandler(self.localFlyingToon)
self.__stopLocalFlyTask(self.localFlyingToonId)
self.setMovie(PartyGlobals.CANNON_MOVIE_CLEAR, self.localFlyingToonId)
if self.hitTrack is not None:
self.hitTrack.finish()
del self.hitTrack
self.hitTrack = None
DistributedPartyActivity.disable(self)
return
def delete(self):
self.offstage()
DistributedPartyActivity.delete(self)
def setMovie(self, mode, toonId):
self.notify.debug('%s setMovie(%s, %s)' % (self.doId, toonId, mode))
if toonId != base.localAvatar.doId:
return
if mode == PartyGlobals.CANNON_MOVIE_CLEAR:
self.landToon(toonId)
elif mode == PartyGlobals.CANNON_MOVIE_LANDED:
self.landToon(toonId)
elif mode == PartyGlobals.CANNON_MOVIE_FORCE_EXIT:
self.landToon(toonId)
def __handleAvatarGone(self):
self.setMovie(PartyGlobals.CANNON_MOVIE_CLEAR, 0)
def handleToonDisabled(self, toonId):
self.notify.warning('handleToonDisabled no implementation yet')
def handleToonJoined(self, toonId):
self.notify.warning('handleToonJoined no implementation yet')
def isLocalToon(self, av):
return base.localAvatar == av
def isLocalToonId(self, toonId):
return base.localAvatar.doId == toonId
def getTitle(self):
return TTLocalizer.PartyCannonActivityTitle
def getInstructions(self):
return TTLocalizer.PartyCannonActivityInstructions
def hasPlayedBefore(self):
return self._localPlayedBefore
def displayRules(self):
self.startRules()
def handleRulesDone(self):
self.finishRules()
self._localPlayedBefore = True
messenger.send(DistributedPartyCannonActivity.RULES_DONE_EVENT)
def setCannonWillFire(self, cannonId, zRot, angle):
self.notify.debug('setCannonWillFire: %d %d %d' % (cannonId, zRot, angle))
cannon = base.cr.doId2do.get(cannonId)
if cannon is None:
self.notify.warning("Cannon has not been created, but we got this message. Don't show firing.")
return
if not cannon.getToonInside():
self.notify.warning("setCannonWillFire, but no toon insde. Don't show firing")
return
if self.isLocalToon(cannon.getToonInside()):
self.localFlyingToon = base.localAvatar
self.localFlyingToonId = base.localAvatar.doId
self.localFiringCannon = cannon
self.flyingToonCloudsHit = 0
cannon.updateModel(zRot, angle)
toonId = cannon.getToonInside().doId
task = Task(self.__fireCannonTask)
task.toonId = toonId
task.cannon = cannon
taskMgr.add(task, self.taskNameFireCannon)
self.toonIds.append(toonId)
return
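    # __fireCannonTask runs once per shot: it computes the launch trajectory,
    # then chains a shoot task (fire the cannon, start the 'swim' anim) into a
    # fly task that simulates the local toon's flight; remote toons only get
    # the shoot task, since _remoteToonFlyTask drives their motion instead.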
def __fireCannonTask(self, task):
launchTime = 0.0
toonId = task.toonId
cannon = task.cannon
toon = cannon.getToonInside()
self.notify.debug(str(self.doId) + ' FIRING CANNON FOR TOON ' + str(toonId))
if not cannon.isToonInside():
return Task.done
if self.isLocalToonId(toonId):
self.inWater = 0
startPos, startHpr, startVel, trajectory = self.__calcFlightResults(cannon, toonId, launchTime)
self.notify.debug('start position: ' + str(startPos))
self.notify.debug('start velocity: ' + str(startVel))
self.notify.debug('time of launch: ' + str(launchTime))
cannon.removeToonReadyToFire()
shootTask = Task(self.__shootTask, self.taskNameShoot)
shootTask.info = {'toonId': toonId,
'cannon': cannon}
if self.isLocalToonId(toonId):
self.flyingToonOffsetRotation = 0
self.flyingToonOffsetAngle = 0
self.flyingToonOffsetX = 0
self.flyingToonOffsetY = 0
self.hitCloud = 0
self.initialFlyVel = INITIAL_VELOCITY
self.camNode = NodePath(self.uniqueName('flyingCamera'))
self.camNode.setScale(0.5)
self.camNode.setPos(self.localFlyingToon.getPos())
self.camNode.setHpr(self.localFlyingToon.getHpr())
self.camNode.reparentTo(render)
self.lastStartVel = startVel
place = base.cr.playGame.getPlace()
place.fsm.request('activity')
toon.dropShadow.hide()
self.localFlyingDropShadow = self.shadowNode.copyTo(hidden)
vel = startVel
toon.lookAt(toon.getPos() + Vec3(vel[0], vel[1], vel[2]))
toon.setP(localAvatar, -90)
hpr = toon.getHpr()
toon.d_setPosHpr(startPos[0], startPos[1], startPos[2], hpr[0], hpr[1], hpr[2])
self.localFlyingToon.wrtReparentTo(render)
info = {}
info['toonId'] = toonId
info['trajectory'] = trajectory
info['launchTime'] = launchTime
info['toon'] = self.localFlyingToon
info['hRot'] = cannon.getRotation()
camera.wrtReparentTo(self.localFlyingToon)
flyTask = Task(self.__localFlyTask, self.taskNameFly)
flyTask.info = info
seqTask = Task.sequence(shootTask, flyTask)
self.__startCollisionHandler()
self.notify.debug('Disable standard local toon controls.')
base.localAvatar.disableAvatarControls()
frameTime = globalClock.getFrameTime()
netLaunchTime = globalClockDelta.localToNetworkTime(launchTime + frameTime, bits=31)
self.sendUpdate('setToonTrajectoryAi', [netLaunchTime,
startPos[0],
startPos[1],
startPos[2],
startHpr[0],
startHpr[1],
startHpr[2],
startVel[0],
startVel[1],
startVel[2]])
else:
seqTask = shootTask
taskMgr.add(seqTask, self.taskName('flyingToon') + '-' + str(toonId))
toon.startSmooth()
return Task.done
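    # Network sync: the local flier broadcasts its launch parameters via
    # setToonTrajectoryAi; other clients receive them here, rebuild the same
    # Trajectory, and let _remoteToonFlyTask interpolate the toon along it.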
def setToonTrajectory(self, avId, launchTime, x, y, z, h, p, r, vx, vy, vz):
if avId == localAvatar.doId:
return
startPos = Vec3(x, y, z)
startHpr = Vec3(h, p, r)
startVel = Vec3(vx, vy, vz)
startT = globalClockDelta.networkToLocalTime(launchTime, bits=31) + 0.2
trajectory = Trajectory.Trajectory(0.0, startPos, startVel)
self._avId2trajectoryInfo[avId] = ScratchPad(startPos=startPos, startHpr=startHpr, startVel=startVel, startT=startT, trajectory=trajectory)
def _remoteToonFlyTask(self, task = None):
ids2del = []
frameTime = globalClock.getFrameTime()
for avId, trajInfo in self._avId2trajectoryInfo.items():
trajectory = trajInfo.trajectory
startTime = trajInfo.startT
groundTime = trajectory.calcTimeOfImpactOnPlane(0.0) / self.TimeFactor + startTime
now = frameTime
if now < startTime:
now = startTime
if now > groundTime:
now = groundTime
t = max(0.0, now - startTime)
t *= self.TimeFactor
toon = self.cr.getDo(avId)
if toon is None:
ids2del.append(avId)
else:
toon.setFluidPos(trajectory.getPos(t))
vel = trajectory.getVel(t)
toon.lookAt(toon.getPos() + Vec3(vel[0], vel[1], vel[2]))
toon.setP(toon, -90)
for avId in ids2del:
del self._avId2trajectoryInfo[avId]
return Task.cont
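    # Pos/hpr/vel are quantized to NetDivisor before building the trajectory,
    # presumably so the locally simulated flight matches what remote clients
    # reconstruct from the quantized values sent over the wire.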
def __calcFlightResults(self, cannon, toonId, launchTime):
startPos = cannon.getToonFirePos()
startHpr = cannon.getToonFireHpr()
startVel = cannon.getToonFireVel()
quantizeVec(startPos, self.NetDivisor)
quantizeVec(startHpr, self.NetDivisor)
quantizeVec(startVel, self.NetDivisor)
trajectory = Trajectory.Trajectory(launchTime, startPos, startVel)
self.trajectory = trajectory
return startPos, startHpr, startVel, trajectory
def __shootTask(self, task):
task.info['cannon'].fire()
toonId = task.info['toonId']
toon = base.cr.doId2do.get(toonId)
if toon:
toon.loop('swim')
else:
self.notify.debug('__shootTask avoided a crash, toon %d not found' % toonId)
if self.isLocalToonId(task.info['toonId']):
self.localFlyingDropShadow.reparentTo(render)
self.gui.enableAimKeys()
return Task.done
def d_setLanded(self, toonId):
printStack()
self.notify.debug('d_setLanded %s' % toonId)
if self.isLocalToonId(toonId):
if self.cr:
self.sendUpdate('setLanded', [toonId])
else:
self.notify.debug('we avoided crash 2')
def setLanded(self, toonId):
if toonId in self._avId2trajectoryInfo:
del self._avId2trajectoryInfo[toonId]
def landToon(self, toonId):
self.notify.debug('%s landToon' % self.doId)
toon = base.cr.doId2do.get(toonId)
if toon is not None:
toon.resetLOD()
if toon == base.localAvatar:
self.__stopCollisionHandler(base.localAvatar)
toon.wrtReparentTo(render)
self.__setToonUpright(toon)
toon.setPlayRate(1.0, 'run')
toon.startSmooth()
toon.setScale(1.0)
self.ignore(toon.uniqueName('disable'))
self.__cleanupFlyingToonData(toon)
toon.dropShadow.show()
place = base.cr.playGame.getPlace()
if place is not None:
if not hasattr(place, 'fsm'):
return
if toon is not None and toon == base.localAvatar:
self.__localDisplayLandedResults()
return
def __localDisplayLandedResults(self):
if self.flyingToonCloudsHit > 0:
self._doneCannons = True
else:
self.__localToonDoneLanding()
def handleRewardDone(self):
DistributedPartyActivity.handleRewardDone(self)
if self._doneCannons:
self.__localToonDoneLanding()
def __localToonDoneLanding(self):
base.cr.playGame.getPlace().fsm.request('walk')
self.notify.debug('__localToonDoneLanding')
base.localAvatar.collisionsOn()
base.localAvatar.startPosHprBroadcast()
base.localAvatar.enableAvatarControls()
messenger.send(DistributedPartyCannonActivity.LOCAL_TOON_LANDED_EVENT)
def __setToonUpright(self, toon, pos = None):
if toon:
if self.inWater:
toon.setP(0)
toon.setR(0)
return
if not pos:
pos = toon.getPos(render)
toon.setPos(render, pos)
toon.loop('neutral')
if self.localFiringCannon and hasattr(self.localFiringCannon, 'cannonNode'):
if self.localFiringCannon.cannonNode:
toon.lookAt(self.localFiringCannon.cannonNode)
else:
self.notify.debug('we avoided crash 1.')
toon.setP(0)
toon.setR(0)
toon.setScale(1, 1, 1)
def __resetToonToCannon(self, avatar):
self.notify.debug('__resetToonToCannon')
if not avatar and self.localFlyingToonId:
avatar = base.cr.doId2do.get(self.localFlyingToonId, None)
if avatar:
self.__resetToon(avatar)
return
def __resetToon(self, avatar, pos = None):
self.notify.debug('__resetToon')
if avatar:
self.__stopCollisionHandler(avatar)
self.__setToonUpright(avatar, pos)
if self.isLocalToonId(avatar.doId):
self.notify.debug('toon setting position to %s' % pos)
if pos:
base.localAvatar.setPos(pos)
camera.reparentTo(avatar)
self.d_setLanded(avatar.doId)
def __updateFlightVelocity(self, trajectory):
hpr = LRotationf(self.flyingToonOffsetRotation, 0, 0)
newVel = hpr.xform(self.lastStartVel)
hpr = LRotationf(0, self.flyingToonOffsetAngle, 0)
zVel = hpr.xform(self.lastStartVel).getZ()
if zVel < newVel.getZ():
newVel.setZ(zVel)
trajectory.setStartVel(newVel)
now = globalClock.getFrameTime()
if now - self._lastBroadcastTime >= self.BroadcastPeriod:
self._dirtyNewVel = newVel
if self._dirtyNewVel:
self.sendUpdate('updateToonTrajectoryStartVelAi', [self._dirtyNewVel[0], self._dirtyNewVel[1], self._dirtyNewVel[2]])
self._lastBroadcastTime = now
self._dirtyNewVel = None
return
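    # Receiving side of the throttled updateToonTrajectoryStartVelAi broadcast
    # above: remote clients patch the stored trajectory's start velocity so
    # their interpolation tracks the flier's steering.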
def updateToonTrajectoryStartVel(self, avId, vx, vy, vz):
if avId == localAvatar.doId:
return
if avId in self._avId2trajectoryInfo:
self._avId2trajectoryInfo[avId].trajectory.setStartVel(Vec3(vx, vy, vz))
def __isFlightKeyPressed(self):
return self.gui.leftPressed or self.gui.rightPressed or self.gui.upPressed or self.gui.downPressed
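    # Steering model: while the toon is pitched roughly level ("horizontal"),
    # left/right rotate the whole trajectory and down pitches it further;
    # otherwise the keys nudge lateral/vertical offsets that are applied on
    # top of the ballistic path each frame.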
def __moveFlyingToon(self, toon):
toonP = toon.getP(render)
        isToonFlyingHorizontal = -150 < toonP < -30
OFFSET = 0.25
rotVel = 0
if self.gui.leftPressed:
if isToonFlyingHorizontal:
rotVel += CANNON_ROTATION_VEL
else:
self.flyingToonOffsetX -= OFFSET
if self.gui.rightPressed:
if isToonFlyingHorizontal:
rotVel -= CANNON_ROTATION_VEL
else:
self.flyingToonOffsetX += OFFSET
self.flyingToonOffsetRotation += rotVel * globalClock.getDt()
angVel = 0
if self.gui.upPressed:
if not isToonFlyingHorizontal:
self.flyingToonOffsetY -= OFFSET
if self.gui.downPressed:
if isToonFlyingHorizontal:
angVel += CANNON_ANGLE_VEL
else:
self.flyingToonOffsetY += OFFSET
self.flyingToonOffsetAngle += angVel * globalClock.getDt()
def __stopLocalFlyTask(self, toonId):
taskMgr.remove(self.taskName('flyingToon') + '-' + str(toonId))
self.gui.disableAimKeys()
def __localFlyTask(self, task):
toon = task.info['toon']
if toon.isEmpty():
self.__resetToonToCannon(self.localFlyingToon)
return Task.done
curTime = task.time + task.info['launchTime']
t = curTime
t *= self.TimeFactor
self.lastT = self.t
self.t = t
deltaT = self.t - self.lastT
self.deltaT = deltaT
if self.hitBumper:
pos = self.lastPos + self.lastVel * deltaT
vel = self.lastVel
self.lastVel += Vec3(0, 0, -32.0) * deltaT
self.lastPos = pos
toon.setFluidPos(pos)
lastR = toon.getR()
toon.setR(lastR - deltaT * self.angularVel * 2.0)
cameraView = 0
else:
if not self.hitCloud and self.__isFlightKeyPressed():
self.__moveFlyingToon(toon)
self.__updateFlightVelocity(task.info['trajectory'])
if self.hitCloud == 1:
vel = task.info['trajectory'].getVel(t)
startPos = toon.getPos(render)
task.info['trajectory'].setStartTime(t)
task.info['trajectory'].setStartPos(startPos)
task.info['trajectory'].setStartVel(self.lastVel)
toon.lookAt(toon.getPos() + vel)
toon.setH(-toon.getH())
now = globalClock.getFrameTime()
netLaunchTime = globalClockDelta.localToNetworkTime(now, bits=31)
hpr = toon.getHpr()
self.sendUpdate('setToonTrajectoryAi', [netLaunchTime,
startPos[0],
startPos[1],
startPos[2],
hpr[0],
hpr[1],
hpr[2],
self.lastVel[0],
self.lastVel[1],
self.lastVel[2]])
self._lastBroadcastTime = now
self._dirtyNewVel = None
self.flyingToonOffsetRotation = 0
self.flyingToonOffsetAngle = 0
self.flyingToonOffsetX = 0
self.flyingToonOffsetY = 0
self.hitCloud = 2
pos = task.info['trajectory'].getPos(t)
toon.setFluidPos(pos)
toon.setFluidPos(toon, self.flyingToonOffsetX, self.flyingToonOffsetY, 0)
vel = task.info['trajectory'].getVel(t)
toon.lookAt(toon.getPos() + Vec3(vel[0], vel[1], vel[2]))
toon.setP(toon.getP() - 90)
cameraView = 2
if self.hitCloud == 2:
self.lastStartVel = vel
self.hitCloud = 0
shadowPos = toon.getPos()
shadowPos.setZ(SHADOW_Z_OFFSET)
self.localFlyingDropShadow.setPos(shadowPos)
if pos.getZ() < -20 or pos.getZ() > 1000:
self.notify.debug('stopping fly task toon.getZ()=%.2f' % pos.getZ())
self.__resetToonToCannon(self.localFlyingToon)
return Task.done
self.__setFlyingCameraView(task.info['toon'], cameraView, deltaT)
return Task.cont
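    # Camera views: 0 is a static world camera watching the toon (used while
    # tumbling off a bumper); 2 is the chase camera, easing camNode toward the
    # toon's orientation at a rate proportional to the remaining angular
    # distance (relativeSpeed); view 1 is a fixed vantage point.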
def __setFlyingCameraView(self, toon, view, deltaT):
if toon != base.localAvatar:
return
lookAt = toon.getPos(render)
hpr = toon.getHpr(render)
if view == 0:
camera.wrtReparentTo(render)
camera.lookAt(lookAt)
elif view == 1:
camera.reparentTo(render)
camera.setPos(render, 100, 100, 35.25)
camera.lookAt(render, lookAt)
elif view == 2:
if camera.getParent() != self.camNode:
camera.wrtReparentTo(self.camNode)
camera.setPos(self.cameraPos)
camera.lookAt(toon)
self.camNode.setPos(toon.getPos(render))
camHpr = self.camNode.getHpr(toon)
vec = -Point3(0, 0, 0) - camHpr
relativeSpeed = math.pow(vec.length() / 60.0, 2) + 0.1
newHpr = camHpr + vec * deltaT * self.cameraSpeed * relativeSpeed
self.camNode.setHpr(toon, newHpr)
camera.lookAt(self.camNode)
camera.setR(render, 0)
def __cleanupFlyingToonData(self, toon):
self.notify.debug('__cleanupFlyingToonData')
if toon:
toon.dropShadow.show()
self.toonIds.remove(toon.doId)
if self.isLocalToon(toon):
                if self.localFlyingDropShadow is not None:
self.localFlyingDropShadow.removeNode()
self.localFlyingDropShadow = None
self.hitBumper = 0
self.angularVel = 0
self.vel = Vec3(0, 0, 0)
self.lastVel = Vec3(0, 0, 0)
self.lastPos = Vec3(0, 0, 0)
self.landingPos = Vec3(0, 0, 0)
self.t = 0
self.lastT = 0
self.deltaT = 0
self.lastWakeTime = 0
self.localFlyingToon = None
self.localFlyingToonId = 0
self.localFiringCannon = None
if hasattr(self, 'camNode') and self.camNode:
self.camNode.removeNode()
self.camNode = None
return
def __startCollisionHandler(self):
self.flyColSphere = CollisionSphere(0, 0, self.localFlyingToon.getHeight() / 2.0, 1.0)
self.flyColNode = CollisionNode(self.uniqueName('flySphere'))
self.flyColNode.setCollideMask(ToontownGlobals.WallBitmask | ToontownGlobals.FloorBitmask)
self.flyColNode.addSolid(self.flyColSphere)
self.flyColNodePath = self.localFlyingToon.attachNewNode(self.flyColNode)
self.flyColNodePath.setColor(1, 0, 0, 1)
self._activeCollisions = set()
self.handler = CollisionHandlerQueue()
self._flyingCollisionTaskName = 'checkFlyingToonCollision-%s' % self.doId
taskMgr.add(self._checkFlyingToonCollision, self._flyingCollisionTaskName)
base.cTrav.addCollider(self.flyColNodePath, self.handler)
def __stopCollisionHandler(self, avatar):
self.notify.debug('%s __stopCollisionHandler' % self.doId)
if self._flyingCollisionTaskName:
taskMgr.remove(self._flyingCollisionTaskName)
self._flyingCollisionTaskName = None
self._activeCollisions = set()
if avatar:
avatar.loop('neutral')
if self.flyColNode:
self.flyColNode = None
self.flyColSphere = None
if self.flyColNodePath:
base.cTrav.removeCollider(self.flyColNodePath)
self.flyColNodePath.removeNode()
self.flyColNodePath = None
self.handler = None
return
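    # Edge-triggered collision handling: _activeCollisions remembers which
    # (from, into) node pairs were already touching last frame, so a contact
    # only fires __handleFlyingToonCollision once until the pair separates.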
def _checkFlyingToonCollision(self, task = None):
curCollisions = set()
if self.handler.getNumEntries():
self.handler.sortEntries()
i = self.handler.getNumEntries()
activeEntry = None
while i > 0:
entry = self.handler.getEntry(i - 1)
k = (str(entry.getFromNodePath()), str(entry.getIntoNodePath()))
curCollisions.add(k)
if activeEntry is None and k not in self._activeCollisions:
activeEntry = entry
self._activeCollisions.add(k)
i -= 1
if activeEntry is not None:
self.__handleFlyingToonCollision(activeEntry)
if self.handler:
self.handler.clearEntries()
for k in list(self._activeCollisions):
if k not in curCollisions:
self._activeCollisions.remove(k)
return Task.cont
def __handleFlyingToonCollision(self, collisionEntry):
        self.notify.debug('%s __handleFlyingToonCollision' % self.doId)
        if self.localFlyingToon is None or self.flyColNode is None:
return
hitNode = collisionEntry.getIntoNode().getName()
self.notify.debug('hitNode = %s' % hitNode)
self.notify.debug('hitNodePath.getParent = %s' % collisionEntry.getIntoNodePath().getParent())
self.vel = self.trajectory.getVel(self.t)
vel = self.trajectory.getVel(self.t)
vel.normalize()
if self.hitBumper:
vel = self.lastVel * 1
vel.normalize()
self.notify.debug('normalized vel=%s' % vel)
solid = collisionEntry.getInto()
intoNormal = collisionEntry.getSurfaceNormal(collisionEntry.getIntoNodePath())
self.notify.debug('old intoNormal = %s' % intoNormal)
intoNormal = collisionEntry.getSurfaceNormal(render)
self.notify.debug('new intoNormal = %s' % intoNormal)
hitPylonAboveWater = False
hitPylonBelowWater = False
hitNormal = intoNormal
        if (hitNode.startswith(('cSphere', 'treasureSphere', 'prop', 'distAvatarCollNode',
                                'CannonSphere', 'plotSphere', 'flySphere', 'FishingSpotSphere',
                                'TrampolineTrigger'))
                or hitNode in ('gagtree_collision', 'sign_collision', 'FlowerSellBox')
                or hitPylonBelowWater):
self.notify.debug('--------------hit and ignoring %s' % hitNode)
return
        if vel.dot(hitNormal) > 0 and hitNode not in ('collision_roof', 'collision_fence'):
self.notify.debug('--------------hit and ignoring backfacing %s, dot=%s' % (hitNode, vel.dot(hitNormal)))
return
intoNode = collisionEntry.getIntoNodePath()
bumperNodes = ['sky_collision'] + PartyCannonCollisions['bounce'] + PartyCannonCollisions['fence']
cloudBumpers = PartyCannonCollisions['clouds']
bumperNodes += cloudBumpers
if hitNode in bumperNodes or hitNode.find('cogPie') == 0 or PartyCannonCollisions['trampoline_bounce'] in hitNode:
if hitNode == 'sky_collision' or hitNode in PartyCannonCollisions['fence'] or hitNode.find('cogPie') == 0:
self.__hitFence(self.localFlyingToon, collisionEntry)
elif PartyCannonCollisions['trampoline_bounce'] in hitNode or hitNode in PartyCannonCollisions['bounce']:
if hitNode == 'wall_collision':
hitSound = self.sndBounce2
else:
hitSound = self.sndBounce3
self.hitCloud = 1
self.__hitBumper(self.localFlyingToon, collisionEntry, hitSound, kr=0.09, angVel=5)
self.hitBumper = 0
elif hitNode in cloudBumpers:
self.__hitCloudPlatform(self.localFlyingToon, collisionEntry)
elif hitNode == 'statuaryCol':
self.__hitStatuary(self.localFlyingToon, collisionEntry)
else:
self.notify.debug('*************** hit something else ************')
return
else:
self.__stopCollisionHandler(self.localFlyingToon)
self.__stopLocalFlyTask(self.localFlyingToonId)
self.notify.debug('stopping flying since we hit %s' % hitNode)
if self.isLocalToonId(self.localFlyingToon.doId):
camera.wrtReparentTo(render)
if self.localFlyingDropShadow:
self.localFlyingDropShadow.reparentTo(hidden)
pos = collisionEntry.getSurfacePoint(render)
hpr = self.localFlyingToon.getHpr()
hitPos = collisionEntry.getSurfacePoint(render)
pos = hitPos
self.landingPos = pos
self.notify.debug('hitNode, Normal = %s,%s' % (hitNode, intoNormal))
track = Sequence()
track.append(Func(self.localFlyingToon.wrtReparentTo, render))
if self.isLocalToonId(self.localFlyingToon.doId):
track.append(Func(self.localFlyingToon.collisionsOff))
if hitNode in PartyCannonCollisions['ground']:
track.append(Func(self.__hitGround, self.localFlyingToon, pos))
track.append(Wait(1.0))
track.append(Func(self.__setToonUpright, self.localFlyingToon, self.landingPos))
elif hitNode in PartyCannonCollisions['fence']:
track.append(Func(self.__hitFence, self.localFlyingToon, collisionEntry))
elif hitNode == 'collision3':
track.append(Func(self.__hitWater, self.localFlyingToon, pos, collisionEntry))
track.append(Wait(2.0))
track.append(Func(self.__setToonUpright, self.localFlyingToon, self.landingPos))
elif hitNode.find('cloudSphere') == 0:
track.append(Func(self.__hitCloudPlatform, self.localFlyingToon, collisionEntry))
else:
self.notify.warning('************* unhandled hitNode=%s parent =%s' % (hitNode, collisionEntry.getIntoNodePath().getParent()))
track.append(Func(self.d_setLanded, self.localFlyingToonId))
if self.isLocalToonId(self.localFlyingToonId):
track.append(Func(self.localFlyingToon.collisionsOn))
if self.hitTrack:
self.hitTrack.finish()
self.hitTrack = track
self.hitTrack.start()
return
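    # Bounce response: combine the incoming (normalized) velocity with twice
    # the surface normal and rescale by kr * speed -- a cheap approximation of
    # a reflection with restitution kr. Cloud hits instead aim the new
    # velocity back toward the activity's center to keep the bounce in play.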
def __hitBumper(self, avatar, collisionEntry, sound, kr = 0.6, angVel = 1):
self.hitBumper = 1
base.playSfx(sound)
hitP = avatar.getPos(render)
self.lastPos = hitP
normal = collisionEntry.getSurfaceNormal(render)
self.notify.debug('normal = %s' % normal)
vel = self.vel * 1
speed = vel.length()
vel.normalize()
self.notify.debug('old vel = %s' % vel)
if self.hitCloud:
centerVec = Vec3(-avatar.getPos(self.getParentNodePath()))
centerVec.setZ(0)
d = centerVec.length() / 15.0
centerVec.setZ(abs(centerVec.length() * math.sin(70.0)))
centerVec.normalize()
newVel = centerVec * d + normal * 0.2
newVel = newVel * (kr * speed)
self.initialFlyVel = kr * speed
else:
newVel = (normal * 2.0 + vel) * (kr * speed)
self.lastVel = newVel
self.notify.debug('new vel = %s' % newVel)
self.angularVel = angVel * 360
if self.hitCloud:
return
t = Sequence(Func(avatar.pose, 'lose', 110))
t.start()
def __hitGround(self, avatar, pos, extraArgs = []):
self.notify.debug('__hitGround')
hitP = avatar.getPos(render)
self.notify.debug('hitGround pos = %s, hitP = %s' % (pos, hitP))
self.notify.debug('avatar hpr = %s' % avatar.getHpr())
avatar.setPos(pos[0], pos[1], pos[2] + avatar.getHeight() / 3.0)
avatar.setHpr(avatar.getH(), -135, 0)
self.notify.debug('parent = %s' % avatar.getParent())
self.notify.debug('pos = %s, hpr = %s' % (avatar.getPos(render), avatar.getHpr(render)))
self.__playDustCloud(avatar, pos)
base.playSfx(self.sndHitGround)
avatar.setPlayRate(2.0, 'run')
avatar.loop('run')
def __playDustCloud(self, toon, pos):
self.dustCloud.setPos(render, pos[0], pos[1], pos[2] + toon.getHeight() / 3.0)
self.dustCloud.setScale(0.35)
self.dustCloud.play()
def __hitFence(self, avatar, collisionEntry, extraArgs = []):
self.notify.debug('__hitFence')
self.__hitBumper(avatar, collisionEntry, self.sndHitHouse, kr=0.2, angVel=3)
def __hitWater(self, avatar, pos, collisionEntry, extraArgs = []):
hitP = avatar.getPos(render)
if hitP[2] > ToontownGlobals.EstateWakeWaterHeight:
self.notify.debug('we hit the ground before we hit water')
self.__hitGround(avatar, pos, extraArgs)
return
self.notify.debug('hit water')
hitP = avatar.getPos(render)
avatar.loop('neutral')
self.splash.setPos(hitP)
self.splash.setZ(ToontownGlobals.EstateWakeWaterHeight)
self.splash.setScale(2)
self.splash.play()
base.playSfx(self.sndHitWater)
place = base.cr.playGame.getPlace()
def __hitStatuary(self, avatar, collisionEntry, extraArgs = []):
self.__hitBumper(avatar, collisionEntry, self.sndHitHouse, kr=0.4, angVel=5)
def d_cloudsColorRequest(self):
self.notify.debug('cloudsColorRequest')
self.sendUpdate('cloudsColorRequest')
def cloudsColorResponse(self, cloudColorList):
self.notify.debug('cloudsColorResponse: %s' % cloudColorList)
for cloudColor in cloudColorList:
self.setCloudHit(*cloudColor)
def d_requestCloudHit(self, cloudNumber, color):
self.sendUpdate('requestCloudHit', [cloudNumber,
color.getX(),
color.getY(),
color.getZ()])
def setCloudHit(self, cloudNumber, r, g, b):
cloud = render.find('**/cloud-%d' % cloudNumber)
if not cloud.isEmpty():
cloud.setColor(r, g, b, 1.0)
else:
self.notify.debug('Could not find cloud-%d' % cloudNumber)
def __hitCloudPlatform(self, avatar, collisionEntry, extraArgs = []):
if not self.hitBumper and not self.hitCloud:
self.hitCloud = 1
self.__hitBumper(avatar, collisionEntry, self.sndBounce1, kr=0.35, angVel=5)
self.hitBumper = 0
if self._lastCloudHit is None:
cloud = collisionEntry.getIntoNodePath().getParent()
self._lastCloudHit = cloud
cloud.setColor(base.localAvatar.style.getHeadColor())
cloudNumber = int(cloud.getNetTag('number'))
self.d_requestCloudHit(cloudNumber, base.localAvatar.style.getHeadColor())
self.__playDustCloud(avatar, collisionEntry.getSurfacePoint(render))
self.flyingToonCloudsHit += 1
taskMgr.doMethodLater(0.25, self.__reactivateLastCloudHit, DistributedPartyCannonActivity.REACTIVATE_CLOUD_TASK)
return
def __reactivateLastCloudHit(self, task):
self._lastCloudHit = None
return Task.done
def __handleFireworksStarted(self):
self.notify.debug('__handleFireworksStarted')
base.cr.playGame.hood.loader.fadeClouds()
def __handleFireworksFinished(self):
self.notify.debug('__handleFireworksFinished')
if self.__checkHoodValidity():
base.cr.playGame.hood.loader.fadeClouds()
else:
self.notify.debug('Toon has left the party')
    def __checkHoodValidity(self):
        playGame = base.cr.playGame
        return bool(hasattr(playGame, 'hood') and playGame.hood
                    and hasattr(playGame.hood, 'loader') and playGame.hood.loader
                    and hasattr(playGame.hood.loader, 'geom') and playGame.hood.loader.geom)
def handleToonExited(self, toonId):
self.notify.debug('DistributedPartyCannonActivity handleToonExited( toonId=%s ) ' % toonId)
if toonId in self.cr.doId2do:
self.notify.warning('handleToonExited is not defined')
blob_id: 1f726fed52fa48fbe105342a95685fb2dc0b7ab7
directory_id: b4108cbe9fae763ca6b26ff641fc7cab83e4f385
path: /tests/test_autots.py
content_id: 89897b1550f8830dbe084cc80e01380dfb597d28
detected_licenses: ["MIT"]
license_type: permissive
repo_name: winedarksea/AutoTS
snapshot_id: ac66d78168d2becca001eb7bd3aa3f8b343c5e28
revision_id: f2a332d2f681cd20ec277a5e1a996e2457e915d3
branch_name: refs/heads/master
visit_date: 2023-08-21T19:28:30.020737
revision_date: 2023-08-07T21:24:21
committer_date: 2023-08-07T21:24:21
github_id: 224,208,127
star_events_count: 827
fork_events_count: 83
gha_license_id: MIT
gha_event_created_at: 2023-08-11T00:15:00
gha_created_at: 2019-11-26T14:13:16
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 37,499
extension: py
filename: test_autots.py
content:
# -*- coding: utf-8 -*-
"""Overall testing."""
import unittest
import json
import time
import timeit
import tempfile
import os
import numpy as np
import pandas as pd
from autots.datasets import (
load_daily, load_monthly, load_artificial, load_sine
)
from autots import AutoTS, model_forecast
from autots.evaluator.auto_ts import fake_regressor
from autots.evaluator.auto_model import ModelMonster
from autots.models.model_list import default as default_model_list
from autots.models.model_list import all_models
from autots.evaluator.benchmark import Benchmark
class AutoTSTest(unittest.TestCase):
def test_autots(self):
print("Starting AutoTS class tests")
forecast_length = 8
long = False
df = load_daily(long=long)
n_jobs = 'auto'
verbose = 0
validation_method = "backwards"
generations = 1
num_validations = 2
models_to_validate = 0.25 # must be a decimal percent for this test
model_list = [
'ConstantNaive',
'LastValueNaive',
'AverageValueNaive',
'SeasonalNaive',
'DatepartRegression',
]
transformer_list = "fast" # ["SinTrend", "MinMaxScaler"]
transformer_max_depth = 3
metric_weighting = {
'smape_weighting': 3,
'mae_weighting': 1,
'rmse_weighting': 1,
'containment_weighting': 0,
'runtime_weighting': 0,
'spl_weighting': 1,
'contour_weighting': 1,
}
ensemble = [
'simple',
# 'distance',
# 'horizontal',
'horizontal-max',
# 'mosaic',
'mosaic-window',
'mosaic-crosshair'
# 'subsample',
# 'mlensemble',
]
model = AutoTS(
forecast_length=forecast_length,
frequency='infer',
prediction_interval=0.9,
ensemble=ensemble,
constraint=None,
max_generations=generations,
num_validations=num_validations,
validation_method=validation_method,
model_list=model_list,
transformer_list=transformer_list,
transformer_max_depth=transformer_max_depth,
initial_template='General+Random',
metric_weighting=metric_weighting,
models_to_validate=models_to_validate,
max_per_model_class=None,
model_interrupt="end_generation",
no_negatives=True,
subset=100,
n_jobs=n_jobs,
drop_most_recent=1,
verbose=verbose,
)
future_regressor_train2d, future_regressor_forecast2d = fake_regressor(
df,
dimensions=4,
forecast_length=forecast_length,
date_col='datetime' if long else None,
value_col='value' if long else None,
id_col='series_id' if long else None,
drop_most_recent=model.drop_most_recent,
aggfunc=model.aggfunc,
verbose=model.verbose,
)
model = model.fit(
df,
future_regressor=future_regressor_train2d,
date_col='datetime' if long else None,
value_col='value' if long else None,
id_col='series_id' if long else None,
)
prediction = model.predict(future_regressor=future_regressor_forecast2d, verbose=0)
long_form = prediction.long_form_results()
forecasts_df = prediction.forecast
initial_results = model.results()
validation_results = model.results("validation")
back_forecast = model.back_forecast(n_splits=2, verbose=0).forecast
# validated_count = (validation_results['Runs'] == (num_validations + 1)).sum()
# so these account for DROP MOST RECENT = 1
expected_idx = pd.date_range(
start=df.index[-2], periods=forecast_length + 1, freq='D'
)[1:]
expected_val1 = pd.date_range(
end=df.index[-(forecast_length + 2)], periods=forecast_length, freq='D'
)
expected_val2 = pd.date_range(
end=df.index[-(forecast_length * 2 + 2)], periods=forecast_length, freq='D'
)
template_dict = json.loads(model.best_model['ModelParameters'].iloc[0])
best_model_result = validation_results[validation_results['ID'] == model.best_model['ID'].iloc[0]]
# check there were few failed models in this simple setup (fancier models are expected to fail sometimes!)
self.assertGreater(initial_results['Exceptions'].isnull().mean(), 0.95, "Too many 'superfast' models failed. This can occur by random chance, try running again.")
# check general model setup
# self.assertEqual(validated_count, model.models_to_validate)
self.assertGreater(model.validation_template.size, (initial_results['ValidationRound'] == 0).sum() * models_to_validate - 2)
self.assertEqual(set(initial_results['Model'].unique().tolist()) - {'Ensemble', 'MLEnsemble'}, set(model.model_list))
self.assertFalse(model.best_model.empty)
# check the generated forecasts look right
self.assertEqual(forecasts_df.shape[0], forecast_length)
self.assertEqual(forecasts_df.shape[1], df.shape[1])
self.assertFalse(forecasts_df.isna().any().any())
self.assertTrue((forecasts_df >= 0).all().all())
self.assertEqual(forecast_length, len(forecasts_df.index))
self.assertTrue((expected_idx == pd.DatetimeIndex(forecasts_df.index)).all())
        # these next two could potentially fail if any inputs have a strong trend
self.assertTrue((forecasts_df.mean() <= (df.max()) + df.std()).all())
self.assertTrue((forecasts_df.mean() >= (df.min()) - df.std()).all())
# check all the checks work
self.assertEqual(model.ensemble_check, 1)
self.assertFalse(model.weighted)
self.assertFalse(model.subset_flag)
# assess 'backwards' validation
self.assertEqual(len(model.validation_test_indexes), num_validations + 1)
self.assertTrue(model.validation_test_indexes[1].intersection(model.validation_train_indexes[1]).empty)
self.assertTrue(model.validation_test_indexes[2].intersection(model.validation_train_indexes[2]).empty)
self.assertEqual(model.validation_train_indexes[1].shape[0], df.shape[0] - (forecast_length * 2 + 1)) # +1 via drop most recent
self.assertTrue((model.validation_test_indexes[1] == expected_val1).all())
self.assertTrue((model.validation_test_indexes[2] == expected_val2).all())
# assess Horizontal Ensembling
tested_horizontal = 'horizontal' in template_dict['model_name'].lower()
tested_mosaic = 'mosaic' in template_dict['model_name'].lower()
print(f"chosen model was mosaic: {tested_mosaic} or was horizontal: {tested_horizontal}")
self.assertTrue(tested_horizontal or tested_mosaic)
self.assertEqual(len(template_dict['series'].keys()), df.shape[1])
if tested_horizontal:
self.assertEqual(len(set(template_dict['series'].values())), template_dict['model_count'])
self.assertEqual(len(template_dict['models'].keys()), template_dict['model_count'])
# test that actually the best model (or nearly) was chosen
self.assertGreater(validation_results['Score'].quantile(0.05), best_model_result['Score'].iloc[0])
# test back_forecast
# self.assertTrue((back_forecast.index == model.df_wide_numeric.index).all(), msg="Back forecasting failed to have equivalent index to train.")
self.assertFalse(np.any(back_forecast.isnull()))
self.assertEqual(long_form.shape[0], forecasts_df.shape[0] * forecasts_df.shape[1] * 3)
# TEST EXPORTING A TEMPLATE THEN USING THE BEST MODEL AS A PREDICTION
df_train = df.iloc[:-forecast_length]
df_test = df.iloc[-forecast_length:]
        tf = tempfile.NamedTemporaryFile(suffix='.csv', prefix="autots_test", delete=False)
time.sleep(1)
name = tf.name
model.export_template(name, models="best", n=20, max_per_model_class=3)
model2 = AutoTS(
forecast_length=forecast_length,
frequency='infer',
prediction_interval=0.9,
ensemble='all',
constraint=None,
max_generations=generations,
num_validations=num_validations,
validation_method=validation_method,
model_list="update_fit",
transformer_list=transformer_list,
transformer_max_depth=transformer_max_depth,
initial_template='General+Random',
metric_weighting=metric_weighting,
models_to_validate=models_to_validate,
max_per_model_class=None,
model_interrupt=False,
no_negatives=True,
subset=100,
n_jobs=n_jobs,
drop_most_recent=1,
verbose=2,
)
# TEST MODEL PREDICT WITH LOWER LEVEL MODEL TRAINED ON PREVIOUS DATA ONLY
model2.import_best_model(tf.name, include_ensemble=False)
model2.fit_data(df_train, future_regressor=future_regressor_train2d.reindex(df_train.index))
prediction = model2.predict(future_regressor=future_regressor_forecast2d.reindex(df_test.index), verbose=0)
prediction.evaluate(df_test, df_train=df_train)
smape1 = prediction.avg_metrics['smape']
model2.fit_data(df, future_regressor=future_regressor_train2d)
prediction2 = model2.predict(future_regressor=future_regressor_forecast2d, verbose=0)
forecasts_df2 = prediction2.forecast
# now retrain on full data
model2.model = None
model2.fit_data(df, future_regressor=future_regressor_train2d)
prediction2 = model2.predict(future_regressor=future_regressor_forecast2d, verbose=0)
# and see if it got better on past holdout
model2.fit_data(df_train, future_regressor=future_regressor_train2d.reindex(df_train.index))
prediction = model2.predict(future_regressor=future_regressor_forecast2d.reindex(df_test.index), verbose=0)
prediction.evaluate(df_test, df_train=df_train)
smape2 = prediction.avg_metrics['smape']
print("=====================================================")
# smape2 should be better because it is trained on the very data it is supposed to predict
print(f"fit 1 SMAPE {smape1}, then refit with history SMAPE: {smape2}")
tf.close()
os.unlink(tf.name)
self.assertEqual(forecasts_df2.shape[0], forecast_length)
self.assertEqual(forecasts_df2.shape[1], df.shape[1])
self.assertFalse(forecasts_df2.isna().any().any())
def test_all_default_models(self):
print("Starting test_all_default_models")
forecast_length = 8
long = False
df = load_daily(long=long).drop(columns=['US.Total.Covid.Tests'], errors='ignore')
# to make it faster
df = df[df.columns[0:2]]
n_jobs = 'auto'
verbose = -1
validation_method = "backwards"
generations = 1
num_validations = 1
models_to_validate = 0.10 # must be a decimal percent for this test
model_list = "default"
transformer_list = "fast" # ["SinTrend", "MinMaxScaler"]
transformer_max_depth = 1
model = AutoTS(
forecast_length=forecast_length,
frequency='infer',
prediction_interval=0.9,
ensemble=["horizontal-max"],
constraint=None,
max_generations=generations,
num_validations=num_validations,
validation_method=validation_method,
model_list=model_list,
transformer_list=transformer_list,
transformer_max_depth=transformer_max_depth,
initial_template='Random',
models_to_validate=models_to_validate,
max_per_model_class=None,
n_jobs=n_jobs,
model_interrupt=True,
drop_most_recent=1,
verbose=verbose,
random_seed=1918,
)
model = model.fit(
df,
date_col='datetime' if long else None,
value_col='value' if long else None,
id_col='series_id' if long else None,
)
prediction = model.predict(verbose=0)
forecasts_df = prediction.forecast
initial_results = model.results()
validation_results = model.results("validation")
# validated_count = (validation_results['Runs'] == (num_validations + 1)).sum()
validated_count = (validation_results['Runs'] > 1).sum()
# so these account for DROP MOST RECENT = 1
expected_idx = pd.date_range(
start=df.index[-2], periods=forecast_length + 1, freq='D'
)[1:]
expected_val1 = pd.date_range(
end=df.index[-(forecast_length + 2)], periods=forecast_length, freq='D'
)
template_dict = json.loads(model.best_model['ModelParameters'].iloc[0])
best_model_result = validation_results[validation_results['ID'] == model.best_model['ID'].iloc[0]]
check_fails = initial_results.groupby("Model")["mae"].count() > 0
# check that all models had at least 1 success
self.assertEqual(set(initial_results['Model'].unique().tolist()) - {'Ensemble'}, set(default_model_list), msg="Not all models used in initial template.")
self.assertTrue(check_fails.all(), msg=f"These models failed: {check_fails[~check_fails].index.tolist()}. It is more likely a package install problem than a code problem")
# check general model setup
self.assertGreaterEqual(validated_count, model.models_to_validate)
self.assertGreater(model.models_to_validate, (initial_results['ValidationRound'] == 0).sum() * models_to_validate - 2)
self.assertFalse(model.best_model.empty)
# check the generated forecasts look right
self.assertEqual(forecasts_df.shape[0], forecast_length)
self.assertEqual(forecasts_df.shape[1], df.shape[1])
self.assertFalse(forecasts_df.isna().any().any())
self.assertEqual(forecast_length, len(forecasts_df.index))
self.assertTrue((expected_idx == pd.DatetimeIndex(forecasts_df.index)).all())
        # these next two could potentially fail if any inputs have a strong trend
self.assertTrue((forecasts_df.mean() <= (df.max()) + df.std()).all())
self.assertTrue((forecasts_df.mean() >= (df.min()) - df.std()).all())
# check all the checks work
self.assertEqual(model.ensemble_check, 1)
self.assertFalse(model.weighted)
self.assertFalse(model.subset_flag)
self.assertFalse(model.used_regressor_check)
# assess 'backwards' validation
val_1 = model.validation_test_indexes[1]
self.assertEqual(len(model.validation_test_indexes), num_validations + 1)
self.assertTrue(val_1.intersection(model.validation_train_indexes[1]).empty)
self.assertEqual(model.validation_train_indexes[1].shape[0], df.shape[0] - (forecast_length * 2 + 1)) # +1 via drop most recent
self.assertTrue((val_1 == expected_val1).all())
# assess Horizontal Ensembling
self.assertTrue('horizontal' in template_dict['model_name'].lower())
self.assertEqual(len(template_dict['series'].keys()), df.shape[1])
self.assertEqual(len(set(template_dict['series'].values())), template_dict['model_count'])
self.assertEqual(len(template_dict['models'].keys()), template_dict['model_count'])
# test that actually the best model (or nearly) was chosen
self.assertGreater(validation_results['Score'].quantile(0.05), best_model_result['Score'].iloc[0])
# test metrics
self.assertTrue(initial_results['Score'].min() > 0)
self.assertTrue(initial_results['mae'].min() >= 0)
self.assertTrue(initial_results['smape'].min() >= 0)
self.assertTrue(initial_results['rmse'].min() >= 0)
self.assertTrue(initial_results['contour'].min() >= 0)
self.assertTrue(initial_results['containment'].min() >= 0)
self.assertTrue(initial_results['TotalRuntimeSeconds'].min() >= 0)
self.assertTrue(initial_results['spl'].min() >= 0)
self.assertTrue(initial_results['contour'].min() <= 1)
self.assertTrue(initial_results['containment'].min() <= 1)
def test_load_datasets(self):
df = load_artificial(long=True)
df = load_monthly(long=True)
df = load_sine(long=False)
df = load_daily(long=False)
df = load_daily(long=True) # noqa
def test_new_params(self):
params = AutoTS.get_new_params()
self.assertIsInstance(params, dict)
def test_univariate1step(self):
print("Starting test_univariate1step")
df = load_artificial(long=False)
        df = df.iloc[:, :1]  # keep only one series; this test is univariate
forecast_length = 1
n_jobs = 1
verbose = -1
validation_method = "backwards"
generations = 1
model_list = [
'ConstantNaive',
'LastValueNaive',
'AverageValueNaive',
'SeasonalNaive',
]
model = AutoTS(
forecast_length=forecast_length,
frequency='infer',
max_generations=generations,
validation_method=validation_method,
model_list=model_list,
n_jobs=n_jobs,
verbose=verbose,
)
model = model.fit(
df,
)
prediction = model.predict(verbose=0)
forecasts_df = prediction.forecast
initial_results = model.results()
expected_idx = pd.date_range(
start=df.index[-1], periods=forecast_length + 1, freq='D'
)[1:]
check_fails = initial_results.groupby("Model")["mae"].count() > 0
self.assertTrue(check_fails.all(), msg=f"These models failed: {check_fails[~check_fails].index.tolist()}. It is more likely a package install problem than a code problem")
# check the generated forecasts look right
self.assertEqual(forecasts_df.shape[0], forecast_length)
self.assertEqual(forecasts_df.shape[1], df.shape[1])
self.assertFalse(forecasts_df.isna().any().any())
self.assertEqual(forecast_length, len(forecasts_df.index))
self.assertTrue((expected_idx == pd.DatetimeIndex(forecasts_df.index)).all())
def test_all_models_load(self):
print("Starting test_all_models_load")
# make sure it can at least load a template of all models
forecast_length = 8
n_jobs = 'auto'
verbose = 4
generations = 0
model_list = "all"
transformer_list = "all" # ["SinTrend", "MinMaxScaler"]
transformer_max_depth = 10
model = AutoTS(
forecast_length=forecast_length,
frequency='infer',
prediction_interval=0.9,
ensemble=["horizontal-max"],
constraint=None,
max_generations=generations,
model_list=model_list,
transformer_list=transformer_list,
transformer_max_depth=transformer_max_depth,
initial_template='Random',
max_per_model_class=None,
n_jobs=n_jobs,
model_interrupt=True,
drop_most_recent=1,
verbose=verbose,
)
self.assertFalse(model.initial_template.empty)
def test_benchmark(self):
print("Starting test_benchmark")
bench = Benchmark()
bench.run(times=1, verbose=-1)
self.assertGreater(bench.total_runtime, 0)
print(f"Benchmark total_runtime: {bench.total_runtime}")
print(bench.results)
time.sleep(5)
# test all same on univariate input, non-horizontal, with regressor, and different frequency, with forecast_length = 1 !
# the big ones are:
# 1. that validations are sampled correctly
# 2. that accuracy metrics are performed and aggregated correctly
# test template import and export
# test result saving and import
# test seasonal validation
# test score generation + metric_weighting
# test very short training data and/or lots of NaNs in data
# test on all models that for each model, failure rate is < 100%
class ModelTest(unittest.TestCase):
def test_models_get_params(self):
"""See if new random params can be generated without error."""
default_methods = ['deep', 'fast', 'random', 'default', 'superfast', 'regressor', 'event_risk']
for method in default_methods:
for model_str in all_models:
ModelMonster(model_str).get_new_params(method=method)
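    # Regression-test strategy for test_models: run each model with a fixed
    # random_seed and compare rounded forecasts against snapshots stored in
    # ./tests/model_forecasts.json; models in run_only_no_score are executed
    # for crash-testing only because their output is not reproducible across
    # sessions.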
def test_models(self):
"""Test if models are the same as saved comparisons."""
print("Starting test_models")
n_jobs = 1
random_seed = 300
df = load_daily(long=False).iloc[:, 0:5]
df = df[df.index < "2022-10-04"] # update dataset and have not yet updated stored model results
df = df[df.index > "2017-10-04"] # update dataset and have not yet updated stored model results
models = [
'SectionalMotif', 'MultivariateMotif', 'AverageValueNaive',
'NVAR', "LastValueNaive", 'Theta', 'FBProphet', 'SeasonalNaive',
'GLM', 'ETS', "ConstantNaive", 'WindowRegression',
'DatepartRegression', 'MultivariateRegression',
'Cassandra', 'MetricMotif', 'SeasonalityMotif', 'KalmanStateSpace',
'ARDL', 'UnivariateMotif', 'VAR', 'MAR', 'TMF', 'RRVAR', 'VECM',
]
        # models that for whatever reason aren't consistent across test sessions
run_only_no_score = ['FBProphet', 'RRVAR', "TMF"]
timings = {}
forecasts = {}
upper_forecasts = {}
lower_forecasts = {}
# load the comparison source
with open("./tests/model_forecasts.json", "r") as file:
loaded = json.load(file)
for x in models:
forecasts[x] = pd.DataFrame.from_dict(loaded['forecasts'][x], orient="columns")
forecasts[x]['index'] = pd.to_datetime(forecasts[x]['index'], infer_datetime_format=True)
forecasts[x] = forecasts[x].set_index("index")
upper_forecasts[x] = pd.DataFrame.from_dict(loaded['upper_forecasts'][x], orient="columns")
upper_forecasts[x]['index'] = pd.to_datetime(upper_forecasts[x]['index'], infer_datetime_format=True)
upper_forecasts[x] = upper_forecasts[x].set_index("index")
lower_forecasts[x] = pd.DataFrame.from_dict(loaded['lower_forecasts'][x], orient="columns")
lower_forecasts[x]['index'] = pd.to_datetime(lower_forecasts[x]['index'], infer_datetime_format=True)
lower_forecasts[x] = lower_forecasts[x].set_index("index")
timings = loaded['timing']
timings2 = {}
forecasts2 = {}
upper_forecasts2 = {}
lower_forecasts2 = {}
# following are not consistent with seed:
# "MotifSimulation"
for x in models:
print(x)
try:
start_time = timeit.default_timer()
df_forecast = model_forecast(
model_name=x,
model_param_dict={}, # 'return_result_windows': True
model_transform_dict={
"fillna": "ffill",
"transformations": {"0": "StandardScaler"},
"transformation_params": {"0": {}},
},
df_train=df,
forecast_length=5,
frequency="D",
prediction_interval=0.9,
random_seed=random_seed,
verbose=0,
# bug in sklearn 1.1.2 for n_jobs for RandomForest
n_jobs=n_jobs if x != "WindowRegression" else 2,
return_model=True,
)
forecasts2[x] = df_forecast.forecast.round(2)
upper_forecasts2[x] = df_forecast.upper_forecast.round(2)
lower_forecasts2[x] = df_forecast.lower_forecast.round(2)
timings2[x] = (timeit.default_timer() - start_time)
except Exception as e:
raise ValueError(f"model {x} failed with {repr(e)}")
print(sum(timings.values()))
pass_probabilistic = ['FBProphet'] # not yet reproducible in upper/lower with seed
for x in models:
if x not in run_only_no_score:
with self.subTest(i=x):
res = (forecasts2[x].round(2) == forecasts[x].round(2)).all().all()
if x not in pass_probabilistic:
res_u = (upper_forecasts2[x].round(2) == upper_forecasts[x].round(2)).all().all()
res_l = (lower_forecasts2[x].round(2) == lower_forecasts[x].round(2)).all().all()
else:
res_u = True
res_l = True
self.assertTrue(
res,
f"Model '{x}' forecasts diverged from sample forecasts."
)
self.assertTrue(
res_u,
f"Model '{x}' upper forecasts diverged from sample forecasts."
)
self.assertTrue(
res_l,
f"Model '{x}' lower forecasts diverged from sample forecasts."
)
print(f"{res & res_u & res_l} model '{x}' ran successfully in {round(timings2[x], 4)} (bench: {round(timings[x], 4)})")
"""
for x in models:
forecasts[x].index = forecasts[x].index.strftime("%Y-%m-%d")
forecasts[x] = forecasts[x].reset_index(drop=False).to_dict(orient="list")
upper_forecasts[x].index = upper_forecasts[x].index.strftime("%Y-%m-%d")
upper_forecasts[x] = upper_forecasts[x].reset_index(drop=False).to_dict(orient="list")
lower_forecasts[x].index = lower_forecasts[x].index.strftime("%Y-%m-%d")
lower_forecasts[x] = lower_forecasts[x].reset_index(drop=False).to_dict(orient="list")
with open("./tests/model_forecasts.json", "w") as file:
json.dump(
{
'forecasts': forecasts,
"upper_forecasts": upper_forecasts,
"lower_forecasts": lower_forecasts,
"timing": timings,
}, file
)
"""
def test_transforms(self):
"""Test if transformers meet saved comparison outputs."""
print("Starting test_transforms")
n_jobs = 1
random_seed = 300
df = load_monthly(long=False)[['CSUSHPISA', 'EMVOVERALLEMV', 'EXCAUS']]
transforms = [
'MinMaxScaler', 'PowerTransformer', 'QuantileTransformer',
'MaxAbsScaler', 'StandardScaler', 'RobustScaler',
'PCA', 'FastICA', "DatepartRegression",
"EWMAFilter", 'STLFilter', 'HPFilter', 'Detrend', 'Slice',
'ScipyFilter', 'Round', 'ClipOutliers', 'IntermittentOccurrence',
'CenterLastValue', 'Discretize', 'SeasonalDifference',
'RollingMeanTransformer', 'bkfilter', 'cffilter', 'Log',
'DifferencedTransformer', 'PctChangeTransformer', 'PositiveShift',
'SineTrend', 'convolution_filter', 'CumSumTransformer',
'AlignLastValue', # new 0.4.3
'AnomalyRemoval', "HolidayTransformer", # new 0.5.0
'LocalLinearTrend', # new 0.5.1
"KalmanSmoothing", # new 0.5.1
# "RegressionFilter" # new 0.5.7
]
timings = {}
forecasts = {}
upper_forecasts = {}
lower_forecasts = {}
# load the comparison source
with open("./tests/transform_forecasts.json", "r") as file:
loaded = json.load(file)
for x in transforms:
forecasts[x] = pd.DataFrame.from_dict(loaded['forecasts'][x], orient="columns")
forecasts[x]['index'] = pd.to_datetime(forecasts[x]['index'], infer_datetime_format=True)
forecasts[x] = forecasts[x].set_index("index")
upper_forecasts[x] = pd.DataFrame.from_dict(loaded['upper_forecasts'][x], orient="columns")
upper_forecasts[x]['index'] = pd.to_datetime(upper_forecasts[x]['index'], infer_datetime_format=True)
upper_forecasts[x] = upper_forecasts[x].set_index("index")
lower_forecasts[x] = pd.DataFrame.from_dict(loaded['lower_forecasts'][x], orient="columns")
lower_forecasts[x]['index'] = pd.to_datetime(lower_forecasts[x]['index'], infer_datetime_format=True)
lower_forecasts[x] = lower_forecasts[x].set_index("index")
timings = loaded['timing']
timings2 = {}
forecasts2 = {}
upper_forecasts2 = {}
lower_forecasts2 = {}
for x in transforms:
print(x)
param = {} if x not in ['QuantileTransformer'] else {"n_quantiles": 100}
start_time = timeit.default_timer()
df_forecast = model_forecast(
model_name="LastValueNaive",
model_param_dict={}, # 'return_result_windows': True
model_transform_dict={
"fillna": "ffill",
"transformations": {"0": x},
"transformation_params": {"0": param},
},
df_train=df,
forecast_length=5,
frequency="M",
prediction_interval=0.9,
random_seed=random_seed,
verbose=-1,
n_jobs=n_jobs,
return_model=True,
)
forecasts2[x] = df_forecast.forecast.round(2)
upper_forecasts2[x] = df_forecast.upper_forecast.round(2)
lower_forecasts2[x] = df_forecast.lower_forecast.round(2)
timings2[x] = (timeit.default_timer() - start_time)
print(sum(timings2.values()))
pass_probabilistic = ['FastICA'] # not reproducible in upper/lower with seed
for x in transforms:
with self.subTest(i=x):
res = (forecasts2[x].round(2) == forecasts[x].round(2)).all().all()
if x not in pass_probabilistic:
res_u = (upper_forecasts2[x].round(2) == upper_forecasts[x].round(2)).all().all()
res_l = (lower_forecasts2[x].round(2) == lower_forecasts[x].round(2)).all().all()
else:
res_u = True
res_l = True
self.assertTrue(
res,
f"Model '{x}' forecasts diverged from sample forecasts."
)
self.assertTrue(
res_u,
f"Model '{x}' upper forecasts diverged from sample forecasts."
)
self.assertTrue(
res_l,
f"Model '{x}' lower forecasts diverged from sample forecasts."
)
print(f"{res & res_u & res_l} model '{x}' ran successfully in {round(timings2[x], 4)} (bench: {round(timings[x], 4)})")
"""
for x in transforms:
forecasts[x].index = forecasts[x].index.strftime("%Y-%m-%d")
forecasts[x] = forecasts[x].reset_index(drop=False).to_dict(orient="list")
upper_forecasts[x].index = upper_forecasts[x].index.strftime("%Y-%m-%d")
upper_forecasts[x] = upper_forecasts[x].reset_index(drop=False).to_dict(orient="list")
lower_forecasts[x].index = lower_forecasts[x].index.strftime("%Y-%m-%d")
lower_forecasts[x] = lower_forecasts[x].reset_index(drop=False).to_dict(orient="list")
with open("./tests/transform_forecasts.json", "w") as file:
json.dump(
{
'forecasts': forecasts,
"upper_forecasts": upper_forecasts,
"lower_forecasts": lower_forecasts,
"timing": timings,
}, file
)
"""
def test_sklearn(self):
from autots import load_daily
from autots import create_regressor
from autots.models.sklearn import MultivariateRegression, DatepartRegression, WindowRegression
df = load_daily(long=False)
forecast_length = 8
df_train = df.iloc[:-forecast_length]
df_test = df.iloc[-forecast_length:]
future_regressor_train, future_regressor_forecast = create_regressor(
df_train,
forecast_length=forecast_length,
frequency="infer",
drop_most_recent=0,
scale=True,
summarize="auto",
backfill="bfill",
fill_na="spline",
holiday_countries={"US": None}, # requires holidays package
encode_holiday_type=True,
)
random_seed = 300
frequency = 'D'
prediction_interval = 0.9
verbose = -1
n_jobs = 2
params = MultivariateRegression().get_new_params()
params = {
'regression_model': {'model': 'LightGBM',
'model_params': {
'objective': 'regression',
'learning_rate': 0.1,
'num_leaves': 31,
'max_depth': 10,
'boosting_type': 'goss',
'n_estimators': 250,
'linear_tree': False
}},
'mean_rolling_periods': 90,
'macd_periods': 12,
'std_rolling_periods': 7,
'max_rolling_periods': None,
'min_rolling_periods': None,
'quantile90_rolling_periods': 7,
'quantile10_rolling_periods': 10,
'ewm_alpha': 0.8,
'ewm_var_alpha': None,
'additional_lag_periods': None,
'abs_energy': False,
'rolling_autocorr_periods': None,
'datepart_method': 'expanded',
'polynomial_degree': None,
'regression_type': None,
'window': 3,
'holiday': True,
'probabilistic': False,
'cointegration': None,
'cointegration_lag': 1
}
model = MultivariateRegression(
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
**params
)
model.fit(df_train)
first_forecast = model.predict(future_regressor=future_regressor_forecast)
self.assertListEqual(first_forecast.forecast.index.tolist(), df_test.index.tolist())
model.fit_data(df)
updated_forecast = model.predict()
self.assertEqual(updated_forecast.forecast.shape[0], forecast_length)
self.assertTrue(updated_forecast.forecast.index[0] > df.index[-1])
params = WindowRegression().get_new_params()
params = {}
model = WindowRegression(
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
**params
)
model.fit(df_train.fillna(method='ffill'))
first_forecast = model.predict(future_regressor=future_regressor_forecast)
# first_forecast.plot_grid(df)
self.assertListEqual(first_forecast.forecast.index.tolist(), df_test.index.tolist())
model.fit_data(df)
updated_forecast = model.predict()
# updated_forecast.plot_grid(df)
self.assertEqual(updated_forecast.forecast.shape[0], forecast_length)
self.assertTrue(updated_forecast.forecast.index[0] > df.index[-1])
params = {
'regression_model': {
'model': 'ExtraTrees',
'model_params': {
'n_estimators': 500,
'min_samples_leaf': 1,
'max_depth': 20
}},
'datepart_method': 'simple_binarized',
'polynomial_degree': None,
'regression_type': None,
}
model = DatepartRegression(
forecast_length=forecast_length,
frequency=frequency,
prediction_interval=prediction_interval,
random_seed=random_seed,
verbose=verbose,
n_jobs=n_jobs,
**params
)
model.fit(df_train)
first_forecast = model.predict(future_regressor=future_regressor_forecast)
self.assertListEqual(first_forecast.forecast.index.tolist(), df_test.index.tolist())
model.fit_data(df)
updated_forecast = model.predict()
self.assertEqual(updated_forecast.forecast.shape[0], forecast_length)
self.assertTrue(updated_forecast.forecast.index[0] > df.index[-1])
blob_id: 331fa2102d987d240ae965ec216ada567c6be13a
directory_id: a0736beb3269a71b2f5b13cafe8fb5e7f6f540f4
path: /src/scripts/schedule_jobs.py
content_id: 97ed1658dd4508831ced3e74b67958ea4f30d70a
detected_licenses: ["Apache-2.0", "CC-BY-4.0"]
license_type: permissive
repo_name: GoogleCloudPlatform/covid-19-open-data
snapshot_id: d9e467fadbfc71686e30e28f3ce1d438e8fd92ba
revision_id: 1123ce02a0b4059d6acd4c4446f3f9b8335018f1
branch_name: refs/heads/main
visit_date: 2023-08-02T23:57:12.785363
revision_date: 2022-10-23T22:26:29
committer_date: 2022-10-23T22:26:29
github_id: 282,079,576
star_events_count: 489
fork_events_count: 146
gha_license_id: Apache-2.0
gha_event_created_at: 2022-09-05T12:00:37
gha_created_at: 2020-07-23T23:43:51
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 10,336
extension: py
filename: schedule_jobs.py
content:
#!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script schedules all the jobs to be dispatched to AppEngine.
"""
import os
import sys
from argparse import ArgumentParser
from functools import partial
from typing import List
from google.cloud import scheduler_v1
from google.cloud.scheduler_v1.types import AppEngineHttpTarget, Duration, Job
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# pylint: disable=wrong-import-position
from lib.constants import GCP_LOCATION, SRC
from lib.memory_efficient import table_read_column
from lib.pipeline_tools import get_pipelines
def _split_into_subsets(items: List[str], bin_count: int):
""" Produce subsets of the given list divided into equal `bin_count` bins """
bin_size = len(items) // bin_count
for idx in range(bin_count - 1):
yield items[bin_size * idx : bin_size * (idx + 1)]
    # The last bin might have up to `bin_count - 1` additional items
yield items[bin_size * (bin_count - 1) :]
def clear_jobs(
client: scheduler_v1.CloudSchedulerClient, project_id: str, location_id: str
) -> None:
""" Delete all scheduled jobs """
parent = client.location_path(project_id, location_id)
for job in client.list_jobs(parent):
client.delete_job(job.name)
def schedule_job(
client: scheduler_v1.CloudSchedulerClient,
project_id: str,
location_id: str,
time_zone: str,
schedule: str,
path: str,
) -> None:
""" Schedules the given job for the specified project and location """
# Create a Job to schedule
target = AppEngineHttpTarget(relative_uri=path, http_method="GET")
timeout = Duration(seconds=2 * 60 * 60) # 2 hours.
job = Job(
app_engine_http_target=target,
schedule=schedule,
time_zone=time_zone,
attempt_deadline=timeout,
)
# Schedule the Job we just created
parent = client.location_path(project_id, location_id)
client.create_job(parent, job)
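# Illustrative call (project and location ids are hypothetical): schedule an
# hourly GET against the cache pull endpoint defined in this repository:
#     schedule_job(client, project_id="my-project", location_id="us-east1",
#                  time_zone="America/New_York", schedule="0 * * * *",
#                  path="/deferred/cache_pull")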
def schedule_all_jobs(project_id: str, location_id: str, time_zone: str) -> None:
"""
Clears all previously scheduled jobs and schedules all necessary jobs for the current
configuration.
"""
client = scheduler_v1.CloudSchedulerClient()
# Create a custom method with our parameters for ease of use
_schedule_job = partial(
schedule_job,
client=client,
project_id=project_id,
location_id=location_id,
time_zone=time_zone,
)
# Clear all pre-existing jobs
clear_jobs(client=client, project_id=project_id, location_id=location_id)
# Read the list of all known locations, since we will be splitting some jobs based on that
location_keys = list(table_read_column(SRC / "data" / "metadata.csv", "key"))
# Cache pull job runs hourly
_schedule_job(schedule="0 * * * *", path="/deferred/cache_pull")
# Get new errors once a day at midday.
_schedule_job(path="/deferred/report_errors_to_github", schedule="0 12 * * *")
# Keep track of the different job groups to only output them once
job_urls_seen = set()
for data_pipeline in get_pipelines():
# The job that combines data sources into a table runs hourly
_schedule_job(
path=f"/deferred/combine_table?table={data_pipeline.table}",
# Offset by 15 minutes to let other hourly tasks finish
schedule="15 * * * *",
)
for data_source in data_pipeline.data_sources:
automation_opts = data_source.config.get("automation", {})
# The job to pull each individual data source runs hourly unless specified otherwise
job_sched = automation_opts.get("schedule", "0 * * * *")
# If the job is deferred, then prepend the token to the path
job_prefix = "/deferred" if automation_opts.get("deferred", True) else ""
# Each data source has a job group. All data sources within the same job group are run
# as part of the same job in series. The default job group is "default".
job_group = automation_opts.get("job_group", "default")
job_url = f"{job_prefix}/update_table?table={data_pipeline.table}&job_group={job_group}"
if job_url not in job_urls_seen:
job_urls_seen.add(job_url)
_schedule_job(path=job_url, schedule=job_sched)
########
# V2 publish jobs
########
# The job that publishes combined tables into the prod bucket runs every 2 hours
_schedule_job(
# Run in a separate, preemptible instance
path="/deferred/publish_tables",
# Offset by 30 minutes to let other hourly tasks finish
schedule="30 */2 * * *",
)
# The job that publishes aggregate outputs runs every 4 hours
_schedule_job(
# Run in a separate, preemptible instance
path="/deferred/publish_main_table",
# Offset by 60 minutes to let other hourly tasks finish
schedule="0 1-23/4 * * *",
)
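    # Cron refresher for the "1-23/4" expressions used here and below: the hour
    # field matches hours 1, 5, 9, 13, 17 and 21, so the job still runs every
    # 4 hours but staggered relative to the even-hour publish jobs above.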
# The job that publishes breakdown outputs runs every 4 hours
_schedule_job(
path="/deferred/publish_subset_tables",
# Offset by 90 minutes to run after publishing
schedule="30 1-23/4 * * *",
)
# Converting the outputs to JSON is less critical but also slow so it's run separately
_schedule_job(
path=f"/deferred/publish_json_tables?prod_folder=v2",
# Offset by 120 minutes to run after subset tables are published
schedule="0 2-23/4 * * *",
)
for subset in _split_into_subsets(location_keys, bin_count=5):
job_params = f"prod_folder=v2&location_key_from={subset[0]}&location_key_until={subset[-1]}"
_schedule_job(
path=f"/deferred/publish_json_locations?{job_params}",
# Offset by 120 minutes to run after subset tables are published
schedule="0 2-23/4 * * *",
)
# Publish an index of versions for each global table
_schedule_job(
path=f"/deferred/publish_versions?prod_folder=v2",
# Run this job hourly
schedule="0 * * * *",
)
########
# V3 publish jobs
########
# Publish the global tables (with all location keys) every 2 hours
_schedule_job(
path="/deferred/publish_global_tables?prod_folder=v3",
# Offset by 30 minutes to let other hourly tasks finish
schedule="30 */2 * * *",
)
# Publish the latest subset for all tables every 2 hours
_schedule_job(
path="/deferred/publish_v3_latest_tables",
        # Offset by 60 minutes to execute after the v3 publish_global_tables job finishes
schedule="0 1-23/2 * * *",
)
# Convert the global tables to JSON
_schedule_job(
path=f"/deferred/publish_json_tables?prod_folder=v3",
        # Offset by 60 minutes to execute after the v3 publish_global_tables job finishes
schedule="0 1-23/2 * * *",
)
# Break down the outputs by location key every 2 hours, and execute the job in chunks
for subset in _split_into_subsets(location_keys, bin_count=5):
job_params = f"location_key_from={subset[0]}&location_key_until={subset[-1]}"
_schedule_job(
path=f"/deferred/publish_v3_location_subsets?{job_params}",
            # Offset by 60 minutes to execute after the v3 publish_global_tables job finishes
schedule="0 1-23/2 * * *",
)
# Publish the main aggregated table every 2 hours
_schedule_job(
path="/deferred/publish_v3_main_table",
# Offset by 90 minutes to execute after publish_v3_location_subsets finishes
schedule="30 1-23/2 * * *",
)
# Publish outputs in JSON format every 2 hours, and execute the job in chunks
for subset in _split_into_subsets(location_keys, bin_count=5):
job_params = f"prod_folder=v3&location_key_from={subset[0]}&location_key_until={subset[-1]}"
_schedule_job(
path=f"/deferred/publish_json_locations?{job_params}",
# Offset by 90 minutes to execute after publish_v3_location_subsets finishes
schedule="30 1-23/2 * * *",
)
# Publish an index of versions for each global table
_schedule_job(
path=f"/deferred/publish_versions?prod_folder=v3",
# Run this job hourly
schedule="0 * * * *",
)
# Publish the metadata, including description, schema and data sources for each table
_schedule_job(
path=f"/deferred/publish_metadata?prod_folder=v3",
# Run this job daily
schedule="0 0 * * *",
)
# Publish a map of data sources for each global table
_schedule_job(
path=f"/deferred/publish_sources?prod_folder=v3",
# Run this job daily
schedule="0 0 * * *",
)
if __name__ == "__main__":
# Get default values from environment
default_project = os.environ.get("GCP_PROJECT")
default_location = os.environ.get("GCP_LOCATION", GCP_LOCATION)
default_time_zone = os.environ.get("GCP_TIME_ZONE", "America/New_York")
# Parse arguments from the command line
argparser = ArgumentParser()
argparser.add_argument("--project-id", type=str, default=default_project)
argparser.add_argument("--location-id", type=str, default=default_location)
argparser.add_argument("--time-zone", type=str, default=default_time_zone)
args = argparser.parse_args()
# Ensure project ID is not empty, since we don't have a default value for it
assert args.project_id is not None, 'Argument "project-id" must not be empty'
# Clear all preexisting jobs and schedule the new ones, this assumes the current code has
# already been successfully deployed to GAE in a previous build step
schedule_all_jobs(args.project_id, args.location_id, args.time_zone)
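    # Example invocation (hypothetical project id); location and time zone fall
    # back to the environment-derived defaults above when the flags are omitted:
    #     python schedule_jobs.py --project-id my-gcp-project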
|
e46389530a8eb74f5a49ce0220a61f1dec69821d
|
b049a961f100444dde14599bab06a0a4224d869b
|
/sdk/python/pulumi_azure_native/botservice/_inputs.py
|
6a8116d04f15bfdb4acab2440a5c36cbdae968f4
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-azure-native
|
b390c88beef8381f9a71ab2bed5571e0dd848e65
|
4c499abe17ec6696ce28477dde1157372896364e
|
refs/heads/master
| 2023-08-30T08:19:41.564780
| 2023-08-28T19:29:04
| 2023-08-28T19:29:04
| 172,386,632
| 107
| 29
|
Apache-2.0
| 2023-09-14T13:17:00
| 2019-02-24T20:30:21
|
Python
|
UTF-8
|
Python
| false
| false
| 159,145
|
py
|
_inputs.py
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'AcsChatChannelArgs',
'AlexaChannelPropertiesArgs',
'AlexaChannelArgs',
'BotPropertiesArgs',
'ConnectionSettingParameterArgs',
'ConnectionSettingPropertiesArgs',
'DirectLineChannelPropertiesArgs',
'DirectLineChannelArgs',
'DirectLineSiteArgs',
'DirectLineSpeechChannelPropertiesArgs',
'DirectLineSpeechChannelArgs',
'EmailChannelPropertiesArgs',
'EmailChannelArgs',
'FacebookChannelPropertiesArgs',
'FacebookChannelArgs',
'FacebookPageArgs',
'KikChannelPropertiesArgs',
'KikChannelArgs',
'LineChannelPropertiesArgs',
'LineChannelArgs',
'LineRegistrationArgs',
'M365ExtensionsArgs',
'MsTeamsChannelPropertiesArgs',
'MsTeamsChannelArgs',
'OmnichannelArgs',
'OutlookChannelArgs',
'PrivateLinkServiceConnectionStateArgs',
'SearchAssistantArgs',
'SkuArgs',
'SkypeChannelPropertiesArgs',
'SkypeChannelArgs',
'SlackChannelPropertiesArgs',
'SlackChannelArgs',
'SmsChannelPropertiesArgs',
'SmsChannelArgs',
'TelegramChannelPropertiesArgs',
'TelegramChannelArgs',
'TelephonyChannelPropertiesArgs',
'TelephonyChannelResourceApiConfigurationArgs',
'TelephonyChannelArgs',
'TelephonyPhoneNumbersArgs',
'WebChatChannelPropertiesArgs',
'WebChatChannelArgs',
'WebChatSiteArgs',
]
@pulumi.input_type
class AcsChatChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None):
"""
AcsChat channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'AcsChatChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
"""
pulumi.set(__self__, "channel_name", 'AcsChatChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'AcsChatChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@pulumi.input_type
class AlexaChannelPropertiesArgs:
def __init__(__self__, *,
alexa_skill_id: pulumi.Input[str],
is_enabled: pulumi.Input[bool]):
"""
The parameters to provide for the Alexa channel.
:param pulumi.Input[str] alexa_skill_id: The Alexa skill Id
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
"""
pulumi.set(__self__, "alexa_skill_id", alexa_skill_id)
pulumi.set(__self__, "is_enabled", is_enabled)
@property
@pulumi.getter(name="alexaSkillId")
def alexa_skill_id(self) -> pulumi.Input[str]:
"""
The Alexa skill Id
"""
return pulumi.get(self, "alexa_skill_id")
@alexa_skill_id.setter
def alexa_skill_id(self, value: pulumi.Input[str]):
pulumi.set(self, "alexa_skill_id", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@pulumi.input_type
class AlexaChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['AlexaChannelPropertiesArgs']] = None):
"""
Alexa channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'AlexaChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['AlexaChannelPropertiesArgs'] properties: The set of properties specific to Alexa channel resource
"""
pulumi.set(__self__, "channel_name", 'AlexaChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'AlexaChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['AlexaChannelPropertiesArgs']]:
"""
The set of properties specific to Alexa channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['AlexaChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class BotPropertiesArgs:
def __init__(__self__, *,
display_name: pulumi.Input[str],
endpoint: pulumi.Input[str],
msa_app_id: pulumi.Input[str],
all_settings: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
app_password_hint: Optional[pulumi.Input[str]] = None,
cmek_key_vault_url: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
developer_app_insight_key: Optional[pulumi.Input[str]] = None,
developer_app_insights_api_key: Optional[pulumi.Input[str]] = None,
developer_app_insights_application_id: Optional[pulumi.Input[str]] = None,
disable_local_auth: Optional[pulumi.Input[bool]] = None,
icon_url: Optional[pulumi.Input[str]] = None,
is_cmek_enabled: Optional[pulumi.Input[bool]] = None,
is_streaming_supported: Optional[pulumi.Input[bool]] = None,
luis_app_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
luis_key: Optional[pulumi.Input[str]] = None,
manifest_url: Optional[pulumi.Input[str]] = None,
msa_app_msi_resource_id: Optional[pulumi.Input[str]] = None,
msa_app_tenant_id: Optional[pulumi.Input[str]] = None,
msa_app_type: Optional[pulumi.Input[Union[str, 'MsaAppType']]] = None,
open_with_hint: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
public_network_access: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]] = None,
publishing_credentials: Optional[pulumi.Input[str]] = None,
schema_transformation_version: Optional[pulumi.Input[str]] = None,
storage_resource_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None):
"""
The parameters to provide for the Bot.
:param pulumi.Input[str] display_name: The Name of the bot
:param pulumi.Input[str] endpoint: The bot's endpoint
:param pulumi.Input[str] msa_app_id: Microsoft App Id for the bot
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] all_settings: Contains all resource settings defined as key/value pairs.
:param pulumi.Input[str] app_password_hint: The hint (e.g. keyVault secret resourceId) on how to fetch the app secret
        :param pulumi.Input[str] cmek_key_vault_url: The CMEK Key Vault URL
:param pulumi.Input[str] description: The description of the bot
:param pulumi.Input[str] developer_app_insight_key: The Application Insights key
:param pulumi.Input[str] developer_app_insights_api_key: The Application Insights Api Key
:param pulumi.Input[str] developer_app_insights_application_id: The Application Insights App Id
        :param pulumi.Input[bool] disable_local_auth: Opt out of local authentication so that only MSI and AAD can be used for authentication.
:param pulumi.Input[str] icon_url: The Icon Url of the bot
:param pulumi.Input[bool] is_cmek_enabled: Whether Cmek is enabled
        :param pulumi.Input[bool] is_streaming_supported: Whether streaming is supported by the bot
:param pulumi.Input[Sequence[pulumi.Input[str]]] luis_app_ids: Collection of LUIS App Ids
:param pulumi.Input[str] luis_key: The LUIS Key
:param pulumi.Input[str] manifest_url: The bot's manifest url
:param pulumi.Input[str] msa_app_msi_resource_id: Microsoft App Managed Identity Resource Id for the bot
:param pulumi.Input[str] msa_app_tenant_id: Microsoft App Tenant Id for the bot
:param pulumi.Input[Union[str, 'MsaAppType']] msa_app_type: Microsoft App Type for the bot
:param pulumi.Input[str] open_with_hint: The hint to browser (e.g. protocol handler) on how to open the bot for authoring
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Contains resource parameters defined as key/value pairs.
:param pulumi.Input[Union[str, 'PublicNetworkAccess']] public_network_access: Whether the bot is in an isolated network
:param pulumi.Input[str] publishing_credentials: Publishing credentials of the resource
:param pulumi.Input[str] schema_transformation_version: The channel schema transformation version for the bot
:param pulumi.Input[str] storage_resource_id: The storage resourceId for the bot
:param pulumi.Input[str] tenant_id: The Tenant Id for the bot
"""
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "endpoint", endpoint)
pulumi.set(__self__, "msa_app_id", msa_app_id)
if all_settings is not None:
pulumi.set(__self__, "all_settings", all_settings)
if app_password_hint is not None:
pulumi.set(__self__, "app_password_hint", app_password_hint)
if cmek_key_vault_url is not None:
pulumi.set(__self__, "cmek_key_vault_url", cmek_key_vault_url)
if description is not None:
pulumi.set(__self__, "description", description)
if developer_app_insight_key is not None:
pulumi.set(__self__, "developer_app_insight_key", developer_app_insight_key)
if developer_app_insights_api_key is not None:
pulumi.set(__self__, "developer_app_insights_api_key", developer_app_insights_api_key)
if developer_app_insights_application_id is not None:
pulumi.set(__self__, "developer_app_insights_application_id", developer_app_insights_application_id)
if disable_local_auth is not None:
pulumi.set(__self__, "disable_local_auth", disable_local_auth)
if icon_url is None:
icon_url = ''
if icon_url is not None:
pulumi.set(__self__, "icon_url", icon_url)
if is_cmek_enabled is None:
is_cmek_enabled = False
if is_cmek_enabled is not None:
pulumi.set(__self__, "is_cmek_enabled", is_cmek_enabled)
if is_streaming_supported is None:
is_streaming_supported = False
if is_streaming_supported is not None:
pulumi.set(__self__, "is_streaming_supported", is_streaming_supported)
if luis_app_ids is not None:
pulumi.set(__self__, "luis_app_ids", luis_app_ids)
if luis_key is not None:
pulumi.set(__self__, "luis_key", luis_key)
if manifest_url is not None:
pulumi.set(__self__, "manifest_url", manifest_url)
if msa_app_msi_resource_id is not None:
pulumi.set(__self__, "msa_app_msi_resource_id", msa_app_msi_resource_id)
if msa_app_tenant_id is not None:
pulumi.set(__self__, "msa_app_tenant_id", msa_app_tenant_id)
if msa_app_type is not None:
pulumi.set(__self__, "msa_app_type", msa_app_type)
if open_with_hint is not None:
pulumi.set(__self__, "open_with_hint", open_with_hint)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if public_network_access is None:
public_network_access = 'Enabled'
if public_network_access is not None:
pulumi.set(__self__, "public_network_access", public_network_access)
if publishing_credentials is not None:
pulumi.set(__self__, "publishing_credentials", publishing_credentials)
if schema_transformation_version is not None:
pulumi.set(__self__, "schema_transformation_version", schema_transformation_version)
if storage_resource_id is not None:
pulumi.set(__self__, "storage_resource_id", storage_resource_id)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Input[str]:
"""
The Name of the bot
"""
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: pulumi.Input[str]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter
def endpoint(self) -> pulumi.Input[str]:
"""
The bot's endpoint
"""
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: pulumi.Input[str]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter(name="msaAppId")
def msa_app_id(self) -> pulumi.Input[str]:
"""
Microsoft App Id for the bot
"""
return pulumi.get(self, "msa_app_id")
@msa_app_id.setter
def msa_app_id(self, value: pulumi.Input[str]):
pulumi.set(self, "msa_app_id", value)
@property
@pulumi.getter(name="allSettings")
def all_settings(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        Contains all resource settings defined as key/value pairs.
"""
return pulumi.get(self, "all_settings")
@all_settings.setter
def all_settings(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "all_settings", value)
@property
@pulumi.getter(name="appPasswordHint")
def app_password_hint(self) -> Optional[pulumi.Input[str]]:
"""
The hint (e.g. keyVault secret resourceId) on how to fetch the app secret
"""
return pulumi.get(self, "app_password_hint")
@app_password_hint.setter
def app_password_hint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_password_hint", value)
@property
@pulumi.getter(name="cmekKeyVaultUrl")
def cmek_key_vault_url(self) -> Optional[pulumi.Input[str]]:
"""
        The CMEK Key Vault URL
"""
return pulumi.get(self, "cmek_key_vault_url")
@cmek_key_vault_url.setter
def cmek_key_vault_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cmek_key_vault_url", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The description of the bot
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="developerAppInsightKey")
def developer_app_insight_key(self) -> Optional[pulumi.Input[str]]:
"""
The Application Insights key
"""
return pulumi.get(self, "developer_app_insight_key")
@developer_app_insight_key.setter
def developer_app_insight_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "developer_app_insight_key", value)
@property
@pulumi.getter(name="developerAppInsightsApiKey")
def developer_app_insights_api_key(self) -> Optional[pulumi.Input[str]]:
"""
The Application Insights Api Key
"""
return pulumi.get(self, "developer_app_insights_api_key")
@developer_app_insights_api_key.setter
def developer_app_insights_api_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "developer_app_insights_api_key", value)
@property
@pulumi.getter(name="developerAppInsightsApplicationId")
def developer_app_insights_application_id(self) -> Optional[pulumi.Input[str]]:
"""
The Application Insights App Id
"""
return pulumi.get(self, "developer_app_insights_application_id")
@developer_app_insights_application_id.setter
def developer_app_insights_application_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "developer_app_insights_application_id", value)
@property
@pulumi.getter(name="disableLocalAuth")
def disable_local_auth(self) -> Optional[pulumi.Input[bool]]:
"""
        Opt out of local authentication so that only MSI and AAD can be used for authentication.
"""
return pulumi.get(self, "disable_local_auth")
@disable_local_auth.setter
def disable_local_auth(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_local_auth", value)
@property
@pulumi.getter(name="iconUrl")
def icon_url(self) -> Optional[pulumi.Input[str]]:
"""
The Icon Url of the bot
"""
return pulumi.get(self, "icon_url")
@icon_url.setter
def icon_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "icon_url", value)
@property
@pulumi.getter(name="isCmekEnabled")
def is_cmek_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether Cmek is enabled
"""
return pulumi.get(self, "is_cmek_enabled")
@is_cmek_enabled.setter
def is_cmek_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_cmek_enabled", value)
@property
@pulumi.getter(name="isStreamingSupported")
def is_streaming_supported(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether streaming is supported by the bot
"""
return pulumi.get(self, "is_streaming_supported")
@is_streaming_supported.setter
def is_streaming_supported(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_streaming_supported", value)
@property
@pulumi.getter(name="luisAppIds")
def luis_app_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Collection of LUIS App Ids
"""
return pulumi.get(self, "luis_app_ids")
@luis_app_ids.setter
def luis_app_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "luis_app_ids", value)
@property
@pulumi.getter(name="luisKey")
def luis_key(self) -> Optional[pulumi.Input[str]]:
"""
The LUIS Key
"""
return pulumi.get(self, "luis_key")
@luis_key.setter
def luis_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "luis_key", value)
@property
@pulumi.getter(name="manifestUrl")
def manifest_url(self) -> Optional[pulumi.Input[str]]:
"""
The bot's manifest url
"""
return pulumi.get(self, "manifest_url")
@manifest_url.setter
def manifest_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "manifest_url", value)
@property
@pulumi.getter(name="msaAppMSIResourceId")
def msa_app_msi_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
Microsoft App Managed Identity Resource Id for the bot
"""
return pulumi.get(self, "msa_app_msi_resource_id")
@msa_app_msi_resource_id.setter
def msa_app_msi_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "msa_app_msi_resource_id", value)
@property
@pulumi.getter(name="msaAppTenantId")
def msa_app_tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
Microsoft App Tenant Id for the bot
"""
return pulumi.get(self, "msa_app_tenant_id")
@msa_app_tenant_id.setter
def msa_app_tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "msa_app_tenant_id", value)
@property
@pulumi.getter(name="msaAppType")
def msa_app_type(self) -> Optional[pulumi.Input[Union[str, 'MsaAppType']]]:
"""
Microsoft App Type for the bot
"""
return pulumi.get(self, "msa_app_type")
@msa_app_type.setter
def msa_app_type(self, value: Optional[pulumi.Input[Union[str, 'MsaAppType']]]):
pulumi.set(self, "msa_app_type", value)
@property
@pulumi.getter(name="openWithHint")
def open_with_hint(self) -> Optional[pulumi.Input[str]]:
"""
The hint to browser (e.g. protocol handler) on how to open the bot for authoring
"""
return pulumi.get(self, "open_with_hint")
@open_with_hint.setter
def open_with_hint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "open_with_hint", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Contains resource parameters defined as key/value pairs.
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="publicNetworkAccess")
def public_network_access(self) -> Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]:
"""
Whether the bot is in an isolated network
"""
return pulumi.get(self, "public_network_access")
@public_network_access.setter
def public_network_access(self, value: Optional[pulumi.Input[Union[str, 'PublicNetworkAccess']]]):
pulumi.set(self, "public_network_access", value)
@property
@pulumi.getter(name="publishingCredentials")
def publishing_credentials(self) -> Optional[pulumi.Input[str]]:
"""
Publishing credentials of the resource
"""
return pulumi.get(self, "publishing_credentials")
@publishing_credentials.setter
def publishing_credentials(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "publishing_credentials", value)
@property
@pulumi.getter(name="schemaTransformationVersion")
def schema_transformation_version(self) -> Optional[pulumi.Input[str]]:
"""
The channel schema transformation version for the bot
"""
return pulumi.get(self, "schema_transformation_version")
@schema_transformation_version.setter
def schema_transformation_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "schema_transformation_version", value)
@property
@pulumi.getter(name="storageResourceId")
def storage_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The storage resourceId for the bot
"""
return pulumi.get(self, "storage_resource_id")
@storage_resource_id.setter
def storage_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_resource_id", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The Tenant Id for the bot
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@pulumi.input_type
class ConnectionSettingParameterArgs:
def __init__(__self__, *,
key: Optional[pulumi.Input[str]] = None,
value: Optional[pulumi.Input[str]] = None):
"""
Extra Parameter in a Connection Setting Properties to indicate service provider specific properties
:param pulumi.Input[str] key: Key for the Connection Setting Parameter.
:param pulumi.Input[str] value: Value associated with the Connection Setting Parameter.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
"""
Key for the Connection Setting Parameter.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def value(self) -> Optional[pulumi.Input[str]]:
"""
Value associated with the Connection Setting Parameter.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ConnectionSettingPropertiesArgs:
def __init__(__self__, *,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
parameters: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionSettingParameterArgs']]]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
scopes: Optional[pulumi.Input[str]] = None,
service_provider_display_name: Optional[pulumi.Input[str]] = None,
service_provider_id: Optional[pulumi.Input[str]] = None):
"""
Properties for a Connection Setting Item
:param pulumi.Input[str] client_id: Client Id associated with the Connection Setting.
:param pulumi.Input[str] client_secret: Client Secret associated with the Connection Setting
:param pulumi.Input[str] id: Id of the Connection Setting.
:param pulumi.Input[str] name: Name of the Connection Setting.
:param pulumi.Input[Sequence[pulumi.Input['ConnectionSettingParameterArgs']]] parameters: Service Provider Parameters associated with the Connection Setting
:param pulumi.Input[str] provisioning_state: Provisioning state of the resource
:param pulumi.Input[str] scopes: Scopes associated with the Connection Setting
:param pulumi.Input[str] service_provider_display_name: Service Provider Display Name associated with the Connection Setting
:param pulumi.Input[str] service_provider_id: Service Provider Id associated with the Connection Setting
"""
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if id is not None:
pulumi.set(__self__, "id", id)
if name is not None:
pulumi.set(__self__, "name", name)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if scopes is None:
scopes = ''
if scopes is not None:
pulumi.set(__self__, "scopes", scopes)
if service_provider_display_name is not None:
pulumi.set(__self__, "service_provider_display_name", service_provider_display_name)
if service_provider_id is not None:
pulumi.set(__self__, "service_provider_id", service_provider_id)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
"""
Client Id associated with the Connection Setting.
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[pulumi.Input[str]]:
"""
Client Secret associated with the Connection Setting
"""
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Id of the Connection Setting.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Connection Setting.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionSettingParameterArgs']]]]:
"""
Service Provider Parameters associated with the Connection Setting
"""
return pulumi.get(self, "parameters")
@parameters.setter
def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ConnectionSettingParameterArgs']]]]):
pulumi.set(self, "parameters", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
Provisioning state of the resource
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter
def scopes(self) -> Optional[pulumi.Input[str]]:
"""
Scopes associated with the Connection Setting
"""
return pulumi.get(self, "scopes")
@scopes.setter
def scopes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scopes", value)
@property
@pulumi.getter(name="serviceProviderDisplayName")
def service_provider_display_name(self) -> Optional[pulumi.Input[str]]:
"""
Service Provider Display Name associated with the Connection Setting
"""
return pulumi.get(self, "service_provider_display_name")
@service_provider_display_name.setter
def service_provider_display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_provider_display_name", value)
@property
@pulumi.getter(name="serviceProviderId")
def service_provider_id(self) -> Optional[pulumi.Input[str]]:
"""
Service Provider Id associated with the Connection Setting
"""
return pulumi.get(self, "service_provider_id")
@service_provider_id.setter
def service_provider_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_provider_id", value)
@pulumi.input_type
class DirectLineChannelPropertiesArgs:
def __init__(__self__, *,
direct_line_embed_code: Optional[pulumi.Input[str]] = None,
extension_key1: Optional[pulumi.Input[str]] = None,
extension_key2: Optional[pulumi.Input[str]] = None,
sites: Optional[pulumi.Input[Sequence[pulumi.Input['DirectLineSiteArgs']]]] = None):
"""
The parameters to provide for the Direct Line channel.
:param pulumi.Input[str] direct_line_embed_code: Direct Line embed code of the resource
:param pulumi.Input[str] extension_key1: The extensionKey1
:param pulumi.Input[str] extension_key2: The extensionKey2
:param pulumi.Input[Sequence[pulumi.Input['DirectLineSiteArgs']]] sites: The list of Direct Line sites
"""
if direct_line_embed_code is not None:
pulumi.set(__self__, "direct_line_embed_code", direct_line_embed_code)
if extension_key1 is None:
extension_key1 = ''
if extension_key1 is not None:
pulumi.set(__self__, "extension_key1", extension_key1)
if extension_key2 is None:
extension_key2 = ''
if extension_key2 is not None:
pulumi.set(__self__, "extension_key2", extension_key2)
if sites is not None:
pulumi.set(__self__, "sites", sites)
@property
@pulumi.getter(name="directLineEmbedCode")
def direct_line_embed_code(self) -> Optional[pulumi.Input[str]]:
"""
Direct Line embed code of the resource
"""
return pulumi.get(self, "direct_line_embed_code")
@direct_line_embed_code.setter
def direct_line_embed_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "direct_line_embed_code", value)
@property
@pulumi.getter(name="extensionKey1")
def extension_key1(self) -> Optional[pulumi.Input[str]]:
"""
The extensionKey1
"""
return pulumi.get(self, "extension_key1")
@extension_key1.setter
def extension_key1(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "extension_key1", value)
@property
@pulumi.getter(name="extensionKey2")
def extension_key2(self) -> Optional[pulumi.Input[str]]:
"""
The extensionKey2
"""
return pulumi.get(self, "extension_key2")
@extension_key2.setter
def extension_key2(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "extension_key2", value)
@property
@pulumi.getter
def sites(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DirectLineSiteArgs']]]]:
"""
The list of Direct Line sites
"""
return pulumi.get(self, "sites")
@sites.setter
def sites(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DirectLineSiteArgs']]]]):
pulumi.set(self, "sites", value)
@pulumi.input_type
class DirectLineChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['DirectLineChannelPropertiesArgs']] = None):
"""
Direct Line channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'DirectLineChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['DirectLineChannelPropertiesArgs'] properties: The set of properties specific to Direct Line channel resource
"""
pulumi.set(__self__, "channel_name", 'DirectLineChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'DirectLineChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['DirectLineChannelPropertiesArgs']]:
"""
The set of properties specific to Direct Line channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['DirectLineChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class DirectLineSiteArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
is_v1_enabled: pulumi.Input[bool],
is_v3_enabled: pulumi.Input[bool],
site_name: pulumi.Input[str],
app_id: Optional[pulumi.Input[str]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
is_block_user_upload_enabled: Optional[pulumi.Input[bool]] = None,
is_detailed_logging_enabled: Optional[pulumi.Input[bool]] = None,
is_endpoint_parameters_enabled: Optional[pulumi.Input[bool]] = None,
is_no_storage_enabled: Optional[pulumi.Input[bool]] = None,
is_secure_site_enabled: Optional[pulumi.Input[bool]] = None,
is_web_chat_speech_enabled: Optional[pulumi.Input[bool]] = None,
is_webchat_preview_enabled: Optional[pulumi.Input[bool]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
trusted_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A site for the Direct Line channel
:param pulumi.Input[bool] is_enabled: Whether this site is enabled for DirectLine channel
:param pulumi.Input[bool] is_v1_enabled: Whether this site is enabled for Bot Framework V1 protocol.
:param pulumi.Input[bool] is_v3_enabled: Whether this site is enabled for Bot Framework V3 protocol.
:param pulumi.Input[str] site_name: Site name
:param pulumi.Input[str] app_id: DirectLine application id
:param pulumi.Input[str] e_tag: Entity Tag
        :param pulumi.Input[bool] is_block_user_upload_enabled: Whether blocking of user uploads is enabled for this site.
        :param pulumi.Input[bool] is_detailed_logging_enabled: Whether detailed logging is disabled for this site
        :param pulumi.Input[bool] is_endpoint_parameters_enabled: Whether EndpointParameters is enabled for this site's channel
        :param pulumi.Input[bool] is_no_storage_enabled: Whether detailed logging is disabled for this no-storage site
:param pulumi.Input[bool] is_secure_site_enabled: Whether this site is enabled for authentication with Bot Framework.
:param pulumi.Input[bool] is_web_chat_speech_enabled: Whether this site is enabled for Webchat Speech
:param pulumi.Input[bool] is_webchat_preview_enabled: Whether this site is enabled for preview versions of Webchat
:param pulumi.Input[str] tenant_id: Tenant Id
:param pulumi.Input[Sequence[pulumi.Input[str]]] trusted_origins: List of Trusted Origin URLs for this site. This field is applicable only if isSecureSiteEnabled is True.
"""
pulumi.set(__self__, "is_enabled", is_enabled)
pulumi.set(__self__, "is_v1_enabled", is_v1_enabled)
pulumi.set(__self__, "is_v3_enabled", is_v3_enabled)
pulumi.set(__self__, "site_name", site_name)
if app_id is not None:
pulumi.set(__self__, "app_id", app_id)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if is_block_user_upload_enabled is not None:
pulumi.set(__self__, "is_block_user_upload_enabled", is_block_user_upload_enabled)
if is_detailed_logging_enabled is not None:
pulumi.set(__self__, "is_detailed_logging_enabled", is_detailed_logging_enabled)
if is_endpoint_parameters_enabled is not None:
pulumi.set(__self__, "is_endpoint_parameters_enabled", is_endpoint_parameters_enabled)
if is_no_storage_enabled is not None:
pulumi.set(__self__, "is_no_storage_enabled", is_no_storage_enabled)
if is_secure_site_enabled is not None:
pulumi.set(__self__, "is_secure_site_enabled", is_secure_site_enabled)
if is_web_chat_speech_enabled is None:
is_web_chat_speech_enabled = False
if is_web_chat_speech_enabled is not None:
pulumi.set(__self__, "is_web_chat_speech_enabled", is_web_chat_speech_enabled)
if is_webchat_preview_enabled is None:
is_webchat_preview_enabled = False
if is_webchat_preview_enabled is not None:
pulumi.set(__self__, "is_webchat_preview_enabled", is_webchat_preview_enabled)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if trusted_origins is not None:
pulumi.set(__self__, "trusted_origins", trusted_origins)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this site is enabled for DirectLine channel
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="isV1Enabled")
def is_v1_enabled(self) -> pulumi.Input[bool]:
"""
Whether this site is enabled for Bot Framework V1 protocol.
"""
return pulumi.get(self, "is_v1_enabled")
@is_v1_enabled.setter
def is_v1_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_v1_enabled", value)
@property
@pulumi.getter(name="isV3Enabled")
def is_v3_enabled(self) -> pulumi.Input[bool]:
"""
Whether this site is enabled for Bot Framework V3 protocol.
"""
return pulumi.get(self, "is_v3_enabled")
@is_v3_enabled.setter
def is_v3_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_v3_enabled", value)
@property
@pulumi.getter(name="siteName")
def site_name(self) -> pulumi.Input[str]:
"""
Site name
"""
return pulumi.get(self, "site_name")
@site_name.setter
def site_name(self, value: pulumi.Input[str]):
pulumi.set(self, "site_name", value)
@property
@pulumi.getter(name="appId")
def app_id(self) -> Optional[pulumi.Input[str]]:
"""
DirectLine application id
"""
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag
"""
return pulumi.get(self, "e_tag")
@e_tag.setter
def e_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "e_tag", value)
@property
@pulumi.getter(name="isBlockUserUploadEnabled")
def is_block_user_upload_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether blocking of user uploads is enabled for this site.
"""
return pulumi.get(self, "is_block_user_upload_enabled")
@is_block_user_upload_enabled.setter
def is_block_user_upload_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_block_user_upload_enabled", value)
@property
@pulumi.getter(name="isDetailedLoggingEnabled")
def is_detailed_logging_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether detailed logging is disabled for this site
"""
return pulumi.get(self, "is_detailed_logging_enabled")
@is_detailed_logging_enabled.setter
def is_detailed_logging_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_detailed_logging_enabled", value)
@property
@pulumi.getter(name="isEndpointParametersEnabled")
def is_endpoint_parameters_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether EndpointParameters is enabled for this site's channel
"""
return pulumi.get(self, "is_endpoint_parameters_enabled")
@is_endpoint_parameters_enabled.setter
def is_endpoint_parameters_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_endpoint_parameters_enabled", value)
@property
@pulumi.getter(name="isNoStorageEnabled")
def is_no_storage_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether detailed logging is disabled for this no-storage site
"""
return pulumi.get(self, "is_no_storage_enabled")
@is_no_storage_enabled.setter
def is_no_storage_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_no_storage_enabled", value)
@property
@pulumi.getter(name="isSecureSiteEnabled")
def is_secure_site_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for authentication with Bot Framework.
"""
return pulumi.get(self, "is_secure_site_enabled")
@is_secure_site_enabled.setter
def is_secure_site_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_secure_site_enabled", value)
@property
@pulumi.getter(name="isWebChatSpeechEnabled")
def is_web_chat_speech_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for Webchat Speech
"""
return pulumi.get(self, "is_web_chat_speech_enabled")
@is_web_chat_speech_enabled.setter
def is_web_chat_speech_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_web_chat_speech_enabled", value)
@property
@pulumi.getter(name="isWebchatPreviewEnabled")
def is_webchat_preview_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for preview versions of Webchat
"""
return pulumi.get(self, "is_webchat_preview_enabled")
@is_webchat_preview_enabled.setter
def is_webchat_preview_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_webchat_preview_enabled", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
Tenant Id
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter(name="trustedOrigins")
def trusted_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of Trusted Origin URLs for this site. This field is applicable only if isSecureSiteEnabled is True.
"""
return pulumi.get(self, "trusted_origins")
@trusted_origins.setter
def trusted_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "trusted_origins", value)
@pulumi.input_type
class DirectLineSpeechChannelPropertiesArgs:
def __init__(__self__, *,
cognitive_service_region: Optional[pulumi.Input[str]] = None,
cognitive_service_resource_id: Optional[pulumi.Input[str]] = None,
cognitive_service_subscription_key: Optional[pulumi.Input[str]] = None,
custom_speech_model_id: Optional[pulumi.Input[str]] = None,
custom_voice_deployment_id: Optional[pulumi.Input[str]] = None,
is_default_bot_for_cog_svc_account: Optional[pulumi.Input[bool]] = None,
is_enabled: Optional[pulumi.Input[bool]] = None):
"""
The parameters to provide for the DirectLine Speech channel.
:param pulumi.Input[str] cognitive_service_region: The cognitive service region with this channel registration.
:param pulumi.Input[str] cognitive_service_resource_id: The cognitive service id with this channel registration.
:param pulumi.Input[str] cognitive_service_subscription_key: The cognitive service subscription key to use with this channel registration.
        :param pulumi.Input[str] custom_speech_model_id: Custom speech model id (optional).
        :param pulumi.Input[str] custom_voice_deployment_id: Custom voice deployment id (optional).
:param pulumi.Input[bool] is_default_bot_for_cog_svc_account: Make this a default bot for chosen cognitive service account.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled or not.
"""
if cognitive_service_region is not None:
pulumi.set(__self__, "cognitive_service_region", cognitive_service_region)
if cognitive_service_resource_id is not None:
pulumi.set(__self__, "cognitive_service_resource_id", cognitive_service_resource_id)
if cognitive_service_subscription_key is not None:
pulumi.set(__self__, "cognitive_service_subscription_key", cognitive_service_subscription_key)
if custom_speech_model_id is not None:
pulumi.set(__self__, "custom_speech_model_id", custom_speech_model_id)
if custom_voice_deployment_id is not None:
pulumi.set(__self__, "custom_voice_deployment_id", custom_voice_deployment_id)
if is_default_bot_for_cog_svc_account is not None:
pulumi.set(__self__, "is_default_bot_for_cog_svc_account", is_default_bot_for_cog_svc_account)
if is_enabled is not None:
pulumi.set(__self__, "is_enabled", is_enabled)
@property
@pulumi.getter(name="cognitiveServiceRegion")
def cognitive_service_region(self) -> Optional[pulumi.Input[str]]:
"""
The cognitive service region with this channel registration.
"""
return pulumi.get(self, "cognitive_service_region")
@cognitive_service_region.setter
def cognitive_service_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_region", value)
@property
@pulumi.getter(name="cognitiveServiceResourceId")
def cognitive_service_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The cognitive service id with this channel registration.
"""
return pulumi.get(self, "cognitive_service_resource_id")
@cognitive_service_resource_id.setter
def cognitive_service_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_resource_id", value)
@property
@pulumi.getter(name="cognitiveServiceSubscriptionKey")
def cognitive_service_subscription_key(self) -> Optional[pulumi.Input[str]]:
"""
The cognitive service subscription key to use with this channel registration.
"""
return pulumi.get(self, "cognitive_service_subscription_key")
@cognitive_service_subscription_key.setter
def cognitive_service_subscription_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_subscription_key", value)
@property
@pulumi.getter(name="customSpeechModelId")
def custom_speech_model_id(self) -> Optional[pulumi.Input[str]]:
"""
        Custom speech model id (optional).
"""
return pulumi.get(self, "custom_speech_model_id")
@custom_speech_model_id.setter
def custom_speech_model_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_speech_model_id", value)
@property
@pulumi.getter(name="customVoiceDeploymentId")
def custom_voice_deployment_id(self) -> Optional[pulumi.Input[str]]:
"""
        Custom voice deployment id (optional).
"""
return pulumi.get(self, "custom_voice_deployment_id")
@custom_voice_deployment_id.setter
def custom_voice_deployment_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_voice_deployment_id", value)
@property
@pulumi.getter(name="isDefaultBotForCogSvcAccount")
def is_default_bot_for_cog_svc_account(self) -> Optional[pulumi.Input[bool]]:
"""
Make this a default bot for chosen cognitive service account.
"""
return pulumi.get(self, "is_default_bot_for_cog_svc_account")
@is_default_bot_for_cog_svc_account.setter
def is_default_bot_for_cog_svc_account(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_default_bot_for_cog_svc_account", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this channel is enabled or not.
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_enabled", value)
@pulumi.input_type
class DirectLineSpeechChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['DirectLineSpeechChannelPropertiesArgs']] = None):
"""
DirectLine Speech channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'DirectLineSpeechChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['DirectLineSpeechChannelPropertiesArgs'] properties: The set of properties specific to DirectLine Speech channel resource
"""
pulumi.set(__self__, "channel_name", 'DirectLineSpeechChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'DirectLineSpeechChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['DirectLineSpeechChannelPropertiesArgs']]:
"""
The set of properties specific to DirectLine Speech channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['DirectLineSpeechChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class EmailChannelPropertiesArgs:
def __init__(__self__, *,
email_address: pulumi.Input[str],
is_enabled: pulumi.Input[bool],
auth_method: Optional[pulumi.Input[float]] = None,
magic_code: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None):
"""
The parameters to provide for the Email channel.
:param pulumi.Input[str] email_address: The email address
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[float] auth_method: Email channel auth method. 0 Password (Default); 1 Graph.
:param pulumi.Input[str] magic_code: The magic code for setting up the modern authentication.
:param pulumi.Input[str] password: The password for the email address. Value only returned through POST to the action Channel List API, otherwise empty.
"""
pulumi.set(__self__, "email_address", email_address)
pulumi.set(__self__, "is_enabled", is_enabled)
if auth_method is not None:
pulumi.set(__self__, "auth_method", auth_method)
if magic_code is not None:
pulumi.set(__self__, "magic_code", magic_code)
if password is not None:
pulumi.set(__self__, "password", password)
@property
@pulumi.getter(name="emailAddress")
def email_address(self) -> pulumi.Input[str]:
"""
The email address
"""
return pulumi.get(self, "email_address")
@email_address.setter
def email_address(self, value: pulumi.Input[str]):
pulumi.set(self, "email_address", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="authMethod")
def auth_method(self) -> Optional[pulumi.Input[float]]:
"""
Email channel auth method. 0 Password (Default); 1 Graph.
"""
return pulumi.get(self, "auth_method")
@auth_method.setter
def auth_method(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "auth_method", value)
@property
@pulumi.getter(name="magicCode")
def magic_code(self) -> Optional[pulumi.Input[str]]:
"""
The magic code for setting up the modern authentication.
"""
return pulumi.get(self, "magic_code")
@magic_code.setter
def magic_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "magic_code", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
The password for the email address. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@pulumi.input_type
class EmailChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['EmailChannelPropertiesArgs']] = None):
"""
Email channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'EmailChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['EmailChannelPropertiesArgs'] properties: The set of properties specific to email channel resource
"""
pulumi.set(__self__, "channel_name", 'EmailChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'EmailChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['EmailChannelPropertiesArgs']]:
"""
The set of properties specific to email channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['EmailChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class FacebookChannelPropertiesArgs:
def __init__(__self__, *,
app_id: pulumi.Input[str],
is_enabled: pulumi.Input[bool],
app_secret: Optional[pulumi.Input[str]] = None,
pages: Optional[pulumi.Input[Sequence[pulumi.Input['FacebookPageArgs']]]] = None):
"""
The parameters to provide for the Facebook channel.
:param pulumi.Input[str] app_id: Facebook application id
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[str] app_secret: Facebook application secret. Value only returned through POST to the action Channel List API, otherwise empty.
:param pulumi.Input[Sequence[pulumi.Input['FacebookPageArgs']]] pages: The list of Facebook pages
"""
pulumi.set(__self__, "app_id", app_id)
pulumi.set(__self__, "is_enabled", is_enabled)
if app_secret is not None:
pulumi.set(__self__, "app_secret", app_secret)
if pages is not None:
pulumi.set(__self__, "pages", pages)
@property
@pulumi.getter(name="appId")
def app_id(self) -> pulumi.Input[str]:
"""
Facebook application id
"""
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: pulumi.Input[str]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="appSecret")
def app_secret(self) -> Optional[pulumi.Input[str]]:
"""
Facebook application secret. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "app_secret")
@app_secret.setter
def app_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_secret", value)
@property
@pulumi.getter
def pages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FacebookPageArgs']]]]:
"""
The list of Facebook pages
"""
return pulumi.get(self, "pages")
@pages.setter
def pages(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FacebookPageArgs']]]]):
pulumi.set(self, "pages", value)
@pulumi.input_type
class FacebookChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['FacebookChannelPropertiesArgs']] = None):
"""
Facebook channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'FacebookChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['FacebookChannelPropertiesArgs'] properties: The set of properties specific to bot facebook channel
"""
pulumi.set(__self__, "channel_name", 'FacebookChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'FacebookChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['FacebookChannelPropertiesArgs']]:
"""
The set of properties specific to bot facebook channel
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['FacebookChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class FacebookPageArgs:
def __init__(__self__, *,
id: pulumi.Input[str],
access_token: Optional[pulumi.Input[str]] = None):
"""
A Facebook page for Facebook channel registration
:param pulumi.Input[str] id: Page id
:param pulumi.Input[str] access_token: Facebook application access token. Value only returned through POST to the action Channel List API, otherwise empty.
"""
pulumi.set(__self__, "id", id)
if access_token is not None:
pulumi.set(__self__, "access_token", access_token)
@property
@pulumi.getter
def id(self) -> pulumi.Input[str]:
"""
Page id
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: pulumi.Input[str]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> Optional[pulumi.Input[str]]:
"""
Facebook application access token. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "access_token")
@access_token.setter
def access_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_token", value)
@pulumi.input_type
class KikChannelPropertiesArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
user_name: pulumi.Input[str],
api_key: Optional[pulumi.Input[str]] = None,
is_validated: Optional[pulumi.Input[bool]] = None):
"""
The parameters to provide for the Kik channel.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[str] user_name: The Kik user name
:param pulumi.Input[str] api_key: Kik API key. Value only returned through POST to the action Channel List API, otherwise empty.
:param pulumi.Input[bool] is_validated: Whether this channel is validated for the bot
"""
pulumi.set(__self__, "is_enabled", is_enabled)
pulumi.set(__self__, "user_name", user_name)
if api_key is not None:
pulumi.set(__self__, "api_key", api_key)
if is_validated is not None:
pulumi.set(__self__, "is_validated", is_validated)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="userName")
def user_name(self) -> pulumi.Input[str]:
"""
The Kik user name
"""
return pulumi.get(self, "user_name")
@user_name.setter
def user_name(self, value: pulumi.Input[str]):
pulumi.set(self, "user_name", value)
@property
@pulumi.getter(name="apiKey")
def api_key(self) -> Optional[pulumi.Input[str]]:
"""
Kik API key. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "api_key")
@api_key.setter
def api_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_key", value)
@property
@pulumi.getter(name="isValidated")
def is_validated(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this channel is validated for the bot
"""
return pulumi.get(self, "is_validated")
@is_validated.setter
def is_validated(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_validated", value)
@pulumi.input_type
class KikChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['KikChannelPropertiesArgs']] = None):
"""
Kik channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'KikChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['KikChannelPropertiesArgs'] properties: The set of properties specific to Kik channel resource
"""
pulumi.set(__self__, "channel_name", 'KikChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'KikChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['KikChannelPropertiesArgs']]:
"""
The set of properties specific to Kik channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['KikChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class LineChannelPropertiesArgs:
def __init__(__self__, *,
line_registrations: pulumi.Input[Sequence[pulumi.Input['LineRegistrationArgs']]]):
"""
The parameters to provide for the Line channel.
:param pulumi.Input[Sequence[pulumi.Input['LineRegistrationArgs']]] line_registrations: The list of line channel registrations
"""
pulumi.set(__self__, "line_registrations", line_registrations)
@property
@pulumi.getter(name="lineRegistrations")
def line_registrations(self) -> pulumi.Input[Sequence[pulumi.Input['LineRegistrationArgs']]]:
"""
The list of line channel registrations
"""
return pulumi.get(self, "line_registrations")
@line_registrations.setter
def line_registrations(self, value: pulumi.Input[Sequence[pulumi.Input['LineRegistrationArgs']]]):
pulumi.set(self, "line_registrations", value)
@pulumi.input_type
class LineChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['LineChannelPropertiesArgs']] = None):
"""
Line channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'LineChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['LineChannelPropertiesArgs'] properties: The set of properties specific to line channel resource
"""
pulumi.set(__self__, "channel_name", 'LineChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'LineChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['LineChannelPropertiesArgs']]:
"""
The set of properties specific to line channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['LineChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class LineRegistrationArgs:
def __init__(__self__, *,
channel_access_token: Optional[pulumi.Input[str]] = None,
channel_secret: Optional[pulumi.Input[str]] = None):
"""
The properties corresponding to a line channel registration
:param pulumi.Input[str] channel_access_token: Access token for the line channel registration
:param pulumi.Input[str] channel_secret: Secret for the line channel registration
"""
if channel_access_token is not None:
pulumi.set(__self__, "channel_access_token", channel_access_token)
if channel_secret is not None:
pulumi.set(__self__, "channel_secret", channel_secret)
@property
@pulumi.getter(name="channelAccessToken")
def channel_access_token(self) -> Optional[pulumi.Input[str]]:
"""
Access token for the line channel registration
"""
return pulumi.get(self, "channel_access_token")
@channel_access_token.setter
def channel_access_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "channel_access_token", value)
@property
@pulumi.getter(name="channelSecret")
def channel_secret(self) -> Optional[pulumi.Input[str]]:
"""
Secret for the line channel registration
"""
return pulumi.get(self, "channel_secret")
@channel_secret.setter
def channel_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "channel_secret", value)
@pulumi.input_type
class M365ExtensionsArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None):
"""
M365 Extensions definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'M365Extensions'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
"""
pulumi.set(__self__, "channel_name", 'M365Extensions')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'M365Extensions'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@pulumi.input_type
class MsTeamsChannelPropertiesArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
accepted_terms: Optional[pulumi.Input[bool]] = None,
calling_webhook: Optional[pulumi.Input[str]] = None,
deployment_environment: Optional[pulumi.Input[str]] = None,
enable_calling: Optional[pulumi.Input[bool]] = None,
incoming_call_route: Optional[pulumi.Input[str]] = None):
"""
The parameters to provide for the Microsoft Teams channel.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[bool] accepted_terms: Whether this channel accepted terms
:param pulumi.Input[str] calling_webhook: Webhook for Microsoft Teams channel calls
:param pulumi.Input[str] deployment_environment: Deployment environment for Microsoft Teams channel calls
:param pulumi.Input[bool] enable_calling: Enable calling for Microsoft Teams channel
:param pulumi.Input[str] incoming_call_route: Incoming call route for Microsoft Teams channel calls
"""
pulumi.set(__self__, "is_enabled", is_enabled)
if accepted_terms is not None:
pulumi.set(__self__, "accepted_terms", accepted_terms)
if calling_webhook is not None:
pulumi.set(__self__, "calling_webhook", calling_webhook)
if deployment_environment is None:
deployment_environment = 'FallbackDeploymentEnvironment'
if deployment_environment is not None:
pulumi.set(__self__, "deployment_environment", deployment_environment)
if enable_calling is None:
enable_calling = False
if enable_calling is not None:
pulumi.set(__self__, "enable_calling", enable_calling)
if incoming_call_route is not None:
pulumi.set(__self__, "incoming_call_route", incoming_call_route)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="acceptedTerms")
def accepted_terms(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this channel accepted terms
"""
return pulumi.get(self, "accepted_terms")
@accepted_terms.setter
def accepted_terms(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "accepted_terms", value)
@property
@pulumi.getter(name="callingWebhook")
def calling_webhook(self) -> Optional[pulumi.Input[str]]:
"""
Webhook for Microsoft Teams channel calls
"""
return pulumi.get(self, "calling_webhook")
@calling_webhook.setter
def calling_webhook(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "calling_webhook", value)
@property
@pulumi.getter(name="deploymentEnvironment")
def deployment_environment(self) -> Optional[pulumi.Input[str]]:
"""
Deployment environment for Microsoft Teams channel calls
"""
return pulumi.get(self, "deployment_environment")
@deployment_environment.setter
def deployment_environment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "deployment_environment", value)
@property
@pulumi.getter(name="enableCalling")
def enable_calling(self) -> Optional[pulumi.Input[bool]]:
"""
Enable calling for Microsoft Teams channel
"""
return pulumi.get(self, "enable_calling")
@enable_calling.setter
def enable_calling(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_calling", value)
@property
@pulumi.getter(name="incomingCallRoute")
def incoming_call_route(self) -> Optional[pulumi.Input[str]]:
"""
Incoming call route for Microsoft Teams channel calls
"""
return pulumi.get(self, "incoming_call_route")
@incoming_call_route.setter
def incoming_call_route(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "incoming_call_route", value)
@pulumi.input_type
class MsTeamsChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['MsTeamsChannelPropertiesArgs']] = None):
"""
Microsoft Teams channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'MsTeamsChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['MsTeamsChannelPropertiesArgs'] properties: The set of properties specific to Microsoft Teams channel resource
"""
pulumi.set(__self__, "channel_name", 'MsTeamsChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'MsTeamsChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['MsTeamsChannelPropertiesArgs']]:
"""
The set of properties specific to Microsoft Teams channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['MsTeamsChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class OmnichannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None):
"""
Omnichannel channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'Omnichannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
"""
pulumi.set(__self__, "channel_name", 'Omnichannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'Omnichannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@pulumi.input_type
class OutlookChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None):
"""
Outlook channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'OutlookChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
"""
pulumi.set(__self__, "channel_name", 'OutlookChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'OutlookChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@pulumi.input_type
class PrivateLinkServiceConnectionStateArgs:
def __init__(__self__, *,
actions_required: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]] = None):
"""
A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] actions_required: A message indicating if changes on the service provider require any updates on the consumer.
:param pulumi.Input[str] description: The reason for approval/rejection of the connection.
:param pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']] status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
if actions_required is not None:
pulumi.set(__self__, "actions_required", actions_required)
if description is not None:
pulumi.set(__self__, "description", description)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="actionsRequired")
def actions_required(self) -> Optional[pulumi.Input[str]]:
"""
A message indicating if changes on the service provider require any updates on the consumer.
"""
return pulumi.get(self, "actions_required")
@actions_required.setter
def actions_required(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "actions_required", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
The reason for approval/rejection of the connection.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:
"""
Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.
"""
return pulumi.get(self, "status")
@status.setter
def status(self, value: Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]):
pulumi.set(self, "status", value)
@pulumi.input_type
class SearchAssistantArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None):
"""
SearchAssistant definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'SearchAssistant'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
"""
pulumi.set(__self__, "channel_name", 'SearchAssistant')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'SearchAssistant'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@pulumi.input_type
class SkuArgs:
def __init__(__self__, *,
name: pulumi.Input[Union[str, 'SkuName']]):
"""
The SKU of the cognitive services account.
:param pulumi.Input[Union[str, 'SkuName']] name: The sku name
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input[Union[str, 'SkuName']]:
"""
The sku name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[Union[str, 'SkuName']]):
pulumi.set(self, "name", value)
@pulumi.input_type
class SkypeChannelPropertiesArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
calling_web_hook: Optional[pulumi.Input[str]] = None,
enable_calling: Optional[pulumi.Input[bool]] = None,
enable_groups: Optional[pulumi.Input[bool]] = None,
enable_media_cards: Optional[pulumi.Input[bool]] = None,
enable_messaging: Optional[pulumi.Input[bool]] = None,
enable_screen_sharing: Optional[pulumi.Input[bool]] = None,
enable_video: Optional[pulumi.Input[bool]] = None,
groups_mode: Optional[pulumi.Input[str]] = None,
incoming_call_route: Optional[pulumi.Input[str]] = None):
"""
The parameters to provide for the Skype channel.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[str] calling_web_hook: Calling web hook for Skype channel
:param pulumi.Input[bool] enable_calling: Enable calling for Skype channel
:param pulumi.Input[bool] enable_groups: Enable groups for Skype channel
:param pulumi.Input[bool] enable_media_cards: Enable media cards for Skype channel
:param pulumi.Input[bool] enable_messaging: Enable messaging for Skype channel
:param pulumi.Input[bool] enable_screen_sharing: Enable screen sharing for Skype channel
:param pulumi.Input[bool] enable_video: Enable video for Skype channel
:param pulumi.Input[str] groups_mode: Group mode for Skype channel
:param pulumi.Input[str] incoming_call_route: Incoming call route for Skype channel
"""
pulumi.set(__self__, "is_enabled", is_enabled)
if calling_web_hook is not None:
pulumi.set(__self__, "calling_web_hook", calling_web_hook)
if enable_calling is None:
enable_calling = False
if enable_calling is not None:
pulumi.set(__self__, "enable_calling", enable_calling)
if enable_groups is not None:
pulumi.set(__self__, "enable_groups", enable_groups)
if enable_media_cards is not None:
pulumi.set(__self__, "enable_media_cards", enable_media_cards)
if enable_messaging is not None:
pulumi.set(__self__, "enable_messaging", enable_messaging)
if enable_screen_sharing is not None:
pulumi.set(__self__, "enable_screen_sharing", enable_screen_sharing)
if enable_video is not None:
pulumi.set(__self__, "enable_video", enable_video)
if groups_mode is not None:
pulumi.set(__self__, "groups_mode", groups_mode)
if incoming_call_route is not None:
pulumi.set(__self__, "incoming_call_route", incoming_call_route)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="callingWebHook")
def calling_web_hook(self) -> Optional[pulumi.Input[str]]:
"""
Calling web hook for Skype channel
"""
return pulumi.get(self, "calling_web_hook")
@calling_web_hook.setter
def calling_web_hook(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "calling_web_hook", value)
@property
@pulumi.getter(name="enableCalling")
def enable_calling(self) -> Optional[pulumi.Input[bool]]:
"""
Enable calling for Skype channel
"""
return pulumi.get(self, "enable_calling")
@enable_calling.setter
def enable_calling(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_calling", value)
@property
@pulumi.getter(name="enableGroups")
def enable_groups(self) -> Optional[pulumi.Input[bool]]:
"""
Enable groups for Skype channel
"""
return pulumi.get(self, "enable_groups")
@enable_groups.setter
def enable_groups(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_groups", value)
@property
@pulumi.getter(name="enableMediaCards")
def enable_media_cards(self) -> Optional[pulumi.Input[bool]]:
"""
Enable media cards for Skype channel
"""
return pulumi.get(self, "enable_media_cards")
@enable_media_cards.setter
def enable_media_cards(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_media_cards", value)
@property
@pulumi.getter(name="enableMessaging")
def enable_messaging(self) -> Optional[pulumi.Input[bool]]:
"""
Enable messaging for Skype channel
"""
return pulumi.get(self, "enable_messaging")
@enable_messaging.setter
def enable_messaging(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_messaging", value)
@property
@pulumi.getter(name="enableScreenSharing")
def enable_screen_sharing(self) -> Optional[pulumi.Input[bool]]:
"""
Enable screen sharing for Skype channel
"""
return pulumi.get(self, "enable_screen_sharing")
@enable_screen_sharing.setter
def enable_screen_sharing(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_screen_sharing", value)
@property
@pulumi.getter(name="enableVideo")
def enable_video(self) -> Optional[pulumi.Input[bool]]:
"""
Enable video for Skype channel
"""
return pulumi.get(self, "enable_video")
@enable_video.setter
def enable_video(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_video", value)
@property
@pulumi.getter(name="groupsMode")
def groups_mode(self) -> Optional[pulumi.Input[str]]:
"""
Group mode for Skype channel
"""
return pulumi.get(self, "groups_mode")
@groups_mode.setter
def groups_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "groups_mode", value)
@property
@pulumi.getter(name="incomingCallRoute")
def incoming_call_route(self) -> Optional[pulumi.Input[str]]:
"""
Incoming call route for Skype channel
"""
return pulumi.get(self, "incoming_call_route")
@incoming_call_route.setter
def incoming_call_route(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "incoming_call_route", value)
@pulumi.input_type
class SkypeChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['SkypeChannelPropertiesArgs']] = None):
"""
Skype channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'SkypeChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['SkypeChannelPropertiesArgs'] properties: The set of properties specific to Skype channel resource
"""
pulumi.set(__self__, "channel_name", 'SkypeChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'SkypeChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['SkypeChannelPropertiesArgs']]:
"""
The set of properties specific to Skype channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['SkypeChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class SlackChannelPropertiesArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
landing_page_url: Optional[pulumi.Input[str]] = None,
register_before_o_auth_flow: Optional[pulumi.Input[bool]] = None,
scopes: Optional[pulumi.Input[str]] = None,
signing_secret: Optional[pulumi.Input[str]] = None,
verification_token: Optional[pulumi.Input[str]] = None):
"""
The parameters to provide for the Slack channel.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[str] client_id: The Slack client id
:param pulumi.Input[str] client_secret: The Slack client secret. Value only returned through POST to the action Channel List API, otherwise empty.
:param pulumi.Input[str] landing_page_url: The Slack landing page Url
:param pulumi.Input[bool] register_before_o_auth_flow: Whether to register the settings before OAuth validation is performed. The recommended value is True.
:param pulumi.Input[str] scopes: The Slack permission scopes.
:param pulumi.Input[str] signing_secret: The Slack signing secret.
:param pulumi.Input[str] verification_token: The Slack verification token. Value only returned through POST to the action Channel List API, otherwise empty.
"""
pulumi.set(__self__, "is_enabled", is_enabled)
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if landing_page_url is not None:
pulumi.set(__self__, "landing_page_url", landing_page_url)
if register_before_o_auth_flow is not None:
pulumi.set(__self__, "register_before_o_auth_flow", register_before_o_auth_flow)
if scopes is not None:
pulumi.set(__self__, "scopes", scopes)
if signing_secret is not None:
pulumi.set(__self__, "signing_secret", signing_secret)
if verification_token is not None:
pulumi.set(__self__, "verification_token", verification_token)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
"""
The Slack client id
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[pulumi.Input[str]]:
"""
The Slack client secret. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter(name="landingPageUrl")
def landing_page_url(self) -> Optional[pulumi.Input[str]]:
"""
The Slack landing page Url
"""
return pulumi.get(self, "landing_page_url")
@landing_page_url.setter
def landing_page_url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "landing_page_url", value)
@property
@pulumi.getter(name="registerBeforeOAuthFlow")
def register_before_o_auth_flow(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to register the settings before OAuth validation is performed. The recommended value is True.
"""
return pulumi.get(self, "register_before_o_auth_flow")
@register_before_o_auth_flow.setter
def register_before_o_auth_flow(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "register_before_o_auth_flow", value)
@property
@pulumi.getter
def scopes(self) -> Optional[pulumi.Input[str]]:
"""
The Slack permission scopes.
"""
return pulumi.get(self, "scopes")
@scopes.setter
def scopes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scopes", value)
@property
@pulumi.getter(name="signingSecret")
def signing_secret(self) -> Optional[pulumi.Input[str]]:
"""
The Slack signing secret.
"""
return pulumi.get(self, "signing_secret")
@signing_secret.setter
def signing_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "signing_secret", value)
@property
@pulumi.getter(name="verificationToken")
def verification_token(self) -> Optional[pulumi.Input[str]]:
"""
The Slack verification token. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "verification_token")
@verification_token.setter
def verification_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "verification_token", value)
@pulumi.input_type
class SlackChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['SlackChannelPropertiesArgs']] = None):
"""
Slack channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'SlackChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['SlackChannelPropertiesArgs'] properties: The set of properties specific to Slack channel resource
"""
pulumi.set(__self__, "channel_name", 'SlackChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'SlackChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['SlackChannelPropertiesArgs']]:
"""
The set of properties specific to Slack channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['SlackChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class SmsChannelPropertiesArgs:
def __init__(__self__, *,
account_sid: pulumi.Input[str],
is_enabled: pulumi.Input[bool],
phone: pulumi.Input[str],
auth_token: Optional[pulumi.Input[str]] = None,
is_validated: Optional[pulumi.Input[bool]] = None):
"""
The parameters to provide for the Sms channel.
:param pulumi.Input[str] account_sid: The Sms account SID. Value only returned through POST to the action Channel List API, otherwise empty.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[str] phone: The Sms phone
:param pulumi.Input[str] auth_token: The Sms auth token. Value only returned through POST to the action Channel List API, otherwise empty.
:param pulumi.Input[bool] is_validated: Whether this channel is validated for the bot
"""
pulumi.set(__self__, "account_sid", account_sid)
pulumi.set(__self__, "is_enabled", is_enabled)
pulumi.set(__self__, "phone", phone)
if auth_token is not None:
pulumi.set(__self__, "auth_token", auth_token)
if is_validated is not None:
pulumi.set(__self__, "is_validated", is_validated)
@property
@pulumi.getter(name="accountSID")
def account_sid(self) -> pulumi.Input[str]:
"""
The Sms account SID. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "account_sid")
@account_sid.setter
def account_sid(self, value: pulumi.Input[str]):
pulumi.set(self, "account_sid", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter
def phone(self) -> pulumi.Input[str]:
"""
The Sms phone
"""
return pulumi.get(self, "phone")
@phone.setter
def phone(self, value: pulumi.Input[str]):
pulumi.set(self, "phone", value)
@property
@pulumi.getter(name="authToken")
def auth_token(self) -> Optional[pulumi.Input[str]]:
"""
The Sms auth token. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "auth_token")
@auth_token.setter
def auth_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auth_token", value)
@property
@pulumi.getter(name="isValidated")
def is_validated(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this channel is validated for the bot
"""
return pulumi.get(self, "is_validated")
@is_validated.setter
def is_validated(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_validated", value)
@pulumi.input_type
class SmsChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['SmsChannelPropertiesArgs']] = None):
"""
Sms channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'SmsChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['SmsChannelPropertiesArgs'] properties: The set of properties specific to Sms channel resource
"""
pulumi.set(__self__, "channel_name", 'SmsChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'SmsChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['SmsChannelPropertiesArgs']]:
"""
The set of properties specific to Sms channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['SmsChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class TelegramChannelPropertiesArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
access_token: Optional[pulumi.Input[str]] = None,
is_validated: Optional[pulumi.Input[bool]] = None):
"""
The parameters to provide for the Telegram channel.
:param pulumi.Input[bool] is_enabled: Whether this channel is enabled for the bot
:param pulumi.Input[str] access_token: The Telegram access token. Value only returned through POST to the action Channel List API, otherwise empty.
:param pulumi.Input[bool] is_validated: Whether this channel is validated for the bot
"""
pulumi.set(__self__, "is_enabled", is_enabled)
if access_token is not None:
pulumi.set(__self__, "access_token", access_token)
if is_validated is not None:
pulumi.set(__self__, "is_validated", is_validated)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this channel is enabled for the bot
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="accessToken")
def access_token(self) -> Optional[pulumi.Input[str]]:
"""
The Telegram access token. Value only returned through POST to the action Channel List API, otherwise empty.
"""
return pulumi.get(self, "access_token")
@access_token.setter
def access_token(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "access_token", value)
@property
@pulumi.getter(name="isValidated")
def is_validated(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this channel is validated for the bot
"""
return pulumi.get(self, "is_validated")
@is_validated.setter
def is_validated(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_validated", value)
@pulumi.input_type
class TelegramChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['TelegramChannelPropertiesArgs']] = None):
"""
Telegram channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'TelegramChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['TelegramChannelPropertiesArgs'] properties: The set of properties specific to Telegram channel resource
"""
pulumi.set(__self__, "channel_name", 'TelegramChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'TelegramChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['TelegramChannelPropertiesArgs']]:
"""
The set of properties specific to Telegram channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['TelegramChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class TelephonyChannelPropertiesArgs:
def __init__(__self__, *,
api_configurations: Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyChannelResourceApiConfigurationArgs']]]] = None,
cognitive_service_region: Optional[pulumi.Input[str]] = None,
cognitive_service_subscription_key: Optional[pulumi.Input[str]] = None,
default_locale: Optional[pulumi.Input[str]] = None,
is_enabled: Optional[pulumi.Input[bool]] = None,
phone_numbers: Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyPhoneNumbersArgs']]]] = None,
premium_sku: Optional[pulumi.Input[str]] = None):
"""
The parameters to provide for the Telephony channel.
:param pulumi.Input[Sequence[pulumi.Input['TelephonyChannelResourceApiConfigurationArgs']]] api_configurations: The list of Telephony api configuration
:param pulumi.Input[str] cognitive_service_region: The cognitive service region
:param pulumi.Input[str] cognitive_service_subscription_key: The cognitive service subscription key
:param pulumi.Input[str] default_locale: The default locale of the channel
:param pulumi.Input[bool] is_enabled: Whether the channel is enabled
:param pulumi.Input[Sequence[pulumi.Input['TelephonyPhoneNumbersArgs']]] phone_numbers: The list of Telephony phone numbers
:param pulumi.Input[str] premium_sku: The premium SKU applied to the channel
"""
if api_configurations is not None:
pulumi.set(__self__, "api_configurations", api_configurations)
if cognitive_service_region is not None:
pulumi.set(__self__, "cognitive_service_region", cognitive_service_region)
if cognitive_service_subscription_key is not None:
pulumi.set(__self__, "cognitive_service_subscription_key", cognitive_service_subscription_key)
if default_locale is not None:
pulumi.set(__self__, "default_locale", default_locale)
if is_enabled is not None:
pulumi.set(__self__, "is_enabled", is_enabled)
if phone_numbers is not None:
pulumi.set(__self__, "phone_numbers", phone_numbers)
if premium_sku is not None:
pulumi.set(__self__, "premium_sku", premium_sku)
@property
@pulumi.getter(name="apiConfigurations")
def api_configurations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyChannelResourceApiConfigurationArgs']]]]:
"""
The list of Telephony api configuration
"""
return pulumi.get(self, "api_configurations")
@api_configurations.setter
def api_configurations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyChannelResourceApiConfigurationArgs']]]]):
pulumi.set(self, "api_configurations", value)
@property
@pulumi.getter(name="cognitiveServiceRegion")
def cognitive_service_region(self) -> Optional[pulumi.Input[str]]:
"""
The extensionKey2
"""
return pulumi.get(self, "cognitive_service_region")
@cognitive_service_region.setter
def cognitive_service_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_region", value)
@property
@pulumi.getter(name="cognitiveServiceSubscriptionKey")
def cognitive_service_subscription_key(self) -> Optional[pulumi.Input[str]]:
"""
The extensionKey1
"""
return pulumi.get(self, "cognitive_service_subscription_key")
@cognitive_service_subscription_key.setter
def cognitive_service_subscription_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_subscription_key", value)
@property
@pulumi.getter(name="defaultLocale")
def default_locale(self) -> Optional[pulumi.Input[str]]:
"""
The default locale of the channel
"""
return pulumi.get(self, "default_locale")
@default_locale.setter
def default_locale(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_locale", value)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the channel is enabled
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="phoneNumbers")
def phone_numbers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyPhoneNumbersArgs']]]]:
"""
The list of Telephony phone numbers
"""
return pulumi.get(self, "phone_numbers")
@phone_numbers.setter
def phone_numbers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TelephonyPhoneNumbersArgs']]]]):
pulumi.set(self, "phone_numbers", value)
@property
@pulumi.getter(name="premiumSKU")
def premium_sku(self) -> Optional[pulumi.Input[str]]:
"""
The premium SKU applied to the channel
"""
return pulumi.get(self, "premium_sku")
@premium_sku.setter
def premium_sku(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "premium_sku", value)
@pulumi.input_type
class TelephonyChannelResourceApiConfigurationArgs:
def __init__(__self__, *,
cognitive_service_region: Optional[pulumi.Input[str]] = None,
cognitive_service_resource_id: Optional[pulumi.Input[str]] = None,
cognitive_service_subscription_key: Optional[pulumi.Input[str]] = None,
default_locale: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
provider_name: Optional[pulumi.Input[str]] = None):
"""
A resource Api configuration for the Telephony channel
:param pulumi.Input[str] cognitive_service_region: The cognitive service region.
:param pulumi.Input[str] cognitive_service_resource_id: The cognitive service resourceId.
:param pulumi.Input[str] cognitive_service_subscription_key: The cognitive service subscription key.
:param pulumi.Input[str] default_locale: The default locale.
:param pulumi.Input[str] id: The id of config.
:param pulumi.Input[str] provider_name: The provider name.
"""
if cognitive_service_region is not None:
pulumi.set(__self__, "cognitive_service_region", cognitive_service_region)
if cognitive_service_resource_id is not None:
pulumi.set(__self__, "cognitive_service_resource_id", cognitive_service_resource_id)
if cognitive_service_subscription_key is not None:
pulumi.set(__self__, "cognitive_service_subscription_key", cognitive_service_subscription_key)
if default_locale is not None:
pulumi.set(__self__, "default_locale", default_locale)
if id is not None:
pulumi.set(__self__, "id", id)
if provider_name is not None:
pulumi.set(__self__, "provider_name", provider_name)
@property
@pulumi.getter(name="cognitiveServiceRegion")
def cognitive_service_region(self) -> Optional[pulumi.Input[str]]:
"""
The cognitive service region.
"""
return pulumi.get(self, "cognitive_service_region")
@cognitive_service_region.setter
def cognitive_service_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_region", value)
@property
@pulumi.getter(name="cognitiveServiceResourceId")
def cognitive_service_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The cognitive service resourceId.
"""
return pulumi.get(self, "cognitive_service_resource_id")
@cognitive_service_resource_id.setter
def cognitive_service_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_resource_id", value)
@property
@pulumi.getter(name="cognitiveServiceSubscriptionKey")
def cognitive_service_subscription_key(self) -> Optional[pulumi.Input[str]]:
"""
The cognitive service subscription key.
"""
return pulumi.get(self, "cognitive_service_subscription_key")
@cognitive_service_subscription_key.setter
def cognitive_service_subscription_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_subscription_key", value)
@property
@pulumi.getter(name="defaultLocale")
def default_locale(self) -> Optional[pulumi.Input[str]]:
"""
The default locale.
"""
return pulumi.get(self, "default_locale")
@default_locale.setter
def default_locale(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_locale", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The id of config.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="providerName")
def provider_name(self) -> Optional[pulumi.Input[str]]:
"""
The provider name.
"""
return pulumi.get(self, "provider_name")
@provider_name.setter
def provider_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "provider_name", value)
@pulumi.input_type
class TelephonyChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['TelephonyChannelPropertiesArgs']] = None):
"""
Telephony channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'TelephonyChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['TelephonyChannelPropertiesArgs'] properties: The set of properties specific to Telephony channel resource
"""
pulumi.set(__self__, "channel_name", 'TelephonyChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'TelephonyChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['TelephonyChannelPropertiesArgs']]:
"""
The set of properties specific to Telephony channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['TelephonyChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class TelephonyPhoneNumbersArgs:
def __init__(__self__, *,
acs_endpoint: Optional[pulumi.Input[str]] = None,
acs_resource_id: Optional[pulumi.Input[str]] = None,
acs_secret: Optional[pulumi.Input[str]] = None,
cognitive_service_region: Optional[pulumi.Input[str]] = None,
cognitive_service_resource_id: Optional[pulumi.Input[str]] = None,
cognitive_service_subscription_key: Optional[pulumi.Input[str]] = None,
default_locale: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
offer_type: Optional[pulumi.Input[str]] = None,
phone_number: Optional[pulumi.Input[str]] = None):
"""
A telephone number for the Telephony channel
:param pulumi.Input[str] acs_endpoint: The endpoint of ACS.
:param pulumi.Input[str] acs_resource_id: The resource id of ACS.
:param pulumi.Input[str] acs_secret: The secret of ACS.
:param pulumi.Input[str] cognitive_service_region: The service region of cognitive service.
:param pulumi.Input[str] cognitive_service_resource_id: The resource id of cognitive service.
:param pulumi.Input[str] cognitive_service_subscription_key: The subscription key of cognitive service.
:param pulumi.Input[str] default_locale: The default locale of the phone number.
:param pulumi.Input[str] id: The element id.
        :param pulumi.Input[str] offer_type: Optional property that will determine the offering type of the phone.
:param pulumi.Input[str] phone_number: The phone number.
"""
if acs_endpoint is not None:
pulumi.set(__self__, "acs_endpoint", acs_endpoint)
if acs_resource_id is not None:
pulumi.set(__self__, "acs_resource_id", acs_resource_id)
if acs_secret is not None:
pulumi.set(__self__, "acs_secret", acs_secret)
if cognitive_service_region is not None:
pulumi.set(__self__, "cognitive_service_region", cognitive_service_region)
if cognitive_service_resource_id is not None:
pulumi.set(__self__, "cognitive_service_resource_id", cognitive_service_resource_id)
if cognitive_service_subscription_key is not None:
pulumi.set(__self__, "cognitive_service_subscription_key", cognitive_service_subscription_key)
if default_locale is not None:
pulumi.set(__self__, "default_locale", default_locale)
if id is not None:
pulumi.set(__self__, "id", id)
if offer_type is not None:
pulumi.set(__self__, "offer_type", offer_type)
if phone_number is not None:
pulumi.set(__self__, "phone_number", phone_number)
@property
@pulumi.getter(name="acsEndpoint")
def acs_endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The endpoint of ACS.
"""
return pulumi.get(self, "acs_endpoint")
@acs_endpoint.setter
def acs_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "acs_endpoint", value)
@property
@pulumi.getter(name="acsResourceId")
def acs_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource id of ACS.
"""
return pulumi.get(self, "acs_resource_id")
@acs_resource_id.setter
def acs_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "acs_resource_id", value)
@property
@pulumi.getter(name="acsSecret")
def acs_secret(self) -> Optional[pulumi.Input[str]]:
"""
The secret of ACS.
"""
return pulumi.get(self, "acs_secret")
@acs_secret.setter
def acs_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "acs_secret", value)
@property
@pulumi.getter(name="cognitiveServiceRegion")
def cognitive_service_region(self) -> Optional[pulumi.Input[str]]:
"""
The service region of cognitive service.
"""
return pulumi.get(self, "cognitive_service_region")
@cognitive_service_region.setter
def cognitive_service_region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_region", value)
@property
@pulumi.getter(name="cognitiveServiceResourceId")
def cognitive_service_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource id of cognitive service.
"""
return pulumi.get(self, "cognitive_service_resource_id")
@cognitive_service_resource_id.setter
def cognitive_service_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_resource_id", value)
@property
@pulumi.getter(name="cognitiveServiceSubscriptionKey")
def cognitive_service_subscription_key(self) -> Optional[pulumi.Input[str]]:
"""
The subscription key of cognitive service.
"""
return pulumi.get(self, "cognitive_service_subscription_key")
@cognitive_service_subscription_key.setter
def cognitive_service_subscription_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cognitive_service_subscription_key", value)
@property
@pulumi.getter(name="defaultLocale")
def default_locale(self) -> Optional[pulumi.Input[str]]:
"""
The default locale of the phone number.
"""
return pulumi.get(self, "default_locale")
@default_locale.setter
def default_locale(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_locale", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
The element id.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="offerType")
def offer_type(self) -> Optional[pulumi.Input[str]]:
"""
        Optional property that will determine the offering type of the phone.
"""
return pulumi.get(self, "offer_type")
@offer_type.setter
def offer_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "offer_type", value)
@property
@pulumi.getter(name="phoneNumber")
def phone_number(self) -> Optional[pulumi.Input[str]]:
"""
The phone number.
"""
return pulumi.get(self, "phone_number")
@phone_number.setter
def phone_number(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "phone_number", value)
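# --- Illustrative usage (a sketch, not part of the generated SDK; the phone
# number, locale, and helper name below are hypothetical) ---
def _example_telephony_channel_args() -> 'TelephonyChannelArgs':
    phone = TelephonyPhoneNumbersArgs(
        phone_number='+14255550100',  # hypothetical number
        default_locale='en-US',
    )
    properties = TelephonyChannelPropertiesArgs(
        is_enabled=True,
        phone_numbers=[phone],
    )
    # channel_name is pinned to 'TelephonyChannel' by the constructor.
    return TelephonyChannelArgs(channel_name='TelephonyChannel', properties=properties)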
@pulumi.input_type
class WebChatChannelPropertiesArgs:
def __init__(__self__, *,
sites: Optional[pulumi.Input[Sequence[pulumi.Input['WebChatSiteArgs']]]] = None):
"""
The parameters to provide for the Web Chat channel.
:param pulumi.Input[Sequence[pulumi.Input['WebChatSiteArgs']]] sites: The list of Web Chat sites
"""
if sites is not None:
pulumi.set(__self__, "sites", sites)
@property
@pulumi.getter
def sites(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['WebChatSiteArgs']]]]:
"""
The list of Web Chat sites
"""
return pulumi.get(self, "sites")
@sites.setter
def sites(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['WebChatSiteArgs']]]]):
pulumi.set(self, "sites", value)
@pulumi.input_type
class WebChatChannelArgs:
def __init__(__self__, *,
channel_name: pulumi.Input[str],
etag: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['WebChatChannelPropertiesArgs']] = None):
"""
Web Chat channel definition
:param pulumi.Input[str] channel_name: The channel name
Expected value is 'WebChatChannel'.
:param pulumi.Input[str] etag: Entity Tag of the resource
:param pulumi.Input[str] location: Specifies the location of the resource.
:param pulumi.Input['WebChatChannelPropertiesArgs'] properties: The set of properties specific to Web Chat channel resource
"""
pulumi.set(__self__, "channel_name", 'WebChatChannel')
if etag is not None:
pulumi.set(__self__, "etag", etag)
if location is None:
location = 'global'
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> pulumi.Input[str]:
"""
The channel name
Expected value is 'WebChatChannel'.
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag of the resource
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['WebChatChannelPropertiesArgs']]:
"""
The set of properties specific to Web Chat channel resource
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['WebChatChannelPropertiesArgs']]):
pulumi.set(self, "properties", value)
@pulumi.input_type
class WebChatSiteArgs:
def __init__(__self__, *,
is_enabled: pulumi.Input[bool],
is_webchat_preview_enabled: pulumi.Input[bool],
site_name: pulumi.Input[str],
app_id: Optional[pulumi.Input[str]] = None,
e_tag: Optional[pulumi.Input[str]] = None,
is_block_user_upload_enabled: Optional[pulumi.Input[bool]] = None,
is_detailed_logging_enabled: Optional[pulumi.Input[bool]] = None,
is_endpoint_parameters_enabled: Optional[pulumi.Input[bool]] = None,
is_no_storage_enabled: Optional[pulumi.Input[bool]] = None,
is_secure_site_enabled: Optional[pulumi.Input[bool]] = None,
is_v1_enabled: Optional[pulumi.Input[bool]] = None,
is_v3_enabled: Optional[pulumi.Input[bool]] = None,
is_web_chat_speech_enabled: Optional[pulumi.Input[bool]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
trusted_origins: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A site for the Webchat channel
:param pulumi.Input[bool] is_enabled: Whether this site is enabled for DirectLine channel
:param pulumi.Input[bool] is_webchat_preview_enabled: Whether this site is enabled for preview versions of Webchat
:param pulumi.Input[str] site_name: Site name
:param pulumi.Input[str] app_id: DirectLine application id
:param pulumi.Input[str] e_tag: Entity Tag
:param pulumi.Input[bool] is_block_user_upload_enabled: Whether this site is enabled for block user upload.
        :param pulumi.Input[bool] is_detailed_logging_enabled: Whether detailed logging is disabled for this site
        :param pulumi.Input[bool] is_endpoint_parameters_enabled: Whether the EndpointParameters feature is enabled for this site
        :param pulumi.Input[bool] is_no_storage_enabled: Whether detailed logging is disabled for this no-storage site
:param pulumi.Input[bool] is_secure_site_enabled: Whether this site is enabled for authentication with Bot Framework.
:param pulumi.Input[bool] is_v1_enabled: Whether this site is enabled for Bot Framework V1 protocol.
:param pulumi.Input[bool] is_v3_enabled: Whether this site is enabled for Bot Framework V3 protocol.
:param pulumi.Input[bool] is_web_chat_speech_enabled: Whether this site is enabled for Webchat Speech
:param pulumi.Input[str] tenant_id: Tenant Id
:param pulumi.Input[Sequence[pulumi.Input[str]]] trusted_origins: List of Trusted Origin URLs for this site. This field is applicable only if isSecureSiteEnabled is True.
"""
pulumi.set(__self__, "is_enabled", is_enabled)
if is_webchat_preview_enabled is None:
is_webchat_preview_enabled = False
pulumi.set(__self__, "is_webchat_preview_enabled", is_webchat_preview_enabled)
pulumi.set(__self__, "site_name", site_name)
if app_id is not None:
pulumi.set(__self__, "app_id", app_id)
if e_tag is not None:
pulumi.set(__self__, "e_tag", e_tag)
if is_block_user_upload_enabled is not None:
pulumi.set(__self__, "is_block_user_upload_enabled", is_block_user_upload_enabled)
if is_detailed_logging_enabled is not None:
pulumi.set(__self__, "is_detailed_logging_enabled", is_detailed_logging_enabled)
if is_endpoint_parameters_enabled is not None:
pulumi.set(__self__, "is_endpoint_parameters_enabled", is_endpoint_parameters_enabled)
if is_no_storage_enabled is not None:
pulumi.set(__self__, "is_no_storage_enabled", is_no_storage_enabled)
if is_secure_site_enabled is not None:
pulumi.set(__self__, "is_secure_site_enabled", is_secure_site_enabled)
if is_v1_enabled is not None:
pulumi.set(__self__, "is_v1_enabled", is_v1_enabled)
if is_v3_enabled is not None:
pulumi.set(__self__, "is_v3_enabled", is_v3_enabled)
        if is_web_chat_speech_enabled is None:
            is_web_chat_speech_enabled = False
        pulumi.set(__self__, "is_web_chat_speech_enabled", is_web_chat_speech_enabled)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if trusted_origins is not None:
pulumi.set(__self__, "trusted_origins", trusted_origins)
@property
@pulumi.getter(name="isEnabled")
def is_enabled(self) -> pulumi.Input[bool]:
"""
Whether this site is enabled for DirectLine channel
"""
return pulumi.get(self, "is_enabled")
@is_enabled.setter
def is_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_enabled", value)
@property
@pulumi.getter(name="isWebchatPreviewEnabled")
def is_webchat_preview_enabled(self) -> pulumi.Input[bool]:
"""
Whether this site is enabled for preview versions of Webchat
"""
return pulumi.get(self, "is_webchat_preview_enabled")
@is_webchat_preview_enabled.setter
def is_webchat_preview_enabled(self, value: pulumi.Input[bool]):
pulumi.set(self, "is_webchat_preview_enabled", value)
@property
@pulumi.getter(name="siteName")
def site_name(self) -> pulumi.Input[str]:
"""
Site name
"""
return pulumi.get(self, "site_name")
@site_name.setter
def site_name(self, value: pulumi.Input[str]):
pulumi.set(self, "site_name", value)
@property
@pulumi.getter(name="appId")
def app_id(self) -> Optional[pulumi.Input[str]]:
"""
DirectLine application id
"""
return pulumi.get(self, "app_id")
@app_id.setter
def app_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "app_id", value)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[pulumi.Input[str]]:
"""
Entity Tag
"""
return pulumi.get(self, "e_tag")
@e_tag.setter
def e_tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "e_tag", value)
@property
@pulumi.getter(name="isBlockUserUploadEnabled")
def is_block_user_upload_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for block user upload.
"""
return pulumi.get(self, "is_block_user_upload_enabled")
@is_block_user_upload_enabled.setter
def is_block_user_upload_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_block_user_upload_enabled", value)
@property
@pulumi.getter(name="isDetailedLoggingEnabled")
def is_detailed_logging_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether detailed logging is disabled for this site
"""
return pulumi.get(self, "is_detailed_logging_enabled")
@is_detailed_logging_enabled.setter
def is_detailed_logging_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_detailed_logging_enabled", value)
@property
@pulumi.getter(name="isEndpointParametersEnabled")
def is_endpoint_parameters_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether the EndpointParameters feature is enabled for this site
"""
return pulumi.get(self, "is_endpoint_parameters_enabled")
@is_endpoint_parameters_enabled.setter
def is_endpoint_parameters_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_endpoint_parameters_enabled", value)
@property
@pulumi.getter(name="isNoStorageEnabled")
def is_no_storage_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
        Whether detailed logging is disabled for this no-storage site
"""
return pulumi.get(self, "is_no_storage_enabled")
@is_no_storage_enabled.setter
def is_no_storage_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_no_storage_enabled", value)
@property
@pulumi.getter(name="isSecureSiteEnabled")
def is_secure_site_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for authentication with Bot Framework.
"""
return pulumi.get(self, "is_secure_site_enabled")
@is_secure_site_enabled.setter
def is_secure_site_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_secure_site_enabled", value)
@property
@pulumi.getter(name="isV1Enabled")
def is_v1_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for Bot Framework V1 protocol.
"""
return pulumi.get(self, "is_v1_enabled")
@is_v1_enabled.setter
def is_v1_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_v1_enabled", value)
@property
@pulumi.getter(name="isV3Enabled")
def is_v3_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for Bot Framework V3 protocol.
"""
return pulumi.get(self, "is_v3_enabled")
@is_v3_enabled.setter
def is_v3_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_v3_enabled", value)
@property
@pulumi.getter(name="isWebChatSpeechEnabled")
def is_web_chat_speech_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Whether this site is enabled for Webchat Speech
"""
return pulumi.get(self, "is_web_chat_speech_enabled")
@is_web_chat_speech_enabled.setter
def is_web_chat_speech_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_web_chat_speech_enabled", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
Tenant Id
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter(name="trustedOrigins")
def trusted_origins(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of Trusted Origin URLs for this site. This field is applicable only if isSecureSiteEnabled is True.
"""
return pulumi.get(self, "trusted_origins")
@trusted_origins.setter
def trusted_origins(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "trusted_origins", value)
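# --- Illustrative usage (a sketch, not part of the generated SDK; the site
# name and helper name are hypothetical) ---
def _example_web_chat_channel_args() -> 'WebChatChannelArgs':
    site = WebChatSiteArgs(
        is_enabled=True,
        is_webchat_preview_enabled=False,
        site_name='default-site',  # hypothetical site name
    )
    return WebChatChannelArgs(
        channel_name='WebChatChannel',
        properties=WebChatChannelPropertiesArgs(sites=[site]),
    )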
| SURGroup/UQpy | /src/UQpy/sampling/stratified_sampling/strata/VoronoiStrata.py | MIT | Python | UTF-8 | 19,271 bytes |
import logging
import math
from UQpy.sampling.stratified_sampling.strata.DelaunayStrata import DelaunayStrata
from UQpy.sampling.stratified_sampling.strata.baseclass.Strata import Strata
from UQpy.sampling.SimplexSampling import *
from UQpy.utilities.ValidationTypes import RandomStateType
import numpy as np
from scipy.spatial import Voronoi
class VoronoiStrata(Strata):
@beartype
def __init__(
self,
seeds: np.ndarray = None,
seeds_number: PositiveInteger = None,
dimension: PositiveInteger = None,
decomposition_iterations: PositiveInteger = 1,
random_state: RandomStateType = None
):
"""
Define a geometric decomposition of the n-dimensional unit hypercube into disjoint and space-filling
Voronoi strata.
:param seeds: An array of dimension :math:`N * n` specifying the seeds of all strata. The seeds of the strata
are the coordinates of the point inside each stratum that defines the stratum. The user must provide `seeds` or
`seeds_number` and `dimension`
:param seeds_number: The number of seeds to randomly generate. Seeds are generated by random sampling on the
unit hypercube. The user must provide `seeds` or `seeds_number` and `dimension`
:param dimension: The dimension of the unit hypercube in which to generate random seeds. Used only if
`seeds_number` is provided. The user must provide `seeds` or `seeds_number` and `dimension`
:param decomposition_iterations: Number of iterations to perform to create a Centroidal Voronoi decomposition.
If :code:`decomposition_iterations = 0`, the Voronoi decomposition is based on the provided or generated seeds.
If :code:`decomposition_iterations >= 1`, the seed points are moved to the centroids of the Voronoi cells
in each iteration and a new Voronoi decomposition is performed. This process is repeated
`decomposition_iterations` times to create a Centroidal Voronoi decomposition.
"""
super().__init__(seeds=seeds, random_state=random_state)
self.logger = logging.getLogger(__name__)
self.seeds_number = seeds_number
self.dimension = dimension
self.decomposition_iterations = decomposition_iterations
self.voronoi: Voronoi = None
"""
Defines a Voronoi decomposition of the set of reflected points. When creating the Voronoi decomposition on
the unit hypercube, the code reflects the points on the unit hypercube across all faces of the unit hypercube.
This causes the Voronoi decomposition to create edges along the faces of the hypercube.
This object is not the Voronoi decomposition of the unit hypercube. It is the Voronoi decomposition of all
points and their reflections from which the unit hypercube is extracted.
To access the vertices in the unit hypercube, see the attribute :py:attr:`vertices`."""
self.vertices: list = []
"""A list of the vertices for each Voronoi stratum on the unit hypercube."""
if self.seeds is not None:
if self.seeds_number is not None or self.dimension is not None:
self.logger.info(
"UQpy: Ignoring 'nseeds' and 'dimension' attributes because 'seeds' are provided"
)
self.dimension = self.seeds.shape[1]
self.stratify()
def stratify(self):
"""
Performs the Voronoi stratification.
"""
self.logger.info("UQpy: Creating Voronoi stratification ...")
initial_seeds = self.seeds
if self.seeds is None:
initial_seeds = stats.uniform.rvs(size=[self.seeds_number, self.dimension], random_state=self.random_state)
if self.decomposition_iterations == 0:
cent, vol = self.create_volume(initial_seeds)
self.volume = np.asarray(vol)
else:
for i in range(self.decomposition_iterations):
cent, vol = self.create_volume(initial_seeds)
initial_seeds = np.asarray(cent)
self.volume = np.asarray(vol)
self.seeds = initial_seeds
self.logger.info("UQpy: Voronoi stratification created.")
def create_volume(self, initial_seeds):
self.voronoi, bounded_regions = self.voronoi_unit_hypercube(initial_seeds)
cent, vol = [], []
for region in bounded_regions:
vertices = self.voronoi.vertices[region + [region[0]], :]
centroid, volume = self.compute_voronoi_centroid_volume(vertices)
self.vertices.append(vertices)
cent.append(centroid[0, :])
vol.append(volume)
return cent, vol
@staticmethod
def voronoi_unit_hypercube(seeds):
from scipy.spatial import Voronoi
# Mirror the seeds in both low and high directions for each dimension
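        # Reflecting every seed across each face of the unit hypercube makes the
        # Voronoi cells of the original seeds bounded, with facets lying exactly
        # on the hypercube faces; only those bounded cells are returned below.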
bounded_points = seeds
dimension = seeds.shape[1]
for i in range(dimension):
seeds_del = np.delete(bounded_points, i, 1)
if i == 0:
points_temp1 = np.hstack([np.atleast_2d(-bounded_points[:, i]).T, seeds_del])
points_temp2 = np.hstack([np.atleast_2d(2 - bounded_points[:, i]).T, seeds_del])
elif i == dimension - 1:
points_temp1 = np.hstack([seeds_del, np.atleast_2d(-bounded_points[:, i]).T])
points_temp2 = np.hstack([seeds_del, np.atleast_2d(2 - bounded_points[:, i]).T])
else:
points_temp1 = np.hstack([seeds_del[:, :i],
np.atleast_2d(-bounded_points[:, i]).T,
seeds_del[:, i:], ])
points_temp2 = np.hstack([seeds_del[:, :i],
np.atleast_2d(2 - bounded_points[:, i]).T,
seeds_del[:, i:],])
seeds = np.append(seeds, points_temp1, axis=0)
seeds = np.append(seeds, points_temp2, axis=0)
vor = Voronoi(seeds, incremental=True)
regions = [None] * bounded_points.shape[0]
for i in range(bounded_points.shape[0]):
regions[i] = vor.regions[vor.point_region[i]]
bounded_regions = regions
return vor, bounded_regions
@staticmethod
def compute_voronoi_centroid_volume(vertices):
"""
This function computes the centroid and volume of a Voronoi cell from its vertices.
:param vertices: Coordinates of the vertices that define the Voronoi cell.
:return: Centroid and Volume of the Voronoi cell
"""
from scipy.spatial import Delaunay, ConvexHull
tess = Delaunay(vertices)
dimension = np.shape(vertices)[1]
w = np.zeros((tess.nsimplex, 1))
cent = np.zeros((tess.nsimplex, dimension))
for i in range(tess.nsimplex):
# pylint: disable=E1136
ch = ConvexHull(tess.points[tess.simplices[i]])
w[i] = ch.volume
cent[i, :] = np.mean(tess.points[tess.simplices[i]], axis=0)
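        # The cell volume is the sum of the simplex volumes, and the cell
        # centroid is the volume-weighted average of the simplex centroids.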
volume = np.sum(w)
centroid = np.matmul(np.divide(w, volume).T, cent)
return centroid, volume
def sample_strata(self, nsamples_per_stratum, random_state):
from scipy.spatial import Delaunay, ConvexHull
samples_in_strata, weights = list(), list()
for j in range(
len(self.vertices)
): # For each bounded region (Voronoi stratification)
vertices = self.vertices[j][:-1, :]
seed = self.seeds[j, :].reshape(1, -1)
seed_and_vertices = np.concatenate([vertices, seed])
            # Create a Delaunay triangulation using the seed and vertices of each stratum
delaunay_obj = Delaunay(seed_and_vertices)
            # Compute the volume of each Delaunay simplex
volume = list()
for i in range(len(delaunay_obj.vertices)):
vert = delaunay_obj.vertices[i]
ch = ConvexHull(seed_and_vertices[vert])
volume.append(ch.volume)
temp_prob = np.array(volume) / sum(volume)
a = list(range(len(delaunay_obj.vertices)))
for k in range(int(nsamples_per_stratum[j])):
simplex = random_state.choice(a, p=temp_prob)
new_samples = SimplexSampling(
nodes=seed_and_vertices[delaunay_obj.vertices[simplex]],
nsamples=1,
random_state=self.random_state,
).samples
samples_in_strata.append(new_samples)
self.extend_weights(nsamples_per_stratum, j, weights)
return samples_in_strata, weights
def compute_centroids(self):
# if self.mesh is None:
# self.add_boundary_points_and_construct_delaunay()
self.mesh.centroids = np.zeros([self.mesh.nsimplex, self.dimension])
self.mesh.volumes = np.zeros([self.mesh.nsimplex, 1])
        from scipy.spatial import ConvexHull, QhullError
for j in range(self.mesh.nsimplex):
try:
ConvexHull(self.points[self.mesh.vertices[j]])
self.mesh.centroids[j, :], self.mesh.volumes[j] = \
DelaunayStrata.compute_delaunay_centroid_volume(self.points[self.mesh.vertices[j]])
            except QhullError:
self.mesh.centroids[j, :], self.mesh.volumes[j] = (np.mean(self.points[self.mesh.vertices[j]]), 0,)
def initialize(self, samples_number, training_points):
self.add_boundary_points_and_construct_delaunay(samples_number, training_points)
self.mesh.old_vertices = self.mesh.vertices.copy()
def add_boundary_points_and_construct_delaunay(
self, samples_number, training_points
):
"""
        This method adds the corners of the :math:`[0, 1]^n` hypercube to the existing samples, which are then used to
        construct a Delaunay triangulation.
"""
self.mesh_vertices = training_points.copy()
self.points_to_samplesU01 = np.arange(0, training_points.shape[0])
for i in range(np.shape(self.voronoi.vertices)[0]):
if any(np.logical_and(self.voronoi.vertices[i, :] >= -1e-10, self.voronoi.vertices[i, :] <= 1e-10,)) or \
any(np.logical_and(self.voronoi.vertices[i, :] >= 1 - 1e-10, self.voronoi.vertices[i, :] <= 1 + 1e-10,)):
self.mesh_vertices = np.vstack([self.mesh_vertices, self.voronoi.vertices[i, :]])
self.points_to_samplesU01 = np.hstack([np.array([-1]), self.points_to_samplesU01, ])
        from scipy.spatial import Delaunay
# Define the simplex mesh to be used for gradient estimation and sampling
self.mesh = Delaunay(
self.mesh_vertices,
furthest_site=False,
incremental=True,
qhull_options=None,)
self.points = getattr(self.mesh, "points")
def calculate_strata_metrics(self, index):
self.compute_centroids()
s = np.zeros(self.mesh.nsimplex)
for j in range(self.mesh.nsimplex):
s[j] = self.mesh.volumes[j] ** 2
return s
def update_strata_and_generate_samples(
self, dimension, points_to_add, bins2break, samples_u01, random_state
):
new_points = np.zeros([points_to_add, dimension])
for j in range(points_to_add):
new_points[j, :] = self._generate_sample(
bins2break[j], random_state=random_state
)
self._update_strata(new_point=new_points, samples_u01=samples_u01)
return new_points
def calculate_gradient_strata_metrics(self, index):
# Estimate the variance over each simplex by Delta Method. Moments of the simplices are computed using
# Eq. (19) from the following reference:
# Good, I.J. and Gaskins, R.A. (1971). The Centroid Method of Numerical Integration. Numerische
# Mathematik. 16: 343--359.
var = np.zeros((self.mesh.nsimplex, self.dimension))
s = np.zeros(self.mesh.nsimplex)
for j in range(self.mesh.nsimplex):
for k in range(self.dimension):
std = np.std(self.points[self.mesh.vertices[j]][:, k])
var[j, k] = (
self.mesh.volumes[j]
* math.factorial(self.dimension)
/ math.factorial(self.dimension + 2)
) * (self.dimension * std ** 2)
s[j] = np.sum(self.dy_dx[j, :] * var[j, :] * self.dy_dx[j, :]) * (
self.mesh.volumes[j] ** 2
)
self.dy_dx_old = self.dy_dx
return s
def estimate_gradient(
self,
surrogate,
step_size,
samples_number,
index,
samples_u01,
training_points,
qoi,
max_train_size=None,
):
self.mesh.centroids = np.zeros([self.mesh.nsimplex, self.dimension])
self.mesh.volumes = np.zeros([self.mesh.nsimplex, 1])
        from scipy.spatial import ConvexHull, QhullError
for j in range(self.mesh.nsimplex):
try:
ConvexHull(self.points[self.mesh.vertices[j]])
self.mesh.centroids[j, :], self.mesh.volumes[j] = DelaunayStrata.compute_delaunay_centroid_volume(
self.points[self.mesh.vertices[j]])
            except QhullError:
self.mesh.centroids[j, :], self.mesh.volumes[j] = (np.mean(self.points[self.mesh.vertices[j]]), 0,)
if max_train_size is None or len(training_points) <= max_train_size or index == training_points.shape[0]:
from UQpy.utilities.Utilities import calculate_gradient
# Use the entire sample set to train the surrogate model (more expensive option)
self.dy_dx = calculate_gradient(
surrogate,
step_size,
np.atleast_2d(training_points),
np.atleast_2d(np.array(qoi)).T,
self.mesh.centroids,)
# dy_dx = self.calculate_gradient(
# np.atleast_2d(training_points), qoi, self.mesh.centroids, surrogate)
else:
# Use only max_train_size points to train the surrogate model (more economical option)
# Build a mapping from the new vertex indices to the old vertex indices.
self.mesh.new_vertices, self.mesh.new_indices = [], []
self.mesh.new_to_old = np.zeros([self.mesh.vertices.shape[0], ]) * np.nan
j, k = 0, 0
while (j < self.mesh.vertices.shape[0] and k < self.mesh.old_vertices.shape[0]):
if np.all(self.mesh.vertices[j, :] == self.mesh.old_vertices[k, :]):
self.mesh.new_to_old[j] = int(k)
j += 1
k = 0
else:
k += 1
if k == self.mesh.old_vertices.shape[0]:
self.mesh.new_vertices.append(self.mesh.vertices[j])
self.mesh.new_indices.append(j)
j += 1
k = 0
# Find the nearest neighbors to the most recently added point
from sklearn.neighbors import NearestNeighbors
knn = NearestNeighbors(n_neighbors=max_train_size)
knn.fit(np.atleast_2d(samples_u01))
neighbors = knn.kneighbors(np.atleast_2d(samples_u01[-1]), return_distance=False)
# For every simplex, check if at least dimension-1 vertices are in the neighbor set.
# Only update the gradient in simplices that meet this criterion.
update_list = []
for j in range(self.mesh.vertices.shape[0]):
self.vertices_in_U01 = self.points_to_samplesU01[self.mesh.vertices[j]]
self.vertices_in_U01[np.isnan(self.vertices_in_U01)] = 10 ** 18
v_set = set(self.vertices_in_U01)
v_list = list(self.vertices_in_U01)
if len(v_set) != len(v_list):
continue
else:
if all(np.isin(self.vertices_in_U01, np.hstack([neighbors, np.atleast_2d(10 ** 18)]),)):
update_list.append(j)
update_array = np.asarray(update_list)
# Initialize the gradient vector
self.dy_dx = np.zeros((self.mesh.new_to_old.shape[0], self.dimension))
# For those simplices that will not be updated, use the previous gradient
for j in range(self.dy_dx.shape[0]):
if np.isnan(self.mesh.new_to_old[j]):
continue
else:
self.dy_dx[j, :] = self.dy_dx_old[int(self.mesh.new_to_old[j]), :]
# For those simplices that will be updated, compute the new gradient
from UQpy.utilities.Utilities import calculate_gradient
self.dy_dx[update_array, :] = calculate_gradient(surrogate, step_size,
np.atleast_2d(training_points)[neighbors],
np.atleast_2d(np.array(qoi)[neighbors]).T,
self.mesh.centroids[update_array])
def _update_strata(self, new_point, samples_u01):
i_ = samples_u01.shape[0]
p_ = new_point.shape[0]
        # Update the index mappings to recognize the new points
self.points_to_samplesU01 = np.hstack([self.points_to_samplesU01, np.arange(i_, i_ + p_)])
self.mesh.old_vertices = self.mesh.vertices
# Update the Delaunay triangulation mesh to include the new point.
self.mesh.add_points(new_point)
self.points = getattr(self.mesh, "points")
self.mesh_vertices = np.vstack([self.mesh_vertices, new_point])
# Compute the strata weights.
self.voronoi, bounded_regions = VoronoiStrata.voronoi_unit_hypercube(samples_u01)
self.centroids = []
self.volume = []
for region in bounded_regions:
vertices = self.voronoi.vertices[region + [region[0]]]
centroid, volume = VoronoiStrata.compute_voronoi_centroid_volume(vertices)
self.centroids.append(centroid[0, :])
self.volume.append(volume)
def _generate_sample(self, bin_, random_state):
import itertools
tmp_vertices = self.points[self.mesh.simplices[int(bin_), :]]
col_one = np.array(list(itertools.combinations(np.arange(self.dimension + 1), self.dimension)))
        self.mesh.sub_simplex = np.zeros_like(tmp_vertices)  # nodes: an array containing the mid-points of the edges
for m in range(self.dimension + 1):
self.mesh.sub_simplex[m, :] = (
np.sum(tmp_vertices[col_one[m] - 1, :], 0) / self.dimension)
# Using the Simplex class to generate a new sample in the sub-simplex
new = SimplexSampling(nodes=self.mesh.sub_simplex, nsamples=1, random_state=random_state).samples
return new
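# --- Illustrative usage (a sketch; the seed count, iteration count, and random
# seed below are arbitrary) ---
if __name__ == "__main__":
    # Centroidal Voronoi decomposition of the 2-D unit square into 8 strata.
    strata = VoronoiStrata(seeds_number=8, dimension=2,
                           decomposition_iterations=5, random_state=42)
    # The strata partition the unit square, so their volumes should sum to ~1.
    print(strata.volume.sum())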
| cupy/cupy | /tests/cupyx_tests/profiler_tests/test_benchmark.py | MIT, LicenseRef-scancode-unknown-license-reference | Python | UTF-8 | 5,372 bytes |
import unittest
from unittest import mock
import numpy
import cupy
from cupy import testing
from cupyx import profiler
from cupyx.profiler import _time
class TestBenchmark(unittest.TestCase):
def test_cpu_routine(self):
with mock.patch('time.perf_counter',
mock.Mock(side_effect=[2.4, 3.8, 3.8] * 10)):
with mock.patch('cupy.cuda.get_elapsed_time',
mock.Mock(return_value=2500)):
mock_func = mock.Mock()
mock_func.__name__ = 'test_name_xxx'
x = cupy.testing.shaped_random((2, 3), cupy, 'int32')
y = cupy.testing.shaped_random((2, 3), cupy, 'int32')
assert mock_func.call_count == 0
perf = profiler.benchmark(
mock_func, (x, y), n_repeat=10, n_warmup=3)
assert perf.name == 'test_name_xxx'
assert mock_func.call_count == 13
assert perf.cpu_times.shape == (10,)
assert perf.gpu_times.shape == (1, 10,)
assert (perf.cpu_times == 1.4).all()
assert (perf.gpu_times == 2.5).all()
@testing.multi_gpu(2)
def test_multigpu_routine(self):
with mock.patch('time.perf_counter',
mock.Mock(side_effect=[2.4, 3.8, 3.8] * 10)):
with mock.patch('cupy.cuda.get_elapsed_time',
mock.Mock(return_value=2500)):
mock_func = mock.Mock()
mock_func.__name__ = 'test_name_xxx'
x = cupy.testing.shaped_random((2, 3), cupy, 'int32')
y = cupy.testing.shaped_random((2, 3), cupy, 'int32')
assert mock_func.call_count == 0
perf = profiler.benchmark(
mock_func, (x, y), n_repeat=10, n_warmup=3, devices=(0, 1))
assert perf.name == 'test_name_xxx'
assert mock_func.call_count == 13
assert perf.cpu_times.shape == (10,)
assert perf.gpu_times.shape == (2, 10,)
assert (perf.cpu_times == 1.4).all()
assert (perf.gpu_times == 2.5).all()
def test_benchmark_max_duration(self):
with mock.patch('time.perf_counter',
mock.Mock(side_effect=[1., 2., 2.] * 6)):
with mock.patch('cupy.cuda.get_elapsed_time',
mock.Mock(return_value=2500)):
mock_func = mock.Mock()
mock_func.__name__ = 'test_name_xxx'
x = cupy.testing.shaped_random((2, 3), cupy, 'int32')
y = cupy.testing.shaped_random((2, 3), cupy, 'int32')
assert mock_func.call_count == 0
perf = profiler.benchmark(
mock_func, (x, y), n_warmup=3, max_duration=2.5)
assert perf.name == 'test_name_xxx'
assert mock_func.call_count == 6
assert perf.cpu_times.shape == (3,)
assert perf.gpu_times.shape == (1, 3)
assert (perf.cpu_times == 1.).all()
assert (perf.gpu_times == 2.5).all()
def test_benchmark_kwargs(self):
x = cupy.random.rand(5)
profiler.benchmark(
cupy.nonzero, kwargs={'a': x}, n_repeat=1, n_warmup=1)
class TestPerfCaseResult(unittest.TestCase):
def test_show_gpu(self):
times = numpy.array([
[5.4, 7.1, 6.0, 5.4, 4.2],
[6.4, 4.3, 8.9, 9.6, 3.8],
]) * 1e-6
perf = _time._PerfCaseResult('test_name_xxx', times, (0,))
expected = (
'test_name_xxx :'
' CPU: 5.620 us +/- 0.943 '
'(min: 4.200 / max: 7.100) us '
' GPU-0: 6.600 us +/- 2.344 '
'(min: 3.800 / max: 9.600) us'
)
assert str(perf) == expected
def test_no_show_gpu(self):
times = numpy.array([
[5.4, 7.1, 6.0, 5.4, 4.2],
[6.4, 4.3, 8.9, 9.6, 3.8],
]) * 1e-6
perf = _time._PerfCaseResult('test_name_xxx', times, (0,))
expected = (
'test_name_xxx :'
' CPU: 5.620 us +/- 0.943 '
'(min: 4.200 / max: 7.100) us'
)
assert perf.to_str() == expected
        # Check that the result does not change across repeated calls.
assert perf.to_str() == expected
def test_single_show_gpu(self):
times = numpy.array([[5.4], [6.4]]) * 1e-6
perf = _time._PerfCaseResult('test_name_xxx', times, (0,))
assert str(perf) == ('test_name_xxx : CPU: 5.400 us '
' GPU-0: 6.400 us')
def test_single_no_show_gpu(self):
times = numpy.array([[5.4], [6.4]]) * 1e-6
perf = _time._PerfCaseResult('test_name_xxx', times, (0,))
assert perf.to_str() == 'test_name_xxx : CPU: 5.400 us'
def test_show_multigpu(self):
times = numpy.array([[5.4], [6.4], [7.0], [8.1]]) * 1e-6
perf = _time._PerfCaseResult('test_name_xxx', times, (0, 1, 2))
assert str(perf) == ('test_name_xxx : CPU: 5.400 us '
' GPU-0: 6.400 us '
' GPU-1: 7.000 us '
' GPU-2: 8.100 us')
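# --- Illustrative usage (a sketch; requires a CUDA device, and the array shape
# and repetition counts are arbitrary) ---
if __name__ == '__main__':
    a = cupy.random.rand(256, 256)
    # Times cupy.linalg.norm over 20 repetitions after 5 warm-up runs and
    # prints a one-line summary like the strings asserted in the tests above.
    print(profiler.benchmark(cupy.linalg.norm, (a,), n_repeat=20, n_warmup=5))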
| angr/angr-doc | /examples/hitcon2017_sakura/solve.py | BSD-2-Clause | Python | UTF-8 | 1,650 bytes |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Kyle ZENG
# Runtime: ~6 minutes
import hashlib
import angr
def main():
    with open('./sakura', 'rb') as f:
        e = f.read()
    # make sure the return value is not 0; add the corresponding addresses to the avoid list
avoids = []
index = 0
while True:
# avoid asm('mov byte ptr [rbp-0x1E49], 0')
index = e.find(b'\xc6\x85\xb7\xe1\xff\xff\x00', index+1)
if index == -1:
break
addr = 0x400000 + index
avoids.append(addr)
# find list
finds = []
index = 0
while True:
# find asm('mov rdi, rax')
index = e.find(b'H\x89\xc7', index+1)
if index == -1 or index > 0x10ff5:
break
addr = 0x400000 + index
finds.append(addr)
        # skip an address we don't want to find
index = e.find(b'H\x89\xc7', index+1)
# initialize project
proj = angr.Project('./sakura', auto_load_libs=False)
state = proj.factory.entry_state()
simgr = proj.factory.simulation_manager(state)
# find ans stage by stage
for find in finds:
simgr.explore(find=find, avoid=avoids)
found = simgr.found[0]
simgr = proj.factory.simulation_manager(found)
# evaluate text
text = found.solver.eval(found.memory.load(0x612040, 400), cast_to=bytes)
h = hashlib.sha256(text)
flag = 'hitcon{'+h.hexdigest()+'}'
return flag
def test():
assert main() == 'hitcon{6c0d62189adfd27a12289890d5b89c0dc8098bc976ecc3f6d61ec0429cccae61}'
if __name__ == '__main__':
import logging
logging.getLogger('angr.sim_manager').setLevel(logging.DEBUG)
print(main())
| BlackLight/platypush | /platypush/runner/_app.py | MIT | Python | UTF-8 | 1,390 bytes |
import logging
import os
import signal
import subprocess
import sys
from typing_extensions import override
from platypush.process import ControllableProcess
class ApplicationProcess(ControllableProcess):
"""
Controllable process wrapper interface for the main application.
"""
def __init__(self, *args: str, pidfile: str, **kwargs):
super().__init__(name='platypush', **kwargs)
self.logger = logging.getLogger('platypush')
self.args = args
self.pidfile = pidfile
def __enter__(self) -> "ApplicationProcess":
self.start()
return self
def __exit__(self, *_, **__):
self.stop()
@override
def main(self):
self.logger.info('Starting application...')
with subprocess.Popen(
['python', '-m', 'platypush.app', *self.args],
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
) as app:
try:
app.wait()
except KeyboardInterrupt:
pass
@override
def on_stop(self):
try:
with open(self.pidfile, 'r') as f:
pid = int(f.read().strip())
except (OSError, ValueError):
pid = None
if not pid:
return
try:
os.kill(pid, signal.SIGINT)
except OSError:
pass
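# --- Illustrative usage (a sketch; the pidfile path is hypothetical) ---
if __name__ == '__main__':
    # Starts the application on entry; on exit, on_stop() reads the pidfile
    # and delivers SIGINT to the main process.
    with ApplicationProcess(pidfile='/var/run/platypush.pid'):
        pass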
| keiffster/program-y | /test/programytest/parser/template/graph_tests/test_learn.py | MIT | Python | UTF-8 | 6,207 bytes |
import xml.etree.ElementTree as ET
from programy.parser.exceptions import ParserException
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.learn import TemplateLearnNode, LearnCategory
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphLearnTests(TemplateGraphTestClient):
def test_learn_simple(self):
client_context1 = self.create_client_context("testid")
template = ET.fromstring("""
<template>
<learn>
<category>
<pattern>HELLO <eval>WORLD</eval> <iset>THERE, NOW</iset></pattern>
<template>HIYA</template>
</category>
</learn>
</template>
""")
ast = self._graph.parse_template_expression(template)
self.assertIsNotNone(ast)
self.assertIsInstance(ast, TemplateNode)
self.assertIsNotNone(ast.children)
self.assertEqual(len(ast.children), 1)
learn_node = ast.children[0]
self.assertIsNotNone(learn_node)
self.assertIsInstance(learn_node, TemplateLearnNode)
self.assertEqual(1, len(learn_node.children))
self.assertIsInstance(learn_node.children[0], LearnCategory)
self.assertIsNotNone(learn_node.children[0].pattern)
self.assertIsInstance(learn_node.children[0].pattern, ET.Element)
self.assertIsNotNone(learn_node.children[0].topic)
self.assertIsInstance(learn_node.children[0].topic, ET.Element)
self.assertIsNotNone(learn_node.children[0].that)
self.assertIsInstance(learn_node.children[0].that, ET.Element)
self.assertIsNotNone(learn_node.children[0].template)
self.assertIsInstance(learn_node.children[0].template, TemplateNode)
resolved = learn_node.resolve(client_context1)
self.assertEqual(resolved, "")
response = client_context1.bot.ask_question(client_context1, "HELLO WORLD THERE")
self.assertEqual("HIYA.", response)
def test_learn_multi_user(self):
client_context1 = self._client.create_client_context("testid")
template = ET.fromstring("""
<template>
<learn>
<category>
<pattern>HELLO THERE ONE</pattern>
<template>HIYA ONE</template>
</category>
</learn>
</template>
""")
ast = self._graph.parse_template_expression(template)
learn_node = ast.children[0]
learn_node.resolve(client_context1)
response = client_context1.bot.ask_question(client_context1, "HELLO THERE ONE")
self.assertEqual("HIYA ONE.", response)
client_context2 = self._client.create_client_context("testid2")
template = ET.fromstring("""
<template>
<learn>
<category>
<pattern>HELLO THERE TWO</pattern>
<template>HIYA TWO</template>
</category>
</learn>
</template>
""")
ast = self._graph.parse_template_expression(template)
learn_node = ast.children[0]
learn_node.resolve(client_context2)
response = client_context2.bot.ask_question(client_context2, "HELLO THERE TWO")
self.assertEqual("HIYA TWO.", response)
        # Now try asking each other's questions
response = client_context1.bot.ask_question(client_context1, "HELLO THERE TWO")
self.assertEqual("", response)
response = client_context2.bot.ask_question(client_context2, "HELLO THERE ONE")
self.assertEqual("", response)
def test_multiple_patterns(self):
client_context1 = self._client.create_client_context("testid")
template = ET.fromstring("""
<template>
<learn>
<category>
<pattern>HELLO THERE</pattern>
<template>HIYA ONE</template>
</category>
</learn>
</template>
""")
ast = self._graph.parse_template_expression(template)
learn_node = ast.children[0]
learn_node.resolve(client_context1)
response = client_context1.bot.ask_question(client_context1, "HELLO THERE")
self.assertEqual("HIYA ONE.", response)
client_context2 = self._client.create_client_context("testid")
template = ET.fromstring("""
<template>
<learn>
<category>
<pattern>HELLO THERE</pattern>
<template>HIYA TWO</template>
</category>
</learn>
</template>
""")
ast = self._graph.parse_template_expression(template)
learn_node = ast.children[0]
learn_node.resolve(client_context2)
response = client_context2.bot.ask_question(client_context2, "HELLO THERE")
self.assertEqual("HIYA TWO.", response)
def test_category_missing(self):
template = ET.fromstring("""
<template>
<learn>
<pattern>HELLO THERE</pattern>
<template>HIYA TWO</template>
</learn>
</template>
""")
with self.assertRaises(ParserException):
_ = self._graph.parse_template_expression(template)
def test_topic_present(self):
template = ET.fromstring("""
<template>
<learn>
<topic>
<category>
<pattern>HELLO THERE</pattern>
<template>HIYA TWO</template>
</category>
</topic>
</learn>
</template>
""")
with self.assertRaises(ParserException):
_ = self._graph.parse_template_expression(template)
def test_other_tag(self):
template = ET.fromstring("""
<template>
<learn>
<categoryx>
<pattern>HELLO THERE</pattern>
<template>HIYA TWO</template>
</categoryx>
</learn>
</template>
""")
with self.assertRaises(ParserException):
_ = self._graph.parse_template_expression(template)
| dmlc/gluon-cv | /scripts/action-recognition/get_flops.py | Apache-2.0 | Python | UTF-8 | 1,055 bytes |
"""
Script to compute FLOPs of a model
"""
import os
import argparse
import torch
from gluoncv.torch.model_zoo import get_model
from gluoncv.torch.engine.config import get_cfg_defaults
from thop import profile, clever_format
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compute FLOPs of a model.')
parser.add_argument('--config-file', type=str, help='path to config file.')
parser.add_argument('--num-frames', type=int, default=32, help='temporal clip length.')
parser.add_argument('--input-size', type=int, default=224,
help='size of the input image size. default is 224')
args = parser.parse_args()
cfg = get_cfg_defaults()
cfg.merge_from_file(args.config_file)
model = get_model(cfg)
    input_tensor = torch.rand(1, 3, args.num_frames, args.input_size, args.input_size)
macs, params = profile(model, inputs=(input_tensor,))
macs, params = clever_format([macs, params], "%.3f")
print("FLOPs: ", macs, "; #params: ", params)
| facebook/FAI-PEP | /benchmarking/run_remote.py | Apache-2.0 | Python | UTF-8 | 32,554 bytes |
#!/usr/bin/env python
##############################################################################
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import glob
import json
import os
import re
import shutil
import subprocess
import tempfile
import threading
from collections import defaultdict
from getpass import getuser
from random import randint
from threading import Lock
import pkg_resources
from bridge.db import DBDriver
from remote.devices import Devices
from remote.file_handler import FileHandler
from remote.print_result_url import PrintResultURL
from remote.screen_reporter import ScreenReporter
from tabulate import tabulate
from utils.build_program import buildProgramPlatform, buildUsingBuck
from utils.custom_logger import getLogger, setLoggerLevel
from utils.utilities import (
BenchmarkArgParseException,
getBenchmarks,
getMeta,
parse_kwarg,
unpackAdhocFile,
)
parser = argparse.ArgumentParser(description="Run the benchmark remotely")
parser.add_argument(
"--app_id",
help="The app id you use to upload/download your file for everstore "
"and access the job queue",
)
parser.add_argument(
"--async_submit",
action="store_true",
help="Return once the job has been submitted to db. No need to wait till "
"finish so that you can submit mutiple jobs in async way.",
)
parser.add_argument(
"-b",
"--benchmark_file",
help="Specify the json file for the benchmark or a number of benchmarks",
)
parser.add_argument(
"--benchmark_db", help="The database that will store benchmark infos"
)
parser.add_argument("--benchmark_db_entry", help="The entry point of server's database")
parser.add_argument(
"--benchmark_table", help="The table that will store benchmark infos"
)
parser.add_argument(
"-c", "--custom_binary", help="Specify the custom binary that you want to run."
)
parser.add_argument(
"--cache_config",
required=True,
help="The config file to specify the cached uploaded files. If the files "
"are already uploaded in the recent past, do not upload again.",
)
parser.add_argument(
"--hashes",
default=None,
help="Specify the exact devices to run remotely by hashes. Have to use "
"together with --remote and --devices",
)
parser.add_argument(
"--debug",
action="store_true",
help="Debug mode to retain all the running binaries and models.",
)
parser.add_argument(
"--log_output_dir",
default=None,
help="Directory where the benchmark logs are written to. If not specified, the logs are outputted to the terminal",
)
parser.add_argument(
"--devices", help="Specify the devices to benchmark on, in comma separated list."
)
parser.add_argument(
"--devices_config",
default=None,
help="The config file in absolute path to map abbreviations to full names",
)
parser.add_argument("--env", help="environment variables passed to runtime binary")
parser.add_argument(
"--fetch_result",
action="store_true",
help="Fetch the result of already submitted jobs, use together with "
"--user_identifier",
)
parser.add_argument(
"--fetch_status",
action="store_true",
help="Fetch the status of already submitted jobs, use together with "
"--user_identifier",
)
parser.add_argument(
"--kill",
action="store_true",
help="Kill submitted jobs, use together with " "--user_identifier",
)
parser.add_argument(
"--file_storage", help="The storage engine for uploading and downloading files"
)
parser.add_argument(
"--force_submit", action="store_true", help="Force to submit the run."
)
parser.add_argument(
"--framework",
choices=["caffe2", "generic", "oculus", "pytorch", "tflite", "glow"],
help="Specify the framework to benchmark on.",
)
parser.add_argument(
"--frameworks_dir",
default=None,
help="The root directory that all frameworks resides. "
"Usually it is the specifications/frameworks directory. "
"If not provide, we will try to find it from the binary.",
)
parser.add_argument(
"--info", help="The json serialized options describing the control and treatment."
)
parser.add_argument(
"--job_queue",
default="aibench_interactive",
help="Specify the db job queue that the benchmark is sent to",
)
parser.add_argument(
"--list_devices",
action="store_true",
help="List the devices associated to the job queue",
)
parser.add_argument(
"--list_job_queues",
action="store_true",
help="List the job queues that have available devices",
)
parser.add_argument(
"--logger_level",
default="info",
choices=["info", "warning", "error"],
help="Specify the logger level",
)
parser.add_argument(
"--platform",
help="Specify the platform to benchmark on."
"Use this flag if the framework"
" needs special compilation scripts. The scripts are called build.sh "
"saved in specifications/frameworks/<framework>/<platforms> directory",
)
parser.add_argument(
"--pre_built_binary",
help="Specify the pre_built_binary to bypass the building process.",
)
parser.add_argument(
"--force_profile",
action="store_true",
help="Enable profiling regardless of the setting in the benchmark.",
)
parser.add_argument(
"--profile",
nargs="*",
default=None,
action="store",
help="Enable profiling, overriding any profiling settings in the benchmark config.",
)
parser.add_argument(
"--query_num_devices",
help="Return the counter of user specified device name under different condition",
)
parser.add_argument(
"--repo_dir", help="Required. The base framework repo directory used for benchmark."
)
parser.add_argument(
"--result_db", help="The database that will store benchmark results"
)
parser.add_argument(
"--root_model_dir",
help="The root model directory if the meta data of the model uses "
"relative directory, i.e. the location field starts with //",
)
parser.add_argument(
"--screen_reporter",
action="store_true",
help="Display the summary of the benchmark result on screen.",
)
parser.add_argument("--server_addr", help="The lab's server address")
parser.add_argument(
"--string_map",
help="The json serialized arguments passed into treatment for remote run.",
)
parser.add_argument(
"--test",
action="store_true",
help="Indicate whether this is a test run. Test runs use a different database.",
)
parser.add_argument(
"--token",
help="The token you use to upload/download your file for everstore "
"and access the job queue",
)
parser.add_argument(
"--urlPrefix", help="URL Prefix if you want to find your result from the URL."
)
parser.add_argument(
"--user_identifier",
help="The identifier user pass in to differentiate different benchmark runs.",
)
parser.add_argument(
"--user_string",
help="The user_string pass in to differentiate different regression benchmark runs.",
)
parser.add_argument(
"--adhoc",
nargs="?",
const="generic",
default=None,
help="Use the adhoc template file",
)
parser.add_argument(
"--buck_target", default="", help="The buck command to build the custom binary"
)
LOCK = Lock()
def _requote(match) -> str:
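    # Quote bare tokens so that e.g. '{enabled: true}' becomes
    # '{"enabled": true}'; the JSON literals true/false stay unquoted.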
    token = match.group(0)
    return token if token == "true" or token == "false" else f'"{token}"'
class BuildProgram(threading.Thread):
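    # Builds (or reuses) the benchmark binary on a worker thread, uploads the
    # build outputs, and records the uploaded names in the shared `filenames`
    # dict so the main thread can reference them after join().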
def __init__(self, args, file_handler, tempdir, filenames, prebuilt_binary=None):
threading.Thread.__init__(self)
self.tempdir = tempdir
self.args = args
self.file_handler = file_handler
self.filenames = filenames
self.prebuilt_binary = prebuilt_binary
def run(self):
self._buildProgram(self.tempdir)
def _buildProgram(self, tempdir):
# build binary
platform = self.args.platform
program = tempdir + "/program"
if os.name == "nt":
program = program + ".exe"
elif platform.startswith("ios"):
program = program + ".ipa"
if self.prebuilt_binary:
program = self.prebuilt_binary
elif self.args.buck_target:
print("Building program with buck...")
success = buildUsingBuck(program, self.args.platform, self.args.buck_target)
if not success:
return
else:
print("Building program...")
success = buildProgramPlatform(
program,
self.args.repo_dir,
self.args.framework,
self.args.frameworks_dir,
self.args.platform,
)
if not success:
return
        # upload all files under the program's output directory
filedir = os.path.dirname(program)
allfiles = []
if os.path.exists(filedir):
if self.prebuilt_binary:
allfiles = [program]
else:
allfiles = [
os.path.join(filedir, f)
for f in os.listdir(filedir)
if os.path.isfile(os.path.join(filedir, f))
]
for fn in allfiles:
with LOCK:
filename, _ = self.file_handler.uploadFile(fn, None, None, False)
getLogger().info("program: {}".format(filename))
self.filenames[os.path.basename(fn)] = filename
            # the main program must be registered under the "program" key
self.filenames["program"] = self.filenames[os.path.basename(program)]
else:
self.filenames["program"] = program
class RunRemote:
def __init__(self, raw_args=None):
self.args, self.unknowns = parser.parse_known_args(raw_args)
self._updateArgs(self.args)
setLoggerLevel(self.args.logger_level)
if not self.args.benchmark_db_entry:
assert (
self.args.server_addr is not None
), "Either server_addr or benchmark_db_entry must be specified"
while self.args.server_addr[-1] == "/":
self.args.server_addr = self.args.server_addr[:-1]
self.args.benchmark_db_entry = self.args.server_addr + "/benchmark/"
self.db = DBDriver(
self.args.benchmark_db,
self.args.app_id,
self.args.token,
self.args.benchmark_table,
self.args.job_queue,
self.args.test,
self.args.benchmark_db_entry,
)
self.url_printer = PrintResultURL(self.args)
self.file_handler = FileHandler(self.args)
self.devices = Devices(self.args.devices_config)
# Hard code scuba table
self.scuba_dataset = "caffe2_benchmarking"
self.info = None
        self.tempdir = ""  # replaced with a real temp dir in run()
def run(self):
if self.args.list_devices:
devices = self.db.listDevices(self.args.job_queue)
self._listDevices()
return devices
if self.args.list_job_queues:
self._printJobQueues()
return
if self.args.fetch_status or self.args.fetch_result:
result = self._fetchResult()
return result
if self.args.kill:
self._killJob()
return
if self.args.query_num_devices:
return self._queryNumDevices(self.args.query_num_devices)
assert self.args.benchmark_file, "--benchmark_file (-b) must be specified"
assert self.args.devices, "--devices must be specified"
assert self.args.framework, "--framework must be specified"
assert self.args.platform, "--platform must be specified"
assert self.args.repo_dir, "--repo_dir must be specified"
assert (
(self.args.info is not None)
and (self.args.custom_binary is None)
and (self.args.pre_built_binary is None)
) or (
self.args.info is None
), "--info cannot co-exist with --custom_binary and --pre_built_binary"
list_job_queues = self._listJobQueues()
if not self.args.force_submit:
self._checkDevices(self.args.devices, self.args.hashes)
        assert (
            self.args.job_queue != "*" and self.args.job_queue in list_job_queues
        ), "--job_queue must be chosen from " + " ".join(list_job_queues)
self.tempdir = tempfile.mkdtemp(prefix="aibench")
program_filenames = {}
if self.args.info:
self.info = json.loads(self.args.info)
else:
self.info = {"treatment": {"programs": {}}}
if self.args.string_map:
self.info["treatment"]["string_map"] = str(self.args.string_map)
assert ("treatment" in self.info) and ("programs" in self.info["treatment"]), (
'In --info, field treatment must exist. In info["treatment"] '
"program field must exist (may be None)"
)
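        # Choose the binary to ship, in precedence order: an explicit program
        # location inside --info, then --custom_binary, then
        # --pre_built_binary.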
binary = (
self.info["treatment"]["programs"]["program"]["location"]
if (
"programs" in self.info["treatment"]
and "program" in self.info["treatment"]["programs"]
)
else self.args.custom_binary
if self.args.custom_binary
else self.args.pre_built_binary
)
t = BuildProgram(
self.args, self.file_handler, self.tempdir, program_filenames, binary
)
t.start()
benchmarks = getBenchmarks(self.args.benchmark_file, self.args.framework)
self._updateBenchmarksWithArgs(benchmarks, self.args)
for benchmark in benchmarks:
self._uploadOneBenchmark(benchmark)
if self.args.debug:
for test in benchmark["content"]["tests"]:
test["log_output"] = True
if self.args.env:
env = {}
env_vars = self.args.env.split()
for env_var in env_vars:
k, v = parse_kwarg(env_var)
env[k] = v
for test in benchmark["content"]["tests"]:
cmd_env = {}
cmd_env.update(env)
if "env" in test:
cmd_env.update(test["env"])
test["env"] = cmd_env
t.join()
assert (
"program" in program_filenames
), "program does not exist. Build may be failed."
for fn in program_filenames:
self.info["treatment"]["programs"][fn] = {"location": program_filenames[fn]}
# Pass meta file from build to benchmark
meta = getMeta(self.args, self.args.platform)
if meta:
assert "meta" not in self.info, "info field already has a meta field"
self.info["meta"] = meta
new_devices = self.devices.getFullNames(self.args.devices)
user_identifier = (
int(self.args.user_identifier)
if self.args.user_identifier
else randint(1, 1000000000000000)
)
user = getuser() if not self.args.user_string else self.args.user_string
hashes = self.args.hashes
for benchmark in benchmarks:
data = {
"benchmark": benchmark,
"info": self.info,
}
self.db.submitBenchmarks(data, new_devices, user_identifier, user, hashes)
if self.args.async_submit:
print("Job submitted.")
self._printRunDetailsURL(user_identifier)
return
self.url_printer.printURL(self.scuba_dataset, user_identifier, benchmarks)
if not self.args.debug:
shutil.rmtree(self.tempdir, True)
if self.args.screen_reporter:
self._screenReporter(user_identifier)
self._printRunDetailsURL(user_identifier)
# Clean up
try:
            rm_list = glob.glob("/tmp/aibench*")
for f in rm_list:
if os.path.isdir(f):
shutil.rmtree(f, True)
if os.path.isfile(f):
os.remove(f)
except Exception:
pass
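    # Applies --force_profile/--profile to every test. Accepted --profile
    # forms (profiler type names below are illustrative):
    #   --profile                    -> {"enabled": True}
    #   --profile false              -> {"enabled": False}
    #   --profile '{enabled: true}'  -> requoted if needed, then parsed as JSON
    #   --profile cpu memory         -> {"enabled": True, "types": ["cpu", "memory"]}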
def _updateBenchmarksWithArgs(self, benchmarks, args):
for benchmark in benchmarks:
content = benchmark["content"]
tests = []
if "tests" in content:
tests = content["tests"]
for test in tests:
if args.force_profile: # deprecated
if "profiler" not in test:
test["profiler"] = {}
test["profiler"]["enabled"] = True
elif args.profile is not None:
# first check for boolean values
if args.profile == [] or (
len(args.profile) == 1 and args.profile[0].lower() == "true"
):
test["profiler"] = {"enabled": True}
elif len(args.profile) == 1 and args.profile[0].lower() == "false":
test["profiler"] = {"enabled": False}
elif args.profile[0].startswith("{"):
# Specified in json format on the command line for full profiling options
try:
val = " ".join(args.profile)
if '"' not in val:
# ensure text substrings are quoted
val = re.sub(
r"[a-zA-Z0-9_\-]*\:*[a-zA-Z0-9_\-]+",
_requote,
val,
)
test["profiler"] = json.loads(val)
except Exception as e:
raise BenchmarkArgParseException(
f"Invalid --profile arguments: {val}\n{e}"
)
else:
# only a list of "types" can be specified directly
test["profiler"] = {}
test["profiler"]["enabled"] = True
if args.profile != []:
test["profiler"]["types"] = args.profile
def _uploadOneBenchmark(self, benchmark):
filename = benchmark["filename"]
one_benchmark = benchmark["content"]
# TODO refactor the code to collect all files to upload
del_paths = []
if "model" in one_benchmark:
if "files" in one_benchmark["model"]:
for field in one_benchmark["model"]["files"]:
value = one_benchmark["model"]["files"][field]
assert (
"location" in value
), "location field is missing in benchmark " "{}".format(filename)
ref_path = ["files", field]
if self._uploadFile(value, filename, benchmark, ref_path):
del_paths.append(ref_path)
if "libraries" in one_benchmark["model"]:
for value in one_benchmark["model"]["libraries"]:
assert (
"location" in value
), "location field is missing in benchmark " "{}".format(filename)
self._uploadFile(value, filename, benchmark)
for del_path in del_paths:
self._del_from_benchmark(benchmark["content"]["model"], del_path)
# upload test file
assert (
"tests" in one_benchmark
), "tests field is missing in benchmark {}".format(filename)
tests = one_benchmark["tests"]
for test in tests:
if "input_files" in test:
self._uploadTestFiles(test["input_files"], filename)
            # ignore the outputs for non-accuracy metrics
if "output_files" in test and test["metric"] == "error":
self._uploadTestFiles(test["output_files"], filename)
    def _uploadTestFiles(self, files, basefilename):
        if isinstance(files, list):
            for f in files:
                self._uploadFile(f, basefilename)
        elif isinstance(files, dict):
            for value in files.values():
                if isinstance(value, list):
                    for v in value:
                        self._uploadFile(v, basefilename)
                else:
                    self._uploadFile(value, basefilename)
def _uploadFile(
self, f, basefilename, benchmark=None, ref_path=None, cache_file=True
):
if "location" not in f:
return
location = f["location"]
if "md5" not in f:
raise Exception("No md5sum provided for {}".format(f["filename"]))
md5 = f["md5"]
"""
For the file from repo, there is special handling
we need to fetch both control and treatment
, and also move the file from benchmark to info
Note: Support the file in model first
"""
if location.startswith("//repo"):
            assert ref_path is not None, (
                "repo is not yet supported for {}".format(location)
            )
for side in self.info:
if side == "extra":
continue
value = self.info[side]
commit_hash = "master"
if "commit" in value:
commit_hash = value["commit"] or "master"
tgt_file = self._downloadRepoFile(location, self.tempdir, commit_hash)
with LOCK:
f["location"], f["md5"] = self.file_handler.uploadFile(
tgt_file, md5, basefilename, cache_file
)
# add to info
assert len(ref_path), "ref_path must be a path to target file"
value["programs"][".".join(ref_path)] = {"location": f["location"]}
# remove from benchmark
assert (
benchmark is not None
), "benchmark must be passed into _uploadFile"
return True
else:
with LOCK:
f["location"], f["md5"] = self.file_handler.uploadFile(
location, md5, basefilename, cache_file
)
return False
def _downloadRepoFile(self, location, tgt_dir, commit_hash):
"""
location: //repo/fbsource/fbcode/aibench/...../a.py
"""
raw_scm_query = pkg_resources.resource_string(
"aibench", "benchmarking/bin/scm_query.par"
)
query_exe = os.path.join(tgt_dir, "scm_query.par")
with open(query_exe, "wb") as f:
f.write(raw_scm_query)
cmd = ["chmod", "+x", os.path.join(tgt_dir, "scm_query.par")]
subprocess.check_output(cmd)
dirs = location[2:].split("/")
tgt_file = os.path.join(tgt_dir, dirs[-1])
cmd = [
query_exe,
"--repo",
dirs[1],
"--file_path",
"/".join(dirs[2:]),
"--target_file",
tgt_file,
"--commit_hash",
commit_hash,
]
getLogger().info("Downloading {}".format(location))
subprocess.check_output(cmd)
os.remove(query_exe)
return tgt_file
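    # Deletes the entry at ref_path from a nested dict, e.g.
    # _del_from_benchmark(model, ["files", "weights"]) removes
    # model["files"]["weights"] ("weights" is an illustrative key).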
def _del_from_benchmark(self, benchmark, ref_path):
tgt = benchmark
for item in ref_path[:-1]:
tgt = tgt[item]
tgt.pop(ref_path[-1])
def _listDevices(self, flag=True):
devices = self.db.listDevices(self.args.job_queue)
headers = ["Device", "Status", "Abbrs", "Hash"]
rows = []
for device in devices:
abbrs = self.devices.getAbbrs(device["device"])
abbrs = ",".join(abbrs) if abbrs else ""
hash = device["hash"]
row = [device["device"], device["status"], abbrs, hash]
rows.append(row)
rows.sort()
if flag:
table = tabulate(rows, headers=headers, tablefmt="orgtbl")
print("\n{}\n".format(table))
return rows
def _checkDevices(self, specified_devices, hashes=None):
rows = self._listDevices(flag=False)
specifiedDevices = set(specified_devices.split(","))
specifiedHashes = None
if hashes:
hashes = hashes.split(",")
devices = specified_devices.split(",")
if len(hashes) != len(devices):
raise Exception("You need to provide same number of hashes and devices")
specifiedHashes = {}
for i, hash in enumerate(hashes):
specifiedHashes[hash] = devices[i]
devices = {}
devicesIn = True
for row in rows:
abbrs = row[-2].split(",") if row[-2] else []
if row[-1] not in devices:
devices[row[-1]] = {row[0]}.union(set(abbrs))
            else:
                # set.union() returns a new set (previously discarded here);
                # update() mutates the stored set in place.
                devices[row[-1]].update({row[0]}.union(set(abbrs)))
if specifiedHashes:
for specifiedHash in specifiedHashes:
if (
specifiedHash not in devices
or specifiedHashes[specifiedHash] not in devices[specifiedHash]
):
devicesIn = False
else:
allDevices = set()
for v in devices.values():
allDevices = allDevices.union(v)
devicesIn = not specifiedDevices.difference(allDevices)
if not devicesIn:
errMessages = " ".join(
[
"Devices",
specified_devices,
"is not available in the job_queue",
self.args.job_queue,
]
)
if hashes:
errMessages = " ".join(
[
"Devices",
specified_devices,
"with hashes",
",".join(hashes),
"is not available in the job_queue",
self.args.job_queue,
]
)
raise Exception(errMessages)
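    # Returns a status -> count mapping for the given device name or
    # abbreviation, e.g. {"AVAILABLE": 2, "OCCUPIED": 1} (status strings are
    # whatever the lab database reports; these two are illustrative).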
def _queryNumDevices(self, device_name):
deviceCounter = defaultdict(int)
for device in self.db.listDevices(self.args.job_queue):
abbrs = self.devices.getAbbrs(device["device"])
if device["device"] == device_name or device_name in (abbrs or []):
deviceCounter[device["status"]] += 1
return deviceCounter
def _listJobQueues(self):
devices = self.db.listDevices(job_queue="*")
list_job_queues = sorted({device["job_queue"] for device in devices})
return list_job_queues
def _printJobQueues(self):
list_job_queues = self._listJobQueues()
for jobQueue in list_job_queues:
print(jobQueue)
def _printRunDetailsURL(self, user_identifier):
if self.args.urlPrefix:
print(
"You can find more info via {}{}".format(
self.args.urlPrefix, user_identifier
)
)
def _screenReporter(self, user_identifier):
reporter = ScreenReporter(
self.db, self.devices, self.args.debug, self.args.log_output_dir
)
reporter.run(user_identifier)
def _fetchResult(self):
user_identifier = self.args.user_identifier
assert user_identifier, (
"User identifier must be specified for "
"fetching the status and/or result of the previously run benchmarks"
)
statuses = self.db.statusBenchmarks(user_identifier)
result = None
if self.args.fetch_status:
result = json.dumps(statuses)
elif self.args.fetch_result:
ids = ",".join([str(status["id"]) for status in statuses])
output = self.db.getBenchmarks(ids)
self._mobilelabResult(output)
result = json.dumps(output)
return result
def _killJob(self):
user_identifier = self.args.user_identifier
assert user_identifier, (
"User identifier must be specified for " "killing submitted jobs."
)
statuses = self.db.statusBenchmarks(user_identifier)
        # statuses is already a list of dicts; no JSON round-trip is needed
        status = statuses[-1]["status"]
if status in ["RUNNING", "QUEUE"]:
self.db.killBenchmarks(user_identifier)
getLogger().info("The job has been killed")
else:
getLogger().info(
"The job cannot be killed since its status is {}".format(status)
)
def _mobilelabResult(self, output):
# always get the last result
for item in output:
raw_result = item["result"]
if raw_result is None:
continue
result = json.loads(raw_result)
mobilelab_result = {"treatment": {}, "control": {}}
for k in result:
# k is identifier
v = result[k]
for kk in v:
vv = v[kk]
# update values if only summary exists
if "values" not in vv or len(vv["values"]) == 0:
if "summary" in vv:
if "mean" in vv["summary"]:
vv["values"] = [vv["summary"]["mean"]]
elif "p50" in vv["summary"]:
vv["values"] = [vv["summary"]["p50"]]
if "control_summary" in vv:
if "mean" in vv["control_summary"]:
vv["control_values"] = [vv["control_summary"]["mean"]]
elif "p50" in vv["control_summary"]:
vv["control_values"] = [vv["control_summary"]["p50"]]
# check values again
if "values" not in vv or len(vv["values"]) == 0:
continue
assert vv["type"], "type is missing in {}".format(kk)
assert vv["metric"], "metric is missing in {}".format(kk)
if vv["metric"] == "flops":
continue
unit = vv["unit"] if "unit" in vv else "null"
self._mobilelabAddField(
mobilelab_result["treatment"],
k,
vv["type"],
vv["metric"],
vv["values"],
unit,
)
if "control_values" in vv:
self._mobilelabAddField(
mobilelab_result["control"],
k,
vv["type"],
vv["metric"],
vv["control_values"],
unit,
)
item["mobilelab_result"] = mobilelab_result
def _mobilelabAddField(self, output, identifier, type, metric, values, unit):
key = "{}__{}__{}".format(identifier, type, metric)
key = re.sub(r"\W+", "_", key)
assert key not in output, "duplicate key {}".format(key)
output[key] = {
"values": values,
"metric": metric,
"type": type,
"unit": unit,
}
def _updateArgs(self, args):
        # Remove later when adhoc is moved to separate infrastructure
if args.adhoc is not None:
adhoc_file, success = unpackAdhocFile(args.adhoc)
if success:
args.benchmark_file = adhoc_file
else:
getLogger().error(
"Could not find specified adhoc config: {}".format(args.adhoc)
)
if __name__ == "__main__":
raw_args = None
app = RunRemote(raw_args=raw_args)
app.run()
----------------------------------------------------------------------
blob_id: 50442c56d4e034a8779bea85a38ded5befb04c01
directory_id: 691b82da7c38a6c109088d8426ffa26175f4ff31
path: /apps/comment/__init__.py
content_id: cbb4310db14393564be9c625d1ed5268dda44d76
detected_licenses: ["MIT"]
license_type: permissive
repo_name: Hopetree/izone
snapshot_id: 4072957eb12c7703a235c6df87ab49c7a7fdd84e
revision_id: 46f90dbaa3968cb3261d60a74765fa462415f000
branch_name: refs/heads/master
visit_date: 2023-08-19T00:21:48.632159
revision_date: 2023-08-09T07:36:20
committer_date: 2023-08-09T07:36:20
github_id: 115,265,222
star_events_count: 1,167
fork_events_count: 460
gha_license_id: MIT
gha_event_created_at: 2023-06-25T07:27:10
gha_created_at: 2017-12-24T13:39:43
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 49
extension: py
filename: __init__.py
content:
default_app_config = 'comment.apps.CommentConfig'
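# Legacy Django hook pointing at the app's AppConfig; default_app_config is
# deprecated since Django 3.2, where the config in apps.py is auto-discovered.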
----------------------------------------------------------------------