hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71ad0247ebf714503f6f8492d0d47f17da35091 | 623 | py | Python | course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py | sepam/machine-learning-engineering-for-production-public | cd6053459eee9b7f30bf86da63104b3f1381383a | [
"Apache-2.0"
] | null | null | null | course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py | sepam/machine-learning-engineering-for-production-public | cd6053459eee9b7f30bf86da63104b3f1381383a | [
"Apache-2.0"
] | null | null | null | course4/week3-ungraded-labs/C4_W3_Lab_4_Github_Actions/app/main.py | sepam/machine-learning-engineering-for-production-public | cd6053459eee9b7f30bf86da63104b3f1381383a | [
"Apache-2.0"
] | null | null | null | import pickle
import numpy as np
from typing import List
from fastapi import FastAPI
from pydantic import BaseModel, conlist
app = FastAPI(title="Predicting Wine Class with batching")
# Open classifier in global scope
with open("models/wine-95-fixed.pkl", "rb") as file:
clf = pickle.load(file)
class Wine(BaseModel):
    """Request payload: a batch of samples, each exactly 13 float features.

    13 matches the feature count of the classic wine dataset the pickled
    classifier was trained on — presumably; confirm against the model.
    """

    # pydantic v1 `conlist` enforces the fixed per-sample vector length.
    batches: List[conlist(item_type=float, min_items=13, max_items=13)]
# make predictions on this endpoint
@app.post("/predict")
def predict(wine: Wine):
    """Run the pickled classifier on a batch of wine feature vectors.

    Returns a JSON object mapping "Prediction" to one class label per
    input row.
    """
    features = np.array(wine.batches)
    predictions = clf.predict(features).tolist()
    return {"Prediction": predictions}
| 23.074074 | 71 | 0.731942 | import pickle
import numpy as np
from typing import List
from fastapi import FastAPI
from pydantic import BaseModel, conlist
app = FastAPI(title="Predicting Wine Class with batching")
with open("models/wine-95-fixed.pkl", "rb") as file:
clf = pickle.load(file)
class Wine(BaseModel):
batches: List[conlist(item_type=float, min_items=13, max_items=13)]
@app.post("/predict")
def predict(wine: Wine):
batches = wine.batches
np_batches = np.array(batches)
pred = clf.predict(np_batches).tolist()
return {"Prediction": pred}
| true | true |
f71ad074c1c6f98c66a9513ea45897d958bd392f | 1,461 | py | Python | python/config_default_48k.py | entn-at/GlottDNN | b7db669d7f34da92ab34742d75a8ba3c70763a65 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | python/config_default_48k.py | entn-at/GlottDNN | b7db669d7f34da92ab34742d75a8ba3c70763a65 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | python/config_default_48k.py | entn-at/GlottDNN | b7db669d7f34da92ab34742d75a8ba3c70763a65 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # run flags
make_dirs = True
make_scp = True
do_sptk_pitch_analysis = False
do_reaper_pitch_analysis = False
do_glott_vocoder_analysis = False
make_dnn_train_data = False
make_dnn_infofile = False
do_dnn_training = False
do_glott_vocoder_synthesis = True
# directories
prjdir = '/l/CODE/GlottDNN' # add your own local install dir here
datadir = prjdir + '/data/ansyn_jenny'
# general parameters
sampling_frequency = 48000
warping_lambda = 0.54
use_external_gci = False
# programs
reaper = 'reaper'
sox = 'sox'
pitch = '/u/76/mairaksi/unix/Documents/SPTK-3.8/bin/pitch/pitch -a 0 -s 48.0 -o 1 -p 240 -T 0.0 -L 50 -H 500 '
x2x = '/u/76/mairaksi/unix/Documents/SPTK-3.8/bin/x2x/x2x'
Analysis = prjdir + '/src/Analysis'
Synthesis = prjdir + '/src/Synthesis'
config_default = prjdir + '/config/config_default_48k.cfg'
# nn input params
inputs = ['f0', 'gain', 'hnr', 'slsf', 'lsf']
input_exts = ['.f0', '.gain', '.hnr', '.slsf','.lsf']
input_dims = [1, 1, 25, 10, 50] # set feature to zero if not used
outputs = ['pls']
output_exts = ['.pls']
output_dims = [1200]
# dnn data conf
dnn_name = 'nancy48_legacy_same'
train_data_dir = prjdir + '/nndata/traindata/' + dnn_name
weights_data_dir = prjdir + '/nndata/weights/' + dnn_name
data_buffer_size = 1000
remove_unvoiced_frames = True
#train_set = [1, 2 , 3, 4, 5]
train_set = [1]
val_set = [6]
test_set = [7]
# dnn train conf
n_hidden = [250, 250, 250]
learning_rate = 0.1
batch_size = 100
max_epochs = 20000
| 26.563636 | 110 | 0.713895 |
make_dirs = True
make_scp = True
do_sptk_pitch_analysis = False
do_reaper_pitch_analysis = False
do_glott_vocoder_analysis = False
make_dnn_train_data = False
make_dnn_infofile = False
do_dnn_training = False
do_glott_vocoder_synthesis = True
prjdir = '/l/CODE/GlottDNN'
datadir = prjdir + '/data/ansyn_jenny'
sampling_frequency = 48000
warping_lambda = 0.54
use_external_gci = False
reaper = 'reaper'
sox = 'sox'
pitch = '/u/76/mairaksi/unix/Documents/SPTK-3.8/bin/pitch/pitch -a 0 -s 48.0 -o 1 -p 240 -T 0.0 -L 50 -H 500 '
x2x = '/u/76/mairaksi/unix/Documents/SPTK-3.8/bin/x2x/x2x'
Analysis = prjdir + '/src/Analysis'
Synthesis = prjdir + '/src/Synthesis'
config_default = prjdir + '/config/config_default_48k.cfg'
inputs = ['f0', 'gain', 'hnr', 'slsf', 'lsf']
input_exts = ['.f0', '.gain', '.hnr', '.slsf','.lsf']
input_dims = [1, 1, 25, 10, 50]
outputs = ['pls']
output_exts = ['.pls']
output_dims = [1200]
dnn_name = 'nancy48_legacy_same'
train_data_dir = prjdir + '/nndata/traindata/' + dnn_name
weights_data_dir = prjdir + '/nndata/weights/' + dnn_name
data_buffer_size = 1000
remove_unvoiced_frames = True
train_set = [1]
val_set = [6]
test_set = [7]
n_hidden = [250, 250, 250]
learning_rate = 0.1
batch_size = 100
max_epochs = 20000
| true | true |
f71ad0e03a1f64c0b8808cee586a271e9c91b997 | 950 | py | Python | clif/testing/python/non_raising_test.py | wangxf123456/clif | 9bff8a28f5d266d6ea4f4bb0dc1d9c9a0c9ee5b1 | [
"Apache-2.0"
] | 966 | 2017-04-18T04:14:04.000Z | 2022-03-03T21:22:44.000Z | clif/testing/python/non_raising_test.py | wangxf123456/clif | 9bff8a28f5d266d6ea4f4bb0dc1d9c9a0c9ee5b1 | [
"Apache-2.0"
] | 48 | 2017-05-02T23:51:29.000Z | 2021-12-06T19:10:11.000Z | clif/testing/python/non_raising_test.py | wangxf123456/clif | 9bff8a28f5d266d6ea4f4bb0dc1d9c9a0c9ee5b1 | [
"Apache-2.0"
] | 135 | 2017-04-26T06:15:30.000Z | 2022-01-07T02:17:20.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from clif.testing.python import non_raising
class NonRaisingTest(absltest.TestCase):
    """Checks the plain and marked non-raising CLIF wrappers."""

    def testPlain(self):
        self.assertEqual(non_raising.MakeTestNonRaisingPlain(), 1)

    def testMarked(self):
        self.assertEqual(non_raising.MakeTestNonRaisingMarked(), -1)
if __name__ == '__main__':
absltest.main()
| 28.787879 | 74 | 0.753684 |
from absl.testing import absltest
from clif.testing.python import non_raising
class NonRaisingTest(absltest.TestCase):
def testPlain(self):
num = non_raising.MakeTestNonRaisingPlain()
self.assertEqual(num, 1)
def testMarked(self):
num = non_raising.MakeTestNonRaisingMarked()
self.assertEqual(num, -1)
if __name__ == '__main__':
absltest.main()
| true | true |
f71ad1078d00053f655a55288594e5cc3c29cabe | 6,120 | py | Python | custom_components/pandora_cas/binary_sensor.py | alryaz/hass-pandora-cas | 684e9d5e95c89fcf5ac9ede75df3e18c05c03452 | [
"MIT"
] | 15 | 2020-09-15T01:05:05.000Z | 2022-03-15T11:18:07.000Z | custom_components/pandora_cas/binary_sensor.py | alryaz/hass-pandora-cas | 684e9d5e95c89fcf5ac9ede75df3e18c05c03452 | [
"MIT"
] | 12 | 2020-09-13T13:22:27.000Z | 2022-03-15T12:03:45.000Z | custom_components/pandora_cas/binary_sensor.py | alryaz/hass-pandora-cas | 684e9d5e95c89fcf5ac9ede75df3e18c05c03452 | [
"MIT"
] | 2 | 2021-06-03T16:32:22.000Z | 2021-10-01T20:53:50.000Z | """Binary sensor platform for Pandora Car Alarm System."""
__all__ = ["ENTITY_TYPES", "async_setup_entry"]
import logging
from functools import partial
from typing import Any, Dict
import attr
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_MOTION,
DOMAIN as PLATFORM_DOMAIN,
BinarySensorEntity,
ENTITY_ID_FORMAT,
)
from homeassistant.const import ATTR_NAME, ATTR_ICON, ATTR_DEVICE_CLASS
from . import PandoraCASBooleanEntity, async_platform_setup_entry
from .api import BitStatus
from .const import *
_LOGGER = logging.getLogger(__name__)
_car_door_icons = ("mdi:car-door-lock", "mdi:car-door")
_car_glass_icons = ("mdi:car-windshield", "mdi:car-windshield-outline")
ENTITY_TYPES = {
"connection_state": {
ATTR_NAME: "Connection state",
ATTR_DEVICE_CLASS: DEVICE_CLASS_CONNECTIVITY,
ATTR_ATTRIBUTE: "is_online",
ATTR_ATTRIBUTE_SOURCE: True,
},
"moving": {
ATTR_NAME: "Moving",
ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION,
ATTR_STATE_SENSITIVE: True,
ATTR_ATTRIBUTE: "is_moving",
},
# Status-related sensors
"left_front_door": {
ATTR_NAME: "Left Front Door",
ATTR_ICON: _car_door_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.DOOR_FRONT_LEFT_OPEN,
ATTR_STATE_SENSITIVE: True,
},
"right_front_door": {
ATTR_NAME: "Right Front Door",
ATTR_ICON: _car_door_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.DOOR_FRONT_RIGHT_OPEN,
ATTR_STATE_SENSITIVE: True,
},
"left_back_door": {
ATTR_NAME: "Left Back Door",
ATTR_ICON: _car_door_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.DOOR_BACK_LEFT_OPEN,
ATTR_STATE_SENSITIVE: True,
},
"right_back_door": {
ATTR_NAME: "Right Back Door",
ATTR_ICON: _car_door_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.DOOR_BACK_RIGHT_OPEN,
ATTR_STATE_SENSITIVE: True,
},
"left_front_glass": {
ATTR_NAME: "Left Front Glass",
ATTR_ICON: _car_glass_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "can_glass_front_left",
ATTR_STATE_SENSITIVE: True,
ATTR_DISABLED_BY_DEFAULT: True,
},
"right_front_glass": {
ATTR_NAME: "Right Front Glass",
ATTR_ICON: _car_glass_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "can_glass_front_right",
ATTR_STATE_SENSITIVE: True,
ATTR_DISABLED_BY_DEFAULT: True,
},
"left_back_glass": {
ATTR_NAME: "Left Back Glass",
ATTR_ICON: _car_glass_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "can_glass_back_left",
ATTR_STATE_SENSITIVE: True,
ATTR_DISABLED_BY_DEFAULT: True,
},
"right_back_glass": {
ATTR_NAME: "Right Back Glass",
ATTR_ICON: _car_glass_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "can_glass_back_right",
ATTR_STATE_SENSITIVE: True,
ATTR_DISABLED_BY_DEFAULT: True,
},
"trunk": {
ATTR_NAME: "Trunk",
ATTR_ICON: "mdi:car-back",
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.TRUNK_OPEN,
ATTR_STATE_SENSITIVE: True,
},
"hood": {
ATTR_NAME: "Hood",
ATTR_ICON: "mdi:car",
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.HOOD_OPEN,
ATTR_STATE_SENSITIVE: True,
},
"parking": {
ATTR_NAME: "Parking Mode",
ATTR_ICON: "mdi:car-brake-parking",
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.HANDBRAKE_ENGAGED,
ATTR_STATE_SENSITIVE: True,
},
"brakes": {
ATTR_NAME: "Brakes",
ATTR_ICON: "mdi:car-brake-hold",
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.BRAKES_ENGAGED,
ATTR_STATE_SENSITIVE: True,
},
"ignition": {
ATTR_NAME: "Ignition",
ATTR_ICON: "mdi:key-variant",
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.IGNITION,
},
"exterior_lights": {
ATTR_NAME: "Exterior Lights",
ATTR_ICON: "mdi:car-light-high",
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.EXTERIOR_LIGHTS_ACTIVE,
},
"ev_charging_connected": {
ATTR_NAME: "EV Charging Connected",
ATTR_ICON: "mdi:ev-station",
ATTR_ATTRIBUTE: "ev_charging_connected",
ATTR_STATE_SENSITIVE: True,
ATTR_DISABLED_BY_DEFAULT: True,
},
}
class PandoraCASBinarySensor(PandoraCASBooleanEntity, BinarySensorEntity):
    """Binary sensor entity backed by one Pandora CAS device attribute."""

    ENTITY_TYPES = ENTITY_TYPES
    ENTITY_ID_FORMAT = ENTITY_ID_FORMAT

    @property
    def is_on(self) -> bool:
        """Whether the sensor currently reports an active state."""
        return bool(self._state)

    @property
    def device_state_attributes(self) -> Dict[str, Any]:
        """Extend the base attributes with per-entity-type extras."""
        attributes = super().device_state_attributes

        if self._entity_type == "connection_state":
            device_state = self._device.state
            if device_state is not None:
                # Expose the full device state snapshot as attributes.
                attributes.update(attr.asdict(device_state, True))
        elif self._entity_type == "ev_charging_connected" and self._device.is_online:
            # Charging details are only meaningful while the device is online.
            device_state = self._device.state
            attributes["slow_charging"] = device_state.ev_charging_slow
            attributes["fast_charging"] = device_state.ev_charging_fast
            attributes["ready_status"] = device_state.ev_status_ready

        return attributes
# Platform entry point: bind this platform's domain, entity class, and logger
# onto the shared setup helper, which performs the actual entity creation.
async_setup_entry = partial(
    async_platform_setup_entry, PLATFORM_DOMAIN, PandoraCASBinarySensor, logger=_LOGGER
)
| 31.709845 | 87 | 0.662745 | __all__ = ["ENTITY_TYPES", "async_setup_entry"]
import logging
from functools import partial
from typing import Any, Dict
import attr
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_MOTION,
DOMAIN as PLATFORM_DOMAIN,
BinarySensorEntity,
ENTITY_ID_FORMAT,
)
from homeassistant.const import ATTR_NAME, ATTR_ICON, ATTR_DEVICE_CLASS
from . import PandoraCASBooleanEntity, async_platform_setup_entry
from .api import BitStatus
from .const import *
_LOGGER = logging.getLogger(__name__)
_car_door_icons = ("mdi:car-door-lock", "mdi:car-door")
_car_glass_icons = ("mdi:car-windshield", "mdi:car-windshield-outline")
ENTITY_TYPES = {
"connection_state": {
ATTR_NAME: "Connection state",
ATTR_DEVICE_CLASS: DEVICE_CLASS_CONNECTIVITY,
ATTR_ATTRIBUTE: "is_online",
ATTR_ATTRIBUTE_SOURCE: True,
},
"moving": {
ATTR_NAME: "Moving",
ATTR_DEVICE_CLASS: DEVICE_CLASS_MOTION,
ATTR_STATE_SENSITIVE: True,
ATTR_ATTRIBUTE: "is_moving",
},
"left_front_door": {
ATTR_NAME: "Left Front Door",
ATTR_ICON: _car_door_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.DOOR_FRONT_LEFT_OPEN,
ATTR_STATE_SENSITIVE: True,
},
"right_front_door": {
ATTR_NAME: "Right Front Door",
ATTR_ICON: _car_door_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.DOOR_FRONT_RIGHT_OPEN,
ATTR_STATE_SENSITIVE: True,
},
"left_back_door": {
ATTR_NAME: "Left Back Door",
ATTR_ICON: _car_door_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.DOOR_BACK_LEFT_OPEN,
ATTR_STATE_SENSITIVE: True,
},
"right_back_door": {
ATTR_NAME: "Right Back Door",
ATTR_ICON: _car_door_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.DOOR_BACK_RIGHT_OPEN,
ATTR_STATE_SENSITIVE: True,
},
"left_front_glass": {
ATTR_NAME: "Left Front Glass",
ATTR_ICON: _car_glass_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "can_glass_front_left",
ATTR_STATE_SENSITIVE: True,
ATTR_DISABLED_BY_DEFAULT: True,
},
"right_front_glass": {
ATTR_NAME: "Right Front Glass",
ATTR_ICON: _car_glass_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "can_glass_front_right",
ATTR_STATE_SENSITIVE: True,
ATTR_DISABLED_BY_DEFAULT: True,
},
"left_back_glass": {
ATTR_NAME: "Left Back Glass",
ATTR_ICON: _car_glass_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "can_glass_back_left",
ATTR_STATE_SENSITIVE: True,
ATTR_DISABLED_BY_DEFAULT: True,
},
"right_back_glass": {
ATTR_NAME: "Right Back Glass",
ATTR_ICON: _car_glass_icons,
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "can_glass_back_right",
ATTR_STATE_SENSITIVE: True,
ATTR_DISABLED_BY_DEFAULT: True,
},
"trunk": {
ATTR_NAME: "Trunk",
ATTR_ICON: "mdi:car-back",
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.TRUNK_OPEN,
ATTR_STATE_SENSITIVE: True,
},
"hood": {
ATTR_NAME: "Hood",
ATTR_ICON: "mdi:car",
ATTR_DEVICE_CLASS: DEVICE_CLASS_DOOR,
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.HOOD_OPEN,
ATTR_STATE_SENSITIVE: True,
},
"parking": {
ATTR_NAME: "Parking Mode",
ATTR_ICON: "mdi:car-brake-parking",
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.HANDBRAKE_ENGAGED,
ATTR_STATE_SENSITIVE: True,
},
"brakes": {
ATTR_NAME: "Brakes",
ATTR_ICON: "mdi:car-brake-hold",
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.BRAKES_ENGAGED,
ATTR_STATE_SENSITIVE: True,
},
"ignition": {
ATTR_NAME: "Ignition",
ATTR_ICON: "mdi:key-variant",
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.IGNITION,
},
"exterior_lights": {
ATTR_NAME: "Exterior Lights",
ATTR_ICON: "mdi:car-light-high",
ATTR_ATTRIBUTE: "bit_state",
ATTR_FLAG: BitStatus.EXTERIOR_LIGHTS_ACTIVE,
},
"ev_charging_connected": {
ATTR_NAME: "EV Charging Connected",
ATTR_ICON: "mdi:ev-station",
ATTR_ATTRIBUTE: "ev_charging_connected",
ATTR_STATE_SENSITIVE: True,
ATTR_DISABLED_BY_DEFAULT: True,
},
}
class PandoraCASBinarySensor(PandoraCASBooleanEntity, BinarySensorEntity):
ENTITY_TYPES = ENTITY_TYPES
ENTITY_ID_FORMAT = ENTITY_ID_FORMAT
@property
def is_on(self) -> bool:
return bool(self._state)
@property
def device_state_attributes(self) -> Dict[str, Any]:
existing_attributes = super().device_state_attributes
entity_type = self._entity_type
if entity_type == "connection_state":
state = self._device.state
if state is not None:
existing_attributes.update(attr.asdict(state, True))
elif entity_type == "ev_charging_connected":
if not self._device.is_online:
return existing_attributes
state = self._device.state
existing_attributes["slow_charging"] = state.ev_charging_slow
existing_attributes["fast_charging"] = state.ev_charging_fast
existing_attributes["ready_status"] = state.ev_status_ready
return existing_attributes
async_setup_entry = partial(
async_platform_setup_entry, PLATFORM_DOMAIN, PandoraCASBinarySensor, logger=_LOGGER
)
| true | true |
f71ad2d57334e5b52814c0e8d8c40e1550a4d52b | 1,141 | py | Python | assignment_seven_2021_09_14/cycle_sort.py | Soyvolon/CISS_380 | 81ba41ef45ba8f4a4cfc55f9e20b87c5feddba08 | [
"Unlicense"
] | null | null | null | assignment_seven_2021_09_14/cycle_sort.py | Soyvolon/CISS_380 | 81ba41ef45ba8f4a4cfc55f9e20b87c5feddba08 | [
"Unlicense"
] | null | null | null | assignment_seven_2021_09_14/cycle_sort.py | Soyvolon/CISS_380 | 81ba41ef45ba8f4a4cfc55f9e20b87c5feddba08 | [
"Unlicense"
] | null | null | null | def cycle_sort(data: list):
cap = len(data)
for start in range(0, cap - 1):
# get item
item = data[start]
# get new pos for said item
pos = start
for i in range(start + 1, cap):
if data[i] < item:
pos += 1
# if there isnt a new pos, skip this, we don't move it
if start == pos:
continue
# skip past any any duplicates
while data[pos] == item:
pos += 1
# set the item to the pos
# and get the next item to move
data[pos], item = item, data[pos]
# take the new item and move
# it backwards until
# it is in its correct spot
while pos != start:
pos = start
# get the new pos value
for i in range(start + 1, cap):
if data[i] < item:
pos += 1
# skip duplicates
while data[pos] == item:
pos += 1
# and place item at data[pos] and prep
# data[pos] to be moved to the next spot
data[pos], item = item, data[pos]
| 31.694444 | 62 | 0.473269 | def cycle_sort(data: list):
cap = len(data)
for start in range(0, cap - 1):
item = data[start]
pos = start
for i in range(start + 1, cap):
if data[i] < item:
pos += 1
if start == pos:
continue
# skip past any any duplicates
while data[pos] == item:
pos += 1
# set the item to the pos
# and get the next item to move
data[pos], item = item, data[pos]
# take the new item and move
# it backwards until
# it is in its correct spot
while pos != start:
pos = start
# get the new pos value
for i in range(start + 1, cap):
if data[i] < item:
pos += 1
# skip duplicates
while data[pos] == item:
pos += 1
# and place item at data[pos] and prep
# data[pos] to be moved to the next spot
data[pos], item = item, data[pos]
| true | true |
f71ad48e5cbbd6870f5adadbf55d52267e89621a | 5,044 | py | Python | options.py | Kunalmighty/BlueJaysGame | 76936cea8a57542bf7ea3d546f851f9f9771931f | [
"MIT"
] | null | null | null | options.py | Kunalmighty/BlueJaysGame | 76936cea8a57542bf7ea3d546f851f9f9771931f | [
"MIT"
] | null | null | null | options.py | Kunalmighty/BlueJaysGame | 76936cea8a57542bf7ea3d546f851f9f9771931f | [
"MIT"
] | null | null | null | """ #EmbraceTheS's options menu state. """
import state
import menu
import globes
import pygame
import joystick
import volume
class Options(state.State):
    """Options menu state.

    Offers clearing of high scores, joystick setup, volume/brightness
    adjustment, and returning to the main menu.  Rendering resources
    (background, rendered text surfaces, layout metrics) are cached on
    the class so they are built only once across instances.
    """
    TEXT = []           # rendered menu-entry surfaces, built lazily
    BACKGROUND = None   # title-screen background surface, loaded lazily
    LEFT_MARGIN = None  # x position of the menu column
    HEIGHTS = None      # y positions of the menu rows

    def __init__(self, sound=False, option=0):
        """Initialise the menu.

        `sound` True skips restarting the title music (e.g. when coming
        back from a sub-menu); `option` preselects a menu row.
        """
        state.State.__init__(self)
        if not sound:
            globes.play_music("title.ogg")
        if (Options.BACKGROUND is None):
            Options.BACKGROUND = pygame.image.load("bg/titlescreen.png")\
                .convert()
        self.option = option
        self.blink = 0  # cycle through 0-9, display if < 7
        self.confirmation = False  # if asking for action confirmation
        self.confirmed = 0  # 0: no, 1: yes
        if (len(Options.TEXT) == 0):
            Options.TEXT = [globes.Globals.FONT.render("Clear High Scores",
                                                       True, globes.BLACK),
                            globes.Globals.FONT.render("Setup Joystick",
                                                       True, globes.BLACK),
                            globes.Globals.FONT.render("Volume & Brightness",
                                                       True, globes.BLACK),
                            globes.Globals.FONT.render("Return to Menu",
                                                       True, globes.BLACK)]
        if Options.LEFT_MARGIN is None:
            Options.LEFT_MARGIN = 2 * globes.Globals.WIDTH / 3
        if Options.HEIGHTS is None:
            # NOTE(review): five heights are computed for four menu entries;
            # HEIGHTS[4] is never read by render() — confirm before removing.
            Options.HEIGHTS = [
                (globes.Globals.HEIGHT / 2 - globes.Globals.HEIGHT / 8),
                globes.Globals.HEIGHT / 2 - globes.Globals.HEIGHT / 16,
                globes.Globals.HEIGHT / 2,
                globes.Globals.HEIGHT / 2 + globes.Globals.HEIGHT / 16,
                (globes.Globals.HEIGHT / 2 + globes.Globals.HEIGHT / 8)
            ]

    def render(self):
        """Draw the background plus either the menu list or the
        clear-high-scores confirmation prompt; the selection blinks."""
        globes.Globals.SCREEN.blit(Options.BACKGROUND, (0, 0))
        if not self.confirmation:
            for i in range(4):
                # Hide the selected entry for 3 of every 10 frames (blink).
                if ((not (self.option == i)) or self.blink < 7):
                    globes.Globals.SCREEN.blit(Options.TEXT[i],
                                               (Options.LEFT_MARGIN,
                                                Options.HEIGHTS[i]))
        else:
            surf = globes.Globals.FONT.render("Are you absolutely certain " +
                                              "you want to erase", True,
                                              globes.BLACK)
            globes.Globals.SCREEN.blit(surf, (270, 70))
            surf = globes.Globals.FONT.render("your legendary legacy?", True,
                                              globes.BLACK)
            globes.Globals.SCREEN.blit(surf, (370, 95))
            # The currently highlighted answer (Yes=1 / No=0) blinks too.
            if self.blink < 7 or not self.confirmed == 1:
                surf = globes.Globals.FONT.render("Yes", True,
                                                  globes.BLACK)
                globes.Globals.SCREEN.blit(surf, (430, 130))
            if self.blink < 7 or not self.confirmed == 0:
                surf = globes.Globals.FONT.render("No", True,
                                                  globes.BLACK)
                globes.Globals.SCREEN.blit(surf, (530, 130))

    def update(self, time):
        """Advance the blink counter (the `time` argument is unused here)."""
        self.blink = (self.blink + 1) % 10

    def event(self, event):
        """Handle keyboard navigation for the menu / confirmation prompt."""
        if self.confirmation:
            if event.type == pygame.KEYDOWN:
                # Left/right both toggle between Yes and No (mod 2 wrap).
                if event.key == pygame.K_LEFT:
                    self.confirmed = (self.confirmed - 1) % 2
                elif event.key == pygame.K_RIGHT:
                    self.confirmed = (self.confirmed + 1) % 2
                if event.key == pygame.K_SPACE or event.key == pygame.K_RETURN:
                    if self.confirmed:
                        globes.Globals.HIGHSCORES.clear_file()
                        self.confirmation = False
                    else:
                        self.confirmation = False
        else:
            if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                globes.Globals.STATE = menu.Menu(True)
            if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
                self.option = (self.option - 1) % 4
            if event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
                self.option = (self.option + 1) % 4
            if event.type == pygame.KEYDOWN and \
                    (event.key == pygame.K_SPACE or
                     event.key == pygame.K_RETURN):
                if self.option == 0:
                    self.confirmation = True
                elif self.option == 1:
                    globes.Globals.STATE = joystick.Joystick()
                elif self.option == 2:
                    globes.Globals.STATE = volume.Volume(True)
                # NOTE(review): plain `if` breaks the elif chain above;
                # harmless since `option` holds one value, but inconsistent.
                if self.option == 3:
                    globes.Globals.STATE = menu.Menu(True)
| 45.035714 | 79 | 0.490682 |
import state
import menu
import globes
import pygame
import joystick
import volume
class Options(state.State):
TEXT = []
BACKGROUND = None
LEFT_MARGIN = None
HEIGHTS = None
def __init__(self, sound=False, option=0):
state.State.__init__(self)
if not sound:
globes.play_music("title.ogg")
if (Options.BACKGROUND is None):
Options.BACKGROUND = pygame.image.load("bg/titlescreen.png")\
.convert()
self.option = option
self.blink = 0
self.confirmation = False
self.confirmed = 0
if (len(Options.TEXT) == 0):
Options.TEXT = [globes.Globals.FONT.render("Clear High Scores",
True, globes.BLACK),
globes.Globals.FONT.render("Setup Joystick",
True, globes.BLACK),
globes.Globals.FONT.render("Volume & Brightness",
True, globes.BLACK),
globes.Globals.FONT.render("Return to Menu",
True, globes.BLACK)]
if Options.LEFT_MARGIN is None:
Options.LEFT_MARGIN = 2 * globes.Globals.WIDTH / 3
if Options.HEIGHTS is None:
Options.HEIGHTS = [
(globes.Globals.HEIGHT / 2 - globes.Globals.HEIGHT / 8),
globes.Globals.HEIGHT / 2 - globes.Globals.HEIGHT / 16,
globes.Globals.HEIGHT / 2,
globes.Globals.HEIGHT / 2 + globes.Globals.HEIGHT / 16,
(globes.Globals.HEIGHT / 2 + globes.Globals.HEIGHT / 8)
]
def render(self):
globes.Globals.SCREEN.blit(Options.BACKGROUND, (0, 0))
if not self.confirmation:
for i in range(4):
if ((not (self.option == i)) or self.blink < 7):
globes.Globals.SCREEN.blit(Options.TEXT[i],
(Options.LEFT_MARGIN,
Options.HEIGHTS[i]))
else:
surf = globes.Globals.FONT.render("Are you absolutely certain " +
"you want to erase", True,
globes.BLACK)
globes.Globals.SCREEN.blit(surf, (270, 70))
surf = globes.Globals.FONT.render("your legendary legacy?", True,
globes.BLACK)
globes.Globals.SCREEN.blit(surf, (370, 95))
if self.blink < 7 or not self.confirmed == 1:
surf = globes.Globals.FONT.render("Yes", True,
globes.BLACK)
globes.Globals.SCREEN.blit(surf, (430, 130))
if self.blink < 7 or not self.confirmed == 0:
surf = globes.Globals.FONT.render("No", True,
globes.BLACK)
globes.Globals.SCREEN.blit(surf, (530, 130))
def update(self, time):
self.blink = (self.blink + 1) % 10
def event(self, event):
if self.confirmation:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
self.confirmed = (self.confirmed - 1) % 2
elif event.key == pygame.K_RIGHT:
self.confirmed = (self.confirmed + 1) % 2
if event.key == pygame.K_SPACE or event.key == pygame.K_RETURN:
if self.confirmed:
globes.Globals.HIGHSCORES.clear_file()
self.confirmation = False
else:
self.confirmation = False
else:
if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
globes.Globals.STATE = menu.Menu(True)
if event.type == pygame.KEYDOWN and event.key == pygame.K_UP:
self.option = (self.option - 1) % 4
if event.type == pygame.KEYDOWN and event.key == pygame.K_DOWN:
self.option = (self.option + 1) % 4
if event.type == pygame.KEYDOWN and \
(event.key == pygame.K_SPACE or
event.key == pygame.K_RETURN):
if self.option == 0:
self.confirmation = True
elif self.option == 1:
globes.Globals.STATE = joystick.Joystick()
elif self.option == 2:
globes.Globals.STATE = volume.Volume(True)
if self.option == 3:
globes.Globals.STATE = menu.Menu(True)
| true | true |
f71ad5d930f9670c92bb44ed061e73d50e006900 | 1,532 | py | Python | tests/functional/test_lock.py | box/etcdb | 0f27846a0ca13efff9750b97a38939f66172debc | [
"Apache-2.0"
] | 12 | 2016-10-25T18:03:49.000Z | 2019-06-27T13:20:22.000Z | tests/functional/test_lock.py | box/etcdb | 0f27846a0ca13efff9750b97a38939f66172debc | [
"Apache-2.0"
] | 30 | 2016-10-20T23:27:09.000Z | 2018-12-06T17:23:59.000Z | tests/functional/test_lock.py | box/etcdb | 0f27846a0ca13efff9750b97a38939f66172debc | [
"Apache-2.0"
] | 4 | 2016-10-20T23:24:48.000Z | 2022-03-01T09:59:29.000Z | import pytest
from etcdb import OperationalError
from etcdb.lock import Lock, ReadLock, WriteLock
def test_readers(etcdb_connection):
    """Read locks are shared: two readers on one table are both visible."""
    cursor = etcdb_connection.cursor()
    cursor.execute('CREATE TABLE bar(id int not null PRIMARY KEY)')

    first_lock = ReadLock(etcdb_connection.client, 'foo', 'bar')
    first_lock.acquire(ttl=0)
    snapshot = first_lock.readers()
    first_lock.release()
    assert len(snapshot) == 1

    # After release nobody holds a read lock.
    assert len(first_lock.readers()) == 0

    # A second reader may join while the first still holds the lock.
    first_lock.acquire(ttl=0)
    second_lock = ReadLock(etcdb_connection.client, 'foo', 'bar')
    second_lock.acquire(ttl=0)
    assert len(first_lock.readers()) == 2
def test_writers(etcdb_connection):
    """Write locks are exclusive: a second writer cannot acquire the lock."""
    cursor = etcdb_connection.cursor()
    cursor.execute('CREATE TABLE bar(id int not null PRIMARY KEY)')

    writer = WriteLock(etcdb_connection.client, 'foo', 'bar')
    writer.acquire(ttl=0)
    assert len(writer.writers()) == 1

    writer.release()
    assert len(writer.writers()) == 0

    # While one writer holds the lock, a competing writer must fail.
    writer.acquire(ttl=0)
    contender = WriteLock(etcdb_connection.client, 'foo', 'bar')
    with pytest.raises(OperationalError):
        contender.acquire()
def test_attributes(etcdb_connection):
    """A write lock records author, reason, and an integer creation time."""
    cur = etcdb_connection.cursor()
    cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)')
    lock = WriteLock(etcdb_connection.client, 'foo', 'bar')
    lock.acquire(author='author foo', reason='reason foo')
    assert lock.author == 'author foo'
    assert lock.reason == 'reason foo'
    # isinstance is the idiomatic type check (flake8 E721 flags type()==).
    assert isinstance(lock.created_at, int)
    assert lock.created_at > 0
| 30.039216 | 64 | 0.679504 | import pytest
from etcdb import OperationalError
from etcdb.lock import Lock, ReadLock, WriteLock
def test_readers(etcdb_connection):
cur = etcdb_connection.cursor()
cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)')
lock = ReadLock(etcdb_connection.client, 'foo', 'bar')
lock.acquire(ttl=0)
readers = lock.readers()
lock.release()
assert len(readers) == 1
readers = lock.readers()
assert len(readers) == 0
lock.acquire(ttl=0)
l2 = ReadLock(etcdb_connection.client, 'foo', 'bar')
l2.acquire(ttl=0)
readers = lock.readers()
assert len(readers) == 2
def test_writers(etcdb_connection):
cur = etcdb_connection.cursor()
cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)')
lock = WriteLock(etcdb_connection.client, 'foo', 'bar')
lock.acquire(ttl=0)
writers = lock.writers()
assert len(writers) == 1
lock.release()
writers = lock.writers()
assert len(writers) == 0
lock.acquire(ttl=0)
l2 = WriteLock(etcdb_connection.client, 'foo', 'bar')
with pytest.raises(OperationalError):
l2.acquire()
def test_attributes(etcdb_connection):
cur = etcdb_connection.cursor()
cur.execute('CREATE TABLE bar(id int not null PRIMARY KEY)')
lock = WriteLock(etcdb_connection.client, 'foo', 'bar')
lock.acquire(author='author foo', reason='reason foo')
assert lock.author == 'author foo'
assert lock.reason == 'reason foo'
assert type(lock.created_at) == int
assert lock.created_at > 0
| true | true |
f71ad6efb1b2752e5513922cac67d1c668226597 | 6,403 | py | Python | django_mako_plus/uid.py | knowsuchagency/django-mako-plus | e737be6a2db6e9e897cc804c660494415c4ea180 | [
"Apache-2.0"
] | null | null | null | django_mako_plus/uid.py | knowsuchagency/django-mako-plus | e737be6a2db6e9e897cc804c660494415c4ea180 | [
"Apache-2.0"
] | null | null | null | django_mako_plus/uid.py | knowsuchagency/django-mako-plus | e737be6a2db6e9e897cc804c660494415c4ea180 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
'''
Created by Conan Albrecht <doconix@gmail.com>
Apache open source license.
November, 2017
'''
##################################################
### Unique id generator. Similar to uuid1() but
### also includes the process id.
###
### Note that upping the counter requires a global lock.
###
### The bit assignment:
###
### 52 bits for nanoseconds since epoch (really it can use unlimited bits because on left side of the number, but 52 bits gets us to ~2100)
### 16 bits for counter
### 48 bits for machine id
### 24 bits for process id
### ========
### 140 bits total, or 35 hex characters
###
### Maximum number is 1.39e42
###
import uuid
import time as time
import os
import random
import threading
import math
import collections
# initial values/constants
lastnow = 0
counterstart = random.getrandbits(16) - 1
countermax = math.pow(2, 16) - 1
counter = counterstart
# returns a 48 bit number (MAC-derived hardware id)
machineid = uuid.getnode()
# process id (linux is usually 16 bits; 24 bits reserved in the layout)
processid = os.getpid()
# the main data structure: one field per component of the id
UID = collections.namedtuple('UID', ( 'time', 'counter', 'machine', 'process' ))
# binary size of each number
# and binary positions for shifting
# and for splitting hex and binary (from right side)
size = UID(52, 16, 48, 24)
_shift = []
for i in reversed(range(len(size))):
    _shift.append(sum(size[i:]))
shift = UID(*reversed(_shift))
hsplit = UID(*(int(s/-4) for s in shift))   # negative indices: 4 bits per hex digit
bsplit = UID(*(s*-1 for s in shift))        # negative indices: 1 bit per binary digit

# Single module-level lock guarding the (lastnow, counter) update.
# BUGFIX(review): the original code did `with threading.RLock():` inside
# ruid(), constructing a brand-new lock on every call — which provides no
# mutual exclusion at all. All callers must share ONE lock.
_counter_lock = threading.RLock()


######################################
### Main API

def ruid():
    '''
    Creates a "raw" unique id. The result is a
    UID namedtuple with four parts:
        time
        counter
        machine
        process
    All other functions in this module just format
    the id created in this function.
    '''
    # counterstart/countermax are read-only here, so no `global` needed for
    # them (the original declared a misspelled `counter_start` global).
    global lastnow, counter

    # update the timestamp and counter under the shared lock
    with _counter_lock:
        # NOTE(review): second resolution, although the file header claims
        # nanoseconds — kept as-is to preserve the existing id format.
        now = int(time.time())
        counter += 1
        if counter >= countermax:
            counter = 0
        # if the counter wrapped all the way around within one tick,
        # wait for the clock to advance so ids cannot repeat
        while now == lastnow and counter == counterstart:
            time.sleep(.001)  # wait a millisecond and try again
            now = int(time.time())
        lastnow = now

    # return the named tuple
    return UID(now, counter, machineid, processid)
def iuid(raw=None):
    '''
    Creates a unique id as an int by packing the four UID
    components into one large integer.
    If provided, raw should be a UID named tuple
    (usually from a call to ruid).
    '''
    parts = ruid() if raw is None else raw
    # each component is shifted left past all components to its right
    offsets = (shift.counter, shift.machine, shift.process, 0)
    packed = 0
    for value, offset in zip(parts, offsets):
        packed += value << offset
    return packed
def uid(raw=None, sep=None):
    '''
    Creates a unique id as a hex string.
    If provided, raw should be a UID named tuple
    (usually from a call to ruid).
    Use sep='-' to separate the parts by dashes.
    '''
    if raw is None:
        raw = ruid()
    # plain hex version: just format the packed int
    if sep is None:
        return '{:0x}'.format(iuid(raw))
    # pretty version: recurse for the plain string, then slice it into the
    # four components. hsplit holds negative offsets (bit widths / 4 hex
    # digits per char), so these slices count from the right end.
    n = uid(raw)
    return sep.join((
        n[:hsplit.counter],
        n[hsplit.counter: hsplit.machine],
        n[hsplit.machine: hsplit.process],
        n[hsplit.process:],
    ))
def buid(raw=None, sep=None):
    '''
    Creates a unique id as a binary string.
    If provided, raw should be a UID named tuple
    (usually from a call to ruid).
    Use sep='-' to separate the parts by dashes.
    '''
    if raw is None:
        raw = ruid()
    # plain binary version (the original comment wrongly said "hex version")
    if sep is None:
        return '{:0b}'.format(iuid(raw))
    # pretty version: recurse for the plain string, then slice it into the
    # four components. bsplit holds negative bit offsets, counted from the
    # right end of the string.
    n = buid(raw)
    return sep.join((
        n[:bsplit.counter],
        n[bsplit.counter: bsplit.machine],
        n[bsplit.machine: bsplit.process],
        n[bsplit.process:],
    ))
def wuid(raw=None, leading='u'):
    '''
    Creates a unique id as a web-compliant id
    for use in HTML ids. This is the same as
    a hex id, but it has a leading character
    (default `u`) to ensure an alphabetical
    character comes first, per the standard.
    If provided, raw should be a UID named tuple
    (usually from a call to ruid).
    (Note: unlike uid()/buid(), this function takes
    no `sep` argument.)
    '''
    if raw is None:
        raw = ruid()
    return '{}{}'.format(leading, uid(raw))
def iunpack(n):
    '''
    Unpacks the given integer number
    into a UID namedtuple.
    '''
    # format of these is (mask & n) >> shifted
    return UID(
        # time: everything above the counter/machine/process bits
        n >> shift.counter,
        # counter: 16 bits, masked in place then shifted down
        ((((1 << size.counter) - 1) << shift.machine) & n) >> shift.machine,
        # machine: 48 bits
        ((((1 << size.machine) - 1) << shift.process) & n) >> shift.process,
        # process: lowest 24 bits, no shift needed
        ((1 << shift.process) - 1) & n,
    )
def unpack(hex_n):
    '''
    Unpacks the given hex number string
    into a UID namedtuple.
    To unpack a web id, strip the leading
    character first: unpack(myid[1:])
    '''
    as_int = int(hex_n, 16)
    return iunpack(as_int)
###################################################
### Unit tests for this module:
###
### python3 uid.py
###
import unittest
class Tester(unittest.TestCase):
    """Sanity checks for the uid module's pack/unpack round-trips.

    BUGFIX(review): test_int_hex_binary was accidentally defined twice with
    an identical body; the second definition silently shadowed the first, so
    only one copy ever ran. The duplicate has been removed.
    """

    def test_ruid(self):
        # machine and process ids are stable within one interpreter
        u = ruid()
        u2 = ruid()
        self.assertEqual(u.machine, u2.machine)
        self.assertEqual(u.process, u2.process)

    def test_int_hex_binary(self):
        # the int, hex and binary renderings all encode the same number
        u = ruid()
        n = iuid(u)
        h = uid(u)
        b = buid(u)
        self.assertEqual(n, int(h, 16))
        self.assertEqual(n, int(b, 2))

    def test_pretty(self):
        u = ruid()
        # hex: the dashed form differs only by separators
        h = uid(u)
        p = uid(u, '-')
        self.assertEqual(h, p.replace('-', ''))
        # binary: same property
        b = buid(u)
        p = buid(u, '-')
        self.assertEqual(b, p.replace('-', ''))

    def test_unpack(self):
        # round-trip: tuple -> string/int -> tuple
        u = ruid()
        self.assertEqual(u, unpack(uid(u)))
        self.assertEqual(u, iunpack(iuid(u)))
        # other direction with int
        n = iuid()
        self.assertEqual(n, iuid(iunpack(n)))
        # other direction with hex
        h = uid()
        self.assertEqual(h, uid(unpack(h)))
if __name__ == '__main__':
unittest.main()
| 25.109804 | 144 | 0.558176 | true | true | |
f71ad714eec52284cdcd59b0da289a9e2213538e | 646 | py | Python | data_source.py | ReimuYk/aws-line-counter | ce6131b8a2f3c9b9e70b8496e3f9ce1cfdd64804 | [
"MIT"
] | null | null | null | data_source.py | ReimuYk/aws-line-counter | ce6131b8a2f3c9b9e70b8496e3f9ce1cfdd64804 | [
"MIT"
] | null | null | null | data_source.py | ReimuYk/aws-line-counter | ce6131b8a2f3c9b9e70b8496e3f9ce1cfdd64804 | [
"MIT"
] | null | null | null | import requests
import os
import time
def get_page(i):
    """Fetch page *i* of the AWS Serverless Application Repository listing.

    Returns the decoded JSON payload as a dict.
    """
    url = r'https://shr32taah3.execute-api.us-east-1.amazonaws.com/Prod/applications/browse?pageSize=12&pageNumber=%d&searchText=&category=&runtime=&verified=&sortFields='
    page = requests.get(url % i)
    # SECURITY/BUGFIX: parse the body as JSON instead of eval()-ing a munged
    # copy of it. eval() on a fetched payload can execute arbitrary code, and
    # the old true/false -> True/False text replacement broke on `null` and on
    # strings that happened to contain "true"/"false".
    return page.json()
# Print deployment count, name and home page URL for the first three pages.
# BUGFIX(review): the previous standalone `data = get_page(3)` before the loop
# was dead code — its result was immediately overwritten on the first loop
# iteration, costing one wasted network round-trip.
for i in range(1, 4):
    data = get_page(i)
    for item in data["applications"]:
        print(item["deploymentCount"], end="\t")
        print(item["name"])
        print(item["homePageUrl"])
        print()
        # os.popen("git clone " + item["homePageUrl"])
        # time.sleep(3)
| 32.3 | 171 | 0.645511 | import requests
import os
import time
def get_page(i):
url = r'https://shr32taah3.execute-api.us-east-1.amazonaws.com/Prod/applications/browse?pageSize=12&pageNumber=%d&searchText=&category=&runtime=&verified=&sortFields='
page = requests.get(url%i)
return eval(page.text.replace("true", "True").replace("false", "False"))
data = get_page(3)
for i in range(1, 4):
data = get_page(i)
for item in data["applications"]:
print(item["deploymentCount"], end="\t")
print(item["name"])
print(item["homePageUrl"])
print()
| true | true |
f71ad7b7135ea8f54cdfded058aa8b21c4b24595 | 762 | py | Python | learntools/core/multiproblem.py | bkmalayC/learntools | c739a1ee131caebcb9bbd8b138d51cff75152f3a | [
"Apache-2.0"
] | 1 | 2020-06-24T18:25:31.000Z | 2020-06-24T18:25:31.000Z | learntools/core/multiproblem.py | bkmalayC/learntools | c739a1ee131caebcb9bbd8b138d51cff75152f3a | [
"Apache-2.0"
] | null | null | null | learntools/core/multiproblem.py | bkmalayC/learntools | c739a1ee131caebcb9bbd8b138d51cff75152f3a | [
"Apache-2.0"
class MultipartProblem:
    """A container for multiple related Problems grouped together in one
    question. If q1 is a MPP, its subquestions are accessed as q1.a, q1.b, etc.

    ``_varname`` is assigned externally (after construction) — presumably by
    the code that binds questions to notebook variables; confirm in caller.
    """

    def __init__(self, *probs):
        self.problems = probs
        # TODO: This should be ordered.
        self._prob_map = {}

    def _repr_markdown_(self):
        # Jupyter's markdown repr is just the plain repr
        return repr(self)

    def __repr__(self):
        varname = self._varname
        # e.g. "`q1.a`, `q1.b`" — one entry per registered sub-problem
        part_names = ', '.join(
            '`{}.{}`'.format(varname, letter) for letter in self._prob_map
        )
        return """This question is in {} parts. Those parts can be accessed as {}.
For example, to get a hint about part a, you would type `{}.a.hint()`.""".format(
            len(self._prob_map), part_names, varname
        )
| 34.636364 | 85 | 0.616798 | class MultipartProblem:
def __init__(self, *probs):
self.problems = probs
self._prob_map = {}
def _repr_markdown_(self):
return repr(self)
def __repr__(self):
varname = self._varname
part_names = ['`{}.{}`'.format(varname, letter) for letter in self._prob_map]
return """This question is in {} parts. Those parts can be accessed as {}.
For example, to get a hint about part a, you would type `{}.a.hint()`.""".format(
len(self._prob_map), ', '.join(part_names), varname
)
| true | true |
f71ad7f78f34b02d7d0b0895dc72b58d6a84bcbe | 1,775 | py | Python | {{cookiecutter.repo_name}}/python/{{cookiecutter.package_name}}/api/__init__.py | havok2063/cookiecutter-marvin | 3e64169b865012c5a6099e71a91789770222e5b5 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.repo_name}}/python/{{cookiecutter.package_name}}/api/__init__.py | havok2063/cookiecutter-marvin | 3e64169b865012c5a6099e71a91789770222e5b5 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.repo_name}}/python/{{cookiecutter.package_name}}/api/__init__.py | havok2063/cookiecutter-marvin | 3e64169b865012c5a6099e71a91789770222e5b5 | [
"BSD-3-Clause"
] | null | null | null | # !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
from __future__ import print_function, division, absolute_import
from flask import request
def process_request(request=None, as_dict=None, param=None):
    '''Extract the submitted data from a Flask request (POST or GET).

    Note: the ``request`` parameter shadows the module-level
    ``flask.request`` import; callers pass the request explicitly.

    Parameters:
        request (request):
            HTTP request object containing POST or GET data
        as_dict (bool):
            Boolean indicating whether to convert the data to a plain dict
        param (str):
            If given, return only this single value from the data instead
            of the whole form

    Returns:
        The single value for ``param`` (when ``param`` is set and the
        request carries data), otherwise a dict or werkzeug
        ImmutableMultiDict of all submitted data; ``{}`` for methods other
        than GET/POST.
    '''
    # get form data
    if request.method == 'POST':
        if not request.form:
            # no form fields -> assume content-type json
            data = request.get_json()
        else:
            # content-type form (urlencoded/multipart)
            data = request.form
    elif request.method == 'GET':
        data = request.args
    else:
        # unsupported method: nothing to extract
        return {}
    # single-parameter shortcut.
    # NOTE(review): when `param` is given but `data` is empty/falsy, this
    # falls through and returns the (empty) container instead of None —
    # confirm that callers rely on that.
    if param and data:
        return data.get(param, None)
    # convert ImmutableMultiDict to dictionary (if get or post-form) or use dict if post-json
    if as_dict:
        if isinstance(data, dict):
            form = data
        else:
            # use multidict lists/iterlists to group multiple values for the
            # same key into a list (single values are unwrapped)
            try:
                # py2.7 API
                form = {key: val if len(val) > 1 else val[0] for key, val in data.iterlists()}
            except AttributeError:
                # py3.5 API
                form = {key: val if len(val) > 1 else val[0] for key, val in data.lists()}
    else:
        form = data
    return form
| 30.603448 | 98 | 0.579155 |
from __future__ import print_function, division, absolute_import
from flask import request
def process_request(request=None, as_dict=None, param=None):
if request.method == 'POST':
if not request.form:
data = request.get_json()
else:
data = request.form
elif request.method == 'GET':
data = request.args
else:
return {}
urn data.get(param, None)
if as_dict:
if isinstance(data, dict):
form = data
else:
try:
form = {key: val if len(val) > 1 else val[0] for key, val in data.iterlists()}
except AttributeError:
form = {key: val if len(val) > 1 else val[0] for key, val in data.lists()}
else:
form = data
return form
| true | true |
f71ad891414e822ed9504c04e3c021fb01b0b6e5 | 13,313 | py | Python | negative_inline_editor/templatetags/negative_inline_edit.py | negative-space/negative-inline-editor | dde28b9ada65b81cb996bb9197826df45b67c48b | [
"MIT"
] | null | null | null | negative_inline_editor/templatetags/negative_inline_edit.py | negative-space/negative-inline-editor | dde28b9ada65b81cb996bb9197826df45b67c48b | [
"MIT"
] | null | null | null | negative_inline_editor/templatetags/negative_inline_edit.py | negative-space/negative-inline-editor | dde28b9ada65b81cb996bb9197826df45b67c48b | [
"MIT"
] | null | null | null | import os
import re
from django import template
from django.conf import settings
from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import MultipleObjectsReturned
from django.urls import reverse
from django.db.models import Model
from django.template import Library, loader
from django.template.defaulttags import ForNode
from django.utils.module_loading import import_string
from django.utils.safestring import mark_safe
from django.utils.translation import get_language
register = Library()
def get_model_by_name(model_name):
    """Resolve an 'app_label.ModelName' string to the model class."""
    from django.apps import apps
    app_label, name = model_name.split('.', 1)
    return apps.get_model(app_label, name)
class EditableNode(template.Node):
    """Renders a model field, wrapped in a contenteditable <span> when the
    session is in inline-edit mode so the front-end JS can save edits back
    via the 'update_model' endpoint.
    """
    def __init__(self, expr, field, html=False):
        self.expr = expr    # template expression resolving to a model instance
        self.field = field  # name of the model field to render/edit
        self.html = html    # True when the field holds HTML (from the @html flag)
    def render(self, context):
        expr = self.expr
        model = get_model_by_expr(context, expr)
        # outside edit mode (or inline edit mode) just output the raw value
        if not context['request'].session.get('editable'):
            return getattr(model, self.field, None)
        if not context['request'].session.get('editable_inplace'):
            return getattr(model, self.field, None)
        # 'app_label.ClassName' identifies the model for the save endpoint
        model_cls = '{}.{}'.format(model._meta.app_label, model.__class__.__name__)
        save_url = reverse('update_model')
        return '<span contenteditable="true" data-html="{}" class="editable-model" data-editable-model="{}" data-editable-pk="{}" ' \
               'data-editable-field="{}" data-save-url="{}">{}</span>'.format(
            'true' if self.html else 'false', model_cls, model.pk, self.field, save_url,
            getattr(model, self.field, None)
        )
def get_model_by_expr(context, expr):
    """Resolve a template expression to a *saved* Django model instance.

    Raises ValueError when the expression does not evaluate to a model
    instance, or when the instance has no primary key (never saved).
    """
    resolved = template.Variable(expr).resolve(context)
    if not isinstance(resolved, Model):
        message = 'Left part of expression "{}" do not evaluate to Django model: {}'.format(
            expr, repr(resolved))
        raise ValueError(message)
    if resolved.pk is None:
        message = ('Left part of expression "{}" evaluated to model that have no '
                   'primary key. Not saved? {}'.format(expr, repr(resolved)))
        raise ValueError(message)
    return resolved
@register.tag
def editable(parser, token):
    """Parse {% editable model->field %} or {% editable model->field @html %}.

    Returns an EditableNode that renders the field inline-editable when the
    session is in edit mode.
    """
    bits = token.split_contents()
    if len(bits) < 2 or len(bits) > 3:
        raise template.TemplateSyntaxError(
            "%r tag requires at least single argument" % token.contents.split()[0]
        )
    expr = bits[1]
    if len(bits) == 3:
        if bits[2] != '@html':
            # BUGFIX(review): this branch previously reused the "requires at
            # least single argument" message, which misdescribed the error.
            raise template.TemplateSyntaxError(
                "%r tag's optional second argument must be '@html'" % token.contents.split()[0]
            )
        html = True
    else:
        html = False
    if '->' not in expr:
        # BUGFIX(review): this error path previously referenced the undefined
        # name `tag_name`, so it raised NameError instead of the intended
        # TemplateSyntaxError.
        raise template.TemplateSyntaxError(
            "%r tag's argument should be expression in form: model->field"
            % token.contents.split()[0]
        )
    expr, field = [x.strip() for x in expr.split('->')]
    return EditableNode(expr, field, html=html)
class EditablePanel(object):
    """Describes one panel in the front-end editing sidebar.

    Either a form-style panel for a single model instance
    (``form_style=True``) or a list-style panel for a related queryset
    (``field`` + ``add_btn`` required).
    """

    def __init__(self, name, model, field=None, add_btn=None, form_style=False) -> None:
        super().__init__()
        self.name = name
        self.model = model
        self.field = field          # related field name (list-style panels)
        self.add_btn = add_btn      # pre-rendered "add" button HTML (list-style panels)
        self.form_style = form_style
        self.model_cls = type(model)
        try:
            self.admin_cls = admin.site._registry[self.model_cls]
            content_type = ContentType.objects.get_for_model(self.model_cls)
            self.model_admin_url = reverse("admin:%s_%s_changelist" % (content_type.app_label, content_type.model)) \
                                   + str(model.id) + '/change/'
        except KeyError:
            # BUGFIX(review): admin.site._registry is a dict, so looking up an
            # unregistered model raises KeyError — the original `except
            # IndexError` could never fire and the lookup crashed instead.
            self.admin_cls = None
            # keep the attribute defined so templates can test it safely
            self.model_admin_url = None
        if not form_style:
            assert self.field
            assert self.add_btn

    @property
    def items(self):
        """Related objects of the list-style panel, each paired with its
        verbose model name for display."""
        return [{'obj': x, 'cls': x._meta.verbose_name} for x in getattr(self.model, self.field).all()]
class ForWrappingNode(template.Node):
    """Wraps a standard Django ForNode over a related queryset, injecting an
    "add" button and/or a sidebar panel when the session is in edit mode.
    """
    def __init__(self, for_node, expr, field, panel_edit, inline_edit, alias):
        self.for_node = for_node        # the wrapped {% for %} node
        self.expr = expr                # expression resolving to the parent model
        self.field = field              # related manager field name on the parent
        self.panel_edit = panel_edit    # register a sidebar panel?
        self.inline_edit = inline_edit  # show the inline add button?
        self.alias = alias              # optional panel title override
    def render(self, context):
        # outside edit mode, behave exactly like the plain for-loop
        if not context['request'].session.get('editable'):
            return self.for_node.render(context)
        model = get_model_by_expr(context, self.expr)
        model_cls = '{}.{}'.format(model._meta.app_label, model.__class__.__name__)
        # introspect the related field to find the reverse FK name and target model
        related = get_model_by_name(model_cls)._meta.get_field(self.field)
        content_type = ContentType.objects.get_for_model(related.related_model)  # .__class__
        model_admin_url = reverse("admin:%s_%s_changelist" % (content_type.app_label, content_type.model))
        update_sort_url = reverse('update_sort')
        rendered_for = self.for_node.render(context)
        # anchor consumed by the front-end JS to offer add/sort controls
        add_btn = f'<a id="editable-{model_cls}-{model.pk}" class="editable-list-btn" data-editable-model="{model_cls}" data-editable-pk="{model.pk}" ' \
                  f'data-editable-field="{self.field}" data-editable-related-field="{related.field.name}" data-related-admin-url="{model_admin_url}" data-update-sort-url="{update_sort_url}"></a>'
        if self.panel_edit:
            # lazily create the per-request panel registry
            if not hasattr(context['request'], 'editable_panels'):
                context['request'].editable_panels = {}
            panel_name = self.alias or related.related_name
            context['request'].editable_panels[panel_name] = EditablePanel(
                name=panel_name,
                model=model,
                field=self.field,
                add_btn=add_btn
            )
        # inline button only when inline editing is both requested and enabled
        if not self.inline_edit or not context['request'].session.get('editable_inplace'):
            return rendered_for
        return add_btn + rendered_for
@register.tag(name='editable-related')
def editable_list(parser, token):
    """Parse {% editable-related x in model->field [reversed] [@panel|@panel_only[(alias)]] %}.

    Builds a standard ForNode over `model.field.all` and wraps it in a
    ForWrappingNode that adds edit controls in edit mode.
    NOTE(review): several error messages below were copied from Django's
    built-in `for` tag and still mention 'for'/'editable-list' rather than
    'editable-related'.
    """
    bits = token.split_contents()
    panel_edit = False
    inline_edit = True
    alias = None
    # trailing @panel / @panel_only flag, optionally with an (alias)
    panel_expr = re.match('^@(panel|panel_only)(\(([^\)]+)\))?$', bits[-1])
    if panel_expr:
        bits = bits[0:-1]
        panel_type = panel_expr.group(1)
        alias = panel_expr.group(3)
        if panel_type == 'panel':
            panel_edit = True
        elif panel_type == 'panel_only':
            # panel only: suppress the inline add button
            inline_edit = False
            panel_edit = True
    if len(bits) < 4:
        raise template.TemplateSyntaxError("'editable-list' statements should have at least four"
                                           " words: %s" % token.contents)
    is_reversed = bits[-1] == 'reversed'
    in_index = -3 if is_reversed else -2
    if bits[in_index] != 'in':
        raise template.TemplateSyntaxError("'for' statements should use the format"
                                           " 'for x in y': %s" % token.contents)
    # loop variables may be comma-separated, as in {% for a, b in ... %}
    loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))
    for var in loopvars:
        if not var or ' ' in var:
            raise template.TemplateSyntaxError("'for' tag received an invalid argument:"
                                               " %s" % token.contents)
    # split "model->field" and iterate the related manager's .all()
    raw_expr = bits[in_index + 1]
    expr, field = [x.strip() for x in raw_expr.split('->')]
    sequence = parser.compile_filter('{}.{}.all'.format(expr, field))
    nodelist_loop = parser.parse(('end-editable-related',))
    token = parser.next_token()
    # {% empty %} support is not implemented (kept for reference):
    # if token.contents == 'empty':
    #     nodelist_empty = parser.parse(('endfor',))
    #     parser.delete_first_token()
    # else:
    nodelist_empty = None
    return ForWrappingNode(
        ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty),
        expr,
        field,
        panel_edit=panel_edit,
        inline_edit=inline_edit,
        alias=alias
    )
@register.simple_tag(takes_context=True, name='_')
def translate_inline(context, value):
    """{% _ "key" %} — translate a string, making it inline-editable in edit mode.

    In edit mode the key is also collected into request.editable_strings
    (unless 'disable_i18n_collect' is set in the context).
    """
    # imported lazily to avoid a hard dependency outside this tag
    from negative_i18n.models import StringTranslation
    from negative_i18n.trans_utils import translate_lazy
    if 'request' in context and context['request'].session.get('editable'):
        if 'disable_i18n_collect' not in context:
            # collect every translatable key seen on this page
            if not hasattr(context['request'], 'editable_strings'):
                context['request'].editable_strings = set()
            context['request'].editable_strings.add(value)
    # outside inline-edit mode just return the lazy translation
    if not 'request' in context or not context['request'].session.get('editable_inplace'):
        return translate_lazy(value)
    try:
        obj, created = StringTranslation.objects.get_or_create(key=value)
    except MultipleObjectsReturned:
        # duplicate rows for the same key: keep the first, delete the rest
        first = StringTranslation.objects.filter(key=value)[0]
        StringTranslation.objects.exclude(id=first.id).filter(key=value).delete()
        obj, created = first, False
    save_url = reverse('update_model')
    # fall back to the raw key when no translation has been entered yet
    return mark_safe(
        '<span contenteditable="true" class="editable-model" data-editable-model="{}" data-editable-pk="{}" ' \
        'data-editable-field="{}" data-save-url="{}">{}</span>'.format(
            'negative_i18n.StringTranslation', obj.pk, 'translation', save_url, obj.translation or value
        ))
class EditModelNode(template.Node):
    """Registers a form-style edit panel for a single model instance.

    Renders nothing; its only effect is adding an EditablePanel to
    request.editable_panels while the session is in edit mode.
    """

    def __init__(self, expr, alias):
        self.expr = expr
        self.alias = alias

    def render(self, context):
        request = context['request']
        # outside edit mode this tag is a no-op
        if not request.session.get('editable'):
            return ''
        instance = get_model_by_expr(context, self.expr)
        # lazily create the per-request panel registry
        if not hasattr(request, 'editable_panels'):
            request.editable_panels = {}
        title = self.alias or instance._meta.verbose_name
        request.editable_panels[title] = EditablePanel(
            name=title, model=instance, form_style=True
        )
        return ''
@register.tag(name='editable-model')
def editable_model(parser, token):
    """Parse {% editable-model expr %} or {% editable-model expr as alias %}."""
    bits = token.split_contents()
    if len(bits) == 2:
        return EditModelNode(bits[1], alias=None)
    if len(bits) == 4 and bits[2] == 'as':
        return EditModelNode(bits[1], alias=bits[3])
    raise template.TemplateSyntaxError("'editable-model' statements should have at least two"
                                       " words: %s" % token.contents)
class InlineFormNode(template.Node):
    """Instantiates a form class (by dotted path) into the template context,
    binding and saving it when the current request is a POST.
    """
    def __init__(self, form_name, var_name):
        self.form_name = form_name  # dotted path to the form class
        self.var_name = var_name    # context variable to store the form under
    def render(self, context):
        request = context['request']
        form_cls = import_string(self.form_name)
        if request.method == 'POST':
            form = form_cls(request.POST)
            if form.is_valid():
                form.save()
            else:
                # NOTE(review): validation errors are silently ignored here —
                # the bound form (with its errors) is still placed in the
                # context, so the template is expected to display them.
                pass
        else:
            # GET: unbound, empty form
            form = form_cls()
        context[self.var_name] = form
        return ''
@register.tag
def load_form(parser, token):
    """Parse {% load_form dotted.path.FormClass as var_name %}."""
    bits = token.split_contents()
    # exactly four tokens, and the third must be the literal 'as'
    if len(bits) != 4 or bits[2] != 'as':
        raise template.TemplateSyntaxError(
            "%r tag requires arguments in form: load_form form_name as var_name" % token.contents.split()[0]
        )
    return InlineFormNode(bits[1], bits[3])
class EditableWrapNode(template.Node):
    """Wraps page content with the inline-editor chrome (CSS + sidebar panel)
    for superusers; plain content for everyone else.
    """
    def __init__(self, nodelist):
        self.nodelist = nodelist
    def render(self, context):
        html = self.nodelist.render(context)
        # editor chrome is superuser-only
        if not context['request'].user.is_superuser:
            return html
        if context['request'].session.get('editable'):
            # ?editableTab=... keeps the sidebar open after a reload
            extra_class = ' open' if context['request'].GET.get('editableTab') else ''
            # NOTE(review): the wrapped nodelist is rendered a second time
            # here — any side effects in the wrapped tags run twice; confirm
            # this is intentional.
            html = '<div class="cratis-editable-wrapper' + extra_class + '">' + self.nodelist.render(
                context) + '</div>'
        # render the sidebar panel with language-switcher data
        t = loader.get_template('editable-model/panel.html')
        context.push({'langs': settings.LANGUAGES, 'lang': get_language()})
        panel_html = t.render(context.flatten())
        # inline the editor stylesheet, whitespace-collapsed, from this package
        css_file = os.path.dirname(os.path.dirname(__file__)) + '/static/editable-model/editable-model.css'
        with open(css_file) as f:
            css_data = '<style>' + re.sub('\s+', ' ', f.read()) + '</style>'
        return css_data + html + panel_html
@register.tag(name='editable-wrap')
def editable_wrap(parser, token):
    """Parse {% editable-wrap %} ... {% end-editable-wrap %} (no arguments)."""
    if len(token.split_contents()) > 1:
        raise template.TemplateSyntaxError("'editable-wrap' statement do not accept arguments")
    body = parser.parse(('end-editable-wrap',))
    parser.next_token()  # consume the closing {% end-editable-wrap %}
    return EditableWrapNode(nodelist=body)
class WithViewContextNode(template.Node):
    """Instantiates a class-based view by dotted path and merges its
    get_context_data() into the template context without overwriting
    existing variables. Renders nothing.
    """

    def __init__(self, expr):
        self.expr = expr

    def render(self, context):
        view_cls = import_string(self.expr)
        view = view_cls(request=context['request'], kwargs={})
        extra = view.get_context_data()
        for name in extra:
            # variables already in the context win over the view's values
            if name not in context:
                context[name] = extra[name]
        return ''
@register.tag(name='load-view-context')
def load_view_context(parser, token):
    """Parse {% load-view-context 'app.views.SomeView' %}."""
    pieces = token.split_contents()
    if len(pieces) != 2:
        raise template.TemplateSyntaxError("'load-view-context' requires argument")
    return WithViewContextNode(expr=pieces[1])
| 32.234867 | 189 | 0.621948 | import os
import re
from django import template
from django.conf import settings
from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import MultipleObjectsReturned
from django.urls import reverse
from django.db.models import Model
from django.template import Library, loader
from django.template.defaulttags import ForNode
from django.utils.module_loading import import_string
from django.utils.safestring import mark_safe
from django.utils.translation import get_language
register = Library()
def get_model_by_name(model_name):
from django.apps import apps
app_name, model_name = model_name.split('.', 1)
return apps.get_model(app_name, model_name)
class EditableNode(template.Node):
def __init__(self, expr, field, html=False):
self.expr = expr
self.field = field
self.html = html
def render(self, context):
expr = self.expr
model = get_model_by_expr(context, expr)
if not context['request'].session.get('editable'):
return getattr(model, self.field, None)
if not context['request'].session.get('editable_inplace'):
return getattr(model, self.field, None)
model_cls = '{}.{}'.format(model._meta.app_label, model.__class__.__name__)
save_url = reverse('update_model')
return '<span contenteditable="true" data-html="{}" class="editable-model" data-editable-model="{}" data-editable-pk="{}" ' \
'data-editable-field="{}" data-save-url="{}">{}</span>'.format(
'true' if self.html else 'false', model_cls, model.pk, self.field, save_url,
getattr(model, self.field, None)
)
def get_model_by_expr(context, expr):
model = template.Variable(expr).resolve(context)
if not isinstance(model, Model):
raise ValueError('Left part of expression "{}" do not evaluate to Django model: {}'.format(
expr,
repr(model)
))
if model.pk is None:
raise ValueError(
'Left part of expression "{}" evaluated to model that have no primary key. Not saved? {}'.format(
expr,
repr(model)
))
return model
@register.tag
def editable(parser, token):
bits = token.split_contents()
if len(bits) < 2 or len(bits) > 3:
raise template.TemplateSyntaxError(
"%r tag requires at least single argument" % token.contents.split()[0]
)
expr = bits[1]
if len(bits) == 3:
if bits[2] != '@html':
raise template.TemplateSyntaxError(
"%r tag requires at least single argument" % token.contents.split()[0]
)
html = True
else:
html = False
if '->' not in expr:
raise template.TemplateSyntaxError(
"%r tag's argument should be expression in form: model->field" % tag_name
)
expr, field = [x.strip() for x in expr.split('->')]
return EditableNode(expr, field, html=html)
class EditablePanel(object):
def __init__(self, name, model, field=None, add_btn=None, form_style=False) -> None:
super().__init__()
self.name = name
self.model = model
self.field = field
self.add_btn = add_btn
self.form_style = form_style
self.model_cls = type(model)
try:
self.admin_cls = admin.site._registry[self.model_cls]
content_type = ContentType.objects.get_for_model(self.model_cls) # .__class__
self.model_admin_url = reverse("admin:%s_%s_changelist" % (content_type.app_label, content_type.model)) \
+ str(model.id) + '/change/'
except IndexError:
self.admin_cls = None
if not form_style:
assert self.field
assert self.add_btn
@property
def items(self):
return [{'obj': x, 'cls': x._meta.verbose_name} for x in getattr(self.model, self.field).all()]
class ForWrappingNode(template.Node):
def __init__(self, for_node, expr, field, panel_edit, inline_edit, alias):
self.for_node = for_node
self.expr = expr
self.field = field
self.panel_edit = panel_edit
self.inline_edit = inline_edit
self.alias = alias
def render(self, context):
if not context['request'].session.get('editable'):
return self.for_node.render(context)
model = get_model_by_expr(context, self.expr)
model_cls = '{}.{}'.format(model._meta.app_label, model.__class__.__name__)
related = get_model_by_name(model_cls)._meta.get_field(self.field)
content_type = ContentType.objects.get_for_model(related.related_model) # .__class__
model_admin_url = reverse("admin:%s_%s_changelist" % (content_type.app_label, content_type.model))
update_sort_url = reverse('update_sort')
rendered_for = self.for_node.render(context)
add_btn = f'<a id="editable-{model_cls}-{model.pk}" class="editable-list-btn" data-editable-model="{model_cls}" data-editable-pk="{model.pk}" ' \
f'data-editable-field="{self.field}" data-editable-related-field="{related.field.name}" data-related-admin-url="{model_admin_url}" data-update-sort-url="{update_sort_url}"></a>'
if self.panel_edit:
if not hasattr(context['request'], 'editable_panels'):
context['request'].editable_panels = {}
panel_name = self.alias or related.related_name
context['request'].editable_panels[panel_name] = EditablePanel(
name=panel_name,
model=model,
field=self.field,
add_btn=add_btn
)
if not self.inline_edit or not context['request'].session.get('editable_inplace'):
return rendered_for
return add_btn + rendered_for
@register.tag(name='editable-related')
def editable_list(parser, token):
bits = token.split_contents()
panel_edit = False
inline_edit = True
alias = None
panel_expr = re.match('^@(panel|panel_only)(\(([^\)]+)\))?$', bits[-1])
if panel_expr:
bits = bits[0:-1]
panel_type = panel_expr.group(1)
alias = panel_expr.group(3)
if panel_type == 'panel':
panel_edit = True
elif panel_type == 'panel_only':
inline_edit = False
panel_edit = True
if len(bits) < 4:
raise template.TemplateSyntaxError("'editable-list' statements should have at least four"
" words: %s" % token.contents)
is_reversed = bits[-1] == 'reversed'
in_index = -3 if is_reversed else -2
if bits[in_index] != 'in':
raise template.TemplateSyntaxError("'for' statements should use the format"
" 'for x in y': %s" % token.contents)
loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))
for var in loopvars:
if not var or ' ' in var:
raise template.TemplateSyntaxError("'for' tag received an invalid argument:"
" %s" % token.contents)
raw_expr = bits[in_index + 1]
expr, field = [x.strip() for x in raw_expr.split('->')]
sequence = parser.compile_filter('{}.{}.all'.format(expr, field))
nodelist_loop = parser.parse(('end-editable-related',))
token = parser.next_token()
# if token.contents == 'empty':
# nodelist_empty = parser.parse(('endfor',))
# parser.delete_first_token()
# else:
nodelist_empty = None
return ForWrappingNode(
ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty),
expr,
field,
panel_edit=panel_edit,
inline_edit=inline_edit,
alias=alias
)
@register.simple_tag(takes_context=True, name='_')
def translate_inline(context, value):
from negative_i18n.models import StringTranslation
from negative_i18n.trans_utils import translate_lazy
if 'request' in context and context['request'].session.get('editable'):
if 'disable_i18n_collect' not in context:
if not hasattr(context['request'], 'editable_strings'):
context['request'].editable_strings = set()
context['request'].editable_strings.add(value)
if not 'request' in context or not context['request'].session.get('editable_inplace'):
return translate_lazy(value)
try:
obj, created = StringTranslation.objects.get_or_create(key=value)
except MultipleObjectsReturned:
first = StringTranslation.objects.filter(key=value)[0]
StringTranslation.objects.exclude(id=first.id).filter(key=value).delete()
obj, created = first, False
save_url = reverse('update_model')
return mark_safe(
'<span contenteditable="true" class="editable-model" data-editable-model="{}" data-editable-pk="{}" ' \
'data-editable-field="{}" data-save-url="{}">{}</span>'.format(
'negative_i18n.StringTranslation', obj.pk, 'translation', save_url, obj.translation or value
))
class EditModelNode(template.Node):
def __init__(self, expr, alias):
self.expr = expr
self.alias = alias
def render(self, context):
if not context['request'].session.get('editable'):
return ''
model = get_model_by_expr(context, self.expr)
if not hasattr(context['request'], 'editable_panels'):
context['request'].editable_panels = {}
panel_name = self.alias or model._meta.verbose_name
context['request'].editable_panels[panel_name] = EditablePanel(
name=panel_name,
model=model,
form_style=True
)
return ''
@register.tag(name='editable-model')
def editable_model(parser, token):
    """Parse ``{% editable-model expr [as alias] %}`` into an EditModelNode."""
    pieces = token.split_contents()
    if len(pieces) == 2:
        alias = None
    elif len(pieces) == 4 and pieces[2] == 'as':
        alias = pieces[3]
    else:
        raise template.TemplateSyntaxError(
            "'editable-model' statements should have at least two words: %s" % token.contents)
    return EditModelNode(pieces[1], alias=alias)
class InlineFormNode(template.Node):
    """Instantiates a form class (bound on POST) into the template context."""

    def __init__(self, form_name, var_name):
        self.form_name = form_name
        self.var_name = var_name

    def render(self, context):
        """Build the form, save it on a valid POST, expose it; emit nothing."""
        http_request = context['request']
        form_class = import_string(self.form_name)
        if http_request.method == 'POST':
            bound = form_class(http_request.POST)
            if bound.is_valid():
                bound.save()
            # An invalid form is still exposed so the template can show errors.
        else:
            bound = form_class()
        context[self.var_name] = bound
        return ''
@register.tag
def load_form(parser, token):
    """Parse ``{% load_form path.to.FormClass as var %}`` into an InlineFormNode."""
    parts = token.split_contents()
    # Wrong arity or a missing 'as' keyword is a template syntax error.
    if len(parts) != 4 or parts[2] != 'as':
        raise template.TemplateSyntaxError(
            "%r tag requires arguments in form: load_form form_name as var_name" % token.contents.split()[0]
        )
    return InlineFormNode(parts[1], parts[3])
class EditableWrapNode(template.Node):
    """Wraps the rendered page content with the inline-editing chrome.

    Non-superusers get the wrapped content unchanged.  For superusers the
    content is enclosed in the editable container (when edit mode is on)
    and the editing panel plus its inlined CSS are appended.
    """
    def __init__(self, nodelist):
        self.nodelist = nodelist

    def render(self, context):
        html = self.nodelist.render(context)
        if not context['request'].user.is_superuser:
            return html
        if context['request'].session.get('editable'):
            # Keep the editing tab open across reloads when requested via query string.
            extra_class = ' open' if context['request'].GET.get('editableTab') else ''
            # Reuse the already-rendered content instead of rendering the
            # nodelist a second time: the double render wasted work and could
            # duplicate side effects of stateful template tags.
            html = '<div class="cratis-editable-wrapper' + extra_class + '">' + html + '</div>'
        t = loader.get_template('editable-model/panel.html')
        context.push({'langs': settings.LANGUAGES, 'lang': get_language()})
        panel_html = t.render(context.flatten())
        css_file = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                'static', 'editable-model', 'editable-model.css')
        with open(css_file) as f:
            # Inline the stylesheet, collapsed to single spaces (raw string
            # avoids the invalid-escape DeprecationWarning of '\s+').
            css_data = '<style>' + re.sub(r'\s+', ' ', f.read()) + '</style>'
        return css_data + html + panel_html
@register.tag(name='editable-wrap')
def editable_wrap(parser, token):
    """Parse ``{% editable-wrap %}...{% end-editable-wrap %}`` (no arguments)."""
    if len(token.split_contents()) > 1:
        raise template.TemplateSyntaxError("'editable-wrap' statement do not accept arguments")
    inner_nodes = parser.parse(('end-editable-wrap',))
    parser.next_token()  # consume the closing tag
    return EditableWrapNode(nodelist=inner_nodes)
class WithViewContextNode(template.Node):
    """Merges a view class's ``get_context_data()`` into the template context."""

    def __init__(self, expr):
        self.expr = expr

    def render(self, context):
        """Instantiate the dotted-path view and copy only new keys over."""
        view_class = import_string(self.expr)
        view_instance = view_class(request=context['request'], kwargs={})
        extra = view_instance.get_context_data()
        for name in extra:
            # Never clobber values the template context already has.
            if name not in context:
                context[name] = extra[name]
        return ''
@register.tag(name='load-view-context')
def load_view_context(parser, token):
    """Parse ``{% load-view-context dotted.path.View %}`` into a WithViewContextNode."""
    pieces = token.split_contents()
    if len(pieces) != 2:
        raise template.TemplateSyntaxError("'load-view-context' requires argument")
    return WithViewContextNode(expr=pieces[1])
| true | true |
f71ad930fc472c80200a4a8c8b4190aa61e62059 | 675 | py | Python | components/app/App_tpl.py | bitbuit/billterm | 553bf2afb6ff2c1e15becbe1b4ab59346e5a87b5 | [
"MIT"
] | null | null | null | components/app/App_tpl.py | bitbuit/billterm | 553bf2afb6ff2c1e15becbe1b4ab59346e5a87b5 | [
"MIT"
] | null | null | null | components/app/App_tpl.py | bitbuit/billterm | 553bf2afb6ff2c1e15becbe1b4ab59346e5a87b5 | [
"MIT"
] | null | null | null | from libs.Screen import *
class App_tpl(object):
    """Console views for the app: greeting banner, DB listing, and help."""

    @staticmethod
    def hello():
        """Print the ASCII-art greeting banner and a hint about 'help'."""
        banner = (
            " _ _ _ _ _ ",
            " | |__(_) | | |_ ___ _ _ _ __ ",
            " | '_ \ | | | _/ -_) '_| ' \ ",
            "__|_.__/_|_|_|\__\___|_| |_|_|_|_H_e_l_l_o_!",
        )
        for row in banner:
            print(row)
        print(" ~ Write " + Text_style.BOLD + "help" + Text_style.END_STYLE + " if you are lost :$")

    @staticmethod
    def list_dbs(dbs):
        """Render every known database name, one highlighted line each."""
        print("Existing DBs")
        for name in dbs:
            Screen.render_line([ [name, "{:<20}", Text_style.BLUE] ])

    @staticmethod
    def help():
        """Print a placeholder help message."""
        print('helping people!')
| 30.681818 | 109 | 0.459259 | from libs.Screen import *
class App_tpl(object):
@staticmethod
def hello():
print(" _ _ _ _ _ ")
print(" | |__(_) | | |_ ___ _ _ _ __ ")
print(" | '_ \ | | | _/ -_) '_| ' \ ")
print("__|_.__/_|_|_|\__\___|_| |_|_|_|_H_e_l_l_o_!")
print(" ~ Write " + Text_style.BOLD + "help" + Text_style.END_STYLE + " if you are lost :$")
@staticmethod
def list_dbs(dbs):
print("Existing DBs")
for db in dbs:
Screen.render_line([ [db, "{:<20}", Text_style.BLUE] ])
@staticmethod
def help():
print('helping people!')
| true | true |
f71adab4390632d131f94912e04795cf9ddfadd8 | 408 | py | Python | tbx/people/migrations/0009_personpage_alt_short_intro.py | arush15june/wagtail-torchbox | c4d06e096c72bd8007975dc016133024f9d27fab | [
"MIT"
] | null | null | null | tbx/people/migrations/0009_personpage_alt_short_intro.py | arush15june/wagtail-torchbox | c4d06e096c72bd8007975dc016133024f9d27fab | [
"MIT"
] | null | null | null | tbx/people/migrations/0009_personpage_alt_short_intro.py | arush15june/wagtail-torchbox | c4d06e096c72bd8007975dc016133024f9d27fab | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-02-16 07:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the optional free-text field
    # ``alt_short_intro`` to the PersonPage model.

    dependencies = [
        ('people', '0008_personpage_short_intro'),
    ]
    operations = [
        migrations.AddField(
            model_name='personpage',
            name='alt_short_intro',
            # Nullable and blank: existing rows need no backfill.
            field=models.TextField(blank=True, null=True),
        ),
    ]
| 21.473684 | 58 | 0.612745 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0008_personpage_short_intro'),
]
operations = [
migrations.AddField(
model_name='personpage',
name='alt_short_intro',
field=models.TextField(blank=True, null=True),
),
]
| true | true |
f71adb37c06a58da63ac0e0d5a07fe9ff030a284 | 2,941 | py | Python | southwest-headers.py | WGriffing/southwest-headers | 0dd4ac4e2ea50872638499b5b14673aa4b5fa60b | [
"MIT"
] | 12 | 2021-12-09T16:04:52.000Z | 2022-03-25T01:03:58.000Z | southwest-headers.py | WGriffing/southwest-headers | 0dd4ac4e2ea50872638499b5b14673aa4b5fa60b | [
"MIT"
] | 3 | 2021-12-13T00:04:53.000Z | 2022-01-02T06:37:13.000Z | southwest-headers.py | WGriffing/southwest-headers | 0dd4ac4e2ea50872638499b5b14673aa4b5fa60b | [
"MIT"
] | 2 | 2021-12-20T16:36:41.000Z | 2021-12-28T06:51:17.000Z | # this code is based on original work by @jasonwbarnett.
# https://github.com/pyro2927/SouthwestCheckin/issues/70#issuecomment-921166994
import json
import time
import re
import os
import random
import string
import sys
from pathlib import Path
from seleniumwire import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
confirmation_number = ''.join(random.choices(string.ascii_uppercase, k=6))
first_name = ''.join(random.choices(string.ascii_lowercase, k=random.randrange(4,10))).capitalize()
last_name = ''.join(random.choices(string.ascii_lowercase, k=random.randrange(4,10))).capitalize()
output_file = sys.argv[1] if len(sys.argv) > 1 else "southwest_headers.json"
chrome_options = Options()
chrome_options.headless = True
# the headless option adds HeadlessChrome to the user agent which causes southwest to return invalid headers. so need to set a user agent that appears like a normal web browser.
chrome_options.add_argument('user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36')
# fixes issue when user runs as root
# https://stackoverflow.com/questions/50642308/webdriverexception-unknown-error-devtoolsactiveport-file-doesnt-exist-while-t
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
# fixes issue if user doesn't have write permissions to default storage location
seleniumwire_options = { 'request_storage': 'memory' }
driver = webdriver.Chrome(os.getcwd() + "/chromedriver", options=chrome_options, seleniumwire_options=seleniumwire_options)
driver.scopes = [ "page\/check-in" ] # only capture request URLs matching this regex
driver.get("https://mobile.southwest.com/check-in")
# fill out the form once the form fields become available
element = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "recordLocator")))
element.send_keys(confirmation_number)
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "firstName"))).send_keys(first_name)
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "lastName"))).send_keys(last_name)
element.submit()
# give the form time to submit before checking headers
time.sleep(10)
# content-type is a required header but not included in the request headers so we'll manually add it here.
southwest_headers = { "content-type": "application/json" }
headers = driver.requests[0].headers
for key in headers:
if re.match("x-api-key|x-user-experience-id|x-channel-id|^[\w-]+?-\w$", key, re.I):
# only keep the headers we need
southwest_headers[key] = headers[key]
# save headers
with open(output_file, "w") as json_file:
json.dump(southwest_headers, json_file)
driver.quit()
| 43.895522 | 177 | 0.782387 |
me
import re
import os
import random
import string
import sys
from pathlib import Path
from seleniumwire import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
confirmation_number = ''.join(random.choices(string.ascii_uppercase, k=6))
first_name = ''.join(random.choices(string.ascii_lowercase, k=random.randrange(4,10))).capitalize()
last_name = ''.join(random.choices(string.ascii_lowercase, k=random.randrange(4,10))).capitalize()
output_file = sys.argv[1] if len(sys.argv) > 1 else "southwest_headers.json"
chrome_options = Options()
chrome_options.headless = True
chrome_options.add_argument('user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
seleniumwire_options = { 'request_storage': 'memory' }
driver = webdriver.Chrome(os.getcwd() + "/chromedriver", options=chrome_options, seleniumwire_options=seleniumwire_options)
driver.scopes = [ "page\/check-in" ] # only capture request URLs matching this regex
driver.get("https://mobile.southwest.com/check-in")
# fill out the form once the form fields become available
element = WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "recordLocator")))
element.send_keys(confirmation_number)
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "firstName"))).send_keys(first_name)
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.NAME, "lastName"))).send_keys(last_name)
element.submit()
# give the form time to submit before checking headers
time.sleep(10)
# content-type is a required header but not included in the request headers so we'll manually add it here.
southwest_headers = { "content-type": "application/json" }
headers = driver.requests[0].headers
for key in headers:
if re.match("x-api-key|x-user-experience-id|x-channel-id|^[\w-]+?-\w$", key, re.I):
southwest_headers[key] = headers[key]
with open(output_file, "w") as json_file:
json.dump(southwest_headers, json_file)
driver.quit()
| true | true |
f71adbc103a2cba26d96345692d6ef2e185b7c56 | 5,133 | py | Python | part_14/setfirmware.py | zcutlip/broken_abandoned | 76f2f8577de3c1a570e28f081349e6f22fb33a34 | [
"MIT"
] | 28 | 2015-05-02T22:19:26.000Z | 2021-04-26T20:01:00.000Z | part_14/setfirmware.py | zcutlip/broken_abandoned | 76f2f8577de3c1a570e28f081349e6f22fb33a34 | [
"MIT"
] | null | null | null | part_14/setfirmware.py | zcutlip/broken_abandoned | 76f2f8577de3c1a570e28f081349e6f22fb33a34 | [
"MIT"
] | 6 | 2015-11-03T08:12:43.000Z | 2018-08-19T21:48:18.000Z | #!/usr/bin/env python
# Copyright (c) 2015
# - Zachary Cutlip <uid000()gmail.com>
#
# See LICENSE for more details.
#
import sys
import socket
import time
import base64
from bowcaster.common import Logging
HOST="10.12.34.1"
#HOST="192.168.127.141"
class SetFirmwareRequest(object):
    """
    Generate a "SetFirmware" SOAP request.

    Params
    ------
    firmware_file: Optional. The name of a file to base64 encode into
                   the SOAP request. If no file is provided, a string
                   of As is used (unencoded) in its place.
    logger:        Optional. A Bowcaster Logging object. If a logger
                   is not provided, one will be instantiated.
    """
    # Minimum body size used for the all-As padding payload.
    MIN_CONTENT_LENGTH = 102401

    def __init__(self, firmware_file=None, logger=None):
        b64encode = True
        if not logger:
            logger = Logging(max_level=Logging.DEBUG)
        if firmware_file:
            logger.LOG_INFO("Reading firmware data from: %s" % firmware_file)
            # Close the file promptly instead of leaking the descriptor.
            with open(firmware_file, "rb") as firmware:
                firmware_data = firmware.read()
        else:
            b64encode = False
            logger.LOG_INFO("Generating padding of As in place of firmware data.")
            firmware_data = "A" * self.MIN_CONTENT_LENGTH
        self.request_body = SetFirmwareBody(firmware_data, b64encode=b64encode, logger=logger)
        content_length = len(self.request_body)
        self.request_headers = SetFirmwareRequestHeaders(content_length)

    def __str__(self):
        # Full request = HTTP headers followed by the SOAP body.
        return str(self.request_headers) + str(self.request_body)
class SetFirmwareRequestHeaders(object):
    """
    Class to generate the HTTP headers for a "SetFirmware" SOAP request.

    Params
    ------
    content_length: Value to specify for the Content-Length header.
    """
    def __init__(self, content_length):
        # Header template mimics a Python urllib client posting to the
        # device's local SOAP endpoint; only Content-Length varies.
        template = ("POST /soap/server_sa/SetFirmware HTTP/1.1\r\n"
                    "Accept-Encoding: identity\r\n"
                    "Content-Length: %d\r\n"
                    "Soapaction: \"urn:DeviceConfig\"\r\n"
                    "Host: 127.0.0.1\r\n"
                    "User-Agent: Python-urllib/2.7\r\n"
                    "Connection: close\r\n"
                    "Content-Type: text/xml ;charset=\"utf-8\"\r\n\r\n")
        self.headers = template % (content_length)

    def __str__(self):
        return self.headers
class SetFirmwareBody(object):
    """
    Class to generate the body of a "SetFirmware" SOAP request.

    Params
    ------
    firmware_data: Data (str or bytes) to encapsulate in the request.
    b64encode:     Optional. Boolean flag whether to base64 encode firmware_data.
    logger:        Optional. A Bowcaster Logging object. If a logger
                   is not provided, one will be instantiated.
    """
    SOAP_REQUEST_START="<SOAP-ENV:Body><NewFirmware>"
    SOAP_REQUEST_END="</NewFirmware></SOAP-ENV:Body>"

    def __init__(self, firmware_data, b64encode=True, logger=None):
        if not logger:
            logger = Logging(max_level=Logging.DEBUG)
        self.logger = logger
        logger.LOG_DEBUG("Building SetFirmware request body.")
        logger.LOG_DEBUG("Length of firmware: %d" % len(firmware_data))
        if b64encode:
            # Accept bytes or str and always store the encoded payload as
            # text: the original code concatenated the raw b64encode() result
            # with str constants, which raises TypeError on Python 3.
            raw = firmware_data if isinstance(firmware_data, bytes) else firmware_data.encode()
            self.encoded_firmware = base64.b64encode(raw).decode("ascii")
        else:
            self.encoded_firmware = firmware_data
        logger.LOG_DEBUG("Length of encoded firmware: %d" % len(self.encoded_firmware))

    def __len__(self):
        return len(self.SOAP_REQUEST_START + self.encoded_firmware + self.SOAP_REQUEST_END)

    def __str__(self):
        return self.SOAP_REQUEST_START + self.encoded_firmware + self.SOAP_REQUEST_END
def special_upnp_send(addr, port, data):
    """Send *data* to (addr, port) in two chunks with deliberate pauses.

    Only the first 8190 bytes go out immediately so the server's first
    recv() sees just that chunk; after a short pause the remainder follows,
    and a longer pause lets the server's 1-second select() loop time out
    (returning an error) before the connection is closed.
    """
    connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    connection.connect((addr, port))
    # Only send the first 8190 bytes of the request ...
    connection.send(data[:8190])
    # ... and sleep to ensure the first recv() only gets this first chunk.
    time.sleep(1)
    # The server should be in upnp_receiv_firmware_packets() by now, so the
    # rest can be sent.
    connection.send(data[8190:])
    # Sleep a bit more so the server doesn't end up in an infinite select()
    # loop: its timeout is 1 second, so give it time to go back to select()
    # and for the timeout to fire.
    time.sleep(10)
    connection.close()
def main(firmware_file=None):
    """Build a SetFirmware request (optionally embedding *firmware_file*),
    save a copy to ./request.bin, and send it to the target HOST."""
    logger = Logging(max_level=Logging.DEBUG)
    request = SetFirmwareRequest(firmware_file=firmware_file, logger=logger)
    # Render once and reuse (the original called str(request) twice).
    request_data = str(request)
    # write out the request to a file so we can easily analyze what we sent.
    logger.LOG_DEBUG("Writing request to request.bin for analysis.")
    with open("./request.bin", "wb") as dump_file:
        # The request is pure ASCII (HTTP headers + base64 body), so this
        # encode is safe and keeps the binary write working on Python 3.
        dump_file.write(request_data.encode("ascii"))
    logger.LOG_DEBUG("Done.")
    logger.LOG_INFO("Sending special UPnP request to host: %s" % HOST)
    special_upnp_send(HOST, 5000, request_data)
    logger.LOG_INFO("Done.")
if __name__ == "__main__":
    # Optional first argument: path of the firmware image to embed.
    try:
        firmware_file = sys.argv[1]
    except IndexError:
        # Narrowed from a bare except, which also swallowed KeyboardInterrupt
        # and masked unrelated errors.
        firmware_file = None
    main(firmware_file)
| 32.903846 | 90 | 0.633742 |
import sys
import socket
import time
import base64
from bowcaster.common import Logging
HOST="10.12.34.1"
class SetFirmwareRequest(object):
MIN_CONTENT_LENGTH=102401
def __init__(self,firmware_file=None,logger=None):
b64encode=True
if not logger:
logger=Logging(max_level=Logging.DEBUG)
if firmware_file:
logger.LOG_INFO("Reading firmware data from: %s" % firmware_file)
firmware_data=open(firmware_file,"rb").read()
else:
b64encode=False
logger.LOG_INFO("Generating padding of As in place of firmware data.")
firmware_data="A"*self.MIN_CONTENT_LENGTH
self.request_body=SetFirmwareBody(firmware_data,b64encode=b64encode,logger=logger)
content_length=len(self.request_body)
self.request_headers=SetFirmwareRequestHeaders(content_length)
def __str__(self):
return str(self.request_headers)+str(self.request_body)
class SetFirmwareRequestHeaders(object):
def __init__(self,content_length):
headers="".join(["POST /soap/server_sa/SetFirmware HTTP/1.1\r\n",
"Accept-Encoding: identity\r\n",
"Content-Length: %d\r\n",
"Soapaction: \"urn:DeviceConfig\"\r\n",
"Host: 127.0.0.1\r\n",
"User-Agent: Python-urllib/2.7\r\n",
"Connection: close\r\n",
"Content-Type: text/xml ;charset=\"utf-8\"\r\n\r\n"])
self.headers=headers % (content_length)
def __str__(self):
return self.headers
class SetFirmwareBody(object):
SOAP_REQUEST_START="<SOAP-ENV:Body><NewFirmware>"
SOAP_REQUEST_END="</NewFirmware></SOAP-ENV:Body>"
def __init__(self,firmware_data,b64encode=True,logger=None):
if not logger:
logger=Logging(max_level=Logging.DEBUG)
self.logger=logger
logger.LOG_DEBUG("Building SetFirmware request body.")
logger.LOG_DEBUG("Length of firmware: %d" % len(firmware_data))
if b64encode:
self.encoded_firmware=base64.b64encode(firmware_data)
else:
self.encoded_firmware=firmware_data
logger.LOG_DEBUG("Length of encoded firmware: %d" % len(self.encoded_firmware))
def __len__(self):
return len(self.SOAP_REQUEST_START+self.encoded_firmware+self.SOAP_REQUEST_END)
def __str__(self):
return self.SOAP_REQUEST_START+self.encoded_firmware+self.SOAP_REQUEST_END
def special_upnp_send(addr,port,data):
sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.connect((addr,port))
sock.send(data[:8190])
time.sleep(1)
sock.send(data[8190:])
time.sleep(10)
sock.close()
def main(firmware_file=None):
logger=Logging(max_level=Logging.DEBUG)
request=SetFirmwareRequest(firmware_file=firmware_file,logger=logger)
logger.LOG_DEBUG("Writing request to request.bin for analysis.")
open("./request.bin","wb").write(str(request))
logger.LOG_DEBUG("Done.")
logger.LOG_INFO("Sending special UPnP request to host: %s" % HOST)
special_upnp_send(HOST,5000,str(request))
logger.LOG_INFO("Done.")
if __name__ == "__main__":
try:
firmware_file=sys.argv[1]
except:
firmware_file=None
main(firmware_file)
| true | true |
f71adbd3d7f37d3e3cf4898f63bddfd194187306 | 4,593 | py | Python | DQMOffline/L1Trigger/test/runDQMOffline_step1_L1TStage2CaloLayer2_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | 1 | 2019-08-09T08:42:11.000Z | 2019-08-09T08:42:11.000Z | DQMOffline/L1Trigger/test/runDQMOffline_step1_L1TStage2CaloLayer2_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | DQMOffline/L1Trigger/test/runDQMOffline_step1_L1TStage2CaloLayer2_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | 1 | 2019-04-03T19:23:27.000Z | 2019-04-03T19:23:27.000Z | import os
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
from Configuration.StandardSequences.Eras import eras
def get_root_files(path):
    """Return file:// URLs for every .root file directly inside *path*."""
    root_names = [name for name in os.listdir(path) if name.endswith(".root")]
    return ['file://{0}'.format(os.path.join(path, name)) for name in root_names]
# Command-line options (cmsRun script.py sample=... maxEvents=... outputFile=...).
options = VarParsing('analysis')
options.register(
    'sample',
    'TTJet',
    VarParsing.multiplicity.singleton,
    VarParsing.varType.string,
)
options.setDefault('maxEvents', 2000)
options.setDefault(
    'outputFile', 'L1TOffline_L1TStage2CaloLayer2_job1_RAW2DIGI_RECO_DQM.root')
options.parseArguments()
# Local input datasets, keyed by sample name.
inputFiles = {
    'TTJet': get_root_files('/data/TTJet/reco'),
    'DoubleEG': get_root_files('/data/DoubleEG'),
}
inputFilesRAW = {
    'TTJet': get_root_files('/data/TTJet/raw'),
}
process = cms.Process('L1TStage2EmulatorDQM', eras.Run2_2016)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load(
    'Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# load DQM
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
# Report progress ten times over the run.
process.MessageLogger.cerr.FwkReport.reportEvery = int(options.maxEvents / 10)
process.maxEvents = cms.untracked.PSet(
    input=cms.untracked.int32(options.maxEvents)
)
# Input source
process.source = cms.Source(
    "PoolSource",
    fileNames=cms.untracked.vstring(inputFiles[options.sample]),
)
# The MC sample also needs the matching RAW tier as secondary input.
if options.sample == 'TTJet':
    process.source.secondaryFileNames = cms.untracked.vstring(inputFilesRAW[
        'TTJet'])
process.options = cms.untracked.PSet(
)
# Output definition
process.DQMoutput = cms.OutputModule(
    "DQMRootOutputModule",
    fileName=cms.untracked.string(options.outputFile)
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
# MC sample uses the MC conditions tag; everything else is treated as data.
if options.sample == 'TTJet':
    process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
else:
    process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.load('DQMOffline.L1Trigger.L1TEtSumJetOffline_cfi')
process.load('DQMOffline.L1Trigger.L1TEGammaOffline_cfi')
process.load('DQMOffline.L1Trigger.L1TTauOffline_cfi')
# Set DEBUG in the environment to get verbose MessageLogger output.
if os.environ.get('DEBUG', False):
    process.MessageLogger.cout.threshold = cms.untracked.string('DEBUG')
    process.MessageLogger.debugModules = cms.untracked.vstring(
        '*',
    )
# pfMETT1 from https://github.com/cms-sw/cmssw/blob/master/DQMOffline/JetMET/python/jetMETDQMOfflineSource_cff.py#L109,
# is difficult to set up, let's use pfMet for testing
process.l1tPFMetNoMuForDQM.pfMETCollection = 'pfMet'
process.dqmoffline_step = cms.Path(
    process.goodPFJetsForL1T *
    process.l1tPFMetNoMuForDQM *
    process.l1tEtSumJetOfflineDQMEmu +
    process.l1tEtSumJetOfflineDQM +
    process.l1tEGammaOfflineDQM +
    process.l1tEGammaOfflineDQMEmu +
    process.l1tTauOfflineDQM +
    process.l1tTauOfflineDQMEmu
)
# Data samples have no emulator collections, so drop the Emu modules there.
if options.sample != 'TTJet':
    process.dqmoffline_step.remove(process.l1tEtSumJetOfflineDQMEmu)
    process.dqmoffline_step.remove(process.l1tEGammaOfflineDQMEmu)
    process.dqmoffline_step.remove(process.l1tTauOfflineDQMEmu)
process.DQMoutput_step = cms.EndPath(process.DQMoutput)
# Schedule definition
process.schedule = cms.Schedule(
    process.raw2digi_step,
)
# customisation of the process.
# Automatic addition of the customisation function from
# L1Trigger.Configuration.customiseReEmul
from L1Trigger.Configuration.customiseReEmul import L1TReEmulFromRAW
# call to customisation function L1TReEmulFromRAW imported from
# L1Trigger.Configuration.customiseReEmul
# complains about
# AttributeError: 'Process' object has no attribute 'simRctDigis'
# process = L1TReEmulFromRAW(process)
process.schedule.append(process.dqmoffline_step)
process.schedule.append(process.DQMoutput_step)
| 32.34507 | 119 | 0.775963 | import os
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
from Configuration.StandardSequences.Eras import eras
def get_root_files(path):
files = os.listdir(path)
root_files = [f for f in files if f.endswith(".root")]
full_paths = [os.path.join(path, f) for f in root_files]
urls = ['file://{0}'.format(f) for f in full_paths]
return urls
options = VarParsing('analysis')
options.register(
'sample',
'TTJet',
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
)
options.setDefault('maxEvents', 2000)
options.setDefault(
'outputFile', 'L1TOffline_L1TStage2CaloLayer2_job1_RAW2DIGI_RECO_DQM.root')
options.parseArguments()
inputFiles = {
'TTJet': get_root_files('/data/TTJet/reco'),
'DoubleEG': get_root_files('/data/DoubleEG'),
}
inputFilesRAW = {
'TTJet': get_root_files('/data/TTJet/raw'),
}
process = cms.Process('L1TStage2EmulatorDQM', eras.Run2_2016)
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.RawToDigi_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load(
'Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.load("DQMServices.Core.DQM_cfg")
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = int(options.maxEvents / 10)
process.maxEvents = cms.untracked.PSet(
input=cms.untracked.int32(options.maxEvents)
)
process.source = cms.Source(
"PoolSource",
fileNames=cms.untracked.vstring(inputFiles[options.sample]),
)
if options.sample == 'TTJet':
process.source.secondaryFileNames = cms.untracked.vstring(inputFilesRAW[
'TTJet'])
process.options = cms.untracked.PSet(
)
process.DQMoutput = cms.OutputModule(
"DQMRootOutputModule",
fileName=cms.untracked.string(options.outputFile)
)
from Configuration.AlCa.GlobalTag import GlobalTag
if options.sample == 'TTJet':
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
else:
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data', '')
process.raw2digi_step = cms.Path(process.RawToDigi)
process.load('DQMOffline.L1Trigger.L1TEtSumJetOffline_cfi')
process.load('DQMOffline.L1Trigger.L1TEGammaOffline_cfi')
process.load('DQMOffline.L1Trigger.L1TTauOffline_cfi')
if os.environ.get('DEBUG', False):
process.MessageLogger.cout.threshold = cms.untracked.string('DEBUG')
process.MessageLogger.debugModules = cms.untracked.vstring(
'*',
)
ess.l1tPFMetNoMuForDQM.pfMETCollection = 'pfMet'
process.dqmoffline_step = cms.Path(
process.goodPFJetsForL1T *
process.l1tPFMetNoMuForDQM *
process.l1tEtSumJetOfflineDQMEmu +
process.l1tEtSumJetOfflineDQM +
process.l1tEGammaOfflineDQM +
process.l1tEGammaOfflineDQMEmu +
process.l1tTauOfflineDQM +
process.l1tTauOfflineDQMEmu
)
if options.sample != 'TTJet':
process.dqmoffline_step.remove(process.l1tEtSumJetOfflineDQMEmu)
process.dqmoffline_step.remove(process.l1tEGammaOfflineDQMEmu)
process.dqmoffline_step.remove(process.l1tTauOfflineDQMEmu)
process.DQMoutput_step = cms.EndPath(process.DQMoutput)
# Schedule definition
process.schedule = cms.Schedule(
process.raw2digi_step,
)
# customisation of the process.
# Automatic addition of the customisation function from
# L1Trigger.Configuration.customiseReEmul
from L1Trigger.Configuration.customiseReEmul import L1TReEmulFromRAW
# call to customisation function L1TReEmulFromRAW imported from
# L1Trigger.Configuration.customiseReEmul
# complains about
# AttributeError: 'Process' object has no attribute 'simRctDigis'
# process = L1TReEmulFromRAW(process)
process.schedule.append(process.dqmoffline_step)
process.schedule.append(process.DQMoutput_step)
| true | true |
f71ade1259b4cf0d7e2485e0b9af1fdc780966f1 | 4,862 | py | Python | tests/core/test_tracker_stores.py | vinit134/rasa | 58c122fbf459c587fd947d48af5c76ae30bf39be | [
"Apache-2.0"
] | 3 | 2020-02-04T08:44:02.000Z | 2021-05-25T19:46:55.000Z | tests/core/test_tracker_stores.py | vinit134/rasa | 58c122fbf459c587fd947d48af5c76ae30bf39be | [
"Apache-2.0"
] | 21 | 2019-12-16T17:37:54.000Z | 2020-07-06T06:19:04.000Z | tests/core/test_tracker_stores.py | zijiannc/RASA_NLU | 4ffafb62b7414cabae07149533e01afe5fc26c14 | [
"Apache-2.0"
] | 1 | 2021-03-08T15:04:09.000Z | 2021-03-08T15:04:09.000Z | import pytest
from rasa.core.channels.channel import UserMessage
from rasa.core.domain import Domain
from rasa.core.events import SlotSet, ActionExecuted, Restarted
from rasa.core.tracker_store import (
TrackerStore,
InMemoryTrackerStore,
RedisTrackerStore,
SQLTrackerStore,
)
from rasa.utils.endpoints import EndpointConfig, read_endpoint_config
from tests.core.conftest import DEFAULT_ENDPOINTS_FILE
domain = Domain.load("data/test_domains/default.yml")
def test_get_or_create():
    """A slot value set on a tracker survives a save/reload round trip."""
    key, value = "location", "Easter Island"
    store = InMemoryTrackerStore(domain)
    tracker = store.get_or_create_tracker(UserMessage.DEFAULT_SENDER_ID)
    tracker.update(SlotSet(key, value))
    assert tracker.get_slot(key) == value
    store.save(tracker)
    reloaded = store.get_or_create_tracker(UserMessage.DEFAULT_SENDER_ID)
    assert reloaded.get_slot(key) == value
def test_restart_after_retrieval_from_tracker_store(default_domain):
    """The index after the latest restart is preserved across save/retrieve."""
    store = InMemoryTrackerStore(default_domain)
    tracker = store.get_or_create_tracker("myuser")
    for _ in range(4):
        tracker.update(ActionExecuted("action_listen"))
    tracker.update(Restarted())
    index_before = tracker.idx_after_latest_restart()
    store.save(tracker)
    restored = store.retrieve("myuser")
    assert index_before == restored.idx_after_latest_restart()
def test_tracker_store_remembers_max_history(default_domain):
    """max_event_history set at creation survives a save/retrieve round trip."""
    store = InMemoryTrackerStore(default_domain)
    tracker = store.get_or_create_tracker("myuser", max_event_history=42)
    tracker.update(Restarted())
    store.save(tracker)
    restored = store.retrieve("myuser")
    assert tracker._max_event_history == restored._max_event_history == 42
def test_tracker_store_endpoint_config_loading():
    """The tracker_store section of the default endpoints file parses fully."""
    loaded = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "tracker_store")
    expected = EndpointConfig.from_dict(
        {
            "type": "redis",
            "url": "localhost",
            "port": 6379,
            "db": 0,
            "password": "password",
            "timeout": 30000,
        }
    )
    assert loaded == expected
def test_find_tracker_store(default_domain):
    # The endpoint config read here describes a redis store, so
    # find_tracker_store should resolve to a RedisTrackerStore.
    store = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "tracker_store")
    tracker_store = RedisTrackerStore(
        domain=default_domain,
        host="localhost",
        port=6379,
        db=0,
        password="password",
        record_exp=3000,
    )
    # NOTE(review): the isinstance arguments look inverted (reference instance
    # checked against the type of the found store), but the effective type
    # comparison is equivalent.
    assert isinstance(
        tracker_store, type(TrackerStore.find_tracker_store(default_domain, store))
    )
class ExampleTrackerStore(RedisTrackerStore):
    # Custom tracker store referenced by module path from
    # data/test_endpoints/custom_tracker_endpoints.yml; find_tracker_store
    # must instantiate it with the keyword arguments of that endpoint config,
    # so the parameter names below are part of the contract.
    def __init__(self, domain, url, port, db, password, record_exp):
        # RedisTrackerStore expects ``host``; the endpoint config supplies ``url``.
        super(ExampleTrackerStore, self).__init__(
            domain, host=url, port=port, db=db, password=password, record_exp=record_exp
        )
def test_tracker_store_from_string(default_domain):
    """A module-path type in the endpoint config resolves to that class."""
    config = read_endpoint_config(
        "data/test_endpoints/custom_tracker_endpoints.yml", "tracker_store")
    resolved = TrackerStore.find_tracker_store(default_domain, config)
    assert isinstance(resolved, ExampleTrackerStore)


def test_tracker_store_from_invalid_module(default_domain):
    """An unresolvable module path falls back to the in-memory store."""
    config = read_endpoint_config(
        "data/test_endpoints/custom_tracker_endpoints.yml", "tracker_store")
    config.type = "a.module.which.cannot.be.found"
    resolved = TrackerStore.find_tracker_store(default_domain, config)
    assert isinstance(resolved, InMemoryTrackerStore)


def test_tracker_store_from_invalid_string(default_domain):
    """A nonsensical type string falls back to the in-memory store."""
    config = read_endpoint_config(
        "data/test_endpoints/custom_tracker_endpoints.yml", "tracker_store")
    config.type = "any string"
    resolved = TrackerStore.find_tracker_store(default_domain, config)
    assert isinstance(resolved, InMemoryTrackerStore)
@pytest.mark.parametrize(
"full_url",
[
"postgresql://localhost",
"postgresql://localhost:5432",
"postgresql://user:secret@localhost",
],
)
def test_get_db_url_with_fully_specified_url(full_url):
assert SQLTrackerStore._get_db_url(host=full_url) == full_url
def test_get_db_url_with_port_in_host():
host = "localhost:1234"
dialect = "postgresql"
db = "mydb"
expected = "{}://{}/{}".format(dialect, host, db)
assert (
str(SQLTrackerStore._get_db_url(dialect="postgresql", host=host, db=db))
== expected
)
def test_get_db_url_with_correct_host():
expected = "postgresql://localhost:5005/mydb"
assert (
str(
SQLTrackerStore._get_db_url(
dialect="postgresql", host="localhost", port=5005, db="mydb"
)
)
== expected
)
| 29.646341 | 88 | 0.719663 | import pytest
from rasa.core.channels.channel import UserMessage
from rasa.core.domain import Domain
from rasa.core.events import SlotSet, ActionExecuted, Restarted
from rasa.core.tracker_store import (
TrackerStore,
InMemoryTrackerStore,
RedisTrackerStore,
SQLTrackerStore,
)
from rasa.utils.endpoints import EndpointConfig, read_endpoint_config
from tests.core.conftest import DEFAULT_ENDPOINTS_FILE
domain = Domain.load("data/test_domains/default.yml")
def test_get_or_create():
slot_key = "location"
slot_val = "Easter Island"
store = InMemoryTrackerStore(domain)
tracker = store.get_or_create_tracker(UserMessage.DEFAULT_SENDER_ID)
ev = SlotSet(slot_key, slot_val)
tracker.update(ev)
assert tracker.get_slot(slot_key) == slot_val
store.save(tracker)
again = store.get_or_create_tracker(UserMessage.DEFAULT_SENDER_ID)
assert again.get_slot(slot_key) == slot_val
def test_restart_after_retrieval_from_tracker_store(default_domain):
store = InMemoryTrackerStore(default_domain)
tr = store.get_or_create_tracker("myuser")
synth = [ActionExecuted("action_listen") for _ in range(4)]
for e in synth:
tr.update(e)
tr.update(Restarted())
latest_restart = tr.idx_after_latest_restart()
store.save(tr)
tr2 = store.retrieve("myuser")
latest_restart_after_loading = tr2.idx_after_latest_restart()
assert latest_restart == latest_restart_after_loading
def test_tracker_store_remembers_max_history(default_domain):
store = InMemoryTrackerStore(default_domain)
tr = store.get_or_create_tracker("myuser", max_event_history=42)
tr.update(Restarted())
store.save(tr)
tr2 = store.retrieve("myuser")
assert tr._max_event_history == tr2._max_event_history == 42
def test_tracker_store_endpoint_config_loading():
cfg = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "tracker_store")
assert cfg == EndpointConfig.from_dict(
{
"type": "redis",
"url": "localhost",
"port": 6379,
"db": 0,
"password": "password",
"timeout": 30000,
}
)
def test_find_tracker_store(default_domain):
store = read_endpoint_config(DEFAULT_ENDPOINTS_FILE, "tracker_store")
tracker_store = RedisTrackerStore(
domain=default_domain,
host="localhost",
port=6379,
db=0,
password="password",
record_exp=3000,
)
assert isinstance(
tracker_store, type(TrackerStore.find_tracker_store(default_domain, store))
)
class ExampleTrackerStore(RedisTrackerStore):
def __init__(self, domain, url, port, db, password, record_exp):
super(ExampleTrackerStore, self).__init__(
domain, host=url, port=port, db=db, password=password, record_exp=record_exp
)
def test_tracker_store_from_string(default_domain):
endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
store_config = read_endpoint_config(endpoints_path, "tracker_store")
tracker_store = TrackerStore.find_tracker_store(default_domain, store_config)
assert isinstance(tracker_store, ExampleTrackerStore)
def test_tracker_store_from_invalid_module(default_domain):
endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
store_config = read_endpoint_config(endpoints_path, "tracker_store")
store_config.type = "a.module.which.cannot.be.found"
tracker_store = TrackerStore.find_tracker_store(default_domain, store_config)
assert isinstance(tracker_store, InMemoryTrackerStore)
def test_tracker_store_from_invalid_string(default_domain):
endpoints_path = "data/test_endpoints/custom_tracker_endpoints.yml"
store_config = read_endpoint_config(endpoints_path, "tracker_store")
store_config.type = "any string"
tracker_store = TrackerStore.find_tracker_store(default_domain, store_config)
assert isinstance(tracker_store, InMemoryTrackerStore)
@pytest.mark.parametrize(
"full_url",
[
"postgresql://localhost",
"postgresql://localhost:5432",
"postgresql://user:secret@localhost",
],
)
def test_get_db_url_with_fully_specified_url(full_url):
assert SQLTrackerStore._get_db_url(host=full_url) == full_url
def test_get_db_url_with_port_in_host():
host = "localhost:1234"
dialect = "postgresql"
db = "mydb"
expected = "{}://{}/{}".format(dialect, host, db)
assert (
str(SQLTrackerStore._get_db_url(dialect="postgresql", host=host, db=db))
== expected
)
def test_get_db_url_with_correct_host():
expected = "postgresql://localhost:5005/mydb"
assert (
str(
SQLTrackerStore._get_db_url(
dialect="postgresql", host="localhost", port=5005, db="mydb"
)
)
== expected
)
| true | true |
f71adf5f0868af6fbec61be245b281682e33dcf5 | 3,884 | py | Python | configs/top_down/resnext/coco/resnext101_coco_256x192.py | ssumin6/buob | 4fb4537423a993cd2894f54cb12f5f3b3fb73141 | [
"Apache-2.0"
] | 5 | 2022-01-13T15:06:45.000Z | 2022-01-28T19:39:54.000Z | configs/top_down/resnext/coco/resnext101_coco_256x192.py | ssumin6/buob | 4fb4537423a993cd2894f54cb12f5f3b3fb73141 | [
"Apache-2.0"
] | null | null | null | configs/top_down/resnext/coco/resnext101_coco_256x192.py | ssumin6/buob | 4fb4537423a993cd2894f54cb12f5f3b3fb73141 | [
"Apache-2.0"
] | 1 | 2021-06-17T13:56:23.000Z | 2021-06-17T13:56:23.000Z | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
# model settings
model = dict(
type='TopDown',
pretrained='mmcls://resnext101',
backbone=dict(type='ResNeXt', depth=101),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=2048,
out_channels=channel_cfg['num_output_channels'],
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| 27.742857 | 79 | 0.627703 | log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
channel_cfg = dict(
num_output_channels=17,
dataset_joints=17,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
],
inference_channel=[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
])
model = dict(
type='TopDown',
pretrained='mmcls://resnext101',
backbone=dict(type='ResNeXt', depth=101),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=2048,
out_channels=channel_cfg['num_output_channels'],
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[192, 256],
heatmap_size=[48, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=False,
det_bbox_thr=0.0,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownHalfBodyTransform',
num_joints_half_body=8,
prob_half_body=0.3),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox_score', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox_score',
'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/coco'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
train=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
img_prefix=f'{data_root}/train2017/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownCocoDataset',
ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
img_prefix=f'{data_root}/val2017/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
| true | true |
f71adf69a610b566f88a21e94d9a8d804f8523db | 7,872 | py | Python | sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/ml/azure-ai-ml/azure/ai/ml/entities/_assets/_artifacts/model.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from os import PathLike
from pathlib import Path
from typing import Any, Dict, Union
from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, PARAMS_OVERRIDE_KEY, ArmConstants, LONG_URI_FORMAT, AssetTypes
from azure.ai.ml._restclient.v2022_05_01.models import (
ModelContainerData,
ModelVersionDetails,
ModelVersionData,
FlavorData,
)
from azure.ai.ml._schema import ModelSchema
from azure.ai.ml._utils._arm_id_utils import AMLNamedArmId, AMLVersionedArmId
from azure.ai.ml._utils.utils import load_yaml, snake_to_pascal
from azure.ai.ml.entities._assets import Artifact
from .artifact import ArtifactStorageInfo
from azure.ai.ml.entities._util import load_from_dict, get_md5_string
from azure.ai.ml._utils._asset_utils import get_ignore_file, get_object_hash
class Model(Artifact):
"""Model for training and scoring.
:param name: Name of the resource.
:type name: str
:param version: Version of the resource.
:type version: str
:param type: The storage format for this entity. Used for NCD. Possible values include:
"custom_model", "mlflow_model", "triton_model".
:type type: str
:param utc_time_created: Date and time when the model was created, in
UTC ISO 8601 format. (e.g. '2020-10-19 17:44:02.096572')
:type utc_time_created: str
:param flavors: The flavors in which the model can be interpreted.
(e.g. {sklearn: {sklearn_version: 0.23.2}, python_function: {loader_module: office.plrmodel, python_version: 3.6})
:type flavors: Dict[str, Any]
:param path: A remote uri or a local path pointing at a model.
Example: "azureml://subscriptions/my-sub-id/resourcegroups/my-rg/workspaces/myworkspace/datastores/mydatastore/paths/path_on_datastore/"
:type path: str
:param description: Description of the resource.
:type description: str
:param tags: Tag dictionary. Tags can be added, removed, and updated.
:type tags: dict[str, str]
:param properties: The asset property dictionary.
:type properties: dict[str, str]
:param kwargs: A dictionary of additional configuration parameters.
:type kwargs: dict
"""
def __init__(
self,
*,
name: str = None,
version: str = None,
type: str = None,
path: Union[str, PathLike] = None,
utc_time_created: str = None,
flavors: Dict[str, Dict[str, Any]] = None,
description: str = None,
tags: Dict = None,
properties: Dict = None,
**kwargs,
):
self.job_name = kwargs.pop("job_name", None)
super().__init__(
name=name,
version=version,
path=path,
description=description,
tags=tags,
properties=properties,
**kwargs,
)
self.utc_time_created = utc_time_created
self.flavors = dict(flavors) if flavors else None
self._arm_type = ArmConstants.MODEL_VERSION_TYPE
self.type = type or AssetTypes.CUSTOM_MODEL
if self._is_anonymous and self.path:
_ignore_file = get_ignore_file(self.path)
_upload_hash = get_object_hash(self.path, _ignore_file)
self.name = get_md5_string(_upload_hash)
@classmethod
def load(
cls,
path: Union[PathLike, str],
params_override: list = None,
**kwargs,
) -> "Model":
"""Construct a model object from yaml file.
:param path: Path to a local file as the source.
:type path: str
:param params_override: Fields to overwrite on top of the yaml file. Format is [{"field1": "value1"}, {"field2": "value2"}]
:type params_override: list
:param kwargs: A dictionary of additional configuration parameters.
:type kwargs: dict
:return: Constructed model object.
:rtype: Model
"""
yaml_dict = load_yaml(path)
return cls._load(data=yaml_dict, yaml_path=path, params_override=params_override, **kwargs)
# For lack of bidirectional map in Python, defining the mapping in two ways in one dictionary
@classmethod
def _load(
cls,
data: Dict = None,
yaml_path: Union[PathLike, str] = None,
params_override: list = None,
**kwargs,
) -> "Model":
params_override = params_override or []
data = data or {}
context = {
BASE_PATH_CONTEXT_KEY: Path(yaml_path).parent if yaml_path else Path("./"),
PARAMS_OVERRIDE_KEY: params_override,
}
return load_from_dict(ModelSchema, data, context, **kwargs)
def _to_dict(self) -> Dict:
return ModelSchema(context={BASE_PATH_CONTEXT_KEY: "./"}).dump(self)
@classmethod
def _from_rest_object(cls, model_rest_object: ModelVersionData) -> "Model":
rest_model_version: ModelVersionDetails = model_rest_object.properties
arm_id = AMLVersionedArmId(arm_id=model_rest_object.id)
flavors = {key: flavor.data for key, flavor in rest_model_version.flavors.items()}
model = Model(
id=model_rest_object.id,
name=arm_id.asset_name,
version=arm_id.asset_version,
path=rest_model_version.model_uri,
description=rest_model_version.description,
tags=rest_model_version.tags,
flavors=flavors,
properties=rest_model_version.properties,
creation_context=model_rest_object.system_data,
type=rest_model_version.model_type,
job_name=rest_model_version.job_name,
)
return model
@classmethod
def _from_container_rest_object(cls, model_container_rest_object: ModelContainerData) -> "Model":
model = Model(
name=model_container_rest_object.name,
version="1",
id=model_container_rest_object.id,
creation_context=model_container_rest_object.system_data,
)
model.latest_version = model_container_rest_object.properties.latest_version
# Setting version to None since if version is not provided it is defaulted to "1".
# This should go away once container concept is finalized.
model.version = None
return model
def _to_rest_object(self) -> ModelVersionData:
model_version = ModelVersionDetails(
description=self.description,
tags=self.tags,
properties=self.properties,
flavors={key: FlavorData(data=dict(value)) for key, value in self.flavors.items()}
if self.flavors
else None, # flatten OrderedDict to dict
model_type=self.type,
model_uri=self.path,
is_anonymous=self._is_anonymous,
)
model_version_resource = ModelVersionData(properties=model_version)
return model_version_resource
def _update_path(self, asset_artifact: ArtifactStorageInfo) -> None:
aml_datastore_id = AMLNamedArmId(asset_artifact.datastore_arm_id)
self.path = LONG_URI_FORMAT.format(
aml_datastore_id.subscription_id,
aml_datastore_id.resource_group_name,
aml_datastore_id.workspace_name,
aml_datastore_id.asset_name,
asset_artifact.relative_path,
)
def _to_arm_resource_param(self, **kwargs):
properties = self._to_rest_object().properties
return {
self._arm_type: {
ArmConstants.NAME: self.name,
ArmConstants.VERSION: self.version,
ArmConstants.PROPERTIES_PARAMETER_NAME: self._serialize.body(properties, "ModelVersionDetails"),
}
}
| 38.778325 | 144 | 0.650788 |
from os import PathLike
from pathlib import Path
from typing import Any, Dict, Union
from azure.ai.ml.constants import BASE_PATH_CONTEXT_KEY, PARAMS_OVERRIDE_KEY, ArmConstants, LONG_URI_FORMAT, AssetTypes
from azure.ai.ml._restclient.v2022_05_01.models import (
ModelContainerData,
ModelVersionDetails,
ModelVersionData,
FlavorData,
)
from azure.ai.ml._schema import ModelSchema
from azure.ai.ml._utils._arm_id_utils import AMLNamedArmId, AMLVersionedArmId
from azure.ai.ml._utils.utils import load_yaml, snake_to_pascal
from azure.ai.ml.entities._assets import Artifact
from .artifact import ArtifactStorageInfo
from azure.ai.ml.entities._util import load_from_dict, get_md5_string
from azure.ai.ml._utils._asset_utils import get_ignore_file, get_object_hash
class Model(Artifact):
def __init__(
self,
*,
name: str = None,
version: str = None,
type: str = None,
path: Union[str, PathLike] = None,
utc_time_created: str = None,
flavors: Dict[str, Dict[str, Any]] = None,
description: str = None,
tags: Dict = None,
properties: Dict = None,
**kwargs,
):
self.job_name = kwargs.pop("job_name", None)
super().__init__(
name=name,
version=version,
path=path,
description=description,
tags=tags,
properties=properties,
**kwargs,
)
self.utc_time_created = utc_time_created
self.flavors = dict(flavors) if flavors else None
self._arm_type = ArmConstants.MODEL_VERSION_TYPE
self.type = type or AssetTypes.CUSTOM_MODEL
if self._is_anonymous and self.path:
_ignore_file = get_ignore_file(self.path)
_upload_hash = get_object_hash(self.path, _ignore_file)
self.name = get_md5_string(_upload_hash)
@classmethod
def load(
cls,
path: Union[PathLike, str],
params_override: list = None,
**kwargs,
) -> "Model":
yaml_dict = load_yaml(path)
return cls._load(data=yaml_dict, yaml_path=path, params_override=params_override, **kwargs)
@classmethod
def _load(
cls,
data: Dict = None,
yaml_path: Union[PathLike, str] = None,
params_override: list = None,
**kwargs,
) -> "Model":
params_override = params_override or []
data = data or {}
context = {
BASE_PATH_CONTEXT_KEY: Path(yaml_path).parent if yaml_path else Path("./"),
PARAMS_OVERRIDE_KEY: params_override,
}
return load_from_dict(ModelSchema, data, context, **kwargs)
def _to_dict(self) -> Dict:
return ModelSchema(context={BASE_PATH_CONTEXT_KEY: "./"}).dump(self)
@classmethod
def _from_rest_object(cls, model_rest_object: ModelVersionData) -> "Model":
rest_model_version: ModelVersionDetails = model_rest_object.properties
arm_id = AMLVersionedArmId(arm_id=model_rest_object.id)
flavors = {key: flavor.data for key, flavor in rest_model_version.flavors.items()}
model = Model(
id=model_rest_object.id,
name=arm_id.asset_name,
version=arm_id.asset_version,
path=rest_model_version.model_uri,
description=rest_model_version.description,
tags=rest_model_version.tags,
flavors=flavors,
properties=rest_model_version.properties,
creation_context=model_rest_object.system_data,
type=rest_model_version.model_type,
job_name=rest_model_version.job_name,
)
return model
@classmethod
def _from_container_rest_object(cls, model_container_rest_object: ModelContainerData) -> "Model":
model = Model(
name=model_container_rest_object.name,
version="1",
id=model_container_rest_object.id,
creation_context=model_container_rest_object.system_data,
)
model.latest_version = model_container_rest_object.properties.latest_version
model.version = None
return model
def _to_rest_object(self) -> ModelVersionData:
model_version = ModelVersionDetails(
description=self.description,
tags=self.tags,
properties=self.properties,
flavors={key: FlavorData(data=dict(value)) for key, value in self.flavors.items()}
if self.flavors
else None,
model_type=self.type,
model_uri=self.path,
is_anonymous=self._is_anonymous,
)
model_version_resource = ModelVersionData(properties=model_version)
return model_version_resource
def _update_path(self, asset_artifact: ArtifactStorageInfo) -> None:
aml_datastore_id = AMLNamedArmId(asset_artifact.datastore_arm_id)
self.path = LONG_URI_FORMAT.format(
aml_datastore_id.subscription_id,
aml_datastore_id.resource_group_name,
aml_datastore_id.workspace_name,
aml_datastore_id.asset_name,
asset_artifact.relative_path,
)
def _to_arm_resource_param(self, **kwargs):
properties = self._to_rest_object().properties
return {
self._arm_type: {
ArmConstants.NAME: self.name,
ArmConstants.VERSION: self.version,
ArmConstants.PROPERTIES_PARAMETER_NAME: self._serialize.body(properties, "ModelVersionDetails"),
}
}
| true | true |
f71adf72ecef2e4ad8d1dacf64125bbdfd663a2d | 3,023 | py | Python | pixelsort/argparams.py | jackylu97/pixelsort | 24e36518f21636c201ad8624c831e08462a25414 | [
"MIT"
] | 570 | 2015-03-01T16:16:42.000Z | 2022-03-28T23:12:11.000Z | pixelsort/argparams.py | ebanaut/pixelsort | c4a823c8363e27fb0aebd4f8738ee82dc636f6a8 | [
"MIT"
] | 20 | 2016-03-25T16:28:16.000Z | 2021-11-11T21:39:28.000Z | pixelsort/argparams.py | ebanaut/pixelsort | c4a823c8363e27fb0aebd4f8738ee82dc636f6a8 | [
"MIT"
] | 79 | 2015-03-16T20:14:22.000Z | 2022-02-01T17:05:02.000Z | import argparse
import logging
from pixelsort.interval import choices as interval_choices
from pixelsort.sorting import choices as sorting_choices
from pixelsort.constants import DEFAULTS
def parse_args():
parser = argparse.ArgumentParser(description="Pixel mangle an image.")
parser.add_argument("image", help="Input image file path.")
parser.add_argument(
"-o",
"--output",
help="Output image file path, DEFAULTS to the time created.")
parser.add_argument("-i", "--int_function",
choices=interval_choices.keys(),
default=DEFAULTS["interval_function"],
help="Function to determine sorting intervals")
parser.add_argument("-f", "--int_file",
help="Image used for defining intervals.")
parser.add_argument(
"-t",
"--threshold",
type=float,
default=DEFAULTS["lower_threshold"],
help="Pixels darker than this are not sorted, between 0 and 1")
parser.add_argument(
"-u",
"--upper_threshold",
type=float,
default=DEFAULTS["upper_threshold"],
help="Pixels brighter than this are not sorted, between 0 and 1")
parser.add_argument(
"-c",
"--clength",
type=int,
default=DEFAULTS["clength"],
help="Characteristic length of random intervals")
parser.add_argument(
"-a",
"--angle",
type=float,
default=DEFAULTS["angle"],
help="Rotate the image by an angle (in degrees) before sorting")
parser.add_argument(
"-r",
"--randomness",
type=float,
default=DEFAULTS["randomness"],
help="What percentage of intervals are NOT sorted")
parser.add_argument("-s", "--sorting_function",
choices=sorting_choices.keys(),
default=DEFAULTS["sorting_function"],
help="Function to sort pixels by.")
parser.add_argument(
"-m", "--mask", help="Image used for masking parts of the image")
parser.add_argument(
"-l",
"--log_level",
default="WARNING",
help="Print more or less info",
choices=[
"DEBUG",
"INFO",
"WARNING",
"ERROR",
"CRITICAL"])
_args = parser.parse_args()
logging.basicConfig(
format="%(name)s: %(levelname)s - %(message)s",
level=logging.getLevelName(
_args.log_level))
return {
"image_input_path": _args.image,
"image_output_path": _args.output,
"interval_function": _args.int_function,
"interval_file_path": _args.int_file,
"lower_threshold": _args.threshold,
"upper_threshold": _args.upper_threshold,
"clength": _args.clength,
"angle": _args.angle,
"randomness": _args.randomness,
"sorting_function": _args.sorting_function,
"mask_path": _args.mask
}
| 33.966292 | 74 | 0.585511 | import argparse
import logging
from pixelsort.interval import choices as interval_choices
from pixelsort.sorting import choices as sorting_choices
from pixelsort.constants import DEFAULTS
def parse_args():
parser = argparse.ArgumentParser(description="Pixel mangle an image.")
parser.add_argument("image", help="Input image file path.")
parser.add_argument(
"-o",
"--output",
help="Output image file path, DEFAULTS to the time created.")
parser.add_argument("-i", "--int_function",
choices=interval_choices.keys(),
default=DEFAULTS["interval_function"],
help="Function to determine sorting intervals")
parser.add_argument("-f", "--int_file",
help="Image used for defining intervals.")
parser.add_argument(
"-t",
"--threshold",
type=float,
default=DEFAULTS["lower_threshold"],
help="Pixels darker than this are not sorted, between 0 and 1")
parser.add_argument(
"-u",
"--upper_threshold",
type=float,
default=DEFAULTS["upper_threshold"],
help="Pixels brighter than this are not sorted, between 0 and 1")
parser.add_argument(
"-c",
"--clength",
type=int,
default=DEFAULTS["clength"],
help="Characteristic length of random intervals")
parser.add_argument(
"-a",
"--angle",
type=float,
default=DEFAULTS["angle"],
help="Rotate the image by an angle (in degrees) before sorting")
parser.add_argument(
"-r",
"--randomness",
type=float,
default=DEFAULTS["randomness"],
help="What percentage of intervals are NOT sorted")
parser.add_argument("-s", "--sorting_function",
choices=sorting_choices.keys(),
default=DEFAULTS["sorting_function"],
help="Function to sort pixels by.")
parser.add_argument(
"-m", "--mask", help="Image used for masking parts of the image")
parser.add_argument(
"-l",
"--log_level",
default="WARNING",
help="Print more or less info",
choices=[
"DEBUG",
"INFO",
"WARNING",
"ERROR",
"CRITICAL"])
_args = parser.parse_args()
logging.basicConfig(
format="%(name)s: %(levelname)s - %(message)s",
level=logging.getLevelName(
_args.log_level))
return {
"image_input_path": _args.image,
"image_output_path": _args.output,
"interval_function": _args.int_function,
"interval_file_path": _args.int_file,
"lower_threshold": _args.threshold,
"upper_threshold": _args.upper_threshold,
"clength": _args.clength,
"angle": _args.angle,
"randomness": _args.randomness,
"sorting_function": _args.sorting_function,
"mask_path": _args.mask
}
| true | true |
f71ae016370c3402b30ac0827d01b9242344e952 | 1,491 | py | Python | example/blog/admin2.py | montiniz/django-admin2 | eb3ba7e98a68686f80af80c5c3b8c9c10296fe7a | [
"BSD-3-Clause"
] | 1 | 2015-01-16T23:00:09.000Z | 2015-01-16T23:00:09.000Z | example/blog/admin2.py | montiniz/django-admin2 | eb3ba7e98a68686f80af80c5c3b8c9c10296fe7a | [
"BSD-3-Clause"
] | null | null | null | example/blog/admin2.py | montiniz/django-admin2 | eb3ba7e98a68686f80af80c5c3b8c9c10296fe7a | [
"BSD-3-Clause"
] | 1 | 2017-01-18T08:27:21.000Z | 2017-01-18T08:27:21.000Z | # -*- coding: utf-8 -*-
from __future__ import division, absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy
import djadmin2
from djadmin2 import renderers
from djadmin2.actions import DeleteSelectedAction
# Import your custom models
from .actions import (CustomPublishAction, PublishAllItemsAction,
unpublish_items, unpublish_all_items)
from .models import Post, Comment
class CommentInline(djadmin2.Admin2TabularInline):
model = Comment
class PostAdmin(djadmin2.ModelAdmin2):
list_actions = [
DeleteSelectedAction, CustomPublishAction,
PublishAllItemsAction, unpublish_items,
unpublish_all_items,
]
inlines = [CommentInline]
search_fields = ('title', '^body')
list_display = ('title', 'body', 'published', "published_date",)
field_renderers = {
'title': renderers.title_renderer,
}
save_on_top = True
date_hierarchy = "published_date"
ordering = ["-published_date", "title",]
class CommentAdmin(djadmin2.ModelAdmin2):
search_fields = ('body', '=post__title')
list_filter = ['post', ]
actions_on_top = True
actions_on_bottom = True
actions_selection_counter = False
# Register the blog app with a verbose name
djadmin2.default.register_app_verbose_name(
'blog',
ugettext_lazy('My Blog')
)
# Register each model with the admin
djadmin2.default.register(Post, PostAdmin)
djadmin2.default.register(Comment, CommentAdmin)
| 27.611111 | 68 | 0.729041 |
from __future__ import division, absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy
import djadmin2
from djadmin2 import renderers
from djadmin2.actions import DeleteSelectedAction
from .actions import (CustomPublishAction, PublishAllItemsAction,
unpublish_items, unpublish_all_items)
from .models import Post, Comment
class CommentInline(djadmin2.Admin2TabularInline):
model = Comment
class PostAdmin(djadmin2.ModelAdmin2):
list_actions = [
DeleteSelectedAction, CustomPublishAction,
PublishAllItemsAction, unpublish_items,
unpublish_all_items,
]
inlines = [CommentInline]
search_fields = ('title', '^body')
list_display = ('title', 'body', 'published', "published_date",)
field_renderers = {
'title': renderers.title_renderer,
}
save_on_top = True
date_hierarchy = "published_date"
ordering = ["-published_date", "title",]
class CommentAdmin(djadmin2.ModelAdmin2):
search_fields = ('body', '=post__title')
list_filter = ['post', ]
actions_on_top = True
actions_on_bottom = True
actions_selection_counter = False
djadmin2.default.register_app_verbose_name(
'blog',
ugettext_lazy('My Blog')
)
djadmin2.default.register(Post, PostAdmin)
djadmin2.default.register(Comment, CommentAdmin)
| true | true |
f71ae15d38428c33a761ff30c5fe22e701f6415c | 28,827 | py | Python | electrum/lnsweep.py | roth-a/electrum | b6a5f6f2fc11b94bc863d2ccd43f166091badda9 | [
"MIT"
] | null | null | null | electrum/lnsweep.py | roth-a/electrum | b6a5f6f2fc11b94bc863d2ccd43f166091badda9 | [
"MIT"
] | 1 | 2020-01-31T17:11:07.000Z | 2020-01-31T17:11:07.000Z | electrum/lnsweep.py | roth-a/electrum | b6a5f6f2fc11b94bc863d2ccd43f166091badda9 | [
"MIT"
] | null | null | null | # Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
from typing import Optional, Dict, List, Tuple, TYPE_CHECKING, NamedTuple, Callable
from enum import Enum, auto
from .util import bfh, bh2u
from .bitcoin import redeem_script_to_address, dust_threshold
from . import ecc
from .lnutil import (make_commitment_output_to_remote_address, make_commitment_output_to_local_witness_script,
derive_privkey, derive_pubkey, derive_blinded_pubkey, derive_blinded_privkey,
make_htlc_tx_witness, make_htlc_tx_with_open_channel, UpdateAddHtlc,
LOCAL, REMOTE, make_htlc_output_witness_script, UnknownPaymentHash,
get_ordered_channel_configs, privkey_to_pubkey, get_per_commitment_secret_from_seed,
RevocationStore, extract_ctn_from_tx_and_chan, UnableToDeriveSecret, SENT, RECEIVED,
map_htlcs_to_ctx_output_idxs, Direction)
from .transaction import (Transaction, TxOutput, construct_witness, PartialTransaction, PartialTxInput,
PartialTxOutput, TxOutpoint)
from .simple_config import SimpleConfig
from .logging import get_logger
if TYPE_CHECKING:
from .lnchannel import Channel
_logger = get_logger(__name__)
class SweepInfo(NamedTuple):
    """Metadata describing a (potential) sweep of one channel output."""
    name: str             # human-readable label, used for logging/identification
    csv_delay: int        # relative (CSV) locktime that must pass before broadcasting; 0 if none
    cltv_expiry: int      # absolute (CLTV) locktime of the sweep tx; 0 if none
    gen_tx: Callable[[], Optional[Transaction]]  # lazily builds the sweep tx (None if e.g. output is dust)
def create_sweeptxs_for_watchtower(chan: 'Channel', ctx: Transaction, per_commitment_secret: bytes,
                                   sweep_address: str) -> List[Transaction]:
    """Presign sweeping transactions using the just received revoked pcs.
    These will only be utilised if the remote breaches.
    Sweep 'to_local', and all the HTLCs (two cases: directly from ctx, or from HTLC tx).
    """
    # prep
    # the revoked per-commitment secret yields both the per-commitment point
    # and the revocation privkey, enabling immediate penalty spends
    pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
    this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
    other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
                                                      per_commitment_secret)
    to_self_delay = other_conf.to_self_delay
    this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
    txs = []
    # to_local: reconstruct the revocable script to locate the output in ctx
    revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
    witness_script = bh2u(make_commitment_output_to_local_witness_script(
        revocation_pubkey, to_self_delay, this_delayed_pubkey))
    to_local_address = redeem_script_to_address('p2wsh', witness_script)
    output_idxs = ctx.get_output_idxs_from_address(to_local_address)
    if output_idxs:
        output_idx = output_idxs.pop()
        sweep_tx = create_sweeptx_ctx_to_local(
            sweep_address=sweep_address,
            ctx=ctx,
            output_idx=output_idx,
            witness_script=witness_script,
            privkey=other_revocation_privkey,
            is_revocation=True,
            config=chan.lnworker.config)
        if sweep_tx:
            txs.append(sweep_tx)
    # HTLCs
    def create_sweeptx_for_htlc(*, htlc: 'UpdateAddHtlc', htlc_direction: Direction,
                                ctx_output_idx: int) -> Optional[Transaction]:
        # rebuild the remote's second-stage HTLC tx, then presign a penalty tx
        # spending its output; to_self_delay=0 as the revocation path has no CSV lock
        htlc_tx_witness_script, htlc_tx = make_htlc_tx_with_open_channel(chan=chan,
                                                                         pcp=pcp,
                                                                         subject=REMOTE,
                                                                         htlc_direction=htlc_direction,
                                                                         commit=ctx,
                                                                         htlc=htlc,
                                                                         ctx_output_idx=ctx_output_idx)
        return create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(
            to_self_delay=0,
            htlc_tx=htlc_tx,
            htlctx_witness_script=htlc_tx_witness_script,
            sweep_address=sweep_address,
            privkey=other_revocation_privkey,
            is_revocation=True,
            config=chan.lnworker.config)
    ctn = extract_ctn_from_tx_and_chan(ctx, chan)
    htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
                                                              ctx=ctx,
                                                              pcp=pcp,
                                                              subject=REMOTE,
                                                              ctn=ctn)
    for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
        secondstage_sweep_tx = create_sweeptx_for_htlc(htlc=htlc,
                                                       htlc_direction=direction,
                                                       ctx_output_idx=ctx_output_idx)
        if secondstage_sweep_tx:
            txs.append(secondstage_sweep_tx)
    return txs
def create_sweeptx_for_their_revoked_ctx(chan: 'Channel', ctx: Transaction, per_commitment_secret: bytes,
                                         sweep_address: str) -> Optional[Callable[[], Optional[Transaction]]]:
    """Breach remedy for a revoked remote commitment tx (ctx).

    Given the per-commitment secret the remote leaked by revoking this state,
    derive the revocation key and return a closure that builds a penalty tx
    sweeping the 'to_local' output of ctx to sweep_address. Returns None if
    ctx has no matching 'to_local' output.
    """
    # prep: derive the blinded revocation privkey from the leaked secret
    pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
    this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
    other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
                                                      per_commitment_secret)
    to_self_delay = other_conf.to_self_delay
    this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
    # to_local: reconstruct the revocable script to locate the output in ctx
    # (note: the unused `txs = []` accumulator from the watchtower variant was removed)
    revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
    witness_script = bh2u(make_commitment_output_to_local_witness_script(
        revocation_pubkey, to_self_delay, this_delayed_pubkey))
    to_local_address = redeem_script_to_address('p2wsh', witness_script)
    output_idxs = ctx.get_output_idxs_from_address(to_local_address)
    if output_idxs:
        output_idx = output_idxs.pop()
        # is_revocation=True: the revocation path is not subject to the CSV delay
        sweep_tx = lambda: create_sweeptx_ctx_to_local(
            sweep_address=sweep_address,
            ctx=ctx,
            output_idx=output_idx,
            witness_script=witness_script,
            privkey=other_revocation_privkey,
            is_revocation=True,
            config=chan.lnworker.config)
        return sweep_tx
    return None
def create_sweeptx_for_their_revoked_htlc(chan: 'Channel', ctx: Transaction, htlc_tx: Transaction,
                                          sweep_address: str) -> Optional[SweepInfo]:
    """Breach remedy for the output of a second-stage HTLC tx that the remote
    broadcast on top of a revoked ctx: sweep it via the revocation path.

    Returns None unless ctx is a revoked remote commitment and htlc_tx pays
    to the expected revocable script.
    """
    x = analyze_ctx(chan, ctx)
    if not x:
        return
    ctn, their_pcp, is_revocation, per_commitment_secret = x
    if not is_revocation:
        return
    # prep
    pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
    this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
    other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
                                                      per_commitment_secret)
    to_self_delay = other_conf.to_self_delay
    this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
    # same witness script as to_local (HTLC-tx outputs are revocable the same way)
    revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
    witness_script = bh2u(make_commitment_output_to_local_witness_script(
        revocation_pubkey, to_self_delay, this_delayed_pubkey))
    htlc_address = redeem_script_to_address('p2wsh', witness_script)
    # check that htlc_tx is a htlc
    if htlc_tx.outputs()[0].address != htlc_address:
        return
    gen_tx = lambda: create_sweeptx_ctx_to_local(
        sweep_address=sweep_address,
        ctx=htlc_tx,
        output_idx=0,
        witness_script=witness_script,
        privkey=other_revocation_privkey,
        is_revocation=True,
        config=chan.lnworker.config)
    return SweepInfo(name='redeem_htlc2',
                     csv_delay=0,
                     cltv_expiry=0,
                     gen_tx=gen_tx)
def create_sweeptxs_for_our_ctx(*, chan: 'Channel', ctx: Transaction,
                                sweep_address: str) -> Optional[Dict[str, SweepInfo]]:
    """Handle the case where we force close unilaterally with our latest ctx.
    Construct sweep txns for 'to_local', and for all HTLCs (2 txns each).
    'to_local' can be swept even if this is a breach (by us),
    but HTLCs cannot (old HTLCs are no longer stored).

    Returns a map of prevout ('txid:n') -> SweepInfo, or None if ctx does not
    look like one of our own commitment transactions.
    """
    ctn = extract_ctn_from_tx_and_chan(ctx, chan)
    our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
    # we know our own per-commitment seed, so the secret for any ctn is derivable
    our_per_commitment_secret = get_per_commitment_secret_from_seed(
        our_conf.per_commitment_secret_seed, RevocationStore.START_INDEX - ctn)
    our_pcp = ecc.ECPrivkey(our_per_commitment_secret).get_public_key_bytes(compressed=True)
    our_delayed_bp_privkey = ecc.ECPrivkey(our_conf.delayed_basepoint.privkey)
    our_localdelayed_privkey = derive_privkey(our_delayed_bp_privkey.secret_scalar, our_pcp)
    our_localdelayed_privkey = ecc.ECPrivkey.from_secret_scalar(our_localdelayed_privkey)
    their_revocation_pubkey = derive_blinded_pubkey(their_conf.revocation_basepoint.pubkey, our_pcp)
    to_self_delay = their_conf.to_self_delay
    our_htlc_privkey = derive_privkey(secret=int.from_bytes(our_conf.htlc_basepoint.privkey, 'big'),
                                      per_commitment_point=our_pcp).to_bytes(32, 'big')
    our_localdelayed_pubkey = our_localdelayed_privkey.get_public_key_bytes(compressed=True)
    to_local_witness_script = bh2u(make_commitment_output_to_local_witness_script(
        their_revocation_pubkey, to_self_delay, our_localdelayed_pubkey))
    to_local_address = redeem_script_to_address('p2wsh', to_local_witness_script)
    their_payment_pubkey = derive_pubkey(their_conf.payment_basepoint.pubkey, our_pcp)
    to_remote_address = make_commitment_output_to_remote_address(their_payment_pubkey)
    # test ctx: bail out unless ctx contains an output we recognise
    _logger.debug(f'testing our ctx: {to_local_address} {to_remote_address}')
    if not ctx.get_output_idxs_from_address(to_local_address) \
            and not ctx.get_output_idxs_from_address(to_remote_address):
        return
    # we have to_local, to_remote.
    # other outputs are htlcs
    # if they are spent, we need to generate the script
    # so, second-stage htlc sweep should not be returned here
    if ctn < chan.get_oldest_unrevoked_ctn(LOCAL):
        _logger.info("we breached.")
        return {}
    txs = {}  # type: Dict[str, SweepInfo]
    # to_local: spendable by us after the CSV delay (csv_delay=to_self_delay)
    output_idxs = ctx.get_output_idxs_from_address(to_local_address)
    if output_idxs:
        output_idx = output_idxs.pop()
        sweep_tx = lambda: create_sweeptx_ctx_to_local(
            sweep_address=sweep_address,
            ctx=ctx,
            output_idx=output_idx,
            witness_script=to_local_witness_script,
            privkey=our_localdelayed_privkey.get_secret_bytes(),
            is_revocation=False,
            to_self_delay=to_self_delay,
            config=chan.lnworker.config)
        prevout = ctx.txid() + ':%d'%output_idx
        txs[prevout] = SweepInfo(name='our_ctx_to_local',
                                 csv_delay=to_self_delay,
                                 cltv_expiry=0,
                                 gen_tx=sweep_tx)
    # HTLCs
    def create_txns_for_htlc(*, htlc: 'UpdateAddHtlc', htlc_direction: Direction,
                             ctx_output_idx: int, htlc_relative_idx: int):
        # a received HTLC is swept via HTLC-success, which needs the preimage
        if htlc_direction == RECEIVED:
            try:
                preimage = chan.lnworker.get_preimage(htlc.payment_hash)
            except UnknownPaymentHash as e:
                _logger.info(f'trying to sweep htlc from our latest ctx but getting {repr(e)}')
                return
        else:
            preimage = None
        htlctx_witness_script, htlc_tx = create_htlctx_that_spends_from_our_ctx(
            chan=chan,
            our_pcp=our_pcp,
            ctx=ctx,
            htlc=htlc,
            local_htlc_privkey=our_htlc_privkey,
            preimage=preimage,
            htlc_direction=htlc_direction,
            ctx_output_idx=ctx_output_idx,
            htlc_relative_idx=htlc_relative_idx)
        sweep_tx = lambda: create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(
            to_self_delay=to_self_delay,
            htlc_tx=htlc_tx,
            htlctx_witness_script=htlctx_witness_script,
            sweep_address=sweep_address,
            privkey=our_localdelayed_privkey.get_secret_bytes(),
            is_revocation=False,
            config=chan.lnworker.config)
        # side effect: record both the first-stage HTLC tx (CLTV-locked) and
        # the second-stage sweep (CSV-locked) keyed by their respective prevouts
        txs[htlc_tx.inputs()[0].prevout.to_str()] = SweepInfo(name='first-stage-htlc',
                                                              csv_delay=0,
                                                              cltv_expiry=htlc_tx.locktime,
                                                              gen_tx=lambda: htlc_tx)
        txs[htlc_tx.txid() + ':0'] = SweepInfo(name='second-stage-htlc',
                                               csv_delay=to_self_delay,
                                               cltv_expiry=0,
                                               gen_tx=sweep_tx)
    # offered HTLCs, in our ctx --> "timeout"
    # received HTLCs, in our ctx --> "success"
    htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
                                                              ctx=ctx,
                                                              pcp=our_pcp,
                                                              subject=LOCAL,
                                                              ctn=ctn)
    for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
        create_txns_for_htlc(htlc=htlc,
                             htlc_direction=direction,
                             ctx_output_idx=ctx_output_idx,
                             htlc_relative_idx=htlc_relative_idx)
    return txs
def analyze_ctx(chan: 'Channel', ctx: Transaction):
    """Classify a broadcast remote commitment tx (ctx).

    Returns (ctn, their_pcp, is_revocation, per_commitment_secret), or None
    if the per-commitment point for this ctn cannot be determined.
    """
    # note: the remote sometimes has two valid non-revoked commitment
    # transactions, either of which could be broadcast
    our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
    ctn = extract_ctn_from_tx_and_chan(ctx, chan)
    oldest_unrevoked = chan.get_oldest_unrevoked_ctn(REMOTE)
    per_commitment_secret = None
    is_revocation = False
    if ctn == oldest_unrevoked:
        # their currently valid commitment
        their_pcp = their_conf.current_per_commitment_point
    elif ctn == oldest_unrevoked + 1:
        # their next commitment (signed but not yet revoked)
        their_pcp = their_conf.next_per_commitment_point
    elif ctn < oldest_unrevoked:
        # breach: an old, revoked commitment -- we should know its secret
        try:
            per_commitment_secret = chan.revocation_store.retrieve_secret(RevocationStore.START_INDEX - ctn)
        except UnableToDeriveSecret:
            return None
        their_pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
        is_revocation = True
    elif ctn in chan.data_loss_protect_remote_pcp:
        # we lost state; the remote disclosed this point via data-loss-protect
        their_pcp = chan.data_loss_protect_remote_pcp[ctn]
    else:
        return None
    return ctn, their_pcp, is_revocation, per_commitment_secret
def create_sweeptxs_for_their_ctx(*, chan: 'Channel', ctx: Transaction,
                                  sweep_address: str) -> Optional[Dict[str,SweepInfo]]:
    """Handle the case when the remote force-closes with their ctx.
    Sweep outputs that do not have a CSV delay ('to_remote' and first-stage HTLCs).
    Outputs with CSV delay ('to_local' and second-stage HTLCs) are redeemed by LNWatcher.

    Returns a map of prevout ('txid:n') -> SweepInfo, or None if ctx does not
    look like one of the remote's commitment transactions.
    """
    txs = {}  # type: Dict[str, SweepInfo]
    our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
    x = analyze_ctx(chan, ctx)
    if not x:
        return
    ctn, their_pcp, is_revocation, per_commitment_secret = x
    # to_local and to_remote addresses
    our_revocation_pubkey = derive_blinded_pubkey(our_conf.revocation_basepoint.pubkey, their_pcp)
    their_delayed_pubkey = derive_pubkey(their_conf.delayed_basepoint.pubkey, their_pcp)
    witness_script = bh2u(make_commitment_output_to_local_witness_script(
        our_revocation_pubkey, our_conf.to_self_delay, their_delayed_pubkey))
    to_local_address = redeem_script_to_address('p2wsh', witness_script)
    our_payment_pubkey = derive_pubkey(our_conf.payment_basepoint.pubkey, their_pcp)
    to_remote_address = make_commitment_output_to_remote_address(our_payment_pubkey)
    # test if this is their ctx
    _logger.debug(f'testing their ctx: {to_local_address} {to_remote_address}')
    if not ctx.get_output_idxs_from_address(to_local_address) \
            and not ctx.get_output_idxs_from_address(to_remote_address):
        return
    if is_revocation:
        # breach: punish via the revocation key (no CSV wait)
        our_revocation_privkey = derive_blinded_privkey(our_conf.revocation_basepoint.privkey, per_commitment_secret)
        gen_tx = create_sweeptx_for_their_revoked_ctx(chan, ctx, per_commitment_secret, chan.sweep_address)
        if gen_tx:
            tx = gen_tx()
            txs[tx.inputs()[0].prevout.to_str()] = SweepInfo(name='to_local_for_revoked_ctx',
                                                             csv_delay=0,
                                                             cltv_expiry=0,
                                                             gen_tx=gen_tx)
    # prep
    our_htlc_privkey = derive_privkey(secret=int.from_bytes(our_conf.htlc_basepoint.privkey, 'big'), per_commitment_point=their_pcp)
    our_htlc_privkey = ecc.ECPrivkey.from_secret_scalar(our_htlc_privkey)
    their_htlc_pubkey = derive_pubkey(their_conf.htlc_basepoint.pubkey, their_pcp)
    our_payment_bp_privkey = ecc.ECPrivkey(our_conf.payment_basepoint.privkey)
    our_payment_privkey = derive_privkey(our_payment_bp_privkey.secret_scalar, their_pcp)
    our_payment_privkey = ecc.ECPrivkey.from_secret_scalar(our_payment_privkey)
    assert our_payment_pubkey == our_payment_privkey.get_public_key_bytes(compressed=True)
    # to_local is handled by lnwatcher
    # to_remote: ours immediately, no CSV delay
    output_idxs = ctx.get_output_idxs_from_address(to_remote_address)
    if output_idxs:
        output_idx = output_idxs.pop()
        prevout = ctx.txid() + ':%d'%output_idx
        sweep_tx = lambda: create_sweeptx_their_ctx_to_remote(
            sweep_address=sweep_address,
            ctx=ctx,
            output_idx=output_idx,
            our_payment_privkey=our_payment_privkey,
            config=chan.lnworker.config)
        txs[prevout] = SweepInfo(name='their_ctx_to_remote',
                                 csv_delay=0,
                                 cltv_expiry=0,
                                 gen_tx=sweep_tx)
    # HTLCs
    def create_sweeptx_for_htlc(htlc: 'UpdateAddHtlc', is_received_htlc: bool,
                                ctx_output_idx: int) -> None:
        # sweeping an HTLC they offered requires the preimage (unless breach)
        if not is_received_htlc and not is_revocation:
            try:
                preimage = chan.lnworker.get_preimage(htlc.payment_hash)
            except UnknownPaymentHash as e:
                _logger.info(f'trying to sweep htlc from their latest ctx but getting {repr(e)}')
                return
        else:
            preimage = None
        htlc_output_witness_script = make_htlc_output_witness_script(
            is_received_htlc=is_received_htlc,
            remote_revocation_pubkey=our_revocation_pubkey,
            remote_htlc_pubkey=our_htlc_privkey.get_public_key_bytes(compressed=True),
            local_htlc_pubkey=their_htlc_pubkey,
            payment_hash=htlc.payment_hash,
            cltv_expiry=htlc.cltv_expiry)
        # timeout-path sweeps must wait for the HTLC's absolute CLTV expiry
        cltv_expiry = htlc.cltv_expiry if is_received_htlc and not is_revocation else 0
        prevout = ctx.txid() + ':%d'%ctx_output_idx
        sweep_tx = lambda: create_sweeptx_their_ctx_htlc(
            ctx=ctx,
            witness_script=htlc_output_witness_script,
            sweep_address=sweep_address,
            preimage=preimage,
            output_idx=ctx_output_idx,
            privkey=our_revocation_privkey if is_revocation else our_htlc_privkey.get_secret_bytes(),
            is_revocation=is_revocation,
            cltv_expiry=cltv_expiry,
            config=chan.lnworker.config)
        txs[prevout] = SweepInfo(name=f'their_ctx_htlc_{ctx_output_idx}',
                                 csv_delay=0,
                                 cltv_expiry=cltv_expiry,
                                 gen_tx=sweep_tx)
    # received HTLCs, in their ctx --> "timeout"
    # offered HTLCs, in their ctx --> "success"
    htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
                                                              ctx=ctx,
                                                              pcp=their_pcp,
                                                              subject=REMOTE,
                                                              ctn=ctn)
    for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
        create_sweeptx_for_htlc(htlc=htlc,
                                is_received_htlc=direction == RECEIVED,
                                ctx_output_idx=ctx_output_idx)
    return txs
def create_htlctx_that_spends_from_our_ctx(chan: 'Channel', our_pcp: bytes,
                                           ctx: Transaction, htlc: 'UpdateAddHtlc',
                                           local_htlc_privkey: bytes, preimage: Optional[bytes],
                                           htlc_direction: Direction, htlc_relative_idx: int,
                                           ctx_output_idx: int) -> Tuple[bytes, Transaction]:
    """Build and fully sign the second-stage HTLC tx (HTLC-success/timeout)
    spending the HTLC output at ctx_output_idx of our own commitment tx.

    Returns (htlc_tx_witness_script, signed_htlc_tx). For a received HTLC
    (the HTLC-success case) the payment preimage must be provided.
    """
    assert (htlc_direction == RECEIVED) == bool(preimage), 'preimage is required iff htlc is received'
    preimage = preimage or b''
    witness_script, htlc_tx = make_htlc_tx_with_open_channel(chan=chan,
                                                             pcp=our_pcp,
                                                             subject=LOCAL,
                                                             htlc_direction=htlc_direction,
                                                             commit=ctx,
                                                             htlc=htlc,
                                                             ctx_output_idx=ctx_output_idx,
                                                             name=f'our_ctx_{ctx_output_idx}_htlc_tx_{bh2u(htlc.payment_hash)}')
    # the remote's signature for this HTLC was received when the commitment was signed
    remote_htlc_sig = chan.get_remote_htlc_sig_for_htlc(htlc_relative_idx=htlc_relative_idx)
    local_htlc_sig = bfh(htlc_tx.sign_txin(0, local_htlc_privkey))
    txin = htlc_tx.inputs()[0]
    witness_program = bfh(Transaction.get_preimage_script(txin))
    txin.witness = make_htlc_tx_witness(remote_htlc_sig, local_htlc_sig, preimage, witness_program)
    return witness_script, htlc_tx
def create_sweeptx_their_ctx_htlc(ctx: Transaction, witness_script: bytes, sweep_address: str,
                                  preimage: Optional[bytes], output_idx: int,
                                  privkey: bytes, is_revocation: bool,
                                  cltv_expiry: int, config: SimpleConfig) -> Optional[PartialTransaction]:
    """Sweep an HTLC output directly from their commitment tx to sweep_address.

    Spends via the success (preimage) / timeout path, or via the revocation
    path if is_revocation. Returns None if the value minus fee would be dust.
    """
    assert type(cltv_expiry) is int
    preimage = preimage or b''  # preimage is required iff (not is_revocation and htlc is offered)
    val = ctx.outputs()[output_idx].value
    prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.witness_script = witness_script
    txin.script_sig = b''
    sweep_inputs = [txin]
    tx_size_bytes = 200  # TODO (depends on offered/received and is_revocation)
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold(): return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    # nLockTime enforces the CLTV for the timeout path (0 otherwise)
    tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2, locktime=cltv_expiry)
    sig = bfh(tx.sign_txin(0, privkey))
    if not is_revocation:
        witness = construct_witness([sig, preimage, witness_script])
    else:
        revocation_pubkey = privkey_to_pubkey(privkey)
        witness = construct_witness([sig, revocation_pubkey, witness_script])
    tx.inputs()[0].witness = bfh(witness)
    assert tx.is_complete()
    return tx
def create_sweeptx_their_ctx_to_remote(sweep_address: str, ctx: Transaction, output_idx: int,
                                       our_payment_privkey: ecc.ECPrivkey,
                                       config: SimpleConfig) -> Optional[PartialTransaction]:
    """Sweep our p2wpkh 'to_remote' output of their commitment tx.

    This output has no CSV delay. Returns None if the value minus fee would
    be dust; raises if signing does not complete the tx.
    """
    our_payment_pubkey = our_payment_privkey.get_public_key_hex(compressed=True)
    val = ctx.outputs()[output_idx].value
    prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.script_type = 'p2wpkh'
    txin.pubkeys = [bfh(our_payment_pubkey)]
    txin.num_sig = 1
    sweep_inputs = [txin]
    tx_size_bytes = 110  # approx size of p2wpkh->p2wpkh
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold(): return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    sweep_tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs)
    sweep_tx.set_rbf(True)
    sweep_tx.sign({our_payment_pubkey: (our_payment_privkey.get_secret_bytes(), True)})
    if not sweep_tx.is_complete():
        raise Exception('channel close sweep tx is not complete')
    return sweep_tx
def create_sweeptx_ctx_to_local(*, sweep_address: str, ctx: Transaction, output_idx: int, witness_script: str,
                                privkey: bytes, is_revocation: bool, config: SimpleConfig,
                                to_self_delay: int=None) -> Optional[PartialTransaction]:
    """Create a txn that sweeps the 'to_local' output of a commitment
    transaction into our wallet.

    privkey: either revocation_privkey or localdelayed_privkey
    is_revocation: tells us which ^
    witness_script: hex-encoded 'to_local' witness script
    to_self_delay: required (as an int) iff not is_revocation
    Returns None if the value minus fee would be dust.
    """
    val = ctx.outputs()[output_idx].value
    prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.script_sig = b''
    txin.witness_script = bfh(witness_script)
    sweep_inputs = [txin]
    if not is_revocation:
        # the local-delayed path must respect the CSV delay via nSequence
        assert isinstance(to_self_delay, int)
        sweep_inputs[0].nsequence = to_self_delay
    tx_size_bytes = 121  # approx size of to_local -> p2wpkh
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold():
        return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    sweep_tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2)
    sig = sweep_tx.sign_txin(0, privkey)
    # second witness item selects the script branch: 1 = revocation, 0 = delayed
    witness = construct_witness([sig, int(is_revocation), witness_script])
    sweep_tx.inputs()[0].witness = bfh(witness)
    return sweep_tx
def create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(*,
        htlc_tx: Transaction, htlctx_witness_script: bytes, sweep_address: str,
        privkey: bytes, is_revocation: bool, to_self_delay: int,
        config: SimpleConfig) -> Optional[PartialTransaction]:
    """Sweep the output of a second-stage HTLC tx to sweep_address.

    Non-revocation spends must wait out the CSV delay (set via nSequence);
    revocation spends do not. Returns None if the value minus fee would be dust.
    """
    val = htlc_tx.outputs()[0].value
    prevout = TxOutpoint(txid=bfh(htlc_tx.txid()), out_idx=0)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.script_sig = b''
    txin.witness_script = htlctx_witness_script
    sweep_inputs = [txin]
    if not is_revocation:
        assert isinstance(to_self_delay, int)
        sweep_inputs[0].nsequence = to_self_delay
    tx_size_bytes = 200  # TODO
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold(): return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2)
    sig = bfh(tx.sign_txin(0, privkey))
    # second witness item selects the script branch: 1 = revocation, 0 = delayed
    witness = construct_witness([sig, int(is_revocation), htlctx_witness_script])
    tx.inputs()[0].witness = bfh(witness)
    assert tx.is_complete()
    return tx
| 52.034296 | 132 | 0.648073 |
from typing import Optional, Dict, List, Tuple, TYPE_CHECKING, NamedTuple, Callable
from enum import Enum, auto
from .util import bfh, bh2u
from .bitcoin import redeem_script_to_address, dust_threshold
from . import ecc
from .lnutil import (make_commitment_output_to_remote_address, make_commitment_output_to_local_witness_script,
derive_privkey, derive_pubkey, derive_blinded_pubkey, derive_blinded_privkey,
make_htlc_tx_witness, make_htlc_tx_with_open_channel, UpdateAddHtlc,
LOCAL, REMOTE, make_htlc_output_witness_script, UnknownPaymentHash,
get_ordered_channel_configs, privkey_to_pubkey, get_per_commitment_secret_from_seed,
RevocationStore, extract_ctn_from_tx_and_chan, UnableToDeriveSecret, SENT, RECEIVED,
map_htlcs_to_ctx_output_idxs, Direction)
from .transaction import (Transaction, TxOutput, construct_witness, PartialTransaction, PartialTxInput,
PartialTxOutput, TxOutpoint)
from .simple_config import SimpleConfig
from .logging import get_logger
if TYPE_CHECKING:
from .lnchannel import Channel
_logger = get_logger(__name__)
class SweepInfo(NamedTuple):
name: str
csv_delay: int
cltv_expiry: int
gen_tx: Callable[[], Optional[Transaction]]
def create_sweeptxs_for_watchtower(chan: 'Channel', ctx: Transaction, per_commitment_secret: bytes,
sweep_address: str) -> List[Transaction]:
pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
per_commitment_secret)
to_self_delay = other_conf.to_self_delay
this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
txs = []
revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
witness_script = bh2u(make_commitment_output_to_local_witness_script(
revocation_pubkey, to_self_delay, this_delayed_pubkey))
to_local_address = redeem_script_to_address('p2wsh', witness_script)
output_idxs = ctx.get_output_idxs_from_address(to_local_address)
if output_idxs:
output_idx = output_idxs.pop()
sweep_tx = create_sweeptx_ctx_to_local(
sweep_address=sweep_address,
ctx=ctx,
output_idx=output_idx,
witness_script=witness_script,
privkey=other_revocation_privkey,
is_revocation=True,
config=chan.lnworker.config)
if sweep_tx:
txs.append(sweep_tx)
def create_sweeptx_for_htlc(*, htlc: 'UpdateAddHtlc', htlc_direction: Direction,
ctx_output_idx: int) -> Optional[Transaction]:
htlc_tx_witness_script, htlc_tx = make_htlc_tx_with_open_channel(chan=chan,
pcp=pcp,
subject=REMOTE,
htlc_direction=htlc_direction,
commit=ctx,
htlc=htlc,
ctx_output_idx=ctx_output_idx)
return create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(
to_self_delay=0,
htlc_tx=htlc_tx,
htlctx_witness_script=htlc_tx_witness_script,
sweep_address=sweep_address,
privkey=other_revocation_privkey,
is_revocation=True,
config=chan.lnworker.config)
ctn = extract_ctn_from_tx_and_chan(ctx, chan)
htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
ctx=ctx,
pcp=pcp,
subject=REMOTE,
ctn=ctn)
for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
secondstage_sweep_tx = create_sweeptx_for_htlc(htlc=htlc,
htlc_direction=direction,
ctx_output_idx=ctx_output_idx)
if secondstage_sweep_tx:
txs.append(secondstage_sweep_tx)
return txs
def create_sweeptx_for_their_revoked_ctx(chan: 'Channel', ctx: Transaction, per_commitment_secret: bytes,
sweep_address: str) -> Optional[Callable[[], Optional[Transaction]]]:
pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
per_commitment_secret)
to_self_delay = other_conf.to_self_delay
this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
txs = []
revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
witness_script = bh2u(make_commitment_output_to_local_witness_script(
revocation_pubkey, to_self_delay, this_delayed_pubkey))
to_local_address = redeem_script_to_address('p2wsh', witness_script)
output_idxs = ctx.get_output_idxs_from_address(to_local_address)
if output_idxs:
output_idx = output_idxs.pop()
sweep_tx = lambda: create_sweeptx_ctx_to_local(
sweep_address=sweep_address,
ctx=ctx,
output_idx=output_idx,
witness_script=witness_script,
privkey=other_revocation_privkey,
is_revocation=True,
config=chan.lnworker.config)
return sweep_tx
return None
def create_sweeptx_for_their_revoked_htlc(chan: 'Channel', ctx: Transaction, htlc_tx: Transaction,
sweep_address: str) -> Optional[SweepInfo]:
x = analyze_ctx(chan, ctx)
if not x:
return
ctn, their_pcp, is_revocation, per_commitment_secret = x
if not is_revocation:
return
pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
this_conf, other_conf = get_ordered_channel_configs(chan=chan, for_us=False)
other_revocation_privkey = derive_blinded_privkey(other_conf.revocation_basepoint.privkey,
per_commitment_secret)
to_self_delay = other_conf.to_self_delay
this_delayed_pubkey = derive_pubkey(this_conf.delayed_basepoint.pubkey, pcp)
revocation_pubkey = ecc.ECPrivkey(other_revocation_privkey).get_public_key_bytes(compressed=True)
witness_script = bh2u(make_commitment_output_to_local_witness_script(
revocation_pubkey, to_self_delay, this_delayed_pubkey))
htlc_address = redeem_script_to_address('p2wsh', witness_script)
if htlc_tx.outputs()[0].address != htlc_address:
return
gen_tx = lambda: create_sweeptx_ctx_to_local(
sweep_address=sweep_address,
ctx=htlc_tx,
output_idx=0,
witness_script=witness_script,
privkey=other_revocation_privkey,
is_revocation=True,
config=chan.lnworker.config)
return SweepInfo(name='redeem_htlc2',
csv_delay=0,
cltv_expiry=0,
gen_tx=gen_tx)
def create_sweeptxs_for_our_ctx(*, chan: 'Channel', ctx: Transaction,
sweep_address: str) -> Optional[Dict[str, SweepInfo]]:
ctn = extract_ctn_from_tx_and_chan(ctx, chan)
our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
our_per_commitment_secret = get_per_commitment_secret_from_seed(
our_conf.per_commitment_secret_seed, RevocationStore.START_INDEX - ctn)
our_pcp = ecc.ECPrivkey(our_per_commitment_secret).get_public_key_bytes(compressed=True)
our_delayed_bp_privkey = ecc.ECPrivkey(our_conf.delayed_basepoint.privkey)
our_localdelayed_privkey = derive_privkey(our_delayed_bp_privkey.secret_scalar, our_pcp)
our_localdelayed_privkey = ecc.ECPrivkey.from_secret_scalar(our_localdelayed_privkey)
their_revocation_pubkey = derive_blinded_pubkey(their_conf.revocation_basepoint.pubkey, our_pcp)
to_self_delay = their_conf.to_self_delay
our_htlc_privkey = derive_privkey(secret=int.from_bytes(our_conf.htlc_basepoint.privkey, 'big'),
per_commitment_point=our_pcp).to_bytes(32, 'big')
our_localdelayed_pubkey = our_localdelayed_privkey.get_public_key_bytes(compressed=True)
to_local_witness_script = bh2u(make_commitment_output_to_local_witness_script(
their_revocation_pubkey, to_self_delay, our_localdelayed_pubkey))
to_local_address = redeem_script_to_address('p2wsh', to_local_witness_script)
their_payment_pubkey = derive_pubkey(their_conf.payment_basepoint.pubkey, our_pcp)
to_remote_address = make_commitment_output_to_remote_address(their_payment_pubkey)
_logger.debug(f'testing our ctx: {to_local_address} {to_remote_address}')
if not ctx.get_output_idxs_from_address(to_local_address) \
and not ctx.get_output_idxs_from_address(to_remote_address):
return
if ctn < chan.get_oldest_unrevoked_ctn(LOCAL):
_logger.info("we breached.")
return {}
txs = {}
output_idxs = ctx.get_output_idxs_from_address(to_local_address)
if output_idxs:
output_idx = output_idxs.pop()
sweep_tx = lambda: create_sweeptx_ctx_to_local(
sweep_address=sweep_address,
ctx=ctx,
output_idx=output_idx,
witness_script=to_local_witness_script,
privkey=our_localdelayed_privkey.get_secret_bytes(),
is_revocation=False,
to_self_delay=to_self_delay,
config=chan.lnworker.config)
prevout = ctx.txid() + ':%d'%output_idx
txs[prevout] = SweepInfo(name='our_ctx_to_local',
csv_delay=to_self_delay,
cltv_expiry=0,
gen_tx=sweep_tx)
def create_txns_for_htlc(*, htlc: 'UpdateAddHtlc', htlc_direction: Direction,
ctx_output_idx: int, htlc_relative_idx: int):
if htlc_direction == RECEIVED:
try:
preimage = chan.lnworker.get_preimage(htlc.payment_hash)
except UnknownPaymentHash as e:
_logger.info(f'trying to sweep htlc from our latest ctx but getting {repr(e)}')
return
else:
preimage = None
htlctx_witness_script, htlc_tx = create_htlctx_that_spends_from_our_ctx(
chan=chan,
our_pcp=our_pcp,
ctx=ctx,
htlc=htlc,
local_htlc_privkey=our_htlc_privkey,
preimage=preimage,
htlc_direction=htlc_direction,
ctx_output_idx=ctx_output_idx,
htlc_relative_idx=htlc_relative_idx)
sweep_tx = lambda: create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(
to_self_delay=to_self_delay,
htlc_tx=htlc_tx,
htlctx_witness_script=htlctx_witness_script,
sweep_address=sweep_address,
privkey=our_localdelayed_privkey.get_secret_bytes(),
is_revocation=False,
config=chan.lnworker.config)
txs[htlc_tx.inputs()[0].prevout.to_str()] = SweepInfo(name='first-stage-htlc',
csv_delay=0,
cltv_expiry=htlc_tx.locktime,
gen_tx=lambda: htlc_tx)
txs[htlc_tx.txid() + ':0'] = SweepInfo(name='second-stage-htlc',
csv_delay=to_self_delay,
cltv_expiry=0,
gen_tx=sweep_tx)
htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
ctx=ctx,
pcp=our_pcp,
subject=LOCAL,
ctn=ctn)
for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
create_txns_for_htlc(htlc=htlc,
htlc_direction=direction,
ctx_output_idx=ctx_output_idx,
htlc_relative_idx=htlc_relative_idx)
return txs
def analyze_ctx(chan: 'Channel', ctx: Transaction):
    """Classify a broadcast commitment transaction *ctx* for channel *chan*.

    Returns a tuple (ctn, their_pcp, is_revocation, per_commitment_secret),
    or None if *ctx* cannot be matched to any known remote commitment number.
    per_commitment_secret is only non-None for revoked (breach) ctxs.
    """
    our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
    ctn = extract_ctn_from_tx_and_chan(ctx, chan)
    per_commitment_secret = None
    oldest_unrevoked_remote_ctn = chan.get_oldest_unrevoked_ctn(REMOTE)
    if ctn == oldest_unrevoked_remote_ctn:
        # Their current (unrevoked) commitment.
        their_pcp = their_conf.current_per_commitment_point
        is_revocation = False
    elif ctn == oldest_unrevoked_remote_ctn + 1:
        # Their next commitment (one beyond the oldest unrevoked).
        their_pcp = their_conf.next_per_commitment_point
        is_revocation = False
    elif ctn < oldest_unrevoked_remote_ctn:
        # An old, already-revoked commitment: a breach. Recover the
        # per-commitment secret so revocation keys can be derived from it.
        try:
            per_commitment_secret = chan.revocation_store.retrieve_secret(RevocationStore.START_INDEX - ctn)
        except UnableToDeriveSecret:
            return
        their_pcp = ecc.ECPrivkey(per_commitment_secret).get_public_key_bytes(compressed=True)
        is_revocation = True
    elif ctn in chan.data_loss_protect_remote_pcp:
        # A ctx we only know about via a stored data_loss_protect point.
        their_pcp = chan.data_loss_protect_remote_pcp[ctn]
        is_revocation = False
    else:
        return
    return ctn, their_pcp, is_revocation, per_commitment_secret
def create_sweeptxs_for_their_ctx(*, chan: 'Channel', ctx: Transaction,
                                  sweep_address: str) -> Optional[Dict[str,SweepInfo]]:
    """Presign sweep transactions for *their* commitment transaction *ctx*.

    Handles both a regular remote force-close and a breach (they broadcast a
    revoked ctx): sweeps our to_remote output, the HTLC outputs, and — on
    breach — their to_local output via the revocation key.
    Returns {prevout: SweepInfo}, or None if *ctx* does not match any known
    remote commitment.
    """
    txs = {}
    our_conf, their_conf = get_ordered_channel_configs(chan=chan, for_us=True)
    x = analyze_ctx(chan, ctx)
    if not x:
        return
    ctn, their_pcp, is_revocation, per_commitment_secret = x
    # Re-derive the keys/scripts this ctx must have used, so we can recognise
    # its outputs by address.
    our_revocation_pubkey = derive_blinded_pubkey(our_conf.revocation_basepoint.pubkey, their_pcp)
    their_delayed_pubkey = derive_pubkey(their_conf.delayed_basepoint.pubkey, their_pcp)
    witness_script = bh2u(make_commitment_output_to_local_witness_script(
        our_revocation_pubkey, our_conf.to_self_delay, their_delayed_pubkey))
    to_local_address = redeem_script_to_address('p2wsh', witness_script)
    our_payment_pubkey = derive_pubkey(our_conf.payment_basepoint.pubkey, their_pcp)
    to_remote_address = make_commitment_output_to_remote_address(our_payment_pubkey)
    _logger.debug(f'testing their ctx: {to_local_address} {to_remote_address}')
    # Sanity check: the ctx must pay to at least one of the expected addresses.
    if not ctx.get_output_idxs_from_address(to_local_address) \
            and not ctx.get_output_idxs_from_address(to_remote_address):
        return
    if is_revocation:
        # Breach: sweep their to_local output using the revocation key.
        our_revocation_privkey = derive_blinded_privkey(our_conf.revocation_basepoint.privkey, per_commitment_secret)
        gen_tx = create_sweeptx_for_their_revoked_ctx(chan, ctx, per_commitment_secret, chan.sweep_address)
        if gen_tx:
            tx = gen_tx()
            txs[tx.inputs()[0].prevout.to_str()] = SweepInfo(name='to_local_for_revoked_ctx',
                                                             csv_delay=0,
                                                             cltv_expiry=0,
                                                             gen_tx=gen_tx)
    our_htlc_privkey = derive_privkey(secret=int.from_bytes(our_conf.htlc_basepoint.privkey, 'big'), per_commitment_point=their_pcp)
    our_htlc_privkey = ecc.ECPrivkey.from_secret_scalar(our_htlc_privkey)
    their_htlc_pubkey = derive_pubkey(their_conf.htlc_basepoint.pubkey, their_pcp)
    our_payment_bp_privkey = ecc.ECPrivkey(our_conf.payment_basepoint.privkey)
    our_payment_privkey = derive_privkey(our_payment_bp_privkey.secret_scalar, their_pcp)
    our_payment_privkey = ecc.ECPrivkey.from_secret_scalar(our_payment_privkey)
    assert our_payment_pubkey == our_payment_privkey.get_public_key_bytes(compressed=True)
    # Sweep our direct to_remote output, if present.
    output_idxs = ctx.get_output_idxs_from_address(to_remote_address)
    if output_idxs:
        output_idx = output_idxs.pop()
        prevout = ctx.txid() + ':%d'%output_idx
        sweep_tx = lambda: create_sweeptx_their_ctx_to_remote(
            sweep_address=sweep_address,
            ctx=ctx,
            output_idx=output_idx,
            our_payment_privkey=our_payment_privkey,
            config=chan.lnworker.config)
        txs[prevout] = SweepInfo(name='their_ctx_to_remote',
                                 csv_delay=0,
                                 cltv_expiry=0,
                                 gen_tx=sweep_tx)
    def create_sweeptx_for_htlc(htlc: 'UpdateAddHtlc', is_received_htlc: bool,
                                ctx_output_idx: int) -> None:
        """Register a sweep for a single HTLC output of their ctx."""
        # The preimage path requires knowing the payment preimage; skip the
        # htlc if we don't have it (revocation sweeps don't need it).
        if not is_received_htlc and not is_revocation:
            try:
                preimage = chan.lnworker.get_preimage(htlc.payment_hash)
            except UnknownPaymentHash as e:
                _logger.info(f'trying to sweep htlc from their latest ctx but getting {repr(e)}')
                return
        else:
            preimage = None
        htlc_output_witness_script = make_htlc_output_witness_script(
            is_received_htlc=is_received_htlc,
            remote_revocation_pubkey=our_revocation_pubkey,
            remote_htlc_pubkey=our_htlc_privkey.get_public_key_bytes(compressed=True),
            local_htlc_pubkey=their_htlc_pubkey,
            payment_hash=htlc.payment_hash,
            cltv_expiry=htlc.cltv_expiry)
        cltv_expiry = htlc.cltv_expiry if is_received_htlc and not is_revocation else 0
        prevout = ctx.txid() + ':%d'%ctx_output_idx
        sweep_tx = lambda: create_sweeptx_their_ctx_htlc(
            ctx=ctx,
            witness_script=htlc_output_witness_script,
            sweep_address=sweep_address,
            preimage=preimage,
            output_idx=ctx_output_idx,
            privkey=our_revocation_privkey if is_revocation else our_htlc_privkey.get_secret_bytes(),
            is_revocation=is_revocation,
            cltv_expiry=cltv_expiry,
            config=chan.lnworker.config)
        txs[prevout] = SweepInfo(name=f'their_ctx_htlc_{ctx_output_idx}',
                                 csv_delay=0,
                                 cltv_expiry=cltv_expiry,
                                 gen_tx=sweep_tx)
    htlc_to_ctx_output_idx_map = map_htlcs_to_ctx_output_idxs(chan=chan,
                                                              ctx=ctx,
                                                              pcp=their_pcp,
                                                              subject=REMOTE,
                                                              ctn=ctn)
    for (direction, htlc), (ctx_output_idx, htlc_relative_idx) in htlc_to_ctx_output_idx_map.items():
        create_sweeptx_for_htlc(htlc=htlc,
                                is_received_htlc=direction == RECEIVED,
                                ctx_output_idx=ctx_output_idx)
    return txs
def create_htlctx_that_spends_from_our_ctx(chan: 'Channel', our_pcp: bytes,
                                           ctx: Transaction, htlc: 'UpdateAddHtlc',
                                           local_htlc_privkey: bytes, preimage: Optional[bytes],
                                           htlc_direction: Direction, htlc_relative_idx: int,
                                           ctx_output_idx: int) -> Tuple[bytes, Transaction]:
    """Build and fully sign the second-stage HTLC transaction that spends an
    HTLC output of our own commitment transaction *ctx*.

    Our freshly-made local signature is combined with the counterparty's
    pre-exchanged signature into the HTLC output's witness.
    Returns (witness_script, htlc_tx).
    """
    assert (htlc_direction == RECEIVED) == bool(preimage), 'preimage is required iff htlc is received'
    preimage = preimage or b''
    witness_script, htlc_tx = make_htlc_tx_with_open_channel(chan=chan,
                                                            pcp=our_pcp,
                                                            subject=LOCAL,
                                                            htlc_direction=htlc_direction,
                                                            commit=ctx,
                                                            htlc=htlc,
                                                            ctx_output_idx=ctx_output_idx,
                                                            name=f'our_ctx_{ctx_output_idx}_htlc_tx_{bh2u(htlc.payment_hash)}')
    # The remote side signed this htlc-tx when the commitment was exchanged.
    remote_htlc_sig = chan.get_remote_htlc_sig_for_htlc(htlc_relative_idx=htlc_relative_idx)
    local_htlc_sig = bfh(htlc_tx.sign_txin(0, local_htlc_privkey))
    txin = htlc_tx.inputs()[0]
    witness_program = bfh(Transaction.get_preimage_script(txin))
    txin.witness = make_htlc_tx_witness(remote_htlc_sig, local_htlc_sig, preimage, witness_program)
    return witness_script, htlc_tx
def create_sweeptx_their_ctx_htlc(ctx: Transaction, witness_script: bytes, sweep_address: str,
                                  preimage: Optional[bytes], output_idx: int,
                                  privkey: bytes, is_revocation: bool,
                                  cltv_expiry: int, config: SimpleConfig) -> Optional[PartialTransaction]:
    """Sweep an HTLC output of their commitment tx directly to *sweep_address*.

    *privkey* is either our htlc privkey (preimage/timeout path) or, when
    *is_revocation*, the revocation privkey. The tx locktime is set to
    *cltv_expiry* (0 when no timelock applies).
    Returns None if the output would be dust after fees.
    """
    assert type(cltv_expiry) is int
    preimage = preimage or b''
    val = ctx.outputs()[output_idx].value
    prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.witness_script = witness_script
    txin.script_sig = b''
    sweep_inputs = [txin]
    # Rough size estimate (bytes) used only for fee calculation.
    tx_size_bytes = 200
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold(): return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2, locktime=cltv_expiry)
    sig = bfh(tx.sign_txin(0, privkey))
    if not is_revocation:
        # Preimage path: witness is <sig> <preimage> <script>.
        witness = construct_witness([sig, preimage, witness_script])
    else:
        # Revocation path: witness is <sig> <revocation_pubkey> <script>.
        revocation_pubkey = privkey_to_pubkey(privkey)
        witness = construct_witness([sig, revocation_pubkey, witness_script])
    tx.inputs()[0].witness = bfh(witness)
    assert tx.is_complete()
    return tx
def create_sweeptx_their_ctx_to_remote(sweep_address: str, ctx: Transaction, output_idx: int,
                                       our_payment_privkey: ecc.ECPrivkey,
                                       config: SimpleConfig) -> Optional[PartialTransaction]:
    """Sweep our direct (p2wpkh) 'to_remote' output of their commitment tx.

    This output is not timelocked; it is signed with our payment key.
    Returns None if the output would be dust after fees.
    """
    our_payment_pubkey = our_payment_privkey.get_public_key_hex(compressed=True)
    val = ctx.outputs()[output_idx].value
    prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.script_type = 'p2wpkh'
    txin.pubkeys = [bfh(our_payment_pubkey)]
    txin.num_sig = 1
    sweep_inputs = [txin]
    # Rough size estimate (bytes) used only for fee calculation.
    tx_size_bytes = 110
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold(): return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    sweep_tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs)
    sweep_tx.set_rbf(True)
    sweep_tx.sign({our_payment_pubkey: (our_payment_privkey.get_secret_bytes(), True)})
    if not sweep_tx.is_complete():
        raise Exception('channel close sweep tx is not complete')
    return sweep_tx
def create_sweeptx_ctx_to_local(*, sweep_address: str, ctx: Transaction, output_idx: int, witness_script: str,
                                privkey: bytes, is_revocation: bool, config: SimpleConfig,
                                to_self_delay: Optional[int] = None) -> Optional[PartialTransaction]:
    """Create a tx that sweeps the 'to_local' output of a commitment tx.

    *privkey* is either the delayed-payment privkey (our own ctx; the CSV
    delay *to_self_delay* is then required and encoded in nSequence) or the
    revocation privkey when *is_revocation* (no delay). *witness_script* is
    hex-encoded. Returns None if the output would be dust after fees.
    """
    val = ctx.outputs()[output_idx].value
    prevout = TxOutpoint(txid=bfh(ctx.txid()), out_idx=output_idx)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.script_sig = b''
    txin.witness_script = bfh(witness_script)
    sweep_inputs = [txin]
    if not is_revocation:
        # Relative timelock: nSequence carries the to_self_delay (CSV).
        assert isinstance(to_self_delay, int)
        sweep_inputs[0].nsequence = to_self_delay
    # Rough size estimate (bytes) used only for fee calculation.
    tx_size_bytes = 121
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold():
        return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    sweep_tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2)
    sig = sweep_tx.sign_txin(0, privkey)
    witness = construct_witness([sig, int(is_revocation), witness_script])
    sweep_tx.inputs()[0].witness = bfh(witness)
    return sweep_tx
def create_sweeptx_that_spends_htlctx_that_spends_htlc_in_ctx(*,
        htlc_tx: Transaction, htlctx_witness_script: bytes, sweep_address: str,
        privkey: bytes, is_revocation: bool, to_self_delay: int,
        config: SimpleConfig) -> Optional[PartialTransaction]:
    """Sweep the (single) output of a second-stage HTLC transaction.

    Unless this is a revocation sweep, the output is CSV-encumbered, so
    nSequence is set to *to_self_delay*. Returns None if the output would be
    dust after fees.
    """
    val = htlc_tx.outputs()[0].value
    prevout = TxOutpoint(txid=bfh(htlc_tx.txid()), out_idx=0)
    txin = PartialTxInput(prevout=prevout)
    txin._trusted_value_sats = val
    txin.script_sig = b''
    txin.witness_script = htlctx_witness_script
    sweep_inputs = [txin]
    if not is_revocation:
        # Relative timelock: nSequence carries the to_self_delay (CSV).
        assert isinstance(to_self_delay, int)
        sweep_inputs[0].nsequence = to_self_delay
    # Rough size estimate (bytes) used only for fee calculation.
    tx_size_bytes = 200
    fee = config.estimate_fee(tx_size_bytes, allow_fallback_to_static_rates=True)
    outvalue = val - fee
    if outvalue <= dust_threshold(): return None
    sweep_outputs = [PartialTxOutput.from_address_and_value(sweep_address, outvalue)]
    tx = PartialTransaction.from_io(sweep_inputs, sweep_outputs, version=2)
    sig = bfh(tx.sign_txin(0, privkey))
    witness = construct_witness([sig, int(is_revocation), htlctx_witness_script])
    tx.inputs()[0].witness = bfh(witness)
    assert tx.is_complete()
    return tx
| true | true |
f71ae1ea2aec311b4f7e6cd58d35f326af88dcb8 | 892 | py | Python | python/projeto02/meusite/urls.py | WilliamDeveloper/udemy_cursos | f592bafbe3d2a5d631458f8c42151c880aadef17 | [
"MIT"
] | null | null | null | python/projeto02/meusite/urls.py | WilliamDeveloper/udemy_cursos | f592bafbe3d2a5d631458f8c42151c880aadef17 | [
"MIT"
] | null | null | null | python/projeto02/meusite/urls.py | WilliamDeveloper/udemy_cursos | f592bafbe3d2a5d631458f8c42151c880aadef17 | [
"MIT"
] | null | null | null | """meusite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from home import views
urlpatterns = [
    path('', views.index),  # site home page
    path('admin/', admin.site.urls),  # Django admin
    path('blog/', include('blog.urls')),  # blog app routes
    path('sobre/', include('sobre.urls')),  # "about" (sobre) app routes
]
| 34.307692 | 77 | 0.693946 | from django.contrib import admin
from django.urls import path, include
from home import views
urlpatterns = [
    path('', views.index),  # site home page
    path('admin/', admin.site.urls),  # Django admin
    path('blog/', include('blog.urls')),  # blog app routes
    path('sobre/', include('sobre.urls')),  # "about" (sobre) app routes
]
| true | true |
f71ae28487a6c137bf0a9c98196c4d1383a39139 | 489 | py | Python | nnwordembed.py | GLaDO8/pytorch_playground | 3623de18881a37ce413c92d8a63ea9ba1cc401a5 | [
"MIT"
] | 2 | 2019-02-06T18:07:47.000Z | 2020-08-12T21:56:50.000Z | nnwordembed.py | GLaDO8/pytorch_playground | 3623de18881a37ce413c92d8a63ea9ba1cc401a5 | [
"MIT"
] | null | null | null | nnwordembed.py | GLaDO8/pytorch_playground | 3623de18881a37ce413c92d8a63ea9ba1cc401a5 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)  # fixed seed so the randomly-initialised embeddings are reproducible
word_to_ix = {"hello": 0, "world": 1}
# Embedding table: one learnable 5-dimensional vector per vocabulary word
# (first arg = vocab size, second arg = embedding dimension).
embeds = nn.Embedding(2, 5)
indices = [word_to_ix["hello"], word_to_ix["world"]]
lookup_tensor = torch.tensor(indices, dtype=torch.long)
# Indexing the embedding module with a LongTensor returns the rows.
hello_embed = embeds(lookup_tensor)
print(hello_embed) | 37.615385 | 113 | 0.766871 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)  # fixed seed so the randomly-initialised embeddings are reproducible
word_to_ix = {"hello": 0, "world": 1}
# Embedding table: one learnable 5-dimensional vector per vocabulary word.
embeds = nn.Embedding(2, 5)
indices = [word_to_ix["hello"], word_to_ix["world"]]
lookup_tensor = torch.tensor(indices, dtype=torch.long)
# Indexing the embedding module with a LongTensor returns the rows.
hello_embed = embeds(lookup_tensor)
print(hello_embed) | true | true |
f71ae39173f45a9863447511633a81e7b7687552 | 1,529 | py | Python | Admin-Scripts/Video2.py | vijayshankarrealdeal/Java | 2dff1a79c91782bf2aeb1bee057b19c41cafd2a1 | [
"MIT"
] | 3 | 2021-03-07T16:29:35.000Z | 2021-03-22T07:41:04.000Z | Admin-Scripts/Video2.py | vijayshankarrealdeal/Java | 2dff1a79c91782bf2aeb1bee057b19c41cafd2a1 | [
"MIT"
] | null | null | null | Admin-Scripts/Video2.py | vijayshankarrealdeal/Java | 2dff1a79c91782bf2aeb1bee057b19c41cafd2a1 | [
"MIT"
] | 2 | 2021-03-08T06:12:52.000Z | 2021-03-14T05:01:19.000Z | import firebase_admin
from firebase_admin import credentials,firestore
from firebase_admin import storage
# Service-account credentials are loaded from a local JSON key file.
cred = credentials.Certificate("./adminKey.json")
firebase_admin.initialize_app(cred, {
    'storageBucket': 'women-e598c.appspot.com'
})
#Database Methods
db = firestore.client()
#discrip = ""
# Metadata for the video document that will be written to Firestore.
title = "GenderEquality"
cloudStorageLink = "https://firebasestorage.googleapis.com/v0/b/women-e598c.appspot.com/o/y2mate.com%20-%20Melinda%20Gates%20On%20Marriage%20Gender%20Equality%20%20Solving%20Tough%20Problems.mp4?alt=media&token=82126c96-8141-4634-97ae-85a4725913b5"
name = "Business Insider"
source = "YouTube"
sourceLink = "https://www.youtube.com/watch?v=BuYfALzDPrY"
discription = "Gender equality is key. That means having balanced relationships where both partners split the workload at home. This is something that even Melinda and Bill have had to work at. Gates details all these findings in her new book, The Moment of Lift."
# NOTE(review): the comma-separated literal makes this a 3-tuple
# (648, 568, 59947), not a single view count — confirm intent.
viewsOnVideo = 648,568,59947
socialHandle = " https://read.bi/2xCnzGF"
webpage = "https://www.businessinsider.com"
# Write the document only when the required fields are non-empty.
if(len(title)!=0 and len(cloudStorageLink)!=0):
    videsoWrite = db.collection("adminContent").document("Videos").collection("data").document().set({
        "title":title,
        "name":name,
        "source":source,
        "sourceLink":sourceLink,
        "discription":discription,
        "viewsOnVideo":viewsOnVideo,
        "socialHandle":socialHandle,
        "webpage":webpage,
        "cloudStorageLink":cloudStorageLink
    })
else:
    print("Error")
| 40.236842 | 264 | 0.737083 | import firebase_admin
from firebase_admin import credentials,firestore
from firebase_admin import storage
# Service-account credentials are loaded from a local JSON key file.
cred = credentials.Certificate("./adminKey.json")
firebase_admin.initialize_app(cred, {
    'storageBucket': 'women-e598c.appspot.com'
})
# Firestore client used for the document write below.
db = firestore.client()
# Metadata for the video document that will be written to Firestore.
title = "GenderEquality"
cloudStorageLink = "https://firebasestorage.googleapis.com/v0/b/women-e598c.appspot.com/o/y2mate.com%20-%20Melinda%20Gates%20On%20Marriage%20Gender%20Equality%20%20Solving%20Tough%20Problems.mp4?alt=media&token=82126c96-8141-4634-97ae-85a4725913b5"
name = "Business Insider"
source = "YouTube"
sourceLink = "https://www.youtube.com/watch?v=BuYfALzDPrY"
discription = "Gender equality is key. That means having balanced relationships where both partners split the workload at home. This is something that even Melinda and Bill have had to work at. Gates details all these findings in her new book, The Moment of Lift."
# NOTE(review): the comma-separated literal makes this a 3-tuple
# (648, 568, 59947), not a single view count — confirm intent.
viewsOnVideo = 648,568,59947
socialHandle = " https://read.bi/2xCnzGF"
webpage = "https://www.businessinsider.com"
# Write the document only when the required fields are non-empty.
if(len(title)!=0 and len(cloudStorageLink)!=0):
    videsoWrite = db.collection("adminContent").document("Videos").collection("data").document().set({
        "title":title,
        "name":name,
        "source":source,
        "sourceLink":sourceLink,
        "discription":discription,
        "viewsOnVideo":viewsOnVideo,
        "socialHandle":socialHandle,
        "webpage":webpage,
        "cloudStorageLink":cloudStorageLink
    })
else:
    print("Error")
| true | true |
f71ae3b9a7a99ffd6aec250a1fe54db87c9201ae | 15,356 | py | Python | src/python/analyzer_executor/src/analyzer_executor_lib/analyzer_executor.py | inickles/grapl | f906aba74b2249c9c7d7b1afe6fc540551cdee8b | [
"Apache-2.0"
] | 313 | 2018-10-15T05:58:39.000Z | 2020-04-21T20:31:39.000Z | src/python/analyzer_executor/src/analyzer_executor_lib/analyzer_executor.py | graplsec/grapl | 68386b425c8e9e34f7380a078279b67b316fe2a0 | [
"Apache-2.0"
] | 33 | 2018-10-16T00:47:10.000Z | 2020-03-16T22:32:45.000Z | src/python/analyzer_executor/src/analyzer_executor_lib/analyzer_executor.py | graplsec/grapl | 68386b425c8e9e34f7380a078279b67b316fe2a0 | [
"Apache-2.0"
] | 29 | 2018-11-18T08:39:14.000Z | 2020-04-09T20:59:15.000Z | from __future__ import annotations
import base64
import hashlib
import inspect
import json
import os
import sys
import traceback
from collections import defaultdict
from logging import Logger
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
cast,
)
import boto3
import grapl_analyzerlib.counters # noqa: F401
from analyzer_executor_lib.redis_cache import EitherCache, construct_redis_client
from grapl_analyzerlib.analyzer import Analyzer
from grapl_analyzerlib.execution import ExecutionComplete, ExecutionFailed, ExecutionHit
from grapl_analyzerlib.grapl_client import GraphClient
from grapl_analyzerlib.nodes.base import BaseView
from grapl_analyzerlib.plugin_retriever import load_plugins
from grapl_analyzerlib.queryable import Queryable
from grapl_analyzerlib.subgraph_view import SubgraphView
from grapl_common.env_helpers import S3ResourceFactory
from grapl_common.grapl_logger import get_module_grapl_logger
from grapl_common.metrics.metric_reporter import MetricReporter, TagPair
from grapl_common.sqs.sqs_types import S3PutRecordDict, SQSMessageBody
from python_proto.pipeline import Metadata, OldEnvelope
if TYPE_CHECKING:
from mypy_boto3_s3 import S3ServiceResource
# Set up logger (this is for the whole file, including static methods)
LOGGER = get_module_grapl_logger()
# Set up plugins dir for models; analyzer model plugins are imported from here.
MODEL_PLUGINS_DIR = os.getenv("MODEL_PLUGINS_DIR", "/tmp")
sys.path.insert(0, MODEL_PLUGINS_DIR)
# Ensure plugins dir exists
try:
    directory = Path(MODEL_PLUGINS_DIR + "/model_plugins/")
    directory.mkdir(parents=True, exist_ok=True)
except Exception as e:
    # BUG FIX: Logger.error takes %-style format args; the original passed the
    # exception as a positional arg with no placeholder, which breaks log
    # record formatting. Interpolate it via a placeholder instead.
    LOGGER.error("Failed to create model plugins directory: %s", e)
def verbose_cast_to_int(input: Optional[str]) -> Optional[int]:
    """Parse an env-var string as an int.

    Returns None when *input* is None or empty. Raises ValueError (carrying
    the offending value in its message) when the string is not an integer.
    """
    # Empty string and None both mean "unset" for environment variables.
    if not input:
        return None
    try:
        parsed = int(input)
    except (TypeError, ValueError):
        raise ValueError(f"Couldn't cast this env variable into an int: {input}")
    return parsed
class AnalyzerExecutor:
    """Downloads analyzers from S3 and executes them against incoming subgraphs.

    Matches are uploaded to the analyzer-matched-subgraphs bucket. Two caches
    are consulted to avoid duplicate work: a message cache (message already
    processed) and a hit cache (analyzer already matched this node).
    """
    # constants
    # Smaller chunks on retry; default chunk size otherwise.
    CHUNK_SIZE_RETRY: int = 10
    CHUNK_SIZE_DEFAULT: int = 100
    def __init__(
        self,
        model_plugins_bucket: str,
        analyzers_bucket: str,
        analyzer_matched_subgraphs_bucket: str,
        message_cache: EitherCache,
        hit_cache: EitherCache,
        chunk_size: int,
        logger: Logger,
        metric_reporter: MetricReporter,
    ) -> None:
        """Store configuration; no I/O happens here."""
        self.model_plugins_bucket = model_plugins_bucket
        self.analyzers_bucket = analyzers_bucket
        self.analyzer_matched_subgraphs_bucket = analyzer_matched_subgraphs_bucket
        self.message_cache = message_cache
        self.hit_cache = hit_cache
        self.chunk_size = chunk_size
        self.logger = logger
        self.metric_reporter = metric_reporter
    @classmethod
    def from_env(cls, env: Optional[Mapping[str, str]] = None) -> AnalyzerExecutor:
        """Build an AnalyzerExecutor from environment variables.

        *env* defaults to os.environ; bucket names are required keys, cache
        addresses/ports are optional (falling back per construct_redis_client).
        """
        env = env or os.environ
        # If we're retrying, change the chunk size
        is_retry = bool(env.get("IS_RETRY", False))
        if is_retry:
            chunk_size = cls.CHUNK_SIZE_RETRY
        else:
            chunk_size = cls.CHUNK_SIZE_DEFAULT
        # Set up message cache
        messagecache_addr = env.get("MESSAGECACHE_ADDR")
        messagecache_port: Optional[int] = verbose_cast_to_int(
            env.get("MESSAGECACHE_PORT")
        )
        message_cache = construct_redis_client(messagecache_addr, messagecache_port)
        # Set up hit cache
        hitcache_addr = env.get("HITCACHE_ADDR")
        hitcache_port: Optional[int] = verbose_cast_to_int(env.get("HITCACHE_PORT"))
        hit_cache = construct_redis_client(hitcache_addr, hitcache_port)
        metric_reporter = MetricReporter.create("analyzer-executor")
        model_plugins_bucket = env["GRAPL_MODEL_PLUGINS_BUCKET"]
        analyzers_bucket = env["GRAPL_ANALYZERS_BUCKET"]
        analyzer_matched_subgraphs_bucket = env[
            "GRAPL_ANALYZER_MATCHED_SUBGRAPHS_BUCKET"
        ]
        return AnalyzerExecutor(
            model_plugins_bucket=model_plugins_bucket,
            analyzers_bucket=analyzers_bucket,
            analyzer_matched_subgraphs_bucket=analyzer_matched_subgraphs_bucket,
            message_cache=message_cache,
            hit_cache=hit_cache,
            chunk_size=chunk_size,
            logger=LOGGER,
            metric_reporter=metric_reporter,
        )
    def check_caches(
        self, file_hash: str, msg_id: str, node_key: str, analyzer_name: str
    ) -> bool:
        """Return True if this (message, node) was already processed or matched."""
        with self.metric_reporter.histogram_ctx("analyzer-executor.check_caches"):
            if self.check_msg_cache(file_hash, node_key, msg_id):
                self.logger.debug("cache hit - already processed")
                return True
            if self.check_hit_cache(analyzer_name, node_key):
                self.logger.debug("cache hit - already matched")
                return True
            return False
    def to_event_hash(self, components: Iterable[str]) -> str:
        """Join *components* with commas and return the hex sha256 digest (cache key)."""
        joined = ",".join(components)
        event_hash = hashlib.sha256(joined.encode()).hexdigest()
        return event_hash
    def check_msg_cache(self, file: str, node_key: str, msg_id: str) -> bool:
        """True if this (analyzer file, node, message) triple was already processed."""
        event_hash = self.to_event_hash((file, node_key, msg_id))
        return bool(self.message_cache.get(event_hash))
    def update_msg_cache(self, file: str, node_key: str, msg_id: str) -> None:
        """Mark this (analyzer file, node, message) triple as processed."""
        event_hash = self.to_event_hash((file, node_key, msg_id))
        self.message_cache.set(event_hash, "1")
    def check_hit_cache(self, file: str, node_key: str) -> bool:
        """True if this analyzer already produced a hit for this node."""
        event_hash = self.to_event_hash((file, node_key))
        return bool(self.hit_cache.get(event_hash))
    def update_hit_cache(self, file: str, node_key: str) -> None:
        """Mark this (analyzer, node) pair as having matched."""
        event_hash = self.to_event_hash((file, node_key))
        self.hit_cache.set(event_hash, "1")
    async def handle_events(self, events: SQSMessageBody, context: Any) -> None:
        """Entrypoint for an SQS batch: for each S3 record, download the
        analyzer source, run it against the message's subgraph in a child
        process, and emit any ExecutionHits to S3."""
        # Parse sns message
        self.logger.debug(f"handling events: {events} context: {context}")
        client = GraphClient()
        s3 = S3ResourceFactory(boto3).from_env()
        # Make model plugin modules importable before analyzers are exec'd.
        load_plugins(
            self.model_plugins_bucket,
            s3.meta.client,
            os.path.abspath(MODEL_PLUGINS_DIR),
        )
        for event in events["Records"]:
            data = parse_s3_event(s3, event)
            # FIXME: this code assumes inner_message is json
            envelope = OldEnvelope.deserialize(data)
            message = json.loads(envelope.inner_message)
            LOGGER.info(f'Executing Analyzer: {message["key"]}')
            with self.metric_reporter.histogram_ctx(
                "analyzer-executor.download_s3_file"
            ):
                analyzer = download_s3_file(
                    s3,
                    self.analyzers_bucket,
                    message["key"],
                ).decode("utf8")
            # Key layout puts the analyzer's name in the second-to-last path segment.
            analyzer_name = message["key"].split("/")[-2]
            subgraph = SubgraphView.from_proto(client, bytes(message["subgraph"]))
            # TODO: Validate signature of S3 file
            LOGGER.info(f"event {event} {envelope.metadata}")
            # Run the analyzer in a separate process; hits stream back over a pipe.
            rx: Connection
            tx: Connection
            rx, tx = Pipe(duplex=False)
            p = Process(
                target=self.execute_file,
                args=(analyzer_name, analyzer, subgraph, tx, "", self.chunk_size),
            )
            p.start()
            for exec_hit in self.poll_process(rx=rx, analyzer_name=analyzer_name):
                with self.metric_reporter.histogram_ctx(
                    "analyzer-executor.emit_event.ms",
                    (TagPair("analyzer_name", exec_hit.analyzer_name),),
                ):
                    emit_event(
                        self.analyzer_matched_subgraphs_bucket,
                        s3,
                        exec_hit,
                        envelope.metadata,
                    )
                self.update_msg_cache(analyzer, exec_hit.root_node_key, message["key"])
                self.update_hit_cache(analyzer_name, exec_hit.root_node_key)
            p.join()
    def poll_process(
        self,
        rx: Connection,
        analyzer_name: str,
    ) -> Iterator[ExecutionHit]:
        """
        Keep polling the spawned Process, and yield any ExecutionHits.
        (This will probably disappear if Analyzers move to Docker images.)
        """
        t = 0
        while True:
            p_res = rx.poll(timeout=5)
            if not p_res:
                t += 1
                LOGGER.info(
                    f"Analyzer {analyzer_name} polled for for {t * 5} seconds without result"
                )
                continue
            result: Optional[Any] = rx.recv()
            # ExecutionComplete ends the stream; ExecutionFailed aborts it.
            if isinstance(result, ExecutionComplete):
                self.logger.info(f"Analyzer {analyzer_name} execution complete")
                return
            # emit any hits to an S3 bucket
            if isinstance(result, ExecutionHit):
                self.logger.info(
                    f"Analyzer {analyzer_name} emitting event for:"
                    f"{result.analyzer_name} {result.root_node_key}"
                )
                yield result
            assert not isinstance(
                result, ExecutionFailed
            ), f"Analyzer {analyzer_name} failed."
    def exec_analyzers(
        self,
        dg_client: GraphClient,
        file: str,
        msg_id: str,
        nodes: List[BaseView],
        analyzers: Dict[str, Analyzer],
        sender: Any,
    ) -> None:
        """Run every analyzer's queries against every node in *nodes*.

        Cache-skips already-handled (analyzer, node) pairs; on a query hit,
        invokes the analyzer's on_response(), which reports through *sender*.
        """
        if not analyzers:
            self.logger.warning("Received empty dict of analyzers")
            return
        if not nodes:
            self.logger.warning("Received empty array of nodes")
        for node in nodes:
            # Collect the queries each analyzer wants to run for this node.
            querymap: Dict[str, List[Queryable]] = defaultdict(list)
            for an_name, analyzer in analyzers.items():
                if self.check_caches(file, msg_id, node.node_key, an_name):
                    continue
                queries = analyzer.get_queries()
                if isinstance(queries, list) or isinstance(queries, tuple):
                    querymap[an_name].extend(queries)
                else:
                    querymap[an_name].append(queries)
            for an_name, queries in querymap.items():
                analyzer = analyzers[an_name]
                for query in queries:
                    # TODO: Whether it was a hit or not is a good Tag
                    tags = (TagPair("analyzer_name", an_name),)
                    with self.metric_reporter.histogram_ctx(
                        "analyzer-executor.query_first.ms", tags
                    ):
                        response = query.query_first(
                            dg_client, contains_node_key=node.node_key
                        )
                    if response:
                        self.logger.debug(
                            f"Analyzer '{an_name}' received a hit, executing on_response()"
                        )
                        with self.metric_reporter.histogram_ctx(
                            "analyzer-executor.on_response.ms", tags
                        ):
                            analyzer.on_response(response, sender)
    def execute_file(
        self,
        name: str,
        file: str,
        graph: SubgraphView,
        sender: Connection,
        msg_id: str,
        chunk_size: int,
    ) -> None:
        """Run in the child process: exec() the analyzer source *file* and run
        it over *graph* in chunks of *chunk_size* nodes.

        Results and final status (ExecutionComplete / ExecutionFailed) are
        sent back through *sender*.
        """
        try:
            pool = ThreadPool(processes=4)
            # NOTE: analyzer source is exec()'d into this module's globals; it
            # is treated as trusted code fetched from the analyzers bucket.
            exec(file, globals())
            client = GraphClient()
            analyzers = get_analyzer_objects(client)
            if not analyzers:
                self.logger.warning(f"Got no analyzers for file: {name}")
            self.logger.info(f"Executing analyzers: {[an for an in analyzers.keys()]}")
            for nodes in chunker([n for n in graph.node_iter()], chunk_size):
                self.logger.info(f"Querying {len(nodes)} nodes")
                def exec_analyzer(
                    nodes: List[BaseView], sender: Connection
                ) -> List[BaseView]:
                    # Thread worker: run analyzers over one chunk of nodes.
                    try:
                        self.exec_analyzers(
                            client, file, msg_id, nodes, analyzers, sender
                        )
                        return nodes
                    except Exception as e:
                        self.logger.error(traceback.format_exc())
                        self.logger.error(
                            f"Execution of {name} failed with {e} {e.args}"
                        )
                        sender.send(ExecutionFailed())
                        raise
                pool.apply_async(exec_analyzer, args=(nodes, sender))
            pool.close()
            pool.join()
            sender.send(ExecutionComplete())
        except Exception as e:
            self.logger.error(traceback.format_exc())
            self.logger.error(f"Execution of {name} failed with {e} {e.args}")
            sender.send(ExecutionFailed())
            raise
def parse_s3_event(s3: S3ServiceResource, event: S3PutRecordDict) -> bytes:
    """Extract the (bucket, key) referenced by an S3 put-event record and
    download that object's raw bytes.

    Raises KeyError (after logging) if the record lacks the expected shape.
    """
    try:
        bucket = event["s3"]["bucket"]["name"]
        key = event["s3"]["object"]["key"]
    except KeyError:
        # BUG FIX: logging uses %-style formatting; the original message
        # contained a stray "{}" placeholder that was never filled. The
        # traceback is carried by exc_info.
        LOGGER.error("Could not parse s3 event", exc_info=True)
        raise
    return download_s3_file(s3, bucket, key)
def download_s3_file(s3: S3ServiceResource, bucket: str, key: str) -> bytes:
    """Fetch s3://*bucket*/*key* and return the object body as raw bytes."""
    response = s3.Object(bucket, key).get()
    return cast(bytes, response["Body"].read())
def is_analyzer(analyzer_name: str, analyzer_cls: type) -> bool:
    """Return True iff *analyzer_cls* looks like a concrete Analyzer.

    The abstract base (named "Analyzer") is excluded; anything else counts
    when it exposes the get_queries/build/on_response trio.
    """
    if analyzer_name == "Analyzer":  # This is the base class
        return False
    required_attrs = ("get_queries", "build", "on_response")
    return all(hasattr(analyzer_cls, attr) for attr in required_attrs)
def get_analyzer_objects(dgraph_client: GraphClient) -> Dict[str, Analyzer]:
    """Instantiate every concrete Analyzer class defined in this module.

    Relies on analyzer source having been exec()'d into this module's
    namespace first. Returns a mapping of class name -> built Analyzer.
    """
    members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
    built: Dict[str, Analyzer] = {}
    for cls_name, cls in members:
        if is_analyzer(cls_name, cls):
            built[cls_name] = cls.build(dgraph_client)
    return built
def chunker(seq: List[BaseView], size: int) -> List[List[BaseView]]:
    """Split *seq* into consecutive sublists of at most *size* elements."""
    chunks: List[List[BaseView]] = []
    for start in range(0, len(seq), size):
        chunks.append(seq[start : start + size])
    return chunks
def emit_event(
    analyzer_matched_subgraphs_bucket: str,
    s3: S3ServiceResource,
    event: ExecutionHit,
    metadata: Metadata,
) -> None:
    """Serialize an analyzer match and upload it to the matched-subgraphs bucket.

    The S3 key is the url-safe base64 sha256 of the serialized payload, so
    identical hits overwrite the same object (content-addressed dedup).
    """
    LOGGER.info(f"emitting event for: {event.analyzer_name, event.nodes}")
    meta_dict = {
        "trace_id": str(metadata.trace_id),
    }
    # event.nodes / event.edges are JSON strings; re-parse so the final
    # payload nests them as objects rather than embedded strings.
    event_s = json.dumps(
        {
            "nodes": json.loads(event.nodes),
            "edges": json.loads(event.edges),
            "analyzer_name": event.analyzer_name,
            "risk_score": event.risk_score,
            "lenses": event.lenses,
            "risky_node_keys": event.risky_node_keys,
            "metadata": meta_dict,
        }
    )
    event_hash = hashlib.sha256(event_s.encode())
    key = base64.urlsafe_b64encode(event_hash.digest()).decode("utf-8")
    obj = s3.Object(analyzer_matched_subgraphs_bucket, key)
    obj.put(Body=event_s.encode("utf-8"))
| 34.276786 | 93 | 0.604259 | from __future__ import annotations
import base64
import hashlib
import inspect
import json
import os
import sys
import traceback
from collections import defaultdict
from logging import Logger
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
Iterator,
List,
Mapping,
Optional,
cast,
)
import boto3
import grapl_analyzerlib.counters
from analyzer_executor_lib.redis_cache import EitherCache, construct_redis_client
from grapl_analyzerlib.analyzer import Analyzer
from grapl_analyzerlib.execution import ExecutionComplete, ExecutionFailed, ExecutionHit
from grapl_analyzerlib.grapl_client import GraphClient
from grapl_analyzerlib.nodes.base import BaseView
from grapl_analyzerlib.plugin_retriever import load_plugins
from grapl_analyzerlib.queryable import Queryable
from grapl_analyzerlib.subgraph_view import SubgraphView
from grapl_common.env_helpers import S3ResourceFactory
from grapl_common.grapl_logger import get_module_grapl_logger
from grapl_common.metrics.metric_reporter import MetricReporter, TagPair
from grapl_common.sqs.sqs_types import S3PutRecordDict, SQSMessageBody
from python_proto.pipeline import Metadata, OldEnvelope
if TYPE_CHECKING:
from mypy_boto3_s3 import S3ServiceResource
LOGGER = get_module_grapl_logger()
MODEL_PLUGINS_DIR = os.getenv("MODEL_PLUGINS_DIR", "/tmp")
sys.path.insert(0, MODEL_PLUGINS_DIR)
try:
directory = Path(MODEL_PLUGINS_DIR + "/model_plugins/")
directory.mkdir(parents=True, exist_ok=True)
except Exception as e:
LOGGER.error("Failed to create model plugins directory", e)
def verbose_cast_to_int(input: Optional[str]) -> Optional[int]:
if not input:
return None
try:
return int(input)
except (TypeError, ValueError):
raise ValueError(f"Couldn't cast this env variable into an int: {input}")
class AnalyzerExecutor:
# constants
CHUNK_SIZE_RETRY: int = 10
CHUNK_SIZE_DEFAULT: int = 100
def __init__(
self,
model_plugins_bucket: str,
analyzers_bucket: str,
analyzer_matched_subgraphs_bucket: str,
message_cache: EitherCache,
hit_cache: EitherCache,
chunk_size: int,
logger: Logger,
metric_reporter: MetricReporter,
) -> None:
self.model_plugins_bucket = model_plugins_bucket
self.analyzers_bucket = analyzers_bucket
self.analyzer_matched_subgraphs_bucket = analyzer_matched_subgraphs_bucket
self.message_cache = message_cache
self.hit_cache = hit_cache
self.chunk_size = chunk_size
self.logger = logger
self.metric_reporter = metric_reporter
@classmethod
def from_env(cls, env: Optional[Mapping[str, str]] = None) -> AnalyzerExecutor:
env = env or os.environ
# If we're retrying, change the chunk size
is_retry = bool(env.get("IS_RETRY", False))
if is_retry:
chunk_size = cls.CHUNK_SIZE_RETRY
else:
chunk_size = cls.CHUNK_SIZE_DEFAULT
messagecache_addr = env.get("MESSAGECACHE_ADDR")
messagecache_port: Optional[int] = verbose_cast_to_int(
env.get("MESSAGECACHE_PORT")
)
message_cache = construct_redis_client(messagecache_addr, messagecache_port)
hitcache_addr = env.get("HITCACHE_ADDR")
hitcache_port: Optional[int] = verbose_cast_to_int(env.get("HITCACHE_PORT"))
hit_cache = construct_redis_client(hitcache_addr, hitcache_port)
metric_reporter = MetricReporter.create("analyzer-executor")
model_plugins_bucket = env["GRAPL_MODEL_PLUGINS_BUCKET"]
analyzers_bucket = env["GRAPL_ANALYZERS_BUCKET"]
analyzer_matched_subgraphs_bucket = env[
"GRAPL_ANALYZER_MATCHED_SUBGRAPHS_BUCKET"
]
return AnalyzerExecutor(
model_plugins_bucket=model_plugins_bucket,
analyzers_bucket=analyzers_bucket,
analyzer_matched_subgraphs_bucket=analyzer_matched_subgraphs_bucket,
message_cache=message_cache,
hit_cache=hit_cache,
chunk_size=chunk_size,
logger=LOGGER,
metric_reporter=metric_reporter,
)
def check_caches(
self, file_hash: str, msg_id: str, node_key: str, analyzer_name: str
) -> bool:
with self.metric_reporter.histogram_ctx("analyzer-executor.check_caches"):
if self.check_msg_cache(file_hash, node_key, msg_id):
self.logger.debug("cache hit - already processed")
return True
if self.check_hit_cache(analyzer_name, node_key):
self.logger.debug("cache hit - already matched")
return True
return False
def to_event_hash(self, components: Iterable[str]) -> str:
joined = ",".join(components)
event_hash = hashlib.sha256(joined.encode()).hexdigest()
return event_hash
def check_msg_cache(self, file: str, node_key: str, msg_id: str) -> bool:
event_hash = self.to_event_hash((file, node_key, msg_id))
return bool(self.message_cache.get(event_hash))
def update_msg_cache(self, file: str, node_key: str, msg_id: str) -> None:
event_hash = self.to_event_hash((file, node_key, msg_id))
self.message_cache.set(event_hash, "1")
def check_hit_cache(self, file: str, node_key: str) -> bool:
event_hash = self.to_event_hash((file, node_key))
return bool(self.hit_cache.get(event_hash))
def update_hit_cache(self, file: str, node_key: str) -> None:
event_hash = self.to_event_hash((file, node_key))
self.hit_cache.set(event_hash, "1")
async def handle_events(self, events: SQSMessageBody, context: Any) -> None:
self.logger.debug(f"handling events: {events} context: {context}")
client = GraphClient()
s3 = S3ResourceFactory(boto3).from_env()
load_plugins(
self.model_plugins_bucket,
s3.meta.client,
os.path.abspath(MODEL_PLUGINS_DIR),
)
for event in events["Records"]:
data = parse_s3_event(s3, event)
envelope = OldEnvelope.deserialize(data)
message = json.loads(envelope.inner_message)
LOGGER.info(f'Executing Analyzer: {message["key"]}')
with self.metric_reporter.histogram_ctx(
"analyzer-executor.download_s3_file"
):
analyzer = download_s3_file(
s3,
self.analyzers_bucket,
message["key"],
).decode("utf8")
analyzer_name = message["key"].split("/")[-2]
subgraph = SubgraphView.from_proto(client, bytes(message["subgraph"]))
LOGGER.info(f"event {event} {envelope.metadata}")
rx: Connection
tx: Connection
rx, tx = Pipe(duplex=False)
p = Process(
target=self.execute_file,
args=(analyzer_name, analyzer, subgraph, tx, "", self.chunk_size),
)
p.start()
for exec_hit in self.poll_process(rx=rx, analyzer_name=analyzer_name):
with self.metric_reporter.histogram_ctx(
"analyzer-executor.emit_event.ms",
(TagPair("analyzer_name", exec_hit.analyzer_name),),
):
emit_event(
self.analyzer_matched_subgraphs_bucket,
s3,
exec_hit,
envelope.metadata,
)
self.update_msg_cache(analyzer, exec_hit.root_node_key, message["key"])
self.update_hit_cache(analyzer_name, exec_hit.root_node_key)
p.join()
def poll_process(
self,
rx: Connection,
analyzer_name: str,
) -> Iterator[ExecutionHit]:
t = 0
while True:
p_res = rx.poll(timeout=5)
if not p_res:
t += 1
LOGGER.info(
f"Analyzer {analyzer_name} polled for for {t * 5} seconds without result"
)
continue
result: Optional[Any] = rx.recv()
if isinstance(result, ExecutionComplete):
self.logger.info(f"Analyzer {analyzer_name} execution complete")
return
if isinstance(result, ExecutionHit):
self.logger.info(
f"Analyzer {analyzer_name} emitting event for:"
f"{result.analyzer_name} {result.root_node_key}"
)
yield result
assert not isinstance(
result, ExecutionFailed
), f"Analyzer {analyzer_name} failed."
def exec_analyzers(
self,
dg_client: GraphClient,
file: str,
msg_id: str,
nodes: List[BaseView],
analyzers: Dict[str, Analyzer],
sender: Any,
) -> None:
if not analyzers:
self.logger.warning("Received empty dict of analyzers")
return
if not nodes:
self.logger.warning("Received empty array of nodes")
for node in nodes:
querymap: Dict[str, List[Queryable]] = defaultdict(list)
for an_name, analyzer in analyzers.items():
if self.check_caches(file, msg_id, node.node_key, an_name):
continue
queries = analyzer.get_queries()
if isinstance(queries, list) or isinstance(queries, tuple):
querymap[an_name].extend(queries)
else:
querymap[an_name].append(queries)
for an_name, queries in querymap.items():
analyzer = analyzers[an_name]
for query in queries:
tags = (TagPair("analyzer_name", an_name),)
with self.metric_reporter.histogram_ctx(
"analyzer-executor.query_first.ms", tags
):
response = query.query_first(
dg_client, contains_node_key=node.node_key
)
if response:
self.logger.debug(
f"Analyzer '{an_name}' received a hit, executing on_response()"
)
with self.metric_reporter.histogram_ctx(
"analyzer-executor.on_response.ms", tags
):
analyzer.on_response(response, sender)
def execute_file(
self,
name: str,
file: str,
graph: SubgraphView,
sender: Connection,
msg_id: str,
chunk_size: int,
) -> None:
try:
pool = ThreadPool(processes=4)
exec(file, globals())
client = GraphClient()
analyzers = get_analyzer_objects(client)
if not analyzers:
self.logger.warning(f"Got no analyzers for file: {name}")
self.logger.info(f"Executing analyzers: {[an for an in analyzers.keys()]}")
for nodes in chunker([n for n in graph.node_iter()], chunk_size):
self.logger.info(f"Querying {len(nodes)} nodes")
def exec_analyzer(
nodes: List[BaseView], sender: Connection
) -> List[BaseView]:
try:
self.exec_analyzers(
client, file, msg_id, nodes, analyzers, sender
)
return nodes
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error(
f"Execution of {name} failed with {e} {e.args}"
)
sender.send(ExecutionFailed())
raise
pool.apply_async(exec_analyzer, args=(nodes, sender))
pool.close()
pool.join()
sender.send(ExecutionComplete())
except Exception as e:
self.logger.error(traceback.format_exc())
self.logger.error(f"Execution of {name} failed with {e} {e.args}")
sender.send(ExecutionFailed())
raise
def parse_s3_event(s3: S3ServiceResource, event: S3PutRecordDict) -> bytes:
try:
bucket = event["s3"]["bucket"]["name"]
key = event["s3"]["object"]["key"]
except KeyError:
LOGGER.error("Could not parse s3 event: {}", exc_info=True)
raise
return download_s3_file(s3, bucket, key)
def download_s3_file(s3: S3ServiceResource, bucket: str, key: str) -> bytes:
obj = s3.Object(bucket, key)
return cast(bytes, obj.get()["Body"].read())
def is_analyzer(analyzer_name: str, analyzer_cls: type) -> bool:
if analyzer_name == "Analyzer":
return False
return (
hasattr(analyzer_cls, "get_queries")
and hasattr(analyzer_cls, "build")
and hasattr(analyzer_cls, "on_response")
)
def get_analyzer_objects(dgraph_client: GraphClient) -> Dict[str, Analyzer]:
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
return {
an[0]: an[1].build(dgraph_client)
for an in clsmembers
if is_analyzer(an[0], an[1])
}
def chunker(seq: List[BaseView], size: int) -> List[List[BaseView]]:
return [seq[pos : pos + size] for pos in range(0, len(seq), size)]
def emit_event(
analyzer_matched_subgraphs_bucket: str,
s3: S3ServiceResource,
event: ExecutionHit,
metadata: Metadata,
) -> None:
LOGGER.info(f"emitting event for: {event.analyzer_name, event.nodes}")
meta_dict = {
"trace_id": str(metadata.trace_id),
}
event_s = json.dumps(
{
"nodes": json.loads(event.nodes),
"edges": json.loads(event.edges),
"analyzer_name": event.analyzer_name,
"risk_score": event.risk_score,
"lenses": event.lenses,
"risky_node_keys": event.risky_node_keys,
"metadata": meta_dict,
}
)
event_hash = hashlib.sha256(event_s.encode())
key = base64.urlsafe_b64encode(event_hash.digest()).decode("utf-8")
obj = s3.Object(analyzer_matched_subgraphs_bucket, key)
obj.put(Body=event_s.encode("utf-8"))
| true | true |
f71ae3c9f30e5074c6179b84ba1638c42566b5fa | 6,730 | py | Python | randomdest.py | gbowerman/random-destination | 88f6a6a6e6cf971cb9d4bea477b093a4b0eef84e | [
"MIT"
] | null | null | null | randomdest.py | gbowerman/random-destination | 88f6a6a6e6cf971cb9d4bea477b093a4b0eef84e | [
"MIT"
] | null | null | null | randomdest.py | gbowerman/random-destination | 88f6a6a6e6cf971cb9d4bea477b093a4b0eef84e | [
"MIT"
] | null | null | null | """randomdest.py - dearpygui app to plot random destinations"""
import math
import os
import random
import requests
from dotenv import load_dotenv
from dearpygui.core import *
from dearpygui.simple import *
# globals/constants
EARTH_RADIUS = 6378.1
MAX_DIST = 16 # destination radius in KM
maps_key = ""
BASE_URL = "https://dev.virtualearth.net/REST/v1/Imagery/Map/AerialWithLabels/"
zoom = "18"
distance = MAX_DIST
img_size_x = 900
img_size_y = 900
img_file_name = "pic1.png"
def plot_location(latitude, longitude, bearing, distance):
    """Great-circle destination point.

    Starting at (latitude, longitude) in degrees, travel *distance* km along
    the initial *bearing* (degrees clockwise from north) and return the
    resulting [latitude, longitude], each rounded to 6 decimal places.
    """
    theta = math.radians(bearing)
    phi1 = math.radians(latitude)
    lam1 = math.radians(longitude)
    # Angular distance travelled, in radians.
    delta = distance / EARTH_RADIUS
    phi2 = math.asin(
        math.sin(phi1) * math.cos(delta)
        + math.cos(phi1) * math.sin(delta) * math.cos(theta)
    )
    lam2 = lam1 + math.atan2(
        math.sin(theta) * math.sin(delta) * math.cos(phi1),
        math.cos(delta) - math.sin(phi1) * math.sin(phi2),
    )
    return [round(math.degrees(phi2), 6), round(math.degrees(lam2), 6)]
def get_random_location(latitude, longitude, radius_km):
    """Pick a random bearing and distance (up to *radius_km*) from the start
    point and return the resulting coordinates as [lat, lon]."""
    # show_route later reads the module-level `distance` to choose a zoom level.
    global distance
    bearing = round(random.uniform(0, 360), 3)
    distance = round(random.uniform(0, radius_km), 3)
    # Surface the rolled bearing/distance in the UI label.
    set_value("bearing_label", f"Bearing: {str(bearing)}°, Distance: {str(distance)} km")
    return plot_location(latitude, longitude, bearing, distance)
def get_image(coords):
    """Download a Bing static-map image centered on *coords* ("lat,lon") and
    save it to img_file_name."""
    url = (
        f"{BASE_URL}{coords}/{zoom}?mapSize={str(img_size_x)},{str(img_size_y)}"
        f"&pp={coords};;1&dcl=1&key={maps_key}"
    )
    with open(img_file_name, "wb") as handler:
        handler.write(requests.get(url).content)
def get_route_image(coords1, coords2, zoom, midpoint):
    """Download a Bing driving-route map between *coords1* and *coords2*,
    centered on *midpoint*, and save it to img_file_name.

    NOTE: the *zoom* parameter deliberately shadows the module-level zoom.
    """
    url = (
        f"{BASE_URL}{midpoint}/{zoom}/Routes/Driving?waypoint.1={coords1}"
        f"&waypoint.2={coords2}&mapSize={str(img_size_x)},{str(img_size_y)}"
        f"&imagerySet=AerialWithLabels&key={maps_key}"
    )
    with open(img_file_name, "wb") as handler:
        handler.write(requests.get(url).content)
def get_midpoint(coord1, coord2):
    """Return the point halfway between two "lat,lon" strings as a "lat,lon" string."""
    lat1, lon1 = (float(part) for part in coord1.split(','))
    lat2, lon2 = (float(part) for part in coord2.split(','))
    mid_lat = lat1 + (lat2 - lat1) / 2
    mid_lon = lon1 + (lon2 - lon1) / 2
    return f"{str(mid_lat)},{str(mid_lon)}"
def show_origin(sender, callback):
    """dearpygui callback: fetch and display a map image of the origin coordinates."""
    origin = get_value("Coords")
    get_image(origin)
    # Redraw the canvas with the freshly downloaded image.
    clear_drawing("canvas")
    draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
def show_destination(sender, callback):
    """dearpygui callback: fetch and display a map image of the destination."""
    destination = get_value("destination_text")
    if len(destination) < 3:
        print("No destination")  # TODO: surface this as a popup instead
        return
    get_image(destination)
    # Redraw the canvas with the freshly downloaded image.
    clear_drawing("canvas")
    draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
def show_route(sender, callback):
    """dearpygui callback: draw the driving route from origin to destination."""
    origin = get_value("Coords")
    destination = get_value("destination_text")
    if len(destination) < 3:
        print("No destination")  # TODO: surface this as a popup instead
        return
    midpoint = get_midpoint(origin, destination)
    # Pick a zoom level proportional to trip distance (module global, set by
    # get_random_location) so the whole route fits on the canvas.
    route_zoom = 12
    for limit, level in ((1.8, 16), (3, 15), (6.5, 14), (12, 13)):
        if distance < limit:
            route_zoom = level
            break
    get_route_image(origin, destination, route_zoom, midpoint)
    # Redraw the canvas with the freshly downloaded image.
    clear_drawing("canvas")
    draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
def get_random_destination(sender, callback):
    """dearpygui callback: pick a random destination within MAX_DIST of the
    origin and write its coordinates into the destination text box."""
    origin = get_value("Coords")
    lat_str, lon_str = origin.split(",")
    destination = get_random_location(float(lat_str), float(lon_str), MAX_DIST)
    set_value("destination_text", f"{str(destination[0])},{str(destination[1])}")
def main():
    """Build the dearpygui window (origin/destination inputs, action buttons,
    map canvas) and start the GUI event loop."""
    global maps_key
    # Pull BING_MAPS_KEY and DEFAULT_COORDS from a .env file / the environment.
    load_dotenv()
    maps_key = os.environ.get("BING_MAPS_KEY")
    coords = os.environ.get("DEFAULT_COORDS")
    # set main window defaults (canvas size plus room for the controls row)
    set_main_window_size(img_size_x + 20, img_size_y + 130)
    set_main_window_pos(100, 25)
    set_main_window_title("Random destination app")
    with window("Main"):
        add_text("Coordinates: ")
        add_same_line()
        # Pressing Enter in the coords box re-renders the origin map.
        add_input_text(
            "Coords",
            label="",
            default_value=coords,
            width=170,
            callback=show_origin,
            on_enter=True,
        )
        add_same_line()
        add_button("Show origin", callback=show_origin)
        add_text("Destination: ")
        add_same_line()
        add_input_text("destination_text", label="", width=170)
        add_same_line()
        add_button("Random destination", callback=get_random_destination)
        add_same_line()
        add_button("Show destination", callback=show_destination)
        add_same_line()
        add_button("Show route", callback=show_route)
        # Label updated by get_random_location with the rolled bearing/distance.
        add_text("bearing_label", default_value=" ")
        add_spacing()
        add_separator()
        add_spacing()
        add_drawing("canvas", width=img_size_x, height=img_size_y)
    # if os.path.isfile(img_file_name):
    #     draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
    start_dearpygui(primary_window="Main")
if __name__ == "__main__":
    main()
| 32.990196 | 188 | 0.673254 | import math
import os
import random
import requests
from dotenv import load_dotenv
from dearpygui.core import *
from dearpygui.simple import *
EARTH_RADIUS = 6378.1
MAX_DIST = 16
maps_key = ""
BASE_URL = "https://dev.virtualearth.net/REST/v1/Imagery/Map/AerialWithLabels/"
zoom = "18"
distance = MAX_DIST
img_size_x = 900
img_size_y = 900
img_file_name = "pic1.png"
def plot_location(latitude, longitude, bearing, distance):
bearing_rad = math.radians(bearing)
lat1 = math.radians(latitude)
lon1 = math.radians(longitude)
d_over_r = distance / EARTH_RADIUS
lat2 = math.asin(
math.sin(lat1) * math.cos(d_over_r)
+ math.cos(lat1) * math.sin(d_over_r) * math.cos(bearing_rad)
)
lon2 = lon1 + math.atan2(
math.sin(bearing_rad) * math.sin(d_over_r) * math.cos(lat1),
math.cos(d_over_r) - math.sin(lat1) * math.sin(lat2),
)
lat2 = round(math.degrees(lat2), 6)
lon2 = round(math.degrees(lon2), 6)
return [lat2, lon2]
def get_random_location(latitude, longitude, radius_km):
global distance
bearing = round(random.uniform(0, 360),3 )
distance = round(random.uniform(0, radius_km), 3)
set_value("bearing_label", f"Bearing: {str(bearing)}°, Distance: {str(distance)} km")
return plot_location(latitude, longitude, bearing, distance)
def get_image(coords):
pic_url = f"{BASE_URL}{coords}/{zoom}?mapSize={str(img_size_x)},{str(img_size_y)}&pp={coords};;1&dcl=1&key={maps_key}"
image_data = requests.get(pic_url).content
with open(img_file_name, "wb") as handler:
handler.write(image_data)
def get_route_image(coords1, coords2, zoom, midpoint):
pic_url = f"{BASE_URL}{midpoint}/{zoom}/Routes/Driving?waypoint.1={coords1}&waypoint.2={coords2}&mapSize={str(img_size_x)},{str(img_size_y)}&imagerySet=AerialWithLabels&key={maps_key}"
image_data = requests.get(pic_url).content
with open(img_file_name, "wb") as handler:
handler.write(image_data)
def get_midpoint(coord1, coord2):
coord1_list = coord1.split(',')
coord2_list = coord2.split(',')
lat1 = float(coord1_list[0])
lon1 = float(coord1_list[1])
lat2 = float(coord2_list[0])
lon2 = float(coord2_list[1])
midlat = lat1 + (lat2 - lat1)/2
midlon = lon1 + (lon2 - lon1)/2
return f"{str(midlat)},{str(midlon)}"
def show_origin(sender, callback):
coords = get_value("Coords")
get_image(coords)
clear_drawing("canvas")
draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
def show_destination(sender, callback):
coords = get_value("destination_text")
if len(coords) < 3:
print("No destination")
return
get_image(coords)
clear_drawing("canvas")
draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
def show_route(sender, callback):
coords1 = get_value("Coords")
coords2 = get_value("destination_text")
if len(coords2) < 3:
print("No destination")
return
midpoint = get_midpoint(coords1, coords2)
if distance < 1.8:
route_zoom = 16
elif distance < 3:
route_zoom = 15
elif distance < 6.5:
route_zoom = 14
elif distance < 12:
route_zoom = 13
else:
route_zoom = 12
get_route_image(coords1, coords2, route_zoom, midpoint)
clear_drawing("canvas")
draw_image("canvas", img_file_name, [0, 0], pmax=[img_size_x, img_size_y])
def get_random_destination(sender, callback):
coords = get_value("Coords")
coord_list = coords.split(",")
latitude = float(coord_list[0])
longitude = float(coord_list[1])
new_coords = get_random_location(latitude, longitude, MAX_DIST)
new_coords_str = f"{str(new_coords[0])},{str(new_coords[1])}"
set_value("destination_text", new_coords_str)
def main():
global maps_key
load_dotenv()
maps_key = os.environ.get("BING_MAPS_KEY")
coords = os.environ.get("DEFAULT_COORDS")
set_main_window_size(img_size_x + 20, img_size_y + 130)
set_main_window_pos(100, 25)
set_main_window_title("Random destination app")
with window("Main"):
add_text("Coordinates: ")
add_same_line()
add_input_text(
"Coords",
label="",
default_value=coords,
width=170,
callback=show_origin,
on_enter=True,
)
add_same_line()
add_button("Show origin", callback=show_origin)
add_text("Destination: ")
add_same_line()
add_input_text("destination_text", label="", width=170)
add_same_line()
add_button("Random destination", callback=get_random_destination)
add_same_line()
add_button("Show destination", callback=show_destination)
add_same_line()
add_button("Show route", callback=show_route)
add_text("bearing_label", default_value=" ")
add_spacing()
add_separator()
add_spacing()
add_drawing("canvas", width=img_size_x, height=img_size_y)
start_dearpygui(primary_window="Main")
if __name__ == "__main__":
main()
| true | true |
f71ae3f98733e595da1ffdd6566812f756e6d03b | 3,576 | py | Python | metadata-ingestion/src/datahub/ingestion/transformer/add_dataset_schema_tags.py | ShubhamThakre/datahub | 08a5fcfd017d4a2903a7b637f1e2129b9d7793ea | [
"Apache-2.0"
] | 1,603 | 2016-03-03T17:21:03.000Z | 2020-01-22T22:12:02.000Z | metadata-ingestion/src/datahub/ingestion/transformer/add_dataset_schema_tags.py | ShubhamThakre/datahub | 08a5fcfd017d4a2903a7b637f1e2129b9d7793ea | [
"Apache-2.0"
] | 1,157 | 2016-03-03T19:29:22.000Z | 2020-01-20T14:41:59.000Z | metadata-ingestion/src/datahub/ingestion/transformer/add_dataset_schema_tags.py | ShubhamThakre/datahub | 08a5fcfd017d4a2903a7b637f1e2129b9d7793ea | [
"Apache-2.0"
] | 570 | 2016-03-03T17:21:05.000Z | 2020-01-21T06:54:10.000Z | from typing import Callable, List, Optional, Union
import datahub.emitter.mce_builder as builder
from datahub.configuration.common import ConfigModel, KeyValuePattern
from datahub.configuration.import_resolver import pydantic_resolve_key
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.transformer.base_transformer import (
BaseTransformer,
SingleAspectTransformer,
)
from datahub.metadata.schema_classes import (
GlobalTagsClass,
SchemaFieldClass,
SchemaMetadataClass,
TagAssociationClass,
)
class AddDatasetSchemaTagsConfig(ConfigModel):
    """Config for AddDatasetSchemaTags: a callback mapping a schema field path
    to the list of tag associations to add to that field."""
    # Workaround for https://github.com/python/mypy/issues/708.
    # Suggested by https://stackoverflow.com/a/64528725/5004662.
    get_tags_to_add: Union[
        Callable[[str], List[TagAssociationClass]],
        Callable[[str], List[TagAssociationClass]],
    ]
    # Lets config files specify the callback as a dotted import path string.
    _resolve_tag_fn = pydantic_resolve_key("get_tags_to_add")
class AddDatasetSchemaTags(BaseTransformer, SingleAspectTransformer):
    """Transformer that adds glossary tags to datasets according to a callback function."""
    ctx: PipelineContext
    config: AddDatasetSchemaTagsConfig
    def __init__(self, config: AddDatasetSchemaTagsConfig, ctx: PipelineContext):
        super().__init__()
        self.ctx = ctx
        self.config = config
    def aspect_name(self) -> str:
        """Name of the aspect this transformer rewrites."""
        return "schemaMetadata"
    def entity_types(self) -> List[str]:
        """Entity types this transformer applies to."""
        return ["dataset"]
    @classmethod
    def create(cls, config_dict: dict, ctx: PipelineContext) -> "AddDatasetSchemaTags":
        """Factory used by the ingestion framework to build this transformer from raw config."""
        config = AddDatasetSchemaTagsConfig.parse_obj(config_dict)
        return cls(config, ctx)
    def extend_field(self, schema_field: SchemaFieldClass) -> SchemaFieldClass:
        """Append the callback's tags for this field path onto the field's
        globalTags (mutates and returns the same field object)."""
        tags_to_add = self.config.get_tags_to_add(schema_field.fieldPath)
        if len(tags_to_add) > 0:
            # Reuse existing globalTags if present so previously-set tags survive.
            new_tags = (
                schema_field.globalTags
                if schema_field.globalTags is not None
                else GlobalTagsClass(
                    tags=[],
                )
            )
            new_tags.tags.extend(tags_to_add)
            schema_field.globalTags = new_tags
        return schema_field
    def transform_aspect(
        self, entity_urn: str, aspect_name: str, aspect: Optional[builder.Aspect]
    ) -> Optional[builder.Aspect]:
        """Run extend_field over every field of the schemaMetadata aspect.

        A None aspect is passed through unchanged.
        """
        assert aspect is None or isinstance(aspect, SchemaMetadataClass)
        if aspect is None:
            return aspect
        schema_metadata_aspect: SchemaMetadataClass = aspect
        schema_metadata_aspect.fields = [
            self.extend_field(field) for field in schema_metadata_aspect.fields
        ]
        return schema_metadata_aspect  # type: ignore
class PatternDatasetTagsConfig(ConfigModel):
    """Config: key/value pattern mapping field-path regexes to lists of tag URNs."""
    tag_pattern: KeyValuePattern = KeyValuePattern.all()
class PatternAddDatasetSchemaTags(AddDatasetSchemaTags):
    """Transformer that adds a dynamic set of tags to each field in a dataset based on supplied patterns."""
    def __init__(self, config: PatternDatasetTagsConfig, ctx: PipelineContext):
        tag_pattern = config.tag_pattern
        # Adapt the pattern config into the generic callback interface of the base class.
        generic_config = AddDatasetSchemaTagsConfig(
            get_tags_to_add=lambda path: [
                TagAssociationClass(tag=urn) for urn in tag_pattern.value(path)
            ],
        )
        super().__init__(generic_config, ctx)
    @classmethod
    def create(
        cls, config_dict: dict, ctx: PipelineContext
    ) -> "PatternAddDatasetSchemaTags":
        """Factory used by the ingestion framework to build this transformer from raw config."""
        config = PatternDatasetTagsConfig.parse_obj(config_dict)
        return cls(config, ctx)
| 33.735849 | 108 | 0.699105 | from typing import Callable, List, Optional, Union
import datahub.emitter.mce_builder as builder
from datahub.configuration.common import ConfigModel, KeyValuePattern
from datahub.configuration.import_resolver import pydantic_resolve_key
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.transformer.base_transformer import (
BaseTransformer,
SingleAspectTransformer,
)
from datahub.metadata.schema_classes import (
GlobalTagsClass,
SchemaFieldClass,
SchemaMetadataClass,
TagAssociationClass,
)
class AddDatasetSchemaTagsConfig(ConfigModel):
get_tags_to_add: Union[
Callable[[str], List[TagAssociationClass]],
Callable[[str], List[TagAssociationClass]],
]
_resolve_tag_fn = pydantic_resolve_key("get_tags_to_add")
class AddDatasetSchemaTags(BaseTransformer, SingleAspectTransformer):
ctx: PipelineContext
config: AddDatasetSchemaTagsConfig
def __init__(self, config: AddDatasetSchemaTagsConfig, ctx: PipelineContext):
super().__init__()
self.ctx = ctx
self.config = config
def aspect_name(self) -> str:
return "schemaMetadata"
def entity_types(self) -> List[str]:
return ["dataset"]
@classmethod
def create(cls, config_dict: dict, ctx: PipelineContext) -> "AddDatasetSchemaTags":
config = AddDatasetSchemaTagsConfig.parse_obj(config_dict)
return cls(config, ctx)
def extend_field(self, schema_field: SchemaFieldClass) -> SchemaFieldClass:
tags_to_add = self.config.get_tags_to_add(schema_field.fieldPath)
if len(tags_to_add) > 0:
new_tags = (
schema_field.globalTags
if schema_field.globalTags is not None
else GlobalTagsClass(
tags=[],
)
)
new_tags.tags.extend(tags_to_add)
schema_field.globalTags = new_tags
return schema_field
def transform_aspect(
self, entity_urn: str, aspect_name: str, aspect: Optional[builder.Aspect]
) -> Optional[builder.Aspect]:
assert aspect is None or isinstance(aspect, SchemaMetadataClass)
if aspect is None:
return aspect
schema_metadata_aspect: SchemaMetadataClass = aspect
schema_metadata_aspect.fields = [
self.extend_field(field) for field in schema_metadata_aspect.fields
]
return schema_metadata_aspect
class PatternDatasetTagsConfig(ConfigModel):
tag_pattern: KeyValuePattern = KeyValuePattern.all()
class PatternAddDatasetSchemaTags(AddDatasetSchemaTags):
def __init__(self, config: PatternDatasetTagsConfig, ctx: PipelineContext):
tag_pattern = config.tag_pattern
generic_config = AddDatasetSchemaTagsConfig(
get_tags_to_add=lambda path: [
TagAssociationClass(tag=urn) for urn in tag_pattern.value(path)
],
)
super().__init__(generic_config, ctx)
@classmethod
def create(
cls, config_dict: dict, ctx: PipelineContext
) -> "PatternAddDatasetSchemaTags":
config = PatternDatasetTagsConfig.parse_obj(config_dict)
return cls(config, ctx)
| true | true |
f71ae4f35d601be11c164199f86811680f877aef | 8,965 | py | Python | teambition/api/works.py | jxtech/teambition-api | a15b845fa029d56c084fe134bd082ee8ba25d534 | [
"MIT"
] | 47 | 2015-06-18T15:26:39.000Z | 2022-02-22T08:01:58.000Z | teambition/api/works.py | messense/teambition-api | a15b845fa029d56c084fe134bd082ee8ba25d534 | [
"MIT"
] | 5 | 2015-07-07T11:09:36.000Z | 2020-02-17T08:38:22.000Z | teambition/api/works.py | jxtech/teambition-api | a15b845fa029d56c084fe134bd082ee8ba25d534 | [
"MIT"
] | 13 | 2015-06-18T10:07:04.000Z | 2021-09-22T03:36:05.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from optionaldict import optionaldict
from teambition.api.base import TeambitionAPI
class Works(TeambitionAPI):
def get(self, id=None, parent_id=None, page=None, count=None, all=None):
"""
获取文件信息
详情请参考
http://docs.teambition.com/wiki/works#works-get
:param id: 可选,文件 ID
:param parent_id: 可选,父级 ID
:param page: 可选,当前页,默认为 1
:param count: 可选,每页数量,默认为 30
:param all: 可选,若提供此参数则返回所有
:return: 返回的 JSON 数据包
"""
assert id or parent_id
params = optionaldict(
page=page,
count=count,
all=all
)
if id:
endpoint = 'api/works/{0}'.format(id)
elif parent_id:
endpoint = 'api/works'
params['_parentId'] = parent_id
return self._get(endpoint, params=params)
def create(self, parent_id, file_name, file_size, file_type, file_category,
file_key, image_width=None, image_height=None,
involve_members=None):
"""
新建文件
详情请参考
http://docs.teambition.com/wiki/works#works-create
:param parent_id: 所属目录 ID
:param file_name: 文件名
:param file_size: 文件大小
:param file_type: 文件类型
:param file_category: 文件类别
:param file_key: 使用 striker 服务上传后可得
:param image_width: 可选,图片宽度
:param image_height: 可选,图片高度
:param involve_members: 可选
:return: 返回的 JSON 数据包
"""
data = optionaldict(
_parentId=parent_id,
fileName=file_name,
fileSize=file_size,
fileType=file_type,
fileCategory=file_category,
fileKey=file_key,
imageWidth=image_width,
imageHeight=image_height,
involveMembers=involve_members
)
return self._post(
'api/works',
data=data
)
def like(self, id):
"""
赞文件
详情请参考
http://docs.teambition.com/wiki/works#works-like
:param id: 文件 ID
:return: 返回的 JSON 数据包
"""
return self._post('api/works/{0}/like'.format(id))
def update(self, id, file_name, description=None):
"""
更新文件
详情请参考
http://docs.teambition.com/wiki/works#works-update
:param id: 文件 ID
:param file_name: 文件名
:param description: 可选,描述
:return: 返回的 JSON 数据包
"""
data = optionaldict(
fileName=file_name,
description=description
)
return self._put(
'api/works/{0}'.format(id),
data=data
)
def move(self, id, parent_id):
"""
移动文件
详情请参考
http://docs.teambition.com/wiki/works#works-move
:param id: 文件 ID
:param parent_id: 新的目录 ID
:return: 返回的 JSON 数据包
"""
return self._put(
'api/works/{0}'.format(id),
data={
'_parentId': parent_id
}
)
def delete(self, id):
"""
删除文件
详情请参考
http://docs.teambition.com/wiki/works#works-delete
:param id: 文件 ID
:return: 返回的 JSON 数据包
"""
return self._delete('api/works/{0}'.format(id))
def update_members(self, id, members):
"""
更新文件参与者
详情请参考
http://docs.teambition.com/wiki/works#works-update-involvemembers
:param id: 文件 ID
:param members: 参与者 ID 列表
:return: 返回的 JSON 数据包
"""
return self._put(
'api/works/{0}/involveMembers'.format(id),
data={
'involveMembers': members
}
)
def get_tags(self, id):
"""
获取任务标签列表
:param id: 文件 ID
:return: 返回的 JSON 数据包
"""
return self._get('api/works/{0}/tags'.format(id))
def remove_tag(self, id, tag_id):
"""
移除标签
:param id: 文件 ID
:param name: 标签 ID
:return: 返回的 JSON 数据包
"""
return self._delete('api/works/{0}/tags/{1}'.format(id, tag_id))
def add_tag(self, id, tag_id):
"""
关联标签
:param id: 文件 ID
:param tag_id: 标签 ID
:return: 返回的 JSON 数据包
"""
return self._put('api/works/{0}/tags/{1}'.format(id, tag_id))
def get_objectlinks(self, id):
"""
获取文件关联的 objectlink 列表
:param id: 文件 ID
:return: 返回的 JSON 数据包
"""
return self._get('api/works/{0}/objectlinks'.format(id))
def create_objectlink(self, id, linked_id, linked_type):
"""
关联对象
:param id: 文件 ID
:param linked_id: 关联对象 ID
:param linked_type: 关联对象类型
:return: 返回的 JSON 数据包
"""
return self._post(
'api/objectlinks',
data={
'_parentId': id,
'parentType': 'work',
'_linkedId': linked_id,
'linkedType': linked_type
}
)
def get_versions(self, id):
"""
获取文件关联的历史版本信息
详情请参考
http://docs.teambition.com/wiki/works-versions#works-versions-list
:param id: 文件 ID
:return: 历史版本列表
"""
return self._get('api/works/{0}/versions'.format(id))
def get_version(self, id, version_id):
"""
获取单个历史版本信息
详情请参考
http://docs.teambition.com/wiki/works-versions#works-versions-get
:param id: 文件 ID
:param version_id: 历史版本 ID
:return: 历史版本信息
"""
return self._get('api/works/{0}/versions/{1}'.format(id, version_id))
def update_version(self, id, version_id, file_name=None, description=None):
"""
获取单个历史版本信息
详情请参考
http://docs.teambition.com/wiki/works-versions#works-versions-update
:param id: 文件 ID
:param version_id: 历史版本 ID
:param file_name: 可选,文件名
:param description: 可选,描述
:return: 返回的 JSON 数据包
"""
data = optionaldict(fileName=file_name, description=description)
return self._put(
'api/works/{0}/versions/{1}'.format(id, version_id),
data=data
)
def delete_version(self, id, version_id):
"""
删除单个历史版本
详情请参考
http://docs.teambition.com/wiki/works-versions#works-versions-delete
:param id: 文件 ID
:param version_id: 历史版本 ID
:return: 返回的 JSON 数据包
"""
return self._delete(
'api/works/{0}/versions/{1}'.format(id, version_id)
)
def create_version(self, id, file_name, file_size, file_type,
file_category, file_key, image_width=None,
image_height=None, involve_members=None):
"""
新建文件
详情请参考
http://docs.teambition.com/wiki/works-versions#works-versions-post
:param id: 文件 ID
:param file_name: 文件名
:param file_size: 文件大小
:param file_type: 文件类型
:param file_category: 文件类别
:param file_key: 使用 striker 服务上传后可得
:param image_width: 可选,图片宽度
:param image_height: 可选,图片高度
:param involve_members: 可选
:return: 返回的 JSON 数据包
"""
data = optionaldict(
fileName=file_name,
fileSize=file_size,
fileType=file_type,
fileCategory=file_category,
fileKey=file_key,
imageWidth=image_width,
imageHeight=image_height,
involveMembers=involve_members
)
return self._post(
'api/works/{0}/versions'.format(id),
data=data
)
def link_task(self, id, linked_id):
"""
关联任务
:param id: 任务 ID
:param linked_id: 关联任务 ID
:return: 返回的 JSON 数据包
"""
return self.create_objectlink(id, linked_id, 'task')
def link_post(self, id, linked_id):
"""
关联分享
:param id: 任务 ID
:param linked_id: 关联分享 ID
:return: 返回的 JSON 数据包
"""
return self.create_objectlink(id, linked_id, 'post')
def link_event(self, id, linked_id):
"""
关联日程
:param id: 任务 ID
:param linked_id: 关联日程 ID
:return: 返回的 JSON 数据包
"""
return self.create_objectlink(id, linked_id, 'event')
def link_work(self, id, linked_id):
"""
关联文件
:param id: 任务 ID
:param linked_id: 关联文件 ID
:return: 返回的 JSON 数据包
"""
return self.create_objectlink(id, linked_id, 'work')
def get_activities(self, id):
"""
获取文件动态
:param id: 文件 ID
:return: 返回的 JSON 数据包
"""
return self._get(
'api/activities',
params={'_boundToObjectId': id}
)
| 25.112045 | 79 | 0.530508 |
from __future__ import absolute_import, unicode_literals
from optionaldict import optionaldict
from teambition.api.base import TeambitionAPI
class Works(TeambitionAPI):
    """API wrapper for Teambition "works" (file) endpoints.

    Every method returns the decoded JSON response from the Teambition
    HTTP API via the base-class helpers (_get/_post/_put/_delete).
    """

    def get(self, id=None, parent_id=None, page=None, count=None, all=None):
        """Fetch one file by *id*, or list files under *parent_id*.

        Exactly one of ``id`` / ``parent_id`` must be truthy.
        """
        assert id or parent_id
        params = optionaldict(
            page=page,
            count=count,
            all=all
        )
        if id:
            endpoint = 'api/works/{0}'.format(id)
        elif parent_id:
            endpoint = 'api/works'
            params['_parentId'] = parent_id
        return self._get(endpoint, params=params)

    def create(self, parent_id, file_name, file_size, file_type, file_category,
               file_key, image_width=None, image_height=None,
               involve_members=None):
        """Create a file record under collection *parent_id*."""
        data = optionaldict(
            _parentId=parent_id,
            fileName=file_name,
            fileSize=file_size,
            fileType=file_type,
            fileCategory=file_category,
            fileKey=file_key,
            imageWidth=image_width,
            imageHeight=image_height,
            involveMembers=involve_members
        )
        return self._post(
            'api/works',
            data=data
        )

    def like(self, id):
        """Like the file with the given ID."""
        return self._post('api/works/{0}/like'.format(id))

    def update(self, id, file_name, description=None):
        """Update a file's name and, optionally, its description."""
        data = optionaldict(
            fileName=file_name,
            description=description
        )
        return self._put(
            'api/works/{0}'.format(id),
            data=data
        )

    def move(self, id, parent_id):
        """Move the file into collection *parent_id*."""
        return self._put(
            'api/works/{0}'.format(id),
            data={
                '_parentId': parent_id
            }
        )

    def delete(self, id):
        """Delete the file with the given ID."""
        return self._delete('api/works/{0}'.format(id))

    def update_members(self, id, members):
        """Replace the list of member IDs involved with the file."""
        return self._put(
            'api/works/{0}/involveMembers'.format(id),
            data={
                'involveMembers': members
            }
        )

    def get_tags(self, id):
        """List tags attached to the file."""
        return self._get('api/works/{0}/tags'.format(id))

    def remove_tag(self, id, tag_id):
        """Detach tag *tag_id* from the file."""
        return self._delete('api/works/{0}/tags/{1}'.format(id, tag_id))

    def add_tag(self, id, tag_id):
        """Attach tag *tag_id* to the file."""
        return self._put('api/works/{0}/tags/{1}'.format(id, tag_id))

    def get_objectlinks(self, id):
        """List objectlinks associated with the file."""
        return self._get('api/works/{0}/objectlinks'.format(id))

    def create_objectlink(self, id, linked_id, linked_type):
        """Link object *linked_id* of *linked_type* to the file."""
        return self._post(
            'api/objectlinks',
            data={
                '_parentId': id,
                'parentType': 'work',
                '_linkedId': linked_id,
                'linkedType': linked_type
            }
        )

    def get_versions(self, id):
        """List historical versions of the file."""
        return self._get('api/works/{0}/versions'.format(id))

    def get_version(self, id, version_id):
        """Fetch one historical version of the file."""
        return self._get('api/works/{0}/versions/{1}'.format(id, version_id))

    def update_version(self, id, version_id, file_name=None, description=None):
        """Update name/description of one historical version."""
        data = optionaldict(fileName=file_name, description=description)
        return self._put(
            'api/works/{0}/versions/{1}'.format(id, version_id),
            data=data
        )

    def delete_version(self, id, version_id):
        """Delete one historical version of the file."""
        return self._delete(
            'api/works/{0}/versions/{1}'.format(id, version_id)
        )

    def create_version(self, id, file_name, file_size, file_type,
                       file_category, file_key, image_width=None,
                       image_height=None, involve_members=None):
        """Create a new version of the file.

        ``file_key`` is obtained from the striker upload service.
        """
        data = optionaldict(
            fileName=file_name,
            fileSize=file_size,
            fileType=file_type,
            fileCategory=file_category,
            fileKey=file_key,
            imageWidth=image_width,
            imageHeight=image_height,
            involveMembers=involve_members
        )
        return self._post(
            'api/works/{0}/versions'.format(id),
            data=data
        )

    def link_task(self, id, linked_id):
        """Link a task to the file."""
        return self.create_objectlink(id, linked_id, 'task')

    def link_post(self, id, linked_id):
        """Link a post (share) to the file."""
        return self.create_objectlink(id, linked_id, 'post')

    def link_event(self, id, linked_id):
        """Link an event to the file."""
        return self.create_objectlink(id, linked_id, 'event')

    def link_work(self, id, linked_id):
        """Link another file to the file."""
        return self.create_objectlink(id, linked_id, 'work')

    def get_activities(self, id):
        """List activity records bound to the file."""
        return self._get(
            'api/activities',
            params={'_boundToObjectId': id}
        )
| true | true |
f71ae8cf1533e7bd8ad8fa5b8cf9a24021f79424 | 623 | py | Python | example/your_app/app.py | keyloguer/flask_middleware | 38d01d7f87484f85aaeb7bb6deaa0f1055497c1a | [
"Apache-2.0"
] | 21 | 2019-10-09T18:50:57.000Z | 2020-10-14T20:49:57.000Z | example/your_app/app.py | keyloguer/flask_middleware | 38d01d7f87484f85aaeb7bb6deaa0f1055497c1a | [
"Apache-2.0"
] | null | null | null | example/your_app/app.py | keyloguer/flask_middleware | 38d01d7f87484f85aaeb7bb6deaa0f1055497c1a | [
"Apache-2.0"
] | null | null | null | from flask import Flask
from flask_middleware_jwt import Middleware, middleware_jwt_required
app = Flask(__name__)
app.config['MIDDLEWARE_URL_IDENTITY'] = 'http://0.0.0.0:5000'
app.config['MIDDLEWARE_VERIFY_ENDPOINT'] = '/token/verify'
app.config['MIDDLEWARE_BEARER'] = True
app.config['MIDDLEWARE_VERIFY_HTTP_VERB'] = 'GET'
app.config['JWT_SECRET'] = 'super-secret'
app.config['JWT_ALGORITHMS'] = ['HS256']
middleware = Middleware(app)
@app.route("/")
@middleware_jwt_required
def hello_world():
return 'Hello World!'
if __name__ == '__main__':
app.run(port=5001) | 28.318182 | 68 | 0.693419 | from flask import Flask
from flask_middleware_jwt import Middleware, middleware_jwt_required
app = Flask(__name__)
app.config['MIDDLEWARE_URL_IDENTITY'] = 'http://0.0.0.0:5000'
app.config['MIDDLEWARE_VERIFY_ENDPOINT'] = '/token/verify'
app.config['MIDDLEWARE_BEARER'] = True
app.config['MIDDLEWARE_VERIFY_HTTP_VERB'] = 'GET'
app.config['JWT_SECRET'] = 'super-secret'
app.config['JWT_ALGORITHMS'] = ['HS256']
middleware = Middleware(app)
@app.route("/")
@middleware_jwt_required
def hello_world():
return 'Hello World!'
if __name__ == '__main__':
app.run(port=5001) | true | true |
f71ae8eb0aa5e13e7033134268ddae1c0ba1dd97 | 6,344 | py | Python | modules/pulse/pulse_sim.py | timsnow/PandABlocks-FPGA | 7df03a7a4415c5c9e02c80dc80c3d377ab480e5c | [
"Apache-2.0"
] | null | null | null | modules/pulse/pulse_sim.py | timsnow/PandABlocks-FPGA | 7df03a7a4415c5c9e02c80dc80c3d377ab480e5c | [
"Apache-2.0"
] | null | null | null | modules/pulse/pulse_sim.py | timsnow/PandABlocks-FPGA | 7df03a7a4415c5c9e02c80dc80c3d377ab480e5c | [
"Apache-2.0"
] | null | null | null | from common.python.simulations import BlockSimulation, properties_from_ini
from collections import deque
# max queue size
MAX_QUEUE = 1023
# min FPGA deadtime between queued pulses
MIN_QUEUE_DELTA = 4
# time taken to clear queue
QUEUE_CLEAR_TIME = 4
NAMES, PROPERTIES = properties_from_ini(__file__, "pulse.block.ini")
class PulseSimulation(BlockSimulation):
    """Simulation of the PULSE block: a delayed/stretched pulse generator
    driven by edges on TRIG, with a bounded transition queue."""

    ENABLE, TRIG, DELAY_L, DELAY_H, WIDTH_L, WIDTH_H, TRIG_EDGE, OUT, QUEUED, \
        DROPPED = PROPERTIES

    def __init__(self):
        # Queue of (timestamp, level) output transitions still to emit.
        self.queue = deque()
        # Timestamp before which new triggers are rejected (queue clearing).
        self.valid_ts = 0
        # Timestamp of the last accepted trigger edge.
        self.trigtime = 0
        # Flags tracking the enqueue/dequeue handshake with the QUEUED count.
        self.enqueue = 0
        self.dequeue = 0
        self.delaypulse = 0
        self.delayqueue = 1
        self.doqueue = 0
        self.missedsignal = 0
        # Effective width/delay after the out-of-bounds clamping below.
        self.width = 0
        self.delay = 0

    def do_pulse(self, ts, changes):
        """We've received a bit event on TRIG, so queue some output values
        based on DELAY and WIDTH."""
        # If the queue isn't valid at the moment then error
        # If there isn't room for 2 on the queue then error
        # If WIDTH is zero DELAY should be >3, or if DELAY is zero WIDTH
        # should be >3 for the FIFO to iterate fully
        width = self.width
        delay = self.delay
        if ts < self.valid_ts or len(self.queue) + 2 > MAX_QUEUE:
            self.DROPPED += 1
        # If there is no specified width then use the width of input pulse
        elif width == 0:
            self.queue.append((ts + delay, self.TRIG))
        elif self.TRIG and self.TRIG_EDGE == 0:
            self.generate_queue(ts, delay, width)
        elif not self.TRIG and self.TRIG_EDGE == 1 and delay == 0:
            self.generate_queue(ts+1, delay, width)
        elif not self.TRIG and self.TRIG_EDGE == 1 and delay >= 0:
            self.generate_queue(ts, delay, width)
        elif self.TRIG and self.TRIG_EDGE == 2:
            self.generate_queue(ts, delay, width)
        elif not self.TRIG and self.TRIG_EDGE == 2:
            self.generate_queue(ts, delay+1, width)

    def generate_queue(self, ts, delay, width):
        """Generate both the rising and falling output transition for one
        input edge."""
        start = ts + delay
        # make sure that start is after any pulse on queue
        if self.queue and start < self.queue[-1][0] + MIN_QUEUE_DELTA:
            self.DROPPED += 1
            self.missedsignal += 1
        else:
            self.queue.append((start, 1))
            self.queue.append((start + width, 0))

    def do_reset(self):
        """Reset the block, called on rising edge of ENABLE."""
        self.DROPPED = 0

    def do_clear_queue(self, ts):
        """Clear the queue, but not any errors."""
        self.valid_ts = ts + QUEUE_CLEAR_TIME
        self.OUT = 0
        self.queue.clear()

    def on_changes(self, ts, changes):
        """Handle changes at a particular timestamp, then return the timestamp
        when we next need to be called."""
        super(PulseSimulation, self).on_changes(ts, changes)
        # This is the next time we need to be called
        next_ts = ts+1
        # If the DELAY and WIDTH inputs are out of bounds, clamp them to 4
        if 0 < self.DELAY_L < 4:
            self.delay = 4
        else:
            self.delay = self.DELAY_L
        if (0 < self.WIDTH_L < 4) and self.DELAY_L == 0:
            self.width = 4
        else:
            self.width = self.WIDTH_L
        # Append queue if the start of the queue is delayed
        if self.delaypulse == 1:
            if self.WIDTH_L > 0 or self.doqueue == 1:
                self.QUEUED += 1
                self.delaypulse = 0
                self.doqueue = 0
            elif changes.get(NAMES.TRIG, None) == 0:
                self.doqueue = 1
        # Increment the queue
        if self.enqueue == 1 and ts == self.trigtime+1:
            if self.missedsignal > 0:
                self.missedsignal -= 1
            else:
                self.QUEUED += 1
            # Is a pulse of zero required before next pulse?
            if self.DELAY_L > 0:
                self.delaypulse = 1
            self.enqueue = 0
        # On the trigger edge set the writestrobe to the queue
        # If both DELAY and WIDTH are equal to 0, the module bypasses the queue
        if self.width == 0 and self.delay == 0:
            self.enqueue = 0
        elif changes.get(NAMES.TRIG) == 1 and self.TRIG_EDGE in (0, 2):
            # Positive edge
            self.trigtime = ts
            self.enqueue = 1
        elif changes.get(NAMES.TRIG) == 0 and self.TRIG_EDGE in (1, 2):
            # Negative edge
            self.trigtime = ts + 1
            self.enqueue = 1
        # Set attributes, and flag clear queue
        # NOTE(review): this tuple lists DELAY_L and WIDTH_L twice; it looks
        # like DELAY_H/WIDTH_H were intended for the second entries -- confirm.
        for name, value in changes.items():
            setattr(self, name, value)
            if name in ("DELAY_L", "DELAY_L", "WIDTH_L", "WIDTH_L"):
                self.do_clear_queue(ts)
        # On rising edge of enable clear errors
        if changes.get(NAMES.ENABLE, None) == 1:
            self.do_reset()
        # on falling edge of enable reset output and queue
        elif changes.get(NAMES.ENABLE, None) == 0:
            self.do_clear_queue(ts)
        # If we got an input and we were enabled then output a pulse
        if NAMES.TRIG in changes and self.ENABLE:
            self.do_pulse(ts, changes)
        # if we have anything else on the queue return when it's due
        if self.queue:
            # next_ts = self.queue[0][0]
            # if the pulse on our queue is ready to be produced then produce
            if self.queue[0][0] == ts:
                if self.queue.popleft()[1] == 1:
                    self.OUT = 1
                    self.dequeue = 1
                else:
                    self.OUT = 0
            assert next_ts >= ts, "Going back in time %s >= %s" % (next_ts, ts)
        # At the end of the pulse, the queue count has decreased
        if self.OUT == 0 and self.dequeue == 1:
            if self.QUEUED > 0:
                self.QUEUED -= 1
            self.dequeue = 0
            self.delayqueue = 1
        # Decrease the queue count for the zero pulse
        if self.OUT == 1 and self.delayqueue == 1:
            if self.QUEUED > 0:
                self.QUEUED -= 1
            self.delayqueue = 0
        return next_ts
| 36.45977 | 79 | 0.565889 | from common.python.simulations import BlockSimulation, properties_from_ini
from collections import deque
MAX_QUEUE = 1023
MIN_QUEUE_DELTA = 4
QUEUE_CLEAR_TIME = 4
NAMES, PROPERTIES = properties_from_ini(__file__, "pulse.block.ini")
class PulseSimulation(BlockSimulation):
    """Simulation of the PULSE block (delayed/stretched pulse generator)."""

    ENABLE, TRIG, DELAY_L, DELAY_H, WIDTH_L, WIDTH_H, TRIG_EDGE, OUT, QUEUED, \
        DROPPED = PROPERTIES

    def __init__(self):
        # (timestamp, level) transitions still to emit, plus handshake state.
        self.queue = deque()
        self.valid_ts = 0
        self.trigtime = 0
        self.enqueue = 0
        self.dequeue = 0
        self.delaypulse = 0
        self.delayqueue = 1
        self.doqueue = 0
        self.missedsignal = 0
        self.width = 0
        self.delay = 0

    def do_pulse(self, ts, changes):
        """Queue output transitions for one TRIG event, or drop it."""
        # If there isn't room for 2 on the queue then error
        width = self.width
        delay = self.delay
        if ts < self.valid_ts or len(self.queue) + 2 > MAX_QUEUE:
            self.DROPPED += 1
        elif width == 0:
            self.queue.append((ts + delay, self.TRIG))
        elif self.TRIG and self.TRIG_EDGE == 0:
            self.generate_queue(ts, delay, width)
        elif not self.TRIG and self.TRIG_EDGE == 1 and delay == 0:
            self.generate_queue(ts+1, delay, width)
        elif not self.TRIG and self.TRIG_EDGE == 1 and delay >= 0:
            self.generate_queue(ts, delay, width)
        elif self.TRIG and self.TRIG_EDGE == 2:
            self.generate_queue(ts, delay, width)
        elif not self.TRIG and self.TRIG_EDGE == 2:
            self.generate_queue(ts, delay+1, width)

    def generate_queue(self, ts, delay, width):
        """Queue a rising and falling transition, dropping if too close."""
        start = ts + delay
        if self.queue and start < self.queue[-1][0] + MIN_QUEUE_DELTA:
            self.DROPPED += 1
            self.missedsignal += 1
        else:
            self.queue.append((start, 1))
            self.queue.append((start + width, 0))

    def do_reset(self):
        """Clear the error count; called on rising edge of ENABLE."""
        self.DROPPED = 0

    def do_clear_queue(self, ts):
        """Clear the output and pending queue (errors are kept)."""
        self.valid_ts = ts + QUEUE_CLEAR_TIME
        self.OUT = 0
        self.queue.clear()

    def on_changes(self, ts, changes):
        """Process changes at timestamp *ts*; return the next call time."""
        super(PulseSimulation, self).on_changes(ts, changes)
        next_ts = ts+1
        # Clamp out-of-bounds DELAY/WIDTH to the minimum of 4 ticks.
        if 0 < self.DELAY_L < 4:
            self.delay = 4
        else:
            self.delay = self.DELAY_L
        if (0 < self.WIDTH_L < 4) and self.DELAY_L == 0:
            self.width = 4
        else:
            self.width = self.WIDTH_L
        if self.delaypulse == 1:
            if self.WIDTH_L > 0 or self.doqueue == 1:
                self.QUEUED += 1
                self.delaypulse = 0
                self.doqueue = 0
            elif changes.get(NAMES.TRIG, None) == 0:
                self.doqueue = 1
        if self.enqueue == 1 and ts == self.trigtime+1:
            if self.missedsignal > 0:
                self.missedsignal -= 1
            else:
                self.QUEUED += 1
            if self.DELAY_L > 0:
                self.delaypulse = 1
            self.enqueue = 0
        # DELAY == WIDTH == 0 bypasses the queue entirely.
        if self.width == 0 and self.delay == 0:
            self.enqueue = 0
        elif changes.get(NAMES.TRIG) == 1 and self.TRIG_EDGE in (0, 2):
            self.trigtime = ts
            self.enqueue = 1
        elif changes.get(NAMES.TRIG) == 0 and self.TRIG_EDGE in (1, 2):
            self.trigtime = ts + 1
            self.enqueue = 1
        # NOTE(review): tuple repeats DELAY_L/WIDTH_L; DELAY_H/WIDTH_H were
        # probably intended -- confirm against hardware behaviour.
        for name, value in changes.items():
            setattr(self, name, value)
            if name in ("DELAY_L", "DELAY_L", "WIDTH_L", "WIDTH_L"):
                self.do_clear_queue(ts)
        if changes.get(NAMES.ENABLE, None) == 1:
            self.do_reset()
        elif changes.get(NAMES.ENABLE, None) == 0:
            self.do_clear_queue(ts)
        if NAMES.TRIG in changes and self.ENABLE:
            self.do_pulse(ts, changes)
        if self.queue:
            # next_ts = self.queue[0][0]
            # if the pulse on our queue is ready to be produced then produce
            if self.queue[0][0] == ts:
                if self.queue.popleft()[1] == 1:
                    self.OUT = 1
                    self.dequeue = 1
                else:
                    self.OUT = 0
            assert next_ts >= ts, "Going back in time %s >= %s" % (next_ts, ts)
        # At the end of the pulse, the queue count has decreased
        if self.OUT == 0 and self.dequeue == 1:
            if self.QUEUED > 0:
                self.QUEUED -= 1
            self.dequeue = 0
            self.delayqueue = 1
        # Decrease the queue count for the zero pulse
        if self.OUT == 1 and self.delayqueue == 1:
            if self.QUEUED > 0:
                self.QUEUED -= 1
            self.delayqueue = 0
        return next_ts
| true | true |
f71ae95e60037316cd6d37acc29a87db6cdf90d0 | 25,177 | py | Python | Lib/fontTools/ttLib/tables/E_B_L_C_.py | ViktorRubenko/fonttools | 522c32547c569d655feafd475987284bc0dabed1 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-05-07T16:29:02.000Z | 2020-05-07T16:29:02.000Z | Lib/fontTools/ttLib/tables/E_B_L_C_.py | ViktorRubenko/fonttools | 522c32547c569d655feafd475987284bc0dabed1 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | Lib/fontTools/ttLib/tables/E_B_L_C_.py | ViktorRubenko/fonttools | 522c32547c569d655feafd475987284bc0dabed1 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from . import DefaultTable
from fontTools.misc.textTools import safeEval
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
import struct
import itertools
from collections import deque
import logging
log = logging.getLogger(__name__)

# sstruct format of the fixed EBLC header.
eblcHeaderFormat = """
> # big endian
version: 16.16F
numSizes: I
"""
# The table format string is split to handle sbitLineMetrics simply.
bitmapSizeTableFormatPart1 = """
> # big endian
indexSubTableArrayOffset: I
indexTablesSize: I
numberOfIndexSubTables: I
colorRef: I
"""
# The compound type for hori and vert.
sbitLineMetricsFormat = """
> # big endian
ascender: b
descender: b
widthMax: B
caretSlopeNumerator: b
caretSlopeDenominator: b
caretOffset: b
minOriginSB: b
minAdvanceSB: b
maxBeforeBL: b
minAfterBL: b
pad1: b
pad2: b
"""
# hori and vert go between the two parts.
bitmapSizeTableFormatPart2 = """
> # big endian
startGlyphIndex: H
endGlyphIndex: H
ppemX: B
ppemY: B
bitDepth: B
flags: b
"""
# Fixed-size binary records used while walking the subtable index.
indexSubTableArrayFormat = ">HHL"
indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)
indexSubHeaderFormat = ">HHL"
indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)
codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
class table_E_B_L_C_(DefaultTable.DefaultTable):
	"""Embedded Bitmap Location (EBLC) table.

	Holds one Strike per bitmap size; the glyph image data itself lives
	in the companion EBDT table (hence the dependency below).
	"""

	dependencies = ['EBDT']

	# This method can be overridden in subclasses to support new formats
	# without changing the other implementation. Also can be used as a
	# convenience method for coverting a font file to an alternative format.
	def getIndexFormatClass(self, indexFormat):
		"""Return the class that handles index subtables of *indexFormat*."""
		return eblc_sub_table_classes[indexFormat]

	def decompile(self, data, ttFont):
		"""Parse the binary table: header, bitmapSizeTables, then the
		index subtable arrays and subtables for each strike."""
		# Save the original data because offsets are from the start of the table.
		origData = data
		i = 0;
		dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
		i += 8;
		self.strikes = []
		for curStrikeIndex in range(self.numSizes):
			curStrike = Strike()
			self.strikes.append(curStrike)
			curTable = curStrike.bitmapSizeTable
			dummy = sstruct.unpack2(bitmapSizeTableFormatPart1, data[i:i+16], curTable)
			i += 16
			for metric in ('hori', 'vert'):
				metricObj = SbitLineMetrics()
				vars(curTable)[metric] = metricObj
				dummy = sstruct.unpack2(sbitLineMetricsFormat, data[i:i+12], metricObj)
				i += 12
			dummy = sstruct.unpack(bitmapSizeTableFormatPart2, data[i:i+8], curTable)
			i += 8
		for curStrike in self.strikes:
			curTable = curStrike.bitmapSizeTable
			for subtableIndex in range(curTable.numberOfIndexSubTables):
				# Each indexSubTableArray entry gives the glyph range and the
				# offset (relative to the array start) of its subtable.
				i = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize
				tup = struct.unpack(indexSubTableArrayFormat, data[i:i+indexSubTableArraySize])
				(firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
				i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
				tup = struct.unpack(indexSubHeaderFormat, data[i:i+indexSubHeaderSize])
				(indexFormat, imageFormat, imageDataOffset) = tup
				indexFormatClass = self.getIndexFormatClass(indexFormat)
				indexSubTable = indexFormatClass(data[i+indexSubHeaderSize:], ttFont)
				indexSubTable.firstGlyphIndex = firstGlyphIndex
				indexSubTable.lastGlyphIndex = lastGlyphIndex
				indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable
				indexSubTable.indexFormat = indexFormat
				indexSubTable.imageFormat = imageFormat
				indexSubTable.imageDataOffset = imageDataOffset
				indexSubTable.decompile() # https://github.com/fonttools/fonttools/issues/317
				curStrike.indexSubTables.append(indexSubTable)

	def compile(self, ttFont):
		"""Serialize the table, recomputing all offsets and counts."""
		dataList = []
		self.numSizes = len(self.strikes)
		dataList.append(sstruct.pack(eblcHeaderFormat, self))

		# Data size of the header + bitmapSizeTable needs to be calculated
		# in order to form offsets. This value will hold the size of the data
		# in dataList after all the data is consolidated in dataList.
		dataSize = len(dataList[0])

		# The table will be structured in the following order:
		# (0) header
		# (1) Each bitmapSizeTable [1 ... self.numSizes]
		# (2) Alternate between indexSubTableArray and indexSubTable
		#     for each bitmapSizeTable present.
		#
		# The issue is maintaining the proper offsets when table information
		# gets moved around. All offsets and size information must be recalculated
		# when building the table to allow editing within ttLib and also allow easy
		# import/export to and from XML. All of this offset information is lost
		# when exporting to XML so everything must be calculated fresh so importing
		# from XML will work cleanly. Only byte offset and size information is
		# calculated fresh. Count information like numberOfIndexSubTables is
		# checked through assertions. If the information in this table was not
		# touched or was changed properly then these types of values should match.
		#
		# The table will be rebuilt the following way:
		# (0) Precompute the size of all the bitmapSizeTables. This is needed to
		#     compute the offsets properly.
		# (1) For each bitmapSizeTable compute the indexSubTable and
		#     indexSubTableArray pair. The indexSubTable must be computed first
		#     so that the offset information in indexSubTableArray can be
		#     calculated. Update the data size after each pairing.
		# (2) Build each bitmapSizeTable.
		# (3) Consolidate all the data into the main dataList in the correct order.

		for curStrike in self.strikes:
			dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
			dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat)
			dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)

		indexSubTablePairDataList = []
		for curStrike in self.strikes:
			curTable = curStrike.bitmapSizeTable
			curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
			curTable.indexSubTableArrayOffset = dataSize

			# Precompute the size of the indexSubTableArray. This information
			# is important for correctly calculating the new value for
			# additionalOffsetToIndexSubtable.
			sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize
			lowerBound = dataSize
			dataSize += sizeOfSubTableArray
			upperBound = dataSize

			indexSubTableDataList = []
			for indexSubTable in curStrike.indexSubTables:
				indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset
				glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
				indexSubTable.firstGlyphIndex = min(glyphIds)
				indexSubTable.lastGlyphIndex = max(glyphIds)
				data = indexSubTable.compile(ttFont)
				indexSubTableDataList.append(data)
				dataSize += len(data)
			curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables)
			curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables)

			for i in curStrike.indexSubTables:
				data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable)
				indexSubTablePairDataList.append(data)
			indexSubTablePairDataList.extend(indexSubTableDataList)
			curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset

		for curStrike in self.strikes:
			curTable = curStrike.bitmapSizeTable
			data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
			dataList.append(data)
			for metric in ('hori', 'vert'):
				metricObj = vars(curTable)[metric]
				data = sstruct.pack(sbitLineMetricsFormat, metricObj)
				dataList.append(data)
			data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
			dataList.append(data)
		dataList.extend(indexSubTablePairDataList)

		return bytesjoin(dataList)

	def toXML(self, writer, ttFont):
		"""Write the header followed by each strike."""
		writer.simpletag('header', [('version', self.version)])
		writer.newline()
		for curIndex, curStrike in enumerate(self.strikes):
			curStrike.toXML(curIndex, writer, ttFont)

	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild strikes from XML; strike indices may arrive out of order."""
		if name == 'header':
			self.version = safeEval(attrs['version'])
		elif name == 'strike':
			if not hasattr(self, 'strikes'):
				self.strikes = []
			strikeIndex = safeEval(attrs['index'])
			curStrike = Strike()
			curStrike.fromXML(name, attrs, content, ttFont, self)

			# Grow the strike array to the appropriate size. The XML format
			# allows for the strike index value to be out of order.
			if strikeIndex >= len(self.strikes):
				self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
			assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
			self.strikes[strikeIndex] = curStrike
class Strike(object):
	"""One bitmap strike: a BitmapSizeTable plus its index subtables."""

	def __init__(self):
		self.bitmapSizeTable = BitmapSizeTable()
		self.indexSubTables = []

	def toXML(self, strikeIndex, writer, ttFont):
		"""Write this strike (size table then every subtable) as XML."""
		writer.begintag('strike', [('index', strikeIndex)])
		writer.newline()
		self.bitmapSizeTable.toXML(writer, ttFont)
		writer.comment('GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.')
		writer.newline()
		for subTable in self.indexSubTables:
			subTable.toXML(writer, ttFont)
		writer.endtag('strike')
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont, locator):
		"""Rebuild the strike from XML; *locator* maps index formats to
		their subtable classes."""
		for element in content:
			if not isinstance(element, tuple):
				continue
			elemName, elemAttrs, elemContent = element
			if elemName == 'bitmapSizeTable':
				self.bitmapSizeTable.fromXML(elemName, elemAttrs, elemContent, ttFont)
			elif elemName.startswith(_indexSubTableSubclassPrefix):
				# The numeric suffix of the tag names the index format.
				indexFormat = safeEval(elemName[len(_indexSubTableSubclassPrefix):])
				subTableClass = locator.getIndexFormatClass(indexFormat)
				subTable = subTableClass(None, None)
				subTable.indexFormat = indexFormat
				subTable.fromXML(elemName, elemAttrs, elemContent, ttFont)
				self.indexSubTables.append(subTable)
class BitmapSizeTable(object):
	"""In-memory form of one EBLC bitmapSizeTable record."""

	def _getXMLMetricNames(self):
		"""Return the simple metric field names used for XML round-trips.

		The first three fields of part 1 are byte offsets/counts that the
		compiler recomputes, so they are excluded.
		"""
		fieldNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
		fieldNames = fieldNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1]
		return fieldNames[3:]

	def toXML(self, writer, ttFont):
		"""Write the hori/vert metrics and all simple fields as XML."""
		writer.begintag('bitmapSizeTable')
		writer.newline()
		for direction in ('hori', 'vert'):
			getattr(self, direction).toXML(direction, writer, ttFont)
		for metricName in self._getXMLMetricNames():
			writer.simpletag(metricName, value=getattr(self, metricName))
			writer.newline()
		writer.endtag('bitmapSizeTable')
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		"""Read only the fields this table knows about; warn on others."""
		knownNames = set(self._getXMLMetricNames())
		for element in content:
			if not isinstance(element, tuple):
				continue
			elemName, elemAttrs, elemContent = element
			if elemName == 'sbitLineMetrics':
				direction = elemAttrs['direction']
				assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid."
				metrics = SbitLineMetrics()
				metrics.fromXML(elemName, elemAttrs, elemContent, ttFont)
				vars(self)[direction] = metrics
			elif elemName in knownNames:
				vars(self)[elemName] = safeEval(elemAttrs['value'])
			else:
				log.warning("unknown name '%s' being ignored in BitmapSizeTable.", elemName)
class SbitLineMetrics(object):
	"""Per-direction ('hori' or 'vert') line metrics of a strike."""

	def toXML(self, name, writer, ttFont):
		"""Write every metric field under an sbitLineMetrics element."""
		writer.begintag('sbitLineMetrics', [('direction', name)])
		writer.newline()
		for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]:
			writer.simpletag(metricName, value=getattr(self, metricName))
			writer.newline()
		writer.endtag('sbitLineMetrics')
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		"""Read back any known metric fields; ignore everything else."""
		validNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
		for element in content:
			if not isinstance(element, tuple):
				continue
			elemName, elemAttrs, _ = element
			if elemName in validNames:
				vars(self)[elemName] = safeEval(elemAttrs['value'])
# Important information about the naming scheme. Used for identifying subtables.
# XML tag prefix; the trailing integer in a tag names the index format.
_indexSubTableSubclassPrefix = 'eblc_index_sub_table_'
class EblcIndexSubTable(object):
def __init__(self, data, ttFont):
self.data = data
self.ttFont = ttFont
# TODO Currently non-lazy decompiling doesn't work for this class...
#if not ttFont.lazy:
# self.decompile()
# del self.data, self.ttFont
def __getattr__(self, attr):
# Allow lazy decompile.
if attr[:2] == '__':
raise AttributeError(attr)
if not hasattr(self, "data"):
raise AttributeError(attr)
self.decompile()
return getattr(self, attr)
# This method just takes care of the indexSubHeader. Implementing subclasses
# should call it to compile the indexSubHeader and then continue compiling
# the remainder of their unique format.
def compile(self, ttFont):
return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset)
# Creates the XML for bitmap glyphs. Each index sub table basically makes
# the same XML except for specific metric information that is written
# out via a method call that a subclass implements optionally.
def toXML(self, writer, ttFont):
writer.begintag(self.__class__.__name__, [
('imageFormat', self.imageFormat),
('firstGlyphIndex', self.firstGlyphIndex),
('lastGlyphIndex', self.lastGlyphIndex),
])
writer.newline()
self.writeMetrics(writer, ttFont)
# Write out the names as thats all thats needed to rebuild etc.
# For font debugging of consecutive formats the ids are also written.
# The ids are not read when moving from the XML format.
glyphIds = map(ttFont.getGlyphID, self.names)
for glyphName, glyphId in zip(self.names, glyphIds):
writer.simpletag('glyphLoc', name=glyphName, id=glyphId)
writer.newline()
writer.endtag(self.__class__.__name__)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
# Read all the attributes. Even though the glyph indices are
# recalculated, they are still read in case there needs to
# be an immediate export of the data.
self.imageFormat = safeEval(attrs['imageFormat'])
self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex'])
self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex'])
self.readMetrics(name, attrs, content, ttFont)
self.names = []
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'glyphLoc':
self.names.append(attrs['name'])
# A helper method that writes the metrics for the index sub table. It also
# is responsible for writing the image size for fixed size data since fixed
# size is not recalculated on compile. Default behavior is to do nothing.
def writeMetrics(self, writer, ttFont):
pass
# A helper method that is the inverse of writeMetrics.
def readMetrics(self, name, attrs, content, ttFont):
pass
# This method is for fixed glyph data sizes. There are formats where
# the glyph data is fixed but are actually composite glyphs. To handle
# this the font spec in indexSubTable makes the data the size of the
# fixed size by padding the component arrays. This function abstracts
# out this padding process. Input is data unpadded. Output is data
# padded only in fixed formats. Default behavior is to return the data.
def padBitmapData(self, data):
return data
# Remove any of the glyph locations and names that are flagged as skipped.
# This only occurs in formats {1,3}.
def removeSkipGlyphs(self):
# Determines if a name, location pair is a valid data location.
# Skip glyphs are marked when the size is equal to zero.
def isValidLocation(args):
(name, (startByte, endByte)) = args
return startByte < endByte
# Remove all skip glyphs.
dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
self.names, self.locations = list(map(list, zip(*dataPairs)))
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
# Prep the data size for the offset array data format.
dataFormat = '>'+formatStringForDataType
offsetDataSize = struct.calcsize(dataFormat)
class OffsetArrayIndexSubTableMixin(object):
def decompile(self):
numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)]
indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations]
glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
self.removeSkipGlyphs()
del self.data, self.ttFont
def compile(self, ttFont):
# First make sure that all the data lines up properly. Formats 1 and 3
# must have all its data lined up consecutively. If not this will fail.
for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats"
glyphIds = list(map(ttFont.getGlyphID, self.names))
# Make sure that all ids are sorted strictly increasing.
assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1))
# Run a simple algorithm to add skip glyphs to the data locations at
# the places where an id is not present.
idQueue = deque(glyphIds)
locQueue = deque(self.locations)
allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
allLocations = []
for curId in allGlyphIds:
if curId != idQueue[0]:
allLocations.append((locQueue[0][0], locQueue[0][0]))
else:
idQueue.popleft()
allLocations.append(locQueue.popleft())
# Now that all the locations are collected, pack them appropriately into
# offsets. This is the form where offset[i] is the location and
# offset[i+1]-offset[i] is the size of the data location.
offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
# Image data offset must be less than or equal to the minimum of locations.
# This offset may change the value for round tripping but is safer and
# allows imageDataOffset to not be required to be in the XML version.
self.imageDataOffset = min(offsets)
offsetArray = [offset - self.imageDataOffset for offset in offsets]
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray]
# Take care of any padding issues. Only occurs in format 3.
if offsetDataSize * len(offsetArray) % 4 != 0:
dataList.append(struct.pack(dataFormat, 0))
return bytesjoin(dataList)
return OffsetArrayIndexSubTableMixin
# Shared behavior for the two fixed-size index formats (2 and 5). Both carry
# a single imageSize plus big glyph metrics, so their (de)serialization and
# data padding live here.
class FixedSizeIndexSubTableMixin(object):
	def writeMetrics(self, writer, ttFont):
		"""Write the fixed image size and the big glyph metrics to XML."""
		writer.simpletag('imageSize', value=self.imageSize)
		writer.newline()
		self.metrics.toXML(writer, ttFont)
	def readMetrics(self, name, attrs, content, ttFont):
		"""Read imageSize and BigGlyphMetrics back from the XML children."""
		for child in content:
			if not isinstance(child, tuple):
				continue
			childName, childAttrs, childContent = child
			if childName == 'imageSize':
				self.imageSize = safeEval(childAttrs['value'])
			elif childName == BigGlyphMetrics.__name__:
				self.metrics = BigGlyphMetrics()
				self.metrics.fromXML(childName, childAttrs, childContent, ttFont)
			elif childName == SmallGlyphMetrics.__name__:
				# Fixed-size formats only carry big metrics.
				log.warning("SmallGlyphMetrics being ignored in format %d.", self.indexFormat)
	def padBitmapData(self, data):
		"""Zero-pad *data* up to the fixed imageSize."""
		assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat
		return data + b'\0' * (self.imageSize - len(data))
class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable):
	"""Index format 1: array of 4-byte (ulong) offsets, one per glyph."""
	pass
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
	"""Index format 2: every glyph shares one image size and big metrics."""
	def decompile(self):
		# Layout: 4-byte imageSize, then BigGlyphMetrics, no per-glyph data.
		(self.imageSize,) = struct.unpack(">L", self.data[:4])
		self.metrics = BigGlyphMetrics()
		sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
		glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
		# Fixed-size data: entry i starts i*imageSize past the base offset.
		offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
		self.locations = list(zip(offsets, offsets[1:]))
		self.names = list(map(self.ttFont.getGlyphName, glyphIds))
		del self.data, self.ttFont
	def compile(self, ttFont):
		glyphIds = list(map(ttFont.getGlyphID, self.names))
		# Make sure all the ids are consecutive. This is required by Format 2.
		assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive."
		# Minimum of the start offsets (first column of the location pairs).
		self.imageDataOffset = min(next(iter(zip(*self.locations))))
		dataList = [EblcIndexSubTable.compile(self, ttFont)]
		dataList.append(struct.pack(">L", self.imageSize))
		dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
		return bytesjoin(dataList)
class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable):
	"""Index format 3: like format 1 but with 2-byte (ushort) offsets."""
	pass
class eblc_index_sub_table_4(EblcIndexSubTable):
	"""Index format 4: sparse array of (glyphID, offset) pairs."""
	def decompile(self):
		"""Parse the code/offset pair array into names and data locations."""
		(numGlyphs,) = struct.unpack(">L", self.data[:4])
		pairData = self.data[4:]
		# numGlyphs+1 pairs are stored; the final pair only terminates the
		# offset array, its id field carries no real glyph.
		glyphIds = []
		rawOffsets = []
		for pairIndex in range(numGlyphs + 1):
			chunk = pairData[pairIndex * codeOffsetPairSize:(pairIndex + 1) * codeOffsetPairSize]
			glyphId, rawOffset = struct.unpack(codeOffsetPairFormat, chunk)
			glyphIds.append(glyphId)
			rawOffsets.append(rawOffset)
		glyphIds.pop()
		offsets = [self.imageDataOffset + rawOffset for rawOffset in rawOffsets]
		self.locations = list(zip(offsets, offsets[1:]))
		self.names = [self.ttFont.getGlyphName(glyphId) for glyphId in glyphIds]
		del self.data, self.ttFont
	def compile(self, ttFont):
		"""Pack the header, the pair count, and the (id, relative offset) pairs."""
		# Format 4 requires the glyph data to be stored back-to-back.
		for currentLoc, followingLoc in zip(self.locations, self.locations[1:]):
			assert currentLoc[1] == followingLoc[0], "Data must be consecutive in indexSubTable format 4"
		offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
		# Anchor at the smallest location so imageDataOffset does not have to
		# round-trip through the XML.
		self.imageDataOffset = min(offsets)
		relativeOffsets = [offset - self.imageDataOffset for offset in offsets]
		glyphIds = list(map(ttFont.getGlyphID, self.names))
		# One extra terminating pair; its id field is just zero padding.
		paddedIds = itertools.chain(glyphIds, [0])
		dataList = [EblcIndexSubTable.compile(self, ttFont)]
		dataList.append(struct.pack(">L", len(glyphIds)))
		dataList.extend(struct.pack(codeOffsetPairFormat, glyphId, offset)
				for glyphId, offset in zip(paddedIds, relativeOffsets))
		return bytesjoin(dataList)
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
	"""Index format 5: fixed-size images for a sparse list of glyph ids."""
	def decompile(self):
		self.origDataLen = 0  # NOTE(review): appears unused here; confirm against callers.
		# Layout: imageSize, BigGlyphMetrics, glyph count, then ushort ids.
		(self.imageSize,) = struct.unpack(">L", self.data[:4])
		data = self.data[4:]
		self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics())
		(numGlyphs,) = struct.unpack(">L", data[:4])
		data = data[4:]
		glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)]
		# Fixed-size data: entry i starts i*imageSize past the base offset.
		offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
		self.locations = list(zip(offsets, offsets[1:]))
		self.names = list(map(self.ttFont.getGlyphName, glyphIds))
		del self.data, self.ttFont
	def compile(self, ttFont):
		# Minimum of the start offsets (first column of the location pairs).
		self.imageDataOffset = min(next(iter(zip(*self.locations))))
		dataList = [EblcIndexSubTable.compile(self, ttFont)]
		dataList.append(struct.pack(">L", self.imageSize))
		dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
		glyphIds = list(map(ttFont.getGlyphID, self.names))
		dataList.append(struct.pack(">L", len(glyphIds)))
		dataList += [struct.pack(">H", curId) for curId in glyphIds]
		# Pad the 2-byte id array out to a 4-byte boundary.
		if len(glyphIds) % 2 == 1:
			dataList.append(struct.pack(">H", 0))
		return bytesjoin(dataList)
# Dictionary of indexFormat to the class representing that format
# (looked up via table_E_B_L_C_.getIndexFormatClass).
eblc_sub_table_classes = {
	1: eblc_index_sub_table_1,
	2: eblc_index_sub_table_2,
	3: eblc_index_sub_table_3,
	4: eblc_index_sub_table_4,
	5: eblc_index_sub_table_5,
}
| 40.154705 | 139 | 0.743059 | from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from . import DefaultTable
from fontTools.misc.textTools import safeEval
from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
import struct
import itertools
from collections import deque
import logging
log = logging.getLogger(__name__)
eblcHeaderFormat = """
> # big endian
version: 16.16F
numSizes: I
"""
bitmapSizeTableFormatPart1 = """
> # big endian
indexSubTableArrayOffset: I
indexTablesSize: I
numberOfIndexSubTables: I
colorRef: I
"""
sbitLineMetricsFormat = """
> # big endian
ascender: b
descender: b
widthMax: B
caretSlopeNumerator: b
caretSlopeDenominator: b
caretOffset: b
minOriginSB: b
minAdvanceSB: b
maxBeforeBL: b
minAfterBL: b
pad1: b
pad2: b
"""
bitmapSizeTableFormatPart2 = """
> # big endian
startGlyphIndex: H
endGlyphIndex: H
ppemX: B
ppemY: B
bitDepth: B
flags: b
"""
indexSubTableArrayFormat = ">HHL"
indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)
indexSubHeaderFormat = ">HHL"
indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)
codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
class table_E_B_L_C_(DefaultTable.DefaultTable):
	"""Embedded Bitmap Location (EBLC) table.

	Holds one Strike per bitmap size; each strike owns a bitmapSizeTable
	plus the index subtables locating the glyph data in EBDT.
	"""
	# EBDT must be compiled first; the data offsets referenced here depend on it.
	dependencies = ['EBDT']
	def getIndexFormatClass(self, indexFormat):
		"""Return the index-subtable class implementing *indexFormat*."""
		return eblc_sub_table_classes[indexFormat]
	def decompile(self, data, ttFont):
		"""Parse the binary table: header, size tables, then index subtables."""
		origData = data  # NOTE(review): appears unused below; kept as-is.
		i = 0;
		dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
		i += 8;
		self.strikes = []
		# First pass: one bitmapSizeTable (with hori/vert line metrics) per strike.
		for curStrikeIndex in range(self.numSizes):
			curStrike = Strike()
			self.strikes.append(curStrike)
			curTable = curStrike.bitmapSizeTable
			dummy = sstruct.unpack2(bitmapSizeTableFormatPart1, data[i:i+16], curTable)
			i += 16
			for metric in ('hori', 'vert'):
				metricObj = SbitLineMetrics()
				vars(curTable)[metric] = metricObj
				dummy = sstruct.unpack2(sbitLineMetricsFormat, data[i:i+12], metricObj)
				i += 12
			dummy = sstruct.unpack(bitmapSizeTableFormatPart2, data[i:i+8], curTable)
			i += 8
		# Second pass: follow each strike's offsets to its index subtables.
		for curStrike in self.strikes:
			curTable = curStrike.bitmapSizeTable
			for subtableIndex in range(curTable.numberOfIndexSubTables):
				# Array entry: (firstGlyphIndex, lastGlyphIndex, offset to subtable).
				i = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize
				tup = struct.unpack(indexSubTableArrayFormat, data[i:i+indexSubTableArraySize])
				(firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
				i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
				tup = struct.unpack(indexSubHeaderFormat, data[i:i+indexSubHeaderSize])
				(indexFormat, imageFormat, imageDataOffset) = tup
				indexFormatClass = self.getIndexFormatClass(indexFormat)
				# The subtable keeps the remaining data for lazy decompilation.
				indexSubTable = indexFormatClass(data[i+indexSubHeaderSize:], ttFont)
				indexSubTable.firstGlyphIndex = firstGlyphIndex
				indexSubTable.lastGlyphIndex = lastGlyphIndex
				indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable
				indexSubTable.indexFormat = indexFormat
				indexSubTable.imageFormat = imageFormat
				indexSubTable.imageDataOffset = imageDataOffset
				indexSubTable.decompile()
				curStrike.indexSubTables.append(indexSubTable)
	def compile(self, ttFont):
		"""Serialize the table, recomputing sizes, offsets and glyph ranges."""
		dataList = []
		self.numSizes = len(self.strikes)
		dataList.append(sstruct.pack(eblcHeaderFormat, self))
		# Account for the fixed-size portion (header + all size tables) first;
		# the index subtables are laid out after it.
		dataSize = len(dataList[0])
		for curStrike in self.strikes:
			dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
			dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat)
			dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)
		indexSubTablePairDataList = []
		for curStrike in self.strikes:
			curTable = curStrike.bitmapSizeTable
			curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
			curTable.indexSubTableArrayOffset = dataSize
			# Reserve room for the subtable array itself.
			sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize
			lowerBound = dataSize  # NOTE(review): lowerBound/upperBound look unused.
			dataSize += sizeOfSubTableArray
			upperBound = dataSize
			indexSubTableDataList = []
			for indexSubTable in curStrike.indexSubTables:
				indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset
				# Glyph range is recomputed from the current glyph order.
				glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
				indexSubTable.firstGlyphIndex = min(glyphIds)
				indexSubTable.lastGlyphIndex = max(glyphIds)
				data = indexSubTable.compile(ttFont)
				indexSubTableDataList.append(data)
				dataSize += len(data)
			# The strike's glyph range spans all of its subtables.
			curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables)
			curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables)
			for i in curStrike.indexSubTables:
				# NOTE(review): indexSubHeaderFormat and indexSubTableArrayFormat
				# are both ">HHL"; this packs the array entry.
				data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable)
				indexSubTablePairDataList.append(data)
			indexSubTablePairDataList.extend(indexSubTableDataList)
			curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset
		# Offsets are now known: emit the size tables, then the subtable data.
		for curStrike in self.strikes:
			curTable = curStrike.bitmapSizeTable
			data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
			dataList.append(data)
			for metric in ('hori', 'vert'):
				metricObj = vars(curTable)[metric]
				data = sstruct.pack(sbitLineMetricsFormat, metricObj)
				dataList.append(data)
			data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
			dataList.append(data)
		dataList.extend(indexSubTablePairDataList)
		return bytesjoin(dataList)
	def toXML(self, writer, ttFont):
		"""Write the header version and every strike to XML."""
		writer.simpletag('header', [('version', self.version)])
		writer.newline()
		for curIndex, curStrike in enumerate(self.strikes):
			curStrike.toXML(curIndex, writer, ttFont)
	def fromXML(self, name, attrs, content, ttFont):
		"""Read the header or a single <strike> element back from XML."""
		if name == 'header':
			self.version = safeEval(attrs['version'])
		elif name == 'strike':
			if not hasattr(self, 'strikes'):
				self.strikes = []
			strikeIndex = safeEval(attrs['index'])
			curStrike = Strike()
			curStrike.fromXML(name, attrs, content, ttFont, self)
			# Strikes may arrive out of order; grow the list as needed.
			if strikeIndex >= len(self.strikes):
				self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
			assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
			self.strikes[strikeIndex] = curStrike
class Strike(object):
	"""One bitmap size: a bitmapSizeTable plus its index subtables."""
	def __init__(self):
		self.bitmapSizeTable = BitmapSizeTable()
		self.indexSubTables = []
	def toXML(self, strikeIndex, writer, ttFont):
		"""Write this strike as a <strike index=...> element."""
		writer.begintag('strike', [('index', strikeIndex)])
		writer.newline()
		self.bitmapSizeTable.toXML(writer, ttFont)
		writer.comment('GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.')
		writer.newline()
		for indexSubTable in self.indexSubTables:
			indexSubTable.toXML(writer, ttFont)
		writer.endtag('strike')
		writer.newline()
	def fromXML(self, name, attrs, content, ttFont, locator):
		"""Parse a <strike>; *locator* maps index formats to subtable classes."""
		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			if name == 'bitmapSizeTable':
				self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
			elif name.startswith(_indexSubTableSubclassPrefix):
				# The tag name encodes the format, e.g. eblc_index_sub_table_1.
				indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix):])
				indexFormatClass = locator.getIndexFormatClass(indexFormat)
				indexSubTable = indexFormatClass(None, None)
				indexSubTable.indexFormat = indexFormat
				indexSubTable.fromXML(name, attrs, content, ttFont)
				self.indexSubTables.append(indexSubTable)
class BitmapSizeTable(object):
	"""Per-strike size record: line metrics, ppem, bit depth, flags."""
	def _getXMLMetricNames(self):
		# All scalar fields of both format parts, skipping the first three
		# (offset/size bookkeeping fields that are recomputed on compile).
		dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
		dataNames = dataNames + sstruct.getformat(bitmapSizeTableFormatPart2)[1]
		return dataNames[3:]
	def toXML(self, writer, ttFont):
		"""Write the hori/vert metrics and every scalar field to XML."""
		writer.begintag('bitmapSizeTable')
		writer.newline()
		for metric in ('hori', 'vert'):
			getattr(self, metric).toXML(metric, writer, ttFont)
		for metricName in self._getXMLMetricNames():
			writer.simpletag(metricName, value=getattr(self, metricName))
			writer.newline()
		writer.endtag('bitmapSizeTable')
		writer.newline()
	def fromXML(self, name, attrs, content, ttFont):
		"""Read metrics and scalar fields back from the XML children."""
		dataNames = set(self._getXMLMetricNames())
		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			if name == 'sbitLineMetrics':
				direction = attrs['direction']
				assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid."
				metricObj = SbitLineMetrics()
				metricObj.fromXML(name, attrs, content, ttFont)
				vars(self)[direction] = metricObj
			elif name in dataNames:
				vars(self)[name] = safeEval(attrs['value'])
			else:
				log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name)
class SbitLineMetrics(object):
	"""Horizontal or vertical line metrics of a strike."""
	def toXML(self, name, writer, ttFont):
		"""Write one <sbitLineMetrics direction=...> element."""
		writer.begintag('sbitLineMetrics', [('direction', name)])
		writer.newline()
		for fieldName in sstruct.getformat(sbitLineMetricsFormat)[1]:
			writer.simpletag(fieldName, value=getattr(self, fieldName))
			writer.newline()
		writer.endtag('sbitLineMetrics')
		writer.newline()
	def fromXML(self, name, attrs, content, ttFont):
		"""Read the metric fields back from the XML children."""
		knownFields = set(sstruct.getformat(sbitLineMetricsFormat)[1])
		for child in content:
			if not isinstance(child, tuple):
				continue
			childName, childAttrs, _ = child
			if childName in knownFields:
				vars(self)[childName] = safeEval(childAttrs['value'])
# XML tag prefix shared by all index subtable subclasses; the numeric suffix
# of the tag is the index format (see Strike.fromXML).
_indexSubTableSubclassPrefix = 'eblc_index_sub_table_'
class EblcIndexSubTable(object):
	"""Base class for EBLC index subtables.

	Keeps the raw bytes and font around for lazy decompilation; subclasses
	implement decompile/compile for their specific index format.
	"""
	def __init__(self, data, ttFont):
		self.data = data
		self.ttFont = ttFont
		#if not ttFont.lazy:
		#	self.decompile()
		#	del self.data, self.ttFont
	def __getattr__(self, attr):
		# Allow lazy decompile.
		if attr[:2] == '__':
			# Dunder lookups must fail fast (copying, pickling, etc.).
			raise AttributeError(attr)
		if not hasattr(self, "data"):
			# Already decompiled; the attribute genuinely does not exist.
			raise AttributeError(attr)
		self.decompile()
		return getattr(self, attr)
	# This method just takes care of the indexSubHeader. Implementing subclasses
	# should call it to compile the indexSubHeader and then continue compiling
	# the remainder of their unique format.
	def compile(self, ttFont):
		return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset)
	# Creates the XML for bitmap glyphs. Each index sub table basically makes
	# the same XML except for specific metric information that is written
	# out via a method call that a subclass implements optionally.
	def toXML(self, writer, ttFont):
		writer.begintag(self.__class__.__name__, [
				('imageFormat', self.imageFormat),
				('firstGlyphIndex', self.firstGlyphIndex),
				('lastGlyphIndex', self.lastGlyphIndex),
				])
		writer.newline()
		self.writeMetrics(writer, ttFont)
		# Write out the names as thats all thats needed to rebuild etc.
		# For font debugging of consecutive formats the ids are also written.
		# The ids are not read when moving from the XML format.
		glyphIds = map(ttFont.getGlyphID, self.names)
		for glyphName, glyphId in zip(self.names, glyphIds):
			writer.simpletag('glyphLoc', name=glyphName, id=glyphId)
			writer.newline()
		writer.endtag(self.__class__.__name__)
		writer.newline()
	def fromXML(self, name, attrs, content, ttFont):
		# Read all the attributes. Even though the glyph indices are
		# recalculated, they are still read in case there needs to
		# be an immediate export of the data.
		self.imageFormat = safeEval(attrs['imageFormat'])
		self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex'])
		self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex'])
		self.readMetrics(name, attrs, content, ttFont)
		self.names = []
		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			if name == 'glyphLoc':
				self.names.append(attrs['name'])
	# A helper method that writes the metrics for the index sub table. It also
	# is responsible for writing the image size for fixed size data since fixed
	# size is not recalculated on compile. Default behavior is to do nothing.
	def writeMetrics(self, writer, ttFont):
		pass
	# A helper method that is the inverse of writeMetrics.
	def readMetrics(self, name, attrs, content, ttFont):
		pass
	# This method is for fixed glyph data sizes. There are formats where
	# the glyph data is fixed but are actually composite glyphs. To handle
	# this the font spec in indexSubTable makes the data the size of the
	# fixed size by padding the component arrays. This function abstracts
	# out this padding process. Input is data unpadded. Output is data
	# padded only in fixed formats. Default behavior is to return the data.
	def padBitmapData(self, data):
		return data
	# Remove any of the glyph locations and names that are flagged as skipped.
	# This only occurs in formats {1,3}.
	def removeSkipGlyphs(self):
		# Determines if a name, location pair is a valid data location.
		# Skip glyphs are marked when the size is equal to zero.
		def isValidLocation(args):
			(name, (startByte, endByte)) = args
			return startByte < endByte
		# Remove all skip glyphs.
		dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
		self.names, self.locations = list(map(list, zip(*dataPairs)))
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
# Prep the data size for the offset array data format.
dataFormat = '>'+formatStringForDataType
offsetDataSize = struct.calcsize(dataFormat)
class OffsetArrayIndexSubTableMixin(object):
def decompile(self):
numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)]
indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations]
glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
self.removeSkipGlyphs()
del self.data, self.ttFont
def compile(self, ttFont):
# First make sure that all the data lines up properly. Formats 1 and 3
# must have all its data lined up consecutively. If not this will fail.
for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats"
glyphIds = list(map(ttFont.getGlyphID, self.names))
# Make sure that all ids are sorted strictly increasing.
assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1))
# Run a simple algorithm to add skip glyphs to the data locations at
# the places where an id is not present.
idQueue = deque(glyphIds)
locQueue = deque(self.locations)
allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
allLocations = []
for curId in allGlyphIds:
if curId != idQueue[0]:
allLocations.append((locQueue[0][0], locQueue[0][0]))
else:
idQueue.popleft()
allLocations.append(locQueue.popleft())
# Now that all the locations are collected, pack them appropriately into
# offsets. This is the form where offset[i] is the location and
# offset[i+1]-offset[i] is the size of the data location.
offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
# Image data offset must be less than or equal to the minimum of locations.
# This offset may change the value for round tripping but is safer and
# allows imageDataOffset to not be required to be in the XML version.
self.imageDataOffset = min(offsets)
offsetArray = [offset - self.imageDataOffset for offset in offsets]
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray]
# Take care of any padding issues. Only occurs in format 3.
if offsetDataSize * len(offsetArray) % 4 != 0:
dataList.append(struct.pack(dataFormat, 0))
return bytesjoin(dataList)
return OffsetArrayIndexSubTableMixin
# A Mixin for functionality shared between the different kinds
# of fixed sized data handling. Both kinds have big metrics so
# that kind of special processing is also handled in this mixin.
class FixedSizeIndexSubTableMixin(object):
def writeMetrics(self, writer, ttFont):
writer.simpletag('imageSize', value=self.imageSize)
writer.newline()
self.metrics.toXML(writer, ttFont)
def readMetrics(self, name, attrs, content, ttFont):
for element in content:
if not isinstance(element, tuple):
continue
name, attrs, content = element
if name == 'imageSize':
self.imageSize = safeEval(attrs['value'])
elif name == BigGlyphMetrics.__name__:
self.metrics = BigGlyphMetrics()
self.metrics.fromXML(name, attrs, content, ttFont)
elif name == SmallGlyphMetrics.__name__:
log.warning("SmallGlyphMetrics being ignored in format %d.", self.indexFormat)
def padBitmapData(self, data):
# Make sure that the data isn't bigger than the fixed size.
assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat
pad = (self.imageSize - len(data)) * b'\0'
return data + pad
class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable):
pass
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
def decompile(self):
(self.imageSize,) = struct.unpack(">L", self.data[:4])
self.metrics = BigGlyphMetrics()
sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
del self.data, self.ttFont
def compile(self, ttFont):
glyphIds = list(map(ttFont.getGlyphID, self.names))
assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive."
self.imageDataOffset = min(next(iter(zip(*self.locations))))
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList.append(struct.pack(">L", self.imageSize))
dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
return bytesjoin(dataList)
class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable):
pass
class eblc_index_sub_table_4(EblcIndexSubTable):
def decompile(self):
(numGlyphs,) = struct.unpack(">L", self.data[:4])
data = self.data[4:]
indexingOffsets = [glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs+2)]
indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
glyphArray = [struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations]
glyphIds, offsets = list(map(list, zip(*glyphArray)))
glyphIds.pop()
offsets = [offset + self.imageDataOffset for offset in offsets]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
del self.data, self.ttFont
def compile(self, ttFont):
for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable format 4"
offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
self.imageDataOffset = min(offsets)
offsets = [offset - self.imageDataOffset for offset in offsets]
glyphIds = list(map(ttFont.getGlyphID, self.names))
idsPlusPad = list(itertools.chain(glyphIds, [0]))
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList.append(struct.pack(">L", len(glyphIds)))
tmp = [struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)]
dataList += tmp
data = bytesjoin(dataList)
return data
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
def decompile(self):
self.origDataLen = 0
(self.imageSize,) = struct.unpack(">L", self.data[:4])
data = self.data[4:]
self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics())
(numGlyphs,) = struct.unpack(">L", data[:4])
data = data[4:]
glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)]
offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
self.locations = list(zip(offsets, offsets[1:]))
self.names = list(map(self.ttFont.getGlyphName, glyphIds))
del self.data, self.ttFont
def compile(self, ttFont):
self.imageDataOffset = min(next(iter(zip(*self.locations))))
dataList = [EblcIndexSubTable.compile(self, ttFont)]
dataList.append(struct.pack(">L", self.imageSize))
dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
glyphIds = list(map(ttFont.getGlyphID, self.names))
dataList.append(struct.pack(">L", len(glyphIds)))
dataList += [struct.pack(">H", curId) for curId in glyphIds]
if len(glyphIds) % 2 == 1:
dataList.append(struct.pack(">H", 0))
return bytesjoin(dataList)
eblc_sub_table_classes = {
1: eblc_index_sub_table_1,
2: eblc_index_sub_table_2,
3: eblc_index_sub_table_3,
4: eblc_index_sub_table_4,
5: eblc_index_sub_table_5,
}
| true | true |
f71ae98a1105d01fa234bdf25931e02f5302919f | 1,327 | py | Python | interface/gaussian.py | R-Laurent/detection | 66823e8664b66caadef2ee35ee197fd9a5066f56 | [
"MIT"
] | null | null | null | interface/gaussian.py | R-Laurent/detection | 66823e8664b66caadef2ee35ee197fd9a5066f56 | [
"MIT"
] | null | null | null | interface/gaussian.py | R-Laurent/detection | 66823e8664b66caadef2ee35ee197fd9a5066f56 | [
"MIT"
def generate_gaussianFile(geom, grid, logger, outdir="./", igrid=0, maxbq=200):
    """Write Gaussian NMR input files for *geom* plus ghost-atom grid points.

    Grid points are emitted as Bq (ghost) centers, at most *maxbq* per file;
    one file is produced per batch, named input_batch_<igrid>.com after the
    index of the first grid point it contains.

    Args:
        geom: molecule object with an ``atoms`` list of dicts holding
            ``label``/``x``/``y``/``z`` entries.
        grid: sequence of (x, y, z) grid points.
        logger: logger used to report batch progress.
        outdir: output directory prefix (must end with a path separator).
        igrid: index of the first grid point to emit (also the file suffix).
        maxbq: maximum number of Bq centers per file.
    """
    # Iterate over batches instead of recursing (the original recursed once
    # per batch, risking RecursionError on very large grids). Output is
    # byte-identical, including the trailing zero-Bq file produced when the
    # grid size is an exact multiple of maxbq.
    while True:
        batch = grid[igrid:igrid + maxbq]
        gaussianfile = outdir + "input_batch_{:05d}.com".format(igrid)
        # 'with' guarantees the handle is closed even if a write fails.
        with open(gaussianfile, "w") as f:
            f.write("%nproc=8\n")
            f.write("%mem=1000MB\n")
            f.write("#P b3lyp/6-311++G(d,p) SCF(Tight) CPHF(Separate) Int(Grid=SuperFine) NMR geom=connectivity\n\nTitle\n\n0 1\n")
            nat = 0
            for at in geom.atoms:
                f.write("{:4s} {:16.10f} {:16.10f} {:16.10f}\n".format(at['label'], at['x'], at['y'], at['z']))
                nat += 1
            for point in batch:
                f.write("Bq {0[0]:16.10f} {0[1]:16.10f} {0[2]:16.10f}\n".format(point))
                nat += 1
            # Trivial connectivity section: every center on its own line.
            f.write("\n")
            for i in range(nat):
                f.write("{}\n".format(i + 1))
            f.write("\n")
        igrid += len(batch)
        # A full batch means another file follows (possibly with zero Bq
        # centers, matching the original recursive behavior).
        if len(batch) == maxbq:
            logger.info("Batch generation : {}".format(igrid))
        else:
            break
    return
| 39.029412 | 144 | 0.553881 | def generate_gaussianFile(geom, grid, logger, outdir="./", igrid=0, maxbq=200):
gaussianfile = outdir + \
"input_batch_{:05d}.com".format(igrid)
f = open(gaussianfile, "w")
f.write("%nproc=8\n".format())
f.write("%mem=1000MB\n".format())
f.write("#P b3lyp/6-311++G(d,p) SCF(Tight) CPHF(Separate) Int(Grid=SuperFine) NMR geom=connectivity\n\nTitle\n\n0 1\n".format())
nat = 0
for at in geom.atoms:
f.write("{:4s} {:16.10f} {:16.10f} {:16.10f}\n".format(at['label'], at['x'], at['y'], at['z']))
nat = nat + 1
nbq = 0
for at in grid[igrid:]:
f.write(
"Bq {0[0]:16.10f} {0[1]:16.10f} {0[2]:16.10f}\n".format(at))
nbq = nbq + 1
nat = nat + 1
igrid = igrid + 1
if (nbq == maxbq):
logger.info("Batch generation : {}".format(igrid))
generate_gaussianFile(
geom, grid, logger, outdir=outdir, igrid=igrid, maxbq = maxbq)
break
f.write("\n")
for i in range(nat):
f.write("{}\n".format(i + 1))
f.write("\n")
f.close()
return
| true | true |
f71aea3701b57a737d5aa551497f3aa64313bab4 | 6,197 | py | Python | mmdet/models/dense_heads/pisa_retinanet_head.py | zactodd/mmdetection | 9596b9a4c916ae601f9a8a641c3a0ea47265abec | [
"Apache-2.0"
] | 549 | 2020-01-02T05:14:57.000Z | 2022-03-29T18:34:12.000Z | mmdet/models/dense_heads/pisa_retinanet_head.py | wondervictor/lvis-mmdet | 68532eb6f4643ddf0179a4384c8c9e004a2c1d07 | [
"Apache-2.0"
] | 136 | 2021-07-11T11:26:54.000Z | 2022-03-31T02:45:34.000Z | mmdet/models/dense_heads/pisa_retinanet_head.py | wondervictor/lvis-mmdet | 68532eb6f4643ddf0179a4384c8c9e004a2c1d07 | [
"Apache-2.0"
] | 233 | 2020-01-18T03:46:27.000Z | 2022-03-19T03:17:47.000Z | import torch
from mmdet.core import force_fp32, images_to_levels
from ..builder import HEADS
from ..losses import carl_loss, isr_p
from .retina_head import RetinaHead
@HEADS.register_module()
class PISARetinaHead(RetinaHead):
    """RetinaNet head trained with PISA (Prime Sample Attention).

    The network structure is identical to ``RetinaHead``; training differs
    in two ways:

    1. ISR-P (importance-based sample reweighting of positives) rescales
       the loss weights of positive samples.
    2. A classification-aware regression loss (CARL) is added as a third
       loss term.
    """

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute classification, regression and (optional) CARL losses.

        Args:
            cls_scores (list[Tensor]): Per-level scores with shape
                (N, num_anchors * num_classes, H, W).
            bbox_preds (list[Tensor]): Per-level box deltas with shape
                (N, num_anchors * 4, H, W).
            gt_bboxes (list[Tensor]): Ground-truth boxes of each image.
            gt_labels (list[Tensor]): Ground-truth labels of each image.
            img_metas (list[dict]): Per-image meta information.
            gt_bboxes_ignore (list[Tensor], optional): Ignored gt boxes.

        Returns:
            dict: Classification and regression losses, plus the CARL loss
            when configured. ``None`` when target computation fails.
        """
        feat_sizes = [feat.size()[-2:] for feat in cls_scores]
        assert len(feat_sizes) == self.anchor_generator.num_levels
        device = cls_scores[0].device
        anchors_per_img, valid_flags_per_img = self.get_anchors(
            feat_sizes, img_metas, device=device)
        num_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        targets = self.get_targets(
            anchors_per_img,
            valid_flags_per_img,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=num_channels,
            return_sampling_results=True)
        if targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_pos, num_neg, sampling_results_list) = targets
        avg_factor = num_pos + num_neg if self.sampling else num_pos

        # Anchors: concatenate per image, then regroup by pyramid level so
        # they line up with the flattened predictions below.
        anchors_per_level = [a.size(0) for a in anchors_per_img[0]]
        merged_anchors = [torch.cat(img_anchors) for img_anchors in anchors_per_img]
        level_anchors = images_to_levels(merged_anchors, anchors_per_level)

        num_imgs = len(img_metas)
        flat_scores = torch.cat([
            score.permute(0, 2, 3, 1).reshape(num_imgs, -1, num_channels)
            for score in cls_scores
        ], dim=1)
        flat_scores = flat_scores.reshape(-1, flat_scores.size(-1))
        flat_preds = torch.cat([
            pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for pred in bbox_preds
        ], dim=1).view(-1, 4)
        flat_labels = torch.cat(labels_list, dim=1).reshape(-1)
        flat_label_weights = torch.cat(label_weights_list, dim=1).reshape(-1)
        flat_anchors = torch.cat(level_anchors, dim=1).reshape(-1, 4)
        flat_bbox_targets = torch.cat(bbox_targets_list, dim=1).reshape(-1, 4)
        flat_bbox_weights = torch.cat(bbox_weights_list, dim=1).reshape(-1, 4)

        # ISR-P: reweight positive samples by their estimated importance.
        if self.train_cfg.get('isr', None) is not None:
            target_tuple = (flat_labels, flat_label_weights,
                            flat_bbox_targets, flat_bbox_weights)
            with torch.no_grad():
                target_tuple = isr_p(
                    flat_scores,
                    flat_preds,
                    target_tuple,
                    flat_anchors,
                    sampling_results_list,
                    bbox_coder=self.bbox_coder,
                    loss_cls=self.loss_cls,
                    num_class=self.num_classes,
                    **self.train_cfg.isr)
            (flat_labels, flat_label_weights, flat_bbox_targets,
             flat_bbox_weights) = target_tuple

        # Losses are computed once over all levels; the weights were
        # flattened identically, so this matches a per-level computation.
        loss_dict = dict(
            loss_cls=self.loss_cls(
                flat_scores,
                flat_labels,
                flat_label_weights,
                avg_factor=avg_factor),
            loss_bbox=self.loss_bbox(
                flat_preds,
                flat_bbox_targets,
                flat_bbox_weights,
                avg_factor=avg_factor))

        # Classification-aware regression loss (CARL).
        if self.train_cfg.get('carl', None) is not None:
            loss_dict.update(
                carl_loss(
                    flat_scores,
                    flat_labels,
                    flat_preds,
                    flat_bbox_targets,
                    self.loss_bbox,
                    **self.train_cfg.carl,
                    avg_factor=num_pos,
                    sigmoid=True,
                    num_class=self.num_classes))
        return loss_dict
| 40.24026 | 79 | 0.598031 | import torch
from mmdet.core import force_fp32, images_to_levels
from ..builder import HEADS
from ..losses import carl_loss, isr_p
from .retina_head import RetinaHead
@HEADS.register_module()
class PISARetinaHead(RetinaHead):
    """RetinaNet head trained with PISA (Prime Sample Attention).

    Same structure as ``RetinaHead``; training adds ISR-P reweighting of
    positive samples and a classification-aware regression (CARL) loss.
    """

    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute classification, regression and (optional) CARL losses.

        Args:
            cls_scores (list[Tensor]): Per-level scores with shape
                (N, num_anchors * num_classes, H, W).
            bbox_preds (list[Tensor]): Per-level box deltas with shape
                (N, num_anchors * 4, H, W).
            gt_bboxes (list[Tensor]): Ground-truth boxes of each image.
            gt_labels (list[Tensor]): Ground-truth labels of each image.
            img_metas (list[dict]): Per-image meta information.
            gt_bboxes_ignore (list[Tensor], optional): Ignored gt boxes.

        Returns:
            dict: Loss terms, or ``None`` when target computation fails.
        """
        feat_sizes = [feat.size()[-2:] for feat in cls_scores]
        assert len(feat_sizes) == self.anchor_generator.num_levels
        device = cls_scores[0].device
        anchors_per_img, valid_flags_per_img = self.get_anchors(
            feat_sizes, img_metas, device=device)
        num_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
        targets = self.get_targets(
            anchors_per_img,
            valid_flags_per_img,
            gt_bboxes,
            img_metas,
            gt_bboxes_ignore_list=gt_bboxes_ignore,
            gt_labels_list=gt_labels,
            label_channels=num_channels,
            return_sampling_results=True)
        if targets is None:
            return None
        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
         num_pos, num_neg, sampling_results_list) = targets
        avg_factor = num_pos + num_neg if self.sampling else num_pos

        # Anchors: concatenate per image, then regroup by pyramid level so
        # they line up with the flattened predictions below.
        anchors_per_level = [a.size(0) for a in anchors_per_img[0]]
        merged_anchors = [torch.cat(img_anchors) for img_anchors in anchors_per_img]
        level_anchors = images_to_levels(merged_anchors, anchors_per_level)

        num_imgs = len(img_metas)
        flat_scores = torch.cat([
            score.permute(0, 2, 3, 1).reshape(num_imgs, -1, num_channels)
            for score in cls_scores
        ], dim=1)
        flat_scores = flat_scores.reshape(-1, flat_scores.size(-1))
        flat_preds = torch.cat([
            pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for pred in bbox_preds
        ], dim=1).view(-1, 4)
        flat_labels = torch.cat(labels_list, dim=1).reshape(-1)
        flat_label_weights = torch.cat(label_weights_list, dim=1).reshape(-1)
        flat_anchors = torch.cat(level_anchors, dim=1).reshape(-1, 4)
        flat_bbox_targets = torch.cat(bbox_targets_list, dim=1).reshape(-1, 4)
        flat_bbox_weights = torch.cat(bbox_weights_list, dim=1).reshape(-1, 4)

        # ISR-P: reweight positive samples by their estimated importance.
        if self.train_cfg.get('isr', None) is not None:
            target_tuple = (flat_labels, flat_label_weights,
                            flat_bbox_targets, flat_bbox_weights)
            with torch.no_grad():
                target_tuple = isr_p(
                    flat_scores,
                    flat_preds,
                    target_tuple,
                    flat_anchors,
                    sampling_results_list,
                    bbox_coder=self.bbox_coder,
                    loss_cls=self.loss_cls,
                    num_class=self.num_classes,
                    **self.train_cfg.isr)
            (flat_labels, flat_label_weights, flat_bbox_targets,
             flat_bbox_weights) = target_tuple

        # Losses are computed once over all levels; the weights were
        # flattened identically, so this matches a per-level computation.
        loss_dict = dict(
            loss_cls=self.loss_cls(
                flat_scores,
                flat_labels,
                flat_label_weights,
                avg_factor=avg_factor),
            loss_bbox=self.loss_bbox(
                flat_preds,
                flat_bbox_targets,
                flat_bbox_weights,
                avg_factor=avg_factor))

        # Classification-aware regression loss (CARL).
        if self.train_cfg.get('carl', None) is not None:
            loss_dict.update(
                carl_loss(
                    flat_scores,
                    flat_labels,
                    flat_preds,
                    flat_bbox_targets,
                    self.loss_bbox,
                    **self.train_cfg.carl,
                    avg_factor=num_pos,
                    sigmoid=True,
                    num_class=self.num_classes))
        return loss_dict
| true | true |
f71aebb3c6779bbbbac6736bbbca965e3ddbbe88 | 4,718 | py | Python | diff_representation/model/edit_encoder/bag_of_edits_change_encoder.py | microsoft/iclr2019-learning-to-represent-edits | e5777d6aa6cdeda500cf076646177c48d1cb4622 | [
"MIT"
] | 8 | 2021-03-15T18:57:18.000Z | 2021-08-23T11:28:22.000Z | diff_representation/model/edit_encoder/bag_of_edits_change_encoder.py | microsoft/iclr2019-learning-to-represent-edits | e5777d6aa6cdeda500cf076646177c48d1cb4622 | [
"MIT"
] | null | null | null | diff_representation/model/edit_encoder/bag_of_edits_change_encoder.py | microsoft/iclr2019-learning-to-represent-edits | e5777d6aa6cdeda500cf076646177c48d1cb4622 | [
"MIT"
] | 4 | 2021-03-27T14:19:09.000Z | 2021-09-13T12:35:31.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from itertools import chain
import numpy as np
import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from tqdm import tqdm
import sys
from diff_representation.change_entry import ChangeExample
from diff_representation.model import nn_utils
from diff_representation.model.embedder import EmbeddingTable
class BagOfEditsChangeEncoder(nn.Module):
    """Encode a code change as a "bag of edits": the summed embeddings of
    added tokens concatenated with the summed embeddings of deleted tokens
    (a REPLACE edit contributes to both halves)."""

    def __init__(self, token_embedder, vocab, **kwargs):
        super(BagOfEditsChangeEncoder, self).__init__()
        self.token_embedder = token_embedder
        self.token_embedding_size = self.token_embedder.weight.size(1)
        self.vocab = vocab
        # Output vector is [added-sum ; deleted-sum].
        self.change_vector_size = self.token_embedding_size * 2

    @property
    def device(self):
        return self.token_embedder.device

    def forward(self, code_changes, *args, **kwargs):
        """Return a (batch, 2 * embed_size) tensor with one change vector
        per entry of `code_changes`."""
        added_ids, added_owner = [], []
        deleted_ids, deleted_owner = [], []
        for sample_id, example in enumerate(code_changes):
            for tag, token in example.change_seq:
                if tag == 'ADD':
                    added_ids.append(self.vocab[token])
                    added_owner.append(sample_id)
                elif tag == 'DEL':
                    deleted_ids.append(self.vocab[token])
                    deleted_owner.append(sample_id)
                elif tag == 'REPLACE':
                    # token is an (old, new) pair: the new token counts as an
                    # addition, the old one as a deletion.
                    added_ids.append(self.vocab[token[1]])
                    deleted_ids.append(self.vocab[token[0]])
                    added_owner.append(sample_id)
                    deleted_owner.append(sample_id)

        all_ids = torch.tensor(added_ids + deleted_ids, dtype=torch.long,
                               device=self.device)
        all_embeds = self.token_embedder.weight[all_ids]
        added_embeds = all_embeds[:len(added_ids)]
        deleted_embeds = all_embeds[len(added_ids):]

        # Sum the edit embeddings into their owning example's row.
        added_sum = torch.zeros(len(code_changes), self.token_embedding_size,
                                dtype=torch.float, device=self.device)
        if added_owner:
            owner_idx = torch.tensor(added_owner, device=self.device)
            added_sum = added_sum.scatter_add_(
                0, owner_idx.unsqueeze(-1).expand_as(added_embeds), added_embeds)

        deleted_sum = torch.zeros(len(code_changes), self.token_embedding_size,
                                  dtype=torch.float, device=self.device)
        if deleted_owner:
            owner_idx = torch.tensor(deleted_owner, device=self.device)
            deleted_sum = deleted_sum.scatter_add_(
                0, owner_idx.unsqueeze(-1).expand_as(deleted_embeds), deleted_embeds)

        return torch.cat([added_sum, deleted_sum], dim=-1)

    def encode_code_change(self, prev_code_tokens, updated_code_tokens, code_encoder):
        """Encode a single previous/updated token-sequence pair."""
        example = ChangeExample(prev_code_tokens, updated_code_tokens, context=None)
        return self.forward([example]).data.cpu().numpy()[0]

    def encode_code_changes(self, examples, code_encoder, batch_size=32):
        """Encode every change in `examples`; returns a 2D numpy array of
        shape (len(examples), change_vector_size)."""
        batches = []
        for chunk in tqdm(nn_utils.batch_iter(examples, batch_size),
                          file=sys.stdout, total=len(examples)):
            batches.append(self.forward(chunk).data.cpu().numpy())
        return np.concatenate(batches, axis=0)
| 42.504505 | 167 | 0.636922 |
from itertools import chain
import numpy as np
import torch
from torch import nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from tqdm import tqdm
import sys
from diff_representation.change_entry import ChangeExample
from diff_representation.model import nn_utils
from diff_representation.model.embedder import EmbeddingTable
class BagOfEditsChangeEncoder(nn.Module):
    """Bag-of-edits change encoder: sums the embeddings of added tokens and
    of deleted tokens per example and concatenates the two sums (REPLACE
    contributes to both)."""

    def __init__(self, token_embedder, vocab, **kwargs):
        super(BagOfEditsChangeEncoder, self).__init__()
        self.token_embedder = token_embedder
        self.token_embedding_size = self.token_embedder.weight.size(1)
        self.vocab = vocab
        # Output vector is [added-sum ; deleted-sum].
        self.change_vector_size = self.token_embedding_size * 2

    @property
    def device(self):
        return self.token_embedder.device

    def forward(self, code_changes, *args, **kwargs):
        """Return a (batch, 2 * embed_size) tensor with one change vector
        per entry of `code_changes`."""
        add_ids, add_owner = [], []
        del_ids, del_owner = [], []
        for row, example in enumerate(code_changes):
            for tag, token in example.change_seq:
                if tag == 'ADD':
                    add_ids.append(self.vocab[token])
                    add_owner.append(row)
                elif tag == 'DEL':
                    del_ids.append(self.vocab[token])
                    del_owner.append(row)
                elif tag == 'REPLACE':
                    # token is an (old, new) pair.
                    add_ids.append(self.vocab[token[1]])
                    del_ids.append(self.vocab[token[0]])
                    add_owner.append(row)
                    del_owner.append(row)

        combined_ids = torch.tensor(add_ids + del_ids, dtype=torch.long,
                                    device=self.device)
        combined_embeds = self.token_embedder.weight[combined_ids]
        add_embeds = combined_embeds[:len(add_ids)]
        del_embeds = combined_embeds[len(add_ids):]

        add_sum = torch.zeros(len(code_changes), self.token_embedding_size,
                              dtype=torch.float, device=self.device)
        if add_owner:
            idx = torch.tensor(add_owner, device=self.device)
            add_sum = add_sum.scatter_add_(
                0, idx.unsqueeze(-1).expand_as(add_embeds), add_embeds)

        del_sum = torch.zeros(len(code_changes), self.token_embedding_size,
                              dtype=torch.float, device=self.device)
        if del_owner:
            idx = torch.tensor(del_owner, device=self.device)
            del_sum = del_sum.scatter_add_(
                0, idx.unsqueeze(-1).expand_as(del_embeds), del_embeds)

        return torch.cat([add_sum, del_sum], dim=-1)

    def encode_code_change(self, prev_code_tokens, updated_code_tokens, code_encoder):
        """Encode one previous/updated token-sequence pair."""
        example = ChangeExample(prev_code_tokens, updated_code_tokens, context=None)
        return self.forward([example]).data.cpu().numpy()[0]

    def encode_code_changes(self, examples, code_encoder, batch_size=32):
        """Encode every change in `examples` into a 2D numpy array of shape
        (len(examples), change_vector_size)."""
        pieces = []
        for chunk in tqdm(nn_utils.batch_iter(examples, batch_size),
                          file=sys.stdout, total=len(examples)):
            pieces.append(self.forward(chunk).data.cpu().numpy())
        return np.concatenate(pieces, axis=0)
| true | true |
f71aebc2afaca9f74e0aad77ccea915a36978cb2 | 1,995 | py | Python | examples/classify_capture.py | tbeatty/edgetpu | 14237f65ba07b7b1d8287e9f60dd20c88562871a | [
"Apache-2.0"
] | 10 | 2019-04-12T08:02:12.000Z | 2020-12-27T13:53:37.000Z | examples/classify_capture.py | tbeatty/edgetpu | 14237f65ba07b7b1d8287e9f60dd20c88562871a | [
"Apache-2.0"
] | 1 | 2019-04-03T12:22:55.000Z | 2019-04-04T10:42:35.000Z | examples/classify_capture.py | tbeatty/edgetpu | 14237f65ba07b7b1d8287e9f60dd20c88562871a | [
"Apache-2.0"
] | 3 | 2019-04-25T13:44:36.000Z | 2021-02-17T06:00:56.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A demo to classify Raspberry Pi camera stream."""
import argparse
import io
import time
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.utils import dataset_utils
import numpy as np
import picamera
def main():
  """Classify Pi camera frames on the Edge TPU and overlay the top result."""
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--model', help='File path of Tflite model.', required=True)
  parser.add_argument('--label', help='File path of label file.', required=True)
  args = parser.parse_args()
  labels = dataset_utils.read_label_file(args.label)
  engine = ClassificationEngine(args.model)
  with picamera.PiCamera() as camera:
    camera.resolution = (640, 480)
    camera.framerate = 30
    # Model input height/width; frames are resized to this on capture.
    _, height, width, _ = engine.get_input_tensor_shape()
    camera.start_preview()
    try:
      stream = io.BytesIO()
      for _ in camera.capture_continuous(
          stream, format='rgb', use_video_port=True, resize=(width, height)):
        # Reuse the same buffer for every captured frame.
        stream.truncate()
        stream.seek(0)
        input_tensor = np.frombuffer(stream.getvalue(), dtype=np.uint8)
        start_ms = time.time()
        results = engine.classify_with_input_tensor(input_tensor, top_k=1)
        elapsed_ms = time.time() - start_ms
        if results:
          # Overlay "<label> <score>" plus the latency in milliseconds.
          camera.annotate_text = '%s %.2f\n%.2fms' % (
              labels[results[0][0]], results[0][1], elapsed_ms * 1000.0)
    finally:
      camera.stop_preview()
if __name__ == '__main__':
  main()
| 33.25 | 80 | 0.702757 |
import argparse
import io
import time
from edgetpu.classification.engine import ClassificationEngine
from edgetpu.utils import dataset_utils
import numpy as np
import picamera
def main():
  """Run Edge TPU classification on the Raspberry Pi camera preview."""
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument(
      '--model', help='File path of Tflite model.', required=True)
  arg_parser.add_argument('--label', help='File path of label file.', required=True)
  cli_args = arg_parser.parse_args()
  labels = dataset_utils.read_label_file(cli_args.label)
  engine = ClassificationEngine(cli_args.model)
  with picamera.PiCamera() as camera:
    camera.resolution = (640, 480)
    camera.framerate = 30
    _, height, width, _ = engine.get_input_tensor_shape()
    camera.start_preview()
    try:
      # One reusable buffer; frames arrive already resized to the model's
      # expected input dimensions.
      frame_buf = io.BytesIO()
      for _ in camera.capture_continuous(
          frame_buf, format='rgb', use_video_port=True, resize=(width, height)):
        frame_buf.truncate()
        frame_buf.seek(0)
        pixels = np.frombuffer(frame_buf.getvalue(), dtype=np.uint8)
        t_begin = time.time()
        top_results = engine.classify_with_input_tensor(pixels, top_k=1)
        latency = time.time() - t_begin
        if top_results:
          camera.annotate_text = '%s %.2f\n%.2fms' % (
              labels[top_results[0][0]], top_results[0][1], latency * 1000.0)
    finally:
      camera.stop_preview()
if __name__ == '__main__':
  main()
| true | true |
f71aec18d787da6ff5ca5c22add4823da1992bf0 | 718 | py | Python | Solution/0081.Search_in_Rotated_Sorted_Array_II/0081.Search_in_Rotated_Sorted_Array_II.py | xleslie/LeetCode | 0af08817b3922e1bbc558091963fd4ff65a506ea | [
"MIT"
] | null | null | null | Solution/0081.Search_in_Rotated_Sorted_Array_II/0081.Search_in_Rotated_Sorted_Array_II.py | xleslie/LeetCode | 0af08817b3922e1bbc558091963fd4ff65a506ea | [
"MIT"
] | null | null | null | Solution/0081.Search_in_Rotated_Sorted_Array_II/0081.Search_in_Rotated_Sorted_Array_II.py | xleslie/LeetCode | 0af08817b3922e1bbc558091963fd4ff65a506ea | [
"MIT"
] | null | null | null | class Solution:
def search(self, nums: List[int], target: int) -> bool:
i,j=0,len(nums)
while i<j:
m=i+(j-i)//2
if nums[m]==target: return True
if nums[m]>nums[i]:
if target>=nums[i] and target<nums[m]:
j=m
else:
i=m+1
elif nums[m]<nums[i]:
if target>nums[m] and target <=nums[j-1]:
i=m+1
else:
j=m
elif nums[m]==nums[i]:
while i<j and nums[i] == nums[m]:
i+=1
while i<j and nums[j-1] ==nums[m]:
j-=1
return False
| 31.217391 | 59 | 0.367688 | class Solution:
    def search(self, nums: List[int], target: int) -> bool:
        # Search a rotated sorted array that may contain duplicates.
        # Binary search variant: O(log n) typically, O(n) worst case when
        # duplicates hide which half is sorted.
        i,j=0,len(nums)
        while i<j:
            m=i+(j-i)//2
            if nums[m]==target: return True
            if nums[m]>nums[i]:
                # Left half [i, m) is strictly sorted.
                if target>=nums[i] and target<nums[m]:
                    j=m
                else:
                    i=m+1
            elif nums[m]<nums[i]:
                # Right half (m, j) is sorted.
                if target>nums[m] and target <=nums[j-1]:
                    i=m+1
                else:
                    j=m
            elif nums[m]==nums[i]:
                # Ambiguous: skip duplicates of nums[m] at both ends.
                while i<j and nums[i] == nums[m]:
                    i+=1
                while i<j and nums[j-1] ==nums[m]:
                    j-=1
        return False
| true | true |
f71aedc6bc7df22a8c1ea544a471fb0c4efcfc27 | 1,844 | py | Python | tvm_test/run_simple_mod_op2_pth.py | junarwohn/tvm | 96c2e06cd063a695b3b485f2bdf8875df55fff1a | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | tvm_test/run_simple_mod_op2_pth.py | junarwohn/tvm | 96c2e06cd063a695b3b485f2bdf8875df55fff1a | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | tvm_test/run_simple_mod_op2_pth.py | junarwohn/tvm | 96c2e06cd063a695b3b485f2bdf8875df55fff1a | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | import tvm
from tvm import relay
from tvm import relay
from tvm.runtime.vm import VirtualMachine
from tvm.contrib.download import download_testdata
from SimpleModel import Net
import numpy as np
import cv2
# PyTorch imports
import torch
import torchvision
# Time library for speed check
import time
in_size = 32
input_shape = (1, 3, in_size, in_size)
def do_trace(model, inp):
    """Trace `model` with example input `inp` and return the traced module
    switched to eval mode."""
    traced = torch.jit.trace(model, inp)
    traced.eval()
    return traced
# model_func = torchvision.models.detection.maskrcnn_resnet50_fpn
# model = TraceWrapper(model_func(pretrained=True))
# Load the trained SimpleModel weights and switch to inference mode.
model = Net()
model.load_state_dict(torch.load('./simple_mod.pth'))
model.eval()
# Random example input used only for tracing the network.
inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))
with torch.no_grad():
    out = model(inp)
    script_module = do_trace(model, inp)
# Fetch a small test image and preprocess it to NCHW float in [0, 1].
img_url = (
    "https://raw.githubusercontent.com/dmlc/web-data/" "master/gluoncv/detection/street_small.jpg"
)
img_path = download_testdata(img_url, "test_street_small.jpg", module="data")
img = cv2.imread(img_path).astype("float32")
img = cv2.resize(img, (in_size, in_size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.transpose(img / 255.0, [2, 0, 1])
img = np.expand_dims(img, axis=0)
# Import the traced model into Relay and compile it for the CPU target.
input_name = "input0"
shape_list = [(input_name, input_shape)]
mod, params = relay.frontend.from_pytorch(script_module, shape_list)
target = "llvm"
with tvm.transform.PassContext(opt_level=2, disabled_pass=["FoldScaleAxis"]):
    vm_exec = relay.vm.compile(mod, target=target, params=params)
# dev = tvm.cuda()
dev = tvm.cpu()
vm = VirtualMachine(vm_exec, dev)
vm.set_input("main", **{input_name: img})
# Time a single inference run on the TVM virtual machine.
inference_start = time.time()
tvm_res = vm.run()
inference_end = time.time()
inference_time_tvm = inference_end - inference_start
print("Infernece Time : {}".format(inference_time_tvm))
| 25.260274 | 98 | 0.741323 | import tvm
from tvm import relay
from tvm import relay
from tvm.runtime.vm import VirtualMachine
from tvm.contrib.download import download_testdata
from SimpleModel import Net
import numpy as np
import cv2
import torch
import torchvision
import time
in_size = 32
input_shape = (1, 3, in_size, in_size)
def do_trace(model, inp):
    # Trace the model with an example input and return it in eval mode.
    model_trace = torch.jit.trace(model, inp)
    model_trace.eval()
    return model_trace
# Load trained weights and trace the model under no_grad.
model = Net()
model.load_state_dict(torch.load('./simple_mod.pth'))
model.eval()
inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))
with torch.no_grad():
    out = model(inp)
    script_module = do_trace(model, inp)
# Download and preprocess a test image to NCHW float in [0, 1].
img_url = (
    "https://raw.githubusercontent.com/dmlc/web-data/" "master/gluoncv/detection/street_small.jpg"
)
img_path = download_testdata(img_url, "test_street_small.jpg", module="data")
img = cv2.imread(img_path).astype("float32")
img = cv2.resize(img, (in_size, in_size))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.transpose(img / 255.0, [2, 0, 1])
img = np.expand_dims(img, axis=0)
# Import into Relay and compile for the CPU (llvm) target.
input_name = "input0"
shape_list = [(input_name, input_shape)]
mod, params = relay.frontend.from_pytorch(script_module, shape_list)
target = "llvm"
with tvm.transform.PassContext(opt_level=2, disabled_pass=["FoldScaleAxis"]):
    vm_exec = relay.vm.compile(mod, target=target, params=params)
dev = tvm.cpu()
vm = VirtualMachine(vm_exec, dev)
vm.set_input("main", **{input_name: img})
# Time one inference on the TVM virtual machine.
inference_start = time.time()
tvm_res = vm.run()
inference_end = time.time()
inference_time_tvm = inference_end - inference_start
print("Infernece Time : {}".format(inference_time_tvm))
| true | true |
f71af005c4808726491024543d12346686c50421 | 3,691 | py | Python | travel_time_visualization/server.py | rogerfitz/tutorials | dae6470bad63b71e755caaff0b69893f5c9a1d63 | [
"MIT"
] | 45 | 2017-07-13T23:20:54.000Z | 2022-02-25T16:48:52.000Z | flask_viz_server/server.py | rogerfitz/tutorials | dae6470bad63b71e755caaff0b69893f5c9a1d63 | [
"MIT"
] | 51 | 2017-07-28T13:48:26.000Z | 2021-11-29T06:37:41.000Z | flask_viz_server/server.py | rogerfitz/tutorials | dae6470bad63b71e755caaff0b69893f5c9a1d63 | [
"MIT"
] | 38 | 2017-07-13T15:48:30.000Z | 2022-02-26T04:12:06.000Z | from flask import Flask, jsonify,render_template,request
from config import API_KEY
import datetime
from collections import defaultdict
import requests
import pandas as pd
import sys
import logging
from itertools import repeat
app = Flask(__name__)
gunicorn_error_logger = logging.getLogger('gunicorn.error')
app.logger.handlers.extend(gunicorn_error_logger.handlers)
app.logger.setLevel(logging.DEBUG)
from multiprocessing.dummy import Pool as ThreadPool
pool = ThreadPool(20)
BASE_URL="https://maps.googleapis.com/maps/api/"
app.logger.debug(datetime.datetime.fromtimestamp(1498924020))
class GAPIError(Exception):
status_code = 31337
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
def makeRequest(url, API_KEY):
url+="&key=%s"%API_KEY
return requests.get(url).json()['rows'][0]['elements'][0]['duration_in_traffic']['value']
def getDistanceMatrix(origin,destination,mode,departure_time,traffic_model, API_KEY):
#UTC Time
url=BASE_URL+"distancematrix/json?"
params="origins=%s&destinations=%s&mode=%s&departure_time=%s&traffic_model=%s"%(origin,destination,mode,departure_time,traffic_model)
return makeRequest(url+params, API_KEY)
def getNearest(dt,offset):
return dt + (datetime.datetime.min - dt) % datetime.timedelta(minutes=offset)
def getChartData(starting_address,destination_address, leave_after, hours_to_grab,API_KEY,OFFSET=15):
start_date=getNearest(leave_after,15)
request_times=defaultdict(dict)
dts=[int(leave_after.timestamp())]
for dt in (start_date + datetime.timedelta(minutes=offset) for offset in range(0,60*hours_to_grab,OFFSET)):
dts.append(int(dt.timestamp()))
request_times={}
for traffic_model in ["best_guess","pessimistic","optimistic"]:
results=pool.starmap(
getDistanceMatrix, zip(repeat(starting_address),repeat(destination_address),repeat("car"),dts,repeat(traffic_model), repeat(API_KEY))
)
request_times[traffic_model]=results
request_times["index"]=dts
travel_times=pd.DataFrame.from_dict(request_times).set_index("index")/60
viz_df=travel_times.reset_index()
viz_df['x']=viz_df['index']*1000#Add milliseconds for JS datetime
del viz_df['index']
viz_json=viz_df.to_dict(orient="list")
#to c3 Columns
columns=[]
for col,vals in viz_json.items():
if col!="x":
vals=[round(x) for x in vals]
columns.append([col]+vals)
return columns
@app.route("/")
def index():
return render_template('index.html', API_KEY=API_KEY)
@app.route('/data')
def data():
app.logger.debug(request.args)
leaveAfter=request.args.get("leaveAfter")
leaveAfter=datetime.datetime.fromtimestamp(int(leaveAfter)/1000)
USERS_API_KEY=request.args.get("API_KEY",default=API_KEY)
now=datetime.datetime.now()
if leaveAfter<now:
leaveAfter=now
try:
response=getChartData(request.args.get("startingAddress"),request.args.get("destinationAddress"),leaveAfter,8, USERS_API_KEY)
return jsonify(response)
except:
raise GAPIError("API Key no longer valid", status_code=31337)
@app.errorhandler(GAPIError)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
| 35.490385 | 145 | 0.715253 | from flask import Flask, jsonify,render_template,request
from config import API_KEY
import datetime
from collections import defaultdict
import requests
import pandas as pd
import sys
import logging
from itertools import repeat
app = Flask(__name__)
gunicorn_error_logger = logging.getLogger('gunicorn.error')
app.logger.handlers.extend(gunicorn_error_logger.handlers)
app.logger.setLevel(logging.DEBUG)
from multiprocessing.dummy import Pool as ThreadPool
pool = ThreadPool(20)
BASE_URL="https://maps.googleapis.com/maps/api/"
app.logger.debug(datetime.datetime.fromtimestamp(1498924020))
class GAPIError(Exception):
    """Error raised for failed Google Maps API calls; the Flask error
    handler turns it into a JSON response."""

    status_code = 31337  # default; per-instance override via __init__

    def __init__(self, message, status_code=None, payload=None):
        Exception.__init__(self)
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Payload (if any) merged with the error message."""
        result = dict(self.payload or ())
        result['message'] = self.message
        return result
def makeRequest(url, API_KEY):
    # Append the key, issue the request and extract the traffic-aware
    # duration in seconds from the Distance Matrix JSON response.
    url+="&key=%s"%API_KEY
    return requests.get(url).json()['rows'][0]['elements'][0]['duration_in_traffic']['value']
def getDistanceMatrix(origin,destination,mode,departure_time,traffic_model, API_KEY):
    # Query the Distance Matrix API; `departure_time` is a unix timestamp,
    # `traffic_model` one of best_guess/pessimistic/optimistic.
    url=BASE_URL+"distancematrix/json?"
    params="origins=%s&destinations=%s&mode=%s&departure_time=%s&traffic_model=%s"%(origin,destination,mode,departure_time,traffic_model)
    return makeRequest(url+params, API_KEY)
def getNearest(dt,offset):
    """Round `dt` up to the next `offset`-minute boundary (identity when
    already on one)."""
    granularity = datetime.timedelta(minutes=offset)
    return dt + (datetime.datetime.min - dt) % granularity
def getChartData(starting_address,destination_address, leave_after, hours_to_grab,API_KEY,OFFSET=15):
    # Sample travel times every OFFSET minutes for `hours_to_grab` hours
    # and return them as c3.js-style columns (values in minutes, rounded).
    start_date=getNearest(leave_after,15)
    request_times=defaultdict(dict)
    # Departure timestamps: the requested instant plus each OFFSET slot.
    dts=[int(leave_after.timestamp())]
    for dt in (start_date + datetime.timedelta(minutes=offset) for offset in range(0,60*hours_to_grab,OFFSET)):
        dts.append(int(dt.timestamp()))
    request_times={}
    # One batch of parallel API calls per traffic model.
    for traffic_model in ["best_guess","pessimistic","optimistic"]:
        results=pool.starmap(
            getDistanceMatrix, zip(repeat(starting_address),repeat(destination_address),repeat("car"),dts,repeat(traffic_model), repeat(API_KEY))
        )
        request_times[traffic_model]=results
    request_times["index"]=dts
    # Seconds -> minutes, indexed by departure timestamp.
    travel_times=pd.DataFrame.from_dict(request_times).set_index("index")/60
    viz_df=travel_times.reset_index()
    # Milliseconds for the JS Date values on the x axis.
    viz_df['x']=viz_df['index']*1000
    del viz_df['index']
    viz_json=viz_df.to_dict(orient="list")
    columns=[]
    for col,vals in viz_json.items():
        if col!="x":
            vals=[round(x) for x in vals]
        columns.append([col]+vals)
    return columns
@app.route("/")
def index():
return render_template('index.html', API_KEY=API_KEY)
@app.route('/data')
def data():
    """Return travel-time chart data for the request's query parameters.

    Raises GAPIError (rendered as JSON by the error handler) when the
    upstream Google API call fails.
    """
    app.logger.debug(request.args)
    leaveAfter=request.args.get("leaveAfter")
    leaveAfter=datetime.datetime.fromtimestamp(int(leaveAfter)/1000)
    USERS_API_KEY=request.args.get("API_KEY",default=API_KEY)
    now=datetime.datetime.now()
    # Never ask the API about departures in the past.
    if leaveAfter<now:
        leaveAfter=now
    try:
        response=getChartData(request.args.get("startingAddress"),request.args.get("destinationAddress"),leaveAfter,8, USERS_API_KEY)
        return jsonify(response)
    except Exception as exc:
        # Narrowed from a bare `except:`; chain the original failure so it
        # still appears in tracebacks/logs.
        raise GAPIError("API Key no longer valid", status_code=31337) from exc
@app.errorhandler(GAPIError)
def handle_invalid_usage(error):
    # Render a raised GAPIError as a JSON response carrying its status code,
    # so the front end can distinguish API-key failures from other errors.
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response
if __name__ == '__main__':
    # Dev entry point: bind to all interfaces so container/VM port mapping works.
    app.run(host='0.0.0.0', port=5000)
| true | true |
f71af0636d15e0878c031743b0f73cf871004237 | 19,689 | py | Python | paddlenlp/transformers/bert/tokenizer.py | Leedoo/PaddleNLP | ac3a6165e5eb6d638a4165709fd6cf91c11077c7 | [
"Apache-2.0"
] | 3 | 2021-09-06T11:27:49.000Z | 2021-11-09T08:19:00.000Z | paddlenlp/transformers/bert/tokenizer.py | narrowser/PaddleNLP | fd740cb7a9d83b91116d3ad9cf6b4e3a683481f4 | [
"Apache-2.0"
] | null | null | null | paddlenlp/transformers/bert/tokenizer.py | narrowser/PaddleNLP | fd740cb7a9d83b91116d3ad9cf6b4e3a683481f4 | [
"Apache-2.0"
] | 4 | 2021-08-23T07:46:06.000Z | 2021-09-23T08:37:03.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import io
import json
import os
import six
import unicodedata
from .. import PretrainedTokenizer
from ..tokenizer_utils import convert_to_unicode, whitespace_tokenize, _is_whitespace, _is_control, _is_punctuation
__all__ = ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer']
class BasicTokenizer(object):
    """
    Runs basic tokenization: invalid-char cleanup, CJK isolation, optional
    lower-casing with accent stripping, and punctuation splitting.

    Args:
        do_lower_case (bool): Whether the text strips accents and converts to
            lower case. For BERT pretrained models this is False for cased
            models and True otherwise. Default: True.
    """

    def __init__(self, do_lower_case=True):
        """Constructs a BasicTokenizer."""
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """
        Tokenizes a piece of text using the basic tokenizer.

        Args:
            text (str): A piece of text.

        Returns:
            list(str): A list of tokens.
        """
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        text = self._tokenize_chinese_chars(text)
        tokens = []
        for raw_token in whitespace_tokenize(text):
            if self.do_lower_case:
                # Lower-case first, then drop combining accent marks.
                raw_token = self._run_strip_accents(raw_token.lower())
            tokens.extend(self._run_split_on_punc(raw_token))
        return whitespace_tokenize(" ".join(tokens))

    def _run_strip_accents(self, text):
        """Strips accents (Unicode combining marks) from a piece of text."""
        decomposed = unicodedata.normalize("NFD", text)
        return "".join(
            ch for ch in decomposed if unicodedata.category(ch) != "Mn")

    def _run_split_on_punc(self, text):
        """Splits punctuation off a piece of text; each punct char stands alone."""
        pieces = []
        word_open = False
        for ch in text:
            if _is_punctuation(ch):
                pieces.append([ch])
                word_open = False
            else:
                if not word_open:
                    pieces.append([])
                    word_open = True
                pieces[-1].append(ch)
        return ["".join(piece) for piece in pieces]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        out = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                out.extend((" ", ch, " "))
            else:
                out.append(ch)
        return "".join(out)

    def _is_chinese_char(self, cp):
        """Checks whether codepoint *cp* lies in a CJK ideograph block."""
        # Only the CJK Unified Ideographs blocks:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        # Hangul, Hiragana and Katakana are space-separated scripts and are
        # deliberately NOT covered, so they are handled like other languages.
        cjk_ranges = (
            (0x4E00, 0x9FFF),
            (0x3400, 0x4DBF),
            (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F),
            (0x2B740, 0x2B81F),
            (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF),
            (0x2F800, 0x2FA1F),
        )
        return any(lo <= cp <= hi for lo, hi in cjk_ranges)

    def _clean_text(self, text):
        """Removes invalid characters and normalizes whitespace to spaces."""
        cleaned = []
        for ch in text:
            code = ord(ch)
            if code == 0 or code == 0xfffd or _is_control(ch):
                continue
            cleaned.append(" " if _is_whitespace(ch) else ch)
        return "".join(cleaned)
class WordpieceTokenizer(object):
    """
    Runs WordPiece tokenization using a greedy longest-match-first algorithm.

    Args:
        vocab (Vocab|dict): Vocabulary used for subword lookup.
        unk_token (str): Token substituted for words that cannot be split.
        max_input_chars_per_word (int): Words longer than this are mapped
            directly to *unk_token*. Default: 100.
    """

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Tokenizes a piece of text into its word pieces.

        *text* should be a single token or whitespace-separated tokens that
        have already been passed through `BasicTokenizer`.

        Example: "unaffable" -> ["un", "##aff", "##able"]

        Returns:
            list(str): A list of wordpiece tokens.
        """
        pieces = []
        for word in whitespace_tokenize(text):
            letters = list(word)
            if len(letters) > self.max_input_chars_per_word:
                pieces.append(self.unk_token)
                continue
            word_pieces = []
            failed = False
            begin = 0
            while begin < len(letters):
                # Greedy longest match: shrink the candidate from the right
                # until it appears in the vocabulary.
                end = len(letters)
                match = None
                while begin < end:
                    candidate = "".join(letters[begin:end])
                    if begin > 0:
                        candidate = "##" + candidate  # continuation marker
                    if candidate in self.vocab:
                        match = candidate
                        break
                    end -= 1
                if match is None:
                    # No prefix of the remainder is in the vocab: whole word
                    # becomes unknown.
                    failed = True
                    break
                word_pieces.append(match)
                begin = end
            if failed:
                pieces.append(self.unk_token)
            else:
                pieces.extend(word_pieces)
        return pieces
class BertTokenizer(PretrainedTokenizer):
    """
    Constructs a BERT tokenizer. It uses a basic tokenizer to do punctuation
    splitting, lower casing and so on, and follows a WordPiece tokenizer to
    tokenize as subwords.
    Args:
        vocab_file (str): file path of the vocabulary
        do_lower_case (bool): Whether the text strips accents and convert to
            lower case. If you use the BERT pretrained model, lower is set to
            False when using the cased model, otherwise it is set to True.
            Default: True.
        unk_token (str): The special token for unknown words. Default: "[UNK]".
        sep_token (str): The special token for separator token . Default: "[SEP]".
        pad_token (str): The special token for padding. Default: "[PAD]".
        cls_token (str): The special token for cls. Default: "[CLS]".
        mask_token (str): The special token for mask. Default: "[MASK]".
    Examples:
        .. code-block:: python
            from paddle.hapi.text import BertTokenizer
            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            # the following line get: ['he', 'was', 'a', 'puppet', '##eer']
            tokens = tokenizer('He was a puppeteer')
            # the following line get: 'he was a puppeteer'
            tokenizer.convert_tokens_to_string(tokens)
    """
    resource_files_names = {"vocab_file": "vocab.txt"}  # for save_pretrained
    # Download URLs for each pretrained model's vocabulary file.
    pretrained_resource_files_map = {
        "vocab_file": {
            "bert-base-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-uncased-vocab.txt",
            "bert-large-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-uncased-vocab.txt",
            "bert-base-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-cased-vocab.txt",
            "bert-large-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-cased-vocab.txt",
            "bert-base-multilingual-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-uncased-vocab.txt",
            "bert-base-multilingual-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-cased-vocab.txt",
            "bert-base-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "bert-wwm-chinese":
            "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-chinese-vocab.txt",
            "bert-wwm-ext-chinese":
            "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-ext-chinese-vocab.txt",
            "macbert-large-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "macbert-base-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "simbert-base-chinese":
            "https://paddlenlp.bj.bcebos.com/models/transformers/simbert/vocab.txt",
        }
    }
    # Default constructor kwargs per pretrained model (cased models keep case).
    pretrained_init_configuration = {
        "bert-base-uncased": {
            "do_lower_case": True
        },
        "bert-large-uncased": {
            "do_lower_case": True
        },
        "bert-base-cased": {
            "do_lower_case": False
        },
        "bert-large-cased": {
            "do_lower_case": False
        },
        "bert-base-multilingual-uncased": {
            "do_lower_case": True
        },
        "bert-base-multilingual-cased": {
            "do_lower_case": False
        },
        "bert-base-chinese": {
            "do_lower_case": False
        },
        "bert-wwm-chinese": {
            "do_lower_case": False
        },
        "bert-wwm-ext-chinese": {
            "do_lower_case": False
        },
        "macbert-large-chinese": {
            "do_lower_case": False
        },
        "macbert-base-chinese": {
            "do_lower_case": False
        },
        "simbert-base-chinese":{
            "do_lower_case": True
        },
    }
    padding_side = 'right'

    def __init__(self,
                 vocab_file,
                 do_lower_case=True,
                 unk_token="[UNK]",
                 sep_token="[SEP]",
                 pad_token="[PAD]",
                 cls_token="[CLS]",
                 mask_token="[MASK]"):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                .format(vocab_file))
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        # Two-stage pipeline: basic (punctuation/case) then WordPiece subwords.
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(
            vocab=self.vocab, unk_token=unk_token)

    @property
    def vocab_size(self):
        """
        Return the size of vocabulary.
        Returns:
            int: the size of vocabulary.
        """
        return len(self.vocab)

    def _tokenize(self, text):
        """
        End-to-end tokenization for BERT models.
        Args:
            text (str): The text to be tokenized.
        Returns:
            list: A list of string representing converted tokens.
        """
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def tokenize(self, text):
        """
        End-to-end tokenization for BERT models.
        Args:
            text (str): The text to be tokenized.
        Returns:
            list: A list of string representing converted tokens.
        """
        return self._tokenize(text)

    def convert_tokens_to_string(self, tokens):
        """
        Converts a sequence of tokens (list of string) in a single string. Since
        the usage of WordPiece introducing `##` to concat subwords, also remove
        `##` when converting.
        Args:
            tokens (list): A list of string representing tokens to be converted.
        Returns:
            str: Converted string from tokens.
        """
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def num_special_tokens_to_add(self, pair=False):
        """
        Returns the number of added tokens when encoding a sequence with special tokens.
        Note:
            This encodes inputs and checks the number of added tokens, and is therefore not efficient. Do not put this
            inside your training loop.
        Args:
            pair: Returns the number of added tokens in the case of a sequence pair if set to True, returns the
                number of added tokens in the case of a single sequence if set to False.
        Returns:
            Number of tokens added to sequences
        """
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
                                                  if pair else None))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens.
        A BERT sequence has the following format:
        ::
            - single sequence: ``[CLS] X [SEP]``
            - pair of sequences: ``[CLS] A [SEP] B [SEP]``
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
        Returns:
            :obj:`List[int]`: List of input_id with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self,
                                                 offset_mapping_0,
                                                 offset_mapping_1=None):
        """
        Build offset map from a pair of offset map by concatenating and adding offsets of special tokens.
        A BERT offset_mapping has the following format:
        ::
            - single sequence: ``(0,0) X (0,0)``
            - pair of sequences: ``(0,0) A (0,0) B (0,0)``
        Args:
            offset_mapping_ids_0 (:obj:`List[tuple]`):
                List of char offsets to which the special tokens will be added.
            offset_mapping_ids_1 (:obj:`List[tuple]`, `optional`):
                Optional second list of char offsets for offset mapping pairs.
        Returns:
            :obj:`List[tuple]`: List of char offsets with the appropriate offsets of special tokens.
        """
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0)
                                              ] + offset_mapping_1 + [(0, 0)]

    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task.
        A BERT sequence pair mask has the following format:
        ::
            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence |
        If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).
        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
        Returns:
            :obj:`List[int]`: List of token_type_id according to the given sequence(s).
        """
        _sep = [self.sep_token_id]
        _cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(_cls + token_ids_0 + _sep) * [0]
        return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
                                                          _sep) * [1]

    def get_special_tokens_mask(self,
                                token_ids_0,
                                token_ids_1=None,
                                already_has_special_tokens=False):
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``encode`` methods.
        Args:
            token_ids_0 (List[int]): List of ids of the first sequence.
            token_ids_1 (List[int], optional): List of ids of the second sequence.
            already_has_special_tokens (bool, optional): Whether or not the token list is already
                formatted with special tokens for the model. Defaults to None.
        Returns:
            results (List[int]): The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(
                map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0,
                    token_ids_0))
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + (
                [0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
| 37.863462 | 121 | 0.565646 |
import copy
import io
import json
import os
import six
import unicodedata
from .. import PretrainedTokenizer
from ..tokenizer_utils import convert_to_unicode, whitespace_tokenize, _is_whitespace, _is_control, _is_punctuation
__all__ = ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer']
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
        # Whether to lower-case and strip accents before splitting.
        self.do_lower_case = do_lower_case

    def tokenize(self, text):
        """Tokenize *text* into basic (pre-WordPiece) tokens."""
        text = convert_to_unicode(text)
        text = self._clean_text(text)
        text = self._tokenize_chinese_chars(text)
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
        output_tokens = whitespace_tokenize(" ".join(split_tokens))
        return output_tokens

    def _run_strip_accents(self, text):
        """Strip accents (combining marks) from *text*."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _run_split_on_punc(self, text):
        """Split *text* on punctuation; each punctuation char becomes a token."""
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Add whitespace around every CJK character."""
        output = []
        for char in text:
            cp = ord(char)
            if self._is_chinese_char(cp):
                output.append(" ")
                output.append(char)
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)

    def _is_chinese_char(self, cp):
        """Return True if codepoint *cp* is in a CJK ideograph Unicode block."""
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or
                (cp >= 0x3400 and cp <= 0x4DBF) or
                (cp >= 0x20000 and cp <= 0x2A6DF) or
                (cp >= 0x2A700 and cp <= 0x2B73F) or
                (cp >= 0x2B740 and cp <= 0x2B81F) or
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or
                (cp >= 0x2F800 and cp <= 0x2FA1F)):
            return True
        return False

    def _clean_text(self, text):
        """Drop invalid/control characters and normalize whitespace to spaces."""
        output = []
        for char in text:
            cp = ord(char)
            if cp == 0 or cp == 0xfffd or _is_control(char):
                continue
            if _is_whitespace(char):
                output.append(" ")
            else:
                output.append(char)
        return "".join(output)
class WordpieceTokenizer(object):
    """Runs greedy longest-match-first WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        self.vocab = vocab
        self.unk_token = unk_token
        # Words longer than this are mapped straight to unk_token.
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """Split already basic-tokenized *text* into word pieces
        (e.g. "unaffable" -> ["un", "##aff", "##able"])."""
        output_tokens = []
        for token in whitespace_tokenize(text):
            chars = list(token)
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            is_bad = False
            start = 0
            sub_tokens = []
            while start < len(chars):
                # Greedy longest match: shrink candidate from the right.
                end = len(chars)
                cur_substr = None
                while start < end:
                    substr = "".join(chars[start:end])
                    if start > 0:
                        substr = "##" + substr
                    if substr in self.vocab:
                        cur_substr = substr
                        break
                    end -= 1
                if cur_substr is None:
                    is_bad = True
                    break
                sub_tokens.append(cur_substr)
                start = end
            if is_bad:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(sub_tokens)
        return output_tokens
class BertTokenizer(PretrainedTokenizer):
    """BERT tokenizer: basic tokenization followed by WordPiece subwords."""
    resource_files_names = {"vocab_file": "vocab.txt"}  # for save_pretrained
    # Download URLs for each pretrained model's vocabulary file.
    pretrained_resource_files_map = {
        "vocab_file": {
            "bert-base-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-uncased-vocab.txt",
            "bert-large-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-uncased-vocab.txt",
            "bert-base-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-cased-vocab.txt",
            "bert-large-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-large-cased-vocab.txt",
            "bert-base-multilingual-uncased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-uncased-vocab.txt",
            "bert-base-multilingual-cased":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-multilingual-cased-vocab.txt",
            "bert-base-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "bert-wwm-chinese":
            "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-chinese-vocab.txt",
            "bert-wwm-ext-chinese":
            "http://paddlenlp.bj.bcebos.com/models/transformers/bert/bert-wwm-ext-chinese-vocab.txt",
            "macbert-large-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "macbert-base-chinese":
            "https://paddle-hapi.bj.bcebos.com/models/bert/bert-base-chinese-vocab.txt",
            "simbert-base-chinese":
            "https://paddlenlp.bj.bcebos.com/models/transformers/simbert/vocab.txt",
        }
    }
    # Default constructor kwargs per pretrained model (cased models keep case).
    pretrained_init_configuration = {
        "bert-base-uncased": {
            "do_lower_case": True
        },
        "bert-large-uncased": {
            "do_lower_case": True
        },
        "bert-base-cased": {
            "do_lower_case": False
        },
        "bert-large-cased": {
            "do_lower_case": False
        },
        "bert-base-multilingual-uncased": {
            "do_lower_case": True
        },
        "bert-base-multilingual-cased": {
            "do_lower_case": False
        },
        "bert-base-chinese": {
            "do_lower_case": False
        },
        "bert-wwm-chinese": {
            "do_lower_case": False
        },
        "bert-wwm-ext-chinese": {
            "do_lower_case": False
        },
        "macbert-large-chinese": {
            "do_lower_case": False
        },
        "macbert-base-chinese": {
            "do_lower_case": False
        },
        "simbert-base-chinese":{
            "do_lower_case": True
        },
    }
    padding_side = 'right'

    def __init__(self,
                 vocab_file,
                 do_lower_case=True,
                 unk_token="[UNK]",
                 sep_token="[SEP]",
                 pad_token="[PAD]",
                 cls_token="[CLS]",
                 mask_token="[MASK]"):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                .format(vocab_file))
        self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)
        # Two-stage pipeline: basic (punctuation/case) then WordPiece subwords.
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.wordpiece_tokenizer = WordpieceTokenizer(
            vocab=self.vocab, unk_token=unk_token)

    @property
    def vocab_size(self):
        """Size of the vocabulary."""
        return len(self.vocab)

    def _tokenize(self, text):
        """End-to-end tokenization: basic tokens split into word pieces."""
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        return split_tokens

    def tokenize(self, text):
        """Public entry point; see :meth:`_tokenize`."""
        return self._tokenize(text)

    def convert_tokens_to_string(self, tokens):
        """Join tokens back into a string, removing WordPiece "##" markers."""
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def num_special_tokens_to_add(self, pair=False):
        """Number of special tokens added for a single sequence (or a pair)."""
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
                                                  if pair else None))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` input ids."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self,
                                                 offset_mapping_0,
                                                 offset_mapping_1=None):
        """Add (0, 0) offset entries in the special-token positions."""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0)
                                              ] + offset_mapping_1 + [(0, 0)]

    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        """Segment ids: 0 for the first sequence (incl. CLS/SEP), 1 for the second."""
        _sep = [self.sep_token_id]
        _cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(_cls + token_ids_0 + _sep) * [0]
        return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
                                                          _sep) * [1]

    def get_special_tokens_mask(self,
                                token_ids_0,
                                token_ids_1=None,
                                already_has_special_tokens=False):
        """Mask with 1 at special-token positions and 0 at sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(
                map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0,
                    token_ids_0))
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + (
                [0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
f71af1cd54d5851bd4030703d9d6a0bb37011f59 | 8,309 | py | Python | src/mission_node/src/intersection_detector.py | mommy79/AuDi-GIT-turtlebot3_autorace | fd1382246f1ee74ee70857006563184d672a6666 | [
"Apache-2.0"
] | 1 | 2021-06-13T06:20:15.000Z | 2021-06-13T06:20:15.000Z | src/mission_node/src/intersection_detector.py | taening/AuDi-GIT-turtlebot3_autorace | fd1382246f1ee74ee70857006563184d672a6666 | [
"Apache-2.0"
] | null | null | null | src/mission_node/src/intersection_detector.py | taening/AuDi-GIT-turtlebot3_autorace | fd1382246f1ee74ee70857006563184d672a6666 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import math
class IntersectionDetector:
    """Vision helpers for the intersection mission.

    Detects the white entry stop line, the exit lane line and the blue
    left/right direction signs.  `img_trans` is a bird's-eye (perspective
    transformed) frame -- assumed w384 x h240 per the inline note, TODO
    confirm; `img_ori` is the raw camera frame (BGR assumed -- the HSV
    conversion below uses COLOR_BGR2HSV).
    """

    def __init__(self):
        # HSV thresholds for the blue background of direction signs.
        self.lower_blue = np.array([85, 90, 120], np.uint8)
        self.upper_blue = np.array([115, 255, 255], np.uint8)

    def fn_find_intersection_line(self, img_trans):
        """Detect the wide white stop line marking the intersection entry.

        Returns (intersection_check, img_debug): True when a bright contour
        wider than 330 px is found in the top half of the transformed frame.
        """
        # Image cropped to the ROI (top half of the transformed frame).
        pers_height, pers_width = img_trans.shape[:2]  # shape is w384 x h240
        img_gray = cv2.cvtColor(img_trans[:int(pers_height * 1/ 2), :].copy(), cv2.COLOR_RGB2GRAY)
        # Binarize bright (white) pixels, then open+close to remove speckle.
        _, img_intersection = cv2.threshold(img_gray, 180, 255, 0)
        img_intersection = cv2.morphologyEx(img_intersection, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
        img_intersection = cv2.morphologyEx(img_intersection, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8))
        img_debug = cv2.merge((img_intersection, img_intersection, img_intersection)).copy()
        _, list_intersection_contour, _ = cv2.findContours(img_intersection, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        intersection_check = False
        for intersection_contour in list_intersection_contour:
            cv2.drawContours(img_debug, [intersection_contour], 0, (0, 0, 255), 2)
            x_stop, y_stop, w_stop, h_stop = cv2.boundingRect(intersection_contour)
            cv2.putText(img_debug, 'w: {}, h: {}'.format(w_stop, h_stop), (intersection_contour[0][0][0]+10, intersection_contour[0][0][1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 255, 255))
            # A stop line spans nearly the full 384 px frame width.
            if 330 < w_stop:
                cv2.drawContours(img_debug, [intersection_contour], 0, (0, 255, 0), 2)
                intersection_check = True
        return intersection_check, img_debug

    def fn_find_exit_line(self, img_trans, direction='left'):
        """Detect the exit lane line on the side opposite *direction*.

        Returns (exit_check, exit_pos, img_debug); exit_pos is the first
        contour point of the accepted contour, (0, 0) when none matched.
        """
        # Image cropped to the ROI: right half when turning left, left half
        # when turning right.
        pers_height, pers_width = img_trans.shape[:2]  # shape is w384 x h240
        if direction == 'left':
            img_gray = cv2.cvtColor(img_trans[:, int(pers_width * 1/ 2):].copy(), cv2.COLOR_RGB2GRAY)
        else:
            img_gray = cv2.cvtColor(img_trans[:, :int(pers_width * 1/ 2)].copy(), cv2.COLOR_RGB2GRAY)
        _, img_exit = cv2.threshold(img_gray, 190, 255, 0)
        img_exit = cv2.morphologyEx(img_exit, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
        img_exit = cv2.morphologyEx(img_exit, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8))
        img_debug = cv2.merge((img_exit, img_exit, img_exit)).copy()
        _, list_exit_contour, _ = cv2.findContours(img_exit, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        exit_check = False
        exit_pos = (0, 0)
        for exit_contour in list_exit_contour:
            cv2.drawContours(img_debug, [exit_contour], 0, (0, 0, 255), 2)
            x_exit, y_exit, w_exit, h_exit = cv2.boundingRect(exit_contour)
            bottom_most_pos = tuple(exit_contour[exit_contour[:, :, 1].argmax()][0])
            # Scan upward from the frame bottom for the first lit pixel in the
            # contour's bottom-most column; that row index is used as the
            # reachable line length.  NOTE(review): breaks at the first hit,
            # so val_height is the lowest lit row, not a pixel count -- confirm
            # this matches the intended "length" semantics.
            val_height = h_exit
            for pos_y in range(pers_height-1, 0, -1):
                if img_gray[pos_y, bottom_most_pos[0]] != 0:
                    val_height = pos_y
                    break
            cv2.putText(img_debug, 'w: {}, h: {}, length: {}'.format(w_exit, h_exit, val_height), (exit_contour[0][0][0]+10, exit_contour[0][0][1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 255, 255))
            # Accept a tall contour covering most of the measured length and
            # at least half the frame height.
            if h_exit > val_height * 4/5 and h_exit > pers_height/2:
                cv2.drawContours(img_debug, [exit_contour], 0, (0, 255, 0), 2)
                exit_pos = exit_contour[0][0]
                exit_check = True
        return exit_check, exit_pos, img_debug

    def fn_find_direction_sign(self, img_ori):
        """Detect blue left/right arrow signs in the top half of the frame.

        Returns (left_sign_detect, right_sign_detect, img_debug); the debug
        image stacks the annotated ROI over the blue-masked ROI.
        """
        left_sign_detect = False
        right_sign_detect = False
        img_height, img_width = img_ori.shape[:2]
        img_roi = img_ori[:int(img_height*1 / 2), :].copy()
        img_hsv = cv2.cvtColor(img_roi, cv2.COLOR_BGR2HSV)
        # HSV filter - blue color of the sign background.
        img_mask_b = cv2.inRange(img_hsv, self.lower_blue, self.upper_blue)
        img_mask_b = cv2.morphologyEx(img_mask_b, cv2.MORPH_OPEN, np.ones((7, 7), np.uint8))
        img_mask_b = cv2.morphologyEx(img_mask_b, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))
        #_, list_obj_contour, _ = cv2.findContours(img_mask_b, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        _, list_obj_contour, _ = cv2.findContours(img_mask_b, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        img_blue = cv2.bitwise_and(img_roi, img_roi, mask=img_mask_b)
        img_debug = img_roi.copy()
        # Stage 1: keep blue blobs whose size/shape looks like a sign board.
        list_obj = []
        for obj_contour in list_obj_contour:
            #cv2.drawContours(img_blue, [contour], 0, (0, 0, 255), 2)
            x, y, w, h = cv2.boundingRect(obj_contour)
            area = cv2.contourArea(obj_contour)
            aspect_ratio = float(w) / h
            area_ratio = float(area) / (w*h)
            cv2.rectangle(img_debug, (x, y), (x + w, y + h), (0, 0, 255), 2)
            cv2.putText(img_debug, 'w: {}, h: {}, aspect_ratio: {:.2f}, area_ratio: {:.2f}'.format(w, h, aspect_ratio, area_ratio), (x+10, y+10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 127, 0))
            if (50 < w < 150) and (50 < h < 150) and (0.8 < aspect_ratio < 2.5) and (area_ratio > 0.5):
                cv2.rectangle(img_debug, (x, y), (x + w, y + h), (0, 255, 255), 2)
                list_obj.append((img_roi[y:y+h, x:x+w].copy(), (x, y, w, h)))
        # Stage 2: inside each candidate board, find the white arrow closest
        # to the board center and classify by where its lowest point falls.
        for (img_obj, (obj_x, obj_y, obj_w, obj_h)) in list_obj:
            img_obj_gray = cv2.cvtColor(img_obj, cv2.COLOR_BGR2GRAY)
            _, img_obj_binary = cv2.threshold(img_obj_gray, 180, 255, cv2.THRESH_BINARY)
            img_obj_binary = cv2.morphologyEx(img_obj_binary, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
            _, list_arrow_contour, _ = cv2.findContours(img_obj_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            obj_x_mid = int(obj_w / 2)
            obj_y_mid = int(obj_h / 2)
            min_val_dis = 30
            bottom_most_pos = None
            for arrow_contour in list_arrow_contour:
                mask_arrow = np.zeros(img_obj_gray.shape, np.uint8)
                cv2.drawContours(mask_arrow, [arrow_contour], 0, 255, -1)
                arrow_x, arrow_y, arrow_w, arrow_h = cv2.boundingRect(arrow_contour)
                cv2.rectangle(img_debug, (obj_x + arrow_x, obj_y + arrow_y), (obj_x + arrow_x + arrow_w, arrow_y + obj_y + arrow_h), (255, 255, 0), 1)
                arrow_area = cv2.contourArea(arrow_contour)
                arrow_aspect_ratio = float(arrow_w) / arrow_h
                arrow_area_ratio = float(arrow_area) / (arrow_w * arrow_h)
                arrow_x_mid = int(arrow_x + arrow_w / 2)
                arrow_y_mid = int(arrow_y + arrow_h / 2)
                if (0.4 * obj_w < arrow_w) and (0.4 * obj_h < arrow_h) and (0.5 < arrow_aspect_ratio < 2) and (arrow_area_ratio > 0.3):
                    # Distance from the arrow center to the board center; keep
                    # the closest arrow (must be within 30 px).
                    val_dis = math.sqrt((arrow_x_mid - obj_x_mid) ** 2 + (arrow_y_mid - obj_y_mid) ** 2)
                    if val_dis < min_val_dis:
                        min_val_dis = val_dis
                        #left_most_pos = tuple(obj_contour[obj_contour[:, :, 0].argmin()][0])
                        #right_most_pos = tuple(obj_contour[obj_contour[:, :, 0].argmax()][0])
                        #top_most_pos = tuple(obj_contour[obj_contour[:, :, 1].argmin()][0])
                        bottom_most_pos = tuple(arrow_contour[arrow_contour[:, :, 1].argmax()][0])
            if bottom_most_pos is not None:
                cv2.circle(img_debug, (obj_x + bottom_most_pos[0], obj_y + bottom_most_pos[1]), 4, (0, 0, 255), -1)
                cv2.line(img_debug, (obj_x + obj_x_mid, obj_y), (obj_x + obj_x_mid, obj_y + obj_h), (255, 0, 255), 2)
                # Arrow tail ends right of center => points left, and vice
                # versa -- presumably calibrated to these sign graphics;
                # verify against the actual sign set.
                if bottom_most_pos[0] > obj_x_mid:
                    left_sign_detect = True
                    cv2.putText(img_debug, 'LEFT', (obj_x+10, obj_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
                    cv2.rectangle(img_debug, (obj_x, obj_y), (obj_x + obj_w, obj_y + obj_h), (255, 0, 0), 2)
                else:
                    right_sign_detect = True
                    cv2.putText(img_debug, 'RIGHT', (obj_x+3, obj_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
                    cv2.rectangle(img_debug, (obj_x, obj_y), (obj_x + obj_w, obj_y + obj_h), (0, 255, 0), 2)
        return left_sign_detect, right_sign_detect, np.vstack((img_debug, img_blue))
| 54.664474 | 197 | 0.603562 |
import numpy as np
import cv2
import math
class IntersectionDetector:
    def __init__(self):
        # HSV thresholds for the blue background of direction signs.
        self.lower_blue = np.array([85, 90, 120], np.uint8)
        self.upper_blue = np.array([115, 255, 255], np.uint8)
    def fn_find_intersection_line(self, img_trans):
        """Detect the wide white stop line in the top half of the bird's-eye
        frame; returns (intersection_check, img_debug)."""
        pers_height, pers_width = img_trans.shape[:2]
        img_gray = cv2.cvtColor(img_trans[:int(pers_height * 1/ 2), :].copy(), cv2.COLOR_RGB2GRAY)
        # Binarize bright pixels, then open+close to remove speckle.
        _, img_intersection = cv2.threshold(img_gray, 180, 255, 0)
        img_intersection = cv2.morphologyEx(img_intersection, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
        img_intersection = cv2.morphologyEx(img_intersection, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8))
        img_debug = cv2.merge((img_intersection, img_intersection, img_intersection)).copy()
        _, list_intersection_contour, _ = cv2.findContours(img_intersection, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        intersection_check = False
        for intersection_contour in list_intersection_contour:
            cv2.drawContours(img_debug, [intersection_contour], 0, (0, 0, 255), 2)
            x_stop, y_stop, w_stop, h_stop = cv2.boundingRect(intersection_contour)
            cv2.putText(img_debug, 'w: {}, h: {}'.format(w_stop, h_stop), (intersection_contour[0][0][0]+10, intersection_contour[0][0][1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 255, 255))
            # A stop line spans nearly the full frame width.
            if 330 < w_stop:
                cv2.drawContours(img_debug, [intersection_contour], 0, (0, 255, 0), 2)
                intersection_check = True
        return intersection_check, img_debug
def fn_find_exit_line(self, img_trans, direction='left'):
pers_height, pers_width = img_trans.shape[:2]
if direction == 'left':
img_gray = cv2.cvtColor(img_trans[:, int(pers_width * 1/ 2):].copy(), cv2.COLOR_RGB2GRAY)
else:
img_gray = cv2.cvtColor(img_trans[:, :int(pers_width * 1/ 2)].copy(), cv2.COLOR_RGB2GRAY)
_, img_exit = cv2.threshold(img_gray, 190, 255, 0)
img_exit = cv2.morphologyEx(img_exit, cv2.MORPH_OPEN, np.ones((5, 5), np.uint8))
img_exit = cv2.morphologyEx(img_exit, cv2.MORPH_CLOSE, np.ones((7, 7), np.uint8))
img_debug = cv2.merge((img_exit, img_exit, img_exit)).copy()
_, list_exit_contour, _ = cv2.findContours(img_exit, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
exit_check = False
exit_pos = (0, 0)
for exit_contour in list_exit_contour:
cv2.drawContours(img_debug, [exit_contour], 0, (0, 0, 255), 2)
x_exit, y_exit, w_exit, h_exit = cv2.boundingRect(exit_contour)
bottom_most_pos = tuple(exit_contour[exit_contour[:, :, 1].argmax()][0])
val_height = h_exit
for pos_y in range(pers_height-1, 0, -1):
if img_gray[pos_y, bottom_most_pos[0]] != 0:
val_height = pos_y
break
cv2.putText(img_debug, 'w: {}, h: {}, length: {}'.format(w_exit, h_exit, val_height), (exit_contour[0][0][0]+10, exit_contour[0][0][1]+10), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 255, 255))
if h_exit > val_height * 4/5 and h_exit > pers_height/2:
cv2.drawContours(img_debug, [exit_contour], 0, (0, 255, 0), 2)
exit_pos = exit_contour[0][0]
exit_check = True
return exit_check, exit_pos, img_debug
def fn_find_direction_sign(self, img_ori):
left_sign_detect = False
right_sign_detect = False
img_height, img_width = img_ori.shape[:2]
img_roi = img_ori[:int(img_height*1 / 2), :].copy()
img_hsv = cv2.cvtColor(img_roi, cv2.COLOR_BGR2HSV)
img_mask_b = cv2.inRange(img_hsv, self.lower_blue, self.upper_blue)
img_mask_b = cv2.morphologyEx(img_mask_b, cv2.MORPH_OPEN, np.ones((7, 7), np.uint8))
img_mask_b = cv2.morphologyEx(img_mask_b, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))
_, list_obj_contour, _ = cv2.findContours(img_mask_b, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
img_blue = cv2.bitwise_and(img_roi, img_roi, mask=img_mask_b)
img_debug = img_roi.copy()
list_obj = []
for obj_contour in list_obj_contour:
x, y, w, h = cv2.boundingRect(obj_contour)
area = cv2.contourArea(obj_contour)
aspect_ratio = float(w) / h
area_ratio = float(area) / (w*h)
cv2.rectangle(img_debug, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.putText(img_debug, 'w: {}, h: {}, aspect_ratio: {:.2f}, area_ratio: {:.2f}'.format(w, h, aspect_ratio, area_ratio), (x+10, y+10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 127, 0))
if (50 < w < 150) and (50 < h < 150) and (0.8 < aspect_ratio < 2.5) and (area_ratio > 0.5):
cv2.rectangle(img_debug, (x, y), (x + w, y + h), (0, 255, 255), 2)
list_obj.append((img_roi[y:y+h, x:x+w].copy(), (x, y, w, h)))
for (img_obj, (obj_x, obj_y, obj_w, obj_h)) in list_obj:
img_obj_gray = cv2.cvtColor(img_obj, cv2.COLOR_BGR2GRAY)
_, img_obj_binary = cv2.threshold(img_obj_gray, 180, 255, cv2.THRESH_BINARY)
img_obj_binary = cv2.morphologyEx(img_obj_binary, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
_, list_arrow_contour, _ = cv2.findContours(img_obj_binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
obj_x_mid = int(obj_w / 2)
obj_y_mid = int(obj_h / 2)
min_val_dis = 30
bottom_most_pos = None
for arrow_contour in list_arrow_contour:
mask_arrow = np.zeros(img_obj_gray.shape, np.uint8)
cv2.drawContours(mask_arrow, [arrow_contour], 0, 255, -1)
arrow_x, arrow_y, arrow_w, arrow_h = cv2.boundingRect(arrow_contour)
cv2.rectangle(img_debug, (obj_x + arrow_x, obj_y + arrow_y), (obj_x + arrow_x + arrow_w, arrow_y + obj_y + arrow_h), (255, 255, 0), 1)
arrow_area = cv2.contourArea(arrow_contour)
arrow_aspect_ratio = float(arrow_w) / arrow_h
arrow_area_ratio = float(arrow_area) / (arrow_w * arrow_h)
arrow_x_mid = int(arrow_x + arrow_w / 2)
arrow_y_mid = int(arrow_y + arrow_h / 2)
if (0.4 * obj_w < arrow_w) and (0.4 * obj_h < arrow_h) and (0.5 < arrow_aspect_ratio < 2) and (arrow_area_ratio > 0.3):
val_dis = math.sqrt((arrow_x_mid - obj_x_mid) ** 2 + (arrow_y_mid - obj_y_mid) ** 2)
if val_dis < min_val_dis:
min_val_dis = val_dis
bottom_most_pos = tuple(arrow_contour[arrow_contour[:, :, 1].argmax()][0])
if bottom_most_pos is not None:
cv2.circle(img_debug, (obj_x + bottom_most_pos[0], obj_y + bottom_most_pos[1]), 4, (0, 0, 255), -1)
cv2.line(img_debug, (obj_x + obj_x_mid, obj_y), (obj_x + obj_x_mid, obj_y + obj_h), (255, 0, 255), 2)
if bottom_most_pos[0] > obj_x_mid:
left_sign_detect = True
cv2.putText(img_debug, 'LEFT', (obj_x+10, obj_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
cv2.rectangle(img_debug, (obj_x, obj_y), (obj_x + obj_w, obj_y + obj_h), (255, 0, 0), 2)
else:
right_sign_detect = True
cv2.putText(img_debug, 'RIGHT', (obj_x+3, obj_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
cv2.rectangle(img_debug, (obj_x, obj_y), (obj_x + obj_w, obj_y + obj_h), (0, 255, 0), 2)
return left_sign_detect, right_sign_detect, np.vstack((img_debug, img_blue))
| true | true |
f71af1f582d2aaa6d4011db4c3ff8c3821e87e34 | 32,935 | py | Python | numpyro/distributions/transforms.py | ahoho/numpyro | 64e94e346c51a6c0c1ba51aa7b608e73513f158f | [
"Apache-2.0"
] | null | null | null | numpyro/distributions/transforms.py | ahoho/numpyro | 64e94e346c51a6c0c1ba51aa7b608e73513f158f | [
"Apache-2.0"
] | null | null | null | numpyro/distributions/transforms.py | ahoho/numpyro | 64e94e346c51a6c0c1ba51aa7b608e73513f158f | [
"Apache-2.0"
] | null | null | null | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import math
import warnings
import weakref
import numpy as np
from jax import lax, ops, tree_flatten, tree_map, vmap
from jax.flatten_util import ravel_pytree
from jax.nn import softplus
import jax.numpy as jnp
from jax.scipy.linalg import solve_triangular
from jax.scipy.special import expit, logit
from numpyro.distributions import constraints
from numpyro.distributions.util import matrix_to_tril_vec, signed_stick_breaking_tril, sum_rightmost, vec_to_tril_matrix
from numpyro.util import not_jax_tracer
# Public API of this module.
# Fixes: 'SoftplusTransform' was listed twice (duplicate removed); the
# defined-but-unexported IndependentTransform and OrderedTransform are now
# included so `from ... import *` exposes the full transform set.
__all__ = [
    'biject_to',
    'AbsTransform',
    'AffineTransform',
    'CholeskyTransform',
    'ComposeTransform',
    'CorrCholeskyTransform',
    'CorrMatrixCholeskyTransform',
    'ExpTransform',
    'IdentityTransform',
    'IndependentTransform',
    'InvCholeskyTransform',
    'LowerCholeskyTransform',
    'LowerCholeskyAffine',
    'OrderedTransform',
    'PermuteTransform',
    'PowerTransform',
    'SigmoidTransform',
    'SoftplusTransform',
    'SoftplusLowerCholeskyTransform',
    'StickBreakingTransform',
    'Transform',
    'UnpackTransform',
]
def _clipped_expit(x):
finfo = jnp.finfo(jnp.result_type(x))
return jnp.clip(expit(x), a_min=finfo.tiny, a_max=1. - finfo.eps)
class Transform(object):
    """Abstract base class for invertible transforms.

    Subclasses implement ``__call__`` (forward), ``_inverse`` and
    ``log_abs_det_jacobian``; the ``domain``/``codomain`` attributes declare
    the supported input/output constraints.
    """
    domain = constraints.real
    codomain = constraints.real
    # Weakref to the cached _InverseTransform; see `inv` below.
    _inv = None

    @property
    def event_dim(self):
        warnings.warn("transform.event_dim is deprecated. Please use Transform.domain.event_dim to "
                      "get input event dim or Transform.codomain.event_dim to get output event dim.",
                      FutureWarning)
        return self.domain.event_dim

    @property
    def inv(self):
        """Inverse transform, cached through a weakref so ``t.inv.inv is t``."""
        inv = None
        if self._inv is not None:
            inv = self._inv()
        if inv is None:
            inv = _InverseTransform(self)
            self._inv = weakref.ref(inv)
        return inv

    def __call__(self, x):
        # BUG FIX: this previously did `return NotImplementedError`, handing
        # callers the exception *class* instead of signalling the missing
        # override. Raise, matching _inverse/log_abs_det_jacobian below.
        raise NotImplementedError

    def _inverse(self, y):
        raise NotImplementedError

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        """Log of the absolute determinant of the Jacobian ``dy/dx``."""
        raise NotImplementedError

    def call_with_intermediates(self, x):
        # Default: no intermediates to reuse in log_abs_det_jacobian.
        return self(x), None

    def forward_shape(self, shape):
        """
        Infers the shape of the forward computation, given the input shape.
        Defaults to preserving shape.
        """
        return shape

    def inverse_shape(self, shape):
        """
        Infers the shapes of the inverse computation, given the output shape.
        Defaults to preserving shape.
        """
        return shape
class _InverseTransform(Transform):
    """Lazily-evaluated inverse of another :class:`Transform`."""

    def __init__(self, transform):
        super().__init__()
        # Here `_inv` holds a strong reference to the wrapped transform (the
        # base class stores a weakref in the other direction).
        self._inv = transform

    @property
    def domain(self):
        # Domain and codomain swap relative to the wrapped transform.
        return self._inv.codomain

    @property
    def codomain(self):
        return self._inv.domain

    @property
    def inv(self):
        return self._inv

    def __call__(self, x):
        return self._inv._inverse(x)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # NB: we don't use intermediates for inverse transform
        return -self._inv.log_abs_det_jacobian(y, x, None)

    def forward_shape(self, shape):
        return self._inv.inverse_shape(shape)

    def inverse_shape(self, shape):
        return self._inv.forward_shape(shape)
class AbsTransform(Transform):
    """Absolute-value transform ``y = |x|``.

    Not bijective: ``_inverse`` returns ``y`` itself, i.e. the non-negative
    right inverse. No ``log_abs_det_jacobian`` is defined here.
    """
    domain = constraints.real
    codomain = constraints.positive

    def __eq__(self, other):
        return isinstance(other, AbsTransform)

    def __call__(self, x):
        return jnp.abs(x)

    def _inverse(self, y):
        # abs is not injective; by convention return the non-negative branch.
        return y
class AffineTransform(Transform):
    """
    Pointwise affine bijection ``y = loc + scale * x``.

    .. note:: When `scale` is a JAX tracer, we always assume that `scale > 0`
        when calculating `codomain`.
    """
    def __init__(self, loc, scale, domain=constraints.real):
        self.loc = loc
        self.scale = scale
        self.domain = domain

    @property
    def codomain(self):
        # Push the domain's bounds through the affine map; a concretely
        # negative scale flips lower/upper bounds.
        if self.domain is constraints.real:
            return constraints.real
        elif isinstance(self.domain, constraints.greater_than):
            if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):
                return constraints.less_than(self(self.domain.lower_bound))
            # we suppose scale > 0 for any tracer
            else:
                return constraints.greater_than(self(self.domain.lower_bound))
        elif isinstance(self.domain, constraints.less_than):
            if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):
                return constraints.greater_than(self(self.domain.upper_bound))
            # we suppose scale > 0 for any tracer
            else:
                return constraints.less_than(self(self.domain.upper_bound))
        elif isinstance(self.domain, constraints.interval):
            if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):
                return constraints.interval(self(self.domain.upper_bound),
                                            self(self.domain.lower_bound))
            else:
                return constraints.interval(self(self.domain.lower_bound),
                                            self(self.domain.upper_bound))
        else:
            raise NotImplementedError

    def __call__(self, x):
        return self.loc + self.scale * x

    def _inverse(self, y):
        return (y - self.loc) / self.scale

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Elementwise map, so the log-Jacobian is log|scale| broadcast to x.
        return jnp.broadcast_to(jnp.log(jnp.abs(self.scale)), jnp.shape(x))

    def forward_shape(self, shape):
        return lax.broadcast_shapes(shape,
                                    getattr(self.loc, "shape", ()),
                                    getattr(self.scale, "shape", ()))

    def inverse_shape(self, shape):
        return lax.broadcast_shapes(shape,
                                    getattr(self.loc, "shape", ()),
                                    getattr(self.scale, "shape", ()))
def _get_compose_transform_input_event_dim(parts):
input_event_dim = parts[-1].domain.event_dim
for part in parts[len(parts) - 1::-1]:
input_event_dim = part.domain.event_dim + max(input_event_dim - part.codomain.event_dim, 0)
return input_event_dim
def _get_compose_transform_output_event_dim(parts):
output_event_dim = parts[0].codomain.event_dim
for part in parts[1:]:
output_event_dim = part.codomain.event_dim + max(output_event_dim - part.domain.event_dim, 0)
return output_event_dim
class ComposeTransform(Transform):
    """Composition of a sequence of transforms, applied left-to-right
    (``parts[0]`` first), with log-det-Jacobians accumulated across parts."""

    def __init__(self, parts):
        self.parts = parts

    @property
    def domain(self):
        input_event_dim = _get_compose_transform_input_event_dim(self.parts)
        first_input_event_dim = self.parts[0].domain.event_dim
        assert input_event_dim >= first_input_event_dim
        if input_event_dim == first_input_event_dim:
            return self.parts[0].domain
        else:
            # A later part consumes additional dims, so lift the first part's
            # domain with `independent` to match.
            return constraints.independent(self.parts[0].domain, input_event_dim - first_input_event_dim)

    @property
    def codomain(self):
        output_event_dim = _get_compose_transform_output_event_dim(self.parts)
        last_output_event_dim = self.parts[-1].codomain.event_dim
        assert output_event_dim >= last_output_event_dim
        if output_event_dim == last_output_event_dim:
            return self.parts[-1].codomain
        else:
            return constraints.independent(self.parts[-1].codomain, output_event_dim - last_output_event_dim)

    def __call__(self, x):
        for part in self.parts:
            x = part(x)
        return x

    def _inverse(self, y):
        # Invert in reverse order.
        for part in self.parts[::-1]:
            y = part.inv(y)
        return y

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # `intermediates`, when given, is the second value returned by
        # call_with_intermediates(x) and lets us avoid recomputing forward
        # passes here.
        if intermediates is not None:
            if len(intermediates) != len(self.parts):
                raise ValueError('Intermediates array has length = {}. Expected = {}.'
                                 .format(len(intermediates), len(self.parts)))
        result = 0.
        input_event_dim = self.domain.event_dim
        for i, part in enumerate(self.parts[:-1]):
            y_tmp = part(x) if intermediates is None else intermediates[i][0]
            inter = None if intermediates is None else intermediates[i][1]
            logdet = part.log_abs_det_jacobian(x, y_tmp, intermediates=inter)
            # Dims this part treats as batch are summed out of its logdet.
            batch_ndim = input_event_dim - part.domain.event_dim
            result = result + sum_rightmost(logdet, batch_ndim)
            input_event_dim = part.codomain.event_dim + batch_ndim
            x = y_tmp
        # account for the last transform, where y is available
        inter = None if intermediates is None else intermediates[-1]
        part = self.parts[-1]
        logdet = part.log_abs_det_jacobian(x, y, intermediates=inter)
        result = result + sum_rightmost(logdet, input_event_dim - part.domain.event_dim)
        return result

    def call_with_intermediates(self, x):
        intermediates = []
        for part in self.parts[:-1]:
            x, inter = part.call_with_intermediates(x)
            intermediates.append([x, inter])
        # NB: we don't need to hold the last output value in `intermediates`
        x, inter = self.parts[-1].call_with_intermediates(x)
        intermediates.append(inter)
        return x, intermediates

    def forward_shape(self, shape):
        for part in self.parts:
            shape = part.forward_shape(shape)
        return shape

    def inverse_shape(self, shape):
        for part in reversed(self.parts):
            shape = part.inverse_shape(shape)
        return shape
def _matrix_forward_shape(shape, offset=0):
# Reshape from (..., N) to (..., D, D).
if len(shape) < 1:
raise ValueError("Too few dimensions in input")
N = shape[-1]
D = round((0.25 + 2 * N) ** 0.5 - 0.5)
if D * (D + 1) // 2 != N:
raise ValueError("Input is not a flattend lower-diagonal number")
D = D - offset
return shape[:-1] + (D, D)
def _matrix_inverse_shape(shape, offset=0):
# Reshape from (..., D, D) to (..., N).
if len(shape) < 2:
raise ValueError("Too few dimensions on input")
if shape[-2] != shape[-1]:
raise ValueError("Input is not square")
D = shape[-1] + offset
N = D * (D + 1) // 2
return shape[:-2] + (N,)
class CholeskyTransform(Transform):
    r"""
    Transform via the mapping :math:`y = cholesky(x)`, where `x` is a
    positive definite matrix.
    """
    domain = constraints.positive_definite
    codomain = constraints.lower_cholesky

    def __call__(self, x):
        return jnp.linalg.cholesky(x)

    def _inverse(self, y):
        # x = y @ y.T reconstructs the positive definite matrix.
        return jnp.matmul(y, jnp.swapaxes(y, -2, -1))

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Ref: http://web.mit.edu/18.325/www/handouts/handout2.pdf page 13
        # This is the PD -> Cholesky direction, hence the negated terms
        # relative to the y @ y.T direction documented there.
        n = jnp.shape(x)[-1]
        order = -jnp.arange(n, 0, -1)
        return -n * jnp.log(2) + jnp.sum(order * jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1)), axis=-1)
class CorrCholeskyTransform(Transform):
    r"""
    Transforms a uncontrained real vector :math:`x` with length :math:`D*(D-1)/2` into the
    Cholesky factor of a D-dimension correlation matrix. This Cholesky factor is a lower
    triangular matrix with positive diagonals and unit Euclidean norm for each row.
    The transform is processed as follows:

    1. First we convert :math:`x` into a lower triangular matrix with the following order:

    .. math::
        \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            x_0 & 1 & 0 & 0 \\
            x_1 & x_2 & 1 & 0 \\
            x_3 & x_4 & x_5 & 1
        \end{bmatrix}

    2. For each row :math:`X_i` of the lower triangular part, we apply a *signed* version of
       class :class:`StickBreakingTransform` to transform :math:`X_i` into a
       unit Euclidean length vector using the following steps:

        a. Scales into the interval :math:`(-1, 1)` domain: :math:`r_i = \tanh(X_i)`.
        b. Transforms into an unsigned domain: :math:`z_i = r_i^2`.
        c. Applies :math:`s_i = StickBreakingTransform(z_i)`.
        d. Transforms back into signed domain: :math:`y_i = (sign(r_i), 1) * \sqrt{s_i}`.
    """
    domain = constraints.real_vector
    codomain = constraints.corr_cholesky

    def __call__(self, x):
        # we interchange step 1 and step 2.a for a better performance
        t = jnp.tanh(x)
        return signed_stick_breaking_tril(t)

    def _inverse(self, y):
        # inverse stick-breaking: recover the remaining-stick products from
        # the squared row entries, then undo the tanh.
        z1m_cumprod = 1 - jnp.cumsum(y * y, axis=-1)
        pad_width = [(0, 0)] * y.ndim
        pad_width[-1] = (1, 0)
        z1m_cumprod_shifted = jnp.pad(z1m_cumprod[..., :-1], pad_width,
                                      mode="constant", constant_values=1.)
        t = matrix_to_tril_vec(y, diagonal=-1) / jnp.sqrt(
            matrix_to_tril_vec(z1m_cumprod_shifted, diagonal=-1))
        # inverse of tanh (arctanh written via logs)
        x = jnp.log((1 + t) / (1 - t)) / 2
        return x

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # NB: because domain and codomain are two spaces with different dimensions, determinant of
        # Jacobian is not well-defined. Here we return `log_abs_det_jacobian` of `x` and the
        # flatten lower triangular part of `y`.

        # stick_breaking_logdet = log(y / r) = log(z_cumprod)  (modulo right shifted)
        z1m_cumprod = 1 - jnp.cumsum(y * y, axis=-1)
        # by taking diagonal=-2, we don't need to shift z_cumprod to the right
        # NB: diagonal=-2 works fine for (2 x 2) matrix, where we get an empty array
        z1m_cumprod_tril = matrix_to_tril_vec(z1m_cumprod, diagonal=-2)
        stick_breaking_logdet = 0.5 * jnp.sum(jnp.log(z1m_cumprod_tril), axis=-1)
        # log|d tanh / dx| summed over the vector, in a numerically stable form
        tanh_logdet = -2 * jnp.sum(x + softplus(-2 * x) - jnp.log(2.), axis=-1)
        return stick_breaking_logdet + tanh_logdet

    def forward_shape(self, shape):
        # offset=-1: the vector parameterizes only the strictly-lower triangle.
        return _matrix_forward_shape(shape, offset=-1)

    def inverse_shape(self, shape):
        return _matrix_inverse_shape(shape, offset=-1)
class CorrMatrixCholeskyTransform(CholeskyTransform):
    r"""
    Transform via the mapping :math:`y = cholesky(x)`, where `x` is a
    correlation matrix.
    """
    domain = constraints.corr_matrix
    codomain = constraints.corr_cholesky

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # NB: see derivation in LKJCholesky implementation
        # For correlation matrices the unit diagonal removes the 2^n factor
        # present in the general positive-definite case.
        n = jnp.shape(x)[-1]
        order = -jnp.arange(n - 1, -1, -1)
        return jnp.sum(order * jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1)), axis=-1)
class ExpTransform(Transform):
    """Exponential bijection ``y = exp(x)`` from ``domain`` onto its image."""
    # TODO: refine domain/codomain logic through setters, especially when
    # transforms for inverses are supported
    def __init__(self, domain=constraints.real):
        self.domain = domain

    @property
    def codomain(self):
        # exp is increasing, so bounds map monotonically.
        if self.domain is constraints.real:
            return constraints.positive
        elif isinstance(self.domain, constraints.greater_than):
            return constraints.greater_than(self.__call__(self.domain.lower_bound))
        elif isinstance(self.domain, constraints.interval):
            return constraints.interval(self.__call__(self.domain.lower_bound),
                                        self.__call__(self.domain.upper_bound))
        else:
            raise NotImplementedError

    def __call__(self, x):
        # XXX consider to clamp from below for stability if necessary
        return jnp.exp(x)

    def _inverse(self, y):
        return jnp.log(y)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # log|dy/dx| = log(exp(x)) = x.
        return x
class IdentityTransform(Transform):
    """Identity bijection ``y = x`` with zero log-det-Jacobian."""

    def __call__(self, x):
        return x

    def _inverse(self, y):
        return y

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # zeros_like keeps dtype and shape of x.
        return jnp.zeros_like(x)
class IndependentTransform(Transform):
    """
    Wraps a transform by aggregating over ``reinterpreted_batch_ndims``-many
    dims in :meth:`check`, so that an event is valid only if all its
    independent entries are valid.
    """
    def __init__(self, base_transform, reinterpreted_batch_ndims):
        assert isinstance(base_transform, Transform)
        assert isinstance(reinterpreted_batch_ndims, int)
        assert reinterpreted_batch_ndims >= 0
        self.base_transform = base_transform
        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
        super().__init__()

    @property
    def domain(self):
        return constraints.independent(self.base_transform.domain, self.reinterpreted_batch_ndims)

    @property
    def codomain(self):
        return constraints.independent(self.base_transform.codomain, self.reinterpreted_batch_ndims)

    def __call__(self, x):
        # Forward/inverse are untouched; only the event-dim bookkeeping of
        # log_abs_det_jacobian changes.
        return self.base_transform(x)

    def _inverse(self, y):
        return self.base_transform._inverse(y)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        result = self.base_transform.log_abs_det_jacobian(x, y, intermediates=intermediates)
        if jnp.ndim(result) < self.reinterpreted_batch_ndims:
            expected = self.domain.event_dim
            raise ValueError(f"Expected x.dim() >= {expected} but got {jnp.ndim(x)}")
        # Sum the reinterpreted batch dims into the event's logdet.
        return sum_rightmost(result, self.reinterpreted_batch_ndims)

    def call_with_intermediates(self, x):
        return self.base_transform.call_with_intermediates(x)

    def forward_shape(self, shape):
        return self.base_transform.forward_shape(shape)

    def inverse_shape(self, shape):
        return self.base_transform.inverse_shape(shape)
class InvCholeskyTransform(Transform):
    r"""
    Transform via the mapping :math:`y = x @ x.T`, where `x` is a lower
    triangular matrix with positive diagonal.

    .. deprecated:: use :class:`CholeskyTransform` (inverted) or
        :class:`CorrMatrixCholeskyTransform` instead.
    """
    def __init__(self, domain=constraints.lower_cholesky):
        warnings.warn("InvCholeskyTransform is deprecated. Please use CholeskyTransform"
                      " or CorrMatrixCholeskyTransform instead.", FutureWarning)
        assert domain in [constraints.lower_cholesky, constraints.corr_cholesky]
        self.domain = domain

    @property
    def codomain(self):
        if self.domain is constraints.lower_cholesky:
            return constraints.positive_definite
        elif self.domain is constraints.corr_cholesky:
            return constraints.corr_matrix

    def __call__(self, x):
        return jnp.matmul(x, jnp.swapaxes(x, -2, -1))

    def _inverse(self, y):
        return jnp.linalg.cholesky(y)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        if self.domain is constraints.lower_cholesky:
            # Ref: http://web.mit.edu/18.325/www/handouts/handout2.pdf page 13
            n = jnp.shape(x)[-1]
            order = jnp.arange(n, 0, -1)
            return n * jnp.log(2) + jnp.sum(order * jnp.log(jnp.diagonal(x, axis1=-2, axis2=-1)), axis=-1)
        else:
            # NB: see derivation in LKJCholesky implementation
            n = jnp.shape(x)[-1]
            order = jnp.arange(n - 1, -1, -1)
            return jnp.sum(order * jnp.log(jnp.diagonal(x, axis1=-2, axis2=-1)), axis=-1)
class LowerCholeskyAffine(Transform):
    r"""
    Transform via the mapping :math:`y = loc + scale\_tril\ @\ x`.

    :param loc: a real vector.
    :param scale_tril: a lower triangular matrix with positive diagonal.
    """
    domain = constraints.real_vector
    codomain = constraints.real_vector

    def __init__(self, loc, scale_tril):
        if jnp.ndim(scale_tril) != 2:
            raise ValueError("Only support 2-dimensional scale_tril matrix. "
                             "Please make a feature request if you need to "
                             "use this transform with batched scale_tril.")
        self.loc = loc
        self.scale_tril = scale_tril

    def __call__(self, x):
        return self.loc + jnp.squeeze(jnp.matmul(self.scale_tril, x[..., jnp.newaxis]), axis=-1)

    def _inverse(self, y):
        # Flatten batch dims so one triangular solve handles every vector.
        y = y - self.loc
        original_shape = jnp.shape(y)
        yt = jnp.reshape(y, (-1, original_shape[-1])).T
        xt = solve_triangular(self.scale_tril, yt, lower=True)
        return jnp.reshape(xt.T, original_shape)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # |det scale_tril| = product of its (positive) diagonal.
        return jnp.broadcast_to(jnp.log(jnp.diagonal(self.scale_tril, axis1=-2, axis2=-1)).sum(-1),
                                jnp.shape(x)[:-1])

    def forward_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return lax.broadcast_shapes(shape, self.loc.shape, self.scale_tril.shape[:-1])

    def inverse_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return lax.broadcast_shapes(shape, self.loc.shape, self.scale_tril.shape[:-1])
class LowerCholeskyTransform(Transform):
    """Map an unconstrained vector of length ``n*(n+1)/2`` to a lower
    triangular matrix with positive diagonal: the first entries fill the
    strict lower triangle, the last ``n`` entries are the log-diagonal."""
    domain = constraints.real_vector
    codomain = constraints.lower_cholesky

    def __call__(self, x):
        # Solve n*(n+1)/2 = len(x) for the matrix size n.
        n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
        z = vec_to_tril_matrix(x[..., :-n], diagonal=-1)
        diag = jnp.exp(x[..., -n:])
        return z + jnp.expand_dims(diag, axis=-1) * jnp.identity(n)

    def _inverse(self, y):
        z = matrix_to_tril_vec(y, diagonal=-1)
        return jnp.concatenate([z, jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1))], axis=-1)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # the jacobian is diagonal, so logdet is the sum of diagonal `exp` transform
        n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
        return x[..., -n:].sum(-1)

    def forward_shape(self, shape):
        return _matrix_forward_shape(shape)

    def inverse_shape(self, shape):
        return _matrix_inverse_shape(shape)
class OrderedTransform(Transform):
    """
    Transform a real vector to an ordered vector.

    The first coordinate passes through unchanged; each later coordinate is
    exponentiated and added cumulatively, so outputs strictly increase.

    **References:**

    1. *Stan Reference Manual v2.20, section 10.6*,
       Stan Development Team
    """
    domain = constraints.real_vector
    codomain = constraints.ordered_vector

    def __call__(self, x):
        first = x[..., :1]
        increments = jnp.exp(x[..., 1:])
        return jnp.cumsum(jnp.concatenate([first, increments], axis=-1), axis=-1)

    def _inverse(self, y):
        gaps = y[..., 1:] - y[..., :-1]
        return jnp.concatenate([y[..., :1], jnp.log(gaps)], axis=-1)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Triangular Jacobian with diagonal (1, exp(x_1), ..., exp(x_{D-1})).
        return jnp.sum(x[..., 1:], -1)
class PermuteTransform(Transform):
    """Reorder the last dimension by a fixed integer ``permutation`` array."""
    domain = constraints.real_vector
    codomain = constraints.real_vector

    def __init__(self, permutation):
        self.permutation = permutation

    def __call__(self, x):
        return x[..., self.permutation]

    def _inverse(self, y):
        size = self.permutation.size
        # Build the inverse permutation with jnp's functional `.at[].set()`
        # indexed update; the previously used `jax.ops.index_update` is
        # deprecated and removed in recent jax releases.
        permutation_inv = jnp.zeros(size, dtype=jnp.result_type(int)).at[
            self.permutation].set(jnp.arange(size))
        return y[..., permutation_inv]

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Permutations are volume-preserving: log|det J| = 0.
        return jnp.full(jnp.shape(x)[:-1], 0.)
class PowerTransform(Transform):
    """Power bijection ``y = x ** exponent`` on the positive reals."""
    domain = constraints.positive
    codomain = constraints.positive

    def __init__(self, exponent):
        self.exponent = exponent

    def __call__(self, x):
        return jnp.power(x, self.exponent)

    def _inverse(self, y):
        return jnp.power(y, 1 / self.exponent)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # |dy/dx| = |exponent * x**(exponent-1)| = |exponent * y / x|.
        return jnp.log(jnp.abs(self.exponent * y / x))

    def _broadcast_with_exponent(self, shape):
        # Shared by forward_shape/inverse_shape: a batched exponent
        # broadcasts against the input shape.
        return lax.broadcast_shapes(shape, getattr(self.exponent, "shape", ()))

    def forward_shape(self, shape):
        return self._broadcast_with_exponent(shape)

    def inverse_shape(self, shape):
        return self._broadcast_with_exponent(shape)
class SigmoidTransform(Transform):
    """Logistic sigmoid bijection from the reals onto the open unit interval."""
    codomain = constraints.unit_interval

    def __call__(self, x):
        # Clipped away from 0/1 so downstream logs stay finite.
        return _clipped_expit(x)

    def _inverse(self, y):
        return logit(y)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # log sigmoid'(x) = -|x| - 2*log(1 + exp(-|x|)); the |x| form keeps
        # the exp from overflowing for large negative inputs.
        x_abs = jnp.abs(x)
        return -x_abs - 2 * jnp.log1p(jnp.exp(-x_abs))
def _softplus_inv(y):
return jnp.log(-jnp.expm1(-y)) + y
class SoftplusTransform(Transform):
    r"""
    Transform from unconstrained space to positive domain via softplus :math:`y = \log(1 + \exp(x))`.
    The inverse is computed as :math:`x = \log(\exp(y) - 1)`.
    """
    domain = constraints.real
    codomain = constraints.softplus_positive

    def __call__(self, x):
        return softplus(x)

    def _inverse(self, y):
        return _softplus_inv(y)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # softplus'(x) = sigmoid(x), and log sigmoid(x) = -softplus(-x).
        return -softplus(-x)
class SoftplusLowerCholeskyTransform(Transform):
    """
    Transform from unconstrained vector to lower-triangular matrices with
    nonnegative diagonal entries. This is useful for parameterizing positive
    definite matrices in terms of their Cholesky factorization.

    Same layout as :class:`LowerCholeskyTransform`, but the diagonal goes
    through softplus rather than exp.
    """
    domain = constraints.real_vector
    codomain = constraints.softplus_lower_cholesky

    def __call__(self, x):
        # Solve n*(n+1)/2 = len(x) for the matrix size n.
        n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
        z = vec_to_tril_matrix(x[..., :-n], diagonal=-1)
        diag = softplus(x[..., -n:])
        return z + jnp.expand_dims(diag, axis=-1) * jnp.identity(n)

    def _inverse(self, y):
        z = matrix_to_tril_vec(y, diagonal=-1)
        diag = _softplus_inv(jnp.diagonal(y, axis1=-2, axis2=-1))
        return jnp.concatenate([z, diag], axis=-1)

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # The Jacobian is diagonal, so logdet is the sum over the diagonal
        # `softplus` entries: log softplus'(x) = -softplus(-x).
        n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
        return -softplus(-x[..., -n:]).sum(-1)

    def forward_shape(self, shape):
        return _matrix_forward_shape(shape)

    def inverse_shape(self, shape):
        return _matrix_inverse_shape(shape)
class StickBreakingTransform(Transform):
    """Bijection from unconstrained ``R^(K-1)`` to the K-simplex via the
    stick-breaking construction."""
    domain = constraints.real_vector
    codomain = constraints.simplex

    def __call__(self, x):
        # we shift x to obtain a balanced mapping (0, 0, ..., 0) -> (1/K, 1/K, ..., 1/K)
        x = x - jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))
        # convert to probabilities (relative to the remaining) of each fraction of the stick
        z = _clipped_expit(x)
        z1m_cumprod = jnp.cumprod(1 - z, axis=-1)
        # Append 1 (last stick) and prepend 1 (full stick) before combining.
        pad_width = [(0, 0)] * x.ndim
        pad_width[-1] = (0, 1)
        z_padded = jnp.pad(z, pad_width, mode="constant", constant_values=1.)
        pad_width = [(0, 0)] * x.ndim
        pad_width[-1] = (1, 0)
        z1m_cumprod_shifted = jnp.pad(z1m_cumprod, pad_width, mode="constant", constant_values=1.)
        return z_padded * z1m_cumprod_shifted

    def _inverse(self, y):
        y_crop = y[..., :-1]
        z1m_cumprod = jnp.clip(1 - jnp.cumsum(y_crop, axis=-1), a_min=jnp.finfo(y.dtype).tiny)
        # hence x = logit(z) = log(z / (1 - z)) = y[::-1] / z1m_cumprod
        x = jnp.log(y_crop / z1m_cumprod)
        # undo the balancing shift applied in __call__
        return x + jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Ref: https://mc-stan.org/docs/2_19/reference-manual/simplex-transform-section.html
        # |det|(J) = Product(y * (1 - z))
        x = x - jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))
        z = jnp.clip(expit(x), a_min=jnp.finfo(x.dtype).tiny)
        # XXX we use the identity 1 - z = z * exp(-x) to not worry about
        # the case z ~ 1
        return jnp.sum(jnp.log(y[..., :-1] * z) - x, axis=-1)

    def forward_shape(self, shape):
        # K-1 unconstrained coordinates produce a K-simplex.
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return shape[:-1] + (shape[-1] + 1,)

    def inverse_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return shape[:-1] + (shape[-1] - 1,)
class UnpackTransform(Transform):
    """
    Transforms a contiguous array to a pytree of subarrays.

    :param unpack_fn: callable used to unpack a contiguous array.
    """
    domain = constraints.real_vector
    codomain = constraints.dependent

    def __init__(self, unpack_fn):
        self.unpack_fn = unpack_fn

    def __call__(self, x):
        batch_shape = x.shape[:-1]
        if batch_shape:
            # vmap over a single flattened batch dim, then restore batch shape
            # on every leaf of the unpacked pytree.
            unpacked = vmap(self.unpack_fn)(x.reshape((-1,) + x.shape[-1:]))
            return tree_map(lambda z: jnp.reshape(z, batch_shape + z.shape[1:]), unpacked)
        else:
            return self.unpack_fn(x)

    def _inverse(self, y):
        # NOTE(review): a heuristic batch check — if all leaves share a common
        # positive leading dim, `y` *may* be a batch that ravel_pytree cannot
        # round-trip, hence the warning below.
        leading_dims = [v.shape[0] if jnp.ndim(v) > 0 else 0
                        for v in tree_flatten(y)[0]]
        d0 = leading_dims[0]
        not_scalar = d0 > 0 or len(leading_dims) > 1
        if not_scalar and all(d == d0 for d in leading_dims[1:]):
            warnings.warn("UnpackTransform.inv might lead to an unexpected behavior because it"
                          " cannot transform a batch of unpacked arrays.")
        return ravel_pytree(y)[0]

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Pure re-layout of the same values: zero log-det.
        return jnp.zeros(jnp.shape(x)[:-1])

    def forward_shape(self, shape):
        raise NotImplementedError

    def inverse_shape(self, shape):
        raise NotImplementedError
##########################################################
# CONSTRAINT_REGISTRY
##########################################################
class ConstraintRegistry(object):
    """Registry mapping constraint types to factories of bijective transforms."""

    def __init__(self):
        self._registry = {}

    def register(self, constraint, factory=None):
        # Support decorator usage: @registry.register(constraint).
        if factory is None:
            return lambda factory: self.register(constraint, factory)
        # Accept either a Constraint instance or a Constraint class; keys are
        # always classes.
        if isinstance(constraint, constraints.Constraint):
            constraint = type(constraint)
        self._registry[constraint] = factory

    def __call__(self, constraint):
        try:
            factory = self._registry[type(constraint)]
        except KeyError as e:
            raise NotImplementedError from e
        # The factory receives the instance so it can read bounds etc.
        return factory(constraint)
# Global registry: biject_to(constraint) returns a bijection from the
# unconstrained space onto the support described by `constraint`.
biject_to = ConstraintRegistry()


@biject_to.register(constraints.corr_cholesky)
def _transform_to_corr_cholesky(constraint):
    return CorrCholeskyTransform()


@biject_to.register(constraints.corr_matrix)
def _transform_to_corr_matrix(constraint):
    # unconstrained -> corr cholesky factor -> correlation matrix
    return ComposeTransform([CorrCholeskyTransform(),
                             CorrMatrixCholeskyTransform().inv])


@biject_to.register(constraints.greater_than)
def _transform_to_greater_than(constraint):
    if constraint is constraints.positive:
        return ExpTransform()
    # exp then shift by the lower bound.
    return ComposeTransform([ExpTransform(),
                             AffineTransform(constraint.lower_bound, 1,
                                             domain=constraints.positive)])


@biject_to.register(constraints.less_than)
def _transform_to_less_than(constraint):
    # exp then mirror below the upper bound (scale = -1).
    return ComposeTransform([ExpTransform(),
                             AffineTransform(constraint.upper_bound, -1,
                                             domain=constraints.positive)])


@biject_to.register(constraints.independent)
def _biject_to_independent(constraint):
    return IndependentTransform(biject_to(constraint.base_constraint),
                                constraint.reinterpreted_batch_ndims)


@biject_to.register(constraints.interval)
def _transform_to_interval(constraint):
    if constraint is constraints.unit_interval:
        return SigmoidTransform()
    # sigmoid then affine map onto [lower_bound, upper_bound].
    scale = constraint.upper_bound - constraint.lower_bound
    return ComposeTransform([SigmoidTransform(),
                             AffineTransform(constraint.lower_bound, scale,
                                             domain=constraints.unit_interval)])


@biject_to.register(constraints.lower_cholesky)
def _transform_to_lower_cholesky(constraint):
    return LowerCholeskyTransform()


@biject_to.register(constraints.ordered_vector)
def _transform_to_ordered_vector(constraint):
    return OrderedTransform()


@biject_to.register(constraints.positive_definite)
def _transform_to_positive_definite(constraint):
    return ComposeTransform([LowerCholeskyTransform(), CholeskyTransform().inv])


@biject_to.register(constraints.positive_ordered_vector)
def _transform_to_positive_ordered_vector(constraint):
    return ComposeTransform([OrderedTransform(), ExpTransform()])


@biject_to.register(constraints.real)
def _transform_to_real(constraint):
    return IdentityTransform()


@biject_to.register(constraints.softplus_positive)
def _transform_to_softplus_positive(constraint):
    return SoftplusTransform()


@biject_to.register(constraints.softplus_lower_cholesky)
def _transform_to_softplus_lower_cholesky(constraint):
    return SoftplusLowerCholeskyTransform()


@biject_to.register(constraints.simplex)
def _transform_to_simplex(constraint):
    return StickBreakingTransform()
| 34.851852 | 120 | 0.637559 |
import math
import warnings
import weakref
import numpy as np
from jax import lax, ops, tree_flatten, tree_map, vmap
from jax.flatten_util import ravel_pytree
from jax.nn import softplus
import jax.numpy as jnp
from jax.scipy.linalg import solve_triangular
from jax.scipy.special import expit, logit
from numpyro.distributions import constraints
from numpyro.distributions.util import matrix_to_tril_vec, signed_stick_breaking_tril, sum_rightmost, vec_to_tril_matrix
from numpyro.util import not_jax_tracer
# Public API.  NOTE: the original list contained 'SoftplusTransform' twice and
# omitted 'IndependentTransform' and 'OrderedTransform' even though both are
# public classes defined in this module; fixed here (additions are
# backward-compatible, the duplicate was redundant).
__all__ = [
    'biject_to',
    'AbsTransform',
    'AffineTransform',
    'CholeskyTransform',
    'ComposeTransform',
    'CorrCholeskyTransform',
    'CorrMatrixCholeskyTransform',
    'ExpTransform',
    'IdentityTransform',
    'IndependentTransform',
    'InvCholeskyTransform',
    'LowerCholeskyTransform',
    'LowerCholeskyAffine',
    'OrderedTransform',
    'PermuteTransform',
    'PowerTransform',
    'SigmoidTransform',
    'SoftplusTransform',
    'SoftplusLowerCholeskyTransform',
    'StickBreakingTransform',
    'Transform',
    'UnpackTransform',
]
def _clipped_expit(x):
finfo = jnp.finfo(jnp.result_type(x))
return jnp.clip(expit(x), a_min=finfo.tiny, a_max=1. - finfo.eps)
class Transform(object):
    """Base class for invertible transforms with log-det-Jacobian support."""
    domain = constraints.real
    codomain = constraints.real
    _inv = None  # weakref to a cached _InverseTransform, if one was created
    @property
    def event_dim(self):
        warnings.warn("transform.event_dim is deprecated. Please use Transform.domain.event_dim to "
                      "get input event dim or Transform.codomain.event_dim to get output event dim.",
                      FutureWarning)
        return self.domain.event_dim
    @property
    def inv(self):
        """Inverse transform, cached through a weakref so the pair can be GC'd."""
        inv = None
        if self._inv is not None:
            inv = self._inv()
        if inv is None:
            inv = _InverseTransform(self)
            self._inv = weakref.ref(inv)
        return inv
    def __call__(self, x):
        # BUG FIX: the original `return NotImplementedError` handed the
        # exception *class* back to the caller instead of raising it, so a
        # subclass missing __call__ would silently "succeed".
        raise NotImplementedError
    def _inverse(self, y):
        raise NotImplementedError
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        raise NotImplementedError
    def call_with_intermediates(self, x):
        # Default: no intermediate values are cached.
        return self(x), None
    def forward_shape(self, shape):
        """Infer the output shape of the forward map from an input shape."""
        return shape
    def inverse_shape(self, shape):
        """Infer the input shape of the forward map from an output shape."""
        return shape
class _InverseTransform(Transform):
    """Lazily-built inverse of a ``Transform``; see ``Transform.inv``."""
    def __init__(self, transform):
        super().__init__()
        # Unlike Transform._inv (a weakref), this holds a strong reference
        # back to the forward transform.
        self._inv = transform
    @property
    def domain(self):
        # Domain of the inverse is the codomain of the forward transform.
        return self._inv.codomain
    @property
    def codomain(self):
        return self._inv.domain
    @property
    def inv(self):
        # Inverting twice returns the original transform.
        return self._inv
    def __call__(self, x):
        return self._inv._inverse(x)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # log|det J_inv|(x) = -log|det J_fwd| evaluated at the swapped pair.
        return -self._inv.log_abs_det_jacobian(y, x, None)
    def forward_shape(self, shape):
        return self._inv.inverse_shape(shape)
    def inverse_shape(self, shape):
        return self._inv.forward_shape(shape)
class AbsTransform(Transform):
    """Maps x -> |x|.  Not bijective: ``_inverse`` picks the nonnegative branch."""
    domain = constraints.real
    codomain = constraints.positive
    def __eq__(self, other):
        return isinstance(other, AbsTransform)
    def __call__(self, x):
        return jnp.abs(x)
    def _inverse(self, y):
        # y is already nonnegative (codomain), so it is its own preimage on
        # the nonnegative branch.
        return y
class AffineTransform(Transform):
    """Maps x -> loc + scale * x."""
    def __init__(self, loc, scale, domain=constraints.real):
        self.loc = loc
        self.scale = scale
        self.domain = domain
    @property
    def codomain(self):
        # A concretely negative `scale` flips bounds (lower becomes upper and
        # vice versa); for abstract tracers we assume a positive scale.
        if self.domain is constraints.real:
            return constraints.real
        elif isinstance(self.domain, constraints.greater_than):
            if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):
                return constraints.less_than(self(self.domain.lower_bound))
            # we suppose scale > 0 for any tracer
            else:
                return constraints.greater_than(self(self.domain.lower_bound))
        elif isinstance(self.domain, constraints.less_than):
            if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):
                return constraints.greater_than(self(self.domain.upper_bound))
            # we suppose scale > 0 for any tracer
            else:
                return constraints.less_than(self(self.domain.upper_bound))
        elif isinstance(self.domain, constraints.interval):
            if not_jax_tracer(self.scale) and np.all(np.less(self.scale, 0)):
                return constraints.interval(self(self.domain.upper_bound),
                                            self(self.domain.lower_bound))
            else:
                return constraints.interval(self(self.domain.lower_bound),
                                            self(self.domain.upper_bound))
        else:
            raise NotImplementedError
    def __call__(self, x):
        return self.loc + self.scale * x
    def _inverse(self, y):
        return (y - self.loc) / self.scale
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # d/dx (loc + scale * x) = scale, broadcast to the shape of x.
        return jnp.broadcast_to(jnp.log(jnp.abs(self.scale)), jnp.shape(x))
    def forward_shape(self, shape):
        return lax.broadcast_shapes(shape,
                                    getattr(self.loc, "shape", ()),
                                    getattr(self.scale, "shape", ()))
    def inverse_shape(self, shape):
        return lax.broadcast_shapes(shape,
                                    getattr(self.loc, "shape", ()),
                                    getattr(self.scale, "shape", ()))
def _get_compose_transform_input_event_dim(parts):
input_event_dim = parts[-1].domain.event_dim
for part in parts[len(parts) - 1::-1]:
input_event_dim = part.domain.event_dim + max(input_event_dim - part.codomain.event_dim, 0)
return input_event_dim
def _get_compose_transform_output_event_dim(parts):
output_event_dim = parts[0].codomain.event_dim
for part in parts[1:]:
output_event_dim = part.codomain.event_dim + max(output_event_dim - part.domain.event_dim, 0)
return output_event_dim
class ComposeTransform(Transform):
    """Applies ``parts`` in sequence: x -> parts[-1](...parts[0](x))."""
    def __init__(self, parts):
        self.parts = parts
    @property
    def domain(self):
        input_event_dim = _get_compose_transform_input_event_dim(self.parts)
        first_input_event_dim = self.parts[0].domain.event_dim
        assert input_event_dim >= first_input_event_dim
        if input_event_dim == first_input_event_dim:
            return self.parts[0].domain
        else:
            # Later parts require extra event dims, so lift the first domain
            # with `independent`.
            return constraints.independent(self.parts[0].domain, input_event_dim - first_input_event_dim)
    @property
    def codomain(self):
        output_event_dim = _get_compose_transform_output_event_dim(self.parts)
        last_output_event_dim = self.parts[-1].codomain.event_dim
        assert output_event_dim >= last_output_event_dim
        if output_event_dim == last_output_event_dim:
            return self.parts[-1].codomain
        else:
            return constraints.independent(self.parts[-1].codomain, output_event_dim - last_output_event_dim)
    def __call__(self, x):
        for part in self.parts:
            x = part(x)
        return x
    def _inverse(self, y):
        # Invert each part in reverse order.
        for part in self.parts[::-1]:
            y = part.inv(y)
        return y
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        """Sum the parts' log-dets, reducing each over its extra batch dims."""
        if intermediates is not None:
            if len(intermediates) != len(self.parts):
                raise ValueError('Intermediates array has length = {}. Expected = {}.'
                                 .format(len(intermediates), len(self.parts)))
        result = 0.
        # Track how many trailing dims of the running value are "event" dims
        # so each part's log-det can be summed over its batch remainder.
        input_event_dim = self.domain.event_dim
        for i, part in enumerate(self.parts[:-1]):
            y_tmp = part(x) if intermediates is None else intermediates[i][0]
            inter = None if intermediates is None else intermediates[i][1]
            logdet = part.log_abs_det_jacobian(x, y_tmp, intermediates=inter)
            batch_ndim = input_event_dim - part.domain.event_dim
            result = result + sum_rightmost(logdet, batch_ndim)
            input_event_dim = part.codomain.event_dim + batch_ndim
            x = y_tmp
        # account for the last transform, where y is already available
        inter = None if intermediates is None else intermediates[-1]
        part = self.parts[-1]
        logdet = part.log_abs_det_jacobian(x, y, intermediates=inter)
        result = result + sum_rightmost(logdet, input_event_dim - part.domain.event_dim)
        return result
    def call_with_intermediates(self, x):
        intermediates = []
        for part in self.parts[:-1]:
            x, inter = part.call_with_intermediates(x)
            intermediates.append([x, inter])
        # NB: we don't need to hold the last output value in `intermediates`
        x, inter = self.parts[-1].call_with_intermediates(x)
        intermediates.append(inter)
        return x, intermediates
    def forward_shape(self, shape):
        for part in self.parts:
            shape = part.forward_shape(shape)
        return shape
    def inverse_shape(self, shape):
        for part in reversed(self.parts):
            shape = part.inverse_shape(shape)
        return shape
def _matrix_forward_shape(shape, offset=0):
if len(shape) < 1:
raise ValueError("Too few dimensions in input")
N = shape[-1]
D = round((0.25 + 2 * N) ** 0.5 - 0.5)
if D * (D + 1) // 2 != N:
raise ValueError("Input is not a flattend lower-diagonal number")
D = D - offset
return shape[:-1] + (D, D)
def _matrix_inverse_shape(shape, offset=0):
if len(shape) < 2:
raise ValueError("Too few dimensions on input")
if shape[-2] != shape[-1]:
raise ValueError("Input is not square")
D = shape[-1] + offset
N = D * (D + 1) // 2
return shape[:-2] + (N,)
class CholeskyTransform(Transform):
    """Maps a positive-definite matrix x to its lower Cholesky factor."""
    domain = constraints.positive_definite
    codomain = constraints.lower_cholesky
    def __call__(self, x):
        return jnp.linalg.cholesky(x)
    def _inverse(self, y):
        # x = y @ y.T
        return jnp.matmul(y, jnp.swapaxes(y, -2, -1))
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Negated log-det of the inverse map (see InvCholeskyTransform,
        # which implements the y @ y.T direction).
        n = jnp.shape(x)[-1]
        order = -jnp.arange(n, 0, -1)
        return -n * jnp.log(2) + jnp.sum(order * jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1)), axis=-1)
class CorrCholeskyTransform(Transform):
    """Maps an unconstrained vector to the Cholesky factor of a correlation matrix."""
    domain = constraints.real_vector
    codomain = constraints.corr_cholesky
    def __call__(self, x):
        # Map to (-1, 1), then apply the signed stick-breaking construction.
        t = jnp.tanh(x)
        return signed_stick_breaking_tril(t)
    def _inverse(self, y):
        # Recover the partial correlations t from the rows of y, then
        # invert tanh via arctanh(t) = log((1 + t) / (1 - t)) / 2.
        z1m_cumprod = 1 - jnp.cumsum(y * y, axis=-1)
        pad_width = [(0, 0)] * y.ndim
        pad_width[-1] = (1, 0)
        z1m_cumprod_shifted = jnp.pad(z1m_cumprod[..., :-1], pad_width,
                                      mode="constant", constant_values=1.)
        t = matrix_to_tril_vec(y, diagonal=-1) / jnp.sqrt(
            matrix_to_tril_vec(z1m_cumprod_shifted, diagonal=-1))
        x = jnp.log((1 + t) / (1 - t)) / 2
        return x
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        z1m_cumprod = 1 - jnp.cumsum(y * y, axis=-1)
        # NB: diagonal=-2 works fine for (2 x 2) matrix, where we get an empty array
        z1m_cumprod_tril = matrix_to_tril_vec(z1m_cumprod, diagonal=-2)
        stick_breaking_logdet = 0.5 * jnp.sum(jnp.log(z1m_cumprod_tril), axis=-1)
        # log|d tanh(x)/dx| written in a numerically stable softplus form.
        tanh_logdet = -2 * jnp.sum(x + softplus(-2 * x) - jnp.log(2.), axis=-1)
        return stick_breaking_logdet + tanh_logdet
    def forward_shape(self, shape):
        # offset=-1: only the strict lower triangle is parameterized.
        return _matrix_forward_shape(shape, offset=-1)
    def inverse_shape(self, shape):
        return _matrix_inverse_shape(shape, offset=-1)
class CorrMatrixCholeskyTransform(CholeskyTransform):
    """Cholesky transform specialized to correlation matrices."""
    domain = constraints.corr_matrix
    codomain = constraints.corr_cholesky
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # NB: see derivation in LKJCholesky implementation
        n = jnp.shape(x)[-1]
        order = -jnp.arange(n - 1, -1, -1)
        return jnp.sum(order * jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1)), axis=-1)
class ExpTransform(Transform):
    """Maps x -> exp(x); the codomain follows from the (restricted) domain."""
    # TODO: refine domain/codomain logic through setters, especially when
    # transforms for inverses are supported
    def __init__(self, domain=constraints.real):
        self.domain = domain
    @property
    def codomain(self):
        if self.domain is constraints.real:
            return constraints.positive
        elif isinstance(self.domain, constraints.greater_than):
            # exp is monotone, so bounds map through directly.
            return constraints.greater_than(self.__call__(self.domain.lower_bound))
        elif isinstance(self.domain, constraints.interval):
            return constraints.interval(self.__call__(self.domain.lower_bound),
                                        self.__call__(self.domain.upper_bound))
        else:
            raise NotImplementedError
    def __call__(self, x):
        # XXX consider to clamp from below for stability if necessary
        return jnp.exp(x)
    def _inverse(self, y):
        return jnp.log(y)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # log|d exp(x)/dx| = x
        return x
class IdentityTransform(Transform):
    """No-op transform: x -> x with zero log-det-Jacobian."""
    def __call__(self, x):
        return x
    def _inverse(self, y):
        return y
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        return jnp.zeros_like(x)
class IndependentTransform(Transform):
    """Wraps ``base_transform``, reinterpreting the rightmost
    ``reinterpreted_batch_ndims`` batch dims as event dims."""
    def __init__(self, base_transform, reinterpreted_batch_ndims):
        assert isinstance(base_transform, Transform)
        assert isinstance(reinterpreted_batch_ndims, int)
        assert reinterpreted_batch_ndims >= 0
        self.base_transform = base_transform
        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
        super().__init__()
    @property
    def domain(self):
        return constraints.independent(self.base_transform.domain, self.reinterpreted_batch_ndims)
    @property
    def codomain(self):
        return constraints.independent(self.base_transform.codomain, self.reinterpreted_batch_ndims)
    def __call__(self, x):
        # Forward/inverse are unchanged; only log-det accounting differs.
        return self.base_transform(x)
    def _inverse(self, y):
        return self.base_transform._inverse(y)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        result = self.base_transform.log_abs_det_jacobian(x, y, intermediates=intermediates)
        if jnp.ndim(result) < self.reinterpreted_batch_ndims:
            expected = self.domain.event_dim
            raise ValueError(f"Expected x.dim() >= {expected} but got {jnp.ndim(x)}")
        # Sum the base log-det over the reinterpreted batch dims.
        return sum_rightmost(result, self.reinterpreted_batch_ndims)
    def call_with_intermediates(self, x):
        return self.base_transform.call_with_intermediates(x)
    def forward_shape(self, shape):
        return self.base_transform.forward_shape(shape)
    def inverse_shape(self, shape):
        return self.base_transform.inverse_shape(shape)
class InvCholeskyTransform(Transform):
    """Maps a Cholesky factor x to x @ x.T.  Deprecated; see the warning."""
    def __init__(self, domain=constraints.lower_cholesky):
        warnings.warn("InvCholeskyTransform is deprecated. Please use CholeskyTransform"
                      " or CorrMatrixCholeskyTransform instead.", FutureWarning)
        assert domain in [constraints.lower_cholesky, constraints.corr_cholesky]
        self.domain = domain
    @property
    def codomain(self):
        # The two branches are exhaustive given the assert in __init__.
        if self.domain is constraints.lower_cholesky:
            return constraints.positive_definite
        elif self.domain is constraints.corr_cholesky:
            return constraints.corr_matrix
    def __call__(self, x):
        return jnp.matmul(x, jnp.swapaxes(x, -2, -1))
    def _inverse(self, y):
        return jnp.linalg.cholesky(y)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        if self.domain is constraints.lower_cholesky:
            # Ref: http://web.mit.edu/18.325/www/handouts/handout2.pdf page 13
            n = jnp.shape(x)[-1]
            order = jnp.arange(n, 0, -1)
            return n * jnp.log(2) + jnp.sum(order * jnp.log(jnp.diagonal(x, axis1=-2, axis2=-1)), axis=-1)
        else:
            # NB: see derivation in LKJCholesky implementation
            n = jnp.shape(x)[-1]
            order = jnp.arange(n - 1, -1, -1)
            return jnp.sum(order * jnp.log(jnp.diagonal(x, axis1=-2, axis2=-1)), axis=-1)
class LowerCholeskyAffine(Transform):
    """Maps x -> loc + scale_tril @ x for a fixed 2-D lower-triangular scale_tril."""
    domain = constraints.real_vector
    codomain = constraints.real_vector
    def __init__(self, loc, scale_tril):
        if jnp.ndim(scale_tril) != 2:
            raise ValueError("Only support 2-dimensional scale_tril matrix. "
                             "Please make a feature request if you need to "
                             "use this transform with batched scale_tril.")
        self.loc = loc
        self.scale_tril = scale_tril
    def __call__(self, x):
        return self.loc + jnp.squeeze(jnp.matmul(self.scale_tril, x[..., jnp.newaxis]), axis=-1)
    def _inverse(self, y):
        # Flatten the batch dims, solve the triangular system, then restore.
        y = y - self.loc
        original_shape = jnp.shape(y)
        yt = jnp.reshape(y, (-1, original_shape[-1])).T
        xt = solve_triangular(self.scale_tril, yt, lower=True)
        return jnp.reshape(xt.T, original_shape)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # log|det scale_tril| = sum of the log-diagonal, broadcast over batch.
        return jnp.broadcast_to(jnp.log(jnp.diagonal(self.scale_tril, axis1=-2, axis2=-1)).sum(-1),
                                jnp.shape(x)[:-1])
    def forward_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return lax.broadcast_shapes(shape, self.loc.shape, self.scale_tril.shape[:-1])
    def inverse_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return lax.broadcast_shapes(shape, self.loc.shape, self.scale_tril.shape[:-1])
class LowerCholeskyTransform(Transform):
    """Maps a real vector to a lower-triangular matrix with positive diagonal."""
    domain = constraints.real_vector
    codomain = constraints.lower_cholesky
    def __call__(self, x):
        # n solves n * (n + 1) / 2 = len(x); the last n entries parameterize
        # the (exp'd) diagonal, the rest fill the strict lower triangle.
        n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
        z = vec_to_tril_matrix(x[..., :-n], diagonal=-1)
        diag = jnp.exp(x[..., -n:])
        return z + jnp.expand_dims(diag, axis=-1) * jnp.identity(n)
    def _inverse(self, y):
        z = matrix_to_tril_vec(y, diagonal=-1)
        return jnp.concatenate([z, jnp.log(jnp.diagonal(y, axis1=-2, axis2=-1))], axis=-1)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # the jacobian is diagonal, so logdet is the sum of diagonal `exp` transform
        n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
        return x[..., -n:].sum(-1)
    def forward_shape(self, shape):
        return _matrix_forward_shape(shape)
    def inverse_shape(self, shape):
        return _matrix_inverse_shape(shape)
class OrderedTransform(Transform):
    """Maps a real vector to a strictly increasing vector via cumulative exp."""
    domain = constraints.real_vector
    codomain = constraints.ordered_vector
    def __call__(self, x):
        # First entry is free; each later entry adds a positive increment exp(x_i).
        z = jnp.concatenate([x[..., :1], jnp.exp(x[..., 1:])], axis=-1)
        return jnp.cumsum(z, axis=-1)
    def _inverse(self, y):
        x = jnp.log(y[..., 1:] - y[..., :-1])
        return jnp.concatenate([y[..., :1], x], axis=-1)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Triangular jacobian; diagonal is (1, exp(x_1), ..., exp(x_{n-1})).
        return jnp.sum(x[..., 1:], -1)
class PermuteTransform(Transform):
    """Permutes the rightmost dimension by a fixed index array."""
    domain = constraints.real_vector
    codomain = constraints.real_vector
    def __init__(self, permutation):
        self.permutation = permutation
    def __call__(self, x):
        return x[..., self.permutation]
    def _inverse(self, y):
        size = self.permutation.size
        # Build the inverse permutation by scattering positions.
        # NOTE(review): jax.ops.index_update was removed in newer JAX
        # releases (`arr.at[idx].set(...)` is the replacement) — confirm the
        # pinned jax version before upgrading.
        permutation_inv = ops.index_update(jnp.zeros(size, dtype=jnp.result_type(int)),
                                           self.permutation,
                                           jnp.arange(size))
        return y[..., permutation_inv]
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # A permutation is volume preserving.
        return jnp.full(jnp.shape(x)[:-1], 0.)
class PowerTransform(Transform):
    """Maps positive x -> x ** exponent."""
    domain = constraints.positive
    codomain = constraints.positive
    def __init__(self, exponent):
        self.exponent = exponent
    def __call__(self, x):
        return jnp.power(x, self.exponent)
    def _inverse(self, y):
        return jnp.power(y, 1 / self.exponent)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # d/dx x**p = p * x**(p-1) = p * y / x
        return jnp.log(jnp.abs(self.exponent * y / x))
    def forward_shape(self, shape):
        return lax.broadcast_shapes(shape, getattr(self.exponent, "shape", ()))
    def inverse_shape(self, shape):
        return lax.broadcast_shapes(shape, getattr(self.exponent, "shape", ()))
class SigmoidTransform(Transform):
    """Maps x -> sigmoid(x), clipped into the open unit interval."""
    codomain = constraints.unit_interval
    def __call__(self, x):
        return _clipped_expit(x)
    def _inverse(self, y):
        return logit(y)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # log sigmoid'(x) = -|x| - 2 * log1p(exp(-|x|)), stable for large |x|.
        x_abs = jnp.abs(x)
        return -x_abs - 2 * jnp.log1p(jnp.exp(-x_abs))
def _softplus_inv(y):
return jnp.log(-jnp.expm1(-y)) + y
class SoftplusTransform(Transform):
    """Maps x -> softplus(x) = log(1 + exp(x))."""
    domain = constraints.real
    codomain = constraints.softplus_positive
    def __call__(self, x):
        return softplus(x)
    def _inverse(self, y):
        return _softplus_inv(y)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # log softplus'(x) = log sigmoid(x) = -softplus(-x)
        return -softplus(-x)
class SoftplusLowerCholeskyTransform(Transform):
    """Like LowerCholeskyTransform, but uses softplus for the positive diagonal."""
    domain = constraints.real_vector
    codomain = constraints.softplus_lower_cholesky
    def __call__(self, x):
        # n solves n * (n + 1) / 2 = len(x); last n entries form the diagonal.
        n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
        z = vec_to_tril_matrix(x[..., :-n], diagonal=-1)
        diag = softplus(x[..., -n:])
        return z + jnp.expand_dims(diag, axis=-1) * jnp.identity(n)
    def _inverse(self, y):
        z = matrix_to_tril_vec(y, diagonal=-1)
        diag = _softplus_inv(jnp.diagonal(y, axis1=-2, axis2=-1))
        return jnp.concatenate([z, diag], axis=-1)
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # The jacobian is diagonal: sum log softplus'(x) = -softplus(-x)
        # over the diagonal entries.
        n = round((math.sqrt(1 + 8 * x.shape[-1]) - 1) / 2)
        return -softplus(-x[..., -n:]).sum(-1)
    def forward_shape(self, shape):
        return _matrix_forward_shape(shape)
    def inverse_shape(self, shape):
        return _matrix_inverse_shape(shape)
class StickBreakingTransform(Transform):
    """Maps an unconstrained (K-1)-vector to a point on the K-simplex."""
    domain = constraints.real_vector
    codomain = constraints.simplex
    def __call__(self, x):
        # we shift x to obtain a balanced mapping (0, 0, ..., 0) -> (1/K, 1/K, ..., 1/K)
        x = x - jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))
        # convert to probabilities (relative to the remaining) of each fraction of the stick
        z = _clipped_expit(x)
        z1m_cumprod = jnp.cumprod(1 - z, axis=-1)
        # pad z with 1 on the right and the cumulative product with 1 on the
        # left, so their product yields all K simplex coordinates at once.
        pad_width = [(0, 0)] * x.ndim
        pad_width[-1] = (0, 1)
        z_padded = jnp.pad(z, pad_width, mode="constant", constant_values=1.)
        pad_width = [(0, 0)] * x.ndim
        pad_width[-1] = (1, 0)
        z1m_cumprod_shifted = jnp.pad(z1m_cumprod, pad_width, mode="constant", constant_values=1.)
        return z_padded * z1m_cumprod_shifted
    def _inverse(self, y):
        y_crop = y[..., :-1]
        z1m_cumprod = jnp.clip(1 - jnp.cumsum(y_crop, axis=-1), a_min=jnp.finfo(y.dtype).tiny)
        # hence x = logit(z) = log(z / (1 - z)) = y[::-1] / z1m_cumprod
        x = jnp.log(y_crop / z1m_cumprod)
        return x + jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Ref: https://mc-stan.org/docs/2_19/reference-manual/simplex-transform-section.html
        # |det|(J) = Product(y * (1 - z))
        x = x - jnp.log(x.shape[-1] - jnp.arange(x.shape[-1]))
        z = jnp.clip(expit(x), a_min=jnp.finfo(x.dtype).tiny)
        # XXX we use the identity 1 - z = z * exp(-x) to not worry about
        # the case z ~ 1
        return jnp.sum(jnp.log(y[..., :-1] * z) - x, axis=-1)
    def forward_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return shape[:-1] + (shape[-1] + 1,)
    def inverse_shape(self, shape):
        if len(shape) < 1:
            raise ValueError("Too few dimensions on input")
        return shape[:-1] + (shape[-1] - 1,)
class UnpackTransform(Transform):
    """Unpacks a flat real vector into a pytree of arrays via ``unpack_fn``."""
    domain = constraints.real_vector
    codomain = constraints.dependent
    def __init__(self, unpack_fn):
        self.unpack_fn = unpack_fn
    def __call__(self, x):
        batch_shape = x.shape[:-1]
        if batch_shape:
            # vmap over the flattened batch dims, then restore them per leaf.
            unpacked = vmap(self.unpack_fn)(x.reshape((-1,) + x.shape[-1:]))
            return tree_map(lambda z: jnp.reshape(z, batch_shape + z.shape[1:]), unpacked)
        else:
            return self.unpack_fn(x)
    def _inverse(self, y):
        leading_dims = [v.shape[0] if jnp.ndim(v) > 0 else 0
                        for v in tree_flatten(y)[0]]
        d0 = leading_dims[0]
        not_scalar = d0 > 0 or len(leading_dims) > 1
        # NOTE(review): the warning fires when all leading dims *agree* —
        # confirm whether the intended trigger is consistent (batched) leaves.
        if not_scalar and all(d == d0 for d in leading_dims[1:]):
            warnings.warn("UnpackTransform.inv might lead to an unexpected behavior because it"
                          " cannot transform a batch of unpacked arrays.")
        return ravel_pytree(y)[0]
    def log_abs_det_jacobian(self, x, y, intermediates=None):
        # Repacking is a reshape: volume preserving.
        return jnp.zeros(jnp.shape(x)[:-1])
    def forward_shape(self, shape):
        raise NotImplementedError
    def inverse_shape(self, shape):
        raise NotImplementedError
##########################################################
# CONSTRAINT_REGISTRY
##########################################################
class ConstraintRegistry(object):
    """Registry mapping constraint types to transform-building factories."""
    def __init__(self):
        self._registry = {}
    def register(self, constraint, factory=None):
        """Register ``factory`` for ``constraint``; also usable as a decorator."""
        if factory is None:
            # Decorator form: @registry.register(constraint)
            return lambda fn: self.register(constraint, fn)
        # Allow registering either a constraint class or an instance of one.
        if isinstance(constraint, constraints.Constraint):
            constraint = type(constraint)
        self._registry[constraint] = factory
    def __call__(self, constraint):
        """Build and return a transform for ``constraint``."""
        try:
            factory = self._registry[type(constraint)]
        except KeyError as e:
            # No factory registered for this constraint type.
            raise NotImplementedError from e
        return factory(constraint)
# Global registry: biject_to(constraint) returns a bijection from the
# unconstrained reals onto the support described by `constraint`.
biject_to = ConstraintRegistry()
@biject_to.register(constraints.corr_cholesky)
def _transform_to_corr_cholesky(constraint):
    return CorrCholeskyTransform()
@biject_to.register(constraints.corr_matrix)
def _transform_to_corr_matrix(constraint):
    # Build the Cholesky factor, then map factor -> correlation matrix.
    return ComposeTransform([CorrCholeskyTransform(),
                             CorrMatrixCholeskyTransform().inv])
@biject_to.register(constraints.greater_than)
def _transform_to_greater_than(constraint):
    if constraint is constraints.positive:
        return ExpTransform()
    # exp to (0, inf), then shift by the lower bound.
    return ComposeTransform([ExpTransform(),
                             AffineTransform(constraint.lower_bound, 1,
                                             domain=constraints.positive)])
@biject_to.register(constraints.less_than)
def _transform_to_less_than(constraint):
    # exp to (0, inf), then negate and shift so the image is (-inf, upper).
    return ComposeTransform([ExpTransform(),
                             AffineTransform(constraint.upper_bound, -1,
                                             domain=constraints.positive)])
@biject_to.register(constraints.independent)
def _biject_to_independent(constraint):
    return IndependentTransform(biject_to(constraint.base_constraint),
                                constraint.reinterpreted_batch_ndims)
@biject_to.register(constraints.interval)
def _transform_to_interval(constraint):
    if constraint is constraints.unit_interval:
        return SigmoidTransform()
    # sigmoid to (0, 1), then affine-map onto (lower, upper).
    scale = constraint.upper_bound - constraint.lower_bound
    return ComposeTransform([SigmoidTransform(),
                             AffineTransform(constraint.lower_bound, scale,
                                             domain=constraints.unit_interval)])
@biject_to.register(constraints.lower_cholesky)
def _transform_to_lower_cholesky(constraint):
    return LowerCholeskyTransform()
@biject_to.register(constraints.ordered_vector)
def _transform_to_ordered_vector(constraint):
    return OrderedTransform()
@biject_to.register(constraints.positive_definite)
def _transform_to_positive_definite(constraint):
    # Build a Cholesky factor, then map factor -> positive-definite matrix.
    return ComposeTransform([LowerCholeskyTransform(), CholeskyTransform().inv])
@biject_to.register(constraints.positive_ordered_vector)
def _transform_to_positive_ordered_vector(constraint):
    return ComposeTransform([OrderedTransform(), ExpTransform()])
@biject_to.register(constraints.real)
def _transform_to_real(constraint):
    return IdentityTransform()
@biject_to.register(constraints.softplus_positive)
def _transform_to_softplus_positive(constraint):
    return SoftplusTransform()
@biject_to.register(constraints.softplus_lower_cholesky)
def _transform_to_softplus_lower_cholesky(constraint):
    return SoftplusLowerCholeskyTransform()
@biject_to.register(constraints.simplex)
def _transform_to_simplex(constraint):
    return StickBreakingTransform()
| true | true |
f71af30647e8b3464f41ad8052f431bd92a2243e | 2,623 | py | Python | christmas_lights/Sprite1d.py | rec/christmas_lights | da72b3941b097b6854ba1ba999c0a6cf9b029b0f | [
"MIT"
] | 1 | 2019-05-26T15:10:04.000Z | 2019-05-26T15:10:04.000Z | christmas_lights/Sprite1d.py | rec/christmas_lights | da72b3941b097b6854ba1ba999c0a6cf9b029b0f | [
"MIT"
] | null | null | null | christmas_lights/Sprite1d.py | rec/christmas_lights | da72b3941b097b6854ba1ba999c0a6cf9b029b0f | [
"MIT"
] | null | null | null | import numbers, random
class Sprite1d:
    """A one-dimensional sprite with subpixel positioning."""
    def __init__(self, icon, color_list, speed=0, acceleration=0, bound=(0, 1),
                 position=0, center=None):
        self.color_list = color_list
        if hasattr(color_list, 'dtype'):
            # numpy color buffers get the vectorized blend instead.
            self._combine = self._combine_numpy
        self.icon = icon
        self.speed = to_number(speed)
        self.acceleration = to_number(acceleration)
        self.bound = bound
        self.position = to_number(position)
        self.center = int(len(self.icon) / 2) if center is None else center
        # NOTE(review): callers must set fps > 0 before calling move(),
        # otherwise move() divides by zero.
        self.fps = 0
    def display(self):
        """Blend the icon into the color list at the current subpixel position."""
        # Handle subpixel positioning: split into a whole pixel offset and a
        # fractional remainder, and blend the icon across both neighbors.
        whole, fraction = divmod(self.position * len(self.color_list), 1)
        left = int(whole) - self.center
        right = left + len(self.icon)
        self._add(left, right, 1 - fraction)
        if fraction:
            self._add(left + 1, right + 1, fraction)
    def move(self, amt):
        """Advance position by amt time steps under constant acceleration."""
        self.position += amt * (self.speed + self.acceleration / 2) / self.fps
        self.speed += self.acceleration
    def bounce(self):
        """Reflect position and speed off either edge of self.bound."""
        left, right = self.bound
        if self.position < left and self.speed < 0:
            self.position = left + (left - self.position)
            self.speed = -self.speed
        if self.position >= right and self.speed > 0:
            self.position = right - (self.position - right)
            self.speed = -self.speed
    def _combine_numpy(self, left, right, ratio, pixels):
        # Vectorized blend for numpy buffers; `right` is exclusive.
        self.color_list[left:right] += ratio * pixels
    def _combine(self, left, right, ratio, pixels):
        # Per-pixel blend for plain Python sequences of color tuples.
        for i in range(left, right):
            color = self.color_list[i]
            pixel = pixels[i - left]
            # BUG FIX: the blended color was computed but never stored back,
            # so the pure-Python path was a no-op.
            self.color_list[i] = tuple(c + ratio * p for c, p in zip(color, pixel))
    def _add(self, left, right, ratio):
        """Blend `ratio` of the icon into color_list[left:right) with clipping."""
        pixels = self.icon
        # Is the sprite visible at all?  `right` is exclusive.
        if right <= 0 or left >= len(self.color_list):
            return
        if left < 0:
            # It's partly off the left side.
            pixels = pixels[-left:]
            left = 0
        if right > len(self.color_list):
            # It's partly off the right side.  BUG FIX: the original clipped
            # one pixel too many, dropping a pixel even when the icon exactly
            # reached the right edge.
            pixels = pixels[:len(self.color_list) - right]
            right = len(self.color_list)
        self._combine(left, right, ratio, pixels)
def to_number(x):
    """Return x itself if it is numeric, else parse a 'rand(lo,hi)' spec."""
    if isinstance(x, numbers.Number):
        return x
    if not x.startswith('rand('):
        raise ValueError("Don't understand number '%s'" % x)
    # Strip the 'rand(' prefix and the closing ')', then parse both bounds.
    lo, hi = map(float, x[5:-1].split(','))
    return random.uniform(lo, hi)
| 33.202532 | 79 | 0.576439 | import numbers, random
class Sprite1d:
    """A one-dimensional sprite with subpixel positioning."""
    def __init__(self, icon, color_list, speed=0, acceleration=0, bound=(0, 1),
                 position=0, center=None):
        self.color_list = color_list
        if hasattr(color_list, 'dtype'):
            # numpy color buffers get the vectorized blend instead.
            self._combine = self._combine_numpy
        self.icon = icon
        self.speed = to_number(speed)
        self.acceleration = to_number(acceleration)
        self.bound = bound
        self.position = to_number(position)
        self.center = int(len(self.icon) / 2) if center is None else center
        # NOTE(review): callers must set fps > 0 before calling move(),
        # otherwise move() divides by zero.
        self.fps = 0
    def display(self):
        """Blend the icon into the color list at the current subpixel position."""
        # Split into a whole pixel offset and a fractional remainder, and
        # blend the icon across both neighboring positions.
        whole, fraction = divmod(self.position * len(self.color_list), 1)
        left = int(whole) - self.center
        right = left + len(self.icon)
        self._add(left, right, 1 - fraction)
        if fraction:
            self._add(left + 1, right + 1, fraction)
    def move(self, amt):
        """Advance position by amt time steps under constant acceleration."""
        self.position += amt * (self.speed + self.acceleration / 2) / self.fps
        self.speed += self.acceleration
    def bounce(self):
        """Reflect position and speed off either edge of self.bound."""
        left, right = self.bound
        if self.position < left and self.speed < 0:
            self.position = left + (left - self.position)
            self.speed = -self.speed
        if self.position >= right and self.speed > 0:
            self.position = right - (self.position - right)
            self.speed = -self.speed
    def _combine_numpy(self, left, right, ratio, pixels):
        # Vectorized blend for numpy buffers; `right` is exclusive.
        self.color_list[left:right] += ratio * pixels
    def _combine(self, left, right, ratio, pixels):
        # Per-pixel blend for plain Python sequences of color tuples.
        for i in range(left, right):
            color = self.color_list[i]
            pixel = pixels[i - left]
            # BUG FIX: the blended color was computed but never stored back,
            # so the pure-Python path was a no-op.
            self.color_list[i] = tuple(c + ratio * p for c, p in zip(color, pixel))
    def _add(self, left, right, ratio):
        """Blend `ratio` of the icon into color_list[left:right) with clipping."""
        pixels = self.icon
        # Is the sprite visible at all?  `right` is exclusive.
        if right <= 0 or left >= len(self.color_list):
            return
        if left < 0:
            # It's partly off the left side.
            pixels = pixels[-left:]
            left = 0
        if right > len(self.color_list):
            # It's partly off the right side.  BUG FIX: the original clipped
            # one pixel too many, dropping a pixel even when the icon exactly
            # reached the right edge.
            pixels = pixels[:len(self.color_list) - right]
            right = len(self.color_list)
        self._combine(left, right, ratio, pixels)
def to_number(x):
    """Return x itself if it is numeric, else parse a 'rand(lo,hi)' spec."""
    if isinstance(x, numbers.Number):
        return x
    if not x.startswith('rand('):
        raise ValueError("Don't understand number '%s'" % x)
    # Strip the 'rand(' prefix and the closing ')', then parse both bounds.
    lo, hi = map(float, x[5:-1].split(','))
    return random.uniform(lo, hi)
| true | true |
f71af32c0f552806810683bb603031e425d1a879 | 95 | py | Python | core/response/__init__.py | ryanolee/pager-duty-sync | 1fd88634e461b5db647d856bc6b59f990944685e | [
"MIT"
] | null | null | null | core/response/__init__.py | ryanolee/pager-duty-sync | 1fd88634e461b5db647d856bc6b59f990944685e | [
"MIT"
] | 2 | 2020-09-27T18:19:17.000Z | 2021-06-29T09:21:04.000Z | core/response/__init__.py | ryanolee/pager-duty-sync | 1fd88634e461b5db647d856bc6b59f990944685e | [
"MIT"
] | null | null | null | from .response import get_response
from .lambda_proxy_response import get_lambda_proxy_response | 47.5 | 60 | 0.905263 | from .response import get_response
from .lambda_proxy_response import get_lambda_proxy_response | true | true |
f71af42134cc4cc0f0fd59f5b0ef650eed03bbb9 | 2,051 | py | Python | pagarmecoreapi/models/list_customers_response.py | pagarme/pagarme-core-api-python | c7b11ca78ab3e7e896e5b75048e6f72b511db00e | [
"MIT"
] | 6 | 2021-09-02T19:55:04.000Z | 2022-03-16T14:06:15.000Z | pagarmecoreapi/models/list_customers_response.py | pagarme/pagarme-core-api-python | c7b11ca78ab3e7e896e5b75048e6f72b511db00e | [
"MIT"
] | 2 | 2021-10-11T22:48:15.000Z | 2022-01-24T18:24:23.000Z | pagarmecoreapi/models/list_customers_response.py | pagarme/pagarme-core-api-python | c7b11ca78ab3e7e896e5b75048e6f72b511db00e | [
"MIT"
] | 2 | 2021-09-12T21:43:32.000Z | 2022-03-07T16:58:54.000Z | # -*- coding: utf-8 -*-
"""
pagarmecoreapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
import pagarmecoreapi.models.get_customer_response
import pagarmecoreapi.models.paging_response
class ListCustomersResponse(object):
    """Implementation of the 'ListCustomersResponse' model.

    Response for listing the customers

    Attributes:
        data (list of GetCustomerResponse): The customer object
        paging (PagingResponse): Paging object

    """
    # Create a mapping from Model property names to API property names
    _names = {
        "data":'data',
        "paging":'paging'
    }
    def __init__(self,
                 data=None,
                 paging=None):
        """Constructor for the ListCustomersResponse class"""
        # Initialize members of the class
        self.data = data
        self.paging = paging
    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class.

        """
        if dictionary is None:
            return None
        # Extract variables from the dictionary.
        # Idiom fix: compare against None with `is not None` instead of `!= None`,
        # and build the list with a comprehension.
        data = None
        if dictionary.get('data') is not None:
            data = [pagarmecoreapi.models.get_customer_response.GetCustomerResponse.from_dictionary(structure)
                    for structure in dictionary.get('data')]
        paging_dict = dictionary.get('paging')
        paging = (pagarmecoreapi.models.paging_response.PagingResponse.from_dictionary(paging_dict)
                  if paging_dict else None)
        # Return an object of this model
        return cls(data,
                   paging)
| 29.3 | 150 | 0.611409 |
import pagarmecoreapi.models.get_customer_response
import pagarmecoreapi.models.paging_response
class ListCustomersResponse(object):
_names = {
"data":'data',
"paging":'paging'
}
def __init__(self,
data=None,
paging=None):
self.data = data
self.paging = paging
@classmethod
def from_dictionary(cls,
dictionary):
if dictionary is None:
return None
data = None
if dictionary.get('data') != None:
data = list()
for structure in dictionary.get('data'):
data.append(pagarmecoreapi.models.get_customer_response.GetCustomerResponse.from_dictionary(structure))
paging = pagarmecoreapi.models.paging_response.PagingResponse.from_dictionary(dictionary.get('paging')) if dictionary.get('paging') else None
return cls(data,
paging)
| true | true |
f71af554f8fb983c51ebf2f5979bbb45ab0484fa | 1,434 | py | Python | services/web/server/src/simcore_service_webserver/log.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 25 | 2018-04-13T12:44:12.000Z | 2022-03-12T15:01:17.000Z | services/web/server/src/simcore_service_webserver/log.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 2,553 | 2018-01-18T17:11:55.000Z | 2022-03-31T16:26:40.000Z | services/web/server/src/simcore_service_webserver/log.py | odeimaiz/osparc-simcore | 71c2fc58dcfe067487dcd75cb70298a4d6237e97 | [
"MIT"
] | 20 | 2018-01-18T19:45:33.000Z | 2022-03-29T07:08:47.000Z | """ Configuration and utilities for service logging
"""
import logging
from typing import Optional, Union
from aiodebug import log_slow_callbacks
from aiohttp.log import access_logger
from servicelib.logging_utils import config_all_loggers
LOG_LEVEL_STEP = logging.CRITICAL - logging.ERROR
def setup_logging(*, level: Union[str, int], slow_duration: Optional[float] = None):
    """Configure service-wide logging.

    Applies ``level`` to the root logger and aiohttp's access logger,
    raises known-noisy third-party loggers one step above the root level,
    and optionally enables aiodebug's slow-callback warnings.
    """
    # Base configuration: the service level drives the root logger.
    logging.basicConfig(level=level)
    logging.root.setLevel(level)
    config_all_loggers()

    # aiohttp access log follows the same level as the service.
    access_logger.setLevel(level)

    # Noisy libraries are clamped one step quieter, within [WARNING, CRITICAL].
    noisy_level: int = max(
        min(logging.root.level + LOG_LEVEL_STEP, logging.CRITICAL), logging.WARNING
    )
    for noisy_name in (
        "engineio",
        "openapi_spec_validator",
        "sqlalchemy",
        "sqlalchemy.engine",
    ):
        logging.getLogger(noisy_name).setLevel(noisy_level)

    if slow_duration:
        # NOTE: Every task blocking > AIODEBUG_SLOW_DURATION_SECS secs is
        # considered slow and logged as warning
        log_slow_callbacks.enable(abs(slow_duration))
def test_logger_propagation(logger: logging.Logger):
msg = f"TESTING %s log with {logger}"
logger.critical(msg, "critical")
logger.error(msg, "error")
logger.info(msg, "info")
logger.warning(msg, "warning")
logger.debug(msg, "debug")
| 30.510638 | 111 | 0.739191 | import logging
from typing import Optional, Union
from aiodebug import log_slow_callbacks
from aiohttp.log import access_logger
from servicelib.logging_utils import config_all_loggers
LOG_LEVEL_STEP = logging.CRITICAL - logging.ERROR
def setup_logging(*, level: Union[str, int], slow_duration: Optional[float] = None):
logging.basicConfig(level=level)
logging.root.setLevel(level)
config_all_loggers()
access_logger.setLevel(level)
quiet_level: int = max(
min(logging.root.level + LOG_LEVEL_STEP, logging.CRITICAL), logging.WARNING
)
logging.getLogger("engineio").setLevel(quiet_level)
logging.getLogger("openapi_spec_validator").setLevel(quiet_level)
logging.getLogger("sqlalchemy").setLevel(quiet_level)
logging.getLogger("sqlalchemy.engine").setLevel(quiet_level)
if slow_duration:
log_slow_callbacks.enable(abs(slow_duration))
def test_logger_propagation(logger: logging.Logger):
msg = f"TESTING %s log with {logger}"
logger.critical(msg, "critical")
logger.error(msg, "error")
logger.info(msg, "info")
logger.warning(msg, "warning")
logger.debug(msg, "debug")
| true | true |
f71af6b92295f3372a61cc87a1cb4e7b3810469d | 336 | py | Python | Algorithms/Sort Array By Parity.py | KushRabadia/Leetcode | f6af5bf0b9ef8daf9870570b52012297128aa9e1 | [
"MIT"
] | null | null | null | Algorithms/Sort Array By Parity.py | KushRabadia/Leetcode | f6af5bf0b9ef8daf9870570b52012297128aa9e1 | [
"MIT"
] | null | null | null | Algorithms/Sort Array By Parity.py | KushRabadia/Leetcode | f6af5bf0b9ef8daf9870570b52012297128aa9e1 | [
"MIT"
] | null | null | null | class Solution(object):
def sortArrayByParity(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
result = []
for i in A:
if i%2 == 0:
result.insert(0,i)
else:
result.append(i)
return result
| 21 | 35 | 0.383929 | class Solution(object):
def sortArrayByParity(self, A):
result = []
for i in A:
if i%2 == 0:
result.insert(0,i)
else:
result.append(i)
return result
| true | true |
f71af7240c9eccec7c0e6d401d254719234a7b2b | 1,119 | py | Python | pyunity/examples/example6/__init__.py | rayzchen/PyUnity | 8ed436eca7a84f05190c1fa275c58da5c6059926 | [
"MIT"
] | null | null | null | pyunity/examples/example6/__init__.py | rayzchen/PyUnity | 8ed436eca7a84f05190c1fa275c58da5c6059926 | [
"MIT"
] | null | null | null | pyunity/examples/example6/__init__.py | rayzchen/PyUnity | 8ed436eca7a84f05190c1fa275c58da5c6059926 | [
"MIT"
] | null | null | null | # Copyright (c) 2020-2022 The PyUnity Team
# This file is licensed under the MIT License.
# See https://docs.pyunity.x10.bz/en/latest/license.html
from pyunity import Behaviour, GameObject, SceneManager, Material, RGB, Mesh, Vector3, MeshRenderer, WaitForSeconds
class Switch(Behaviour):
    """Behaviour that loads the scene at index 1 three seconds after start."""

    async def Start(self):
        # Wait three seconds, then swap scenes.  Index 1 is presumably the
        # second scene added in main() — confirm against SceneManager's
        # scene-registration order.
        await WaitForSeconds(3)
        SceneManager.LoadSceneByIndex(1)
def main():
    """Build two single-cube scenes and start PyUnity on the first one."""
    scene_a = SceneManager.AddScene("Scene")
    scene_b = SceneManager.AddScene("Scene 2")

    # Pull both cameras back along -Z so the cubes are in view.
    scene_a.mainCamera.transform.localPosition = Vector3(0, 0, -10)
    scene_b.mainCamera.transform.localPosition = Vector3(0, 0, -10)

    def make_cube(name, color):
        # One size-2 cube with a solid-color material (not yet in a scene).
        obj = GameObject(name)
        rend = obj.AddComponent(MeshRenderer)
        rend.mesh = Mesh.cube(2)
        rend.mat = Material(color)
        return obj

    red_cube = make_cube("Cube", RGB(255, 0, 0))
    # The Switch behaviour flips to the second scene after three seconds.
    red_cube.AddComponent(Switch)
    scene_a.Add(red_cube)

    blue_cube = make_cube("Cube 2", RGB(0, 0, 255))
    scene_b.Add(blue_cube)

    SceneManager.LoadScene(scene_a)
if __name__ == "__main__":
main()
| 31.971429 | 116 | 0.680965 |
from pyunity import Behaviour, GameObject, SceneManager, Material, RGB, Mesh, Vector3, MeshRenderer, WaitForSeconds
class Switch(Behaviour):
async def Start(self):
await WaitForSeconds(3)
SceneManager.LoadSceneByIndex(1)
def main():
scene = SceneManager.AddScene("Scene")
scene2 = SceneManager.AddScene("Scene 2")
scene.mainCamera.transform.localPosition = Vector3(0, 0, -10)
scene2.mainCamera.transform.localPosition = Vector3(0, 0, -10)
cube = GameObject("Cube")
renderer = cube.AddComponent(MeshRenderer)
renderer.mesh = Mesh.cube(2)
renderer.mat = Material(RGB(255, 0, 0))
cube.AddComponent(Switch)
scene.Add(cube)
cube2 = GameObject("Cube 2")
renderer = cube2.AddComponent(MeshRenderer)
renderer.mesh = Mesh.cube(2)
renderer.mat = Material(RGB(0, 0, 255))
scene2.Add(cube2)
SceneManager.LoadScene(scene)
if __name__ == "__main__":
main()
| true | true |
f71af7bed48f507c7621116150709b5c2b26365b | 2,239 | py | Python | setup.py | fmarco76/DiscourseSSO | 97d3318c6ebe9cb10af3d5aeaff4da1b60472ff8 | [
"Apache-2.0"
] | 14 | 2015-06-03T09:32:16.000Z | 2021-04-28T13:39:40.000Z | setup.py | fmarco76/DiscourseSSO | 97d3318c6ebe9cb10af3d5aeaff4da1b60472ff8 | [
"Apache-2.0"
] | 3 | 2015-06-03T09:45:04.000Z | 2018-02-21T07:25:47.000Z | setup.py | fmarco76/DiscourseSSO | 97d3318c6ebe9cb10af3d5aeaff4da1b60472ff8 | [
"Apache-2.0"
] | 5 | 2015-05-29T11:23:20.000Z | 2019-09-15T23:54:48.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    """Return the text of a file located relative to this setup.py.

    Args:
        *names: path components joined onto this file's directory.
        **kwargs: optional ``encoding`` (defaults to ``'utf8'``).

    Returns:
        str: the file's full contents.
    """
    # Use a context manager so the handle is closed deterministically
    # (the original left closing to the garbage collector).
    with io.open(
        join(dirname(__file__), *names),
        encoding=kwargs.get('encoding', 'utf8')
    ) as handle:
        return handle.read()
# Package metadata.  NOTE: the trove classifier below was fixed to match
# `license='Apache2.0'` — it previously claimed 'BSD License'.
setup(
    name='DiscourseSSO',
    version='0.1.0',
    license='Apache2.0',
    description='SSO Discourse Application to allow SAML authentication',
    long_description='%s\n%s' % (read('README.rst'), re.sub(':obj:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))),
    author='Marco Fargetta',
    author_email='marco.fargetta@ct.infn.it',
    url='https://github.com/fmarco76/DiscourseSSO',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Utilities',
    ],
    keywords=[
        'SAML', 'discourse'
    ],
    install_requires=[
        'Flask>=0.10.1'
    ],
    extras_require={
        # eg: 'rst': ['docutils>=0.11'],
    },
    entry_points={
        'console_scripts': [
            # NOTE(review): 'discroursesso' looks like a typo for
            # 'discoursesso', but renaming would change the installed
            # command for existing users — left unchanged deliberately.
            'discroursesso = sso.__main__:main'
        ]
    },
)
from __future__ import absolute_import, print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
return io.open(
join(dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
setup(
name='DiscourseSSO',
version='0.1.0',
license='Apache2.0',
description='SSO Discourse Application to allow SAML authentication',
long_description='%s\n%s' % (read('README.rst'), re.sub(':obj:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))),
author='Marco Fargetta',
author_email='marco.fargetta@ct.infn.it',
url='https://github.com/fmarco76/DiscourseSSO',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Utilities',
],
keywords=[
'SAML', 'discourse'
],
install_requires=[
'Flask>=0.10.1'
],
extras_require={
},
entry_points={
'console_scripts': [
'discroursesso = sso.__main__:main'
]
},
) | true | true |
f71af8d2ec295f87bf614c74aa31266459b19da0 | 3,373 | py | Python | stactools_cgls_lc100/stactools/cgls_lc100/stac.py | jonas-eberle/stactools | ce82450343caf71c08f04d2a4a16590285585735 | [
"Apache-2.0"
] | null | null | null | stactools_cgls_lc100/stactools/cgls_lc100/stac.py | jonas-eberle/stactools | ce82450343caf71c08f04d2a4a16590285585735 | [
"Apache-2.0"
] | 2 | 2021-08-12T17:54:36.000Z | 2021-08-12T18:21:28.000Z | src/stactools/cgls_lc100/stac.py | stactools-packages/stactools-cgls_lc100 | 9cc863336cd946d0e8651c1a3670cfc9c866f54c | [
"Apache-2.0"
] | null | null | null | import os
import pystac
from pystac.utils import str_to_datetime
import rasterio as rio
from shapely.geometry import box, mapping, shape
from stactools.cgls_lc100.constants import (
PROVIDER_NAME, ITEM_TIF_IMAGE_NAME, DISCRETE_CLASSIFICATION_CLASS_NAMES,
DISCRETE_CLASSIFICATION_CLASS_PALETTE)
def create_item(tif_href, additional_providers=None):
    """Creates a STAC Item from Copernicus Global Land Cover Layers data.

    Args:
        tif_href (str): The href to the tif for this item.  The GeoTIFF's
            embedded tags are read for the metadata placed in the STAC item.
        additional_providers (list, optional): Extra pystac.Provider objects
            appended after the Copernicus provider.

    Returns:
        pystac.Item: A STAC Item representing this Copernicus Global Land Cover Layers data.
    """
    # Read the GeoTIFF metadata.  `tags` is mutated with pop() below; any
    # keys left over are copied verbatim into the item's extra_fields.
    with rio.open(tif_href) as f:
        tags = f.tags()
        band_tags = f.tags(1)
        bounds = f.bounds

    # Item id: the tif filename without its extension.
    item_id = os.path.basename(tif_href).replace('.tif', '')

    # Footprint: a rectangle from the raster bounds; `bounds` is rebound to
    # the shapely-normalized bbox of that rectangle.
    geom = mapping(box(bounds.left, bounds.bottom, bounds.right, bounds.top))
    bounds = shape(geom).bounds
    start_dt = str_to_datetime(tags.pop('time_coverage_start'))
    end_dt = str_to_datetime(tags.pop('time_coverage_end'))
    file_creation_dt = str_to_datetime(tags.pop('file_creation'))

    # datetime=None because the item covers a time range, expressed via
    # start_datetime/end_datetime instead.
    item = pystac.Item(id=item_id,
                       geometry=geom,
                       bbox=bounds,
                       datetime=None,
                       properties={
                           'start_datetime':
                           start_dt,
                           'end_datetime':
                           end_dt,
                           'discrete_classification_class_names':
                           DISCRETE_CLASSIFICATION_CLASS_NAMES,
                           'discrete_classification_class_palette':
                           DISCRETE_CLASSIFICATION_CLASS_PALETTE
                       })

    # Common metadata: Copernicus first, then any caller-supplied providers.
    copernicus_provider = pystac.Provider(name=PROVIDER_NAME,
                                          url=(tags.pop('doi')),
                                          roles=['producer', 'licensor'])
    item.common_metadata.providers = [copernicus_provider]
    if additional_providers is not None:
        item.common_metadata.providers.extend(additional_providers)

    item.common_metadata.start_datetime = start_dt
    item.common_metadata.end_datetime = end_dt
    item.common_metadata.created = file_creation_dt
    item.common_metadata.description = tags.pop('Info')
    item.common_metadata.platform = tags.pop('platform')
    item.common_metadata.title = tags.pop('title')

    # Projection extension: parses an EPSG code out of a tag shaped like
    # 'WGS84 (EPSG:4326)' — other formats would make int() raise.
    item.ext.enable('projection')
    item.ext.projection.epsg = int(
        tags.pop('delivered_product_crs').replace('WGS84 (EPSG:',
                                                  '').replace(')', ''))

    # Remaining (un-popped) tags are preserved as extra fields.
    for k, v in tags.items():
        item.extra_fields[k] = v

    # EO extension: a single band described by the tif's band-1 tags.
    long_name = band_tags.pop('long_name')
    band = pystac.extensions.eo.Band.create(
        name=long_name,
        common_name=band_tags.pop('short_name'),
        description=long_name)
    item.ext.enable('eo')
    item.ext.eo.bands = [band]

    # Data asset: the tif itself.
    item.add_asset(
        ITEM_TIF_IMAGE_NAME,
        pystac.Asset(href=tif_href,
                     media_type=pystac.MediaType.TIFF,
                     roles=['data'],
                     title="tif image"))

    return item
| 34.070707 | 92 | 0.602728 | import os
import pystac
from pystac.utils import str_to_datetime
import rasterio as rio
from shapely.geometry import box, mapping, shape
from stactools.cgls_lc100.constants import (
PROVIDER_NAME, ITEM_TIF_IMAGE_NAME, DISCRETE_CLASSIFICATION_CLASS_NAMES,
DISCRETE_CLASSIFICATION_CLASS_PALETTE)
def create_item(tif_href, additional_providers=None):
with rio.open(tif_href) as f:
tags = f.tags()
band_tags = f.tags(1)
bounds = f.bounds
item_id = os.path.basename(tif_href).replace('.tif', '')
geom = mapping(box(bounds.left, bounds.bottom, bounds.right, bounds.top))
bounds = shape(geom).bounds
start_dt = str_to_datetime(tags.pop('time_coverage_start'))
end_dt = str_to_datetime(tags.pop('time_coverage_end'))
file_creation_dt = str_to_datetime(tags.pop('file_creation'))
item = pystac.Item(id=item_id,
geometry=geom,
bbox=bounds,
datetime=None,
properties={
'start_datetime':
start_dt,
'end_datetime':
end_dt,
'discrete_classification_class_names':
DISCRETE_CLASSIFICATION_CLASS_NAMES,
'discrete_classification_class_palette':
DISCRETE_CLASSIFICATION_CLASS_PALETTE
})
copernicus_provider = pystac.Provider(name=PROVIDER_NAME,
url=(tags.pop('doi')),
roles=['producer', 'licensor'])
item.common_metadata.providers = [copernicus_provider]
if additional_providers is not None:
item.common_metadata.providers.extend(additional_providers)
item.common_metadata.start_datetime = start_dt
item.common_metadata.end_datetime = end_dt
item.common_metadata.created = file_creation_dt
item.common_metadata.description = tags.pop('Info')
item.common_metadata.platform = tags.pop('platform')
item.common_metadata.title = tags.pop('title')
item.ext.enable('projection')
item.ext.projection.epsg = int(
tags.pop('delivered_product_crs').replace('WGS84 (EPSG:',
'').replace(')', ''))
for k, v in tags.items():
item.extra_fields[k] = v
long_name = band_tags.pop('long_name')
band = pystac.extensions.eo.Band.create(
name=long_name,
common_name=band_tags.pop('short_name'),
description=long_name)
item.ext.enable('eo')
item.ext.eo.bands = [band]
item.add_asset(
ITEM_TIF_IMAGE_NAME,
pystac.Asset(href=tif_href,
media_type=pystac.MediaType.TIFF,
roles=['data'],
title="tif image"))
return item
| true | true |
f71af958c9c9d5d07c4709793698bfeea578307a | 2,917 | py | Python | tests/unit/test_requester.py | HKLM/sync-connect-sdk | 16ec0fecd31042970ee29146011250a74f4742ae | [
"MIT"
] | null | null | null | tests/unit/test_requester.py | HKLM/sync-connect-sdk | 16ec0fecd31042970ee29146011250a74f4742ae | [
"MIT"
] | null | null | null | tests/unit/test_requester.py | HKLM/sync-connect-sdk | 16ec0fecd31042970ee29146011250a74f4742ae | [
"MIT"
] | null | null | null | import syncconnect
import responses
import unittest
class TestRequester(unittest.TestCase):
    """Tests that syncconnect.requester.call maps HTTP status codes to the
    matching syncconnect exception types (using `responses` to fake HTTP)."""

    EXPECTED = 'expected'
    URL = 'http://ford.url'

    def queue(self, status_code, **kwargs):
        """ queue fake responses with passed status code """
        if not kwargs:
            json = {'message': self.EXPECTED}
        else:
            json = kwargs
        responses.add('GET', self.URL, status=status_code, json=json)

    def check(self, exception):
        """Assert requester.call raises `exception` matching EXPECTED."""
        # Fixed: assertRaisesRegexp is a deprecated alias, removed in
        # Python 3.12 — use assertRaisesRegex.
        self.assertRaisesRegex(
            exception,
            self.EXPECTED,
            syncconnect.requester.call,
            'GET',
            self.URL)

    @responses.activate
    def test_user_agent(self):
        self.queue(200)
        syncconnect.requester.call('GET', self.URL)
        self.assertEqual(
            responses.calls[0].request.headers['User-Agent'],
            'fordpass-na/353 CFNetwork/1121.2.2 Darwin/19.3.0',
        )

    @responses.activate
    def test_oauth_error(self):
        self.queue(401, error_description='unauthorized')
        try:
            syncconnect.requester.call('GET', self.URL)
        except syncconnect.AuthenticationException as err:
            self.assertEqual(err.message, 'unauthorized')

    @responses.activate
    def test_unknown_error(self):
        self.queue(401, error_description='unknown error')
        try:
            syncconnect.requester.call('GET', self.URL)
        except syncconnect.AuthenticationException as err:
            self.assertEqual(err.message, 'unknown error')

    @responses.activate
    def test_400(self):
        self.queue(400)
        self.check(syncconnect.ValidationException)

    @responses.activate
    def test_401(self):
        self.queue(401)
        self.check(syncconnect.AuthenticationException)

    @responses.activate
    def test_403(self):
        self.queue(403)
        self.check(syncconnect.PermissionException)

    @responses.activate
    def test_404(self):
        self.queue(404)
        self.check(syncconnect.ResourceNotFoundException)

    @responses.activate
    def test_429(self):
        # Fixed: this test was defined twice; the second identical
        # definition silently shadowed the first and has been removed.
        self.queue(429)
        self.check(syncconnect.RateLimitingException)

    @responses.activate
    def test_500(self):
        self.queue(500)
        self.check(syncconnect.ServerException)

    @responses.activate
    def test_504(self):
        responses.add('GET', self.URL, status=504, json={
            'error': 'some error', 'message': self.EXPECTED})
        self.check(syncconnect.GatewayTimeoutException)

    @responses.activate
    def test_other(self):
        self.queue(503)
        with self.assertRaises(syncconnect.SyncException) as se:
            syncconnect.requester.call('GET', self.URL)
        # Fixed: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(se.exception.message, 'Unexpected error')
| 28.881188 | 71 | 0.637299 | import syncconnect
import responses
import unittest
class TestRequester(unittest.TestCase):
EXPECTED = 'expected'
URL = 'http://ford.url'
def queue(self, status_code, **kwargs):
if not kwargs:
json = {'message': self.EXPECTED}
else:
json = kwargs
responses.add('GET', self.URL, status=status_code, json=json)
def check(self, exception):
self.assertRaisesRegexp(
exception,
self.EXPECTED,
syncconnect.requester.call,
'GET',
self.URL)
@responses.activate
def test_user_agent(self):
self.queue(200)
syncconnect.requester.call('GET', self.URL)
self.assertEqual(
responses.calls[0].request.headers['User-Agent'],
'fordpass-na/353 CFNetwork/1121.2.2 Darwin/19.3.0',
)
@responses.activate
def test_oauth_error(self):
self.queue(401, error_description='unauthorized')
try:
syncconnect.requester.call('GET', self.URL)
except syncconnect.AuthenticationException as err:
self.assertEqual(err.message, 'unauthorized')
@responses.activate
def test_unknown_error(self):
self.queue(401, error_description='unknown error')
try:
syncconnect.requester.call('GET', self.URL)
except syncconnect.AuthenticationException as err:
self.assertEqual(err.message, 'unknown error')
@responses.activate
def test_400(self):
self.queue(400)
self.check(syncconnect.ValidationException)
@responses.activate
def test_401(self):
self.queue(401)
self.check(syncconnect.AuthenticationException)
@responses.activate
def test_403(self):
self.queue(403)
self.check(syncconnect.PermissionException)
@responses.activate
def test_404(self):
self.queue(404)
self.check(syncconnect.ResourceNotFoundException)
@responses.activate
def test_429(self):
self.queue(429)
self.check(syncconnect.RateLimitingException)
@responses.activate
def test_429(self):
self.queue(429)
self.check(syncconnect.RateLimitingException)
@responses.activate
def test_500(self):
self.queue(500)
self.check(syncconnect.ServerException)
@responses.activate
def test_504(self):
responses.add('GET', self.URL, status=504, json={
'error': 'some error', 'message': self.EXPECTED})
self.check(syncconnect.GatewayTimeoutException)
@responses.activate
def test_other(self):
self.queue(503)
with self.assertRaises(syncconnect.SyncException) as se:
syncconnect.requester.call('GET', self.URL)
self.assertEquals(se.exception.message, 'Unexpected error')
| true | true |
f71af9645244b8b41948a3f0545272ecb692549e | 2,122 | py | Python | lib/CollectionCheckerDIF.py | joser1945/cmr-metadata-review | df0bb24dd06f981af907569f1a97966753053a99 | [
"Apache-2.0"
] | 15 | 2018-06-26T19:58:44.000Z | 2022-03-01T21:19:34.000Z | lib/CollectionCheckerDIF.py | joser1945/cmr-metadata-review | df0bb24dd06f981af907569f1a97966753053a99 | [
"Apache-2.0"
] | 61 | 2018-06-27T15:15:41.000Z | 2022-03-08T15:39:32.000Z | lib/CollectionCheckerDIF.py | joser1945/cmr-metadata-review | df0bb24dd06f981af907569f1a97966753053a99 | [
"Apache-2.0"
] | 9 | 2019-01-22T15:48:48.000Z | 2021-10-01T18:38:30.000Z | '''
Copyright 2016, United States Government, as represented by the Administrator of
the National Aeronautics and Space Administration. All rights reserved.
The "pyCMR" platform is licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. You may obtain a
copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied. See the License for the specific language
governing permissions and limitations under the License.
'''
import json
import sys
from CheckerDIF import checkerRules
from CSVDIF import DIFOutputCSV
from JsonDIF import DIFOutputJSON
class Checker():
    """Facade that runs DIF metadata checks and renders CSV/JSON output."""

    def __init__(self):
        self.checkerRules = checkerRules()
        self.DIFOutputCSV = DIFOutputCSV(self.checkerRules, self.wrap)
        self.DIFOutputJSON = DIFOutputJSON(self.checkerRules, self.wrap)

    def getItemList(self, items, keys):
        """Collect every value found at the nested key path `keys`.

        `items` may be a single dict or a list of dicts; `keys` is a list of
        successive dictionary keys.  Items missing a key along the path
        contribute a None placeholder.
        """
        if type(items) is not list:
            items = [items]
        if len(keys) == 0:
            return items
        results = []
        for item in items:
            # Fixed: dict.has_key() was removed in Python 3; the `in`
            # operator is equivalent and works on Python 2 as well.
            if keys[0] in item:
                results += self.getItemList(item[keys[0]], keys[1:])
            else:
                results += [None]
        return results

    def wrap(self, items, func, child):
        """Apply `func` to every value at the dotted path `child` and join
        the results with ';'.  Missing keys are passed to `func` as None."""
        keys = child.split('.')
        return ';'.join(func(item) for item in self.getItemList(items, keys))

    def checkAll(self, metadata):
        """Run all checks on `metadata` and return CSV-formatted results."""
        return self.DIFOutputCSV.checkAll(metadata)

    def checkAllJSON(self, metadata):
        """Run all checks on `metadata` and return JSON-ready results."""
        return self.DIFOutputJSON.checkAll(metadata)
# Script entry: run all DIF checks on the metadata file named by the first
# command-line argument and print the results as JSON on stdout.
x = Checker()
with open(sys.argv[1], 'r') as f:
    contents = f.read()
resultFields = x.checkAllJSON(contents)
print(json.dumps(resultFields))
| 33.15625 | 88 | 0.661169 |
import json
import sys
from CheckerDIF import checkerRules
from CSVDIF import DIFOutputCSV
from JsonDIF import DIFOutputJSON
class Checker():
def __init__(self):
self.checkerRules = checkerRules()
self.DIFOutputCSV = DIFOutputCSV(self.checkerRules,self.wrap)
self.DIFOutputJSON = DIFOutputJSON(self.checkerRules,self.wrap)
def getItemList(self, items, keys):
results = []
if type(items) is not list:
items = [items]
if len(keys) == 0:
return items
for item in items:
if item.has_key(keys[0]):
results += self.getItemList(item[keys[0]], keys[1:])
else:
results += [None]
return results
def wrap(self, items, func, child):
results = []
keys = child.split('.')
itemLst = self.getItemList(items, keys)
for item in itemLst:
results.append(func(item))
return ';'.join(results)
def checkAll(self, metadata):
return self.DIFOutputCSV.checkAll(metadata)
def checkAllJSON(self,metadata):
return self.DIFOutputJSON.checkAll(metadata)
x = Checker()
with open(sys.argv[1], 'r') as f:
contents = f.read()
resultFields = x.checkAllJSON(contents)
print(json.dumps(resultFields))
| true | true |
f71afa37b95ab3440c489490d28114d5823b2630 | 409 | py | Python | job_extract/jobs/impl/indeed_cursor.py | evbarnett/job_extract | dafa4d69a5daca83d337192617b244c89e4b5ae6 | [
"MIT"
] | null | null | null | job_extract/jobs/impl/indeed_cursor.py | evbarnett/job_extract | dafa4d69a5daca83d337192617b244c89e4b5ae6 | [
"MIT"
] | null | null | null | job_extract/jobs/impl/indeed_cursor.py | evbarnett/job_extract | dafa4d69a5daca83d337192617b244c89e4b5ae6 | [
"MIT"
] | null | null | null | class IndeedCursor(JobCursor):
    def __init__(self, title: str, location: str, radius: int = 25):
        """Build an Indeed job-search cursor for the given query.

        :param title: job title to search for.
        :param location: location string for the search.
        :param radius: search radius (currently unused — see note below).
        """
        # Base endpoint for Indeed's job-search page.
        base_url = "https://www.indeed.com/jobs?"
        self._title = title
        self._location = location
        # URL-encode the query pieces; safe='' also encodes '/'.
        title_esc = ul.quote(self._title, safe='')
        location_esc = ul.quote(self._location, safe='')
        # NOTE(review): `req_url` is built but never stored or requested, and
        # the `radius` parameter is ignored — this constructor looks
        # unfinished (see the TODO left by the author).
        req_url = base_url + "q={}&l={}".format(title_esc, location_esc)
        # TODO
def __init__(self, title: str, location: str, radius: int = 25):
base_url = "https://www.indeed.com/jobs?"
self._title = title
self._location = location
title_esc = ul.quote(self._title, safe='')
location_esc = ul.quote(self._location, safe='')
req_url = base_url + "q={}&l={}".format(title_esc, location_esc)
| true | true |
f71afacc656469a5f59aa99865a3ea05cae6a31d | 6,053 | py | Python | Test/Machine/rbm.py | tvieijra/netket | ef3ff32b242f25b6a6ae0f08db1aada85775a2ea | [
"Apache-2.0"
] | 10 | 2019-11-29T02:51:53.000Z | 2021-08-14T18:52:33.000Z | Test/Machine/rbm.py | tvieijra/netket | ef3ff32b242f25b6a6ae0f08db1aada85775a2ea | [
"Apache-2.0"
] | 2 | 2020-03-03T11:12:00.000Z | 2020-05-01T17:04:41.000Z | Test/Machine/rbm.py | tvieijra/netket | ef3ff32b242f25b6a6ae0f08db1aada85775a2ea | [
"Apache-2.0"
] | 6 | 2019-12-02T07:29:01.000Z | 2021-04-04T21:55:21.000Z | # Copyright 2019 The Simons Foundation, Inc. - All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netket
import numpy as _np
__all__ = ["PyRbm"]
class PyRbm(netket.machine.CxxMachine):
    """
    __Do not use me in production code!__

    A proof of concept implementation of a complex-valued RBM in pure Python.
    This is an example of how to subclass `CxxMachine` so that the machine will
    be usable with NetKet's C++ core.

    This class can be used as a drop-in replacement for `RbmSpin`.
    """

    def __init__(
        self, hilbert, alpha=None, use_visible_bias=True, use_hidden_bias=True
    ):
        r"""Constructs a new RBM.

        Args:
            hilbert: Hilbert space.
            alpha: `alpha * hilbert.size` is the number of hidden spins.
                NOTE(review): the default of ``None`` makes the ``alpha < 0``
                comparison below raise TypeError on Python 3, so callers must
                always pass an explicit ``alpha``.
            use_visible_bias: specifies whether to use a bias for visible
                spins.
            use_hidden_bias: specifies whether to use a bias for hidden spins.
        """
        # NOTE: The following call to __init__ is important!
        super(PyRbm, self).__init__(hilbert)

        n = hilbert.size
        if alpha < 0:
            raise ValueError("`alpha` should be non-negative")
        m = int(round(alpha * n))
        # Parameters are complex-valued and left *uninitialized* (np.empty);
        # assign them via the `parameters` property before use.
        self._w = _np.empty([m, n], dtype=_np.complex128)
        self._a = _np.empty(n, dtype=_np.complex128) if use_visible_bias else None
        self._b = _np.empty(m, dtype=_np.complex128) if use_hidden_bias else None

    def _number_parameters(self):
        r"""Returns the number of parameters in the machine. We just sum the
        sizes of all the tensors we hold.
        """
        return (
            self._w.size
            + (self._a.size if self._a is not None else 0)
            + (self._b.size if self._b is not None else 0)
        )

    def _number_visible(self):
        r"""Returns the number of visible units.
        """
        return self._w.shape[1]

    def _get_parameters(self):
        r"""Returns the parameters as a 1D tensor.

        This function tries to order parameters in the exact same way as
        ``RbmSpin`` does so that we can do stuff like

        >>> import netket
        >>> import numpy
        >>> hilbert = netket.hilbert.Spin(
                graph=netket.graph.Hypercube(length=100, n_dim=1),
                s=1/2.
            )
        >>> cxx_rbm = netket.machine.RbmSpin(hilbert, alpha=3)
        >>> py_rbm = netket.machine.PyRbm(hilbert, alpha=3)
        >>> cxx_rbm.init_random_parameters()
        >>> # Order of parameters is the same, so we can assign one to the
        >>> # other
        >>> py_rbm.parameters = cxx_rbm.parameters
        >>> x = np.array(hilbert.local_states, size=hilbert.size)
        >>> assert numpy.isclose(py_rbm.log_val(x), cxx_rbm.log_val(x))
        """
        # Order: visible bias `a`, hidden bias `b`, then row-major weights.
        params = tuple()
        if self._a is not None:
            params += (self._a,)
        if self._b is not None:
            params += (self._b,)
        params += (self._w.reshape(-1, order="C"),)
        return _np.concatenate(params)

    def _set_parameters(self, p):
        r"""Sets parameters from a 1D tensor.

        ``self._set_parameters(self._get_parameters())`` is an identity.
        """
        # Consume `p` in the same (a, b, w) order used by _get_parameters.
        i = 0
        if self._a is not None:
            self._a[:] = p[i : i + self._a.size]
            i += self._a.size
        if self._b is not None:
            self._b[:] = p[i : i + self._b.size]
            i += self._b.size

        self._w[:] = p[i : i + self._w.size].reshape(self._w.shape, order="C")

    def log_val(self, x):
        r"""Computes the logarithm of the wave function given a spin
        configuration ``x``.
        """
        # log psi(x) = a·x + sum_j log cosh((W x + b)_j), up to a constant.
        r = _np.dot(self._w, x)
        if self._b is not None:
            r += self._b
        r = _np.sum(PyRbm._log_cosh(r))
        if self._a is not None:
            r += _np.dot(self._a, x)
        # Officially, we should return
        #     self._w.shape[0] * 0.6931471805599453 + r
        # but the C++ implementation ignores the "constant factor"
        return r

    def der_log(self, x):
        r"""Computes the gradient of the logarithm of the wave function
        given a spin configuration ``x``.
        """
        # The gradient is laid out in the same (a, b, w) order as
        # _get_parameters/_set_parameters.
        grad = _np.empty(self.n_par, dtype=_np.complex128)
        i = 0

        if self._a is not None:
            # d log psi / d a = x
            grad[i : i + self._a.size] = x
            i += self._a.size

        tanh_stuff = _np.dot(self._w, x)
        if self._b is not None:
            tanh_stuff += self._b
        # d log psi / d b = tanh(W x + b), computed in place.
        tanh_stuff = _np.tanh(tanh_stuff, out=tanh_stuff)

        if self._b is not None:
            grad[i : i + self._b.size] = tanh_stuff
            i += self._b.size

        # d log psi / d W = tanh(W x + b) ⊗ x, written directly into `grad`.
        out = grad[i : i + self._w.size]
        out.shape = (tanh_stuff.size, x.size)
        _np.outer(tanh_stuff, x, out=out)

        return grad

    def _is_holomorphic(self):
        r"""Complex valued RBM a holomorphic function.
        """
        return True

    def save(self, filename):
        r"""Saves machine weights to ``filename`` using ``pickle``.
        """
        import pickle

        with open(filename, "wb") as output_file:
            pickle.dump((self._w, self._a, self._b), output_file)

    def load(self, filename):
        r"""Loads machine weights from ``filename`` using ``pickle``.

        WARNING: ``pickle.load`` can execute arbitrary code — only load
        files from a trusted source.
        """
        import pickle

        with open(filename, "rb") as input_file:
            self._w, self._a, self._b = pickle.load(input_file)

    @staticmethod
    def _log_cosh(x):
        # TODO: Handle big numbers properly — np.cosh overflows to inf for
        # large |Re(x)|, which makes this return inf instead of ~|Re(x)|.
        return _np.log(_np.cosh(x))
| 33.815642 | 82 | 0.587973 |
import netket
import numpy as _np
__all__ = ["PyRbm"]
class PyRbm(netket.machine.CxxMachine):
    r"""Pure-Python Restricted Boltzmann Machine with complex parameters.

    Mirrors the C++ ``RbmSpin`` machine: the flat parameter layout is
    ``(visible bias a, hidden bias b, weights w)`` with ``w`` in C order.
    """
    def __init__(
        self, hilbert, alpha=None, use_visible_bias=True, use_hidden_bias=True
    ):
        r"""Construct the RBM for ``hilbert``.

        ``alpha`` is the hidden/visible unit density.
        NOTE(review): the default ``alpha=None`` would make ``alpha < 0``
        raise a TypeError in Python 3 — presumably callers always pass a
        number; confirm.
        """
        super(PyRbm, self).__init__(hilbert)
        n = hilbert.size
        if alpha < 0:
            raise ValueError("`alpha` should be non-negative")
        # Number of hidden units.
        m = int(round(alpha * n))
        self._w = _np.empty([m, n], dtype=_np.complex128)
        self._a = _np.empty(n, dtype=_np.complex128) if use_visible_bias else None
        self._b = _np.empty(m, dtype=_np.complex128) if use_hidden_bias else None
    def _number_parameters(self):
        r"""Total number of variational parameters (weights plus biases)."""
        return (
            self._w.size
            + (self._a.size if self._a is not None else 0)
            + (self._b.size if self._b is not None else 0)
        )
    def _number_visible(self):
        r"""Number of visible units."""
        return self._w.shape[1]
    def _get_parameters(self):
        r"""Returns all parameters as one flat 1D vector: (a, b, w)."""
        params = tuple()
        if self._a is not None:
            params += (self._a,)
        if self._b is not None:
            params += (self._b,)
        params += (self._w.reshape(-1, order="C"),)
        return _np.concatenate(params)
    def _set_parameters(self, p):
        r"""Sets parameters from a 1D tensor; inverse of ``_get_parameters``."""
        i = 0
        if self._a is not None:
            self._a[:] = p[i : i + self._a.size]
            i += self._a.size
        if self._b is not None:
            self._b[:] = p[i : i + self._b.size]
            i += self._b.size
        self._w[:] = p[i : i + self._w.size].reshape(self._w.shape, order="C")
    def log_val(self, x):
        r"""Logarithm of the wave function for spin configuration ``x``."""
        r = _np.dot(self._w, x)
        if self._b is not None:
            r += self._b
        r = _np.sum(PyRbm._log_cosh(r))
        if self._a is not None:
            r += _np.dot(self._a, x)
        # The constant term m * log(2) is deliberately dropped, matching
        # the C++ implementation.
        return r
    def der_log(self, x):
        r"""Gradient of ``log_val`` w.r.t. all parameters, flat-packed."""
        grad = _np.empty(self.n_par, dtype=_np.complex128)
        i = 0
        if self._a is not None:
            # d(log psi)/d(a) = x
            grad[i : i + self._a.size] = x
            i += self._a.size
        tanh_stuff = _np.dot(self._w, x)
        if self._b is not None:
            tanh_stuff += self._b
        tanh_stuff = _np.tanh(tanh_stuff, out=tanh_stuff)
        if self._b is not None:
            # d(log psi)/d(b) = tanh(w x + b)
            grad[i : i + self._b.size] = tanh_stuff
            i += self._b.size
        # d(log psi)/d(w) = outer(tanh(w x + b), x), written in place into
        # the weight slice of ``grad`` viewed as a matrix.
        out = grad[i : i + self._w.size]
        out.shape = (tanh_stuff.size, x.size)
        _np.outer(tanh_stuff, x, out=out)
        return grad
    def _is_holomorphic(self):
        r"""The complex-valued RBM is a holomorphic function."""
        return True
    def save(self, filename):
        r"""Saves machine weights ``(w, a, b)`` to ``filename`` via pickle."""
        import pickle
        with open(filename, "wb") as output_file:
            pickle.dump((self._w, self._a, self._b), output_file)
    def load(self, filename):
        r"""Loads machine weights ``(w, a, b)`` from ``filename`` via pickle."""
        import pickle
        with open(filename, "rb") as input_file:
            self._w, self._a, self._b = pickle.load(input_file)
    @staticmethod
    def _log_cosh(x):
        # NOTE(review): plain log(cosh(x)) overflows for large |x|.
        return _np.log(_np.cosh(x))
| true | true |
f71afb68427e72653aff0696997abed27acca654 | 2,058 | py | Python | aioradio/jira.py | nrccua/aioradio | 2437c2a7fcad81c2e410002c685f587df2fcd76c | [
"MIT"
] | 9 | 2021-01-04T13:13:03.000Z | 2021-04-30T18:41:08.000Z | aioradio/jira.py | nrccua/aioradio | 2437c2a7fcad81c2e410002c685f587df2fcd76c | [
"MIT"
] | null | null | null | aioradio/jira.py | nrccua/aioradio | 2437c2a7fcad81c2e410002c685f587df2fcd76c | [
"MIT"
] | null | null | null | """Generic functions related to Jira."""
from typing import Any, Dict
import httpx
async def post_jira_issue(url: str, jira_user: str, jira_token: str, payload: Dict[str, Any]) -> Dict[str, Any]:
    """Post payload to create jira issue.
    Args:
        url (str): url of the Jira REST endpoint
        jira_user (str): jira username
        jira_token (str): jira token
        payload (Dict[str, Any]): jira payload describing ticket info
    Returns:
        the raw ``httpx`` response object of the POST request
        (NOTE: the declared ``Dict[str, Any]`` return type is inaccurate;
        callers receive the response, not its parsed JSON body)
    """
    headers = {'Content-Type': 'application/json'}
    # Basic auth with the Jira user/token pair.
    auth = (jira_user, jira_token)
    async with httpx.AsyncClient() as client:
        return await client.post(url=url, json=payload, auth=auth, headers=headers)
async def get_jira_issue(url: str, jira_user: str, jira_token: str) -> Dict[str, Any]:
    """Get Jira issue using jira_link built with the expected jira_id, an
    example: https://nrccua.atlassian.net/rest/api/2/issue/<jira_id>.
    Args:
        url (str): url of the issue endpoint
        jira_user (str): jira username
        jira_token (str): jira token
    Returns:
        the raw ``httpx`` response object of the GET request
        (NOTE: the declared ``Dict[str, Any]`` return type is inaccurate;
        callers receive the response, not its parsed JSON body)
    """
    headers = {'Content-Type': 'application/json'}
    # Basic auth with the Jira user/token pair.
    auth = (jira_user, jira_token)
    async with httpx.AsyncClient() as client:
        return await client.get(url=url, auth=auth, headers=headers)
async def add_comment_to_jira(url: str, jira_user: str, jira_token: str, comment: str) -> Dict[str, Any]:
    """Add a comment to an existing Jira issue.

    Args:
        url (str): comment endpoint of the issue, must end in ``comment``
        jira_user (str): jira username
        jira_token (str): jira token
        comment (str): comment text to add to the ticket

    Raises:
        ValueError: if ``url`` does not end with ``comment``

    Returns:
        the response of the underlying POST request
    """
    if not url.endswith('comment'):
        msg = 'Check url value! Good example is https://nrccua.atlassian.net/rest/api/2/issue/<jira_id>/comment'
        raise ValueError(msg)
    payload = {'body': comment}
    return await post_jira_issue(
        url=url, payload=payload, jira_user=jira_user, jira_token=jira_token)
| 30.264706 | 112 | 0.651118 |
from typing import Any, Dict
import httpx
async def post_jira_issue(url: str, jira_user: str, jira_token: str, payload: Dict[str, Any]) -> Dict[str, Any]:
    """Create a Jira issue by POSTing ``payload`` to ``url`` with basic auth."""
    async with httpx.AsyncClient() as client:
        return await client.post(
            url=url,
            json=payload,
            auth=(jira_user, jira_token),
            headers={'Content-Type': 'application/json'},
        )
async def get_jira_issue(url: str, jira_user: str, jira_token: str) -> Dict[str, Any]:
    """Fetch a Jira issue from ``url`` (.../rest/api/2/issue/<jira_id>)."""
    async with httpx.AsyncClient() as client:
        return await client.get(
            url=url,
            auth=(jira_user, jira_token),
            headers={'Content-Type': 'application/json'},
        )
async def add_comment_to_jira(url: str, jira_user: str, jira_token: str, comment: str) -> Dict[str, Any]:
    """Add ``comment`` to an existing Jira issue via its ``/comment`` endpoint.

    Raises ValueError if ``url`` is not a comment endpoint.
    """
    if not url.endswith('comment'):
        msg = 'Check url value! Good example is https://nrccua.atlassian.net/rest/api/2/issue/<jira_id>/comment'
        raise ValueError(msg)
    payload = {'body': comment}
    return await post_jira_issue(
        url=url, payload=payload, jira_user=jira_user, jira_token=jira_token)
| true | true |
f71afbd2d238f05fc62377a102459dbebc72d784 | 2,830 | py | Python | rdmo/core/management/commands/make_theme.py | m6121/rdmo | db3990c7525138c6ce9634fc3e5b6b8ee9b915c8 | [
"Apache-2.0"
] | 77 | 2016-08-09T11:40:20.000Z | 2022-03-06T11:03:26.000Z | rdmo/core/management/commands/make_theme.py | m6121/rdmo | db3990c7525138c6ce9634fc3e5b6b8ee9b915c8 | [
"Apache-2.0"
] | 377 | 2016-07-01T13:59:36.000Z | 2022-03-30T13:53:19.000Z | rdmo/core/management/commands/make_theme.py | m6121/rdmo | db3990c7525138c6ce9634fc3e5b6b8ee9b915c8 | [
"Apache-2.0"
] | 47 | 2016-06-23T11:32:19.000Z | 2022-03-01T11:34:37.000Z | from shutil import copyfile
from pathlib import Path
from django.apps import apps
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that scaffolds a local RDMO theme app.

    Copies themable static files/templates out of the installed ``rdmo``
    package into a new theme module and enables that module in
    ``config/settings/local.py``.
    """
    def setup(self, options):
        """Cache the paths derived from the command line options."""
        self.theme_name = options['name']
        self.theme_path = Path(options['name'])
        # Root of the installed rdmo package; files are copied from here.
        self.rdmo_path = Path(apps.get_app_config('rdmo').path)
        self.local_path = Path().cwd() / 'config' / 'settings' / 'local.py'
    def copy(self, path):
        """Copy one file from the rdmo package into the theme; never overwrite."""
        source_path = self.rdmo_path / path
        # Drop the leading app directory (e.g. 'core') from the target path.
        target_path = self.theme_path / Path(*path.parts[1:])
        if target_path.exists():
            print('Skip {} -> {}. Target file exists.'.format(source_path, target_path))
        else:
            print('Copy {} -> {}.'.format(source_path, target_path))
            target_path.parent.mkdir(parents=True, exist_ok=True)
            copyfile(source_path, target_path)
    def enable_theme(self):
        """Ensure local.py prepends the theme app to INSTALLED_APPS.

        Uncomments an existing commented-out settings line if present,
        otherwise appends it; does nothing if it is already active.
        """
        settings_line = 'INSTALLED_APPS = [\'{}\'] + INSTALLED_APPS'.format(self.theme_name)
        replaced = False
        local_settings = self.local_path.read_text().splitlines()
        for i, line in enumerate(local_settings):
            if line == settings_line:
                # return if the line is already there
                return
            if line == '# ' + settings_line:
                local_settings[i] = settings_line
                replaced = True
        if not replaced:
            local_settings.append('')
            local_settings.append(settings_line)
            local_settings.append('')
        self.local_path.write_text('\n'.join(local_settings))
    def add_arguments(self, parser):
        """Register the --name and --file command line options."""
        parser.add_argument('--name', action='store', default='rdmo_theme', help='Module name for the theme.')
        parser.add_argument('--file', action='store', help='Copy specific file/template, e.g. core/static/css/variables.scss.')
    def handle(self, *args, **options):
        """Entry point: copy one file, or scaffold the full theme skeleton."""
        self.setup(options)
        if options['file']:
            self.copy(Path(options['file']))
        else:
            self.theme_path.mkdir(exist_ok=True)
            self.theme_path.joinpath('__init__.py').touch()
            self.theme_path.joinpath('locale').mkdir(exist_ok=True)
            self.copy(Path('core') / 'static' / 'core' / 'css' / 'variables.scss')
            # Copy the per-language text templates for every configured language.
            for language, language_string in settings.LANGUAGES:
                self.copy(Path('core') / 'templates' / 'core' / 'home_text_{}.html'.format(language))
                self.copy(Path('core') / 'templates' / 'core' / 'about_text_{}.html'.format(language))
                self.copy(Path('core') / 'templates' / 'core' / 'footer_text_{}.html'.format(language))
        print('Enable theme by adding the necessary config line.')
        self.enable_theme()
        print('Done')
| 37.733333 | 127 | 0.608834 | from shutil import copyfile
from pathlib import Path
from django.apps import apps
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Management command that scaffolds a local RDMO theme app.

    Copies themable static files/templates out of the installed ``rdmo``
    package into a new theme module and enables that module in
    ``config/settings/local.py``.
    """
    def setup(self, options):
        """Cache the paths derived from the command line options."""
        self.theme_name = options['name']
        self.theme_path = Path(options['name'])
        # Root of the installed rdmo package; files are copied from here.
        self.rdmo_path = Path(apps.get_app_config('rdmo').path)
        self.local_path = Path().cwd() / 'config' / 'settings' / 'local.py'
    def copy(self, path):
        """Copy one file from the rdmo package into the theme; never overwrite."""
        source_path = self.rdmo_path / path
        # Drop the leading app directory (e.g. 'core') from the target path.
        target_path = self.theme_path / Path(*path.parts[1:])
        if target_path.exists():
            print('Skip {} -> {}. Target file exists.'.format(source_path, target_path))
        else:
            print('Copy {} -> {}.'.format(source_path, target_path))
            target_path.parent.mkdir(parents=True, exist_ok=True)
            copyfile(source_path, target_path)
    def enable_theme(self):
        """Ensure local.py prepends the theme app to INSTALLED_APPS.

        Uncomments an existing commented-out settings line if present,
        otherwise appends it; returns early if it is already active.
        """
        settings_line = 'INSTALLED_APPS = [\'{}\'] + INSTALLED_APPS'.format(self.theme_name)
        replaced = False
        local_settings = self.local_path.read_text().splitlines()
        for i, line in enumerate(local_settings):
            if line == settings_line:
                # Line is already active: nothing to do.
                return
            if line == '# ' + settings_line:
                local_settings[i] = settings_line
                replaced = True
        if not replaced:
            local_settings.append('')
            local_settings.append(settings_line)
            local_settings.append('')
        self.local_path.write_text('\n'.join(local_settings))
    def add_arguments(self, parser):
        """Register the --name and --file command line options."""
        parser.add_argument('--name', action='store', default='rdmo_theme', help='Module name for the theme.')
        parser.add_argument('--file', action='store', help='Copy specific file/template, e.g. core/static/css/variables.scss.')
    def handle(self, *args, **options):
        """Entry point: copy one file, or scaffold the full theme skeleton."""
        self.setup(options)
        if options['file']:
            self.copy(Path(options['file']))
        else:
            self.theme_path.mkdir(exist_ok=True)
            self.theme_path.joinpath('__init__.py').touch()
            self.theme_path.joinpath('locale').mkdir(exist_ok=True)
            self.copy(Path('core') / 'static' / 'core' / 'css' / 'variables.scss')
            # Copy the per-language text templates for every configured language.
            for language, language_string in settings.LANGUAGES:
                self.copy(Path('core') / 'templates' / 'core' / 'home_text_{}.html'.format(language))
                self.copy(Path('core') / 'templates' / 'core' / 'about_text_{}.html'.format(language))
                self.copy(Path('core') / 'templates' / 'core' / 'footer_text_{}.html'.format(language))
        print('Enable theme by adding the necessary config line.')
        self.enable_theme()
        print('Done')
| true | true |
f71afbdf559edc30ab16fc96de12394ee0fbf228 | 1,115 | py | Python | boards/tests/test_view_boards.py | pydjdev78/abc-for-app | a7b9852f1e51f2e901fe00092931a1e8a2bca913 | [
"MIT"
] | 4 | 2018-12-25T13:56:18.000Z | 2019-12-22T16:04:50.000Z | boards/tests/test_view_boards.py | bvermeulen/Django | e4ef21c2f1fb7d026207c25bd443252c6df354bf | [
"MIT"
] | 15 | 2019-12-10T06:22:19.000Z | 2022-03-11T23:46:49.000Z | boards/tests/test_view_boards.py | pydjdev78/abc-for-app | a7b9852f1e51f2e901fe00092931a1e8a2bca913 | [
"MIT"
] | 2 | 2021-02-16T18:52:19.000Z | 2021-03-30T16:40:46.000Z | from django.contrib.auth.models import User
from django.urls import reverse, resolve
from django.test import TestCase
from ..views import BoardListView
from ..models import Board
class BoardsTests(TestCase):
    """View tests for the board list page ('/boards/')."""
    def setUp(self):
        """Create a logged-in user and one board, then fetch the page."""
        username = 'joe'
        password = '123'
        _ = User.objects.create_user(username=username,
                                     email='jane@doe.com', password=password)
        self.client.login(username=username, password=password)
        self.board = Board.objects.create(name='Django', description='Django board')
        url = reverse('boards')
        self.response = self.client.get(url)
    def test_boards_view_status_code(self):
        # The board list should render for an authenticated user.
        self.assertEqual(self.response.status_code, 200)
    def test_boards_url_resolves_boards_view(self):
        # '/boards/' must be routed to the class-based BoardListView.
        view = resolve('/boards/')
        self.assertEqual(view.func.view_class, BoardListView)
    def test_boards_view_contains_link_to_topics_page(self):
        # The page should link to the topics page of the created board.
        board_topics_url = reverse('board_topics', kwargs={'board_pk': self.board.pk})
        self.assertContains(self.response, f'href="{board_topics_url}"')
| 38.448276 | 86 | 0.69148 | from django.contrib.auth.models import User
from django.urls import reverse, resolve
from django.test import TestCase
from ..views import BoardListView
from ..models import Board
class BoardsTests(TestCase):
    """View tests for the board list page ('/boards/')."""
    def setUp(self):
        """Create a logged-in user and one board, then fetch the page."""
        username = 'joe'
        password = '123'
        _ = User.objects.create_user(username=username,
                                     email='jane@doe.com', password=password)
        self.client.login(username=username, password=password)
        self.board = Board.objects.create(name='Django', description='Django board')
        url = reverse('boards')
        self.response = self.client.get(url)
    def test_boards_view_status_code(self):
        # The board list should render for an authenticated user.
        self.assertEqual(self.response.status_code, 200)
    def test_boards_url_resolves_boards_view(self):
        # '/boards/' must be routed to the class-based BoardListView.
        view = resolve('/boards/')
        self.assertEqual(view.func.view_class, BoardListView)
    def test_boards_view_contains_link_to_topics_page(self):
        # The page should link to the topics page of the created board.
        board_topics_url = reverse('board_topics', kwargs={'board_pk': self.board.pk})
        self.assertContains(self.response, f'href="{board_topics_url}"')
| true | true |
f71afc8fa1cfc528ee34eb6345a7ce015abf36d4 | 10,281 | py | Python | test/test_praat.py | hadware/pympi | f7ee43dff7e809395bd097849d2e7bc6e602096b | [
"MIT"
] | 1 | 2019-11-09T20:33:14.000Z | 2019-11-09T20:33:14.000Z | test/test_praat.py | git-em/pympi | ad5e52b15979b09ea43df5e25dcf1c5b280e99fb | [
"MIT"
] | null | null | null | test/test_praat.py | git-em/pympi | ad5e52b15979b09ea43df5e25dcf1c5b280e99fb | [
"MIT"
] | null | null | null | #!/bin/env python
# -*- coding: utf-8 -*-
import unittest
import tempfile
import os
from pympi.Praat import TextGrid
class PraatTest(unittest.TestCase):
    """Unit tests for pympi's Praat ``TextGrid`` and ``Tier`` classes."""
    def setUp(self):
        # Fresh 0..20s TextGrid per test.
        self.tg = TextGrid(xmax=20)
        self.maxdiff = None
    # Test all the Praat.TextGrid functions
    def test_sort_tiers(self):
        self.tg.add_tier('t2')
        self.tg.add_tier('t1')
        self.tg.add_tier('t3')
        self.tg.add_tier('t6')
        self.tg.add_tier('t4')
        self.tg.add_tier('t5')
        tiernames = ['t1', 't2', 't3', 't4', 't5', 't6']
        # Default sort is alphabetical by name.
        self.tg.sort_tiers()
        self.assertEqual([a[1] for a in self.tg.get_tier_name_num()],
                         tiernames)
        # A custom key function can impose any order.
        self.tg.sort_tiers(lambda x: list(reversed(tiernames)).index(x.name))
        self.assertEqual([a[1] for a in self.tg.get_tier_name_num()],
                         list(reversed(tiernames)))
    def test_add_tier(self):
        # Out-of-range tier numbers are rejected.
        self.assertRaises(ValueError, self.tg.add_tier, 'a', number=-1)
        self.assertRaises(ValueError, self.tg.add_tier, 'a', number=10)
        self.tg.add_tier('tier1')
        self.assertEqual(len(self.tg.tiers), 1)
        # IntervalTier is the default tier type.
        self.assertEqual(self.tg.tiers[0].tier_type, 'IntervalTier')
        self.tg.add_tier('tier2', tier_type='TextTier')
        self.assertEqual(len(self.tg.tiers), 2)
        self.assertEqual(self.tg.tiers[1].tier_type, 'TextTier')
        self.tg.add_tier('tier3')
        self.assertEqual(len(self.tg.tiers), 3)
        self.assertEqual(['tier1', 'tier2', 'tier3'],
                         [a.name for a in self.tg.tiers])
        self.tg.add_tier('tier4', number=2)
        self.assertEqual(len(self.tg.tiers), 4)
        self.assertEqual(4, len(self.tg.tiers))
    def test_remove_tier(self):
        self.assertRaises(Exception, self.tg.remove_tier, -1)
        self.assertRaises(Exception, self.tg.remove_tier, 10)
        self.tg.add_tier('tier1')
        self.tg.add_tier('tier2')
        self.tg.add_tier('tier3')
        self.tg.add_tier('tier4', number=2)
        # Tiers can be removed by 1-based number or by name.
        self.tg.remove_tier(3)
        self.assertEqual(len(self.tg.tiers), 3)
        self.assertEqual(['tier1', 'tier3', 'tier4'],
                         sorted(a.name for a in self.tg.tiers))
        self.tg.remove_tier('tier1')
        self.assertEqual(len(self.tg.tiers), 2)
        self.assertEqual(['tier3', 'tier4'],
                         sorted(a.name for a in self.tg.tiers))
        self.tg.remove_tier(2)
        self.assertEqual(len(self.tg.tiers), 1)
        self.assertEqual(['tier4'], [a.name for a in self.tg.tiers])
        self.tg.remove_tier('tier4')
        self.assertTrue(not self.tg.tiers)
    def test_get_tier(self):
        self.assertRaises(Exception, self.tg.get_tier, -1)
        self.assertRaises(Exception, self.tg.get_tier, 'a')
        self.assertRaises(Exception, self.tg.get_tier, 10)
        tier1 = self.tg.add_tier('tier1')
        tier2 = self.tg.add_tier('tier2')
        tier3 = self.tg.add_tier('tier3')
        self.assertEqual(tier1, self.tg.get_tier(tier1.name))
        self.assertEqual(tier3, self.tg.get_tier(tier3.name))
        self.assertEqual(self.tg.tiers[1], self.tg.get_tier(tier2.name))
    def test_change_tier_name(self):
        self.assertRaises(Exception,
                          self.tg.change_tier_name, -1, 'b')
        self.assertRaises(Exception,
                          self.tg.change_tier_name, 'a', 'b')
        self.assertRaises(Exception,
                          self.tg.change_tier_name, 10, 'b')
        self.tg.add_tier('tier1')
        tier2 = self.tg.add_tier('tier2')
        self.tg.add_tier('tier3')
        # Rename by name and by 1-based number.
        self.tg.change_tier_name('tier1', 'tier1a')
        self.assertEqual(['tier1a', 'tier2', 'tier3'],
                         [a.name for a in self.tg.tiers])
        self.tg.change_tier_name(self.tg.tiers.index(tier2)+1, 'tier2a')
        self.assertEqual(['tier1a', 'tier2a', 'tier3'],
                         [a.name for a in self.tg.tiers])
        self.tg.change_tier_name('tier1a', 'tier1')
        self.assertEqual(['tier1', 'tier2a', 'tier3'],
                         [a.name for a in self.tg.tiers])
    def test_get_tiers(self):
        self.tg.add_tier('tier1')
        self.tg.add_tier('tier2')
        self.tg.add_tier('tier3')
        self.assertEqual(self.tg.tiers,
                         list(self.tg.get_tiers()))
    def test_get_tier_name_num(self):
        self.tg.add_tier('tier1')
        self.tg.add_tier('tier2')
        self.tg.add_tier('tier3', number=2)
        self.assertEqual([(1, 'tier1'), (2, 'tier3'), (3, 'tier2')],
                         list(self.tg.get_tier_name_num()))
    def test_to_file(self):
        # Round-trip the TextGrid through every writer mode and codec.
        for codec in ['utf-8', 'latin_1', 'mac_roman']:
            self.tg = TextGrid(xmax=20)
            tier1 = self.tg.add_tier('tier')
            tier1.add_interval(1, 2, 'i1')
            tier1.add_interval(2, 3, 'i2')
            tier1.add_interval(4, 5, 'i3')
            tier4 = self.tg.add_tier('tier')
            tier4.add_interval(1, 2, u'i1ü')
            tier4.add_interval(2.0, 3, 'i2')
            tier4.add_interval(4, 5.0, 'i3')
            tier2 = self.tg.add_tier('tier2', tier_type='TextTier')
            tier2.add_point(1, u'p1ü')
            tier2.add_point(2, 'p1')
            tier2.add_point(3, 'p1')
            tempf = tempfile.mkstemp()[1]
            # Normal mode
            self.tg.to_file(tempf, codec=codec)
            TextGrid(tempf, codec=codec)
            # Short mode
            self.tg.to_file(tempf, codec=codec, mode='s')
            TextGrid(tempf, codec=codec)
            # Binary mode
            self.tg.to_file(tempf, mode='b')
            TextGrid(tempf)
            os.remove(tempf)
    def test_to_eaf(self):
        # Conversion to ELAN .eaf: points become small intervals.
        tier1 = self.tg.add_tier('tier1')
        tier2 = self.tg.add_tier('tier2', tier_type='TextTier')
        tier1.add_interval(0, 1, 'int1')
        tier1.add_interval(2, 3, 'int2')
        tier1.add_interval(5, 6, 'int3')
        tier2.add_point(1.5, 'point1')
        tier2.add_point(2.5, 'point2')
        tier2.add_point(3.5, 'point3')
        eaf = self.tg.to_eaf(True, 0.03)
        self.assertRaises(ValueError, self.tg.to_eaf, pointlength=-1)
        self.assertEqual(sorted(eaf.get_tier_names()),
                         sorted(['default', 'tier1', 'tier2']))
        self.assertEqual(sorted(eaf.get_annotation_data_for_tier('tier1')),
                         sorted([(0, 1000, 'int1'), (5000, 6000, 'int3'),
                                 (2000, 3000, 'int2')]))
        self.assertEqual(sorted(eaf.get_annotation_data_for_tier('tier2')),
                         sorted([(2500, 2530, 'point2'),
                                 (1500, 1530, 'point1'),
                                 (3500, 3530, 'point3')]))
    # Test all the Praat.Tier functions
    def setup_tier(self):
        # Helper (not a test): one interval tier and one point tier.
        self.tier1 = self.tg.add_tier('tier1')
        self.tier2 = self.tg.add_tier('tier2', tier_type='TextTier')
    def test_add_point(self):
        self.setup_tier()
        # Points are only valid on TextTiers.
        self.assertRaises(Exception, self.tier1.add_point, 5, 'a')
        self.tier2.add_point(5, 't')
        self.assertEqual([(5, 't')], self.tier2.intervals)
        # Duplicate timestamps are rejected unless check is disabled.
        self.assertRaises(Exception, self.tier2.add_point, 5, 'a')
        self.tier2.add_point(6, 'a')
        self.assertEqual([(5, 't'), (6, 'a')], self.tier2.intervals)
        self.tier2.add_point(5, 'a', False)
    def test_add_interval(self):
        self.setup_tier()
        # Intervals are only valid on IntervalTiers and must have xmin < xmax.
        self.assertRaises(Exception,
                          self.tier2.add_interval, 5, 6, 'a')
        self.assertRaises(Exception, self.tier2.add_interval, 6, 5, 'a')
        self.tier1.add_interval(5, 6, 't')
        self.assertEqual([(5, 6, 't')], self.tier1.intervals)
        # Overlapping intervals are rejected unless check is disabled.
        self.assertRaises(Exception, self.tier1.add_interval, 5.5, 6.5, 't')
        self.tier1.add_interval(6, 7, 'a')
        self.assertEqual([(5, 6, 't'), (6, 7, 'a')], self.tier1.intervals)
        self.tier1.add_interval(5.5, 6.5, 't', False)
    def test_remove_interval(self):
        self.setup_tier()
        self.assertRaises(Exception, self.tier2.remove_interval, 5)
        self.tier1.add_interval(5, 6, 'a')
        self.tier1.add_interval(6, 7, 'b')
        self.tier1.add_interval(7, 8, 'c')
        # Removal is by a time point inside the interval.
        self.tier1.remove_interval(5.5)
        self.assertEqual([(6, 7, 'b'), (7, 8, 'c')],
                         self.tier1.intervals)
        self.tier1.remove_interval(8)
        self.assertEqual([(6, 7, 'b')],
                         self.tier1.intervals)
        # Removing at a time with no interval is a no-op.
        self.tier1.remove_interval(8)
        self.assertEqual([(6, 7, 'b')],
                         self.tier1.intervals)
    def test_remove_point(self):
        self.setup_tier()
        self.assertRaises(Exception, self.tier1.remove_point, 5)
        self.tier2.add_point(5, 'a')
        self.tier2.add_point(6, 'b')
        self.tier2.add_point(7, 'c')
        self.tier2.remove_point(5)
        self.assertEqual([(6, 'b'), (7, 'c')],
                         self.tier2.intervals)
        self.tier2.remove_point(7)
        self.assertEqual([(6, 'b')],
                         self.tier2.intervals)
        # Removing a non-existent point is a no-op.
        self.tier2.remove_point(7)
        self.assertEqual([(6, 'b')],
                         self.tier2.intervals)
    def test_get_intervals(self):
        self.setup_tier()
        self.tier1.add_interval(5, 6, 'a')
        self.tier1.add_interval(7, 8, 'c')
        self.tier1.add_interval(6, 7, 'b')
        self.assertEqual([(5, 6, 'a'), (6, 7, 'b'), (7, 8, 'c')],
                         sorted(self.tier1.get_intervals()))
        self.tier2.add_point(5, 'a')
        self.tier2.add_point(7, 'c')
        self.tier2.add_point(6, 'b')
        self.assertEqual([(5, 'a'), (6, 'b'), (7, 'c')],
                         sorted(self.tier2.get_intervals()))
    def test_clear_intervals(self):
        self.setup_tier()
        self.tier1.add_interval(5, 6, 'a')
        self.tier1.add_interval(6, 7, 'b')
        self.tier1.add_interval(7, 8, 'c')
        self.tier1.clear_intervals()
        self.assertEqual([], self.tier1.intervals)
        self.tier2.add_point(5, 'a')
        self.tier2.add_point(6, 'b')
        self.tier2.add_point(7, 'c')
        self.tier2.clear_intervals()
        self.assertEqual([], self.tier2.intervals)
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 37.797794 | 77 | 0.563661 |
import unittest
import tempfile
import os
from pympi.Praat import TextGrid
class PraatTest(unittest.TestCase):
    """Unit tests for pympi's Praat ``TextGrid`` and ``Tier`` classes."""
    def setUp(self):
        # Fresh 0..20s TextGrid per test.
        self.tg = TextGrid(xmax=20)
        self.maxdiff = None
    # --- Praat.TextGrid functions ---
    def test_sort_tiers(self):
        self.tg.add_tier('t2')
        self.tg.add_tier('t1')
        self.tg.add_tier('t3')
        self.tg.add_tier('t6')
        self.tg.add_tier('t4')
        self.tg.add_tier('t5')
        tiernames = ['t1', 't2', 't3', 't4', 't5', 't6']
        # Default sort is alphabetical by name.
        self.tg.sort_tiers()
        self.assertEqual([a[1] for a in self.tg.get_tier_name_num()],
                         tiernames)
        # A custom key function can impose any order.
        self.tg.sort_tiers(lambda x: list(reversed(tiernames)).index(x.name))
        self.assertEqual([a[1] for a in self.tg.get_tier_name_num()],
                         list(reversed(tiernames)))
    def test_add_tier(self):
        # Out-of-range tier numbers are rejected.
        self.assertRaises(ValueError, self.tg.add_tier, 'a', number=-1)
        self.assertRaises(ValueError, self.tg.add_tier, 'a', number=10)
        self.tg.add_tier('tier1')
        self.assertEqual(len(self.tg.tiers), 1)
        # IntervalTier is the default tier type.
        self.assertEqual(self.tg.tiers[0].tier_type, 'IntervalTier')
        self.tg.add_tier('tier2', tier_type='TextTier')
        self.assertEqual(len(self.tg.tiers), 2)
        self.assertEqual(self.tg.tiers[1].tier_type, 'TextTier')
        self.tg.add_tier('tier3')
        self.assertEqual(len(self.tg.tiers), 3)
        self.assertEqual(['tier1', 'tier2', 'tier3'],
                         [a.name for a in self.tg.tiers])
        self.tg.add_tier('tier4', number=2)
        self.assertEqual(len(self.tg.tiers), 4)
        self.assertEqual(4, len(self.tg.tiers))
    def test_remove_tier(self):
        self.assertRaises(Exception, self.tg.remove_tier, -1)
        self.assertRaises(Exception, self.tg.remove_tier, 10)
        self.tg.add_tier('tier1')
        self.tg.add_tier('tier2')
        self.tg.add_tier('tier3')
        self.tg.add_tier('tier4', number=2)
        # Tiers can be removed by 1-based number or by name.
        self.tg.remove_tier(3)
        self.assertEqual(len(self.tg.tiers), 3)
        self.assertEqual(['tier1', 'tier3', 'tier4'],
                         sorted(a.name for a in self.tg.tiers))
        self.tg.remove_tier('tier1')
        self.assertEqual(len(self.tg.tiers), 2)
        self.assertEqual(['tier3', 'tier4'],
                         sorted(a.name for a in self.tg.tiers))
        self.tg.remove_tier(2)
        self.assertEqual(len(self.tg.tiers), 1)
        self.assertEqual(['tier4'], [a.name for a in self.tg.tiers])
        self.tg.remove_tier('tier4')
        self.assertTrue(not self.tg.tiers)
    def test_get_tier(self):
        self.assertRaises(Exception, self.tg.get_tier, -1)
        self.assertRaises(Exception, self.tg.get_tier, 'a')
        self.assertRaises(Exception, self.tg.get_tier, 10)
        tier1 = self.tg.add_tier('tier1')
        tier2 = self.tg.add_tier('tier2')
        tier3 = self.tg.add_tier('tier3')
        self.assertEqual(tier1, self.tg.get_tier(tier1.name))
        self.assertEqual(tier3, self.tg.get_tier(tier3.name))
        self.assertEqual(self.tg.tiers[1], self.tg.get_tier(tier2.name))
    def test_change_tier_name(self):
        self.assertRaises(Exception,
                          self.tg.change_tier_name, -1, 'b')
        self.assertRaises(Exception,
                          self.tg.change_tier_name, 'a', 'b')
        self.assertRaises(Exception,
                          self.tg.change_tier_name, 10, 'b')
        self.tg.add_tier('tier1')
        tier2 = self.tg.add_tier('tier2')
        self.tg.add_tier('tier3')
        # Rename by name and by 1-based number.
        self.tg.change_tier_name('tier1', 'tier1a')
        self.assertEqual(['tier1a', 'tier2', 'tier3'],
                         [a.name for a in self.tg.tiers])
        self.tg.change_tier_name(self.tg.tiers.index(tier2)+1, 'tier2a')
        self.assertEqual(['tier1a', 'tier2a', 'tier3'],
                         [a.name for a in self.tg.tiers])
        self.tg.change_tier_name('tier1a', 'tier1')
        self.assertEqual(['tier1', 'tier2a', 'tier3'],
                         [a.name for a in self.tg.tiers])
    def test_get_tiers(self):
        self.tg.add_tier('tier1')
        self.tg.add_tier('tier2')
        self.tg.add_tier('tier3')
        self.assertEqual(self.tg.tiers,
                         list(self.tg.get_tiers()))
    def test_get_tier_name_num(self):
        self.tg.add_tier('tier1')
        self.tg.add_tier('tier2')
        self.tg.add_tier('tier3', number=2)
        self.assertEqual([(1, 'tier1'), (2, 'tier3'), (3, 'tier2')],
                         list(self.tg.get_tier_name_num()))
    def test_to_file(self):
        # Round-trip the TextGrid through every writer mode and codec.
        for codec in ['utf-8', 'latin_1', 'mac_roman']:
            self.tg = TextGrid(xmax=20)
            tier1 = self.tg.add_tier('tier')
            tier1.add_interval(1, 2, 'i1')
            tier1.add_interval(2, 3, 'i2')
            tier1.add_interval(4, 5, 'i3')
            tier4 = self.tg.add_tier('tier')
            tier4.add_interval(1, 2, u'i1ü')
            tier4.add_interval(2.0, 3, 'i2')
            tier4.add_interval(4, 5.0, 'i3')
            tier2 = self.tg.add_tier('tier2', tier_type='TextTier')
            tier2.add_point(1, u'p1ü')
            tier2.add_point(2, 'p1')
            tier2.add_point(3, 'p1')
            tempf = tempfile.mkstemp()[1]
            # Normal (long) text mode.
            self.tg.to_file(tempf, codec=codec)
            TextGrid(tempf, codec=codec)
            # Short text mode.
            self.tg.to_file(tempf, codec=codec, mode='s')
            TextGrid(tempf, codec=codec)
            # Binary mode.
            self.tg.to_file(tempf, mode='b')
            TextGrid(tempf)
            os.remove(tempf)
    def test_to_eaf(self):
        # Conversion to ELAN .eaf: points become small intervals.
        tier1 = self.tg.add_tier('tier1')
        tier2 = self.tg.add_tier('tier2', tier_type='TextTier')
        tier1.add_interval(0, 1, 'int1')
        tier1.add_interval(2, 3, 'int2')
        tier1.add_interval(5, 6, 'int3')
        tier2.add_point(1.5, 'point1')
        tier2.add_point(2.5, 'point2')
        tier2.add_point(3.5, 'point3')
        eaf = self.tg.to_eaf(True, 0.03)
        self.assertRaises(ValueError, self.tg.to_eaf, pointlength=-1)
        self.assertEqual(sorted(eaf.get_tier_names()),
                         sorted(['default', 'tier1', 'tier2']))
        self.assertEqual(sorted(eaf.get_annotation_data_for_tier('tier1')),
                         sorted([(0, 1000, 'int1'), (5000, 6000, 'int3'),
                                 (2000, 3000, 'int2')]))
        self.assertEqual(sorted(eaf.get_annotation_data_for_tier('tier2')),
                         sorted([(2500, 2530, 'point2'),
                                 (1500, 1530, 'point1'),
                                 (3500, 3530, 'point3')]))
    # --- Praat.Tier functions ---
    def setup_tier(self):
        # Helper (not a test): one interval tier and one point tier.
        self.tier1 = self.tg.add_tier('tier1')
        self.tier2 = self.tg.add_tier('tier2', tier_type='TextTier')
    def test_add_point(self):
        self.setup_tier()
        # Points are only valid on TextTiers.
        self.assertRaises(Exception, self.tier1.add_point, 5, 'a')
        self.tier2.add_point(5, 't')
        self.assertEqual([(5, 't')], self.tier2.intervals)
        # Duplicate timestamps are rejected unless check is disabled.
        self.assertRaises(Exception, self.tier2.add_point, 5, 'a')
        self.tier2.add_point(6, 'a')
        self.assertEqual([(5, 't'), (6, 'a')], self.tier2.intervals)
        self.tier2.add_point(5, 'a', False)
    def test_add_interval(self):
        self.setup_tier()
        # Intervals are only valid on IntervalTiers and must have xmin < xmax.
        self.assertRaises(Exception,
                          self.tier2.add_interval, 5, 6, 'a')
        self.assertRaises(Exception, self.tier2.add_interval, 6, 5, 'a')
        self.tier1.add_interval(5, 6, 't')
        self.assertEqual([(5, 6, 't')], self.tier1.intervals)
        # Overlapping intervals are rejected unless check is disabled.
        self.assertRaises(Exception, self.tier1.add_interval, 5.5, 6.5, 't')
        self.tier1.add_interval(6, 7, 'a')
        self.assertEqual([(5, 6, 't'), (6, 7, 'a')], self.tier1.intervals)
        self.tier1.add_interval(5.5, 6.5, 't', False)
    def test_remove_interval(self):
        self.setup_tier()
        self.assertRaises(Exception, self.tier2.remove_interval, 5)
        self.tier1.add_interval(5, 6, 'a')
        self.tier1.add_interval(6, 7, 'b')
        self.tier1.add_interval(7, 8, 'c')
        # Removal is by a time point inside the interval.
        self.tier1.remove_interval(5.5)
        self.assertEqual([(6, 7, 'b'), (7, 8, 'c')],
                         self.tier1.intervals)
        self.tier1.remove_interval(8)
        self.assertEqual([(6, 7, 'b')],
                         self.tier1.intervals)
        # Removing at a time with no interval is a no-op.
        self.tier1.remove_interval(8)
        self.assertEqual([(6, 7, 'b')],
                         self.tier1.intervals)
    def test_remove_point(self):
        self.setup_tier()
        self.assertRaises(Exception, self.tier1.remove_point, 5)
        self.tier2.add_point(5, 'a')
        self.tier2.add_point(6, 'b')
        self.tier2.add_point(7, 'c')
        self.tier2.remove_point(5)
        self.assertEqual([(6, 'b'), (7, 'c')],
                         self.tier2.intervals)
        self.tier2.remove_point(7)
        self.assertEqual([(6, 'b')],
                         self.tier2.intervals)
        # Removing a non-existent point is a no-op.
        self.tier2.remove_point(7)
        self.assertEqual([(6, 'b')],
                         self.tier2.intervals)
    def test_get_intervals(self):
        self.setup_tier()
        self.tier1.add_interval(5, 6, 'a')
        self.tier1.add_interval(7, 8, 'c')
        self.tier1.add_interval(6, 7, 'b')
        self.assertEqual([(5, 6, 'a'), (6, 7, 'b'), (7, 8, 'c')],
                         sorted(self.tier1.get_intervals()))
        self.tier2.add_point(5, 'a')
        self.tier2.add_point(7, 'c')
        self.tier2.add_point(6, 'b')
        self.assertEqual([(5, 'a'), (6, 'b'), (7, 'c')],
                         sorted(self.tier2.get_intervals()))
    def test_clear_intervals(self):
        self.setup_tier()
        self.tier1.add_interval(5, 6, 'a')
        self.tier1.add_interval(6, 7, 'b')
        self.tier1.add_interval(7, 8, 'c')
        self.tier1.clear_intervals()
        self.assertEqual([], self.tier1.intervals)
        self.tier2.add_point(5, 'a')
        self.tier2.add_point(6, 'b')
        self.tier2.add_point(7, 'c')
        self.tier2.clear_intervals()
        self.assertEqual([], self.tier2.intervals)
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
| true | true |
f71afce939f7d6088496f8152b5131beafd2e97c | 27,561 | py | Python | cbre/cbre_net.py | jameszhou-gl/CBRE | 53c952e0afc74518fc4223f0f20881336df20f95 | [
"Apache-2.0"
] | null | null | null | cbre/cbre_net.py | jameszhou-gl/CBRE | 53c952e0afc74518fc4223f0f20881336df20f95 | [
"Apache-2.0"
] | null | null | null | cbre/cbre_net.py | jameszhou-gl/CBRE | 53c952e0afc74518fc4223f0f20881336df20f95 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import numpy as np
from cbre.util import *
class CBRENet(object):
"""
cbre_net implements the cycly-balanced representation learning for counterfactual inference
The network is implemented as a tensorflow graph. The class constructor
creates an object containing relevant TF nodes as member variables.
"""
    def __init__(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):
        """
        x          The variables (covariates) of the data
        t          The treatment applied to x, t.shape[1] == 1
        y_         The true outcome
        p_t        The treatment probability over all observations
        z_norm     Normalized representation input (semantics not documented
                   upstream — TODO confirm with _build_graph usage)
        flags      The arg params
        r_alpha    The coefficient of reconstruction and cycle loss
        r_lambda   The coefficient of regularization of the prediction network
        r_beta     The coefficient of the gradient penalty in the GAN
        do_in      The dropout value for input layers (keep or drop rate — confirm)
        do_out     The dropout value for output layers (keep or drop rate — confirm)
        data_x_dim The dimension of variable x
        """
        # Tracks every TF variable created through _create_variable.
        self.variables = {}
        # wd_loss: regularization l2 loss
        self.wd_loss = 0
        # Nonlinearity for all hidden layers, selected by flag.
        if flags.nonlin.lower() == 'elu':
            self.nonlin = tf.nn.elu
        else:
            self.nonlin = tf.nn.relu
        self._build_graph(x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim)
def _add_variable(self, var, name):
"""
Adds variables to the internal track-keeper
"""
basename = name
i = 0
while name in self.variables:
name = '%s_%d' % (basename, i) # @TODO: not consistent with TF internally if changed
i += 1
self.variables[name] = var
def _create_variable(self, var, name):
""" Create and adds variables to the internal track-keeper """
# tf.get_variable(name=name, initializer=var)
var = tf.Variable(var, name=name)
self._add_variable(var, name)
return var
def _create_variable_with_weight_decay(self, initializer, name, wd):
""" Create and adds variables to the internal track-keeper
and adds it to the list of weight decayed variables """
var = self._create_variable(initializer, name)
self.wd_loss += wd * tf.nn.l2_loss(var)
return var
def _build_graph(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):
"""
Constructs a TensorFlow subgraph for causal effect inference.
Sets the following member variables (to TF nodes):
self.output The output prediction "y"
self.tot_loss The total objective to minimize
self.pred_loss The prediction term of the objective
self.weights_in The input/representation layer weights
self.weights_out The output/post-representation layer weights
self.weights_pred The (linear) prediction layer weights
self.h_rep The layer of the penalized representation
"""
self.x = x
self.t = t
self.y_ = y_
self.p_t = p_t
self.r_alpha = r_alpha
self.r_lambda = r_lambda
self.r_beta = r_beta
self.do_in = do_in
self.do_out = do_out
self.z_norm = z_norm
self.encoder_dim = flags.encoder_dim
encoder_dim = flags.encoder_dim
self.decoder_dim = flags.decoder_dim
self.predictor_dim = flags.predictor_dim
predictor_dim = flags.predictor_dim
mi_estimator_dim = flags.mi_estimator_dim
self.discriminator_dim = flags.discriminator_dim
discriminator_dim = flags.discriminator_dim
"""
Network Components
"""
'''
1. Encoder Network
'''
# Construct Encoder network layers, four layers with size 200
h_rep, h_rep_norm, weights_in = self._build_encoder(x, data_x_dim, flags)
'''
2. GAN
'''
d0, d1, dp, weights_dis, weights_discore = self._build_adversarial_graph(h_rep_norm, t, encoder_dim,
discriminator_dim, do_out,
flags)
# discriminator
# with sigmoid
# discriminator_loss = tf.reduce_mean(tf.nn.softplus(-d0)) + tf.reduce_mean(tf.nn.softplus(-d1) + d1) + dp
# without sigmoid
discriminator_loss = -tf.reduce_mean(d0) + tf.reduce_mean(d1) + r_beta * dp
# encoder
# with sigmoid
# rep_loss = tf.reduce_mean(tf.nn.softplus(-d1))
# without sigmoid
# todo rep_loss in paper: rep_loss = tf.reduce_mean(d0) - tf.reduce_mean(d1)
rep_loss = tf.reduce_mean(d0) - tf.reduce_mean(d1)
# rep_loss = -tf.reduce_mean(d1)
'''
3. Reconstruction
'''
# graph for reconstruction loss
x0, recons_x_0, x1, recons_x_1 = self._build_reconstruct_graph(x, t, data_x_dim, flags)
recons_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - recons_x_0)) + 1.0e-12) + tf.sqrt(
tf.reduce_mean(tf.square(x1 - recons_x_1)) + 1.0e-12)
'''
4. Cycle
'''
x0, cycle_x0, x1, cycle_x1 = self._build_cycle_graph(x, t, data_x_dim, flags)
cycle_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - cycle_x0)) + 1.0e-12) + tf.sqrt(
tf.reduce_mean(tf.square(x1 - cycle_x1)) + 1.0e-12)
'''
Predict Networks
'''
y, weights_out, weights_pred = self._build_output_graph(h_rep_norm, t, encoder_dim, predictor_dim, do_out,
flags)
""" Compute sample reweighting """
if flags.reweight_sample:
w_t = t / (2 * p_t)
w_c = (1 - t) / (2 * 1 - p_t)
sample_weight = w_t + w_c
else:
sample_weight = 1.0
self.sample_weight = sample_weight
risk = tf.reduce_mean(sample_weight * tf.square(y_ - y))
pred_error = tf.sqrt(tf.reduce_mean(tf.square(y_ - y)) + 1.0e-12)
""" Regularization """
if flags.p_lambda > 0 and flags.rep_weight_decay:
for i in range(0, flags.layer_num_encoder):
if not (flags.varsel and i == 0): # No penalty on W in variable selection
self.wd_loss += tf.nn.l2_loss(weights_in[i])
""" Total error """
tot_error = risk
if flags.p_lambda > 0:
tot_error = tot_error + r_lambda * self.wd_loss + recons_loss + cycle_loss
if flags.coef_recons > 0:
tot_error += flags.coef_recons * recons_loss
if flags.coef_cycle:
tot_error += flags.coef_cycle * cycle_loss
if flags.coef_d:
tot_error += flags.coef_d * discriminator_loss
if flags.varsel:
self.w_proj = tf.placeholder("float", shape=[data_x_dim], name='w_proj')
self.projection = weights_in[0].assign(self.w_proj)
self.output = y
self.tot_loss = tot_error
self.discriminator_loss = discriminator_loss
self.rep_loss = rep_loss
self.rec_loss = recons_loss
self.cycle_loss = cycle_loss
self.recons_cycle_loss = recons_loss + cycle_loss
self.pred_loss = pred_error
self.weights_in = weights_in
self.weights_out = weights_out
self.weights_dis = weights_dis
self.weights_discore = weights_discore
self.weights_pred = weights_pred
self.h_rep = h_rep
self.h_rep_norm = h_rep_norm
self.dp = dp
def _build_output_0(self, h_input, encoder_dim, predictor_dim, do_out, flags):
h_out = [h_input]
dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)
with tf.variable_scope('pred_0') as scope:
weights_out = []
biases_out = []
for i in range(0, flags.layer_num_predictor):
wo = tf.get_variable(name='w_{}'.format(i),
initializer=tf.random_normal([dims[i], dims[i + 1]],
stddev=flags.weight_init / np.sqrt(dims[i])))
weights_out.append(wo)
# biases_out.append(tf.Variable(tf.zeros([1, predictor_dim])))
biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))
z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]
h_out.append(self.nonlin(z))
h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)
weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(predictor_dim)),
'w_pred')
weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(
predictor_dim)))
bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')
if flags.varsel or flags.layer_num_predictor == 0:
self.wd_loss += tf.nn.l2_loss(
tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1])) # don't penalize treatment coefficient
else:
self.wd_loss += tf.nn.l2_loss(weights_pred)
""" Construct linear classifier """
h_pred = h_out[-1]
y = tf.matmul(h_pred, weights_pred) + bias_pred
return y, weights_out, weights_pred
def _build_output_1(self, h_input, encoder_dim, predictor_dim, do_out, flags):
h_out = [h_input]
dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)
with tf.variable_scope('pred_1') as scope:
weights_out = []
biases_out = []
for i in range(0, flags.layer_num_predictor):
wo = tf.get_variable(name='w_{}'.format(i),
initializer=tf.random_normal([dims[i], dims[i + 1]],
stddev=flags.weight_init / np.sqrt(dims[i])))
weights_out.append(wo)
# biases_out.append(tf.Variable(tf.zeros([1, predictor_dim])))
biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))
z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]
h_out.append(self.nonlin(z))
h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)
weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(predictor_dim)),
'w_pred')
weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(
predictor_dim)))
bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')
if flags.varsel or flags.layer_num_predictor == 0:
self.wd_loss += tf.nn.l2_loss(
tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1])) # don't penalize treatment coefficient
else:
self.wd_loss += tf.nn.l2_loss(weights_pred)
""" Construct linear classifier """
h_pred = h_out[-1]
y = tf.matmul(h_pred, weights_pred) + bias_pred
return y, weights_out, weights_pred
def _build_output_graph(self, rep, t, encoder_dim, predictor_dim, do_out, flags):
""" Construct output/regression layers """
if flags.split_output:
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
rep0 = tf.gather(rep, i0)
rep1 = tf.gather(rep, i1)
y0, weights_out0, weights_pred0 = self._build_output_0(rep0, encoder_dim, predictor_dim, do_out, flags)
y1, weights_out1, weights_pred1 = self._build_output_1(rep1, encoder_dim, predictor_dim, do_out, flags)
y = tf.dynamic_stitch([i0, i1], [y0, y1])
weights_out = weights_out0 + weights_out1
weights_pred = weights_pred0 + weights_pred1
else:
h_input = tf.concat(1, [rep, t])
# y, weights_out, weights_pred = self._build_output(h_input, encoder_dim + 1, predictor_dim, do_out, flags)
y, weights_out, weights_pred = None, None, None
return y, weights_out, weights_pred
def _build_encoder(self, x, data_x_dim, flags):
with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE) as scope:
weights_in = []
biases_in = []
if flags.batch_norm:
bn_biases = []
bn_scales = []
h_in = [x]
for i in range(0, flags.layer_num_encoder):
if i == 0:
""" If using variable selection, first layer is just rescaling"""
if flags.varsel:
weights_in.append(tf.get_variable(name='wg_{}'.format(i),
initializer=1.0 / data_x_dim * tf.ones([data_x_dim])))
else:
wg = tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([data_x_dim, self.encoder_dim],
stddev=flags.weight_init / np.sqrt(
data_x_dim)))
weights_in.append(wg)
else:
wg = tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([self.encoder_dim, self.encoder_dim],
stddev=flags.weight_init / np.sqrt(
self.encoder_dim)))
weights_in.append(wg)
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, self.encoder_dim])))
# z equals outcome of each layer in Encoder Network.
z = tf.matmul(h_in[i], weights_in[i]) + biases_in[i]
if flags.batch_norm:
batch_mean, batch_var = tf.nn.moments(z, [0])
if flags.normalization == 'bn_fixed':
z = tf.nn.batch_normalization(z, batch_mean, batch_var, 0, 1, 1e-3)
else:
# bn_biases.append(tf.Variable(tf.zeros([self.encoder_dim])))
bn_biases.append(
tf.get_variable(name='bn_b_{}'.format(i), initializer=tf.zeros([self.encoder_dim])))
# bn_scales.append(tf.Variable(tf.ones([self.encoder_dim])))
bn_scales.append(
tf.get_variable(name='bn_s_{}'.format(i), initializer=tf.ones([self.encoder_dim])))
z = tf.nn.batch_normalization(z, batch_mean, batch_var, bn_biases[-1], bn_scales[-1], 1e-3)
h_in.append(self.nonlin(z))
h_in[i + 1] = tf.nn.dropout(h_in[i + 1], self.do_in)
h_rep = h_in[-1]
# todo normalization meaning?
if flags.normalization == 'divide':
h_rep_norm = h_rep / safe_sqrt(tf.reduce_sum(tf.square(h_rep), axis=1, keep_dims=True) + 1.0e-12)
else:
h_rep_norm = 1.0 * h_rep
return h_rep, h_rep_norm, weights_in
def _build_decoder(self, h_rep, data_x_dim, flags, suffix='0'):
with tf.variable_scope('decoder_' + suffix, reuse=tf.AUTO_REUSE) as scope:
weights_in = []
biases_in = []
recons_x = [h_rep]
decoder_dim = flags.decoder_dim
for i in range(0, flags.layer_num_decoder):
if i == 0:
weights_in.append(tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([flags.encoder_dim, decoder_dim],
stddev=flags.weight_init / np.sqrt(
flags.encoder_dim))))
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))
elif i == flags.layer_num_decoder - 1:
weights_in.append(
tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, data_x_dim],
stddev=flags.weight_init / np.sqrt(
decoder_dim))))
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, data_x_dim])))
else:
weights_in.append(
tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, decoder_dim],
stddev=flags.weight_init / np.sqrt(
decoder_dim))))
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))
# z equals outcome of each layer in Encoder Network.
z = tf.matmul(recons_x[i], weights_in[i]) + biases_in[i]
recons_x.append(self.nonlin(z))
recons_x[i + 1] = tf.nn.dropout(recons_x[i + 1], self.do_in)
recons_x = recons_x[-1]
return recons_x, weights_in
def _build_discriminator_graph_mine(self, x, hrep, data_x_dim, encoder_dim, mi_estimator_dim, flags):
""" Construct MI estimation layers """
# two layers with size 200
with tf.variable_scope('gmi') as scope:
input_num = tf.shape(x)[0]
x_shuffle = tf.random_shuffle(x)
x_conc = tf.concat([x, x_shuffle], axis=0)
y_conc = tf.concat([hrep, hrep], axis=0)
# forward
# [25, 200]
weights_mi_x = self._create_variable(tf.random_normal([data_x_dim, mi_estimator_dim],
stddev=flags.weight_init / np.sqrt(data_x_dim)),
'weights_mi_x')
biases_mi_x = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_x')
# [, 200]
lin_x = tf.matmul(x_conc, weights_mi_x) + biases_mi_x
# [200, 200]
weights_mi_y = self._create_variable(tf.random_normal([encoder_dim, mi_estimator_dim],
stddev=flags.weight_init / np.sqrt(encoder_dim)),
'weights_mi_y')
biases_mi_y = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_y')
# [, 200]
lin_y = tf.matmul(y_conc, weights_mi_y) + biases_mi_y
# lin_conc = tf.nn.relu(lin_x + lin_y)
lin_conc = self.nonlin(lin_x + lin_y)
weights_mi_pred = self._create_variable(tf.random_normal([mi_estimator_dim, 1],
stddev=flags.weight_init / np.sqrt(
mi_estimator_dim)),
'gmi_p')
biases_mi_pred = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_pred')
gmi_output = tf.matmul(lin_conc, weights_mi_pred) + biases_mi_pred
# real estimator outcome: shape=[input_num, 1]
real_estimate = gmi_output[:input_num]
# fake estimator outcome: shape=[input_num, 1]
fake_estimate = gmi_output[input_num:]
return real_estimate, fake_estimate, weights_mi_x, weights_mi_y, weights_mi_pred
def _build_discriminator_adversarial(self, hrep, encoder_dim, discriminator_dim, do_out, flags):
""" Construct adversarial discriminator layers """
with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as scope:
h_dis = [hrep]
weights_dis = []
biases_dis = []
for i in range(0, flags.layer_num_discriminator):
if i == 0:
weights_dis.append(tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([encoder_dim, discriminator_dim],
stddev=flags.weight_init / np.sqrt(
encoder_dim))))
else:
weights_dis.append(tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal(
[discriminator_dim, discriminator_dim],
stddev=flags.weight_init / np.sqrt(
discriminator_dim))))
biases_dis.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, discriminator_dim])))
z = tf.matmul(h_dis[i], weights_dis[i]) + biases_dis[i]
h_dis.append(self.nonlin(z))
h_dis[i + 1] = tf.nn.dropout(h_dis[i + 1], do_out)
weights_discore = tf.get_variable(initializer=tf.random_normal([discriminator_dim, 1],
stddev=flags.weight_init / np.sqrt(
discriminator_dim)), name='dc_p')
bias_dc = tf.get_variable(initializer=tf.zeros([1]), name='dc_b_p')
h_score = h_dis[-1]
dis_score = tf.matmul(h_score, weights_discore) + bias_dc
return dis_score, weights_dis, weights_discore
def _build_adversarial_graph(self, rep, t, encoder_dim, discriminator_dim, do_out, flags):
"""
Construct adversarial discriminator
"""
# three layers with size 200
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
rep0 = tf.gather(rep, i0)
rep1 = tf.gather(rep, i1)
z_rep0 = tf.reduce_max(rep0, axis=0, keep_dims=True)
z_rep1 = tf.reduce_max(rep1, axis=0, keep_dims=True)
z_rep0_conc = tf.concat([z_rep0, self.z_norm], axis=1)
z_rep1_conc = tf.concat([z_rep1, self.z_norm], axis=1)
d0, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep0_conc, encoder_dim + encoder_dim,
discriminator_dim,
do_out, flags)
d1, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep1_conc, encoder_dim + encoder_dim,
discriminator_dim,
do_out, flags)
# gradient penalty
alpha_dist = tf.contrib.distributions.Uniform(low=0., high=1.)
alpha = alpha_dist.sample((1, 1))
interpolated = z_rep1 + alpha * (z_rep0 - z_rep1)
interpolated_conc = tf.concat([interpolated, self.z_norm], axis=1)
inte_logit, weights_dis, weights_discore = self._build_discriminator_adversarial(interpolated_conc,
encoder_dim + encoder_dim,
discriminator_dim, do_out,
flags)
gradients = tf.gradients(inte_logit, [interpolated])[0]
grad_l2 = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1]) + 1.0e-12)
gradient_penalty = tf.reduce_mean(tf.square(grad_l2 - 1.0))
return d0, d1, gradient_penalty, weights_dis, weights_discore
def _build_reconstruct_graph(self, x, t, data_x_dim, flags):
""" construct graph for later computing reconstruction loss easily
Parameters:
x The varibales of data
t The treatment applied to x
Returns:
x0 x[t=0]
reconstruct_x reconstruct x when pass encoder and decoder networks
"""
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
x0 = tf.gather(x, i0)
x1 = tf.gather(x, i1)
h_rep_0, h_rep_norm_0, weights_in_0 = self._build_encoder(x0, data_x_dim, flags)
h_rep_1, h_rep_norm_1, weights_in_1 = self._build_encoder(x1, data_x_dim, flags)
recons_x_0, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='0')
recons_x_1, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='1')
return x0, recons_x_0, x1, recons_x_1
def _build_cycle_graph(self, x, t, data_x_dim, flags):
""" construct graph for later computing cycle loss easily
Parameters:
x The varibales of data
t The treatment applied to x
Returns:
x0 x[t=0]
reconstruct_x reconstruct x when pass encoder and decoder networks
"""
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
x0 = tf.gather(x, i0)
x1 = tf.gather(x, i1)
# cycle x0-x1'-x0
_, h_rep_norm_0, _ = self._build_encoder(x0, data_x_dim, flags)
temp_x_0_in_1, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='1')
_, cyc_h_rep_norm_0, _ = self._build_encoder(temp_x_0_in_1, data_x_dim, flags)
cycle_x0, _ = self._build_decoder(cyc_h_rep_norm_0, data_x_dim, flags, suffix='0')
# cycle x1-x0'-x1
_, h_rep_norm_1, _ = self._build_encoder(x1, data_x_dim, flags)
temp_x_1_in_0, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='0')
_, cyc_h_rep_norm_1, _ = self._build_encoder(temp_x_1_in_0, data_x_dim, flags)
cycle_x1, _ = self._build_decoder(cyc_h_rep_norm_1, data_x_dim, flags, suffix='1')
return x0, cycle_x0, x1, cycle_x1
| 48.43761 | 128 | 0.532274 | import tensorflow as tf
import numpy as np
from cbre.util import *
class CBRENet(object):
def __init__(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):
self.variables = {}
self.wd_loss = 0
if flags.nonlin.lower() == 'elu':
self.nonlin = tf.nn.elu
else:
self.nonlin = tf.nn.relu
self._build_graph(x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim)
def _add_variable(self, var, name):
basename = name
i = 0
while name in self.variables:
name = '%s_%d' % (basename, i)
i += 1
self.variables[name] = var
def _create_variable(self, var, name):
var = tf.Variable(var, name=name)
self._add_variable(var, name)
return var
def _create_variable_with_weight_decay(self, initializer, name, wd):
var = self._create_variable(initializer, name)
self.wd_loss += wd * tf.nn.l2_loss(var)
return var
def _build_graph(self, x, t, y_, p_t, z_norm, flags, r_alpha, r_lambda, r_beta, do_in, do_out, data_x_dim):
self.x = x
self.t = t
self.y_ = y_
self.p_t = p_t
self.r_alpha = r_alpha
self.r_lambda = r_lambda
self.r_beta = r_beta
self.do_in = do_in
self.do_out = do_out
self.z_norm = z_norm
self.encoder_dim = flags.encoder_dim
encoder_dim = flags.encoder_dim
self.decoder_dim = flags.decoder_dim
self.predictor_dim = flags.predictor_dim
predictor_dim = flags.predictor_dim
mi_estimator_dim = flags.mi_estimator_dim
self.discriminator_dim = flags.discriminator_dim
discriminator_dim = flags.discriminator_dim
h_rep, h_rep_norm, weights_in = self._build_encoder(x, data_x_dim, flags)
d0, d1, dp, weights_dis, weights_discore = self._build_adversarial_graph(h_rep_norm, t, encoder_dim,
discriminator_dim, do_out,
flags)
discriminator_loss = -tf.reduce_mean(d0) + tf.reduce_mean(d1) + r_beta * dp
rep_loss = tf.reduce_mean(d0) - tf.reduce_mean(d1)
x0, recons_x_0, x1, recons_x_1 = self._build_reconstruct_graph(x, t, data_x_dim, flags)
recons_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - recons_x_0)) + 1.0e-12) + tf.sqrt(
tf.reduce_mean(tf.square(x1 - recons_x_1)) + 1.0e-12)
x0, cycle_x0, x1, cycle_x1 = self._build_cycle_graph(x, t, data_x_dim, flags)
cycle_loss = tf.sqrt(tf.reduce_mean(tf.square(x0 - cycle_x0)) + 1.0e-12) + tf.sqrt(
tf.reduce_mean(tf.square(x1 - cycle_x1)) + 1.0e-12)
y, weights_out, weights_pred = self._build_output_graph(h_rep_norm, t, encoder_dim, predictor_dim, do_out,
flags)
if flags.reweight_sample:
w_t = t / (2 * p_t)
w_c = (1 - t) / (2 * 1 - p_t)
sample_weight = w_t + w_c
else:
sample_weight = 1.0
self.sample_weight = sample_weight
risk = tf.reduce_mean(sample_weight * tf.square(y_ - y))
pred_error = tf.sqrt(tf.reduce_mean(tf.square(y_ - y)) + 1.0e-12)
if flags.p_lambda > 0 and flags.rep_weight_decay:
for i in range(0, flags.layer_num_encoder):
if not (flags.varsel and i == 0):
self.wd_loss += tf.nn.l2_loss(weights_in[i])
tot_error = risk
if flags.p_lambda > 0:
tot_error = tot_error + r_lambda * self.wd_loss + recons_loss + cycle_loss
if flags.coef_recons > 0:
tot_error += flags.coef_recons * recons_loss
if flags.coef_cycle:
tot_error += flags.coef_cycle * cycle_loss
if flags.coef_d:
tot_error += flags.coef_d * discriminator_loss
if flags.varsel:
self.w_proj = tf.placeholder("float", shape=[data_x_dim], name='w_proj')
self.projection = weights_in[0].assign(self.w_proj)
self.output = y
self.tot_loss = tot_error
self.discriminator_loss = discriminator_loss
self.rep_loss = rep_loss
self.rec_loss = recons_loss
self.cycle_loss = cycle_loss
self.recons_cycle_loss = recons_loss + cycle_loss
self.pred_loss = pred_error
self.weights_in = weights_in
self.weights_out = weights_out
self.weights_dis = weights_dis
self.weights_discore = weights_discore
self.weights_pred = weights_pred
self.h_rep = h_rep
self.h_rep_norm = h_rep_norm
self.dp = dp
def _build_output_0(self, h_input, encoder_dim, predictor_dim, do_out, flags):
h_out = [h_input]
dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)
with tf.variable_scope('pred_0') as scope:
weights_out = []
biases_out = []
for i in range(0, flags.layer_num_predictor):
wo = tf.get_variable(name='w_{}'.format(i),
initializer=tf.random_normal([dims[i], dims[i + 1]],
stddev=flags.weight_init / np.sqrt(dims[i])))
weights_out.append(wo)
biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))
z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]
h_out.append(self.nonlin(z))
h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)
weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(predictor_dim)),
'w_pred')
weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(
predictor_dim)))
bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')
if flags.varsel or flags.layer_num_predictor == 0:
self.wd_loss += tf.nn.l2_loss(
tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1]))
else:
self.wd_loss += tf.nn.l2_loss(weights_pred)
h_pred = h_out[-1]
y = tf.matmul(h_pred, weights_pred) + bias_pred
return y, weights_out, weights_pred
def _build_output_1(self, h_input, encoder_dim, predictor_dim, do_out, flags):
h_out = [h_input]
dims = [encoder_dim] + ([predictor_dim] * flags.layer_num_predictor)
with tf.variable_scope('pred_1') as scope:
weights_out = []
biases_out = []
for i in range(0, flags.layer_num_predictor):
wo = tf.get_variable(name='w_{}'.format(i),
initializer=tf.random_normal([dims[i], dims[i + 1]],
stddev=flags.weight_init / np.sqrt(dims[i])))
weights_out.append(wo)
# biases_out.append(tf.Variable(tf.zeros([1, predictor_dim])))
biases_out.append(tf.get_variable(name='b_{}'.format(i), initializer=tf.zeros([1, predictor_dim])))
z = tf.matmul(h_out[i], weights_out[i]) + biases_out[i]
h_out.append(self.nonlin(z))
h_out[i + 1] = tf.nn.dropout(h_out[i + 1], do_out)
weights_pred = self._create_variable(tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(predictor_dim)),
'w_pred')
weights_pred = tf.get_variable(name='w_pred', initializer=tf.random_normal([predictor_dim, 1],
stddev=flags.weight_init / np.sqrt(
predictor_dim)))
bias_pred = tf.get_variable(initializer=tf.zeros([1]), name='b_pred')
if flags.varsel or flags.layer_num_predictor == 0:
self.wd_loss += tf.nn.l2_loss(
tf.slice(weights_pred, [0, 0], [predictor_dim - 1, 1])) # don't penalize treatment coefficient
else:
self.wd_loss += tf.nn.l2_loss(weights_pred)
h_pred = h_out[-1]
y = tf.matmul(h_pred, weights_pred) + bias_pred
return y, weights_out, weights_pred
def _build_output_graph(self, rep, t, encoder_dim, predictor_dim, do_out, flags):
if flags.split_output:
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
rep0 = tf.gather(rep, i0)
rep1 = tf.gather(rep, i1)
y0, weights_out0, weights_pred0 = self._build_output_0(rep0, encoder_dim, predictor_dim, do_out, flags)
y1, weights_out1, weights_pred1 = self._build_output_1(rep1, encoder_dim, predictor_dim, do_out, flags)
y = tf.dynamic_stitch([i0, i1], [y0, y1])
weights_out = weights_out0 + weights_out1
weights_pred = weights_pred0 + weights_pred1
else:
h_input = tf.concat(1, [rep, t])
y, weights_out, weights_pred = None, None, None
return y, weights_out, weights_pred
def _build_encoder(self, x, data_x_dim, flags):
with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE) as scope:
weights_in = []
biases_in = []
if flags.batch_norm:
bn_biases = []
bn_scales = []
h_in = [x]
for i in range(0, flags.layer_num_encoder):
if i == 0:
if flags.varsel:
weights_in.append(tf.get_variable(name='wg_{}'.format(i),
initializer=1.0 / data_x_dim * tf.ones([data_x_dim])))
else:
wg = tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([data_x_dim, self.encoder_dim],
stddev=flags.weight_init / np.sqrt(
data_x_dim)))
weights_in.append(wg)
else:
wg = tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([self.encoder_dim, self.encoder_dim],
stddev=flags.weight_init / np.sqrt(
self.encoder_dim)))
weights_in.append(wg)
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, self.encoder_dim])))
z = tf.matmul(h_in[i], weights_in[i]) + biases_in[i]
if flags.batch_norm:
batch_mean, batch_var = tf.nn.moments(z, [0])
if flags.normalization == 'bn_fixed':
z = tf.nn.batch_normalization(z, batch_mean, batch_var, 0, 1, 1e-3)
else:
bn_biases.append(
tf.get_variable(name='bn_b_{}'.format(i), initializer=tf.zeros([self.encoder_dim])))
bn_scales.append(
tf.get_variable(name='bn_s_{}'.format(i), initializer=tf.ones([self.encoder_dim])))
z = tf.nn.batch_normalization(z, batch_mean, batch_var, bn_biases[-1], bn_scales[-1], 1e-3)
h_in.append(self.nonlin(z))
h_in[i + 1] = tf.nn.dropout(h_in[i + 1], self.do_in)
h_rep = h_in[-1]
if flags.normalization == 'divide':
h_rep_norm = h_rep / safe_sqrt(tf.reduce_sum(tf.square(h_rep), axis=1, keep_dims=True) + 1.0e-12)
else:
h_rep_norm = 1.0 * h_rep
return h_rep, h_rep_norm, weights_in
def _build_decoder(self, h_rep, data_x_dim, flags, suffix='0'):
with tf.variable_scope('decoder_' + suffix, reuse=tf.AUTO_REUSE) as scope:
weights_in = []
biases_in = []
recons_x = [h_rep]
decoder_dim = flags.decoder_dim
for i in range(0, flags.layer_num_decoder):
if i == 0:
weights_in.append(tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([flags.encoder_dim, decoder_dim],
stddev=flags.weight_init / np.sqrt(
flags.encoder_dim))))
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))
elif i == flags.layer_num_decoder - 1:
weights_in.append(
tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, data_x_dim],
stddev=flags.weight_init / np.sqrt(
decoder_dim))))
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, data_x_dim])))
else:
weights_in.append(
tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal([decoder_dim, decoder_dim],
stddev=flags.weight_init / np.sqrt(
decoder_dim))))
biases_in.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, decoder_dim])))
z = tf.matmul(recons_x[i], weights_in[i]) + biases_in[i]
recons_x.append(self.nonlin(z))
recons_x[i + 1] = tf.nn.dropout(recons_x[i + 1], self.do_in)
recons_x = recons_x[-1]
return recons_x, weights_in
def _build_discriminator_graph_mine(self, x, hrep, data_x_dim, encoder_dim, mi_estimator_dim, flags):
with tf.variable_scope('gmi') as scope:
input_num = tf.shape(x)[0]
x_shuffle = tf.random_shuffle(x)
x_conc = tf.concat([x, x_shuffle], axis=0)
y_conc = tf.concat([hrep, hrep], axis=0)
weights_mi_x = self._create_variable(tf.random_normal([data_x_dim, mi_estimator_dim],
stddev=flags.weight_init / np.sqrt(data_x_dim)),
'weights_mi_x')
biases_mi_x = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_x')
lin_x = tf.matmul(x_conc, weights_mi_x) + biases_mi_x
weights_mi_y = self._create_variable(tf.random_normal([encoder_dim, mi_estimator_dim],
stddev=flags.weight_init / np.sqrt(encoder_dim)),
'weights_mi_y')
biases_mi_y = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_y')
lin_y = tf.matmul(y_conc, weights_mi_y) + biases_mi_y
lin_conc = self.nonlin(lin_x + lin_y)
weights_mi_pred = self._create_variable(tf.random_normal([mi_estimator_dim, 1],
stddev=flags.weight_init / np.sqrt(
mi_estimator_dim)),
'gmi_p')
biases_mi_pred = self._create_variable(tf.zeros([1, mi_estimator_dim]), 'biases_mi_pred')
gmi_output = tf.matmul(lin_conc, weights_mi_pred) + biases_mi_pred
real_estimate = gmi_output[:input_num]
fake_estimate = gmi_output[input_num:]
return real_estimate, fake_estimate, weights_mi_x, weights_mi_y, weights_mi_pred
def _build_discriminator_adversarial(self, hrep, encoder_dim, discriminator_dim, do_out, flags):
with tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE) as scope:
h_dis = [hrep]
weights_dis = []
biases_dis = []
for i in range(0, flags.layer_num_discriminator):
if i == 0:
weights_dis.append(tf.get_variable(name='wg_{}'.format(i),
initializer=tf.random_normal([encoder_dim, discriminator_dim],
stddev=flags.weight_init / np.sqrt(
encoder_dim))))
else:
weights_dis.append(tf.get_variable(name='wg_{}'.format(i), initializer=tf.random_normal(
[discriminator_dim, discriminator_dim],
stddev=flags.weight_init / np.sqrt(
discriminator_dim))))
biases_dis.append(tf.get_variable(name='bi_{}'.format(i), initializer=tf.zeros([1, discriminator_dim])))
z = tf.matmul(h_dis[i], weights_dis[i]) + biases_dis[i]
h_dis.append(self.nonlin(z))
h_dis[i + 1] = tf.nn.dropout(h_dis[i + 1], do_out)
weights_discore = tf.get_variable(initializer=tf.random_normal([discriminator_dim, 1],
stddev=flags.weight_init / np.sqrt(
discriminator_dim)), name='dc_p')
bias_dc = tf.get_variable(initializer=tf.zeros([1]), name='dc_b_p')
h_score = h_dis[-1]
dis_score = tf.matmul(h_score, weights_discore) + bias_dc
return dis_score, weights_dis, weights_discore
def _build_adversarial_graph(self, rep, t, encoder_dim, discriminator_dim, do_out, flags):
i0 = tf.to_int32(tf.where(t < 1)[:, 0])
i1 = tf.to_int32(tf.where(t > 0)[:, 0])
rep0 = tf.gather(rep, i0)
rep1 = tf.gather(rep, i1)
z_rep0 = tf.reduce_max(rep0, axis=0, keep_dims=True)
z_rep1 = tf.reduce_max(rep1, axis=0, keep_dims=True)
z_rep0_conc = tf.concat([z_rep0, self.z_norm], axis=1)
z_rep1_conc = tf.concat([z_rep1, self.z_norm], axis=1)
d0, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep0_conc, encoder_dim + encoder_dim,
discriminator_dim,
do_out, flags)
d1, weights_dis, weights_discore = self._build_discriminator_adversarial(z_rep1_conc, encoder_dim + encoder_dim,
discriminator_dim,
do_out, flags)
alpha_dist = tf.contrib.distributions.Uniform(low=0., high=1.)
alpha = alpha_dist.sample((1, 1))
interpolated = z_rep1 + alpha * (z_rep0 - z_rep1)
interpolated_conc = tf.concat([interpolated, self.z_norm], axis=1)
inte_logit, weights_dis, weights_discore = self._build_discriminator_adversarial(interpolated_conc,
encoder_dim + encoder_dim,
discriminator_dim, do_out,
flags)
gradients = tf.gradients(inte_logit, [interpolated])[0]
grad_l2 = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1]) + 1.0e-12)
gradient_penalty = tf.reduce_mean(tf.square(grad_l2 - 1.0))
return d0, d1, gradient_penalty, weights_dis, weights_discore
    def _build_reconstruct_graph(self, x, t, data_x_dim, flags):
        """Build per-treatment-group autoencoder reconstructions.

        Encodes the control (t == 0) and treated (t == 1) subsets of ``x``
        and decodes each subset with its own treatment-specific decoder
        (decoder suffix '0' for control, '1' for treated).

        :param x: batch of raw input features, one row per sample.
        :param t: per-sample treatment indicator (0 = control, 1 = treated).
        :param data_x_dim: dimensionality of the raw input features.
        :param flags: hyper-parameter flags forwarded to the sub-builders.
        :return: tuple ``(x0, recons_x_0, x1, recons_x_1)`` -- original and
            reconstructed inputs for the control and treated groups.
        """
        # Row indices of control and treated samples.
        i0 = tf.to_int32(tf.where(t < 1)[:, 0])
        i1 = tf.to_int32(tf.where(t > 0)[:, 0])
        x0 = tf.gather(x, i0)
        x1 = tf.gather(x, i1)
        # Encode each group; only the normalized representation is decoded.
        h_rep_0, h_rep_norm_0, weights_in_0 = self._build_encoder(x0, data_x_dim, flags)
        h_rep_1, h_rep_norm_1, weights_in_1 = self._build_encoder(x1, data_x_dim, flags)
        recons_x_0, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='0')
        recons_x_1, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='1')
        return x0, recons_x_0, x1, recons_x_1
    def _build_cycle_graph(self, x, t, data_x_dim, flags):
        """Build CycleGAN-style cycle-consistency reconstructions.

        Each group is translated into the opposite group's data space and
        back: x0 -> decoder '1' -> encoder -> decoder '0' -> cycle_x0, and
        symmetrically for x1. The returned pairs are intended to be compared
        by a cycle-consistency loss defined by the caller.

        :param x: batch of raw input features, one row per sample.
        :param t: per-sample treatment indicator (0 = control, 1 = treated).
        :param data_x_dim: dimensionality of the raw input features.
        :param flags: hyper-parameter flags forwarded to the sub-builders.
        :return: tuple ``(x0, cycle_x0, x1, cycle_x1)``.
        """
        # Row indices of control and treated samples.
        i0 = tf.to_int32(tf.where(t < 1)[:, 0])
        i1 = tf.to_int32(tf.where(t > 0)[:, 0])
        x0 = tf.gather(x, i0)
        x1 = tf.gather(x, i1)
        # cycle x0 -> x1' -> x0: translate control into the treated domain
        # and back, re-encoding the intermediate translation.
        _, h_rep_norm_0, _ = self._build_encoder(x0, data_x_dim, flags)
        temp_x_0_in_1, _ = self._build_decoder(h_rep_norm_0, data_x_dim, flags, suffix='1')
        _, cyc_h_rep_norm_0, _ = self._build_encoder(temp_x_0_in_1, data_x_dim, flags)
        cycle_x0, _ = self._build_decoder(cyc_h_rep_norm_0, data_x_dim, flags, suffix='0')
        # cycle x1 -> x0' -> x1: the symmetric translation for the treated group.
        _, h_rep_norm_1, _ = self._build_encoder(x1, data_x_dim, flags)
        temp_x_1_in_0, _ = self._build_decoder(h_rep_norm_1, data_x_dim, flags, suffix='0')
        _, cyc_h_rep_norm_1, _ = self._build_encoder(temp_x_1_in_0, data_x_dim, flags)
        cycle_x1, _ = self._build_decoder(cyc_h_rep_norm_1, data_x_dim, flags, suffix='1')
        return x0, cycle_x0, x1, cycle_x1
| true | true |
f71afe637d8afd637eaa9306cb3f27585ad52570 | 887 | py | Python | setup.py | debdutgoswami/sorting-visualizer | e39e805acf22339b8ee06f8c8cd483e9c03ba3a4 | [
"MIT"
] | 3 | 2020-01-07T15:47:32.000Z | 2020-09-13T14:05:32.000Z | setup.py | debdutgoswami/sorting-visualizer | e39e805acf22339b8ee06f8c8cd483e9c03ba3a4 | [
"MIT"
] | 3 | 2020-10-04T18:03:36.000Z | 2020-10-08T07:13:40.000Z | setup.py | debdutgoswami/sorting-visualizer | e39e805acf22339b8ee06f8c8cd483e9c03ba3a4 | [
"MIT"
] | 3 | 2020-10-04T18:15:54.000Z | 2021-01-20T19:43:49.000Z | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="sorting_visualizer",
version="1.0",
author="Debdut Goswami",
author_email="debdutgoswami@gmail.com",
description="A package to visualize various sorting algorithms.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/debdutgoswami/sorting-visualizer",
download_url = 'https://github.com/debdutgoswami/sorting-visualizer/archive/v1.0.tar.gz',
keywords = ['SORT', 'ALGORITHM', 'VISUALIZE'],
packages=setuptools.find_packages(),
install_requires=[
'matplotlib'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
) | 32.851852 | 93 | 0.67531 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="sorting_visualizer",
version="1.0",
author="Debdut Goswami",
author_email="debdutgoswami@gmail.com",
description="A package to visualize various sorting algorithms.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/debdutgoswami/sorting-visualizer",
download_url = 'https://github.com/debdutgoswami/sorting-visualizer/archive/v1.0.tar.gz',
keywords = ['SORT', 'ALGORITHM', 'VISUALIZE'],
packages=setuptools.find_packages(),
install_requires=[
'matplotlib'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
) | true | true |
f71affe80caeb8141a00993c2bdfd94c37876974 | 383 | py | Python | scripts/get_repos.py | Ais105/course_project | a4ea3991756be2d12ae3fef9db6956f9d09c0c07 | [
"MIT"
] | null | null | null | scripts/get_repos.py | Ais105/course_project | a4ea3991756be2d12ae3fef9db6956f9d09c0c07 | [
"MIT"
] | null | null | null | scripts/get_repos.py | Ais105/course_project | a4ea3991756be2d12ae3fef9db6956f9d09c0c07 | [
"MIT"
] | 1 | 2020-02-18T20:56:57.000Z | 2020-02-18T20:56:57.000Z | import os
from github_client.client import GitHubClient
from utils.painter import paint
# Script entry point: authenticate against GitHub with credentials taken
# from the environment, fetch the repository list, and render it.
if __name__ == '__main__':
    username = os.environ['user_name']
    password = os.environ['user_password']
    gh_client = GitHubClient(username, password)
    gh_client.connect()
    repos = gh_client.get_repositories()
    account = gh_client.get_user()
    paint([account.login], [repos], 500, 10000)
| 29.461538 | 51 | 0.718016 | import os
from github_client.client import GitHubClient
from utils.painter import paint
# Script entry point: authenticate against GitHub with credentials taken
# from the environment, fetch the repository list, and render it.
if __name__ == '__main__':
    username = os.environ['user_name']
    password = os.environ['user_password']
    gh_client = GitHubClient(username, password)
    gh_client.connect()
    repos = gh_client.get_repositories()
    account = gh_client.get_user()
    paint([account.login], [repos], 500, 10000)
| true | true |
f71b00645a1360df4f8b7496608b98342bb43f7f | 8,243 | py | Python | gym_acnportal/gym_acnsim/envs/tests/test_action_spaces.py | caltech-netlab/gym-acnportal | cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46 | [
"BSD-3-Clause"
] | null | null | null | gym_acnportal/gym_acnsim/envs/tests/test_action_spaces.py | caltech-netlab/gym-acnportal | cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46 | [
"BSD-3-Clause"
] | 3 | 2021-04-28T14:43:32.000Z | 2021-04-28T14:58:04.000Z | gym_acnportal/gym_acnsim/envs/tests/test_action_spaces.py | sunash/gym-acnportal | cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46 | [
"BSD-3-Clause"
] | 1 | 2020-05-12T19:13:51.000Z | 2020-05-12T19:13:51.000Z | # coding=utf-8
""" Tests for SimAction and action space functions. """
import unittest
from typing import Callable, Dict, List, Any
from unittest.mock import create_autospec
import numpy as np
from gym import Space
from ..action_spaces import (
SimAction,
single_charging_schedule,
zero_centered_single_charging_schedule,
)
from ...interfaces import GymTrainedInterface
class TestSimAction(unittest.TestCase):
    """Unit tests for the SimAction wrapper.

    SimAction bundles a space-generating function, a schedule-conversion
    function, and a name; these tests check that each piece is stored and
    delegated to correctly.
    """

    # noinspection PyMissingOrEmptyDocstring
    @classmethod
    def setUpClass(cls) -> None:
        # The type here is Any as space_function is actually a Mock
        # object, but there's no Mock type in the typing library.
        cls.space_function: Any = create_autospec(lambda interface: Space())
        cls.to_schedule: Callable[
            [GymTrainedInterface, np.ndarray], Dict[str, List[float]]
        ] = lambda interface, array: {"a": [0]}
        cls.name: str = "stub_action"
        cls.sim_action: SimAction = SimAction(
            cls.space_function, cls.to_schedule, cls.name
        )
        cls.interface: GymTrainedInterface = create_autospec(GymTrainedInterface)

    def test_correct_on_init_sim_action_name(self) -> None:
        """The constructor stores the provided name."""
        self.assertEqual(self.sim_action.name, self.name)

    def test_get_space(self) -> None:
        """get_space delegates to the wrapped space function."""
        self.sim_action.get_space(self.interface)
        self.space_function.assert_called_once()

    def test_get_schedule(self) -> None:
        """get_schedule delegates to the wrapped schedule function."""
        array: np.ndarray = np.array([[1, 0], [0, 1]])
        self.assertEqual(
            self.sim_action.get_schedule(self.interface, array), {"a": [0]}
        )
class TestSingleChargingSchedule(unittest.TestCase):
    """Tests for the single_charging_schedule action space.

    Covers the Box-space bounds reported for a two-station network under
    three minimum-rate regimes (zero, negative, deadband) and the mapping
    from flat action arrays to per-station schedules.
    """

    # Some class variables are defined outside of setUpClass so that
    # the code inspector knows that inherited classes have these
    # attributes.
    max_rate: float = 16.0
    min_rate: float = 0.0
    negative_rate: float = -4.0
    deadband_rate: float = 6.0

    # noinspection PyMissingOrEmptyDocstring
    @classmethod
    def setUpClass(cls) -> None:
        cls.sim_action: SimAction = single_charging_schedule()
        cls.station_ids: List[str] = ["T1", "T2"]
        cls.offset: float = 0.5

        def _interface_builder(interface: Any, min_rate: float) -> Any:
            # Stub only the interface attributes the action space reads:
            # station ids and per-station pilot-signal bounds. Station T2
            # gets the supplied minimum; T1 always keeps cls.min_rate.
            interface.station_ids = cls.station_ids
            interface.max_pilot_signal = lambda station_id: cls.max_rate
            interface.min_pilot_signal = lambda station_id: (
                min_rate if station_id == cls.station_ids[1] else cls.min_rate
            )
            return interface

        cls.interface: Any = _interface_builder(
            create_autospec(GymTrainedInterface), cls.min_rate
        )
        cls.interface_negative_min: Any = _interface_builder(
            create_autospec(GymTrainedInterface), cls.negative_rate
        )
        cls.interface_deadband_min: Any = _interface_builder(
            create_autospec(GymTrainedInterface), cls.deadband_rate
        )

    def test_correct_on_init_single_name(self) -> None:
        """The action space is registered under the expected name."""
        self.assertEqual(self.sim_action.name, "single schedule")

    def _test_space_function_helper(
        self, interface: GymTrainedInterface, min_rate: float, max_rate: float
    ) -> None:
        """Assert the generated Box space has the expected shape and bounds."""
        out_space: Space = self.sim_action.get_space(interface)
        self.assertEqual(out_space.shape, (len(self.station_ids),))
        np.testing.assert_equal(out_space.low, 2 * [min_rate])
        np.testing.assert_equal(out_space.high, 2 * [max_rate])
        self.assertEqual(out_space.dtype, "float")

    def test_single_space_function(self) -> None:
        """Bounds with the default (zero) minimum rate."""
        self._test_space_function_helper(self.interface, self.min_rate, self.max_rate)

    def test_single_space_function_negative_min(self) -> None:
        """A negative station minimum lowers the space's lower bound."""
        self._test_space_function_helper(
            self.interface_negative_min, self.negative_rate, self.max_rate
        )

    def test_single_space_function_deadband_min(self) -> None:
        """A deadband minimum does not raise the lower bound above zero."""
        self._test_space_function_helper(
            self.interface_deadband_min, self.min_rate, self.max_rate
        )

    def test_single_to_schedule(self) -> None:
        """In-bounds actions map to one single-entry schedule per station."""
        good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
            self.interface,
            np.array(
                [self.min_rate + self.offset, (self.max_rate - self.min_rate) / 2]
            ),
        )
        self.assertEqual(
            good_schedule,
            {
                self.station_ids[0]: [self.min_rate + self.offset],
                self.station_ids[1]: [(self.max_rate - self.min_rate) / 2],
            },
        )

    def test_single_to_bad_schedule(self) -> None:
        # The get_schedule function does not test if the input schedule
        # array is within the action space.
        bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
            self.interface,
            np.array([self.min_rate - self.offset, self.max_rate + self.offset]),
        )
        self.assertEqual(
            bad_schedule,
            {
                self.station_ids[0]: [self.min_rate - self.offset],
                self.station_ids[1]: [self.max_rate + self.offset],
            },
        )

    def test_single_error_schedule(self) -> None:
        """A 2-D action array is rejected with a TypeError."""
        with self.assertRaises(TypeError):
            _ = self.sim_action.get_schedule(
                self.interface,
                np.array(
                    [[self.min_rate - self.offset], [self.max_rate + self.offset]]
                ),
            )
class TestZeroCenteredSingleChargingSchedule(TestSingleChargingSchedule):
    """Tests for the zero-centered single charging schedule action space.

    Inherits the fixtures of TestSingleChargingSchedule; the expected
    bounds and schedules are shifted by the midpoint of each station's
    [min_rate, max_rate] interval.
    """

    # noinspection PyMissingOrEmptyDocstring
    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        cls.sim_action: SimAction = zero_centered_single_charging_schedule()
        # Expected bounds after shifting by the (max + min) / 2 midpoint,
        # one entry per minimum-rate regime (zero, negative, deadband).
        cls.shifted_max = cls.max_rate - (cls.max_rate + cls.min_rate) / 2
        cls.shifted_minimums = [
            cls.min_rate - (cls.max_rate + cls.min_rate) / 2,
            cls.negative_rate - (cls.max_rate + cls.negative_rate) / 2,
            cls.min_rate - (cls.max_rate + cls.deadband_rate) / 2,
        ]
        cls.negative_max_shift = cls.max_rate - (cls.max_rate + cls.negative_rate) / 2

    def test_correct_on_init_single_name(self) -> None:
        """The action space is registered under the zero-centered name."""
        self.assertEqual(self.sim_action.name, "zero-centered single schedule")

    def test_single_space_function(self) -> None:
        """Bounds with the default (zero) minimum rate, midpoint-shifted."""
        self._test_space_function_helper(
            self.interface, self.shifted_minimums[0], self.shifted_max
        )

    def test_single_space_function_negative_min(self) -> None:
        """Bounds with a negative station minimum, midpoint-shifted."""
        self._test_space_function_helper(
            self.interface_negative_min,
            self.shifted_minimums[1],
            self.negative_max_shift,
        )

    def test_single_space_function_deadband_min(self) -> None:
        """Bounds with a deadband station minimum, midpoint-shifted."""
        self._test_space_function_helper(
            self.interface_deadband_min, self.shifted_minimums[2], self.shifted_max
        )

    def test_single_to_bad_schedule(self) -> None:
        # The get_schedule function does not test if the input schedule
        # array is within the action space.
        bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
            self.interface,
            np.array([self.min_rate - self.offset, self.max_rate + self.offset]),
        )
        self.assertEqual(
            bad_schedule,
            {
                self.station_ids[0]: [
                    self.min_rate - self.offset + (self.max_rate + self.min_rate) / 2
                ],
                self.station_ids[1]: [
                    self.max_rate + self.offset + (self.max_rate + self.min_rate) / 2
                ],
            },
        )

    def test_single_to_schedule(self) -> None:
        """Zero-centered actions are shifted back to absolute pilot rates."""
        good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
            self.interface,
            np.array(
                [
                    self.min_rate - (self.max_rate + self.min_rate) / 2,
                    self.max_rate - (self.max_rate + self.min_rate) / 2,
                ]
            ),
        )
        self.assertEqual(
            good_schedule,
            {
                self.station_ids[0]: [self.min_rate],
                self.station_ids[1]: [self.max_rate],
            },
        )
# Allow running this test module directly (python test_action_spaces.py).
if __name__ == "__main__":
    unittest.main()
| 37.298643 | 86 | 0.626592 |
import unittest
from typing import Callable, Dict, List, Any
from unittest.mock import create_autospec
import numpy as np
from gym import Space
from ..action_spaces import (
SimAction,
single_charging_schedule,
zero_centered_single_charging_schedule,
)
from ...interfaces import GymTrainedInterface
class TestSimAction(unittest.TestCase):
    """Unit tests for the SimAction wrapper.

    SimAction bundles a space-generating function, a schedule-conversion
    function, and a name; these tests check that each piece is stored and
    delegated to correctly.
    """

    @classmethod
    def setUpClass(cls) -> None:
        # space_function is typed Any because create_autospec returns a Mock.
        cls.space_function: Any = create_autospec(lambda interface: Space())
        cls.to_schedule: Callable[
            [GymTrainedInterface, np.ndarray], Dict[str, List[float]]
        ] = lambda interface, array: {"a": [0]}
        cls.name: str = "stub_action"
        cls.sim_action: SimAction = SimAction(
            cls.space_function, cls.to_schedule, cls.name
        )
        cls.interface: GymTrainedInterface = create_autospec(GymTrainedInterface)

    def test_correct_on_init_sim_action_name(self) -> None:
        """The constructor stores the provided name."""
        self.assertEqual(self.sim_action.name, self.name)

    def test_get_space(self) -> None:
        """get_space delegates to the wrapped space function."""
        self.sim_action.get_space(self.interface)
        self.space_function.assert_called_once()

    def test_get_schedule(self) -> None:
        """get_schedule delegates to the wrapped schedule function."""
        array: np.ndarray = np.array([[1, 0], [0, 1]])
        self.assertEqual(
            self.sim_action.get_schedule(self.interface, array), {"a": [0]}
        )
class TestSingleChargingSchedule(unittest.TestCase):
    """Tests for the single_charging_schedule action space.

    Covers the Box-space bounds reported for a two-station network under
    three minimum-rate regimes (zero, negative, deadband) and the mapping
    from flat action arrays to per-station schedules.
    """

    # Some class variables are defined outside of setUpClass so that
    # the code inspector knows that inherited classes have these
    # attributes.
    max_rate: float = 16.0
    min_rate: float = 0.0
    negative_rate: float = -4.0
    deadband_rate: float = 6.0

    # noinspection PyMissingOrEmptyDocstring
    @classmethod
    def setUpClass(cls) -> None:
        cls.sim_action: SimAction = single_charging_schedule()
        cls.station_ids: List[str] = ["T1", "T2"]
        cls.offset: float = 0.5

        def _interface_builder(interface: Any, min_rate: float) -> Any:
            # Stub only the interface attributes the action space reads:
            # station ids and per-station pilot-signal bounds. Station T2
            # gets the supplied minimum; T1 always keeps cls.min_rate.
            interface.station_ids = cls.station_ids
            interface.max_pilot_signal = lambda station_id: cls.max_rate
            interface.min_pilot_signal = lambda station_id: (
                min_rate if station_id == cls.station_ids[1] else cls.min_rate
            )
            return interface

        cls.interface: Any = _interface_builder(
            create_autospec(GymTrainedInterface), cls.min_rate
        )
        cls.interface_negative_min: Any = _interface_builder(
            create_autospec(GymTrainedInterface), cls.negative_rate
        )
        cls.interface_deadband_min: Any = _interface_builder(
            create_autospec(GymTrainedInterface), cls.deadband_rate
        )

    def test_correct_on_init_single_name(self) -> None:
        """The action space is registered under the expected name."""
        self.assertEqual(self.sim_action.name, "single schedule")

    def _test_space_function_helper(
        self, interface: GymTrainedInterface, min_rate: float, max_rate: float
    ) -> None:
        """Assert the generated Box space has the expected shape and bounds."""
        out_space: Space = self.sim_action.get_space(interface)
        self.assertEqual(out_space.shape, (len(self.station_ids),))
        np.testing.assert_equal(out_space.low, 2 * [min_rate])
        np.testing.assert_equal(out_space.high, 2 * [max_rate])
        self.assertEqual(out_space.dtype, "float")

    def test_single_space_function(self) -> None:
        """Bounds with the default (zero) minimum rate."""
        self._test_space_function_helper(self.interface, self.min_rate, self.max_rate)

    def test_single_space_function_negative_min(self) -> None:
        """A negative station minimum lowers the space's lower bound."""
        self._test_space_function_helper(
            self.interface_negative_min, self.negative_rate, self.max_rate
        )

    def test_single_space_function_deadband_min(self) -> None:
        """A deadband minimum does not raise the lower bound above zero."""
        self._test_space_function_helper(
            self.interface_deadband_min, self.min_rate, self.max_rate
        )

    def test_single_to_schedule(self) -> None:
        """In-bounds actions map to one single-entry schedule per station."""
        good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
            self.interface,
            np.array(
                [self.min_rate + self.offset, (self.max_rate - self.min_rate) / 2]
            ),
        )
        self.assertEqual(
            good_schedule,
            {
                self.station_ids[0]: [self.min_rate + self.offset],
                self.station_ids[1]: [(self.max_rate - self.min_rate) / 2],
            },
        )

    def test_single_to_bad_schedule(self) -> None:
        # The get_schedule function does not test if the input schedule
        # array is within the action space.
        bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
            self.interface,
            np.array([self.min_rate - self.offset, self.max_rate + self.offset]),
        )
        self.assertEqual(
            bad_schedule,
            {
                self.station_ids[0]: [self.min_rate - self.offset],
                self.station_ids[1]: [self.max_rate + self.offset],
            },
        )

    def test_single_error_schedule(self) -> None:
        """A 2-D action array is rejected with a TypeError."""
        with self.assertRaises(TypeError):
            _ = self.sim_action.get_schedule(
                self.interface,
                np.array(
                    [[self.min_rate - self.offset], [self.max_rate + self.offset]]
                ),
            )
class TestZeroCenteredSingleChargingSchedule(TestSingleChargingSchedule):
    """Tests for the zero-centered single charging schedule action space.

    Inherits the fixtures of TestSingleChargingSchedule; the expected
    bounds and schedules are shifted by the midpoint of each station's
    [min_rate, max_rate] interval.
    """

    # noinspection PyMissingOrEmptyDocstring
    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        cls.sim_action: SimAction = zero_centered_single_charging_schedule()
        # Expected bounds after shifting by the (max + min) / 2 midpoint,
        # one entry per minimum-rate regime (zero, negative, deadband).
        cls.shifted_max = cls.max_rate - (cls.max_rate + cls.min_rate) / 2
        cls.shifted_minimums = [
            cls.min_rate - (cls.max_rate + cls.min_rate) / 2,
            cls.negative_rate - (cls.max_rate + cls.negative_rate) / 2,
            cls.min_rate - (cls.max_rate + cls.deadband_rate) / 2,
        ]
        cls.negative_max_shift = cls.max_rate - (cls.max_rate + cls.negative_rate) / 2

    def test_correct_on_init_single_name(self) -> None:
        """The action space is registered under the zero-centered name."""
        self.assertEqual(self.sim_action.name, "zero-centered single schedule")

    def test_single_space_function(self) -> None:
        """Bounds with the default (zero) minimum rate, midpoint-shifted."""
        self._test_space_function_helper(
            self.interface, self.shifted_minimums[0], self.shifted_max
        )

    def test_single_space_function_negative_min(self) -> None:
        """Bounds with a negative station minimum, midpoint-shifted."""
        self._test_space_function_helper(
            self.interface_negative_min,
            self.shifted_minimums[1],
            self.negative_max_shift,
        )

    def test_single_space_function_deadband_min(self) -> None:
        """Bounds with a deadband station minimum, midpoint-shifted."""
        self._test_space_function_helper(
            self.interface_deadband_min, self.shifted_minimums[2], self.shifted_max
        )

    def test_single_to_bad_schedule(self) -> None:
        # The get_schedule function does not test if the input schedule
        # array is within the action space.
        bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
            self.interface,
            np.array([self.min_rate - self.offset, self.max_rate + self.offset]),
        )
        self.assertEqual(
            bad_schedule,
            {
                self.station_ids[0]: [
                    self.min_rate - self.offset + (self.max_rate + self.min_rate) / 2
                ],
                self.station_ids[1]: [
                    self.max_rate + self.offset + (self.max_rate + self.min_rate) / 2
                ],
            },
        )

    def test_single_to_schedule(self) -> None:
        """Zero-centered actions are shifted back to absolute pilot rates."""
        good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
            self.interface,
            np.array(
                [
                    self.min_rate - (self.max_rate + self.min_rate) / 2,
                    self.max_rate - (self.max_rate + self.min_rate) / 2,
                ]
            ),
        )
        self.assertEqual(
            good_schedule,
            {
                self.station_ids[0]: [self.min_rate],
                self.station_ids[1]: [self.max_rate],
            },
        )
# Allow running this test module directly (python test_action_spaces.py).
if __name__ == "__main__":
    unittest.main()
| true | true |
f71b01275c21a9328c6c1e5ce44454451a5dbe4c | 8,256 | py | Python | docs/conf.py | cic79/django-1.6-fine-uploader | 14ed9ca3e01ed9680760368da7c277aedb8dfde2 | [
"MIT"
] | 36 | 2017-02-10T18:39:03.000Z | 2022-03-23T19:52:38.000Z | docs/conf.py | cic79/django-1.6-fine-uploader | 14ed9ca3e01ed9680760368da7c277aedb8dfde2 | [
"MIT"
] | 9 | 2017-02-11T20:33:31.000Z | 2019-04-12T19:02:19.000Z | docs/conf.py | cic79/django-1.6-fine-uploader | 14ed9ca3e01ed9680760368da7c277aedb8dfde2 | [
"MIT"
] | 19 | 2017-03-19T23:54:05.000Z | 2020-09-02T14:42:57.000Z | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# PEP 8: one import per statement.
import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Make the project root (the parent of this docs/ directory) importable so
# that django_fine_uploader.__version__ can be read by the settings below.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)

import django_fine_uploader
# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'django_fine_uploader'
copyright = u'2017, Douglas Miranda'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Both values are read from the package imported at the top of this file.
version = django_fine_uploader.__version__
# The full version, including alpha/beta/rc tags.
release = django_fine_uploader.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'django-fine-uploaderdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'django-fine-uploader.tex', u'django_fine_uploader Documentation',
     u'Douglas Miranda', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-fine-uploader', u'django_fine_uploader Documentation',
     [u'Douglas Miranda'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'django-fine-uploader', u'django_fine_uploader Documentation',
     u'Douglas Miranda', 'django-fine-uploader', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.376471 | 82 | 0.721294 |
# PEP 8: one import per statement.
import os
import sys

# Make the project root (the parent of this docs/ directory) importable so
# that django_fine_uploader.__version__ can be read by the settings below.
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)

import django_fine_uploader
# Sphinx extensions: pull in docstrings (autodoc) and link highlighted
# source code for documented objects (viewcode).
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Paths that contain templates, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django_fine_uploader'
copyright = u'2017, Douglas Miranda'
# The version info for the project, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = django_fine_uploader.__version__
# The full version, including alpha/beta/rc tags.
release = django_fine_uploader.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-fine-uploaderdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'django-fine-uploader.tex', u'django_fine_uploader Documentation',
     u'Douglas Miranda', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-fine-uploader', u'django_fine_uploader Documentation',
     [u'Douglas Miranda'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'django-fine-uploader', u'django_fine_uploader Documentation',
     u'Douglas Miranda', 'django-fine-uploader', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true | true |
f71b01c4adc02f98e92a5b0a74684be8020686f2 | 397 | py | Python | diploma/urls.py | taras-ua/diplomawork | c7540d5c674194ec0da965be18bd3becaf69d5f3 | [
"Apache-2.0"
] | 1 | 2015-05-08T09:07:06.000Z | 2015-05-08T09:07:06.000Z | diploma/urls.py | taras-ua/diplomawork | c7540d5c674194ec0da965be18bd3becaf69d5f3 | [
"Apache-2.0"
] | null | null | null | diploma/urls.py | taras-ua/diplomawork | c7540d5c674194ec0da965be18bd3becaf69d5f3 | [
"Apache-2.0"
] | null | null | null | from django.conf import settings
from django.conf.urls import patterns, url
# from django.contrib import admin
# Legacy Django (<1.8) URL configuration using `patterns()` with dotted-string views.
urlpatterns = patterns('',
    # Landing page.
    url(r'^$', 'app.views.home', name='home'),
    # Graph visualisation page.
    url(r'^graph/$', 'app.views.graph', name='graph'),
    # Serve static assets through Django itself — development convenience only.
    url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
    # url(r'^admin/', include(admin.site.urls)),
) | 36.090909 | 104 | 0.662469 | from django.conf import settings
from django.conf.urls import patterns, url
urlpatterns = patterns('',
url(r'^$', 'app.views.home', name='home'),
url(r'^graph/$', 'app.views.graph', name='graph'),
url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
) | true | true |
f71b01e05fff8dd74f109eed0f8a0d197aea5341 | 2,498 | py | Python | datasets/readers/ccpd.py | ckxy/part-of-hitogata | 76402d48a336fcd964d0e64bb01d959e8f07f296 | [
"MIT"
] | null | null | null | datasets/readers/ccpd.py | ckxy/part-of-hitogata | 76402d48a336fcd964d0e64bb01d959e8f07f296 | [
"MIT"
] | null | null | null | datasets/readers/ccpd.py | ckxy/part-of-hitogata | 76402d48a336fcd964d0e64bb01d959e8f07f296 | [
"MIT"
] | null | null | null | import os
import numpy as np
from addict import Dict
from PIL import Image
from .reader import Reader
from .builder import READER
__all__ = ['CCPD2019FolderReader']
@READER.register_module()
class CCPD2019FolderReader(Reader):
def __init__(self, root, **kwargs):
super(CCPD2019FolderReader, self).__init__(**kwargs)
self.root = root
self.chars = ('京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
'苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
'桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
'新',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z', 'I', 'O', '-')
self.img_paths = sorted(os.listdir(kwargs['root']))
assert len(self.img_paths) > 0
def get_dataset_info(self):
return range(len(self.img_paths)), Dict({'chars': self.chars})
def get_data_info(self, index):
img = Image.open(self.img_paths[index][0])
w, h = img.size
return dict(h=h, w=w)
def __call__(self, index):
# index = data_dict
# img = Image.open(os.path.join(self.root, self.img_paths[index])).convert('RGB')
img = self.read_image(os.path.join(self.root, self.img_paths[index]))
w, h = img.size
path = os.path.join(self.root, self.img_paths[index])
base_name = os.path.basename(self.img_paths[index])
img_name, suffix = os.path.splitext(base_name)
img_name = img_name.split("-")[0].split("_")[0]
# if len(img_name) == 8:
# print(path, 'a')
# if img_name[2] != 'D' and img_name[2] != 'F' and img_name[-1] != 'D' and img_name[-1] != 'F':
# print(path)
# raise ValueError
words = []
for c in img_name:
words.append(self.chars.index(c))
# return {'image': img, 'ori_size': np.array([h, w]).astype(np.float32), 'path': path, 'seq': words, 'seq_length': len(words)}
return dict(
image=img,
ori_size=np.array([h, w]).astype(np.float32),
path=path,
seq=words,
seq_length=len(words)
)
def __repr__(self):
return 'CCPD2019FolderReader(root={}, {})'.format(self.root, super(CCPD2019FolderReader, self).__repr__())
| 36.202899 | 134 | 0.502002 | import os
import numpy as np
from addict import Dict
from PIL import Image
from .reader import Reader
from .builder import READER
__all__ = ['CCPD2019FolderReader']
@READER.register_module()
class CCPD2019FolderReader(Reader):
def __init__(self, root, **kwargs):
super(CCPD2019FolderReader, self).__init__(**kwargs)
self.root = root
self.chars = ('京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
'苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
'桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
'新',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z', 'I', 'O', '-')
self.img_paths = sorted(os.listdir(kwargs['root']))
assert len(self.img_paths) > 0
def get_dataset_info(self):
return range(len(self.img_paths)), Dict({'chars': self.chars})
def get_data_info(self, index):
img = Image.open(self.img_paths[index][0])
w, h = img.size
return dict(h=h, w=w)
def __call__(self, index):
img = self.read_image(os.path.join(self.root, self.img_paths[index]))
w, h = img.size
path = os.path.join(self.root, self.img_paths[index])
base_name = os.path.basename(self.img_paths[index])
img_name, suffix = os.path.splitext(base_name)
img_name = img_name.split("-")[0].split("_")[0]
words = []
for c in img_name:
words.append(self.chars.index(c))
return dict(
image=img,
ori_size=np.array([h, w]).astype(np.float32),
path=path,
seq=words,
seq_length=len(words)
)
def __repr__(self):
return 'CCPD2019FolderReader(root={}, {})'.format(self.root, super(CCPD2019FolderReader, self).__repr__())
| true | true |
f71b0315edb287f8456c42472f645c6b7bd59cdc | 383 | py | Python | app/src/asgi.py | swelanauguste/kingship | d5c302b22383eb769d22f41e69e0c48e638aec92 | [
"MIT"
] | 5 | 2022-02-04T19:23:26.000Z | 2022-02-26T10:15:25.000Z | src/asgi.py | AnvarKhan/django-rest-api | b2f60bbd7ebcf0977dc13ceffd9a3a4f631a03ee | [
"Apache-2.0"
] | 1 | 2022-01-15T16:22:30.000Z | 2022-01-15T16:22:30.000Z | src/asgi.py | AnvarKhan/django-rest-api | b2f60bbd7ebcf0977dc13ceffd9a3a4f631a03ee | [
"Apache-2.0"
] | 1 | 2022-03-31T15:02:47.000Z | 2022-03-31T15:02:47.000Z | """
ASGI config for src project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application object is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'src.settings')
# Module-level ASGI callable that ASGI servers (daphne, uvicorn, ...) import.
application = get_asgi_application()
| 22.529412 | 78 | 0.780679 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'src.settings')
application = get_asgi_application()
| true | true |
f71b0388863894e2666b6a38ce7c0c7eb1c0da2c | 462 | py | Python | codemate/exceptions.py | DavidMeu/codemate | fcdc7591c8a1cd5922ddab1a3ec7a0dae37576c3 | [
"MIT"
] | null | null | null | codemate/exceptions.py | DavidMeu/codemate | fcdc7591c8a1cd5922ddab1a3ec7a0dae37576c3 | [
"MIT"
] | null | null | null | codemate/exceptions.py | DavidMeu/codemate | fcdc7591c8a1cd5922ddab1a3ec7a0dae37576c3 | [
"MIT"
] | null | null | null | import black
class GenerationError(Exception):
    """Base class for all errors raised while generating Python syntax."""
class PythonSyntaxError(GenerationError):
    """Raised when the generated Python syntax itself is invalid."""
class InputError(GenerationError, black.InvalidInput):
    """Raised when the generated Python code isn't valid according to black."""
class SaveFileError(GenerationError, OSError):
    """Raised when the generated Python code file can't be created on disk."""
| 25.666667 | 69 | 0.757576 | import black
class GenerationError(Exception):
class PythonSyntaxError(GenerationError):
class InputError(GenerationError, black.InvalidInput):
class SaveFileError(GenerationError, OSError):
| true | true |
f71b0396126fe3f7506f8a4954c971dd92305753 | 7,390 | py | Python | rlkit/samplers/data_collector/path_collector.py | YeeCY/PASF | 95e548d365ea5da482c56408539d9a1514ef246b | [
"MIT"
] | 4 | 2021-12-23T20:55:52.000Z | 2022-03-14T04:57:02.000Z | rlkit/samplers/data_collector/path_collector.py | YeeCY/PASF | 95e548d365ea5da482c56408539d9a1514ef246b | [
"MIT"
] | null | null | null | rlkit/samplers/data_collector/path_collector.py | YeeCY/PASF | 95e548d365ea5da482c56408539d9a1514ef246b | [
"MIT"
] | 1 | 2022-01-14T01:32:04.000Z | 2022-01-14T01:32:04.000Z | from collections import deque, OrderedDict
from functools import partial
import numpy as np
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.samplers.data_collector.base import PathCollector
from rlkit.samplers.rollout_functions import rollout
class ActionAgent():
    """Replays a pre-recorded action sequence, one action per call.

    Used in place of a policy when re-executing fixed action sequences.
    """
    def __init__(self):
        # No sequence loaded yet; playback cursor at the start.
        self._actions = None
        self._step = 0
    def reset(self):
        """Rewind playback to the first action (keeps the loaded sequence)."""
        self._step = 0
    def set_action(self, actions):
        """Load the sequence of actions to replay."""
        self._actions = actions
    def get_action(self, *args, **kwargs):
        """Return the next recorded action plus an empty agent-info value."""
        current = self._step
        self._step = current + 1
        return self._actions[current], []
class MdpPathCollector(PathCollector):
    """Collects rollouts from an environment with a given policy, keeping a
    bounded buffer of the current epoch's paths plus lifetime counters."""
    def __init__(
            self,
            env,
            policy,
            max_num_epoch_paths_saved=None,
            render=False,
            render_kwargs=None,
            rollout_fn=rollout,
            save_env_in_snapshot=True,
    ):
        """
        :param env: environment to roll out in
        :param policy: agent used to select actions
        :param max_num_epoch_paths_saved: cap on paths kept per epoch
            (None keeps all of them)
        :param render: whether to render during rollouts
        :param render_kwargs: kwargs forwarded to the renderer
        :param rollout_fn: function that performs a single rollout
        :param save_env_in_snapshot: whether get_snapshot() includes the env
        """
        if render_kwargs is None:
            render_kwargs = {}
        self._env = env
        self._policy = policy
        self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
        # Bounded buffer of the paths gathered during the current epoch.
        self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
        self._render = render
        self._render_kwargs = render_kwargs
        self._rollout_fn = rollout_fn
        # Replay agent used by collect_aligned_paths to re-execute fixed
        # action sequences.
        self._action_agent = ActionAgent()
        # Lifetime counters; intentionally NOT reset in end_epoch().
        self._num_steps_total = 0
        self._num_paths_total = 0
        self._save_env_in_snapshot = save_env_in_snapshot
    def collect_new_paths(
            self,
            max_path_length,
            num_steps,
            discard_incomplete_paths,
    ):
        """Roll out the policy until roughly ``num_steps`` env steps are taken.
        A path truncated by the remaining step budget (rather than by
        ``max_path_length`` or a terminal) is dropped when
        ``discard_incomplete_paths`` is True, and collection stops.
        Collected paths are appended to the epoch buffer and returned.
        """
        paths = []
        num_steps_collected = 0
        while num_steps_collected < num_steps:
            max_path_length_this_loop = min(  # Do not go over num_steps
                max_path_length,
                num_steps - num_steps_collected,
            )
            path = self._rollout_fn(
                self._env,
                self._policy,
                max_path_length=max_path_length_this_loop,
                render=self._render,
                render_kwargs=self._render_kwargs,
            )
            path_len = len(path['actions'])
            # Short AND non-terminal means the step budget cut this path off.
            if (
                    path_len != max_path_length
                    and not path['terminals'][-1]
                    and discard_incomplete_paths
            ):
                break
            num_steps_collected += path_len
            paths.append(path)
        self._num_paths_total += len(paths)
        self._num_steps_total += num_steps_collected
        self._epoch_paths.extend(paths)
        return paths
    def collect_aligned_paths(self, path_actions, discard_incomplete_paths=True):
        """Replay the given action sequences in the environment, one path per
        sequence. Unlike collect_new_paths, the resulting paths are NOT added
        to the epoch buffer (only the lifetime counters are updated).
        :param path_actions: list of action sequences to replay
        """
        paths = []
        num_steps_collected = 0
        for p in path_actions:
            max_path_length = len(p)
            self._action_agent.set_action(p)
            path = self._rollout_fn(
                self._env,
                self._action_agent,
                max_path_length=max_path_length,
                render=self._render,
                render_kwargs=self._render_kwargs,
            )
            path_len = len(path['actions'])
            if (
                    path_len != max_path_length
                    and not path['terminals'][-1]
                    and discard_incomplete_paths
            ):
                break
            num_steps_collected += path_len
            paths.append(path)
        self._num_paths_total += len(paths)
        self._num_steps_total += num_steps_collected
        return paths
    def get_epoch_paths(self):
        """Return the (bounded) deque of paths collected this epoch."""
        return self._epoch_paths
    def end_epoch(self, epoch):
        # Drop the per-epoch buffer; lifetime counters are kept.
        self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
    def get_diagnostics(self):
        """Return an OrderedDict of lifetime counters and path-length stats."""
        path_lens = [len(path['actions']) for path in self._epoch_paths]
        stats = OrderedDict([
            ('num steps total', self._num_steps_total),
            ('num paths total', self._num_paths_total),
        ])
        stats.update(create_stats_ordered_dict(
            "path length",
            path_lens,
            always_show_all_stats=True,
        ))
        return stats
    def get_snapshot(self):
        """Return the state needed to resume collection (policy, counters,
        and optionally the env)."""
        snapshot_dict = dict(
            policy=self._policy,
            num_steps_total=self._num_steps_total,
            num_paths_total=self._num_paths_total,
        )
        if self._save_env_in_snapshot:
            snapshot_dict['env'] = self._env
        return snapshot_dict
    def load_from_snapshot(self, snapshot):
        """Restore policy, counters and (optionally) the env from a snapshot."""
        self._policy = snapshot['policy']
        self._num_steps_total = snapshot['num_steps_total']
        self._num_paths_total = snapshot['num_paths_total']
        if self._save_env_in_snapshot:
            assert 'env' in snapshot
            # Carry over the current env's custom goal sampler — presumably it
            # is not preserved in the snapshotted env (TODO confirm).
            if hasattr(self._env, '_custom_goal_sampler'):
                snapshot['env']._custom_goal_sampler = self._env._custom_goal_sampler
            self._env = snapshot['env']
class GoalConditionedPathCollector(MdpPathCollector):
    """Path collector for goal-conditioned (dict-observation) environments:
    the policy input is the observation concatenated with the desired goal."""
    def __init__(
            self,
            *args,
            observation_key='observation',
            desired_goal_key='desired_goal',
            goal_sampling_mode=None,
            **kwargs
    ):
        # The policy sees [observation, desired_goal] stacked into one array.
        def obs_processor(o):
            return np.hstack((o[observation_key], o[desired_goal_key]))
        rollout_fn = partial(
            rollout,
            preprocess_obs_for_policy_fn=obs_processor,
        )
        super().__init__(*args, rollout_fn=rollout_fn, **kwargs)
        self._observation_key = observation_key
        self._desired_goal_key = desired_goal_key
        self._goal_sampling_mode = goal_sampling_mode
    def collect_new_paths(self, *args, **kwargs):
        # Configure how the env samples goals before every collection run.
        self._env.goal_sampling_mode = self._goal_sampling_mode
        return super().collect_new_paths(*args, **kwargs)
    def get_snapshot(self):
        """Extend the base snapshot with the observation/goal dict keys."""
        snapshot = super().get_snapshot()
        snapshot.update(
            observation_key=self._observation_key,
            desired_goal_key=self._desired_goal_key,
        )
        return snapshot
    def load_from_snapshot(self, snapshot):
        """Restore base state plus the observation/goal dict keys."""
        super().load_from_snapshot(snapshot)
        self._observation_key = snapshot['observation_key']
        self._desired_goal_key = snapshot['desired_goal_key']
class ObsDictPathCollector(MdpPathCollector):
    """Path collector for dict observations: the policy only receives the
    entry stored under ``observation_key``."""
    def __init__(
            self,
            *args,
            observation_key='observation',
            **kwargs
    ):
        # Strip the observation dict down to the single array the policy expects.
        def extract_observation(obs):
            return obs[observation_key]
        super().__init__(
            *args,
            rollout_fn=partial(
                rollout,
                preprocess_obs_for_policy_fn=extract_observation,
            ),
            **kwargs
        )
        self._observation_key = observation_key
    def get_snapshot(self):
        """Extend the base snapshot with the observation key."""
        snapshot = super().get_snapshot()
        snapshot['observation_key'] = self._observation_key
        return snapshot
class VAEWrappedEnvPathCollector(GoalConditionedPathCollector):
    """Goal-conditioned collector for a VAE-wrapped env; toggles whether the
    env decodes (latent) goals during collection."""
    def __init__(
            self,
            env,
            policy,
            decode_goals=False,
            **kwargs
    ):
        """Expects env is VAEWrappedEnv"""
        super().__init__(env, policy, **kwargs)
        self._decode_goals = decode_goals
    def collect_new_paths(self, *args, **kwargs):
        # Propagate the decode setting to the env before each collection run.
        self._env.decode_goals = self._decode_goals
        return super().collect_new_paths(*args, **kwargs)
| 31.446809 | 85 | 0.600406 | from collections import deque, OrderedDict
from functools import partial
import numpy as np
from rlkit.core.eval_util import create_stats_ordered_dict
from rlkit.samplers.data_collector.base import PathCollector
from rlkit.samplers.rollout_functions import rollout
class ActionAgent():
def __init__(self):
self._actions = None
self._step = 0
def reset(self):
self._step = 0
def set_action(self, actions):
self._actions = actions
def get_action(self, *args, **kwargs):
action = self._actions[self._step]
self._step += 1
return action, []
class MdpPathCollector(PathCollector):
def __init__(
self,
env,
policy,
max_num_epoch_paths_saved=None,
render=False,
render_kwargs=None,
rollout_fn=rollout,
save_env_in_snapshot=True,
):
if render_kwargs is None:
render_kwargs = {}
self._env = env
self._policy = policy
self._max_num_epoch_paths_saved = max_num_epoch_paths_saved
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
self._render = render
self._render_kwargs = render_kwargs
self._rollout_fn = rollout_fn
self._action_agent = ActionAgent()
self._num_steps_total = 0
self._num_paths_total = 0
self._save_env_in_snapshot = save_env_in_snapshot
def collect_new_paths(
self,
max_path_length,
num_steps,
discard_incomplete_paths,
):
paths = []
num_steps_collected = 0
while num_steps_collected < num_steps:
max_path_length_this_loop = min(
max_path_length,
num_steps - num_steps_collected,
)
path = self._rollout_fn(
self._env,
self._policy,
max_path_length=max_path_length_this_loop,
render=self._render,
render_kwargs=self._render_kwargs,
)
path_len = len(path['actions'])
if (
path_len != max_path_length
and not path['terminals'][-1]
and discard_incomplete_paths
):
break
num_steps_collected += path_len
paths.append(path)
self._num_paths_total += len(paths)
self._num_steps_total += num_steps_collected
self._epoch_paths.extend(paths)
return paths
def collect_aligned_paths(self, path_actions, discard_incomplete_paths=True):
paths = []
num_steps_collected = 0
for p in path_actions:
max_path_length = len(p)
self._action_agent.set_action(p)
path = self._rollout_fn(
self._env,
self._action_agent,
max_path_length=max_path_length,
render=self._render,
render_kwargs=self._render_kwargs,
)
path_len = len(path['actions'])
if (
path_len != max_path_length
and not path['terminals'][-1]
and discard_incomplete_paths
):
break
num_steps_collected += path_len
paths.append(path)
self._num_paths_total += len(paths)
self._num_steps_total += num_steps_collected
return paths
def get_epoch_paths(self):
return self._epoch_paths
def end_epoch(self, epoch):
self._epoch_paths = deque(maxlen=self._max_num_epoch_paths_saved)
def get_diagnostics(self):
path_lens = [len(path['actions']) for path in self._epoch_paths]
stats = OrderedDict([
('num steps total', self._num_steps_total),
('num paths total', self._num_paths_total),
])
stats.update(create_stats_ordered_dict(
"path length",
path_lens,
always_show_all_stats=True,
))
return stats
def get_snapshot(self):
snapshot_dict = dict(
policy=self._policy,
num_steps_total=self._num_steps_total,
num_paths_total=self._num_paths_total,
)
if self._save_env_in_snapshot:
snapshot_dict['env'] = self._env
return snapshot_dict
def load_from_snapshot(self, snapshot):
self._policy = snapshot['policy']
self._num_steps_total = snapshot['num_steps_total']
self._num_paths_total = snapshot['num_paths_total']
if self._save_env_in_snapshot:
assert 'env' in snapshot
if hasattr(self._env, '_custom_goal_sampler'):
snapshot['env']._custom_goal_sampler = self._env._custom_goal_sampler
self._env = snapshot['env']
class GoalConditionedPathCollector(MdpPathCollector):
def __init__(
self,
*args,
observation_key='observation',
desired_goal_key='desired_goal',
goal_sampling_mode=None,
**kwargs
):
def obs_processor(o):
return np.hstack((o[observation_key], o[desired_goal_key]))
rollout_fn = partial(
rollout,
preprocess_obs_for_policy_fn=obs_processor,
)
super().__init__(*args, rollout_fn=rollout_fn, **kwargs)
self._observation_key = observation_key
self._desired_goal_key = desired_goal_key
self._goal_sampling_mode = goal_sampling_mode
def collect_new_paths(self, *args, **kwargs):
self._env.goal_sampling_mode = self._goal_sampling_mode
return super().collect_new_paths(*args, **kwargs)
def get_snapshot(self):
snapshot = super().get_snapshot()
snapshot.update(
observation_key=self._observation_key,
desired_goal_key=self._desired_goal_key,
)
return snapshot
def load_from_snapshot(self, snapshot):
super().load_from_snapshot(snapshot)
self._observation_key = snapshot['observation_key']
self._desired_goal_key = snapshot['desired_goal_key']
class ObsDictPathCollector(MdpPathCollector):
def __init__(
self,
*args,
observation_key='observation',
**kwargs
):
def obs_processor(obs):
return obs[observation_key]
rollout_fn = partial(
rollout,
preprocess_obs_for_policy_fn=obs_processor,
)
super().__init__(*args, rollout_fn=rollout_fn, **kwargs)
self._observation_key = observation_key
def get_snapshot(self):
snapshot = super().get_snapshot()
snapshot.update(
observation_key=self._observation_key,
)
return snapshot
class VAEWrappedEnvPathCollector(GoalConditionedPathCollector):
def __init__(
self,
env,
policy,
decode_goals=False,
**kwargs
):
super().__init__(env, policy, **kwargs)
self._decode_goals = decode_goals
def collect_new_paths(self, *args, **kwargs):
self._env.decode_goals = self._decode_goals
return super().collect_new_paths(*args, **kwargs)
| true | true |
f71b041f1c1924df958e173865289e1f39ee38d2 | 1,367 | py | Python | azure/mgmt/network/v2017_06_01/models/effective_network_security_group_list_result.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2017_06_01/models/effective_network_security_group_list_result.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2017_06_01/models/effective_network_security_group_list_result.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EffectiveNetworkSecurityGroupListResult(Model):
    """Response for list effective network security groups API service call.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param value: A list of effective network security groups.
    :type value:
    list[~azure.mgmt.network.v2017_06_01.models.EffectiveNetworkSecurityGroup]
    :ivar next_link: The URL to get the next set of results.
    :vartype next_link: str
    """
    # msrest serialization metadata: next_link is read-only (server-populated)
    # and is never serialized into outgoing requests.
    _validation = {
        'next_link': {'readonly': True},
    }
    # Maps Python attribute names to wire (JSON) keys and msrest type strings.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[EffectiveNetworkSecurityGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(self, value=None):
        self.value = value
        # Read-only; populated by the service when responses are deserialized.
        self.next_link = None
| 34.175 | 80 | 0.597659 |
from msrest.serialization import Model
class EffectiveNetworkSecurityGroupListResult(Model):
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[EffectiveNetworkSecurityGroup]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, value=None):
self.value = value
self.next_link = None
| true | true |
f71b05b697f301dc72ecb335f3b5caf31b92323a | 1,766 | py | Python | plaidrl/exploration_strategies/ou_strategy.py | charliec443/plaid-rl | 2e8fbf389af9efecd41361df80e40e0bf932056d | [
"MIT"
] | null | null | null | plaidrl/exploration_strategies/ou_strategy.py | charliec443/plaid-rl | 2e8fbf389af9efecd41361df80e40e0bf932056d | [
"MIT"
] | null | null | null | plaidrl/exploration_strategies/ou_strategy.py | charliec443/plaid-rl | 2e8fbf389af9efecd41361df80e40e0bf932056d | [
"MIT"
] | null | null | null | import numpy as np
import numpy.random as nr
from plaidrl.exploration_strategies.base import RawExplorationStrategy
class OUStrategy(RawExplorationStrategy):
    """
    This strategy implements the Ornstein-Uhlenbeck process, which adds
    time-correlated noise to the actions taken by the deterministic policy.
    The OU process satisfies the following stochastic differential equation:
    dxt = theta*(mu - xt)*dt + sigma*dWt
    where Wt denotes the Wiener process
    Based on the rllab implementation.
    """
    def __init__(
        self,
        action_space,
        mu=0,
        theta=0.15,
        max_sigma=0.3,
        min_sigma=None,
        decay_period=100000,
    ):
        """
        :param action_space: space with ``low``/``high`` bound arrays
        :param mu: long-run mean of the OU process
        :param theta: mean-reversion rate
        :param max_sigma: initial noise scale
        :param min_sigma: final noise scale after annealing
            (None keeps sigma constant at max_sigma)
        :param decay_period: number of steps over which sigma is annealed
        """
        # Fix: the original repeated this None-check twice; once is enough.
        if min_sigma is None:
            min_sigma = max_sigma
        self.mu = mu
        self.theta = theta
        self.sigma = max_sigma
        self._max_sigma = max_sigma
        self._min_sigma = min_sigma
        self._decay_period = decay_period
        self.dim = np.prod(action_space.low.shape)
        self.low = action_space.low
        self.high = action_space.high
        # reset() initializes self.state; the original also assigned it
        # directly here, which was redundant.
        self.reset()
    def reset(self):
        """Reset the internal OU state to the long-run mean ``mu``."""
        self.state = np.ones(self.dim) * self.mu
    def evolve_state(self):
        """Advance the OU process by one step and return the new state."""
        x = self.state
        dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))
        self.state = x + dx
        return self.state
    def get_action_from_raw_action(self, action, t=0, **kwargs):
        """Add OU noise to ``action`` and clip to the action-space bounds.
        ``sigma`` is linearly annealed from max_sigma to min_sigma over
        ``decay_period`` steps, indexed by ``t``.
        """
        ou_state = self.evolve_state()
        self.sigma = self._max_sigma - (self._max_sigma - self._min_sigma) * min(
            1.0, t * 1.0 / self._decay_period
        )
        return np.clip(action + ou_state, self.low, self.high)
| 30.448276 | 81 | 0.624575 | import numpy as np
import numpy.random as nr
from plaidrl.exploration_strategies.base import RawExplorationStrategy
class OUStrategy(RawExplorationStrategy):
def __init__(
self,
action_space,
mu=0,
theta=0.15,
max_sigma=0.3,
min_sigma=None,
decay_period=100000,
):
if min_sigma is None:
min_sigma = max_sigma
self.mu = mu
self.theta = theta
self.sigma = max_sigma
self._max_sigma = max_sigma
if min_sigma is None:
min_sigma = max_sigma
self._min_sigma = min_sigma
self._decay_period = decay_period
self.dim = np.prod(action_space.low.shape)
self.low = action_space.low
self.high = action_space.high
self.state = np.ones(self.dim) * self.mu
self.reset()
def reset(self):
self.state = np.ones(self.dim) * self.mu
def evolve_state(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * nr.randn(len(x))
self.state = x + dx
return self.state
def get_action_from_raw_action(self, action, t=0, **kwargs):
ou_state = self.evolve_state()
self.sigma = self._max_sigma - (self._max_sigma - self._min_sigma) * min(
1.0, t * 1.0 / self._decay_period
)
return np.clip(action + ou_state, self.low, self.high)
| true | true |
f71b05f85e4726833080015f2927cdaf291362a9 | 5,561 | py | Python | backend/api/tests/expected_data.py | INSRapperswil/nornir-web | 458e6b24bc373197044b4b7b5da74f16f93a9459 | [
"MIT"
] | 2 | 2021-06-01T08:33:04.000Z | 2021-08-20T04:22:39.000Z | backend/api/tests/expected_data.py | INSRapperswil/nornir-web | 458e6b24bc373197044b4b7b5da74f16f93a9459 | [
"MIT"
] | null | null | null | backend/api/tests/expected_data.py | INSRapperswil/nornir-web | 458e6b24bc373197044b4b7b5da74f16f93a9459 | [
"MIT"
] | null | null | null | expected_inventory_list = {
'count': 2,
'results': [{'detail': 'http://testserver/api/inventories/1/',
'groups_file': 'web_nornir/nornir_config/example_config/groups.yaml',
'hosts_file': 'web_nornir/nornir_config/example_config/hosts.yaml',
'id': 1,
'name': 'Example',
'type': 1},
{'detail': 'http://testserver/api/inventories/2/',
'groups_file': 'web_nornir/nornir_config/inslab_config/groups.yaml',
'hosts_file': 'web_nornir/nornir_config/inslab_config/hosts.yaml',
'id': 2,
'name': 'INS Lab',
'type': 1}],
}
expected_jobtemplate_list = {
'count': 5,
'next': None,
'previous': None,
'results': [{'created_by': 1,
'created_name': 'thomastest',
'description': 'This prints a hello world',
'detail': 'http://testserver/api/templates/1/',
'file_name': 'hello_world.py',
'function_name': 'job_function',
'id': 1,
'name': 'hello_world',
'package_path': '/web_nornir/job_templates/',
'variables': []},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Lists all CDP neighbors',
'detail': 'http://testserver/api/templates/2/',
'file_name': 'get_cdp_neighbors.py',
'function_name': 'job_function',
'id': 2,
'name': 'Get CDP Neighbors',
'package_path': '/web_nornir/job_templates/',
'variables': []},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Gets brief information about all interfaces, sh '
'ip int br',
'detail': 'http://testserver/api/templates/3/',
'file_name': 'get_interfaces.py',
'function_name': 'job_function',
'id': 3,
'name': 'Get Interfaces',
'package_path': '/web_nornir/job_templates/',
'variables': []},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Pings a chosen network device and reports if '
'reachable',
'detail': 'http://testserver/api/templates/4/',
'file_name': 'ping.py',
'function_name': 'job_function',
'id': 4,
'name': 'Ping Device',
'package_path': '/web_nornir/job_templates/',
'variables': ['target']},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Gets all configuration from device',
'detail': 'http://testserver/api/templates/5/',
'file_name': 'get_configuration.py',
'function_name': 'job_function',
'id': 5,
'name': 'Get Configuration',
'package_path': '/web_nornir/job_templates/',
'variables': []},
]
}
expected_task_list = {
'count': 3,
'next': None,
'previous': None,
'results': [{'created_by': 2,
'created_name': 'norbert',
'date_finished': None,
'date_scheduled': None,
'date_started': None,
'detail': 'http://testserver/api/tasks/3/',
'filters': {},
'id': 3,
'inventory': 2,
'inventory_name': 'INS Lab',
'name': 'Get interfaces of INS lab',
'result': {},
'status': 0,
'template': 3,
'template_name': 'Get Interfaces',
'is_template': False,
'variables': {}},
{'created_by': 2,
'created_name': 'norbert',
'date_finished': None,
'date_scheduled': None,
'date_started': None,
'detail': 'http://testserver/api/tasks/2/',
'filters': {},
'id': 2,
'inventory': 2,
'inventory_name': 'INS Lab',
'name': 'Get CDP neighbors of INS lab',
'result': {},
'status': 0,
'template': 2,
'template_name': 'Get CDP Neighbors',
'is_template': False,
'variables': {}},
{'created_by': 1,
'created_name': 'thomastest',
'date_finished': None,
'date_scheduled': None,
'date_started': None,
'detail': 'http://testserver/api/tasks/1/',
'filters': {},
'id': 1,
'inventory': 1,
'inventory_name': 'Example',
'name': 'Get Hello World',
'result': {},
'status': 0,
'template': 1,
'template_name': 'hello_world',
'is_template': False,
'variables': {}}
],
}
| 41.81203 | 87 | 0.420428 | expected_inventory_list = {
'count': 2,
'results': [{'detail': 'http://testserver/api/inventories/1/',
'groups_file': 'web_nornir/nornir_config/example_config/groups.yaml',
'hosts_file': 'web_nornir/nornir_config/example_config/hosts.yaml',
'id': 1,
'name': 'Example',
'type': 1},
{'detail': 'http://testserver/api/inventories/2/',
'groups_file': 'web_nornir/nornir_config/inslab_config/groups.yaml',
'hosts_file': 'web_nornir/nornir_config/inslab_config/hosts.yaml',
'id': 2,
'name': 'INS Lab',
'type': 1}],
}
expected_jobtemplate_list = {
'count': 5,
'next': None,
'previous': None,
'results': [{'created_by': 1,
'created_name': 'thomastest',
'description': 'This prints a hello world',
'detail': 'http://testserver/api/templates/1/',
'file_name': 'hello_world.py',
'function_name': 'job_function',
'id': 1,
'name': 'hello_world',
'package_path': '/web_nornir/job_templates/',
'variables': []},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Lists all CDP neighbors',
'detail': 'http://testserver/api/templates/2/',
'file_name': 'get_cdp_neighbors.py',
'function_name': 'job_function',
'id': 2,
'name': 'Get CDP Neighbors',
'package_path': '/web_nornir/job_templates/',
'variables': []},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Gets brief information about all interfaces, sh '
'ip int br',
'detail': 'http://testserver/api/templates/3/',
'file_name': 'get_interfaces.py',
'function_name': 'job_function',
'id': 3,
'name': 'Get Interfaces',
'package_path': '/web_nornir/job_templates/',
'variables': []},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Pings a chosen network device and reports if '
'reachable',
'detail': 'http://testserver/api/templates/4/',
'file_name': 'ping.py',
'function_name': 'job_function',
'id': 4,
'name': 'Ping Device',
'package_path': '/web_nornir/job_templates/',
'variables': ['target']},
{'created_by': 1,
'created_name': 'thomastest',
'description': 'Gets all configuration from device',
'detail': 'http://testserver/api/templates/5/',
'file_name': 'get_configuration.py',
'function_name': 'job_function',
'id': 5,
'name': 'Get Configuration',
'package_path': '/web_nornir/job_templates/',
'variables': []},
]
}
expected_task_list = {
'count': 3,
'next': None,
'previous': None,
'results': [{'created_by': 2,
'created_name': 'norbert',
'date_finished': None,
'date_scheduled': None,
'date_started': None,
'detail': 'http://testserver/api/tasks/3/',
'filters': {},
'id': 3,
'inventory': 2,
'inventory_name': 'INS Lab',
'name': 'Get interfaces of INS lab',
'result': {},
'status': 0,
'template': 3,
'template_name': 'Get Interfaces',
'is_template': False,
'variables': {}},
{'created_by': 2,
'created_name': 'norbert',
'date_finished': None,
'date_scheduled': None,
'date_started': None,
'detail': 'http://testserver/api/tasks/2/',
'filters': {},
'id': 2,
'inventory': 2,
'inventory_name': 'INS Lab',
'name': 'Get CDP neighbors of INS lab',
'result': {},
'status': 0,
'template': 2,
'template_name': 'Get CDP Neighbors',
'is_template': False,
'variables': {}},
{'created_by': 1,
'created_name': 'thomastest',
'date_finished': None,
'date_scheduled': None,
'date_started': None,
'detail': 'http://testserver/api/tasks/1/',
'filters': {},
'id': 1,
'inventory': 1,
'inventory_name': 'Example',
'name': 'Get Hello World',
'result': {},
'status': 0,
'template': 1,
'template_name': 'hello_world',
'is_template': False,
'variables': {}}
],
}
| true | true |
f71b0609c2bed09adba0e74d664508aaf13cf106 | 506 | py | Python | data/scripts/templates/object/tangible/item/quest/force_sensitive/shared_fs_craft_puzzle_decryption_chip.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/item/quest/force_sensitive/shared_fs_craft_puzzle_decryption_chip.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/item/quest/force_sensitive/shared_fs_craft_puzzle_decryption_chip.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build and return the Tangible template for the force-sensitive
	craft-puzzle decryption chip quest item (autogenerated SWG template)."""
	result = Tangible()
	result.template = "object/tangible/item/quest/force_sensitive/shared_fs_craft_puzzle_decryption_chip.iff"
	# -1 means no attribute template is attached to this object.
	result.attribute_template_id = -1
	result.stfName("quest_item_n","fs_craft_puzzle_decryption_chip")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
f71b0616df00ef53eb05436aa01e9020ee471bc6 | 7,155 | py | Python | examples/python-guide/advanced_example.py | sdwivedi/LightGBM | f5ec54fbaca8bd5f72cdecbf755216c6278aafe3 | [
"MIT"
] | 3 | 2020-04-01T15:31:10.000Z | 2020-04-13T12:30:37.000Z | examples/python-guide/advanced_example.py | sdwivedi/LightGBM | f5ec54fbaca8bd5f72cdecbf755216c6278aafe3 | [
"MIT"
] | 1 | 2020-09-01T03:42:10.000Z | 2020-09-01T03:42:10.000Z | examples/python-guide/advanced_example.py | sdwivedi/LightGBM | f5ec54fbaca8bd5f72cdecbf755216c6278aafe3 | [
"MIT"
] | 7 | 2021-04-20T09:27:54.000Z | 2022-03-07T11:41:38.000Z | # coding: utf-8
import json
import lightgbm as lgb
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
try:
import cPickle as pickle
except BaseException:
import pickle
print('Loading data...')
# load or create your dataset
df_train = pd.read_csv('../binary_classification/binary.train', header=None, sep='\t')
df_test = pd.read_csv('../binary_classification/binary.test', header=None, sep='\t')
W_train = pd.read_csv('../binary_classification/binary.train.weight', header=None)[0]
W_test = pd.read_csv('../binary_classification/binary.test.weight', header=None)[0]
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
num_train, num_feature = X_train.shape
# create dataset for lightgbm
# if you want to re-use data, remember to set free_raw_data=False
lgb_train = lgb.Dataset(X_train, y_train,
weight=W_train, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train,
weight=W_test, free_raw_data=False)
# specify your configurations as a dict
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
# generate feature names
feature_name = ['feature_' + str(col) for col in range(num_feature)]
print('Starting training...')
# feature_name and categorical_feature
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_train, # eval training data
feature_name=feature_name,
categorical_feature=[21])
print('Finished first 10 rounds...')
# check feature name
print('7th feature name is:', lgb_train.feature_name[6])
print('Saving model...')
# save model to file
gbm.save_model('model.txt')
print('Dumping model to JSON...')
# dump model to JSON (and save to file)
model_json = gbm.dump_model()
with open('model.json', 'w+') as f:
json.dump(model_json, f, indent=4)
# feature names
print('Feature names:', gbm.feature_name())
# feature importances
print('Feature importances:', list(gbm.feature_importance()))
print('Loading model to predict...')
# load model to predict
bst = lgb.Booster(model_file='model.txt')
# can only predict with the best iteration (or the saving iteration)
y_pred = bst.predict(X_test)
# eval with loaded model
print("The rmse of loaded model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5)
print('Dumping and loading model with pickle...')
# dump model with pickle
with open('model.pkl', 'wb') as fout:
pickle.dump(gbm, fout)
# load model with pickle to predict
with open('model.pkl', 'rb') as fin:
pkl_bst = pickle.load(fin)
# can predict with any iteration when loaded in pickle way
y_pred = pkl_bst.predict(X_test, num_iteration=7)
# eval with loaded model
print("The rmse of pickled model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5)
# continue training
# init_model accepts:
# 1. model file name
# 2. Booster()
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model='model.txt',
valid_sets=lgb_eval)
print('Finished 10 - 20 rounds with model file...')
# decay learning rates
# learning_rates accepts:
# 1. list/tuple with length = num_boost_round
# 2. function(curr_iter)
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
learning_rates=lambda iter: 0.05 * (0.99 ** iter),
valid_sets=lgb_eval)
print('Finished 20 - 30 rounds with decay learning rates...')
# change other parameters during training
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
valid_sets=lgb_eval,
callbacks=[lgb.reset_parameter(bagging_fraction=[0.7] * 5 + [0.6] * 5)])
print('Finished 30 - 40 rounds with changing bagging_fraction...')
# self-defined objective function
# f(preds: array, train_data: Dataset) -> grad: array, hess: array
# log likelihood loss
def loglikelihood(preds, train_data):
    """Custom binary log-likelihood objective for LightGBM.

    LightGBM hands raw margin scores to a custom objective, so they are
    mapped to probabilities with the logistic function first; the returned
    pair is (gradient, hessian) of the log loss w.r.t. the raw scores.
    """
    y = train_data.get_label()
    prob = 1. / (1. + np.exp(-preds))
    return prob - y, prob * (1. - prob)
# self-defined eval metric
# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool
# binary error
# NOTE: when you do customized loss function, the default prediction value is margin
# This may make built-in evalution metric calculate wrong results
# For example, we are doing log likelihood loss, the prediction is score before logistic transformation
# Keep this in mind when you use the customization
def binary_error(preds, train_data):
    """Custom eval metric: binary error rate at a 0.5 probability threshold.

    Because a custom objective is in use, ``preds`` are raw margin scores;
    the logistic transform is applied before thresholding.
    Returns the LightGBM triple (name, value, is_higher_better).
    """
    y = train_data.get_label()
    predicted = (1. / (1. + np.exp(-preds))) > 0.5
    return 'error', np.mean(y != predicted), False
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
fobj=loglikelihood,
feval=binary_error,
valid_sets=lgb_eval)
print('Finished 40 - 50 rounds with self-defined objective function and eval metric...')
# another self-defined eval metric
# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool
# accuracy
# NOTE: when you do customized loss function, the default prediction value is margin
# This may make built-in evalution metric calculate wrong results
# For example, we are doing log likelihood loss, the prediction is score before logistic transformation
# Keep this in mind when you use the customization
def accuracy(preds, train_data):
    """Custom eval metric: classification accuracy at a 0.5 probability threshold.

    ``preds`` arrive as raw margin scores (custom objective in use), so they
    are squashed through the logistic function before comparison.
    Returns the LightGBM triple (name, value, is_higher_better).
    """
    y = train_data.get_label()
    hits = y == ((1. / (1. + np.exp(-preds))) > 0.5)
    return 'accuracy', np.mean(hits), True
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
fobj=loglikelihood,
feval=lambda preds, train_data: [binary_error(preds, train_data),
accuracy(preds, train_data)],
valid_sets=lgb_eval)
print('Finished 50 - 60 rounds with self-defined objective function '
'and multiple self-defined eval metrics...')
print('Starting a new training job...')
# callback
def reset_metrics():
    """Build a LightGBM callback that registers an extra validation set mid-run.

    The returned callable carries the ``before_iteration`` and ``order``
    attributes that LightGBM's callback machinery inspects; it attaches a
    freshly built eval Dataset under the name 'new_valid' at iteration 5.
    """
    def _add_valid_at_five(env):
        # Rebuilt every call; only attached once, 5 rounds into the run.
        fresh_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
        if env.iteration - env.begin_iteration == 5:
            print('Add a new valid dataset at iteration 5...')
            env.model.add_valid(fresh_eval, 'new_valid')
    _add_valid_at_five.before_iteration = True
    _add_valid_at_five.order = 0
    return _add_valid_at_five
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_train,
callbacks=[reset_metrics()])
print('Finished first 10 rounds with callback function...')
| 32.821101 | 103 | 0.665409 |
import json
import lightgbm as lgb
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
try:
import cPickle as pickle
except BaseException:
import pickle
print('Loading data...')
df_train = pd.read_csv('../binary_classification/binary.train', header=None, sep='\t')
df_test = pd.read_csv('../binary_classification/binary.test', header=None, sep='\t')
W_train = pd.read_csv('../binary_classification/binary.train.weight', header=None)[0]
W_test = pd.read_csv('../binary_classification/binary.test.weight', header=None)[0]
y_train = df_train[0]
y_test = df_test[0]
X_train = df_train.drop(0, axis=1)
X_test = df_test.drop(0, axis=1)
num_train, num_feature = X_train.shape
lgb_train = lgb.Dataset(X_train, y_train,
weight=W_train, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train,
weight=W_test, free_raw_data=False)
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
feature_name = ['feature_' + str(col) for col in range(num_feature)]
print('Starting training...')
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_train,
feature_name=feature_name,
categorical_feature=[21])
print('Finished first 10 rounds...')
print('7th feature name is:', lgb_train.feature_name[6])
print('Saving model...')
gbm.save_model('model.txt')
print('Dumping model to JSON...')
model_json = gbm.dump_model()
with open('model.json', 'w+') as f:
json.dump(model_json, f, indent=4)
print('Feature names:', gbm.feature_name())
print('Feature importances:', list(gbm.feature_importance()))
print('Loading model to predict...')
bst = lgb.Booster(model_file='model.txt')
y_pred = bst.predict(X_test)
print("The rmse of loaded model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5)
print('Dumping and loading model with pickle...')
# dump model with pickle
with open('model.pkl', 'wb') as fout:
pickle.dump(gbm, fout)
# load model with pickle to predict
with open('model.pkl', 'rb') as fin:
pkl_bst = pickle.load(fin)
# can predict with any iteration when loaded in pickle way
y_pred = pkl_bst.predict(X_test, num_iteration=7)
# eval with loaded model
print("The rmse of pickled model's prediction is:", mean_squared_error(y_test, y_pred) ** 0.5)
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model='model.txt',
valid_sets=lgb_eval)
print('Finished 10 - 20 rounds with model file...')
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
learning_rates=lambda iter: 0.05 * (0.99 ** iter),
valid_sets=lgb_eval)
print('Finished 20 - 30 rounds with decay learning rates...')
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
valid_sets=lgb_eval,
callbacks=[lgb.reset_parameter(bagging_fraction=[0.7] * 5 + [0.6] * 5)])
print('Finished 30 - 40 rounds with changing bagging_fraction...')
def loglikelihood(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
grad = preds - labels
hess = preds * (1. - preds)
return grad, hess
def binary_error(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
return 'error', np.mean(labels != (preds > 0.5)), False
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
fobj=loglikelihood,
feval=binary_error,
valid_sets=lgb_eval)
print('Finished 40 - 50 rounds with self-defined objective function and eval metric...')
def accuracy(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
return 'accuracy', np.mean(labels == (preds > 0.5)), True
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
fobj=loglikelihood,
feval=lambda preds, train_data: [binary_error(preds, train_data),
accuracy(preds, train_data)],
valid_sets=lgb_eval)
print('Finished 50 - 60 rounds with self-defined objective function '
'and multiple self-defined eval metrics...')
print('Starting a new training job...')
def reset_metrics():
def callback(env):
lgb_eval_new = lgb.Dataset(X_test, y_test, reference=lgb_train)
if env.iteration - env.begin_iteration == 5:
print('Add a new valid dataset at iteration 5...')
env.model.add_valid(lgb_eval_new, 'new_valid')
callback.before_iteration = True
callback.order = 0
return callback
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_train,
callbacks=[reset_metrics()])
print('Finished first 10 rounds with callback function...')
| true | true |
f71b069e135a883bde77ccb48cb42ece31feb8eb | 5,298 | py | Python | zerver/management/commands/backup.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 3 | 2019-02-03T20:46:55.000Z | 2019-03-04T15:44:28.000Z | zerver/management/commands/backup.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 10 | 2018-11-26T23:16:45.000Z | 2019-02-18T23:17:03.000Z | zerver/management/commands/backup.py | TylerPham2000/zulip | 2e7aaba0dde5517b4a55cb0bd782f009be45e3ba | [
"Apache-2.0"
] | 2 | 2021-07-02T14:15:24.000Z | 2021-08-16T12:31:49.000Z | import os
import re
import tempfile
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Any
from django.conf import settings
from django.db import connection
from django.utils.timezone import now as timezone_now
from scripts.lib.zulip_tools import TIMESTAMP_FORMAT, parse_os_release, run
from version import ZULIP_VERSION
from zerver.lib.management import ZulipBaseCommand
from zerver.logging_handlers import try_git_describe
class Command(ZulipBaseCommand):
    """Management command that writes a tarball backup of a Zulip deployment.

    The tarball bundles version metadata, the settings directory, a pg_dump
    of the database, and (when stored locally) the uploads directory.
    """

    # Fix support for multi-line usage strings
    def create_parser(self, *args: Any, **kwargs: Any) -> ArgumentParser:
        """Use a raw-text help formatter so multi-line usage renders as written."""
        parser = super().create_parser(*args, **kwargs)
        parser.formatter_class = RawTextHelpFormatter
        return parser

    def add_arguments(self, parser: ArgumentParser) -> None:
        """Register flags for the output path and for skipping backup sections."""
        parser.add_argument("--output", help="Filename of output tarball")
        parser.add_argument("--skip-db", action="store_true", help="Skip database backup")
        parser.add_argument("--skip-uploads", action="store_true", help="Skip uploads backup")

    def handle(self, *args: Any, **options: Any) -> None:
        """Assemble the backup contents in a temp directory, then tar them up."""
        timestamp = timezone_now().strftime(TIMESTAMP_FORMAT)
        with tempfile.TemporaryDirectory(
            prefix=f"zulip-backup-{timestamp}-",
        ) as tmp:
            os.mkdir(os.path.join(tmp, "zulip-backup"))
            # `members` lists the paths handed to tar; `paths` maps archive
            # names to on-disk locations for tar's --transform renaming below.
            members = []
            paths = []

            # Record the Zulip version (plus git describe output, if available).
            with open(os.path.join(tmp, "zulip-backup", "zulip-version"), "w") as f:
                print(ZULIP_VERSION, file=f)
                git = try_git_describe()
                if git:
                    print(git, file=f)
            members.append("zulip-backup/zulip-version")

            # Record the OS distribution id and release.
            with open(os.path.join(tmp, "zulip-backup", "os-version"), "w") as f:
                print(
                    "{ID} {VERSION_ID}".format(**parse_os_release()),
                    file=f,
                )
            members.append("zulip-backup/os-version")

            # Record the PostgreSQL server version.
            with open(os.path.join(tmp, "zulip-backup", "postgres-version"), "w") as f:
                print(connection.pg_version, file=f)
            members.append("zulip-backup/postgres-version")

            # Settings live in zproject/ in development, /etc/zulip in production.
            if settings.DEVELOPMENT:
                members.append(
                    os.path.join(settings.DEPLOY_ROOT, "zproject", "dev-secrets.conf"),
                )
                paths.append(
                    ("zproject", os.path.join(settings.DEPLOY_ROOT, "zproject")),
                )
            else:
                members.append("/etc/zulip")
                paths.append(("settings", "/etc/zulip"))

            if not options["skip_db"]:
                # Dump the database in pg_dump's directory format into the temp dir.
                pg_dump_command = [
                    "pg_dump",
                    "--format=directory",
                    "--file=" + os.path.join(tmp, "zulip-backup", "database"),
                    "--host=" + settings.DATABASES["default"]["HOST"],
                    "--port=" + settings.DATABASES["default"]["PORT"],
                    "--username=" + settings.DATABASES["default"]["USER"],
                    "--dbname=" + settings.DATABASES["default"]["NAME"],
                    "--no-password",
                ]
                # Pass the password via the environment, not the command line.
                os.environ["PGPASSWORD"] = settings.DATABASES["default"]["PASSWORD"]

                run(
                    pg_dump_command,
                    cwd=tmp,
                )
                members.append("zulip-backup/database")

            # Only locally stored uploads are backed up (skipped for S3 storage).
            if (
                not options["skip_uploads"]
                and settings.LOCAL_UPLOADS_DIR is not None
                and os.path.exists(
                    os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
                )
            ):
                members.append(
                    os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
                )
                paths.append(
                    (
                        "uploads",
                        os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
                    ),
                )

            # "|" is used as the sed delimiter in the --transform expressions.
            assert not any("|" in name or "|" in path for name, path in paths)
            # Rename each on-disk path to zulip-backup/<name> inside the archive.
            transform_args = [
                r"--transform=s|^{}(/.*)?$|zulip-backup/{}\1|x".format(
                    re.escape(path),
                    name.replace("\\", r"\\"),
                )
                for name, path in paths
            ]

            try:
                if options["output"] is None:
                    tarball_path = tempfile.NamedTemporaryFile(
                        prefix=f"zulip-backup-{timestamp}-",
                        suffix=".tar.gz",
                        delete=False,
                    ).name
                else:
                    tarball_path = options["output"]

                run(
                    [
                        "tar",
                        f"--directory={tmp}",
                        "-cPzf",
                        tarball_path,
                        *transform_args,
                        "--",
                        *members,
                    ]
                )
                print(f"Backup tarball written to {tarball_path}")
            except BaseException:
                # Don't leave a partial tarball behind if we created the file.
                if options["output"] is None:
                    os.unlink(tarball_path)
                raise
| 38.391304 | 94 | 0.495281 | import os
import re
import tempfile
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Any
from django.conf import settings
from django.db import connection
from django.utils.timezone import now as timezone_now
from scripts.lib.zulip_tools import TIMESTAMP_FORMAT, parse_os_release, run
from version import ZULIP_VERSION
from zerver.lib.management import ZulipBaseCommand
from zerver.logging_handlers import try_git_describe
class Command(ZulipBaseCommand):
def create_parser(self, *args: Any, **kwargs: Any) -> ArgumentParser:
parser = super().create_parser(*args, **kwargs)
parser.formatter_class = RawTextHelpFormatter
return parser
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--output", help="Filename of output tarball")
parser.add_argument("--skip-db", action="store_true", help="Skip database backup")
parser.add_argument("--skip-uploads", action="store_true", help="Skip uploads backup")
def handle(self, *args: Any, **options: Any) -> None:
timestamp = timezone_now().strftime(TIMESTAMP_FORMAT)
with tempfile.TemporaryDirectory(
prefix=f"zulip-backup-{timestamp}-",
) as tmp:
os.mkdir(os.path.join(tmp, "zulip-backup"))
members = []
paths = []
with open(os.path.join(tmp, "zulip-backup", "zulip-version"), "w") as f:
print(ZULIP_VERSION, file=f)
git = try_git_describe()
if git:
print(git, file=f)
members.append("zulip-backup/zulip-version")
with open(os.path.join(tmp, "zulip-backup", "os-version"), "w") as f:
print(
"{ID} {VERSION_ID}".format(**parse_os_release()),
file=f,
)
members.append("zulip-backup/os-version")
with open(os.path.join(tmp, "zulip-backup", "postgres-version"), "w") as f:
print(connection.pg_version, file=f)
members.append("zulip-backup/postgres-version")
if settings.DEVELOPMENT:
members.append(
os.path.join(settings.DEPLOY_ROOT, "zproject", "dev-secrets.conf"),
)
paths.append(
("zproject", os.path.join(settings.DEPLOY_ROOT, "zproject")),
)
else:
members.append("/etc/zulip")
paths.append(("settings", "/etc/zulip"))
if not options["skip_db"]:
pg_dump_command = [
"pg_dump",
"--format=directory",
"--file=" + os.path.join(tmp, "zulip-backup", "database"),
"--host=" + settings.DATABASES["default"]["HOST"],
"--port=" + settings.DATABASES["default"]["PORT"],
"--username=" + settings.DATABASES["default"]["USER"],
"--dbname=" + settings.DATABASES["default"]["NAME"],
"--no-password",
]
os.environ["PGPASSWORD"] = settings.DATABASES["default"]["PASSWORD"]
run(
pg_dump_command,
cwd=tmp,
)
members.append("zulip-backup/database")
if (
not options["skip_uploads"]
and settings.LOCAL_UPLOADS_DIR is not None
and os.path.exists(
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
)
):
members.append(
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
)
paths.append(
(
"uploads",
os.path.join(settings.DEPLOY_ROOT, settings.LOCAL_UPLOADS_DIR),
),
)
assert not any("|" in name or "|" in path for name, path in paths)
transform_args = [
r"--transform=s|^{}(/.*)?$|zulip-backup/{}\1|x".format(
re.escape(path),
name.replace("\\", r"\\"),
)
for name, path in paths
]
try:
if options["output"] is None:
tarball_path = tempfile.NamedTemporaryFile(
prefix=f"zulip-backup-{timestamp}-",
suffix=".tar.gz",
delete=False,
).name
else:
tarball_path = options["output"]
run(
[
"tar",
f"--directory={tmp}",
"-cPzf",
tarball_path,
*transform_args,
"--",
*members,
]
)
print(f"Backup tarball written to {tarball_path}")
except BaseException:
if options["output"] is None:
os.unlink(tarball_path)
raise
| true | true |
f71b06a727f087f2bc7415f3706874d40d893939 | 92 | py | Python | arvestust/serializers/mixins/__init__.py | lehvitus/arvestust | 2d508317b744eaf12a643a398ff95723893a046a | [
"BSD-3-Clause"
] | 1 | 2021-09-17T23:45:27.000Z | 2021-09-17T23:45:27.000Z | arvestust/serializers/mixins/__init__.py | lehvitus/arvestust | 2d508317b744eaf12a643a398ff95723893a046a | [
"BSD-3-Clause"
] | 3 | 2020-07-25T05:40:54.000Z | 2020-08-11T04:01:19.000Z | arvestust/serializers/mixins/__init__.py | lehvitus/arvestust | 2d508317b744eaf12a643a398ff95723893a046a | [
"BSD-3-Clause"
] | null | null | null | # arvestust:serializers:mixins
from .arvestust_record import ArvestustRecordSerializerMixin
| 30.666667 | 60 | 0.891304 |
from .arvestust_record import ArvestustRecordSerializerMixin
| true | true |
f71b08cb676579ac4fac189d7b267ecde83114fa | 784 | py | Python | secret.py | nora0706/gcp_site | 1be5df86f239112e485cb6a089abf14622fd6b55 | [
"MIT"
] | null | null | null | secret.py | nora0706/gcp_site | 1be5df86f239112e485cb6a089abf14622fd6b55 | [
"MIT"
] | null | null | null | secret.py | nora0706/gcp_site | 1be5df86f239112e485cb6a089abf14622fd6b55 | [
"MIT"
] | null | null | null | import os
from google.cloud import secretmanager
class Secret:
    """Thin wrapper around Google Secret Manager for the current GCP project."""

    def __init__(self):
        # One client per wrapper; the project id comes from the runtime env.
        self.client = secretmanager.SecretManagerServiceClient()
        self.project_id = os.getenv('GOOGLE_CLOUD_PROJECT')

    def get_secret(self, secret_id):
        """Return the latest version of ``secret_id`` decoded as UTF-8.

        WARNING: do not log or print the returned value in production.
        """
        resource = f"projects/{self.project_id}/secrets/{secret_id}/versions/latest"
        resp = self.client.access_secret_version(request={"name": resource})
        return resp.payload.data.decode("UTF-8")
| 34.086957 | 80 | 0.678571 | import os
from google.cloud import secretmanager
class Secret:
def __init__(self):
self.client = secretmanager.SecretManagerServiceClient()
self.project_id = os.getenv('GOOGLE_CLOUD_PROJECT')
def get_secret(self, secret_id):
name = f"projects/{self.project_id}/secrets/{secret_id}/versions/latest"
response = self.client.access_secret_version(request={"name": name})
return response.payload.data.decode("UTF-8")
| true | true |
f71b08ee83709fcec57f27b82915c939fb73d449 | 1,341 | py | Python | tag_generator.py | TREYWANGCQU/blog.reaticle.com | 6caa7cdecdb527c8dec0002d0e431632b9823376 | [
"CC0-1.0"
] | 1 | 2021-07-24T16:54:05.000Z | 2021-07-24T16:54:05.000Z | tag_generator.py | TREYWANGCQU/treywangcqu.github.io | 6caa7cdecdb527c8dec0002d0e431632b9823376 | [
"CC0-1.0"
] | null | null | null | tag_generator.py | TREYWANGCQU/treywangcqu.github.io | 6caa7cdecdb527c8dec0002d0e431632b9823376 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
'''
tag_generator.py
Copyright 2017 Long Qian
Contact: lqian8@jhu.edu
This script creates tags for your Jekyll blog hosted by Github page.
No plugins required.
'''
import glob
import os
post_dir = '_posts/'
tag_dir = 'tag/'

# Collect every tag from the front matter (the "tags:" line inside the
# leading '---' fence pair) of each Markdown post.
total_tags = []
for filename in glob.glob(post_dir + '*md'):
    # Context manager guarantees the handle is closed even on parse errors.
    with open(filename, 'r', encoding='utf8') as f:
        crawl = False
        for line in f:
            if crawl:
                current_tags = line.strip().split()
                if not current_tags:
                    continue
                if current_tags[0] == 'tags:':
                    total_tags.extend(current_tags[1:])
                    break
            if line.strip() == '---':
                if not crawl:
                    crawl = True
                else:
                    # Second '---' closes the front matter: stop scanning.
                    break
total_tags = set(total_tags)

# Drop stale tag pages, then regenerate one Jekyll page per tag.
for stale in glob.glob(tag_dir + '*.md'):
    os.remove(stale)
if not os.path.exists(tag_dir):
    os.makedirs(tag_dir)

for tag in total_tags:
    tag_filename = tag_dir + tag + '.md'
    write_str = '---\nlayout: tagpage\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\n---\n'
    # 'w' + explicit encoding: stale pages were removed above, and non-ASCII
    # tags must round-trip the same utf8 the posts were read with.
    with open(tag_filename, 'w', encoding='utf8') as f:
        f.write(write_str)

print("Tags generated, count", len(total_tags))
| 23.946429 | 109 | 0.56525 |
import glob
import os
post_dir = '_posts/'
tag_dir = 'tag/'
filenames = glob.glob(post_dir + '*md')
total_tags = []
for filename in filenames:
f = open(filename, 'r', encoding='utf8')
crawl = False
for line in f:
if crawl:
current_tags = line.strip().split()
if current_tags ==[]:
continue
if current_tags[0]== 'tags:':
total_tags.extend(current_tags[1:])
crawl = False
break
if line.strip() == '---':
if not crawl:
crawl = True
else:
crawl = False
break
f.close()
total_tags = set(total_tags)
old_tags = glob.glob(tag_dir + '*.md')
for tag in old_tags:
os.remove(tag)
if not os.path.exists(tag_dir):
os.makedirs(tag_dir)
for tag in total_tags:
tag_filename = tag_dir + tag + '.md'
f = open(tag_filename, 'a')
write_str = '---\nlayout: tagpage\ntitle: \"Tag: ' + tag + '\"\ntag: ' + tag + '\nrobots: noindex\n---\n'
f.write(write_str)
f.close()
print("Tags generated, count", total_tags.__len__())
| true | true |
f71b09215d4861e1ba4d13dd94a6b1b30cfd4265 | 950 | py | Python | checkenv.py | SmaleZ/vcl_diayn | b2c47a681675b405d2011bc4a43c3914f3af4ecc | [
"MIT"
] | null | null | null | checkenv.py | SmaleZ/vcl_diayn | b2c47a681675b405d2011bc4a43c3914f3af4ecc | [
"MIT"
] | null | null | null | checkenv.py | SmaleZ/vcl_diayn | b2c47a681675b405d2011bc4a43c3914f3af4ecc | [
"MIT"
] | null | null | null | from env_wrapper import DIAYN_Skill_Wrapper
from stable_baselines3 import SAC
from stable_baselines3.common.env_checker import check_env
import malmoenv
import gym
from pathlib import Path
# Load the Malmo mission XML (machine-specific absolute path — adjust per host).
xml = Path('/home/zilizhang/DIAYN/mobchase_single_agent.xml').read_text()
env = malmoenv.make()
# Attach the mission and connect to the Minecraft client on port 9000.
env.init(xml, 9000)
total_timesteps = 3000  # NOTE(review): unused in this script — presumably intended for SAC training
num_skills = 3
print(env.reward_range)
# Wrap the env with the DIAYN skill wrapper (behavior defined in env_wrapper).
env = DIAYN_Skill_Wrapper(env, num_skills=num_skills)
#
# #check_env(env)
# obs = env.reset()
# env = gym.make('Walker2DMuJoCoEnv-v0')
# Smoke-test: step the wrapped env with random actions for up to n_steps.
n_steps = 10
obs = env.reset()
done = False
for _ in range(n_steps):
    # Random action
    if not done:
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        print("shape of observation", obs.shape)
        print("current action:", env.action_space)
    else:
        # Episode already terminated; remaining iterations just report it.
        print("has done")
    # action = env.action_space.sample()
    # obs, reward, done, info = env.step(action)
    # print(reward)
| 27.142857 | 73 | 0.709474 | from env_wrapper import DIAYN_Skill_Wrapper
from stable_baselines3 import SAC
from stable_baselines3.common.env_checker import check_env
import malmoenv
import gym
from pathlib import Path
xml = Path('/home/zilizhang/DIAYN/mobchase_single_agent.xml').read_text()
env = malmoenv.make()
env.init(xml, 9000)
total_timesteps = 3000
num_skills = 3
print(env.reward_range)
env = DIAYN_Skill_Wrapper(env, num_skills=num_skills)
obs = env.reset()
done = False
for _ in range(n_steps):
if not done:
action = env.action_space.sample()
obs, reward, done, info = env.step(action)
print("shape of observation", obs.shape)
print("current action:", env.action_space)
else:
print("has done")
| true | true |
f71b0955c31f832ac1f4829e34136ad342dd11b3 | 11,350 | py | Python | hoomd/md/charge.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | null | null | null | hoomd/md/charge.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | null | null | null | hoomd/md/charge.py | PetersResearchGroup/PCND | 584768cc683a6df0152ead69b567d05b781aab2b | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2009-2017 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
# Maintainer: joaander / All Developers are free to add commands for new features
R""" Electrostatic potentials.
Charged interactions are usually long ranged, and for computational efficiency this is split
into two parts, one part computed in real space and on in Fourier space. You don't need to worry about this
implementation detail, however, as charge commands in hoomd automatically initialize and configure both the long
and short range parts.
Only one method of computing charged interactions should be used at a time. Otherwise, they would add together and
produce incorrect results.
"""
from hoomd.md import force;
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.md import pair;
from hoomd.md import nlist as nl # to avoid naming conflicts
import hoomd;
import math;
import sys;
from math import sqrt
class pppm(force._force):
R""" Long-range electrostatics computed with the PPPM method.
Args:
group (:py:mod:`hoomd.group`): Group on which to apply long range PPPM forces. The short range part is always applied between
all particles.
nlist (:py:mod:`hoomd.md.nlist`): Neighbor list
`D. LeBard et. al. 2012 <http://dx.doi.org/10.1039/c1sm06787g>`_ describes the PPPM implementation details in
HOOMD-blue. Please cite it if you utilize the PPPM functionality in your work.
:py:class:`pppm` specifies **both** the long-ranged **and** short range parts of the electrostatic
force should be computed between all charged particles in the simulation. In other words, :py:class:`pppm`
initializes and sets all parameters for its own :py:class:`hoomd.md.pair.ewald`, so do not specify an additional one.
The command supports additional screening of interactions, according to the Ewald summation for Yukawa potentials.
This is useful if one wants to compute a screened interaction (i.e. a solution to the linerized Poisson-Boltzmann
equation), yet the cut-off radius is so large that the computation with a purely short-ranged potential would become
inefficient. In that case, the inverse Debye screening length can be supplied using :py:meth`set_params()`.
Also see `Salin, G and Caillol, J. 2000, <http://dx.doi.org/10.1063/1.1326477>`.
Parameters:
- Nx - Number of grid points in x direction
- Ny - Number of grid points in y direction
- Nz - Number of grid points in z direction
- order - Number of grid points in each direction to assign charges to
- :math:`r_{\mathrm{cut}}` - Cutoff for the short-ranged part of the electrostatics calculation
Parameters Nx, Ny, Nz, order, :math:`r_{\mathrm{cut}}` must be set using
:py:meth:`set_params()` before any :py:func:`hoomd.run()` can take place.
See :ref:`page-units` for information on the units assigned to charges in hoomd.
Note:
:py:class:`pppm` takes a particle group as an option. This should be the group of all charged particles
(:py:func:`hoomd.group.charged`). However, note that this group is static and determined at the time
:py:class:`pppm` is specified. If you are going to add charged particles at a later point in the simulation
with the data access API, ensure that this group includes those particles as well.
.. important::
In MPI simulations, the number of grid point along every dimensions must be a power of two.
Example::
charged = group.charged();
pppm = charge.pppm(group=charged)
"""
def __init__(self, group, nlist):
hoomd.util.print_status_line();
# initialize the base class
force._force.__init__(self);
# register the citation
c = hoomd.cite.article(cite_key='dnlebard2012',
author=['D N LeBard', 'B G Levine', 'S A Barr', 'A Jusufi', 'S Sanders', 'M L Klein', 'A Z Panagiotopoulos'],
title='Self-assembly of coarse-grained ionic surfactants accelerated by graphics processing units',
journal='Journal of Computational Physics',
volume=8,
number=8,
pages='2385-2397',
month='',
year='2012',
doi='10.1039/c1sm06787g',
feature='PPPM')
hoomd.cite._ensure_global_bib().add(c)
# create the c++ mirror class
# PPPM itself doesn't really need a neighbor list, so subscribe call back as None
self.nlist = nlist
self.nlist.subscribe(lambda : None)
self.nlist.update_rcut()
if not hoomd.context.exec_conf.isCUDAEnabled():
self.cpp_force = _md.PPPMForceCompute(hoomd.context.current.system_definition, self.nlist.cpp_nlist, group.cpp_group);
else:
self.cpp_force = _md.PPPMForceComputeGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, group.cpp_group);
hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
# error check flag - must be set to true by set_params in order for the run() to commence
self.params_set = False;
# initialize the short range part of electrostatics
hoomd.util.quiet_status();
self.ewald = pair.ewald(r_cut = False, nlist = self.nlist);
hoomd.util.unquiet_status();
# overrride disable and enable to work with both of the forces
def disable(self, log=False):
hoomd.util.print_status_line();
hoomd.util.quiet_status();
force._force.disable(self, log);
self.ewald.disable(log);
hoomd.util.unquiet_status();
def enable(self):
hoomd.util.print_status_line();
hoomd.util.quiet_status();
force._force.enable(self);
self.ewald.enable();
hoomd.util.unquiet_status();
def set_params(self, Nx, Ny, Nz, order, rcut, alpha = 0.0):
""" Sets PPPM parameters.
Args:
Nx (int): Number of grid points in x direction
Ny (int): Number of grid points in y direction
Nz (int): Number of grid points in z direction
order (int): Number of grid points in each direction to assign charges to
rcut (float): Cutoff for the short-ranged part of the electrostatics calculation
alpha (float, **optional**): Debye screening parameter (in units 1/distance)
.. versionadded:: 2.1
Examples::
pppm.set_params(Nx=64, Ny=64, Nz=64, order=6, rcut=2.0)
Note that the Fourier transforms are much faster for number of grid points of the form 2^N.
"""
hoomd.util.print_status_line();
if hoomd.context.current.system_definition.getNDimensions() != 3:
hoomd.context.msg.error("System must be 3 dimensional\n");
raise RuntimeError("Cannot compute PPPM");
self.params_set = True;
# get sum of charges and of squared charges
q = self.cpp_force.getQSum();
q2 = self.cpp_force.getQ2Sum();
N = hoomd.context.current.system_definition.getParticleData().getNGlobal()
box = hoomd.context.current.system_definition.getParticleData().getGlobalBox()
Lx = box.getL().x
Ly = box.getL().y
Lz = box.getL().z
hx = Lx/Nx
hy = Ly/Ny
hz = Lz/Nz
gew1 = 0.0
kappa = gew1
f = diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut)
hmin = min(hx, hy, hz)
gew2 = 10.0/hmin
kappa = gew2
fmid = diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut)
if f*fmid >= 0.0:
hoomd.context.msg.error("f*fmid >= 0.0\n");
raise RuntimeError("Cannot compute PPPM");
if f < 0.0:
dgew=gew2-gew1
rtb = gew1
else:
dgew=gew1-gew2
rtb = gew2
ncount = 0
while math.fabs(dgew) > 0.00001 and fmid != 0.0:
dgew *= 0.5
kappa = rtb + dgew
fmid = diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut)
if fmid <= 0.0:
rtb = kappa
ncount += 1
if ncount > 10000.0:
hoomd.context.msg.error("kappa not converging\n");
raise RuntimeError("Cannot compute PPPM");
ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
type_list = [];
for i in range(0,ntypes):
type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));
hoomd.util.quiet_status();
for i in range(0,ntypes):
for j in range(0,ntypes):
self.ewald.pair_coeff.set(type_list[i], type_list[j], kappa = kappa, alpha = alpha, r_cut=rcut)
hoomd.util.unquiet_status();
# set the parameters for the appropriate type
self.cpp_force.setParams(Nx, Ny, Nz, order, kappa, rcut, alpha);
def update_coeffs(self):
if not self.params_set:
hoomd.context.msg.error("Coefficients for PPPM are not set. Call set_coeff prior to run()\n");
raise RuntimeError("Error initializing run");
if self.nlist.cpp_nlist.getDiameterShift():
hoomd.context.msg.warning("Neighbor diameter shifting is enabled, PPPM may not correct for all excluded interactions\n");
def diffpr(hx, hy, hz, xprd, yprd, zprd, N, order, kappa, q2, rcut):
lprx = rms(hx, xprd, N, order, kappa, q2)
lpry = rms(hy, yprd, N, order, kappa, q2)
lprz = rms(hz, zprd, N, order, kappa, q2)
kspace_prec = math.sqrt(lprx*lprx + lpry*lpry + lprz*lprz) / sqrt(3.0)
real_prec = 2.0*q2 * math.exp(-kappa*kappa*rcut*rcut)/sqrt(N*rcut*xprd*yprd*zprd)
value = kspace_prec - real_prec
return value
def rms(h, prd, N, order, kappa, q2):
acons = [[0 for _ in range(8)] for _ in range(8)]
acons[1][0] = 2.0 / 3.0
acons[2][0] = 1.0 / 50.0
acons[2][1] = 5.0 / 294.0
acons[3][0] = 1.0 / 588.0
acons[3][1] = 7.0 / 1440.0
acons[3][2] = 21.0 / 3872.0
acons[4][0] = 1.0 / 4320.0
acons[4][1] = 3.0 / 1936.0
acons[4][2] = 7601.0 / 2271360.0
acons[4][3] = 143.0 / 28800.0
acons[5][0] = 1.0 / 23232.0
acons[5][1] = 7601.0 / 13628160.0
acons[5][2] = 143.0 / 69120.0
acons[5][3] = 517231.0 / 106536960.0
acons[5][4] = 106640677.0 / 11737571328.0
acons[6][0] = 691.0 / 68140800.0
acons[6][1] = 13.0 / 57600.0
acons[6][2] = 47021.0 / 35512320.0
acons[6][3] = 9694607.0 / 2095994880.0
acons[6][4] = 733191589.0 / 59609088000.0
acons[6][5] = 326190917.0 / 11700633600.0
acons[7][0] = 1.0 / 345600.0
acons[7][1] = 3617.0 / 35512320.0
acons[7][2] = 745739.0 / 838397952.0
acons[7][3] = 56399353.0 / 12773376000.0
acons[7][4] = 25091609.0 / 1560084480.0
acons[7][5] = 1755948832039.0 / 36229939200000.0
acons[7][6] = 4887769399.0 / 37838389248.0
sum = 0.0
for m in range(0,order):
sum += acons[order][m]*pow(h*kappa, 2.0*m)
value = q2*pow(h*kappa,order)*sqrt(kappa*prd*sqrt(2.0*math.pi)*sum/N)/prd/prd
return value
| 40.974729 | 134 | 0.628458 |
from hoomd.md import force;
from hoomd import _hoomd
from hoomd.md import _md
from hoomd.md import pair;
from hoomd.md import nlist as nl
import hoomd;
import math;
import sys;
from math import sqrt
class pppm(force._force):
def __init__(self, group, nlist):
hoomd.util.print_status_line();
force._force.__init__(self);
c = hoomd.cite.article(cite_key='dnlebard2012',
author=['D N LeBard', 'B G Levine', 'S A Barr', 'A Jusufi', 'S Sanders', 'M L Klein', 'A Z Panagiotopoulos'],
title='Self-assembly of coarse-grained ionic surfactants accelerated by graphics processing units',
journal='Journal of Computational Physics',
volume=8,
number=8,
pages='2385-2397',
month='',
year='2012',
doi='10.1039/c1sm06787g',
feature='PPPM')
hoomd.cite._ensure_global_bib().add(c)
self.nlist = nlist
self.nlist.subscribe(lambda : None)
self.nlist.update_rcut()
if not hoomd.context.exec_conf.isCUDAEnabled():
self.cpp_force = _md.PPPMForceCompute(hoomd.context.current.system_definition, self.nlist.cpp_nlist, group.cpp_group);
else:
self.cpp_force = _md.PPPMForceComputeGPU(hoomd.context.current.system_definition, self.nlist.cpp_nlist, group.cpp_group);
hoomd.context.current.system.addCompute(self.cpp_force, self.force_name);
# error check flag - must be set to true by set_params in order for the run() to commence
self.params_set = False;
# initialize the short range part of electrostatics
hoomd.util.quiet_status();
self.ewald = pair.ewald(r_cut = False, nlist = self.nlist);
hoomd.util.unquiet_status();
# overrride disable and enable to work with both of the forces
def disable(self, log=False):
hoomd.util.print_status_line();
hoomd.util.quiet_status();
force._force.disable(self, log);
self.ewald.disable(log);
hoomd.util.unquiet_status();
def enable(self):
hoomd.util.print_status_line();
hoomd.util.quiet_status();
force._force.enable(self);
self.ewald.enable();
hoomd.util.unquiet_status();
def set_params(self, Nx, Ny, Nz, order, rcut, alpha = 0.0):
hoomd.util.print_status_line();
if hoomd.context.current.system_definition.getNDimensions() != 3:
hoomd.context.msg.error("System must be 3 dimensional\n");
raise RuntimeError("Cannot compute PPPM");
self.params_set = True;
# get sum of charges and of squared charges
q = self.cpp_force.getQSum();
q2 = self.cpp_force.getQ2Sum();
N = hoomd.context.current.system_definition.getParticleData().getNGlobal()
box = hoomd.context.current.system_definition.getParticleData().getGlobalBox()
Lx = box.getL().x
Ly = box.getL().y
Lz = box.getL().z
hx = Lx/Nx
hy = Ly/Ny
hz = Lz/Nz
gew1 = 0.0
kappa = gew1
f = diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut)
hmin = min(hx, hy, hz)
gew2 = 10.0/hmin
kappa = gew2
fmid = diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut)
if f*fmid >= 0.0:
hoomd.context.msg.error("f*fmid >= 0.0\n");
raise RuntimeError("Cannot compute PPPM");
if f < 0.0:
dgew=gew2-gew1
rtb = gew1
else:
dgew=gew1-gew2
rtb = gew2
ncount = 0
while math.fabs(dgew) > 0.00001 and fmid != 0.0:
dgew *= 0.5
kappa = rtb + dgew
fmid = diffpr(hx, hy, hz, Lx, Ly, Lz, N, order, kappa, q2, rcut)
if fmid <= 0.0:
rtb = kappa
ncount += 1
if ncount > 10000.0:
hoomd.context.msg.error("kappa not converging\n");
raise RuntimeError("Cannot compute PPPM");
ntypes = hoomd.context.current.system_definition.getParticleData().getNTypes();
type_list = [];
for i in range(0,ntypes):
type_list.append(hoomd.context.current.system_definition.getParticleData().getNameByType(i));
hoomd.util.quiet_status();
for i in range(0,ntypes):
for j in range(0,ntypes):
self.ewald.pair_coeff.set(type_list[i], type_list[j], kappa = kappa, alpha = alpha, r_cut=rcut)
hoomd.util.unquiet_status();
# set the parameters for the appropriate type
self.cpp_force.setParams(Nx, Ny, Nz, order, kappa, rcut, alpha);
def update_coeffs(self):
if not self.params_set:
hoomd.context.msg.error("Coefficients for PPPM are not set. Call set_coeff prior to run()\n");
raise RuntimeError("Error initializing run");
if self.nlist.cpp_nlist.getDiameterShift():
hoomd.context.msg.warning("Neighbor diameter shifting is enabled, PPPM may not correct for all excluded interactions\n");
def diffpr(hx, hy, hz, xprd, yprd, zprd, N, order, kappa, q2, rcut):
lprx = rms(hx, xprd, N, order, kappa, q2)
lpry = rms(hy, yprd, N, order, kappa, q2)
lprz = rms(hz, zprd, N, order, kappa, q2)
kspace_prec = math.sqrt(lprx*lprx + lpry*lpry + lprz*lprz) / sqrt(3.0)
real_prec = 2.0*q2 * math.exp(-kappa*kappa*rcut*rcut)/sqrt(N*rcut*xprd*yprd*zprd)
value = kspace_prec - real_prec
return value
def rms(h, prd, N, order, kappa, q2):
acons = [[0 for _ in range(8)] for _ in range(8)]
acons[1][0] = 2.0 / 3.0
acons[2][0] = 1.0 / 50.0
acons[2][1] = 5.0 / 294.0
acons[3][0] = 1.0 / 588.0
acons[3][1] = 7.0 / 1440.0
acons[3][2] = 21.0 / 3872.0
acons[4][0] = 1.0 / 4320.0
acons[4][1] = 3.0 / 1936.0
acons[4][2] = 7601.0 / 2271360.0
acons[4][3] = 143.0 / 28800.0
acons[5][0] = 1.0 / 23232.0
acons[5][1] = 7601.0 / 13628160.0
acons[5][2] = 143.0 / 69120.0
acons[5][3] = 517231.0 / 106536960.0
acons[5][4] = 106640677.0 / 11737571328.0
acons[6][0] = 691.0 / 68140800.0
acons[6][1] = 13.0 / 57600.0
acons[6][2] = 47021.0 / 35512320.0
acons[6][3] = 9694607.0 / 2095994880.0
acons[6][4] = 733191589.0 / 59609088000.0
acons[6][5] = 326190917.0 / 11700633600.0
acons[7][0] = 1.0 / 345600.0
acons[7][1] = 3617.0 / 35512320.0
acons[7][2] = 745739.0 / 838397952.0
acons[7][3] = 56399353.0 / 12773376000.0
acons[7][4] = 25091609.0 / 1560084480.0
acons[7][5] = 1755948832039.0 / 36229939200000.0
acons[7][6] = 4887769399.0 / 37838389248.0
sum = 0.0
for m in range(0,order):
sum += acons[order][m]*pow(h*kappa, 2.0*m)
value = q2*pow(h*kappa,order)*sqrt(kappa*prd*sqrt(2.0*math.pi)*sum/N)/prd/prd
return value
| true | true |
f71b09b86f70b649fd3f792fbe2c687f37f5e62d | 3,101 | py | Python | predict.py | afonchikk/Audio-Classification | 6acc7015ec847a64338f6300dca608a0752ba554 | [
"MIT"
] | null | null | null | predict.py | afonchikk/Audio-Classification | 6acc7015ec847a64338f6300dca608a0752ba554 | [
"MIT"
] | null | null | null | predict.py | afonchikk/Audio-Classification | 6acc7015ec847a64338f6300dca608a0752ba554 | [
"MIT"
] | null | null | null | from tensorflow.keras.models import load_model
from clean import downsample_mono, envelope
from kapre.time_frequency import STFT, Magnitude, ApplyFilterbank, MagnitudeToDecibel
from sklearn.preprocessing import LabelEncoder
import numpy as np
from glob import glob
import argparse
import os
import pandas as pd
from tqdm import tqdm
def make_prediction(args):
# load the model
model = load_model(args.model_fn,
custom_objects={'STFT': STFT,
'Magnitude': Magnitude,
'ApplyFilterbank': ApplyFilterbank,
'MagnitudeToDecibel': MagnitudeToDecibel})
# find the sound data
wav_paths = glob('{}/**'.format(args.src_dir), recursive=True)
wav_paths = sorted([x.replace(os.sep, '/') for x in wav_paths if '.wav' in x])
classes = sorted(os.listdir(args.src_dir))
labels = [os.path.split(x)[0].split('/')[-1] for x in wav_paths]
le = LabelEncoder()
y_true = le.fit_transform(labels)
results = []
for z, wav_fn in tqdm(enumerate(wav_paths), total=len(wav_paths)):
rate, wav = downsample_mono(wav_fn, args.sr)
mask, env = envelope(wav, rate, threshold=args.threshold)
clean_wav = wav[mask]
step = int(args.sr * args.dt)
batch = []
for i in range(0, clean_wav.shape[0], step):
sample = clean_wav[i:i + step]
sample = sample.reshape(-1, 1)
if sample.shape[0] < step:
tmp = np.zeros(shape=(step, 1), dtype=np.float32)
tmp[:sample.shape[0], :] = sample.flatten().reshape(-1, 1)
sample = tmp
batch.append(sample)
X_batch = np.array(batch, dtype=np.float32)
y_pred = model.predict(X_batch)
y_mean = np.mean(y_pred, axis=0)
y_pred = np.argmax(y_mean)
real_class = os.path.dirname(wav_fn).split('/')[-1]
print('Actual class: {}, Predicted class: {}'.format(real_class, classes[y_pred]))
results.append(y_mean)
np.save(os.path.join('logs', args.pred_fn), np.array(results))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Audio Classification Training')
parser.add_argument('--model_fn', type=str, default='models/lstm.h5',
help='model file to make predictions')
parser.add_argument('--pred_fn', type=str, default='y_pred',
help='fn to write predictions in logs dir')
parser.add_argument('--src_dir', type=str, default='wavfiles',
help='directory containing wavfiles to predict')
parser.add_argument('--dt', type=float, default=1.0,
help='time in seconds to sample audio')
parser.add_argument('--sr', type=int, default=16000,
help='sample rate of clean audio')
parser.add_argument('--threshold', type=str, default=20,
help='threshold magnitude for np.int16 dtype')
args, _ = parser.parse_known_args()
make_prediction(args)
| 42.479452 | 90 | 0.609158 | from tensorflow.keras.models import load_model
from clean import downsample_mono, envelope
from kapre.time_frequency import STFT, Magnitude, ApplyFilterbank, MagnitudeToDecibel
from sklearn.preprocessing import LabelEncoder
import numpy as np
from glob import glob
import argparse
import os
import pandas as pd
from tqdm import tqdm
def make_prediction(args):
model = load_model(args.model_fn,
custom_objects={'STFT': STFT,
'Magnitude': Magnitude,
'ApplyFilterbank': ApplyFilterbank,
'MagnitudeToDecibel': MagnitudeToDecibel})
wav_paths = glob('{}/**'.format(args.src_dir), recursive=True)
wav_paths = sorted([x.replace(os.sep, '/') for x in wav_paths if '.wav' in x])
classes = sorted(os.listdir(args.src_dir))
labels = [os.path.split(x)[0].split('/')[-1] for x in wav_paths]
le = LabelEncoder()
y_true = le.fit_transform(labels)
results = []
for z, wav_fn in tqdm(enumerate(wav_paths), total=len(wav_paths)):
rate, wav = downsample_mono(wav_fn, args.sr)
mask, env = envelope(wav, rate, threshold=args.threshold)
clean_wav = wav[mask]
step = int(args.sr * args.dt)
batch = []
for i in range(0, clean_wav.shape[0], step):
sample = clean_wav[i:i + step]
sample = sample.reshape(-1, 1)
if sample.shape[0] < step:
tmp = np.zeros(shape=(step, 1), dtype=np.float32)
tmp[:sample.shape[0], :] = sample.flatten().reshape(-1, 1)
sample = tmp
batch.append(sample)
X_batch = np.array(batch, dtype=np.float32)
y_pred = model.predict(X_batch)
y_mean = np.mean(y_pred, axis=0)
y_pred = np.argmax(y_mean)
real_class = os.path.dirname(wav_fn).split('/')[-1]
print('Actual class: {}, Predicted class: {}'.format(real_class, classes[y_pred]))
results.append(y_mean)
np.save(os.path.join('logs', args.pred_fn), np.array(results))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Audio Classification Training')
parser.add_argument('--model_fn', type=str, default='models/lstm.h5',
help='model file to make predictions')
parser.add_argument('--pred_fn', type=str, default='y_pred',
help='fn to write predictions in logs dir')
parser.add_argument('--src_dir', type=str, default='wavfiles',
help='directory containing wavfiles to predict')
parser.add_argument('--dt', type=float, default=1.0,
help='time in seconds to sample audio')
parser.add_argument('--sr', type=int, default=16000,
help='sample rate of clean audio')
parser.add_argument('--threshold', type=str, default=20,
help='threshold magnitude for np.int16 dtype')
args, _ = parser.parse_known_args()
make_prediction(args)
| true | true |
f71b09c92a4cd91f0806d99eac65622c1baec8ea | 9,692 | py | Python | simglucose/controller/PaperController.py | electr0de/APControllerProjectGit | 141ac08e716d6ac8cebe7b144b744744024d8939 | [
"MIT"
] | null | null | null | simglucose/controller/PaperController.py | electr0de/APControllerProjectGit | 141ac08e716d6ac8cebe7b144b744744024d8939 | [
"MIT"
] | null | null | null | simglucose/controller/PaperController.py | electr0de/APControllerProjectGit | 141ac08e716d6ac8cebe7b144b744744024d8939 | [
"MIT"
] | null | null | null | from functools import partial
from pprint import pprint
import matplotlib.pyplot as plt
# import test2
from simglucose.controller.base import Controller
#from datetime import datetime, timedelta, time
import numpy as np
import math
percent_value = 0.05
sign = lambda x: math.copysign(1, x)
normalize_f = lambda x: (x - 39) / (600 - 39)
class PaperRLController(Controller):
def __init__(self, a_hyper=1, a_hypo=10, current_breakfast_bolus=0.0, current_lunch_bolus=0.0,
current_dinner_bolus=0.0, current_basal_rate=0.0, current_snack_bolus=0.0, init_state=None):
super().__init__(init_state)
np.random.seed(1)
self.a_hyper = a_hyper
self.hypo = a_hypo
self.GL = normalize_f(90)
self.GH = normalize_f(150)
self.current_basal_rate = current_basal_rate
self.current_breakfast_bolus = current_breakfast_bolus # bolus means IC ratio
self.current_lunch_bolus = current_lunch_bolus
self.current_dinner_bolus = current_dinner_bolus
# self.current_snack_bolus = current_snack_bolus
self.basal_theta = []
self.bolus_theta = []
# np.random.seed(2)
# self.bolus_theta = np.random.rand(2).tolist()
self.h = 0.5
self.c_sigma = 0.05
self.m = 0.5
self.previous_basal_rate = 0.0
np.random.seed(55)
self.w = (np.random.rand(2) * 2 - 1).tolist()
self._lambda = 0.5
self.gamma = 0.9
self.z = [0.0, 0.0]
self.a = 0.5
self.beta = 0.5
self.beta_basal = 0.5
self.value_factor = 10
# self.time_array = []
# self.theta_array_1 = []
# self.theta_array_2 = []
# self.bolus_time_array = []
# self.F_1_array = []
# self.F_2_array = []
# plt.figure(200)
# self.fig, self.axis = plt.subplots(4)
# plt.show()
# self.axis[0].set_title(" Hyper feature for basal")
# self.axis[1].set_title(" Hypo feature for basal")
# self.axis[2].set_title("Hyper theta for basal")
# self.axis[3].set_title(" Hypo theta for basal")
self.previous_state_basal = None
self.previous_state_breakfast = None
self.previous_state_lunch = None
self.previous_state_dinner = None
def extract_features(self, array):
M_hyper = []
M_hypo = []
for element in array:
if element > 150:
M_hyper.append(normalize_f(element))
elif element < 90:
M_hypo.append(normalize_f(element))
F_hyper = sum([element - self.GH for element in M_hyper]) * 1 / len(M_hyper) if M_hyper else 0
F_hypo = sum([self.GL - element for element in M_hypo]) * 1 / len(M_hypo) if M_hypo else 0
return (F_hyper, F_hypo)
def calculate_basal(self, previous_state, basal_array, time):
F_hyper, F_hypo = self.extract_features(basal_array)
F_hyper_prev, F_hypo_prev = self.extract_features(previous_state)
#
# self.F_1_array.append(F_hyper)
# self.F_2_array.append(F_hypo)
# self.time_array.append(time)
#
# self.axis[0].plot(self.time_array, self.F_1_array)
#
# self.axis[1].plot(self.time_array, self.F_2_array)
#
# plt.pause(0.001)
Ps = None
if F_hypo == 0.0:
Ps = 0
elif F_hypo > 0.0 and F_hyper == 0.0:
Ps = -0.1 * F_hypo
elif F_hypo > 0.0 and F_hyper > 0.0:
Ps = -0.05 * F_hypo
assert Ps is not None, "No conditions matched"
P = self.perform_update(Ps, (F_hyper_prev, F_hypo_prev), (F_hyper, F_hypo), True)
self.previous_basal_rate = self.current_basal_rate
br_change = self.m * P * self.current_basal_rate
# uncomment to enable 5 % change
# percent_value = 0
if abs(br_change / self.current_basal_rate) > percent_value:
self.current_basal_rate += self.current_basal_rate * percent_value * sign(br_change)
print(" used % changed")
else:
self.current_basal_rate += br_change
print(" didn't use % changed")
return self.current_basal_rate
def calculate_bolus(self, previous_state, next_state, food_counter, time):
F_hyper, F_hypo = self.extract_features(next_state)
F_hyper_prev, F_hypo_prev = self.extract_features(previous_state)
#
# self.F_1_array.append(F_hyper)
# self.F_2_array.append(F_hypo)
# self.bolus_time_array.append(time)
#
# self.axis[0].plot(self.bolus_time_array, self.F_1_array)
# self.axis[1].plot(self.bolus_time_array, self.F_2_array)
Ps = None
if F_hypo == 0.0:
Ps = 0
elif F_hypo > 0.0 and F_hyper == 0.0:
Ps = +0.1 * F_hypo
elif F_hypo > 0.0 and F_hyper > 0.0:
Ps = +0.05 * F_hypo
assert Ps is not None, "No conditions matched"
P = self.perform_update(Ps, (F_hyper_prev, F_hypo_prev), (F_hyper, F_hypo), False, food_counter)
if food_counter == 0:
self.current_breakfast_bolus = self.update_bolus(self.current_breakfast_bolus, P)
return self.current_breakfast_bolus
if food_counter == 1:
self.current_lunch_bolus = self.update_bolus(self.current_lunch_bolus, P)
return self.current_lunch_bolus
if food_counter == 2:
self.current_dinner_bolus = self.update_bolus(self.current_dinner_bolus, P)
return self.current_dinner_bolus
# if food_counter == 3:
# self.current_snack_bolus = self.update_bolus(self.current_snack_bolus, P)
# return self.current_snack_bolus
return 0.0
def perform_update(self, Ps, F_old, F, coming_from, food_counter=None):
if coming_from:
theta = self.basal_theta
previous_state = self.previous_state_basal
else:
theta = self.bolus_theta
if food_counter == 0:
previous_state = self.previous_state_breakfast
elif food_counter == 1:
previous_state = self.previous_state_lunch
elif food_counter == 2:
previous_state = self.previous_state_dinner
else:
return 0
# theta = self.theta
print(f"theta: {theta}")
Pa = sum([element1 * element2 for element1, element2 in zip(F, theta)])
Pd = self.h * Pa + (1 - self.h) * Ps
sigma = self.c_sigma * (F[0] ** 2 + F[1] ** 2)
Pe = Pd + np.random.normal(0, sigma)
cost = 1 * F[0] + self.value_factor * F[1]
if not previous_state:
previous_state = sum(
[((Pe - Pd) / sigma ** 2 * self.h * element1) * element2 for element1, element2 in zip(F_old, self.w)])
next_value = sum(
[((Pe - Pd) / sigma ** 2 * self.h * element1) * element2 for element1, element2 in zip(F, self.w)])
d = cost + self.gamma * next_value - previous_state
self.w = [element1 + self.a * d * element2 for element1, element2 in zip(self.w, self.z)]
self.z = [self._lambda * element1 + element2 for element1, element2 in zip(self.z, F)]
if coming_from:
self.basal_theta = [element1 - self.beta_basal * d * (Pe - Pd) / sigma ** 2 * self.h * element2 for
element1, element2 in zip(self.basal_theta, F)]
self.previous_state_basal = next_value
else:
self.bolus_theta = [element1 - self.beta * d * (Pe - Pd) / sigma ** 2 * self.h * element2 for
element1, element2 in zip(self.bolus_theta, F)]
if food_counter == 0:
self.previous_state_breakfast = next_value
elif food_counter == 1:
self.previous_state_lunch = next_value
else:
self.previous_state_dinner = next_value
assert sigma > 0.0000001, "sigma is too low"
# self.theta_array_1.append(self.theta[0])
# self.theta_array_2.append(self.theta[1])
# self.axis[2].plot(self.time_array, self.theta_array_1)
# self.axis[3].plot(self.time_array, self.theta_array_2)
return Pe
def update_bolus(self, old_bolus, P):
fusion_rate = old_bolus + self.m * P * old_bolus
l = 1 if (self.current_basal_rate > self.previous_basal_rate and fusion_rate < old_bolus) or (
self.current_basal_rate < self.previous_basal_rate and fusion_rate > old_bolus) else 0
# fusion_rate = l * old_bolus + (1 - l) * fusion_rate
bl_change = fusion_rate - old_bolus
if abs(bl_change / old_bolus) > percent_value:
old_bolus += sign(bl_change) * old_bolus * percent_value
print(" used % changed")
else:
old_bolus += bl_change
print(" didn't use % changed")
return old_bolus
# if __name__ == '__main__':
#
# GL = normalize_f(90)
# GH = normalize_f(150)
#
# def extract_features(array):
# M_hyper = []
# M_hypo = []
#
# for element in array:
# if element > 150:
# M_hyper.append(normalize_f(element))
# elif element < 90:
# M_hypo.append(normalize_f(element))
#
# F_hyper = sum([element - GH for element in M_hyper]) * 1 / len(M_hyper) if M_hyper else 0
#
# F_hypo = sum([GL - element for element in M_hypo]) * 1 / len(M_hypo) if M_hypo else 0
#
# return (F_hyper, F_hypo)
#
# array = test2.array
# print(extract_features(array))
| 35.501832 | 119 | 0.596574 | from functools import partial
from pprint import pprint
import matplotlib.pyplot as plt
from simglucose.controller.base import Controller
import numpy as np
import math
percent_value = 0.05
sign = lambda x: math.copysign(1, x)
normalize_f = lambda x: (x - 39) / (600 - 39)
class PaperRLController(Controller):
def __init__(self, a_hyper=1, a_hypo=10, current_breakfast_bolus=0.0, current_lunch_bolus=0.0,
current_dinner_bolus=0.0, current_basal_rate=0.0, current_snack_bolus=0.0, init_state=None):
super().__init__(init_state)
np.random.seed(1)
self.a_hyper = a_hyper
self.hypo = a_hypo
self.GL = normalize_f(90)
self.GH = normalize_f(150)
self.current_basal_rate = current_basal_rate
self.current_breakfast_bolus = current_breakfast_bolus
self.current_lunch_bolus = current_lunch_bolus
self.current_dinner_bolus = current_dinner_bolus
self.basal_theta = []
self.bolus_theta = []
self.h = 0.5
self.c_sigma = 0.05
self.m = 0.5
self.previous_basal_rate = 0.0
np.random.seed(55)
self.w = (np.random.rand(2) * 2 - 1).tolist()
self._lambda = 0.5
self.gamma = 0.9
self.z = [0.0, 0.0]
self.a = 0.5
self.beta = 0.5
self.beta_basal = 0.5
self.value_factor = 10
self.previous_state_basal = None
self.previous_state_breakfast = None
self.previous_state_lunch = None
self.previous_state_dinner = None
def extract_features(self, array):
M_hyper = []
M_hypo = []
for element in array:
if element > 150:
M_hyper.append(normalize_f(element))
elif element < 90:
M_hypo.append(normalize_f(element))
F_hyper = sum([element - self.GH for element in M_hyper]) * 1 / len(M_hyper) if M_hyper else 0
F_hypo = sum([self.GL - element for element in M_hypo]) * 1 / len(M_hypo) if M_hypo else 0
return (F_hyper, F_hypo)
def calculate_basal(self, previous_state, basal_array, time):
F_hyper, F_hypo = self.extract_features(basal_array)
F_hyper_prev, F_hypo_prev = self.extract_features(previous_state)
Ps = None
if F_hypo == 0.0:
Ps = 0
elif F_hypo > 0.0 and F_hyper == 0.0:
Ps = -0.1 * F_hypo
elif F_hypo > 0.0 and F_hyper > 0.0:
Ps = -0.05 * F_hypo
assert Ps is not None, "No conditions matched"
P = self.perform_update(Ps, (F_hyper_prev, F_hypo_prev), (F_hyper, F_hypo), True)
self.previous_basal_rate = self.current_basal_rate
br_change = self.m * P * self.current_basal_rate
if abs(br_change / self.current_basal_rate) > percent_value:
self.current_basal_rate += self.current_basal_rate * percent_value * sign(br_change)
print(" used % changed")
else:
self.current_basal_rate += br_change
print(" didn't use % changed")
return self.current_basal_rate
def calculate_bolus(self, previous_state, next_state, food_counter, time):
F_hyper, F_hypo = self.extract_features(next_state)
F_hyper_prev, F_hypo_prev = self.extract_features(previous_state)
#
# self.F_1_array.append(F_hyper)
# self.F_2_array.append(F_hypo)
# self.bolus_time_array.append(time)
#
# self.axis[0].plot(self.bolus_time_array, self.F_1_array)
# self.axis[1].plot(self.bolus_time_array, self.F_2_array)
Ps = None
if F_hypo == 0.0:
Ps = 0
elif F_hypo > 0.0 and F_hyper == 0.0:
Ps = +0.1 * F_hypo
elif F_hypo > 0.0 and F_hyper > 0.0:
Ps = +0.05 * F_hypo
assert Ps is not None, "No conditions matched"
P = self.perform_update(Ps, (F_hyper_prev, F_hypo_prev), (F_hyper, F_hypo), False, food_counter)
if food_counter == 0:
self.current_breakfast_bolus = self.update_bolus(self.current_breakfast_bolus, P)
return self.current_breakfast_bolus
if food_counter == 1:
self.current_lunch_bolus = self.update_bolus(self.current_lunch_bolus, P)
return self.current_lunch_bolus
if food_counter == 2:
self.current_dinner_bolus = self.update_bolus(self.current_dinner_bolus, P)
return self.current_dinner_bolus
# if food_counter == 3:
# self.current_snack_bolus = self.update_bolus(self.current_snack_bolus, P)
# return self.current_snack_bolus
return 0.0
def perform_update(self, Ps, F_old, F, coming_from, food_counter=None):
if coming_from:
theta = self.basal_theta
previous_state = self.previous_state_basal
else:
theta = self.bolus_theta
if food_counter == 0:
previous_state = self.previous_state_breakfast
elif food_counter == 1:
previous_state = self.previous_state_lunch
elif food_counter == 2:
previous_state = self.previous_state_dinner
else:
return 0
# theta = self.theta
print(f"theta: {theta}")
Pa = sum([element1 * element2 for element1, element2 in zip(F, theta)])
Pd = self.h * Pa + (1 - self.h) * Ps
sigma = self.c_sigma * (F[0] ** 2 + F[1] ** 2)
Pe = Pd + np.random.normal(0, sigma)
cost = 1 * F[0] + self.value_factor * F[1]
if not previous_state:
previous_state = sum(
[((Pe - Pd) / sigma ** 2 * self.h * element1) * element2 for element1, element2 in zip(F_old, self.w)])
next_value = sum(
[((Pe - Pd) / sigma ** 2 * self.h * element1) * element2 for element1, element2 in zip(F, self.w)])
d = cost + self.gamma * next_value - previous_state
self.w = [element1 + self.a * d * element2 for element1, element2 in zip(self.w, self.z)]
self.z = [self._lambda * element1 + element2 for element1, element2 in zip(self.z, F)]
if coming_from:
self.basal_theta = [element1 - self.beta_basal * d * (Pe - Pd) / sigma ** 2 * self.h * element2 for
element1, element2 in zip(self.basal_theta, F)]
self.previous_state_basal = next_value
else:
self.bolus_theta = [element1 - self.beta * d * (Pe - Pd) / sigma ** 2 * self.h * element2 for
element1, element2 in zip(self.bolus_theta, F)]
if food_counter == 0:
self.previous_state_breakfast = next_value
elif food_counter == 1:
self.previous_state_lunch = next_value
else:
self.previous_state_dinner = next_value
assert sigma > 0.0000001, "sigma is too low"
# self.theta_array_1.append(self.theta[0])
# self.theta_array_2.append(self.theta[1])
# self.axis[2].plot(self.time_array, self.theta_array_1)
# self.axis[3].plot(self.time_array, self.theta_array_2)
return Pe
def update_bolus(self, old_bolus, P):
fusion_rate = old_bolus + self.m * P * old_bolus
l = 1 if (self.current_basal_rate > self.previous_basal_rate and fusion_rate < old_bolus) or (
self.current_basal_rate < self.previous_basal_rate and fusion_rate > old_bolus) else 0
# fusion_rate = l * old_bolus + (1 - l) * fusion_rate
bl_change = fusion_rate - old_bolus
if abs(bl_change / old_bolus) > percent_value:
old_bolus += sign(bl_change) * old_bolus * percent_value
print(" used % changed")
else:
old_bolus += bl_change
print(" didn't use % changed")
return old_bolus
| true | true |
f71b09cc7eff04c4f945a4c71943c706e084229f | 43,925 | py | Python | benchmarks/ltl_maxplus/f3/maxplus_20_83.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/ltl_maxplus/f3/maxplus_20_83.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/ltl_maxplus/f3/maxplus_20_83.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z |
from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_true, msat_make_false
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type
from mathsat import msat_make_and as _msat_make_and
from mathsat import msat_make_or as _msat_make_or
from mathsat import msat_make_not
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
def msat_make_and(menv: msat_env, *args):
if len(args) == 0:
return msat_make_true(menv)
if len(args) == 1:
return args[0]
res = _msat_make_and(menv, args[0], args[1])
for arg in args[2:]:
res = _msat_make_and(menv, res, arg)
return res
def msat_make_or(menv: msat_env, *args):
if len(args) == 0:
return msat_make_false(menv)
if len(args) == 1:
return args[0]
res = _msat_make_or(menv, args[0], args[1])
for arg in args[2:]:
res = _msat_make_or(menv, res, arg)
return res
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return the term ``arg0 - arg1``, encoded as ``arg0 + (-1 * arg1)``."""
    neg_arg1 = msat_make_times(menv, arg1, msat_make_number(menv, "-1"))
    return msat_make_plus(menv, arg0, neg_arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 < arg1``, i.e. the negation of ``arg0 >= arg1``."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return the term ``arg0 >= arg1`` (encoded via the primitive leq)."""
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 > arg1``, i.e. the negation of ``arg0 <= arg1``."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return ``arg0 -> arg1``, encoded as ``(not arg0) or arg1``."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                   msat_term, msat_term):
    """Build a 20-variable max-plus transition system and its LTL property.

    Each next-state variable x_i' is the maximum over a fixed set of delayed
    predecessors, x_i' = max_j (x_{s_j} + d_j).  The max is encoded as the
    conjunction "x_i' >= every candidate" together with the disjunction
    "x_i' == some candidate".

    The original body hand-unrolled all 20 transitions (~700 lines of
    copy-paste); here the same formula is built from a data table, producing
    an identical term structure.

    Returns (TermMap curr->next, init, trans, ltl).
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    real_type = msat_get_rational_type(menv)
    names = ["x_%d" % i for i in range(20)]
    xs = [msat_make_constant(menv,
                             msat_declare_function(menv, name, real_type))
          for name in names]
    x_xs = [msat_make_constant(menv,
                               msat_declare_function(menv, name_next(name),
                                                     real_type))
            for name in names]
    curr2next = {x: x_x for x, x_x in zip(xs, x_xs)}

    # delays[i] lists the (source index, integer delay) pairs feeding the
    # update of x_i; these values reproduce the original unrolled transitions.
    delays = [
        [(0, 8), (4, 9), (5, 9), (7, 12), (11, 20),
         (12, 15), (14, 12), (15, 5), (18, 1), (19, 5)],
        [(1, 20), (4, 16), (5, 17), (6, 6), (8, 19),
         (11, 13), (12, 1), (15, 5), (16, 1), (18, 15)],
        [(0, 4), (1, 1), (3, 12), (5, 18), (7, 13),
         (8, 12), (14, 17), (16, 14), (17, 1), (19, 16)],
        [(2, 9), (3, 17), (5, 2), (7, 5), (13, 20),
         (15, 4), (16, 20), (17, 7), (18, 11), (19, 3)],
        [(0, 14), (2, 2), (4, 13), (5, 4), (6, 5),
         (10, 17), (12, 16), (14, 15), (15, 15), (18, 9)],
        [(2, 17), (4, 2), (5, 18), (6, 17), (8, 20),
         (10, 7), (14, 2), (16, 19), (17, 12), (18, 13)],
        [(2, 8), (3, 2), (5, 13), (9, 15), (11, 12),
         (13, 2), (14, 18), (16, 17), (17, 7), (18, 11)],
        [(0, 3), (3, 11), (5, 12), (7, 3), (10, 5),
         (11, 5), (14, 5), (17, 20), (18, 14), (19, 10)],
        [(0, 15), (3, 9), (5, 4), (6, 16), (9, 3),
         (10, 18), (12, 1), (16, 7), (17, 14), (19, 10)],
        [(3, 5), (4, 4), (5, 19), (6, 9), (10, 5),
         (12, 12), (14, 7), (15, 12), (16, 20), (17, 3)],
        [(0, 20), (3, 12), (6, 18), (8, 8), (9, 8),
         (10, 2), (11, 16), (16, 18), (17, 20), (19, 11)],
        [(0, 8), (4, 4), (7, 2), (8, 12), (10, 17),
         (11, 17), (12, 19), (15, 9), (18, 20), (19, 11)],
        [(1, 20), (5, 1), (6, 18), (7, 14), (8, 13),
         (10, 17), (11, 9), (12, 8), (13, 14), (18, 12)],
        [(0, 10), (1, 15), (2, 4), (7, 13), (10, 15),
         (12, 17), (13, 19), (14, 7), (15, 3), (18, 15)],
        [(0, 1), (1, 1), (4, 16), (8, 20), (9, 12),
         (10, 9), (11, 15), (14, 11), (18, 9), (19, 7)],
        [(1, 17), (3, 18), (6, 18), (9, 5), (10, 16),
         (13, 5), (14, 14), (17, 10), (18, 13), (19, 9)],
        [(0, 14), (1, 2), (4, 3), (5, 18), (6, 8),
         (9, 17), (12, 17), (13, 2), (15, 4), (17, 1)],
        [(0, 16), (4, 14), (6, 20), (7, 15), (8, 2),
         (11, 5), (14, 13), (16, 10), (18, 4), (19, 1)],
        [(0, 9), (5, 12), (6, 19), (7, 11), (9, 9),
         (10, 19), (11, 20), (12, 2), (13, 17), (15, 7)],
        [(0, 6), (1, 18), (3, 2), (7, 4), (9, 1),
         (10, 2), (14, 11), (16, 2), (17, 16), (19, 5)],
    ]

    init = msat_make_true(menv)
    trans = msat_make_true(menv)
    # transitions
    for tgt, row in enumerate(delays):
        cands = [msat_make_plus(menv, xs[src],
                                msat_make_number(menv, "%d.0" % delay))
                 for src, delay in row]
        # x_tgt' dominates every candidate ...
        upper = msat_make_and(menv, *(msat_make_geq(menv, x_xs[tgt], c)
                                      for c in cands))
        # ... and coincides with at least one of them: hence it is the max.
        hit = msat_make_or(menv, *(msat_make_equal(menv, x_xs[tgt], c)
                                   for c in cands))
        trans = msat_make_and(menv, trans,
                              msat_make_and(menv, upper, hit))

    def diff_gt(i: int, j: int, bound: str) -> msat_term:
        # Atom xs[i] - xs[j] > bound.
        return msat_make_gt(menv, msat_make_minus(menv, xs[i], xs[j]),
                            msat_make_number(menv, bound))

    # ltl property: ((x_4 - x_10 > -8) & ((x_2 - x_12 > 17) U (x_3 - x_14 > 10)))
    ltl = msat_make_and(menv,
                        diff_gt(4, 10, "-8"),
                        enc.make_U(diff_gt(2, 12, "17"),
                                   diff_gt(3, 14, "10")))
    return TermMap(curr2next), init, trans, ltl
| 54.02829 | 305 | 0.507388 |
from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_true, msat_make_false
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type
from mathsat import msat_make_and as _msat_make_and
from mathsat import msat_make_or as _msat_make_or
from mathsat import msat_make_not
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
def msat_make_and(menv: msat_env, *args):
    """Conjoin all *args* left to right; empty input yields the true term."""
    if not args:
        return msat_make_true(menv)
    result = args[0]
    for other in args[1:]:
        result = _msat_make_and(menv, result, other)
    return result
def msat_make_or(menv: msat_env, *args):
    """Disjoin all *args* left to right; empty input yields the false term."""
    if not args:
        return msat_make_false(menv)
    result = args[0]
    for other in args[1:]:
        result = _msat_make_or(menv, result, other)
    return result
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Subtraction ``arg0 - arg1`` built from plus/times primitives."""
    negated = msat_make_times(menv, arg1, msat_make_number(menv, "-1"))
    return msat_make_plus(menv, arg0, negated)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Strict less-than as the negation of ``arg0 >= arg1``."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return the term ``arg0 >= arg1`` (encoded via the primitive leq)."""
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Strict greater-than as the negation of ``arg0 <= arg1``."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Material implication ``arg0 -> arg1`` as ``(not arg0) or arg1``."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                   msat_term, msat_term):
    """Build the transition system and LTL property for this benchmark.

    The system has 20 real-valued variables x_0..x_19.  Each step is a
    max-plus update: for every (src, const) pair in DELTAS[i], the next
    value x_i' must satisfy x_i' >= xs[src] + const, and it must equal at
    least one of those sums -- i.e. x_i' = max over the row of
    (xs[src] + const).  The constraint terms are built with exactly the
    same left-fold and/or call pattern as the original hand-expanded
    code, so the resulting (hash-consed) msat terms are identical.

    :param menv: mathsat environment the terms are created in.
    :param enc: LTL encoder used for the temporal (Until) operator.
    :return: tuple (curr2next TermMap, init term, trans term, ltl term).
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    real_type = msat_get_rational_type(menv)
    names = ["x_%d" % idx for idx in range(20)]
    xs = [msat_make_constant(menv,
                             msat_declare_function(menv, name, real_type))
          for name in names]
    x_xs = [msat_make_constant(menv,
                               msat_declare_function(menv, name_next(name),
                                                     real_type))
            for name in names]
    curr2next = {x: x_x for x, x_x in zip(xs, x_xs)}

    # deltas[i] lists the (source index, added constant) pairs bounding the
    # next value of x_i.  This table replaces ~650 lines of duplicated
    # expr/geq/equal boilerplate in the original generated code.
    deltas = [
        [(0, 8), (4, 9), (5, 9), (7, 12), (11, 20), (12, 15), (14, 12),
         (15, 5), (18, 1), (19, 5)],
        [(1, 20), (4, 16), (5, 17), (6, 6), (8, 19), (11, 13), (12, 1),
         (15, 5), (16, 1), (18, 15)],
        [(0, 4), (1, 1), (3, 12), (5, 18), (7, 13), (8, 12), (14, 17),
         (16, 14), (17, 1), (19, 16)],
        [(2, 9), (3, 17), (5, 2), (7, 5), (13, 20), (15, 4), (16, 20),
         (17, 7), (18, 11), (19, 3)],
        [(0, 14), (2, 2), (4, 13), (5, 4), (6, 5), (10, 17), (12, 16),
         (14, 15), (15, 15), (18, 9)],
        [(2, 17), (4, 2), (5, 18), (6, 17), (8, 20), (10, 7), (14, 2),
         (16, 19), (17, 12), (18, 13)],
        [(2, 8), (3, 2), (5, 13), (9, 15), (11, 12), (13, 2), (14, 18),
         (16, 17), (17, 7), (18, 11)],
        [(0, 3), (3, 11), (5, 12), (7, 3), (10, 5), (11, 5), (14, 5),
         (17, 20), (18, 14), (19, 10)],
        [(0, 15), (3, 9), (5, 4), (6, 16), (9, 3), (10, 18), (12, 1),
         (16, 7), (17, 14), (19, 10)],
        [(3, 5), (4, 4), (5, 19), (6, 9), (10, 5), (12, 12), (14, 7),
         (15, 12), (16, 20), (17, 3)],
        [(0, 20), (3, 12), (6, 18), (8, 8), (9, 8), (10, 2), (11, 16),
         (16, 18), (17, 20), (19, 11)],
        [(0, 8), (4, 4), (7, 2), (8, 12), (10, 17), (11, 17), (12, 19),
         (15, 9), (18, 20), (19, 11)],
        [(1, 20), (5, 1), (6, 18), (7, 14), (8, 13), (10, 17), (11, 9),
         (12, 8), (13, 14), (18, 12)],
        [(0, 10), (1, 15), (2, 4), (7, 13), (10, 15), (12, 17), (13, 19),
         (14, 7), (15, 3), (18, 15)],
        [(0, 1), (1, 1), (4, 16), (8, 20), (9, 12), (10, 9), (11, 15),
         (14, 11), (18, 9), (19, 7)],
        [(1, 17), (3, 18), (6, 18), (9, 5), (10, 16), (13, 5), (14, 14),
         (17, 10), (18, 13), (19, 9)],
        [(0, 14), (1, 2), (4, 3), (5, 18), (6, 8), (9, 17), (12, 17),
         (13, 2), (15, 4), (17, 1)],
        [(0, 16), (4, 14), (6, 20), (7, 15), (8, 2), (11, 5), (14, 13),
         (16, 10), (18, 4), (19, 1)],
        [(0, 9), (5, 12), (6, 19), (7, 11), (9, 9), (10, 19), (11, 20),
         (12, 2), (13, 17), (15, 7)],
        [(0, 6), (1, 18), (3, 2), (7, 4), (9, 1), (10, 2), (14, 11),
         (16, 2), (17, 16), (19, 5)],
    ]

    init = msat_make_true(menv)
    trans = msat_make_true(menv)
    for tgt, row in enumerate(deltas):
        # Candidate sums xs[src] + const for this target variable.
        sums = [msat_make_plus(menv, xs[src],
                               msat_make_number(menv, "%d.0" % const))
                for src, const in row]
        # x_tgt' must dominate every candidate sum ...
        lower_bounds = msat_make_and(
            menv, *[msat_make_geq(menv, x_xs[tgt], s) for s in sums])
        # ... and must coincide with at least one of them (so it is the max).
        hits = msat_make_or(
            menv, *[msat_make_equal(menv, x_xs[tgt], s) for s in sums])
        step = msat_make_and(menv, lower_bounds, hits)
        trans = msat_make_and(menv, trans, step)

    # Property: (x4 - x10 > -8) & ((x2 - x12 > 17) U (x3 - x14 > 10))
    ltl = msat_make_and(
        menv,
        msat_make_gt(menv, msat_make_minus(menv, xs[4], xs[10]),
                     msat_make_number(menv, "-8")),
        enc.make_U(
            msat_make_gt(menv, msat_make_minus(menv, xs[2], xs[12]),
                         msat_make_number(menv, "17")),
            msat_make_gt(menv, msat_make_minus(menv, xs[3], xs[14]),
                         msat_make_number(menv, "10"))))
    return TermMap(curr2next), init, trans, ltl
| true | true |
f71b0aee44ad99983b9dca55c4966839e2bc48a0 | 724 | py | Python | cogs/meme.py | toxic3918/fiirrd-bot | 3005fe4941a24cd5c5e496c67ce90323ccba8d08 | [
"MIT"
] | null | null | null | cogs/meme.py | toxic3918/fiirrd-bot | 3005fe4941a24cd5c5e496c67ce90323ccba8d08 | [
"MIT"
] | null | null | null | cogs/meme.py | toxic3918/fiirrd-bot | 3005fe4941a24cd5c5e496c67ce90323ccba8d08 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import aiohttp
import random
class Meme(commands.Cog):
    """Cog that posts a random image from r/memes on the `meme` command."""

    def __init__(self, client):
        # Bot instance this cog is attached to.
        self.client = client

    @commands.command()
    async def meme(self, ctx):
        """Fetch the r/memes JSON listing and send one random post's image."""
        async with aiohttp.ClientSession() as cs:
            async with cs.get("https://www.reddit.com/r/memes.json") as r:
                memes = await r.json()
        # Pick uniformly from the posts actually returned.  The previous
        # random.randint(0, 25) could index past the end of the listing
        # (reddit returns at most 25 posts per page) and raise IndexError.
        post = random.choice(memes['data']['children'])
        embed = discord.Embed(color=discord.Color.random())
        embed.set_image(url=post['data']['url'])
        embed.set_footer(text=f"Requested By {ctx.author}")
        await ctx.send(embed=embed)
def setup(client):
    # discord.py extension entry point: registers the Meme cog when the
    # extension is loaded (e.g. via client.load_extension("cogs.meme")).
    client.add_cog(Meme(client)) | 27.846154 | 100 | 0.614641 | import discord
from discord.ext import commands
import aiohttp
import random
class Meme(commands.Cog):
    """Cog that posts a random image from r/memes on the `meme` command."""

    def __init__(self, client):
        # Bot instance this cog is attached to.
        self.client = client

    @commands.command()
    async def meme(self, ctx):
        """Fetch the r/memes JSON listing and send one random post's image."""
        async with aiohttp.ClientSession() as cs:
            async with cs.get("https://www.reddit.com/r/memes.json") as r:
                memes = await r.json()
        # Pick uniformly from the posts actually returned.  The previous
        # random.randint(0, 25) could index past the end of the listing
        # (reddit returns at most 25 posts per page) and raise IndexError.
        post = random.choice(memes['data']['children'])
        embed = discord.Embed(color=discord.Color.random())
        embed.set_image(url=post['data']['url'])
        embed.set_footer(text=f"Requested By {ctx.author}")
        await ctx.send(embed=embed)
def setup(client):
    # discord.py extension entry point: registers the Meme cog on load.
    client.add_cog(Meme(client)) | true | true |
f71b0d762d4dc64602e981f1eb7b945440962f2b | 512 | py | Python | Client/setup.py | KostasPakas17/RSPET | de4356e40d803a7c224e2c919cac6a2d6c0a330f | [
"MIT"
] | 289 | 2016-03-15T21:48:09.000Z | 2022-03-16T23:04:11.000Z | Client/setup.py | crypticterminal/RSPET | de4356e40d803a7c224e2c919cac6a2d6c0a330f | [
"MIT"
] | 39 | 2016-04-30T10:14:29.000Z | 2017-10-23T21:08:10.000Z | Client/setup.py | crypticterminal/RSPET | de4356e40d803a7c224e2c919cac6a2d6c0a330f | [
"MIT"
] | 115 | 2016-03-15T20:25:57.000Z | 2021-11-08T23:49:31.000Z | '''
Written for DigitalOcean's Hacktoberfest!
Requires cx_Freeze and must be built on Windows :(
Unfortunately, neither cx_Freeze nor py2exe support cross platform compilation
thus, this particular solution was set into motion
'''
import sys
from cx_Freeze import setup, Executable
# cx_Freeze build definition: bundles rspet_client.py into a Win32 GUI
# executable.  Must be run on Windows (cx_Freeze does not cross-compile;
# see the module docstring above).
setup(
    name = "RSPET Test", #Change these values to your liking
    version = "0.1",
    description = "A Test Executable",
    executables = [Executable("rspet_client.py", base = "Win32GUI")]) | 28.444444 | 79 | 0.701172 |
# cx_Freeze build script: packages rspet_client.py as a Windows GUI executable.
# NOTE(review): `sys` appears unused — confirm before removing.
import sys
from cx_Freeze import setup, Executable

setup(
    name = "RSPET Test",
    version = "0.1",
    description = "A Test Executable",
    executables = [Executable("rspet_client.py", base = "Win32GUI")])
f71b0e90747b4d9d2219b1202a357213e814bbef | 6,923 | py | Python | heron/tools/cli/src/python/execute.py | pjfanning/incubator-heron | 7db7c24733bd7e66ecfe704ea65f864d1fff4adc | [
"Apache-2.0"
] | 3,348 | 2016-05-25T16:04:31.000Z | 2018-03-28T17:46:14.000Z | heron/tools/cli/src/python/execute.py | pjfanning/incubator-heron | 7db7c24733bd7e66ecfe704ea65f864d1fff4adc | [
"Apache-2.0"
] | 1,542 | 2016-05-25T16:46:44.000Z | 2018-03-29T17:30:23.000Z | heron/tools/cli/src/python/execute.py | pjfanning/incubator-heron | 7db7c24733bd7e66ecfe704ea65f864d1fff4adc | [
"Apache-2.0"
] | 702 | 2016-05-25T16:07:43.000Z | 2018-03-27T06:31:07.000Z | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' execute.py '''
import contextlib
import os
import subprocess
import shlex
import tarfile
import tempfile
import traceback
from heron.common.src.python.utils.log import Log
from heron.tools.cli.src.python.result import SimpleResult, ProcessResult, Status
from heron.common.src.python import pex_loader
from heron.tools.cli.src.python import opts
from heron.tools.cli.src.python import jars
from heron.tools.common.src.python.utils import config
################################################################################
def heron_class(class_name, lib_jars, extra_jars=None, args=None, java_defines=None):
    '''
    Execute a heron class given the args and the jars needed for class path.

    :param class_name: fully qualified Java class to run
    :param lib_jars: jars that must always be on the class path
    :param extra_jars: additional jars placed ahead of lib_jars on the class path
    :param args: command line arguments forwarded to the class
    :param java_defines: java -D definitions (without the -D prefix)
    :return: a SimpleResult when java cannot be found, otherwise a
             ProcessResult wrapping the spawned JVM
    '''
    # Default the optional parameters to empty lists.
    if extra_jars is None:
        extra_jars = []
    if args is None:
        args = []
    if java_defines is None:
        java_defines = []

    java_path = config.get_java_path()
    if java_path is None:
        err_context = "Unable to find java command"
        return SimpleResult(Status.InvocationError, err_context)

    # Turn each define into a -D option for the JVM.
    jvm_defines = ['-D' + define for define in java_defines]
    classpath = config.get_classpath(extra_jars + lib_jars)

    # Full command line: JVM flags, class path, class name, then user args.
    command = [java_path, "-client", "-Xmx1g", *jvm_defines, "-cp", classpath, class_name, *args]

    # Heron config is handed to the child process through the environment.
    child_env = os.environ.copy()
    child_env['HERON_OPTIONS'] = opts.get_heron_config()

    Log.debug("Invoking class using command: `%s`", ' '.join(shlex.quote(a) for a in command))
    Log.debug("Heron options: {%s}", str(child_env["HERON_OPTIONS"]))

    # stdout carries the Java program's output; stderr carries diagnostics.
    # pylint: disable=consider-using-with
    child = subprocess.Popen(
        command,
        env=child_env,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
        bufsize=1,
    )
    return ProcessResult(child)
def heron_tar(class_name, topology_tar, arguments, tmpdir_root, java_defines):
    '''
    Run a heron class against a topology packaged as a tar archive.

    :param class_name: Java class to execute
    :param topology_tar: path to the topology tar(.gz) file
    :param arguments: command line arguments for the class
    :param tmpdir_root: directory under which the archive is extracted
    :param java_defines: java -D definitions (without the -D prefix)
    :return: result of heron_class
    '''
    # Unpack the archive into a fresh temporary directory.
    extract_dir = tempfile.mkdtemp(dir=tmpdir_root, prefix='tmp')
    with contextlib.closing(tarfile.open(topology_tar)) as archive:
        archive.extractall(path=extract_dir)

    # A pants-built tar keeps the topology jar's basename identical to the
    # tar's (extension aside), with all dependency jars under libs/.
    jar_name = os.path.basename(topology_tar).replace(".tar.gz", "").replace(".tar", "") + ".jar"
    classpath_extras = [
        os.path.join(extract_dir, jar_name),
        os.path.join(extract_dir, "*"),
        os.path.join(extract_dir, "libs/*"),
    ]
    lib_jars = config.get_heron_libs(jars.topology_jars())

    # Delegate the actual JVM invocation.
    return heron_class(class_name, lib_jars, classpath_extras, arguments, java_defines)
def heron_pex(topology_pex, topology_class_name, args=None):
    """Use a topology defined in a PEX.

    :param topology_pex: path to the topology PEX file
    :param topology_class_name: name of the Topology subclass inside the PEX,
        or '-' to run the PEX's own main entry point instead
    :param args: extra command line arguments; args[0] is used as the topology
        name when loading a Topology subclass
    :return: a ProcessResult (main-method mode) or a SimpleResult (class mode)
    """
    Log.debug("Importing %s from %s", topology_class_name, topology_pex)
    if topology_class_name == '-':
        # loading topology by running its main method (if __name__ == "__main__")
        heron_env = os.environ.copy()
        heron_env['HERON_OPTIONS'] = opts.get_heron_config()
        cmd = [topology_pex]
        if args is not None:
            cmd.extend(args)
        Log.debug("Invoking class using command: ``%s''", ' '.join(cmd))
        Log.debug('Heron options: {%s}', str(heron_env['HERON_OPTIONS']))
        # invoke the command with subprocess and print error message, if any
        # pylint: disable=consider-using-with
        process = subprocess.Popen(cmd, env=heron_env, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
        # pylint: disable=fixme
        # todo(rli): improve python topology submission workflow
        return ProcessResult(process)
    try:
        # loading topology from Topology's subclass (no main method)
        # to support specifying the name of topology
        Log.debug("args: %s", args)
        if args is not None and isinstance(args, (list, tuple)) and len(args) > 0:
            opts.set_config('cmdline.topology.name', args[0])
        os.environ["HERON_OPTIONS"] = opts.get_heron_config()
        Log.debug("Heron options: {%s}", os.environ["HERON_OPTIONS"])
        pex_loader.load_pex(topology_pex)
        topology_class = pex_loader.import_and_get_class(topology_pex, topology_class_name)
        topology_class.write()
        return SimpleResult(Status.Ok)
    except Exception as ex:
        Log.debug(traceback.format_exc())
        err_context = f"Topology {topology_class_name} failed to be loaded from the given pex: {ex}"
        return SimpleResult(Status.HeronError, err_context)
    # Fix: the previous trailing `return None` was unreachable — both the try
    # body and the `except Exception` handler always return.
# pylint: disable=superfluous-parens
def heron_cpp(topology_binary, args=None):
    """Launch a topology compiled as a native binary.

    :param topology_binary: path to the executable topology
    :param args: extra command line arguments for the binary
    :return: a ProcessResult wrapping the spawned process
    """
    Log.debug("Executing %s", topology_binary)

    # Heron config is handed to the child process through the environment.
    child_env = os.environ.copy()
    child_env['HERON_OPTIONS'] = opts.get_heron_config()

    command = [topology_binary]
    if args is not None:
        command += list(args)

    Log.debug("Invoking binary using command: ``%s''", ' '.join(command))
    Log.debug('Heron options: {%s}', str(child_env['HERON_OPTIONS']))
    print(f"""Invoking class using command: ``{' '.join(command)}''""")
    print(f"Heron options: {str(child_env['HERON_OPTIONS'])}")

    # invoke the command with subprocess and print error message, if any
    # pylint: disable=consider-using-with
    child = subprocess.Popen(
        command,
        env=child_env,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
        bufsize=1,
    )
    return ProcessResult(child)
| 38.675978 | 99 | 0.707497 |
import contextlib
import os
import subprocess
import shlex
import tarfile
import tempfile
import traceback
from heron.common.src.python.utils.log import Log
from heron.tools.cli.src.python.result import SimpleResult, ProcessResult, Status
from heron.common.src.python import pex_loader
from heron.tools.cli.src.python import opts
from heron.tools.cli.src.python import jars
from heron.tools.common.src.python.utils import config
eronError, err_context)
return None
# pylint: disable=superfluous-parens
def heron_cpp(topology_binary, args=None):
    """Launch a topology compiled as a native binary and return its ProcessResult."""
    Log.debug("Executing %s", topology_binary)
    heron_env = os.environ.copy()
    heron_env['HERON_OPTIONS'] = opts.get_heron_config()
    cmd = [topology_binary]
    if args is not None:
        cmd.extend(args)
    Log.debug("Invoking binary using command: ``%s''", ' '.join(cmd))
    Log.debug('Heron options: {%s}', str(heron_env['HERON_OPTIONS']))
    print(f"""Invoking class using command: ``{' '.join(cmd)}''""")
    print(f"Heron options: {str(heron_env['HERON_OPTIONS'])}")
    # invoke the command with subprocess and print error message, if any
    # pylint: disable=consider-using-with
    proc = subprocess.Popen(cmd, env=heron_env, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, universal_newlines=True, bufsize=1)
    return ProcessResult(proc)
| true | true |
f71b0f0cae9bc62f10f26544cdf87a9d56fde71e | 85,110 | py | Python | src/full_node/full_node.py | akubera/chia-blockchain | 91f038e2193755e2a6ca22e2160e2c8f547c23fe | [
"Apache-2.0"
] | null | null | null | src/full_node/full_node.py | akubera/chia-blockchain | 91f038e2193755e2a6ca22e2160e2c8f547c23fe | [
"Apache-2.0"
] | null | null | null | src/full_node/full_node.py | akubera/chia-blockchain | 91f038e2193755e2a6ca22e2160e2c8f547c23fe | [
"Apache-2.0"
] | null | null | null | import asyncio
import dataclasses
import logging
import random
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import aiosqlite
from blspy import AugSchemeMPL
import src.server.ws_connection as ws # lgtm [py/import-and-import-from]
from src.consensus.block_creation import unfinished_block_to_full_block
from src.consensus.block_record import BlockRecord
from src.consensus.blockchain import Blockchain, ReceiveBlockResult
from src.consensus.constants import ConsensusConstants
from src.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from src.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from src.consensus.multiprocess_validation import PreValidationResult
from src.consensus.network_type import NetworkType
from src.consensus.pot_iterations import calculate_sp_iters
from src.full_node.block_store import BlockStore
from src.full_node.coin_store import CoinStore
from src.full_node.full_node_store import FullNodeStore
from src.full_node.mempool_manager import MempoolManager
from src.full_node.signage_point import SignagePoint
from src.full_node.sync_store import SyncStore
from src.full_node.weight_proof import WeightProofHandler
from src.protocols import farmer_protocol, full_node_protocol, timelord_protocol, wallet_protocol
from src.protocols.full_node_protocol import RejectBlocks, RequestBlocks, RespondBlock, RespondBlocks
from src.protocols.protocol_message_types import ProtocolMessageTypes
from src.server.node_discovery import FullNodePeers
from src.server.outbound_message import Message, NodeType, make_msg
from src.server.server import ChiaServer
from src.types.blockchain_format.classgroup import ClassgroupElement
from src.types.blockchain_format.pool_target import PoolTarget
from src.types.blockchain_format.sized_bytes import bytes32
from src.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from src.types.blockchain_format.vdf import CompressibleVDFField, VDFInfo, VDFProof
from src.types.end_of_slot_bundle import EndOfSubSlotBundle
from src.types.full_block import FullBlock
from src.types.header_block import HeaderBlock
from src.types.mempool_inclusion_status import MempoolInclusionStatus
from src.types.spend_bundle import SpendBundle
from src.types.unfinished_block import UnfinishedBlock
from src.util.errors import ConsensusError, Err
from src.util.genesis_wait import wait_for_genesis_challenge
from src.util.ints import uint8, uint32, uint64, uint128
from src.util.path import mkdir, path_from_root
class FullNode:
block_store: BlockStore
full_node_store: FullNodeStore
full_node_peers: Optional[FullNodePeers]
sync_store: Any
coin_store: CoinStore
mempool_manager: MempoolManager
connection: aiosqlite.Connection
_sync_task: Optional[asyncio.Task]
blockchain: Blockchain
config: Dict
server: Any
log: logging.Logger
constants: ConsensusConstants
_shut_down: bool
root_path: Path
state_changed_callback: Optional[Callable]
timelord_lock: asyncio.Lock
initialized: bool
def __init__(
    self,
    config: Dict,
    root_path: Path,
    consensus_constants: ConsensusConstants,
    name: Optional[str] = None,
):
    """Create a FullNode. Heavy initialization (DB, blockchain) is deferred to _start.

    :param config: the full_node section of the configuration
    :param root_path: root directory of the chia installation
    :param consensus_constants: consensus constants for the selected network
    :param name: optional logger name; defaults to this module's logger
    """
    self.initialized = False
    self.root_path = root_path
    self.config = config
    self.server = None
    self._shut_down = False  # Set to true to close all infinite loops
    self.constants = consensus_constants
    self.pow_creation: Dict[uint32, asyncio.Event] = {}
    self.state_changed_callback: Optional[Callable] = None
    self.full_node_peers = None
    self.sync_store = None
    if name:
        self.log = logging.getLogger(name)
    else:
        self.log = logging.getLogger(__name__)

    # The configured database filename embeds the network name via the
    # CHALLENGE placeholder (e.g. mainnet/testnet).
    db_path_replaced: str = config["database_path"].replace("CHALLENGE", config["selected_network"])
    self.db_path = path_from_root(root_path, db_path_replaced)
    mkdir(self.db_path.parent)
def _set_state_changed_callback(self, callback: Callable) -> None:
    """Register the callback invoked by _state_changed for UI/RPC notifications."""
    self.state_changed_callback = callback
async def regular_start(self):
    """Open the database, create all stores, load the blockchain from disk,
    and resume post-processing from the stored peak (if any).

    Called from _start when the genesis challenge is already known, or from
    delayed_start once it becomes available. Sets self.initialized when done.
    """
    self.log.info("regular_start")
    self.connection = await aiosqlite.connect(self.db_path)
    self.block_store = await BlockStore.create(self.connection)
    self.full_node_store = await FullNodeStore.create(self.constants)
    self.sync_store = await SyncStore.create()
    self.coin_store = await CoinStore.create(self.connection)
    self.log.info("Initializing blockchain from disk")
    start_time = time.time()
    self.blockchain = await Blockchain.create(self.coin_store, self.block_store, self.constants)
    self.mempool_manager = MempoolManager(self.coin_store, self.constants)
    self.weight_proof_handler = WeightProofHandler(self.constants, self.blockchain)
    self._sync_task = None
    time_taken = time.time() - start_time
    if self.blockchain.get_peak() is None:
        self.log.info(f"Initialized with empty blockchain time taken: {int(time_taken)}s")
    else:
        self.log.info(
            f"Blockchain initialized to peak {self.blockchain.get_peak().header_hash} height"
            f" {self.blockchain.get_peak().height}, "
            f"time taken: {int(time_taken)}s"
        )
    pending_tx = await self.mempool_manager.new_peak(self.blockchain.get_peak())
    assert len(pending_tx) == 0  # no pending transactions when starting up

    peak: Optional[BlockRecord] = self.blockchain.get_peak()
    self.uncompact_task = None
    if peak is not None:
        # Re-run peak post-processing so caches/subscribers reflect the stored peak.
        full_peak = await self.blockchain.get_full_peak()
        await self.peak_post_processing(full_peak, peak, max(peak.height - 1, 0), None)
    if self.config["send_uncompact_interval"] != 0:
        assert self.config["target_uncompact_proofs"] != 0
        # Background task that periodically requests compact VDF proofs.
        self.uncompact_task = asyncio.create_task(
            self.broadcast_uncompact_blocks(
                self.config["send_uncompact_interval"],
                self.config["target_uncompact_proofs"],
            )
        )
    self.initialized = True
async def delayed_start(self):
    """Block until the genesis challenge is published, then run the normal startup."""
    self.log.info("delayed_start")
    new_config, new_constants = await wait_for_genesis_challenge(self.root_path, self.constants, "full_node")
    # Adopt the updated config/constants before initializing the stores.
    self.config = new_config
    self.constants = new_constants
    await self.regular_start()
async def _start(self):
    """Start the node: immediately when genesis is known, otherwise in the background."""
    self.timelord_lock = asyncio.Lock()
    if self.constants.GENESIS_CHALLENGE is None:
        # Genesis challenge not yet available; finish startup asynchronously.
        asyncio.create_task(self.delayed_start())
    else:
        # create the store (db) and full node instance
        await self.regular_start()
def set_server(self, server: ChiaServer):
    """Attach the ChiaServer and kick off peer discovery in the background."""
    self.server = server
    try:
        peers = FullNodePeers(
            self.server,
            self.root_path,
            # Inbound slots = total target minus outbound target.
            self.config["target_peer_count"] - self.config["target_outbound_peer_count"],
            self.config["target_outbound_peer_count"],
            self.config["peer_db_path"],
            self.config["introducer_peer"],
            self.config["peer_connect_interval"],
            self.log,
        )
        self.full_node_peers = peers
        asyncio.create_task(peers.start())
    except Exception as exc:
        stack = traceback.format_exc()
        self.log.error(f"Exception: {exc}")
        self.log.error(f"Exception in peer discovery: {exc}")
        self.log.error(f"Exception Stack: {stack}")
def _state_changed(self, change: str):
    """Forward a state-change event to the registered callback, if any."""
    callback = self.state_changed_callback
    if callback is not None:
        callback(change)
async def short_sync_batch(self, peer: ws.WSChiaConnection, start_height: uint32, target_height: uint32) -> bool:
    """
    Tries to sync to a chain which is not too far in the future, by downloading batches of blocks. If the first
    block that we download is not connected to our chain, we return False and do an expensive long sync instead.
    Long sync is not preferred because it requires downloading and validating a weight proof.

    Args:
        peer: peer to sync from
        start_height: height that we should start downloading at. (Our peak is higher)
        target_height: target to sync to

    Returns:
        False if the fork point was not found, and we need to do a long sync. True otherwise.
    """
    # Don't trigger multiple batch syncs to the same peer
    if (
        peer.peer_node_id in self.sync_store.backtrack_syncing
        and self.sync_store.backtrack_syncing[peer.peer_node_id] > 0
    ):
        return True  # Don't batch sync, we are already in progress of a backtrack sync
    if peer.peer_node_id in self.sync_store.batch_syncing:
        return True  # Don't trigger a long sync
    self.sync_store.batch_syncing.add(peer.peer_node_id)

    self.log.info(f"Starting batch short sync from {start_height} to height {target_height}")
    if start_height > 0:
        # Probe the first block: if its parent is unknown to us, this is a deep fork.
        first = await peer.request_block(full_node_protocol.RequestBlock(uint32(start_height), False))
        if first is None or not isinstance(first, full_node_protocol.RespondBlock):
            self.sync_store.batch_syncing.remove(peer.peer_node_id)
            raise ValueError(f"Error short batch syncing, could not fetch block at height {start_height}")
        if not self.blockchain.contains_block(first.block.prev_header_hash):
            self.log.info("Batch syncing stopped, this is a deep chain")
            self.sync_store.batch_syncing.remove(peer.peer_node_id)
            # First sb not connected to our blockchain, do a long sync instead
            return False

    batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
    try:
        for height in range(start_height, target_height, batch_size):
            end_height = min(target_height, height + batch_size)
            request = RequestBlocks(uint32(height), uint32(end_height), True)
            response = await peer.request_blocks(request)
            if not response:
                raise ValueError(f"Error short batch syncing, invalid/no response for {height}-{end_height}")
            # Validation and peak updates happen under the blockchain lock.
            async with self.blockchain.lock:
                success, advanced_peak, fork_height = await self.receive_block_batch(response.blocks, peer, None)
                if not success:
                    raise ValueError(f"Error short batch syncing, failed to validate blocks {height}-{end_height}")
                if advanced_peak:
                    peak = self.blockchain.get_peak()
                    peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak()
                    assert peak is not None and peak_fb is not None and fork_height is not None
                    await self.peak_post_processing(peak_fb, peak, fork_height, peer)
            self.log.info(f"Added blocks {height}-{end_height}")
    except Exception:
        # Always release the batch-syncing marker before propagating.
        self.sync_store.batch_syncing.remove(peer.peer_node_id)
        raise
    self.sync_store.batch_syncing.remove(peer.peer_node_id)
    return True
async def short_sync_backtrack(
    self, peer: ws.WSChiaConnection, peak_height: uint32, target_height: uint32, target_unf_hash: bytes32
):
    """
    Performs a backtrack sync, where blocks are downloaded one at a time from newest to oldest. If we do not
    find the fork point 5 deeper than our peak, we return False and do a long sync instead.

    Args:
        peer: peer to sync from
        peak_height: height of our peak
        target_height: target height
        target_unf_hash: partial hash of the unfinished block of the target

    Returns:
        True iff we found the fork point, and we do not need to long sync.
    """
    try:
        # Track how many backtrack syncs are in flight for this peer.
        if peer.peer_node_id not in self.sync_store.backtrack_syncing:
            self.sync_store.backtrack_syncing[peer.peer_node_id] = 0
        self.sync_store.backtrack_syncing[peer.peer_node_id] += 1

        unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(target_unf_hash)
        curr_height: int = target_height
        found_fork_point = False
        responses = []
        # Walk backwards from the target until we connect to our chain, or give up
        # once we are 5 blocks below our own peak.
        while curr_height > peak_height - 5:
            # If we already have the unfinished block, don't fetch the transactions. In the normal case, we will
            # already have the unfinished block, from when it was broadcast, so we just need to download the header,
            # but not the transactions
            fetch_tx: bool = unfinished_block is None or curr_height != target_height
            curr = await peer.request_block(full_node_protocol.RequestBlock(uint32(curr_height), fetch_tx))

            if curr is None:
                raise ValueError(f"Failed to fetch block {curr_height} from {peer.get_peer_info()}, timed out")
            if curr is None or not isinstance(curr, full_node_protocol.RespondBlock):
                raise ValueError(
                    f"Failed to fetch block {curr_height} from {peer.get_peer_info()}, wrong type {type(curr)}"
                )
            responses.append(curr)
            if self.blockchain.contains_block(curr.block.prev_header_hash) or curr_height == 0:
                found_fork_point = True
                break
            curr_height -= 1
        if found_fork_point:
            # Apply the downloaded blocks oldest-first so each connects to the chain.
            for response in reversed(responses):
                await self.respond_block(response)
    except Exception as e:
        self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1
        raise e
    self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1
    return found_fork_point
async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSChiaConnection):
    """
    We have received a notification of a new peak from a peer. This happens either when we have just connected,
    or when the peer has updated their peak.

    Args:
        request: information about the new peak
        peer: peer that sent the message
    """
    # Store this peak/peer combination in case we want to sync to it, and to keep track of peers
    self.sync_store.peer_has_block(request.header_hash, peer.peer_node_id, request.weight, request.height, True)

    if self.blockchain.contains_block(request.header_hash):
        return None

    # Not interested in less heavy peaks
    peak: Optional[BlockRecord] = self.blockchain.get_peak()
    curr_peak_height = uint32(0) if peak is None else peak.height
    if peak is not None and peak.weight > request.weight:
        return None

    if self.sync_store.get_sync_mode():
        # If peer connects while we are syncing, check if they have the block we are syncing towards
        peak_sync_hash = self.sync_store.get_sync_target_hash()
        peak_sync_height = self.sync_store.get_sync_target_height()
        if peak_sync_hash is not None and request.header_hash != peak_sync_hash and peak_sync_height is not None:
            peak_peers: Set[bytes32] = self.sync_store.get_peers_that_have_peak([peak_sync_hash])
            # Don't ask if we already know this peer has the peak
            if peer.peer_node_id not in peak_peers:
                target_peak_response: Optional[RespondBlock] = await peer.request_block(
                    full_node_protocol.RequestBlock(uint32(peak_sync_height), False), timeout=10
                )
                if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):
                    self.sync_store.peer_has_block(
                        peak_sync_hash,
                        peer.peer_node_id,
                        target_peak_response.block.weight,
                        peak_sync_height,
                        False,
                    )
    else:
        if request.height <= curr_peak_height + self.config["short_sync_blocks_behind_threshold"]:
            # This is the normal case of receiving the next block
            if await self.short_sync_backtrack(
                peer, curr_peak_height, request.height, request.unfinished_reward_block_hash
            ):
                return

        if request.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
            # This is the case of syncing up more than a few blocks, at the start of the chain
            # TODO(almog): fix weight proofs so they work at the beginning as well
            self.log.debug("Doing batch sync, no backup")
            await self.short_sync_batch(peer, uint32(0), request.height)
            return

        if request.height < curr_peak_height + self.config["sync_blocks_behind_threshold"]:
            # This case of being behind but not by so much
            if await self.short_sync_batch(peer, uint32(max(curr_peak_height - 6, 0)), request.height):
                return

        # This is the either the case where we were not able to sync successfully (for example, due to the fork
        # point being in the past), or we are very far behind. Performs a long sync.
        self._sync_task = asyncio.create_task(self._sync())
async def send_peak_to_timelords(
    self, peak_block: Optional[FullBlock] = None, peer: Optional[ws.WSChiaConnection] = None
):
    """
    Sends current peak to timelords. When `peer` is given, only that timelord is
    notified; otherwise the message is broadcast to all connected timelords.
    """
    if peak_block is None:
        peak_block = await self.blockchain.get_full_peak()
    if peak_block is not None:
        peak = self.blockchain.block_record(peak_block.header_hash)
        difficulty = self.blockchain.get_next_difficulty(peak.header_hash, False)
        ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
            self.constants,
            self.blockchain,
            peak.required_iters,
            peak_block,
            True,
        )
        recent_rc = self.blockchain.get_recent_reward_challenges()

        # Walk back to the most recent challenge block or sub-slot start to
        # determine the total iters of the last challenge/sub-slot boundary.
        curr = peak
        while not curr.is_challenge_block(self.constants) and not curr.first_in_sub_slot:
            curr = self.blockchain.block_record(curr.prev_hash)

        if curr.is_challenge_block(self.constants):
            last_csb_or_eos = curr.total_iters
        else:
            last_csb_or_eos = curr.ip_sub_slot_total_iters(self.constants)

        # Determine whether we passed a sub-epoch boundary without yet having
        # included its sub-epoch summary in a block.
        curr = peak
        passed_ses_height_but_not_yet_included = True
        while (curr.height % self.constants.SUB_EPOCH_BLOCKS) != 0:
            if curr.sub_epoch_summary_included:
                passed_ses_height_but_not_yet_included = False
            curr = self.blockchain.block_record(curr.prev_hash)
        if curr.sub_epoch_summary_included or curr.height == 0:
            passed_ses_height_but_not_yet_included = False

        timelord_new_peak: timelord_protocol.NewPeakTimelord = timelord_protocol.NewPeakTimelord(
            peak_block.reward_chain_block,
            difficulty,
            peak.deficit,
            peak.sub_slot_iters,
            ses,
            recent_rc,
            last_csb_or_eos,
            passed_ses_height_but_not_yet_included,
        )

        msg = make_msg(ProtocolMessageTypes.new_peak_timelord, timelord_new_peak)
        if peer is None:
            await self.server.send_to_all([msg], NodeType.TIMELORD)
        else:
            await peer.new_peak_timelord(timelord_new_peak)
async def synced(self) -> bool:
    """Return True iff the latest transaction block is fresh (under 7 minutes old)
    and we are not currently in sync mode."""
    block: Optional[BlockRecord] = self.blockchain.get_peak()
    if block is None:
        return False
    # Walk back to the most recent transaction block; only those carry timestamps.
    while block is not None and not block.is_transaction_block:
        block = self.blockchain.try_block_record(block.prev_hash)
    if block is None or block.timestamp is None:
        return False
    cutoff = uint64(int(time.time() - 60 * 7))
    return block.timestamp >= cutoff and not self.sync_store.get_sync_mode()
async def on_connect(self, connection: ws.WSChiaConnection):
    """
    Whenever we connect to another node / wallet, send them our current heads. Also send heads to farmers
    and challenges to timelords.
    """
    self._state_changed("add_connection")
    self._state_changed("sync_mode")
    if self.full_node_peers is not None:
        asyncio.create_task(self.full_node_peers.on_connect(connection))

    if self.initialized is False:
        # Stores are not set up yet (e.g. waiting on genesis challenge); nothing to share.
        return

    if connection.connection_type is NodeType.FULL_NODE:
        # Send filter to node and request mempool items that are not in it (Only if we are currently synced)
        synced = await self.synced()
        peak_height = self.blockchain.get_peak_height()
        if synced and peak_height is not None and peak_height > self.constants.INITIAL_FREEZE_PERIOD:
            my_filter = self.mempool_manager.get_filter()
            mempool_request = full_node_protocol.RequestMempoolTransactions(my_filter)
            msg = make_msg(ProtocolMessageTypes.request_mempool_transactions, mempool_request)
            await connection.send_message(msg)

    peak_full: Optional[FullBlock] = await self.blockchain.get_full_peak()
    if peak_full is not None:
        peak: BlockRecord = self.blockchain.block_record(peak_full.header_hash)
        if connection.connection_type is NodeType.FULL_NODE:
            request_node = full_node_protocol.NewPeak(
                peak.header_hash,
                peak.height,
                peak.weight,
                peak.height,
                peak_full.reward_chain_block.get_unfinished().get_hash(),
            )
            await connection.send_message(make_msg(ProtocolMessageTypes.new_peak, request_node))

        elif connection.connection_type is NodeType.WALLET:
            # If connected to a wallet, send the Peak
            request_wallet = wallet_protocol.NewPeakWallet(
                peak.header_hash,
                peak.height,
                peak.weight,
                peak.height,
            )
            await connection.send_message(make_msg(ProtocolMessageTypes.new_peak_wallet, request_wallet))

        elif connection.connection_type is NodeType.TIMELORD:
            await self.send_peak_to_timelords()
def on_disconnect(self, connection: ws.WSChiaConnection):
    """Handle a peer disconnect: log it, notify subscribers, and drop the peer's sync-store entries."""
    self.log.info(f"peer disconnected {connection.get_peer_info()}")
    for event in ("close_connection", "sync_mode"):
        self._state_changed(event)
    if self.sync_store is not None:
        self.sync_store.peer_disconnected(connection.peer_node_id)
def _num_needed_peers(self) -> int:
    """Number of additional peer connections needed to reach the configured target (never negative)."""
    assert self.server is not None
    assert self.server.all_connections is not None
    shortfall = self.config["target_peer_count"] - len(self.server.all_connections)
    return max(shortfall, 0)
def _close(self):
    """Signal shutdown and stop the blockchain, mempool, peer discovery, and background tasks."""
    self._shut_down = True
    blockchain = self.blockchain
    if blockchain is not None:
        blockchain.shut_down()
    mempool = self.mempool_manager
    if mempool is not None:
        mempool.shut_down()
    peers = self.full_node_peers
    if peers is not None:
        # Peer discovery closes asynchronously.
        asyncio.create_task(peers.close())
    if self.uncompact_task is not None:
        self.uncompact_task.cancel()
async def _await_closed(self):
    """Cancel any in-flight long sync and close the database connection."""
    try:
        if self._sync_task is not None:
            self._sync_task.cancel()
    except asyncio.TimeoutError:
        # NOTE(review): Task.cancel() does not raise TimeoutError, so this
        # handler looks vestigial — confirm before removing.
        pass
    await self.connection.close()
async def _sync(self):
    """
    Performs a full sync of the blockchain up to the peak.
        - Wait a few seconds for peers to send us their peaks
        - Select the heaviest peak, and request a weight proof from a peer with that peak
        - Validate the weight proof, and disconnect from the peer if invalid
        - Find the fork point to see where to start downloading blocks
        - Download blocks in batch (and in parallel) and verify them one at a time
        - Disconnect peers that provide invalid blocks or don't have the blocks
    """
    # Ensure we are only syncing once and not double calling this method
    if self.sync_store.get_sync_mode():
        return

    self.sync_store.set_sync_mode(True)
    self._state_changed("sync_mode")
    try:
        self.log.info("Starting to perform sync.")
        self.log.info("Waiting to receive peaks from peers.")

        # Wait until we have 3 peaks or up to a max of 30 seconds
        # NOTE(review): there is no `break` once >= 3 peers are known, so the
        # loop spins through its remaining iterations without sleeping —
        # confirm whether an early exit was intended here.
        peaks = []
        for i in range(300):
            peaks = [tup[0] for tup in self.sync_store.get_peak_of_each_peer().values()]
            if len(self.sync_store.get_peers_that_have_peak(peaks)) < 3:
                if self._shut_down:
                    return
                await asyncio.sleep(0.1)

        self.log.info(f"Collected a total of {len(peaks)} peaks.")
        self.sync_peers_handler = None

        # Based on responses from peers about the current peaks, see which peak is the heaviest
        # (similar to longest chain rule).
        target_peak = self.sync_store.get_heaviest_peak()

        if target_peak is None:
            raise RuntimeError("Not performing sync, no peaks collected")
        heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight = target_peak
        self.sync_store.set_peak_target(heaviest_peak_hash, heaviest_peak_height)

        self.log.info(f"Selected peak {heaviest_peak_height}, {heaviest_peak_hash}")
        # Check which peers are updated to this height

        peers = []
        coroutines = []
        for peer in self.server.all_connections.values():
            if peer.connection_type == NodeType.FULL_NODE:
                peers.append(peer.peer_node_id)
                coroutines.append(
                    peer.request_block(
                        full_node_protocol.RequestBlock(uint32(heaviest_peak_height), True), timeout=10
                    )
                )
        for i, target_peak_response in enumerate(await asyncio.gather(*coroutines)):
            if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):
                self.sync_store.peer_has_block(
                    heaviest_peak_hash, peers[i], heaviest_peak_weight, heaviest_peak_height, False
                )
        # TODO: disconnect from peer which gave us the heaviest_peak, if nobody has the peak

        peer_ids: Set[bytes32] = self.sync_store.get_peers_that_have_peak([heaviest_peak_hash])
        peers_with_peak: List = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids]

        # Request weight proof from a random peer
        self.log.info(f"Total of {len(peers_with_peak)} peers with peak {heaviest_peak_height}")
        weight_proof_peer = random.choice(peers_with_peak)
        self.log.info(
            f"Requesting weight proof from peer {weight_proof_peer.peer_host} up to height"
            f" {heaviest_peak_height}"
        )

        if self.blockchain.get_peak() is not None and heaviest_peak_weight <= self.blockchain.get_peak().weight:
            raise ValueError("Not performing sync, already caught up.")

        request = full_node_protocol.RequestProofOfWeight(heaviest_peak_height, heaviest_peak_hash)
        response = await weight_proof_peer.request_proof_of_weight(request, timeout=180)

        # Disconnect from this peer, because they have not behaved properly
        if response is None or not isinstance(response, full_node_protocol.RespondProofOfWeight):
            await weight_proof_peer.close(600)
            raise RuntimeError(f"Weight proof did not arrive in time from peer: {weight_proof_peer.peer_host}")
        if response.wp.recent_chain_data[-1].reward_chain_block.height != heaviest_peak_height:
            await weight_proof_peer.close(600)
            raise RuntimeError(f"Weight proof had the wrong height: {weight_proof_peer.peer_host}")
        if response.wp.recent_chain_data[-1].reward_chain_block.weight != heaviest_peak_weight:
            await weight_proof_peer.close(600)
            raise RuntimeError(f"Weight proof had the wrong weight: {weight_proof_peer.peer_host}")

        try:
            validated, fork_point = await self.weight_proof_handler.validate_weight_proof(response.wp)
        except Exception as e:
            await weight_proof_peer.close(600)
            raise ValueError(f"Weight proof validation threw an error {e}")
        if not validated:
            await weight_proof_peer.close(600)
            raise ValueError("Weight proof validation failed")

        self.log.info(f"Re-checked peers: total of {len(peers_with_peak)} peers with peak {heaviest_peak_height}")

        # Ensures that the fork point does not change
        async with self.blockchain.lock:
            await self.blockchain.warmup(fork_point)
            await self.sync_from_fork_point(fork_point, heaviest_peak_height, heaviest_peak_hash)
    except asyncio.CancelledError:
        self.log.warning("Syncing failed, CancelledError")
    except Exception as e:
        tb = traceback.format_exc()
        self.log.error(f"Error with syncing: {type(e)}{tb}")
    finally:
        if self._shut_down:
            return
        await self._finish_sync()
async def sync_from_fork_point(self, fork_point_height: int, target_peak_sb_height: uint32, peak_hash: bytes32):
    """
    Downloads and adds blocks in batches, from fork_point_height up to target_peak_sb_height,
    requesting each batch from peers that advertised the peak `peak_hash`. After every batch,
    wallet peers are notified of our current peak. Stops early if no peer can serve a batch.

    Raises RuntimeError if no connected peer advertised `peak_hash`.
    """
    self.log.info(f"Start syncing from fork point at {fork_point_height} up to {target_peak_sb_height}")
    peer_ids: Set[bytes32] = self.sync_store.get_peers_that_have_peak([peak_hash])
    peers_with_peak: List = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids]
    if len(peers_with_peak) == 0:
        raise RuntimeError(f"Not syncing, no peers with header_hash {peak_hash} ")
    advanced_peak = False
    batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
    for i in range(fork_point_height, target_peak_sb_height, batch_size):
        start_height = i
        end_height = min(target_peak_sb_height, start_height + batch_size)
        request = RequestBlocks(uint32(start_height), uint32(end_height), True)
        self.log.info(f"Requesting blocks: {start_height} to {end_height}")
        batch_added = False
        # Peers that timed out, rejected, or disconnected; removed from rotation below
        to_remove = []
        for peer in peers_with_peak:
            if peer.closed:
                to_remove.append(peer)
                continue
            response = await peer.request_blocks(request, timeout=15)
            if response is None:
                # Timed out: disconnect and stop asking this peer
                await peer.close()
                to_remove.append(peer)
                continue
            if isinstance(response, RejectBlocks):
                to_remove.append(peer)
                continue
            elif isinstance(response, RespondBlocks):
                # Pass the fork point only until the peak has advanced once
                success, advanced_peak, _ = await self.receive_block_batch(
                    response.blocks, peer, None if advanced_peak else uint32(fork_point_height)
                )
                if success is False:
                    await peer.close()
                    continue
                else:
                    batch_added = True
                break
        # Tell wallets where we are after each batch, even while still syncing
        peak = self.blockchain.get_peak()
        assert peak is not None
        msg = make_msg(
            ProtocolMessageTypes.new_peak_wallet,
            wallet_protocol.NewPeakWallet(
                peak.header_hash,
                peak.height,
                peak.weight,
                uint32(max(peak.height - 1, uint32(0))),
            ),
        )
        await self.server.send_to_all([msg], NodeType.WALLET)
        for peer in to_remove:
            peers_with_peak.remove(peer)
        if self.sync_store.peers_changed.is_set():
            # Peer set changed during the batch; refresh the list of candidate peers
            peer_ids = self.sync_store.get_peers_that_have_peak([peak_hash])
            peers_with_peak = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids]
            self.log.info(f"Number of peers we are syncing from: {len(peers_with_peak)}")
            self.sync_store.peers_changed.clear()
        if batch_added is False:
            self.log.info(f"Failed to fetch blocks {start_height} to {end_height} from peers: {peers_with_peak}")
            break
        else:
            self.log.info(f"Added blocks {start_height} to {end_height}")
            # Trim the in-memory block record cache behind the sync window
            self.blockchain.clean_block_record(
                min(
                    end_height - self.constants.BLOCKS_CACHE_SIZE,
                    peak.height - self.constants.BLOCKS_CACHE_SIZE,
                )
            )
async def receive_block_batch(
    self, all_blocks: List[FullBlock], peer: ws.WSChiaConnection, fork_point: Optional[uint32]
) -> Tuple[bool, bool, Optional[uint32]]:
    """
    Pre-validates and adds a batch of blocks received from `peer` to the blockchain.

    Returns (success, advanced_peak, fork_height):
    - success: False if pre-validation failed or any block was invalid/disconnected.
    - advanced_peak: True if any block in the batch became the new peak.
    - fork_height: fork height from the last receive_block call (None if pre-validation
      itself returned None).
    """
    advanced_peak = False
    fork_height: Optional[uint32] = uint32(0)
    # Skip the prefix of blocks we already have; only validate the remainder
    blocks_to_validate: List[FullBlock] = []
    for i, block in enumerate(all_blocks):
        if not self.blockchain.contains_block(block.header_hash):
            blocks_to_validate = all_blocks[i:]
            break
    if len(blocks_to_validate) == 0:
        return True, False, fork_height
    pre_validate_start = time.time()
    pre_validation_results: Optional[
        List[PreValidationResult]
    ] = await self.blockchain.pre_validate_blocks_multiprocessing(blocks_to_validate)
    self.log.debug(f"Block pre-validation time: {time.time() - pre_validate_start}")
    if pre_validation_results is None:
        return False, False, None
    for i, block in enumerate(blocks_to_validate):
        if pre_validation_results[i].error is not None:
            self.log.error(
                f"Invalid block from peer: {peer.get_peer_info()} {Err(pre_validation_results[i].error)}"
            )
            return False, advanced_peak, fork_height
        assert pre_validation_results[i].required_iters is not None
        # The fork point is only relevant until the peak advances once
        (result, error, fork_height,) = await self.blockchain.receive_block(
            block, pre_validation_results[i], None if advanced_peak else fork_point
        )
        if result == ReceiveBlockResult.NEW_PEAK:
            advanced_peak = True
        elif result == ReceiveBlockResult.INVALID_BLOCK or result == ReceiveBlockResult.DISCONNECTED_BLOCK:
            if error is not None:
                self.log.error(f"Error: {error}, Invalid block from peer: {peer.get_peer_info()} ")
            return False, advanced_peak, fork_height
        block_record = self.blockchain.block_record(block.header_hash)
        if block_record.sub_epoch_summary_included is not None:
            # Crossed a sub-epoch boundary: refresh weight-proof segments
            await self.weight_proof_handler.create_prev_sub_epoch_segments()
    if advanced_peak:
        self._state_changed("new_peak")
    self.log.debug(
        f"Total time for {len(blocks_to_validate)} blocks: {time.time() - pre_validate_start}, "
        f"advanced: {advanced_peak}"
    )
    return True, advanced_peak, fork_height
async def _finish_sync(self):
    """
    Finalize sync by setting sync mode to False, clearing all sync information, and adding any final
    blocks that we have finalized recently.
    """
    self.sync_store.set_sync_mode(False)
    self._state_changed("sync_mode")
    if self.server is None:
        return
    peak: Optional[BlockRecord] = self.blockchain.get_peak()
    async with self.blockchain.lock:
        await self.sync_store.clear_sync_info()
        peak_fb: FullBlock = await self.blockchain.get_full_peak()
        if peak is not None:
            # Re-run peak processing under the lock so stores/peers see the post-sync peak
            await self.peak_post_processing(peak_fb, peak, peak.height - 1, None)
    if peak is not None:
        # Warm the weight proof for the new peak (outside the blockchain lock)
        await self.weight_proof_handler.get_proof_of_weight(peak.header_hash)
        self._state_changed("block")
def has_valid_pool_sig(self, block: Union[UnfinishedBlock, FullBlock]):
    """
    Returns True unless the block targets the pre-farm pool puzzle hash (outside of the
    genesis child), carries a pool public key, and its pool signature fails verification.
    """
    foliage_data = block.foliage.foliage_block_data
    pre_farm_target = PoolTarget(self.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH, uint32(0))
    pool_pk = block.reward_chain_block.proof_of_space.pool_public_key

    # Only pre-farm-targeted blocks are checked here; children of genesis and plots
    # without a pool public key are exempt.
    if foliage_data.pool_target != pre_farm_target:
        return True
    if block.foliage.prev_block_hash == self.constants.GENESIS_CHALLENGE:
        return True
    if pool_pk is None:
        return True
    return AugSchemeMPL.verify(
        pool_pk,
        bytes(foliage_data.pool_target),
        foliage_data.pool_signature,
    )
async def peak_post_processing(
    self, block: FullBlock, record: BlockRecord, fork_height: uint32, peer: Optional[ws.WSChiaConnection]
):
    """
    Must be called under self.blockchain.lock. This updates the internal state of the full node with the
    latest peak information. It also notifies peers about the new peak.

    `block`/`record` are the new peak; `fork_height` is where the chain forked; `peer` (if any)
    is the full node that sent us the block and is excluded from the new-peak broadcast.
    """
    difficulty = self.blockchain.get_next_difficulty(record.header_hash, False)
    sub_slot_iters = self.blockchain.get_next_slot_iters(record.header_hash, False)
    self.log.info(
        f"🌱 Updated peak to height {record.height}, weight {record.weight}, "
        f"hh {record.header_hash}, "
        f"forked at {fork_height}, rh: {record.reward_infusion_new_challenge}, "
        f"total iters: {record.total_iters}, "
        f"overflow: {record.overflow}, "
        f"deficit: {record.deficit}, "
        f"difficulty: {difficulty}, "
        f"sub slot iters: {sub_slot_iters}"
    )
    sub_slots = await self.blockchain.get_sp_and_ip_sub_slots(record.header_hash)
    assert sub_slots is not None
    if not self.sync_store.get_sync_mode():
        self.blockchain.clean_block_records()
    # Update the full node store; the boolean flag marks a reorg (peak not extending prev)
    added_eos, new_sps, new_ips = self.full_node_store.new_peak(
        record,
        block,
        sub_slots[0],
        sub_slots[1],
        fork_height != block.height - 1 and block.height != 0,
        self.blockchain,
    )
    if sub_slots[1] is None:
        assert record.ip_sub_slot_total_iters(self.constants) == 0
    # Ensure the signage point is also in the store, for consistency
    self.full_node_store.new_signage_point(
        record.signage_point_index,
        self.blockchain,
        record,
        record.sub_slot_iters,
        SignagePoint(
            block.reward_chain_block.challenge_chain_sp_vdf,
            block.challenge_chain_sp_proof,
            block.reward_chain_block.reward_chain_sp_vdf,
            block.reward_chain_sp_proof,
        ),
        skip_vdf_validation=True,
    )
    # Update the mempool (returns successful pending transactions added to the mempool)
    for bundle, result, spend_name in await self.mempool_manager.new_peak(self.blockchain.get_peak()):
        self.log.debug(f"Added transaction to mempool: {spend_name}")
        mempool_item = self.mempool_manager.get_mempool_item(spend_name)
        assert mempool_item is not None
        fees = mempool_item.fee
        assert fees >= 0
        assert result.cost is not None
        new_tx = full_node_protocol.NewTransaction(
            spend_name,
            result.cost,
            uint64(bundle.fees()),
        )
        msg = make_msg(ProtocolMessageTypes.new_transaction, new_tx)
        await self.server.send_to_all([msg], NodeType.FULL_NODE)
    # If there were pending end of slots that happen after this peak, broadcast them if they are added
    if added_eos is not None:
        broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
            added_eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
            added_eos.challenge_chain.get_hash(),
            uint8(0),
            added_eos.reward_chain.end_of_slot_vdf.challenge,
        )
        msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast)
        await self.server.send_to_all([msg], NodeType.FULL_NODE)
    # TODO: maybe add and broadcast new SP/IPs as well?
    if record.height % 1000 == 0:
        # Occasionally clear the seen list to keep it small
        self.full_node_store.clear_seen_unfinished_blocks()
    if self.sync_store.get_sync_mode() is False:
        await self.send_peak_to_timelords(block)
        # Tell full nodes about the new peak
        msg = make_msg(
            ProtocolMessageTypes.new_peak,
            full_node_protocol.NewPeak(
                record.header_hash,
                record.height,
                record.weight,
                fork_height,
                block.reward_chain_block.get_unfinished().get_hash(),
            ),
        )
        if peer is not None:
            # Don't echo the peak back to the peer that gave us the block
            await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
        else:
            await self.server.send_to_all([msg], NodeType.FULL_NODE)
    # Tell wallets about the new peak
    msg = make_msg(
        ProtocolMessageTypes.new_peak_wallet,
        wallet_protocol.NewPeakWallet(
            record.header_hash,
            record.height,
            record.weight,
            fork_height,
        ),
    )
    await self.server.send_to_all([msg], NodeType.WALLET)
    self._state_changed("new_peak")
async def respond_block(
    self,
    respond_block: full_node_protocol.RespondBlock,
    peer: Optional[ws.WSChiaConnection] = None,
) -> Optional[Message]:
    """
    Receive a full block from a peer full node (or ourselves).

    Validates and adds the block under the blockchain lock, runs peak post-processing if it
    became the new peak, and prunes per-height caches. Returns None in all cases; raises
    ConsensusError for invalid blocks and ValueError when validation itself fails.
    """
    block: FullBlock = respond_block.block
    if self.sync_store.get_sync_mode():
        return None
    # Adds the block to seen, and check if it's seen before (which means header is in memory)
    header_hash = block.header_hash
    if self.blockchain.contains_block(header_hash):
        return None
    pre_validation_result: Optional[PreValidationResult] = None
    if block.is_transaction_block() and block.transactions_generator is None:
        # This is the case where we already had the unfinished block, and asked for this block without
        # the transactions (since we already had them). Therefore, here we add the transactions.
        unfinished_rh: bytes32 = block.reward_chain_block.get_unfinished().get_hash()
        unf_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(unfinished_rh)
        if unf_block is not None and unf_block.transactions_generator is not None:
            # Reuse the validation result from the unfinished block too
            pre_validation_result = self.full_node_store.get_unfinished_block_result(unfinished_rh)
            assert pre_validation_result is not None
            block = dataclasses.replace(block, transactions_generator=unf_block.transactions_generator)
    async with self.blockchain.lock:
        # After acquiring the lock, check again, because another asyncio thread might have added it
        if self.blockchain.contains_block(header_hash):
            return None
        validation_start = time.time()
        # Tries to add the block to the blockchain, if we already validated transactions, don't do it again
        pre_validation_results: Optional[
            List[PreValidationResult]
        ] = await self.blockchain.pre_validate_blocks_multiprocessing([block], pre_validation_result is None)
        if pre_validation_results is None:
            raise ValueError(f"Failed to validate block {header_hash} height {block.height}")
        if pre_validation_results[0].error is not None:
            if Err(pre_validation_results[0].error) == Err.INVALID_PREV_BLOCK_HASH:
                # Unknown parent: treated as a disconnected block rather than a hard failure
                added: ReceiveBlockResult = ReceiveBlockResult.DISCONNECTED_BLOCK
                error_code: Optional[Err] = Err.INVALID_PREV_BLOCK_HASH
                fork_height: Optional[uint32] = None
            else:
                raise ValueError(
                    f"Failed to validate block {header_hash} height "
                    f"{block.height}: {Err(pre_validation_results[0].error).name}"
                )
        else:
            result_to_validate = (
                pre_validation_results[0] if pre_validation_result is None else pre_validation_result
            )
            assert result_to_validate.required_iters == pre_validation_results[0].required_iters
            added, error_code, fork_height = await self.blockchain.receive_block(block, result_to_validate, None)
        validation_time = time.time() - validation_start
        if added == ReceiveBlockResult.ALREADY_HAVE_BLOCK:
            return None
        elif added == ReceiveBlockResult.INVALID_BLOCK:
            assert error_code is not None
            self.log.error(f"Block {header_hash} at height {block.height} is invalid with code {error_code}.")
            raise ConsensusError(error_code, header_hash)
        elif added == ReceiveBlockResult.DISCONNECTED_BLOCK:
            self.log.info(f"Disconnected block {header_hash} at height {block.height}")
            return None
        elif added == ReceiveBlockResult.NEW_PEAK:
            # Only propagate blocks which extend the blockchain (becomes one of the heads)
            new_peak: Optional[BlockRecord] = self.blockchain.get_peak()
            assert new_peak is not None and fork_height is not None
            self.log.debug(f"Validation time for peak: {validation_time}")
            await self.peak_post_processing(block, new_peak, fork_height, peer)
        elif added == ReceiveBlockResult.ADDED_AS_ORPHAN:
            self.log.info(
                f"Received orphan block of height {block.height} rh " f"{block.reward_chain_block.get_hash()}"
            )
        else:
            # Should never reach here, all the cases are covered
            raise RuntimeError(f"Invalid result from receive_block {added}")
    # This code path is reached if added == ADDED_AS_ORPHAN or NEW_TIP
    peak = self.blockchain.get_peak()
    assert peak is not None
    # Removes all temporary data for old blocks
    clear_height = uint32(max(0, peak.height - 50))
    self.full_node_store.clear_candidate_blocks_below(clear_height)
    self.full_node_store.clear_unfinished_blocks_below(clear_height)
    if peak.height % 1000 == 0 and not self.sync_store.get_sync_mode():
        await self.sync_store.clear_sync_info()  # Occasionally clear sync peer info
    self._state_changed("block")
    return None
async def respond_unfinished_block(
    self,
    respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock,
    peer: Optional[ws.WSChiaConnection],
    farmed_block: bool = False,
):
    """
    We have received an unfinished block, either created by us, or from another peer.
    We can validate it and if it's a good block, propagate it to other peers and
    timelords.

    `farmed_block` is True when the block was farmed by us (only affects logging).
    Raises ConsensusError when validation fails.
    """
    block = respond_unfinished_block.unfinished_block
    if block.prev_header_hash != self.constants.GENESIS_CHALLENGE and not self.blockchain.contains_block(
        block.prev_header_hash
    ):
        # No need to request the parent, since the peer will send it to us anyway, via NewPeak
        self.log.debug("Received a disconnected unfinished block")
        return
    # Adds the unfinished block to seen, and check if it's seen before, to prevent
    # processing it twice. This searches for the exact version of the unfinished block (there can be many different
    # foliages for the same trunk). This is intentional, to prevent DOS attacks.
    # Note that it does not require that this block was successfully processed
    if self.full_node_store.seen_unfinished_block(block.get_hash()):
        return
    block_hash = block.reward_chain_block.get_hash()
    # This searched for the trunk hash (unfinished reward hash). If we have already added a block with the same
    # hash, return
    if self.full_node_store.get_unfinished_block(block_hash) is not None:
        return
    peak: Optional[BlockRecord] = self.blockchain.get_peak()
    if peak is not None:
        if block.total_iters < peak.sp_total_iters(self.constants):
            # This means this unfinished block is pretty far behind, it will not add weight to our chain
            return
    if block.prev_header_hash == self.constants.GENESIS_CHALLENGE:
        prev_b = None
    else:
        prev_b = self.blockchain.block_record(block.prev_header_hash)
    # Count the blocks in sub slot, and check if it's a new epoch
    if len(block.finished_sub_slots) > 0:
        num_blocks_in_ss = 1  # Curr
    else:
        # Walk back to the start of the sub slot, counting blocks along the way
        curr = self.blockchain.try_block_record(block.prev_header_hash)
        num_blocks_in_ss = 2  # Curr and prev
        while (curr is not None) and not curr.first_in_sub_slot:
            curr = self.blockchain.try_block_record(curr.prev_hash)
            num_blocks_in_ss += 1
    if num_blocks_in_ss > self.constants.MAX_SUB_SLOT_BLOCKS:
        # TODO: potentially allow overflow blocks here, which count for the next slot
        self.log.warning("Too many blocks added, not adding block")
        return
    async with self.blockchain.lock:
        # TODO: pre-validate VDFs outside of lock
        validate_result = await self.blockchain.validate_unfinished_block(block)
        if validate_result.error is not None:
            raise ConsensusError(Err(validate_result.error))
    assert validate_result.required_iters is not None
    # Perform another check, in case we have already concurrently added the same unfinished block
    if self.full_node_store.get_unfinished_block(block_hash) is not None:
        return
    if block.prev_header_hash == self.constants.GENESIS_CHALLENGE:
        height = uint32(0)
    else:
        height = uint32(self.blockchain.block_record(block.prev_header_hash).height + 1)
    ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
        self.constants,
        self.blockchain,
        validate_result.required_iters,
        block,
        True,
    )
    self.full_node_store.add_unfinished_block(height, block, validate_result)
    if farmed_block is True:
        self.log.info(f"🍀 ️Farmed unfinished_block {block_hash}")
    else:
        self.log.info(f"Added unfinished_block {block_hash}, not farmed")
    sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
        self.constants,
        len(block.finished_sub_slots) > 0,
        prev_b,
        self.blockchain,
    )
    # Figure out the previous reward chain hash to give the timelord
    if block.reward_chain_block.signage_point_index == 0:
        res = self.full_node_store.get_sub_slot(block.reward_chain_block.pos_ss_cc_challenge_hash)
        if res is None:
            if block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE:
                rc_prev = self.constants.GENESIS_CHALLENGE
            else:
                self.log.warning(f"Do not have sub slot {block.reward_chain_block.pos_ss_cc_challenge_hash}")
                return
        else:
            rc_prev = res[0].reward_chain.get_hash()
    else:
        assert block.reward_chain_block.reward_chain_sp_vdf is not None
        rc_prev = block.reward_chain_block.reward_chain_sp_vdf.challenge
    timelord_request = timelord_protocol.NewUnfinishedBlock(
        block.reward_chain_block,
        difficulty,
        sub_slot_iters,
        block.foliage,
        ses,
        rc_prev,
    )
    msg = make_msg(ProtocolMessageTypes.new_unfinished_block, timelord_request)
    await self.server.send_to_all([msg], NodeType.TIMELORD)
    full_node_request = full_node_protocol.NewUnfinishedBlock(block.reward_chain_block.get_hash())
    msg = make_msg(ProtocolMessageTypes.new_unfinished_block, full_node_request)
    if peer is not None:
        # Don't echo the block back to the peer that sent it
        await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
    else:
        await self.server.send_to_all([msg], NodeType.FULL_NODE)
    self._state_changed("unfinished_block")
async def new_infusion_point_vdf(
    self, request: timelord_protocol.NewInfusionPointVDF, timelord_peer: Optional[ws.WSChiaConnection] = None
) -> Optional[Message]:
    """
    Combines an infusion-point VDF from a timelord with the matching stored unfinished block
    to produce and submit a finished FullBlock. Returns None in all paths; if the previous
    block is not known yet, the request is cached for later (add_to_future_ip).
    """
    # Lookup unfinished blocks
    unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(
        request.unfinished_reward_hash
    )
    if unfinished_block is None:
        self.log.warning(
            f"Do not have unfinished reward chain block {request.unfinished_reward_hash}, cannot finish."
        )
        return None
    prev_b: Optional[BlockRecord] = None
    target_rc_hash = request.reward_chain_ip_vdf.challenge
    last_slot_cc_hash = request.challenge_chain_ip_vdf.challenge
    # Backtracks through end of slot objects, should work for multiple empty sub slots
    for eos, _, _ in reversed(self.full_node_store.finished_sub_slots):
        if eos is not None and eos.reward_chain.get_hash() == target_rc_hash:
            target_rc_hash = eos.reward_chain.end_of_slot_vdf.challenge
    if target_rc_hash == self.constants.GENESIS_CHALLENGE:
        prev_b = None
    else:
        # Find the prev block, starts looking backwards from the peak. target_rc_hash must be the hash of a block
        # and not an end of slot (since we just looked through the slots and backtracked)
        curr: Optional[BlockRecord] = self.blockchain.get_peak()
        for _ in range(10):
            if curr is None:
                break
            if curr.reward_infusion_new_challenge == target_rc_hash:
                # Found our prev block
                prev_b = curr
                break
            curr = self.blockchain.try_block_record(curr.prev_hash)
        # If not found, cache keyed on prev block
        if prev_b is None:
            self.full_node_store.add_to_future_ip(request)
            self.log.warning(f"Previous block is None, infusion point {request.reward_chain_ip_vdf.challenge}")
            return None
    finished_sub_slots: Optional[List[EndOfSubSlotBundle]] = self.full_node_store.get_finished_sub_slots(
        self.blockchain,
        prev_b,
        last_slot_cc_hash,
    )
    if finished_sub_slots is None:
        return None
    sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
        self.constants,
        len(finished_sub_slots) > 0,
        prev_b,
        self.blockchain,
    )
    # Total iterations at the start of the block's sub slot
    if unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE:
        sub_slot_start_iters = uint128(0)
    else:
        ss_res = self.full_node_store.get_sub_slot(unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash)
        if ss_res is None:
            self.log.warning(f"Do not have sub slot {unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash}")
            return None
        _, _, sub_slot_start_iters = ss_res
    sp_total_iters = uint128(
        sub_slot_start_iters
        + calculate_sp_iters(
            self.constants,
            sub_slot_iters,
            unfinished_block.reward_chain_block.signage_point_index,
        )
    )
    block: FullBlock = unfinished_block_to_full_block(
        unfinished_block,
        request.challenge_chain_ip_vdf,
        request.challenge_chain_ip_proof,
        request.reward_chain_ip_vdf,
        request.reward_chain_ip_proof,
        request.infused_challenge_chain_ip_vdf,
        request.infused_challenge_chain_ip_proof,
        finished_sub_slots,
        prev_b,
        self.blockchain,
        sp_total_iters,
        difficulty,
    )
    if not self.has_valid_pool_sig(block):
        self.log.warning("Trying to make a pre-farm block but height is not 0")
        return None
    try:
        await self.respond_block(full_node_protocol.RespondBlock(block))
    except Exception as e:
        self.log.warning(f"Consensus error validating block: {e}")
        if timelord_peer is not None:
            # Only sends to the timelord who sent us this VDF, to reset them to the correct peak
            await self.send_peak_to_timelords(peer=timelord_peer)
    return None
async def respond_end_of_sub_slot(
    self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSChiaConnection
) -> Tuple[Optional[Message], bool]:
    """
    Handles an end-of-sub-slot bundle from a peer. Returns (message, added): `message` is a
    follow-up request to send back (e.g. asking for the previous sub slot we are missing),
    and `added` is True when the sub slot was stored (or already known).
    """
    fetched_ss = self.full_node_store.get_sub_slot(request.end_of_slot_bundle.challenge_chain.get_hash())
    if fetched_ss is not None:
        # Already have the sub-slot
        return None, True
    async with self.timelord_lock:
        fetched_ss = self.full_node_store.get_sub_slot(
            request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
        )
        if (
            (fetched_ss is None)
            and request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
            != self.constants.GENESIS_CHALLENGE
        ):
            # If we don't have the prev, request the prev instead
            full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
                request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
                uint8(0),
                bytes([0] * 32),
            )
            return (
                make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request),
                False,
            )
        peak = self.blockchain.get_peak()
        if peak is not None and peak.height > 2:
            next_sub_slot_iters = self.blockchain.get_next_slot_iters(peak.header_hash, True)
            next_difficulty = self.blockchain.get_next_difficulty(peak.header_hash, True)
        else:
            next_sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING
            next_difficulty = self.constants.DIFFICULTY_STARTING
        # Adds the sub slot and potentially get new infusions
        new_infusions = self.full_node_store.new_finished_sub_slot(
            request.end_of_slot_bundle,
            self.blockchain,
            peak,
            await self.blockchain.get_full_peak(),
        )
        # It may be an empty list, even if it's not None. Not None means added successfully
        if new_infusions is not None:
            self.log.info(
                f"⏲️ Finished sub slot, SP {self.constants.NUM_SPS_SUB_SLOT}/{self.constants.NUM_SPS_SUB_SLOT}, "
                f"{request.end_of_slot_bundle.challenge_chain.get_hash()}, "
                f"number of sub-slots: {len(self.full_node_store.finished_sub_slots)}, "
                f"RC hash: {request.end_of_slot_bundle.reward_chain.get_hash()}, "
                f"Deficit {request.end_of_slot_bundle.reward_chain.deficit}"
            )
            # Notify full nodes of the new sub-slot
            broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
                request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
                request.end_of_slot_bundle.challenge_chain.get_hash(),
                uint8(0),
                request.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge,
            )
            msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast)
            await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
            # Any blocks that can now be infused into this sub slot
            for infusion in new_infusions:
                await self.new_infusion_point_vdf(infusion)
            # Notify farmers of the new sub-slot
            broadcast_farmer = farmer_protocol.NewSignagePoint(
                request.end_of_slot_bundle.challenge_chain.get_hash(),
                request.end_of_slot_bundle.challenge_chain.get_hash(),
                request.end_of_slot_bundle.reward_chain.get_hash(),
                next_difficulty,
                next_sub_slot_iters,
                uint8(0),
            )
            msg = make_msg(ProtocolMessageTypes.new_signage_point, broadcast_farmer)
            await self.server.send_to_all([msg], NodeType.FARMER)
            return None, True
        else:
            self.log.info(
                f"End of slot not added CC challenge "
                f"{request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}"
            )
    return None, False
async def respond_transaction(
    self,
    transaction: SpendBundle,
    spend_name: bytes32,
    peer: Optional[ws.WSChiaConnection] = None,
    test: bool = False,
) -> Tuple[MempoolInclusionStatus, Optional[Err]]:
    """
    Validates a spend bundle and tries to add it to the mempool; on success, announces the
    transaction to other full nodes (excluding `peer`, if given). `test` skips the synced
    check. Returns (inclusion status, error) — error is None on success.
    """
    if self.sync_store.get_sync_mode():
        return MempoolInclusionStatus.FAILED, Err.NO_TRANSACTIONS_WHILE_SYNCING
    if not test and not (await self.synced()):
        return MempoolInclusionStatus.FAILED, Err.NO_TRANSACTIONS_WHILE_SYNCING
    peak_height = self.blockchain.get_peak_height()
    # No transactions in mempool in initial client. Remove 6 weeks after launch
    # NOTE(review): this also rejects every transaction while NETWORK_TYPE == MAINNET —
    # presumably the deliberate launch-period freeze; remove together with this block.
    if (
        peak_height is None
        or peak_height <= self.constants.INITIAL_FREEZE_PERIOD
        or self.constants.NETWORK_TYPE == NetworkType.MAINNET
    ):
        return MempoolInclusionStatus.FAILED, Err.INITIAL_TRANSACTION_FREEZE
    if self.mempool_manager.seen(spend_name):
        return MempoolInclusionStatus.FAILED, Err.ALREADY_INCLUDING_TRANSACTION
    self.mempool_manager.add_and_maybe_pop_seen(spend_name)
    self.log.debug(f"Processing transaction: {spend_name}")
    # Ignore if syncing
    if self.sync_store.get_sync_mode():
        status = MempoolInclusionStatus.FAILED
        error: Optional[Err] = Err.NO_TRANSACTIONS_WHILE_SYNCING
    else:
        try:
            cost_result = await self.mempool_manager.pre_validate_spendbundle(transaction)
        except Exception as e:
            # Forget the spend so it can be retried later, then propagate the failure
            self.mempool_manager.remove_seen(spend_name)
            raise e
        async with self.blockchain.lock:
            # Re-check inside the lock: another task may have added it concurrently
            if self.mempool_manager.get_spendbundle(spend_name) is not None:
                self.mempool_manager.remove_seen(spend_name)
                return MempoolInclusionStatus.FAILED, Err.ALREADY_INCLUDING_TRANSACTION
            cost, status, error = await self.mempool_manager.add_spendbundle(transaction, cost_result, spend_name)
            if status == MempoolInclusionStatus.SUCCESS:
                self.log.debug(f"Added transaction to mempool: {spend_name}")
                # Only broadcast successful transactions, not pending ones. Otherwise it's a DOS
                # vector.
                mempool_item = self.mempool_manager.get_mempool_item(spend_name)
                assert mempool_item is not None
                fees = mempool_item.fee
                assert fees >= 0
                assert cost is not None
                new_tx = full_node_protocol.NewTransaction(
                    spend_name,
                    cost,
                    uint64(transaction.fees()),
                )
                msg = make_msg(ProtocolMessageTypes.new_transaction, new_tx)
                if peer is None:
                    await self.server.send_to_all([msg], NodeType.FULL_NODE)
                else:
                    await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
            else:
                self.mempool_manager.remove_seen(spend_name)
                self.log.warning(
                    f"Wasn't able to add transaction with id {spend_name}, " f"status {status} error: {error}"
                )
    return status, error
async def _needs_compact_proof(
    self, vdf_info: VDFInfo, header_block: HeaderBlock, field_vdf: CompressibleVDFField
) -> bool:
    """
    Returns True iff `header_block` contains a VDF matching `vdf_info` for the given field
    whose stored proof is not yet compact (witness type 0, normalized to identity), i.e.
    a compact replacement would be an improvement. Returns False when no match is found
    or the matching proof is already compact.
    """

    def is_compact(proof) -> bool:
        # A proof is considered compact when its witness type is 0 and it is
        # normalized to the identity element.
        return proof.witness_type == 0 and proof.normalized_to_identity

    if field_vdf == CompressibleVDFField.CC_EOS_VDF:
        for slot in header_block.finished_sub_slots:
            if slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
                return not is_compact(slot.proofs.challenge_chain_slot_proof)
    elif field_vdf == CompressibleVDFField.ICC_EOS_VDF:
        for slot in header_block.finished_sub_slots:
            infused = slot.infused_challenge_chain
            if infused is not None and infused.infused_challenge_chain_end_of_slot_vdf == vdf_info:
                assert slot.proofs.infused_challenge_chain_slot_proof is not None
                return not is_compact(slot.proofs.infused_challenge_chain_slot_proof)
    elif field_vdf == CompressibleVDFField.CC_SP_VDF:
        sp_vdf = header_block.reward_chain_block.challenge_chain_sp_vdf
        if sp_vdf is not None and vdf_info == sp_vdf:
            assert header_block.challenge_chain_sp_proof is not None
            return not is_compact(header_block.challenge_chain_sp_proof)
    elif field_vdf == CompressibleVDFField.CC_IP_VDF:
        if vdf_info == header_block.reward_chain_block.challenge_chain_ip_vdf:
            return not is_compact(header_block.challenge_chain_ip_proof)
    return False
async def _can_accept_compact_proof(
    self,
    vdf_info: VDFInfo,
    vdf_proof: VDFProof,
    height: uint32,
    header_hash: bytes32,
    field_vdf: CompressibleVDFField,
) -> bool:
    """
    - Checks if the provided proof is indeed compact.
    - Checks if proof verifies given the vdf_info from the start of sub-slot.
    - Checks if the provided vdf_info is correct, assuming it refers to the start of sub-slot.
    - Checks if the existing proof was non-compact. Ignore this proof if we already have a compact proof.

    Returns True only when all checks pass and the stored proof would be improved.
    """
    is_fully_compactified = await self.block_store.is_fully_compactified(header_hash)
    if is_fully_compactified is None or is_fully_compactified:
        self.log.info(f"Already compactified block: {header_hash}. Ignoring.")
        return False
    # Compact means witness type 0 and normalized to the identity element
    if vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity:
        self.log.error(f"Received vdf proof is not compact: {vdf_proof}.")
        return False
    if not vdf_proof.is_valid(self.constants, ClassgroupElement.get_default_element(), vdf_info):
        self.log.error(f"Received compact vdf proof is not valid: {vdf_proof}.")
        return False
    header_block = await self.blockchain.get_header_block_by_height(height, header_hash)
    if header_block is None:
        self.log.error(f"Can't find block for given compact vdf. Height: {height} Header hash: {header_hash}")
        return False
    is_new_proof = await self._needs_compact_proof(vdf_info, header_block, field_vdf)
    if not is_new_proof:
        self.log.info(f"Duplicate compact proof. Height: {height}. Header hash: {header_hash}.")
    return is_new_proof
async def _replace_proof(
    self,
    vdf_info: VDFInfo,
    vdf_proof: VDFProof,
    height: uint32,
    field_vdf: CompressibleVDFField,
):
    """
    Replaces the proof matching `vdf_info` with the compact `vdf_proof` in every stored
    block at `height`, writing the updated block(s) back to the block store. Should only
    be called after _can_accept_compact_proof has validated the proof.
    """
    full_blocks = await self.block_store.get_full_blocks_at([height])
    assert len(full_blocks) > 0
    for block in full_blocks:
        new_block = None
        # NOTE(review): the record is looked up via height_to_hash, i.e. the main-chain
        # block at this height — confirm this is the intended record for non-chain blocks.
        block_record = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(height))
        assert block_record is not None
        if field_vdf == CompressibleVDFField.CC_EOS_VDF:
            for index, sub_slot in enumerate(block.finished_sub_slots):
                if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
                    new_proofs = dataclasses.replace(sub_slot.proofs, challenge_chain_slot_proof=vdf_proof)
                    new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs)
                    # Copy the list so the original block object is not mutated in place
                    new_finished_subslots = list(block.finished_sub_slots)
                    new_finished_subslots[index] = new_subslot
                    new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots)
                    break
        if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
            for index, sub_slot in enumerate(block.finished_sub_slots):
                if (
                    sub_slot.infused_challenge_chain is not None
                    and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == vdf_info
                ):
                    new_proofs = dataclasses.replace(sub_slot.proofs, infused_challenge_chain_slot_proof=vdf_proof)
                    new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs)
                    # Copy the list so the original block object is not mutated in place
                    new_finished_subslots = list(block.finished_sub_slots)
                    new_finished_subslots[index] = new_subslot
                    new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots)
                    break
        if field_vdf == CompressibleVDFField.CC_SP_VDF:
            assert block.challenge_chain_sp_proof is not None
            new_block = dataclasses.replace(block, challenge_chain_sp_proof=vdf_proof)
        if field_vdf == CompressibleVDFField.CC_IP_VDF:
            new_block = dataclasses.replace(block, challenge_chain_ip_proof=vdf_proof)
        assert new_block is not None
        await self.block_store.add_full_block(new_block, block_record)
async def respond_compact_vdf_timelord(self, request: timelord_protocol.RespondCompactProofOfTime):
field_vdf = CompressibleVDFField(int(request.field_vdf))
if not await self._can_accept_compact_proof(
request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf
):
return
async with self.blockchain.lock:
await self._replace_proof(request.vdf_info, request.vdf_proof, request.height, field_vdf)
msg = make_msg(
ProtocolMessageTypes.new_compact_vdf,
full_node_protocol.NewCompactVDF(request.height, request.header_hash, request.field_vdf, request.vdf_info),
)
if self.server is not None:
await self.server.send_to_all([msg], NodeType.FULL_NODE)
async def new_compact_vdf(self, request: full_node_protocol.NewCompactVDF, peer: ws.WSChiaConnection):
is_fully_compactified = await self.block_store.is_fully_compactified(request.header_hash)
if is_fully_compactified is None or is_fully_compactified:
return False
header_block = await self.blockchain.get_header_block_by_height(request.height, request.header_hash)
if header_block is None:
return
field_vdf = CompressibleVDFField(int(request.field_vdf))
if await self._needs_compact_proof(request.vdf_info, header_block, field_vdf):
msg = make_msg(
ProtocolMessageTypes.request_compact_vdf,
full_node_protocol.RequestCompactVDF(
request.height, request.header_hash, request.field_vdf, request.vdf_info
),
)
await peer.send_message(msg)
async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSChiaConnection):
header_block = await self.blockchain.get_header_block_by_height(request.height, request.header_hash)
if header_block is None:
return
vdf_proof: Optional[VDFProof] = None
field_vdf = CompressibleVDFField(int(request.field_vdf))
if field_vdf == CompressibleVDFField.CC_EOS_VDF:
for sub_slot in header_block.finished_sub_slots:
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == request.vdf_info:
vdf_proof = sub_slot.proofs.challenge_chain_slot_proof
break
if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
for sub_slot in header_block.finished_sub_slots:
if (
sub_slot.infused_challenge_chain is not None
and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == request.vdf_info
):
vdf_proof = sub_slot.proofs.infused_challenge_chain_slot_proof
break
if (
field_vdf == CompressibleVDFField.CC_SP_VDF
and header_block.reward_chain_block.challenge_chain_sp_vdf == request.vdf_info
):
vdf_proof = header_block.challenge_chain_sp_proof
if (
field_vdf == CompressibleVDFField.CC_IP_VDF
and header_block.reward_chain_block.challenge_chain_ip_vdf == request.vdf_info
):
vdf_proof = header_block.challenge_chain_ip_proof
if vdf_proof is None or vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity:
self.log.error(f"{peer} requested compact vdf we don't have, height: {request.height}.")
return
compact_vdf = full_node_protocol.RespondCompactVDF(
request.height,
request.header_hash,
request.field_vdf,
request.vdf_info,
vdf_proof,
)
msg = make_msg(ProtocolMessageTypes.respond_compact_vdf, compact_vdf)
await peer.send_message(msg)
async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSChiaConnection):
field_vdf = CompressibleVDFField(int(request.field_vdf))
if not await self._can_accept_compact_proof(
request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf
):
return
async with self.blockchain.lock:
if self.blockchain.seen_compact_proofs(request.vdf_info, request.height):
return
await self._replace_proof(request.vdf_info, request.vdf_proof, request.height, field_vdf)
msg = make_msg(
ProtocolMessageTypes.new_compact_vdf,
full_node_protocol.NewCompactVDF(request.height, request.header_hash, request.field_vdf, request.vdf_info),
)
if self.server is not None:
await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
async def broadcast_uncompact_blocks(self, uncompact_interval_scan: int, target_uncompact_proofs: int):
min_height: Optional[int] = 0
try:
while not self._shut_down:
while self.sync_store.get_sync_mode():
if self._shut_down:
return
await asyncio.sleep(30)
broadcast_list: List[timelord_protocol.RequestCompactProofOfTime] = []
new_min_height = None
max_height = self.blockchain.get_peak_height()
if max_height is None:
await asyncio.sleep(30)
continue
# Calculate 'min_height' correctly the first time this task is launched, using the db.
assert min_height is not None
min_height = await self.block_store.get_first_not_compactified(min_height)
if min_height is None or min_height > max(0, max_height - 1000):
min_height = max(0, max_height - 1000)
batches_finished = 0
self.log.info("Scanning the blockchain for uncompact blocks.")
for h in range(min_height, max_height, 100):
# Got 10 times the target header count, sampling the target headers should contain
# enough randomness to split the work between blueboxes.
if len(broadcast_list) > target_uncompact_proofs * 10:
break
stop_height = min(h + 99, max_height)
headers = await self.blockchain.get_header_blocks_in_range(min_height, stop_height)
for header in headers.values():
prev_broadcast_list_len = len(broadcast_list)
expected_header_hash = self.blockchain.height_to_hash(header.height)
if header.header_hash != expected_header_hash:
continue
for sub_slot in header.finished_sub_slots:
if (
sub_slot.proofs.challenge_chain_slot_proof.witness_type > 0
or not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
):
broadcast_list.append(
timelord_protocol.RequestCompactProofOfTime(
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
header.header_hash,
header.height,
uint8(CompressibleVDFField.CC_EOS_VDF),
)
)
if sub_slot.proofs.infused_challenge_chain_slot_proof is not None and (
sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type > 0
or not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
):
assert sub_slot.infused_challenge_chain is not None
broadcast_list.append(
timelord_protocol.RequestCompactProofOfTime(
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
header.header_hash,
header.height,
uint8(CompressibleVDFField.ICC_EOS_VDF),
)
)
if header.challenge_chain_sp_proof is not None and (
header.challenge_chain_sp_proof.witness_type > 0
or not header.challenge_chain_sp_proof.normalized_to_identity
):
assert header.reward_chain_block.challenge_chain_sp_vdf is not None
broadcast_list.append(
timelord_protocol.RequestCompactProofOfTime(
header.reward_chain_block.challenge_chain_sp_vdf,
header.header_hash,
header.height,
uint8(CompressibleVDFField.CC_SP_VDF),
)
)
if (
header.challenge_chain_ip_proof.witness_type > 0
or not header.challenge_chain_ip_proof.normalized_to_identity
):
broadcast_list.append(
timelord_protocol.RequestCompactProofOfTime(
header.reward_chain_block.challenge_chain_ip_vdf,
header.header_hash,
header.height,
uint8(CompressibleVDFField.CC_IP_VDF),
)
)
# This is the first header with uncompact proofs. Store its height so next time we iterate
# only from here. Fix header block iteration window to at least 1000, so reorgs will be
# handled correctly.
if prev_broadcast_list_len == 0 and len(broadcast_list) > 0 and h <= max(0, max_height - 1000):
new_min_height = header.height
# Small sleep between batches.
batches_finished += 1
if batches_finished % 10 == 0:
await asyncio.sleep(1)
# We have no uncompact blocks, but mentain the block iteration window to at least 1000 blocks.
if new_min_height is None:
new_min_height = max(0, max_height - 1000)
min_height = new_min_height
if len(broadcast_list) > target_uncompact_proofs:
random.shuffle(broadcast_list)
broadcast_list = broadcast_list[:target_uncompact_proofs]
if self.sync_store.get_sync_mode():
continue
if self.server is not None:
for new_pot in broadcast_list:
msg = make_msg(ProtocolMessageTypes.request_compact_proof_of_time, new_pot)
await self.server.send_to_all([msg], NodeType.TIMELORD)
await asyncio.sleep(uncompact_interval_scan)
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Exception in broadcast_uncompact_blocks: {e}")
self.log.error(f"Exception Stack: {error_stack}")
| 49.367749 | 120 | 0.625814 | import asyncio
import dataclasses
import logging
import random
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
import aiosqlite
from blspy import AugSchemeMPL
import src.server.ws_connection as ws
from src.consensus.block_creation import unfinished_block_to_full_block
from src.consensus.block_record import BlockRecord
from src.consensus.blockchain import Blockchain, ReceiveBlockResult
from src.consensus.constants import ConsensusConstants
from src.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from src.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from src.consensus.multiprocess_validation import PreValidationResult
from src.consensus.network_type import NetworkType
from src.consensus.pot_iterations import calculate_sp_iters
from src.full_node.block_store import BlockStore
from src.full_node.coin_store import CoinStore
from src.full_node.full_node_store import FullNodeStore
from src.full_node.mempool_manager import MempoolManager
from src.full_node.signage_point import SignagePoint
from src.full_node.sync_store import SyncStore
from src.full_node.weight_proof import WeightProofHandler
from src.protocols import farmer_protocol, full_node_protocol, timelord_protocol, wallet_protocol
from src.protocols.full_node_protocol import RejectBlocks, RequestBlocks, RespondBlock, RespondBlocks
from src.protocols.protocol_message_types import ProtocolMessageTypes
from src.server.node_discovery import FullNodePeers
from src.server.outbound_message import Message, NodeType, make_msg
from src.server.server import ChiaServer
from src.types.blockchain_format.classgroup import ClassgroupElement
from src.types.blockchain_format.pool_target import PoolTarget
from src.types.blockchain_format.sized_bytes import bytes32
from src.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from src.types.blockchain_format.vdf import CompressibleVDFField, VDFInfo, VDFProof
from src.types.end_of_slot_bundle import EndOfSubSlotBundle
from src.types.full_block import FullBlock
from src.types.header_block import HeaderBlock
from src.types.mempool_inclusion_status import MempoolInclusionStatus
from src.types.spend_bundle import SpendBundle
from src.types.unfinished_block import UnfinishedBlock
from src.util.errors import ConsensusError, Err
from src.util.genesis_wait import wait_for_genesis_challenge
from src.util.ints import uint8, uint32, uint64, uint128
from src.util.path import mkdir, path_from_root
class FullNode:
block_store: BlockStore
full_node_store: FullNodeStore
full_node_peers: Optional[FullNodePeers]
sync_store: Any
coin_store: CoinStore
mempool_manager: MempoolManager
connection: aiosqlite.Connection
_sync_task: Optional[asyncio.Task]
blockchain: Blockchain
config: Dict
server: Any
log: logging.Logger
constants: ConsensusConstants
_shut_down: bool
root_path: Path
state_changed_callback: Optional[Callable]
timelord_lock: asyncio.Lock
initialized: bool
def __init__(
self,
config: Dict,
root_path: Path,
consensus_constants: ConsensusConstants,
name: str = None,
):
self.initialized = False
self.root_path = root_path
self.config = config
self.server = None
self._shut_down = False
self.constants = consensus_constants
self.pow_creation: Dict[uint32, asyncio.Event] = {}
self.state_changed_callback: Optional[Callable] = None
self.full_node_peers = None
self.sync_store = None
if name:
self.log = logging.getLogger(name)
else:
self.log = logging.getLogger(__name__)
db_path_replaced: str = config["database_path"].replace("CHALLENGE", config["selected_network"])
self.db_path = path_from_root(root_path, db_path_replaced)
mkdir(self.db_path.parent)
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
async def regular_start(self):
self.log.info("regular_start")
self.connection = await aiosqlite.connect(self.db_path)
self.block_store = await BlockStore.create(self.connection)
self.full_node_store = await FullNodeStore.create(self.constants)
self.sync_store = await SyncStore.create()
self.coin_store = await CoinStore.create(self.connection)
self.log.info("Initializing blockchain from disk")
start_time = time.time()
self.blockchain = await Blockchain.create(self.coin_store, self.block_store, self.constants)
self.mempool_manager = MempoolManager(self.coin_store, self.constants)
self.weight_proof_handler = WeightProofHandler(self.constants, self.blockchain)
self._sync_task = None
time_taken = time.time() - start_time
if self.blockchain.get_peak() is None:
self.log.info(f"Initialized with empty blockchain time taken: {int(time_taken)}s")
else:
self.log.info(
f"Blockchain initialized to peak {self.blockchain.get_peak().header_hash} height"
f" {self.blockchain.get_peak().height}, "
f"time taken: {int(time_taken)}s"
)
pending_tx = await self.mempool_manager.new_peak(self.blockchain.get_peak())
assert len(pending_tx) == 0
peak: Optional[BlockRecord] = self.blockchain.get_peak()
self.uncompact_task = None
if peak is not None:
full_peak = await self.blockchain.get_full_peak()
await self.peak_post_processing(full_peak, peak, max(peak.height - 1, 0), None)
if self.config["send_uncompact_interval"] != 0:
assert self.config["target_uncompact_proofs"] != 0
self.uncompact_task = asyncio.create_task(
self.broadcast_uncompact_blocks(
self.config["send_uncompact_interval"],
self.config["target_uncompact_proofs"],
)
)
self.initialized = True
async def delayed_start(self):
self.log.info("delayed_start")
config, constants = await wait_for_genesis_challenge(self.root_path, self.constants, "full_node")
self.config = config
self.constants = constants
await self.regular_start()
async def _start(self):
self.timelord_lock = asyncio.Lock()
if self.constants.GENESIS_CHALLENGE is not None:
await self.regular_start()
else:
asyncio.create_task(self.delayed_start())
def set_server(self, server: ChiaServer):
self.server = server
try:
self.full_node_peers = FullNodePeers(
self.server,
self.root_path,
self.config["target_peer_count"] - self.config["target_outbound_peer_count"],
self.config["target_outbound_peer_count"],
self.config["peer_db_path"],
self.config["introducer_peer"],
self.config["peer_connect_interval"],
self.log,
)
asyncio.create_task(self.full_node_peers.start())
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Exception: {e}")
self.log.error(f"Exception in peer discovery: {e}")
self.log.error(f"Exception Stack: {error_stack}")
def _state_changed(self, change: str):
if self.state_changed_callback is not None:
self.state_changed_callback(change)
async def short_sync_batch(self, peer: ws.WSChiaConnection, start_height: uint32, target_height: uint32) -> bool:
if (
peer.peer_node_id in self.sync_store.backtrack_syncing
and self.sync_store.backtrack_syncing[peer.peer_node_id] > 0
):
return True # Don't batch sync, we are already in progress of a backtrack sync
if peer.peer_node_id in self.sync_store.batch_syncing:
return True
self.sync_store.batch_syncing.add(peer.peer_node_id)
self.log.info(f"Starting batch short sync from {start_height} to height {target_height}")
if start_height > 0:
first = await peer.request_block(full_node_protocol.RequestBlock(uint32(start_height), False))
if first is None or not isinstance(first, full_node_protocol.RespondBlock):
self.sync_store.batch_syncing.remove(peer.peer_node_id)
raise ValueError(f"Error short batch syncing, could not fetch block at height {start_height}")
if not self.blockchain.contains_block(first.block.prev_header_hash):
self.log.info("Batch syncing stopped, this is a deep chain")
self.sync_store.batch_syncing.remove(peer.peer_node_id)
# First sb not connected to our blockchain, do a long sync instead
return False
batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
try:
for height in range(start_height, target_height, batch_size):
end_height = min(target_height, height + batch_size)
request = RequestBlocks(uint32(height), uint32(end_height), True)
response = await peer.request_blocks(request)
if not response:
raise ValueError(f"Error short batch syncing, invalid/no response for {height}-{end_height}")
async with self.blockchain.lock:
success, advanced_peak, fork_height = await self.receive_block_batch(response.blocks, peer, None)
if not success:
raise ValueError(f"Error short batch syncing, failed to validate blocks {height}-{end_height}")
if advanced_peak:
peak = self.blockchain.get_peak()
peak_fb: Optional[FullBlock] = await self.blockchain.get_full_peak()
assert peak is not None and peak_fb is not None and fork_height is not None
await self.peak_post_processing(peak_fb, peak, fork_height, peer)
self.log.info(f"Added blocks {height}-{end_height}")
except Exception:
self.sync_store.batch_syncing.remove(peer.peer_node_id)
raise
self.sync_store.batch_syncing.remove(peer.peer_node_id)
return True
async def short_sync_backtrack(
self, peer: ws.WSChiaConnection, peak_height: uint32, target_height: uint32, target_unf_hash: bytes32
):
try:
if peer.peer_node_id not in self.sync_store.backtrack_syncing:
self.sync_store.backtrack_syncing[peer.peer_node_id] = 0
self.sync_store.backtrack_syncing[peer.peer_node_id] += 1
unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(target_unf_hash)
curr_height: int = target_height
found_fork_point = False
responses = []
while curr_height > peak_height - 5:
# If we already have the unfinished block, don't fetch the transactions. In the normal case, we will
fetch_tx: bool = unfinished_block is None or curr_height != target_height
curr = await peer.request_block(full_node_protocol.RequestBlock(uint32(curr_height), fetch_tx))
if curr is None:
raise ValueError(f"Failed to fetch block {curr_height} from {peer.get_peer_info()}, timed out")
if curr is None or not isinstance(curr, full_node_protocol.RespondBlock):
raise ValueError(
f"Failed to fetch block {curr_height} from {peer.get_peer_info()}, wrong type {type(curr)}"
)
responses.append(curr)
if self.blockchain.contains_block(curr.block.prev_header_hash) or curr_height == 0:
found_fork_point = True
break
curr_height -= 1
if found_fork_point:
for response in reversed(responses):
await self.respond_block(response)
except Exception as e:
self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1
raise e
self.sync_store.backtrack_syncing[peer.peer_node_id] -= 1
return found_fork_point
async def new_peak(self, request: full_node_protocol.NewPeak, peer: ws.WSChiaConnection):
self.sync_store.peer_has_block(request.header_hash, peer.peer_node_id, request.weight, request.height, True)
if self.blockchain.contains_block(request.header_hash):
return None
peak: Optional[BlockRecord] = self.blockchain.get_peak()
curr_peak_height = uint32(0) if peak is None else peak.height
if peak is not None and peak.weight > request.weight:
return None
if self.sync_store.get_sync_mode():
peak_sync_hash = self.sync_store.get_sync_target_hash()
peak_sync_height = self.sync_store.get_sync_target_height()
if peak_sync_hash is not None and request.header_hash != peak_sync_hash and peak_sync_height is not None:
peak_peers: Set[bytes32] = self.sync_store.get_peers_that_have_peak([peak_sync_hash])
if peer.peer_node_id not in peak_peers:
target_peak_response: Optional[RespondBlock] = await peer.request_block(
full_node_protocol.RequestBlock(uint32(peak_sync_height), False), timeout=10
)
if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):
self.sync_store.peer_has_block(
peak_sync_hash,
peer.peer_node_id,
target_peak_response.block.weight,
peak_sync_height,
False,
)
else:
if request.height <= curr_peak_height + self.config["short_sync_blocks_behind_threshold"]:
# This is the normal case of receiving the next block
if await self.short_sync_backtrack(
peer, curr_peak_height, request.height, request.unfinished_reward_block_hash
):
return
if request.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
# This is the case of syncing up more than a few blocks, at the start of the chain
# TODO(almog): fix weight proofs so they work at the beginning as well
self.log.debug("Doing batch sync, no backup")
await self.short_sync_batch(peer, uint32(0), request.height)
return
if request.height < curr_peak_height + self.config["sync_blocks_behind_threshold"]:
# This case of being behind but not by so much
if await self.short_sync_batch(peer, uint32(max(curr_peak_height - 6, 0)), request.height):
return
# This is the either the case where we were not able to sync successfully (for example, due to the fork
# point being in the past), or we are very far behind. Performs a long sync.
self._sync_task = asyncio.create_task(self._sync())
async def send_peak_to_timelords(
self, peak_block: Optional[FullBlock] = None, peer: Optional[ws.WSChiaConnection] = None
):
if peak_block is None:
peak_block = await self.blockchain.get_full_peak()
if peak_block is not None:
peak = self.blockchain.block_record(peak_block.header_hash)
difficulty = self.blockchain.get_next_difficulty(peak.header_hash, False)
ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
self.constants,
self.blockchain,
peak.required_iters,
peak_block,
True,
)
recent_rc = self.blockchain.get_recent_reward_challenges()
curr = peak
while not curr.is_challenge_block(self.constants) and not curr.first_in_sub_slot:
curr = self.blockchain.block_record(curr.prev_hash)
if curr.is_challenge_block(self.constants):
last_csb_or_eos = curr.total_iters
else:
last_csb_or_eos = curr.ip_sub_slot_total_iters(self.constants)
curr = peak
passed_ses_height_but_not_yet_included = True
while (curr.height % self.constants.SUB_EPOCH_BLOCKS) != 0:
if curr.sub_epoch_summary_included:
passed_ses_height_but_not_yet_included = False
curr = self.blockchain.block_record(curr.prev_hash)
if curr.sub_epoch_summary_included or curr.height == 0:
passed_ses_height_but_not_yet_included = False
timelord_new_peak: timelord_protocol.NewPeakTimelord = timelord_protocol.NewPeakTimelord(
peak_block.reward_chain_block,
difficulty,
peak.deficit,
peak.sub_slot_iters,
ses,
recent_rc,
last_csb_or_eos,
passed_ses_height_but_not_yet_included,
)
msg = make_msg(ProtocolMessageTypes.new_peak_timelord, timelord_new_peak)
if peer is None:
await self.server.send_to_all([msg], NodeType.TIMELORD)
else:
await peer.new_peak_timelord(timelord_new_peak)
async def synced(self) -> bool:
curr: Optional[BlockRecord] = self.blockchain.get_peak()
if curr is None:
return False
while curr is not None and not curr.is_transaction_block:
curr = self.blockchain.try_block_record(curr.prev_hash)
now = time.time()
if (
curr is None
or curr.timestamp is None
or curr.timestamp < uint64(int(now - 60 * 7))
or self.sync_store.get_sync_mode()
):
return False
else:
return True
async def on_connect(self, connection: ws.WSChiaConnection):
self._state_changed("add_connection")
self._state_changed("sync_mode")
if self.full_node_peers is not None:
asyncio.create_task(self.full_node_peers.on_connect(connection))
if self.initialized is False:
return
if connection.connection_type is NodeType.FULL_NODE:
# Send filter to node and request mempool items that are not in it (Only if we are currently synced)
synced = await self.synced()
peak_height = self.blockchain.get_peak_height()
if synced and peak_height is not None and peak_height > self.constants.INITIAL_FREEZE_PERIOD:
my_filter = self.mempool_manager.get_filter()
mempool_request = full_node_protocol.RequestMempoolTransactions(my_filter)
msg = make_msg(ProtocolMessageTypes.request_mempool_transactions, mempool_request)
await connection.send_message(msg)
peak_full: Optional[FullBlock] = await self.blockchain.get_full_peak()
if peak_full is not None:
peak: BlockRecord = self.blockchain.block_record(peak_full.header_hash)
if connection.connection_type is NodeType.FULL_NODE:
request_node = full_node_protocol.NewPeak(
peak.header_hash,
peak.height,
peak.weight,
peak.height,
peak_full.reward_chain_block.get_unfinished().get_hash(),
)
await connection.send_message(make_msg(ProtocolMessageTypes.new_peak, request_node))
elif connection.connection_type is NodeType.WALLET:
# If connected to a wallet, send the Peak
request_wallet = wallet_protocol.NewPeakWallet(
peak.header_hash,
peak.height,
peak.weight,
peak.height,
)
await connection.send_message(make_msg(ProtocolMessageTypes.new_peak_wallet, request_wallet))
elif connection.connection_type is NodeType.TIMELORD:
await self.send_peak_to_timelords()
def on_disconnect(self, connection: ws.WSChiaConnection):
self.log.info(f"peer disconnected {connection.get_peer_info()}")
self._state_changed("close_connection")
self._state_changed("sync_mode")
if self.sync_store is not None:
self.sync_store.peer_disconnected(connection.peer_node_id)
def _num_needed_peers(self) -> int:
assert self.server is not None
assert self.server.all_connections is not None
diff = self.config["target_peer_count"] - len(self.server.all_connections)
return diff if diff >= 0 else 0
def _close(self):
self._shut_down = True
if self.blockchain is not None:
self.blockchain.shut_down()
if self.mempool_manager is not None:
self.mempool_manager.shut_down()
if self.full_node_peers is not None:
asyncio.create_task(self.full_node_peers.close())
if self.uncompact_task is not None:
self.uncompact_task.cancel()
async def _await_closed(self):
try:
if self._sync_task is not None:
self._sync_task.cancel()
except asyncio.TimeoutError:
pass
await self.connection.close()
async def _sync(self):
# Ensure we are only syncing once and not double calling this method
if self.sync_store.get_sync_mode():
return
self.sync_store.set_sync_mode(True)
self._state_changed("sync_mode")
try:
self.log.info("Starting to perform sync.")
self.log.info("Waiting to receive peaks from peers.")
# Wait until we have 3 peaks or up to a max of 30 seconds
peaks = []
for i in range(300):
peaks = [tup[0] for tup in self.sync_store.get_peak_of_each_peer().values()]
if len(self.sync_store.get_peers_that_have_peak(peaks)) < 3:
if self._shut_down:
return
await asyncio.sleep(0.1)
self.log.info(f"Collected a total of {len(peaks)} peaks.")
self.sync_peers_handler = None
# Based on responses from peers about the current peaks, see which peak is the heaviest
# (similar to longest chain rule).
target_peak = self.sync_store.get_heaviest_peak()
if target_peak is None:
raise RuntimeError("Not performing sync, no peaks collected")
heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight = target_peak
self.sync_store.set_peak_target(heaviest_peak_hash, heaviest_peak_height)
self.log.info(f"Selected peak {heaviest_peak_height}, {heaviest_peak_hash}")
# Check which peers are updated to this height
peers = []
coroutines = []
for peer in self.server.all_connections.values():
if peer.connection_type == NodeType.FULL_NODE:
peers.append(peer.peer_node_id)
coroutines.append(
peer.request_block(
full_node_protocol.RequestBlock(uint32(heaviest_peak_height), True), timeout=10
)
)
for i, target_peak_response in enumerate(await asyncio.gather(*coroutines)):
if target_peak_response is not None and isinstance(target_peak_response, RespondBlock):
self.sync_store.peer_has_block(
heaviest_peak_hash, peers[i], heaviest_peak_weight, heaviest_peak_height, False
)
# TODO: disconnect from peer which gave us the heaviest_peak, if nobody has the peak
peer_ids: Set[bytes32] = self.sync_store.get_peers_that_have_peak([heaviest_peak_hash])
peers_with_peak: List = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids]
# Request weight proof from a random peer
self.log.info(f"Total of {len(peers_with_peak)} peers with peak {heaviest_peak_height}")
weight_proof_peer = random.choice(peers_with_peak)
self.log.info(
f"Requesting weight proof from peer {weight_proof_peer.peer_host} up to height"
f" {heaviest_peak_height}"
)
if self.blockchain.get_peak() is not None and heaviest_peak_weight <= self.blockchain.get_peak().weight:
raise ValueError("Not performing sync, already caught up.")
request = full_node_protocol.RequestProofOfWeight(heaviest_peak_height, heaviest_peak_hash)
response = await weight_proof_peer.request_proof_of_weight(request, timeout=180)
# Disconnect from this peer, because they have not behaved properly
if response is None or not isinstance(response, full_node_protocol.RespondProofOfWeight):
await weight_proof_peer.close(600)
raise RuntimeError(f"Weight proof did not arrive in time from peer: {weight_proof_peer.peer_host}")
if response.wp.recent_chain_data[-1].reward_chain_block.height != heaviest_peak_height:
await weight_proof_peer.close(600)
raise RuntimeError(f"Weight proof had the wrong height: {weight_proof_peer.peer_host}")
if response.wp.recent_chain_data[-1].reward_chain_block.weight != heaviest_peak_weight:
await weight_proof_peer.close(600)
raise RuntimeError(f"Weight proof had the wrong weight: {weight_proof_peer.peer_host}")
try:
validated, fork_point = await self.weight_proof_handler.validate_weight_proof(response.wp)
except Exception as e:
await weight_proof_peer.close(600)
raise ValueError(f"Weight proof validation threw an error {e}")
if not validated:
await weight_proof_peer.close(600)
raise ValueError("Weight proof validation failed")
self.log.info(f"Re-checked peers: total of {len(peers_with_peak)} peers with peak {heaviest_peak_height}")
# Ensures that the fork point does not change
async with self.blockchain.lock:
await self.blockchain.warmup(fork_point)
await self.sync_from_fork_point(fork_point, heaviest_peak_height, heaviest_peak_hash)
except asyncio.CancelledError:
self.log.warning("Syncing failed, CancelledError")
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Error with syncing: {type(e)}{tb}")
finally:
if self._shut_down:
return
await self._finish_sync()
async def sync_from_fork_point(self, fork_point_height: int, target_peak_sb_height: uint32, peak_hash: bytes32):
self.log.info(f"Start syncing from fork point at {fork_point_height} up to {target_peak_sb_height}")
peer_ids: Set[bytes32] = self.sync_store.get_peers_that_have_peak([peak_hash])
peers_with_peak: List = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids]
if len(peers_with_peak) == 0:
raise RuntimeError(f"Not syncing, no peers with header_hash {peak_hash} ")
advanced_peak = False
batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
for i in range(fork_point_height, target_peak_sb_height, batch_size):
start_height = i
end_height = min(target_peak_sb_height, start_height + batch_size)
request = RequestBlocks(uint32(start_height), uint32(end_height), True)
self.log.info(f"Requesting blocks: {start_height} to {end_height}")
batch_added = False
to_remove = []
for peer in peers_with_peak:
if peer.closed:
to_remove.append(peer)
continue
response = await peer.request_blocks(request, timeout=15)
if response is None:
await peer.close()
to_remove.append(peer)
continue
if isinstance(response, RejectBlocks):
to_remove.append(peer)
continue
elif isinstance(response, RespondBlocks):
success, advanced_peak, _ = await self.receive_block_batch(
response.blocks, peer, None if advanced_peak else uint32(fork_point_height)
)
if success is False:
await peer.close()
continue
else:
batch_added = True
break
peak = self.blockchain.get_peak()
assert peak is not None
msg = make_msg(
ProtocolMessageTypes.new_peak_wallet,
wallet_protocol.NewPeakWallet(
peak.header_hash,
peak.height,
peak.weight,
uint32(max(peak.height - 1, uint32(0))),
),
)
await self.server.send_to_all([msg], NodeType.WALLET)
for peer in to_remove:
peers_with_peak.remove(peer)
if self.sync_store.peers_changed.is_set():
peer_ids = self.sync_store.get_peers_that_have_peak([peak_hash])
peers_with_peak = [c for c in self.server.all_connections.values() if c.peer_node_id in peer_ids]
self.log.info(f"Number of peers we are syncing from: {len(peers_with_peak)}")
self.sync_store.peers_changed.clear()
if batch_added is False:
self.log.info(f"Failed to fetch blocks {start_height} to {end_height} from peers: {peers_with_peak}")
break
else:
self.log.info(f"Added blocks {start_height} to {end_height}")
self.blockchain.clean_block_record(
min(
end_height - self.constants.BLOCKS_CACHE_SIZE,
peak.height - self.constants.BLOCKS_CACHE_SIZE,
)
)
async def receive_block_batch(
    self, all_blocks: List[FullBlock], peer: ws.WSChiaConnection, fork_point: Optional[uint32]
) -> Tuple[bool, bool, Optional[uint32]]:
    """
    Validate and add a batch of contiguous blocks received from a peer during sync.

    Returns (success, advanced_peak, fork_height):
      - success: False when pre-validation fails or receive_block rejects a block.
      - advanced_peak: True if any block in the batch became the new peak.
      - fork_height: last fork height reported by receive_block (None on
        pre-validation failure, 0 if no block was processed that far).
    """
    advanced_peak = False
    fork_height: Optional[uint32] = uint32(0)
    blocks_to_validate: List[FullBlock] = []
    # Skip the prefix of blocks we already have; validate from the first unknown one.
    for i, block in enumerate(all_blocks):
        if not self.blockchain.contains_block(block.header_hash):
            blocks_to_validate = all_blocks[i:]
            break
    if len(blocks_to_validate) == 0:
        # Entire batch is already in the blockchain; nothing to do.
        return True, False, fork_height
    pre_validate_start = time.time()
    pre_validation_results: Optional[
        List[PreValidationResult]
    ] = await self.blockchain.pre_validate_blocks_multiprocessing(blocks_to_validate)
    self.log.debug(f"Block pre-validation time: {time.time() - pre_validate_start}")
    if pre_validation_results is None:
        return False, False, None
    for i, block in enumerate(blocks_to_validate):
        if pre_validation_results[i].error is not None:
            self.log.error(
                f"Invalid block from peer: {peer.get_peer_info()} {Err(pre_validation_results[i].error)}"
            )
            return False, advanced_peak, fork_height
        assert pre_validation_results[i].required_iters is not None
        # Pass the external fork point only until the peak has advanced once;
        # after that, subsequent blocks simply extend the current chain.
        (result, error, fork_height,) = await self.blockchain.receive_block(
            block, pre_validation_results[i], None if advanced_peak else fork_point
        )
        if result == ReceiveBlockResult.NEW_PEAK:
            advanced_peak = True
        elif result == ReceiveBlockResult.INVALID_BLOCK or result == ReceiveBlockResult.DISCONNECTED_BLOCK:
            if error is not None:
                self.log.error(f"Error: {error}, Invalid block from peer: {peer.get_peer_info()} ")
            return False, advanced_peak, fork_height
        block_record = self.blockchain.block_record(block.header_hash)
        if block_record.sub_epoch_summary_included is not None:
            # A sub-epoch boundary was included; regenerate weight-proof segments.
            await self.weight_proof_handler.create_prev_sub_epoch_segments()
    if advanced_peak:
        self._state_changed("new_peak")
    self.log.debug(
        f"Total time for {len(blocks_to_validate)} blocks: {time.time() - pre_validate_start}, "
        f"advanced: {advanced_peak}"
    )
    return True, advanced_peak, fork_height
async def _finish_sync(self):
    """
    Finalize a long sync: turn off sync mode, clear sync peer info, and run
    peak post-processing so timelords, full nodes, and wallets learn about
    the new peak. Also refreshes the proof of weight for the peak.
    """
    self.sync_store.set_sync_mode(False)
    self._state_changed("sync_mode")
    if self.server is None:
        return
    peak: Optional[BlockRecord] = self.blockchain.get_peak()
    async with self.blockchain.lock:
        await self.sync_store.clear_sync_info()
        peak_fb: FullBlock = await self.blockchain.get_full_peak()
        if peak is not None:
            # fork height = peak.height - 1 signals "no reorg" to peak_post_processing.
            await self.peak_post_processing(peak_fb, peak, peak.height - 1, None)
    if peak is not None:
        # Recompute (and cache) the proof of weight for the freshly synced peak.
        await self.weight_proof_handler.get_proof_of_weight(peak.header_hash)
        self._state_changed("block")
def has_valid_pool_sig(self, block: Union[UnfinishedBlock, FullBlock]):
    """
    Verify the pool signature over the pool target, when one is required.

    A block must carry a valid BLS signature over its serialized pool target
    when all of the following hold: it pays the pre-farm pool puzzle hash
    (max height 0), it is not the genesis block, and its proof of space has
    a pool public key. All other blocks pass without a signature check.
    """
    foliage_data = block.foliage.foliage_block_data
    pre_farm_target = PoolTarget(self.constants.GENESIS_PRE_FARM_POOL_PUZZLE_HASH, uint32(0))
    is_genesis = block.foliage.prev_block_hash == self.constants.GENESIS_CHALLENGE
    pool_pk = block.reward_chain_block.proof_of_space.pool_public_key
    signature_required = (
        foliage_data.pool_target == pre_farm_target and not is_genesis and pool_pk is not None
    )
    if not signature_required:
        return True
    return AugSchemeMPL.verify(
        pool_pk,
        bytes(foliage_data.pool_target),
        foliage_data.pool_signature,
    )
async def peak_post_processing(
    self, block: FullBlock, record: BlockRecord, fork_height: uint32, peer: Optional[ws.WSChiaConnection]
):
    """
    Run everything that must happen after `block` became the new peak:
    update the full node store, re-run the mempool against the new peak, and
    broadcast the peak to timelords, full nodes, and wallets.

    Args:
        block: the full block that just became the peak.
        record: the block record for `block`.
        fork_height: height the chain forked at (block.height - 1 when there
            was no reorg; see the reorg flag passed to full_node_store.new_peak).
        peer: peer the block came from; excluded from the full-node broadcast.
    """
    difficulty = self.blockchain.get_next_difficulty(record.header_hash, False)
    sub_slot_iters = self.blockchain.get_next_slot_iters(record.header_hash, False)
    self.log.info(
        f"🌱 Updated peak to height {record.height}, weight {record.weight}, "
        f"hh {record.header_hash}, "
        f"forked at {fork_height}, rh: {record.reward_infusion_new_challenge}, "
        f"total iters: {record.total_iters}, "
        f"overflow: {record.overflow}, "
        f"deficit: {record.deficit}, "
        f"difficulty: {difficulty}, "
        f"sub slot iters: {sub_slot_iters}"
    )
    sub_slots = await self.blockchain.get_sp_and_ip_sub_slots(record.header_hash)
    assert sub_slots is not None
    if not self.sync_store.get_sync_mode():
        # Prune cached block records (skipped during sync, when the cache is hot).
        self.blockchain.clean_block_records()
    # 5th argument flags a reorg: the fork point is not the direct parent.
    added_eos, new_sps, new_ips = self.full_node_store.new_peak(
        record,
        block,
        sub_slots[0],
        sub_slots[1],
        fork_height != block.height - 1 and block.height != 0,
        self.blockchain,
    )
    if sub_slots[1] is None:
        assert record.ip_sub_slot_total_iters(self.constants) == 0
    # Ensure the signage point is also in the store, for consistency
    self.full_node_store.new_signage_point(
        record.signage_point_index,
        self.blockchain,
        record,
        record.sub_slot_iters,
        SignagePoint(
            block.reward_chain_block.challenge_chain_sp_vdf,
            block.challenge_chain_sp_proof,
            block.reward_chain_block.reward_chain_sp_vdf,
            block.reward_chain_sp_proof,
        ),
        skip_vdf_validation=True,
    )
    # Update the mempool (returns successful pending transactions added to the mempool)
    for bundle, result, spend_name in await self.mempool_manager.new_peak(self.blockchain.get_peak()):
        self.log.debug(f"Added transaction to mempool: {spend_name}")
        mempool_item = self.mempool_manager.get_mempool_item(spend_name)
        assert mempool_item is not None
        fees = mempool_item.fee
        assert fees >= 0
        assert result.cost is not None
        new_tx = full_node_protocol.NewTransaction(
            spend_name,
            result.cost,
            uint64(bundle.fees()),
        )
        msg = make_msg(ProtocolMessageTypes.new_transaction, new_tx)
        await self.server.send_to_all([msg], NodeType.FULL_NODE)
    # If there were pending end of slots that happen after this peak, broadcast them if they are added
    if added_eos is not None:
        broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
            added_eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
            added_eos.challenge_chain.get_hash(),
            uint8(0),
            added_eos.reward_chain.end_of_slot_vdf.challenge,
        )
        msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast)
        await self.server.send_to_all([msg], NodeType.FULL_NODE)
    # TODO: maybe add and broadcast new SP/IPs as well?
    if record.height % 1000 == 0:
        # Occasionally clear the seen list to keep it small
        self.full_node_store.clear_seen_unfinished_blocks()
    if self.sync_store.get_sync_mode() is False:
        # Peak announcements to timelords/full nodes are suppressed during sync.
        await self.send_peak_to_timelords(block)
        # Tell full nodes about the new peak
        msg = make_msg(
            ProtocolMessageTypes.new_peak,
            full_node_protocol.NewPeak(
                record.header_hash,
                record.height,
                record.weight,
                fork_height,
                block.reward_chain_block.get_unfinished().get_hash(),
            ),
        )
        if peer is not None:
            await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
        else:
            await self.server.send_to_all([msg], NodeType.FULL_NODE)
    # Tell wallets about the new peak
    msg = make_msg(
        ProtocolMessageTypes.new_peak_wallet,
        wallet_protocol.NewPeakWallet(
            record.header_hash,
            record.height,
            record.weight,
            fork_height,
        ),
    )
    await self.server.send_to_all([msg], NodeType.WALLET)
    self._state_changed("new_peak")
async def respond_block(
    self,
    respond_block: full_node_protocol.RespondBlock,
    peer: Optional[ws.WSChiaConnection] = None,
) -> Optional[Message]:
    """
    Validate and add a full block received from a peer (or farmed locally).

    Reuses the pre-validation result of a matching unfinished block when the
    received block lacks its transactions generator. Raises ValueError on
    pre-validation failure and ConsensusError when the block is invalid.
    """
    block: FullBlock = respond_block.block
    if self.sync_store.get_sync_mode():
        # Blocks arriving individually are ignored during long sync.
        return None
    # Adds the block to seen, and check if it's seen before (which means header is in memory)
    header_hash = block.header_hash
    if self.blockchain.contains_block(header_hash):
        return None
    pre_validation_result: Optional[PreValidationResult] = None
    if block.is_transaction_block() and block.transactions_generator is None:
        # The peer stripped the generator; recover it (and the cached validation
        # result) from the matching unfinished block, keyed by the unfinished
        # reward chain hash.
        unfinished_rh: bytes32 = block.reward_chain_block.get_unfinished().get_hash()
        unf_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(unfinished_rh)
        if unf_block is not None and unf_block.transactions_generator is not None:
            pre_validation_result = self.full_node_store.get_unfinished_block_result(unfinished_rh)
            assert pre_validation_result is not None
            block = dataclasses.replace(block, transactions_generator=unf_block.transactions_generator)
    async with self.blockchain.lock:
        # Re-check under the lock: another task may have added the block meanwhile.
        if self.blockchain.contains_block(header_hash):
            return None
        validation_start = time.time()
        # Second argument: skip CLVM re-execution when a cached result will be reused.
        pre_validation_results: Optional[
            List[PreValidationResult]
        ] = await self.blockchain.pre_validate_blocks_multiprocessing([block], pre_validation_result is None)
        if pre_validation_results is None:
            raise ValueError(f"Failed to validate block {header_hash} height {block.height}")
        if pre_validation_results[0].error is not None:
            if Err(pre_validation_results[0].error) == Err.INVALID_PREV_BLOCK_HASH:
                # Unknown parent: treat as disconnected rather than invalid.
                added: ReceiveBlockResult = ReceiveBlockResult.DISCONNECTED_BLOCK
                error_code: Optional[Err] = Err.INVALID_PREV_BLOCK_HASH
                fork_height: Optional[uint32] = None
            else:
                raise ValueError(
                    f"Failed to validate block {header_hash} height "
                    f"{block.height}: {Err(pre_validation_results[0].error).name}"
                )
        else:
            result_to_validate = (
                pre_validation_results[0] if pre_validation_result is None else pre_validation_result
            )
            assert result_to_validate.required_iters == pre_validation_results[0].required_iters
            added, error_code, fork_height = await self.blockchain.receive_block(block, result_to_validate, None)
        validation_time = time.time() - validation_start
        if added == ReceiveBlockResult.ALREADY_HAVE_BLOCK:
            return None
        elif added == ReceiveBlockResult.INVALID_BLOCK:
            assert error_code is not None
            self.log.error(f"Block {header_hash} at height {block.height} is invalid with code {error_code}.")
            raise ConsensusError(error_code, header_hash)
        elif added == ReceiveBlockResult.DISCONNECTED_BLOCK:
            self.log.info(f"Disconnected block {header_hash} at height {block.height}")
            return None
        elif added == ReceiveBlockResult.NEW_PEAK:
            # Only propagate blocks which extend the blockchain (becomes one of the heads)
            new_peak: Optional[BlockRecord] = self.blockchain.get_peak()
            assert new_peak is not None and fork_height is not None
            self.log.debug(f"Validation time for peak: {validation_time}")
            await self.peak_post_processing(block, new_peak, fork_height, peer)
        elif added == ReceiveBlockResult.ADDED_AS_ORPHAN:
            self.log.info(
                f"Received orphan block of height {block.height} rh " f"{block.reward_chain_block.get_hash()}"
            )
        else:
            # Should never reach here, all the cases are covered
            raise RuntimeError(f"Invalid result from receive_block {added}")
    # This code path is reached if added == ADDED_AS_ORPHAN or NEW_TIP
    peak = self.blockchain.get_peak()
    assert peak is not None
    # Removes all temporary data for old blocks
    clear_height = uint32(max(0, peak.height - 50))
    self.full_node_store.clear_candidate_blocks_below(clear_height)
    self.full_node_store.clear_unfinished_blocks_below(clear_height)
    if peak.height % 1000 == 0 and not self.sync_store.get_sync_mode():
        await self.sync_store.clear_sync_info()  # Occasionally clear sync peer info
    self._state_changed("block")
    return None
async def respond_unfinished_block(
    self,
    respond_unfinished_block: full_node_protocol.RespondUnfinishedBlock,
    peer: Optional[ws.WSChiaConnection],
    farmed_block: bool = False,
):
    """
    Validate an unfinished block, add it to the store, and notify timelords
    (so they can infuse it) and other full nodes.

    Args:
        respond_unfinished_block: wrapper holding the unfinished block.
        peer: sender; excluded from the full-node broadcast when not None.
        farmed_block: True when this node farmed the block itself (log only).
    """
    block = respond_unfinished_block.unfinished_block
    if block.prev_header_hash != self.constants.GENESIS_CHALLENGE and not self.blockchain.contains_block(
        block.prev_header_hash
    ):
        # No need to request the parent, since the peer will send it to us anyway, via NewPeak
        self.log.debug("Received a disconnected unfinished block")
        return
    # Adds the unfinished block to seen, and check if it's seen before, to prevent
    # processing the exact same unfinished block twice.
    if self.full_node_store.seen_unfinished_block(block.get_hash()):
        return
    block_hash = block.reward_chain_block.get_hash()
    if self.full_node_store.get_unfinished_block(block_hash) is not None:
        return
    peak: Optional[BlockRecord] = self.blockchain.get_peak()
    if peak is not None:
        if block.total_iters < peak.sp_total_iters(self.constants):
            # Too far behind the peak's signage point; it cannot extend our chain.
            return
    if block.prev_header_hash == self.constants.GENESIS_CHALLENGE:
        prev_b = None
    else:
        prev_b = self.blockchain.block_record(block.prev_header_hash)
    # Count blocks infused in the current sub slot (walking back to the first
    # block in the sub slot) to enforce MAX_SUB_SLOT_BLOCKS.
    if len(block.finished_sub_slots) > 0:
        num_blocks_in_ss = 1  # Curr
    else:
        curr = self.blockchain.try_block_record(block.prev_header_hash)
        num_blocks_in_ss = 2  # Curr and prev
        while (curr is not None) and not curr.first_in_sub_slot:
            curr = self.blockchain.try_block_record(curr.prev_hash)
            num_blocks_in_ss += 1
    if num_blocks_in_ss > self.constants.MAX_SUB_SLOT_BLOCKS:
        # TODO: potentially allow overflow blocks here, which count for the next slot
        self.log.warning("Too many blocks added, not adding block")
        return
    async with self.blockchain.lock:
        # TODO: pre-validate VDFs outside of lock
        validate_result = await self.blockchain.validate_unfinished_block(block)
        if validate_result.error is not None:
            raise ConsensusError(Err(validate_result.error))
    assert validate_result.required_iters is not None
    # Perform another check, in case we have already concurrently added the same unfinished block
    if self.full_node_store.get_unfinished_block(block_hash) is not None:
        return
    if block.prev_header_hash == self.constants.GENESIS_CHALLENGE:
        height = uint32(0)
    else:
        height = uint32(self.blockchain.block_record(block.prev_header_hash).height + 1)
    ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
        self.constants,
        self.blockchain,
        validate_result.required_iters,
        block,
        True,
    )
    self.full_node_store.add_unfinished_block(height, block, validate_result)
    if farmed_block is True:
        self.log.info(f"🍀 ️Farmed unfinished_block {block_hash}")
    else:
        self.log.info(f"Added unfinished_block {block_hash}, not farmed")
    sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
        self.constants,
        len(block.finished_sub_slots) > 0,
        prev_b,
        self.blockchain,
    )
    # Work out the previous reward-chain hash the timelord should build from.
    if block.reward_chain_block.signage_point_index == 0:
        res = self.full_node_store.get_sub_slot(block.reward_chain_block.pos_ss_cc_challenge_hash)
        if res is None:
            if block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE:
                rc_prev = self.constants.GENESIS_CHALLENGE
            else:
                self.log.warning(f"Do not have sub slot {block.reward_chain_block.pos_ss_cc_challenge_hash}")
                return
        else:
            rc_prev = res[0].reward_chain.get_hash()
    else:
        assert block.reward_chain_block.reward_chain_sp_vdf is not None
        rc_prev = block.reward_chain_block.reward_chain_sp_vdf.challenge
    timelord_request = timelord_protocol.NewUnfinishedBlock(
        block.reward_chain_block,
        difficulty,
        sub_slot_iters,
        block.foliage,
        ses,
        rc_prev,
    )
    msg = make_msg(ProtocolMessageTypes.new_unfinished_block, timelord_request)
    await self.server.send_to_all([msg], NodeType.TIMELORD)
    full_node_request = full_node_protocol.NewUnfinishedBlock(block.reward_chain_block.get_hash())
    msg = make_msg(ProtocolMessageTypes.new_unfinished_block, full_node_request)
    if peer is not None:
        await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
    else:
        await self.server.send_to_all([msg], NodeType.FULL_NODE)
    self._state_changed("unfinished_block")
async def new_infusion_point_vdf(
    self, request: timelord_protocol.NewInfusionPointVDF, timelord_peer: Optional[ws.WSChiaConnection] = None
) -> Optional[Message]:
    """
    Combine an infusion-point VDF from a timelord with the matching unfinished
    block to form a finished FullBlock, then submit it via respond_block.

    If the previous block cannot be located yet, the request is cached in the
    store (add_to_future_ip) to be retried when the prev block arrives.
    """
    # Lookup unfinished blocks
    unfinished_block: Optional[UnfinishedBlock] = self.full_node_store.get_unfinished_block(
        request.unfinished_reward_hash
    )
    if unfinished_block is None:
        self.log.warning(
            f"Do not have unfinished reward chain block {request.unfinished_reward_hash}, cannot finish."
        )
        return None
    prev_b: Optional[BlockRecord] = None
    target_rc_hash = request.reward_chain_ip_vdf.challenge
    last_slot_cc_hash = request.challenge_chain_ip_vdf.challenge
    # Backtracks through end of slot objects, should work for multiple empty sub slots
    for eos, _, _ in reversed(self.full_node_store.finished_sub_slots):
        if eos is not None and eos.reward_chain.get_hash() == target_rc_hash:
            target_rc_hash = eos.reward_chain.end_of_slot_vdf.challenge
    if target_rc_hash == self.constants.GENESIS_CHALLENGE:
        prev_b = None
    else:
        # Find the prev block, starts looking backwards from the peak. target_rc_hash must be the hash of a block
        # and not an end of slot (since we just looked through the slots and backtracked)
        curr: Optional[BlockRecord] = self.blockchain.get_peak()
        for _ in range(10):
            if curr is None:
                break
            if curr.reward_infusion_new_challenge == target_rc_hash:
                # Found our prev block
                prev_b = curr
                break
            curr = self.blockchain.try_block_record(curr.prev_hash)
        # If not found, cache keyed on prev block
        if prev_b is None:
            self.full_node_store.add_to_future_ip(request)
            self.log.warning(f"Previous block is None, infusion point {request.reward_chain_ip_vdf.challenge}")
            return None
    finished_sub_slots: Optional[List[EndOfSubSlotBundle]] = self.full_node_store.get_finished_sub_slots(
        self.blockchain,
        prev_b,
        last_slot_cc_hash,
    )
    if finished_sub_slots is None:
        return None
    sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
        self.constants,
        len(finished_sub_slots) > 0,
        prev_b,
        self.blockchain,
    )
    # Iterations at the start of the sub slot the proof of space targets.
    if unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash == self.constants.GENESIS_CHALLENGE:
        sub_slot_start_iters = uint128(0)
    else:
        ss_res = self.full_node_store.get_sub_slot(unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash)
        if ss_res is None:
            self.log.warning(f"Do not have sub slot {unfinished_block.reward_chain_block.pos_ss_cc_challenge_hash}")
            return None
        _, _, sub_slot_start_iters = ss_res
    sp_total_iters = uint128(
        sub_slot_start_iters
        + calculate_sp_iters(
            self.constants,
            sub_slot_iters,
            unfinished_block.reward_chain_block.signage_point_index,
        )
    )
    block: FullBlock = unfinished_block_to_full_block(
        unfinished_block,
        request.challenge_chain_ip_vdf,
        request.challenge_chain_ip_proof,
        request.reward_chain_ip_vdf,
        request.reward_chain_ip_proof,
        request.infused_challenge_chain_ip_vdf,
        request.infused_challenge_chain_ip_proof,
        finished_sub_slots,
        prev_b,
        self.blockchain,
        sp_total_iters,
        difficulty,
    )
    if not self.has_valid_pool_sig(block):
        self.log.warning("Trying to make a pre-farm block but height is not 0")
        return None
    try:
        await self.respond_block(full_node_protocol.RespondBlock(block))
    except Exception as e:
        self.log.warning(f"Consensus error validating block: {e}")
        if timelord_peer is not None:
            # Only sends to the timelord who sent us this VDF, to reset them to the correct peak
            await self.send_peak_to_timelords(peer=timelord_peer)
    return None
async def respond_end_of_sub_slot(
    self, request: full_node_protocol.RespondEndOfSubSlot, peer: ws.WSChiaConnection
) -> Tuple[Optional[Message], bool]:
    """
    Add an end-of-sub-slot bundle received from a peer.

    Returns (message, added): message is a request for the previous sub slot
    when we are missing it (to be sent back to the peer); added is True when
    the sub slot was stored (or already present).
    """
    fetched_ss = self.full_node_store.get_sub_slot(request.end_of_slot_bundle.challenge_chain.get_hash())
    if fetched_ss is not None:
        # Already have the sub-slot
        return None, True
    async with self.timelord_lock:
        fetched_ss = self.full_node_store.get_sub_slot(
            request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
        )
        if (
            (fetched_ss is None)
            and request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge
            != self.constants.GENESIS_CHALLENGE
        ):
            # If we don't have the prev, request the prev instead
            full_node_request = full_node_protocol.RequestSignagePointOrEndOfSubSlot(
                request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
                uint8(0),
                bytes([0] * 32),
            )
            return (
                make_msg(ProtocolMessageTypes.request_signage_point_or_end_of_sub_slot, full_node_request),
                False,
            )
        peak = self.blockchain.get_peak()
        if peak is not None and peak.height > 2:
            next_sub_slot_iters = self.blockchain.get_next_slot_iters(peak.header_hash, True)
            next_difficulty = self.blockchain.get_next_difficulty(peak.header_hash, True)
        else:
            # Too early in the chain to derive parameters; use the starting constants.
            next_sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING
            next_difficulty = self.constants.DIFFICULTY_STARTING
        # Returns infusion points to process when added; None means not added.
        new_infusions = self.full_node_store.new_finished_sub_slot(
            request.end_of_slot_bundle,
            self.blockchain,
            peak,
            await self.blockchain.get_full_peak(),
        )
        if new_infusions is not None:
            self.log.info(
                f"⏲️ Finished sub slot, SP {self.constants.NUM_SPS_SUB_SLOT}/{self.constants.NUM_SPS_SUB_SLOT}, "
                f"{request.end_of_slot_bundle.challenge_chain.get_hash()}, "
                f"number of sub-slots: {len(self.full_node_store.finished_sub_slots)}, "
                f"RC hash: {request.end_of_slot_bundle.reward_chain.get_hash()}, "
                f"Deficit {request.end_of_slot_bundle.reward_chain.deficit}"
            )
            # Notify full nodes of the new sub-slot
            broadcast = full_node_protocol.NewSignagePointOrEndOfSubSlot(
                request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge,
                request.end_of_slot_bundle.challenge_chain.get_hash(),
                uint8(0),
                request.end_of_slot_bundle.reward_chain.end_of_slot_vdf.challenge,
            )
            msg = make_msg(ProtocolMessageTypes.new_signage_point_or_end_of_sub_slot, broadcast)
            await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
            # Process any blocks that can now be infused at the start of this sub slot.
            for infusion in new_infusions:
                await self.new_infusion_point_vdf(infusion)
            # Notify farmers of the new sub-slot
            broadcast_farmer = farmer_protocol.NewSignagePoint(
                request.end_of_slot_bundle.challenge_chain.get_hash(),
                request.end_of_slot_bundle.challenge_chain.get_hash(),
                request.end_of_slot_bundle.reward_chain.get_hash(),
                next_difficulty,
                next_sub_slot_iters,
                uint8(0),
            )
            msg = make_msg(ProtocolMessageTypes.new_signage_point, broadcast_farmer)
            await self.server.send_to_all([msg], NodeType.FARMER)
            return None, True
        else:
            self.log.info(
                f"End of slot not added CC challenge "
                f"{request.end_of_slot_bundle.challenge_chain.challenge_chain_end_of_slot_vdf.challenge}"
            )
            return None, False
async def respond_transaction(
    self,
    transaction: SpendBundle,
    spend_name: bytes32,
    peer: Optional[ws.WSChiaConnection] = None,
    test: bool = False,
) -> Tuple[MempoolInclusionStatus, Optional[Err]]:
    """
    Validate a spend bundle and try to add it to the mempool, broadcasting a
    NewTransaction to other full nodes on success.

    Args:
        transaction: the spend bundle to add.
        spend_name: the bundle's id (hash), used as the mempool key.
        peer: sender; excluded from the broadcast when not None.
        test: when True, skips the synced() check.

    Returns:
        (inclusion status, error code or None).
    """
    if self.sync_store.get_sync_mode():
        return MempoolInclusionStatus.FAILED, Err.NO_TRANSACTIONS_WHILE_SYNCING
    if not test and not (await self.synced()):
        return MempoolInclusionStatus.FAILED, Err.NO_TRANSACTIONS_WHILE_SYNCING
    peak_height = self.blockchain.get_peak_height()
    # No transactions in mempool in initial client. Remove 6 weeks after launch
    if (
        peak_height is None
        or peak_height <= self.constants.INITIAL_FREEZE_PERIOD
        or self.constants.NETWORK_TYPE == NetworkType.MAINNET
    ):
        return MempoolInclusionStatus.FAILED, Err.INITIAL_TRANSACTION_FREEZE
    if self.mempool_manager.seen(spend_name):
        return MempoolInclusionStatus.FAILED, Err.ALREADY_INCLUDING_TRANSACTION
    self.mempool_manager.add_and_maybe_pop_seen(spend_name)
    self.log.debug(f"Processing transaction: {spend_name}")
    # Ignore if syncing
    if self.sync_store.get_sync_mode():
        status = MempoolInclusionStatus.FAILED
        error: Optional[Err] = Err.NO_TRANSACTIONS_WHILE_SYNCING
    else:
        try:
            # CPU-heavy signature/CLVM pre-validation happens outside the lock.
            cost_result = await self.mempool_manager.pre_validate_spendbundle(transaction)
        except Exception as e:
            # Allow the transaction to be re-submitted later if validation crashed.
            self.mempool_manager.remove_seen(spend_name)
            raise e
        async with self.blockchain.lock:
            # Re-check under the lock in case it was added concurrently.
            if self.mempool_manager.get_spendbundle(spend_name) is not None:
                self.mempool_manager.remove_seen(spend_name)
                return MempoolInclusionStatus.FAILED, Err.ALREADY_INCLUDING_TRANSACTION
            cost, status, error = await self.mempool_manager.add_spendbundle(transaction, cost_result, spend_name)
            if status == MempoolInclusionStatus.SUCCESS:
                self.log.debug(f"Added transaction to mempool: {spend_name}")
                # Only broadcast successful transactions, not pending ones. Otherwise it's a DOS
                mempool_item = self.mempool_manager.get_mempool_item(spend_name)
                assert mempool_item is not None
                fees = mempool_item.fee
                assert fees >= 0
                assert cost is not None
                new_tx = full_node_protocol.NewTransaction(
                    spend_name,
                    cost,
                    uint64(transaction.fees()),
                )
                msg = make_msg(ProtocolMessageTypes.new_transaction, new_tx)
                if peer is None:
                    await self.server.send_to_all([msg], NodeType.FULL_NODE)
                else:
                    await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
            else:
                self.mempool_manager.remove_seen(spend_name)
                self.log.warning(
                    f"Wasn't able to add transaction with id {spend_name}, " f"status {status} error: {error}"
                )
    return status, error
async def _needs_compact_proof(
    self, vdf_info: VDFInfo, header_block: HeaderBlock, field_vdf: CompressibleVDFField
) -> bool:
    """
    Decide whether a compact proof for `vdf_info` would be an improvement.

    Returns True when `header_block` contains the VDF described by `vdf_info`
    in the given field and the stored proof is not yet compact. Returns False
    when the VDF is not present in that field, or its proof is already compact.
    """

    def _is_compact(proof) -> bool:
        # A compact proof has witness type 0 and is normalized to the identity.
        return proof.witness_type == 0 and proof.normalized_to_identity

    if field_vdf == CompressibleVDFField.CC_EOS_VDF:
        for sub_slot in header_block.finished_sub_slots:
            if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
                return not _is_compact(sub_slot.proofs.challenge_chain_slot_proof)
        return False
    if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
        for sub_slot in header_block.finished_sub_slots:
            icc = sub_slot.infused_challenge_chain
            if icc is not None and icc.infused_challenge_chain_end_of_slot_vdf == vdf_info:
                assert sub_slot.proofs.infused_challenge_chain_slot_proof is not None
                return not _is_compact(sub_slot.proofs.infused_challenge_chain_slot_proof)
        return False
    if field_vdf == CompressibleVDFField.CC_SP_VDF:
        sp_vdf = header_block.reward_chain_block.challenge_chain_sp_vdf
        if sp_vdf is None or vdf_info != sp_vdf:
            return False
        assert header_block.challenge_chain_sp_proof is not None
        return not _is_compact(header_block.challenge_chain_sp_proof)
    if field_vdf == CompressibleVDFField.CC_IP_VDF:
        if vdf_info != header_block.reward_chain_block.challenge_chain_ip_vdf:
            return False
        return not _is_compact(header_block.challenge_chain_ip_proof)
    return False
async def _can_accept_compact_proof(
    self,
    vdf_info: VDFInfo,
    vdf_proof: VDFProof,
    height: uint32,
    header_hash: bytes32,
    field_vdf: CompressibleVDFField,
) -> bool:
    """
    Return True if the received compact proof should replace our stored one:
    the block exists and is not fully compactified, the proof is compact and
    valid for `vdf_info`, and our stored proof is not already compact.
    """
    is_fully_compactified = await self.block_store.is_fully_compactified(header_hash)
    # None means we do not have the block; True means nothing left to compactify.
    if is_fully_compactified is None or is_fully_compactified:
        self.log.info(f"Already compactified block: {header_hash}. Ignoring.")
        return False
    # The received proof itself must be in compact form.
    if vdf_proof.witness_type > 0 or not vdf_proof.normalized_to_identity:
        self.log.error(f"Received vdf proof is not compact: {vdf_proof}.")
        return False
    if not vdf_proof.is_valid(self.constants, ClassgroupElement.get_default_element(), vdf_info):
        self.log.error(f"Received compact vdf proof is not valid: {vdf_proof}.")
        return False
    header_block = await self.blockchain.get_header_block_by_height(height, header_hash)
    if header_block is None:
        self.log.error(f"Can't find block for given compact vdf. Height: {height} Header hash: {header_hash}")
        return False
    is_new_proof = await self._needs_compact_proof(vdf_info, header_block, field_vdf)
    if not is_new_proof:
        self.log.info(f"Duplicate compact proof. Height: {height}. Header hash: {header_hash}.")
    return is_new_proof
async def _replace_proof(
    self,
    vdf_info: VDFInfo,
    vdf_proof: VDFProof,
    height: uint32,
    field_vdf: CompressibleVDFField,
):
    """
    Replace the stored proof matching `vdf_info` with the compact `vdf_proof`
    in every full block at `height`, persisting each rewritten block.

    Both call sites in this file invoke this under self.blockchain.lock.
    """
    full_blocks = await self.block_store.get_full_blocks_at([height])
    assert len(full_blocks) > 0
    for block in full_blocks:
        new_block = None
        block_record = await self.blockchain.get_block_record_from_db(self.blockchain.height_to_hash(height))
        assert block_record is not None
        if field_vdf == CompressibleVDFField.CC_EOS_VDF:
            for index, sub_slot in enumerate(block.finished_sub_slots):
                if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == vdf_info:
                    new_proofs = dataclasses.replace(sub_slot.proofs, challenge_chain_slot_proof=vdf_proof)
                    new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs)
                    # NOTE(review): this aliases block.finished_sub_slots and mutates it
                    # in place before dataclasses.replace — verify the original block
                    # object is not reused elsewhere after this call.
                    new_finished_subslots = block.finished_sub_slots
                    new_finished_subslots[index] = new_subslot
                    new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots)
                    break
        if field_vdf == CompressibleVDFField.ICC_EOS_VDF:
            for index, sub_slot in enumerate(block.finished_sub_slots):
                if (
                    sub_slot.infused_challenge_chain is not None
                    and sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf == vdf_info
                ):
                    new_proofs = dataclasses.replace(sub_slot.proofs, infused_challenge_chain_slot_proof=vdf_proof)
                    new_subslot = dataclasses.replace(sub_slot, proofs=new_proofs)
                    # Same in-place aliasing pattern as the CC_EOS_VDF branch above.
                    new_finished_subslots = block.finished_sub_slots
                    new_finished_subslots[index] = new_subslot
                    new_block = dataclasses.replace(block, finished_sub_slots=new_finished_subslots)
                    break
        if field_vdf == CompressibleVDFField.CC_SP_VDF:
            assert block.challenge_chain_sp_proof is not None
            new_block = dataclasses.replace(block, challenge_chain_sp_proof=vdf_proof)
        if field_vdf == CompressibleVDFField.CC_IP_VDF:
            new_block = dataclasses.replace(block, challenge_chain_ip_proof=vdf_proof)
        # Asserts the vdf_info was actually found in the block (caller pre-checked).
        assert new_block is not None
        await self.block_store.add_full_block(new_block, block_record)
async def respond_compact_vdf_timelord(self, request: timelord_protocol.RespondCompactProofOfTime):
    """
    Handle a compact proof of time from a (bluebox) timelord: validate it,
    replace the stored proof under the blockchain lock, and announce the new
    compact VDF to all connected full nodes.
    """
    field_vdf = CompressibleVDFField(int(request.field_vdf))
    accepted = await self._can_accept_compact_proof(
        request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf
    )
    if not accepted:
        return
    async with self.blockchain.lock:
        await self._replace_proof(request.vdf_info, request.vdf_proof, request.height, field_vdf)
    announcement = full_node_protocol.NewCompactVDF(
        request.height, request.header_hash, request.field_vdf, request.vdf_info
    )
    msg = make_msg(ProtocolMessageTypes.new_compact_vdf, announcement)
    if self.server is not None:
        await self.server.send_to_all([msg], NodeType.FULL_NODE)
async def new_compact_vdf(self, request: full_node_protocol.NewCompactVDF, peer: ws.WSChiaConnection) -> None:
    """
    A peer announced it has a compact VDF proof for a block. If our stored
    proof for that block/field is not yet compact, ask the peer for it.

    Fix: the first early exit returned False while the others returned None
    (implicitly or via bare return); all paths now return None, matching the
    sibling handlers in this file.
    """
    is_fully_compactified = await self.block_store.is_fully_compactified(request.header_hash)
    # None means the block is unknown; True means nothing left to compactify.
    if is_fully_compactified is None or is_fully_compactified:
        return None
    header_block = await self.blockchain.get_header_block_by_height(request.height, request.header_hash)
    if header_block is None:
        return None
    field_vdf = CompressibleVDFField(int(request.field_vdf))
    if await self._needs_compact_proof(request.vdf_info, header_block, field_vdf):
        msg = make_msg(
            ProtocolMessageTypes.request_compact_vdf,
            full_node_protocol.RequestCompactVDF(
                request.height, request.header_hash, request.field_vdf, request.vdf_info
            ),
        )
        await peer.send_message(msg)
    return None
async def request_compact_vdf(self, request: full_node_protocol.RequestCompactVDF, peer: ws.WSChiaConnection):
    """
    A peer asked for one of our compact VDF proofs. Locate the proof for the
    requested block and field, and send it back only if it is actually compact.
    """
    header_block = await self.blockchain.get_header_block_by_height(request.height, request.header_hash)
    if header_block is None:
        return
    field_vdf = CompressibleVDFField(int(request.field_vdf))
    found_proof: Optional[VDFProof] = None
    if field_vdf == CompressibleVDFField.CC_EOS_VDF:
        for sub_slot in header_block.finished_sub_slots:
            if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf == request.vdf_info:
                found_proof = sub_slot.proofs.challenge_chain_slot_proof
                break
    elif field_vdf == CompressibleVDFField.ICC_EOS_VDF:
        for sub_slot in header_block.finished_sub_slots:
            icc = sub_slot.infused_challenge_chain
            if icc is not None and icc.infused_challenge_chain_end_of_slot_vdf == request.vdf_info:
                found_proof = sub_slot.proofs.infused_challenge_chain_slot_proof
                break
    elif field_vdf == CompressibleVDFField.CC_SP_VDF:
        if header_block.reward_chain_block.challenge_chain_sp_vdf == request.vdf_info:
            found_proof = header_block.challenge_chain_sp_proof
    elif field_vdf == CompressibleVDFField.CC_IP_VDF:
        if header_block.reward_chain_block.challenge_chain_ip_vdf == request.vdf_info:
            found_proof = header_block.challenge_chain_ip_proof
    # Never serve a missing or non-compact proof.
    if found_proof is None or found_proof.witness_type > 0 or not found_proof.normalized_to_identity:
        self.log.error(f"{peer} requested compact vdf we don't have, height: {request.height}.")
        return
    compact_vdf = full_node_protocol.RespondCompactVDF(
        request.height,
        request.header_hash,
        request.field_vdf,
        request.vdf_info,
        found_proof,
    )
    msg = make_msg(ProtocolMessageTypes.respond_compact_vdf, compact_vdf)
    await peer.send_message(msg)
async def respond_compact_vdf(self, request: full_node_protocol.RespondCompactVDF, peer: ws.WSChiaConnection):
    """
    Handle a compact VDF proof received from another full node: validate it,
    replace the stored proof under the blockchain lock, and relay the
    announcement to every other full node peer.
    """
    field_vdf = CompressibleVDFField(int(request.field_vdf))
    accepted = await self._can_accept_compact_proof(
        request.vdf_info, request.vdf_proof, request.height, request.header_hash, field_vdf
    )
    if not accepted:
        return
    async with self.blockchain.lock:
        # Drop proofs we have already processed for this vdf_info/height.
        if self.blockchain.seen_compact_proofs(request.vdf_info, request.height):
            return
        await self._replace_proof(request.vdf_info, request.vdf_proof, request.height, field_vdf)
    announcement = full_node_protocol.NewCompactVDF(
        request.height, request.header_hash, request.field_vdf, request.vdf_info
    )
    msg = make_msg(ProtocolMessageTypes.new_compact_vdf, announcement)
    if self.server is not None:
        await self.server.send_to_all_except([msg], NodeType.FULL_NODE, peer.peer_node_id)
async def broadcast_uncompact_blocks(self, uncompact_interval_scan: int, target_uncompact_proofs: int):
min_height: Optional[int] = 0
try:
while not self._shut_down:
while self.sync_store.get_sync_mode():
if self._shut_down:
return
await asyncio.sleep(30)
broadcast_list: List[timelord_protocol.RequestCompactProofOfTime] = []
new_min_height = None
max_height = self.blockchain.get_peak_height()
if max_height is None:
await asyncio.sleep(30)
continue
# Calculate 'min_height' correctly the first time this task is launched, using the db.
assert min_height is not None
min_height = await self.block_store.get_first_not_compactified(min_height)
if min_height is None or min_height > max(0, max_height - 1000):
min_height = max(0, max_height - 1000)
batches_finished = 0
self.log.info("Scanning the blockchain for uncompact blocks.")
for h in range(min_height, max_height, 100):
# Got 10 times the target header count, sampling the target headers should contain
# enough randomness to split the work between blueboxes.
if len(broadcast_list) > target_uncompact_proofs * 10:
break
stop_height = min(h + 99, max_height)
headers = await self.blockchain.get_header_blocks_in_range(min_height, stop_height)
for header in headers.values():
prev_broadcast_list_len = len(broadcast_list)
expected_header_hash = self.blockchain.height_to_hash(header.height)
if header.header_hash != expected_header_hash:
continue
for sub_slot in header.finished_sub_slots:
if (
sub_slot.proofs.challenge_chain_slot_proof.witness_type > 0
or not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
):
broadcast_list.append(
timelord_protocol.RequestCompactProofOfTime(
sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf,
header.header_hash,
header.height,
uint8(CompressibleVDFField.CC_EOS_VDF),
)
)
if sub_slot.proofs.infused_challenge_chain_slot_proof is not None and (
sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type > 0
or not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
):
assert sub_slot.infused_challenge_chain is not None
broadcast_list.append(
timelord_protocol.RequestCompactProofOfTime(
sub_slot.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
header.header_hash,
header.height,
uint8(CompressibleVDFField.ICC_EOS_VDF),
)
)
if header.challenge_chain_sp_proof is not None and (
header.challenge_chain_sp_proof.witness_type > 0
or not header.challenge_chain_sp_proof.normalized_to_identity
):
assert header.reward_chain_block.challenge_chain_sp_vdf is not None
broadcast_list.append(
timelord_protocol.RequestCompactProofOfTime(
header.reward_chain_block.challenge_chain_sp_vdf,
header.header_hash,
header.height,
uint8(CompressibleVDFField.CC_SP_VDF),
)
)
if (
header.challenge_chain_ip_proof.witness_type > 0
or not header.challenge_chain_ip_proof.normalized_to_identity
):
broadcast_list.append(
timelord_protocol.RequestCompactProofOfTime(
header.reward_chain_block.challenge_chain_ip_vdf,
header.header_hash,
header.height,
uint8(CompressibleVDFField.CC_IP_VDF),
)
)
# This is the first header with uncompact proofs. Store its height so next time we iterate
# only from here. Fix header block iteration window to at least 1000, so reorgs will be
# handled correctly.
if prev_broadcast_list_len == 0 and len(broadcast_list) > 0 and h <= max(0, max_height - 1000):
new_min_height = header.height
# Small sleep between batches.
batches_finished += 1
if batches_finished % 10 == 0:
await asyncio.sleep(1)
# We have no uncompact blocks, but mentain the block iteration window to at least 1000 blocks.
if new_min_height is None:
new_min_height = max(0, max_height - 1000)
min_height = new_min_height
if len(broadcast_list) > target_uncompact_proofs:
random.shuffle(broadcast_list)
broadcast_list = broadcast_list[:target_uncompact_proofs]
if self.sync_store.get_sync_mode():
continue
if self.server is not None:
for new_pot in broadcast_list:
msg = make_msg(ProtocolMessageTypes.request_compact_proof_of_time, new_pot)
await self.server.send_to_all([msg], NodeType.TIMELORD)
await asyncio.sleep(uncompact_interval_scan)
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Exception in broadcast_uncompact_blocks: {e}")
self.log.error(f"Exception Stack: {error_stack}")
| true | true |
f71b10389f6b985a22b63f8c11ff239efa2dcf22 | 767 | py | Python | venv/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__about__.py | realxwx/leetcode-solve | 3a7d7d8e92a5fd5fecc347d141a1c532b92e763e | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__about__.py | realxwx/leetcode-solve | 3a7d7d8e92a5fd5fecc347d141a1c532b92e763e | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/pkg_resources/_vendor/packaging/__about__.py | realxwx/leetcode-solve | 3a7d7d8e92a5fd5fecc347d141a1c532b92e763e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020
# Author: xiaoweixiang
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""Project metadata for the ``packaging`` distribution."""
from __future__ import absolute_import, division, print_function

__all__ = [
    "__title__",
    "__summary__",
    "__uri__",
    "__version__",
    "__author__",
    "__email__",
    "__license__",
    "__copyright__",
]

__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "16.8"

__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"

__license__ = "BSD or Apache License, Version 2.0"
__copyright__ = "Copyright 2014-2016 " + __author__
| 30.68 | 79 | 0.744459 |
from __future__ import absolute_import, division, print_function

__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "16.8"
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
__license__ = "BSD or Apache License, Version 2.0"
__copyright__ = "Copyright 2014-2016 {0}".format(__author__)

__all__ = [
    "__title__", "__summary__", "__uri__", "__version__",
    "__author__", "__email__", "__license__", "__copyright__",
]
| true | true |
f71b11050a7b8a94c7408d4babb5e1fedbe1941d | 4,418 | py | Python | models/rdn.py | 0Miquel/LIIF-temporal | b992cb87cb9bdeba6d4c9bc3960b36ba52a1ba75 | [
"BSD-3-Clause"
] | 1 | 2021-08-09T22:43:22.000Z | 2021-08-09T22:43:22.000Z | models/rdn.py | 0Miquel/LIIF-temporal | b992cb87cb9bdeba6d4c9bc3960b36ba52a1ba75 | [
"BSD-3-Clause"
] | null | null | null | models/rdn.py | 0Miquel/LIIF-temporal | b992cb87cb9bdeba6d4c9bc3960b36ba52a1ba75 | [
"BSD-3-Clause"
] | null | null | null | # Residual Dense Network for Image Super-Resolution
# https://arxiv.org/abs/1802.08797
# modified from: https://github.com/thstkdgus35/EDSR-PyTorch
from argparse import Namespace
import torch
import torch.nn as nn
from models import register
class RDB_Conv(nn.Module):
    """One dense-layer unit: Conv3d + ReLU whose output is concatenated
    onto its input along the channel axis (dense connectivity)."""

    def __init__(self, inChannels, growRate, kSize=3):
        super(RDB_Conv, self).__init__()
        pad = (kSize - 1) // 2
        # Kept as an nn.Sequential named `conv` so state-dict keys
        # ("conv.0.*") stay identical to the original layout.
        self.conv = nn.Sequential(
            nn.Conv3d(inChannels, growRate, kSize, padding=pad, stride=1),
            nn.ReLU(),
        )

    def forward(self, x):
        # Dense connection: append the new feature maps to the input channels.
        return torch.cat((x, self.conv(x)), 1)
class RDB(nn.Module):
    """Residual Dense Block: a stack of densely connected RDB_Conv units
    followed by 1x1x1 local feature fusion and a local residual add."""

    def __init__(self, growRate0, growRate, nConvLayers, kSize=3):
        super(RDB, self).__init__()
        # Channel count grows by `growRate` per dense layer.
        self.convs = nn.Sequential(
            *(RDB_Conv(growRate0 + i * growRate, growRate) for i in range(nConvLayers))
        )
        # Local Feature Fusion: squeeze concatenated channels back to growRate0.
        self.LFF = nn.Conv3d(growRate0 + nConvLayers * growRate, growRate0, 1, padding=0, stride=1)

    def forward(self, x):
        # Local residual learning.
        return self.LFF(self.convs(x)) + x
class RDN(nn.Module):
    """Residual Dense Network (Zhang et al., 2018) with a 3-D conv trunk.

    NOTE(review): SFENet/RDBs/GFF use ``nn.Conv3d`` (5-D activations), but
    the optional UPNet head is still ``nn.Conv2d`` + ``PixelShuffle`` and
    cannot consume 5-D input -- presumably only ``no_upsampling=True`` is
    used in this fork; confirm with callers before enabling UPNet.
    """
    def __init__(self, args):
        super(RDN, self).__init__()
        self.args = args
        r = args.scale[0]
        G0 = args.G0
        kSize = args.RDNkSize
        # Number of RDB blocks (D), conv layers per block (C), growth rate (G).
        self.D, C, G = {
            'A': (20, 6, 32),
            'B': (16, 8, 64),
        }[args.RDNconfig]
        # Shallow feature extraction net.
        self.SFENet1 = nn.Conv3d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)
        self.SFENet2 = nn.Conv3d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)
        # Residual dense blocks.
        self.RDBs = nn.ModuleList()
        for i in range(self.D):
            self.RDBs.append(
                RDB(growRate0 = G0, growRate = G, nConvLayers = C)
            )
        # Global Feature Fusion over the concatenated RDB outputs.
        self.GFF = nn.Sequential(*[
            nn.Conv3d(self.D * G0, G0, 1, padding=0, stride=1),
            nn.Conv3d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)
        ])
        if args.no_upsampling:
            self.out_dim = G0
        else:
            self.out_dim = args.n_colors
            # Up-sampling net (2-D; see NOTE in the class docstring).
            if r == 2 or r == 3:
                self.UPNet = nn.Sequential(*[
                    nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1),
                    nn.PixelShuffle(r),
                    nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
                ])
            elif r == 4:
                self.UPNet = nn.Sequential(*[
                    nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                    nn.PixelShuffle(2),
                    nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                    nn.PixelShuffle(2),
                    nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
                ])
            else:
                raise ValueError("scale must be 2 or 3 or 4.")
    def forward(self, x):
        # Shallow features; f__1 also feeds the global residual connection.
        f__1 = self.SFENet1(x)
        x = self.SFENet2(f__1)
        RDBs_out = []
        for i in range(self.D):
            x = self.RDBs[i](x)
            RDBs_out.append(x)
        x = self.GFF(torch.cat(RDBs_out,1))
        x += f__1
        if self.args.no_upsampling:
            return x
        else:
            return self.UPNet(x)
@register('rdn')
def make_rdn(G0=64, RDNkSize=3, RDNconfig='B',
             scale=2, no_upsampling=False):
    """Factory registered under 'rdn': bundle the hyper-parameters into a
    Namespace (n_colors fixed at 3 for RGB input) and build an RDN."""
    args = Namespace(
        G0=G0,
        RDNkSize=RDNkSize,
        RDNconfig=RDNconfig,
        scale=[scale],
        no_upsampling=no_upsampling,
        n_colors=3,
    )
    return RDN(args)
| 32.725926 | 92 | 0.51675 |
from argparse import Namespace
import torch
import torch.nn as nn
from models import register
class RDB_Conv(nn.Module):
    """Conv3d + ReLU dense unit; concatenates its output to its input."""

    def __init__(self, inChannels, growRate, kSize=3):
        super(RDB_Conv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv3d(inChannels, growRate, kSize,
                      padding=(kSize - 1) // 2, stride=1),
            nn.ReLU(),
        )

    def forward(self, x):
        return torch.cat((x, self.conv(x)), 1)
class RDB(nn.Module):
    """Residual Dense Block: dense conv stack + local fusion + residual."""

    def __init__(self, growRate0, growRate, nConvLayers, kSize=3):
        super(RDB, self).__init__()
        layers = [RDB_Conv(growRate0 + i * growRate, growRate)
                  for i in range(nConvLayers)]
        self.convs = nn.Sequential(*layers)
        self.LFF = nn.Conv3d(growRate0 + nConvLayers * growRate, growRate0,
                             1, padding=0, stride=1)

    def forward(self, x):
        return x + self.LFF(self.convs(x))
class RDN(nn.Module):
    """Residual Dense Network trunk built from 3-D convolutions.

    NOTE(review): the optional UPNet head still uses ``nn.Conv2d`` /
    ``PixelShuffle``, which cannot consume the 5-D activations produced by
    the Conv3d trunk -- presumably only ``no_upsampling=True`` is used;
    confirm with callers before enabling the head.
    """
    def __init__(self, args):
        super(RDN, self).__init__()
        self.args = args
        r = args.scale[0]
        G0 = args.G0
        kSize = args.RDNkSize
        # (num RDB blocks D, conv layers per block C, growth rate G).
        self.D, C, G = {
            'A': (20, 6, 32),
            'B': (16, 8, 64),
        }[args.RDNconfig]
        # Shallow feature extraction.
        self.SFENet1 = nn.Conv3d(args.n_colors, G0, kSize, padding=(kSize-1)//2, stride=1)
        self.SFENet2 = nn.Conv3d(G0, G0, kSize, padding=(kSize-1)//2, stride=1)
        self.RDBs = nn.ModuleList()
        for i in range(self.D):
            self.RDBs.append(
                RDB(growRate0 = G0, growRate = G, nConvLayers = C)
            )
        # Global feature fusion over the concatenated RDB outputs.
        self.GFF = nn.Sequential(*[
            nn.Conv3d(self.D * G0, G0, 1, padding=0, stride=1),
            nn.Conv3d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)
        ])
        if args.no_upsampling:
            self.out_dim = G0
        else:
            self.out_dim = args.n_colors
            # 2-D up-sampling head (see NOTE in class docstring).
            if r == 2 or r == 3:
                self.UPNet = nn.Sequential(*[
                    nn.Conv2d(G0, G * r * r, kSize, padding=(kSize-1)//2, stride=1),
                    nn.PixelShuffle(r),
                    nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
                ])
            elif r == 4:
                self.UPNet = nn.Sequential(*[
                    nn.Conv2d(G0, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                    nn.PixelShuffle(2),
                    nn.Conv2d(G, G * 4, kSize, padding=(kSize-1)//2, stride=1),
                    nn.PixelShuffle(2),
                    nn.Conv2d(G, args.n_colors, kSize, padding=(kSize-1)//2, stride=1)
                ])
            else:
                raise ValueError("scale must be 2 or 3 or 4.")
    def forward(self, x):
        # f__1 also feeds the global residual connection.
        f__1 = self.SFENet1(x)
        x = self.SFENet2(f__1)
        RDBs_out = []
        for i in range(self.D):
            x = self.RDBs[i](x)
            RDBs_out.append(x)
        x = self.GFF(torch.cat(RDBs_out,1))
        x += f__1
        if self.args.no_upsampling:
            return x
        else:
            return self.UPNet(x)
@register('rdn')
def make_rdn(G0=64, RDNkSize=3, RDNconfig='B',
             scale=2, no_upsampling=False):
    """Registered 'rdn' model factory; wraps the arguments for RDN."""
    args = Namespace()
    args.G0, args.RDNkSize, args.RDNconfig = G0, RDNkSize, RDNconfig
    args.scale = [scale]
    args.no_upsampling = no_upsampling
    # Input/output channel count is fixed to RGB.
    args.n_colors = 3
    return RDN(args)
| true | true |
f71b1176d7d60f57abf30cc89fa766a5c19610b3 | 588 | py | Python | oauth2/socialapp.py | DemocracyLab/DemocracyLab-CivicTechExchange | eec4715373679259318ff6c384c815acebf0831f | [
"MIT"
] | 64 | 2017-09-30T16:23:43.000Z | 2022-03-30T23:26:50.000Z | oauth2/socialapp.py | DemocracyLab/DemocracyLab-CivicTechExchange | eec4715373679259318ff6c384c815acebf0831f | [
"MIT"
] | 339 | 2017-10-26T06:59:14.000Z | 2022-03-10T22:34:29.000Z | oauth2/socialapp.py | DemocracyLab/DemocracyLab-CivicTechExchange | eec4715373679259318ff6c384c815acebf0831f | [
"MIT"
] | 58 | 2017-09-16T17:25:10.000Z | 2022-03-04T18:14:02.000Z | """
Decouples SocialApp client credentials from the database
"""
from django.conf import settings
class SocialAppMixin:
    """Mixin whose OAuth2 credentials come from ``settings.SOCIAL_APPS``
    rather than from SocialApp rows stored in the database."""

    class Meta:
        abstract = True

    def get_app(self, request):
        """Build a transient SocialApp carrying this provider's credentials
        for use by OAuth2Client."""
        creds = settings.SOCIAL_APPS.get(self.id)
        from allauth.socialaccount.models import SocialApp
        return SocialApp(
            id=creds.get('id'),
            name='SocialApp instance',
            provider=self.id,
            client_id=creds.get('client_id'),
            secret=creds.get('secret'),
            key='',
        )
| 25.565217 | 58 | 0.605442 | from django.conf import settings
class SocialAppMixin:
    """Sources SocialApp client credentials from settings, not the DB."""

    class Meta:
        abstract = True

    def get_app(self, request):
        # Credentials live in settings, keyed by provider id.
        conf = settings.SOCIAL_APPS.get(self.id)
        from allauth.socialaccount.models import SocialApp
        return SocialApp(id=conf.get('id'),
                         name='SocialApp instance',
                         provider=self.id,
                         client_id=conf.get('client_id'),
                         secret=conf.get('secret'),
                         key='')
| true | true |
f71b12227f1c54ad2eb63c792a779e62f95046dc | 1,353 | py | Python | ingredients/schema.py | gtg7784/Graphene-Django | 77fbbe54ea940f566da561edc492823ae7cc7643 | [
"MIT"
] | 1 | 2021-10-14T01:23:31.000Z | 2021-10-14T01:23:31.000Z | ingredients/schema.py | gtg7784/Graphene-Django | 77fbbe54ea940f566da561edc492823ae7cc7643 | [
"MIT"
] | 1 | 2021-09-22T19:41:41.000Z | 2021-09-22T19:41:41.000Z | ingredients/schema.py | gtg7784/Graphene-Django | 77fbbe54ea940f566da561edc492823ae7cc7643 | [
"MIT"
] | null | null | null | import graphene
from graphene_django.types import DjangoObjectType
from .models import Category, Ingredient
class CategoryType(DjangoObjectType):
    # GraphQL type auto-derived from the Category model's fields.
    class Meta:
        model = Category
class IngredientType(DjangoObjectType):
    # GraphQL type auto-derived from the Ingredient model's fields.
    class Meta:
        model = Ingredient
class Query(object):
    """Root GraphQL queries for categories and ingredients."""
    category = graphene.Field(CategoryType, id=graphene.Int(), name=graphene.String())
    all_categories = graphene.List(CategoryType)
    # BUG FIX: this field was previously named ``Ingredient``, so graphene
    # looked for ``resolve_Ingredient`` and the ``resolve_ingredient``
    # method below was never invoked (the field always resolved to None).
    # Lower-casing the attribute binds the resolver and matches ``category``.
    ingredient = graphene.Field(IngredientType, id=graphene.Int(), name=graphene.String())
    all_ingredients = graphene.List(IngredientType)

    def resolve_all_categories(self, info, **kwargs):
        """Return every Category."""
        return Category.objects.all()

    def resolve_all_ingredients(self, info, **kwargs):
        """Return every Ingredient."""
        return Ingredient.objects.all()

    def resolve_category(self, info, **kwargs):
        """Fetch one Category by ``id`` (preferred) or ``name``; None otherwise."""
        id = kwargs.get('id')
        name = kwargs.get('name')
        if id is not None:
            return Category.objects.get(pk=id)
        if name is not None:
            return Category.objects.get(name=name)
        return None

    def resolve_ingredient(self, info, **kwargs):
        """Fetch one Ingredient by ``id`` (preferred) or ``name``; None otherwise."""
        # (also fixes the ``kwrags`` typo from the original)
        id = kwargs.get('id')
        name = kwargs.get('name')
        if id is not None:
            return Ingredient.objects.get(pk=id)
        if name is not None:
            return Ingredient.objects.get(name=name)
        return None
| 27.612245 | 90 | 0.659276 | import graphene
from graphene_django.types import DjangoObjectType
from .models import Category, Ingredient
class CategoryType(DjangoObjectType):
    # GraphQL type auto-derived from the Category model's fields.
    class Meta:
        model = Category
class IngredientType(DjangoObjectType):
    # GraphQL type auto-derived from the Ingredient model's fields.
    class Meta:
        model = Ingredient
class Query(object):
    """Root GraphQL queries for categories and ingredients."""
    category = graphene.Field(CategoryType, id=graphene.Int(), name=graphene.String())
    all_categories = graphene.List(CategoryType)
    # BUG FIX: field was named ``Ingredient``; graphene therefore looked for
    # ``resolve_Ingredient`` and the resolver below never ran. Lower-cased
    # to bind ``resolve_ingredient`` and match the ``category`` field.
    ingredient = graphene.Field(IngredientType, id=graphene.Int(), name=graphene.String())
    all_ingredients = graphene.List(IngredientType)

    def resolve_all_categories(self, info, **kwargs):
        return Category.objects.all()

    def resolve_all_ingredients(self, info, **kwargs):
        return Ingredient.objects.all()

    def resolve_category(self, info, **kwargs):
        id = kwargs.get('id')
        name = kwargs.get('name')
        if id is not None:
            return Category.objects.get(pk=id)
        if name is not None:
            return Category.objects.get(name=name)
        return None

    def resolve_ingredient(self, info, **kwargs):
        # (fixes the ``kwrags`` typo from the original)
        id = kwargs.get('id')
        name = kwargs.get('name')
        if id is not None:
            return Ingredient.objects.get(pk=id)
        if name is not None:
            return Ingredient.objects.get(name=name)
        return None
| true | true |
f71b132f36f52a58f2c573303fc3826c3fffb90a | 6,341 | py | Python | holidays/countries/south_africa.py | Drill-D/python-holidays | f669856d9a441324d66ee3477c4d69a04e0a00ce | [
"MIT"
] | 48 | 2016-11-22T09:18:50.000Z | 2018-01-14T14:06:49.000Z | holidays/countries/south_africa.py | Drill-D/python-holidays | f669856d9a441324d66ee3477c4d69a04e0a00ce | [
"MIT"
] | 59 | 2016-12-03T15:52:36.000Z | 2018-01-16T09:37:15.000Z | holidays/countries/south_africa.py | Drill-D/python-holidays | f669856d9a441324d66ee3477c4d69a04e0a00ce | [
"MIT"
] | 51 | 2016-11-25T14:53:55.000Z | 2018-01-16T09:58:56.000Z | # -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, datetime
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import FRI, SUN
from holidays.constants import (
JAN,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.holiday_base import HolidayBase
class SouthAfrica(HolidayBase):
    """South African public holidays, historic and current.

    ``_populate`` covers name changes over time (e.g. Dec 16), post-1994
    holidays, once-off election/presidential holidays, the post-1995
    Sunday-to-Monday observance rule, and holidays no longer observed.
    """
    country = "ZA"
    def __init__(self, **kwargs):
        # http://www.gov.za/about-sa/public-holidays
        # https://en.wikipedia.org/wiki/Public_holidays_in_South_Africa
        HolidayBase.__init__(self, **kwargs)
    def _populate(self, year):
        """Fill ``self`` with all holidays for ``year``."""
        # Observed since 1910, with a few name changes
        if year > 1909:
            self[date(year, 1, 1)] = "New Year's Day"
            e = easter(year)
            good_friday = e - rd(days=2)
            easter_monday = e + rd(days=1)
            self[good_friday] = "Good Friday"
            if year > 1979:
                self[easter_monday] = "Family Day"
            else:
                self[easter_monday] = "Easter Monday"
            if 1909 < year < 1952:
                dec_16_name = "Dingaan's Day"
            elif 1951 < year < 1980:
                dec_16_name = "Day of the Covenant"
            elif 1979 < year < 1995:
                dec_16_name = "Day of the Vow"
            else:
                dec_16_name = "Day of Reconciliation"
            self[date(year, DEC, 16)] = dec_16_name
            self[date(year, DEC, 25)] = "Christmas Day"
            if year > 1979:
                dec_26_name = "Day of Goodwill"
            else:
                dec_26_name = "Boxing Day"
            self[date(year, 12, 26)] = dec_26_name
        # Observed since 1995/1/1
        if year > 1994:
            self[date(year, MAR, 21)] = "Human Rights Day"
            self[date(year, APR, 27)] = "Freedom Day"
            self[date(year, MAY, 1)] = "Workers' Day"
            self[date(year, JUN, 16)] = "Youth Day"
            self[date(year, AUG, 9)] = "National Women's Day"
            self[date(year, SEP, 24)] = "Heritage Day"
        # Once-off public holidays
        national_election = "National and provincial government elections"
        y2k = "Y2K changeover"
        local_election = "Local government elections"
        presidential = "By presidential decree"
        municipal_election = "Municipal elections"
        if year == 1999:
            self[date(1999, JUN, 2)] = national_election
            self[date(1999, DEC, 31)] = y2k
        if year == 2000:
            self[date(2000, JAN, 2)] = y2k
        if year == 2004:
            self[date(2004, APR, 14)] = national_election
        if year == 2006:
            self[date(2006, MAR, 1)] = local_election
        if year == 2008:
            self[date(2008, MAY, 2)] = presidential
        if year == 2009:
            self[date(2009, APR, 22)] = national_election
        if year == 2011:
            self[date(2011, MAY, 18)] = local_election
            self[date(2011, DEC, 27)] = presidential
        if year == 2014:
            self[date(2014, MAY, 7)] = national_election
        if year == 2016:
            self[date(2016, AUG, 3)] = local_election
        if year == 2019:
            self[date(2019, MAY, 8)] = national_election
        if year == 2021:
            self[date(2021, NOV, 1)] = municipal_election
        # As of 1995/1/1, whenever a public holiday falls on a Sunday,
        # it rolls over to the following Monday
        for k, v in list(self.items()):
            if (
                self.observed
                and year > 1994
                and k.weekday() == SUN
                and k.year == year
            ):
                # Skip forward past any already-occupied days.
                add_days = 1
                while self.get(k + rd(days=add_days)) is not None:
                    add_days += 1
                self[k + rd(days=add_days)] = v + " (Observed)"
        # Historic public holidays no longer observed
        if 1951 < year < 1974:
            self[date(year, APR, 6)] = "Van Riebeeck's Day"
        elif 1979 < year < 1995:
            self[date(year, APR, 6)] = "Founder's Day"
        if 1986 < year < 1990:
            historic_workers_day = datetime(year, MAY, 1)
            # observed on first Friday in May
            while historic_workers_day.weekday() != FRI:
                historic_workers_day += rd(days=1)
            self[historic_workers_day] = "Workers' Day"
        if 1909 < year < 1994:
            ascension_day = e + rd(days=40)
            self[ascension_day] = "Ascension Day"
        if 1909 < year < 1952:
            self[date(year, MAY, 24)] = "Empire Day"
        if 1909 < year < 1961:
            self[date(year, MAY, 31)] = "Union Day"
        elif 1960 < year < 1994:
            self[date(year, MAY, 31)] = "Republic Day"
        if 1951 < year < 1961:
            queens_birthday = datetime(year, JUN, 7)
            # observed on second Monday in June
            while queens_birthday.weekday() != 0:
                queens_birthday += rd(days=1)
            self[queens_birthday] = "Queen's Birthday"
        if 1960 < year < 1974:
            self[date(year, JUL, 10)] = "Family Day"
        if 1909 < year < 1952:
            kings_birthday = datetime(year, AUG, 1)
            # observed on first Monday in August
            while kings_birthday.weekday() != 0:
                kings_birthday += rd(days=1)
            self[kings_birthday] = "King's Birthday"
        if 1951 < year < 1980:
            # observed on first Monday in September
            settlers_day = datetime(year, SEP, 1)
            while settlers_day.weekday() != 0:
                settlers_day += rd(days=1)
            self[settlers_day] = "Settlers' Day"
        if 1951 < year < 1994:
            self[date(year, OCT, 10)] = "Kruger Day"
class ZA(SouthAfrica):
    # Alias under the ISO 3166-1 alpha-2 code.
    pass
class ZAF(SouthAfrica):
    # Alias under the ISO 3166-1 alpha-3 code.
    pass
| 33.026042 | 78 | 0.543763 |
from datetime import date, datetime
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import FRI, SUN
from holidays.constants import (
JAN,
MAR,
APR,
MAY,
JUN,
JUL,
AUG,
SEP,
OCT,
NOV,
DEC,
)
from holidays.holiday_base import HolidayBase
class SouthAfrica(HolidayBase):
    """South African public holidays, historic and current."""
    country = "ZA"
    def __init__(self, **kwargs):
        HolidayBase.__init__(self, **kwargs)
    def _populate(self, year):
        """Fill ``self`` with all holidays for ``year``."""
        # Holidays observed since 1910 (with name changes over time).
        if year > 1909:
            self[date(year, 1, 1)] = "New Year's Day"
            e = easter(year)
            good_friday = e - rd(days=2)
            easter_monday = e + rd(days=1)
            self[good_friday] = "Good Friday"
            if year > 1979:
                self[easter_monday] = "Family Day"
            else:
                self[easter_monday] = "Easter Monday"
            if 1909 < year < 1952:
                dec_16_name = "Dingaan's Day"
            elif 1951 < year < 1980:
                dec_16_name = "Day of the Covenant"
            elif 1979 < year < 1995:
                dec_16_name = "Day of the Vow"
            else:
                dec_16_name = "Day of Reconciliation"
            self[date(year, DEC, 16)] = dec_16_name
            self[date(year, DEC, 25)] = "Christmas Day"
            if year > 1979:
                dec_26_name = "Day of Goodwill"
            else:
                dec_26_name = "Boxing Day"
            self[date(year, 12, 26)] = dec_26_name
        # Holidays observed since 1995-01-01.
        if year > 1994:
            self[date(year, MAR, 21)] = "Human Rights Day"
            self[date(year, APR, 27)] = "Freedom Day"
            self[date(year, MAY, 1)] = "Workers' Day"
            self[date(year, JUN, 16)] = "Youth Day"
            self[date(year, AUG, 9)] = "National Women's Day"
            self[date(year, SEP, 24)] = "Heritage Day"
        # Once-off public holidays (elections, Y2K, presidential decrees).
        national_election = "National and provincial government elections"
        y2k = "Y2K changeover"
        local_election = "Local government elections"
        presidential = "By presidential decree"
        municipal_election = "Municipal elections"
        if year == 1999:
            self[date(1999, JUN, 2)] = national_election
            self[date(1999, DEC, 31)] = y2k
        if year == 2000:
            self[date(2000, JAN, 2)] = y2k
        if year == 2004:
            self[date(2004, APR, 14)] = national_election
        if year == 2006:
            self[date(2006, MAR, 1)] = local_election
        if year == 2008:
            self[date(2008, MAY, 2)] = presidential
        if year == 2009:
            self[date(2009, APR, 22)] = national_election
        if year == 2011:
            self[date(2011, MAY, 18)] = local_election
            self[date(2011, DEC, 27)] = presidential
        if year == 2014:
            self[date(2014, MAY, 7)] = national_election
        if year == 2016:
            self[date(2016, AUG, 3)] = local_election
        if year == 2019:
            self[date(2019, MAY, 8)] = national_election
        if year == 2021:
            self[date(2021, NOV, 1)] = municipal_election
        # Since 1995, a holiday falling on a Sunday rolls to the next
        # free Monday (or later day if Monday is already a holiday).
        for k, v in list(self.items()):
            if (
                self.observed
                and year > 1994
                and k.weekday() == SUN
                and k.year == year
            ):
                add_days = 1
                while self.get(k + rd(days=add_days)) is not None:
                    add_days += 1
                self[k + rd(days=add_days)] = v + " (Observed)"
        # Historic public holidays no longer observed.
        if 1951 < year < 1974:
            self[date(year, APR, 6)] = "Van Riebeeck's Day"
        elif 1979 < year < 1995:
            self[date(year, APR, 6)] = "Founder's Day"
        if 1986 < year < 1990:
            # Observed on the first Friday in May.
            historic_workers_day = datetime(year, MAY, 1)
            while historic_workers_day.weekday() != FRI:
                historic_workers_day += rd(days=1)
            self[historic_workers_day] = "Workers' Day"
        if 1909 < year < 1994:
            ascension_day = e + rd(days=40)
            self[ascension_day] = "Ascension Day"
        if 1909 < year < 1952:
            self[date(year, MAY, 24)] = "Empire Day"
        if 1909 < year < 1961:
            self[date(year, MAY, 31)] = "Union Day"
        elif 1960 < year < 1994:
            self[date(year, MAY, 31)] = "Republic Day"
        if 1951 < year < 1961:
            queens_birthday = datetime(year, JUN, 7)
            # observed on second Monday in June
            while queens_birthday.weekday() != 0:
                queens_birthday += rd(days=1)
            self[queens_birthday] = "Queen's Birthday"
        if 1960 < year < 1974:
            self[date(year, JUL, 10)] = "Family Day"
        if 1909 < year < 1952:
            # Observed on the first Monday in August.
            kings_birthday = datetime(year, AUG, 1)
            while kings_birthday.weekday() != 0:
                kings_birthday += rd(days=1)
            self[kings_birthday] = "King's Birthday"
        if 1951 < year < 1980:
            # Observed on the first Monday in September.
            settlers_day = datetime(year, SEP, 1)
            while settlers_day.weekday() != 0:
                settlers_day += rd(days=1)
            self[settlers_day] = "Settlers' Day"
        if 1951 < year < 1994:
            self[date(year, OCT, 10)] = "Kruger Day"
class ZA(SouthAfrica):
    # Alias under the ISO 3166-1 alpha-2 code.
    pass
class ZAF(SouthAfrica):
    # Alias under the ISO 3166-1 alpha-3 code.
    pass
| true | true |
f71b157abd84e0fcedf83134af5ef10acd81bdb3 | 1,972 | py | Python | tests/test_settings.py | Gilnaa/Hydra | 4d24863819bdcdd7c757e2dfb8a8996b009521b6 | [
"MIT"
] | 5 | 2019-07-11T09:24:29.000Z | 2020-10-07T08:11:29.000Z | tests/test_settings.py | Gilnaa/Hydras | 4d24863819bdcdd7c757e2dfb8a8996b009521b6 | [
"MIT"
] | 3 | 2019-11-05T11:33:30.000Z | 2020-08-20T12:15:29.000Z | tests/test_settings.py | Gilnaa/Hydra | 4d24863819bdcdd7c757e2dfb8a8996b009521b6 | [
"MIT"
] | 2 | 2018-12-17T12:56:53.000Z | 2018-12-24T14:09:50.000Z | #!/usr/bin/env python
from .utils import *
# This struct's endianness follows the configured "target" endianness,
class TargetStruct(Struct):
    # u16 field serialized with whatever endianness is currently targeted.
    a = u16(0xAABB)
# while this struct's endianness is always big.
class SpecificStruct(Struct):
    # u16 field pinned to big-endian regardless of global/per-call settings.
    a = u16_be(0xAABB)
class SettingsTests(HydrasTestCase):
    def test_priority(self):
        """Endianness precedence: field-level > per-call settings > global."""
        s = SpecificStruct()
        h = TargetStruct()
        # 1. Global - Make sure that the serialized struct reacts to the global settings.
        HydraSettings.target_endian = Endianness.LITTLE
        self.assertEqual(h.serialize(), b'\xBB\xAA')
        HydraSettings.target_endian = Endianness.BIG
        self.assertEqual(h.serialize(), b'\xAA\xBB')
        # 2. Serialization-settings - Make sure that the struct uses the overridden endianness
        HydraSettings.target_endian = Endianness.LITTLE
        self.assertEqual(h.serialize(HydraSettings(target_endian=Endianness.BIG)), b'\xAA\xBB')
        self.assertEqual(h, TargetStruct.deserialize(b'\xAA\xBB', HydraSettings(target_endian=Endianness.BIG)))
        HydraSettings.target_endian = Endianness.BIG
        self.assertEqual(h, TargetStruct.deserialize(b'\xBB\xAA', HydraSettings(target_endian=Endianness.LITTLE)))
        # 3. Field-settings - Make sure that the BE fields ignore any settings
        HydraSettings.target_endian = Endianness.LITTLE
        self.assertEqual(s.serialize(), b'\xAA\xBB')
        HydraSettings.target_endian = Endianness.BIG
        self.assertEqual(s.serialize(), b'\xAA\xBB')
        self.assertEqual(s.serialize(HydraSettings(target_endian=Endianness.BIG)), b'\xAA\xBB')
        self.assertEqual(s.serialize(HydraSettings(target_endian=Endianness.LITTLE)), b'\xAA\xBB')
        self.assertEqual(SpecificStruct.deserialize(b'\xAA\xBB', HydraSettings(target_endian=Endianness.BIG)), s)
        self.assertEqual(SpecificStruct.deserialize(b'\xAA\xBB', HydraSettings(target_endian=Endianness.LITTLE)), s)
if __name__ == '__main__':
unittest.main() | 40.244898 | 116 | 0.712475 |
from .utils import *
class TargetStruct(Struct):
    # u16 field serialized with whatever endianness is currently targeted.
    a = u16(0xAABB)
# while this struct's endianness is always big.
class SpecificStruct(Struct):
    # u16 field pinned to big-endian regardless of global/per-call settings.
    a = u16_be(0xAABB)
class SettingsTests(HydrasTestCase):
    def test_priority(self):
        """Endianness precedence: field-level > per-call settings > global."""
        s = SpecificStruct()
        h = TargetStruct()
        # 1. Global settings control TargetStruct's byte order.
        HydraSettings.target_endian = Endianness.LITTLE
        self.assertEqual(h.serialize(), b'\xBB\xAA')
        HydraSettings.target_endian = Endianness.BIG
        self.assertEqual(h.serialize(), b'\xAA\xBB')
        # 2. Per-call settings override the global ones.
        HydraSettings.target_endian = Endianness.LITTLE
        self.assertEqual(h.serialize(HydraSettings(target_endian=Endianness.BIG)), b'\xAA\xBB')
        self.assertEqual(h, TargetStruct.deserialize(b'\xAA\xBB', HydraSettings(target_endian=Endianness.BIG)))
        HydraSettings.target_endian = Endianness.BIG
        self.assertEqual(h, TargetStruct.deserialize(b'\xBB\xAA', HydraSettings(target_endian=Endianness.LITTLE)))
        # 3. BE-pinned fields ignore both global and per-call settings.
        HydraSettings.target_endian = Endianness.LITTLE
        self.assertEqual(s.serialize(), b'\xAA\xBB')
        HydraSettings.target_endian = Endianness.BIG
        self.assertEqual(s.serialize(), b'\xAA\xBB')
        self.assertEqual(s.serialize(HydraSettings(target_endian=Endianness.BIG)), b'\xAA\xBB')
        self.assertEqual(s.serialize(HydraSettings(target_endian=Endianness.LITTLE)), b'\xAA\xBB')
        self.assertEqual(SpecificStruct.deserialize(b'\xAA\xBB', HydraSettings(target_endian=Endianness.BIG)), s)
        self.assertEqual(SpecificStruct.deserialize(b'\xAA\xBB', HydraSettings(target_endian=Endianness.LITTLE)), s)
if __name__ == '__main__':
unittest.main() | true | true |
f71b1592752b5eb648bd828db7dbdcaf6507e648 | 2,687 | py | Python | legocollector/inventory/migrations/0001_initial.py | ericziethen/legocollector | 06aa984a5998979e7aa9c59e94a38633d653de55 | [
"MIT"
] | 1 | 2020-12-21T22:23:09.000Z | 2020-12-21T22:23:09.000Z | legocollector/inventory/migrations/0001_initial.py | ericziethen/legocollector | 06aa984a5998979e7aa9c59e94a38633d653de55 | [
"MIT"
] | 150 | 2019-08-28T20:20:01.000Z | 2020-07-12T07:09:05.000Z | legocollector/inventory/migrations/0001_initial.py | ericziethen/legocollector | 06aa984a5998979e7aa9c59e94a38633d653de55 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.4 on 2019-09-01 00:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; do not hand-edit the
    # operations below -- create a new migration for schema changes.
    # Creates Color, PartCategory, Part and UserPart (user/part/color unique).
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Color',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200, unique=True)),
                ('rgb', models.CharField(max_length=6)),
                ('transparent', models.BooleanField()),
            ],
        ),
        migrations.CreateModel(
            name='PartCategory',
            fields=[
                ('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Part',
            fields=[
                ('part_num', models.CharField(max_length=20, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=250)),
                ('width', models.PositiveIntegerField(blank=True, null=True)),
                ('height', models.PositiveIntegerField(blank=True, null=True)),
                ('length', models.PositiveIntegerField(blank=True, null=True)),
                ('stud_count', models.PositiveIntegerField(blank=True, null=True)),
                ('multi_height', models.BooleanField(blank=True, null=True)),
                ('uneven_dimensions', models.BooleanField(blank=True, null=True)),
                ('category_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parts', to='inventory.PartCategory')),
            ],
        ),
        migrations.CreateModel(
            name='UserPart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to='inventory.Color')),
                ('part_num', models.ForeignKey(db_column='part_num_id', on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to='inventory.Part')),
                ('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'unique_together': {('user_id', 'part_num', 'color')},
            },
        ),
    ]
| 44.783333 | 166 | 0.596576 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Color',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=200, unique=True)),
('rgb', models.CharField(max_length=6)),
('transparent', models.BooleanField()),
],
),
migrations.CreateModel(
name='PartCategory',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=200, unique=True)),
],
),
migrations.CreateModel(
name='Part',
fields=[
('part_num', models.CharField(max_length=20, primary_key=True, serialize=False)),
('name', models.CharField(max_length=250)),
('width', models.PositiveIntegerField(blank=True, null=True)),
('height', models.PositiveIntegerField(blank=True, null=True)),
('length', models.PositiveIntegerField(blank=True, null=True)),
('stud_count', models.PositiveIntegerField(blank=True, null=True)),
('multi_height', models.BooleanField(blank=True, null=True)),
('uneven_dimensions', models.BooleanField(blank=True, null=True)),
('category_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parts', to='inventory.PartCategory')),
],
),
migrations.CreateModel(
name='UserPart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to='inventory.Color')),
('part_num', models.ForeignKey(db_column='part_num_id', on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to='inventory.Part')),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('user_id', 'part_num', 'color')},
},
),
]
| true | true |
f71b1688f1e13f2ccad8ec2d54fddf589eeb3e82 | 831 | py | Python | src/encoded/tests/test_schema_annotation.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 102 | 2015-05-20T01:17:43.000Z | 2022-03-07T06:03:55.000Z | src/encoded/tests/test_schema_annotation.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 901 | 2015-01-07T23:11:57.000Z | 2022-03-18T13:56:12.000Z | src/encoded/tests/test_schema_annotation.py | procha2/encoded | e9f122362b71f3b8641023b8d2d5ad531d3484b7 | [
"MIT"
] | 65 | 2015-02-06T23:00:26.000Z | 2022-01-22T07:58:44.000Z | import pytest
def test_annotation_with_subtype(
testapp,
submitter_testapp,
annotation_dhs,
annotation_ccre_2,
annotation_dataset
):
testapp.patch_json(
annotation_dhs['@id'],
{'annotation_subtype': 'all'},
status=200)
# annotation_subtype can only be submitted with admin permissions
res = testapp.post_json('/annotation', annotation_ccre_2, status=201)
submitter_testapp.patch_json(
res.json['@graph'][0]['@id'],
{'annotation_subtype': 'all'}, status=422)
testapp.patch_json(
res.json['@graph'][0]['@id'],
{'annotation_subtype': 'all'}, status=200)
# annotation_subtype may be submitted for cCRE or rDHS only
testapp.patch_json(
annotation_dataset['@id'],
{'annotation_subtype': 'all'},
status=422)
| 29.678571 | 73 | 0.649819 | import pytest
def test_annotation_with_subtype(
testapp,
submitter_testapp,
annotation_dhs,
annotation_ccre_2,
annotation_dataset
):
testapp.patch_json(
annotation_dhs['@id'],
{'annotation_subtype': 'all'},
status=200)
res = testapp.post_json('/annotation', annotation_ccre_2, status=201)
submitter_testapp.patch_json(
res.json['@graph'][0]['@id'],
{'annotation_subtype': 'all'}, status=422)
testapp.patch_json(
res.json['@graph'][0]['@id'],
{'annotation_subtype': 'all'}, status=200)
testapp.patch_json(
annotation_dataset['@id'],
{'annotation_subtype': 'all'},
status=422)
| true | true |
f71b16a8b249c28b8c45d7997b87d3e69e8d654b | 592 | py | Python | server/shserver/Token.py | AsherYang/ThreeLine | 351dc8bfd1c0a536ffbf36ce8b1af953cc71f93a | [
"Apache-2.0"
] | 1 | 2017-05-02T10:02:28.000Z | 2017-05-02T10:02:28.000Z | server/shserver/Token.py | AsherYang/ThreeLine | 351dc8bfd1c0a536ffbf36ce8b1af953cc71f93a | [
"Apache-2.0"
] | null | null | null | server/shserver/Token.py | AsherYang/ThreeLine | 351dc8bfd1c0a536ffbf36ce8b1af953cc71f93a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#-*- coding:utf-8 -*-
"""
Author: AsherYang
Email: 1181830457@qq.com
Date: 2017/7/24
"""
class Token():
@property
def access_token(self):
return self.access_token
@property
def access_token(self, value):
self.access_token = value
@property
def expire_in(self):
return self.expire_in
@property
def expire_in(self, value):
self.expire_in = value
@property
def update_time(self):
return self.update_time
@property
def update_time(self, value):
self.update_time = value
| 16.444444 | 34 | 0.621622 |
class Token():
@property
def access_token(self):
return self.access_token
@property
def access_token(self, value):
self.access_token = value
@property
def expire_in(self):
return self.expire_in
@property
def expire_in(self, value):
self.expire_in = value
@property
def update_time(self):
return self.update_time
@property
def update_time(self, value):
self.update_time = value
| true | true |
f71b16c4ec2d0b67810f480a086abbd14c87ad42 | 3,454 | py | Python | tempest/api/image/v2/test_images_metadefs_namespaces.py | mail2nsrajesh/tempest | 1a3b3dc50b418d3a15839830d7d1ff88c8c76cff | [
"Apache-2.0"
] | 1 | 2020-01-14T03:20:44.000Z | 2020-01-14T03:20:44.000Z | tempest/api/image/v2/test_images_metadefs_namespaces.py | mail2nsrajesh/tempest | 1a3b3dc50b418d3a15839830d7d1ff88c8c76cff | [
"Apache-2.0"
] | 1 | 2019-08-08T10:36:44.000Z | 2019-08-09T05:58:23.000Z | tempest/api/image/v2/test_images_metadefs_namespaces.py | mail2nsrajesh/tempest | 1a3b3dc50b418d3a15839830d7d1ff88c8c76cff | [
"Apache-2.0"
] | 5 | 2016-06-24T20:03:52.000Z | 2020-02-05T10:14:54.000Z | # Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.image import base
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class MetadataNamespacesTest(base.BaseV2ImageTest):
"""Test the Metadata definition Namespaces basic functionality"""
@decorators.idempotent_id('319b765e-7f3d-4b3d-8b37-3ca3876ee768')
def test_basic_metadata_definition_namespaces(self):
# get the available resource types and use one resource_type
body = self.resource_types_client.list_resource_types()
resource_name = body['resource_types'][0]['name']
name = [{'name': resource_name}]
namespace_name = data_utils.rand_name('namespace')
# create the metadef namespace
body = self.namespaces_client.create_namespace(
namespace=namespace_name,
visibility='public',
description='Tempest',
display_name=namespace_name,
resource_type_associations=name,
protected=True)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self._cleanup_namespace, namespace_name)
# list namespaces
bodys = self.namespaces_client.list_namespaces()['namespaces']
body = [namespace['namespace'] for namespace in bodys]
self.assertIn(namespace_name, body)
# get namespace details
body = self.namespaces_client.show_namespace(namespace_name)
self.assertEqual(namespace_name, body['namespace'])
self.assertEqual('public', body['visibility'])
# unable to delete protected namespace
self.assertRaises(lib_exc.Forbidden,
self.namespaces_client.delete_namespace,
namespace_name)
# update the visibility to private and protected to False
body = self.namespaces_client.update_namespace(
namespace=namespace_name,
description='Tempest',
visibility='private',
display_name=namespace_name,
protected=False)
self.assertEqual('private', body['visibility'])
self.assertEqual(False, body['protected'])
# now able to delete the non-protected namespace
self.namespaces_client.delete_namespace(namespace_name)
def _cleanup_namespace(self, namespace_name):
body = self.namespaces_client.show_namespace(namespace_name)
self.assertEqual(namespace_name, body['namespace'])
body = self.namespaces_client.update_namespace(
namespace=namespace_name,
description='Tempest',
visibility='private',
display_name=namespace_name,
protected=False)
self.namespaces_client.delete_namespace(namespace_name)
| 44.857143 | 78 | 0.689925 |
from tempest.api.image import base
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class MetadataNamespacesTest(base.BaseV2ImageTest):
@decorators.idempotent_id('319b765e-7f3d-4b3d-8b37-3ca3876ee768')
def test_basic_metadata_definition_namespaces(self):
body = self.resource_types_client.list_resource_types()
resource_name = body['resource_types'][0]['name']
name = [{'name': resource_name}]
namespace_name = data_utils.rand_name('namespace')
body = self.namespaces_client.create_namespace(
namespace=namespace_name,
visibility='public',
description='Tempest',
display_name=namespace_name,
resource_type_associations=name,
protected=True)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self._cleanup_namespace, namespace_name)
bodys = self.namespaces_client.list_namespaces()['namespaces']
body = [namespace['namespace'] for namespace in bodys]
self.assertIn(namespace_name, body)
body = self.namespaces_client.show_namespace(namespace_name)
self.assertEqual(namespace_name, body['namespace'])
self.assertEqual('public', body['visibility'])
self.assertRaises(lib_exc.Forbidden,
self.namespaces_client.delete_namespace,
namespace_name)
body = self.namespaces_client.update_namespace(
namespace=namespace_name,
description='Tempest',
visibility='private',
display_name=namespace_name,
protected=False)
self.assertEqual('private', body['visibility'])
self.assertEqual(False, body['protected'])
self.namespaces_client.delete_namespace(namespace_name)
def _cleanup_namespace(self, namespace_name):
body = self.namespaces_client.show_namespace(namespace_name)
self.assertEqual(namespace_name, body['namespace'])
body = self.namespaces_client.update_namespace(
namespace=namespace_name,
description='Tempest',
visibility='private',
display_name=namespace_name,
protected=False)
self.namespaces_client.delete_namespace(namespace_name)
| true | true |
f71b16cbd42077bfe153963de61a031de29f4b72 | 49,097 | py | Python | teuthology/task/kernel.py | sunilangadi2/teuthology | d19730ce070d52d0dd5e71443f02a8d1b7912493 | [
"MIT"
] | null | null | null | teuthology/task/kernel.py | sunilangadi2/teuthology | d19730ce070d52d0dd5e71443f02a8d1b7912493 | [
"MIT"
] | null | null | null | teuthology/task/kernel.py | sunilangadi2/teuthology | d19730ce070d52d0dd5e71443f02a8d1b7912493 | [
"MIT"
] | null | null | null | """
Kernel installation task
"""
import logging
import os
import re
import shlex
from io import StringIO
from teuthology.util.compat import urljoin
from teuthology import misc as teuthology
from teuthology.parallel import parallel
from teuthology.config import config as teuth_config
from teuthology.orchestra import run
from teuthology.exceptions import (
UnsupportedPackageTypeError,
ConfigError,
VersionNotFoundError,
)
from teuthology.packaging import (
install_package,
get_koji_build_info,
get_kojiroot_base_url,
get_koji_package_name,
get_koji_task_rpm_info,
get_koji_task_result,
get_builder_project,
)
log = logging.getLogger(__name__)
CONFIG_DEFAULT = {'branch': 'master'}
TIMEOUT_DEFAULT = 300
VERSION_KEYS = ['branch', 'tag', 'sha1', 'deb', 'rpm', 'koji', 'koji_task']
def normalize_config(ctx, config):
    """
    Returns a config whose keys are all real roles.
    Generic roles (client, mon, osd, etc.) are replaced with
    the actual roles (client.0, client.1, etc.). If the config
    specifies a different version for a specific role, this is
    unchanged.

    For example, with 4 OSDs this::

         osd:
           tag: v3.0
           kdb: true
         osd.1:
           branch: new_btrfs
           kdb: false
         osd.3:
           deb: /path/to/linux-whatever.deb

    is transformed into::

         osd.0:
           tag: v3.0
           kdb: true
         osd.1:
           branch: new_btrfs
           kdb: false
         osd.2:
           tag: v3.0
           kdb: true
         osd.3:
           deb: /path/to/linux-whatever.deb

    If config is None or just specifies a version to use,
    it is applied to all nodes.

    :param ctx: Context
    :param config: Configuration
    :returns: a new dict keyed by real role names; each value is a copy,
              so mutating one role's config does not affect the others
    """
    # Case 1: config is empty, or contains only version/kdb/flavor keys --
    # it is a single kernel spec that applies to every role in the cluster.
    if not config or \
            len([x for x in config.keys() if x in
                 VERSION_KEYS + ['kdb', 'flavor']]) == len(config.keys()):
        new_config = {}
        if not config:
            config = CONFIG_DEFAULT
        for role in teuthology.all_roles(ctx.cluster):
            new_config[role] = config.copy()
        return new_config

    # Case 2: config is keyed by role (generic or specific).
    new_config = {}
    for role, role_config in config.items():
        if role_config is None:
            role_config = CONFIG_DEFAULT
        if '.' in role:
            # already a specific role like 'osd.1' -- keep as-is
            new_config[role] = role_config.copy()
        else:
            # generic role like 'osd' -- fan out to every id of that type
            for id_ in teuthology.all_roles_of_type(ctx.cluster, role):
                name = '{type}.{id}'.format(type=role, id=id_)
                # specific overrides generic
                if name not in config:
                    new_config[name] = role_config.copy()
    return new_config
def normalize_and_apply_overrides(ctx, config, overrides):
    """
    kernel task config is hierarchical and needs to be transformed into
    a normal form, see normalize_config() for details.  Applying overrides is
    also more involved compared to other tasks because of the number of ways
    a version of the kernel to install can be specified.

    Returns a (normalized config, timeout) tuple.

    :param ctx: Context
    :param config: Configuration
    :param overrides: override dict from ctx; an override timeout wins over
                      the one in config
    """
    timeout = TIMEOUT_DEFAULT
    # 'timeout' is not a per-role key, so pull it out before normalizing
    if 'timeout' in config:
        timeout = config.pop('timeout')

    config = normalize_config(ctx, config)
    log.debug('normalized config %s' % config)
    if 'timeout' in overrides:
        timeout = overrides.pop('timeout')
    if overrides:
        overrides = normalize_config(ctx, overrides)
        log.debug('normalized overrides %s' % overrides)

        # Handle a case when a version specified with one type of version key
        # is overridden by a version specified with another type of version key
        # (e.g. 'branch: foo' is overridden with 'tag: bar').  To be able to
        # use deep_merge(), drop all version keys from the original config if
        # the corresponding override has a version key.
        for role, role_config in config.items():
            if (role in overrides and
                    any(k in overrides[role] for k in VERSION_KEYS)):
                for k in VERSION_KEYS:
                    role_config.pop(k, None)
        teuthology.deep_merge(config, overrides)

    return (config, timeout)
def validate_config(ctx, config):
    """
    Verify that all roles sharing a host are configured with the same
    kernel, and remove the per-role entries from ``config`` as they are
    checked.

    :param ctx: Context
    :param config: Configuration (mutated in place)
    :raises AssertionError: if two roles on one host request different kernels
    """
    for roles_for_host in ctx.cluster.remotes.values():
        host_kernel = None
        for role in roles_for_host:
            requested = config.get(role, host_kernel)
            if host_kernel is None:
                host_kernel = requested
            elif requested is not None:
                assert host_kernel == requested, \
                    "everything on the same host must use the same kernel"
            config.pop(role, None)
def need_to_install(ctx, role, version):
    """
    Check to see if we need to install a kernel.  Get the version of the
    currently running kernel, and compare it against the value passed in.

    :param ctx: Context
    :param role: Role
    :param version: value to compare against (used in checking), can be either
                    a utsrelease string (e.g. '3.13.0-rc3-ceph-00049-ge2817b3')
                    or a sha1.
    :returns: True if a different kernel needs to be installed, False if
              the running kernel already matches ``version``
    """
    ret = True
    log.info('Checking kernel version of {role}, want "{ver}"...'.format(
        role=role, ver=version))
    uname_fp = StringIO()
    # capture `uname -r` from the one remote mapped to this role
    ctx.cluster.only(role).run(
        args=[
            'uname',
            '-r',
            ],
        stdout=uname_fp,
        )
    cur_version = uname_fp.getvalue().rstrip('\n')
    log.debug('current kernel version is {ver} vs {want}'.format(ver=cur_version,
                  want=version))

    # a '.' means version is a utsrelease string; otherwise treat it as a sha1
    if '.' in str(version):
        # version is utsrelease, yay
        if cur_version == version:
            log.debug('utsrelease strings match, do not need to install')
            ret = False
    else:
        # version is sha1, need to try to extract sha1 from cur_version
        match = re.search('[-_]g([0-9a-f]{6,40})', cur_version)
        if match:
            cur_sha1 = match.group(1)
            log.debug('extracting sha1, {ver} -> {sha1}'.format(
                ver=cur_version, sha1=cur_sha1))
            # compare only the common prefix -- embedded sha1s may be
            # abbreviated on either side, but must be at least 6 chars
            m = min(len(cur_sha1), len(version))
            assert m >= 6, "cur_sha1 and/or version is too short, m = %d" % m
            if cur_sha1[0:m] == version[0:m]:
                log.debug('extracted sha1 matches, do not need to install')
                ret = False
        else:
            log.debug('failed to parse current kernel version')
    uname_fp.close()
    return ret
def install_firmware(ctx, config):
    """
    Go to the github to get the latest firmware.

    Distro kernels skip this entirely; rpm-based remotes just upgrade
    the linux-firmware package; deb-based remotes clone/update the
    upstream linux-firmware git tree into /lib/firmware/updates.

    :param ctx: Context
    :param config: Configuration
    """
    linux_firmware_git_upstream = 'git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git'
    # allow the lab config to point at a local mirror of the firmware repo
    uri = teuth_config.linux_firmware_git_url or linux_firmware_git_upstream
    fw_dir = '/lib/firmware/updates'

    for role in config.keys():
        if isinstance(config[role], str) and config[role].find('distro') >= 0:
            log.info('Skipping firmware on distro kernel');
            return
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        package_type = role_remote.os.package_type
        if package_type == 'rpm':
            # rpm distros ship firmware as a package; no git checkout needed
            role_remote.run(args=[
                'sudo', 'yum', 'upgrade', '-y', 'linux-firmware',
            ])
            continue
        log.info('Installing linux-firmware on {role}...'.format(role=role))
        role_remote.run(
            args=[
                # kludge around mysterious 0-byte .git/HEAD files
                'cd', fw_dir,
                run.Raw('&&'),
                'test', '-d', '.git',
                run.Raw('&&'),
                'test', '!', '-s', '.git/HEAD',
                run.Raw('&&'),
                'sudo', 'rm', '-rf', '.git',
                run.Raw(';'),
                # init
                'sudo', 'install', '-d', '-m0755', fw_dir,
                run.Raw('&&'),
                'cd', fw_dir,
                run.Raw('&&'),
                'sudo', 'git', 'init',
                ],
            )
        # add the 'origin' remote only if it does not already exist
        role_remote.run(
            args=[
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir, 'config',
                '--get', 'remote.origin.url', run.Raw('>/dev/null'),
                run.Raw('||'),
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir,
                'remote', 'add', 'origin', uri,
                ],
            )
        # In case the remote already existed, set its url
        role_remote.run(
            args=[
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir, 'remote',
                'set-url', 'origin', uri, run.Raw('>/dev/null')
                ]
            )
        # discard any local state and sync to upstream master
        role_remote.run(
            args=[
                'cd', fw_dir,
                run.Raw('&&'),
                'sudo', 'git', 'fetch', 'origin',
                run.Raw('&&'),
                'sudo', 'git', 'reset', '--hard', 'origin/master'
                ],
            )
def gitbuilder_pkg_name(remote):
    """
    Return the canonical kernel package filename for the remote's
    package system: 'kernel.x86_64.rpm' on rpm systems,
    'linux-image.deb' on deb systems.

    :raises UnsupportedPackageTypeError: for any other package type
    """
    names_by_type = {
        'rpm': 'kernel.x86_64.rpm',
        'deb': 'linux-image.deb',
    }
    try:
        return names_by_type[remote.os.package_type]
    except KeyError:
        raise UnsupportedPackageTypeError(remote)
def remote_pkg_path(remote):
    """
    Return the path on the remote where kernel packages are staged.

    This is where kernel packages are copied over (in case of local
    packages) or downloaded to (in case of gitbuilder packages) and
    then installed from.
    """
    return '/tmp/' + gitbuilder_pkg_name(remote)
def download_kernel(ctx, config):
    """
    Supply each remote with a kernel package:
      - local kernels are copied over
      - gitbuilder kernels are downloaded
      - nothing is done for distro kernels

    Downloads/copies run in parallel; this function waits for all of
    them to finish before returning.

    :param ctx: Context
    :param config: Configuration
    """
    procs = {}
    for role, src in config.items():
        needs_download = False

        if src == 'distro':
            # don't need to download distro kernels
            log.debug("src is distro, skipping download");
            continue

        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, dict):
            # we're downloading a kernel from koji, the src dict here
            # is the build_info retrieved from koji using get_koji_build_info
            if src.get("id"):
                build_id = src["id"]
                log.info("Downloading kernel with build_id {build_id} on {role}...".format(
                    build_id=build_id,
                    role=role
                ))
                needs_download = True
                baseurl = get_kojiroot_base_url(src)
                pkg_name = get_koji_package_name("kernel", src)
            elif src.get("task_id"):
                needs_download = True
                log.info("Downloading kernel with task_id {task_id} on {role}...".format(
                    task_id=src["task_id"],
                    role=role
                ))
                baseurl = src["base_url"]
                # this var is also poorly named as it's not the package name,
                # but the full name of the rpm file to download.
                pkg_name = src["rpm_name"]
        elif src.find('/') >= 0:
            # local package - src is path
            log.info('Copying kernel package {path} to {role}...'.format(
                path=src, role=role))
            role_remote.put_file(src,remote_pkg_path(role_remote))
        else:
            # gitbuilder package - src is sha1
            log.info('Downloading kernel {sha1} on {role}...'.format(
                sha1=src,
                role=role,
            ))
            needs_download = True
            builder = get_builder_project()(
                'kernel',
                {'sha1': src},
                ctx=ctx,
                remote=role_remote,
            )
            if teuth_config.use_shaman:
                # shaman-hosted builds use a different URL/package layout
                # than classic gitbuilder
                if role_remote.os.package_type == 'rpm':
                    arch = builder.arch
                    baseurl = urljoin(
                        builder.base_url,
                        '/'.join([arch, ''])
                    )
                    pkg_name = "kernel-%s.%s.rpm" % (
                        builder.version,
                        arch,
                    )
                elif role_remote.os.package_type == 'deb':
                    arch = 'amd64' # FIXME
                    baseurl = urljoin(
                        builder.base_url,
                        '/'.join([
                            'pool', 'main', 'l',
                            'linux-%s' % builder.scm_version, ''
                        ])
                    )
                    pkg_name = 'linux-image-%s_%s_%s.deb' % (
                        builder.scm_version,
                        builder.version,
                        arch,
                    )
            else:
                baseurl = builder.base_url + "/"
                pkg_name = gitbuilder_pkg_name(role_remote)

            log.info("fetching, builder baseurl is %s", baseurl)

        if needs_download:
            # fetch asynchronously; echoing the package name into wget's
            # --input-file lets --base supply the directory part of the URL
            proc = role_remote.run(
                args=[
                    'rm', '-f', remote_pkg_path(role_remote),
                    run.Raw('&&'),
                    'echo',
                    pkg_name,
                    run.Raw('|'),
                    'wget',
                    '-nv',
                    '-O',
                    remote_pkg_path(role_remote),
                    '--base={url}'.format(url=baseurl),
                    '--input-file=-',
                ],
                wait=False)
            procs[role_remote.name] = proc

    # block until every remote has its package in place
    for name, proc in procs.items():
        log.debug('Waiting for download/copy to %s to complete...', name)
        proc.wait()
def _no_grub_link(in_file, remote, kernel_ver):
    """
    Copy and link kernel related files if grub cannot be used
    (as is the case in Arm kernels)

    :param in_file: kernel file or image file to be copied.
    :param remote: remote machine
    :param kernel_ver: kernel version
    """
    boot1 = '/boot/%s' % in_file
    boot2 = '%s.old' % boot1
    # preserve any existing file, e.g. /boot/vmlinuz -> /boot/vmlinuz.old
    remote.run(
        args=[
            'if', 'test', '-e', boot1, run.Raw(';'), 'then',
            'sudo', 'mv', boot1, boot2, run.Raw(';'), 'fi',],
        )
    # point the unversioned name at the versioned file for the new kernel
    remote.run(
        args=['sudo', 'ln', '-s', '%s-%s' % (in_file, kernel_ver) , boot1, ],
        )
def install_latest_rh_kernel(ctx, config):
    """
    Install the latest z-stream kernel on every remote in parallel and
    reboot each one so the new kernel takes effect.

    Set ``skip: true`` in the task config to make this a no-op.

    :param ctx: Context
    :param config: Configuration (may be None)
    """
    if config is None:
        config = {}
    if config.get('skip'):
        return
    with parallel() as p:
        for remote in ctx.cluster.remotes:
            p.spawn(update_rh_kernel, remote)
def update_rh_kernel(remote):
    """
    Update the kernel on one rpm-based remote to the latest available
    z-stream build via yum.  If a new kernel was actually installed,
    reboot the remote and wait for it to come back up.

    :param remote: remote to update
    """
    import time
    package_type = remote.os.package_type
    remote.run(args=['uname', '-a'])
    if package_type == 'rpm':
        update_log = remote.sh('sudo yum update -y kernel')
        log.info(update_log)
        if "Installed" in update_log:
            log.info("Kernel updated to latest z stream on %s", remote.shortname)
            log.info("Rebooting %s", remote.shortname)
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False)
            # give the reboot time to actually start before reconnecting
            time.sleep(40)
            log.info("Reconnecting after reboot")
            remote.reconnect(timeout=300)
            # log the kernel we ended up booting into
            remote.run(args=['uname', '-a'])
        elif 'No packages marked for update' in update_log:
            log.info("Latest version already installed on %s", remote.shortname)
def install_and_reboot(ctx, config):
    """
    Install and reboot the kernel.  This mostly performs remote
    installation operations.   The code does check for Arm images
    and skips grub operations if the kernel is Arm.  Otherwise, it
    extracts kernel titles from submenu entries and makes the appropriate
    grub calls.   The assumptions here are somewhat simplified in that
    it expects kernel entries to be present under submenu entries.

    Reboots are issued asynchronously; this function waits for all of
    the triggering commands to complete, not for the machines to come
    back up (see wait_for_reboot()).

    :param ctx: Context
    :param config: Configuration
    """
    procs = {}
    kernel_title = ''
    for role, src in config.items():
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, str) and src.find('distro') >= 0:
            log.info('Installing distro kernel on {role}...'.format(role=role))
            install_kernel(role_remote, version=src)
            continue

        log.info('Installing kernel {src} on {role}...'.format(src=src,
                                                               role=role))
        package_type = role_remote.os.package_type
        if package_type == 'rpm':
            # force-install even if the same or an older version is present
            proc = role_remote.run(
                args=[
                    'sudo',
                    'rpm',
                    '-ivh',
                    '--oldpackage',
                    '--replacefiles',
                    '--replacepkgs',
                    remote_pkg_path(role_remote),
                ])
            # install_kernel() handles initrd/grub and issues the reboot
            install_kernel(role_remote, remote_pkg_path(role_remote))
            continue

        # TODO: Refactor this into install_kernel() so that it handles all
        # cases for both rpm and deb packages.
        proc = role_remote.run(
            args=[
                # install the kernel deb
                'sudo',
                'dpkg',
                '-i',
                remote_pkg_path(role_remote),
                ],
            )

        # collect kernel image name from the .deb
        kernel_title = get_image_version(role_remote,
                                         remote_pkg_path(role_remote))
        log.info('searching for kernel {}'.format(kernel_title))

        if kernel_title.endswith("-highbank"):
            # Arm (highbank) boxes have no grub; link the kernel files
            # manually and reboot straight away
            _no_grub_link('vmlinuz', role_remote, kernel_title)
            _no_grub_link('initrd.img', role_remote, kernel_title)
            proc = role_remote.run(
                args=[
                    'sudo',
                    'shutdown',
                    '-r',
                    'now',
                    ],
                wait=False,
            )
            procs[role_remote.name] = proc
            continue

        # look for menuentry for our kernel, and collect any
        # submenu entries for their titles.  Assume that if our
        # kernel entry appears later in the file than a submenu entry,
        # it's actually nested under that submenu.  If it gets more
        # complex this will totally break.

        kernel_entries = role_remote.sh([
            'egrep',
            '(submenu|menuentry.*' + kernel_title + ').*{',
            '/boot/grub/grub.cfg'
        ]).split('\n')
        submenu_title = ''
        default_title = ''
        for l in kernel_entries:
            fields = shlex.split(l)
            if len(fields) >= 2:
                command, title = fields[:2]
                if command == 'submenu':
                    submenu_title = title + '>'
                if command == 'menuentry':
                    if title.endswith(kernel_title):
                        default_title = title
                        break
        log.info('submenu_title:{}'.format(submenu_title))
        log.info('default_title:{}'.format(default_title))

        proc = role_remote.run(
            args=[
                # use the title(s) to construct the content of
                # the grub menu entry, so we can default to it.
                '/bin/echo',
                '-e',
                r'cat <<EOF\nset default="' + submenu_title + \
                default_title + r'"\nEOF\n',
                # make it look like an emacs backup file so
                # unfortunately timed update-grub runs don't pick it
                # up yet; use sudo tee so we are able to write to /etc
                run.Raw('|'),
                'sudo',
                'tee',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('>/dev/null'),
                run.Raw('&&'),
                'sudo',
                'chmod',
                'a+x',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('&&'),
                'sudo',
                'mv',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                '/etc/grub.d/01_ceph_kernel',
                # update grub again so it accepts our default
                run.Raw('&&'),
                'sudo',
                'update-grub',
                run.Raw('&&'),
                'rm',
                remote_pkg_path(role_remote),
                run.Raw('&&'),
                # work around a systemd issue, where network gets shut down
                # before ssh can close its session
                run.Raw('('),
                'sleep',
                '1',
                run.Raw('&&'),
                'sudo',
                'shutdown',
                '-r',
                'now',
                run.Raw('&'),
                run.Raw(')'),
                ],
            wait=False,
        )
        procs[role_remote.name] = proc

    # wait only for the install/reboot-trigger commands themselves
    for name, proc in procs.items():
        log.debug('Waiting for install on %s to complete...', name)
        proc.wait()
def enable_disable_kdb(ctx, config):
    """
    Enable kdb on remote machines in use. Disable on those that are
    not in use.

    :param ctx: Context
    :param config: Configuration mapping role -> bool (whether to enable kdb)
    """
    for role, enable in config.items():
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        # mira machines expose their console on a different serial port
        if "mira" in role_remote.name:
            serialdev = "ttyS2"
        else:
            serialdev = "ttyS1"
        if enable:
            log.info('Enabling kdb on {role}...'.format(role=role))
            try:
                role_remote.run(
                    args=[
                        'echo', serialdev,
                        run.Raw('|'),
                        'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc'
                        ])
            except run.CommandFailedError:
                # the sysfs file is absent on kernels built without kgdb
                log.warning('Kernel does not support kdb')
        else:
            log.info('Disabling kdb on {role}...'.format(role=role))
            # Add true pipe so command doesn't fail on kernel without kdb support.
            try:
                role_remote.run(
                    args=[
                        'echo', '',
                        run.Raw('|'),
                        'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc',
                        run.Raw('|'),
                        'true',
                        ])
            except run.CommandFailedError:
                log.warning('Kernel does not support kdb')
def wait_for_reboot(ctx, need_install, timeout, distro=False):
    """
    Loop reconnecting and checking kernel versions until
    they're all correct or the timeout is exceeded.

    :param ctx: Context
    :param need_install: dict of client -> expected kernel version; entries
                         are removed as each client comes up on the right
                         kernel.
    :param timeout: number of seconds before we timeout.
    :param distro: treat the expected version as a distro kernel; also
                   flipped to True per-iteration when an entry's value
                   contains 'distro'
    """
    import time
    # do not try to reconnect immediately after triggering the reboot,
    # because the reboot sequence might not have started yet (!) --
    # see https://tracker.ceph.com/issues/44187
    time.sleep(30)
    starttime = time.time()
    while need_install:
        teuthology.reconnect(ctx, timeout)
        for client in list(need_install.keys()):
            if 'distro' in str(need_install[client]):
                distro = True
            log.info('Checking client {client} for new kernel version...'.format(client=client))
            try:
                if distro:
                    (remote,) = ctx.cluster.only(client).remotes.keys()
                    assert not need_to_install_distro(remote), \
                        'failed to install new distro kernel version within timeout'

                else:
                    assert not need_to_install(ctx, client, need_install[client]), \
                        'failed to install new kernel version within timeout'
                del need_install[client]
            except Exception:
                log.exception("Saw exception")
                # ignore connection resets and asserts while time is left
                if time.time() - starttime > timeout:
                    raise
        time.sleep(1)
def get_version_of_running_kernel(remote):
    """
    Return the running kernel version in a form that can be compared
    with the output of "rpm -q kernel...".

    On openSUSE/SLE, "uname -r" reports e.g. 4.12.14-lp151.28.36-default
    while "rpm -q kernel-default" reports 4.12.14-lp151.28.36.1.x86_64;
    the trailing "-default" is stripped so the former can meaningfully
    be checked for containment in the latter.
    """
    dist = remote.os.name
    uname_r = remote.sh("uname -r").strip()
    if dist in ('opensuse', 'sle'):
        return re.sub(r"-default$", "", uname_r)
    return uname_r
def need_to_install_distro(remote):
    """
    Installing kernels on rpm won't setup grub/boot into them.  This installs
    the newest kernel package and checks its version and compares against
    the running kernel (uname -r).  Similar check for deb.

    :returns: False if running the newest distro kernel. Returns the version of
              the newest if it is not running.
    """
    dist_release = remote.os.name
    package_type = remote.os.package_type
    current = get_version_of_running_kernel(remote)
    log.info("Running kernel on {node}: {version}".format(
        node=remote.shortname, version=current))
    installed_version = None
    if package_type == 'rpm':
        # install (or confirm presence of) the newest kernel package
        if dist_release in ['opensuse', 'sle']:
            install_stdout = remote.sh(
                'sudo zypper --non-interactive install kernel-default'
            )
        else:
            install_stdout = remote.sh(
                'sudo yum install -y kernel'
            )
        match = re.search(
            "Package (.*) already installed",
            install_stdout, flags=re.MULTILINE)
        if 'Nothing to do' in install_stdout:
            # package already present; probe yum's dry-run reinstall output
            # to learn which kernel version is currently running
            installed_version = match.groups()[0] if match else ''
            err_mess = StringIO()
            err_mess.truncate(0)
            remote.run(args=['echo', 'no', run.Raw('|'), 'sudo', 'yum',
                             'reinstall', 'kernel', run.Raw('||'), 'true'],
                       stderr=err_mess)
            reinstall_stderr = err_mess.getvalue()
            err_mess.close()
            if 'Skipping the running kernel' in reinstall_stderr:
                running_version = re.search(
                    "Skipping the running kernel: (.*)",
                    reinstall_stderr, flags=re.MULTILINE).groups()[0]
                if installed_version == running_version:
                    log.info(
                        'Newest distro kernel already installed and running')
                    return False
            else:
                # yum did not skip it, so the running kernel differs from the
                # installed one; reinstall to make sure boot files are set up
                remote.run(args=['sudo', 'yum', 'reinstall', '-y', 'kernel',
                                 run.Raw('||'), 'true'])
        newest = get_latest_image_version_rpm(remote)

    if package_type == 'deb':
        newest = get_latest_image_version_deb(remote, dist_release)

    # deb and rpm version strings may swap '-' and '_'; accept either form
    if current in newest or current.replace('-', '_') in newest:
        log.info('Newest distro kernel installed and running')
        return False
    log.info(
        'Not newest distro kernel. Current: {cur} Expected: {new}'.format(
            cur=current, new=newest))
    return newest
def maybe_generate_initrd_rpm(remote, path, version):
    """
    Generate an initrd with mkinitrd unless the kernel rpm carries an
    install hook (installkernel or kernel-install) that would do it
    on its own.

    :param remote: remote to operate on
    :param path: rpm package path
    :param version: kernel version to generate initrd for
                    e.g. 3.18.0-rc6-ceph-00562-g79a9fa5
    """
    scripts = remote.sh(['rpm', '--scripts', '-qp', path])
    hooks = ('bin/installkernel', 'bin/kernel-install')
    if any(hook in scripts for hook in hooks):
        return

    log.info("No installkernel or kernel-install hook in %s, "
             "will generate initrd for %s", path, version)
    remote.run(
        args=[
            'sudo',
            'mkinitrd',
            '--allow-missing',
            '-f',  # overwrite existing initrd
            '/boot/initramfs-' + version + '.img',
            version,
        ])
def install_kernel(remote, path=None, version=None):
    """
    A bit of misnomer perhaps - the actual kernel package is installed
    elsewhere, this function deals with initrd and grub.  Currently the
    following cases are handled:
      - local, gitbuilder, distro for rpm packages
      - distro for deb packages - see TODO in install_and_reboot()

    TODO: reboots should be issued from install_and_reboot()

    :param path: package path (for local and gitbuilder cases)
    :param version: for RPM distro kernels, pass this to update_grub_rpm
    """
    dist_release = remote.os.name
    templ = "install_kernel(remote={remote}, path={path}, version={version})"
    log.debug(templ.format(remote=remote, path=path, version=version))
    package_type = remote.os.package_type
    if package_type == 'rpm':
        if dist_release in ['opensuse', 'sle']:
            # FIXME
            pass
        else:
            if path:
                version = get_image_version(remote, path)
                # This is either a gitbuilder or a local package and both of these
                # could have been built with upstream rpm targets with specs that
                # don't have a %post section at all, which means no initrd.
                maybe_generate_initrd_rpm(remote, path, version)
            elif not version or version == 'distro':
                version = get_latest_image_version_rpm(remote)
        update_grub_rpm(remote, version)
        remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False )
        return
    if package_type == 'deb':
        newversion = get_latest_image_version_deb(remote, dist_release)
        if 'ubuntu' in dist_release:
            grub2conf = teuthology.get_file(remote,
                            '/boot/grub/grub.cfg', sudo=True).decode()
            submenu = ''
            menuentry = ''
            # scan grub.cfg for the submenu (if any) and the menuentry
            # matching the new kernel version, skipping recovery entries
            for line in grub2conf.split('\n'):
                if 'submenu' in line:
                    submenu = line.split('submenu ')[1]
                    # Ubuntu likes to be sneaky and change formatting of
                    # grub.cfg between quotes/doublequotes between versions
                    if submenu.startswith("'"):
                        submenu = submenu.split("'")[1]
                    if submenu.startswith('"'):
                        submenu = submenu.split('"')[1]
                if 'menuentry' in line:
                    if newversion in line and 'recovery' not in line:
                        menuentry = line.split('\'')[1]
                        break
            if submenu:
                # nested entries are addressed as "submenu>entry"
                grubvalue = submenu + '>' + menuentry
            else:
                grubvalue = menuentry
            grubfile = 'cat <<EOF\nset default="' + grubvalue + '"\nEOF'
            teuthology.delete_file(remote, '/etc/grub.d/01_ceph_kernel', sudo=True, force=True)
            teuthology.sudo_write_file(remote, '/etc/grub.d/01_ceph_kernel', StringIO(grubfile), '755')
            log.info('Distro Kernel Version: {version}'.format(version=newversion))
            remote.run(args=['sudo', 'update-grub'])
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False )
            return
        if 'debian' in dist_release:
            grub2_kernel_select_generic(remote, newversion, 'deb')
            log.info('Distro Kernel Version: {version}'.format(version=newversion))
            remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False )
            return
def update_grub_rpm(remote, newversion):
    """
    Updates grub file to boot new kernel version on both legacy grub/grub2.

    :param newversion: kernel version the bootloader should default to
    """
    grub='grub2'
    # Check if grub2 is installed; a failing 'rpm -qi' means legacy grub
    try:
        remote.run(args=['sudo', 'rpm', '-qi', 'grub2-tools'])
    except Exception:
        grub = 'legacy'
    log.info('Updating Grub Version: {grub}'.format(grub=grub))
    if grub == 'legacy':
        data = ''
        # Write new legacy grub entry.
        newgrub = generate_legacy_grub_entry(remote, newversion)
        for line in newgrub:
            data += line + '\n'
        # stage the new config in a temp file, then move it into place
        temp_file_path = remote.mktemp()
        teuthology.sudo_write_file(remote, temp_file_path, StringIO(data), '755')
        teuthology.move_file(remote, temp_file_path, '/boot/grub/grub.conf', True)
    else:
        # Update grub menu entry to new version.
        grub2_kernel_select_generic(remote, newversion, 'rpm')
def grub2_kernel_select_generic(remote, newversion, ostype):
    """
    Can be used on DEB and RPM.  Sets which grub2 entry should be booted,
    either by BLS entry name (newer el8-style layouts) or by menuentry
    ordinal (older grub.cfg layouts).

    :param newversion: kernel version to select
    :param ostype: 'rpm' or 'deb' -- picks the grub tool names and paths
    """
    log.info("Updating grub on {node} to boot {version}".format(
        node=remote.shortname, version=newversion))
    if ostype == 'rpm':
        grubset = 'grub2-set-default'
        mkconfig = 'grub2-mkconfig'
        grubconfig = '/boot/grub2/grub.cfg'
    if ostype == 'deb':
        grubset = 'grub-set-default'
        grubconfig = '/boot/grub/grub.cfg'
        mkconfig = 'grub-mkconfig'
    # regenerate grub.cfg so the new kernel shows up in it
    remote.run(args=['sudo', mkconfig, '-o', grubconfig, ])
    grub2conf = teuthology.get_file(remote, grubconfig, sudo=True).decode()
    entry_num = 0
    if '\nmenuentry ' not in grub2conf:
        # okay, do the newer (el8) grub2 thing
        grub2conf = remote.sh('sudo /bin/ls /boot/loader/entries || true')
        entry = None
        for line in grub2conf.split('\n'):
            if line.endswith('.conf') and newversion in line:
                entry = line[:-5]  # drop .conf suffix
                break
    else:
        # do old menuitem counting thing
        for line in grub2conf.split('\n'):
            if line.startswith('menuentry '):
                if newversion in line:
                    break
                entry_num += 1
        entry = str(entry_num)
    if entry is None:
        log.warning('Unable to update grub2 order')
    else:
        remote.run(args=['sudo', grubset, entry])
def generate_legacy_grub_entry(remote, newversion):
    """
    This will likely need to be used for ceph kernels as well
    as legacy grub rpm distros don't have an easy way of selecting
    a kernel just via a command.  This generates an entry in legacy
    grub for a new kernel version using the existing entry as a base.

    :param newversion: kernel version for the new entry
    :returns: list of lines forming the new grub.conf contents
    """
    grubconf = teuthology.get_file(remote,
        '/boot/grub/grub.conf', sudo=True).decode()
    titleline = ''
    rootline = ''
    kernelline = ''
    initline = ''
    kernelversion = ''
    linenum = 0
    titlelinenum = 0
    # Grab first kernel entry (title/root/kernel/init lines).
    # Raw strings: the previous non-raw '(^\s+)...' patterns were invalid
    # escape sequences (DeprecationWarning on modern Pythons).
    for line in grubconf.split('\n'):
        if re.match(r'^title', line):
            titleline = line
            titlelinenum = linenum
        if re.match(r'(^\s+)root', line):
            rootline = line
        if re.match(r'(^\s+)kernel', line):
            kernelline = line
            for word in line.split(' '):
                if 'vmlinuz' in word:
                    kernelversion = word.split('vmlinuz-')[-1]
        if re.match(r'(^\s+)initrd', line):
            initline = line
        if (kernelline != '') and (initline != ''):
            break
        else:
            linenum += 1
    # insert new entry into grubconfnew list.  Escape the template version
    # so dots in it are matched literally rather than as regex wildcards.
    pattern = re.escape(kernelversion)
    linenum = 0
    newgrubconf = []
    for line in grubconf.split('\n'):
        line = line.rstrip('\n')
        if linenum == titlelinenum:
            newtitle = re.sub(pattern, newversion, titleline)
            newroot = re.sub(pattern, newversion, rootline)
            newkernel = re.sub(pattern, newversion, kernelline)
            newinit = re.sub(pattern, newversion, initline)
            newgrubconf.append(newtitle)
            newgrubconf.append(newroot)
            newgrubconf.append(newkernel)
            newgrubconf.append(newinit)
            newgrubconf.append('')
            newgrubconf.append(line)
        else:
            newgrubconf.append(line)
        linenum += 1
    return newgrubconf
def get_image_version(remote, path):
    """
    Get kernel image version from (rpm or deb) package by listing the
    package contents and locating the /boot/vmlinuz-<version> entry.

    :param path: (rpm or deb) package path
    :returns: version string, e.g. "4.9.0-rc4-ceph-g156db39ecfbd"
    :raises UnsupportedPackageTypeError: for non rpm/deb remotes
    :raises ValueError: if the package ships no /boot/vmlinuz-* file
        (previously this crashed with UnboundLocalError)
    """
    if remote.os.package_type == 'rpm':
        files = remote.sh(['rpm', '-qlp', path])
    elif remote.os.package_type == 'deb':
        files = remote.sh(['dpkg-deb', '-c', path])
    else:
        raise UnsupportedPackageTypeError(remote)

    version = None
    for entry in files.split('\n'):  # was 'file', shadowing the builtin
        if '/boot/vmlinuz-' in entry:
            version = entry.split('/boot/vmlinuz-')[1]
            break
    if version is None:
        raise ValueError(
            'no /boot/vmlinuz-* entry found in package %s' % path)
    log.debug("get_image_version: %s", version)
    return version
def get_latest_image_version_rpm(remote):
    """
    Get kernel image version of the newest kernel rpm package.
    Used for distro case.
    """
    distro = remote.os.name
    is_suse = distro in ['opensuse', 'sle']
    pkg = "kernel-default" if is_suse else "kernel"
    # tip of the package list ordered by install time
    latest = remote.sh('rpm -q %s --last | head -n 1' % pkg).strip()
    version = None
    prefix = str(pkg) + '-'
    for token in latest.split():
        if not token.startswith('kernel'):
            continue
        if 'ceph' in token:
            continue
        if is_suse:
            token = token.split()[0]
        version = token.split(prefix)[1]
    log.debug("get_latest_image_version_rpm: %s", version)
    return version
def get_latest_image_version_deb(remote, ostype):
    """
    Get kernel image version of the newest kernel deb package.
    Used for distro case.

    Round-about way to get the newest kernel uname -r compliant version
    string from the virtual package which is the newest kernel for
    debian/ubuntu.
    """
    remote.run(args=['sudo', 'apt-get', 'clean'])
    remote.run(args=['sudo', 'apt-get', 'update'])
    output = StringIO()
    newest = ''
    # Depend of virtual package has uname -r output in package name. Grab that.
    # Note that a dependency list may have multiple comma-separated entries,
    # but also each entry may be an alternative (pkg1 | pkg2)
    if 'debian' in ostype:
        remote.run(args=['sudo', 'apt-get', '-y', 'install',
                         'linux-image-amd64'], stdout=output)
        remote.run(args=['dpkg', '-s', 'linux-image-amd64'], stdout=output)
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                newest = line.split('linux-image-')[1]
        output.close()
        return newest
    # Ubuntu is a depend in a depend.
    if 'ubuntu' in ostype:
        try:
            remote.run(args=['sudo', 'DEBIAN_FRONTEND=noninteractive',
                             'apt-get', '-y', 'install',
                             'linux-image-current-generic'])
            remote.run(args=['dpkg', '-s', 'linux-image-current-generic'],
                       stdout=output)
            for line in output.getvalue().split('\n'):
                if 'Depends:' in line:
                    # install the concrete package the meta-package points at,
                    # then query it for the real uname -r style name
                    depends = line.split('Depends: ')[1]
                    remote.run(args=['sudo', 'apt-get', '-y', 'install',
                                     depends])
            remote.run(args=['dpkg', '-s', depends], stdout=output)
        except run.CommandFailedError:
            # Non precise ubuntu machines (like trusty) don't have
            # linux-image-current-generic so use linux-image-generic instead.
            remote.run(args=['sudo', 'DEBIAN_FRONTEND=noninteractive',
                             'apt-get', '-y', 'install',
                             'linux-image-generic'], stdout=output)
            remote.run(args=['dpkg', '-s', 'linux-image-generic'],
                       stdout=output)
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                newest = line.split('linux-image-')[1]
                if ',' in newest:
                    newest = newest.split(',')[0]
                if '|' in newest:
                    # not strictly correct, as any of the |-joined
                    # packages may satisfy the dependency
                    newest = newest.split('|')[0].strip()
    output.close()
    return newest
def get_sha1_from_pkg_name(path):
    """
    Extract the commit hash (min 12 max 40 hex chars) embedded in an
    rpm/deb kernel package name, or None if no hash is present.

    Example package names ("make bindeb-pkg" and "make binrpm-pkg"):
        linux-image-4.9.0-rc4-ceph-g156db39ecfbd_4.9.0-rc4-ceph-g156db39ecfbd-1_amd64.deb
        kernel-4.9.0_rc4_ceph_g156db39ecfbd-2.x86_64.rpm

    :param path: (rpm or deb) package path (only basename is used)
    """
    name = os.path.basename(path)
    found = re.search(r'[-_]ceph[-_]g([0-9a-f]{12,40})', name)
    if found:
        sha1 = found.group(1)
    else:
        sha1 = None
    log.debug("get_sha1_from_pkg_name: %s -> %s -> %s", path, name, sha1)
    return sha1
def task(ctx, config):
    """
    Make sure the specified kernel is installed.
    This can be a branch, tag, or sha1 of ceph-client.git or a local
    kernel package.

    To install ceph-client.git branch (default: master)::

        kernel:
          branch: testing

    To install ceph-client.git tag::

        kernel:
          tag: v3.18

    To install ceph-client.git sha1::

        kernel:
          sha1: 275dd19ea4e84c34f985ba097f9cddb539f54a50

    To install from a koji build_id::

        kernel:
          koji: 416058

    To install from a koji task_id::

        kernel:
          koji_task: 9678206

    When installing from koji you also need to set the urls for koji hub
    and the koji root in your teuthology.yaml config file.  These are shown
    below with their default values::

        kojihub_url: http://koji.fedoraproject.org/kojihub
        kojiroot_url: http://kojipkgs.fedoraproject.org/packages

    When installing from a koji task_id you also need to set koji_task_url,
    which is the base url used to download rpms from koji task results::

        koji_task_url: https://kojipkgs.fedoraproject.org/work/

    To install local rpm (target should be an rpm system)::

        kernel:
          rpm: /path/to/appropriately-named.rpm

    To install local deb (target should be a deb system)::

        kernel:
          deb: /path/to/appropriately-named.deb

    For rpm: or deb: to work it should be able to figure out sha1 from
    local kernel package basename, see get_sha1_from_pkg_name().  This
    means that you can't for example install a local tag - package built
    with upstream {rpm,deb}-pkg targets won't have a sha1 in its name.

    If you want to schedule a run and use a local kernel package, you
    have to copy the package over to a box teuthology workers are
    running on and specify a path to the package on that box.

    All of the above will install a specified kernel on all targets.
    You can specify different kernels for each role or for all roles of
    a certain type (more specific roles override less specific, see
    normalize_config() for details)::

        kernel:
          client:
            tag: v3.0
          osd:
            branch: btrfs_fixes
          client.1:
            branch: more_specific
          osd.3:
            branch: master

    To wait 3 minutes for hosts to reboot (default: 300)::

        kernel:
          timeout: 180

    To enable kdb::

        kernel:
          kdb: true

    :param ctx: Context
    :param config: Configuration
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task kernel only supports a dictionary for configuration"

    overrides = ctx.config.get('overrides', {}).get('kernel', {})
    config, timeout = normalize_and_apply_overrides(ctx, config, overrides)
    validate_config(ctx, config)
    log.info('config %s, timeout %d' % (config, timeout))

    need_install = {}  # sha1 to dl, or path to rpm or deb
    need_version = {}  # utsrelease or sha1
    kdb = {}
    for role, role_config in config.items():
        # gather information about this remote
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        system_type = role_remote.os.name
        if role_config.get('rpm') or role_config.get('deb'):
            # We only care about path - deb: vs rpm: is meaningless,
            # rpm: just happens to be parsed first.  Nothing is stopping
            # 'deb: /path/to/foo.rpm' and it will work provided remote's
            # os.package_type is 'rpm' and vice versa.
            path = role_config.get('rpm')
            if not path:
                path = role_config.get('deb')
            sha1 = get_sha1_from_pkg_name(path)
            assert sha1, "failed to extract commit hash from path %s" % path
            if need_to_install(ctx, role, sha1):
                need_install[role] = path
                need_version[role] = sha1
        elif role_config.get('sha1') == 'distro':
            version = need_to_install_distro(role_remote)
            if version:
                need_install[role] = 'distro'
                need_version[role] = version
        elif role_config.get("koji") or role_config.get('koji_task'):
            # installing a kernel from koji
            build_id = role_config.get("koji")
            task_id = role_config.get("koji_task")
            if role_remote.os.package_type != "rpm":
                msg = (
                    "Installing a kernel from koji is only supported "
                    "on rpm based systems. System type is {system_type}."
                )
                msg = msg.format(system_type=system_type)
                log.error(msg)
                ctx.summary["failure_reason"] = msg
                ctx.summary["status"] = "dead"
                raise ConfigError(msg)

            # FIXME: this install should probably happen somewhere else
            # but I'm not sure where, so we'll leave it here for now.
            install_package('koji', role_remote)

            if build_id:
                # get information about this build from koji
                build_info = get_koji_build_info(build_id, role_remote, ctx)
                version = "{ver}-{rel}.x86_64".format(
                    ver=build_info["version"],
                    rel=build_info["release"]
                )
            elif task_id:
                # get information about results of this task from koji
                task_result = get_koji_task_result(task_id, role_remote, ctx)
                # this is not really 'build_info', it's a dict of information
                # about the kernel rpm from the task results, but for the sake
                # of reusing the code below I'll still call it that.
                build_info = get_koji_task_rpm_info(
                    'kernel',
                    task_result['rpms']
                )
                # add task_id so we can know later that we're installing
                # from a task and not a build.
                build_info["task_id"] = task_id
                version = build_info["version"]

            if need_to_install(ctx, role, version):
                need_install[role] = build_info
                need_version[role] = version
        else:
            builder = get_builder_project()(
                "kernel",
                role_config,
                ctx=ctx,
                remote=role_remote,
            )
            sha1 = builder.sha1
            log.debug('sha1 for {role} is {sha1}'.format(role=role, sha1=sha1))
            ctx.summary['{role}-kernel-sha1'.format(role=role)] = sha1
            if need_to_install(ctx, role, sha1):
                if teuth_config.use_shaman:
                    version = builder.scm_version
                else:
                    version = builder.version
                if not version:
                    raise VersionNotFoundError(builder.base_url)
                need_install[role] = sha1
                need_version[role] = version

        # enable or disable kdb if specified, otherwise do not touch
        if role_config.get('kdb') is not None:
            kdb[role] = role_config.get('kdb')

    if need_install:
        install_firmware(ctx, need_install)
        download_kernel(ctx, need_install)
        install_and_reboot(ctx, need_install)
        wait_for_reboot(ctx, need_version, timeout)

    enable_disable_kdb(ctx, kdb)
| 36.915038 | 109 | 0.547508 |
import logging
import os
import re
import shlex
from io import StringIO
from teuthology.util.compat import urljoin
from teuthology import misc as teuthology
from teuthology.parallel import parallel
from teuthology.config import config as teuth_config
from teuthology.orchestra import run
from teuthology.exceptions import (
UnsupportedPackageTypeError,
ConfigError,
VersionNotFoundError,
)
from teuthology.packaging import (
install_package,
get_koji_build_info,
get_kojiroot_base_url,
get_koji_package_name,
get_koji_task_rpm_info,
get_koji_task_result,
get_builder_project,
)
log = logging.getLogger(__name__)
CONFIG_DEFAULT = {'branch': 'master'}
TIMEOUT_DEFAULT = 300
VERSION_KEYS = ['branch', 'tag', 'sha1', 'deb', 'rpm', 'koji', 'koji_task']
def normalize_config(ctx, config):
    """
    Expand a kernel task config into a per-role mapping.

    If config has only version/kdb/flavor keys (or is empty), apply the
    same (defaulted) config to every role in the cluster.  Otherwise,
    expand type-level entries (e.g. 'osd') into 'osd.N' entries for each
    role id, with explicit 'type.id' entries taking precedence.

    :returns: dict of 'type.id' role name -> per-role config dict
    """
    if not config or \
            len([x for x in config.keys() if x in
                 VERSION_KEYS + ['kdb', 'flavor']]) == len(config.keys()):
        new_config = {}
        if not config:
            config = CONFIG_DEFAULT
        for role in teuthology.all_roles(ctx.cluster):
            new_config[role] = config.copy()
        return new_config
    new_config = {}
    for role, role_config in config.items():
        if role_config is None:
            role_config = CONFIG_DEFAULT
        if '.' in role:
            # already a fully-qualified 'type.id' role
            new_config[role] = role_config.copy()
        else:
            # expand a bare role type to every role id of that type,
            # unless a more specific 'type.id' entry exists in config
            for id_ in teuthology.all_roles_of_type(ctx.cluster, role):
                name = '{type}.{id}'.format(type=role, id=id_)
                if name not in config:
                    new_config[name] = role_config.copy()
    return new_config
def normalize_and_apply_overrides(ctx, config, overrides):
    """
    Normalize config and overrides (see normalize_config()), apply
    overrides on top of config, and extract the reboot timeout.

    Override semantics: if an override carries any version key for a
    role, it wipes all version keys from that role's base config first,
    so version specs are replaced wholesale rather than merged.

    :returns: (per-role config dict, timeout in seconds)
    """
    timeout = TIMEOUT_DEFAULT
    if 'timeout' in config:
        timeout = config.pop('timeout')
    config = normalize_config(ctx, config)
    log.debug('normalized config %s' % config)
    if 'timeout' in overrides:
        timeout = overrides.pop('timeout')
    if overrides:
        overrides = normalize_config(ctx, overrides)
        log.debug('normalized overrides %s' % overrides)
        for role, role_config in config.items():
            if (role in overrides and
                    any(k in overrides[role] for k in VERSION_KEYS)):
                for k in VERSION_KEYS:
                    role_config.pop(k, None)
        teuthology.deep_merge(config, overrides)
    return (config, timeout)
def validate_config(ctx, config):
    """
    Assert that all roles that share a host are configured with the same
    kernel; duplicate entries for a host are dropped from config.
    """
    for _, roles_for_host in ctx.cluster.remotes.items():
        kernel = None
        for role in roles_for_host:
            role_kernel = config.get(role, kernel)
            if kernel is None:
                kernel = role_kernel
            elif role_kernel is not None:
                assert kernel == role_kernel, \
                    "everything on the same host must use the same kernel"
                # keep only one entry per host
                if role in config:
                    del config[role]
def need_to_install(ctx, role, version):
    """
    Check whether the given role's remote already runs the wanted kernel.

    :param version: either a utsrelease string (contains '.') compared
        exactly against `uname -r`, or a sha1 whose prefix is compared
        against the hash embedded in the running kernel's version string
    :returns: True if an install is needed, False if the running kernel
        already matches
    """
    ret = True
    log.info('Checking kernel version of {role}, want "{ver}"...'.format(
             role=role, ver=version))
    uname_fp = StringIO()
    ctx.cluster.only(role).run(
        args=[
            'uname',
            '-r',
            ],
        stdout=uname_fp,
        )
    cur_version = uname_fp.getvalue().rstrip('\n')
    log.debug('current kernel version is {ver} vs {want}'.format(ver=cur_version,
                  want=version))
    if '.' in str(version):
        # utsrelease string: exact match required
        if cur_version == version:
            log.debug('utsrelease strings match, do not need to install')
            ret = False
    else:
        # sha1: compare against the -g<hash> suffix of the running kernel
        match = re.search('[-_]g([0-9a-f]{6,40})', cur_version)
        if match:
            cur_sha1 = match.group(1)
            log.debug('extracting sha1, {ver} -> {sha1}'.format(
                ver=cur_version, sha1=cur_sha1))
            m = min(len(cur_sha1), len(version))
            assert m >= 6, "cur_sha1 and/or version is too short, m = %d" % m
            if cur_sha1[0:m] == version[0:m]:
                log.debug('extracted sha1 matches, do not need to install')
                ret = False
        else:
            log.debug('failed to parse current kernel version')
    uname_fp.close()
    return ret
def install_firmware(ctx, config):
    """
    Fetch the latest linux-firmware into /lib/firmware/updates on each
    role's remote via git (or `yum upgrade linux-firmware` on rpm hosts).
    Skipped entirely when any role installs a distro kernel.

    :param ctx: Context
    :param config: map of role -> kernel src (path/sha1/'distro'/koji dict)
    """
    linux_firmware_git_upstream = 'git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git'
    uri = teuth_config.linux_firmware_git_url or linux_firmware_git_upstream
    fw_dir = '/lib/firmware/updates'

    for role in config.keys():
        if isinstance(config[role], str) and config[role].find('distro') >= 0:
            log.info('Skipping firmware on distro kernel');
            return
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        package_type = role_remote.os.package_type
        if package_type == 'rpm':
            # rpm hosts get firmware from the distro package instead of git
            role_remote.run(args=[
                'sudo', 'yum', 'upgrade', '-y', 'linux-firmware',
            ])
            continue
        log.info('Installing linux-firmware on {role}...'.format(role=role))
        role_remote.run(
            args=[
                # kludge around an initial lock race, does not need to avoid
                # the race to be effective
                'cd', fw_dir,
                run.Raw('&&'),
                'test', '-d', '.git',
                run.Raw('&&'),
                'test', '!', '-s', '.git/HEAD',
                run.Raw('&&'),
                'sudo', 'rm', '-rf', '.git',
                run.Raw(';'),
                'sudo', 'install', '-d', '-m0755', fw_dir,
                run.Raw('&&'),
                'cd', fw_dir,
                run.Raw('&&'),
                'sudo', 'git', 'init',
                ],
            )
        role_remote.run(
            args=[
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir, 'config',
                '--get', 'remote.origin.url', run.Raw('>/dev/null'),
                run.Raw('||'),
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir,
                'remote', 'add', 'origin', uri,
                ],
            )
        role_remote.run(
            args=[
                'sudo', 'git', '--git-dir=%s/.git' % fw_dir, 'remote',
                'set-url', 'origin', uri, run.Raw('>/dev/null')
                ]
            )
        role_remote.run(
            args=[
                'cd', fw_dir,
                run.Raw('&&'),
                'sudo', 'git', 'fetch', 'origin',
                run.Raw('&&'),
                'sudo', 'git', 'reset', '--hard', 'origin/master'
                ],
            )
def gitbuilder_pkg_name(remote):
    """
    Kernel package filename used on gitbuilder for the remote's
    package type.

    :raises UnsupportedPackageTypeError: for non rpm/deb remotes
    """
    pkg_type = remote.os.package_type
    if pkg_type == 'rpm':
        return 'kernel.x86_64.rpm'
    if pkg_type == 'deb':
        return 'linux-image.deb'
    raise UnsupportedPackageTypeError(remote)
def remote_pkg_path(remote):
    """
    Path on the remote where the kernel package is staged.
    """
    pkg_name = gitbuilder_pkg_name(remote)
    return os.path.join('/tmp', pkg_name)
def download_kernel(ctx, config):
    """
    Supply each role's remote with its kernel package: copy a local
    package, or download from koji (build/task), shaman, or gitbuilder.
    Downloads run in parallel; this waits for all of them to finish.

    :param ctx: Context
    :param config: map of role -> src, where src is 'distro' (skipped),
        a koji info dict, a local path (contains '/'), or a sha1
    """
    procs = {}
    for role, src in config.items():
        needs_download = False

        if src == 'distro':
            # distro kernels are installed by the package manager directly
            log.debug("src is distro, skipping download");
            continue

        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, dict):
            # we're downloading a kernel from koji, the src dict here
            # is the build_info retrieved from koji using get_koji_build_info
            if src.get("id"):
                build_id = src["id"]
                log.info("Downloading kernel with build_id {build_id} on {role}...".format(
                    build_id=build_id,
                    role=role
                ))
                needs_download = True
                baseurl = get_kojiroot_base_url(src)
                pkg_name = get_koji_package_name("kernel", src)
            elif src.get("task_id"):
                needs_download = True
                log.info("Downloading kernel with task_id {task_id} on {role}...".format(
                    task_id=src["task_id"],
                    role=role
                ))
                baseurl = src["base_url"]
                # this is not really the package name, but the rpm_name
                # is not just a name here -- it carries
                # but the full name of the rpm file to download.
                pkg_name = src["rpm_name"]
        elif src.find('/') >= 0:
            # local package - src is path
            log.info('Copying kernel package {path} to {role}...'.format(
                path=src, role=role))
            role_remote.put_file(src,remote_pkg_path(role_remote))
        else:
            # gitbuilder package - src is sha1
            log.info('Downloading kernel {sha1} on {role}...'.format(
                sha1=src,
                role=role,
            ))
            needs_download = True
            builder = get_builder_project()(
                'kernel',
                {'sha1': src},
                ctx=ctx,
                remote=role_remote,
            )
            if teuth_config.use_shaman:
                if role_remote.os.package_type == 'rpm':
                    arch = builder.arch
                    baseurl = urljoin(
                        builder.base_url,
                        '/'.join([arch, ''])
                    )
                    pkg_name = "kernel-%s.%s.rpm" % (
                        builder.version,
                        arch,
                    )
                elif role_remote.os.package_type == 'deb':
                    arch = 'amd64'  # FIXME
                    baseurl = urljoin(
                        builder.base_url,
                        '/'.join([
                            'pool', 'main', 'l',
                            'linux-%s' % builder.scm_version, ''
                        ])
                    )
                    pkg_name = 'linux-image-%s_%s_%s.deb' % (
                        builder.scm_version,
                        builder.version,
                        arch,
                    )
            else:
                baseurl = builder.base_url + "/"
                pkg_name = gitbuilder_pkg_name(role_remote)

            log.info("fetching, builder baseurl is %s", baseurl)

        if needs_download:
            proc = role_remote.run(
                args=[
                    'rm', '-f', remote_pkg_path(role_remote),
                    run.Raw('&&'),
                    'echo',
                    pkg_name,
                    run.Raw('|'),
                    'wget',
                    '-nv',
                    '-O',
                    remote_pkg_path(role_remote),
                    '--base={url}'.format(url=baseurl),
                    '--input-file=-',
                ],
                wait=False)
            procs[role_remote.name] = proc

    for name, proc in procs.items():
        log.debug('Waiting for download/copy to %s to complete...', name)
        proc.wait()
def _no_grub_link(in_file, remote, kernel_ver):
    """
    For remotes without grub (e.g. highbank ARM boards): back up the
    existing /boot/<in_file> to <in_file>.old and symlink /boot/<in_file>
    to the versioned file <in_file>-<kernel_ver>.

    :param in_file: boot file name, e.g. 'vmlinuz' or 'initrd.img'
    :param kernel_ver: kernel version suffix of the target file
    """
    boot1 = '/boot/%s' % in_file
    boot2 = '%s.old' % boot1
    remote.run(
        args=[
            'if', 'test', '-e', boot1, run.Raw(';'), 'then',
            'sudo', 'mv', boot1, boot2, run.Raw(';'), 'fi',],
    )
    remote.run(
        args=['sudo', 'ln', '-s', '%s-%s' % (in_file, kernel_ver) , boot1, ],
    )
def install_latest_rh_kernel(ctx, config):
    """
    Install the latest RH kernel on all remotes in parallel, unless
    config sets 'skip'.

    :param ctx: Context
    :param config: Configuration (may be None; 'skip' disables the task)
    """
    if config is None:
        config = {}
    if config.get('skip'):
        return
    with parallel() as p:
        for remote in ctx.cluster.remotes.keys():
            p.spawn(update_rh_kernel, remote)
def update_rh_kernel(remote):
    """
    `yum update kernel` on an rpm remote; if anything was installed,
    reboot and reconnect.  No-op if the latest kernel is already present.
    """
    package_type = remote.os.package_type
    remote.run(args=['uname', '-a'])
    import time
    if package_type == 'rpm':
        update_log = remote.sh('sudo yum update -y kernel')
        log.info(update_log)
        # 'Installed' appears in yum output when a new kernel went in
        if not update_log.find("Installed") == -1:
            log.info("Kernel updated to latest z stream on %s", remote.shortname)
            log.info("Rebooting %s", remote.shortname)
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False)
            # give the host time to go down before reconnecting
            time.sleep(40)
            log.info("Reconnecting after reboot")
            remote.reconnect(timeout=300)
            remote.run(args=['uname', '-a'])
        elif not update_log.find('No packages marked for update') == -1:
            log.info("Latest version already installed on %s", remote.shortname)
def install_and_reboot(ctx, config):
    """
    Install the downloaded kernel package on each role's remote, set up
    the bootloader default, and issue reboots.  Waits for all spawned
    install commands to complete.

    :param ctx: Context
    :param config: map of role -> src ('distro', path, sha1, or koji dict)
    """
    procs = {}
    kernel_title = ''
    for role, src in config.items():
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        if isinstance(src, str) and src.find('distro') >= 0:
            log.info('Installing distro kernel on {role}...'.format(role=role))
            install_kernel(role_remote, version=src)
            continue

        log.info('Installing kernel {src} on {role}...'.format(src=src,
                                                               role=role))
        package_type = role_remote.os.package_type
        if package_type == 'rpm':
            proc = role_remote.run(
                args=[
                    'sudo',
                    'rpm',
                    '-ivh',
                    '--oldpackage',
                    '--replacefiles',
                    '--replacepkgs',
                    remote_pkg_path(role_remote),
                ])
            install_kernel(role_remote, remote_pkg_path(role_remote))
            continue

        # TODO: Refactor this into install_kernel() so that it handles all
        # cases for both rpm and deb packages.
        proc = role_remote.run(
            args=[
                # install the kernel deb
                'sudo',
                'dpkg',
                '-i',
                remote_pkg_path(role_remote),
                ],
            )

        # collect kernel image name from the .deb
        kernel_title = get_image_version(role_remote,
                                         remote_pkg_path(role_remote))
        log.info('searching for kernel {}'.format(kernel_title))

        if kernel_title.endswith("-highbank"):
            # highbank ARM boards have no grub; fix up /boot symlinks instead
            _no_grub_link('vmlinuz', role_remote, kernel_title)
            _no_grub_link('initrd.img', role_remote, kernel_title)
            proc = role_remote.run(
                args=[
                    'sudo',
                    'shutdown',
                    '-r',
                    'now',
                    ],
                wait=False,
            )
            procs[role_remote.name] = proc
            continue

        # look for menuentry for our kernel, and collect any
        # submenu entries for their titles.  Assume that if our
        # kernel entry appears later in the file than a submenu entry,
        # it's actually nested under that submenu.  If it gets more
        # complex this will totally break.
        kernel_entries = role_remote.sh([
            'egrep',
            '(submenu|menuentry.*' + kernel_title + ').*{',
            '/boot/grub/grub.cfg'
        ]).split('\n')
        submenu_title = ''
        default_title = ''
        for l in kernel_entries:
            fields = shlex.split(l)
            if len(fields) >= 2:
                command, title = fields[:2]
                if command == 'submenu':
                    submenu_title = title + '>'
                if command == 'menuentry':
                    if title.endswith(kernel_title):
                        default_title = title
                        break
        log.info('submenu_title:{}'.format(submenu_title))
        log.info('default_title:{}'.format(default_title))

        proc = role_remote.run(
            args=[
                # write a script to /etc/grub.d that selects our entry;
                # the package may not have run update-grub to pick it
                '/bin/echo',
                '-e',
                r'cat <<EOF\nset default="' + submenu_title + \
                default_title + r'"\nEOF\n',
                # up yet; use sudo tee so we are able to write to /etc
                run.Raw('|'),
                'sudo',
                'tee',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('>/dev/null'),
                run.Raw('&&'),
                'sudo',
                'chmod',
                'a+x',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                run.Raw('&&'),
                'sudo',
                'mv',
                '--',
                '/etc/grub.d/01_ceph_kernel.tmp~',
                '/etc/grub.d/01_ceph_kernel',
                # update grub again so it accepts our default
                run.Raw('&&'),
                'sudo',
                'update-grub',
                run.Raw('&&'),
                'rm',
                remote_pkg_path(role_remote),
                run.Raw('&&'),
                # work around a systemd issue, where network gets shut down
                # before ssh can close its session
                run.Raw('('),
                'sleep',
                '1',
                run.Raw('&&'),
                'sudo',
                'shutdown',
                '-r',
                'now',
                run.Raw('&'),
                run.Raw(')'),
                ],
            wait=False,
        )
        procs[role_remote.name] = proc

    for name, proc in procs.items():
        log.debug('Waiting for install on %s to complete...', name)
        proc.wait()
def enable_disable_kdb(ctx, config):
    """
    Enable or disable kdb (kernel debugger over the serial console) on
    each role's remote by writing the serial device name (or an empty
    string) to /sys/module/kgdboc/parameters/kgdboc.

    :param ctx: Context
    :param config: map of role -> bool (True enables, False disables)
    """
    for role, enable in config.items():
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        # mira hardware exposes its serial console on a different port
        if "mira" in role_remote.name:
            serialdev = "ttyS2"
        else:
            serialdev = "ttyS1"
        if enable:
            log.info('Enabling kdb on {role}...'.format(role=role))
            try:
                role_remote.run(
                    args=[
                        'echo', serialdev,
                        run.Raw('|'),
                        'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc'
                    ])
            except run.CommandFailedError:
                # Logger.warn is a deprecated alias of Logger.warning
                log.warning('Kernel does not support kdb')
        else:
            log.info('Disabling kdb on {role}...'.format(role=role))
            # Add true pipe so command doesn't fail on kernel without kdb support.
            try:
                role_remote.run(
                    args=[
                        'echo', '',
                        run.Raw('|'),
                        'sudo', 'tee', '/sys/module/kgdboc/parameters/kgdboc',
                        run.Raw('|'),
                        'true',
                    ])
            except run.CommandFailedError:
                log.warning('Kernel does not support kdb')
def wait_for_reboot(ctx, need_install, timeout, distro=False):
    """
    Loop reconnecting to remotes until every client in need_install runs
    the wanted kernel, or raise when timeout (seconds) is exceeded.

    :param need_install: map of client role -> wanted version (or a
        'distro' marker); entries are removed as clients come up correct
    :param timeout: total seconds to wait before re-raising the failure
    """
    import time
    # initial grace period for the hosts to actually go down
    time.sleep(30)
    starttime = time.time()
    while need_install:
        teuthology.reconnect(ctx, timeout)
        for client in list(need_install.keys()):
            if 'distro' in str(need_install[client]):
                distro = True
            log.info('Checking client {client} for new kernel version...'.format(client=client))
            try:
                if distro:
                    (remote,) = ctx.cluster.only(client).remotes.keys()
                    assert not need_to_install_distro(remote), \
                        'failed to install new distro kernel version within timeout'
                else:
                    assert not need_to_install(ctx, client, need_install[client]), \
                        'failed to install new kernel version within timeout'
                del need_install[client]
            except Exception:
                log.exception("Saw exception")
                # only give up for real once the overall timeout has passed
                if time.time() - starttime > timeout:
                    raise
                time.sleep(1)
def get_version_of_running_kernel(remote):
    """
    Return `uname -r` of the remote; on openSUSE/SLE the "-default"
    suffix is stripped so it matches package version strings.
    """
    release = remote.sh("uname -r").strip()
    if remote.os.name in ['opensuse', 'sle']:
        return re.sub(r"-default$", "", release)
    return release
def need_to_install_distro(remote):
    """
    Install the newest distro kernel package on the remote and compare
    it against the running kernel.

    :returns: False if the newest distro kernel is already installed and
        running, otherwise the version string of the newest distro
        kernel (truthy -> caller schedules an install/reboot)
    """
    dist_release = remote.os.name
    package_type = remote.os.package_type
    current = get_version_of_running_kernel(remote)
    log.info("Running kernel on {node}: {version}".format(
        node=remote.shortname, version=current))
    installed_version = None
    if package_type == 'rpm':
        if dist_release in ['opensuse', 'sle']:
            install_stdout = remote.sh(
                'sudo zypper --non-interactive install kernel-default'
            )
        else:
            install_stdout = remote.sh(
                'sudo yum install -y kernel'
            )
        match = re.search(
            "Package (.*) already installed",
            install_stdout, flags=re.MULTILINE)
        if 'Nothing to do' in install_stdout:
            # kernel already installed; use a dry-run reinstall to learn
            # whether the installed kernel is the one currently running
            installed_version = match.groups()[0] if match else ''
            err_mess = StringIO()
            err_mess.truncate(0)
            remote.run(args=['echo', 'no', run.Raw('|'), 'sudo', 'yum',
                             'reinstall', 'kernel', run.Raw('||'), 'true'],
                       stderr=err_mess)
            reinstall_stderr = err_mess.getvalue()
            err_mess.close()
            if 'Skipping the running kernel' in reinstall_stderr:
                running_version = re.search(
                    "Skipping the running kernel: (.*)",
                    reinstall_stderr, flags=re.MULTILINE).groups()[0]
                if installed_version == running_version:
                    log.info(
                        'Newest distro kernel already installed and running')
                    return False
            else:
                remote.run(args=['sudo', 'yum', 'reinstall', '-y', 'kernel',
                                 run.Raw('||'), 'true'])
        newest = get_latest_image_version_rpm(remote)
    if package_type == 'deb':
        newest = get_latest_image_version_deb(remote, dist_release)
    # deb versions may use '_' where uname reports '-'
    if current in newest or current.replace('-', '_') in newest:
        log.info('Newest distro kernel installed and running')
        return False
    log.info(
        'Not newest distro kernel. Current: {cur} Expected: {new}'.format(
            cur=current, new=newest))
    return newest
def maybe_generate_initrd_rpm(remote, path, version):
    """
    Generate initrd with mkinitrd if the hooks that should make it
    happen on its own aren't there.

    :param path: rpm package path
    :param version: kernel version to generate initrd for
        e.g. 3.18.0-rc6-ceph-00562-g79a9fa5
    """
    out = remote.sh(['rpm', '--scripts', '-qp', path])
    if 'bin/installkernel' in out or 'bin/kernel-install' in out:
        # the package's own scriptlets will build the initrd
        return
    log.info("No installkernel or kernel-install hook in %s, "
             "will generate initrd for %s", path, version)
    remote.run(
        args=[
            'sudo',
            'mkinitrd',
            '--allow-missing',
            '-f',  # overwrite existing initrd
            '/boot/initramfs-' + version + '.img',
            version,
        ])
def install_kernel(remote, path=None, version=None):
    """Configure the boot loader for a kernel and reboot the remote into it.

    Selection is driven by *path* (a kernel package already on the remote)
    or *version* (a version string; None or 'distro' means the newest
    distro kernel).  The reboot is issued with wait=False because it kills
    the connection; callers are expected to wait for the node to return.

    :param remote: remote to operate on
    :param path: path to a kernel rpm/deb on the remote, or None
    :param version: kernel version to boot, or None/'distro' for newest
    """
    dist_release = remote.os.name
    templ = "install_kernel(remote={remote}, path={path}, version={version})"
    log.debug(templ.format(remote=remote, path=path, version=version))
    package_type = remote.os.package_type
    if package_type == 'rpm':
        if dist_release in ['opensuse', 'sle']:
            # SUSE: no package/version discovery needed here; fall through
            # to the grub update and reboot below.
            pass
        else:
            if path:
                version = get_image_version(remote, path)
                # the rpm's own hooks may regenerate the initrd; otherwise
                # do it ourselves (see maybe_generate_initrd_rpm)
                maybe_generate_initrd_rpm(remote, path, version)
            elif not version or version == 'distro':
                version = get_latest_image_version_rpm(remote)
        update_grub_rpm(remote, version)
        # wait=False: the shutdown drops the ssh connection immediately
        remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False )
        return
    if package_type == 'deb':
        newversion = get_latest_image_version_deb(remote, dist_release)
        if 'ubuntu' in dist_release:
            grub2conf = teuthology.get_file(remote,
                            '/boot/grub/grub.cfg', sudo=True).decode()
            submenu = ''
            menuentry = ''
            for line in grub2conf.split('\n'):
                if 'submenu' in line:
                    submenu = line.split('submenu ')[1]
                    # Ubuntu likes to be sneaky and change formatting of
                    # grub.cfg between quotes/doublequotes between versions
                    if submenu.startswith("'"):
                        submenu = submenu.split("'")[1]
                    if submenu.startswith('"'):
                        submenu = submenu.split('"')[1]
                if 'menuentry' in line:
                    if newversion in line and 'recovery' not in line:
                        menuentry = line.split('\'')[1]
                        break
            if submenu:
                # nested entries are addressed as "submenu>menuentry" in
                # grub2's "set default"
                grubvalue = submenu + '>' + menuentry
            else:
                grubvalue = menuentry
            grubfile = 'cat <<EOF\nset default="' + grubvalue + '"\nEOF'
            teuthology.delete_file(remote, '/etc/grub.d/01_ceph_kernel', sudo=True, force=True)
            teuthology.sudo_write_file(remote, '/etc/grub.d/01_ceph_kernel', StringIO(grubfile), '755')
            log.info('Distro Kernel Version: {version}'.format(version=newversion))
            remote.run(args=['sudo', 'update-grub'])
            remote.run(args=['sudo', 'shutdown', '-r', 'now'], wait=False )
            return
        if 'debian' in dist_release:
            grub2_kernel_select_generic(remote, newversion, 'deb')
            log.info('Distro Kernel Version: {version}'.format(version=newversion))
            remote.run( args=['sudo', 'shutdown', '-r', 'now'], wait=False )
            return
def update_grub_rpm(remote, newversion):
    """Update the boot loader on an rpm-based remote to boot *newversion*.

    Detects grub2 by probing for the grub2-tools package; if the probe
    fails, falls back to rewriting the legacy /boot/grub/grub.conf with a
    freshly generated entry.

    :param remote: remote to operate on
    :param newversion: kernel version the default boot entry must select
    """
    grub='grub2'
    # if grub2-tools is not installed, assume legacy grub (grub1)
    try:
        remote.run(args=['sudo', 'rpm', '-qi', 'grub2-tools'])
    except Exception:
        grub = 'legacy'
    log.info('Updating Grub Version: {grub}'.format(grub=grub))
    if grub == 'legacy':
        # serialize the generated config and move it into place atomically
        data = ''
        newgrub = generate_legacy_grub_entry(remote, newversion)
        for line in newgrub:
            data += line + '\n'
        temp_file_path = remote.mktemp()
        teuthology.sudo_write_file(remote, temp_file_path, StringIO(data), '755')
        teuthology.move_file(remote, temp_file_path, '/boot/grub/grub.conf', True)
    else:
        grub2_kernel_select_generic(remote, newversion, 'rpm')
def grub2_kernel_select_generic(remote, newversion, ostype):
    """Make *newversion* the default grub2 boot entry on the remote.

    Regenerates the grub config, then sets the default entry: on systems
    with no classic ``menuentry`` lines the BLS loader-entry id from
    /boot/loader/entries is used, otherwise the positional index of the
    first matching menuentry.

    :param remote: remote to operate on
    :param newversion: kernel version the default entry must contain
    :param ostype: 'rpm' or 'deb'; selects tool names and config path
    """
    log.info("Updating grub on {node} to boot {version}".format(
        node=remote.shortname, version=newversion))
    if ostype == 'rpm':
        grubset = 'grub2-set-default'
        mkconfig = 'grub2-mkconfig'
        grubconfig = '/boot/grub2/grub.cfg'
    if ostype == 'deb':
        grubset = 'grub-set-default'
        grubconfig = '/boot/grub/grub.cfg'
        mkconfig = 'grub-mkconfig'
    remote.run(args=['sudo', mkconfig, '-o', grubconfig, ])
    grub2conf = teuthology.get_file(remote, grubconfig, sudo=True).decode()
    entry_num = 0
    if '\nmenuentry ' not in grub2conf:
        # no classic menuentries: assume BLS-style loader entries and pick
        # the *.conf whose filename carries the wanted version
        grub2conf = remote.sh('sudo /bin/ls /boot/loader/entries || true')
        entry = None
        for line in grub2conf.split('\n'):
            if line.endswith('.conf') and newversion in line:
                entry = line[:-5]
                break
    else:
        for line in grub2conf.split('\n'):
            if line.startswith('menuentry '):
                if newversion in line:
                    break
                entry_num += 1
        # NOTE(review): if no menuentry matches, entry_num ends up equal to
        # the total entry count and an out-of-range default is set --
        # confirm whether this case should warn instead.
        entry = str(entry_num)
    if entry is None:
        log.warning('Unable to update grub2 order')
    else:
        remote.run(args=['sudo', grubset, entry])
def generate_legacy_grub_entry(remote, newversion):
    """Build a legacy-grub (grub1) grub.conf with an entry for *newversion*.

    Reads /boot/grub/grub.conf from the remote, locates the first complete
    boot stanza (title/root/kernel/initrd lines), clones it with the old
    kernel version replaced by *newversion*, and returns the whole file as
    a list of lines with the cloned stanza inserted just before the stanza
    it was templated from.

    :param remote: remote to read the existing grub.conf from
    :param newversion: kernel version string for the new entry
    :returns: list of lines for the new grub.conf
    """
    grubconf = teuthology.get_file(remote,
                                   '/boot/grub/grub.conf', sudo=True).decode()
    titleline = ''
    rootline = ''
    kernelline = ''
    initline = ''
    kernelversion = ''
    linenum = 0
    titlelinenum = 0
    # First pass: remember the line number of the first 'title' line and
    # capture the stanza lines we will use as templates.
    # NOTE: the regexes are raw strings; the original non-raw '(^\s+)...'
    # forms relied on an invalid escape sequence.
    for line in grubconf.split('\n'):
        if re.match(r'^title', line):
            titleline = line
            titlelinenum = linenum
        if re.match(r'(^\s+)root', line):
            rootline = line
        if re.match(r'(^\s+)kernel', line):
            kernelline = line
            for word in line.split(' '):
                if 'vmlinuz' in word:
                    kernelversion = word.split('vmlinuz-')[-1]
        if re.match(r'(^\s+)initrd', line):
            initline = line
        if (kernelline != '') and (initline != ''):
            break
        else:
            linenum += 1
    # Second pass: re-emit the file, inserting the substituted clone ahead
    # of the original stanza.  re.escape() keeps the dots and dashes in the
    # version from being treated as regex metacharacters.
    old_version_pat = re.escape(kernelversion)
    linenum = 0
    newgrubconf = []
    for line in grubconf.split('\n'):
        line = line.rstrip('\n')
        if linenum == titlelinenum:
            newgrubconf.append(re.sub(old_version_pat, newversion, titleline))
            newgrubconf.append(re.sub(old_version_pat, newversion, rootline))
            newgrubconf.append(re.sub(old_version_pat, newversion, kernelline))
            newgrubconf.append(re.sub(old_version_pat, newversion, initline))
            newgrubconf.append('')
            newgrubconf.append(line)
        else:
            newgrubconf.append(line)
        linenum += 1
    return newgrubconf
def get_image_version(remote, path):
    """Extract the kernel version shipped by the package at *path*.

    Lists the files contained in the rpm/deb on the remote and derives the
    version from the packaged /boot/vmlinuz-<version> entry.

    :param remote: remote the package file lives on
    :param path: path to the .rpm or .deb on the remote
    :returns: kernel version string
    :raises UnsupportedPackageTypeError: if the remote is neither rpm nor deb
    :raises RuntimeError: if the package ships no /boot/vmlinuz-* file
    """
    if remote.os.package_type == 'rpm':
        files = remote.sh(['rpm', '-qlp', path])
    elif remote.os.package_type == 'deb':
        files = remote.sh(['dpkg-deb', '-c', path])
    else:
        raise UnsupportedPackageTypeError(remote)
    # 'entry' avoids shadowing the builtin 'file'; version stays None if no
    # vmlinuz image is found (previously this raised UnboundLocalError).
    version = None
    for entry in files.split('\n'):
        if '/boot/vmlinuz-' in entry:
            version = entry.split('/boot/vmlinuz-')[1]
            break
    if version is None:
        raise RuntimeError('no /boot/vmlinuz-* found in package %s' % path)
    log.debug("get_image_version: %s", version)
    return version
def get_latest_image_version_rpm(remote):
    """Return the version of the newest installed kernel rpm on *remote*.

    Queries rpm for the most recently installed kernel package
    ('rpm -q --last' sorts by install time, newest first) and strips the
    package-name prefix from it.

    :param remote: remote to query
    :returns: version string, or None if no matching package token is found
    """
    dist_release = remote.os.name
    kernel_pkg_name = None
    version = None
    if dist_release in ['opensuse', 'sle']:
        kernel_pkg_name = "kernel-default"
    else:
        kernel_pkg_name = "kernel"
    newest_package = remote.sh(
        'rpm -q %s --last | head -n 1' % kernel_pkg_name).strip()
    # the line contains the package name plus an install timestamp; scan
    # its whitespace-separated tokens for the kernel package name, skipping
    # any token that mentions ceph
    for kernel in newest_package.split():
        if kernel.startswith('kernel'):
            if 'ceph' not in kernel:
                if dist_release in ['opensuse', 'sle']:
                    kernel = kernel.split()[0]
                version = kernel.split(str(kernel_pkg_name) + '-')[1]
    log.debug("get_latest_image_version_rpm: %s", version)
    return version
def get_latest_image_version_deb(remote, ostype):
    """Install the newest distro kernel metapackage and return its version.

    Installs the distribution's kernel metapackage and parses its
    'Depends:' line, which names the concrete linux-image-<version>
    package, to recover the version string.

    :param remote: remote to operate on
    :param ostype: distro name; 'debian' and 'ubuntu' variants are handled
    :returns: kernel version string (may be '' if no Depends line is seen)
    """
    remote.run(args=['sudo', 'apt-get', 'clean'])
    remote.run(args=['sudo', 'apt-get', 'update'])
    output = StringIO()
    newest = ''
    if 'debian' in ostype:
        remote.run(args=['sudo', 'apt-get', '-y', 'install',
                         'linux-image-amd64'], stdout=output)
        remote.run(args=['dpkg', '-s', 'linux-image-amd64'], stdout=output)
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                newest = line.split('linux-image-')[1]
        output.close()
        return newest
    if 'ubuntu' in ostype:
        try:
            # prefer the rolling 'current' metapackage when available
            remote.run(args=['sudo', 'DEBIAN_FRONTEND=noninteractive',
                             'apt-get', '-y', 'install',
                             'linux-image-current-generic'])
            remote.run(args=['dpkg', '-s', 'linux-image-current-generic'],
                       stdout=output)
            for line in output.getvalue().split('\n'):
                if 'Depends:' in line:
                    depends = line.split('Depends: ')[1]
            remote.run(args=['sudo', 'apt-get', '-y', 'install',
                             depends])
            remote.run(args=['dpkg', '-s', depends], stdout=output)
        except run.CommandFailedError:
            # no linux-image-current-generic, so use linux-image-generic
            # instead.
            remote.run(args=['sudo', 'DEBIAN_FRONTEND=noninteractive',
                             'apt-get', '-y', 'install',
                             'linux-image-generic'], stdout=output)
            remote.run(args=['dpkg', '-s', 'linux-image-generic'],
                       stdout=output)
        for line in output.getvalue().split('\n'):
            if 'Depends:' in line:
                newest = line.split('linux-image-')[1]
                if ',' in newest:
                    newest = newest.split(',')[0]
                if '|' in newest:
                    # not strictly correct, as any of the |-joined
                    # packages may satisfy the dependency
                    newest = newest.split('|')[0].strip()
    output.close()
    return newest
def get_sha1_from_pkg_name(path):
    """Pull the ceph commit hash out of a kernel package filename.

    Looks for a ``-ceph-g<sha1>`` / ``_ceph_g<sha1>`` fragment in the
    basename of *path* and returns the hex digest, or None when absent.
    """
    fname = os.path.basename(path)
    found = re.search('[-_]ceph[-_]g([0-9a-f]{12,40})', fname)
    if found:
        sha1 = found.group(1)
    else:
        sha1 = None
    log.debug("get_sha1_from_pkg_name: %s -> %s -> %s", path, fname, sha1)
    return sha1
def task(ctx, config):
    """Teuthology task entry point: install the requested kernel per role.

    For every role in *config*, works out which kernel is wanted (an
    explicit rpm/deb path, the newest distro kernel, a koji build/task, or
    a gitbuilder/shaman build identified by sha1), records what still
    needs installing, then installs, reboots and waits for the nodes.
    Optionally toggles kdb per role.

    :param ctx: teuthology run context
    :param config: per-role kernel configuration dict (or None)
    """
    if config is None:
        config = {}
    assert isinstance(config, dict), \
        "task kernel only supports a dictionary for configuration"
    overrides = ctx.config.get('overrides', {}).get('kernel', {})
    config, timeout = normalize_and_apply_overrides(ctx, config, overrides)
    validate_config(ctx, config)
    log.info('config %s, timeout %d' % (config, timeout))
    need_install = {} # sha1 to dl, or path to rpm or deb
    need_version = {} # utsrelease or sha1
    kdb = {}
    for role, role_config in config.items():
        # gather information about this remote
        (role_remote,) = ctx.cluster.only(role).remotes.keys()
        system_type = role_remote.os.name
        if role_config.get('rpm') or role_config.get('deb'):
            # We only care about path - deb: vs rpm: is meaningless,
            # rpm: just happens to be parsed first. Nothing is stopping
            # 'deb: /path/to/foo.rpm' and it will work provided remote's
            # package type actually matches the file.
            path = role_config.get('rpm')
            if not path:
                path = role_config.get('deb')
            sha1 = get_sha1_from_pkg_name(path)
            assert sha1, "failed to extract commit hash from path %s" % path
            if need_to_install(ctx, role, sha1):
                need_install[role] = path
                need_version[role] = sha1
        elif role_config.get('sha1') == 'distro':
            # returns the distro version to install, or falsy if the node
            # already runs the newest distro kernel
            version = need_to_install_distro(role_remote)
            if version:
                need_install[role] = 'distro'
                need_version[role] = version
        elif role_config.get("koji") or role_config.get('koji_task'):
            build_id = role_config.get("koji")
            task_id = role_config.get("koji_task")
            if role_remote.os.package_type != "rpm":
                msg = (
                    "Installing a kernel from koji is only supported "
                    "on rpm based systems. System type is {system_type}."
                )
                msg = msg.format(system_type=system_type)
                log.error(msg)
                ctx.summary["failure_reason"] = msg
                ctx.summary["status"] = "dead"
                raise ConfigError(msg)
            # needed to query koji from the remote
            install_package('koji', role_remote)
            if build_id:
                build_info = get_koji_build_info(build_id, role_remote, ctx)
                version = "{ver}-{rel}.x86_64".format(
                    ver=build_info["version"],
                    rel=build_info["release"]
                )
            elif task_id:
                task_result = get_koji_task_result(task_id, role_remote, ctx)
                # strictly speaking this is only the information
                # about the kernel rpm from the task results, but for the sake
                # of reusing the code below it is still called build_info.
                build_info = get_koji_task_rpm_info(
                    'kernel',
                    task_result['rpms']
                )
                # mark for downstream code that this info came
                # from a task and not a build.
                build_info["task_id"] = task_id
                version = build_info["version"]
            if need_to_install(ctx, role, version):
                need_install[role] = build_info
                need_version[role] = version
        else:
            # default: a kernel built by gitbuilder/shaman, looked up by sha1
            builder = get_builder_project()(
                "kernel",
                role_config,
                ctx=ctx,
                remote=role_remote,
            )
            sha1 = builder.sha1
            log.debug('sha1 for {role} is {sha1}'.format(role=role, sha1=sha1))
            ctx.summary['{role}-kernel-sha1'.format(role=role)] = sha1
            if need_to_install(ctx, role, sha1):
                if teuth_config.use_shaman:
                    version = builder.scm_version
                else:
                    version = builder.version
                if not version:
                    raise VersionNotFoundError(builder.base_url)
                need_install[role] = sha1
                need_version[role] = version
        # enable or disable kdb if specified, otherwise do not touch
        if role_config.get('kdb') is not None:
            kdb[role] = role_config.get('kdb')
    if need_install:
        install_firmware(ctx, need_install)
        download_kernel(ctx, need_install)
        install_and_reboot(ctx, need_install)
        wait_for_reboot(ctx, need_version, timeout)
    enable_disable_kdb(ctx, kdb)
| true | true |
f71b18b542139a4c825daebbad8c706309282806 | 5,588 | py | Python | tianshou/policy/modelfree/discrete_sac.py | danagi/tianshou | b364f1a26f1b8528b01a445a488160ce2d910a1c | [
"MIT"
] | 1 | 2020-08-25T07:55:52.000Z | 2020-08-25T07:55:52.000Z | tianshou/policy/modelfree/discrete_sac.py | q-learning-trader/tianshou | c97aa4065ee8464bd5897bb86f1f81abd8e2cff9 | [
"MIT"
] | null | null | null | tianshou/policy/modelfree/discrete_sac.py | q-learning-trader/tianshou | c97aa4065ee8464bd5897bb86f1f81abd8e2cff9 | [
"MIT"
] | 1 | 2020-04-25T13:05:21.000Z | 2020-04-25T13:05:21.000Z | import torch
import numpy as np
from torch.distributions import Categorical
from typing import Any, Dict, Tuple, Union, Optional
from tianshou.policy import SACPolicy
from tianshou.data import Batch, ReplayBuffer, to_torch
class DiscreteSACPolicy(SACPolicy):
    """Implementation of SAC for Discrete Action Settings. arXiv:1910.07207.

    :param torch.nn.Module actor: the actor network following the rules in
        :class:`~tianshou.policy.BasePolicy`. (s -> logits)
    :param torch.optim.Optimizer actor_optim: the optimizer for actor network.
    :param torch.nn.Module critic1: the first critic network. (s -> Q(s))
    :param torch.optim.Optimizer critic1_optim: the optimizer for the first
        critic network.
    :param torch.nn.Module critic2: the second critic network. (s -> Q(s))
    :param torch.optim.Optimizer critic2_optim: the optimizer for the second
        critic network.
    :param float tau: param for soft update of the target network, defaults to
        0.005.
    :param float gamma: discount factor, in [0, 1], defaults to 0.99.
    :param (float, torch.Tensor, torch.optim.Optimizer) or float alpha: entropy
        regularization coefficient, default to 0.2.
        If a tuple (target_entropy, log_alpha, alpha_optim) is provided, then
        alpha is automatically tuned.
    :param bool reward_normalization: normalize the reward to Normal(0, 1),
        defaults to ``False``.
    :param bool ignore_done: ignore the done flag while training the policy,
        defaults to ``False``.

    .. seealso::

        Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed
        explanation.
    """

    def __init__(
        self,
        actor: torch.nn.Module,
        actor_optim: torch.optim.Optimizer,
        critic1: torch.nn.Module,
        critic1_optim: torch.optim.Optimizer,
        critic2: torch.nn.Module,
        critic2_optim: torch.optim.Optimizer,
        tau: float = 0.005,
        gamma: float = 0.99,
        alpha: Union[
            float, Tuple[float, torch.Tensor, torch.optim.Optimizer]
        ] = 0.2,
        reward_normalization: bool = False,
        ignore_done: bool = False,
        estimation_step: int = 1,
        **kwargs: Any,
    ) -> None:
        # Discrete actions have no range to clip, so pass an unbounded
        # (-inf, inf) action range to the continuous SAC base class.
        super().__init__(actor, actor_optim, critic1, critic1_optim, critic2,
                         critic2_optim, (-np.inf, np.inf), tau, gamma, alpha,
                         reward_normalization, ignore_done, estimation_step,
                         **kwargs)
        # alpha is either a fixed float or a learnable tensor (auto-tuned)
        self._alpha: Union[float, torch.Tensor]

    def forward(  # type: ignore
        self,
        batch: Batch,
        state: Optional[Union[dict, Batch, np.ndarray]] = None,
        input: str = "obs",
        **kwargs: Any,
    ) -> Batch:
        """Compute the categorical action distribution and sample an action."""
        obs = batch[input]
        logits, h = self.actor(obs, state=state, info=batch.info)
        dist = Categorical(logits=logits)
        act = dist.sample()
        return Batch(logits=logits, act=act, state=h, dist=dist)

    def _target_q(
        self, buffer: ReplayBuffer, indice: np.ndarray
    ) -> torch.Tensor:
        """Soft state value of s_{t+n}: E_a[min(Q1', Q2')] + alpha * H(pi)."""
        batch = buffer[indice]  # batch.obs: s_{t+n}
        with torch.no_grad():
            obs_next_result = self(batch, input="obs_next")
            dist = obs_next_result.dist
            # discrete case: take the exact expectation over actions by
            # weighting the elementwise minimum of both target critics
            # with the policy probabilities
            target_q = dist.probs * torch.min(
                self.critic1_old(batch.obs_next),
                self.critic2_old(batch.obs_next),
            )
        target_q = target_q.sum(dim=-1) + self._alpha * dist.entropy()
        return target_q

    def learn(self, batch: Batch, **kwargs: Any) -> Dict[str, float]:
        """One gradient step on both critics, the actor and (optionally) alpha."""
        weight = batch.pop("weight", 1.0)
        target_q = batch.returns.flatten()
        # gather() needs the action as a column index tensor
        act = to_torch(
            batch.act[:, np.newaxis], device=target_q.device, dtype=torch.long)
        # critic 1
        current_q1 = self.critic1(batch.obs).gather(1, act).flatten()
        td1 = current_q1 - target_q
        critic1_loss = (td1.pow(2) * weight).mean()
        self.critic1_optim.zero_grad()
        critic1_loss.backward()
        self.critic1_optim.step()
        # critic 2
        current_q2 = self.critic2(batch.obs).gather(1, act).flatten()
        td2 = current_q2 - target_q
        critic2_loss = (td2.pow(2) * weight).mean()
        self.critic2_optim.zero_grad()
        critic2_loss.backward()
        self.critic2_optim.step()
        batch.weight = (td1 + td2) / 2.0  # prio-buffer
        # actor: maximize E_a[Q] + alpha * H(pi); critics are frozen here
        dist = self(batch).dist
        entropy = dist.entropy()
        with torch.no_grad():
            current_q1a = self.critic1(batch.obs)
            current_q2a = self.critic2(batch.obs)
            q = torch.min(current_q1a, current_q2a)
        actor_loss = -(self._alpha * entropy
                       + (dist.probs * q).sum(dim=-1)).mean()
        self.actor_optim.zero_grad()
        actor_loss.backward()
        self.actor_optim.step()
        if self._is_auto_alpha:
            # move alpha towards keeping entropy near the target entropy
            log_prob = -entropy.detach() + self._target_entropy
            alpha_loss = -(self._log_alpha * log_prob).mean()
            self._alpha_optim.zero_grad()
            alpha_loss.backward()
            self._alpha_optim.step()
            self._alpha = self._log_alpha.detach().exp()
        self.sync_weight()
        result = {
            "loss/actor": actor_loss.item(),
            "loss/critic1": critic1_loss.item(),
            "loss/critic2": critic2_loss.item(),
        }
        if self._is_auto_alpha:
            result["loss/alpha"] = alpha_loss.item()
            result["alpha"] = self._alpha.item()  # type: ignore
        return result
| 37.503356 | 79 | 0.607552 | import torch
import numpy as np
from torch.distributions import Categorical
from typing import Any, Dict, Tuple, Union, Optional
from tianshou.policy import SACPolicy
from tianshou.data import Batch, ReplayBuffer, to_torch
class DiscreteSACPolicy(SACPolicy):
def __init__(
self,
actor: torch.nn.Module,
actor_optim: torch.optim.Optimizer,
critic1: torch.nn.Module,
critic1_optim: torch.optim.Optimizer,
critic2: torch.nn.Module,
critic2_optim: torch.optim.Optimizer,
tau: float = 0.005,
gamma: float = 0.99,
alpha: Union[
float, Tuple[float, torch.Tensor, torch.optim.Optimizer]
] = 0.2,
reward_normalization: bool = False,
ignore_done: bool = False,
estimation_step: int = 1,
**kwargs: Any,
) -> None:
super().__init__(actor, actor_optim, critic1, critic1_optim, critic2,
critic2_optim, (-np.inf, np.inf), tau, gamma, alpha,
reward_normalization, ignore_done, estimation_step,
**kwargs)
self._alpha: Union[float, torch.Tensor]
def forward(
self,
batch: Batch,
state: Optional[Union[dict, Batch, np.ndarray]] = None,
input: str = "obs",
**kwargs: Any,
) -> Batch:
obs = batch[input]
logits, h = self.actor(obs, state=state, info=batch.info)
dist = Categorical(logits=logits)
act = dist.sample()
return Batch(logits=logits, act=act, state=h, dist=dist)
def _target_q(
self, buffer: ReplayBuffer, indice: np.ndarray
) -> torch.Tensor:
batch = buffer[indice]
with torch.no_grad():
obs_next_result = self(batch, input="obs_next")
dist = obs_next_result.dist
target_q = dist.probs * torch.min(
self.critic1_old(batch.obs_next),
self.critic2_old(batch.obs_next),
)
target_q = target_q.sum(dim=-1) + self._alpha * dist.entropy()
return target_q
def learn(self, batch: Batch, **kwargs: Any) -> Dict[str, float]:
weight = batch.pop("weight", 1.0)
target_q = batch.returns.flatten()
act = to_torch(
batch.act[:, np.newaxis], device=target_q.device, dtype=torch.long)
current_q1 = self.critic1(batch.obs).gather(1, act).flatten()
td1 = current_q1 - target_q
critic1_loss = (td1.pow(2) * weight).mean()
self.critic1_optim.zero_grad()
critic1_loss.backward()
self.critic1_optim.step()
current_q2 = self.critic2(batch.obs).gather(1, act).flatten()
td2 = current_q2 - target_q
critic2_loss = (td2.pow(2) * weight).mean()
self.critic2_optim.zero_grad()
critic2_loss.backward()
self.critic2_optim.step()
batch.weight = (td1 + td2) / 2.0
dist = self(batch).dist
entropy = dist.entropy()
with torch.no_grad():
current_q1a = self.critic1(batch.obs)
current_q2a = self.critic2(batch.obs)
q = torch.min(current_q1a, current_q2a)
actor_loss = -(self._alpha * entropy
+ (dist.probs * q).sum(dim=-1)).mean()
self.actor_optim.zero_grad()
actor_loss.backward()
self.actor_optim.step()
if self._is_auto_alpha:
log_prob = -entropy.detach() + self._target_entropy
alpha_loss = -(self._log_alpha * log_prob).mean()
self._alpha_optim.zero_grad()
alpha_loss.backward()
self._alpha_optim.step()
self._alpha = self._log_alpha.detach().exp()
self.sync_weight()
result = {
"loss/actor": actor_loss.item(),
"loss/critic1": critic1_loss.item(),
"loss/critic2": critic2_loss.item(),
}
if self._is_auto_alpha:
result["loss/alpha"] = alpha_loss.item()
result["alpha"] = self._alpha.item()
return result
| true | true |
f71b19a1735a916e1c2e81907fdea6d406f03d8f | 6,275 | py | Python | cis-audit.py | flokoe/cis-benchmarks-audit | 85d923cbff9ffe6cede964c3bd2f3ea513944c8d | [
"MIT"
] | null | null | null | cis-audit.py | flokoe/cis-benchmarks-audit | 85d923cbff9ffe6cede964c3bd2f3ea513944c8d | [
"MIT"
] | null | null | null | cis-audit.py | flokoe/cis-benchmarks-audit | 85d923cbff9ffe6cede964c3bd2f3ea513944c8d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script runs tests on the system to check for compliance against different CIS Benchmarks.
No changes are made to system files by this script. Audit only.
License: MIT
"""
from argparse import ArgumentParser
from datetime import datetime
from time import sleep
import json, subprocess, pathlib
__author__ = 'Florian Köhler'
__version__ = '0.1.0'
__license__ = 'MIT'
def parse_cli():
    """Build the command line interface and return the parsed arguments."""
    description = (
        'This script runs tests on the system to check for compliance '
        'against different CIS Benchmarks. No changes are made to system '
        'files by this script. Audit only.'
    )
    cli = ArgumentParser(description=description)
    cli.add_argument('--version', action='version', version=__version__)
    cli.add_argument('--benchmark', '-b', type=pathlib.Path, required=True,
                     metavar='FILE', help='Path to benchmark file (Required).')
    cli.add_argument('--level', '-l', type=int, choices=[1, 2], default=None,
                     help='Run tests for the specified level only. Defaults to both.')
    cli.add_argument('--include', '-i', default=None, metavar='LIST',
                     help='Comma separated list of tests to include.')
    cli.add_argument('--exclude', '-e', default=None, metavar='LIST',
                     help='Comma separated list of tests to exclude.')
    return cli.parse_args()
class Recommendation:
def __init__(self, id, description, scored, level, type, test_input, expected_output):
self.id = id
self.description = description
self.scored = scored
self.level = level
self.type = type
self.test_input = test_input
self.expected_output = expected_output
self.error = None
self.passed = None
def match(self, level, include, exclude):
if not level or level in self.level:
return True
class Benchmark:
    """Load a benchmark definition, filter its tests, run them and report.

    The constructor does all the work: it parses the JSON benchmark file
    named on the command line, keeps the scored non-chapter
    recommendations matching the CLI filters, executes them and prints a
    result table.
    """

    def __init__(self, args):
        self.start_time = datetime.now()
        self.recommendations = []  # every entry from the benchmark file
        self.filtered_tests = []  # subset that will actually be run
        self._total_number_tests = None
        self._number_passed_tests = None
        self._number_of_errors = None
        # dispatch table: recommendation 'type' -> test implementation
        self.test_types = {
            "output_contains": self.test_output_contains,
        }
        print("Loading benchmark data...\n")
        with open(args.benchmark, 'r') as benchmark_file:
            data = benchmark_file.read()
        benchmark_data = json.loads(data)
        self.name = benchmark_data['name']
        self.version = benchmark_data['version']
        for recommendation in benchmark_data['recommendations']:
            self.add_recommendation(recommendation)
        # 'chapter' entries are headings and unscored entries are not
        # auditable; neither gets executed
        for recommendation in self.recommendations:
            if recommendation.type != 'chapter' and recommendation.scored:
                if recommendation.match(args.level, args.include, args.exclude):
                    self.filtered_tests.append(recommendation)
        self.execute_tests()

    def add_recommendation(self, recommendation):
        """Wrap one raw benchmark dict in a Recommendation object."""
        self.recommendations.append(Recommendation(recommendation['id'], recommendation['description'], recommendation['scored'], recommendation['level'], recommendation['type'], recommendation['test_input'], recommendation['expected_output']))

    @property
    def total_number_tests(self):
        # cached on first access; NOTE(review): a falsy cached value (0)
        # causes recomputation on every access
        if not self._total_number_tests:
            self._total_number_tests = len(self.filtered_tests)
        return self._total_number_tests

    @property
    def number_passed_tests(self):
        # number of selected tests that passed, computed lazily
        if not self._number_passed_tests:
            passed_tests = []
            for test in self.filtered_tests:
                if test.passed:
                    passed_tests.append(test)
            self._number_passed_tests = len(passed_tests)
        return self._number_passed_tests

    @property
    def number_of_errors(self):
        # number of selected tests whose command raised, computed lazily
        if not self._number_of_errors:
            error_tests = []
            for test in self.filtered_tests:
                if test.error:
                    error_tests.append(test)
            self._number_of_errors = len(error_tests)
        return self._number_of_errors

    def execute_tests(self):
        """Run every filtered test with an in-place progress line, then report."""
        print(f"0 of {self.total_number_tests} tests completed.", end="\r")
        for index, test in enumerate(self.filtered_tests, start=1):
            # look up the runner for this recommendation's type
            execute_test = self.test_types.get(test.type)
            execute_test(test)
            if index < self.total_number_tests:
                print(f"{index} of {self.total_number_tests} tests completed.", end="\r")
            else:
                print(f"{index} of {self.total_number_tests} tests completed.\n")
        self.output_results()

    def output_results(self):
        """Print the aligned result table and a pass/fail summary line."""
        heading = f"CIS {self.name} Benchmark v{self.version} Results"
        heading_separator = '-' * len(heading)
        # column widths track the longest id / description in the run
        id_padding = len(max([str(test.id) for test in self.filtered_tests], key = len))
        desc_padding = len(max([test.description for test in self.filtered_tests], key = len))
        result_heading = 'ID'.ljust(id_padding, ' ') + ' ' + 'Description'.ljust(desc_padding, ' ') + ' Scored' + ' Level' + ' Result'
        result_separator = '--'.ljust(id_padding, ' ') + ' ' + '-----------'.ljust(desc_padding, ' ') + ' ------' + ' -----' + ' ------'
        print(heading)
        print(heading_separator)
        print(result_heading)
        print(result_separator)
        print("")
        for test in self.filtered_tests:
            print(f"{test.id.ljust(id_padding, ' ')} {test.description.ljust(desc_padding, ' ')} {'Yes ' if test.scored else 'No '} {'1, 2 ' if len(test.level) == 2 else str(test.level[0]).ljust(5, ' ')} {'Error ' if test.error else 'Pass ' if test.passed else 'Fail '}")
        print("")
        print(f"Passed {self.number_passed_tests} of {self.total_number_tests} tests in x seconds (x Skipped, {self.number_of_errors} Errors)")

    def test_output_contains(self, test):
        """Run the test command and check its stdout for the expected text."""
        command = test.test_input.split(' ')
        try:
            output = subprocess.check_output(command, universal_newlines=True)
            if test.expected_output in output:
                test.passed = True
            else:
                test.passed = False
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt --
            # consider narrowing to OSError/subprocess.SubprocessError
            test.error = True
            test.passed = False
if __name__ == '__main__':
    # Entry point: parse CLI options and run the audit immediately
    # (Benchmark's constructor performs the whole run).
    args = parse_cli()
    Benchmark(args)
| 38.734568 | 282 | 0.637131 |
from argparse import ArgumentParser
from datetime import datetime
from time import sleep
import json, subprocess, pathlib
__author__ = 'Florian Köhler'
__version__ = '0.1.0'
__license__ = 'MIT'
def parse_cli():
parser = ArgumentParser(description='This script runs tests on the system to check for compliance against different CIS Benchmarks. No changes are made to system files by this script. Audit only.')
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('--benchmark', '-b', type=pathlib.Path, required=True, metavar='FILE', help='Path to benchmark file (Required).')
parser.add_argument('--level', '-l', type=int, choices=[1, 2], default=None, help='Run tests for the specified level only. Defaults to both.')
parser.add_argument('--include', '-i', default=None, metavar='LIST', help='Comma separated list of tests to include.')
parser.add_argument('--exclude', '-e', default=None, metavar='LIST', help='Comma separated list of tests to exclude.')
return parser.parse_args()
class Recommendation:
def __init__(self, id, description, scored, level, type, test_input, expected_output):
self.id = id
self.description = description
self.scored = scored
self.level = level
self.type = type
self.test_input = test_input
self.expected_output = expected_output
self.error = None
self.passed = None
def match(self, level, include, exclude):
if not level or level in self.level:
return True
class Benchmark:
def __init__(self, args):
self.start_time = datetime.now()
self.recommendations = []
self.filtered_tests = []
self._total_number_tests = None
self._number_passed_tests = None
self._number_of_errors = None
self.test_types = {
"output_contains": self.test_output_contains,
}
print("Loading benchmark data...\n")
with open(args.benchmark, 'r') as benchmark_file:
data = benchmark_file.read()
benchmark_data = json.loads(data)
self.name = benchmark_data['name']
self.version = benchmark_data['version']
for recommendation in benchmark_data['recommendations']:
self.add_recommendation(recommendation)
for recommendation in self.recommendations:
if recommendation.type != 'chapter' and recommendation.scored:
if recommendation.match(args.level, args.include, args.exclude):
self.filtered_tests.append(recommendation)
self.execute_tests()
def add_recommendation(self, recommendation):
self.recommendations.append(Recommendation(recommendation['id'], recommendation['description'], recommendation['scored'], recommendation['level'], recommendation['type'], recommendation['test_input'], recommendation['expected_output']))
@property
def total_number_tests(self):
if not self._total_number_tests:
self._total_number_tests = len(self.filtered_tests)
return self._total_number_tests
@property
def number_passed_tests(self):
if not self._number_passed_tests:
passed_tests = []
for test in self.filtered_tests:
if test.passed:
passed_tests.append(test)
self._number_passed_tests = len(passed_tests)
return self._number_passed_tests
@property
def number_of_errors(self):
if not self._number_of_errors:
error_tests = []
for test in self.filtered_tests:
if test.error:
error_tests.append(test)
self._number_of_errors = len(error_tests)
return self._number_of_errors
def execute_tests(self):
print(f"0 of {self.total_number_tests} tests completed.", end="\r")
for index, test in enumerate(self.filtered_tests, start=1):
execute_test = self.test_types.get(test.type)
execute_test(test)
if index < self.total_number_tests:
print(f"{index} of {self.total_number_tests} tests completed.", end="\r")
else:
print(f"{index} of {self.total_number_tests} tests completed.\n")
self.output_results()
def output_results(self):
heading = f"CIS {self.name} Benchmark v{self.version} Results"
heading_separator = '-' * len(heading)
id_padding = len(max([str(test.id) for test in self.filtered_tests], key = len))
desc_padding = len(max([test.description for test in self.filtered_tests], key = len))
result_heading = 'ID'.ljust(id_padding, ' ') + ' ' + 'Description'.ljust(desc_padding, ' ') + ' Scored' + ' Level' + ' Result'
result_separator = '--'.ljust(id_padding, ' ') + ' ' + '-----------'.ljust(desc_padding, ' ') + ' ------' + ' -----' + ' ------'
print(heading)
print(heading_separator)
print(result_heading)
print(result_separator)
print("")
for test in self.filtered_tests:
print(f"{test.id.ljust(id_padding, ' ')} {test.description.ljust(desc_padding, ' ')} {'Yes ' if test.scored else 'No '} {'1, 2 ' if len(test.level) == 2 else str(test.level[0]).ljust(5, ' ')} {'Error ' if test.error else 'Pass ' if test.passed else 'Fail '}")
print("")
print(f"Passed {self.number_passed_tests} of {self.total_number_tests} tests in x seconds (x Skipped, {self.number_of_errors} Errors)")
def test_output_contains(self, test):
command = test.test_input.split(' ')
try:
output = subprocess.check_output(command, universal_newlines=True)
if test.expected_output in output:
test.passed = True
else:
test.passed = False
except:
test.error = True
test.passed = False
if __name__ == '__main__':
args = parse_cli()
Benchmark(args)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.