content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
# layout.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import util
from util import manhattanDistance
from game import Grid
import os
import random
import itertools
VISIBILITY_MATRIX_CACHE = {}
class Layout:
    """
    A Layout manages the static information about the game board.
    """

    def __init__(self, layoutText):
        # layoutText is a list of equal-length strings, one per board row.
        self.width = len(layoutText[0])
        self.height = len(layoutText)
        self.walls = Grid(self.width, self.height, False)
        self.food = Grid(self.width, self.height, False)
        self.capsules = []
        self.agentPositions = []
        self.numGhosts = 0
        self.processLayoutText(layoutText)
        self.layoutText = layoutText
        self.totalFood = len(self.food.asList())
        # self.initializeVisibilityMatrix()

    def getNumGhosts(self):
        """Return the number of ghost agents found in the layout text."""
        return self.numGhosts

    def initializeVisibilityMatrix(self):
        """Precompute, for every non-wall cell and each facing direction, the
        set of half-step positions visible before the line of sight hits a
        wall.  Results are cached globally, keyed on the raw layout text.
        """
        global VISIBILITY_MATRIX_CACHE
        # BUGFIX: the original called `reduce(str.__add__, ...)` without
        # importing `reduce`, which raises NameError on Python 3.
        key = ''.join(self.layoutText)
        if key not in VISIBILITY_MATRIX_CACHE:
            from game import Directions
            vecs = [(-0.5, 0), (0.5, 0), (0, -0.5), (0, 0.5)]
            dirs = [Directions.NORTH, Directions.SOUTH,
                    Directions.WEST, Directions.EAST]
            # NOTE(review): the same default dict is handed to Grid for every
            # cell; if Grid does not copy it per cell, all cells share one
            # direction->set mapping -- verify against Grid's implementation.
            vis = Grid(self.width, self.height,
                       {Directions.NORTH: set(), Directions.SOUTH: set(),
                        Directions.EAST: set(), Directions.WEST: set(),
                        Directions.STOP: set()})
            for x in range(self.width):
                for y in range(self.height):
                    if not self.walls[x][y]:
                        for vec, direction in zip(vecs, dirs):
                            dx, dy = vec
                            nextx, nexty = x + dx, y + dy
                            # Walk outward in half-cell steps until landing
                            # exactly on an integer coordinate that is a wall.
                            # (Assumes layouts are wall-bordered so the walk
                            # terminates in-bounds -- TODO confirm.)
                            while (nextx + nexty) != int(nextx) + int(nexty) \
                                    or not self.walls[int(nextx)][int(nexty)]:
                                vis[x][y][direction].add((nextx, nexty))
                                # BUGFIX: the original reset to the *first*
                                # step every iteration, looping forever.
                                nextx, nexty = nextx + dx, nexty + dy
            self.visibility = vis
            VISIBILITY_MATRIX_CACHE[key] = vis
        else:
            self.visibility = VISIBILITY_MATRIX_CACHE[key]

    def isWall(self, pos):
        """Return True if the (x, y) position is a wall."""
        x, col = pos
        return self.walls[x][col]

    def getRandomLegalPosition(self):
        """Return a uniformly random non-wall (x, y) position."""
        x = random.choice(range(self.width))
        y = random.choice(range(self.height))
        while self.isWall((x, y)):
            x = random.choice(range(self.width))
            y = random.choice(range(self.height))
        return (x, y)

    def getRandomCorner(self):
        """Return one of the four corner cells just inside the border wall."""
        poses = [(1, 1), (1, self.height - 2),
                 (self.width - 2, 1), (self.width - 2, self.height - 2)]
        return random.choice(poses)

    def getFurthestCorner(self, pacPos):
        """Return the corner with the largest Manhattan distance from pacPos."""
        poses = [(1, 1), (1, self.height - 2),
                 (self.width - 2, 1), (self.width - 2, self.height - 2)]
        dist, pos = max([(manhattanDistance(p, pacPos), p) for p in poses])
        return pos

    def isVisibleFrom(self, ghostPos, pacPos, pacDirection):
        """Return True if ghostPos is in the precomputed visibility set of
        pacPos facing pacDirection.  Requires initializeVisibilityMatrix()
        to have been called first.
        """
        row, col = [int(x) for x in pacPos]
        return ghostPos in self.visibility[row][col][pacDirection]

    def __str__(self):
        return "\n".join(self.layoutText)

    def deepCopy(self):
        """Return a fresh Layout rebuilt from a copy of the layout text."""
        return Layout(self.layoutText[:])

    def processLayoutText(self, layoutText):
        """
        Coordinates are flipped from the input format to the (x,y) convention here

        The shape of the maze.  Each character
        represents a different type of object.
         % - Wall
         . - Food
         o - Capsule
         G - Ghost
         P - Pacman
        Other characters are ignored.
        """
        maxY = self.height - 1
        for y in range(self.height):
            for x in range(self.width):
                layoutChar = layoutText[maxY - y][x]
                self.processLayoutChar(x, y, layoutChar)
        self.agentPositions.sort()
        # Pacman (index 0) is flagged True; ghosts/numbered agents are False.
        self.agentPositions = [(i == 0, pos) for i, pos in self.agentPositions]

    def processLayoutChar(self, x, y, layoutChar):
        """Record a single layout character at grid position (x, y)."""
        if layoutChar == '%':
            self.walls[x][y] = True
        elif layoutChar == '.':
            self.food[x][y] = True
        elif layoutChar == 'o':
            self.capsules.append((x, y))
        elif layoutChar == 'P':
            self.agentPositions.append((0, (x, y)))
        elif layoutChar in ['G']:
            self.agentPositions.append((1, (x, y)))
            self.numGhosts += 1
        elif layoutChar in ['1', '2', '3', '4']:
            self.agentPositions.append((int(layoutChar), (x, y)))
            self.numGhosts += 1
def getLayout(name, back=2):
    """Load a layout by name, trying 'layouts/' first and then the current
    directory, recursing up to ``back`` parent directories if not found.

    Parameters:
        name: layout name, with or without the '.lay' extension.
        back: number of parent directories still allowed to be searched.

    Returns:
        A Layout instance, or None if no matching file was found.
    """
    if name.endswith('.lay'):
        layout = tryToLoad('layouts/' + name)
        # Use `is None` (identity) rather than `== None` for the sentinel.
        if layout is None:
            layout = tryToLoad(name)
    else:
        layout = tryToLoad('layouts/' + name + '.lay')
        if layout is None:
            layout = tryToLoad(name + '.lay')
    if layout is None and back >= 0:
        # Not found here: retry one directory up, then restore the cwd.
        curdir = os.path.abspath('.')
        os.chdir('..')
        layout = getLayout(name, back - 1)
        os.chdir(curdir)
    return layout
def tryToLoad(fullname):
    """Return a Layout parsed from ``fullname``, or None if the file
    does not exist.
    """
    if not os.path.exists(fullname):
        return None
    # `with` guarantees the handle is closed even if Layout() raises,
    # replacing the original try/finally pair.
    with open(fullname) as f:
        return Layout([line.strip() for line in f])
| tracking/layout.py | 5,782 | A Layout manages the static information about the game board.
Coordinates are flipped from the input format to the (x,y) convention here
The shape of the maze. Each character
represents a different type of object.
% - Wall
. - Food
o - Capsule
G - Ghost
P - Pacman
Other characters are ignored.
layout.py --------- Licensing Information: You are free to use or extend these projects for educational purposes provided that (1) you do not distribute or publish solutions, (2) you retain this notice, and (3) you provide clear attribution to UC Berkeley, including a link to http://ai.berkeley.edu. Attribution Information: The Pacman AI projects were developed at UC Berkeley. The core projects and autograders were primarily created by John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu). Student side autograding was added by Brad Miller, Nick Hay, and Pieter Abbeel (pabbeel@cs.berkeley.edu). self.initializeVisibilityMatrix() | 961 | en | 0.846475 |
# Connection libraries
import os
import shutil
import re
# Class create project
class Create:
    """Creates and deletes project directories from a bundled template."""

    def __init__(self, path):
        # Root directory under which projects live (expected to end with a
        # path separator, since names are appended by concatenation).
        self.path = path

    # Create project
    def createProject(self, name):
        """Copy the template into a new project directory called *name*.

        If that directory already exists, a numeric suffix equal to the
        count of existing entries containing "new" is appended instead.
        """
        target = self.path + name
        if os.path.isdir(target):
            matches = [entry for entry in os.listdir(path=self.path)
                       if "new" in entry]
            target = self.path + name + str(len(matches))
        shutil.copytree("launcher/shablon/", target)

    # Delete project
    def deleteProject(self, name):
        """Remove the project directory *name* and everything inside it."""
        shutil.rmtree(self.path + name)
import matplotlib.pyplot as plt, streamlit as st
from typing import Iterable, Union
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc, RocCurveDisplay
def train(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
    """
    Fit an (unfitted) estimator on training data.

    Parameters:
        estimator: Unfitted estimator exposing a scikit-learn style
            ``fit(X, y)`` method.
        X: Input training data.
        y: Training labels.

    Returns:
        The fitted estimator (whatever ``estimator.fit`` returns).
    """
    fitted_model = estimator.fit(X=X, y=y)
    return fitted_model
def classify(estimator: object, X: Iterable[Union[int, float]]):
    """
    Produce label predictions from a fitted classifier.

    Parameters:
        estimator: Fitted estimator exposing a ``predict(X)`` method.
        X: Input test data.

    Returns:
        The predicted labels (whatever ``estimator.predict`` returns).
    """
    predictions = estimator.predict(X=X)
    return predictions
def regress(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
    """
    Predict with custom regressor model.

    Parameters:
        estimator: Fitted estimator.
        X: Input test data.
        y: Labels for test data.

    Returns:
        Predicted labels.

    NOTE(review): not implemented -- this is a stub that currently returns
    None.  The docstring describes intended behavior only; presumably it
    should mirror ``classify`` via ``estimator.predict`` -- TODO confirm.
    """
    # TODO: implement regression prediction.
    pass
def evaluate(estimator: object, X: Iterable[Union[int, float]], y: Iterable):
    """
    Evaluate a fitted classifier and render the results in Streamlit.

    Parameters:
        estimator: Fitted estimator exposing ``predict(X)``.
        X: Input test data.
        y: True labels for the test data.

    Returns:
        None.  Writes a classification report and an ROC curve to the
        Streamlit app as a side effect.
    """
    pred = estimator.predict(X=X)

    # classification report
    report = classification_report(y_true=y, y_pred=pred)
    st.write('Classification Report')
    st.write(report)

    # ROC curve.  NOTE(review): roc_curve() is fed hard class predictions
    # here rather than probability scores, so the curve is degenerate
    # (a single threshold point); consider predict_proba() upstream.
    fpr, tpr, thresholds = roc_curve(y, pred)
    roc_auc = auc(fpr, tpr)

    # BUGFIX: RocCurveDisplay(...) returns a display object, not a 3-tuple;
    # the original `_, _, figure = RocCurveDisplay(...)` raised a TypeError.
    # Also pass the estimator's class *name*, not the class object.
    display = RocCurveDisplay(
        fpr=fpr,
        tpr=tpr,
        roc_auc=roc_auc,
        estimator_name=type(estimator).__name__,
    )
    display.plot()
    st.pyplot(fig=display.figure_)
| ml.py | 1,912 | Predict with custom classifier model.
Parameters:
estimator: Fitted estimator.
X: Input test data.
Returns:
Predicted labels.
Predict with custom classifier model.
Parameters:
estimator: Fitted estimator.
X: Input test data.
y: Labels for test data.
Returns:
Predicted labels.
Predict with custom regressor model.
Parameters:
estimator: Fitted estimator.
X: Input test data.
y: Labels for test data.
Returns:
Predicted labels.
Train custom classifier model.
Parameters:
estimator: Unfitted estimator.
X: Input training data.
y: Labels for test data.
Returns:
Fitted estimator model.
classification report ROC curve | 684 | en | 0.343033 |
"""
CAR CONFIG
This file is read by your car application's manage.py script to change the car
performance.
EXMAPLE
-----------
import dk
cfg = dk.load_config(config_path='~/mycar/config.py')
print(cfg.CAMERA_RESOLUTION)
"""
import os
#PATHS
CAR_PATH = PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(CAR_PATH, 'data')
MODELS_PATH = os.path.join(CAR_PATH, 'models')
#VEHICLE
DRIVE_LOOP_HZ = 20
MAX_LOOPS = 100000
#CAMERA
CAMERA_TYPE = "PICAM" # (PICAM|WEBCAM|CVCAM|CSIC|V4L|D435|MOCK|IMAGE_LIST)
IMAGE_W = 160
IMAGE_H = 120
IMAGE_DEPTH = 3 # default RGB=3, make 1 for mono
CAMERA_FRAMERATE = DRIVE_LOOP_HZ
CAMERA_VFLIP = False
CAMERA_HFLIP = False
#9865, over rides only if needed, ie. TX2..
PCA9685_I2C_ADDR = 0x40
PCA9685_I2C_BUSNUM = None
#STEERING
STEERING_CHANNEL = 1
STEERING_LEFT_PWM = 460
STEERING_RIGHT_PWM = 290
#THROTTLE
THROTTLE_CHANNEL = 0
THROTTLE_FORWARD_PWM = 500
THROTTLE_STOPPED_PWM = 370
THROTTLE_REVERSE_PWM = 220
#TRAINING
DEFAULT_MODEL_TYPE = 'linear' #(linear|categorical|rnn|imu|behavior|3d|localizer|latent)
BATCH_SIZE = 128
TRAIN_TEST_SPLIT = 0.8
MAX_EPOCHS = 100
SHOW_PLOT = True
VERBOSE_TRAIN = True
USE_EARLY_STOP = True
EARLY_STOP_PATIENCE = 5
MIN_DELTA = .0005
PRINT_MODEL_SUMMARY = True #print layers and weights to stdout
OPTIMIZER = None #adam, sgd, rmsprop, etc.. None accepts default
LEARNING_RATE = 0.001 #only used when OPTIMIZER specified
LEARNING_RATE_DECAY = 0.0 #only used when OPTIMIZER specified
CACHE_IMAGES = True #keep images in memory. will speed succesive epochs, but crater if not enough mem.
PRUNE_CNN = False
PRUNE_PERCENT_TARGET = 75 # The desired percentage of pruning.
PRUNE_PERCENT_PER_ITERATION = 20 # Percenge of pruning that is perform per iteration.
PRUNE_VAL_LOSS_DEGRADATION_LIMIT = 0.2 # The max amout of validation loss that is permitted during pruning.
PRUNE_EVAL_PERCENT_OF_DATASET = .05 # percent of dataset used to perform evaluation of model.
#model transfer options
FREEZE_LAYERS = False
NUM_LAST_LAYERS_TO_TRAIN = 7
#For the categorical model, this limits the upper bound of the learned throttle
#it's very IMPORTANT that this value is matched from the training PC config.py and the robot.py
#and ideally wouldn't change once set.
MODEL_CATEGORICAL_MAX_THROTTLE_RANGE = 0.5
#RNN or 3D
SEQUENCE_LENGTH = 3
#SOMBRERO
HAVE_SOMBRERO = False
#RECORD OPTIONS
RECORD_DURING_AI = False
AUTO_CREATE_NEW_TUB = False #create a new tub (tub_YY_MM_DD) directory when recording or append records to data directory directly
#JOYSTICK
USE_JOYSTICK_AS_DEFAULT = False #when starting the manage.py, when True, will not require a --js option to use the joystick
JOYSTICK_MAX_THROTTLE = 0.5 #this scalar is multiplied with the -1 to 1 throttle value to limit the maximum throttle. This can help if you drop the controller or just don't need the full speed available.
JOYSTICK_STEERING_SCALE = 1.0 #some people want a steering that is less sensitve. This scalar is multiplied with the steering -1 to 1. It can be negative to reverse dir.
AUTO_RECORD_ON_THROTTLE = True #if true, we will record whenever throttle is not zero. if false, you must manually toggle recording with some other trigger. Usually circle button on joystick.
CONTROLLER_TYPE='ps3' #(ps3|ps4|xbox|nimbus|wiiu|F710|rc3|MM1|custom) custom will run the my_joystick.py controller written by the `donkey createjs` command
USE_NETWORKED_JS = False #should we listen for remote joystick control over the network?
NETWORK_JS_SERVER_IP = "192.168.0.1"#when listening for network joystick control, which ip is serving this information
JOYSTICK_DEADZONE = 0.0 # when non zero, this is the smallest throttle before recording triggered.
JOYSTICK_THROTTLE_DIR = -1.0 # use -1.0 to flip forward/backward, use 1.0 to use joystick's natural forward/backward
USE_FPV = False # send camera data to FPV webserver
JOYSTICK_DEVICE_FILE = "/dev/input/js0" # this is the unix file use to access the joystick.
#WEB CONTROL
WEB_CONTROL_PORT = int(os.getenv("WEB_CONTROL_PORT", 8887)) # which port to listen on when making a web controller
WEB_INIT_MODE = "user" # which control mode to start in. one of user|local_angle|local. Setting local will start in ai mode.
#DonkeyGym
#Only on Ubuntu linux, you can use the simulator as a virtual donkey and
#issue the same python manage.py drive command as usual, but have them control a virtual car.
#This enables that, and sets the path to the simualator and the environment.
#You will want to download the simulator binary from: https://github.com/tawnkramer/donkey_gym/releases/download/v18.9/DonkeySimLinux.zip
#then extract that and modify DONKEY_SIM_PATH.
DONKEY_GYM = False
DONKEY_SIM_PATH = "path to sim" #"/home/tkramer/projects/sdsandbox/sdsim/build/DonkeySimLinux/donkey_sim.x86_64" when racing on virtual-race-league use "remote", or user "remote" when you want to start the sim manually first.
DONKEY_GYM_ENV_NAME = "donkey-mountain-track-v0" # ("donkey-generated-track-v0"|"donkey-generated-roads-v0"|"donkey-warehouse-v0"|"donkey-avc-sparkfun-v0")
GYM_CONF = { "body_style" : "donkey", "body_rgb" : (128, 128, 128), "car_name" : "car", "font_size" : 100} # body style(donkey|bare|car01) body rgb 0-255
GYM_CONF["racer_name"] = "Your Name"
GYM_CONF["country"] = "Place"
GYM_CONF["bio"] = "I race robots."
SIM_HOST = "127.0.0.1" # when racing on virtual-race-league use host "trainmydonkey.com"
SIM_ARTIFICIAL_LATENCY = 0 # this is the millisecond latency in controls. Can use useful in emulating the delay when useing a remote server. values of 100 to 400 probably reasonable.
| donkeycar/templates/cfg_basic.py | 5,773 | CAR CONFIG
This file is read by your car application's manage.py script to change the car
performance.
EXMAPLE
-----------
import dk
cfg = dk.load_config(config_path='~/mycar/config.py')
print(cfg.CAMERA_RESOLUTION)
PATHSVEHICLECAMERA (PICAM|WEBCAM|CVCAM|CSIC|V4L|D435|MOCK|IMAGE_LIST) default RGB=3, make 1 for mono9865, over rides only if needed, ie. TX2..STEERINGTHROTTLETRAINING(linear|categorical|rnn|imu|behavior|3d|localizer|latent)print layers and weights to stdoutadam, sgd, rmsprop, etc.. None accepts defaultonly used when OPTIMIZER specifiedonly used when OPTIMIZER specifiedkeep images in memory. will speed succesive epochs, but crater if not enough mem. The desired percentage of pruning. Percenge of pruning that is perform per iteration. The max amout of validation loss that is permitted during pruning. percent of dataset used to perform evaluation of model.model transfer optionsFor the categorical model, this limits the upper bound of the learned throttleit's very IMPORTANT that this value is matched from the training PC config.py and the robot.pyand ideally wouldn't change once set.RNN or 3DSOMBRERORECORD OPTIONScreate a new tub (tub_YY_MM_DD) directory when recording or append records to data directory directlyJOYSTICKwhen starting the manage.py, when True, will not require a --js option to use the joystickthis scalar is multiplied with the -1 to 1 throttle value to limit the maximum throttle. This can help if you drop the controller or just don't need the full speed available.some people want a steering that is less sensitve. This scalar is multiplied with the steering -1 to 1. It can be negative to reverse dir.if true, we will record whenever throttle is not zero. if false, you must manually toggle recording with some other trigger. Usually circle button on joystick.(ps3|ps4|xbox|nimbus|wiiu|F710|rc3|MM1|custom) custom will run the my_joystick.py controller written by the `donkey createjs` commandshould we listen for remote joystick control over the network?when listening for network joystick control, which ip is serving this information when non zero, this is the smallest throttle before recording triggered. 
use -1.0 to flip forward/backward, use 1.0 to use joystick's natural forward/backward send camera data to FPV webserver this is the unix file use to access the joystick.WEB CONTROL which port to listen on when making a web controller which control mode to start in. one of user|local_angle|local. Setting local will start in ai mode.DonkeyGymOnly on Ubuntu linux, you can use the simulator as a virtual donkey andissue the same python manage.py drive command as usual, but have them control a virtual car.This enables that, and sets the path to the simualator and the environment.You will want to download the simulator binary from: https://github.com/tawnkramer/donkey_gym/releases/download/v18.9/DonkeySimLinux.zipthen extract that and modify DONKEY_SIM_PATH."/home/tkramer/projects/sdsandbox/sdsim/build/DonkeySimLinux/donkey_sim.x86_64" when racing on virtual-race-league use "remote", or user "remote" when you want to start the sim manually first. ("donkey-generated-track-v0"|"donkey-generated-roads-v0"|"donkey-warehouse-v0"|"donkey-avc-sparkfun-v0") body style(donkey|bare|car01) body rgb 0-255 when racing on virtual-race-league use host "trainmydonkey.com" this is the millisecond latency in controls. Can use useful in emulating the delay when useing a remote server. values of 100 to 400 probably reasonable. | 3,485 | en | 0.779375 |
# -*- coding: utf-8 -*-
"""
# Author : Camey
# DateTime : 2022/3/12 8:49 下午
# Description :
""" | my_work/config/__init__.py | 108 | # Author : Camey
# DateTime : 2022/3/12 8:49 下午
# Description :
-*- coding: utf-8 -*- | 96 | en | 0.329278 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A module that provides algorithms for performing linear fit between
sets of 2D points.
:Authors: Mihai Cara, Warren Hack
:License: :doc:`../LICENSE`
"""
import logging
import numbers
import numpy as np
from .linalg import inv
from . import __version__ # noqa: F401
# Public metadata and the explicit public API of this module.
__author__ = 'Mihai Cara, Warren Hack'
__all__ = ['iter_linear_fit', 'build_fit_matrix']

# Module-level logger; DEBUG here so attached handlers decide what surfaces.
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
class SingularMatrixError(Exception):
    """Raised to report that a singular matrix was encountered."""
class NotEnoughPointsError(Exception):
    """Raised when too few points are available to determine the
    parameters of a linear transformation.
    """
def iter_linear_fit(xy, uv, wxy=None, wuv=None,
                    fitgeom='general', center=None,
                    nclip=3, sigma=(3.0, 'rmse'), clip_accum=False):
    r"""
    Compute linear transformation parameters that "best" (in the sense of
    minimizing residuals) transform ``uv`` source position to ``xy``
    sources iteratively using sigma-clipping.

    More precisely, this functions attempts to find a ``2x2`` matrix ``F`` and
    a shift vector ``s`` that minimize the residuals between the *transformed*
    reference source coordinates ``uv``

    .. math::
        \mathbf{xy}'_k = \mathbf{F}\cdot(\mathbf{uv}_k-\mathbf{c})+\
        \mathbf{s} + \mathbf{c}
        :label: ilf1

    and the "observed" source positions ``xy``:

    .. math::
        \epsilon^2 = \Sigma_k w_k \|\mathbf{xy}_k-\mathbf{xy}'_k\|^2.
        :label: ilf2

    In the above equations, :math:`\mathbf{F}` is a ``2x2`` matrix while
    :math:`\mathbf{xy}_k` and :math:`\mathbf{uv}_k` are the position
    coordinates of the ``k``-th source (row in input ``xy`` and ``uv`` arrays).

    One of the two catalogs (``xy`` or ``uv``) contains what we refer to as
    "image" source positions and the other one as "reference" source positions.
    The meaning assigned to ``xy`` and ``uv`` parameters are up to the
    caller of this function.

    Parameters
    ----------
    xy: numpy.ndarray
        A ``(N, 2)``-shaped array of source positions (one 2-coordinate
        position per line).

    uv: numpy.ndarray
        A ``(N, 2)``-shaped array of source positions (one 2-coordinate
        position per line). This array *must have* the same length (shape)
        as the ``xy`` array.

    wxy: numpy.ndarray, None, optional
        A 1-dimensional array of weights of the same length (``N``)
        as ``xy`` array indicating how much a given coordinate should be
        weighted in the fit. If not provided or set to `None`, all positions
        will be contribute equally to the fit if ``wuv`` is also set to `None`.
        See ``Notes`` section for more details.

    wuv: numpy.ndarray, None, optional
        A 1-dimensional array of weights of the same length (``N``)
        as ``xy`` array indicating how much a given coordinate should be
        weighted in the fit. If not provided or set to `None`, all positions
        will be contribute equally to the fit if ``wxy`` is also set to `None`.
        See ``Notes`` section for more details.

    fitgeom: {'shift', 'rscale', 'general'}, optional
        The fitting geometry to be used in fitting the matched object lists.
        This parameter is used in fitting the shifts (offsets), rotations
        and/or scale changes from the matched object lists. The 'general'
        fit geometry allows for independent scale and rotation for each axis.

    center: tuple, list, numpy.ndarray, None, optional
        A list-like container with two ``X``- and ``Y``-positions of the center
        (origin) of rotations in the ``uv`` and ``xy`` coordinate frames.
        If not provided, ``center`` is estimated as a (weighted) mean position
        in the ``uv`` frame.

    nclip: int, None, optional
        Number (a non-negative integer) of clipping iterations in fit.
        Clipping will be turned off if ``nclip`` is either `None` or 0.

    sigma: float, tuple of the form (float, str), optional
        When a tuple is provided, first value (a positive number)
        indicates the number of "fit error estimates" to use for clipping.
        The second value (a string) indicates the statistic to be
        used for "fit error estimate". Currently the following values are
        supported: ``'rmse'``, ``'mae'``, and ``'std'``
        - see ``Notes`` section for more details.

        When ``sigma`` is a single number, it must be a positive number and
        the default error estimate ``'rmse'`` is assumed.

        This parameter is ignored when ``nclip`` is either `None` or 0.

    clip_accum: bool, optional
        Indicates whether or not to reset the list of "bad" (clipped out)
        sources after each clipping iteration. When set to `True` the list
        only grows with each iteration as "bad" positions never re-enter the
        pool of available position for the fit. By default the list of
        "bad" source positions is purged at each iteration.

    Returns
    -------
    fit: dict
        - ``'shift'``: A ``numpy.ndarray`` with two components of the
          computed shift.
        - ``'shift_ld'``: A ``numpy.ndarray`` with two components of the
          computed shift of type ``numpy.longdouble``.
        - ``'matrix'``: A ``2x2`` ``numpy.ndarray`` with the computed
          generalized rotation matrix.
        - ``'matrix_ld'``: A ``2x2`` ``numpy.ndarray`` with the computed
          generalized rotation matrix of type ``numpy.longdouble``.
        - ``'proper_rot'``: Rotation angle (degree) as if the rotation is
          proper.
        - ``'rot'``: A tuple of ``(rotx, roty)`` - the rotation angles with
          regard to the ``X`` and ``Y`` axes.
        - ``'<rot>'``: *Arithmetic mean* of the angles of rotation around
          ``X`` and ``Y`` axes.
        - ``'scale'``: A tuple of ``(sx, sy)`` - scale change in the direction
          of the ``X`` and ``Y`` axes.
        - ``'<scale>'``: *Geometric mean* of scales ``sx`` and ``sy``.
        - ``'skew'``: Computed skew.
        - ``'proper'``: a boolean indicating whether the rotation is proper.
        - ``'fitgeom'``: Fit geometry (allowed transformations) used for
          fitting data (to minimize residuals). This is copy of the input
          argument ``fitgeom``.
        - ``'center'``: Center of rotation
        - ``'center_ld'``: Center of rotation as a ``numpy.longdouble``.
        - ``'fitmask'``: A boolean array indicating which source positions
          where used for fitting (`True`) and which were clipped out
          (`False`). **NOTE** For weighted fits, positions with zero
          weights are automatically excluded from the fits.
        - ``'eff_nclip'``: Effective number of clipping iterations
        - ``'rmse'``: Root-Mean-Square Error
        - ``'mae'``: Mean Absolute Error
        - ``'std'``: Standard Deviation of the residuals
        - ``'resids'``: An array of residuals of the fit.
          **NOTE:** Only the residuals for the "valid" points are reported
          here. Therefore the length of this array may be smaller than the
          length of input arrays of positions.

    Notes
    -----
    **Weights**

    Weights can be provided for both "image" source positions and "reference"
    source positions. When no weights are given, all positions are weighted
    equally. When only one set of positions have weights (i.e., either ``wxy``
    or ``wuv`` is not `None`) then weights in :eq:`ilf2` are set to be equal
    to the provided set of weights. When weights for *both* "image" source
    positions and "reference" source positions are provided, then the
    combined weight that is used in :eq:`ilf2` is computed as:

    .. math::
        1/w = 1/w_{xy} + 1/w_{uv}.

    **Statistics for clipping**

    Several statistics are available for clipping iterations and all of them
    are reported in the returned ``fit`` dictionary regardless of the
    setting in ``sigma``:

    .. math::
        \mathrm{RMSE} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k\|^2}

    .. math::
        \mathrm{MAE} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k\|}

    .. math::
        \mathrm{STD} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k - \
        \mathbf{\overline{r}}\|^2}/(1-V_2)

    where :math:`\mathbf{r}_k=\mathbf{xy}_k-\mathbf{xy}'_k`,
    :math:`\Sigma_k w_k = 1`, and :math:`V_2=\Sigma_k w_k^2`.

    """
    # Dispatch to the concrete (non-iterative) fitter for this geometry.
    if fitgeom == 'general':
        linear_fit = fit_general
    elif fitgeom == 'rscale':
        linear_fit = fit_rscale
    elif fitgeom == 'shift':
        linear_fit = fit_shifts
    else:
        raise ValueError("Unsupported 'fitgeom' value: '{}'".format(fitgeom))

    # Minimum number of points needed to constrain each geometry.
    minobj_per_fitgeom = {'shift': 1, 'rscale': 2, 'general': 3}
    minobj = minobj_per_fitgeom[fitgeom]

    # Work in extended precision throughout the fit.
    xy = np.array(xy, dtype=np.longdouble)
    uv = np.array(uv, dtype=np.longdouble)

    if len(xy.shape) != 2 or xy.shape[1] != 2 or uv.shape != xy.shape:
        raise ValueError("Input coordinate arrays 'xy' and 'uv' must be of "
                         "shape (N, 2) where N is the number of coordinate "
                         "points.")

    # wmask permanently excludes zero-weight points; `mask` below is the
    # working mask that clipping iterations refine.
    wmask = np.ones(len(xy), dtype=np.bool_)

    if wxy is not None:
        wxy = np.asarray(wxy)
        if len(wxy.shape) != 1 or wxy.shape[0] != xy.shape[0]:
            raise ValueError("Weights 'wxy' must be a 1-dimensional vector "
                             "of lengths equal to the number of input points.")
        wmask *= wxy > 0.0

    if wuv is not None:
        wuv = np.asarray(wuv)
        if len(wuv.shape) != 1 or wuv.shape[0] != xy.shape[0]:
            raise ValueError("Weights 'wuv' must be a 1-dimensional vector "
                             "of lengths equal to the number of input points.")
        wmask *= wuv > 0.0
    mask = wmask

    # Validate and normalize the sigma/nclip clipping configuration.
    if sigma is None and nclip is not None and nclip > 0:
        raise ValueError("Argument 'sigma' cannot be None when 'nclip' is "
                         "a positive number.")

    if isinstance(sigma, numbers.Number):
        sigstat = 'rmse'  # default value
        nsigma = float(sigma)
    elif sigma is not None:
        nsigma = float(sigma[0])
        sigstat = sigma[1]
        if sigstat not in ['rmse', 'mae', 'std']:
            raise ValueError("Unsupported sigma statistics value.")

    if sigma is not None and nsigma <= 0.0:
        raise ValueError("The value of sigma for clipping iterations must be "
                         "positive.")

    if nclip is None:
        nclip = 0
    else:
        if nclip < 0:
            raise ValueError("Argument 'nclip' must be non-negative.")
        nclip = int(nclip)

    # With exactly the minimum number of points, clipping any would make the
    # fit under-determined, so clipping is disabled.
    if np.count_nonzero(mask) == minobj:
        log.warning("The number of sources for the fit is smaller than the "
                    "minimum number of sources necessary for the requested "
                    "'fitgeom'.")
        log.warning("Resetting number of clipping iterations to 0.")
        nclip = 0

    if center is None:
        # Default rotation center: (unweighted) mean of valid `uv` points.
        center_ld = uv[mask].mean(axis=0, dtype=np.longdouble)
        center = center_ld.astype(np.double)
    else:
        center_ld = np.longdouble(center)

    # Shift both frames so the rotation center is at the origin.
    xy[mask] -= center_ld
    uv[mask] -= center_ld

    log.info("Performing '{:s}' fit".format(fitgeom))

    # initial fit:
    wmxy = None if wxy is None else wxy[mask]
    wmuv = None if wuv is None else wuv[mask]
    fit = linear_fit(xy[mask], uv[mask], wmxy, wmuv)

    # clipping iterations:
    effective_nclip = 0
    for n in range(nclip):
        resids = fit['resids']

        # redefine what pixels will be included in next iteration
        cutoff = nsigma * fit[sigstat]
        nonclipped = np.linalg.norm(resids, axis=1) < cutoff
        # Stop when clipping would drop below the minimum point count or
        # when nothing was clipped (fit already stable).
        if np.count_nonzero(nonclipped) < minobj or nonclipped.all():
            break
        effective_nclip += 1

        prev_mask = mask
        if not clip_accum:
            # Non-accumulative mode: previously clipped points may re-enter.
            mask = np.array(wmask)
        mask[prev_mask] *= nonclipped

        wmxy = None if wxy is None else wxy[mask]
        wmuv = None if wuv is None else wuv[mask]
        fit = linear_fit(xy[mask], uv[mask], wmxy, wmuv)

    fit['center'] = center
    fit['center_ld'] = center_ld
    fit['fitmask'] = mask
    fit['eff_nclip'] = effective_nclip
    return fit
def _compute_stat(fit, residuals, weights):
if weights is None:
fit['rmse'] = float(np.sqrt(np.mean(2 * residuals**2)))
fit['mae'] = float(np.mean(np.linalg.norm(residuals, axis=1)))
fit['std'] = float(np.linalg.norm(residuals.std(axis=0)))
else:
# assume all weights > 0 (this should be insured by the caller => no
# need to repeat the check here)
npts = len(weights)
wt = np.sum(weights)
if npts == 0 or wt == 0.0:
fit['rmse'] = float('nan')
fit['mae'] = float('nan')
fit['std'] = float('nan')
return
w = weights / wt
fit['rmse'] = float(np.sqrt(np.sum(np.dot(w, residuals**2))))
fit['mae'] = float(np.dot(w, np.linalg.norm(residuals, axis=1)))
if npts == 1:
fit['std'] = 0.0
else:
# see:
# https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights_2
wmean = np.dot(w, residuals)
fit['std'] = float(
np.sqrt(np.sum(np.dot(w, (residuals - wmean)**2) /
(1.0 - np.sum(w**2))))
)
def fit_shifts(xy, uv, wxy=None, wuv=None):
    """ Fits (non-iteratively and without sigma-clipping) a displacement
    transformation only between input lists of positions ``xy`` and ``uv``.
    When weights are provided, a weighted fit is performed. Parameter
    descriptions and return values are identical to those in `iter_linear_fit`,
    except returned ``fit`` dictionary does not contain the following
    keys irrelevant to this function: ``'center'``, ``'fitmask'``, and
    ``'eff_nclip'``.

    Raises
    ------
    NotEnoughPointsError
        If ``xy`` is empty.
    ValueError
        If any weight is negative, or if all combined weights are zero.
    """
    if xy.size == 0:
        raise NotEnoughPointsError(
            "At least one point is required to find shifts."
        )

    diff_pts = np.subtract(xy, uv, dtype=np.longdouble)

    if wxy is None and wuv is None:
        # no weighting
        w = None

        meanx = diff_pts[:, 0].mean(dtype=np.longdouble)
        meany = diff_pts[:, 1].mean(dtype=np.longdouble)

    else:
        if wxy is None:
            w = np.array(wuv, dtype=np.longdouble)
        elif wuv is None:
            w = np.array(wxy, dtype=np.longdouble)
        else:
            # 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv
            wuv = np.array(wuv, dtype=np.longdouble)
            wxy = np.array(wxy, dtype=np.longdouble)
            m = np.logical_and(wuv > 0, wxy > 0)
            w = np.zeros_like(wuv)
            w[m] = wxy[m] * wuv[m] / (wxy[m] + wuv[m])

        if np.any(w < 0.0):
            raise ValueError("Invalid weights: weights must be non-negative.")

        # BUGFIX: the original used `np.sum(w > 0, dtype=np.int)`; the
        # `np.int` alias was removed in NumPy 1.24 and raises AttributeError.
        if not np.count_nonzero(w > 0):
            raise ValueError("Not enough valid data for 'shift' fit: "
                             "too many weights are zero!")

        w /= np.sum(w, dtype=np.longdouble)
        meanx = np.dot(w, diff_pts[:, 0])
        meany = np.dot(w, diff_pts[:, 1])

    # Identity linear part with the (weighted) mean offset as the shift.
    p = np.array([1.0, 0.0, meanx], dtype=np.longdouble)
    q = np.array([0.0, 1.0, meany], dtype=np.longdouble)

    fit = _build_fit(p, q, 'shift')
    resids = diff_pts - fit['shift']
    fit['resids'] = resids.astype(np.double)
    _compute_stat(fit, residuals=resids, weights=w)
    return fit
# Implementation of geomap 'rscale' fitting based on 'lib/geofit.x'
# by Warren Hack. Support for axis flips added by Mihai Cara.
def fit_rscale(xy, uv, wxy=None, wuv=None):
""" Fits (non-iteratively and without sigma-clipping) a displacement,
rotation and scale transformations between input lists of positions
``xy`` and ``uv``. When weights are provided, a weighted fit is performed.
Parameter descriptions and return values are identical to those
in `iter_linear_fit`, except returned ``fit`` dictionary does not contain
the following keys irrelevant to this function: ``'center'``,
``'fitmask'``, and ``'eff_nclip'``.
"""
if len(xy) < 2:
raise NotEnoughPointsError(
"At least two points are required to find shifts, rotation, and "
"scale."
)
x = np.array(xy[:, 0], dtype=np.longdouble)
y = np.array(xy[:, 1], dtype=np.longdouble)
u = np.array(uv[:, 0], dtype=np.longdouble)
v = np.array(uv[:, 1], dtype=np.longdouble)
if wxy is None and wuv is None:
# no weighting
w = None
xm = np.mean(x)
ym = np.mean(y)
um = np.mean(u)
vm = np.mean(v)
x -= xm
y -= ym
u -= um
v -= vm
su2 = np.dot(u, u)
sv2 = np.dot(v, v)
sxv = np.dot(x, v)
syu = np.dot(y, u)
sxu = np.dot(x, u)
syv = np.dot(y, v)
su2v2 = su2 + sv2
else:
if wxy is None:
w = np.array(wuv, dtype=np.longdouble)
elif wuv is None:
w = np.array(wxy, dtype=np.longdouble)
else:
# 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv
wuv = np.array(wuv, dtype=np.longdouble)
wxy = np.array(wxy, dtype=np.longdouble)
m = np.logical_and(wuv > 0, wxy > 0)
w = np.zeros_like(wuv)
w[m] = wxy[m] * wuv[m] / (wxy[m] + wuv[m])
if np.any(w < 0.0):
raise ValueError("Invalid weights: weights must be non-negative.")
if np.sum(w > 0) < 2:
raise ValueError("Not enough valid data for 'rscale' fit: "
"too many weights are zero!")
w /= np.sum(w, dtype=np.longdouble)
xm = np.dot(w, x)
ym = np.dot(w, y)
um = np.dot(w, u)
vm = np.dot(w, v)
x -= xm
y -= ym
u -= um
v -= vm
su2 = np.dot(w, u**2)
sv2 = np.dot(w, v**2)
sxv = np.dot(w, x * v)
syu = np.dot(w, y * u)
sxu = np.dot(w, x * u)
syv = np.dot(w, y * v)
su2v2 = su2 + sv2
det = sxu * syv - sxv * syu
if det < 0:
rot_num = sxv + syu
rot_denom = sxu - syv
else:
rot_num = sxv - syu
rot_denom = sxu + syv
if rot_num == rot_denom:
theta = 0.0
else:
theta = np.rad2deg(np.arctan2(rot_num, rot_denom))
if theta < 0:
theta += 360.0
ctheta = np.cos(np.deg2rad(theta))
stheta = np.sin(np.deg2rad(theta))
s_num = rot_denom * ctheta + rot_num * stheta
if su2v2 > 0.0:
mag = s_num / su2v2
else:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
if det < 0:
# "flip" y-axis (reflection about x-axis *after* rotation)
# NOTE: keep in mind that 'matrix' is the transposed rotation matrix.
sthetax = -mag * stheta
cthetay = -mag * ctheta
else:
sthetax = mag * stheta
cthetay = mag * ctheta
cthetax = mag * ctheta
sthetay = mag * stheta
sdet = np.sign(det)
xshift = xm - um * cthetax - sdet * vm * sthetax
yshift = ym + sdet * um * sthetay - vm * cthetay
p = np.array([cthetax, sthetay, xshift], dtype=np.longdouble)
q = np.array([-sthetax, cthetay, yshift], dtype=np.longdouble)
# Return the shift, rotation, and scale changes
fit = _build_fit(p, q, fitgeom='rscale')
resids = xy - np.dot(uv, fit['matrix_ld'].T) - fit['shift_ld']
fit['resids'] = resids.astype(np.double)
_compute_stat(fit, residuals=resids, weights=w)
return fit
def fit_general(xy, uv, wxy=None, wuv=None):
""" Fits (non-iteratively and without sigma-clipping) a displacement,
rotation, scale, and skew transformations (i.e., the full ``2x2``
transformation matrix) between input lists of positions
``xy`` and ``uv``. When weights are provided, a weighted fit is performed.
Parameter descriptions and return values are identical to those
in `iter_linear_fit`, except returned ``fit`` dictionary does not contain
the following keys irrelevant to this function: ``'center'``,
``'fitmask'``, and ``'eff_nclip'``.
"""
if len(xy) < 3:
raise NotEnoughPointsError(
"At least three points are required to find 6-parameter linear "
"affine transformations."
)
x = np.array(xy[:, 0], dtype=np.longdouble)
y = np.array(xy[:, 1], dtype=np.longdouble)
u = np.array(uv[:, 0], dtype=np.longdouble)
v = np.array(uv[:, 1], dtype=np.longdouble)
if wxy is None and wuv is None:
# no weighting
w = None
# Set up products used for computing the fit
sw = float(x.size)
sx = x.sum()
sy = y.sum()
su = u.sum()
sv = v.sum()
sxu = np.dot(x, u)
syu = np.dot(y, u)
sxv = np.dot(x, v)
syv = np.dot(y, v)
suu = np.dot(u, u)
svv = np.dot(v, v)
suv = np.dot(u, v)
else:
if wxy is None:
w = np.array(wuv, dtype=np.longdouble)
elif wuv is None:
w = np.array(wxy, dtype=np.longdouble)
else:
# 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv
wuv = np.array(wuv, dtype=np.longdouble)
wxy = np.array(wxy, dtype=np.longdouble)
m = np.logical_and(wuv > 0, wxy > 0)
w = np.zeros_like(wuv)
w[m] = wxy[m] * wuv[m] / (wxy[m] + wuv[m])
if np.any(w < 0.0):
raise ValueError("Invalid weights: weights must be non-negative.")
if np.sum(w > 0) < 3:
raise ValueError("Not enough valid data for 'general' fit: "
"too many weights are zero!")
# Set up products used for computing the fit
sw = np.sum(w, dtype=np.longdouble)
sx = np.dot(w, x)
sy = np.dot(w, y)
su = np.dot(w, u)
sv = np.dot(w, v)
sxu = np.dot(w, x * u)
syu = np.dot(w, y * u)
sxv = np.dot(w, x * v)
syv = np.dot(w, y * v)
suu = np.dot(w, u * u)
svv = np.dot(w, v * v)
suv = np.dot(w, u * v)
m = np.array([[su, sv, sw], [suu, suv, su], [suv, svv, sv]],
dtype=np.longdouble)
a = np.array([sx, sxu, sxv], dtype=np.longdouble)
b = np.array([sy, syu, syv], dtype=np.longdouble)
try:
inv_m = inv(m)
except np.linalg.LinAlgError:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
p = np.dot(inv_m, a)
q = np.dot(inv_m, b)
if not (np.all(np.isfinite(p)) and np.all(np.isfinite(q))):
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
) # pragma: no cover
# Return the shift, rotation, and scale changes
fit = _build_fit(p, q, 'general')
resids = xy - np.dot(uv, fit['matrix_ld'].T) - fit['shift_ld']
fit['resids'] = resids.astype(np.double)
_compute_stat(fit, residuals=resids, weights=w)
return fit
def _build_fit(p, q, fitgeom):
# Build fit matrix:
fit_matrix = np.vstack((p[:2], q[:2]))
# determinant of the transformation
det = p[0] * q[1] - p[1] * q[0]
sdet = np.sign(det)
proper = sdet >= 0
# Create a working copy (no reflections) for computing transformation
# parameters (scale, rotation angle, skew):
wfit = fit_matrix.copy()
# Skew is zero for all fitgeom except 'general':
skew = 0.0
if fitgeom == 'shift':
fit = {
'shift': np.array([p[2], q[2]], dtype=np.double),
'shift_ld': np.array([p[2], q[2]], dtype=np.longdouble),
'matrix': np.array(fit_matrix, dtype=np.double),
'matrix_ld': np.array(fit_matrix, dtype=np.longdouble),
'proper_rot': 0.0,
'rot': (0.0, 0.0),
'<rot>': 0.0,
'scale': (1.0, 1.0),
'<scale>': 1.0,
'skew': 0.0,
'proper': proper,
'fitgeom': 'shift'
}
return fit
# Compute average scale:
s = np.sqrt(np.abs(det))
# Compute scales for each axis:
if fitgeom == 'general':
sx, sy = np.sqrt(p[:2]**2 + q[:2]**2)
else:
sx = s
sy = s
# Remove scale from the transformation matrix:
wfit[:, 0] /= sx
wfit[:, 1] /= sy
# Compute rotation angle as if we have a proper rotation.
# This will also act as *some sort* of "average rotation" even for
# transformations with different rot_x and rot_y:
prop_rot = np.rad2deg(
np.arctan2(wfit[0, 1] - sdet * wfit[1, 0],
wfit[0, 0] + sdet * wfit[1, 1])
)
if proper and fitgeom == 'rscale':
rotx = prop_rot
roty = prop_rot
rot = prop_rot
else:
rotx = np.rad2deg(np.arctan2(-wfit[1, 0], wfit[0, 0]))
roty = np.rad2deg(np.arctan2(wfit[0, 1], wfit[1, 1]))
rot = 0.5 * (rotx + roty)
skew = np.mod(roty - rotx - 180.0, 360.0) - 180.0
fit = {
'shift': np.array([p[2], q[2]], dtype=np.double),
'shift_ld': np.array([p[2], q[2]], dtype=np.longdouble),
'matrix': np.array(fit_matrix, dtype=np.double),
'matrix_ld': np.array(fit_matrix, dtype=np.longdouble),
'proper_rot': float(prop_rot),
'rot': (float(rotx), float(roty)),
'<rot>': float(rot),
'scale': (float(sx), float(sy)),
'<scale>': float(s),
'skew': float(skew),
'proper': proper,
'fitgeom': fitgeom
}
return fit
def build_fit_matrix(rot, scale=1):
r"""
Create an affine transformation matrix (2x2) from the provided rotation
angle(s) and scale(s):
.. math::
M = \begin{bmatrix}
s_x \cos(\theta_x) & s_y \sin(\theta_y) \\
-s_x \sin(\theta_x) & s_y \cos(\theta_y)
\end{bmatrix}
Parameters
----------
rot: tuple, float, optional
Rotation angle in degrees. Two values (one for each axis) can be
provided as a tuple.
scale: tuple, float, optional
Scale of the liniar transformation. Two values (one for each axis)
can be provided as a tuple.
Returns
-------
matrix: numpy.ndarray
A 2x2 `numpy.ndarray` containing coefficients of a liniear
transformation.
"""
if hasattr(rot, '__iter__'):
rx, ry = map(np.deg2rad, rot)
else:
rx = ry = np.deg2rad(float(rot))
if hasattr(scale, '__iter__'):
sx, sy = scale
else:
sx = sy = float(scale)
matrix = np.array([[sx * np.cos(rx), sy * np.sin(ry)],
[-sx * np.sin(rx), sy * np.cos(ry)]])
return matrix
| tweakwcs/linearfit.py | 26,996 | An error class used to report when there are not enough points to
find parameters of a linear transformation.
An error class used to report when a singular matrix is encountered.
Create an affine transformation matrix (2x2) from the provided rotation
angle(s) and scale(s):
.. math::
M = \begin{bmatrix}
s_x \cos(\theta_x) & s_y \sin(\theta_y) \\
-s_x \sin(\theta_x) & s_y \cos(\theta_y)
\end{bmatrix}
Parameters
----------
rot: tuple, float, optional
Rotation angle in degrees. Two values (one for each axis) can be
provided as a tuple.
scale: tuple, float, optional
Scale of the liniar transformation. Two values (one for each axis)
can be provided as a tuple.
Returns
-------
matrix: numpy.ndarray
A 2x2 `numpy.ndarray` containing coefficients of a liniear
transformation.
Fits (non-iteratively and without sigma-clipping) a displacement,
rotation, scale, and skew transformations (i.e., the full ``2x2``
transformation matrix) between input lists of positions
``xy`` and ``uv``. When weights are provided, a weighted fit is performed.
Parameter descriptions and return values are identical to those
in `iter_linear_fit`, except returned ``fit`` dictionary does not contain
the following keys irrelevant to this function: ``'center'``,
``'fitmask'``, and ``'eff_nclip'``.
Fits (non-iteratively and without sigma-clipping) a displacement,
rotation and scale transformations between input lists of positions
``xy`` and ``uv``. When weights are provided, a weighted fit is performed.
Parameter descriptions and return values are identical to those
in `iter_linear_fit`, except returned ``fit`` dictionary does not contain
the following keys irrelevant to this function: ``'center'``,
``'fitmask'``, and ``'eff_nclip'``.
Fits (non-iteratively and without sigma-clipping) a displacement
transformation only between input lists of positions ``xy`` and ``uv``.
When weights are provided, a weighted fit is performed. Parameter
descriptions and return values are identical to those in `iter_linear_fit`,
except returned ``fit`` dictionary does not contain the following
keys irrelevant to this function: ``'center'``, ``'fitmask'``, and
``'eff_nclip'``.
Compute linear transformation parameters that "best" (in the sense of
minimizing residuals) transform ``uv`` source position to ``xy``
sources iteratively using sigma-clipping.
More precisely, this functions attempts to find a ``2x2`` matrix ``F`` and
a shift vector ``s`` that minimize the residuals between the *transformed*
reference source coordinates ``uv``
.. math::
\mathbf{xy}'_k = \mathbf{F}\cdot(\mathbf{uv}_k-\mathbf{c})+\
\mathbf{s} + \mathbf{c}
:label: ilf1
and the "observed" source positions ``xy``:
.. math::
\epsilon^2 = \Sigma_k w_k \|\mathbf{xy}_k-\mathbf{xy}'_k\|^2.
:label: ilf2
In the above equations, :math:`\mathbf{F}` is a ``2x2`` matrix while
:math:`\mathbf{xy}_k` and :math:`\mathbf{uv}_k` are the position
coordinates of the ``k``-th source (row in input ``xy`` and ``uv`` arrays).
One of the two catalogs (``xy`` or ``uv``) contains what we refer to as
"image" source positions and the other one as "reference" source positions.
The meaning assigned to ``xy`` and ``uv`` parameters are up to the
caller of this function.
Parameters
----------
xy: numpy.ndarray
A ``(N, 2)``-shaped array of source positions (one 2-coordinate
position per line).
uv: numpy.ndarray
A ``(N, 2)``-shaped array of source positions (one 2-coordinate
position per line). This array *must have* the same length (shape)
as the ``xy`` array.
wxy: numpy.ndarray, None, optional
A 1-dimensional array of weights of the same length (``N``)
as ``xy`` array indicating how much a given coordinate should be
weighted in the fit. If not provided or set to `None`, all positions
will be contribute equally to the fit if ``wuv`` is also set to `None`.
See ``Notes`` section for more details.
wuv: numpy.ndarray, None, optional
A 1-dimensional array of weights of the same length (``N``)
as ``xy`` array indicating how much a given coordinate should be
weighted in the fit. If not provided or set to `None`, all positions
will be contribute equally to the fit if ``wxy`` is also set to `None`.
See ``Notes`` section for more details.
fitgeom: {'shift', 'rscale', 'general'}, optional
The fitting geometry to be used in fitting the matched object lists.
This parameter is used in fitting the shifts (offsets), rotations
and/or scale changes from the matched object lists. The 'general'
fit geometry allows for independent scale and rotation for each axis.
center: tuple, list, numpy.ndarray, None, optional
A list-like container with two ``X``- and ``Y``-positions of the center
(origin) of rotations in the ``uv`` and ``xy`` coordinate frames.
If not provided, ``center`` is estimated as a (weighted) mean position
in the ``uv`` frame.
nclip: int, None, optional
Number (a non-negative integer) of clipping iterations in fit.
Clipping will be turned off if ``nclip`` is either `None` or 0.
sigma: float, tuple of the form (float, str), optional
When a tuple is provided, first value (a positive number)
indicates the number of "fit error estimates" to use for clipping.
The second value (a string) indicates the statistic to be
used for "fit error estimate". Currently the following values are
supported: ``'rmse'``, ``'mae'``, and ``'std'``
- see ``Notes`` section for more details.
When ``sigma`` is a single number, it must be a positive number and
the default error estimate ``'rmse'`` is assumed.
This parameter is ignored when ``nclip`` is either `None` or 0.
clip_accum: bool, optional
Indicates whether or not to reset the list of "bad" (clipped out)
sources after each clipping iteration. When set to `True` the list
only grows with each iteration as "bad" positions never re-enter the
pool of available position for the fit. By default the list of
"bad" source positions is purged at each iteration.
Returns
-------
fit: dict
- ``'shift'``: A ``numpy.ndarray`` with two components of the
computed shift.
- ``'shift_ld'``: A ``numpy.ndarray`` with two components of the
computed shift of type ``numpy.longdouble``.
- ``'matrix'``: A ``2x2`` ``numpy.ndarray`` with the computed
generalized rotation matrix.
- ``'matrix_ld'``: A ``2x2`` ``numpy.ndarray`` with the computed
generalized rotation matrix of type ``numpy.longdouble``.
- ``'proper_rot'``: Rotation angle (degree) as if the rotation is
proper.
- ``'rot'``: A tuple of ``(rotx, roty)`` - the rotation angles with
regard to the ``X`` and ``Y`` axes.
- ``'<rot>'``: *Arithmetic mean* of the angles of rotation around
``X`` and ``Y`` axes.
- ``'scale'``: A tuple of ``(sx, sy)`` - scale change in the direction
of the ``X`` and ``Y`` axes.
- ``'<scale>'``: *Geometric mean* of scales ``sx`` and ``sy``.
- ``'skew'``: Computed skew.
- ``'proper'``: a boolean indicating whether the rotation is proper.
- ``'fitgeom'``: Fit geometry (allowed transformations) used for
fitting data (to minimize residuals). This is copy of the input
argument ``fitgeom``.
- ``'center'``: Center of rotation
- ``'center_ld'``: Center of rotation as a ``numpy.longdouble``.
- ``'fitmask'``: A boolean array indicating which source positions
where used for fitting (`True`) and which were clipped out
(`False`). **NOTE** For weighted fits, positions with zero
weights are automatically excluded from the fits.
- ``'eff_nclip'``: Effective number of clipping iterations
- ``'rmse'``: Root-Mean-Square Error
- ``'mae'``: Mean Absolute Error
- ``'std'``: Standard Deviation of the residuals
- ``'resids'``: An array of residuals of the fit.
**NOTE:** Only the residuals for the "valid" points are reported
here. Therefore the length of this array may be smaller than the
length of input arrays of positions.
Notes
-----
**Weights**
Weights can be provided for both "image" source positions and "reference"
source positions. When no weights are given, all positions are weighted
equally. When only one set of positions have weights (i.e., either ``wxy``
or ``wuv`` is not `None`) then weights in :eq:`ilf2` are set to be equal
to the provided set of weights. When weights for *both* "image" source
positions and "reference" source positions are provided, then the
combined weight that is used in :eq:`ilf2` is computed as:
.. math::
1/w = 1/w_{xy} + 1/w_{uv}.
**Statistics for clipping**
Several statistics are available for clipping iterations and all of them
are reported in the returned ``fit`` dictionary regardless of the
setting in ``sigma``:
.. math::
\mathrm{RMSE} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k\|^2}
.. math::
\mathrm{MAE} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k\|}
.. math::
\mathrm{STD} = \sqrt{\Sigma_k w_k \|\mathbf{r}_k - \
\mathbf{\overline{r}}\|^2}/(1-V_2)
where :math:`\mathbf{r}_k=\mathbf{xy}_k-\mathbf{xy}'_k`,
:math:`\Sigma_k w_k = 1`, and :math:`V_2=\Sigma_k w_k^2`.
A module that provides algorithms for performing linear fit between
sets of 2D points.
:Authors: Mihai Cara, Warren Hack
:License: :doc:`../LICENSE`
Licensed under a 3-clause BSD style license - see LICENSE.rst noqa: F401 default value initial fit: clipping iterations: redefine what pixels will be included in next iteration assume all weights > 0 (this should be insured by the caller => no need to repeat the check here) see: https://en.wikipedia.org/wiki/Weighted_arithmetic_meanReliability_weights_2 no weighting 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv Implementation of geomap 'rscale' fitting based on 'lib/geofit.x' by Warren Hack. Support for axis flips added by Mihai Cara. no weighting 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv "flip" y-axis (reflection about x-axis *after* rotation) NOTE: keep in mind that 'matrix' is the transposed rotation matrix. Return the shift, rotation, and scale changes no weighting Set up products used for computing the fit 1/w = sigma**2 = sigma_xy**2 + sigma_uv**2 = 1/wxy + 1/wuv Set up products used for computing the fit pragma: no cover Return the shift, rotation, and scale changes Build fit matrix: determinant of the transformation Create a working copy (no reflections) for computing transformation parameters (scale, rotation angle, skew): Skew is zero for all fitgeom except 'general': Compute average scale: Compute scales for each axis: Remove scale from the transformation matrix: Compute rotation angle as if we have a proper rotation. This will also act as *some sort* of "average rotation" even for transformations with different rot_x and rot_y: | 10,890 | en | 0.744401 |
'''
@Date: 2019-08-22 20:40:54
@Author: ywyz
@LastModifiedBy: ywyz
@Github: https://github.com/ywyz
@LastEditors: ywyz
@LastEditTime: 2019-08-22 20:48:24
'''
years, months = eval(input("Enter years and months: "))
if (months == 1 or months == 3 or months == 5 or months == 7 or months == 8
or months == 10 or months == 12):
print(years, ".", months, " has 31 days. ")
elif (months == 4 or months == 6 or months == 9 or months == 11):
print(years, ".", months, "has 30 days. ")
elif (months == 2):
if (years % 4 == 0 and years % 100 != 0) or (years % 400 == 0):
print(years, ".", months, "has 29 days. ")
else:
print(years, ".", months, "has 28 days. ")
else:
print("Wrong Input!")
| Exercise04/4-11.py | 725 | @Date: 2019-08-22 20:40:54
@Author: ywyz
@LastModifiedBy: ywyz
@Github: https://github.com/ywyz
@LastEditors: ywyz
@LastEditTime: 2019-08-22 20:48:24 | 149 | en | 0.196449 |
#!/usr/bin/env python2
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Perform inference on a video or zmq with a certain extension
(e.g., .jpg) in a folder. Sample:
python tools/infer_from_video.py \
--cfg configs/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_2x.yaml \
--output-dir ./output \
--image-ext jpg \
--wts generalized_rcnn/model_final.pkl \
--video ~/data/video3.h264
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import argparse
import cv2 # NOQA (Must import before importing caffe2 due to bug in cv2)
import logging
import sys
import time
import zmq
import numpy as np
import os
from caffe2.python import workspace
import glob
from detectron.core.config import assert_and_infer_cfg
from detectron.core.config import cfg
from detectron.core.config import merge_cfg_from_file
from detectron.utils.io import cache_url
from detectron.utils.logging import setup_logging
from detectron.utils.timer import Timer
import detectron.core.test_engine as infer_engine
import detectron.datasets.dummy_datasets as dummy_datasets
import detectron.utils.c2 as c2_utils
# import arp.line_detection as detection
from multiprocessing import Process, Queue
from Queue import Empty
import json
import math
import copy
import arp.const as const
from arp.fusion_kalman import Fusion
from arp.fusion_particle_line import FusionParticle
from arp.detection_filter import LineFilter
from arp.line_extractor import LineExtractor
c2_utils.import_detectron_ops()
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
g_fusion_filter = None
g_particle_filter = None
extractor = LineExtractor()
def parse_args():
parser = argparse.ArgumentParser(description='End-to-end inference')
parser.add_argument(
'--cfg',
dest='cfg',
help='cfg model file (/path/to/model_config.yaml)',
default=None,
type=str
)
parser.add_argument(
'--wts',
dest='weights',
help='weights model file (/path/to/model_weights.pkl)',
default=None,
type=str
)
parser.add_argument(
'--output-dir',
dest='output_dir',
help='directory for visualization pdfs (default: /tmp/infer_simple)',
default='/tmp/infer_simple',
type=str
)
parser.add_argument(
'--image-ext',
dest='image_ext',
help='image file name extension (default: jpg)',
default='png',
type=str
)
parser.add_argument(
'--video',
help='zmq or /path/to/video/file',
default=None,
type=str
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
predict_time = []
process_time = []
show_img = None
#im is rgb
def hanle_frame(args, frameId, origin_im, im, logger, model, dataset, file_name):
global predict_time, process_time, show_img
logger.info('Processing frame: {}'.format(frameId))
# cv2.imshow("tmplog", im)
# cv2.waitKey(0)
timers = defaultdict(Timer)
t = time.time()
im = im[:, :, ::-1]
with c2_utils.NamedCudaScope(0):
cls_boxes, cls_segms, cls_keyps = infer_engine.im_detect_all(
model, im, None, timers=timers
)
predict_time.append(time.time() - t)
logger.info('Inference time: {:.3f}s'.format(time.time() - t))
logger.info('predict_time: {:.3f}s'.format(np.mean(np.array(predict_time))))
# for k, v in timers.items():
# logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
if frameId == 1:
logger.info(
' \ Note: inference on the first image will be slower than the '
'rest (caches and auto-tuning need to warm up)'
)
t = time.time()
img_debug = True
ret = extractor.get_detection_line(
im,
cls_boxes,
cls_segms,
cls_keyps,
dataset=dataset,
show_class=True,
thresh=0.8,
kp_thresh=2,
frame_id=frameId,
img_debug = img_debug
)
im, mid_im, top_im, result, fork_pos = ret
process_time.append(time.time() - t)
logger.info('get_detection_line time: {:.3f}s'.format(time.time() - t))
#
logger.info('process_time: {:.3f}s'.format(np.mean(np.array(process_time))))
line_list = None
cache_list = None
particles = None
filter_list = None
if not result is None:
line_list, cache_list, filter_list, particles = add2MsgQueue(result, frameId, fork_pos, img_debug)
g_debug_img_queue.put((origin_im[:, :, ::-1], im, mid_im, top_im, line_list, cache_list, filter_list, frameId, fork_pos, file_name))
if g_debug_img_queue.full():
try:
g_debug_img_queue.get_nowait()
except Empty:
print ("Queue.Empty")
def drawParticles(image, particles):
histogram = np.array([[i, 0] for i in range(500)])
for index, p in enumerate(particles):
if abs(p.x) > 100:
continue
meter_scale = (3.5/extractor.lane_wid)
# histogram[index][0] = index + 100#int(p.x) / meter_scale
histogram[int(p.x / meter_scale) + 150][1] += 1
cv2.polylines(image, np.int32([np.vstack((histogram[:,0] + extractor.IMAGE_WID/2 - 150, histogram[:,1])).T]), False, (0, 0, 250), thickness=1)
def drawParabola(image, line_param, type, color):
points = []
for x in range(-800, 30, 10):
points.append([line_param[0] * x**2 + line_param[1] * x + line_param[2], x])
points = np.array(points)
points[:,0] = points[:,0] + extractor.IMAGE_WID/2
points[:,1] = points[:,1] + extractor.IMAGE_HEI
points = cv2.perspectiveTransform(np.array([points], dtype='float32'), np.array(extractor.H_OP))
offset_y = extractor.CUT_OFFSET_IMG[0]
points = points[0]
points[:,1] = points[:,1] + offset_y
# print ("drawParabola points:" + str(points))
parabola_im = np.zeros((extractor.IMAGE_HEI,extractor.IMAGE_WID,3), np.uint8)
if type in ["yellow dashed", "yellow solid", "yellow solid solid", "yellow dashed dashed", "yellow dashed-solid", "yellow solid-dashed"]:
cv2.polylines(parabola_im, np.int32([np.vstack((points[:,0], points[:,1])).T]), False, (0, 200, 200), thickness=2)
elif type in ["boundary", "fork_edge", "handrail"]:
cv2.polylines(parabola_im, np.int32([np.vstack((points[:, 0], points[:, 1])).T]), False, (0, 0, 200), thickness=4)
else:
cv2.polylines(parabola_im, np.int32([np.vstack((points[:,0], points[:,1])).T]), False, color, thickness=2)
kernel = np.ones((5,5), np.float32) / 25
parabola_im = cv2.filter2D(parabola_im, -1, kernel)
# parabola_im = cv2.GaussianBlur(parabola_im, (16, 16),0)
image = cv2.addWeighted(image, 1., parabola_im, 1., 0)
return image
def add2MsgQueue(result, frameId, fork_x, img_debug):
if (result is None) or len(result[0]) == 0:
print ("error: len(line_list) == 0")
return [], None
full_line_list = []
full_cache_list = []
line_filter = [left_fork_filter]
is_fork = (len(result) == 2)
if is_fork:
if not right_fork_filter.isAvialabel():
right_fork_filter.reset(left_fork_filter.cache_list)
line_filter.append(right_fork_filter)
else:
if right_fork_filter.isAvialabel():
left_fork_filter.extend(right_fork_filter.cache_list)
for index, parabola_param in enumerate(result):
line_list = []
for (line_param, line_type) in zip(parabola_param[0], parabola_param[1]):
if abs(line_param[2]) > 500:
print ("abs(line_param[2]) > 500")
continue
# line_info = {'curve_param':line_param[0:3].tolist(), 'type':line_type, 'score':line_param[3], 'x':line_param[4]}
line_info = {'curve_param':line_param[0:3].tolist(), 'type':line_type, 'score':line_param[3], 'x':line_param[2], 'middle':line_param[5]}
line_list.append(line_info)
line_list, cache_list = line_filter[index].get_predict_list(line_list, frameId, fork_x[0] if is_fork else None, index==0)
full_line_list.append(line_list)
full_cache_list.append(cache_list)
filter_list = None
particles = None
# filter_list = dr_filter(line_list)
# ret = particle_filter(line_list)
# if not ret is None:
# filter_list, particles = ret
finalMessage = {'frame': frameId, 'timestamp': time.time(), 'is_fork': is_fork, 'line_list': full_line_list[0]}
json_str = json.dumps(finalMessage)
print ("finalMessage:", json_str)
if g_detect_queue.full():
g_detect_queue.get_nowait()
g_detect_queue.put(json_str)
return full_line_list, full_cache_list, filter_list, particles
def get_right_parabola(line_list):
for index, line in enumerate(line_list):
if line["curve_param"][2] > 0:
ret = line["curve_param"][:]
ret[2] = ret[2] % extractor.lane_wid
return ret
ret = line_list[-1]["curve_param"][:]
ret[2] = ret[2] % extractor.lane_wid
return ret
def particle_filter(line_list):
global g_particle_filter
if line_list is None or len(line_list) == 0:
return None
param = get_right_parabola(line_list)
meter_scale = (3.5/extractor.lane_wid)
x = param[2] * meter_scale
if g_particle_filter is None or (time.time() - g_particle_filter.timestamp > 1):
g_particle_filter = FusionParticle(x, g_dr_queue)
g_particle_filter.start()
return None
t = time.time()
# x_estimate, particles = g_particle_filter.update(x)
x_estimate, particles = g_particle_filter.update(x, param)
dalta_x = (x_estimate - x) / meter_scale
print (str(time.time()-t) + "particle filter adjust x:" + str(dalta_x))
filter_list = copy.deepcopy(line_list)
for line in filter_list:
line["curve_param"][2] += dalta_x
return filter_list, particles
g_x_log = []
g_x_pred_log = []
g_x_est_log = []
g_x_time = []
def dr_filter(line_list):
if line_list is None or len(line_list) == 0:
return None
global g_fusion_filter
param = get_right_parabola(line_list)
meter_scale = (3.5/extractor.lane_wid)
x = param[2] * meter_scale
avg_speed = []
avg_angle = []
for i in range(10):
message = g_dr_queue.get(True)
json_item = json.loads(message)
avg_speed.append(json_item["speed"])
avg_angle.append(json_item["steerAngle"])
avg_speed = np.array(avg_speed)
debug_angle = avg_angle[0]
avg_angle = np.array(avg_angle)
avg_speed = np.mean(avg_speed)
avg_angle = np.mean(avg_angle)
print ("g_dr_queue speed:{} angle:{}->{}".format(avg_speed, debug_angle, avg_angle))
v = avg_speed
wheel_theta = avg_angle / const.STEER_RATIO
wheel_theta = math.radians(wheel_theta)
car_theta = np.pi/2 + wheel_theta
w = v/(const.WHEEL_BASE/np.sin(wheel_theta))
# if car_theta < np.pi / 2:
# w = -w
if g_fusion_filter is None or (time.time() - g_fusion_filter.timestamp > 1):
g_fusion_filter = Fusion(x, v, w)
print ("kalman filter recreate")
return None
t = time.time() - g_fusion_filter.timestamp
# pre_estimate = g_fusion_filter.get_estimate()
#x, v, w, t, parabola_param
if len(g_x_time) == 0:
g_x_time.append(t)
else:
g_x_time.append(t + g_x_time[-1])
g_x_log.append(x)
estimate_x = g_fusion_filter.update_step(x, v, w, t, param)
predict_x = g_fusion_filter.get_predict()
g_x_pred_log.append(predict_x)
g_x_est_log.append(estimate_x)
print("kalman filter: {} + {} --> {} ".format(x, predict_x, estimate_x))
if len(g_x_log) % 100 == 0:
np.savetxt('kalman_x.txt', g_x_log, newline=',', fmt=str("%s"))
np.savetxt('kalman_x_pred.txt', np.array(g_x_pred_log), newline=',', fmt=str("%s"))
np.savetxt('kalman_x_est.txt', np.array(g_x_est_log), newline=',', fmt=str("%s"))
np.savetxt('kalman_x_time.txt', np.array(g_x_time), newline=',', fmt=str("%s"))
dalta_x = (estimate_x - x) / meter_scale
print ("kalman filter adjust x:" + str(dalta_x))
filter_list = copy.deepcopy(line_list)
for line in filter_list:
line["curve_param"][2] += dalta_x
return filter_list
left_fork_filter = LineFilter()
right_fork_filter = LineFilter()
def main(args):
    """Top-level inference loop.

    Pulls frames from one of three sources -- a ZMQ image service
    (``--video zmq``), a directory of still images, or a video file --
    runs each frame through ``hanle_frame`` and logs the per-frame time.

    Blocks until the source is exhausted (Ctrl-C for the ZMQ source),
    then waits for Enter so debug windows stay open.
    """
    logger = logging.getLogger(__name__)
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
    args.weights = cache_url(args.weights, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    model = infer_engine.initialize_model_from_cfg(args.weights)
    dummy_coco_dataset = dummy_datasets.get_coco_dataset()
    zmq_video = args.video == "zmq"
    frameId = 0
    print ("args.video:" + str(args.video))
    socket = None
    im_list = None
    ret = None
    context = None
    cap = None  # only opened for the video-file source; guarded before release
    if zmq_video:
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect("tcp://localhost:{}".format(const.PORT_IMAGE_OUT))
    elif os.path.isdir(args.video):
        im_list = glob.glob(args.video + '/*.' + args.image_ext)
        im_list.sort()
    else:
        # From virtual camera video and its associated timestamp file on Drive PX2,e.g."./lane/videofilepath.h264"
        cap = cv2.VideoCapture(args.video)
    im_file_index = frameId
    while True:
        file_name = ""
        if zmq_video:
            try:
                socket.send_string('req from detectron')
                print ("--------------------send!")
                message = socket.recv()
                print ("--------------------recv!" + str(time.time()))
                print("Received message length:" + str(len(message)) + " type:" + str(type(message)))
                if len(message) < 100:
                    continue
                img_np = np.fromstring(message, np.uint8)
                if const.CAMERA_TYPE != 2:
                    img_np = img_np.reshape((1208, 1920, 3))
                else:
                    img_np = img_np.reshape((604, 960, 3))
                print("nparr type:" + str(type(img_np)) + " shape:" + str(img_np.shape))
                ret = True
            except KeyboardInterrupt:
                print ("interrupt received, stopping...")
                socket.close()
                context.term()
                ret = False
                # BUGFIX: 'cap' is never opened for the ZMQ source, so the
                # previous unconditional cap.release() raised NameError here.
                if cap is not None:
                    cap.release()
        elif os.path.isdir(args.video):
            if im_file_index >= len(im_list):
                break
            file_name = im_list[im_file_index].split("/")[-1].split(".")[0]
            img_np = cv2.imread(im_list[im_file_index])
            img_np = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
            im_file_index += 1
            ret = True
        else:
            ret, img_np = cap.read()
        frameId += 1
        # read completely or raise exception
        if not ret:
            print("cannot get frame")
            break
        if frameId < 0:
            continue
        if frameId % 1 == 0:  # process every frame; raise modulus to skip frames
            t = time.time()
            print("time:" + str(t))
            time.sleep(0.001)
            if extractor.scale_size:
                # Halve the image in both dimensions by striding rows/columns.
                img_np = img_np[::2]
                img_np = img_np[:, ::2]
            origin_im = np.copy(img_np)
            # Crop to the vertical band the lane detector expects.
            img_np = img_np[extractor.CUT_OFFSET_IMG[0]:extractor.CUT_OFFSET_IMG[1], 0:extractor.IMAGE_WID]
            print ("detection size:", img_np.shape)
            hanle_frame(args, frameId, origin_im, img_np, logger, model, dummy_coco_dataset, file_name)
            logger.info('hanle_frame time: {:.3f}s'.format(time.time() - t))
    raw_input('press Enter to exit...')
def show_debug_img():
    """Worker process: consume debug bundles from g_debug_img_queue and render
    an annotated composite image (camera view + probability curves + detected
    parabolas), saving it to args.output_dir and showing it in a window."""
    print ("debug img process start !")
    while(True):
        # Blocking get: each message is a tuple of images, line lists and ids.
        message = g_debug_img_queue.get(True)
        if not message is None:
            origin_im, im, mid_im, top_im, line_list_array, cache_list_array, filter_list_array, frameId, fork_pos, file_name = message
            half_size = (int(im.shape[1] / 2), int(im.shape[0] / 2))
            if extractor.IMAGE_WID > 960:
                # Downscale large frames for display.
                # NOTE(review): IMAGE_WID/2 is integer division only on Py2;
                # on Py3 cv2.resize would receive floats -- confirm runtime.
                im = cv2.resize(im, half_size)
                top_im = cv2.resize(top_im, (extractor.IMAGE_WID/2, extractor.IMAGE_HEI/2))
                mid_im = cv2.resize(mid_im, half_size)
                # mid_im = mid_im[604:902, 0:extractor.IMAGE_WID]
                # mid_im = cv2.resize(mid_im, (int(extractor.IMAGE_WID / 2), 150))
            else:
                # mid_im = mid_im[302:451, 0:extractor.IMAGE_WID]
                pass
            if (not line_list_array is None) and (not cache_list_array is None):
                if filter_list_array is None:
                    # Match the shape of line_list_array (1 or 2 entries).
                    filter_list_array = [[]] if len(line_list_array) == 1 else [[],[]]
                line_color = [(0, 200, 0), (100, 200, 0)]
                for line_list, cache_list, filter_list, color in zip(line_list_array, cache_list_array, filter_list_array, line_color):
                    x_pos = []
                    x_pos_11 = []
                    prob_wid = extractor.IMAGE_WID
                    if prob_wid > 960:
                        prob_wid = prob_wid / 2
                    # Build per-column score curves around the image centre:
                    # x_pos for current detections, x_pos_11 for cached lines.
                    for i in range(-int(prob_wid / 2), int(prob_wid / 2), 1):
                        matched_y = 1
                        matched_y_11 = 2
                        for l in line_list:
                            dis = abs(l['x'] - i)
                            if dis < 4:
                                # hei = dis
                                if l['type'] == "boundary":
                                    matched_y = int(220 * l['score'])
                                else:
                                    matched_y = int(190 * l['score'] - dis * dis)
                        for l in cache_list:
                            dis = abs(l['x'] - i)
                            if dis < 8:
                                matched_y_11 = int(200 * l['score'] - dis * dis)
                        x_pos.append([i + int(prob_wid / 2), matched_y])
                        x_pos_11.append([i + int(prob_wid / 2), matched_y_11])
                    # h = np.zeros((100, extractor.IMAGE_WID, 3))
                    # Green: current detections; red: cached lines.
                    cv2.polylines(origin_im, [np.array(x_pos)], False, (0, 255, 0))
                    cv2.polylines(origin_im, [np.array(x_pos_11)], False, (0, 0, 255))
                    # origin_im = np.flipud(origin_im)
                    # cv2.imshow('prob', h)
                    # cv2.waitKey(1)
                    for line in line_list:
                        line_param = line['curve_param']
                        line_type = line['type']
                        origin_im = drawParabola(origin_im, line_param[0:3], line_type, color=color)
                    if not filter_list is None:
                        # Filtered (Kalman-adjusted) lines drawn in blue.
                        for line in filter_list:
                            line_param = line['curve_param']
                            line_type = line['type']
                            origin_im = drawParabola(origin_im, line_param[0:3], line_type, color=(200, 0, 0))
            overlay = origin_im.copy()
            color = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (0, 255, 255), (255, 0, 255)]
            # for index in range(len(line_array)):
            #     if index > 0:
            #         left_line = line_array[index - 1]
            #         right_line = line_array[index]
            #         fill_points = np.array([np.append(left_line, right_line[::-1], axis=0)], dtype=np.int32)
            #         print ("fill_points:" + str(fill_points.shape))
            #         print ("color[index - 1]:" + str(color[index - 1]))
            #         cv2.fillPoly(overlay, fill_points, color[index - 1])
            # alpha = 0.2
            # cv2.addWeighted(overlay, alpha, origin_im, 1-alpha, 0, origin_im)
            # origin_im
            # Compose final mosaic: camera+top view on top, detection views below.
            origin_im = np.append(origin_im, top_im, axis=1)
            im = np.append(im, mid_im, axis=1)
            show_img = np.append(origin_im, im, axis=0)
            file_name = "source_{}_{}.png".format(file_name, frameId)
            cv2.imwrite(os.path.join(args.output_dir, file_name), show_img)
            cv2.imshow("carlab", show_img)
            cv2.waitKey(1)
def result_sender():
    """Worker process: serve detection results over a ZMQ REP socket.

    Blocks on ``g_detect_queue`` for the next payload, waits for a peer
    request, and replies with it.  A send timing out (SNDTIMEO = 3 s)
    raises ZMQError; we back off one second and keep serving.
    """
    print ("sender process start !")
    zmq_ctx = zmq.Context()
    rep_socket = zmq_ctx.socket(zmq.REP)
    rep_socket.setsockopt(zmq.SNDTIMEO, 3000)
    rep_socket.bind("tcp://*:{}".format(const.PORT_DETECTION))
    while True:
        payload = g_detect_queue.get(True)
        if payload is None:
            continue
        request = rep_socket.recv()
        print ("Received request:%s" % request)
        try:
            rep_socket.send(payload)
        except zmq.ZMQError:
            time.sleep(1)
def dr_recever():
    """Worker process: subscribe to dead-reckoning (speed/steer) messages.

    Connects a ZMQ SUB socket to the DR publisher and funnels each raw
    message into the bounded ``g_dr_queue``, dropping the oldest entry when
    full so the consumer always sees recent data.
    """
    print ("dr recever process start !")
    sub_context = zmq.Context()
    socket = sub_context.socket(zmq.SUB)
    print ("tcp://localhost:{}".format(const.PORT_DR_OUT))
    socket.connect("tcp://localhost:{}".format(const.PORT_DR_OUT))
    socket.setsockopt_string(zmq.SUBSCRIBE, "")
    # socket.setsockopt(zmq.CONFLATE, 1)
    while True:
        try:
            string = socket.recv()
            # print ("Received:{}".format(len(string)))
            if g_dr_queue.full():
                # Drop the oldest message to make room for the newest.
                g_dr_queue.get(True)
            g_dr_queue.put(string)
        # BUGFIX: was ``except zmq.ZMQError, Queue.em:`` which (Py2 syntax)
        # bound the exception to the nonexistent attribute ``Queue.em``
        # instead of also catching queue-empty errors.
        except (zmq.ZMQError, Queue.Empty):
            time.sleep(1)
if __name__ == '__main__':
    workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
    setup_logging(__name__)
    args = parse_args()
    # g_fusion_filter = Fusion()
    # Bounded IPC queues shared with the worker processes below; workers
    # inherit them at Process creation (fork-based multiprocessing assumed
    # -- confirm on non-POSIX platforms).
    g_detect_queue = Queue(2)
    g_dr_queue = Queue(10)
    # Result publisher process (ZMQ REP socket).
    p = Process(target=result_sender)
    p.start()
    g_debug_img_queue = Queue(2)
    # Debug visualization process (OpenCV windows + PNG dumps).
    p = Process(target=show_debug_img)
    p.start()
    # pdr_receiver = Process(target=dr_recever)
    # pdr_receiver.start()
    main(args)
| arp/arp_infer.py | 22,884 | !/usr/bin/env python2 Copyright (c) 2017-present, Facebook, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. NOQA (Must import before importing caffe2 due to bug in cv2) import arp.line_detection as detection OpenCL may be enabled by default in OpenCV3; disable it because it's not thread safe and causes unwanted GPU memory allocations.im is rgb cv2.imshow("tmplog", im) cv2.waitKey(0) for k, v in timers.items(): logger.info(' | {}: {:.3f}s'.format(k, v.average_time)) histogram[index][0] = index + 100int(p.x) / meter_scale print ("drawParabola points:" + str(points)) parabola_im = cv2.GaussianBlur(parabola_im, (16, 16),0) line_info = {'curve_param':line_param[0:3].tolist(), 'type':line_type, 'score':line_param[3], 'x':line_param[4]} filter_list = dr_filter(line_list) ret = particle_filter(line_list) if not ret is None: filter_list, particles = ret x_estimate, particles = g_particle_filter.update(x) if car_theta < np.pi / 2: w = -w pre_estimate = g_fusion_filter.get_estimate()x, v, w, t, parabola_param From virtual camera video and its associated timestamp file on Drive PX2,e.g."./lane/videofilepath.h264" read completely or raise exceptioncv2.imwrite("tmp" + str(frameId) + ".png", img_np) img_np = cv2.resize(img_np, dsize=img_np.shape/2, interpolation=cv2.INTER_CUBIC) img_np = cv2.undistort(img_np, mtx, dist, None) mid_im = mid_im[604:902, 0:extractor.IMAGE_WID] mid_im = cv2.resize(mid_im, (int(extractor.IMAGE_WID / 2), 150)) mid_im = mid_im[302:451, 0:extractor.IMAGE_WID] 
hei = dis h = np.zeros((100, extractor.IMAGE_WID, 3)) origin_im = np.flipud(origin_im) cv2.imshow('prob', h) cv2.waitKey(1) for index in range(len(line_array)): if index > 0: left_line = line_array[index - 1] right_line = line_array[index] fill_points = np.array([np.append(left_line, right_line[::-1], axis=0)], dtype=np.int32) print ("fill_points:" + str(fill_points.shape)) print ("color[index - 1]:" + str(color[index - 1])) cv2.fillPoly(overlay, fill_points, color[index - 1]) alpha = 0.2 cv2.addWeighted(overlay, alpha, origin_im, 1-alpha, 0, origin_im) origin_im socket.setsockopt(zmq.CONFLATE, 1) print ("Received:{}".format(len(string))) g_fusion_filter = Fusion() pdr_receiver = Process(target=dr_recever) pdr_receiver.start() | 2,773 | en | 0.605303 |
#!/usr/bin/python3
# Helper script for the transactions method that can read the log file
# and use it to detect and recover the state to one of the transactions
# at which the script had previously been restarted.
import sys
import os
import re
import json
import subprocess
import codecs
import argparse
import tempfile
import git
# Matches ac2git log lines of the form:
#   "<timestamp> - Branch <name> at <hex-hash>[, current]."
branchRe = re.compile(r'^.* - Branch ([^ \t\r\n]+) at ([0-9a-fA-F]+)(, current)?.$')

def GetBranch(logLine):
    """Parse a branch-state log line.

    Returns a dict with keys ``name``, ``commit`` and ``is_current`` when
    *logLine* matches the branch format, otherwise ``None``.
    """
    m = branchRe.match(logLine)
    if m is None:
        return None
    # Renamed from 'hash' to avoid shadowing the builtin of the same name.
    commitHash = m.group(2)
    return {
        "name": m.group(1),
        "commit": commitHash,
        "is_current": m.group(3) is not None,
    }
# Matches the "Loaded last state at transaction N as:" marker line.
lastStateRe = re.compile(r'^.*Loaded last state at transaction ([0-9]+) as:$')

def GetTransaction(logLine):
    """Return the transaction number parsed from *logLine*, or None."""
    match = lastStateRe.match(logLine)
    return int(match.group(1)) if match else None
def Restore(repoPath, branchList, transaction):
    """Write the parsed state back into the git repo so ac2git can resume.

    Serializes {transaction, branch_list} to JSON, stores it as a git blob
    via ``git hash-object -w`` and points ``refs/ac2git/state`` at it.
    Returns 0 on success, 1 if the repository cannot be opened; raises on
    git command failure.
    """
    print("Restoring state for transaction: {tr}".format(tr=transaction))
    print("branch list:")
    for br in branchList:
        print(" - Branch {br} at {hash}.{current}".format(br=br["name"], hash=br["commit"], current=' Current.' if br["is_current"] else ''))
    state = { "transaction": transaction, "branch_list": branchList }
    # NOTE(review): ``git`` here is presumably the local git.py wrapper of
    # the ac2git project (GitPython has no top-level ``open``) -- confirm.
    repo = git.open(repoPath)
    if repo is None:
        print("Failed to open git repository '{r}'".format(r=repoPath))
        return 1
    # delete=False so the file survives the 'with' and git can read it.
    stateFilePath = None
    with tempfile.NamedTemporaryFile(mode='w+', prefix='ac2git_state_', delete=False) as stateFile:
        stateFilePath = stateFile.name
        stateFile.write(json.dumps(state))
    hashObj = repo.raw_cmd(['git', 'hash-object', '-w', stateFilePath ])
    if hashObj is None:
        # NOTE(review): the temp file is not removed on this failure path.
        raise Exception("Failed to restore state! git hash-object -w {f}, returned {r}.".format(f=stateFilePath, r=hashObj))
    else:
        os.remove(stateFilePath)
    # Point the ac2git state ref at the newly written blob.
    refResult = repo.raw_cmd(['git', 'update-ref', 'refs/ac2git/state', hashObj])
    if refResult is None:
        raise Exception("Failed to restore state! git update-ref refs/ac2git/state {h}, returned {r}.".format(h=hashObj, r=refResult))
    return 0
def Main(argv):
    """Entry point: scan the log for restore points and optionally restore.

    Parses command line options from *argv*, lists every transaction for
    which state information exists in the log file and, when ``-t`` is
    given, restores the git repository to that transaction's state.

    Returns a process exit code (0 on success, 1 on error).
    """
    argparser = argparse.ArgumentParser(description='Processes a logfile previously generated by the ac2git.py script for restore points and optionally restores the state of a git repository to a selected point. Only works for the transactions method conversions.')
    argparser.add_argument('-f', '--file', dest='file', help='The log file from which the state information will be parsed.')
    argparser.add_argument('-t', '--transaction', dest='transaction', help='The transaction, from the log file, to which the state will be restored to. If omitted then all potential restore points are printed and the script exits with a return code of 1.')
    argparser.add_argument('-r', '--git-repo', dest='repo', help='The path to the git repository whose state will be restored.')
    # BUGFIX: honour the argv parameter (it used to be ignored and argparse
    # always read sys.argv); argv[0] is the script name, so skip it.
    args = argparser.parse_args(argv[1:])

    # BUGFIX: -f is optional, so guard against None before os.path.exists,
    # which would otherwise raise a TypeError.
    if args.file is None or not os.path.exists(args.file):
        print("Failed to open log file '{f}'.".format(f=args.file))
        return 1

    trList = []
    with codecs.open(args.file) as f:
        line = f.readline()
        while len(line) > 0:
            line = line.strip()
            tr = GetTransaction(line)
            if tr is not None:
                # The branch list immediately follows the transaction line.
                branchList = []
                line = f.readline()
                while len(line) > 0:
                    line = line.strip()
                    br = GetBranch(line)
                    if br is not None:
                        branchList.append(br)
                    else:
                        break
                    line=f.readline()
                if args.transaction is not None and int(tr) == int(args.transaction):
                    return Restore(args.repo, branchList, int(args.transaction))
                elif tr not in trList:
                    trList.append(tr)
                    print("Found transaction {tr}.".format(tr=tr))
            line = f.readline()
    if len(trList) > 0:
        print("Please choose one of the transactions listed above to restore the state to and re-run the script with the -t option.")
        return 0
    else:
        print("Found no usable transaction state information in the log file '{f}'".format(f=args.file))
        return 1
if __name__ == "__main__":
    # BUGFIX: propagate Main's return code to the shell; previously it was
    # discarded and the script always exited with status 0.
    sys.exit(Main(sys.argv))
| recover_state_from_log.py | 4,375 | !/usr/bin/python3 Helper script for the transactions method that can read the log file and use it to detect and recover the state to one of the transactions at which the script had previously been restarted. | 208 | en | 0.95829 |
# Generated by Django 3.1.7 on 2021-07-12 13:39
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ``Feedback.name`` to ``Feedback.subject`` (column rename only)."""

    dependencies = [
        # Builds on the migration that added Feedback.phone.
        ('contact', '0002_feedback_phone'),
    ]

    operations = [
        migrations.RenameField(
            model_name='feedback',
            old_name='name',
            new_name='subject',
        ),
    ]
| OmegaErp/contact/migrations/0003_auto_20210712_1539.py | 360 | Generated by Django 3.1.7 on 2021-07-12 13:39 | 45 | en | 0.707983 |
"""Plugin
=========
The fixtures provided by pytest-kivy.
"""
import pytest
import weakref
from typing import Tuple, Type, Optional, Callable
import gc
import logging
from os import environ
from pytest_kivy.app import AsyncUnitApp
__all__ = ('trio_kivy_app', 'asyncio_kivy_app', 'async_kivy_app')
#: NOTE: Kivy cannot be imported before or while the plugin is imported or
# configured as that leads to pytest issues.
environ['KIVY_USE_DEFAULTCONFIG'] = '1'

# Select the async backend once at import time; the matching pair of
# private fixtures normalises the trio/asyncio differences so the public
# app fixtures can take both a nursery and an event loop unconditionally.
_async_lib = environ.get('KIVY_EVENTLOOP', 'asyncio')

if _async_lib == 'asyncio':
    # asyncio backend: no nursery; pass through pytest-asyncio's event_loop.
    @pytest.fixture
    async def _nursery():
        return None

    @pytest.fixture
    async def _event_loop(event_loop):
        return event_loop
elif _async_lib == 'trio':
    # trio backend: pass through pytest-trio's nursery; no event loop.
    @pytest.fixture
    async def _nursery(nursery):
        return nursery

    @pytest.fixture
    async def _event_loop():
        return None
else:
    raise TypeError(f'unknown event loop {_async_lib}')
def pytest_addoption(parser):
    """pytest hook: register the kivy app-release leak-check options."""
    group = parser.getgroup("kivy")
    group.addoption(
        "--kivy-app-release",
        action="store_true",
        default=False,
        help='Whether to check after each test if the app is released and no '
        'references are kept to the app preventing it from being garbage '
        'collected.',
    )
    group.addoption(
        "--kivy-app-release-end",
        action="store_true",
        default=False,
        # BUGFIX: the original adjacent string literals lacked separating
        # spaces, rendering as "appswere" and "preventingthem" in --help.
        help='Whether to check at the end of all tests if all of the test '
        'apps were released and no references were kept to the app '
        'preventing them from being garbage collected.',
    )
@pytest.fixture(scope='session')
def _app_release_list():
    """Session fixture collecting (app, request) weakref pairs.

    After the whole session finishes, every recorded app is expected to
    have been garbage collected; any survivor is logged and fails the run.
    """
    apps = []
    yield apps

    gc.collect()
    alive_apps = []
    # NOTE(review): the final entry is deliberately skipped ([:-1]); it may
    # still be held alive by the last test's fixtures -- confirm intent.
    # (Fixed: the enumerate index was unused and has been dropped.)
    for app_ref, request_ref in apps[:-1]:
        app = app_ref()
        request = request_ref()
        if request is None:
            request = '<dead request>'
        if app is not None:
            alive_apps.append((app, request))
            logging.error(
                'Memory leak: failed to release app for test ' + repr(request))

    assert not alive_apps, 'Memory leak: failed to release all apps'
@pytest.fixture
def _app_release():
    """Per-test fixture: record one (app, request) weakref pair and assert
    the app has been garbage collected once the test is over."""
    recorded = []
    yield recorded

    gc.collect()
    if not recorded:
        return

    app_ref, request_ref = recorded[0]
    app = app_ref()
    request = request_ref()
    if request is None:
        request = '<dead request>'
    assert app is None, \
        f'Memory leak: failed to release app for test {request!r}'
def _get_request_config(
        request, _app_release_list, _app_release
) -> Tuple[Type[AsyncUnitApp], dict, Optional[Callable], list]:
    """Extract per-test app configuration from the fixture request.

    Returns the app wrapper class, its constructor kwargs, the optional
    Kivy App class and the release-tracking list selected by the
    ``--kivy-app-release`` / ``--kivy-app-release-end`` flags (or None).
    """
    options = getattr(request, 'param', {})

    if request.config.getoption("kivy_app_release"):
        release_list = _app_release
    elif request.config.getoption("kivy_app_release_end"):
        release_list = _app_release_list
    else:
        release_list = None

    return (
        options.get('cls', AsyncUnitApp),
        options.get('kwargs', {}),
        options.get('app_cls', None),
        release_list,
    )
@pytest.fixture
async def trio_kivy_app(
        request, nursery, _app_release_list, _app_release
) -> AsyncUnitApp:
    """Fixture yielding a :class:`~pytest_kivy.app.AsyncUnitApp` using
    explicitly trio as backend for the async library.

    pytest-trio and trio must be installed, and ``trio_mode = true`` must be
    set in pytest.ini.
    """
    cls, kwargs, app_cls, app_list = _get_request_config(
        request, _app_release_list, _app_release)

    async with cls(nursery=nursery, async_lib='trio', **kwargs) as app:
        # Record weakrefs so the release fixtures can detect leaks without
        # themselves keeping the app alive.
        if app_list is not None:
            app_list.append((weakref.ref(app), weakref.ref(request)))

        if app_cls is not None:
            # Start the requested Kivy App and surface any startup error.
            await app(app_cls)
            app.raise_startup_exception()

        yield app

        # Teardown: make sure the app has fully stopped before exiting the
        # async context.
        await app.wait_stop_app()
@pytest.fixture
async def asyncio_kivy_app(
        request, event_loop, _app_release_list, _app_release) -> AsyncUnitApp:
    """Fixture yielding a :class:`~pytest_kivy.app.AsyncUnitApp` using
    explicitly asyncio as backend for the async library.

    pytest-asyncio must be installed.
    """
    cls, kwargs, app_cls, app_list = _get_request_config(
        request, _app_release_list, _app_release)

    async with cls(
            event_loop=event_loop, async_lib='asyncio', **kwargs) as app:
        # Record weakrefs so the release fixtures can detect leaks without
        # themselves keeping the app alive.
        if app_list is not None:
            app_list.append((weakref.ref(app), weakref.ref(request)))

        if app_cls is not None:
            # Start the requested Kivy App and surface any startup error.
            await app(app_cls)
            app.raise_startup_exception()

        yield app

        # Teardown: wait for the app to stop before leaving the context.
        await app.wait_stop_app()
@pytest.fixture
async def async_kivy_app(
        request, _app_release_list, _app_release, _nursery, _event_loop
) -> AsyncUnitApp:
    """Fixture yielding a :class:`~pytest_kivy.app.AsyncUnitApp` using
    trio or asyncio as backend for the async library, depending on
    KIVY_EVENTLOOP.

    If using trio, pytest-trio and trio must be installed, and
    ``trio_mode = true`` must be set in pytest.ini. If using asyncio,
    pytest-asyncio must be installed.
    """
    cls, kwargs, app_cls, app_list = _get_request_config(
        request, _app_release_list, _app_release)

    # _nursery/_event_loop are the backend-normalising fixtures defined at
    # module scope; exactly one of them is non-None for a given backend.
    async with cls(
            nursery=_nursery, event_loop=_event_loop, async_lib=_async_lib,
            **kwargs) as app:
        # Record weakrefs so the release fixtures can detect leaks without
        # themselves keeping the app alive.
        if app_list is not None:
            app_list.append((weakref.ref(app), weakref.ref(request)))

        if app_cls is not None:
            # Start the requested Kivy App and surface any startup error.
            await app(app_cls)
            app.raise_startup_exception()

        yield app

        # Teardown: wait for the app to stop before leaving the context.
        await app.wait_stop_app()
| pytest_kivy/plugin.py | 5,561 | Plugin
=========
The fixtures provided by pytest-kivy.
: NOTE: Kivy cannot be imported before or while the plugin is imported or configured as that leads to pytest issues. | 173 | en | 0.913644 |
"""Test data purging."""
from datetime import datetime, timedelta
import json
from unittest.mock import patch
from homeassistant.components import recorder
from homeassistant.components.recorder.const import DATA_INSTANCE
from homeassistant.components.recorder.models import Events, RecorderRuns, States
from homeassistant.components.recorder.purge import purge_old_data
from homeassistant.components.recorder.util import session_scope
from homeassistant.util import dt as dt_util
from .common import wait_recording_done
def test_purge_old_states(hass, hass_recorder):
    """Test deleting old states.

    purge_old_data deletes in bounded batches: each call removes a chunk
    and returns False until nothing older than keep_days remains.
    """
    hass = hass_recorder()
    _add_test_states(hass)

    # make sure we start with 6 states
    with session_scope(hass=hass) as session:
        states = session.query(States)
        assert states.count() == 6

        # run purge_old_data()
        finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
        assert not finished
        assert states.count() == 4

        finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
        assert not finished
        assert states.count() == 2

        # Third call finds nothing older than 4 days left to delete.
        finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
        assert finished
        assert states.count() == 2
def test_purge_old_events(hass, hass_recorder):
    """Test deleting old events.

    Mirrors test_purge_old_states: purge_old_data works in batches and
    reports finished=True only once no old events remain.
    """
    hass = hass_recorder()
    _add_test_events(hass)

    with session_scope(hass=hass) as session:
        events = session.query(Events).filter(Events.event_type.like("EVENT_TEST%"))
        assert events.count() == 6

        # run purge_old_data()
        finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
        assert not finished
        assert events.count() == 4

        finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
        assert not finished
        assert events.count() == 2

        # we should only have 2 events left
        finished = purge_old_data(hass.data[DATA_INSTANCE], 4, repack=False)
        assert finished
        assert events.count() == 2
def test_purge_old_recorder_runs(hass, hass_recorder):
    """Test deleting old recorder runs keeps current run."""
    hass = hass_recorder()
    _add_test_recorder_runs(hass)

    # make sure we start with 7 recorder runs (6 added + the current one)
    with session_scope(hass=hass) as session:
        recorder_runs = session.query(RecorderRuns)
        assert recorder_runs.count() == 7

        # run purge_old_data() with keep_days=0: everything but the current
        # run is eligible for deletion.
        finished = purge_old_data(hass.data[DATA_INSTANCE], 0, repack=False)
        assert finished
        assert recorder_runs.count() == 1
def test_purge_method(hass, hass_recorder):
    """Test purge method.

    Exercises the recorder.purge service: default keep_days, explicit
    keep_days via service data, and the repack option (vacuum).
    """
    hass = hass_recorder()
    service_data = {"keep_days": 4}
    _add_test_events(hass)
    _add_test_states(hass)
    _add_test_recorder_runs(hass)

    # make sure we start with 6 states
    with session_scope(hass=hass) as session:
        states = session.query(States)
        assert states.count() == 6

        events = session.query(Events).filter(Events.event_type.like("EVENT_TEST%"))
        assert events.count() == 6

        recorder_runs = session.query(RecorderRuns)
        assert recorder_runs.count() == 7

        hass.data[DATA_INSTANCE].block_till_done()
        wait_recording_done(hass)

        # run purge method - no service data, use defaults
        hass.services.call("recorder", "purge")
        hass.block_till_done()

        # Small wait for recorder thread
        hass.data[DATA_INSTANCE].block_till_done()
        wait_recording_done(hass)

        # only purged old events (default keep_days retains the 5-day rows)
        assert states.count() == 4
        assert events.count() == 4

        # run purge method - correct service data
        hass.services.call("recorder", "purge", service_data=service_data)
        hass.block_till_done()

        # Small wait for recorder thread
        hass.data[DATA_INSTANCE].block_till_done()
        wait_recording_done(hass)

        # we should only have 2 states left after purging
        assert states.count() == 2

        # now we should only have 2 events left
        assert events.count() == 2

        # now we should only have 3 recorder runs left
        assert recorder_runs.count() == 3

        assert not ("EVENT_TEST_PURGE" in (event.event_type for event in events.all()))

        # run purge method - correct service data, with repack
        with patch("homeassistant.components.recorder.purge._LOGGER") as mock_logger:
            service_data["repack"] = True
            hass.services.call("recorder", "purge", service_data=service_data)
            hass.block_till_done()
            hass.data[DATA_INSTANCE].block_till_done()
            wait_recording_done(hass)
            # Verify the repack path logged the vacuum message.
            assert (
                mock_logger.debug.mock_calls[5][1][0]
                == "Vacuuming SQL DB to free space"
            )
def _add_test_states(hass):
    """Add multiple states to the db for testing.

    Inserts six states: two 11 days old ("autopurgeme"), two 5 days old
    ("purgeme") and two current ("dontpurgeme").
    """
    now = datetime.now()
    five_days_ago = now - timedelta(days=5)
    eleven_days_ago = now - timedelta(days=11)
    attributes = {"test_attr": 5, "test_attr_10": "nice"}

    hass.block_till_done()
    hass.data[DATA_INSTANCE].block_till_done()
    wait_recording_done(hass)

    def _timestamp_and_state(index):
        # Two rows per age bucket, oldest first.
        if index < 2:
            return eleven_days_ago, "autopurgeme"
        if index < 4:
            return five_days_ago, "purgeme"
        return now, "dontpurgeme"

    with recorder.session_scope(hass=hass) as session:
        for event_id in range(6):
            timestamp, state = _timestamp_and_state(event_id)
            session.add(
                States(
                    entity_id="test.recorder2",
                    domain="sensor",
                    state=state,
                    attributes=json.dumps(attributes),
                    last_changed=timestamp,
                    last_updated=timestamp,
                    created=timestamp,
                    event_id=event_id + 1000,
                )
            )
def _add_test_events(hass):
    """Add a few events for testing.

    Inserts six events: two 11 days old (EVENT_TEST_AUTOPURGE), two 5 days
    old (EVENT_TEST_PURGE) and two current (EVENT_TEST).
    """
    now = datetime.now()
    five_days_ago = now - timedelta(days=5)
    eleven_days_ago = now - timedelta(days=11)
    event_data = {"test_attr": 5, "test_attr_10": "nice"}

    hass.block_till_done()
    hass.data[DATA_INSTANCE].block_till_done()
    wait_recording_done(hass)

    def _timestamp_and_type(index):
        # Two rows per age bucket, oldest first.
        if index < 2:
            return eleven_days_ago, "EVENT_TEST_AUTOPURGE"
        if index < 4:
            return five_days_ago, "EVENT_TEST_PURGE"
        return now, "EVENT_TEST"

    with recorder.session_scope(hass=hass) as session:
        for event_id in range(6):
            timestamp, event_type = _timestamp_and_type(event_id)
            session.add(
                Events(
                    event_type=event_type,
                    event_data=json.dumps(event_data),
                    origin="LOCAL",
                    created=timestamp,
                    time_fired=timestamp,
                )
            )
def _add_test_recorder_runs(hass):
    """Add a few recorder_runs for testing.

    Inserts six one-day runs: two starting 11 days ago, two 5 days ago and
    two starting now.
    """
    now = datetime.now()
    five_days_ago = now - timedelta(days=5)
    eleven_days_ago = now - timedelta(days=11)

    hass.block_till_done()
    hass.data[DATA_INSTANCE].block_till_done()
    wait_recording_done(hass)

    with recorder.session_scope(hass=hass) as session:
        for rec_id in range(6):
            start = (
                eleven_days_ago if rec_id < 2
                else five_days_ago if rec_id < 4
                else now
            )
            session.add(
                RecorderRuns(
                    start=start,
                    created=dt_util.utcnow(),
                    end=start + timedelta(days=1),
                )
            )
| tests/components/recorder/test_purge.py | 7,933 | Add a few events for testing.
Add a few recorder_runs for testing.
Add multiple states to the db for testing.
Test purge method.
Test deleting old events.
Test deleting old recorder runs keeps current run.
Test deleting old states.
Test data purging.
make sure we start with 6 states run purge_old_data() run purge_old_data() we should only have 2 events left make sure we start with 7 recorder runs run purge_old_data() make sure we start with 6 states run purge method - no service data, use defaults Small wait for recorder thread only purged old events run purge method - correct service data Small wait for recorder thread we should only have 2 states left after purging now we should only have 2 events left now we should only have 3 recorder runs left run purge method - correct service data, with repack | 813 | en | 0.898183 |
"""An abstract class for entities."""
from abc import ABC
import asyncio
from datetime import datetime, timedelta
import functools as ft
import logging
from timeit import default_timer as timer
from typing import Any, Awaitable, Dict, Iterable, List, Optional
from homeassistant.config import DATA_CUSTOMIZE
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_PICTURE,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_DEFAULT_NAME,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import CALLBACK_TYPE, Context, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError, NoEntitySpecifiedError
from homeassistant.helpers.entity_platform import EntityPlatform
from homeassistant.helpers.entity_registry import RegistryEntry
from homeassistant.helpers.event import Event, async_track_entity_registry_updated_event
from homeassistant.helpers.typing import StateType
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util, ensure_unique_string, slugify
_LOGGER = logging.getLogger(__name__)
SLOW_UPDATE_WARNING = 10
DATA_ENTITY_SOURCE = "entity_info"
SOURCE_CONFIG_ENTRY = "config_entry"
SOURCE_PLATFORM_CONFIG = "platform_config"
@callback
@bind_hass
def entity_sources(hass: HomeAssistant) -> Dict[str, Dict[str, str]]:
    """Get the entity sources."""
    # Stored under hass.data["entity_info"]; returns {} when nothing has
    # been registered yet.  Presumably keyed by entity_id (see the
    # SOURCE_* constants above) -- confirm against callers.
    return hass.data.get(DATA_ENTITY_SOURCE, {})
def generate_entity_id(
    entity_id_format: str,
    name: Optional[str],
    current_ids: Optional[List[str]] = None,
    hass: Optional[HomeAssistant] = None,
) -> str:
    """Generate a unique entity ID based on given entity IDs or used IDs.

    Thin synchronous facade: delegates directly to
    async_generate_entity_id, which holds the actual logic.
    """
    return async_generate_entity_id(entity_id_format, name, current_ids, hass)
@callback
def async_generate_entity_id(
    entity_id_format: str,
    name: Optional[str],
    current_ids: Optional[Iterable[str]] = None,
    hass: Optional[HomeAssistant] = None,
) -> str:
    """Generate a unique entity ID based on given entity IDs or used IDs."""
    slug = slugify((name or DEVICE_DEFAULT_NAME).lower())
    preferred_string = entity_id_format.format(slug)

    # With an explicit candidate list, resolve uniqueness locally.
    if current_ids is not None:
        return ensure_unique_string(preferred_string, current_ids)

    if hass is None:
        raise ValueError("Missing required parameter current_ids or hass")

    # Otherwise probe the state machine: try the preferred id first, then
    # "<preferred>_2", "<preferred>_3", ... until one is available.
    candidate = preferred_string
    suffix = 1
    while not hass.states.async_available(candidate):
        suffix += 1
        candidate = f"{preferred_string}_{suffix}"
    return candidate
class Entity(ABC):
"""An abstract class for Home Assistant entities."""
    # SAFE TO OVERWRITE
    # The properties and methods here are safe to overwrite when inheriting
    # this class. These may be used to customize the behavior of the entity.
    entity_id: str = None  # type: ignore

    # Owning hass instance. Will be set by EntityPlatform
    # While not purely typed, it makes typehinting more useful for us
    # and removes the need for constant None checks or asserts.
    # Ignore types: https://github.com/PyCQA/pylint/issues/3167
    hass: HomeAssistant = None  # type: ignore

    # Owning platform instance. Will be set by EntityPlatform
    platform: Optional[EntityPlatform] = None

    # If we reported if this entity was slow
    _slow_reported = False

    # If we reported this entity is updated while disabled
    _disabled_reported = False

    # Protect for multiple updates
    _update_staged = False

    # Process updates in parallel
    parallel_updates: Optional[asyncio.Semaphore] = None

    # Entry in the entity registry
    registry_entry: Optional[RegistryEntry] = None

    # Hold list for functions to call on remove.
    _on_remove: Optional[List[CALLBACK_TYPE]] = None

    # Context (set via async_set_context along with its timestamp)
    _context: Optional[Context] = None
    _context_set: Optional[datetime] = None

    # If entity is added to an entity platform
    _added = False

    @property
    def should_poll(self) -> bool:
        """Return True if entity has to be polled for state.

        False if entity pushes its state to HA.
        """
        return True

    @property
    def unique_id(self) -> Optional[str]:
        """Return a unique ID."""
        return None

    @property
    def name(self) -> Optional[str]:
        """Return the name of the entity."""
        return None

    @property
    def state(self) -> StateType:
        """Return the state of the entity."""
        return STATE_UNKNOWN
    @property
    def capability_attributes(self) -> Optional[Dict[str, Any]]:
        """Return the capability attributes.

        Attributes that explain the capabilities of an entity.

        Implemented by component base class. Convention for attribute names
        is lowercase snake_case.
        """
        return None

    @property
    def state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return the state attributes.

        Implemented by component base class, should not be extended by integrations.
        Convention for attribute names is lowercase snake_case.
        """
        return None

    # Deprecated alias retained for backward compatibility; platforms should
    # implement extra_state_attributes instead (see docstring below).
    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return entity specific state attributes.

        This method is deprecated, platform classes should implement
        extra_state_attributes instead.
        """
        return None

    @property
    def extra_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return entity specific state attributes.

        Implemented by platform classes. Convention for attribute names
        is lowercase snake_case.
        """
        return None
@property
def device_info(self) -> Optional[Dict[str, Any]]:
"""Return device specific attributes.
Implemented by platform classes.
"""
return None
@property
def device_class(self) -> Optional[str]:
"""Return the class of this device, from component DEVICE_CLASSES."""
return None
@property
def unit_of_measurement(self) -> Optional[str]:
"""Return the unit of measurement of this entity, if any."""
return None
@property
def icon(self) -> Optional[str]:
"""Return the icon to use in the frontend, if any."""
return None
@property
def entity_picture(self) -> Optional[str]:
"""Return the entity picture to use in the frontend, if any."""
return None
@property
def available(self) -> bool:
    """Return whether the entity is currently reachable/available."""
    return True
@property
def assumed_state(self) -> bool:
    """Return whether the reported state is assumed rather than read back."""
    return False
@property
def force_update(self) -> bool:
    """Return whether every state write should fire a state-changed event.

    When True, a state change is triggered every time the state property
    is written, even if the value itself did not change.
    """
    return False
@property
def supported_features(self) -> Optional[int]:
    """Return a bitmask of features supported by this entity, if any."""
    return None
@property
def context_recent_time(self) -> timedelta:
    """Return how long a set context is still considered recent."""
    return timedelta(seconds=5)
@property
def entity_registry_enabled_default(self) -> bool:
    """Return whether the entity starts enabled when first registered."""
    return True
# DO NOT OVERWRITE
# These properties and methods are either managed by Home Assistant or they
# are used to perform a very specific function. Overwriting these may
# produce undesirable effects in the entity's operation.
@property
def enabled(self) -> bool:
    """Return whether this entity is enabled in the entity registry.

    Entities without a registry entry cannot be disabled, so they are
    always considered enabled.
    """
    entry = self.registry_entry
    return entry is None or not entry.disabled
@callback
def async_set_context(self, context: Context) -> None:
    """Record the context the entity currently operates under."""
    self._context_set = dt_util.utcnow()
    self._context = context
async def async_update_ha_state(self, force_refresh: bool = False) -> None:
    """Push this entity's current state into Home Assistant.

    When ``force_refresh`` is true the entity is refreshed (via
    ``async_device_update``) before the state is written. Must be run in
    the event loop.
    """
    if self.hass is None:
        raise RuntimeError(f"Attribute hass is None for {self}")
    if self.entity_id is None:
        raise NoEntitySpecifiedError(
            f"No entity id specified for entity {self.name}"
        )

    if force_refresh:
        # A failing refresh is logged and the state write is skipped.
        try:
            await self.async_device_update()
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Update for %s fails", self.entity_id)
            return

    self._async_write_ha_state()
@callback
def async_write_ha_state(self) -> None:
    """Write the entity's current state to the state machine."""
    if self.hass is None:
        raise RuntimeError(f"Attribute hass is None for {self}")
    if self.entity_id is None:
        raise NoEntitySpecifiedError(
            f"No entity id specified for entity {self.name}"
        )
    self._async_write_ha_state()
@callback
def _async_write_ha_state(self) -> None:
    """Write the state to the state machine.

    Collects state + attributes from the entity's properties, applies
    customization and temperature-unit conversion, and pushes the result
    into ``hass.states``.
    """
    # A disabled registry entry should never be written; warn once.
    if self.registry_entry and self.registry_entry.disabled_by:
        if not self._disabled_reported:
            self._disabled_reported = True
            assert self.platform is not None
            _LOGGER.warning(
                "Entity %s is incorrectly being triggered for updates while it is disabled. This is a bug in the %s integration",
                self.entity_id,
                self.platform.platform_name,
            )
        return

    # Time the property reads so slow entities can be reported below.
    start = timer()

    attr = self.capability_attributes
    attr = dict(attr) if attr else {}

    if not self.available:
        state = STATE_UNAVAILABLE
    else:
        sstate = self.state
        state = STATE_UNKNOWN if sstate is None else str(sstate)
        attr.update(self.state_attributes or {})
        extra_state_attributes = self.extra_state_attributes
        # Backwards compatibility for "device_state_attributes" deprecated in 2021.4
        # Add warning in 2021.6, remove in 2021.10
        if extra_state_attributes is None:
            extra_state_attributes = self.device_state_attributes
        attr.update(extra_state_attributes or {})

    unit_of_measurement = self.unit_of_measurement
    if unit_of_measurement is not None:
        attr[ATTR_UNIT_OF_MEASUREMENT] = unit_of_measurement

    # Registry-entry overrides (name/icon) take precedence over the
    # entity's own properties.
    entry = self.registry_entry
    # pylint: disable=consider-using-ternary
    name = (entry and entry.name) or self.name
    if name is not None:
        attr[ATTR_FRIENDLY_NAME] = name

    icon = (entry and entry.icon) or self.icon
    if icon is not None:
        attr[ATTR_ICON] = icon

    entity_picture = self.entity_picture
    if entity_picture is not None:
        attr[ATTR_ENTITY_PICTURE] = entity_picture

    assumed_state = self.assumed_state
    if assumed_state:
        attr[ATTR_ASSUMED_STATE] = assumed_state

    supported_features = self.supported_features
    if supported_features is not None:
        attr[ATTR_SUPPORTED_FEATURES] = supported_features

    device_class = self.device_class
    if device_class is not None:
        attr[ATTR_DEVICE_CLASS] = str(device_class)

    end = timer()

    # Report (once) entities whose property reads take over 0.4 s.
    if end - start > 0.4 and not self._slow_reported:
        self._slow_reported = True
        extra = ""
        if "custom_components" in type(self).__module__:
            extra = "Please report it to the custom component author."
        else:
            extra = (
                "Please create a bug report at "
                "https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue"
            )
            if self.platform:
                extra += (
                    f"+label%3A%22integration%3A+{self.platform.platform_name}%22"
                )

        _LOGGER.warning(
            "Updating state for %s (%s) took %.3f seconds. %s",
            self.entity_id,
            type(self),
            end - start,
            extra,
        )

    # Overwrite properties that have been set in the config file.
    if DATA_CUSTOMIZE in self.hass.data:
        attr.update(self.hass.data[DATA_CUSTOMIZE].get(self.entity_id))

    # Convert temperature if we detect one
    try:
        unit_of_measure = attr.get(ATTR_UNIT_OF_MEASUREMENT)
        units = self.hass.config.units
        if (
            unit_of_measure in (TEMP_CELSIUS, TEMP_FAHRENHEIT)
            and unit_of_measure != units.temperature_unit
        ):
            # Preserve the precision of the original string state.
            prec = len(state) - state.index(".") - 1 if "." in state else 0
            temp = units.temperature(float(state), unit_of_measure)
            state = str(round(temp) if prec == 0 else round(temp, prec))
            attr[ATTR_UNIT_OF_MEASUREMENT] = units.temperature_unit
    except ValueError:
        # Could not convert state to float
        pass

    # Drop a stale context so the write is not attributed to an old action.
    if (
        self._context_set is not None
        and dt_util.utcnow() - self._context_set > self.context_recent_time
    ):
        self._context = None
        self._context_set = None

    self.hass.states.async_set(
        self.entity_id, state, attr, self.force_update, self._context
    )
def schedule_update_ha_state(self, force_refresh: bool = False) -> None:
    """Schedule a state-write task from a non-event-loop thread.

    Scheduling (instead of writing directly) avoids executor deadlocks;
    state and attributes are read when the scheduled task runs, so
    intermediate transitions that happen before it runs are missed.
    """
    self.hass.add_job(self.async_update_ha_state(force_refresh))  # type: ignore
@callback
def async_schedule_update_ha_state(self, force_refresh: bool = False) -> None:
    """Schedule a state-write task from inside the event loop.

    Scheduling (instead of refreshing inline) avoids executor deadlocks;
    state and attributes are read when the scheduled task runs, so
    intermediate transitions that happen before it runs are missed.
    """
    if not force_refresh:
        self.async_write_ha_state()
    else:
        self.hass.async_create_task(self.async_update_ha_state(force_refresh))
async def async_device_update(self, warning: bool = True) -> None:
    """Process 'update' or 'async_update' from entity.

    This method is a coroutine. When ``warning`` is true, a log warning
    is emitted if the update takes longer than SLOW_UPDATE_WARNING.
    """
    # Re-entrancy guard: skip if an update is already in flight.
    if self._update_staged:
        return
    self._update_staged = True

    # Process update sequential
    if self.parallel_updates:
        await self.parallel_updates.acquire()

    try:
        # pylint: disable=no-member
        if hasattr(self, "async_update"):
            task = self.hass.async_create_task(self.async_update())  # type: ignore
        elif hasattr(self, "update"):
            task = self.hass.async_add_executor_job(self.update)  # type: ignore
        else:
            # Entity implements neither update hook; nothing to do.
            return

        if not warning:
            await task
            return

        # Wait up to SLOW_UPDATE_WARNING seconds for the task to finish.
        finished, _ = await asyncio.wait([task], timeout=SLOW_UPDATE_WARNING)

        # Task finished in time: re-raise any exception it produced.
        for done in finished:
            exc = done.exception()
            if exc:
                raise exc
            return

        # Task is still running: warn, then keep waiting for it.
        _LOGGER.warning(
            "Update of %s is taking over %s seconds",
            self.entity_id,
            SLOW_UPDATE_WARNING,
        )
        await task
    finally:
        self._update_staged = False
        if self.parallel_updates:
            self.parallel_updates.release()
@callback
def async_on_remove(self, func: CALLBACK_TYPE) -> None:
    """Register *func* to be called when this entity is removed."""
    if self._on_remove is None:
        self._on_remove = [func]
    else:
        self._on_remove.append(func)
async def async_removed_from_registry(self) -> None:
    """Hook called after this entity's registry entry has been removed.

    Default implementation does nothing; integrations may extend it.
    """
@callback
def add_to_platform_start(
    self,
    hass: HomeAssistant,
    platform: EntityPlatform,
    parallel_updates: Optional[asyncio.Semaphore],
) -> None:
    """Begin adding this entity to *platform*.

    Raises HomeAssistantError if the entity was already added.
    """
    if self._added:
        raise HomeAssistantError(
            f"Entity {self.entity_id} cannot be added a second time to an entity platform"
        )

    self.hass = hass
    self.platform = platform
    self.parallel_updates = parallel_updates
    self._added = True
@callback
def add_to_platform_abort(self) -> None:
    """Undo a partially completed ``add_to_platform_start``."""
    self._added = False
    self.hass = None  # type: ignore
    self.platform = None
    self.parallel_updates = None
async def add_to_platform_finish(self) -> None:
    """Complete adding this entity to a platform and write its first state."""
    await self.async_internal_added_to_hass()
    await self.async_added_to_hass()
    self.async_write_ha_state()
async def async_remove(self, *, force_remove: bool = False) -> None:
    """Remove the entity from Home Assistant.

    If the entity still has a non-disabled entity-registry entry, its
    state is set to unavailable (mirroring what happens when the registry
    loads). Otherwise — or when ``force_remove`` is True — the state is
    removed entirely.
    """
    if self.platform and not self._added:
        raise HomeAssistantError(
            f"Entity {self.entity_id} async_remove called twice"
        )
    self._added = False

    # Fire the registered on-remove callbacks, most recent first.
    if self._on_remove is not None:
        while self._on_remove:
            self._on_remove.pop()()

    await self.async_internal_will_remove_from_hass()
    await self.async_will_remove_from_hass()

    # Keep an unavailable placeholder while a live registry entry still
    # exists (e.g. while a config entry is being unloaded).
    if (
        not force_remove
        and self.registry_entry
        and not self.registry_entry.disabled
    ):
        # Set the entity's state to unavailable + ATTR_RESTORED: True
        self.registry_entry.write_unavailable_state(self.hass)
    else:
        self.hass.states.async_remove(self.entity_id, context=self._context)
async def async_added_to_hass(self) -> None:
    """Hook called when the entity is about to be added to hass.

    Default implementation does nothing; integrations may extend it.
    """
async def async_will_remove_from_hass(self) -> None:
    """Hook called just before the entity is removed from hass.

    Default implementation does nothing; integrations may extend it.
    """
async def async_internal_added_to_hass(self) -> None:
    """Internal bookkeeping when the entity is about to be added to hass.

    Not to be extended by integrations. Records the entity's source and
    subscribes to registry updates for registered entities.
    """
    if self.platform:
        # Record where this entity came from (config entry vs platform).
        info = {"domain": self.platform.platform_name}

        if self.platform.config_entry:
            info["source"] = SOURCE_CONFIG_ENTRY
            info["config_entry"] = self.platform.config_entry.entry_id
        else:
            info["source"] = SOURCE_PLATFORM_CONFIG

        self.hass.data.setdefault(DATA_ENTITY_SOURCE, {})[self.entity_id] = info

    if self.registry_entry is not None:
        # This is an assert as it should never happen, but helps in tests
        assert (
            not self.registry_entry.disabled_by
        ), f"Entity {self.entity_id} is being added while it's disabled"

        self.async_on_remove(
            async_track_entity_registry_updated_event(
                self.hass, self.entity_id, self._async_registry_updated
            )
        )
async def async_internal_will_remove_from_hass(self) -> None:
    """Internal bookkeeping just before the entity is removed from hass.

    Not to be extended by integrations.
    """
    if not self.platform:
        return
    self.hass.data[DATA_ENTITY_SOURCE].pop(self.entity_id)
async def _async_registry_updated(self, event: Event) -> None:
    """Handle an entity registry update event for this entity."""
    data = event.data
    if data["action"] == "remove":
        await self.async_removed_from_registry()
        self.registry_entry = None
        await self.async_remove()

    if data["action"] != "update":
        return

    # Re-fetch our (possibly changed) registry entry.
    ent_reg = await self.hass.helpers.entity_registry.async_get_registry()
    old = self.registry_entry
    self.registry_entry = ent_reg.async_get(data["entity_id"])
    assert self.registry_entry is not None

    # A newly disabled entry means the entity must be torn down.
    if self.registry_entry.disabled:
        await self.async_remove()
        return

    assert old is not None
    if self.registry_entry.entity_id == old.entity_id:
        # Same entity id: just rewrite the state under the updated entry.
        self.async_write_ha_state()
        return

    # The entity id changed: remove under the old id and re-add under the
    # new one through the owning platform.
    await self.async_remove(force_remove=True)

    assert self.platform is not None
    self.entity_id = self.registry_entry.entity_id
    await self.platform.async_add_entities([self])
def __eq__(self, other: Any) -> bool:
    """Return True when *other* is the same logical entity."""
    if not isinstance(other, self.__class__):
        return False

    # Equality is only decidable when both sides carry a unique id.
    if self.unique_id is None or other.unique_id is None:
        return False

    # If either side is bound to a platform, both must be bound to the
    # same platform for the unique ids to be comparable.
    if self.platform is not None or other.platform is not None:
        if self.platform is None or other.platform is None:
            return False
        if self.platform.platform != other.platform.platform:
            return False

    return self.unique_id == other.unique_id
def __repr__(self) -> str:
    """Return a debug representation of the entity."""
    return "<Entity {}: {}>".format(self.name, self.state)
async def async_request_call(self, coro: Awaitable) -> None:
    """Await *coro*, serialized through the platform's update semaphore."""
    if self.parallel_updates:
        await self.parallel_updates.acquire()

    try:
        await coro
    finally:
        if self.parallel_updates:
            self.parallel_updates.release()
class ToggleEntity(Entity):
    """An abstract class for entities that can be turned on and off."""

    @property
    def state(self) -> str:
        """Return STATE_ON when the entity is on, STATE_OFF otherwise."""
        if self.is_on:
            return STATE_ON
        return STATE_OFF

    @property
    def is_on(self) -> bool:
        """Return True if the entity is on; must be provided by subclasses."""
        raise NotImplementedError()

    def turn_on(self, **kwargs: Any) -> None:
        """Turn the entity on."""
        raise NotImplementedError()

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn the entity on by running ``turn_on`` in the executor."""
        await self.hass.async_add_executor_job(ft.partial(self.turn_on, **kwargs))

    def turn_off(self, **kwargs: Any) -> None:
        """Turn the entity off."""
        raise NotImplementedError()

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn the entity off by running ``turn_off`` in the executor."""
        await self.hass.async_add_executor_job(ft.partial(self.turn_off, **kwargs))

    def toggle(self, **kwargs: Any) -> None:
        """Flip the entity to the opposite state."""
        if self.is_on:
            self.turn_off(**kwargs)
        else:
            self.turn_on(**kwargs)

    async def async_toggle(self, **kwargs: Any) -> None:
        """Flip the entity to the opposite state (async variant)."""
        if self.is_on:
            await self.async_turn_off(**kwargs)
        else:
            await self.async_turn_on(**kwargs)
| homeassistant/helpers/entity.py | 24,720 | An abstract class for Home Assistant entities.
An abstract class for entities that can be turned on and off.
Return the comparison.
Return the representation.
Write the state to the state machine.
Abort adding an entity to a platform.
Start adding an entity to a platform.
Return True if unable to access real state of the entity.
Generate a unique entity ID based on given entity IDs or used IDs.
Add a function to call when entity removed.
Schedule an update ha state change task.
This method must be run in the event loop.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed.
Set the context the entity currently operates under.
Write the state to the state machine.
Return True if entity is available.
Return the capability attributes.
Attributes that explain the capabilities of an entity.
Implemented by component base class. Convention for attribute names
is lowercase snake_case.
Time that a context is considered recent.
Return the class of this device, from component DEVICE_CLASSES.
Return device specific attributes.
Implemented by platform classes.
Return entity specific state attributes.
This method is deprecated, platform classes should implement
extra_state_attributes instead.
Return if the entity is enabled in the entity registry.
If an entity is not part of the registry, it cannot be disabled
and will therefore always be enabled.
Return the entity picture to use in the frontend, if any.
Return if the entity should be enabled when first added to the entity registry.
Get the entity sources.
Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
Return True if state updates should be forced.
If True, a state change will be triggered anytime the state property is
updated, not just when the value changes.
Generate a unique entity ID based on given entity IDs or used IDs.
Return the icon to use in the frontend, if any.
Return True if entity is on.
Return the name of the entity.
Schedule an update ha state change task.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed.
Return True if entity has to be polled for state.
False if entity pushes its state to HA.
Return the state of the entity.
Return the state.
Return the state attributes.
Implemented by component base class, should not be extended by integrations.
Convention for attribute names is lowercase snake_case.
Flag supported features.
Toggle the entity.
Turn the entity off.
Turn the entity on.
Return a unique ID.
Return the unit of measurement of this entity, if any.
An abstract class for entities.
SAFE TO OVERWRITE The properties and methods here are safe to overwrite when inheriting this class. These may be used to customize the behavior of the entity. type: ignore Owning hass instance. Will be set by EntityPlatform While not purely typed, it makes typehinting more useful for us and removes the need for constant None checks or asserts. Ignore types: https://github.com/PyCQA/pylint/issues/3167 type: ignore Owning platform instance. Will be set by EntityPlatform If we reported if this entity was slow If we reported this entity is updated while disabled Protect for multiple updates Process updates in parallel Entry in the entity registry Hold list for functions to call on remove. Context If entity is added to an entity platform DO NOT OVERWRITE These properties and methods are either managed by Home Assistant or they are used to perform a very specific function. Overwriting these may produce undesirable effects in the entity's operation. update entity data pylint: disable=broad-except Backwards compatibility for "device_state_attributes" deprecated in 2021.4 Add warning in 2021.6, remove in 2021.10 pylint: disable=consider-using-ternary Overwrite properties that have been set in the config file. Convert temperature if we detect one Could not convert state to float type: ignore Process update sequential pylint: disable=no-member type: ignore type: ignore type: ignore Check if entry still exists in entity registry (e.g. unloading config entry) Set the entity's state will to unavailable + ATTR_RESTORED: True This is an assert as it should never happen, but helps in tests Can only decide equality if both have a unique id Ensure they belong to the same platform | 4,714 | en | 0.822937 |
# from https://stackoverflow.com/questions/8032642/how-to-obtain-image-size-using-standard-python-class-without-using-external-lib
import struct
import imghdr
def get_image_size(fname):
    """Determine the image type of *fname* and return (width, height).

    Supports PNG, GIF and JPEG files; returns None when the file is too
    short, unrecognized, or malformed (originally adapted from draco).
    """
    with open(fname, "rb") as fhandle:
        head = fhandle.read(24)
        if len(head) != 24:
            return None
        # Sniff the format from the header bytes we already read, instead
        # of the deprecated imghdr module (removed in Python 3.13), which
        # also re-opened the file once per format probe.
        if head.startswith(b"\x89PNG\r\n\x1a\n"):
            # Bytes 4-8 of a valid PNG signature are \r\n\x1a\n.
            check = struct.unpack(">i", head[4:8])[0]
            if check != 0x0D0A1A0A:
                return None
            width, height = struct.unpack(">ii", head[16:24])
        elif head[:6] in (b"GIF87a", b"GIF89a"):
            # Logical screen descriptor: little-endian width/height.
            width, height = struct.unpack("<HH", head[6:10])
        elif head.startswith(b"\xff\xd8"):
            # JPEG: scan the marker segments for a SOFn frame header.
            try:
                fhandle.seek(0)  # Read 0xff next
                size = 2
                ftype = 0
                while not 0xC0 <= ftype <= 0xCF:
                    fhandle.seek(size, 1)
                    byte = fhandle.read(1)
                    while ord(byte) == 0xFF:
                        byte = fhandle.read(1)
                    ftype = ord(byte)
                    size = struct.unpack(">H", fhandle.read(2))[0] - 2
                # We are at a SOFn block
                fhandle.seek(1, 1)  # Skip `precision' byte.
                height, width = struct.unpack(">HH", fhandle.read(4))
            except Exception:  # IGNORE:W0703
                return None
        else:
            return None
    return width, height
| mdpdfbook/mdpdf/image.py | 1,508 | Determine the image type of fhandle and return its size.
from draco
from https://stackoverflow.com/questions/8032642/how-to-obtain-image-size-using-standard-python-class-without-using-external-lib Read 0xff next We are at a SOFn block Skip `precision' byte. IGNORE:W0703 | 273 | en | 0.776093 |
# Copyright 2021 Zeppelin Bend Pty Ltd
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
__all__ = ["get_upstream_end_to_tns"]
from typing import List, Tuple, TypeVar
from zepben.evolve import PowerTransformerEnd, SinglePhaseKind, PhaseDirection
T = TypeVar("T")
def get_upstream_end_to_tns(
    ends_to_topological_nodes: List[Tuple[PowerTransformerEnd, T]]
) -> List[Tuple[PowerTransformerEnd, T]]:
    """Filter (end, node) pairs to those whose end feeds power inward.

    A pair is kept when both members are non-None and at least one of the
    end terminal's A/B/C phases has a normal direction of IN.
    """

    def _feeds_in(end: PowerTransformerEnd) -> bool:
        # TODO: How to account for the fact you can have phases with different directions??
        traced = end.terminal.traced_phases
        return any(
            traced.direction_normal(phase).has(PhaseDirection.IN)
            for phase in (SinglePhaseKind.A, SinglePhaseKind.B, SinglePhaseKind.C)
        )

    return [
        (end, tn)
        for (end, tn) in ends_to_topological_nodes
        if tn is not None and end is not None and _feeds_in(end)
    ]
| src/pp_creators/utils.py | 1,120 | Copyright 2021 Zeppelin Bend Pty Ltd This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/. TODO: How to account for the fact you can have phases with different directions?? | 315 | en | 0.9192 |
# coding: utf-8
# Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# Copyright 2018 Kenneth Reitz
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import datetime
import sys
# Import encoding now, to avoid implicit import later.
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
import encodings.idna
from oci._vendor.urllib3.fields import RequestField
from oci._vendor.urllib3.filepost import encode_multipart_formdata
from oci._vendor.urllib3.util import parse_url
from oci._vendor.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from io import UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from ._internal_utils import to_native_string, unicode_is_ascii
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, check_header_validity)
from .compat import (
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
    codes.moved,               # 301
    codes.found,               # 302
    codes.other,               # 303
    codes.temporary_redirect,  # 307
    codes.permanent_redirect,  # 308
)

#: Maximum number of redirects followed before giving up.
DEFAULT_REDIRECT_LIMIT = 30
#: Default chunk size (bytes) when iterating decoded response content.
CONTENT_CHUNK_SIZE = 10 * 1024
#: Default chunk size (bytes) when iterating response lines.
ITER_CHUNK_SIZE = 512
class RequestEncodingMixin(object):
    """Mixin providing URL-path and body encoding helpers for requests."""

    @property
    def path_url(self):
        """Build the path URL to use (path plus query string, no host)."""
        url = []

        p = urlsplit(self.url)

        # An empty path component is normalized to the root path.
        path = p.path
        if not path:
            path = '/'

        url.append(path)

        query = p.query
        if query:
            url.append('?')
            url.append(query)

        return ''.join(url)

    @staticmethod
    def _encode_params(data):
        """Encode parameters in a piece of data.

        Will successfully encode parameters when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.
        """
        # Strings/bytes and file-like objects pass through unchanged.
        if isinstance(data, (str, bytes)):
            return data
        elif hasattr(data, 'read'):
            return data
        elif hasattr(data, '__iter__'):
            result = []
            for k, vs in to_key_val_list(data):
                # Treat a scalar value as a one-element list so repeated
                # keys and plain pairs encode the same way.
                if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
                    vs = [vs]
                for v in vs:
                    if v is not None:
                        result.append(
                            (k.encode('utf-8') if isinstance(k, str) else k,
                             v.encode('utf-8') if isinstance(v, str) else v))
            return urlencode(result, doseq=True)
        else:
            return data

    @staticmethod
    def _encode_files(files, data):
        """Build the body for a multipart/form-data request.

        Will successfully encode files when passed as a dict or a list of
        tuples. Order is retained if data is a list of tuples but arbitrary
        if parameters are supplied as a dict.
        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
        or 4-tuples (filename, fileobj, contentype, custom_headers).
        """
        if (not files):
            raise ValueError("Files must be provided.")
        elif isinstance(data, basestring):
            raise ValueError("Data must not be a string.")

        new_fields = []
        fields = to_key_val_list(data or {})
        files = to_key_val_list(files or {})

        # Plain form fields are appended first, ahead of the file parts.
        for field, val in fields:
            if isinstance(val, basestring) or not hasattr(val, '__iter__'):
                val = [val]
            for v in val:
                if v is not None:
                    # Don't call str() on bytestrings: in Py3 it all goes wrong.
                    if not isinstance(v, bytes):
                        v = str(v)

                    new_fields.append(
                        (field.decode('utf-8') if isinstance(field, bytes) else field,
                         v.encode('utf-8') if isinstance(v, str) else v))

        for (k, v) in files:
            # support for explicit filename
            ft = None
            fh = None
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                elif len(v) == 3:
                    fn, fp, ft = v
                else:
                    fn, fp, ft, fh = v
            else:
                fn = guess_filename(v) or k
                fp = v

            # Normalize the file payload to raw data; file-like objects
            # are read fully into memory here.
            if isinstance(fp, (str, bytes, bytearray)):
                fdata = fp
            elif hasattr(fp, 'read'):
                fdata = fp.read()
            elif fp is None:
                continue
            else:
                fdata = fp

            rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
            rf.make_multipart(content_type=ft)
            new_fields.append(rf)

        body, content_type = encode_multipart_formdata(new_fields)

        return body, content_type
class RequestHooksMixin(object):
    """Mixin providing event-hook registration helpers."""

    def register_hook(self, event, hook):
        """Properly register a hook (a callable or an iterable of callables)."""
        if event not in self.hooks:
            raise ValueError('Unsupported event specified, with event name "%s"' % (event))

        if isinstance(hook, Callable):
            self.hooks[event].append(hook)
        elif hasattr(hook, '__iter__'):
            callables = [h for h in hook if isinstance(h, Callable)]
            self.hooks[event].extend(callables)

    def deregister_hook(self, event, hook):
        """Deregister a previously registered hook.

        Returns True if the hook existed, False if not.
        """
        registered = self.hooks[event]
        if hook in registered:
            registered.remove(hook)
            return True
        return False
class Request(RequestHooksMixin):
    """A user-created :class:`Request <Request>` object.

    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.

    :param method: HTTP method to use.
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach to the request. If a dictionary or
        list of tuples ``[(key, value)]`` is provided, form-encoding will
        take place.
    :param json: json for the body to attach to the request (if files or data is not specified).
    :param params: URL parameters to append to the URL. If a dictionary or
        list of tuples ``[(key, value)]`` is provided, form-encoding will
        take place.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
    :param hooks: dictionary of callback hooks, for internal usage.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'https://httpbin.org/get')
      >>> req.prepare()
      <PreparedRequest [GET]>
    """

    def __init__(self,
            method=None, url=None, headers=None, files=None, data=None,
            params=None, auth=None, cookies=None, hooks=None, json=None):

        self.hooks = default_hooks()
        for event, hook in list((hooks or {}).items()):
            self.register_hook(event=event, hook=hook)

        self.method = method
        self.url = url
        # Collection-valued params default to fresh empty containers so
        # instances never share mutable state.
        self.headers = {} if headers is None else headers
        self.files = [] if files is None else files
        self.data = [] if data is None else data
        self.json = json
        self.params = {} if params is None else params
        self.auth = auth
        self.cookies = cookies

    def __repr__(self):
        return '<Request [{}]>'.format(self.method)

    def prepare(self):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
        prepared = PreparedRequest()
        prepared.prepare(
            method=self.method,
            url=self.url,
            headers=self.headers,
            files=self.files,
            data=self.data,
            json=self.json,
            params=self.params,
            auth=self.auth,
            cookies=self.cookies,
            hooks=self.hooks,
        )
        return prepared
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
    """Initialize an empty, not-yet-prepared request."""
    #: HTTP verb to send to the server.
    self.method = None
    #: HTTP URL to send the request to.
    self.url = None
    #: dictionary of HTTP headers.
    self.headers = None
    # The `CookieJar` used to create the Cookie header will be stored here
    # after prepare_cookies is called
    self._cookies = None
    #: request body to send to the server.
    self.body = None
    #: dictionary of callback hooks, for internal usage.
    self.hooks = default_hooks()
    #: integer denoting starting position of a readable file-like body.
    self._body_position = None
def prepare(self,
        method=None, url=None, headers=None, files=None, data=None,
        params=None, auth=None, cookies=None, hooks=None, json=None):
    """Prepares the entire request with the given parameters.

    The call order below is significant: cookies must be prepared before
    the body/auth, and auth must see the fully prepared request.
    """
    self.prepare_method(method)
    self.prepare_url(url, params)
    self.prepare_headers(headers)
    self.prepare_cookies(cookies)
    self.prepare_body(data, files, json)
    self.prepare_auth(auth, url)

    # Note that prepare_auth must be last to enable authentication schemes
    # such as OAuth to work on a fully prepared request.

    # This MUST go after prepare_auth. Authenticators could add a hook
    self.prepare_hooks(hooks)
def __repr__(self):
    """Return a short debug representation, e.g. ``<PreparedRequest [GET]>``."""
    return '<PreparedRequest [{}]>'.format(self.method)
def copy(self):
    """Return a shallow copy of this prepared request."""
    clone = PreparedRequest()
    clone.method = self.method
    clone.url = self.url
    # Headers get their own dict; hooks and body are shared by reference.
    clone.headers = self.headers.copy() if self.headers is not None else None
    clone._cookies = _copy_cookie_jar(self._cookies)
    clone.body = self.body
    clone.hooks = self.hooks
    clone._body_position = self._body_position
    return clone
def prepare_method(self, method):
    """Prepares the given HTTP method (upper-cased, native string)."""
    if method is None:
        self.method = None
    else:
        self.method = to_native_string(method.upper())
@staticmethod
def _get_idna_encoded_host(host):
    """IDNA-encode *host*, raising UnicodeError on failure."""
    from oci._vendor import idna

    try:
        return idna.encode(host, uts46=True).decode('utf-8')
    except idna.IDNAError:
        raise UnicodeError
    def prepare_url(self, url, params):
        """Prepares the given HTTP URL.

        Splits *url* into components, IDNA-encodes non-ASCII hosts,
        byte-encodes each piece on Python 2, merges *params* into the
        query string, and stores the requoted result on ``self.url``.
        """
        #: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
        #: as this will include the bytestring indicator (b'')
        #: on python 3.x.
        #: https://github.com/psf/requests/pull/2238
        if isinstance(url, bytes):
            url = url.decode('utf8')
        else:
            url = unicode(url) if is_py2 else str(url)
        # Remove leading whitespaces from url
        url = url.lstrip()
        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
        # `data` etc to work around exceptions from `url_parse`, which
        # handles RFC 3986 only.
        if ':' in url and not url.lower().startswith('http'):
            self.url = url
            return
        # Support for unicode domain names and paths.
        try:
            scheme, auth, host, port, path, query, fragment = parse_url(url)
        except LocationParseError as e:
            raise InvalidURL(*e.args)
        if not scheme:
            error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
            error = error.format(to_native_string(url, 'utf8'))
            raise MissingSchema(error)
        if not host:
            raise InvalidURL("Invalid URL %r: No host supplied" % url)
        # In general, we want to try IDNA encoding the hostname if the string contains
        # non-ASCII characters. This allows users to automatically get the correct IDNA
        # behaviour. For strings containing only ASCII characters, we need to also verify
        # it doesn't start with a wildcard (*), before allowing the unencoded hostname.
        if not unicode_is_ascii(host):
            try:
                host = self._get_idna_encoded_host(host)
            except UnicodeError:
                raise InvalidURL('URL has an invalid label.')
        elif host.startswith(u'*'):
            raise InvalidURL('URL has an invalid label.')
        # Carefully reconstruct the network location
        netloc = auth or ''
        if netloc:
            netloc += '@'
        netloc += host
        if port:
            netloc += ':' + str(port)
        # Bare domains aren't valid URLs.
        if not path:
            path = '/'
        # On Python 2 every component must be a byte string before
        # urlunparse; Python 3 keeps text throughout.
        if is_py2:
            if isinstance(scheme, str):
                scheme = scheme.encode('utf-8')
            if isinstance(netloc, str):
                netloc = netloc.encode('utf-8')
            if isinstance(path, str):
                path = path.encode('utf-8')
            if isinstance(query, str):
                query = query.encode('utf-8')
            if isinstance(fragment, str):
                fragment = fragment.encode('utf-8')
        if isinstance(params, (str, bytes)):
            params = to_native_string(params)
        # Merge explicitly supplied params into any query string that was
        # already part of the URL.
        enc_params = self._encode_params(params)
        if enc_params:
            if query:
                query = '%s&%s' % (query, enc_params)
            else:
                query = enc_params
        url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
        self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
self.headers = CaseInsensitiveDict()
if headers:
for header in headers.items():
# Raise exception on invalid header value.
check_header_validity(header)
name, value = header
self.headers[to_native_string(name)] = value
    def prepare_body(self, data, files, json=None):
        """Prepares the given HTTP body data.

        Chooses between a streamed body (file-like / generator *data*),
        a multipart upload (*files*), form-encoded or raw *data*, or a
        JSON-serialized *json* payload, and sets the matching
        Content-Length / Transfer-Encoding / Content-Type headers.
        """
        # Check if file, fo, generator, iterator.
        # If not, run through normal process.
        # Nottin' on you.
        body = None
        content_type = None
        if not data and json is not None:
            # urllib3 requires a bytes-like body. Python 2's json.dumps
            # provides this natively, but Python 3 gives a Unicode string.
            content_type = 'application/json'
            body = complexjson.dumps(json)
            if not isinstance(body, bytes):
                body = body.encode('utf-8')
        # A "stream" is any iterable that is not a plain string, list,
        # tuple or mapping (those are treated as form/raw data instead).
        is_stream = all([
            hasattr(data, '__iter__'),
            not isinstance(data, (basestring, list, tuple, Mapping))
        ])
        if is_stream:
            try:
                length = super_len(data)
            except (TypeError, AttributeError, UnsupportedOperation):
                length = None
            body = data
            if getattr(body, 'tell', None) is not None:
                # Record the current file position before reading.
                # This will allow us to rewind a file in the event
                # of a redirect.
                try:
                    self._body_position = body.tell()
                except (IOError, OSError):
                    # This differentiates from None, allowing us to catch
                    # a failed `tell()` later when trying to rewind the body
                    self._body_position = object()
            if files:
                raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
            if length:
                self.headers['Content-Length'] = builtin_str(length)
            else:
                self.headers['Transfer-Encoding'] = 'chunked'
        else:
            # Multi-part file uploads.
            if files:
                (body, content_type) = self._encode_files(files, data)
            else:
                if data:
                    body = self._encode_params(data)
                    # Raw strings and file-like data keep whatever
                    # Content-Type the caller set; only form data gets
                    # the urlencoded type.
                    if isinstance(data, basestring) or hasattr(data, 'read'):
                        content_type = None
                    else:
                        content_type = 'application/x-www-form-urlencoded'
            self.prepare_content_length(body)
            # Add content-type if it wasn't explicitly provided.
            if content_type and ('content-type' not in self.headers):
                self.headers['Content-Type'] = content_type
        self.body = body
def prepare_content_length(self, body):
"""Prepare Content-Length header based on request method and body"""
if body is not None:
length = super_len(body)
if length:
# If length exists, set it. Otherwise, we fallback
# to Transfer-Encoding: chunked.
self.headers['Content-Length'] = builtin_str(length)
elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
# Set Content-Length to 0 for methods that can have a body
# but don't provide one. (i.e. not GET or HEAD)
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
"""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
# hooks can be passed as None to the prepare method and to this
# method. To prevent iterating over None, simply use an empty list
# if hooks is False-y
hooks = hooks or []
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
    """The :class:`Response <Response>` object, which contains a
    server's response to an HTTP request.
    """
    # Attribute names preserved by __getstate__/__setstate__ when pickling.
    __attrs__ = [
        '_content', 'status_code', 'headers', 'url', 'history',
        'encoding', 'reason', 'cookies', 'elapsed', 'request'
    ]
    def __init__(self):
        # ``False`` is a sentinel meaning "content not read yet"; the
        # ``content`` property replaces it with bytes (or None).
        self._content = False
        self._content_consumed = False
        self._next = None
        #: Integer Code of responded HTTP Status, e.g. 404 or 200.
        self.status_code = None
        #: Case-insensitive Dictionary of Response Headers.
        #: For example, ``headers['content-encoding']`` will return the
        #: value of a ``'Content-Encoding'`` response header.
        self.headers = CaseInsensitiveDict()
        #: File-like object representation of response (for advanced usage).
        #: Use of ``raw`` requires that ``stream=True`` be set on the request.
        #: This requirement does not apply for use internally to Requests.
        self.raw = None
        #: Final URL location of Response.
        self.url = None
        #: Encoding to decode with when accessing r.text.
        self.encoding = None
        #: A list of :class:`Response <Response>` objects from
        #: the history of the Request. Any redirect responses will end
        #: up here. The list is sorted from the oldest to the most recent request.
        self.history = []
        #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
        self.reason = None
        #: A CookieJar of Cookies the server sent back.
        self.cookies = cookiejar_from_dict({})
        #: The amount of time elapsed between sending the request
        #: and the arrival of the response (as a timedelta).
        #: This property specifically measures the time taken between sending
        #: the first byte of the request and finishing parsing the headers. It
        #: is therefore unaffected by consuming the response content or the
        #: value of the ``stream`` keyword argument.
        self.elapsed = datetime.timedelta(0)
        #: The :class:`PreparedRequest <PreparedRequest>` object to which this
        #: is a response.
        self.request = None
    def __enter__(self):
        """Support ``with``-statement usage; returns the response itself."""
        return self
    def __exit__(self, *args):
        """Close the response when leaving a ``with`` block."""
        self.close()
    def __getstate__(self):
        """Return picklable state restricted to the attributes in __attrs__."""
        # Consume everything; accessing the content attribute makes
        # sure the content has been fully read.
        if not self._content_consumed:
            self.content
        return {attr: getattr(self, attr, None) for attr in self.__attrs__}
    def __setstate__(self, state):
        """Restore the state produced by __getstate__."""
        for name, value in state.items():
            setattr(self, name, value)
        # pickled objects do not have .raw
        setattr(self, '_content_consumed', True)
        setattr(self, 'raw', None)
    def __repr__(self):
        """Short debug representation, e.g. ``<Response [200]>``."""
        return '<Response [%s]>' % (self.status_code)
    def __bool__(self):
        """Returns True if :attr:`status_code` is less than 400.
        This attribute checks if the status code of the response is between
        400 and 600 to see if there was a client error or a server error. If
        the status code, is between 200 and 400, this will return True. This
        is **not** a check to see if the response code is ``200 OK``.
        """
        return self.ok
    def __nonzero__(self):
        """Returns True if :attr:`status_code` is less than 400.
        This attribute checks if the status code of the response is between
        400 and 600 to see if there was a client error or a server error. If
        the status code, is between 200 and 400, this will return True. This
        is **not** a check to see if the response code is ``200 OK``.
        """
        # Python 2 name for __bool__.
        return self.ok
    def __iter__(self):
        """Allows you to use a response as an iterator."""
        return self.iter_content(128)
    @property
    def ok(self):
        """Returns True if :attr:`status_code` is less than 400, False if not.
        This attribute checks if the status code of the response is between
        400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
        is **not** a check to see if the response code is ``200 OK``.
        """
        try:
            self.raise_for_status()
        except HTTPError:
            return False
        return True
    @property
    def is_redirect(self):
        """True if this Response is a well-formed HTTP redirect that could have
        been processed automatically (by :meth:`Session.resolve_redirects`).
        """
        return ('location' in self.headers and self.status_code in REDIRECT_STATI)
    @property
    def is_permanent_redirect(self):
        """True if this Response one of the permanent versions of redirect."""
        return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
    @property
    def next(self):
        """Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
        return self._next
    @property
    def apparent_encoding(self):
        """The apparent encoding, provided by the chardet library."""
        return chardet.detect(self.content)['encoding']
    def iter_content(self, chunk_size=1, decode_unicode=False):
        """Iterates over the response data. When stream=True is set on the
        request, this avoids reading the content at once into memory for
        large responses. The chunk size is the number of bytes it should
        read into memory. This is not necessarily the length of each item
        returned as decoding can take place.
        chunk_size must be of type int or None. A value of None will
        function differently depending on the value of `stream`.
        stream=True will read data as it arrives in whatever size the
        chunks are received. If stream=False, data is returned as
        a single chunk.
        If decode_unicode is True, content will be decoded using the best
        available encoding based on the response.
        """
        def generate():
            # Special case for urllib3.
            if hasattr(self.raw, 'stream'):
                try:
                    for chunk in self.raw.stream(chunk_size, decode_content=True):
                        yield chunk
                except ProtocolError as e:
                    raise ChunkedEncodingError(e)
                except DecodeError as e:
                    raise ContentDecodingError(e)
                except ReadTimeoutError as e:
                    raise ConnectionError(e)
            else:
                # Standard file-like object.
                while True:
                    chunk = self.raw.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk
            self._content_consumed = True
        # _content is still the ``False`` sentinel only if it was never
        # cached, so consumed-but-uncached means the stream was already
        # drained elsewhere and cannot be replayed.
        if self._content_consumed and isinstance(self._content, bool):
            raise StreamConsumedError()
        elif chunk_size is not None and not isinstance(chunk_size, int):
            raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
        # simulate reading small chunks of the content
        reused_chunks = iter_slices(self._content, chunk_size)
        stream_chunks = generate()
        chunks = reused_chunks if self._content_consumed else stream_chunks
        if decode_unicode:
            chunks = stream_decode_response_unicode(chunks, self)
        return chunks
    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
        """Iterates over the response data, one line at a time. When
        stream=True is set on the request, this avoids reading the
        content at once into memory for large responses.
        .. note:: This method is not reentrant safe.
        """
        # ``pending`` carries a partial line across chunk boundaries.
        pending = None
        for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
            if pending is not None:
                chunk = pending + chunk
            if delimiter:
                lines = chunk.split(delimiter)
            else:
                lines = chunk.splitlines()
            # If the chunk does not end exactly on a line boundary, hold
            # back the trailing piece to prepend to the next chunk.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None
            for line in lines:
                yield line
        if pending is not None:
            yield pending
    @property
    def content(self):
        """Content of the response, in bytes."""
        if self._content is False:
            # Read the contents.
            if self._content_consumed:
                raise RuntimeError(
                    'The content for this response was already consumed')
            if self.status_code == 0 or self.raw is None:
                self._content = None
            else:
                self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
        self._content_consumed = True
        # don't need to release the connection; that's been handled by urllib3
        # since we exhausted the data.
        return self._content
    @property
    def text(self):
        """Content of the response, in unicode.
        If Response.encoding is None, encoding will be guessed using
        ``chardet``.
        The encoding of the response content is determined based solely on HTTP
        headers, following RFC 2616 to the letter. If you can take advantage of
        non-HTTP knowledge to make a better guess at the encoding, you should
        set ``r.encoding`` appropriately before accessing this property.
        """
        # Try charset from content-type
        content = None
        encoding = self.encoding
        if not self.content:
            return str('')
        # Fallback to auto-detected encoding.
        if self.encoding is None:
            encoding = self.apparent_encoding
        # Decode unicode from given encoding.
        try:
            content = str(self.content, encoding, errors='replace')
        except (LookupError, TypeError):
            # A LookupError is raised if the encoding was not found which could
            # indicate a misspelling or similar mistake.
            #
            # A TypeError can be raised if encoding is None
            #
            # So we try blindly encoding.
            content = str(self.content, errors='replace')
        return content
    def json(self, **kwargs):
        r"""Returns the json-encoded content of a response, if any.
        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
        :raises ValueError: If the response body does not contain valid json.
        """
        if not self.encoding and self.content and len(self.content) > 3:
            # No encoding set. JSON RFC 4627 section 3 states we should expect
            # UTF-8, -16 or -32. Detect which one to use; If the detection or
            # decoding fails, fall back to `self.text` (using chardet to make
            # a best guess).
            encoding = guess_json_utf(self.content)
            if encoding is not None:
                try:
                    return complexjson.loads(
                        self.content.decode(encoding), **kwargs
                    )
                except UnicodeDecodeError:
                    # Wrong UTF codec detected; usually because it's not UTF-8
                    # but some other 8-bit codec. This is an RFC violation,
                    # and the server didn't bother to tell us what codec *was*
                    # used.
                    pass
        return complexjson.loads(self.text, **kwargs)
    @property
    def links(self):
        """Returns the parsed header links of the response, if any."""
        header = self.headers.get('link')
        # l = MultiDict()
        l = {}
        if header:
            links = parse_header_links(header)
            for link in links:
                # Prefer the "rel" attribute as the key; fall back to the URL.
                key = link.get('rel') or link.get('url')
                l[key] = link
        return l
    def raise_for_status(self):
        """Raises :class:`HTTPError`, if one occurred."""
        http_error_msg = ''
        if isinstance(self.reason, bytes):
            # We attempt to decode utf-8 first because some servers
            # choose to localize their reason strings. If the string
            # isn't utf-8, we fall back to iso-8859-1 for all other
            # encodings. (See PR #3538)
            try:
                reason = self.reason.decode('utf-8')
            except UnicodeDecodeError:
                reason = self.reason.decode('iso-8859-1')
        else:
            reason = self.reason
        if 400 <= self.status_code < 500:
            http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
        elif 500 <= self.status_code < 600:
            http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
        if http_error_msg:
            raise HTTPError(http_error_msg, response=self)
    def close(self):
        """Releases the connection back to the pool. Once this method has been
        called the underlying ``raw`` object must not be accessed again.
        *Note: Should not normally need to be called explicitly.*
        """
        if not self._content_consumed:
            self.raw.close()
        # urllib3 exposes release_conn on its HTTPResponse; call it if present.
        release_conn = getattr(self.raw, 'release_conn', None)
        if release_conn is not None:
            release_conn()
| src/oci/_vendor/requests/models.py | 34,762 | The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Instances are generated from a :class:`Request <Request>` object, and
should not be instantiated manually; doing so may produce undesirable
effects.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> r = req.prepare()
>>> r
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param json: json for the body to attach to the request (if files or data is not specified).
:param params: URL parameters to append to the URL. If a dictionary or
list of tuples ``[(key, value)]`` is provided, form-encoding will
take place.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'https://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code, is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
Allows you to use a response as an iterator.
Returns True if :attr:`status_code` is less than 400.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code, is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
tuples. Order is retained if data is a list of tuples but arbitrary
if parameters are supplied as a dict.
The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
or 4-tuples (filename, fileobj, contentype, custom_headers).
Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
The apparent encoding, provided by the chardet library.
Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
Content of the response, in bytes.
Deregister a previously registered hook.
Returns True if the hook existed, False if not.
True if this Response one of the permanent versions of redirect.
True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
chunk_size must be of type int or None. A value of None will
function differently depending on the value of `stream`.
stream=True will read data as it arrives in whatever size the
chunks are received. If stream=False, data is returned as
a single chunk.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
:raises ValueError: If the response body does not contain valid json.
Returns the parsed header links of the response, if any.
Returns a PreparedRequest for the next request in a redirect chain, if there is one.
Returns True if :attr:`status_code` is less than 400, False if not.
This attribute checks if the status code of the response is between
400 and 600 to see if there was a client error or a server error. If
the status code is between 200 and 400, this will return True. This
is **not** a check to see if the response code is ``200 OK``.
Build the path URL to use.
Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it.
Prepares the entire request with the given parameters.
Prepares the given HTTP auth data.
Prepares the given HTTP body data.
Prepare Content-Length header based on request method and body
Prepares the given HTTP cookie data.
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand.
Prepares the given HTTP headers.
Prepares the given hooks.
Prepares the given HTTP method.
Prepares the given HTTP URL.
Raises :class:`HTTPError`, if one occurred.
Properly register a hook.
Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
coding: utf-8 Modified Work: Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. Copyright 2018 Kenneth Reitz -*- coding: utf-8 -*- Import encoding now, to avoid implicit import later. Implicit import within threads may cause LookupError when standard library is in a ZIP, such as in Embedded Python. See https://github.com/psf/requests/issues/3578.: The set of HTTP status codes that indicate an automatically: processable redirect. 301 302 303 307 308 Don't call str() on bytestrings: in Py3 it all goes wrong. support for explicit filename Default empty dicts for dict params.: HTTP verb to send to the server.: HTTP URL to send the request to.: dictionary of HTTP headers. The `CookieJar` used to create the Cookie header will be stored here after prepare_cookies is called: request body to send to the server.: dictionary of callback hooks, for internal usage.: integer denoting starting position of a readable file-like body. Note that prepare_auth must be last to enable authentication schemes such as OAuth to work on a fully prepared request. This MUST go after prepare_auth. Authenticators could add a hook: Accept objects that have string representations.: We're unable to blindly call unicode/str functions: as this will include the bytestring indicator (b''): on python 3.x.: https://github.com/psf/requests/pull/2238 Remove leading whitespaces from url Don't do any URL preparation for non-HTTP schemes like `mailto`, `data` etc to work around exceptions from `url_parse`, which handles RFC 3986 only. Support for unicode domain names and paths. In general, we want to try IDNA encoding the hostname if the string contains non-ASCII characters. This allows users to automatically get the correct IDNA behaviour. 
For strings containing only ASCII characters, we need to also verify it doesn't start with a wildcard (*), before allowing the unencoded hostname. Carefully reconstruct the network location Bare domains aren't valid URLs. Raise exception on invalid header value. Check if file, fo, generator, iterator. If not, run through normal process. Nottin' on you. urllib3 requires a bytes-like body. Python 2's json.dumps provides this natively, but Python 3 gives a Unicode string. Record the current file position before reading. This will allow us to rewind a file in the event of a redirect. This differentiates from None, allowing us to catch a failed `tell()` later when trying to rewind the body Multi-part file uploads. Add content-type if it wasn't explicitly provided. If length exists, set it. Otherwise, we fallback to Transfer-Encoding: chunked. Set Content-Length to 0 for methods that can have a body but don't provide one. (i.e. not GET or HEAD) If no Auth is explicitly provided, extract it from the URL first. special-case basic HTTP auth Allow auth to make its changes. Update self to reflect the auth changes. Recompute Content-Length hooks can be passed as None to the prepare method and to this method. To prevent iterating over None, simply use an empty list if hooks is False-y: Integer Code of responded HTTP Status, e.g. 404 or 200.: Case-insensitive Dictionary of Response Headers.: For example, ``headers['content-encoding']`` will return the: value of a ``'Content-Encoding'`` response header.: File-like object representation of response (for advanced usage).: Use of ``raw`` requires that ``stream=True`` be set on the request.: This requirement does not apply for use internally to Requests.: Final URL location of Response.: Encoding to decode with when accessing r.text.: A list of :class:`Response <Response>` objects from: the history of the Request. Any redirect responses will end: up here. 
The list is sorted from the oldest to the most recent request.: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".: A CookieJar of Cookies the server sent back.: The amount of time elapsed between sending the request: and the arrival of the response (as a timedelta).: This property specifically measures the time taken between sending: the first byte of the request and finishing parsing the headers. It: is therefore unaffected by consuming the response content or the: value of the ``stream`` keyword argument.: The :class:`PreparedRequest <PreparedRequest>` object to which this: is a response. Consume everything; accessing the content attribute makes sure the content has been fully read. pickled objects do not have .raw Special case for urllib3. Standard file-like object. simulate reading small chunks of the content Read the contents. don't need to release the connection; that's been handled by urllib3 since we exhausted the data. Try charset from content-type Fallback to auto-detected encoding. Decode unicode from given encoding. A LookupError is raised if the encoding was not found which could indicate a misspelling or similar mistake. A TypeError can be raised if encoding is None So we try blindly encoding. No encoding set. JSON RFC 4627 section 3 states we should expect UTF-8, -16 or -32. Detect which one to use; If the detection or decoding fails, fall back to `self.text` (using chardet to make a best guess). Wrong UTF codec detected; usually because it's not UTF-8 but some other 8-bit codec. This is an RFC violation, and the server didn't bother to tell us what codec *was* used. l = MultiDict() We attempt to decode utf-8 first because some servers choose to localize their reason strings. If the string isn't utf-8, we fall back to iso-8859-1 for all other encodings. (See PR 3538) | 12,247 | en | 0.827411 |
#!/usr/bin/env python
# -*- coding=utf8 -*-
"""
# Author: achao
# File Name: weight_init.py
# Description:
"""
import copy
import math
import warnings
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from deep3dmap.core.utils import Registry, build_from_cfg, get_logger, print_log
INITIALIZERS = Registry('initializer')
def update_init_info(module, init_info):
    """Record *init_info* for every parameter whose values changed.

    Args:
        module (obj:`nn.Module`): Module carrying a user-defined
            ``_params_init_info`` mapping that records how each
            parameter was initialized.
        init_info (str): Description of the initialization that was
            just applied.
    """
    assert hasattr(
        module,
        '_params_init_info'), f'Can not find `_params_init_info` in {module}'
    for name, param in module.named_parameters():
        assert param in module._params_init_info, (
            f'Find a new :obj:`Parameter` '
            f'named `{name}` during executing the '
            f'`init_weights` of '
            f'`{module.__class__.__name__}`. '
            f'Please do not add or '
            f'replace parameters during executing '
            f'the `init_weights`. ')
        # A parameter whose mean moved since the last snapshot was
        # touched by the current initializer: record the description
        # and refresh the snapshot.
        current_mean = param.data.mean()
        record = module._params_init_info[param]
        if record['tmp_mean_value'] != current_mean:
            record['init_info'] = init_info
            record['tmp_mean_value'] = current_mean
def constant_init(module, val, bias=0):
    """Fill ``module.weight`` with ``val`` and ``module.bias`` with ``bias``."""
    if getattr(module, 'weight', None) is not None:
        nn.init.constant_(module.weight, val)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
def xavier_init(module, gain=1, bias=0, distribution='normal'):
    """Xavier/Glorot initialization of ``module.weight`` plus constant bias."""
    assert distribution in ['uniform', 'normal']
    if getattr(module, 'weight', None) is not None:
        init_fn = (nn.init.xavier_uniform_ if distribution == 'uniform'
                   else nn.init.xavier_normal_)
        init_fn(module.weight, gain=gain)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
def normal_init(module, mean=0, std=1, bias=0):
    """Initialize ``module.weight`` from N(mean, std) and bias to a constant."""
    if getattr(module, 'weight', None) is not None:
        nn.init.normal_(module.weight, mean, std)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
def trunc_normal_init(module: nn.Module,
                      mean: float = 0,
                      std: float = 1,
                      a: float = -2,
                      b: float = 2,
                      bias: float = 0) -> None:
    """Truncated-normal init for ``module.weight``; constant for the bias.

    Weight values are drawn from N(mean, std**2) truncated to ``[a, b]``
    via :func:`trunc_normal_`. Missing or ``None`` attributes are skipped.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        trunc_normal_(weight, mean, std, a, b)  # type: ignore
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)  # type: ignore
def uniform_init(module, a=0, b=1, bias=0):
    """Draw ``module.weight`` from U(a, b); fill the bias with ``bias``.

    Missing or ``None`` attributes are skipped.
    """
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.uniform_(weight, a, b)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def kaiming_init(module,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 bias=0,
                 distribution='normal'):
    """Kaiming-initialize ``module.weight``; fill the bias with ``bias``.

    Args:
        module: module whose ``weight``/``bias`` (when present and not
            ``None``) are initialized in place.
        a (int | float): negative slope for ``'leaky_relu'``. Defaults to 0.
        mode (str): ``'fan_in'`` or ``'fan_out'``. Defaults to ``'fan_out'``.
        nonlinearity (str): non-linear function name. Defaults to ``'relu'``.
        bias (int | float): constant value for the bias. Defaults to 0.
        distribution (str): ``'uniform'`` or ``'normal'``.
    """
    assert distribution in ['uniform', 'normal']
    weight = getattr(module, 'weight', None)
    if weight is not None:
        kaiming_fn = (nn.init.kaiming_uniform_
                      if distribution == 'uniform' else nn.init.kaiming_normal_)
        kaiming_fn(weight, a=a, mode=mode, nonlinearity=nonlinearity)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def caffe2_xavier_init(module, bias=0):
    """Initialize ``module`` like Caffe2's ``XavierFill``.

    `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
    with ``fan_in`` and a leaky-relu slope of 1.
    Acknowledgment to FAIR's internal code.
    """
    kaiming_init(module, a=1, mode='fan_in', nonlinearity='leaky_relu',
                 bias=bias, distribution='uniform')
def bias_init_with_prob(prior_prob):
    """Return the bias b with sigmoid(b) == ``prior_prob`` (the logit).

    Used to initialize conv/fc biases so the initial predicted probability
    equals ``prior_prob``.
    """
    return float(-np.log((1 - prior_prob) / prior_prob))
def _get_bases_name(m):
return [b.__name__ for b in m.__class__.__bases__]
class BaseInit(object):
    """Base class for the registered initializers.

    Args:
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization;
            when given, the bias is derived via :func:`bias_init_with_prob`.
            Defaults to None.
        layer (str | list[str], optional): the layer class name(s) this
            initializer applies to. Defaults to None (no layers).

    Raises:
        TypeError: if ``bias``, ``bias_prob`` or ``layer`` has a wrong type.
    """

    def __init__(self, *, bias=0, bias_prob=None, layer=None):
        # Set to True by `_initialize` in override mode: the initializer then
        # applies to the whole sub-module regardless of `layer`.
        self.wholemodule = False
        if not isinstance(bias, (int, float)):
            raise TypeError(f'bias must be a number, but got a {type(bias)}')
        if bias_prob is not None:
            if not isinstance(bias_prob, float):
                # BUG FIX: the original message used a backslash continuation
                # inside the f-string, embedding source indentation into it.
                raise TypeError(
                    f'bias_prob type must be float, but got {type(bias_prob)}')
        if layer is not None:
            if not isinstance(layer, (str, list)):
                raise TypeError(f'layer must be a str or a list of str, '
                                f'but got a {type(layer)}')
        else:
            layer = []
        if bias_prob is not None:
            self.bias = bias_init_with_prob(bias_prob)
        else:
            self.bias = bias
        # Normalize to a list of layer class names.
        self.layer = [layer] if isinstance(layer, str) else layer

    def _get_init_info(self):
        info = f'{self.__class__.__name__}, bias={self.bias}'
        return info
@INITIALIZERS.register_module(name='Constant')
class ConstantInit(BaseInit):
    """Initialize module parameters with constant values.

    Args:
        val (int | float): the value to fill the weights in the module with
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, val, **kwargs):
        super().__init__(**kwargs)
        self.val = val

    def __call__(self, module):

        def init(m):
            apply_here = self.wholemodule
            if not apply_here:
                # Match either the concrete class name or a direct base name.
                candidates = {m.__class__.__name__, *_get_bases_name(m)}
                apply_here = bool(candidates & set(self.layer))
            if apply_here:
                constant_init(m, self.val, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: val={self.val}, bias={self.bias}'
@INITIALIZERS.register_module(name='Xavier')
class XavierInit(BaseInit):
    r"""Xavier/Glorot initialization of module parameters.

    See `Understanding the difficulty of training deep feedforward
    neural networks - Glorot, X. & Bengio, Y. (2010).
    <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_

    Args:
        gain (int | float): an optional scaling factor. Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        distribution (str): either ``'normal'`` or ``'uniform'``.
            Defaults to ``'normal'``.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, gain=1, distribution='normal', **kwargs):
        super().__init__(**kwargs)
        self.gain = gain
        self.distribution = distribution

    def __call__(self, module):

        def init(m):
            apply_here = self.wholemodule
            if not apply_here:
                # Match either the concrete class name or a direct base name.
                candidates = {m.__class__.__name__, *_get_bases_name(m)}
                apply_here = bool(candidates & set(self.layer))
            if apply_here:
                xavier_init(m, self.gain, self.bias, self.distribution)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: gain={self.gain}, '
                f'distribution={self.distribution}, bias={self.bias}')
@INITIALIZERS.register_module(name='Normal')
class NormalInit(BaseInit):
    r"""Initialize module parameters from the normal distribution
    :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.

    Args:
        mean (int | float): the mean of the normal distribution. Defaults to 0.
        std (int | float): the standard deviation of the normal distribution.
            Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, mean=0, std=1, **kwargs):
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std

    def __call__(self, module):

        def init(m):
            apply_here = self.wholemodule
            if not apply_here:
                # Match either the concrete class name or a direct base name.
                candidates = {m.__class__.__name__, *_get_bases_name(m)}
                apply_here = bool(candidates & set(self.layer))
            if apply_here:
                normal_init(m, self.mean, self.std, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: mean={self.mean},'
                f' std={self.std}, bias={self.bias}')
@INITIALIZERS.register_module(name='TruncNormal')
class TruncNormalInit(BaseInit):
    r"""Initialize module parameters from the normal distribution
    :math:`\mathcal{N}(\text{mean}, \text{std}^2)` truncated to
    :math:`[a, b]`.

    Args:
        mean (float): the mean of the normal distribution. Defaults to 0.
        std (float): the standard deviation of the normal distribution.
            Defaults to 1.
        a (float): The minimum cutoff value.
        b (float): The maximum cutoff value.
        bias (float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self,
                 mean: float = 0,
                 std: float = 1,
                 a: float = -2,
                 b: float = 2,
                 **kwargs) -> None:
        super().__init__(**kwargs)
        self.mean = mean
        self.std = std
        self.a = a
        self.b = b

    def __call__(self, module: nn.Module) -> None:

        def init(m):
            apply_here = self.wholemodule
            if not apply_here:
                # Match either the concrete class name or a direct base name.
                candidates = {m.__class__.__name__, *_get_bases_name(m)}
                apply_here = bool(candidates & set(self.layer))
            if apply_here:
                trunc_normal_init(m, self.mean, self.std, self.a, self.b,
                                  self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: a={self.a}, b={self.b},'
                f' mean={self.mean}, std={self.std}, bias={self.bias}')
@INITIALIZERS.register_module(name='Uniform')
class UniformInit(BaseInit):
    r"""Initialize module parameters from the uniform distribution
    :math:`\mathcal{U}(a, b)`.

    Args:
        a (int | float): the lower bound of the uniform distribution.
            Defaults to 0.
        b (int | float): the upper bound of the uniform distribution.
            Defaults to 1.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self, a=0, b=1, **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.b = b

    def __call__(self, module):

        def init(m):
            apply_here = self.wholemodule
            if not apply_here:
                # Match either the concrete class name or a direct base name.
                candidates = {m.__class__.__name__, *_get_bases_name(m)}
                apply_here = bool(candidates & set(self.layer))
            if apply_here:
                uniform_init(m, self.a, self.b, self.bias)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return (f'{self.__class__.__name__}: a={self.a},'
                f' b={self.b}, bias={self.bias}')
@INITIALIZERS.register_module(name='Kaiming')
class KaimingInit(BaseInit):
    r"""Kaiming/He initialization of module parameters.

    See `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification - He, K. et al. (2015).
    <https://www.cv-foundation.org/openaccess/content_iccv_2015/
    papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_

    Args:
        a (int | float): the negative slope of the rectifier used after this
            layer (only used with ``'leaky_relu'``). Defaults to 0.
        mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing
            ``'fan_in'`` preserves the magnitude of the variance of the
            weights in the forward pass; ``'fan_out'`` preserves it in the
            backwards pass. Defaults to ``'fan_out'``.
        nonlinearity (str): the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'``.
            Defaults to 'relu'.
        bias (int | float): the value to fill the bias. Defaults to 0.
        bias_prob (float, optional): the probability for bias initialization.
            Defaults to None.
        distribution (str): either ``'normal'`` or ``'uniform'``.
            Defaults to ``'normal'``.
        layer (str | list[str], optional): the layer will be initialized.
            Defaults to None.
    """

    def __init__(self,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 distribution='normal',
                 **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.mode = mode
        self.nonlinearity = nonlinearity
        self.distribution = distribution

    def __call__(self, module):

        def init(m):
            apply_here = self.wholemodule
            if not apply_here:
                # Match either the concrete class name or a direct base name.
                candidates = {m.__class__.__name__, *_get_bases_name(m)}
                apply_here = bool(candidates & set(self.layer))
            if apply_here:
                kaiming_init(m, self.a, self.mode, self.nonlinearity,
                             self.bias, self.distribution)

        module.apply(init)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        # NOTE: the stray space in 'distribution =' is kept verbatim so the
        # recorded init info stays byte-identical with earlier output.
        return (f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, '
                f'nonlinearity={self.nonlinearity}, '
                f'distribution ={self.distribution}, bias={self.bias}')
@INITIALIZERS.register_module(name='Caffe2Xavier')
class Caffe2XavierInit(KaimingInit):
    """Caffe2-style ``XavierFill`` initializer.

    Equivalent to Kaiming initialization with ``a=1``, ``mode='fan_in'``,
    ``nonlinearity='leaky_relu'`` and a uniform distribution.
    """

    # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
    # Acknowledgment to FAIR's internal code
    def __init__(self, **kwargs):
        super().__init__(
            a=1,
            mode='fan_in',
            nonlinearity='leaky_relu',
            distribution='uniform',
            **kwargs)

    # Pure delegation to KaimingInit.__call__; kept for explicitness.
    def __call__(self, module):
        super().__call__(module)
@INITIALIZERS.register_module(name='Pretrained')
class PretrainedInit(object):
    """Initialize module by loading a pretrained model.

    Args:
        checkpoint (str): the checkpoint file of the pretrained model should
            be load.
        prefix (str, optional): the prefix of a sub-module in the pretrained
            model. it is for loading a part of the pretrained model to
            initialize. For example, if we would like to only load the
            backbone of a detector model, we can set ``prefix='backbone.'``.
            Defaults to None.
        map_location (str): map tensors into proper locations.
    """

    def __init__(self, checkpoint, prefix=None, map_location=None):
        self.checkpoint = checkpoint
        self.prefix = prefix
        self.map_location = map_location

    def __call__(self, module):
        from deep3dmap.runners import (_load_checkpoint_with_prefix, load_checkpoint,
                                       load_state_dict)
        logger = get_logger('deep3dmap')
        if self.prefix is not None:
            # Load only the sub-part of the checkpoint selected by `prefix`.
            print_log(
                f'load {self.prefix} in model from: {self.checkpoint}',
                logger=logger)
            state_dict = _load_checkpoint_with_prefix(
                self.prefix, self.checkpoint, map_location=self.map_location)
            load_state_dict(module, state_dict, strict=False, logger=logger)
        else:
            print_log(f'load model from: {self.checkpoint}', logger=logger)
            load_checkpoint(
                module,
                self.checkpoint,
                map_location=self.map_location,
                strict=False,
                logger=logger)
        if hasattr(module, '_params_init_info'):
            update_init_info(module, init_info=self._get_init_info())

    def _get_init_info(self):
        return f'{self.__class__.__name__}: load from {self.checkpoint}'
def _initialize(module, cfg, wholemodule=False):
    """Build an initializer from ``cfg`` and apply it to ``module``."""
    initializer = build_from_cfg(cfg, INITIALIZERS)
    # wholemodule flag is for override mode: an override entry has no `layer`
    # key, so the initializer targets the whole named sub-module instead.
    initializer.wholemodule = wholemodule
    initializer(module)
def _initialize_override(module, override, cfg):
    """Apply override init configs to named sub-modules of ``module``.

    Args:
        module: module whose named attributes get re-initialized.
        override (dict | list[dict]): override config(s); each must contain
            a ``name`` key naming an attribute of ``module``.
        cfg (dict): the base init config, reused when an override entry
            contains only ``name``.

    Raises:
        TypeError: if ``override`` is not a dict or a list of dicts.
        ValueError: if an entry lacks ``name``, or has extra args but no
            ``type``.
        RuntimeError: if ``module`` has no attribute with the given name.
    """
    if not isinstance(override, (dict, list)):
        # BUG FIX: the original message used a backslash continuation inside
        # the f-string, embedding source indentation into it.
        raise TypeError(f'override must be a dict or a list of dict, '
                        f'but got {type(override)}')
    override = [override] if isinstance(override, dict) else override
    for override_ in override:
        cp_override = copy.deepcopy(override_)
        name = cp_override.pop('name', None)
        if name is None:
            # BUG FIX: added the missing space after the comma.
            raise ValueError('`override` must contain the key "name", '
                             f'but got {cp_override}')
        # if override only has name key, it means use args in init_cfg
        if not cp_override:
            cp_override.update(cfg)
        # if override has name key and other args except type key, it will
        # raise error
        elif 'type' not in cp_override.keys():
            raise ValueError(
                f'`override` need "type" key, but got {cp_override}')
        if hasattr(module, name):
            _initialize(getattr(module, name), cp_override, wholemodule=True)
        else:
            raise RuntimeError(f'module did not have attribute {name}, '
                               f'but init_cfg is {cp_override}.')
def initialize(module, init_cfg):
    """Initialize a module.

    Args:
        module (``torch.nn.Module``): the module will be initialized.
        init_cfg (dict | list[dict]): initialization configuration dict to
            define initializer. OpenMMLab has implemented 6 initializers
            including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
            ``Kaiming``, and ``Pretrained``.

    Raises:
        TypeError: if ``init_cfg`` is not a dict or a list of dicts.

    Example:
        >>> module = nn.Linear(2, 3, bias=True)
        >>> init_cfg = dict(type='Constant', layer='Linear', val =1 , bias =2)
        >>> initialize(module, init_cfg)
        >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))
        >>> # define key ``'layer'`` for initializing layer with different
        >>> # configuration
        >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
                dict(type='Constant', layer='Linear', val=2)]
        >>> initialize(module, init_cfg)
        >>> # define key``'override'`` to initialize some specific part in
        >>> # module
        >>> class FooNet(nn.Module):
        >>>     def __init__(self):
        >>>         super().__init__()
        >>>         self.feat = nn.Conv2d(3, 16, 3)
        >>>         self.reg = nn.Conv2d(16, 10, 3)
        >>>         self.cls = nn.Conv2d(16, 5, 3)
        >>> model = FooNet()
        >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
        >>>     override=dict(type='Constant', name='reg', val=3, bias=4))
        >>> initialize(model, init_cfg)
        >>> model = ResNet(depth=50)
        >>> # Initialize weights with the pretrained model.
        >>> init_cfg = dict(type='Pretrained',
                checkpoint='torchvision://resnet50')
        >>> initialize(model, init_cfg)
        >>> # Initialize weights of a sub-module with the specific part of
        >>> # a pretrained model by using "prefix".
        >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\
        >>>     'retinanet_r50_fpn_1x_coco/'\
        >>>     'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
        >>> init_cfg = dict(type='Pretrained',
                checkpoint=url, prefix='backbone.')
    """
    if not isinstance(init_cfg, (dict, list)):
        # BUG FIX: the original message used a backslash continuation inside
        # the f-string, embedding source indentation into it.
        raise TypeError(f'init_cfg must be a dict or a list of dict, '
                        f'but got {type(init_cfg)}')
    if isinstance(init_cfg, dict):
        init_cfg = [init_cfg]
    for cfg in init_cfg:
        # should deeply copy the original config because cfg may be used by
        # other modules, e.g., one init_cfg shared by multiple bottleneck
        # blocks, the expected cfg will be changed after pop and will change
        # the initialization behavior of other modules
        cp_cfg = copy.deepcopy(cfg)
        override = cp_cfg.pop('override', None)
        _initialize(module, cp_cfg)
        if override is not None:
            # `layer` does not apply to override entries, which target a
            # named sub-module as a whole.
            cp_cfg.pop('layer', None)
            _initialize_override(module, override, cp_cfg)
        # otherwise all attributes in module share the same initialization
        # (dead `else: pass` removed)
def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float,
b: float) -> Tensor:
# Method based on
# https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
# Modified from
# https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
'The distribution of values may be incorrect.',
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
lower = norm_cdf((a - mean) / std)
upper = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [lower, upper], then translate
# to [2lower-1, 2upper-1].
tensor.uniform_(2 * lower - 1, 2 * upper - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor: Tensor,
                  mean: float = 0.,
                  std: float = 1.,
                  a: float = -2.,
                  b: float = 2.) -> Tensor:
    r"""Fill ``tensor`` in place from a truncated normal distribution.

    Values are effectively drawn from
    :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with samples outside
    :math:`[a, b]` redrawn until they fall within the bounds. The method
    works best when :math:`a \leq \text{mean} \leq b`.

    Modified from
    https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py

    Args:
        tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
        mean (float): the mean of the normal distribution.
        std (float): the standard deviation of the normal distribution.
        a (float): the minimum cutoff value.
        b (float): the maximum cutoff value.
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
| deep3dmap/core/utils/weight_init.py | 26,051 | Initialize module parameters with constant values.
Args:
val (int | float): the value to fill the weights in the module with
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
Initialize module parameters with the values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification - He, K. et al. (2015).
<https://www.cv-foundation.org/openaccess/content_iccv_2015/
papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_
Args:
a (int | float): the negative slope of the rectifier used after this
layer (only used with ``'leaky_relu'``). Defaults to 0.
mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing
``'fan_in'`` preserves the magnitude of the variance of the weights
in the forward pass. Choosing ``'fan_out'`` preserves the
magnitudes in the backwards pass. Defaults to ``'fan_out'``.
nonlinearity (str): the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` .
Defaults to 'relu'.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
distribution (str): distribution either be ``'normal'`` or
``'uniform'``. Defaults to ``'normal'``.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
Initialize module parameters with the values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
Args:
mean (int | float):the mean of the normal distribution. Defaults to 0.
std (int | float): the standard deviation of the normal distribution.
Defaults to 1.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
Initialize module by loading a pretrained model.
Args:
checkpoint (str): the checkpoint file of the pretrained model should
be load.
prefix (str, optional): the prefix of a sub-module in the pretrained
model. it is for loading a part of the pretrained model to
initialize. For example, if we would like to only load the
backbone of a detector model, we can set ``prefix='backbone.'``.
Defaults to None.
map_location (str): map tensors into proper locations.
Initialize module parameters with the values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values
outside :math:`[a, b]`.
Args:
mean (float): the mean of the normal distribution. Defaults to 0.
std (float): the standard deviation of the normal distribution.
Defaults to 1.
a (float): The minimum cutoff value.
b ( float): The maximum cutoff value.
bias (float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
Initialize module parameters with values drawn from the uniform
distribution :math:`\mathcal{U}(a, b)`.
Args:
a (int | float): the lower bound of the uniform distribution.
Defaults to 0.
b (int | float): the upper bound of the uniform distribution.
Defaults to 1.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
Initialize module parameters with values according to the method
described in `Understanding the difficulty of training deep feedforward
neural networks - Glorot, X. & Bengio, Y. (2010).
<http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_
Args:
gain (int | float): an optional scaling factor. Defaults to 1.
bias (int | float): the value to fill the bias. Defaults to 0.
bias_prob (float, optional): the probability for bias initialization.
Defaults to None.
distribution (str): distribution either be ``'normal'``
or ``'uniform'``. Defaults to ``'normal'``.
layer (str | list[str], optional): the layer will be initialized.
Defaults to None.
initialize conv/fc bias value according to a given probability value.
Initialize a module.
Args:
module (``torch.nn.Module``): the module will be initialized.
init_cfg (dict | list[dict]): initialization configuration dict to
define initializer. OpenMMLab has implemented 6 initializers
including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
``Kaiming``, and ``Pretrained``.
Example:
>>> module = nn.Linear(2, 3, bias=True)
>>> init_cfg = dict(type='Constant', layer='Linear', val =1 , bias =2)
>>> initialize(module, init_cfg)
>>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))
>>> # define key ``'layer'`` for initializing layer with different
>>> # configuration
>>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
dict(type='Constant', layer='Linear', val=2)]
>>> initialize(module, init_cfg)
>>> # define key``'override'`` to initialize some specific part in
>>> # module
>>> class FooNet(nn.Module):
>>> def __init__(self):
>>> super().__init__()
>>> self.feat = nn.Conv2d(3, 16, 3)
>>> self.reg = nn.Conv2d(16, 10, 3)
>>> self.cls = nn.Conv2d(16, 5, 3)
>>> model = FooNet()
>>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
>>> override=dict(type='Constant', name='reg', val=3, bias=4))
>>> initialize(model, init_cfg)
>>> model = ResNet(depth=50)
>>> # Initialize weights with the pretrained model.
>>> init_cfg = dict(type='Pretrained',
checkpoint='torchvision://resnet50')
>>> initialize(model, init_cfg)
>>> # Initialize weights of a sub-module with the specific part of
>>> # a pretrained model by using "prefix".
>>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/' >>> 'retinanet_r50_fpn_1x_coco/' >>> 'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
>>> init_cfg = dict(type='Pretrained',
checkpoint=url, prefix='backbone.')
Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Modified from
https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
Args:
tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
mean (float): the mean of the normal distribution.
std (float): the standard deviation of the normal distribution.
a (float): the minimum cutoff value.
b (float): the maximum cutoff value.
Update the `_params_init_info` in the module if the value of parameters
are changed.
Args:
module (obj:`nn.Module`): The module of PyTorch with a user-defined
attribute `_params_init_info` which records the initialization
information.
init_info (str): The string that describes the initialization.
# Author: achao
# File Name: weight_init.py
# Description:
!/usr/bin/env python -*- coding=utf8 -*- The parameter has been changed during executing the `init_weights` of module type: ignore type: ignore `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch Acknowledgment to FAIR's internal code `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch Acknowledgment to FAIR's internal code wholemodule flag is for override mode, there is no layer key in override and initializer will give init values for the whole module with the name in override. if override only has name key, it means use args in init_cfg if override has name key and other args except type key, it will raise error should deeply copy the original config because cfg may be used by other modules, e.g., one init_cfg shared by multiple bottleneck blocks, the expected cfg will be changed after pop and will change the initialization behavior of other modules All attributes in module have same initialization. Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf Modified from https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py Computes standard normal cumulative distribution function Values are generated by using a truncated uniform distribution and then using the inverse CDF for the normal distribution. Get upper and lower cdf values Uniformly fill tensor with values from [lower, upper], then translate to [2lower-1, 2upper-1]. Use inverse cdf transform for normal distribution to get truncated standard normal Transform to proper mean, std Clamp to ensure it's in the proper range | 9,397 | en | 0.551292 |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The minc module provides classes for interfacing with the `MINC
<http://www.bic.mni.mcgill.ca/ServicesSoftware/MINC>`_ command line tools. This
module was written to work with MINC version 2.2.00.
Author: Carlo Hamalainen <carlo@carlo-hamalainen.net>
http://carlo-hamalainen.net
"""
import glob
import os
import os.path
import re
import warnings
from ..base import (TraitedSpec, CommandLineInputSpec, CommandLine,
StdOutCommandLineInputSpec, StdOutCommandLine, File,
Directory, InputMultiPath, OutputMultiPath, traits,
isdefined)
from .base import aggregate_filename
warnings.filterwarnings('always', category=UserWarning)
class ExtractInputSpec(StdOutCommandLineInputSpec):
    """Input specification for ``mincextract`` (dump a MINC hyperslab)."""

    input_file = File(
        desc='input file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    output_file = File(
        desc='output file',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s.raw',
        keep_extension=False)
    # Mutually exclusive output-format flags.
    # BUG FIX: 'write_ascii' was listed twice in this tuple; the duplicate
    # entry has been removed.
    _xor_write = (
        'write_ascii',
        'write_byte',
        'write_short',
        'write_int',
        'write_long',
        'write_float',
        'write_double',
        'write_signed',
        'write_unsigned',
    )
    write_ascii = traits.Bool(
        desc='Write out data as ascii strings (default).',
        argstr='-ascii',
        xor=_xor_write)
    write_byte = traits.Bool(
        desc='Write out data as bytes.', argstr='-byte', xor=_xor_write)
    write_short = traits.Bool(
        desc='Write out data as short integers.',
        argstr='-short',
        xor=_xor_write)
    write_int = traits.Bool(
        desc='Write out data as 32-bit integers.',
        argstr='-int',
        xor=_xor_write)
    write_long = traits.Bool(
        desc='Superseded by write_int.', argstr='-long', xor=_xor_write)
    write_float = traits.Bool(
        desc='Write out data as single precision floating-point values.',
        argstr='-float',
        xor=_xor_write)
    write_double = traits.Bool(
        desc='Write out data as double precision floating-point values.',
        argstr='-double',
        xor=_xor_write)
    _xor_signed = ('write_signed', 'write_unsigned')
    write_signed = traits.Bool(
        desc='Write out signed data.', argstr='-signed', xor=_xor_signed)
    write_unsigned = traits.Bool(
        desc='Write out unsigned data.', argstr='-unsigned', xor=_xor_signed)
    write_range = traits.Tuple(
        traits.Float,
        traits.Float,
        argstr='-range %s %s',
        desc=
        'Specify the range of output values\nDefault value: 1.79769e+308 1.79769e+308.',
    )
    _xor_normalize = (
        'normalize',
        'nonormalize',
    )
    normalize = traits.Bool(
        desc='Normalize integer pixel values to file max and min.',
        argstr='-normalize',
        xor=_xor_normalize)
    nonormalize = traits.Bool(
        desc='Turn off pixel normalization.',
        argstr='-nonormalize',
        xor=_xor_normalize)
    image_range = traits.Tuple(
        traits.Float,
        traits.Float,
        desc='Specify the range of real image values for normalization.',
        argstr='-image_range %s %s')
    image_minimum = traits.Float(
        desc=('Specify the minimum real image value for normalization.'
              'Default value: 1.79769e+308.'),
        argstr='-image_minimum %s')
    image_maximum = traits.Float(
        desc=('Specify the maximum real image value for normalization.'
              'Default value: 1.79769e+308.'),
        argstr='-image_maximum %s')
    start = InputMultiPath(
        traits.Int,
        desc='Specifies corner of hyperslab (C conventions for indices).',
        sep=',',
        argstr='-start %s',
    )
    count = InputMultiPath(
        traits.Int,
        desc='Specifies edge lengths of hyperslab to read.',
        sep=',',
        argstr='-count %s',
    )
    # FIXME Can we make sure that len(start) == len(count)?
    _xor_flip = ('flip_positive_direction', 'flip_negative_direction',
                 'flip_any_direction')
    flip_positive_direction = traits.Bool(
        desc='Flip images to always have positive direction.',
        argstr='-positive_direction',
        xor=_xor_flip)
    flip_negative_direction = traits.Bool(
        desc='Flip images to always have negative direction.',
        argstr='-negative_direction',
        xor=_xor_flip)
    flip_any_direction = traits.Bool(
        desc='Do not flip images (Default).',
        argstr='-any_direction',
        xor=_xor_flip)
    _xor_x_flip = ('flip_x_positive', 'flip_x_negative', 'flip_x_any')
    flip_x_positive = traits.Bool(
        desc='Flip images to give positive xspace:step value (left-to-right).',
        argstr='+xdirection',
        xor=_xor_x_flip)
    flip_x_negative = traits.Bool(
        desc='Flip images to give negative xspace:step value (right-to-left).',
        argstr='-xdirection',
        xor=_xor_x_flip)
    flip_x_any = traits.Bool(
        desc='Don\'t flip images along x-axis (default).',
        argstr='-xanydirection',
        xor=_xor_x_flip)
    _xor_y_flip = ('flip_y_positive', 'flip_y_negative', 'flip_y_any')
    flip_y_positive = traits.Bool(
        desc='Flip images to give positive yspace:step value (post-to-ant).',
        argstr='+ydirection',
        xor=_xor_y_flip)
    flip_y_negative = traits.Bool(
        desc='Flip images to give negative yspace:step value (ant-to-post).',
        argstr='-ydirection',
        xor=_xor_y_flip)
    flip_y_any = traits.Bool(
        desc='Don\'t flip images along y-axis (default).',
        argstr='-yanydirection',
        xor=_xor_y_flip)
    _xor_z_flip = ('flip_z_positive', 'flip_z_negative', 'flip_z_any')
    flip_z_positive = traits.Bool(
        desc='Flip images to give positive zspace:step value (inf-to-sup).',
        argstr='+zdirection',
        xor=_xor_z_flip)
    flip_z_negative = traits.Bool(
        desc='Flip images to give negative zspace:step value (sup-to-inf).',
        argstr='-zdirection',
        xor=_xor_z_flip)
    flip_z_any = traits.Bool(
        desc='Don\'t flip images along z-axis (default).',
        argstr='-zanydirection',
        xor=_xor_z_flip)
class ExtractOutputSpec(TraitedSpec):
    """Outputs produced by the Extract (``mincextract``) interface."""

    output_file = File(
        exists=True,
        desc='output file in raw/text format',
    )
class Extract(StdOutCommandLine):
    """Dump a hyperslab of MINC file data (wraps ``mincextract``).

    Examples
    --------
    >>> from nipype.interfaces.minc import Extract
    >>> from nipype.interfaces.minc.testdata import minc2Dfile

    >>> extract = Extract(input_file=minc2Dfile)
    >>> extract.run() # doctest: +SKIP

    >>> extract = Extract(input_file=minc2Dfile, start=[3, 10, 5], count=[4, 4, 4]) # extract a 4x4x4 slab at offset [3, 10, 5]
    >>> extract.run() # doctest: +SKIP
    """

    _cmd = 'mincextract'
    input_spec = ExtractInputSpec
    output_spec = ExtractOutputSpec
class ToRawInputSpec(StdOutCommandLineInputSpec):
    """Input specification for the ToRaw (``minctoraw``) interface."""

    input_file = File(
        desc='input file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    # No argstr: minctoraw writes to stdout, which StdOutCommandLine
    # redirects into this file.
    output_file = File(
        desc='output file',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s.raw',
        keep_extension=False)
    # Output sample-type flags are mutually exclusive.
    _xor_write = ('write_byte', 'write_short', 'write_int', 'write_long',
                  'write_float', 'write_double')
    write_byte = traits.Bool(
        desc='Write out data as bytes.', argstr='-byte', xor=_xor_write)
    write_short = traits.Bool(
        desc='Write out data as short integers.',
        argstr='-short',
        xor=_xor_write)
    write_int = traits.Bool(
        desc='Write out data as 32-bit integers.',
        argstr='-int',
        xor=_xor_write)
    write_long = traits.Bool(
        desc='Superseded by write_int.', argstr='-long', xor=_xor_write)
    write_float = traits.Bool(
        desc='Write out data as single precision floating-point values.',
        argstr='-float',
        xor=_xor_write)
    write_double = traits.Bool(
        desc='Write out data as double precision floating-point values.',
        argstr='-double',
        xor=_xor_write)
    # Signedness flags are mutually exclusive.
    _xor_signed = ('write_signed', 'write_unsigned')
    write_signed = traits.Bool(
        desc='Write out signed data.', argstr='-signed', xor=_xor_signed)
    write_unsigned = traits.Bool(
        desc='Write out unsigned data.', argstr='-unsigned', xor=_xor_signed)
    write_range = traits.Tuple(
        traits.Float,
        traits.Float,
        argstr='-range %s %s',
        desc=('Specify the range of output values.'
              'Default value: 1.79769e+308 1.79769e+308.'),
    )
    # Normalization flags are mutually exclusive.
    _xor_normalize = (
        'normalize',
        'nonormalize',
    )
    normalize = traits.Bool(
        desc='Normalize integer pixel values to file max and min.',
        argstr='-normalize',
        xor=_xor_normalize)
    nonormalize = traits.Bool(
        desc='Turn off pixel normalization.',
        argstr='-nonormalize',
        xor=_xor_normalize)
class ToRawOutputSpec(TraitedSpec):
    """Outputs produced by the ToRaw (``minctoraw``) interface."""

    output_file = File(
        exists=True,
        desc='output file in raw format',
    )
class ToRaw(StdOutCommandLine):
    """Dump a chunk of MINC file data (wraps ``minctoraw``).

    This program is largely superseded by ``mincextract`` (see Extract).

    Examples
    --------
    >>> from nipype.interfaces.minc import ToRaw
    >>> from nipype.interfaces.minc.testdata import minc2Dfile

    >>> toraw = ToRaw(input_file=minc2Dfile)
    >>> toraw.run() # doctest: +SKIP

    >>> toraw = ToRaw(input_file=minc2Dfile, write_range=(0, 100))
    >>> toraw.run() # doctest: +SKIP
    """

    _cmd = 'minctoraw'
    input_spec = ToRawInputSpec
    output_spec = ToRawOutputSpec
class ConvertInputSpec(CommandLineInputSpec):
    """Input specification for the Convert (``mincconvert``) interface."""

    input_file = File(
        desc='input file for converting',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_convert_output.mnc')
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
    template = traits.Bool(
        desc=
        ('Create a template file. The dimensions, variables, and'
         'attributes of the input file are preserved but all data it set to zero.'
         ),
        argstr='-template',
    )
    # Only meaningful for MINC 2 (HDF5-backed) output files.
    compression = traits.Enum(
        0,
        1,
        2,
        3,
        4,
        5,
        6,
        7,
        8,
        9,
        argstr='-compress %s',
        desc='Set the compression level, from 0 (disabled) to 9 (maximum).',
    )
    chunk = traits.Range(
        low=0,
        desc=
        'Set the target block size for chunking (0 default, >1 block size).',
        argstr='-chunk %d',
    )
class ConvertOutputSpec(TraitedSpec):
    """Outputs produced by the Convert (``mincconvert``) interface."""

    output_file = File(
        exists=True,
        desc='output file',
    )
class Convert(CommandLine):
    """Convert between MINC 1 and MINC 2 format (wraps ``mincconvert``).

    Examples
    --------
    >>> from nipype.interfaces.minc import Convert
    >>> from nipype.interfaces.minc.testdata import minc2Dfile

    >>> c = Convert(input_file=minc2Dfile, output_file='/tmp/out.mnc', two=True) # Convert to MINC2 format.
    >>> c.run() # doctest: +SKIP
    """

    _cmd = 'mincconvert'
    input_spec = ConvertInputSpec
    output_spec = ConvertOutputSpec
class CopyInputSpec(CommandLineInputSpec):
    """Input specification for the Copy (``minccopy``) interface."""

    input_file = File(
        desc='input file to copy',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_copy.mnc')
    # Pixel-copy mode flags are mutually exclusive.
    _xor_pixel = ('pixel_values', 'real_values')
    pixel_values = traits.Bool(
        desc='Copy pixel values as is.',
        argstr='-pixel_values',
        xor=_xor_pixel)
    real_values = traits.Bool(
        desc='Copy real pixel intensities (default).',
        argstr='-real_values',
        xor=_xor_pixel)
class CopyOutputSpec(TraitedSpec):
    """Outputs produced by the Copy (``minccopy``) interface."""

    output_file = File(
        exists=True,
        desc='output file',
    )
class Copy(CommandLine):
    """Copy image values from one MINC file to another (wraps ``minccopy``).

    Both the input and output files must exist, and the images in both
    files must have an equal number dimensions and equal dimension lengths.

    NOTE: This program is intended primarily for use with scripts such
    as mincedit. It does not follow the typical design rules of most
    MINC command-line tools and therefore should be used only with
    caution.
    """

    _cmd = 'minccopy'
    input_spec = CopyInputSpec
    output_spec = CopyOutputSpec
class ToEcatInputSpec(CommandLineInputSpec):
    """Input specification for the ToEcat (``minctoecat``) interface."""

    input_file = File(
        desc='input file to convert',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_to_ecat.v',
        keep_extension=False)
    # The following flags suppress copying of specific MINC header
    # variables into the Ecat7 output.
    ignore_patient_variable = traits.Bool(
        desc='Ignore informations from the minc patient variable.',
        argstr='-ignore_patient_variable',
    )
    ignore_study_variable = traits.Bool(
        desc='Ignore informations from the minc study variable.',
        argstr='-ignore_study_variable',
    )
    ignore_acquisition_variable = traits.Bool(
        desc='Ignore informations from the minc acquisition variable.',
        argstr='-ignore_acquisition_variable',
    )
    ignore_ecat_acquisition_variable = traits.Bool(
        desc='Ignore informations from the minc ecat_acquisition variable.',
        argstr='-ignore_ecat_acquisition_variable',
    )
    ignore_ecat_main = traits.Bool(
        desc='Ignore informations from the minc ecat-main variable.',
        argstr='-ignore_ecat_main',
    )
    ignore_ecat_subheader_variable = traits.Bool(
        desc='Ignore informations from the minc ecat-subhdr variable.',
        argstr='-ignore_ecat_subheader_variable',
    )
    no_decay_corr_fctr = traits.Bool(
        desc='Do not compute the decay correction factors',
        argstr='-no_decay_corr_fctr',
    )
    voxels_as_integers = traits.Bool(
        desc=('Voxel values are treated as integers, scale and'
              'calibration factors are set to unity'),
        argstr='-label',
    )
class ToEcatOutputSpec(TraitedSpec):
    """Outputs produced by the ToEcat (``minctoecat``) interface."""

    output_file = File(
        exists=True,
        desc='output file',
    )
class ToEcat(CommandLine):
    """Convert MINC volumes to Ecat7 format (wraps ``minctoecat``).

    Handles 2D images, 3D volumes, and 4D dynamic volumes written in
    MINC file format, producing a 2D, 3D or 4D Ecat7 file.

    Examples
    --------
    >>> from nipype.interfaces.minc import ToEcat
    >>> from nipype.interfaces.minc.testdata import minc2Dfile

    >>> c = ToEcat(input_file=minc2Dfile)
    >>> c.run() # doctest: +SKIP

    >>> c = ToEcat(input_file=minc2Dfile, voxels_as_integers=True)
    >>> c.run() # doctest: +SKIP
    """

    _cmd = 'minctoecat'
    input_spec = ToEcatInputSpec
    output_spec = ToEcatOutputSpec
class DumpInputSpec(StdOutCommandLineInputSpec):
    """Input specification for the Dump (``mincdump``) interface."""

    input_file = File(
        desc='input file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    # No argstr: mincdump writes to stdout, which StdOutCommandLine
    # redirects into this file.
    output_file = File(
        desc='output file',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_dump.txt',
        keep_extension=False)
    _xor_coords_or_header = (
        'coordinate_data',
        'header_data',
    )
    coordinate_data = traits.Bool(
        desc='Coordinate variable data and header information.',
        argstr='-c',
        xor=_xor_coords_or_header)
    header_data = traits.Bool(
        desc='Header information only, no data.',
        argstr='-h',
        xor=_xor_coords_or_header)
    _xor_annotations = (
        'annotations_brief',
        'annotations_full',
    )
    annotations_brief = traits.Enum(
        'c',
        'f',
        argstr='-b %s',
        desc='Brief annotations for C or Fortran indices in data.',
        xor=_xor_annotations)
    annotations_full = traits.Enum(
        'c',
        'f',
        argstr='-f %s',
        desc='Full annotations for C or Fortran indices in data.',
        xor=_xor_annotations)
    variables = InputMultiPath(
        traits.Str,
        desc='Output data for specified variables only.',
        sep=',',
        argstr='-v %s')
    line_length = traits.Range(
        low=0,
        desc='Line length maximum in data section (default 80).',
        argstr='-l %d')
    netcdf_name = traits.Str(
        desc='Name for netCDF (default derived from file name).',
        argstr='-n %s')
    # Either a single int (digits for both) or an (fdig, ddig) pair.
    precision = traits.Either(
        traits.Int(),
        traits.Tuple(traits.Int, traits.Int),
        desc='Display floating-point values with less precision',
        argstr='%s',
    )  # See _format_arg in Dump for actual formatting.
class DumpOutputSpec(TraitedSpec):
    """Outputs produced by the Dump (``mincdump``) interface."""

    output_file = File(
        exists=True,
        desc='output file',
    )
class Dump(StdOutCommandLine):
    """Dump a MINC file. Typically used in conjunction with mincgen (see Gen).

    Examples
    --------
    >>> from nipype.interfaces.minc import Dump
    >>> from nipype.interfaces.minc.testdata import minc2Dfile

    >>> dump = Dump(input_file=minc2Dfile)
    >>> dump.run() # doctest: +SKIP

    >>> dump = Dump(input_file=minc2Dfile, output_file='/tmp/out.txt', precision=(3, 4))
    >>> dump.run() # doctest: +SKIP
    """

    _cmd = 'mincdump'
    input_spec = DumpInputSpec
    output_spec = DumpOutputSpec

    def _format_arg(self, name, spec, value):
        """Render ``precision`` as ``-p N`` (int) or ``-p N,M`` (int pair)."""
        if name != 'precision':
            # All other traits use the default formatting machinery.
            return super(Dump, self)._format_arg(name, spec, value)
        if isinstance(value, int):
            return '-p %d' % value
        pair_of_ints = (isinstance(value, tuple)
                        and isinstance(value[0], int)
                        and isinstance(value[1], int))
        if pair_of_ints:
            return '-p %d,%d' % (value[0], value[1])
        raise ValueError('Invalid precision argument: ' + str(value))
class AverageInputSpec(CommandLineInputSpec):
    """Input specification for the Average (``mincaverage``) interface."""

    # Inputs may be given either as a list of files or as a file that
    # lists them; the two forms are mutually exclusive.
    _xor_input_files = ('input_files', 'filelist')
    input_files = InputMultiPath(
        File(exists=True),
        desc='input file(s)',
        mandatory=True,
        sep=' ',
        argstr='%s',
        position=-2,
        xor=_xor_input_files)
    filelist = File(
        desc='Specify the name of a file containing input file names.',
        argstr='-filelist %s',
        exists=True,
        mandatory=True,
        xor=_xor_input_files)
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_files'],
        hash_files=False,
        name_template='%s_averaged.mnc')
    two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    _xor_verbose = (
        'verbose',
        'quiet',
    )
    verbose = traits.Bool(
        desc='Print out log messages (default).',
        argstr='-verbose',
        xor=_xor_verbose)
    quiet = traits.Bool(
        desc='Do not print out log messages.',
        argstr='-quiet',
        xor=_xor_verbose)
    debug = traits.Bool(desc='Print out debugging messages.', argstr='-debug')
    _xor_check_dimensions = (
        'check_dimensions',
        'no_check_dimensions',
    )
    check_dimensions = traits.Bool(
        desc='Check that dimension info matches across files (default).',
        argstr='-check_dimensions',
        xor=_xor_check_dimensions)
    no_check_dimensions = traits.Bool(
        desc='Do not check dimension info.',
        argstr='-nocheck_dimensions',
        xor=_xor_check_dimensions)
    # Output sample-format flags are mutually exclusive.
    _xor_format = (
        'format_filetype',
        'format_byte',
        'format_short',
        'format_int',
        'format_long',
        'format_float',
        'format_double',
        'format_signed',
        'format_unsigned',
    )
    format_filetype = traits.Bool(
        desc='Use data type of first file (default).',
        argstr='-filetype',
        xor=_xor_format)
    format_byte = traits.Bool(
        desc='Write out byte data.', argstr='-byte', xor=_xor_format)
    format_short = traits.Bool(
        desc='Write out short integer data.', argstr='-short', xor=_xor_format)
    format_int = traits.Bool(
        desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format)
    format_long = traits.Bool(
        desc='Superseded by -int.', argstr='-long', xor=_xor_format)
    format_float = traits.Bool(
        desc='Write out single-precision floating-point data.',
        argstr='-float',
        xor=_xor_format)
    format_double = traits.Bool(
        desc='Write out double-precision floating-point data.',
        argstr='-double',
        xor=_xor_format)
    format_signed = traits.Bool(
        desc='Write signed integer data.', argstr='-signed', xor=_xor_format)
    format_unsigned = traits.Bool(
        desc='Write unsigned integer data (default).',
        argstr='-unsigned',
        xor=_xor_format)
    max_buffer_size_in_kb = traits.Range(
        low=0,
        desc='Specify the maximum size of the internal buffers (in kbytes).',
        value=4096,
        usedefault=True,
        argstr='-max_buffer_size_in_kb %d',
    )
    _xor_normalize = (
        'normalize',
        'nonormalize',
    )
    normalize = traits.Bool(
        desc='Normalize data sets for mean intensity.',
        argstr='-normalize',
        xor=_xor_normalize)
    nonormalize = traits.Bool(
        desc='Do not normalize data sets (default).',
        argstr='-nonormalize',
        xor=_xor_normalize)
    voxel_range = traits.Tuple(
        traits.Int,
        traits.Int,
        argstr='-range %d %d',
        desc='Valid range for output data.')
    sdfile = File(
        desc='Specify an output sd file (default=none).', argstr='-sdfile %s')
    _xor_copy_header = ('copy_header', 'no_copy_header')
    copy_header = traits.Bool(
        desc=
        'Copy all of the header from the first file (default for one file).',
        argstr='-copy_header',
        xor=_xor_copy_header)
    no_copy_header = traits.Bool(
        desc=
        'Do not copy all of the header from the first file (default for many files)).',
        argstr='-nocopy_header',
        xor=_xor_copy_header)
    avgdim = traits.Str(
        desc='Specify a dimension along which we wish to average.',
        argstr='-avgdim %s')
    binarize = traits.Bool(
        desc='Binarize the volume by looking for values in a given range.',
        argstr='-binarize')
    binrange = traits.Tuple(
        traits.Float,
        traits.Float,
        argstr='-binrange %s %s',
        desc=
        'Specify a range for binarization. Default value: 1.79769e+308 -1.79769e+308.'
    )
    binvalue = traits.Float(
        desc=('Specify a target value (+/- 0.5) for'
              'binarization. Default value: -1.79769e+308'),
        argstr='-binvalue %s')
    weights = InputMultiPath(
        traits.Str,
        desc='Specify weights for averaging ("<w1>,<w2>,...").',
        sep=',',
        argstr='-weights %s',
    )
    width_weighted = traits.Bool(
        desc='Weight by dimension widths when -avgdim is used.',
        argstr='-width_weighted',
        requires=('avgdim', ))
class AverageOutputSpec(TraitedSpec):
    """Outputs produced by the Average (``mincaverage``) interface."""

    output_file = File(
        exists=True,
        desc='output file',
    )
class Average(CommandLine):
    """Average a number of MINC files (wraps ``mincaverage``).

    Examples
    --------
    >>> from nipype.interfaces.minc import Average
    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data

    >>> files = [nonempty_minc_data(i) for i in range(3)]
    >>> average = Average(input_files=files, output_file='/tmp/tmp.mnc')
    >>> average.run() # doctest: +SKIP
    """

    _cmd = 'mincaverage'
    input_spec = AverageInputSpec
    output_spec = AverageOutputSpec
class BlobInputSpec(CommandLineInputSpec):
    """Input specification for the Blob (``mincblob``) interface."""

    input_file = File(
        desc='input file to blob',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_blob.mnc')
    trace = traits.Bool(
        desc='compute the trace (approximate growth and shrinkage) -- FAST',
        argstr='-trace')
    determinant = traits.Bool(
        desc='compute the determinant (exact growth and shrinkage) -- SLOW',
        argstr='-determinant')
    translation = traits.Bool(
        desc='compute translation (structure displacement)',
        argstr='-translation')
    magnitude = traits.Bool(
        desc='compute the magnitude of the displacement vector',
        argstr='-magnitude')
class BlobOutputSpec(TraitedSpec):
    """Outputs produced by the Blob (``mincblob``) interface."""

    output_file = File(
        exists=True,
        desc='output file',
    )
class Blob(CommandLine):
    """Calculate blobs from minc deformation grids (wraps ``mincblob``).

    Examples
    --------
    >>> from nipype.interfaces.minc import Blob
    >>> from nipype.interfaces.minc.testdata import minc2Dfile

    >>> blob = Blob(input_file=minc2Dfile, output_file='/tmp/tmp.mnc', trace=True)
    >>> blob.run() # doctest: +SKIP
    """

    _cmd = 'mincblob'
    input_spec = BlobInputSpec
    output_spec = BlobOutputSpec
class CalcInputSpec(CommandLineInputSpec):
    """Input specification for the Calc (``minccalc``) interface."""

    # Inputs may be given either as a list of files or as a file that
    # lists them; the two forms are mutually exclusive.
    _xor_input_files = ('input_files', 'filelist')
    input_files = InputMultiPath(
        File(exists=True),
        desc='input file(s) for calculation',
        mandatory=True,
        sep=' ',
        argstr='%s',
        position=-2,
        # xor added so input_files and filelist are mutually exclusive,
        # matching AverageInputSpec and the _xor_input_files declaration.
        xor=_xor_input_files,
    )
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_files'],
        hash_files=False,
        name_template='%s_calc.mnc')
    two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    _xor_verbose = (
        'verbose',
        'quiet',
    )
    verbose = traits.Bool(
        desc='Print out log messages (default).',
        argstr='-verbose',
        xor=_xor_verbose)
    quiet = traits.Bool(
        desc='Do not print out log messages.',
        argstr='-quiet',
        xor=_xor_verbose)
    debug = traits.Bool(desc='Print out debugging messages.', argstr='-debug')
    # NOTE(review): unlike AverageInputSpec.filelist this one has no
    # exists=True; confirm whether that omission is intentional.
    filelist = File(
        desc='Specify the name of a file containing input file names.',
        argstr='-filelist %s',
        mandatory=True,
        xor=_xor_input_files)
    _xor_copy_header = ('copy_header', 'no_copy_header')
    copy_header = traits.Bool(
        desc='Copy all of the header from the first file.',
        argstr='-copy_header',
        xor=_xor_copy_header)
    no_copy_header = traits.Bool(
        desc='Do not copy all of the header from the first file.',
        argstr='-nocopy_header',
        xor=_xor_copy_header)
    # Output sample-format flags are mutually exclusive.
    _xor_format = (
        'format_filetype',
        'format_byte',
        'format_short',
        'format_int',
        'format_long',
        'format_float',
        'format_double',
        'format_signed',
        'format_unsigned',
    )
    format_filetype = traits.Bool(
        desc='Use data type of first file (default).',
        argstr='-filetype',
        xor=_xor_format)
    format_byte = traits.Bool(
        desc='Write out byte data.', argstr='-byte', xor=_xor_format)
    format_short = traits.Bool(
        desc='Write out short integer data.', argstr='-short', xor=_xor_format)
    format_int = traits.Bool(
        desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format)
    format_long = traits.Bool(
        desc='Superseded by -int.', argstr='-long', xor=_xor_format)
    format_float = traits.Bool(
        desc='Write out single-precision floating-point data.',
        argstr='-float',
        xor=_xor_format)
    format_double = traits.Bool(
        desc='Write out double-precision floating-point data.',
        argstr='-double',
        xor=_xor_format)
    format_signed = traits.Bool(
        desc='Write signed integer data.', argstr='-signed', xor=_xor_format)
    format_unsigned = traits.Bool(
        desc='Write unsigned integer data (default).',
        argstr='-unsigned',
        xor=_xor_format)
    voxel_range = traits.Tuple(
        traits.Int,
        traits.Int,
        argstr='-range %d %d',
        desc='Valid range for output data.',
    )
    max_buffer_size_in_kb = traits.Range(
        low=0,
        desc='Specify the maximum size of the internal buffers (in kbytes).',
        argstr='-max_buffer_size_in_kb %d')
    _xor_check_dimensions = (
        'check_dimensions',
        'no_check_dimensions',
    )
    check_dimensions = traits.Bool(
        desc='Check that files have matching dimensions (default).',
        argstr='-check_dimensions',
        xor=_xor_check_dimensions)
    no_check_dimensions = traits.Bool(
        desc='Do not check that files have matching dimensions.',
        argstr='-nocheck_dimensions',
        xor=_xor_check_dimensions)
    # FIXME Is it sensible to use ignore_nan and propagate_nan at the same
    # time? Document this.
    ignore_nan = traits.Bool(
        desc='Ignore invalid data (NaN) for accumulations.',
        argstr='-ignore_nan')
    propagate_nan = traits.Bool(
        desc='Invalid data in any file at a voxel produces a NaN (default).',
        argstr='-propagate_nan')
    # FIXME Double-check that these are mutually exclusive?
    _xor_nan_zero_illegal = ('output_nan', 'output_zero',
                             'output_illegal_value')
    output_nan = traits.Bool(
        desc='Output NaN when an illegal operation is done (default).',
        argstr='-nan',
        xor=_xor_nan_zero_illegal)
    output_zero = traits.Bool(
        desc='Output zero when an illegal operation is done.',
        argstr='-zero',
        xor=_xor_nan_zero_illegal)
    output_illegal = traits.Bool(
        desc=
        'Value to write out when an illegal operation is done. Default value: 1.79769e+308',
        argstr='-illegal_value',
        xor=_xor_nan_zero_illegal)
    # The expression may be given inline or in a file, not both.
    _xor_expression = ('expression', 'expfile')
    expression = traits.Str(
        desc='Expression to use in calculations.',
        argstr='-expression \'%s\'',
        xor=_xor_expression,
        mandatory=True)
    expfile = File(
        desc='Name of file containing expression.',
        argstr='-expfile %s',
        xor=_xor_expression,
        mandatory=True)
    # FIXME test this one, the argstr will probably need tweaking, see
    # _format_arg.
    outfiles = traits.List(
        traits.Tuple(
            traits.Str,
            File,
            argstr='-outfile %s %s',
            desc=
            ('List of (symbol, file) tuples indicating that output should be written'
             'to the specified file, taking values from the symbol which should be'
             'created in the expression (see the EXAMPLES section). If this option'
             'is given, then all non-option arguments are taken as input files.'
             'This option can be used multiple times for multiple output files.'
             )))
    eval_width = traits.Int(
        desc='Number of voxels to evaluate simultaneously.',
        argstr='-eval_width %s')
class CalcOutputSpec(TraitedSpec):
    """Outputs produced by the Calc (``minccalc``) interface."""

    output_file = File(
        exists=True,
        desc='output file',
    )
class Calc(CommandLine):
    """Compute an expression using MINC files as input (wraps ``minccalc``).

    Examples
    --------
    >>> from nipype.interfaces.minc import Calc
    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data

    >>> file0 = nonempty_minc_data(0)
    >>> file1 = nonempty_minc_data(1)
    >>> calc = Calc(input_files=[file0, file1], output_file='/tmp/calc.mnc', expression='A[0] + A[1]') # add files together
    >>> calc.run() # doctest: +SKIP
    """

    _cmd = 'minccalc'
    input_spec = CalcInputSpec
    output_spec = CalcOutputSpec
# FIXME mincbbox produces output like
#
# -5.000000 -5.000000 -5.000000 4.800000 2.800000 8.800000
#
# so perhaps this would be better returned as a pair of Python
# lists instead of sending to an output file?
class BBoxInputSpec(StdOutCommandLineInputSpec):
    """Input specification for the BBox (``mincbbox``) interface."""

    input_file = File(
        desc='input file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    # No argstr: mincbbox writes to stdout, which StdOutCommandLine
    # redirects into this file.
    output_file = File(
        desc='output file containing bounding box corners',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_bbox.txt',
        keep_extension=False)
    # NOTE(review): argstr has no format specifier (e.g. '-threshold %d'),
    # so a user-set threshold value is likely not rendered correctly on
    # the command line — confirm against mincbbox usage.
    threshold = traits.Int(
        0,
        desc='VIO_Real value threshold for bounding box. Default value: 0.',
        argstr='-threshold')
    _xor_one_two = ('one_line', 'two_lines')
    one_line = traits.Bool(
        desc='Output on one line (default): start_x y z width_x y z',
        argstr='-one_line',
        xor=_xor_one_two)
    two_lines = traits.Bool(
        desc='Output on two lines: start_x y z \n width_x y z',
        argstr='-two_lines',
        xor=_xor_one_two)
    format_mincresample = traits.Bool(
        desc=
        'Output format for mincresample: (-step x y z -start x y z -nelements x y z',
        argstr='-mincresample')
    format_mincreshape = traits.Bool(
        desc='Output format for mincreshape: (-start x,y,z -count dx,dy,dz',
        argstr='-mincreshape')
    format_minccrop = traits.Bool(
        desc='Output format for minccrop: (-xlim x1 x2 -ylim y1 y2 -zlim z1 z2',
        argstr='-minccrop')
    # FIXME Not implemented, will clash with our parsing of the output?
    #     Command-specific options:
    #  Options for logging progress. Default = -verbose.
    #    -verbose:      Write messages indicating progress
    #    -quiet:        Do not write log messages
    #    -debug:        Print out debug info.
class BBoxOutputSpec(TraitedSpec):
    """Outputs produced by the BBox (``mincbbox``) interface."""

    output_file = File(
        exists=True,
        desc='output file containing bounding box corners',
    )
class BBox(StdOutCommandLine):
    """Determine a bounding box of an image (wraps ``mincbbox``).

    Examples
    --------
    >>> from nipype.interfaces.minc import BBox
    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data

    >>> file0 = nonempty_minc_data(0)
    >>> bbox = BBox(input_file=file0)
    >>> bbox.run() # doctest: +SKIP
    """

    _cmd = 'mincbbox'
    input_spec = BBoxInputSpec
    output_spec = BBoxOutputSpec
class BeastInputSpec(CommandLineInputSpec):
    """
    TODO:
    Command-specific options:
     -verbose:          Enable verbose output.
     -positive:         Specify mask of positive segmentation (inside mask) instead of the default mask.
     -output_selection: Specify file to output selected files.
     -count:            Specify file to output the patch count.
     -mask:             Specify a segmentation mask instead of the the default mask.
     -no_mask:          Do not apply a segmentation mask. Perform the segmentation over the entire image.
     -no_positive:      Do not apply a positive mask.
    Generic options for all commands:
     -help:             Print summary of command-line options and abort
     -version:          Print version number of program and exit
    Copyright (C) 2011	Simon Fristed Eskildsen, Vladimir Fonov,
                Pierrick Coupe, Jose V. Manjon
    This program comes with ABSOLUTELY NO WARRANTY; for details type 'cat COPYING'.
    This is free software, and you are welcome to redistribute it under certain
    conditions; type 'cat COPYING' for details.
    Usage: mincbeast [options] <library dir> <input> <output>
           mincbeast -help
    Get this example to work?
    https://github.com/BIC-MNI/BEaST/blob/master/README.library
        2.3 Source the minc-toolkit (if installed):
        $ source /opt/minc/minc-toolkit-config.sh
        2.4 Generate library by running:
        $ beast_prepareADNIlib -flip <ADNI download directory> <BEaST library directory>
        Example:
        $ sudo beast_prepareADNIlib -flip Downloads/ADNI /opt/minc/share/beast-library-1.1
    3. Test the setup
        3.1 Normalize your data
        $ beast_normalize -modeldir /opt/minc/share/icbm152_model_09c input.mnc normal.mnc normal.xfm
        3.2 Run BEaST
        $ mincbeast /opt/minc/share/beast-library-1.1 normal.mnc brainmask.mnc -conf /opt/minc/share/beast-library-1.1/default.2mm.conf -same_res
    """

    probability_map = traits.Bool(
        desc='Output the probability map instead of crisp mask.',
        argstr='-probability')
    flip_images = traits.Bool(
        desc=
        'Flip images around the mid-sagittal plane to increase patch count.',
        argstr='-flip')
    load_moments = traits.Bool(
        desc=('Do not calculate moments instead use precalculated'
              'library moments. (for optimization purposes)'),
        argstr='-load_moments')
    fill_holes = traits.Bool(
        desc='Fill holes in the binary output.', argstr='-fill')
    median_filter = traits.Bool(
        desc='Apply a median filter on the probability map.', argstr='-median')
    nlm_filter = traits.Bool(
        desc='Apply an NLM filter on the probability map (experimental).',
        argstr='-nlm_filter')
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    configuration_file = File(
        desc='Specify configuration file.', argstr='-configuration %s')
    voxel_size = traits.Int(
        4, usedefault=True,
        desc=('Specify voxel size for calculations (4, 2, or 1).'
              'Default value: 4. Assumes no multiscale. Use configuration'
              'file for multiscale.'),
        argstr='-voxel_size %s')
    abspath = traits.Bool(
        desc=
        'File paths in the library are absolute (default is relative to library root).',
        argstr='-abspath',
        usedefault=True,
        default_value=True)
    patch_size = traits.Int(
        1, usedefault=True,
        desc='Specify patch size for single scale approach. Default value: 1.',
        argstr='-patch_size %s')
    search_area = traits.Int(
        2, usedefault=True,
        desc=
        'Specify size of search area for single scale approach. Default value: 2.',
        argstr='-search_area %s')
    confidence_level_alpha = traits.Float(
        0.5, usedefault=True,
        desc='Specify confidence level Alpha. Default value: 0.5',
        argstr='-alpha %s')
    # NOTE(review): the trait default here is 0.5 but the desc text says
    # "Default value: 0.25" — confirm which one matches mincbeast.
    smoothness_factor_beta = traits.Float(
        0.5, usedefault=True,
        desc='Specify smoothness factor Beta. Default value: 0.25',
        argstr='-beta %s')
    threshold_patch_selection = traits.Float(
        0.95, usedefault=True,
        desc='Specify threshold for patch selection. Default value: 0.95',
        argstr='-threshold %s')
    number_selected_images = traits.Int(
        20, usedefault=True,
        desc='Specify number of selected images. Default value: 20',
        argstr='-selection_num %s')
    same_resolution = traits.Bool(
        desc='Output final mask with the same resolution as input file.',
        argstr='-same_resolution')
    library_dir = Directory(
        desc='library directory', position=-3, argstr='%s', mandatory=True)
    input_file = File(
        desc='input file', position=-2, argstr='%s', mandatory=True)
    output_file = File(
        desc='output file',
        position=-1,
        argstr='%s',
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_beast_mask.mnc')
class BeastOutputSpec(TraitedSpec):
    """Outputs produced by the Beast (``mincbeast``) interface."""

    output_file = File(
        exists=True,
        desc='output mask file',
    )
class Beast(CommandLine):
    """Extract a brain image using BEaST (wraps ``mincbeast``).

    BEaST stands for Brain Extraction using non-local Segmentation
    Technique.

    Examples
    --------
    >>> from nipype.interfaces.minc import Beast
    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data

    >>> file0 = nonempty_minc_data(0)
    >>> beast = Beast(input_file=file0)
    >>> beast .run() # doctest: +SKIP
    """

    _cmd = 'mincbeast'
    input_spec = BeastInputSpec
    output_spec = BeastOutputSpec
class PikInputSpec(CommandLineInputSpec):
    """Input specification for the Pik (``mincpik``) interface."""

    input_file = File(
        desc='input file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    # Output image format flags; no argstr, the extension of output_file
    # determines the format on the command line.
    _xor_image_type = ('jpg', 'png')
    jpg = traits.Bool(desc='Output a jpg file.', xor=_xor_image_type)
    png = traits.Bool(desc='Output a png file (default).', xor=_xor_image_type)
    output_file = File(
        desc='output file',
        argstr='%s',
        genfile=True,
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s.png',
        keep_extension=False)
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    # FIXME not implemented: --verbose
    #                        --fake
    #                        --lookup    ==> arguments to pass to minclookup
    scale = traits.Int(
        2, usedefault=True,
        desc=('Scaling factor for resulting image. By default images are'
              'output at twice their original resolution.'),
        argstr='--scale %s')
    width = traits.Int(
        desc=
        'Autoscale the resulting image to have a fixed image width (in pixels).',
        argstr='--width %s')
    depth = traits.Enum(
        8,
        16,
        desc='Bitdepth for resulting image 8 or 16 (MSB machines only!)',
        argstr='--depth %s')
    # NOTE(review): _xor_title names traits ('title_string',
    # 'title_with_filename') that do not exist in this spec, and no trait
    # references it — appears to be dead metadata; confirm and remove.
    _xor_title = ('title_string', 'title_with_filename')
    # Either a bare flag (True) or a title string; rendered by
    # Pik._format_arg below.
    title = traits.Either(
        traits.Bool(desc='Use input filename as title in resulting image.'),
        traits.Str(desc='Add a title to the resulting image.'),
        argstr='%s')  # see _format_arg for actual arg string
    title_size = traits.Int(
        desc='Font point size for the title.',
        argstr='--title_size %s',
        requires=['title'])
    annotated_bar = traits.Bool(
        desc=
        'create an annotated bar to match the image (use height of the output image)',
        argstr='--anot_bar')
    # FIXME tuple of floats? Not voxel values? Man page doesn't specify.
    minc_range = traits.Tuple(
        traits.Float,
        traits.Float,
        desc='Valid range of values for MINC file.',
        argstr='--range %s %s')
    _xor_image_range = ('image_range', 'auto_range')
    image_range = traits.Tuple(
        traits.Float,
        traits.Float,
        desc='Range of image values to use for pixel intensity.',
        argstr='--image_range %s %s',
        xor=_xor_image_range)
    auto_range = traits.Bool(
        desc=
        'Automatically determine image range using a 5 and 95% PcT. (histogram)',
        argstr='--auto_range',
        xor=_xor_image_range)
    start = traits.Int(
        desc='Slice number to get. (note this is in voxel co-ordinates).',
        argstr='--slice %s')  # FIXME Int is correct?
    # Slice orientation flags are mutually exclusive.
    _xor_slice = ('slice_z', 'slice_y', 'slice_x')
    slice_z = traits.Bool(
        desc='Get an axial/transverse (z) slice.', argstr='-z', xor=_xor_slice)
    slice_y = traits.Bool(
        desc='Get a coronal (y) slice.', argstr='-y', xor=_xor_slice)
    slice_x = traits.Bool(
        desc='Get a sagittal (x) slice.', argstr='-x',
        xor=_xor_slice)  # FIXME typo in man page? sagital?
    triplanar = traits.Bool(
        desc='Create a triplanar view of the input file.',
        argstr='--triplanar')
    tile_size = traits.Int(
        desc='Pixel size for each image in a triplanar.',
        argstr='--tilesize %s')
    _xor_sagittal_offset = ('sagittal_offset', 'sagittal_offset_perc')
    sagittal_offset = traits.Int(
        desc='Offset the sagittal slice from the centre.',
        argstr='--sagittal_offset %s')
    sagittal_offset_perc = traits.Range(
        low=0,
        high=100,
        desc='Offset the sagittal slice by a percentage from the centre.',
        argstr='--sagittal_offset_perc %d',
    )
    _xor_vertical_horizontal = ('vertical_triplanar_view',
                                'horizontal_triplanar_view')
    vertical_triplanar_view = traits.Bool(
        desc='Create a vertical triplanar view (Default).',
        argstr='--vertical',
        xor=_xor_vertical_horizontal)
    horizontal_triplanar_view = traits.Bool(
        desc='Create a horizontal triplanar view.',
        argstr='--horizontal',
        xor=_xor_vertical_horizontal)
    lookup = traits.Str(
        desc='Arguments to pass to minclookup', argstr='--lookup %s')
class PikOutputSpec(TraitedSpec):
    """Output specification for Pik: the image rendered by mincpik."""
    output_file = File(desc='output image', exists=True)
class Pik(CommandLine):
    """Generate images from MINC files.

    ``mincpik`` uses ImageMagick to generate images from MINC files.

    Examples
    --------

    >>> from nipype.interfaces.minc import Pik
    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data

    >>> file0 = nonempty_minc_data(0)
    >>> pik = Pik(input_file=file0, title='foo')
    >>> pik.run() # doctest: +SKIP
    """

    input_spec = PikInputSpec
    output_spec = PikOutputSpec
    _cmd = 'mincpik'

    def _format_arg(self, name, spec, value):
        # The 'title' trait doubles as a bare flag (bool) or as a flag
        # plus caption text (str); everything else is handled generically.
        if name != 'title':
            return super(Pik, self)._format_arg(name, spec, value)
        if isinstance(value, bool) and value:
            return '--title'
        if isinstance(value, str):
            return '--title --title_text %s' % (value, )
        raise ValueError(
            'Unknown value for "title" argument: ' + str(value))
class BlurInputSpec(CommandLineInputSpec):
    """Input specification for mincblur; each trait maps to a command
    line flag via its ``argstr``."""
    input_file = File(
        desc='input file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    output_file_base = File(desc='output file base', argstr='%s', position=-1)
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    # Kernel shape: gaussian and rect are mutually exclusive.
    _xor_kernel = ('gaussian', 'rect')
    gaussian = traits.Bool(
        desc='Use a gaussian smoothing kernel (default).',
        argstr='-gaussian',
        xor=_xor_kernel)
    rect = traits.Bool(
        desc='Use a rect (box) smoothing kernel.',
        argstr='-rect',
        xor=_xor_kernel)
    gradient = traits.Bool(
        desc='Create the gradient magnitude volume as well.',
        argstr='-gradient')
    partial = traits.Bool(
        desc=
        'Create the partial derivative and gradient magnitude volumes as well.',
        argstr='-partial')
    no_apodize = traits.Bool(
        desc='Do not apodize the data before blurring.', argstr='-no_apodize')
    # Exactly one kernel-width option must be given: each of the three
    # is mandatory but xor'd against the other two.
    _xor_main_options = ('fwhm', 'fwhm3d', 'standard_dev')
    fwhm = traits.Float(
        0,
        desc='Full-width-half-maximum of gaussian kernel. Default value: 0.',
        argstr='-fwhm %s',
        xor=_xor_main_options,
        mandatory=True)
    standard_dev = traits.Float(
        0,
        desc='Standard deviation of gaussian kernel. Default value: 0.',
        argstr='-standarddev %s',
        xor=_xor_main_options,
        mandatory=True)
    fwhm3d = traits.Tuple(
        traits.Float,
        traits.Float,
        traits.Float,
        argstr='-3dfwhm %s %s %s',
        desc=('Full-width-half-maximum of gaussian kernel.'
              'Default value: -1.79769e+308 -1.79769e+308 -1.79769e+308.'),
        xor=_xor_main_options,
        mandatory=True)
    dimensions = traits.Enum(
        3,
        1,
        2,
        desc=
        'Number of dimensions to blur (either 1,2 or 3). Default value: 3.',
        argstr='-dimensions %s')
class BlurOutputSpec(TraitedSpec):
    """Outputs of mincblur; the gradient/partial files only exist when
    the corresponding inputs were requested (see Blur._list_outputs)."""
    output_file = File(desc='Blurred output file.', exists=True)
    gradient_dxyz = File(desc='Gradient dxyz.')
    partial_dx = File(desc='Partial gradient dx.')
    partial_dy = File(desc='Partial gradient dy.')
    partial_dz = File(desc='Partial gradient dz.')
    partial_dxyz = File(desc='Partial gradient dxyz.')
class Blur(StdOutCommandLine):
    """Blur a MINC volume with a Gaussian kernel (``mincblur``).

    Convolve an input volume with a Gaussian blurring kernel of
    user-defined width. Optionally, the first partial derivatives
    and the gradient magnitude volume can be calculated.

    Examples
    --------

    >>> from nipype.interfaces.minc import Blur
    >>> from nipype.interfaces.minc.testdata import minc3Dfile

    (1) Blur an input volume with a 6mm fwhm isotropic Gaussian
    blurring kernel:

    >>> blur = Blur(input_file=minc3Dfile, fwhm=6, output_file_base='/tmp/out_6')
    >>> blur.run() # doctest: +SKIP

    mincblur will create /tmp/out_6_blur.mnc.

    (2) Calculate the blurred and gradient magnitude data:

    >>> blur = Blur(input_file=minc3Dfile, fwhm=6, gradient=True, output_file_base='/tmp/out_6')
    >>> blur.run() # doctest: +SKIP

    will create /tmp/out_6_blur.mnc and /tmp/out_6_dxyz.mnc.

    (3) Calculate the blurred data, the partial derivative volumes
    and the gradient magnitude for the same data:

    >>> blur = Blur(input_file=minc3Dfile, fwhm=6, partial=True, output_file_base='/tmp/out_6')
    >>> blur.run() # doctest: +SKIP

    will create /tmp/out_6_blur.mnc, /tmp/out_6_dx.mnc,
    /tmp/out_6_dy.mnc, /tmp/out_6_dz.mnc and /tmp/out_6_dxyz.mnc.
    """

    input_spec = BlurInputSpec
    output_spec = BlurOutputSpec
    _cmd = 'mincblur'

    def _gen_output_base(self):
        """Return the output base path: the user-supplied
        ``output_file_base`` if set, else ``<cwd>/<input stem>_bluroutput``."""
        user_base = self.inputs.output_file_base
        if isdefined(user_base):
            return user_base
        # e.g. '/path/foo.mnc' -> '<cwd>/foo_bluroutput'
        stem = os.path.splitext(os.path.basename(self.inputs.input_file))[0]
        return os.path.join(os.getcwd(), stem + '_bluroutput')

    def _list_outputs(self):
        outputs = self.output_spec().get()
        base = self._gen_output_base()

        outputs['output_file'] = base + '_blur.mnc'

        # NOTE(review): isdefined() only checks that the trait was set,
        # not that it is True — gradient=False still populates these.
        if isdefined(self.inputs.gradient):
            outputs['gradient_dxyz'] = base + '_dxyz.mnc'

        if isdefined(self.inputs.partial):
            for suffix in ('dx', 'dy', 'dz', 'dxyz'):
                outputs['partial_' + suffix] = '%s_%s.mnc' % (base, suffix)

        return outputs

    @property
    def cmdline(self):
        base_cmdline = super(Blur, self).cmdline
        if isdefined(self.inputs.output_file_base):
            return base_cmdline
        # mincblur needs an output base argument; append the generated
        # one when the user did not supply output_file_base.
        return '%s %s' % (base_cmdline, self._gen_output_base())
class MathInputSpec(CommandLineInputSpec):
    """Input specification for mincmath.

    Each trait maps onto a mincmath flag via its ``argstr``.  The
    ``*_traits`` class-level lists group the operator traits by how many
    input volumes each operator expects; they are read by
    ``Math._parse_inputs`` to validate ``input_files``.
    """
    _xor_input_files = ('input_files', 'filelist')
    input_files = InputMultiPath(
        File(exists=True),
        desc='input file(s) for calculation',
        mandatory=True,
        sep=' ',
        argstr='%s',
        position=-2,
        xor=_xor_input_files)
    output_file = File(
        desc='output file',
        argstr='%s',
        genfile=True,
        position=-1,
        name_source=['input_files'],
        hash_files=False,
        name_template='%s_mincmath.mnc')
    filelist = File(
        desc='Specify the name of a file containing input file names.',
        argstr='-filelist %s',
        exists=True,
        mandatory=True,
        xor=_xor_input_files)
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
    _xor_copy_header = ('copy_header', 'no_copy_header')
    copy_header = traits.Bool(
        desc=
        'Copy all of the header from the first file (default for one file).',
        argstr='-copy_header',
        xor=_xor_copy_header)
    no_copy_header = traits.Bool(
        desc=
        'Do not copy all of the header from the first file (default for many files)).',
        argstr='-nocopy_header',
        xor=_xor_copy_header)
    _xor_format = (
        'format_filetype',
        'format_byte',
        'format_short',
        'format_int',
        'format_long',
        'format_float',
        'format_double',
        'format_signed',
        'format_unsigned',
    )
    format_filetype = traits.Bool(
        desc='Use data type of first file (default).',
        argstr='-filetype',
        xor=_xor_format)
    format_byte = traits.Bool(
        desc='Write out byte data.', argstr='-byte', xor=_xor_format)
    format_short = traits.Bool(
        desc='Write out short integer data.', argstr='-short', xor=_xor_format)
    format_int = traits.Bool(
        desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format)
    format_long = traits.Bool(
        desc='Superseded by -int.', argstr='-long', xor=_xor_format)
    format_float = traits.Bool(
        desc='Write out single-precision floating-point data.',
        argstr='-float',
        xor=_xor_format)
    format_double = traits.Bool(
        desc='Write out double-precision floating-point data.',
        argstr='-double',
        xor=_xor_format)
    format_signed = traits.Bool(
        desc='Write signed integer data.', argstr='-signed', xor=_xor_format)
    format_unsigned = traits.Bool(
        desc='Write unsigned integer data (default).',
        argstr='-unsigned',
        xor=_xor_format)
    voxel_range = traits.Tuple(
        traits.Int,
        traits.Int,
        argstr='-range %d %d',
        desc='Valid range for output data.')
    max_buffer_size_in_kb = traits.Range(
        low=0,
        desc='Specify the maximum size of the internal buffers (in kbytes).',
        value=4096,
        usedefault=True,
        argstr='-max_buffer_size_in_kb %d',
    )
    _xor_check_dimensions = (
        'check_dimensions',
        'no_check_dimensions',
    )
    check_dimensions = traits.Bool(
        desc='Check that dimension info matches across files (default).',
        argstr='-check_dimensions',
        xor=_xor_check_dimensions)
    no_check_dimensions = traits.Bool(
        desc='Do not check dimension info.',
        argstr='-nocheck_dimensions',
        xor=_xor_check_dimensions)
    dimension = traits.Str(
        desc=
        'Specify a dimension along which we wish to perform a calculation.',
        argstr='-dimension %s')
    # NOTE(review): mincmath documents both of these flags; whether
    # combining ignore_nan with propagate_nan is sensible is not
    # enforced here — confirm against the mincmath man page.
    ignore_nan = traits.Bool(
        desc='Ignore invalid data (NaN) for accumulations.',
        argstr='-ignore_nan')
    propagate_nan = traits.Bool(
        desc='Invalid data in any file at a voxel produces a NaN (default).',
        argstr='-propagate_nan')
    # BUG FIX: this xor previously named 'output_illegal_value', which is
    # not a trait on this spec (the trait is 'output_illegal'), so the
    # mutual exclusion was never enforced for it.
    _xor_nan_zero_illegal = ('output_nan', 'output_zero', 'output_illegal')
    output_nan = traits.Bool(
        desc='Output NaN when an illegal operation is done (default).',
        argstr='-nan',
        xor=_xor_nan_zero_illegal)
    output_zero = traits.Bool(
        desc='Output zero when an illegal operation is done.',
        argstr='-zero',
        xor=_xor_nan_zero_illegal)
    output_illegal = traits.Bool(
        desc=('Value to write out when an illegal operation'
              'is done. Default value: 1.79769e+308'),
        argstr='-illegal_value',
        xor=_xor_nan_zero_illegal)
    ##########################################################################
    # Traits that expect a bool (compare two volumes) or constant (manipulate one volume) #
    ##########################################################################
    bool_or_const_traits = [
        'test_gt', 'test_lt', 'test_eq', 'test_ne', 'test_ge', 'test_le',
        'calc_add', 'calc_sub', 'calc_mul', 'calc_div'
    ]
    test_gt = traits.Either(
        traits.Bool(),
        traits.Float(),
        desc='Test for vol1 > vol2 or vol1 > constant.',
        argstr='-gt')
    test_lt = traits.Either(
        traits.Bool(),
        traits.Float(),
        desc='Test for vol1 < vol2 or vol1 < constant.',
        argstr='-lt')
    test_eq = traits.Either(
        traits.Bool(),
        traits.Float(),
        desc='Test for integer vol1 == vol2 or vol1 == constant.',
        argstr='-eq')
    test_ne = traits.Either(
        traits.Bool(),
        traits.Float(),
        desc='Test for integer vol1 != vol2 or vol1 != const.',
        argstr='-ne')
    test_ge = traits.Either(
        traits.Bool(),
        traits.Float(),
        desc='Test for vol1 >= vol2 or vol1 >= const.',
        argstr='-ge')
    test_le = traits.Either(
        traits.Bool(),
        traits.Float(),
        desc='Test for vol1 <= vol2 or vol1 <= const.',
        argstr='-le')
    calc_add = traits.Either(
        traits.Bool(),
        traits.Float(),
        desc='Add N volumes or volume + constant.',
        argstr='-add')
    calc_sub = traits.Either(
        traits.Bool(),
        traits.Float(),
        desc='Subtract 2 volumes or volume - constant.',
        argstr='-sub')
    calc_mul = traits.Either(
        traits.Bool(),
        traits.Float(),
        desc='Multiply N volumes or volume * constant.',
        argstr='-mult')
    calc_div = traits.Either(
        traits.Bool(),
        traits.Float(),
        desc='Divide 2 volumes or volume / constant.',
        argstr='-div')
    ######################################
    # Traits that expect a single volume #
    ######################################
    # BUG FIX: this list previously contained 'isnan' twice and omitted
    # 'nisnan' (defined below), so nisnan escaped the one-input-file
    # validation in Math._parse_inputs.
    single_volume_traits = [
        'invert', 'calc_not', 'sqrt', 'square', 'abs', 'exp', 'log', 'scale',
        'clamp', 'segment', 'nsegment', 'isnan', 'nisnan'
    ]
    invert = traits.Either(
        traits.Float(), desc='Calculate 1/c.', argstr='-invert -const %s')
    calc_not = traits.Bool(desc='Calculate !vol1.', argstr='-not')
    sqrt = traits.Bool(desc='Take square root of a volume.', argstr='-sqrt')
    square = traits.Bool(desc='Take square of a volume.', argstr='-square')
    abs = traits.Bool(desc='Take absolute value of a volume.', argstr='-abs')
    exp = traits.Tuple(
        traits.Float,
        traits.Float,
        argstr='-exp -const2 %s %s',
        desc='Calculate c2*exp(c1*x). Both constants must be specified.')
    log = traits.Tuple(
        traits.Float,
        traits.Float,
        argstr='-log -const2 %s %s',
        desc='Calculate log(x/c2)/c1. The constants c1 and c2 default to 1.')
    scale = traits.Tuple(
        traits.Float,
        traits.Float,
        argstr='-scale -const2 %s %s',
        desc='Scale a volume: volume * c1 + c2.')
    clamp = traits.Tuple(
        traits.Float,
        traits.Float,
        argstr='-clamp -const2 %s %s',
        desc='Clamp a volume to lie between two values.')
    segment = traits.Tuple(
        traits.Float,
        traits.Float,
        argstr='-segment -const2 %s %s',
        desc=
        'Segment a volume using range of -const2: within range = 1, outside range = 0.'
    )
    nsegment = traits.Tuple(
        traits.Float,
        traits.Float,
        argstr='-nsegment -const2 %s %s',
        desc='Opposite of -segment: within range = 0, outside range = 1.')
    isnan = traits.Bool(desc='Test for NaN values in vol1.', argstr='-isnan')
    nisnan = traits.Bool(desc='Negation of -isnan.', argstr='-nisnan')
    ############################################
    # Traits that expect precisely two volumes #
    ############################################
    two_volume_traits = ['percentdiff']
    percentdiff = traits.Float(
        desc=
        'Percent difference between 2 volumes, thresholded (const def=0.0).',
        argstr='-percentdiff')
    #####################################
    # Traits that expect N >= 1 volumes #
    #####################################
    # BUG FIX: 'calc_and' (defined below, "vol1 && vol2 (&& ...)") was
    # missing from this list even though 'calc_or' was present.
    n_volume_traits = [
        'count_valid', 'maximum', 'minimum', 'calc_add', 'calc_and', 'calc_or'
    ]
    count_valid = traits.Bool(
        desc='Count the number of valid values in N volumes.',
        argstr='-count_valid')
    maximum = traits.Bool(desc='Find maximum of N volumes.', argstr='-maximum')
    minimum = traits.Bool(desc='Find minimum of N volumes.', argstr='-minimum')
    calc_and = traits.Bool(
        desc='Calculate vol1 && vol2 (&& ...).', argstr='-and')
    calc_or = traits.Bool(
        desc='Calculate vol1 || vol2 (|| ...).', argstr='-or')
class MathOutputSpec(TraitedSpec):
    """Output specification for Math: the result volume from mincmath."""
    output_file = File(desc='output file', exists=True)
class Math(StdOutCommandLine):
    """Interface to ``mincmath``: voxel-wise mathematical operations.

    Examples
    --------

    >>> from nipype.interfaces.minc import Math
    >>> from nipype.interfaces.minc.testdata import minc2Dfile

    Scale: volume*3.0 + 2:

    >>> scale = Math(input_files=[minc2Dfile], scale=(3.0, 2))
    >>> scale.run() # doctest: +SKIP

    Test if >= 1.5:

    >>> gt = Math(input_files=[minc2Dfile], test_gt=1.5)
    >>> gt.run() # doctest: +SKIP
    """

    input_spec = MathInputSpec
    output_spec = MathOutputSpec
    _cmd = 'mincmath'

    def _format_arg(self, name, spec, value):
        # Bool-or-constant traits: a bare True emits just the flag
        # (two-volume form); a float emits the flag plus '-const <value>'
        # (volume-vs-constant form).
        assert value is not None

        if name not in self.input_spec.bool_or_const_traits:
            return super(Math, self)._format_arg(name, spec, value)

        if isinstance(value, bool):
            if value:
                return spec.argstr
            raise ValueError('Does not make sense to specify %s=False' %
                             (name, ))
        if isinstance(value, float):
            return '%s -const %s' % (
                spec.argstr,
                value,
            )
        raise ValueError('Invalid %s argument: %s' % (
            name,
            value,
        ))

    def _parse_inputs(self):
        """A number of the command line options expect precisely one or two files.
        """
        nr_input_files = len(self.inputs.input_files)

        # Bool => compare two volumes; float => one volume vs a constant.
        for trait_name in self.input_spec.bool_or_const_traits:
            value = getattr(self.inputs, trait_name)
            if not isdefined(value):
                continue
            if isinstance(value, bool):
                if nr_input_files != 2:
                    raise ValueError(
                        'Due to the %s option we expected 2 files but input_files is of length %d'
                        % (
                            trait_name,
                            nr_input_files,
                        ))
            elif isinstance(value, float):
                if nr_input_files != 1:
                    raise ValueError(
                        'Due to the %s option we expected 1 file but input_files is of length %d'
                        % (
                            trait_name,
                            nr_input_files,
                        ))
            else:
                raise ValueError(
                    'Argument should be a bool or const, but got: %s' % value)

        for trait_name in self.input_spec.single_volume_traits:
            if isdefined(getattr(self.inputs, trait_name)) \
                    and nr_input_files != 1:
                raise ValueError(
                    'Due to the %s option we expected 1 file but input_files is of length %d'
                    % (
                        trait_name,
                        nr_input_files,
                    ))

        for trait_name in self.input_spec.two_volume_traits:
            if isdefined(getattr(self.inputs, trait_name)) \
                    and nr_input_files != 2:
                raise ValueError(
                    'Due to the %s option we expected 2 files but input_files is of length %d'
                    % (
                        trait_name,
                        nr_input_files,
                    ))

        for trait_name in self.input_spec.n_volume_traits:
            if isdefined(getattr(self.inputs, trait_name)) \
                    and nr_input_files < 1:
                raise ValueError(
                    'Due to the %s option we expected at least one file but input_files is of length %d'
                    % (
                        trait_name,
                        nr_input_files,
                    ))

        return super(Math, self)._parse_inputs()
class ResampleInputSpec(CommandLineInputSpec):
    """Input specification for mincresample.

    not implemented:
     -size:                  synonym for -nelements
     -xsize:                 synonym for -xnelements
     -ysize:                 synonym for -ynelements
     -zsize:                 synonym for -znelements
    """

    input_file = File(
        desc='input file for resampling',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_resample.mnc')
    # This is a dummy input.
    input_grid_files = InputMultiPath(
        File,
        desc='input grid file(s)',
    )
    two = traits.Bool(desc='Create a MINC 2 output file.', argstr='-2')
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    _xor_interpolation = ('trilinear_interpolation', 'tricubic_interpolation',
                          'nearest_neighbour_interpolation',
                          'sinc_interpolation')
    trilinear_interpolation = traits.Bool(
        desc='Do trilinear interpolation.',
        argstr='-trilinear',
        xor=_xor_interpolation)
    tricubic_interpolation = traits.Bool(
        desc='Do tricubic interpolation.',
        argstr='-tricubic',
        xor=_xor_interpolation)
    nearest_neighbour_interpolation = traits.Bool(
        desc='Do nearest neighbour interpolation.',
        argstr='-nearest_neighbour',
        xor=_xor_interpolation)
    sinc_interpolation = traits.Bool(
        desc='Do windowed sinc interpolation.',
        argstr='-sinc',
        xor=_xor_interpolation)
    half_width_sinc_window = traits.Enum(
        5,
        1,
        2,
        3,
        4,
        6,
        7,
        8,
        9,
        10,
        desc='Set half-width of sinc window (1-10). Default value: 5.',
        argstr='-width %s',
        requires=['sinc_interpolation'])
    _xor_sinc_window_type = ('sinc_window_hanning', 'sinc_window_hamming')
    sinc_window_hanning = traits.Bool(
        desc='Set sinc window type to Hanning.',
        argstr='-hanning',
        xor=_xor_sinc_window_type,
        requires=['sinc_interpolation'])
    sinc_window_hamming = traits.Bool(
        desc='Set sinc window type to Hamming.',
        argstr='-hamming',
        xor=_xor_sinc_window_type,
        requires=['sinc_interpolation'])
    transformation = File(
        desc='File giving world transformation. (Default = identity).',
        exists=True,
        argstr='-transformation %s')
    invert_transformation = traits.Bool(
        desc='Invert the transformation before using it.',
        argstr='-invert_transformation')
    _xor_input_sampling = ('vio_transform', 'no_input_sampling')
    vio_transform = traits.Bool(
        desc='VIO_Transform the input sampling with the transform (default).',
        argstr='-tfm_input_sampling',
        xor=_xor_input_sampling)
    no_input_sampling = traits.Bool(
        desc='Use the input sampling without transforming (old behaviour).',
        argstr='-use_input_sampling',
        xor=_xor_input_sampling)
    like = File(
        desc='Specifies a model file for the resampling.',
        argstr='-like %s',
        exists=True)
    _xor_format = (
        'format_byte',
        'format_short',
        'format_int',
        'format_long',
        'format_float',
        'format_double',
        'format_signed',
        'format_unsigned',
    )
    format_byte = traits.Bool(
        desc='Write out byte data.', argstr='-byte', xor=_xor_format)
    format_short = traits.Bool(
        desc='Write out short integer data.', argstr='-short', xor=_xor_format)
    format_int = traits.Bool(
        desc='Write out 32-bit integer data.', argstr='-int', xor=_xor_format)
    format_long = traits.Bool(
        desc='Superseded by -int.', argstr='-long', xor=_xor_format)
    format_float = traits.Bool(
        desc='Write out single-precision floating-point data.',
        argstr='-float',
        xor=_xor_format)
    format_double = traits.Bool(
        desc='Write out double-precision floating-point data.',
        argstr='-double',
        xor=_xor_format)
    format_signed = traits.Bool(
        desc='Write signed integer data.', argstr='-signed', xor=_xor_format)
    format_unsigned = traits.Bool(
        desc='Write unsigned integer data (default).',
        argstr='-unsigned',
        xor=_xor_format)
    output_range = traits.Tuple(
        traits.Float,
        traits.Float,
        argstr='-range %s %s',
        desc=
        'Valid range for output data. Default value: -1.79769e+308 -1.79769e+308.'
    )
    # BUG FIX: this xor previously named 'transverse', 'sagittal' and
    # 'coronal', none of which are trait names, so the mutual exclusion
    # was never enforced.
    _xor_slices = ('transverse_slices', 'sagittal_slices', 'coronal_slices')
    transverse_slices = traits.Bool(
        desc='Write out transverse slices.',
        argstr='-transverse',
        xor=_xor_slices)
    sagittal_slices = traits.Bool(
        desc='Write out sagittal slices', argstr='-sagittal', xor=_xor_slices)
    coronal_slices = traits.Bool(
        desc='Write out coronal slices', argstr='-coronal', xor=_xor_slices)
    # BUG FIX: was ('nofill', 'fill'); the trait is named 'no_fill'.
    _xor_fill = ('no_fill', 'fill')
    no_fill = traits.Bool(
        desc='Use value zero for points outside of input volume.',
        argstr='-nofill',
        xor=_xor_fill)
    fill = traits.Bool(
        desc='Use a fill value for points outside of input volume.',
        argstr='-fill',
        xor=_xor_fill)
    fill_value = traits.Float(
        desc=('Specify a fill value for points outside of input volume.'
              'Default value: 1.79769e+308.'),
        argstr='-fillvalue %s',
        requires=['fill'])
    _xor_scale = ('keep_real_range', 'nokeep_real_range')
    keep_real_range = traits.Bool(
        desc='Keep the real scale of the input volume.',
        argstr='-keep_real_range',
        xor=_xor_scale)
    nokeep_real_range = traits.Bool(
        desc='Do not keep the real scale of the data (default).',
        argstr='-nokeep_real_range',
        xor=_xor_scale)
    # NOTE(review): _xor_spacetype is declared but not attached to either
    # trait below; wiring it up would change validation behaviour, so it
    # is left as-is — confirm intent against mincresample's man page.
    _xor_spacetype = ('spacetype', 'talairach')
    spacetype = traits.Str(
        desc='Set the spacetype attribute to a specified string.',
        argstr='-spacetype %s')
    talairach = traits.Bool(
        desc='Output is in Talairach space.', argstr='-talairach')
    origin = traits.Tuple(
        traits.Float,
        traits.Float,
        traits.Float,
        desc=('Origin of first pixel in 3D space.'
              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
        argstr='-origin %s %s %s')
    standard_sampling = traits.Bool(
        desc='Set the sampling to standard values (step, start and dircos).',
        argstr='-standard_sampling')
    units = traits.Str(
        desc='Specify the units of the output sampling.',
        argstr='-units %s')
    # Elements along each dimension: either a single -nelements triple,
    # or all three of -x/-y/-znelements (which require one another).
    _xor_nelements = ('nelements', 'nelements_x_y_or_z')
    # nr elements along each dimension
    nelements = traits.Tuple(
        traits.Int,
        traits.Int,
        traits.Int,
        desc='Number of elements along each dimension (X, Y, Z).',
        argstr='-nelements %s %s %s',
        xor=_xor_nelements)
    xnelements = traits.Int(
        desc='Number of elements along the X dimension.',
        argstr='-xnelements %s',
        requires=('ynelements', 'znelements'),
        xor=_xor_nelements)
    ynelements = traits.Int(
        desc='Number of elements along the Y dimension.',
        argstr='-ynelements %s',
        requires=('xnelements', 'znelements'),
        xor=_xor_nelements)
    znelements = traits.Int(
        desc='Number of elements along the Z dimension.',
        argstr='-znelements %s',
        requires=('xnelements', 'ynelements'),
        xor=_xor_nelements)
    # step size along each dimension
    _xor_step = ('step', 'step_x_y_or_z')
    # BUG FIX: 'step' previously used xor=_xor_nelements (copy-paste),
    # which excluded it against the nelements traits instead of the
    # per-axis step traits.
    step = traits.Tuple(
        traits.Int,
        traits.Int,
        traits.Int,
        desc=
        'Step size along each dimension (X, Y, Z). Default value: (0, 0, 0).',
        argstr='-step %s %s %s',
        xor=_xor_step)
    xstep = traits.Int(
        desc='Step size along the X dimension. Default value: 0.',
        argstr='-xstep %s',
        requires=('ystep', 'zstep'),
        xor=_xor_step)
    ystep = traits.Int(
        desc='Step size along the Y dimension. Default value: 0.',
        argstr='-ystep %s',
        requires=('xstep', 'zstep'),
        xor=_xor_step)
    zstep = traits.Int(
        desc='Step size along the Z dimension. Default value: 0.',
        argstr='-zstep %s',
        requires=('xstep', 'ystep'),
        xor=_xor_step)
    # start point along each dimension
    _xor_start = ('start', 'start_x_y_or_z')
    # BUG FIX: 'start' previously used xor=_xor_nelements (copy-paste).
    start = traits.Tuple(
        traits.Float,
        traits.Float,
        traits.Float,
        desc=('Start point along each dimension (X, Y, Z).'
              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
        argstr='-start %s %s %s',
        xor=_xor_start)
    xstart = traits.Float(
        desc='Start point along the X dimension. Default value: 1.79769e+308.',
        argstr='-xstart %s',
        requires=('ystart', 'zstart'),
        xor=_xor_start)
    ystart = traits.Float(
        desc='Start point along the Y dimension. Default value: 1.79769e+308.',
        argstr='-ystart %s',
        requires=('xstart', 'zstart'),
        xor=_xor_start)
    zstart = traits.Float(
        desc='Start point along the Z dimension. Default value: 1.79769e+308.',
        argstr='-zstart %s',
        requires=('xstart', 'ystart'),
        xor=_xor_start)
    # dircos along each dimension
    _xor_dircos = ('dircos', 'dircos_x_y_or_z')
    # BUG FIX: 'dircos' previously used xor=_xor_nelements (copy-paste).
    dircos = traits.Tuple(
        traits.Float,
        traits.Float,
        traits.Float,
        desc=(
            'Direction cosines along each dimension (X, Y, Z). Default value:'
            '1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308 ...'
            ' 1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308 1.79769e+308.'
        ),
        argstr='-dircos %s %s %s',
        xor=_xor_dircos)
    xdircos = traits.Float(
        desc=('Direction cosines along the X dimension.'
              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
        argstr='-xdircos %s',
        requires=('ydircos', 'zdircos'),
        xor=_xor_dircos)
    ydircos = traits.Float(
        desc=('Direction cosines along the Y dimension.'
              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
        argstr='-ydircos %s',
        requires=('xdircos', 'zdircos'),
        xor=_xor_dircos)
    zdircos = traits.Float(
        desc=('Direction cosines along the Z dimension.'
              'Default value: 1.79769e+308 1.79769e+308 1.79769e+308.'),
        argstr='-zdircos %s',
        requires=('xdircos', 'ydircos'),
        xor=_xor_dircos)
class ResampleOutputSpec(TraitedSpec):
    """Output specification for Resample: the resampled volume."""
    output_file = File(desc='output file', exists=True)
class Resample(StdOutCommandLine):
    """
    Resample a minc file.

    Examples
    --------

    >>> from nipype.interfaces.minc import Resample
    >>> from nipype.interfaces.minc.testdata import minc2Dfile
    >>> r = Resample(input_file=minc2Dfile, output_file='/tmp/out.mnc') # Resample the file.
    >>> r.run() # doctest: +SKIP
    """
    input_spec = ResampleInputSpec
    output_spec = ResampleOutputSpec
    _cmd = 'mincresample'
class NormInputSpec(CommandLineInputSpec):
    """Input specification for mincnorm.

    Not implemented:
    -version         print version and exit
    -verbose         be verbose
    -noverbose       opposite of -verbose [default]
    -quiet           be quiet
    -noquiet         opposite of -quiet [default]
    -fake            do a dry run, (echo cmds only)
    -nofake          opposite of -fake [default]
    """
    input_file = File(
        desc='input file to normalise',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_norm.mnc')
    output_threshold_mask = File(
        desc='File in which to store the threshold mask.',
        argstr='-threshold_mask %s',
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_norm_threshold_mask.mnc')
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    # Normalisation Options
    mask = File(
        desc='Calculate the image normalisation within a mask.',
        argstr='-mask %s',
        exists=True)
    # BUG FIX: help text previously read 'ouput'.
    clamp = traits.Bool(
        desc='Force the output range between limits [default].',
        argstr='-clamp',
        usedefault=True,
        default_value=True)
    cutoff = traits.Range(
        low=0.0,
        high=100.0,
        desc=
        'Cutoff value to use to calculate thresholds by a histogram PcT in %. [default: 0.01]',
        argstr='-cutoff %s',
    )
    lower = traits.Float(desc='Lower real value to use.', argstr='-lower %s')
    upper = traits.Float(desc='Upper real value to use.', argstr='-upper %s')
    # BUG FIX: the out_floor/out_ceil descriptions were swapped — the
    # floor is the output minimum (default 0) and the ceiling the output
    # maximum (default 100).
    out_floor = traits.Float(
        desc='Output files minimum [default: 0]',
        argstr='-out_floor %s')
    out_ceil = traits.Float(
        desc='Output files maximum [default: 100]',
        argstr='-out_ceil %s')
    # Threshold Options
    threshold = traits.Bool(
        desc=
        'Threshold the image (set values below threshold_perc to -out_floor).',
        argstr='-threshold')
    threshold_perc = traits.Range(
        low=0.0,
        high=100.0,
        desc=
        'Threshold percentage (0.1 == lower 10% of intensity range) [default: 0.1].',
        argstr='-threshold_perc %s')
    threshold_bmt = traits.Bool(
        desc='Use the resulting image BiModalT as the threshold.',
        argstr='-threshold_bmt')
    threshold_blur = traits.Float(
        desc='Blur FWHM for intensity edges then thresholding [default: 2].',
        argstr='-threshold_blur %s')
class NormOutputSpec(TraitedSpec):
    """Outputs of mincnorm: the normalised volume and, when requested,
    the threshold mask."""
    output_file = File(desc='output file', exists=True)
    output_threshold_mask = File(desc='threshold mask file')
class Norm(CommandLine):
    """Normalise a file between a max and minimum (possibly)
    using two histogram pct's.

    Examples
    --------

    >>> from nipype.interfaces.minc import Norm
    >>> from nipype.interfaces.minc.testdata import minc2Dfile
    >>> n = Norm(input_file=minc2Dfile, output_file='/tmp/out.mnc') # Normalise the file.
    >>> n.run() # doctest: +SKIP
    """
    input_spec = NormInputSpec
    output_spec = NormOutputSpec
    _cmd = 'mincnorm'
"""
| volcentre will centre a MINC image's sampling about a point (0,0,0 typically)
|
| NB: It will modify the file in-place unless an outfile is given
|
| Problems or comments should be sent to: a.janke@gmail.com
Summary of options:
-version print version and exit
-verbose be verbose
-noverbose opposite of -verbose [default]
-clobber clobber existing check files
-noclobber opposite of -clobber [default]
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
-com Use the CoM of the volume for the new centre (via mincstats)
-nocom opposite of -com [default]
-centre <float> <float> <float>
Centre to use (x,y,z) [default: 0 0 0]
-zero_dircos Set the direction cosines to identity [default]
-nozero_dirco opposite of -zero_dircos
Usage: volcentre [options] <infile.mnc> [<outfile.mnc>]
volcentre -help to list options
"""
class VolcentreInputSpec(CommandLineInputSpec):
    """Input specification for volcentre.

    Not implemented:
    -fake         do a dry run, (echo cmds only)
    -nofake       opposite of -fake [default]
    """
    input_file = File(
        desc='input file to centre',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_volcentre.mnc')
    verbose = traits.Bool(
        desc='Print out log messages. Default: False.', argstr='-verbose')
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    com = traits.Bool(
        desc=
        'Use the CoM of the volume for the new centre (via mincstats). Default: False',
        argstr='-com')
    centre = traits.Tuple(
        traits.Float,
        traits.Float,
        traits.Float,
        argstr='-centre %s %s %s',
        desc='Centre to use (x,y,z) [default: 0 0 0].',
    )
    zero_dircos = traits.Bool(
        desc='Set the direction cosines to identity [default].',
        argstr='-zero_dircos')
class VolcentreOutputSpec(TraitedSpec):
    """Output specification for Volcentre: the recentred volume."""
    output_file = File(desc='output file', exists=True)
class Volcentre(CommandLine):
    """Centre a MINC image's sampling about a point, typically (0,0,0).

    Example
    --------

    >>> from nipype.interfaces.minc import Volcentre
    >>> from nipype.interfaces.minc.testdata import minc2Dfile
    >>> vc = Volcentre(input_file=minc2Dfile)
    >>> vc.run() # doctest: +SKIP
    """
    input_spec = VolcentreInputSpec
    output_spec = VolcentreOutputSpec
    _cmd = 'volcentre'
class VolpadInputSpec(CommandLineInputSpec):
    """Input specification for volpad, which pads a MINC volume.

    Not implemented:
    -fake         do a dry run, (echo cmds only)
    -nofake       opposite of -fake [default]

    | volpad pads a MINC volume
    |
    | Problems or comments should be sent to: a.janke@gmail.com

    Summary of options:

    -- General Options -------------------------------------------------------------
    -verbose          be verbose
    -noverbose        opposite of -verbose [default]
    -clobber          clobber existing files
    -noclobber        opposite of -clobber [default]
    -fake             do a dry run, (echo cmds only)
    -nofake           opposite of -fake [default]
    """
    # BUG FIX: the help text previously said 'input file to centre',
    # copy-pasted from Volcentre; volpad pads the volume.
    input_file = File(
        desc='input file to pad',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_volpad.mnc')
    verbose = traits.Bool(
        desc='Print out log messages. Default: False.', argstr='-verbose')
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    auto = traits.Bool(
        desc=
        'Automatically determine padding distances (uses -distance as max). Default: False.',
        argstr='-auto')
    auto_freq = traits.Float(
        desc=
        'Frequency of voxels over bimodalt threshold to stop at [default: 500].',
        argstr='-auto_freq %s')
    distance = traits.Int(
        desc='Padding distance (in voxels) [default: 4].',
        argstr='-distance %s')
    smooth = traits.Bool(
        desc='Smooth (blur) edges before padding. Default: False.',
        argstr='-smooth')
    smooth_distance = traits.Int(
        desc='Smoothing distance (in voxels) [default: 4].',
        argstr='-smooth_distance %s')
class VolpadOutputSpec(TraitedSpec):
    # The padded volume written by volpad.
    output_file = File(exists=True, desc='output file')
class Volpad(CommandLine):
    """Pad a MINC volume with a boundary of voxels, optionally
    determining the padding distances automatically and smoothing
    (blurring) the edges first.

    NOTE: the previous docstring ("Centre a MINC image's sampling...")
    was copied from Volcentre by mistake; volpad pads a volume.

    Examples
    --------

    >>> from nipype.interfaces.minc import Volpad
    >>> from nipype.interfaces.minc.testdata import minc2Dfile
    >>> vp = Volpad(input_file=minc2Dfile, smooth=True, smooth_distance=4)
    >>> vp.run() # doctest: +SKIP
    """
    input_spec = VolpadInputSpec
    output_spec = VolpadOutputSpec
    _cmd = 'volpad'
class VolisoInputSpec(CommandLineInputSpec):
    # Inputs for the 'voliso' tool. Note that, unlike most of the other
    # MINC wrappers in this file, voliso options use double dashes
    # (e.g. '--verbose').
    input_file = File(
        desc='input file to convert to isotropic sampling',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    # Output filename; auto-generated from input_file when not given.
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_voliso.mnc')
    verbose = traits.Bool(
        desc='Print out log messages. Default: False.', argstr='--verbose')
    # Always passed (usedefault=True) so reruns can overwrite outputs.
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='--clobber',
        usedefault=True,
        default_value=True)
    maxstep = traits.Float(
        desc='The target maximum step desired in the output volume.',
        argstr='--maxstep %s')
    minstep = traits.Float(
        desc='The target minimum step desired in the output volume.',
        argstr='--minstep %s')
    avgstep = traits.Bool(
        desc=
        'Calculate the maximum step from the average steps of the input volume.',
        argstr='--avgstep')
class VolisoOutputSpec(TraitedSpec):
    # The isotropically resampled volume written by voliso.
    output_file = File(exists=True, desc='output file')
class Voliso(CommandLine):
    """Adjust a volume's steps and starts so that the output volume
    has isotropic (equal-step) sampling.

    Examples
    --------

    >>> from nipype.interfaces.minc import Voliso
    >>> from nipype.interfaces.minc.testdata import minc2Dfile
    >>> viso = Voliso(input_file=minc2Dfile, minstep=0.1, avgstep=True)
    >>> viso.run() # doctest: +SKIP
    """
    _cmd = 'voliso'
    input_spec = VolisoInputSpec
    output_spec = VolisoOutputSpec
class GennlxfmInputSpec(CommandLineInputSpec):
    # Inputs for 'gennlxfm'. The output name is derived from 'like'
    # (the template file) rather than from an input image.
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['like'],
        hash_files=False,
        name_template='%s_gennlxfm.xfm')
    verbose = traits.Bool(
        desc='Print out log messages. Default: False.', argstr='-verbose')
    # Always passed (usedefault=True) so reruns can overwrite outputs.
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    ident = traits.Bool(
        desc='Generate an identity xfm. Default: False.', argstr='-ident')
    step = traits.Int(
        desc='Output ident xfm step [default: 1].', argstr='-step %s')
    like = File(
        desc='Generate a nlxfm like this file.',
        exists=True,
        argstr='-like %s',
    )
class GennlxfmOutputSpec(TraitedSpec):
    # gennlxfm writes the xfm plus an associated deformation grid volume.
    output_file = File(exists=True, desc='output file')
    output_grid = File(exists=True, desc='output grid')
class Gennlxfm(CommandLine):
    """Generate nonlinear xfms. Currently only identity xfms
    are supported!

    This tool is part of minc-widgets:

    https://github.com/BIC-MNI/minc-widgets/blob/master/gennlxfm/gennlxfm

    Examples
    --------

    >>> from nipype.interfaces.minc import Gennlxfm
    >>> from nipype.interfaces.minc.testdata import minc2Dfile
    >>> gennlxfm = Gennlxfm(step=1, like=minc2Dfile)
    >>> gennlxfm.run() # doctest: +SKIP
    """
    input_spec = GennlxfmInputSpec
    output_spec = GennlxfmOutputSpec
    _cmd = 'gennlxfm'

    def _list_outputs(self):
        """Report the grid volume written alongside the output xfm.

        The grid file is named like the output file but with the
        '.nlxfm'/'.xfm' extension replaced by '_grid_0.mnc'.
        """
        outputs = super(Gennlxfm, self)._list_outputs()
        # Escape the dot so only a literal '.nlxfm'/'.xfm' suffix matches;
        # the previous pattern '.(nlxfm|xfm)$' let '.' match any character.
        outputs['output_grid'] = re.sub(r'\.(nlxfm|xfm)$', '_grid_0.mnc',
                                        outputs['output_file'])
        return outputs
class XfmConcatInputSpec(CommandLineInputSpec):
    # Inputs for 'xfmconcat'; the transforms are applied in the order
    # they are listed.
    input_files = InputMultiPath(
        File(exists=True),
        desc='input file(s)',
        mandatory=True,
        sep=' ',
        argstr='%s',
        position=-2)
    # This is a dummy input (no argstr) — presumably present so that
    # grid volumes referenced by non-linear xfms are tracked as
    # dependencies; confirm against callers.
    input_grid_files = InputMultiPath(
        File,
        desc='input grid file(s)',
    )
    # Output filename; auto-generated from the input files when not given.
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_files'],
        hash_files=False,
        name_template='%s_xfmconcat.xfm')
    verbose = traits.Bool(
        desc='Print out log messages. Default: False.', argstr='-verbose')
    # Always passed (usedefault=True) so reruns can overwrite outputs.
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
class XfmConcatOutputSpec(TraitedSpec):
    # The concatenated transform plus any deformation grids it references.
    output_file = File(exists=True, desc='output file')
    output_grids = OutputMultiPath(File(exists=True), desc='output grids')
class XfmConcat(CommandLine):
    """Concatenate transforms together. The output transformation
    is equivalent to applying input1.xfm, then input2.xfm, ..., in
    that order.

    Examples
    --------

    >>> from nipype.interfaces.minc import XfmConcat
    >>> from nipype.interfaces.minc.testdata import minc2Dfile
    >>> conc = XfmConcat(input_files=['input1.xfm', 'input1.xfm'])
    >>> conc.run() # doctest: +SKIP
    """
    input_spec = XfmConcatInputSpec
    output_spec = XfmConcatOutputSpec
    _cmd = 'xfmconcat'

    def _list_outputs(self):
        """Collect the concatenated xfm and any deformation grids.

        An xfm that references deformation grids mentions 'grid' in its
        (text) contents; the grids are written next to the output file
        with a '_grid_N.mnc' suffix.
        """
        outputs = super(XfmConcat, self)._list_outputs()
        if os.path.exists(outputs['output_file']):
            # Use a context manager instead of the previous bare
            # open(...).read(), which leaked the file handle.
            with open(outputs['output_file'], 'r') as f:
                xfm_text = f.read()
            if 'grid' in xfm_text:
                # Dot escaped so only a literal extension matches.
                outputs['output_grids'] = glob.glob(
                    re.sub(r'\.(nlxfm|xfm)$', '_grid_*.mnc',
                           outputs['output_file']))
        return outputs
class BestLinRegInputSpec(CommandLineInputSpec):
    # Inputs for the 'bestlinreg' script: source/target volumes followed
    # by the generated xfm and resampled output, in fixed positional order.
    source = File(
        desc='source Minc file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-4,
    )
    target = File(
        desc='target Minc file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-3,
    )
    # keep_extension=False: the name_template already supplies '.xfm'.
    output_xfm = File(
        desc='output xfm file',
        genfile=True,
        argstr='%s',
        position=-2,
        name_source=['source'],
        hash_files=False,
        name_template='%s_bestlinreg.xfm',
        keep_extension=False)
    # keep_extension=False: the name_template already supplies '.mnc'.
    output_mnc = File(
        desc='output mnc file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['source'],
        hash_files=False,
        name_template='%s_bestlinreg.mnc',
        keep_extension=False)
    verbose = traits.Bool(
        desc='Print out log messages. Default: False.', argstr='-verbose')
    # Always passed (usedefault=True) so reruns can overwrite outputs.
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    # FIXME Very bare implementation, none of these are done yet:
    """
    -init_xfm initial transformation (default identity)
    -source_mask source mask to use during fitting
    -target_mask target mask to use during fitting
    -lsq9 use 9-parameter transformation (default)
    -lsq12 use 12-parameter transformation (default -lsq9)
    -lsq6 use 6-parameter transformation
    """
class BestLinRegOutputSpec(TraitedSpec):
    # bestlinreg emits both the transform and the resampled volume.
    output_xfm = File(exists=True, desc='output xfm file')
    output_mnc = File(exists=True, desc='output mnc file')
class BestLinReg(CommandLine):
    """Hierarchical linear fitting between two files.

    The bestlinreg script is part of the EZminc package:

    https://github.com/BIC-MNI/EZminc/blob/master/scripts/bestlinreg.pl

    Examples
    --------

    >>> from nipype.interfaces.minc import BestLinReg
    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data
    >>> input_file = nonempty_minc_data(0)
    >>> target_file = nonempty_minc_data(1)
    >>> linreg = BestLinReg(source=input_file, target=target_file)
    >>> linreg.run() # doctest: +SKIP
    """
    _cmd = 'bestlinreg'
    input_spec = BestLinRegInputSpec
    output_spec = BestLinRegOutputSpec
class NlpFitInputSpec(CommandLineInputSpec):
    # Inputs for 'nlpfit': source/target volumes positionally, then the
    # output xfm, plus mandatory config/init/mask flag options.
    source = File(
        desc='source Minc file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-3,
    )
    target = File(
        desc='target Minc file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2,
    )
    # No name_source/name_template: the default name comes from
    # NlpFit._gen_filename instead.
    output_xfm = File(
        desc='output xfm file',
        genfile=True,
        argstr='%s',
        position=-1,
    )
    # This is a dummy input (no argstr) — presumably present so grid
    # volumes referenced by input xfms are tracked as dependencies;
    # confirm against callers.
    input_grid_files = InputMultiPath(
        File,
        desc='input grid file(s)',
    )
    # NOTE(review): desc reads 'configuration use.' — looks like it
    # should be 'configuration to use.'; desc is runtime help text so it
    # is left unchanged here.
    config_file = File(
        desc='File containing the fitting configuration use.',
        argstr='-config_file %s',
        mandatory=True,
        exists=True)
    init_xfm = File(
        desc='Initial transformation (default identity).',
        argstr='-init_xfm %s',
        mandatory=True,
        exists=True)
    source_mask = File(
        desc='Source mask to use during fitting.',
        argstr='-source_mask %s',
        mandatory=True,
        exists=True)
    verbose = traits.Bool(
        desc='Print out log messages. Default: False.', argstr='-verbose')
    # Always passed (usedefault=True) so reruns can overwrite outputs.
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
class NlpFitOutputSpec(TraitedSpec):
    # The fitted transform and, for non-linear fits, its grid volume.
    output_xfm = File(exists=True, desc='output xfm file')
    output_grid = File(exists=True, desc='output grid file')
class NlpFit(CommandLine):
    """Hierarchical non-linear fitting with blurring.

    This tool is part of the minc-widgets package:

    https://github.com/BIC-MNI/minc-widgets/blob/master/nlpfit/nlpfit

    Examples
    --------

    >>> from nipype.interfaces.minc import NlpFit
    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data, nlp_config
    >>> from nipype.testing import example_data
    >>> source = nonempty_minc_data(0)
    >>> target = nonempty_minc_data(1)
    >>> source_mask = nonempty_minc_data(2)
    >>> config = nlp_config
    >>> initial = example_data('minc_initial.xfm')
    >>> nlpfit = NlpFit(config_file=config, init_xfm=initial, source_mask=source_mask, source=source, target=target)
    >>> nlpfit.run() # doctest: +SKIP
    """
    input_spec = NlpFitInputSpec
    output_spec = NlpFitOutputSpec
    _cmd = 'nlpfit'

    def _gen_filename(self, name):
        """Generate the default filename for 'output_xfm'.

        Raises NotImplementedError for any other trait name.
        """
        if name == 'output_xfm':
            output_xfm = self.inputs.output_xfm
            if isdefined(output_xfm):
                return os.path.abspath(output_xfm)
            else:
                return aggregate_filename(
                    [self.inputs.source, self.inputs.target],
                    'nlpfit_xfm_output') + '.xfm'
        else:
            # Was 'raise NotImplemented', which raises a TypeError because
            # NotImplemented is a singleton, not an exception class.
            raise NotImplementedError

    def _list_outputs(self):
        """Report the output xfm and, if non-linear, its grid volume."""
        outputs = self.output_spec().get()
        outputs['output_xfm'] = os.path.abspath(
            self._gen_filename('output_xfm'))
        assert os.path.exists(outputs['output_xfm'])
        # Use a context manager instead of the previous bare
        # open(...).read(), which leaked the file handle.
        with open(outputs['output_xfm'], 'r') as f:
            xfm_text = f.read()
        # A non-linear xfm references a deformation grid volume named
        # after the xfm file.
        if 'grid' in xfm_text:
            # Dot escaped so only a literal extension matches.
            outputs['output_grid'] = re.sub(r'\.(nlxfm|xfm)$', '_grid_0.mnc',
                                            outputs['output_xfm'])
        return outputs
class XfmAvgInputSpec(CommandLineInputSpec):
    # Inputs for 'xfmavg': the transforms to average, positionally.
    input_files = InputMultiPath(
        File(exists=True),
        desc='input file(s)',
        mandatory=True,
        sep=' ',
        argstr='%s',
        position=-2)
    # This is a dummy input (no argstr) — presumably present so grid
    # volumes referenced by the input xfms are tracked as dependencies;
    # confirm against callers.
    input_grid_files = InputMultiPath(
        File,
        desc='input grid file(s)',
    )
    # No name_source/name_template: the default name comes from
    # XfmAvg._gen_filename instead.
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
    )
    verbose = traits.Bool(
        desc='Print out log messages. Default: False.', argstr='-verbose')
    # Always passed (usedefault=True) so reruns can overwrite outputs.
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    # FIXME xor these:
    avg_linear = traits.Bool(
        desc='average the linear part [default].', argstr='-avg_linear')
    avg_nonlinear = traits.Bool(
        desc='average the non-linear part [default].', argstr='-avg_nonlinear')
    ignore_linear = traits.Bool(
        desc='opposite of -avg_linear.', argstr='-ignore_linear')
    # NOTE(review): argstr '-ignore_nonline' looks like a typo for
    # '-ignore_nonlinear' — confirm against the xfmavg script before
    # changing, since it alters the generated command line.
    ignore_nonlinear = traits.Bool(
        desc='opposite of -avg_nonlinear.', argstr='-ignore_nonline')
class XfmAvgOutputSpec(TraitedSpec):
    # The averaged transform and, for non-linear parts, its grid volume.
    output_file = File(exists=True, desc='output file')
    output_grid = File(exists=True, desc='output grid file')
class XfmAvg(CommandLine):
    """Average a number of xfm transforms using matrix logs and exponents.
    The program xfmavg calls Octave for numerical work.

    This tool is part of the minc-widgets package:

    https://github.com/BIC-MNI/minc-widgets/tree/master/xfmavg

    Examples
    --------

    >>> from nipype.interfaces.minc import XfmAvg
    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data, nlp_config
    >>> from nipype.testing import example_data
    >>> xfm1 = example_data('minc_initial.xfm')
    >>> xfm2 = example_data('minc_initial.xfm')  # cheating for doctest
    >>> xfmavg = XfmAvg(input_files=[xfm1, xfm2])
    >>> xfmavg.run() # doctest: +SKIP
    """
    input_spec = XfmAvgInputSpec
    output_spec = XfmAvgOutputSpec
    _cmd = 'xfmavg'

    def _gen_filename(self, name):
        """Generate the default filename for 'output_file'.

        Raises NotImplementedError for any other trait name.
        """
        if name == 'output_file':
            output_file = self.inputs.output_file
            if isdefined(output_file):
                return os.path.abspath(output_file)
            else:
                return aggregate_filename(self.inputs.input_files,
                                          'xfmavg_output') + '.xfm'
        else:
            # Was 'raise NotImplemented', which raises a TypeError because
            # NotImplemented is a singleton, not an exception class.
            raise NotImplementedError

    def _gen_outfilename(self):
        # Convenience wrapper used by _list_outputs.
        return self._gen_filename('output_file')

    def _list_outputs(self):
        """Report the averaged xfm and, if non-linear, its grid volume."""
        outputs = self.output_spec().get()
        outputs['output_file'] = os.path.abspath(self._gen_outfilename())
        assert os.path.exists(outputs['output_file'])
        # Use a context manager instead of the previous bare
        # open(...).read(), which leaked the file handle.
        with open(outputs['output_file'], 'r') as f:
            xfm_text = f.read()
        if 'grid' in xfm_text:
            # Dot escaped so only a literal extension matches.
            outputs['output_grid'] = re.sub(r'\.(nlxfm|xfm)$', '_grid_0.mnc',
                                            outputs['output_file'])
        return outputs
class XfmInvertInputSpec(CommandLineInputSpec):
    # Inputs for 'xfminvert': the transform to invert, positionally.
    input_file = File(
        desc='input file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2)
    # No name_source/name_template: the default name comes from
    # XfmInvert._gen_filename instead.
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
    )
    verbose = traits.Bool(
        desc='Print out log messages. Default: False.', argstr='-verbose')
    # Always passed (usedefault=True) so reruns can overwrite outputs.
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
class XfmInvertOutputSpec(TraitedSpec):
    # The inverted transform and, for non-linear xfms, its grid volume.
    output_file = File(exists=True, desc='output file')
    output_grid = File(exists=True, desc='output grid file')
class XfmInvert(CommandLine):
    """Invert an xfm transform file.

    Examples
    --------

    >>> from nipype.interfaces.minc import XfmInvert
    >>> from nipype.testing import example_data
    >>> xfm = example_data('minc_initial.xfm')
    >>> invert = XfmInvert(input_file=xfm)
    >>> invert.run() # doctest: +SKIP
    """
    input_spec = XfmInvertInputSpec
    output_spec = XfmInvertOutputSpec
    _cmd = 'xfminvert'

    def _gen_filename(self, name):
        """Generate the default filename for 'output_file'.

        Raises NotImplementedError for any other trait name.
        """
        if name == 'output_file':
            output_file = self.inputs.output_file
            if isdefined(output_file):
                return os.path.abspath(output_file)
            else:
                return aggregate_filename([self.inputs.input_file],
                                          'xfminvert_output') + '.xfm'
        else:
            # Was 'raise NotImplemented', which raises a TypeError because
            # NotImplemented is a singleton, not an exception class.
            raise NotImplementedError

    def _gen_outfilename(self):
        # Convenience wrapper used by _list_outputs.
        return self._gen_filename('output_file')

    def _list_outputs(self):
        """Report the inverted xfm and, if non-linear, its grid volume."""
        outputs = self.output_spec().get()
        outputs['output_file'] = os.path.abspath(self._gen_outfilename())
        assert os.path.exists(outputs['output_file'])
        # Use a context manager instead of the previous bare
        # open(...).read(), which leaked the file handle.
        with open(outputs['output_file'], 'r') as f:
            xfm_text = f.read()
        if 'grid' in xfm_text:
            # Dot escaped so only a literal extension matches.
            outputs['output_grid'] = re.sub(r'\.(nlxfm|xfm)$', '_grid_0.mnc',
                                            outputs['output_file'])
        return outputs
class BigAverageInputSpec(CommandLineInputSpec):
    # Inputs for 'mincbigaverage'. Note the mixed flag style below:
    # most options use double dashes ('--verbose') but robust/tmpdir use
    # a single dash — presumably matching the script's own help; confirm
    # before normalising.
    input_files = InputMultiPath(
        File(exists=True),
        desc='input file(s)',
        mandatory=True,
        sep=' ',
        argstr='%s',
        position=-2)
    # Output filename; auto-generated from the input files when not given.
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_files'],
        hash_files=False,
        name_template='%s_bigaverage.mnc')
    verbose = traits.Bool(
        desc='Print out log messages. Default: False.', argstr='--verbose')
    # Always passed (usedefault=True) so reruns can overwrite outputs.
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='--clobber',
        usedefault=True,
        default_value=True)
    # FIXME Redumentary implementation, various parameters not implemented.
    # TODO!
    output_float = traits.Bool(
        desc='Output files with float precision.', argstr='--float')
    robust = traits.Bool(
        desc=('Perform robust averaging, features that are outside 1 standard'
              'deviation from the mean are downweighted. Works well for noisy'
              'data with artifacts. see the --tmpdir option if you have a'
              'large number of input files.'),
        argstr='-robust')
    # Should Nipype deal with where the temp directory is?
    tmpdir = Directory(desc='temporary files directory', argstr='-tmpdir %s')
    # Standard-deviation image name derived from the input files.
    sd_file = File(
        desc='Place standard deviation image in specified file.',
        argstr='--sdfile %s',
        name_source=['input_files'],
        hash_files=False,
        name_template='%s_bigaverage_stdev.mnc')
class BigAverageOutputSpec(TraitedSpec):
    # Mean volume plus the standard-deviation volume requested via sd_file.
    output_file = File(exists=True, desc='output file')
    sd_file = File(exists=True, desc='standard deviation image')
class BigAverage(CommandLine):
    """Average very large numbers of MINC files in linear time.

    mincbigaverage discretises the problem of averaging either many
    input files or a smaller number of very large files (>1GB each).
    It can also perform "robust" averaging, keeping only the most
    common features by down-weighting outliers beyond one standard
    deviation.

    One advantage of mincbigaverage is that it sidesteps the HDF/netCDF
    limit on the number of open files: with more than about 100 files
    open at once, averaging slows down significantly. It does this via
    an iterative approach and is a direct drop-in replacement for
    mincaverage, although not every mincaverage argument is supported
    yet (they should be).

    This tool is part of the minc-widgets package:

    https://github.com/BIC-MNI/minc-widgets/blob/master/mincbigaverage/mincbigaverage

    Examples
    --------

    >>> from nipype.interfaces.minc import BigAverage
    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data
    >>> files = [nonempty_minc_data(i) for i in range(3)]
    >>> average = BigAverage(input_files=files, output_float=True, robust=True)
    >>> average.run() # doctest: +SKIP
    """
    _cmd = 'mincbigaverage'
    input_spec = BigAverageInputSpec
    output_spec = BigAverageOutputSpec
class ReshapeInputSpec(CommandLineInputSpec):
    # Inputs for 'mincreshape'. Only a small subset of the tool's
    # options is wrapped (see FIXME below).
    input_file = File(
        desc='input file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-2)
    # Output filename; auto-generated from input_file when not given.
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_reshape.mnc')
    verbose = traits.Bool(
        desc='Print out log messages. Default: False.', argstr='-verbose')
    # Always passed (usedefault=True) so reruns can overwrite outputs.
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    # FIXME MANY options not implemented!
    write_short = traits.Bool(
        desc='Convert to short integer data.', argstr='-short')
class ReshapeOutputSpec(TraitedSpec):
    # The reshaped/rewritten volume produced by mincreshape.
    output_file = File(exists=True, desc='output file')
class Reshape(CommandLine):
    """Cut a hyperslab out of a minc file, with dimension reordering.

    Also useful for rewriting a volume with a different format — for
    example, converting the data to short integers (see the example
    below).

    Examples
    --------

    >>> from nipype.interfaces.minc import Reshape
    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data
    >>> input_file = nonempty_minc_data(0)
    >>> reshape_to_short = Reshape(input_file=input_file, write_short=True)
    >>> reshape_to_short.run() # doctest: +SKIP
    """
    _cmd = 'mincreshape'
    input_spec = ReshapeInputSpec
    output_spec = ReshapeOutputSpec
class VolSymmInputSpec(CommandLineInputSpec):
    # Inputs for 'volsymm': input volume, the transform file, and the
    # symmetrised output, in fixed positional order.
    input_file = File(
        desc='input file',
        exists=True,
        mandatory=True,
        argstr='%s',
        position=-3)
    # keep_extension=False: the name_template already supplies '.xfm'.
    trans_file = File(
        desc='output xfm trans file',
        genfile=True,
        argstr='%s',
        position=-2,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_vol_symm.xfm',
        keep_extension=False)
    # Output filename; auto-generated from input_file when not given.
    output_file = File(
        desc='output file',
        genfile=True,
        argstr='%s',
        position=-1,
        name_source=['input_file'],
        hash_files=False,
        name_template='%s_vol_symm.mnc')
    # This is a dummy input (no argstr) — presumably present so grid
    # volumes referenced by input xfms are tracked as dependencies;
    # confirm against callers.
    input_grid_files = InputMultiPath(
        File,
        desc='input grid file(s)',
    )
    verbose = traits.Bool(
        desc='Print out log messages. Default: False.', argstr='-verbose')
    # Always passed (usedefault=True) so reruns can overwrite outputs.
    clobber = traits.Bool(
        desc='Overwrite existing file.',
        argstr='-clobber',
        usedefault=True,
        default_value=True)
    # FIXME MANY options not implemented!
    fit_linear = traits.Bool(desc='Fit using a linear xfm.', argstr='-linear')
    fit_nonlinear = traits.Bool(
        desc='Fit using a non-linear xfm.', argstr='-nonlinear')
    # FIXME This changes the input/output behaviour of trans_file! Split into
    # two separate interfaces?
    nofit = traits.Bool(
        desc='Use the input transformation instead of generating one.',
        argstr='-nofit')
    config_file = File(
        desc=
        'File containing the fitting configuration (nlpfit -help for info).',
        argstr='-config_file %s',
        exists=True)
    x = traits.Bool(desc='Flip volume in x-plane (default).', argstr='-x')
    y = traits.Bool(desc='Flip volume in y-plane.', argstr='-y')
    z = traits.Bool(desc='Flip volume in z-plane.', argstr='-z')
class VolSymmOutputSpec(TraitedSpec):
    # volsymm emits the symmetrised volume plus the transform it used;
    # non-linear fits also produce a deformation grid.
    output_file = File(exists=True, desc='output file')
    trans_file = File(exists=True, desc='xfm trans file')
    # FIXME Is exists=True correct?
    output_grid = File(exists=True, desc='output grid file')
class VolSymm(CommandLine):
    """Make a volume symmetric about an axis either linearly
    and/or nonlinearly. This is done by registering a volume
    to a flipped image of itself.

    This tool is part of the minc-widgets package:

    https://github.com/BIC-MNI/minc-widgets/blob/master/volsymm/volsymm

    Examples
    --------

    >>> from nipype.interfaces.minc import VolSymm
    >>> from nipype.interfaces.minc.testdata import nonempty_minc_data
    >>> input_file = nonempty_minc_data(0)
    >>> volsymm = VolSymm(input_file=input_file)
    >>> volsymm.run() # doctest: +SKIP
    """
    input_spec = VolSymmInputSpec
    output_spec = VolSymmOutputSpec
    _cmd = 'volsymm'

    def _list_outputs(self):
        """Report the standard outputs plus any deformation grid.

        Have to manually check for the grid files: a non-linear fit
        writes a grid volume referenced from the trans file.
        """
        outputs = super(VolSymm, self)._list_outputs()
        if os.path.exists(outputs['trans_file']):
            # Use a context manager instead of the previous bare
            # open(...).read(), which leaked the file handle.
            with open(outputs['trans_file'], 'r') as f:
                trans_text = f.read()
            if 'grid' in trans_text:
                # Dot escaped so only a literal extension matches.
                outputs['output_grid'] = re.sub(r'\.(nlxfm|xfm)$',
                                                '_grid_0.mnc',
                                                outputs['trans_file'])
        return outputs
| nipype/interfaces/minc/minc.py | 111,220 | Average a number of MINC files.
Examples
--------
>>> from nipype.interfaces.minc import Average
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> files = [nonempty_minc_data(i) for i in range(3)]
>>> average = Average(input_files=files, output_file='/tmp/tmp.mnc')
>>> average.run() # doctest: +SKIP
Determine a bounding box of image.
Examples
--------
>>> from nipype.interfaces.minc import BBox
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> file0 = nonempty_minc_data(0)
>>> bbox = BBox(input_file=file0)
>>> bbox.run() # doctest: +SKIP
Extract brain image using BEaST (Brain Extraction using
non-local Segmentation Technique).
Examples
--------
>>> from nipype.interfaces.minc import Beast
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> file0 = nonempty_minc_data(0)
>>> beast = Beast(input_file=file0)
>>> beast.run() # doctest: +SKIP
TODO:
Command-specific options:
-verbose: Enable verbose output.
-positive: Specify mask of positive segmentation (inside mask) instead of the default mask.
-output_selection: Specify file to output selected files.
-count: Specify file to output the patch count.
-mask: Specify a segmentation mask instead of the default mask.
-no_mask: Do not apply a segmentation mask. Perform the segmentation over the entire image.
-no_positive: Do not apply a positive mask.
Generic options for all commands:
-help: Print summary of command-line options and abort
-version: Print version number of program and exit
Copyright (C) 2011 Simon Fristed Eskildsen, Vladimir Fonov,
Pierrick Coupe, Jose V. Manjon
This program comes with ABSOLUTELY NO WARRANTY; for details type 'cat COPYING'.
This is free software, and you are welcome to redistribute it under certain
conditions; type 'cat COPYING' for details.
Usage: mincbeast [options] <library dir> <input> <output>
mincbeast -help
Get this example to work?
https://github.com/BIC-MNI/BEaST/blob/master/README.library
2.3 Source the minc-toolkit (if installed):
$ source /opt/minc/minc-toolkit-config.sh
2.4 Generate library by running:
$ beast_prepareADNIlib -flip <ADNI download directory> <BEaST library directory>
Example:
$ sudo beast_prepareADNIlib -flip Downloads/ADNI /opt/minc/share/beast-library-1.1
3. Test the setup
3.1 Normalize your data
$ beast_normalize -modeldir /opt/minc/share/icbm152_model_09c input.mnc normal.mnc normal.xfm
3.2 Run BEaST
$ mincbeast /opt/minc/share/beast-library-1.1 normal.mnc brainmask.mnc -conf /opt/minc/share/beast-library-1.1/default.2mm.conf -same_res
Hierarchical linear fitting between two files.
The bestlinreg script is part of the EZminc package:
https://github.com/BIC-MNI/EZminc/blob/master/scripts/bestlinreg.pl
Examples
--------
>>> from nipype.interfaces.minc import BestLinReg
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> input_file = nonempty_minc_data(0)
>>> target_file = nonempty_minc_data(1)
>>> linreg = BestLinReg(source=input_file, target=target_file)
>>> linreg.run() # doctest: +SKIP
Average 1000's of MINC files in linear time.
mincbigaverage is designed to discretise the problem of averaging either
a large number of input files or averaging a smaller number of large
files. (>1GB each). There is also some code included to perform "robust"
averaging in which only the most common features are kept via down-weighting
outliers beyond a standard deviation.
One advantage of mincbigaverage is that it avoids issues around the number
of possible open files in HDF/netCDF. In short if you have more than 100
files open at once while averaging things will slow down significantly.
mincbigaverage does this via an iterative approach to averaging files and
is a direct drop in replacement for mincaverage. That said not all the
arguments of mincaverage are supported in mincbigaverage but they should
be.
This tool is part of the minc-widgets package:
https://github.com/BIC-MNI/minc-widgets/blob/master/mincbigaverage/mincbigaverage
Examples
--------
>>> from nipype.interfaces.minc import BigAverage
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> files = [nonempty_minc_data(i) for i in range(3)]
>>> average = BigAverage(input_files=files, output_float=True, robust=True)
>>> average.run() # doctest: +SKIP
Calculate blobs from minc deformation grids.
Examples
--------
>>> from nipype.interfaces.minc import Blob
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> blob = Blob(input_file=minc2Dfile, output_file='/tmp/tmp.mnc', trace=True)
>>> blob.run() # doctest: +SKIP
Convolve an input volume with a Gaussian blurring kernel of
user-defined width. Optionally, the first partial derivatives
and the gradient magnitude volume can be calculated.
Examples
--------
>>> from nipype.interfaces.minc import Blur
>>> from nipype.interfaces.minc.testdata import minc3Dfile
(1) Blur an input volume with a 6mm fwhm isotropic Gaussian
blurring kernel:
>>> blur = Blur(input_file=minc3Dfile, fwhm=6, output_file_base='/tmp/out_6')
>>> blur.run() # doctest: +SKIP
mincblur will create /tmp/out_6_blur.mnc.
(2) Calculate the blurred and gradient magnitude data:
>>> blur = Blur(input_file=minc3Dfile, fwhm=6, gradient=True, output_file_base='/tmp/out_6')
>>> blur.run() # doctest: +SKIP
will create /tmp/out_6_blur.mnc and /tmp/out_6_dxyz.mnc.
(3) Calculate the blurred data, the partial derivative volumes
and the gradient magnitude for the same data:
>>> blur = Blur(input_file=minc3Dfile, fwhm=6, partial=True, output_file_base='/tmp/out_6')
>>> blur.run() # doctest: +SKIP
will create /tmp/out_6_blur.mnc, /tmp/out_6_dx.mnc,
/tmp/out_6_dy.mnc, /tmp/out_6_dz.mnc and /tmp/out_6_dxyz.mnc.
Compute an expression using MINC files as input.
Examples
--------
>>> from nipype.interfaces.minc import Calc
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> file0 = nonempty_minc_data(0)
>>> file1 = nonempty_minc_data(1)
>>> calc = Calc(input_files=[file0, file1], output_file='/tmp/calc.mnc', expression='A[0] + A[1]') # add files together
>>> calc.run() # doctest: +SKIP
convert between MINC 1 to MINC 2 format.
Examples
--------
>>> from nipype.interfaces.minc import Convert
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> c = Convert(input_file=minc2Dfile, output_file='/tmp/out.mnc', two=True) # Convert to MINC2 format.
>>> c.run() # doctest: +SKIP
Copy image values from one MINC file to another. Both the input
and output files must exist, and the images in both files must
have an equal number of dimensions and equal dimension lengths.
NOTE: This program is intended primarily for use with scripts
such as mincedit. It does not follow the typical design rules of
most MINC command-line tools and therefore should be used only
with caution.
Dump a MINC file. Typically used in conjunction with mincgen (see Gen).
Examples
--------
>>> from nipype.interfaces.minc import Dump
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> dump = Dump(input_file=minc2Dfile)
>>> dump.run() # doctest: +SKIP
>>> dump = Dump(input_file=minc2Dfile, output_file='/tmp/out.txt', precision=(3, 4))
>>> dump.run() # doctest: +SKIP
Dump a hyperslab of MINC file data.
Examples
--------
>>> from nipype.interfaces.minc import Extract
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> extract = Extract(input_file=minc2Dfile)
>>> extract.run() # doctest: +SKIP
>>> extract = Extract(input_file=minc2Dfile, start=[3, 10, 5], count=[4, 4, 4]) # extract a 4x4x4 slab at offset [3, 10, 5]
>>> extract.run() # doctest: +SKIP
Generate nonlinear xfms. Currently only identity xfms
are supported!
This tool is part of minc-widgets:
https://github.com/BIC-MNI/minc-widgets/blob/master/gennlxfm/gennlxfm
Examples
--------
>>> from nipype.interfaces.minc import Gennlxfm
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> gennlxfm = Gennlxfm(step=1, like=minc2Dfile)
>>> gennlxfm.run() # doctest: +SKIP
Various mathematical operations supplied by mincmath.
Examples
--------
>>> from nipype.interfaces.minc import Math
>>> from nipype.interfaces.minc.testdata import minc2Dfile
Scale: volume*3.0 + 2:
>>> scale = Math(input_files=[minc2Dfile], scale=(3.0, 2))
>>> scale.run() # doctest: +SKIP
Test if >= 1.5:
>>> gt = Math(input_files=[minc2Dfile], test_gt=1.5)
>>> gt.run() # doctest: +SKIP
Hierarchical non-linear fitting with blurring.
This tool is part of the minc-widgets package:
https://github.com/BIC-MNI/minc-widgets/blob/master/nlpfit/nlpfit
Examples
--------
>>> from nipype.interfaces.minc import NlpFit
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data, nlp_config
>>> from nipype.testing import example_data
>>> source = nonempty_minc_data(0)
>>> target = nonempty_minc_data(1)
>>> source_mask = nonempty_minc_data(2)
>>> config = nlp_config
>>> initial = example_data('minc_initial.xfm')
>>> nlpfit = NlpFit(config_file=config, init_xfm=initial, source_mask=source_mask, source=source, target=target)
>>> nlpfit.run() # doctest: +SKIP
Normalise a file between a max and minimum (possibly)
using two histogram pct's.
Examples
--------
>>> from nipype.interfaces.minc import Norm
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> n = Norm(input_file=minc2Dfile, output_file='/tmp/out.mnc') # Normalise the file.
>>> n.run() # doctest: +SKIP
Not implemented:
-version print version and exit
-verbose be verbose
-noverbose opposite of -verbose [default]
-quiet be quiet
-noquiet opposite of -quiet [default]
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
Generate images from minc files.
Mincpik uses Imagemagick to generate images
from Minc files.
Examples
--------
>>> from nipype.interfaces.minc import Pik
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> file0 = nonempty_minc_data(0)
>>> pik = Pik(input_file=file0, title='foo')
>>> pik .run() # doctest: +SKIP
Resample a minc file.
Examples
--------
>>> from nipype.interfaces.minc import Resample
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> r = Resample(input_file=minc2Dfile, output_file='/tmp/out.mnc') # Resample the file.
>>> r.run() # doctest: +SKIP
not implemented:
-size: synonym for -nelements)
-xsize: synonym for -xnelements
-ysize: synonym for -ynelements
-zsize:                 synonym for -znelements
Cut a hyperslab out of a minc file, with dimension reordering.
This is also useful for rewriting with a different format, for
example converting to short (see example below).
Examples
--------
>>> from nipype.interfaces.minc import Reshape
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> input_file = nonempty_minc_data(0)
>>> reshape_to_short = Reshape(input_file=input_file, write_short=True)
>>> reshape_to_short.run() # doctest: +SKIP
Convert a 2D image, a 3D volumes or a 4D dynamic volumes
written in MINC file format to a 2D, 3D or 4D Ecat7 file.
Examples
--------
>>> from nipype.interfaces.minc import ToEcat
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> c = ToEcat(input_file=minc2Dfile)
>>> c.run() # doctest: +SKIP
>>> c = ToEcat(input_file=minc2Dfile, voxels_as_integers=True)
>>> c.run() # doctest: +SKIP
Dump a chunk of MINC file data. This program is largely
superseded by mincextract (see Extract).
Examples
--------
>>> from nipype.interfaces.minc import ToRaw
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> toraw = ToRaw(input_file=minc2Dfile)
>>> toraw.run() # doctest: +SKIP
>>> toraw = ToRaw(input_file=minc2Dfile, write_range=(0, 100))
>>> toraw.run() # doctest: +SKIP
Make a volume symmetric about an axis either linearly
and/or nonlinearly. This is done by registering a volume
to a flipped image of itself.
This tool is part of the minc-widgets package:
https://github.com/BIC-MNI/minc-widgets/blob/master/volsymm/volsymm
Examples
--------
>>> from nipype.interfaces.minc import VolSymm
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data
>>> input_file = nonempty_minc_data(0)
>>> volsymm = VolSymm(input_file=input_file)
>>> volsymm.run() # doctest: +SKIP
Centre a MINC image's sampling about a point, typically (0,0,0).
Example
--------
>>> from nipype.interfaces.minc import Volcentre
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> vc = Volcentre(input_file=minc2Dfile)
>>> vc.run() # doctest: +SKIP
Not implemented:
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
Changes the steps and starts in order that the output volume
has isotropic sampling.
Examples
--------
>>> from nipype.interfaces.minc import Voliso
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> viso = Voliso(input_file=minc2Dfile, minstep=0.1, avgstep=True)
>>> viso.run() # doctest: +SKIP
Centre a MINC image's sampling about a point, typically (0,0,0).
Examples
--------
>>> from nipype.interfaces.minc import Volpad
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> vp = Volpad(input_file=minc2Dfile, smooth=True, smooth_distance=4)
>>> vp.run() # doctest: +SKIP
Not implemented:
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
| volpad pads a MINC volume
|
| Problems or comments should be sent to: a.janke@gmail.com
Summary of options:
-- General Options -------------------------------------------------------------
-verbose be verbose
-noverbose opposite of -verbose [default]
-clobber clobber existing files
-noclobber opposite of -clobber [default]
-fake do a dry run, (echo cmds only)
-nofake opposite of -fake [default]
Average a number of xfm transforms using matrix logs and exponents.
The program xfmavg calls Octave for numerical work.
This tool is part of the minc-widgets package:
https://github.com/BIC-MNI/minc-widgets/tree/master/xfmavg
Examples
--------
>>> from nipype.interfaces.minc import XfmAvg
>>> from nipype.interfaces.minc.testdata import nonempty_minc_data, nlp_config
>>> from nipype.testing import example_data
>>> xfm1 = example_data('minc_initial.xfm')
>>> xfm2 = example_data('minc_initial.xfm') # cheating for doctest
>>> xfmavg = XfmAvg(input_files=[xfm1, xfm2])
>>> xfmavg.run() # doctest: +SKIP
Concatenate transforms together. The output transformation
is equivalent to applying input1.xfm, then input2.xfm, ..., in
that order.
Examples
--------
>>> from nipype.interfaces.minc import XfmConcat
>>> from nipype.interfaces.minc.testdata import minc2Dfile
>>> conc = XfmConcat(input_files=['input1.xfm', 'input1.xfm'])
>>> conc.run() # doctest: +SKIP
Invert an xfm transform file.
Examples
--------
>>> from nipype.interfaces.minc import XfmAvg
>>> from nipype.testing import example_data
>>> xfm = example_data('minc_initial.xfm')
>>> invert = XfmInvert(input_file=xfm)
>>> invert.run() # doctest: +SKIP
A number of the command line options expect precisely one or two files.
The minc module provides classes for interfacing with the `MINC
<http://www.bic.mni.mcgill.ca/ServicesSoftware/MINC>`_ command line tools. This
module was written to work with MINC version 2.2.00.
Author: Carlo Hamalainen <carlo@carlo-hamalainen.net>
http://carlo-hamalainen.net
-*- coding: utf-8 -*- emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- vi: set ft=python sts=4 ts=4 sw=4 et: FIXME Can we make sure that len(start) == len(count)? See _format_arg in Dump for actual formatting. FIXME Is it sensible to use ignore_nan and propagate_nan at the same time? Document this. FIXME Double-check that these are mutually exclusive? FIXME test this one, the argstr will probably need tweaking, see _format_arg. FIXME mincbbox produces output like -5.000000 -5.000000 -5.000000 4.800000 2.800000 8.800000 so perhaps this would be better returned as a pair of Python lists instead of sending to an output file? FIXME Not implemented, will clash with our parsing of the output? Command-specific options: Options for logging progress. Default = -verbose. -verbose: Write messages indicating progress -quiet: Do not write log messages -debug: Print out debug info. FIXME not implemented: --verbose --fake --lookup ==> arguments to pass to minclookup see _format_arg for actual arg string FIXME tuple of floats? Not voxel values? Man page doesn't specify. FIXME Int is correct? FIXME typo in man page? sagital? e.g. 'foo.mnc' e.g. 'foo' e.g. '/tmp/blah/foo_bluroutput' return os.path.splitext(self.inputs.input_file)[0] + '_bluroutput' FIXME this seems like a bit of a hack. Can we force output_file to show up in cmdline by default, even if it isn't specified in the instantiation of Pik? FIXME Is it sensible to use ignore_nan and propagate_nan at the same time? Document this. FIXME Double-check that these are mutually exclusive? FIXME A whole bunch of the parameters will be mutually exclusive, e.g. surely can't do sqrt and abs at the same time? Or does mincmath do one and then the next? 
Traits that expect a bool (compare two volumes) or constant (manipulate one volume) Traits that expect a single volume FIXME enforce this in _parse_inputs and check for other members Traits that expect precisely two volumes Traits that expect N >= 1 volumes t is unused, what was I trying to do with it? t = self.inputs.__getattribute__(name) This is a dummy input. FIXME Bool? FIXME String? Elements along each dimension. FIXME Ints? Ranges? FIXME Check that this xor behaves correctly. nr elements along each dimension FIXME Is mincresample happy if we only specify one of these, or do we need the requires=...? step size along each dimension FIXME Use the requires=...? start point along each dimension FIXME Use the requires=...? dircos along each dimension FIXME Use the requires=...? Normalisation Options FIXME is this a float? FIXME is this a float? Threshold Options This is a dummy input. FIXME Very bare implementation, none of these are done yet: This is a dummy input. This is a dummy input. FIXME xor these: FIXME Redumentary implementation, various parameters not implemented. TODO! Should Nipype deal with where the temp directory is? FIXME MANY options not implemented! This is a dummy input. FIXME MANY options not implemented! FIXME This changes the input/output behaviour of trans_file! Split into two separate interfaces? FIXME Is exists=True correct? Have to manually check for the grid files. | 18,815 | en | 0.583231 |
"""
A file to contain specific logic to handle version upgrades in Kolibri.
"""
from shutil import rmtree
from django.conf import settings
from kolibri.core.upgrade import version_upgrade
# Before 0.15 we copied static files to the KOLIBRI_HOME directory.
# After 0.15 we read them directly from their source directories.
@version_upgrade(old_version="<0.15.0")
def clear_static_dir():
    """Delete the static-file copy under STATIC_ROOT left by Kolibri < 0.15.

    Since 0.15 static files are read directly from their source directories,
    so the copied tree is stale; ignore_errors tolerates it being absent.
    """
    rmtree(settings.STATIC_ROOT, ignore_errors=True)
| kolibri/core/device/upgrade.py | 443 | A file to contain specific logic to handle version upgrades in Kolibri.
Before 0.15 we copied static files to the KOLIBRI_HOME directory. After 0.15 we read them directly from their source directories. | 203 | en | 0.850235 |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
from optimizer_test_util import clip_grad_norm_np
import oneflow as flow
def compare_with_numpy_lamb(
    test_case,
    device,
    x_shape,
    learning_rate,
    train_iters,
    betas,
    weight_decay,
    eps,
    do_bias_correction,
    adam_w_mode,
    clip_grad_max_norm,
    clip_grad_norm_type,
):
    """Train one parameter with oneflow's graph-mode LAMB optimizer and check
    the result against a NumPy re-implementation of the update rule.

    ``clip_grad_max_norm == -1`` disables gradient clipping; ``adam_w_mode``
    selects decoupled weight decay (AdamW-style) over plain L2 regularisation.
    """
    np.random.seed(1000)
    random_grad_seq = []
    for _ in range(train_iters):
        random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = np.random.uniform(size=x_shape).astype(np.float32)
    class CustomModule(flow.nn.Module):
        # Single learnable parameter; the "gradient" is injected via the mask
        # multiplied in forward(), since loss = sum(param * mask).
        def __init__(self):
            super().__init__()
            self.param = flow.nn.Parameter(
                flow.Tensor(init_value, device=flow.device(device))
            )
        def forward(self, mask):
            return self.param * mask
    simp_module = CustomModule()
    simp_module.to(device)
    simp_module.train()
    optim_kwargs = {
        "params": simp_module.parameters(),
        "lr": learning_rate,
        "betas": betas,
        "eps": eps,
        "weight_decay": weight_decay,
        "adam_w_mode": adam_w_mode,
        "do_bias_correction": do_bias_correction,
    }
    if clip_grad_max_norm != -1:
        optim_kwargs["clip_grad_max_norm"] = clip_grad_max_norm
        optim_kwargs["clip_grad_norm_type"] = clip_grad_norm_type
    lamb_optim = flow.optim.LAMB([optim_kwargs])
    class CustomLambGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.m = simp_module
            self.add_optimizer(lamb_optim)
        def build(self, mask_tensor):
            loss = flow.sum(self.m(mask_tensor))
            loss.backward()
            return loss
    lamb_graph = CustomLambGraph()
    for i in range(train_iters):
        mask_tensor = flow.tensor(
            random_grad_seq[i],
            dtype=flow.float32,
            requires_grad=False,
            device=flow.device(device),
        )
        lamb_graph(mask_tensor)
    of_res = simp_module.param.numpy()
    def train_by_numpy():
        # NumPy reference implementation of the LAMB update sequence.
        x = init_value
        mt = np.zeros_like(x)
        vt = np.zeros_like(x)
        beta1 = betas[0]
        beta2 = betas[1]
        if adam_w_mode:
            # AdamW: decay applied directly to the parameter, not the gradient.
            l2 = 0
            wd = weight_decay
        else:
            # Plain L2: decay folded into the gradient.
            l2 = weight_decay
            wd = 0
        def np_train_one_iter(step, grad):
            if clip_grad_max_norm != -1:
                _, grad = clip_grad_norm_np(
                    grad, clip_grad_max_norm, clip_grad_norm_type
                )
            grad = grad + l2 * x
            bias_correction1 = 1.0
            bias_correction2 = 1.0
            if do_bias_correction:
                bias_correction1 = 1.0 - np.power(beta1, step + 1)
                bias_correction2 = 1.0 - np.power(beta2, step + 1)
            m = beta1 * mt + (1 - beta1) * grad
            v = beta2 * vt + (1 - beta2) * grad * grad
            denom = np.sqrt(v) / np.sqrt(bias_correction2) + eps
            adam_diff = m / bias_correction1 / denom
            # Layer-wise trust ratio ||w|| / ||update||; 1.0 when degenerate.
            w_norm = np.linalg.norm(x, ord=2)
            g_norm = np.linalg.norm(adam_diff, ord=2)
            if w_norm > 0 and g_norm > 0:
                trust_ratio = w_norm / g_norm
            else:
                trust_ratio = 1.0
            param = x - learning_rate * trust_ratio * (adam_diff + wd * x)
            return (param, m, v)
        for i in range(train_iters):
            (x, mt, vt) = np_train_one_iter(i, random_grad_seq[i])
        return x
    np_res = train_by_numpy()
    test_case.assertTrue(
        np.allclose(of_res.flatten(), np_res.flatten(), rtol=1e-3, atol=1e-3)
    )
@flow.unittest.skip_unless_1n1d()
class TestLamb(flow.unittest.TestCase):
    def test_lamb(test_case):
        """Sweep LAMB hyper-parameter combinations against the NumPy reference."""
        # NOTE(l1aoxingyu): max_norm = -1 means no clip grad
        # nn.Graph only support `clip_grad_max_norm == 1.0` and `clip_grad_norm_type == 2.0`
        arg_dict = OrderedDict(
            [
                ("device", ["cpu", "cuda"]),
                ("x_shape", [(10,)]),
                ("learning_rate", [0.1, 1e-3]),
                ("train_iters", [10]),
                ("betas", [(0.99, 0.9)]),
                ("weight_decay", [0.001, 0.1]),
                ("eps", [1e-8, 1e-6]),
                ("do_bias_correction", [True, False]),
                ("adam_w_mode", [True, False]),
                ("clip_grad_max_norm", [-1, 1.0]),
                ("clip_grad_norm_type", [2.0]),
            ]
        )
        for arg in GenArgList(arg_dict):
            compare_with_numpy_lamb(test_case, *arg)
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
| python/oneflow/test/graph/test_graph_optim_lamb.py | 5,311 | Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
NOTE(l1aoxingyu): max_norm = -1 means no clip grad nn.Graph only support `clip_grad_max_norm == 1.0` and `clip_grad_norm_type == 2.0` | 717 | en | 0.842175 |
#-
# Copyright (c) 2016 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase, attr
@attr('capabilities')
@attr('cached')
class test_cp2_c0_sc(BaseBERITestCase):
    """SC through an unaligned DDC must not trap when the overall effective address is aligned."""
    EXPECTED_EXCEPTIONS = 0  # the simulated trace must contain no exceptions
    def test_cp2_c0_sc_1(self):
        # NOTE(review): $s0 presumably holds the trap-info record written by
        # the assembly side of this test -- confirm against the matching .s file.
        self.assertTrapInfoNoTrap(self.MIPS.s0, "An exception was raised by SC with an unaligned DDC (but address overall is aligned)")
| tests/cp2/test_cp2_c0_sc.py | 1,460 | - Copyright (c) 2016 Michael Roe All rights reserved. This software was developed by SRI International and the University of Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. @BERI_LICENSE_HEADER_START@ Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. BERI licenses this file to you under the BERI Hardware-Software License, Version 1.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: http://www.beri-open-systems.org/legal/license-1-0.txt Unless required by applicable law or agreed to in writing, Work distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. @BERI_LICENSE_HEADER_END@ | 1,079 | en | 0.850213 |
from graphql import Undefined
from .mountedtype import MountedType
from .structures import NonNull
from .utils import get_type
class InputField(MountedType):
    """Mounts a type as an input field on a GraphQL input object type.

    Any type except Interface and Union can be mounted as an input field:
    Object Type, Scalar Type or Enum. Unlike regular ``graphene.Field``,
    input fields cannot carry arguments of their own.

    All class attributes of ``graphene.InputObjectType`` are implicitly
    mounted as InputField using the arguments below.

    .. code:: python

        from graphene import InputObjectType, String, InputField

        class Person(InputObjectType):
            # implicitly mounted as Input Field
            first_name = String(required=True)
            # explicitly mounted as Input Field
            last_name = InputField(String, description="Surname")

    args:
        type (class for a graphene.UnmountedType): Class (not an instance) of
            an unmounted graphene type (ex. scalar or object) used as the type
            of this field in the GraphQL schema.
        name (optional, str): Name of the GraphQL input field (must be unique
            in a type). Defaults to the attribute name.
        default_value (optional, Any): Default value used as input if none is
            set in the user operation (query, mutation, etc.).
        deprecation_reason (optional, str): Marks the field as deprecated and
            may tell clients how or why to migrate.
        description (optional, str): Description of the GraphQL field in the
            schema.
        required (optional, bool): Marks this input field as non-null in the
            GraphQL schema; a validation error is raised if it is not
            provided. Same behavior as graphene.NonNull. Defaults to False.
        **extra_args (optional, Dict): Not used.
    """

    def __init__(
        self,
        type_,
        name=None,
        default_value=Undefined,
        deprecation_reason=None,
        description=None,
        required=False,
        _creation_counter=None,
        **extra_args
    ):
        super(InputField, self).__init__(_creation_counter=_creation_counter)
        self.name = name
        # `required=True` is shorthand for wrapping the type in NonNull.
        self._type = NonNull(type_) if required else type_
        self.deprecation_reason = deprecation_reason
        self.default_value = default_value
        self.description = description

    @property
    def type(self):
        # The stored type is unwrapped through utils.get_type on access.
        return get_type(self._type)
| graphene/types/inputfield.py | 2,617 | Makes a field available on an ObjectType in the GraphQL schema. Any type can be mounted as a
Input Field except Interface and Union:
- Object Type
- Scalar Type
- Enum
Input object types also can't have arguments on their input fields, unlike regular ``graphene.Field``.
All class attributes of ``graphene.InputObjectType`` are implicitly mounted as InputField
using the below arguments.
.. code:: python
from graphene import InputObjectType, String, InputField
class Person(InputObjectType):
# implicitly mounted as Input Field
first_name = String(required=True)
# explicitly mounted as Input Field
last_name = InputField(String, description="Surname")
args:
type (class for a graphene.UnmountedType): Must be a class (not an instance) of an
unmounted graphene type (ex. scalar or object) which is used for the type of this
field in the GraphQL schema.
name (optional, str): Name of the GraphQL input field (must be unique in a type).
Defaults to attribute name.
default_value (optional, Any): Default value to use as input if none set in user operation (
query, mutation, etc.).
deprecation_reason (optional, str): Setting this value indicates that the field is
depreciated and may provide instruction or reason on how for clients to proceed.
description (optional, str): Description of the GraphQL field in the schema.
required (optional, bool): Indicates this input field as not null in the graphql schema.
Raises a validation error if argument not provided. Same behavior as graphene.NonNull.
Default False.
**extra_args (optional, Dict): Not used. | 1,684 | en | 0.758379 |
# -*- coding: utf-8 -*-
from cms.exceptions import CMSDeprecationWarning
from django.conf import settings
from patch import post_patch, post_patch_check, pre_patch
import warnings
def patch_settings():
    """Merge settings with global cms settings, so all required attributes
    will exist. Never override, just append non existing settings.
    Also check for setting inconsistencies if settings.DEBUG
    """
    if patch_settings.ALREADY_PATCHED:
        # Idempotent: only patch once per process.
        return
    patch_settings.ALREADY_PATCHED = True
    if getattr(settings, 'CMS_MODERATOR', False):
        warnings.warn("CMS_MODERATOR will be removed and replaced in django CMS 2.4!", CMSDeprecationWarning)
    from cms.conf import global_settings
    # patch settings
    pre_patch()
    # merge with global cms settings
    # Only copy ALL-UPPERCASE names (Django's settings convention) that the
    # project has not defined itself -- user settings are never overridden.
    for attr in dir(global_settings):
        if attr == attr.upper() and not hasattr(settings, attr):
            setattr(settings._wrapped, attr, getattr(global_settings, attr))
    post_patch()
    if settings.DEBUG:
        # check if settings are correct, call this only if debugging is enabled
        post_patch_check()
# Module-level flag backing the run-once guard above.
patch_settings.ALREADY_PATCHED = False
| cms/conf/__init__.py | 1,185 | Merge settings with global cms settings, so all required attributes
will exist. Never override, just append non existing settings.
Also check for setting inconsistencies if settings.DEBUG
-*- coding: utf-8 -*- patch settings merge with global cms settings check if settings are correct, call this only if debugging is enabled | 328 | en | 0.788147 |
#!/usr/bin/env py.test
# -*- coding: utf-8 -*-
__author__ = "Varun Nayyar <nayyarv@gmail.com>"
import numpy as np
import pytest
import NN.layerversions.layers4 as layer
def test_fc():
    """FullyConnected forward maps (N, in) to (N, out) and caches its input."""
    fc = layer.FullyConnected(5, 10)
    batch = np.ones((100, 5))
    out, cache = fc.forward(batch)
    assert out.shape == (100, 10)
    assert np.all(cache == batch)
def test_tanh():
    """Tanh forward keeps the input shape and caches its own output."""
    act = layer.Tanh()
    batch = np.ones((100, 5))
    out, cache = act.forward(batch)
    assert out.shape == (100, 5)
    assert np.all(cache == out)
@pytest.fixture()
def optim():
    """Pytest fixture: an SGD optimiser with a fixed learning rate of 0.01."""
    return layer.sgd_optimiser(0.01)
def test_back_fc(optim):
    """FullyConnected backward maps (N, out) gradients back to (N, in)."""
    fc = layer.FullyConnected(5, 10)
    batch = np.ones((100, 5))
    upstream = np.random.randn(100, 10)
    grad_in = fc.backward(upstream, batch, optim)
    assert grad_in.shape == (100, 5)
def test_back_tanh(optim):
    """Tanh backward preserves the input shape.

    BUGFIX: the original body referenced the module-level fixture *function*
    ``optim`` and passed that function object to ``backward``; declaring
    ``optim`` as a parameter makes pytest inject the actual optimiser value,
    matching ``test_back_fc``.
    """
    act = layer.Tanh()
    x = np.random.randn(100, 5)
    upstream = np.random.randn(100, 5)
    grad_in = act.backward(upstream, np.tanh(x), optim)
    assert grad_in.shape == (100, 5)
def test_network():
    """Training longer on a fixed dataset should reduce the MSE loss."""
    from NN.loss import MSELoss
    x = np.random.randn(100, 10)
    y = np.random.randn(100, 3)
    net = layer.Network(
        layer.FullyConnected(10, 20),
        layer.Tanh(),
        layer.FullyConnected(20, 3),
        layer.Tanh()
    )
    mse = MSELoss()
    layer.train(net, (x, y), 10)
    yhat, _ = net.forward(x)
    initloss = mse.loss(y, yhat)
    layer.train(net, (x, y), 10)
    yhat, _ = net.forward(x)
    # CONSISTENCY FIX: use the same (target, prediction) argument order as the
    # first measurement above (the original mixed `loss(y, yhat)` and
    # `loss(yhat, y)`; MSE is symmetric, but the inconsistency was confusing).
    finloss = mse.loss(y, yhat)
    assert initloss > finloss
| tests/test_layers4.py | 1,460 | !/usr/bin/env py.test -*- coding: utf-8 -*- | 44 | en | 0.300577 |
import unittest
class IntcodeComputer():
    """Advent of Code 2019 Intcode virtual machine.

    Keeps an auto-growing ``memory`` list, a FIFO ``inputs`` queue and a
    single-value ``output`` slot. ``run()`` executes until the program halts
    (returns 0), emits an output (returns 1; call ``run()`` again to resume),
    or the instruction pointer walks past the end of memory (the machine
    resets itself and returns -1).

    BUGFIX: OP_INPUT now consumes queued inputs first-in-first-out. The
    original used ``list.pop()`` (LIFO), which reversed the order of
    multi-input programs even though ``add_input`` appends like a queue.
    """

    # Opcodes.
    OP_ADD = 1
    OP_MULTIPLY = 2
    OP_INPUT = 3
    OP_OUTPUT = 4
    OP_JUMP_TRUE = 5
    OP_JUMP_FALSE = 6
    OP_LESS_THAN = 7
    OP_EQUALS = 8
    OP_MOD_REL = 9
    OP_HALT = 99

    # Parameter modes (single chars of the zero-padded opcode prefix).
    PARAM_MODE_POS = '0'
    PARAM_MODE_IMD = '1'
    PARAM_MODE_REL = '2'

    NOUN_ADDR = 1
    VERB_ADDR = 2
    RESULT_ADDR = 0
    START_ADDR = 0
    INIT_VAL = 0

    def __init__(self, data=None):
        # BUGFIX: the original default was a mutable list (data=[]); None
        # keeps call compatibility while avoiding the shared-default trap.
        self.inputs = []
        self.memory = []
        self.initial_memory = []
        if data:
            self.load_memory(data)

    def load_memory(self, data):
        """Parse and store `data` as the pristine program, then reset."""
        self.initial_memory = self.normalize_memory(data)
        self.reset()

    def expand_memory(self, addr):
        """Grow memory with INIT_VAL cells so that `addr` becomes valid."""
        needed_mem = addr - (len(self.memory) - 1)
        if needed_mem > 0:
            self.memory += ([self.INIT_VAL] * needed_mem)
        else:
            raise Exception(f'Cannot expand memory for addr {addr}')

    def check_addr(self, addr):
        """Validate `addr` (growing memory on demand) and return it."""
        if addr < 0:
            raise Exception(f'Addr {addr}, cannot be negative')
        if addr >= len(self.memory):
            self.expand_memory(addr)
        return addr

    def reset(self):
        """Restore the pristine program and clear all runtime state."""
        if self.memory:
            del self.memory[:]
        self.memory = self.initial_memory.copy()
        if self.inputs:
            del self.inputs[:]
        self.inputs = []
        self.output = None
        self.last_input = None
        self.instruction_ptr = self.START_ADDR
        self.relative_base = self.START_ADDR

    def add_input(self, data):
        """Queue a value for OP_INPUT; values are consumed first-in-first-out."""
        self.inputs.append(data)

    def print_program(self):
        """Print the noun/verb pair at addresses 1 and 2 as a 4-digit code."""
        print("Program: {:02d}{:02d}".format(self.memory[self.NOUN_ADDR], self.memory[self.VERB_ADDR]))

    def normalize_memory(self, intcode):
        """Accept a comma-separated string or a list (of str or int) of cells."""
        if type(intcode) is str:
            return list(map(int, intcode.split(',')))
        elif type(intcode) is list:
            if type(intcode[0]) is str:
                return list(map(int, intcode))
            else:
                return intcode
        else:
            raise Exception('Corrupt intcode')

    def get_paramater(self, mode):
        """Read the next parameter cell and resolve it according to `mode`."""
        param = self.memory[self.instruction_ptr]
        self.instruction_ptr += 1
        if mode == self.PARAM_MODE_POS:
            return self.memory[self.check_addr(param)]
        if mode == self.PARAM_MODE_REL:
            return self.memory[self.check_addr(self.relative_base + param)]
        if mode == self.PARAM_MODE_IMD:
            return param
        raise Exception(f"Unkown paramater mode: {param}")

    def set_paramater(self, mode, data):
        """Write `data` through the next parameter cell according to `mode`."""
        param = self.memory[self.instruction_ptr]
        self.instruction_ptr += 1
        if mode == self.PARAM_MODE_POS:
            self.memory[self.check_addr(param)] = data
        elif mode == self.PARAM_MODE_REL:
            self.memory[self.check_addr(self.relative_base + param)] = data
        elif mode == self.PARAM_MODE_IMD:
            raise Exception("Set paramater can't be in immediate mode")
        else:
            raise Exception(f"Unkown paramater mode: {param}")

    def parse_opcode(self):
        """Split the current cell into (parameter modes, opcode) and advance.

        The cell is zero-padded to five digits "MMMOO"; the three mode digits
        are reversed so that modes[i] is the mode of the i-th parameter.
        """
        mode_opcode_str = '{:>05}'.format(str(self.memory[self.instruction_ptr]))
        modes = mode_opcode_str[:3][::-1]
        opcode = int(mode_opcode_str[3:])
        self.instruction_ptr += 1
        return modes, opcode

    def run(self):
        """Execute until halt (0), output (1), or end of memory (-1).

        When the input queue is empty, OP_INPUT deliberately reuses the last
        consumed value; it raises only if no input was ever provided.
        """
        self.output = None
        while self.instruction_ptr < len(self.memory):
            param_mode, opcode = self.parse_opcode()
            if opcode == self.OP_HALT:
                return 0
            elif opcode == self.OP_ADD:
                in1 = self.get_paramater(param_mode[0])
                in2 = self.get_paramater(param_mode[1])
                self.set_paramater(param_mode[2], in1 + in2)
            elif opcode == self.OP_MULTIPLY:
                in1 = self.get_paramater(param_mode[0])
                in2 = self.get_paramater(param_mode[1])
                self.set_paramater(param_mode[2], in1 * in2)
            elif opcode == self.OP_INPUT:
                if self.inputs:
                    # BUGFIX: FIFO consumption (pop(0)); plain pop() reversed
                    # the order of queued inputs.
                    self.last_input = self.inputs.pop(0)
                if self.last_input is not None:
                    self.set_paramater(param_mode[0], self.last_input)
                else:
                    raise Exception(f"{self.last_input} is not a valid input")
            elif opcode == self.OP_OUTPUT:
                self.output = self.get_paramater(param_mode[0])
                return 1
            elif opcode == self.OP_JUMP_TRUE:
                do_jump = self.get_paramater(param_mode[0])
                new_addr = self.get_paramater(param_mode[1])
                if do_jump != 0:
                    self.instruction_ptr = new_addr
            elif opcode == self.OP_JUMP_FALSE:
                do_jump = self.get_paramater(param_mode[0])
                new_addr = self.get_paramater(param_mode[1])
                if do_jump == 0:
                    self.instruction_ptr = new_addr
            elif opcode == self.OP_LESS_THAN:
                in1 = self.get_paramater(param_mode[0])
                in2 = self.get_paramater(param_mode[1])
                self.set_paramater(param_mode[2], 1 if in1 < in2 else 0)
            elif opcode == self.OP_EQUALS:
                in1 = self.get_paramater(param_mode[0])
                in2 = self.get_paramater(param_mode[1])
                self.set_paramater(param_mode[2], 1 if in1 == in2 else 0)
            elif opcode == self.OP_MOD_REL:
                self.relative_base += self.get_paramater(param_mode[0])
            else:
                raise Exception(f'Unknown opcode {opcode} at addr {self.instruction_ptr}.')
        # Fell off the end of memory without halting: reset and signal error.
        self.reset()
        return -1
| AoC 2019/intcode.py | 6,642 | Reverse of the first three chars integer of the last two chars | 62 | en | 0.847323 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListDatabaseAccountKeysResult',
'AwaitableListDatabaseAccountKeysResult',
'list_database_account_keys',
]
@pulumi.output_type
class ListDatabaseAccountKeysResult:
    """
    The access keys for the given database account.
    """
    # Generated by the Pulumi SDK Generator (see file header): each key is
    # validated to be a str and stored via pulumi.set for the typed getters.
    def __init__(__self__, primary_master_key=None, primary_readonly_master_key=None, secondary_master_key=None, secondary_readonly_master_key=None):
        if primary_master_key and not isinstance(primary_master_key, str):
            raise TypeError("Expected argument 'primary_master_key' to be a str")
        pulumi.set(__self__, "primary_master_key", primary_master_key)
        if primary_readonly_master_key and not isinstance(primary_readonly_master_key, str):
            raise TypeError("Expected argument 'primary_readonly_master_key' to be a str")
        pulumi.set(__self__, "primary_readonly_master_key", primary_readonly_master_key)
        if secondary_master_key and not isinstance(secondary_master_key, str):
            raise TypeError("Expected argument 'secondary_master_key' to be a str")
        pulumi.set(__self__, "secondary_master_key", secondary_master_key)
        if secondary_readonly_master_key and not isinstance(secondary_readonly_master_key, str):
            raise TypeError("Expected argument 'secondary_readonly_master_key' to be a str")
        pulumi.set(__self__, "secondary_readonly_master_key", secondary_readonly_master_key)
    @property
    @pulumi.getter(name="primaryMasterKey")
    def primary_master_key(self) -> str:
        """
        Base 64 encoded value of the primary read-write key.
        """
        return pulumi.get(self, "primary_master_key")
    @property
    @pulumi.getter(name="primaryReadonlyMasterKey")
    def primary_readonly_master_key(self) -> str:
        """
        Base 64 encoded value of the primary read-only key.
        """
        return pulumi.get(self, "primary_readonly_master_key")
    @property
    @pulumi.getter(name="secondaryMasterKey")
    def secondary_master_key(self) -> str:
        """
        Base 64 encoded value of the secondary read-write key.
        """
        return pulumi.get(self, "secondary_master_key")
    @property
    @pulumi.getter(name="secondaryReadonlyMasterKey")
    def secondary_readonly_master_key(self) -> str:
        """
        Base 64 encoded value of the secondary read-only key.
        """
        return pulumi.get(self, "secondary_readonly_master_key")
class AwaitableListDatabaseAccountKeysResult(ListDatabaseAccountKeysResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result awaitable: the dead `yield` marks this as a
        # generator, and awaiting immediately returns a plain
        # ListDatabaseAccountKeysResult copy of this object's fields.
        if False:
            yield self
        return ListDatabaseAccountKeysResult(
            primary_master_key=self.primary_master_key,
            primary_readonly_master_key=self.primary_readonly_master_key,
            secondary_master_key=self.secondary_master_key,
            secondary_readonly_master_key=self.secondary_readonly_master_key)
def list_database_account_keys(account_name: Optional[str] = None,
                               resource_group_name: Optional[str] = None,
                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDatabaseAccountKeysResult:
    """
    The access keys for the given database account.
    :param str account_name: Cosmos DB database account name.
    :param str resource_group_name: Name of an Azure resource group.
    """
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's version when the caller does not pin one.
        opts.version = _utilities.get_version()
    # Synchronous invoke of the Azure Native provider token for this API version.
    __ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20191212:listDatabaseAccountKeys', __args__, opts=opts, typ=ListDatabaseAccountKeysResult).value
    return AwaitableListDatabaseAccountKeysResult(
        primary_master_key=__ret__.primary_master_key,
        primary_readonly_master_key=__ret__.primary_readonly_master_key,
        secondary_master_key=__ret__.secondary_master_key,
        secondary_readonly_master_key=__ret__.secondary_readonly_master_key)
| sdk/python/pulumi_azure_native/documentdb/v20191212/list_database_account_keys.py | 4,465 | The access keys for the given database account.
The access keys for the given database account.
:param str account_name: Cosmos DB database account name.
:param str resource_group_name: Name of an Azure resource group.
Base 64 encoded value of the primary read-write key.
Base 64 encoded value of the primary read-only key.
Base 64 encoded value of the secondary read-write key.
Base 64 encoded value of the secondary read-only key.
coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** pylint: disable=using-constant-test | 634 | en | 0.840372 |
import pytest
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_almost_equal, assert_equal)
from nose.tools import assert_raises
from pystruct.models import NodeTypeEdgeFeatureGraphCRF, EdgeFeatureGraphCRF
from pystruct.inference.linear_programming import lp_general_graph
from pystruct.inference import compute_energy, get_installed
from pystruct.utils import make_grid_edges, edge_list_to_features
from pystruct.datasets import generate_blocks_multinomial
def test_checks():
    """Constructor sanity checks: consistent argument combinations are
    accepted, inconsistent ones raise ValueError.

    Note: the original version repeated the "3 types vs 2-length lists"
    check twice verbatim; the duplicate has been removed.
    """
    # Valid: a single node type with 4 labels and 3 node features.
    NodeTypeEdgeFeatureGraphCRF(
        1,                            # how many node types?
        [4],                          # how many labels per node type?
        [3],                          # how many features per node type?
        np.array([[3]]))              # how many edge features per type x type?

    # Valid: two node types.
    NodeTypeEdgeFeatureGraphCRF(
        2,
        [2, 3],
        [4, 5],
        np.array([[1, 2], [2, 4]]))

    # Invalid: 3 node types but per-type lists of length 2.
    with pytest.raises(ValueError):
        NodeTypeEdgeFeatureGraphCRF(
            3,
            [2, 3],
            [4, 5],
            np.array([[1, 2], [2, 4]]))

    # Invalid: feature list has 3 entries for 2 node types.
    with pytest.raises(ValueError):
        NodeTypeEdgeFeatureGraphCRF(
            2,
            [2, 3],
            [4, 5, 3],
            np.array([[1, 2], [2, 4]]))

    # Invalid: edge-feature matrix is 2x3 instead of 2x2.
    with pytest.raises(ValueError):
        NodeTypeEdgeFeatureGraphCRF(
            2,
            [2, 3],
            [4, 5],
            np.array([[1, 2, 3], [2, 3, 4]]))

    # Invalid: asymmetric edge-feature matrix (presumably it must be
    # symmetric) is rejected.
    with pytest.raises(ValueError):
        NodeTypeEdgeFeatureGraphCRF(
            2,
            [2, 3],
            [4, 5],
            np.array([[1, 2], [99, 4]]))
def debug_joint_feature():
    """Manual check of joint_feature on a small two-type graph.

    Kept for interactive debugging; not run by the default test suite.
    """
    # Two node types: type 0 has 2 labels / 3 features, type 1 has
    # 3 labels / 4 features.
    g = NodeTypeEdgeFeatureGraphCRF(
                    2                   #how many node type?
                  , [2, 3]              #how many possible labels per node type?
                  , [3, 4]              #how many features per node type?
                  , np.array([ [1, 2]
                             , [2, 3]]) #how many features per node type X node type?
                  )
    # Node features: one array per node type.
    l_node_f = [  np.array([ [1,1,1], [2,2,2] ])
                , np.array([ [.11, .12, .13, .14], [.21, .22, .23, .24], [.31, .32, .33, .34]])
               ]
    # Edges per (source type, target type) pair, in the order
    # (0,0), (0,1), (1,0), (1,1).
    l_edges = [ np.array([[0, 1]])  # type 0 node 0 to type 0 node 1
              , np.array([[0, 1]])
              , None
              , None
              ]
    # Edge features, aligned with l_edges.
    l_edge_f = [  np.array([[.111]])
                , np.array([[.221, .222]])
                , None
                , None
               ]
    x = (l_node_f, l_edges, l_edge_f)
    # Labels are given per type and flattened; each type's labels start at 0.
    y = np.hstack([ np.array([0, 1]),
                    np.array([0, 1, 2])
                  ])
    g.initialize(x, y)
    jf = g.joint_feature(x,y)
    # Self-comparison: trivially true, but fails if the result contains NaN.
    assert_array_equal(jf, jf)
    # Unary block (node features under their label) followed by the pairwise
    # block (edge features per state pair).
    assert_array_almost_equal(jf
            , np.array(
    [ 1.   , 1., 1.   , 2., 2., 2.
    , 0.11 , 0.12 , 0.13 , 0.14 , 0.21 , 0.22 , 0.23 , 0.24 , 0.31 , 0.32 , 0.33 , 0.34
    , 0.   , 0.111, 0.   , 0.   , 0.   , 0.221,
      0.   , 0.   , 0.   , 0.   , 0.   , 0.222, 0.   , 0.   ,
      0.   , 0.   , 0.   , 0.   , 0.   , 0.   , 0.   , 0.   ,
      0.   , 0.   , 0.   , 0.   , 0.   , 0.   , 0.   , 0.   ,
      0.   , 0.   , 0.   , 0.   , 0.   , 0.   , 0.   , 0.   ,
      0.   , 0.   , 0.   , 0.   , 0.   , 0.   , 0.   , 0.   ,
      0.   , 0.   , 0.   , 0.   , 0.   , 0.   , 0.   , 0.   , 0.
    ]))
def get_simple_graph_structure():
    """Return a single-type CRF: 4 labels, 3 node features, 3 edge features."""
    return NodeTypeEdgeFeatureGraphCRF(
        1,                # number of node types
        [4],              # labels per node type
        [3],              # node features per node type
        np.array([[3]]),  # edge features per (type, type) pair
    )
def get_simple_graph():
    """Return (node_features, edges, edge_features) lists for a 2-node,
    single-edge graph with one node type."""
    node_features = [np.array([[1, 1, 1],
                               [2, 2, 2]])]
    edge_index = [np.array([[0, 1]])]        # one edge: node 0 -> node 1
    edge_features = [np.array([[3, 3, 3]])]
    return node_features, edge_index, edge_features
def get_simple_graph2():
    """Like get_simple_graph, but with a second edge (a self-loop on node 0)."""
    node_features = [np.array([[1, 1, 1],
                               [2, 2, 2]])]
    edge_index = [np.array([[0, 1],     # edge from node 0 to node 1
                            [0, 0]])]   # self-loop on node 0
    edge_features = [np.array([[3, 3, 3],
                               [4, 4, 4]])]
    return node_features, edge_index, edge_features
def test_flatten_unflattenY():
    """flattenY/unflattenY must be inverses, and unflattenY must validate the
    node counts in X.

    Uses pytest.raises instead of the previously-used nose assert_raises
    (nose is unmaintained and pytest is already imported by this module).
    """
    g, (node_f, edges, edge_f) = get_simple_graph_structure(), get_simple_graph()

    y = np.array([1, 2])
    l_nf = [np.zeros((2, 3))]        # node features per type; no edges given
    X = (l_nf, None, None)
    y_ref = [np.array([1, 2])]
    assert all((y_typ1 == y_typ2).all()
               for y_typ1, y_typ2 in zip(g.unflattenY(X, y), y_ref))
    assert (y == g.flattenY(g.unflattenY(X, y))).all()

    # ============================================
    g, x, y = more_complex_graph()
    Y = [np.array([0, 0]),
         np.array([0, 0, 0])]        # per-type labels restart at zero
    # Flattened form offsets type-1 labels by the 2 labels of type 0.
    y = np.hstack([np.array([0, 0]),
                   2 + np.array([0, 0, 0])])
    l_nf = [np.zeros((2, 3)), np.zeros((3, 4))]  # 2 nodes / 3 feats, 3 nodes / 4 feats
    X = (l_nf, None, None)
    assert (g.flattenY(Y) == y).all()
    assert all((y_typ1 == y_typ2).all()
               for y_typ1, y_typ2 in zip(g.unflattenY(X, y), Y))

    # Wrong node count for type 0 must be rejected.
    l_nf = [np.zeros((1, 3)), np.zeros((3, 4))]
    X = (l_nf, None, None)
    with pytest.raises(ValueError):
        g.unflattenY(X, y)
def test_joint_feature():
    """joint_feature on single-type graphs: node features accumulate in the
    unary slot of their label; edge features accumulate in the pairwise slot
    of the (source label, target label) pair."""
    # ---SIMPLE---------------------------------------------------------------
    g, (node_f, edges, edge_f) = get_simple_graph_structure(), get_simple_graph()

    x = (node_f, edges, edge_f)
    y = np.array([1,2])
    g.initialize(x, y)
    jf = g.joint_feature(x,y)
    # Unary block: node 0's features under label 1, node 1's under label 2;
    # the 3s in the pairwise block come from the single edge at (1, 2).
    assert_array_equal(g.joint_feature(x,y)
                       , np.array([ 0.,  0.,  0.,  1.,  1.,  1.,  2.,  2.,  2.,  0.,  0.,  0.
                                , 0.,
            0.,  0.,  0.,  0.,  0.,  3.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
            0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  3.,  0.,  0.,  0.,  0.,
            0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  3.,  0.,
            0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.])
                       )

    # Both nodes labelled 0: their features add up in unary slot 0 and the
    # edge features land at (0, 0).
    y = np.array([0,0])
    g.initialize(x, y)
    jf = g.joint_feature(x,y)
    assert_array_equal(g.joint_feature(x,y)
                       , np.array([ 3.,  3.,  3.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  3.,
            0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
            0.,  0.,  3.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
            0.,  0.,  0.,  0.,  0.,  3.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
            0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.])
                       )

    # Non-integral feature values, labels (0, 1).
    y = np.array([0,1])
    node_f = [ np.array([[1.1,1.2,1.3], [2.1,2.2,2.3]]) ]
    edge_f = [ np.array([[3.1,3.2,3.3]]) ]
    x = (node_f, edges, edge_f)
    g.initialize(x, y)
    jf = g.joint_feature(x,y)
    assert_array_equal(g.joint_feature(x,y)
                       , np.array([ 1.1,  1.2,  1.3,  2.1,  2.2,  2.3,  0. ,  0. ,  0. ,  0. ,  0. ,
            0. ,  0. ,  3.1,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,
            0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  3.2,  0. ,  0. ,  0. ,
            0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,
            0. ,  3.3,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,  0. ,
            0. ,  0. ,  0. ,  0. ,  0. ])
                       )

    # ---SIMPLE + 2nd EDGE (self-loop on node 0)------------------------------
    node_f, edges, edge_f = get_simple_graph2()
    x = (node_f, edges, edge_f)
    y = np.array([1,2])
    g.initialize(x, y)
    jf = g.joint_feature(x,y)
    # The self-loop contributes its (4,4,4) features at (1, 1); the other
    # edge still contributes (3,3,3) at (1, 2).
    assert_array_equal(jf
                       , np.array([ 0.,  0.,  0.,  1.,  1.,  1.,  2.,  2.,  2.,  0.,  0.,  0.,  0.,
            0.,  0.,  0.,  0.,  4.,  3.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
            0.,  0.,  0.,  0.,  0.,  0.,  0.,  4.,  3.,  0.,  0.,  0.,  0.,
            0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  4.,  3.,  0.,
            0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.])
                       )

    # Both nodes labelled 0: the two edges both fall at (0, 0), 3 + 4 = 7.
    y = np.array([0,0])
    g.initialize(x, y)
    assert_array_equal(g.joint_feature(x,y)
                       , np.array([ 3.,  3.,  3.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  7.,
            0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
            0.,  0.,  7.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
            0.,  0.,  0.,  0.,  0.,  7.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
            0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.])
                       )
def more_complex_graph():
    """Build a two-type CRF and a matching (x, y) sample.

    Type 0 has 2 labels / 3 features; type 1 has 3 labels / 4 features.
    The graph has one type0-type0 edge and one type0-type1 edge.
    """
    crf = NodeTypeEdgeFeatureGraphCRF(
        2,                  # number of node types
        [2, 3],             # labels per node type
        [3, 4],             # node features per node type
        np.array([[1, 2],
                  [2, 3]])  # edge features per (type, type) pair
    )
    node_features = [
        np.array([[1, 1, 1], [2, 2, 2]]),
        np.array([[.11, .12, .13, .14],
                  [.21, .22, .23, .24],
                  [.31, .32, .33, .34]]),
    ]
    # Edge lists in (type, type) order: (0,0), (0,1), (1,0), (1,1).
    edge_index = [
        np.array([[0, 1]]),   # edge from typ0:0 to typ0:1
        np.array([[0, 0]]),   # edge from typ0:0 to typ1:0
        None,
        None,
    ]
    edge_features = [
        np.array([[.111]]),
        np.array([[.221, .222]]),
        None,
        None,
    ]
    x = (node_features, edge_index, edge_features)
    # Flattened labels: type-1 labels are offset by type 0's 2 labels.
    y = np.hstack([np.array([0, 0]),
                   2 + np.array([0, 0, 0])])
    return crf, x, y
def test_joint_feature2():
    """joint_feature on two-type graphs, including invariance to reordering
    nodes and flipping the corresponding edges/labels."""
    # ---MORE COMPLEX GRAPH----------------------------------------------------
    g, x, y = more_complex_graph()
    g.initialize(x, y)
    jf = g.joint_feature(x,y)
    # Self-comparison: trivially true, but fails if the result contains NaN.
    assert_array_equal(jf, jf)
    assert_array_almost_equal(jf
                       , np.array([ 3.   ,  3.   ,  3.   ,  0.   ,  0.   ,  0.   ,  0.63 ,  0.66 ,
        0.69 ,  0.72 ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.111,  0.   ,  0.   ,  0.   ,  0.221,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.222,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ]))

    # ---MORE COMPLEX GRAPH -- BIS---------------------------------------------
    g = NodeTypeEdgeFeatureGraphCRF(
                    2                   #how many node type?
                  , [2, 3]              #how many labels per node type?
                  , [3, 4]              #how many features per node type?
                  , np.array([ [1, 2]
                             , [2, 3]]) #how many features per node type X node type?
                  )
    node_f = [  np.array([ [1,1,1], [2,2,2] ])
              , np.array([ [.11, .12, .13, .14], [.21, .22, .23, .24], [.31, .32, .33, .34]])
            ]
    edges = [ np.array( [ [0,1]] ),   #an edge from typ0:0 to typ0:1
              np.array( [ [0,2]] )    #an edge from typ0:0 to typ1:2
              , None, None
            ]
    edge_f = [  np.array([[.111]])
              , np.array([[.221, .222]])
              , None
              , None
            ]
    x = ( node_f, edges, edge_f)
    # Flattened labels: type-1 labels start at 2.
    y = np.hstack([np.array([0, 1]),
                   2+np.array([0, 1, 2])])
    g.initialize(x, y)
    jf = g.joint_feature(x,y)
    assert_array_equal(jf, jf)
    assert_array_almost_equal(jf
                       , np.array([ 1.   ,  1.   ,  1.   ,  2.   ,  2.   ,  2.   ,  0.11 ,  0.12 ,
        0.13 ,  0.14 ,  0.21 ,  0.22 ,  0.23 ,  0.24 ,  0.31 ,  0.32 ,
        0.33 ,  0.34 ,  0.   ,  0.111,  0.   ,  0.   ,  0.   ,  0.   ,
        0.221,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.222,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ]))

    # --- REORDERED MORE COMPLEX GRAPH------------------------------------------
    # Same graph with nodes listed in reverse order and edges/labels permuted
    # consistently: joint_feature must be identical to the BIS case above.
    node_f = [  np.array([ [2,2,2], [1,1,1] ])
              , np.array([ [.31, .32, .33, .34], [.11, .12, .13, .14], [.21, .22, .23, .24]])
            ]
    edges = [ np.array( [ [1, 0]] ),
              np.array( [ [1,0]] )    #the same typ0-typ1 edge, renumbered
              , None, None
            ]
    edge_f = [  np.array([[.111]])
              , np.array([[.221, .222]])
              , None
              , None
            ]
    x = ( node_f, edges, edge_f)
    y = np.hstack([np.array([1, 0]),
                   2+np.array([2, 0, 1])])
    g.initialize(x, y)
    jf = g.joint_feature(x,y)
    assert_array_equal(jf, jf)
    assert_array_almost_equal(jf
                       , np.array([ 1.   ,  1.   ,  1.   ,  2.   ,  2.   ,  2.   ,  0.11 ,  0.12 ,
        0.13 ,  0.14 ,  0.21 ,  0.22 ,  0.23 ,  0.24 ,  0.31 ,  0.32 ,
        0.33 ,  0.34 ,  0.   ,  0.111,  0.   ,  0.   ,  0.   ,  0.   ,
        0.221,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.222,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,
        0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ,  0.   ]))
def test_joint_feature3():
    """Two-type graph where typ0-typ0 edges carry no features (0 in the
    edge-feature matrix); also checks _get_unary_potentials and
    _get_pairwise_potentials directly."""
    g = NodeTypeEdgeFeatureGraphCRF(
                    2                   #how many node type?
                  , [2, 3]              #how many labels per node type?
                  , [3, 4]              #how many features per node type?
                  , np.array([ [0, 2]
                             , [2, 3]]) #how many features per node type X node type?
                  )
    node_f = [  np.array([ [1,1,1], [2,2,2] ])
              , np.array([ [.11, .12, .13, .14], [.21, .22, .23, .24], [.31, .32, .33, .34]])
            ]
    # Edge lists in (type, type) order: (0,0), (0,1), (1,0), (1,1).
    edges = [ None
            , np.array( [
                [0,1]   #an edge from typ0:0 to typ1:1
                ])
            , None
            , np.array( [
                [0,1],  #an edge from typ1:0 to typ1:1
                [1,2]   #an edge from typ1:1 to typ1:2
                ])
            ]
    edge_f = [ None
             , np.array([[.221, .222]])
             , None
             , np.array([[.01 , .02 , .03 ],
                         [.001, .002, .003]])
             ]
    x = (node_f, edges, edge_f)
    # All nodes take the first label of their type.
    y = np.hstack([ np.array([0, 0])
                  , 2+np.array([0, 0, 0])
                 ])
    g.initialize(x, y)
    jf = g.joint_feature(x,y)
    assert_array_equal(jf, jf)
    assert_array_almost_equal(jf
                       , np.array([ 3.   ,  3.   ,  3.   , 0., 0., 0.,
                                    0.63 ,  0.66 , 0.69  , 0.72, 0.  , 0., 0., 0.  , 0., 0., 0.  , 0.,
        #edges 0 to 0 2x2 states
        #typ0 typ0 EMPTY
        #typ0 typ1
        .221, 0., 0., 0., 0., 0.,
        .222, 0., 0., 0., 0., 0.,
        #typ1 typ0
        0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0.,
        #typ1 typ1
        0.011, 0., 0., 0., 0., 0., 0., 0., 0.,
        0.022, 0., 0., 0., 0., 0., 0., 0., 0.,
        0.033, 0., 0., 0., 0., 0., 0., 0., 0.
        ])
                              )

    # Mixed labels: each edge's features now land at its (label, label) slot.
    y = np.hstack([ np.array([0, 1])
                  , 2+np.array([1, 1, 0])
                 ])
    g.initialize(x, y)
    jf = g.joint_feature(x,y)
    assert_array_equal(jf, jf)
    assert_array_almost_equal(jf
                       , np.array([ 1. , 1., 1. , 2., 2., 2.,
                                    .31, .32, .33, .34  , .32, .34, .36, .38 , 0., 0., 0. , 0.,
        #edges 0 to 0 2x2 states
        #typ0 typ0 EMPTY
        #typ0 typ1
        0., .221, 0., 0., 0., 0.,
        0., .222, 0., 0., 0., 0.,
        #typ1 typ0
        0., 0., 0., 0., 0., 0.,
        0., 0., 0., 0., 0., 0.,
        #typ1 typ1
        0., 0., 0., 0.001, 0.01, 0., 0., 0., 0.,
        0., 0., 0., 0.002, 0.02, 0., 0., 0., 0.,
        0., 0., 0., 0.003, 0.03, 0., 0., 0., 0.
        ])
                              )

    # Weight vector: 18 explicit unary weights followed by 51 pairwise
    # weights of 1.0 (matching size_joint_feature).
    w = np.array([ 1,1,1, 2,2,2,  10,10,10,10, 20,20,20,20, 30,30,30,30 ]
                 +[1.0]*51, dtype=np.float64
                 )
    ret_u = g._get_unary_potentials(x, w)
    assert len(ret_u) == 2  # one potential array per node type
    assert_array_almost_equal(ret_u[0], np.array([  #n_nodes x n_states
                                                [3, 6],
                                                [6, 12]]))
    assert_array_almost_equal(ret_u[1], np.array([  #n_nodes x n_states
                                                [5, 10, 15],
                                                [9, 18, 27],
                                                [13, 26, 39]]))
    assert len(w) == g.size_joint_feature

    ret_pw = g._get_pairwise_potentials(x, w)
    pw00, pw01, pw10, pw11 = ret_pw
    assert len(pw00) == 0  # no typ0-typ0 edges in this graph
    assert_array_almost_equal(pw01,np.array([  #n_edges, n_states, n_states
            [[0.443, 0.443, 0.443],
             [0.443, 0.443, 0.443]]
            ]))
    assert len(pw10) == 0
    assert_array_almost_equal(pw11,np.array([  #n_edges, n_states, n_states
            [[0.06 , 0.06 , 0.06],
             [0.06 , 0.06 , 0.06],
             [0.06 , 0.06 , 0.06]]
            ,
            [[0.006, 0.006, 0.006],
             [0.006, 0.006, 0.006],
             [0.006, 0.006, 0.006]]
            ]))
def test_unary_potentials():
    """With a single node type, NodeTypeEdgeFeatureGraphCRF must produce the
    same unary and pairwise potentials as the reference EdgeFeatureGraphCRF
    (wrapped in a one-element list, one entry per type)."""
    g = NodeTypeEdgeFeatureGraphCRF(
                    1                   #how many node type?
                  , [4]                 #how many labels per node type?
                  , [3]                 #how many features per node type?
                  , np.array([[3]])     #how many features per node type X node type?
                  )
    node_f = [ np.array([[1,1,1],
                         [2,2,2]])
             ]
    edges = [ np.array([[0,1]])   #an edge from node 0 to node 1
            ]
    edge_f = [ np.array([[3,3,3]])
             ]
    x = (node_f, edges, edge_f)
    y = np.hstack([ np.array([1,2])])
    g.initialize(x, y)

    # Reference model: plain EdgeFeatureGraphCRF on the same (unwrapped) data.
    gref = EdgeFeatureGraphCRF(4,3,3)
    xref = (node_f[0], edges[0], edge_f[0])
    wref = np.arange(gref.size_joint_feature)
    potref = gref._get_unary_potentials(xref, wref)

    w = np.arange(g.size_joint_feature)
    pot = g._get_unary_potentials(x, w)
    # The typed variant returns one array per node type.
    assert_array_equal(pot, [potref])

    pwpotref = gref._get_pairwise_potentials(xref, wref)
    pwpot = g._get_pairwise_potentials(x, w)
    assert_array_equal(pwpot, [pwpotref])
# def test_inference_util():
# g = NodeTypeEdgeFeatureGraphCRF(
# 3 #how many node type?
# , [2, 3, 1] #how many labels per node type?
# , [3, 4, 1] #how many features per node type?
# , np.array([ [1, 2, 2]
# , [2, 3, 2]
# , [2, 2, 1]]) #how many features per node type X node type?
# )
# node_f = [ np.array([ [2,2,2], [1,1,1] ])
# , np.array([ [.31, .32, .33, .34], [.11, .12, .13, .14], [.21, .22, .23, .24]])
# , np.array([ [77], [88], [99]])
# ]
# edges = [ np.array( [ [1, 0]] ),
# np.array( [ [1,0]] ) #an edge from 0 to 2
# , None
#
# , None
# , None
# , None
#
# , np.array( [[1,1]] )
# , None
# , None ]
#
# x = ( node_f, edges, None)
#
# reindexed_exdges = g._index_all_edges(x)
# #print `reindexed_exdges`
# assert_array_equal(reindexed_exdges,
# np.array( [[1,0],
# [1,2],
# [6,1]]))
#
# def report_model_config(crf):
# print crf.n_states
# print crf.n_features
# print crf.n_edge_features
def inference_data():
    """
    Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
    """
    # One multinomial-blocks sample; inference uses different pairwise
    # weights in the horizontal and vertical directions.
    X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
    grid, labels = X[0], Y[0]
    n_states = grid.shape[-1]
    edge_list = make_grid_edges(grid, 4, return_lists=True)
    edges = np.vstack(edge_list)

    # Horizontal potential: linear ordering constraint.
    pw_horz = -1 * np.eye(n_states)
    row_idx, col_idx = np.indices(pw_horz.shape)
    pw_horz[row_idx > col_idx] = 1
    # Vertical potential: strong penalty on unequal labels.
    pw_vert = -1 * np.eye(n_states)
    pw_vert[row_idx != col_idx] = 1
    pw_vert *= 10

    # Tile each potential over its own set of edges.
    weights_h = np.repeat(pw_horz[np.newaxis, :, :],
                          edge_list[0].shape[0], axis=0)
    weights_v = np.repeat(pw_vert[np.newaxis, :, :],
                          edge_list[1].shape[0], axis=0)
    edge_weights = np.vstack([weights_h, weights_v])

    # Reference solution from the generic LP solver.
    res = lp_general_graph(-grid.reshape(-1, n_states), edges, edge_weights)

    edge_features = edge_list_to_features(edge_list)
    x = ([grid.reshape(-1, n_states)], [edges], [edge_features])
    y = labels.ravel()
    return x, y, pw_horz, pw_vert, res, n_states
def test_inference_ad3plus():
    """Inference through the "ad3+" method must reproduce the reference LP
    solution (relaxed) and the ground truth (discrete)."""
    x, y, pw_horz, pw_vert, res, n_states = inference_data()
    w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])

    # Relaxed inference through the CRF interface.
    crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]],
                                      inference_method="ad3+")
    crf.initialize(x, y)
    y_pred = crf.inference(x, w, relaxed=True)
    if isinstance(y_pred, tuple):
        # ad3 produces an integer result if it found the exact solution;
        # only the tuple (fractional) form is compared against the LP output.
        assert_array_almost_equal(res[0], y_pred[0][0].reshape(-1, n_states), 5)
        assert_array_almost_equal(res[1], y_pred[1][0], 5)
        # Bug fix: a stray third argument (5) was previously passed here,
        # which assert_array_equal interprets as err_msg, not a decimal.
        assert_array_equal(y, np.argmax(y_pred[0][0], axis=-1))

    # Again, this time discrete predictions only.
    crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]],
                                      inference_method="ad3+")
    crf.initialize(x)
    y_pred = crf.inference(x, w, relaxed=False)
    assert_array_equal(y, y_pred)
def test_inference_ad3():
    """Inference through the "ad3" method must reproduce the reference LP
    solution (relaxed) and the ground truth (discrete)."""
    x, y, pw_horz, pw_vert, res, n_states = inference_data()
    w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])

    # Relaxed inference through the CRF interface.
    crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]],
                                      inference_method="ad3")
    crf.initialize(x, y)
    y_pred = crf.inference(x, w, relaxed=True)
    if isinstance(y_pred, tuple):
        # ad3 produces an integer result if it found the exact solution;
        # only the tuple (fractional) form is compared against the LP output.
        assert_array_almost_equal(res[0], y_pred[0][0].reshape(-1, n_states), 5)
        assert_array_almost_equal(res[1], y_pred[1][0], 5)
        # Bug fix: a stray third argument (5) was previously passed here,
        # which assert_array_equal interprets as err_msg, not a decimal.
        assert_array_equal(y, np.argmax(y_pred[0][0], axis=-1))

    # Again, this time discrete predictions only.
    crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]],
                                      inference_method="ad3")
    crf.initialize(x)
    y_pred = crf.inference(x, w, relaxed=False)
    assert_array_equal(y, y_pred)
def test_joint_feature_discrete():
    """
    Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
    """
    # (Fixed docstring typo; removed a vestigial `if True:` left over from a
    # disabled loop over inference methods.)
    X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
    x, y = X[0], Y[0]
    edge_list = make_grid_edges(x, 4, return_lists=True)
    edges = np.vstack(edge_list)
    edge_features = edge_list_to_features(edge_list)
    x = ([x.reshape(-1, 3)], [edges], [edge_features])
    y_flat = y.ravel()

    crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]])
    joint_feature_y = crf.joint_feature(x, y_flat)
    assert_equal(joint_feature_y.shape, (crf.size_joint_feature,))
    # First the horizontal, then the vertical pairwise block; the unary part
    # is trusted and skipped.
    n_states = crf.l_n_states[0]
    n_features = crf.l_n_features[0]
    pw_joint_feature_horz, pw_joint_feature_vert = joint_feature_y[
        n_states * n_features:].reshape(2, n_states, n_states)
    assert_array_equal(pw_joint_feature_vert, np.diag([9 * 4, 9 * 4, 9 * 4]))
    vert_joint_feature = np.diag([10 * 3, 10 * 3, 10 * 3])
    vert_joint_feature[0, 1] = 10
    vert_joint_feature[1, 2] = 10
    assert_array_equal(pw_joint_feature_horz, vert_joint_feature)
def test_joint_feature_continuous():
    """
    Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
    """
    # (Fixed docstring typo; removed a vestigial `if True:` left over from a
    # disabled loop over inference methods.)
    # First make a prediction (possibly relaxed/fractional), then check the
    # shape of joint_feature computed on it.
    X, Y = generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
    x, y = X[0], Y[0]
    n_states = x.shape[-1]
    edge_list = make_grid_edges(x, 4, return_lists=True)
    edges = np.vstack(edge_list)
    edge_features = edge_list_to_features(edge_list)
    x = ([x.reshape(-1, 3)], [edges], [edge_features])
    y = y.ravel()

    pw_horz = -1 * np.eye(n_states)
    xx, yy = np.indices(pw_horz.shape)
    # linear ordering constraint horizontally
    pw_horz[xx > yy] = 1
    # high cost for unequal labels vertically
    pw_vert = -1 * np.eye(n_states)
    pw_vert[xx != yy] = 1
    pw_vert *= 10

    # create crf, assemble weight, make prediction
    crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]])
    w = np.hstack([np.eye(3).ravel(), -pw_horz.ravel(), -pw_vert.ravel()])
    crf.initialize(x, y)
    y_pred = crf.inference(x, w, relaxed=True)

    # compute joint_feature for prediction
    joint_feature_y = crf.joint_feature(x, y_pred)
    assert_equal(joint_feature_y.shape, (crf.size_joint_feature,))
    # FIXME: the pairwise part of joint_feature is not checked here yet.
def test_energy_continuous():
    """The energy returned by relaxed inference must equal minus the SSVM
    energy joint_feature(x, res) . w.

    (Removed a vestigial `if True:` left over from a disabled loop over
    inference methods.)
    """
    np.random.seed(0)
    found_fractional = False
    crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]])
    # Keep drawing random problems until one yields a fractional solution,
    # checking the energy identity on every draw.
    while not found_fractional:
        x = np.random.normal(size=(7, 8, 3))
        edge_list = make_grid_edges(x, 4, return_lists=True)
        edges = np.vstack(edge_list)
        edge_features = edge_list_to_features(edge_list)
        x = ([x.reshape(-1, 3)], [edges], [edge_features])
        unary_params = np.random.normal(size=(3, 3))
        pw1 = np.random.normal(size=(3, 3))
        pw2 = np.random.normal(size=(3, 3))
        w = np.hstack([unary_params.ravel(), pw1.ravel(), pw2.ravel()])
        crf.initialize(x)
        res, energy = crf.inference(x, w, relaxed=True, return_energy=True)
        found_fractional = np.any(np.max(res[0], axis=-1) != 1)
        joint_feature = crf.joint_feature(x, res)
        energy_svm = np.dot(joint_feature, w)
        assert_almost_equal(energy, -energy_svm)
def test_energy_discrete():
    """For discrete inference, compute_energy on the potentials must agree
    with joint_feature(x, y_hat) . w."""
    crf = NodeTypeEdgeFeatureGraphCRF(1, [3], [3], [[2]])
    for _ in range(10):
        grid = np.random.normal(size=(7, 8, 3))
        edge_list = make_grid_edges(grid, 4, return_lists=True)
        edges = np.vstack(edge_list)
        edge_features = edge_list_to_features(edge_list)
        x = ([grid.reshape(-1, 3)], [edges], [edge_features])
        unary_params = np.random.normal(size=(3, 3))
        pairwise_1 = np.random.normal(size=(3, 3))
        pairwise_2 = np.random.normal(size=(3, 3))
        w = np.hstack([unary_params.ravel(),
                       pairwise_1.ravel(),
                       pairwise_2.ravel()])
        crf.initialize(x)
        y_hat = crf.inference(x, w, relaxed=False)
        energy = compute_energy(crf._get_unary_potentials(x, w)[0],
                                crf._get_pairwise_potentials(x, w)[0],
                                edges,  # CAUTION: pass the flattened edges!!
                                y_hat)
        energy_svm = np.dot(crf.joint_feature(x, y_hat), w)
        assert_almost_equal(energy, energy_svm)
if __name__ == "__main__":
    np.set_printoptions(precision=3, linewidth=9999)
    # debug_joint_feature() and test_inference_util() are intentionally not
    # run (they were disabled in the original if 0 / commented entries).
    for test_fn in (test_flatten_unflattenY,
                    test_joint_feature,
                    test_joint_feature2,
                    test_joint_feature3,
                    test_unary_potentials,
                    test_inference_ad3,
                    test_inference_ad3plus,
                    test_joint_feature_discrete,
                    test_joint_feature_continuous,
                    test_energy_continuous,
                    test_energy_discrete):
        test_fn()
| pystruct/tests/test_models/test_node_type_edge_feature_graph_crf.py | 36,982 | Testing with a single type of nodes. Must do as well as EdgeFeatureGraphCRF
Testing with a single type of nodes. Must de aw well as EdgeFeatureGraphCRF
Testing with a single type of nodes. Must de aw well as EdgeFeatureGraphCRF
how many node type?how many labels per node type?how many features per node type?how many features per node type X node type? how many node type?how many labels per node type?how many features per node type?how many features per node type X node type? how many node type?how many labels per node type?how many features per node type?how many features per node type X node type? how many node type?how many labels per node type?how many features per node type?how many features per node type X node type? how many node type?how many labels per node type?how many features per node type?how many features per node type X node type? how many node type?how many labels per node type?how many features per node type?how many features per node type X node type? how many node type?how many labels per node type?how many features per node type?how many features per node type X node type? -------------------------------------------------------------------------------------------print "---MORE COMPLEX GRAPH :) ---------------------------------------------------------------------"how many node type?how many possible labels per node type?how many features per node type?how many features per node type X node type? type 0 node 0 to type 0 node 0 print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "print yprint "joint_feature = \n", `jf`printhow many node type?how many labels per node type?how many features per node type?how many features per node type X node type? 
an edge from 0 to 1an edge from 0 to 1an edge from 0 to 0list of node feature , per typewe give no edge============================================ we start again at zero on 2nd type2 node with 3 features, 3 node with 4 featureswe give no edgeprint g.unflattenY(X, y)2 node with 3 features, 3 node with 4 featureswe give no edgeprint "---SIMPLE---------------------------------------------------------------------"print "- - - - - - - - - - - - - - - - - - - - - - - - - - - " y = np.array([1,0])print yprint "joint_feature = \n", `jf`printprint "- - - - - - - - - - - - - - - - - - - - - - - - - - - "print yprint "joint_feature = \n", `jf`printprint "- - - - - - - - - - - - - - - - - - - - - - - - - - - "print "joint_feature = \n", `jf`print "---SIMPLE + 2nd EDGE--------------------------------------------------------"print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "print yprint "joint_feature = \n", `jf`printprint "- - - - - - - - - - - - - - - - - - - - - - - - - - - "print yprint "joint_feature = \n", `g.joint_feature(x,y)`printhow many node type?how many labels per node type?how many features per node type?how many features per node type X node type? nodes = np.array( [[0,0], [0,1], [1, 0], [1, 1], [1, 2]] ) an edge from 0 to 1an edge from typ0:0 to typ1:0 -------------------------------------------------------------------------------------------print "---MORE COMPLEX GRAPH :) ---------------------------------------------------------------------"print yprint "joint_feature = \n", `jf`printprint "---MORE COMPLEX GRAPH :) -- BIS -------------------------------------------------------------------"how many node type?how many labels per node type?how many features per node type?how many features per node type X node type? 
an edge from 0 to 1an edge from 0 to 2print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "print yprint "joint_feature = \n", `jf`printprint "MORE COMPLEX GRAPH :) -- BIS OK"print "--- REORDERED MORE COMPLEX GRAPH :) ---------------------------------------------------------------------"an edge from 0 to 2print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "print yprint "joint_feature = \n", `jf`print -------------------------------------------------------------------------------------------print "---MORE COMPLEX GRAPH AGAIN :) ---------------------------------------------------------------------"how many node type?how many labels per node type?how many features per node type?how many features per node type X node type? nodes = np.array( [[0,0], [0,1], [1, 0], [1, 1], [1, 2]] ) an edge from typ0:0 to typ1:1 an edge from typ0:0 to typ1:1 an edge from typ1:1 to typ1:2 print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "print yprint g.size_unariesprint g.size_pairwiseprint "joint_feature = \n", `jf`printedges 0 to 0 2x2 statestyp0 typ0 EMPTYtyp0 typ1typ1 typ0typ1 typ1print "- - - - - - - - - - - - - - - - - - - - - - - - - - - "print yprint "joint_feature = \n", `jf`printedges 0 to 0 2x2 statestyp0 typ0 EMPTYtyp0 typ1typ1 typ0typ1 typ1print `w`print `ret_u`n_nodes x n_statesn_nodes x n_states for _pw in ret_pw: print "_pw ", `_pw`n_edges, n_states, n_statesn_edges, n_states, n_statesprint "---SIMPLE---------------------------------------------------------------------"g, (node_f, edges, edge_f) = get_simple_graph_structure(), get_simple_graph()how many node type?how many labels per node type?how many features per node type?how many features per node type X node type? an edge from 0 to 1print "- - - - - - - - - - - - - - - - - - - - - - - - - - - " y = np.array([1,0])print yprint `potref`print `pot`print `pwpotref`print `pwpot` def test_inference_util(): g = NodeTypeEdgeFeatureGraphCRF( 3 how many node type? 
, [2, 3, 1] how many labels per node type? , [3, 4, 1] how many features per node type? , np.array([ [1, 2, 2] , [2, 3, 2] , [2, 2, 1]]) how many features per node type X node type? ) node_f = [ np.array([ [2,2,2], [1,1,1] ]) , np.array([ [.31, .32, .33, .34], [.11, .12, .13, .14], [.21, .22, .23, .24]]) , np.array([ [77], [88], [99]]) ] edges = [ np.array( [ [1, 0]] ), np.array( [ [1,0]] ) an edge from 0 to 2 , None , None , None , None , np.array( [[1,1]] ) , None , None ] x = ( node_f, edges, None) reindexed_exdges = g._index_all_edges(x) print `reindexed_exdges` assert_array_equal(reindexed_exdges, np.array( [[1,0], [1,2], [6,1]])) def report_model_config(crf): print crf.n_states print crf.n_features print crf.n_edge_features Test inference with different weights in different directions linear ordering constraint horizontally high cost for unequal labels vertically generate edge weights do inference same inference through CRF inferfacecrf.initialize([x], [y]) ad3 produces an integer result if it found the exact solutionnp.set_printoptions(precision=2, threshold=9999) again, this time discrete predictions onlycrf.initialize([x], [y]) same inference through CRF inferfacecrf.initialize([x], [y]) ad3 produces an integer result if it found the exact solutionnp.set_printoptions(precision=2, threshold=9999) again, this time discrete predictions onlycrf.initialize([x], [y])for inference_method in get_installed(["lp", "ad3", "qpbo"]): first horizontal, then vertical we trust the unaries ;) FIXME first make perfect prediction, including pairwise partx = (x.reshape(-1, 3), edges, edge_features) linear ordering constraint horizontally high cost for unequal labels vertically create crf, assemble weight, make prediction for inference_method in get_installed(["lp", "ad3"]): crf = EdgeFeatureGraphCRF(inference_method=inference_method)crf.initialize([x], [y])report_model_config(crf) compute joint_feature for prediction FIXME first horizontal, then vertical we trust the unaries 
;)pw_joint_feature_horz, pw_joint_feature_vert = joint_feature_y[crf.n_states *crf.n_features:].reshape(2,crf.n_states,crf.n_states) make sure that energy as computed by ssvm is the same as by lpfor inference_method in get_installed(["lp", "ad3"]): for inference_method in get_installed(["qpbo", "ad3"]): crf = EdgeFeatureGraphCRF(n_states=3, inference_method=inference_method, n_edge_features=2, n_features=3)flat_edges = crf._index_all_edges(x)CAUTION: pass the flatened edges!! if 1: test_inference_util()print "OK" | 8,711 | en | 0.571907 |
from __future__ import print_function, absolute_import
import importlib
import logging
import os
from argparse import ArgumentParser
from six import string_types
from adr.formatter import all_formatters
from .errors import MissingDataError
log = logging.getLogger('adr')
here = os.path.abspath(os.path.dirname(__file__))
RECIPE_DIR = os.path.join(here, 'recipes')
ARGUMENT_GROUPS = {
'branch': [
[['-B', '--branch'],
{'default': ['mozilla-central'],
'action': 'append',
'help': "Branches to query results from",
}],
],
'build': [
[['-b', '--build-type'],
{'default': 'opt',
'help': "Build type (default: opt)",
}],
],
'date': [
[['--from'],
{'dest': 'from_date',
'default': 'today-week',
'help': "Starting date to pull data from, defaults "
"to a week ago",
}],
[['--to'],
{'dest': 'to_date',
'default': 'eod', # end of day
'help': "Ending date to pull data from, defaults "
"to now",
}],
],
'path': [
[['--path'],
{'required': True,
'help': "Path relative to repository root (file or directory)",
}],
],
'platform': [
[['-p', '--platform'],
{'default': 'windows10-64',
'help': "Platform to limit results to (default: windows10-64)",
}],
],
'rev': [
[['-r', '--revision'],
{'dest': 'rev',
'required': True,
'help': "Revision to limit results to",
}],
],
'test': [
[['-t', '--test'],
{'required': True,
'dest': 'test_name',
'help': "Path to a test file",
}],
],
}
"""
These are commonly used arguments which can be re-used. They are shared to
provide a consistent CLI across recipes.
"""
class RecipeParser(ArgumentParser):
    """Argument parser that pre-registers shared CLI argument groups.

    Subclasses may define ``arguments`` (a list of ``(cli, kwargs)`` pairs)
    which are added directly to the parser; the group names passed to the
    constructor are looked up in ``ARGUMENT_GROUPS`` and registered as
    titled argument groups, keeping the CLI consistent across recipes.
    """
    arguments = []

    def __init__(self, *groups, **kwargs):
        ArgumentParser.__init__(self, **kwargs)

        # Class-level arguments go straight onto the parser itself.
        for cli, cli_kwargs in self.arguments:
            self.add_argument(*cli, **cli_kwargs)

        # Each requested shared group becomes a named argument group.
        for group_name in groups:
            group = self.add_argument_group("{} arguments".format(group_name))
            for cli, cli_kwargs in ARGUMENT_GROUPS[group_name]:
                group.add_argument(*cli, **cli_kwargs)
def run_recipe(recipe, args, config):
    """Given a recipe, calls the appropriate query and returns the result.

    The provided recipe name is used to make a call to the modules.

    :param str recipe: name of the recipe to be run.
    :param list args: remainder arguments that were unparsed.
    :param Configuration config: config object.
    :returns: string
    """
    modname = '.recipes.{}'.format(recipe)
    mod = importlib.import_module(modname, package='adr')

    try:
        output = mod.run(args, config)
    except MissingDataError:
        return "ActiveData didn\'t return any data."

    # ``config.fmt`` may be either a formatter name or an already-resolved
    # formatter callable; only look names up in the registry.
    # BUG FIX: previously ``fmt`` was assigned only inside the isinstance
    # branch, so a non-string ``config.fmt`` raised NameError below.
    fmt = config.fmt
    if isinstance(fmt, string_types):
        fmt = all_formatters[fmt]

    log.debug("Result:")
    return fmt(output)
| adr/recipe.py | 3,156 | Given a recipe, calls the appropriate query and returns the result.
The provided recipe name is used to make a call to the modules.
:param str recipe: name of the recipe to be run.
:param list args: remainder arguments that were unparsed.
:param Configuration config: config object.
:returns: string
end of day | 314 | en | 0.736307 |
import bbi
import clodius.tiles.format as hgfo
import functools as ft
import logging
import numpy as np
import pandas as pd
import re
from concurrent.futures import ThreadPoolExecutor
MAX_THREADS = 4
TILE_SIZE = 1024
logger = logging.getLogger(__name__)
aggregation_modes = {}
aggregation_modes['mean'] = {'name': 'Mean', 'value': 'mean'}
aggregation_modes['min'] = {'name': 'Min', 'value': 'min'}
aggregation_modes['max'] = {'name': 'Max', 'value': 'max'}
aggregation_modes['std'] = {'name': 'Standard Deviation', 'value': 'std'}
range_modes = {}
range_modes['minMax'] = {'name': 'Min-Max', 'value': 'minMax'}
range_modes['whisker'] = {'name': 'Whisker', 'value': 'whisker'}
def get_quadtree_depth(chromsizes):
    """Return the zoom depth needed to cover the whole genome with
    TILE_SIZE-bp tiles (ceil of log2 of the tile count at base resolution)."""
    covering_tiles = np.ceil(sum(chromsizes) / TILE_SIZE)
    return int(np.ceil(np.log2(covering_tiles)))
def get_zoom_resolutions(chromsizes):
    """Bin sizes (bp per bin) per zoom level, coarsest first down to 1."""
    return [2 ** x for x in range(get_quadtree_depth(chromsizes), -1, -1)]
def natsort_key(s, _NS_REGEX=re.compile(r'(\d+)', re.U)):
    """Sort key for natural ordering: digit runs compare as integers."""
    parts = []
    for token in _NS_REGEX.split(s):
        if not token:
            continue
        parts.append(int(token) if token.isdigit() else token)
    return tuple(parts)
def natcmp(x, y):
    """cmp-style (-1/0/1) natural-order comparator for chromosome names.

    Underscore-containing names (e.g. ``chr1_alt``) compare by their suffix
    against each other, and sort after plain names; names containing the
    letters m/y/x are pushed toward the end; otherwise digit runs compare
    numerically via natural ordering.
    """
    if x.find('_') >= 0:
        x_parts = x.split('_')
        if y.find('_') >= 0:
            # chr_1 vs chr_2: compare the parts after the underscore
            y_parts = y.split('_')
            return natcmp(x_parts[1], y_parts[1])
        else:
            # chr_1 vs chr1: chr1 comes first
            return 1

    if y.find('_') >= 0:
        # chr1 vs chr_1: y comes second
        return -1

    _NS_REGEX = re.compile(r'(\d+)', re.U)
    x_parts = tuple(
        [int(a) if a.isdigit() else a for a in _NS_REGEX.split(x) if a]
    )
    y_parts = tuple(
        [int(a) if a.isdigit() else a for a in _NS_REGEX.split(y) if a]
    )

    # order of these letters is purposefully reverse how they should be ordered
    for key in ['m', 'y', 'x']:
        if key in y.lower():
            return -1
        if key in x.lower():
            return 1

    try:
        if x_parts < y_parts:
            return -1
        elif x_parts > y_parts:
            # BUG FIX: this branch was `y_parts > x_parts`, which is the same
            # condition as the branch above and therefore unreachable — any
            # "x greater than y" pair incorrectly compared equal (0).
            return 1
        else:
            return 0
    except TypeError:
        # mixed int/str comparison (e.g. "chr1" vs "chrA" tails)
        return 1
def natsorted(iterable):
    """Sort *iterable* using the natural chromosome-name ordering."""
    ordering = ft.cmp_to_key(natcmp)
    return sorted(iterable, key=ordering)
def get_chromsizes(bwpath):
    """
    TODO: replace this with negspy

    Also, return NaNs from any missing chromosomes in bbi.fetch
    """
    raw_sizes = bbi.chromsizes(bwpath)
    ordered_names = natsorted(raw_sizes.keys())
    # Reindex into natural chromosome order.
    return pd.Series(raw_sizes)[ordered_names]
def abs2genomic(chromsizes, start_pos, end_pos):
    """Yield (chrom_index, rel_start, rel_end) spans covering an absolute
    genomic coordinate range across chromosome boundaries."""
    offsets = np.r_[0, np.cumsum(chromsizes.values)]
    first_cid, last_cid = np.searchsorted(
        offsets, [start_pos, end_pos], side='right'
    ) - 1

    rel_start = start_pos - offsets[first_cid]
    # Full chromosomes up to (but excluding) the one containing end_pos.
    for cid in range(first_cid, last_cid):
        yield cid, rel_start, chromsizes[cid]
        rel_start = 0
    yield last_cid, rel_start, end_pos - offsets[last_cid]
def tileset_info(bwpath, chromsizes=None):
    '''
    Get the tileset info for a bigWig file

    Parameters
    ----------
    bwpath: string
        The path to the bigwig file from which to retrieve data
    chromsizes: [[chrom, size],...]
        A list of chromosome sizes associated with this tileset.
        Typically passed in to specify in what order data from
        the bigwig should be returned.

    Returns
    -------
    tileset_info: {'min_pos': [],
                    'max_pos': [],
                    'tile_size': 1024,
                    'max_zoom': 7
                    }
    '''
    TILE_SIZE = 1024

    if chromsizes is None:
        # No listing supplied: read the sizes from the bigWig itself.
        chromsizes = get_chromsizes(bwpath)
        chromsizes_list = []
        # BUG FIX: Series.iteritems() was removed in pandas 2.0; .items()
        # is the long-standing equivalent and works on older pandas too.
        for chrom, size in chromsizes.items():
            chromsizes_list += [[chrom, int(size)]]
    else:
        chromsizes_list = chromsizes

    # Number of TILE_SIZE-bp tiles needed at base resolution, then the
    # zoom depth that covers them in a quadtree.
    min_tile_cover = np.ceil(
        sum([int(c[1]) for c in chromsizes_list]) / TILE_SIZE
    )
    max_zoom = int(np.ceil(np.log2(min_tile_cover)))

    tileset_info = {
        'min_pos': [0],
        'max_pos': [TILE_SIZE * 2 ** max_zoom],
        'max_width': TILE_SIZE * 2 ** max_zoom,
        'tile_size': TILE_SIZE,
        'max_zoom': max_zoom,
        'chromsizes': chromsizes_list,
        'aggregation_modes': aggregation_modes,
        'range_modes': range_modes
    }
    return tileset_info
def fetch_data(a):
    """Worker: fetch one chromosome span of binned bigWig values.

    *a* is a tuple ``(bwpath, binsize, chromsizes, aggregation_mode,
    range_mode, cid, start, end)``.  Returns ``n_bins`` values — a 2- or
    4-column array for the 'minMax' / 'whisker' range modes, a flat array
    otherwise.  Spans that cannot be fetched are filled with NaN.
    """
    (bwpath, binsize, chromsizes, aggregation_mode, range_mode,
     cid, start, end) = a

    n_bins = int(np.ceil((end - start) / binsize))
    n_dim = {'minMax': 2, 'whisker': 4}.get(range_mode, 1)
    x = np.zeros((n_bins, n_dim)) if n_dim > 1 else np.zeros(n_bins)

    try:
        chrom = chromsizes.index[cid]
        clen = chromsizes.values[cid]

        def _fetch(summary):
            return bbi.fetch(bwpath, chrom, start, end,
                             bins=n_bins, missing=np.nan, summary=summary)

        if range_mode == 'minMax':
            x[:, 0] = _fetch('min')
            x[:, 1] = _fetch('max')
        elif range_mode == 'whisker':
            x[:, 0] = _fetch('min')
            x[:, 1] = _fetch('max')
            x[:, 2] = _fetch('mean')
            x[:, 3] = _fetch('std')
        else:
            x[:] = _fetch(aggregation_mode)

        # drop the very last bin if it is smaller than the binsize
        if end == clen and clen % binsize != 0:
            x = x[:-1]
    except IndexError:
        # beyond the range of the available chromosomes — probably an
        # absolute coordinate range past the end of the genome
        x[:] = np.nan
    except KeyError:
        # probably requested a chromosome that doesn't exist (e.g. chrM)
        x[:] = np.nan
    return x
def get_bigwig_tile(
    bwpath,
    zoom_level,
    start_pos,
    end_pos,
    chromsizes=None,
    aggregation_mode='mean',
    range_mode=None
):
    """Assemble one tile's worth of binned values for an absolute range.

    Splits [start_pos, end_pos) into per-chromosome spans, fetches each
    span in parallel via :func:`fetch_data`, and concatenates the results.

    :param bwpath: path to the bigWig file
    :param zoom_level: index into the zoom resolutions (0 = coarsest)
    :param chromsizes: optional pandas Series of chromosome sizes; read
        from the bigWig when omitted
    """
    if chromsizes is None:
        chromsizes = get_chromsizes(bwpath)

    binsize = get_zoom_resolutions(chromsizes)[zoom_level]

    cids_starts_ends = list(abs2genomic(chromsizes, start_pos, end_pos))
    # CONSISTENCY FIX: honor the module-wide MAX_THREADS budget instead of
    # a stray hard-coded pool size of 16 (MAX_THREADS was defined but never
    # used).  Results are identical; only the degree of parallelism changes.
    with ThreadPoolExecutor(max_workers=MAX_THREADS) as e:
        arrays = list(
            e.map(
                fetch_data,
                [
                    (bwpath, binsize, chromsizes,
                     aggregation_mode, range_mode) + tuple(c)
                    for c in cids_starts_ends
                ]
            )
        )
    return np.concatenate(arrays)
def tiles(bwpath, tile_ids, chromsizes_map={}, chromsizes=None):
    '''
    Generate tiles from a bigwig file.

    Parameters
    ----------
    tileset: tilesets.models.Tileset object
        The tileset that the tile ids should be retrieved from
    tile_ids: [str,...]
        A list of tile_ids (e.g. xyx.0.0) identifying the tiles
        to be retrieved
    chromsizes_map: {uid: []}
        A set of chromsizes listings corresponding to the parameters of the
        tile_ids. To be used if a chromsizes id is passed in with the tile id
        with the `|cos:id` tag in the tile id
    chromsizes: [[chrom, size],...]
        A 2d array containing chromosome names and sizes. Overrides the
        chromsizes in chromsizes_map

    Returns
    -------
    tile_list: [(tile_id, tile_data),...]
        A list of tile_id, tile_data tuples
    '''
    TILE_SIZE = 1024
    generated_tiles = []
    for tile_id in tile_ids:
        # Tile ids look like "<uid>.<zoom>.<pos>[.<agg-or-range>][|opt:val...]".
        tile_option_parts = tile_id.split('|')[1:]
        tile_no_options = tile_id.split('|')[0]
        tile_id_parts = tile_no_options.split('.')
        tile_position = list(map(int, tile_id_parts[1:3]))
        # Optional 4th id part selects either an aggregation or a range mode.
        return_value = tile_id_parts[3] if len(tile_id_parts) > 3 else 'mean'

        aggregation_mode = (
            return_value if return_value in aggregation_modes else 'mean'
        )
        range_mode = return_value if return_value in range_modes else None

        # Remaining "|key:value" options, e.g. "cos" (a chromsizes listing id).
        tile_options = dict([o.split(':') for o in tile_option_parts])

        if chromsizes:
            # An explicit chromsizes argument takes precedence over the map.
            chromnames = [c[0] for c in chromsizes]
            chromlengths = [int(c[1]) for c in chromsizes]
            chromsizes_to_use = pd.Series(chromlengths, index=chromnames)
        else:
            # Otherwise try the "cos" tile option against chromsizes_map.
            chromsizes_id = None
            if 'cos' in tile_options:
                chromsizes_id = tile_options['cos']
            if chromsizes_id in chromsizes_map:
                chromsizes_to_use = chromsizes_map[chromsizes_id]
            else:
                chromsizes_to_use = None

        zoom_level = tile_position[0]
        tile_pos = tile_position[1]

        # this doesn't combine multiple consecutive ids, which
        # would speed things up
        if chromsizes_to_use is None:
            # Last resort: read the chromsizes stored in the bigWig itself.
            chromsizes_to_use = get_chromsizes(bwpath)

        # Convert (zoom, position) into an absolute coordinate range.
        max_depth = get_quadtree_depth(chromsizes_to_use)
        tile_size = TILE_SIZE * 2 ** (max_depth - zoom_level)
        start_pos = tile_pos * tile_size
        end_pos = start_pos + tile_size
        dense = get_bigwig_tile(
            bwpath,
            zoom_level,
            start_pos,
            end_pos,
            chromsizes_to_use,
            aggregation_mode=aggregation_mode,
            range_mode=range_mode,
        )
        tile_value = hgfo.format_dense_tile(dense)
        generated_tiles += [(tile_id, tile_value)]
    return generated_tiles
def chromsizes(filename):
    '''
    Get a list of chromosome sizes from this [presumably] bigwig
    file.

    Parameters:
    -----------
    filename: string
        The filename of the bigwig file

    Returns
    -------
    chromsizes: [(name:string, size:int), ...]
        An ordered list of chromosome names and sizes
    '''
    try:
        chrom_series = get_chromsizes(filename)
        # BUG FIX: Series.iteritems() was removed in pandas 2.0; .items()
        # is the long-standing equivalent.
        return [[chrom, size] for chrom, size in chrom_series.items()]
    except Exception as ex:
        logger.error(ex)
        # Chain the original exception so the root cause stays visible.
        raise Exception(
            'Error loading chromsizes from bigwig file: {}'.format(ex)
        ) from ex
| clodius/tiles/bigwig.py | 10,577 | Get a list of chromosome sizes from this [presumably] bigwig
file.
Parameters:
-----------
filename: string
The filename of the bigwig file
Returns
-------
chromsizes: [(name:string, size:int), ...]
An ordered list of chromosome names and sizes
TODO: replace this with negspy
Also, return NaNs from any missing chromosomes in bbi.fetch
Generate tiles from a bigwig file.
Parameters
----------
tileset: tilesets.models.Tileset object
The tileset that the tile ids should be retrieved from
tile_ids: [str,...]
A list of tile_ids (e.g. xyx.0.0) identifying the tiles
to be retrieved
chromsizes_map: {uid: []}
A set of chromsizes listings corresponding to the parameters of the
tile_ids. To be used if a chromsizes id is passed in with the tile id
with the `|cos:id` tag in the tile id
chromsizes: [[chrom, size],...]
A 2d array containing chromosome names and sizes. Overrides the
chromsizes in chromsizes_map
Returns
-------
tile_list: [(tile_id, tile_data),...]
A list of tile_id, tile_data tuples
Get the tileset info for a bigWig file
Parameters
----------
bwpath: string
The path to the bigwig file from which to retrieve data
chromsizes: [[chrom, size],...]
A list of chromosome sizes associated with this tileset.
Typically passed in to specify in what order data from
the bigwig should be returned.
Returns
-------
tileset_info: {'min_pos': [],
'max_pos': [],
'tile_size': 1024,
'max_zoom': 7
}
chr_1 vs chr_2 chr_1 vs chr1 chr1 comes first chr1 vs chr_1 y comes second order of these parameters is purposefully reverse how they should be ordered drop the very last bin if it is smaller than the binsize beyond the range of the available chromosomes probably means we've requested a range of absolute coordinates that stretch beyond the end of the genome probably requested a chromosome that doesn't exist (e.g. chrM) this doesn't combine multiple consequetive ids, which would speed things up | 2,031 | en | 0.691378 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-02-22 09:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax ``backend_id`` to be nullable on attachment, comment and issue."""

    dependencies = [
        # Must run after the unique_together constraints migration.
        ('waldur_jira', '0011_unique_together'),
    ]

    operations = [
        # Each AlterField keeps max_length=255 but newly allows NULL, so
        # records without a JIRA backend id can be stored.
        migrations.AlterField(
            model_name='attachment',
            name='backend_id',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='comment',
            name='backend_id',
            field=models.CharField(max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='issue',
            name='backend_id',
            field=models.CharField(max_length=255, null=True),
        ),
    ]
| src/waldur_jira/migrations/0012_backend_id_null.py | 810 | -*- coding: utf-8 -*- Generated by Django 1.11.7 on 2018-02-22 09:00 | 68 | en | 0.612917 |
#!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix (the standard ::ffff:a.b.c.d mapping)
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix (OnionCat fd87:d87e:eb43::/48)
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])

def name_to_ipv6(addr):
    """Convert an address string to its 16-byte IPv6 bytearray form.

    Accepts dotted IPv4, colon-separated IPv6, "<base32>.onion"
    (OnionCat-encoded) and "0x..." little-endian IPv4 (the old pnSeeds
    format).  Raises ValueError for anything unparseable.
    """
    if len(addr)>6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16-len(pchOnionCat):
            # BUG FIX: this previously referenced the undefined name `s`,
            # turning the intended ValueError into a NameError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr: # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr: # IPv6
        sub = [[], []] # prefix, suffix
        x = 0
        addr = addr.split(':')
        for i,comp in enumerate(addr):
            if comp == '':
                if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
                    continue
                x += 1 # :: skips to suffix
                assert(x < 2)
            else: # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        # Zero-fill the gap the "::" stands for.
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'): # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split "host[:port]" (including bracketed IPv6) into
    (ipv6_bytes, port), falling back to *defaultport* when absent."""
    bracketed = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if bracketed: # ipv6
        host = bracketed.group(1)
        port = bracketed.group(2)
    else:
        (host, _, port) = s.partition(':')
    port = int(port) if port else defaultport
    return (name_to_ipv6(host), port)
def process_nodes(g, f, structname, defaultport):
    """Write a C ``SeedSpec6`` array named *structname* to *g*, built
    from the seed-node lines of *f* (comments and blanks skipped)."""
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    entries = []
    for line in f:
        # Drop everything after a '#' comment and surrounding whitespace.
        line = line.split('#', 1)[0].strip()
        if not line:
            continue
        (host, port) = parse_spec(line, defaultport)
        hoststr = ','.join(('0x%02x' % b) for b in host)
        entries.append(' {{%s}, %i}' % (hoststr, port))
    g.write(',\n'.join(entries))
    g.write('\n};\n')
def main():
    """Read nodes_main.txt / nodes_test.txt from argv[1] and emit the
    chainparamsseeds header on stdout."""
    if len(sys.argv) < 2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    g = sys.stdout
    indir = sys.argv[1]

    # Fixed preamble of the generated C header.
    for header_line in (
        '#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n',
        '#define BITCOIN_CHAINPARAMSSEEDS_H\n',
        '/**\n',
        ' * List of fixed seed nodes for the talisman network\n',
        ' * AUTOGENERATED by share/seeds/generate-seeds.py\n',
        ' *\n',
        ' * Each line contains a 16-byte IPv6 address and a port.\n',
        ' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n',
        ' */\n',
    ):
        g.write(header_line)

    with open(os.path.join(indir, 'nodes_main.txt'), 'r') as f:
        process_nodes(g, f, 'pnSeed6_main', 9999)
    g.write('\n')
    with open(os.path.join(indir, 'nodes_test.txt'), 'r') as f:
        process_nodes(g, f, 'pnSeed6_test', 19999)
    g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| share/seeds/generate-seeds.py | 4,298 | Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
!/usr/bin/python Copyright (c) 2014 Wladmir J. van der Laan Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. ipv4 in ipv6 prefix tor-specific ipv6 prefix IPv4 IPv6 prefix, suffix skip empty component at beginning or end :: skips to suffix two bytes per component IPv4-in-little-endian ipv6 | 965 | en | 0.623813 |
import law
import luigi
import os
from subprocess import PIPE
from law.util import interruptable_popen
from framework import Task
class CROWNBuild(Task):
    """
    Gather and compile CROWN with the given configuration.

    Configures CROWN with cmake for one (era, sampletype) combination,
    runs ``make install``, and packs the install directory into a tarball,
    which is this task's output target.
    """

    # configuration variables (luigi CLI/config parameters)
    channels = luigi.Parameter()
    shifts = luigi.Parameter()
    build_dir = luigi.Parameter()
    install_dir = luigi.Parameter()
    # Shell script sourced to prepare the build environment (cmake etc.).
    env_script = os.path.join(
        os.path.dirname(__file__), "../../", "setup", "setup_crown_cmake.sh"
    )

    def output(self):
        """Tarball target for this (era, sampletype) build.

        NOTE(review): ``era``, ``sampletype`` and ``local_target`` are not
        defined in this class — presumably provided by the ``Task`` base
        class in ``framework``; confirm there.
        """
        return self.local_target("crown_{}_{}.tar.gz".format(self.era, self.sampletype))

    def run(self):
        """Configure, compile, install and tar CROWN; no-op if the tarball exists."""
        # get output file path
        output = self.output()
        print(output.path)
        if os.path.exists(output.path):
            # Already built for this configuration — nothing to do.
            print("tarball already existing in {}".format(output.path))
        else:
            output.parent.touch()
            _sampletype = str(self.sampletype)
            _era = str(self.era)
            _channels = str(self.channels)
            _analysis = str(self.analysis)
            _shifts = str(self.shifts)
            # Per-configuration subdirectory name, e.g. "2018_mc".
            _tag = "{}_{}".format(_era, _sampletype)
            _build_dir = os.path.join(str(self.build_dir), _tag)
            _install_dir = os.path.join(str(self.install_dir), _tag)

            # find crown (expects a CROWN checkout in the working directory)
            _crown_path = os.path.abspath("CROWN")

            # create build directory
            if not os.path.exists(_build_dir):
                os.makedirs(_build_dir)
            _build_dir = os.path.abspath(_build_dir)
            # same for the install directory
            if not os.path.exists(_install_dir):
                os.makedirs(_install_dir)
            _install_dir = os.path.abspath(_install_dir)

            # set environment variables from the setup script
            my_env = self.set_environment(self.env_script)

            # checking cmake path (doubles as an environment sanity check)
            code, _cmake_executable, error = interruptable_popen(
                ["which", "cmake"], stdout=PIPE, stderr=PIPE, env=my_env
            )

            # actual payload:
            print("=========================================================")
            print("| Starting cmake step for CROWN")
            print("| Using cmake {}".format(_cmake_executable.replace("\n", "")))
            print("| Using CROWN {}".format(_crown_path))
            print("| Using build_directory {}".format(_build_dir))
            print("| Using install directory {}".format(_install_dir))
            print("=========================================================")

            # run the CROWN cmake configuration step
            _cmake_cmd = ["cmake", _crown_path]

            _cmake_args = [
                "-DANALYSIS={ANALYSIS}".format(ANALYSIS=_analysis),
                "-DSAMPLES={SAMPLES}".format(SAMPLES=_sampletype),
                "-DERAS={ERAS}".format(ERAS=_era),
                "-DCHANNELS={CHANNELS}".format(CHANNELS=_channels),
                "-DSHIFTS={SHIFTS}".format(SHIFTS=_shifts),
                "-DINSTALLDIR={INSTALLDIR}".format(INSTALLDIR=_install_dir),
                "-B{BUILDFOLDER}".format(BUILDFOLDER=_build_dir),
            ]
            print("Executable: {}".format(" ".join(_cmake_cmd + _cmake_args)))
            code, out, error = interruptable_popen(
                _cmake_cmd + _cmake_args, stdout=PIPE, stderr=PIPE, env=my_env
            )
            print(code, out, error)
            # abort the task if the cmake configuration failed
            if code != 0:
                print("Error when running cmake {}".format(error))
                print("Output: {}".format(out))
                print("cmake returned non-zero exit status {}".format(code))
                raise Exception("cmake failed")
            else:
                print("Successful cmake build !")

            # compile and install into _install_dir
            print("Executable: {}".format(" ".join(["make", "install"])))
            code, out, error = interruptable_popen(
                ["make", "install"],
                stdout=PIPE,
                stderr=PIPE,
                env=my_env,
                cwd=_build_dir,
            )
            if code != 0:
                print("Error when running make {}".format(error))
                print("Output: {}".format(out))
                print("make returned non-zero exit status {}".format(code))
                raise Exception("make failed")
            else:
                print("Successful cmake build !")

            # Pre-create the tarball file so tar can --exclude it, then
            # pack the entire install directory.
            code, out, error = interruptable_popen(
                ["touch", output.basename],
                stdout=PIPE,
                stderr=PIPE,
                env=my_env,
                cwd=os.path.join(_install_dir),
            )
            command = [
                "tar",
                "-czvf",
                output.basename,
                "--exclude={}".format(output.basename),
                ".",
            ]
            print("Executable: {}".format(" ".join(command)))
            code, out, error = interruptable_popen(
                command,
                stdout=PIPE,
                stderr=PIPE,
                env=my_env,
                cwd=os.path.join(_install_dir),
            )
            if code != 0:
                print("Error when creating tarball {}".format(error))
                print("Output: {}".format(out))
                print("tar returned non-zero exit status {}".format(code))
                raise Exception("tar failed")
            else:
                print("Successful tarball creation ! ")
            # publish the tarball to the task's output location
            output.copy_from_local(os.path.join(_install_dir, output.basename))
        print("=======================================================")
| processor/tasks/CROWNBuild.py | 5,702 | Gather and compile CROWN with the given configuration
configuration variables get output file path find crown create build directory same for the install directory set environment variables checking cmake path actual payload: run CROWN build step if successful save Herwig-cache and run-file as tar.gz TODO Create Tarball from the install directory\ | 351 | en | 0.599292 |
#!/usr/bin/env python
# encoding: utf-8
class MeanVariance(object):
    """Online (single-pass) mean and unbiased sample variance.

    Uses the shifted-data formulation: the first observation becomes the
    shift K, and only the running sums of (x - K) and (x - K)**2 are kept,
    which is numerically safer than summing raw x and x**2.
    """

    def __init__(self):
        self.n = 0      # number of observations seen
        self.K = 0.0    # shift value (the first observation)
        self.ex0 = 0.0  # running sum of (x - K)
        self.ex2 = 0.0  # running sum of (x - K)**2

    def add_variable(self, x):
        """Fold one observation into the running sums."""
        if not self.n:
            self.K = x
        self.n += 1
        shifted = x - self.K
        self.ex0 += shifted
        self.ex2 += shifted * shifted

    def get_mean_value(self):
        """Mean of all observations added so far."""
        return self.K + self.ex0 / self.n

    def get_variance(self):
        """Unbiased sample variance (needs at least two observations)."""
        return (self.ex2 - self.ex0 * self.ex0 / self.n) / (self.n - 1)
if __name__ == "__main__":
    # Smoke test: the mean of 1..4 is 2.5.
    mv = MeanVariance()
    mv.add_variable(1.0)
    mv.add_variable(2.0)
    mv.add_variable(3.0)
    mv.add_variable(4.0)
    # BUG FIX: the Python-2-only `print x` statement is a SyntaxError under
    # Python 3; `print(x)` behaves identically on both interpreters.
    print(mv.get_mean_value())
| src/main/python/mean-variance.py | 729 | !/usr/bin/env python encoding: utf-8 | 36 | en | 0.34219 |
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# FORKED FROM https://github.com/floydawong/LuaFormat
# Copyright (c) 2017 Floyda (floyda@163.com)
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
# Head and tail of the doubly linked token (node) list.
_start_node = None
_end_node = None
# Formatted output lines, appended by create_line().
_lines = []
# Formatter options (tab_size, bracket_split, special_symbol_split, ...),
# read everywhere via _settings.get().
# BUG FIX: this global was defined as `_setting`, but every consumer reads
# `_settings`, which raised NameError the first time formatting ran.
_settings = {}
# ----------------------------------------------------------
# Const
# ----------------------------------------------------------
class NodeType:
    """Names of the token categories assigned to nodes (values equal names).

    COMMENT_SINGLE / COMMENT_MULTI are never produced by character lookup
    (their NodePattern entries are empty); foreach_node() assigns them when
    it recognises Lua comment syntax.
    """

    WORD = "WORD"
    BLANK = "BLANK"
    OPERATOR = "OPERATOR"
    SEPARATOR = "SEPARATOR"
    EQUAL = "EQUAL"
    BRACKET = "BRACKET"
    REVERSE_BRACKET = "REVERSE_BRACKET"
    ENTER = "ENTER"
    STRING = "STRING"
    COMMENT_SINGLE = "COMMENT_SINGLE"
    COMMENT_MULTI = "COMMENT_MULTI"
# Characters that select each NodeType during tokenisation; WORD is the
# fallback for anything not listed (see get_char_type()).
NodePattern = {
    "WORD": [],
    "BLANK": [" "],
    "OPERATOR": ["+", "-", "*", "/", "^", "%"],
    "SEPARATOR": [",", ";"],
    "EQUAL": ["=", "~", ">", "<"],
    "BRACKET": ["(", "{", "["],
    "REVERSE_BRACKET": [")", "}", "]"],
    "ENTER": ["\r\n", "\n", "\r"],
    "STRING": ['"', "'"],
    "COMMENT_SINGLE": [],
    "COMMENT_MULTI": [],
}

# Types that never merge with an adjacent node of the same type
# (each occurrence stays its own node; see parse_node()).
SingletonType = [
    NodeType.BRACKET,
    NodeType.REVERSE_BRACKET,
    NodeType.STRING,
    NodeType.BLANK,
]

# Types whose text is emitted verbatim by Node.__str__ (spacing preserved).
CommentType = [NodeType.STRING, NodeType.COMMENT_SINGLE, NodeType.COMMENT_MULTI]

# Lua keywords that open an indented block / close one.
IndentKeyword = [
    "function",
    "for",
    "repeat",
    "while",
    "if",
    "do",
]
UnindentKeyword = ["end", "until"]
# ----------------------------------------------------------
# Line
# ----------------------------------------------------------
class Line:
    """One formatted output line: an ordered node list plus an indent level."""

    def __init__(self):
        self._nodes = []
        self._indent = 0

    def __str__(self):
        text = "".join(str(n) for n in self._nodes)
        # Strip spaces from the code part, keeping the trailing newline.
        newline_at = text.find("\n")
        text = text[:newline_at].strip(" ") + text[newline_at:]
        if text.strip(" ") == "\n":
            return "\n"  # 20
        return " " * _settings.get("tab_size") * self._indent + text

    def is_blank_line(self):
        """True when the line holds nothing but blanks and line breaks."""
        return all(
            n.type in [NodeType.BLANK, NodeType.ENTER] for n in self._nodes
        )

    def add(self, node):
        """Append a node to this line."""
        self._nodes.append(node)

    def get_nodes(self):
        """Return the (mutable) node list."""
        return self._nodes

    def set_indent(self, indent):
        """Set the absolute indent level."""
        self._indent = indent

    def get_indent(self):
        """Current indent level."""
        return self._indent

    def add_indent(self, indent):
        """Shift the indent level by a (possibly negative) delta."""
        self._indent += indent
def create_line():
    """Append a fresh Line to the module-level output list and return it."""
    new_line = Line()
    _lines.append(new_line)
    return new_line
# ----------------------------------------------------------
# Node
# ----------------------------------------------------------
class NodeIterator:
    """Forward iterator over the global node list, starting at _start_node."""

    def __init__(self):
        self.node = _start_node

    def __iter__(self):
        return self

    def __next__(self):
        # Python 3 protocol delegates to the Python-2-style next().
        return self.next()

    def next(self):
        if self.node is None:
            raise StopIteration()
        current, self.node = self.node, self.node.next
        return current
class Node:
    """A token in the doubly linked node stream.

    Holds the raw text (``_str``) plus dynamic ``type``, ``last`` and
    ``next`` properties that default to None until assigned.
    """

    def __init__(self, c):
        self._str = c

    def __str__(self):
        if self.type is NodeType.BLANK:
            # Blanks always render as exactly one space.
            return " "
        if self.type in CommentType:
            # Strings/comments keep their spacing but normalise doubled
            # escape sequences back to single ones.
            r = self._str
            r = r.replace(r"\\n", r"\n")
            r = r.replace(r"\\r", r"\r")
            return r
        return self._str.strip(" ")

    def add(self, c):
        """Append more characters to this node's text."""
        self._str += c

    def make_property(attr):
        # Factory for a property backed by "_<attr>" that reads as None
        # until first assignment.
        def set_attr():
            def inner(self, value):
                setattr(self, "_" + attr, value)
            return inner

        def get_attr():
            def inner(self):
                try:
                    return getattr(self, "_" + attr)
                except AttributeError:
                    # BUG FIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit; only a missing backing
                    # attribute should map to None.
                    return None
            return inner

        return property(get_attr(), set_attr())

    type = make_property("type")
    last = make_property("last")
    next = make_property("next")
    del make_property
def create_node(content, type=None):
    """Append a new Node to the global linked list and return it."""
    global _start_node
    global _end_node

    node = Node(content)
    node.type = type

    if _start_node is None:
        _start_node = node
    if _end_node is not None:
        # Link the new node onto the current tail.
        node.last = _end_node
        _end_node.next = node
    _end_node = node
    return node
def insert_blank_node(node):
    """Splice a single blank node into the list just before *node*."""
    blank = Node(" ")
    blank.type = NodeType.BLANK
    # Wire the blank in between node.last and node.
    blank.last = node.last
    blank.next = node
    node.last.next = blank
    node.last = blank
def merge_prev_node(node):
    """Fold *node*'s text into its predecessor and unlink it.

    Returns the predecessor, or *node* itself when it has none.
    """
    prev = node.last
    if prev is None:
        return node

    prev.add(str(node))
    # Unlink node: the predecessor now points at node's successor.
    if node.next is not None:
        node.next.last = prev
        prev.next = node.next
    else:
        prev.next = None
    return prev
def delete_node(node):
    """Unlink *node* from the list and return its successor (or None)."""
    prev, nxt = node.last, node.next
    if prev and nxt:
        prev.next = nxt
        nxt.last = prev
    elif nxt is None:
        # Tail node: just truncate the predecessor's forward link.
        prev.next = None
    elif prev is None:
        # Head node: the successor becomes the new head.
        nxt.last = None
    return nxt
def delete_forward_blank(node):
    """Remove every consecutive BLANK node immediately before *node*."""
    cursor = node.last
    while cursor is not None and cursor.type == NodeType.BLANK:
        delete_node(cursor)
        # The removed node's own backward link is untouched, so it still
        # leads to the next candidate.
        cursor = cursor.last
def delete_backward_blank(node):
    """Remove every consecutive BLANK node immediately after *node*."""
    cursor = node
    while True:
        cursor = cursor.next
        if cursor is None or cursor.type != NodeType.BLANK:
            return
        # delete_node returns the successor; step back to its predecessor
        # so the loop advances onto the next candidate.
        cursor = delete_node(cursor).last
def get_forward_char(node, count):
    """Return the last *count* characters of the text ending at *node*.

    Walks backward through the list, accumulating each node's rendered
    text until at least *count* characters are collected; returns fewer
    when the list is exhausted first.
    """
    pieces = []
    total = 0
    while node is not None:
        text = str(node)
        pieces.append(text)
        total += len(text)
        if total >= count:
            return "".join(reversed(pieces))[-count:]
        node = node.last
    return "".join(reversed(pieces))
def get_forward_type(node):
    """Type of the immediately preceding node, or None at the list head."""
    return node.last.type if node.last else None
def get_forward_type_for_negative(node):
    """Type of the nearest preceding non-BLANK node (None at the head).

    Used to decide whether a '-' token is a negative sign or a binary
    minus, by looking at what precedes it.
    """
    cursor = node.last
    while cursor is not None:
        if cursor.type != NodeType.BLANK:
            return cursor.type
        cursor = cursor.last
    return None
# ----------------------------------------------------------
# Format
# ----------------------------------------------------------
def split_content(content, count=1):
    """Split *content* into a (head, rest) pair after *count* characters."""
    head, rest = content[:count], content[count:]
    return head, rest
def get_char_type(c):
    """Map a character to its NodeType name via NodePattern (WORD default)."""
    for node_type, chars in NodePattern.items():
        if c in chars:
            return node_type
    return NodeType.WORD
def parse_node(content):
    """Tokenise *content* into the global node list.

    Consecutive characters of the same non-singleton type accumulate into
    one node; singleton types (brackets, quotes, blanks) and type changes
    start a new node.
    """
    current = None
    while content:
        char, content = split_content(content)
        char_type = get_char_type(char)
        if (
            current is not None
            and char_type == current.type
            and char_type not in SingletonType
        ):
            current.add(char)
        else:
            current = create_node(char, char_type)
def foreach_node():
    """Second tokenizer pass: merge multi-node constructs into single nodes.

    Recognises quoted strings, long-bracket strings ``[[...]]`` and
    ``[=[...]=]``, single-line ``--`` comments and multi-line
    ``--[=[...]=]`` comments, folding each construct's nodes into one
    node with the appropriate type.
    """
    node = _start_node
    while node:
        if node.type == NodeType.STRING:
            # Quoted string: merge until the matching unescaped quote.
            char_key = str(node)
            while True:
                node = node.next
                if char_key == str(node) and get_forward_char(node, 2)[0] != "\\":
                    merge_prev_node(node)
                    break
                if not node.next:
                    break
                merge_prev_node(node)
        str_node = str(node)
        if (
            str_node == len(str_node) * "="
            and str(node.last) == "["
            and str(node.next) == "["
        ):
            # Long-bracket "[=[ ... ]=]" opener: merge until the matching
            # "]=]" (same number of '=' signs) is seen.
            end_flag = "]%s]" % (len(str_node) * "=")
            node = merge_prev_node(node)
            # NOTE(review): tagged COMMENT_SINGLE although this looks like a
            # long string/comment body — presumably only to get verbatim
            # rendering via CommentType; confirm intent.
            node.type = NodeType.COMMENT_SINGLE
            while True:
                node = node.next
                merge_prev_node(node)
                if get_forward_char(node, len(end_flag)) == end_flag:
                    break
                if not node.next:
                    break
        if get_forward_char(node, 2) == "[[":
            # Plain long string "[[ ... ]]": merge everything up to "]]".
            node = merge_prev_node(node)
            node.type = NodeType.STRING
            while True:
                node = node.next
                node.type = NodeType.STRING
                if get_forward_char(node, 2) == "]]":
                    node = merge_prev_node(node)
                    break
                merge_prev_node(node)
                if not node.next:
                    break
        if get_forward_char(node, 2) == "--":
            # "--" comment: merge to end of line, unless it turns out to be
            # the opener of a multi-line comment "--[=[", in which case
            # merge until the matching "]=]" instead.
            node.type = NodeType.COMMENT_SINGLE
            while True:
                node = node.next
                if node.type == NodeType.ENTER:
                    break
                if not node.next:
                    break
                tmp = merge_prev_node(node)
                str_tmp = str(tmp)
                # Flags derived from the accumulated text so far: if it
                # spells exactly "--[=*[", this is a multi-line comment.
                check_flag = "--[%s[" % ((len(str_tmp) - 4) * "=")
                end_flag = "]%s]" % ((len(str_tmp) - 4) * "=")
                if str(tmp) == check_flag:
                    node = tmp
                    # Multi-line comment body: consume until end_flag.
                    while True:
                        node = node.next
                        if get_forward_char(node, len(end_flag)) == end_flag:
                            merge_prev_node(node)
                            break
                        merge_prev_node(node)
                        if not node.next:
                            break
                    break
        node = node.next
def foreach_blank():
    """Collapse runs of consecutive BLANK nodes into single blanks."""
    for node in NodeIterator():
        prev = node.last
        if prev is not None and node.type == NodeType.BLANK == prev.type:
            merge_prev_node(node)
def foreach_string_connect():
    """Re-tag the Lua concatenation token '..' as an operator so the
    operator-spacing pass applies to it."""
    for node in NodeIterator():
        if str(node) != "..":
            continue
        node.type = NodeType.OPERATOR
def foreach_operator():
    """Normalize whitespace around operator nodes.

    A "-" token gets special treatment: it is left alone when it is the
    exponent sign of a scientific-notation number (e.g. "1e-5"), and when
    it is a unary minus the blank in front of the operand is removed.
    Every other OPERATOR node has its surrounding blanks stripped and,
    when the "special_symbol_split" setting is on, exactly one blank
    re-inserted on each side.
    """
    for node in NodeIterator():
        if str(node) == "-":
            prev_text = str(node.last) if node.last else ""
            # Scientific notation such as "1e-5": the "-" belongs to the
            # number literal, so leave spacing untouched.
            # len >= 2 guard fixes an IndexError the original code raised
            # when the preceding token was a single character (e.g. "e").
            if (
                len(prev_text) >= 2
                and prev_text[-1].lower() == "e"
                and prev_text[-2] in "0123456789"
            ):
                continue
            # Unary minus: the token before it is not a value (word,
            # closing bracket or string), so drop the blank before the
            # operand instead of spacing it like a binary operator.
            pntype = get_forward_type_for_negative(node)
            if pntype not in (NodeType.WORD, NodeType.REVERSE_BRACKET, NodeType.STRING):
                delete_backward_blank(node)
                continue
        if node.type == NodeType.OPERATOR:
            delete_forward_blank(node)
            delete_backward_blank(node)
            if _settings.get("special_symbol_split"):
                if node.last and node.last.type is not NodeType.BLANK:
                    insert_blank_node(node)
                if node.next and node.next.type is not NodeType.BLANK:
                    insert_blank_node(node.next)
def foreach_separator():
    """Strip blanks around SEPARATOR nodes; optionally re-insert one after."""
    for current in NodeIterator():
        if current.type != NodeType.SEPARATOR:
            continue
        delete_forward_blank(current)
        delete_backward_blank(current)
        if not _settings.get("special_symbol_split"):
            continue
        following = current.next
        if following and following.type is not NodeType.BLANK:
            insert_blank_node(following)
def foreach_equal():
    """Merge adjacent EQUAL nodes, then normalize spacing around them."""
    # Pass 1: fold consecutive EQUAL nodes (e.g. "==") into one node.
    for current in NodeIterator():
        if current.type != NodeType.EQUAL:
            continue
        if current.last and current.last.type is NodeType.EQUAL:
            merge_prev_node(current)
    # Pass 2: strip surrounding blanks and optionally re-insert one blank
    # on each side when the "special_symbol_split" setting is enabled.
    for current in NodeIterator():
        if current.type != NodeType.EQUAL:
            continue
        delete_forward_blank(current)
        delete_backward_blank(current)
        if not _settings.get("special_symbol_split"):
            continue
        before, after = current.last, current.next
        if before and before.type is not NodeType.BLANK:
            insert_blank_node(current)
        if after and after.type is not NodeType.BLANK:
            insert_blank_node(after)
def foreach_bracket():
    """Normalize blanks around bracket nodes.

    Opening (BRACKET) and closing (REVERSE_BRACKET) nodes each have the
    adjacent blank removed; with the "bracket_split" setting a blank is
    re-inserted on the outer side.  Also removes a line break that sits
    directly after a closing bracket.

    NOTE(review): the exact side stripped by delete_forward_blank vs
    delete_backward_blank is defined elsewhere in this file — confirm
    against those helpers before relying on the direction.
    """
    for node in NodeIterator():
        if node.type == NodeType.BRACKET:
            # Opening bracket: strip the adjacent blank.
            delete_backward_blank(node)
            if _settings.get("bracket_split"):
                if node.next and node.next.type != NodeType.BRACKET:
                    insert_blank_node(node.next)
        if node.type == NodeType.REVERSE_BRACKET:
            # Closing bracket: strip the adjacent blank.
            delete_forward_blank(node)
            if _settings.get("bracket_split"):
                if node.last and node.last.type != NodeType.REVERSE_BRACKET:
                    insert_blank_node(node)
            # Remove the ENTER separating two consecutive closing
            # brackets so ") )" on two lines joins onto one.
            if (
                node.last
                and node.last.last
                and node.last.type == NodeType.ENTER
                and node.last.last.type == NodeType.REVERSE_BRACKET
            ):
                delete_node(node.last)
def foreach_word():
    """Merge consecutive WORD nodes into a single identifier node."""
    for current in NodeIterator():
        previous = current.last
        if previous and current.type == NodeType.WORD == previous.type:
            merge_prev_node(current)
def tidy_indent():
    """Group nodes into lines and assign each line its indentation level.

    Tracks bracket balance and the Lua keywords that open/close blocks
    (IndentKeyword / UnindentKeyword, defined elsewhere in this file).
    line_indent limits how far the level may move within a single line
    (at most one step either way); indent is the running level applied
    to finished lines.
    """
    global line_indent
    global indent
    line_indent = 0
    indent = 0
    line = create_line()
    line_key_dict = {}      # token text -> occurrences on the current line
    bracket_key_dict = {}   # bracket text -> occurrences (accumulated)
    def deal_indent(line, delta=0):
        # Apply the current level (plus an optional offset) to a line.
        line.set_indent(indent + delta)
    def inc_indent(delta):
        # Move the level by delta, clamped so a single line can shift the
        # level by at most one step, and the level never goes negative.
        global line_indent
        global indent
        if line_indent + delta > 1:
            return
        if line_indent + delta < -1:
            return
        line_indent += delta
        indent += delta
        if indent < 0:
            indent = 0
    for node in NodeIterator():
        line.add(node)
        key = str(node)
        line_key_dict[key] = line_key_dict.get(key, 0) + 1
        if node.type is NodeType.BRACKET or node.type is NodeType.REVERSE_BRACKET:
            bracket_key_dict[key] = bracket_key_dict.get(key, 0) + 1
        if node.type is NodeType.ENTER:
            # End of line: open brackets left unclosed raise the level...
            inc_indent(
                1 if line_key_dict.get("(", 0) > line_key_dict.get(")", 0) else 0
            )
            inc_indent(
                1 if line_key_dict.get("{", 0) > line_key_dict.get("}", 0) else 0
            )
            inc_indent(
                1 if line_key_dict.get("[", 0) > line_key_dict.get("]", 0) else 0
            )
            # ...while surplus closing brackets lower it (and re-indent
            # the line that contains them).
            if line_key_dict.get("(", 0) < line_key_dict.get(")", 0):
                inc_indent(-1)
                deal_indent(line)
            if line_key_dict.get("{", 0) < line_key_dict.get("}", 0):
                inc_indent(-1)
                deal_indent(line)
            if line_key_dict.get("[", 0) < line_key_dict.get("]", 0):
                inc_indent(-1)
                deal_indent(line)
            # Balance Lua "do" ... "end" pairs seen on this line.
            do_count = line_key_dict.get("do", 0)
            end_count = line_key_dict.get("end", 0)
            if do_count > 0 and do_count <= end_count:
                indent += end_count - do_count
                deal_indent(line)
                line = create_line()
            else:
                line = create_line()
                deal_indent(line)
            # Reset the per-line state for the next line.
            line_indent = 0
            del line_key_dict
            line_key_dict = {}
        # "else"/"elseif" print one level shallower than their body.
        if str(node) == "else" or str(node) == "elseif":
            deal_indent(line, -1)
        if str(node) in IndentKeyword:
            inc_indent(1)
        if str(node) in UnindentKeyword:
            inc_indent(-1)
    # Indent whatever is left on the final (unterminated) line.
    deal_indent(line)
# ----------------------------------------------------------
# Main
# ----------------------------------------------------------
def purge():
    """Reset all module-level formatter state before a new run."""
    global _start_node
    global _end_node
    global _lines
    global _settings
    _start_node = None  # head of the parsed node list
    _end_node = None    # tail of the parsed node list
    _lines = []         # formatted Line objects produced by tidy_indent()
    _settings = {}      # formatting options for the current run
def _lua_format(lines, setting=None):
    """Run the full formatting pipeline over *lines*.

    Resets module state, joins the input into one string (with an extra
    trailing newline so the last statement is always terminated), strips
    tabs, escapes literal "\\n"/"\\r" sequences, then applies every
    normalization pass.  The result is left in the module-level _lines.

    NOTE(review): passing setting=None leaves _settings as None, which
    would break the passes that call _settings.get(); callers are
    expected to supply a dict.
    """
    purge()
    global _settings
    _settings = setting
    # Build the content in one join instead of repeated string
    # concatenation (which is quadratic); the extra "\n" at the end
    # matches the original behavior of appending a final blank line.
    content = "".join("%s\n" % line for line in lines) + "\n"
    content = content.replace("\t", "")
    content = content.replace(r"\n", r"\\n")
    content = content.replace(r"\r", r"\\r")
    parse_node(content)
    foreach_node()
    foreach_blank()
    foreach_string_connect()
    foreach_word()
    foreach_bracket()
    foreach_operator()
    foreach_separator()
    foreach_equal()
    tidy_indent()
# return a string
def lua_format(lines, settings):
    """Format *lines* of Lua source and return the result as one string.

    Runs the internal pipeline, then serializes the resulting lines while
    collapsing runs of blank lines down to a single blank line.  The final
    character (a trailing newline) is dropped to mirror the input shape.
    """
    _lua_format(lines, settings)
    parts = []
    blank_line_count = 0
    for line in _lines:
        if line.is_blank_line():
            blank_line_count += 1
            if blank_line_count >= 2:
                continue  # keep at most one consecutive blank line
        else:
            blank_line_count = 0
        parts.append(str(line))
    # join + slice instead of repeated += (quadratic on large files).
    return "".join(parts)[:-1]
def load_lines(fpath):
    """Read *fpath* and return its lines without trailing newlines.

    Arguments:
        fpath: Path of the text file to read.

    Returns:
        A list of the file's lines, newline characters removed.
    """
    lines = []
    with open(fpath, "r") as fp:
        for line in fp:
            # rstrip("\n") instead of line[:-1]: a final line without a
            # trailing newline must not lose its last real character.
            lines.append(line.rstrip("\n"))
    # The `with` block closes the file; the old explicit fp.close() and
    # the unreachable `return []` after the return were removed.
    return lines
if __name__ == "__main__":
    # Default formatter options; keys consumed via _settings.get() above.
    settings = {}
    settings["tab_size"] = 2
    settings["special_symbol_split"] = False
    settings["bracket_split"] = False
    # Format every file named on the command line, rewriting it in place.
    # NOTE(review): relies on `sys` being imported earlier in the file.
    for i, _ in enumerate(sys.argv):
        if i == 0:
            continue  # argv[0] is the script itself
        content_origin = load_lines(sys.argv[i])
        fmt_resultT = lua_format(content_origin, settings)
        with open(sys.argv[i], "w") as f:
            f.write(fmt_resultT)
        print("'{}' formatted".format(sys.argv[i]))
| lua-format.py | 17,873 | !/usr/bin/python3 -*- coding:utf-8 -*- FORKED FROM https://github.com/floydawong/LuaFormat Copyright (c) 2017 Floyda (floyda@163.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
---------------------------------------------------------- Const ---------------------------------------------------------- ---------------------------------------------------------- Line ---------------------------------------------------------- 20 ---------------------------------------------------------- Node ---------------------------------------------------------- ---------------------------------------------------------- Format ---------------------------------------------------------- COMMENT_SINGLE node = merge_prev_node(node) node.type == NodeType.COMMENT_MULTI scientific notation 科学计数法 negative number 负号 ---------------------------------------------------------- Main ---------------------------------------------------------- deal content for node in NodeIterator(): print(str(node), node.ty8e) return "" exit() return a string | 2,006 | en | 0.625816 |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.core.exceptions import ExecutionFailed
from resource_management.core.resources.system import Directory
from resource_management.core.resources.system import File
from resource_management.core.source import Template
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions.get_user_call_output import get_user_call_output
from resource_management.libraries.script import Script
import metron_service
from rest_commands import RestCommands
class RestMaster(Script):
    """Ambari lifecycle handler for the Metron REST application.

    Each method is invoked by Ambari's agent via Script.execute(); the
    `params` / `status_params` modules are imported lazily inside each
    method because Ambari generates them per-command.
    """
    def install(self, env):
        """Install the Metron REST packages."""
        from params import params
        env.set_params(params)
        self.install_packages(env)
    def configure(self, env, upgrade_type=None, config_dir=None):
        """Write configuration and perform one-time environment setup.

        Each is_*_configured() check guards an idempotent setup step
        (Kafka topics, HBase tables, pcap, HDFS dir, ACLs) so repeated
        configure calls do not redo completed work.
        """
        from params import params
        env.set_params(params)
        File(format("/etc/default/metron"),
             content=Template("metron.j2")
             )
        metron_service.refresh_configs(params)
        commands = RestCommands(params)
        if not commands.is_kafka_configured():
            commands.init_kafka_topics()
        if not commands.is_hbase_configured():
            commands.create_hbase_tables()
        if not commands.is_pcap_configured():
            commands.init_pcap()
        if not commands.is_metron_user_hdfs_dir_configured():
            commands.create_metron_user_hdfs_dir()
        if params.security_enabled and not commands.is_hbase_acl_configured():
            commands.set_hbase_acls()
        if params.security_enabled and not commands.is_kafka_acl_configured():
            commands.init_kafka_acls()
            commands.set_kafka_acl_configured()
        if params.security_enabled and not commands.is_pcap_perm_configured():
            # If we Kerberize the cluster, we need to call this again, to remove write perms from hadoop group
            # If we start off Kerberized, it just does the same thing twice.
            commands.init_pcap()
            commands.set_pcap_perm_configured()
    def start(self, env, upgrade_type=None):
        """Configure (idempotent) and then start the REST application."""
        from params import params
        env.set_params(params)
        self.configure(env)
        commands = RestCommands(params)
        commands.start_rest_application()
    def stop(self, env, upgrade_type=None):
        """Stop the REST application."""
        from params import params
        env.set_params(params)
        commands = RestCommands(params)
        commands.stop_rest_application()
    def status(self, env):
        """Probe the REST port; raise ComponentIsNotRunning when it fails.

        Ambari interprets the raised exception as "stopped".
        """
        from params import status_params
        env.set_params(status_params)
        cmd = format('curl --max-time 3 {hostname}:{metron_rest_port}')
        try:
            get_user_call_output(cmd, user=status_params.metron_user)
        except ExecutionFailed:
            raise ComponentIsNotRunning()
    def restart(self, env):
        """Re-configure and restart the REST application."""
        from params import params
        env.set_params(params)
        self.configure(env)
        commands = RestCommands(params)
        commands.restart_rest_application(env)
if __name__ == "__main__":
    # Ambari invokes this script directly; execute() dispatches to the
    # lifecycle method (install/configure/start/...) named on the command line.
    RestMaster().execute()
| metron-deployment/packaging/ambari/metron-mpack/src/main/resources/common-services/METRON/CURRENT/package/scripts/rest_master.py | 3,848 | Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
If we Kerberize the cluster, we need to call this again, to remove write perms from hadoop group If we start off Kerberized, it just does the same thing twice. | 917 | en | 0.880309 |
## When fixing the NCM codes, the C180 and C190 records may end up with
## duplicated field sets according to the manual.
## In that case all records with these characteristics must be merged.
import helper
def exec(conexao):
    """Deduplicate repeated C190 record groups per C010 block (SPED rule 02).

    After NCM corrections, C190 records may repeat the key fields
    (r2, r5, r6, r7) within one C010 block; the manual forbids that.
    For each repeated group this merges the child C191/C195 records,
    removes duplicates, re-inserts them and recomputes the C190 total.

    Arguments:
        conexao: An open DB-API connection to the working SQLite database.

    NOTE(review): the values interpolated into the SQL below come from
    the database itself (not user input); the r0s query filters only
    r2/r5/r6 although the duplicate grouping also used r7 — confirm
    whether r7 should be filtered too.
    """
    cursor = conexao.cursor()
    print("RULE 02 - Inicializando",end=' ')
    # r0 of every C010 record: these delimit the blocks to scan.
    select = " SELECT r0 FROM principal WHERE r1 = \"C010\" "
    select = cursor.execute(select)
    rselect = select.fetchall()
    rselect = [i[0] for i in rselect]
    #rselect.append(rselect[len(rselect)-1] + 1000)
    # Append an upper bound: the last C191/C195 after the final C010.
    select = " SELECT max(r0) FROM principal WHERE "
    select = select + " r1 in (\"C191\",\"C195\") "
    select = select + " AND r0 > " + str(rselect[len(rselect)-1]) + " "
    temp = cursor.execute(select)
    temp = temp.fetchone()[0]
    rselect.append(temp == None and rselect[len(rselect)-1] + 1 or temp)
    n2 = rselect.pop(0)
    while len(rselect) > 0:
        print('-',end=' ')
        n1 = n2
        n2 = rselect.pop(0)
        # Check whether this C010 block contains repeated C190 records.
        select = " SELECT r2,r5,r6,r7,count(*) c "
        select = select + " FROM principal "
        select = select + " WHERE r1 = \"C190\" "
        select = select + " AND r0 BETWEEN " + str(n1) + " AND " + str(n2) + " "
        select = select + " GROUP BY r2,r5,r6,r7 "
        select = select + " HAVING COUNT(*) > 1 "
        select = select + " ORDER BY r5 "
        repetidos = cursor.execute(select)
        repetidos = repetidos.fetchall()
        ## No duplicates: move on to the next C010 block.
        if len(repetidos) == 0:
            continue
        ## Fix each group of repeated C190 records in turn.
        for i in repetidos:
            print('-',end=' ')
            ## Fetch the r0 of every C190 in this repeated group.
            select = " SELECT r0 FROM principal "
            select = select + " WHERE r1 = \"C190\" "
            select = select + " AND r0 BETWEEN " + str(n1) + " AND " + str(n2) + " "
            select = select + " AND r2 = \"" + i[0] + "\" "
            select = select + " AND r5 = \"" + i[1] + "\" "
            select = select + " AND r6 = \"" + i[2] + "\" "
            r0s = cursor.execute(select)
            r0s = r0s.fetchall()
            r0s = [i[0] for i in r0s]
            primeiroID = r0s[0]
            qtrepetidos = len(r0s)
            ## Collect every C191/C195 child record of the repeated C190s.
            lista = []
            for i2 in r0s:
                limit = helper.takeLimit(cursor,i2,"C190")
                select = " SELECT r0,r1,r2,r3,r4, "
                select = select + " (ROUND(CAST(replace(r5,',','.') AS FLOAT),2)) r5, "
                select = select + " r6,r7,r8,r9,r10,r11,r12 "
                select = select + " FROM principal WHERE "
                select = select + " r0 BETWEEN " + str(limit[0]) + " AND " + str(limit[1])
                select = select + " AND r1 in (\"C191\",\"C195\") "
                temp = cursor.execute(select)
                temp = temp.fetchall()
                lista.append(temp)
            if len(lista) > 1:
                # Flatten the per-C190 child lists into one list.
                lista1 = []
                for z in range(0,len(lista)):
                    lista1 = lista1 + lista[z]
                lista = []
                ids = []
                for i2 in lista1:
                    lista.append(i2[1:])  # record minus its r0 key
                    ids.append(i2[0])
                lista = list(set(lista))  # drop exact duplicates
                #ids.append(temp[1][0])
                ## Delete all the child records, then re-insert only the
                ## deduplicated ones below.
                delete = "DELETE FROM principal WHERE "
                delete = delete + " r0 BETWEEN " + str(ids[0]) + " AND " + str(ids[len(ids)-1]) + " "
                cursor.execute(delete)
                conexao.commit()
                ## Re-insert the unique items, summing the item total as we go.
                valor_total = 0
                lista.sort()
                primeiroIDTemp = primeiroID
                for i3 in lista:
                    valor_total = valor_total + i3[4]
                    primeiroIDTemp = primeiroIDTemp + 1
                    stringt = "\",\"".join([str(iz) for iz in i3])
                    insert = ""
                    insert = insert + " INSERT INTO principal(r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,r10,r11,r12) "
                    insert = insert + " VALUES("
                    insert = insert + str(primeiroIDTemp) + ",\""
                    insert = insert + stringt.replace(".",",")  # back to BR decimal comma
                    insert = insert + "\")"
                    cursor.execute(insert)
                    conexao.commit()
                ## Update the surviving C190 with the recomputed total value.
                update = ""
                update = update + " UPDATE principal SET "
                update = update + " r8 = \"" + str(round(valor_total / qtrepetidos,2)).replace(".",",") +"\""
                update = update + " where r0 = " + str(primeiroID)
                cursor.execute(update)
                conexao.commit()
    print("Finalizado")
import argparse
from functools import partial
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.optimize import minimize
def read_args():
    """Parse the command line arguments.

    Returns: The parsed argument namespace (file, units).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-f', '--file', default='orbit.csv', type=str,
        help='path to .csv file')
    parser.add_argument(
        '-u', '--units', default='km', type=str,
        help='units of distance (m or km)')
    return parser.parse_args()
def plane_err(data, coeffs):
    """Total squared distance of *data* points from the plane ax+by+cz=0.

    Arguments:
        data: A numpy array of points, one per row.
        coeffs: The coefficients (a, b, c) of the plane ax+by+cz=0.

    Returns: The sum of squared point-to-plane distances.
    """
    a, b, c = coeffs
    signed = a * data[:, 0] + b * data[:, 1] + c * data[:, 2]
    norm_sq = a ** 2 + b ** 2 + c ** 2
    return np.sum(signed ** 2) / norm_sq
def project_to_plane(points, coeffs):
    """Orthogonally project *points* onto the plane ax+by+cz=0.

    Arguments:
        coeffs: The coefficients (a, b, c) of the plane ax+by+cz=0.
        points: A numpy array of points.

    Returns: A numpy array of the projected points.
    """
    a, b, c = coeffs
    # Projection matrix onto the plane, scaled by |n|^2 (divided out below).
    projector = [
        [b ** 2 + c ** 2, -a * b, -a * c],
        [-a * b, a ** 2 + c ** 2, -b * c],
        [-a * c, -b * c, a ** 2 + b ** 2],
    ]
    return np.matmul(points, projector) / (a ** 2 + b ** 2 + c ** 2)
def conv_to_2D(points, x, y):
    """Express planar *points* in the 2D basis [x, y].

    Arguments:
        points: A numpy array of points lying in the plane.
        x: One basis vector of the plane.
        y: The other basis vector.

    Returns: Coordinates of the points with respect to [x, y].
    """
    # Only the first two components are needed to invert the basis change.
    basis_inv = np.linalg.inv([x[0:2], y[0:2]])
    return np.matmul(points[:, 0:2], basis_inv)
def cart_to_pol(points):
    """Convert cartesian coordinates to polar ones.

    Arguments:
        points: Rows of [x, y].

    Returns: Rows of [radius, angle] (angle in radians).
    """
    xs, ys = points[:, 0], points[:, 1]
    polar = np.empty(points.shape)
    polar[:, 0] = np.sqrt(xs ** 2 + ys ** 2)
    polar[:, 1] = np.arctan2(ys, xs)
    return polar
def ellipse_err(polar_coords, params):
    """Total squared radial error of the data w.r.t. an ellipse.

    The ellipse has one focus at the origin and is described by
    r = a(1-e^2) / (1 + e*cos(t - t0)), where params = [a, e, t0]:
    semi-major axis, eccentricity, and angle of the major axis.

    Arguments:
        polar_coords: Rows of [radius, angle].
        params: The array [a, e, t0].

    Returns: The sum over all samples of (r_model - r_data)^2.
    """
    a, e, t0 = params
    numerator = a * (1 - e ** 2)
    denominator = 1 + e * np.cos(polar_coords[:, 1] - t0)
    model_r = np.divide(numerator, denominator)
    residuals = model_r - polar_coords[:, 0]
    return np.sum(residuals ** 2)
# Main program
# NOTE(review): assumes the CSV has one header row and position columns at
# indices 1..3 (time presumably in column 0) — confirm against the data files.
args = read_args()
data = np.loadtxt(args.file,skiprows=1,usecols=(1,2,3));
# try to fit a plane to the data first.
# make a partial function of plane_err by supplying the data
plane_err_data = partial(plane_err,data)
# plane is defined by ax+by+cz=0.
p0 = [0,0,1] # make an initial guess
# minimize the error
p = minimize(plane_err_data,p0,method='nelder-mead').x
p = p/np.linalg.norm(p) # normalize p
# now p is the normal vector of the best-fit plane.
# lan_vec is a vector along the line of intersection of the plane
# and the x-y plane.
lan_vec = np.cross([0,0,1],p)
# if lan_vec is [0,0,0] it means that it is undefined and can take on
# any value. So we set it to [1,0,0] so that the rest of the
# calculation can proceed.
if (np.array_equal(lan_vec,[0,0,0])):
    lan_vec = [1,0,0]
# inclination is the angle between p and the z axis.
inc = math.acos(np.dot(p,[0,0,1])/np.linalg.norm(p))
# lan is the angle between the lan_vec and the x axis.
lan = math.acos(np.dot(lan_vec,[1,0,0])/np.linalg.norm(lan_vec))
# now we try to convert the problem into a 2D problem.
# project all the points onto the plane.
proj_data = project_to_plane(data,p)
# p_x and p_y are 2 orthogonal unit vectors on the plane.
p_x,p_y = lan_vec, project_to_plane(np.cross([0,0,1],lan_vec),p)
p_x,p_y = p_x/np.linalg.norm(p_x), p_y/np.linalg.norm(p_y)
# find coordinates of the points wrt the basis [x,y].
coords_2D = conv_to_2D(proj_data,p_x,p_y)
# now try to fit an ellipse to these points.
# convert them into polar coordinates
polar_coords = cart_to_pol(coords_2D)
# make an initial guess for the parametres
# (mean of min/max radius for a; their normalized spread for e; the
# angle of the closest point for t0 — standard conic relations).
r_m = np.min(polar_coords[:,0])
r_M = np.max(polar_coords[:,0])
a0 = (r_m+r_M)/2
e0 = (r_M-r_m)/(r_M+r_m)
t00 = polar_coords[np.argmin(polar_coords[:,0]),1]
params0 = [a0,e0,t00] # initial guess
# make a partial function of ellipse_err with the data
ellipse_err_data = partial(ellipse_err,polar_coords)
# minimize the error
params = minimize(ellipse_err_data,params0,method='nelder-mead').x
# output the parametres
print("Semi-major axis: ",params[0],args.units)
print("Eccentricity: ",params[1])
print("Argument of periapsis: ",params[2],"rad")
print("Inclination: ",inc,"rad")
print("Longitude of Ascending Node:",lan,"rad")
# now plot the results
a,e,t0 = params
# generate 1000 points on the ellipse
theta = np.linspace(0,2*math.pi,1000)
radii = a*(1-e**2)/(1+e*np.cos(theta-t0))
# convert to cartesian
x_s = np.multiply(radii,np.cos(theta))
y_s = np.multiply(radii,np.sin(theta))
# convert to 3D
mat = np.column_stack((p_x,p_y))
coords_3D = np.matmul(mat,[x_s,y_s])
fig = plt.figure()
ax = Axes3D(fig)
ax.axis('equal')
# plot
ax.plot3D(coords_3D[0],coords_3D[1],coords_3D[2],'red',label='Fitted Ellipse')
# plot every 8th sample so the scatter stays readable
ax.scatter3D(data[::8,0],data[::8,1],data[::8,2],c='black',depthshade=False,label='Initial Data')
# The Pale Blue Dot
ax.scatter3D(0,0,0,c='blue',depthshade=False,label='Earth')
ax.can_zoom()
ax.legend()
plt.show()
| orbitdeterminator/kep_determination/ellipse_fit.py | 6,629 | Converts a list of cartesian coordinates into polar ones.
Arguments:
points: The list of points in the format [x,y].
Returns:
A list of polar coordinates in the format [radius,angle].
Finds coordinates of points in a plane wrt a basis.
Given a list of points in a plane, and a basis of the plane,
this function returns the coordinates of those points
wrt this basis.
Arguments:
points: A numpy array of points.
x: One vector of the basis.
y: Another vector of the basis.
Returns:
Coordinates of the points wrt the basis [x,y].
Calculates the total squared error of the data wrt an ellipse.
params is a 3 element array used to define an ellipse.
It contains 3 elements a,e, and t0.
a is the semi-major axis
e is the eccentricity
t0 is the angle of the major axis wrt the x-axis.
These 3 elements define an ellipse with one focus at origin.
Equation of the ellipse is r = a(1-e^2)/(1+ecos(t-t0))
The function calculates r for every theta in the data.
It then takes the square of the difference and sums it.
Arguments:
polar_coords: A list of polar coordinates in the format [radius,angle].
params: The array [a,e,t0].
Returns:
The total squared error of the data wrt the ellipse.
Calculates the total squared error of the data wrt a plane.
The data should be a list of points. coeffs is an array of
3 elements - the coefficients a,b,c in the plane equation
ax+by+c = 0.
Arguments:
data: A numpy array of points.
coeffs: The coefficients of the plane ax+by+c=0.
Returns: The total squared error wrt the plane defined by ax+by+cz = 0.
Projects points onto a plane.
Projects a list of points onto the plane ax+by+c=0,
where a,b,c are elements of coeffs.
Arguments:
coeffs: The coefficients of the plane ax+by+c=0.
points: A numpy array of points.
Returns:
A list of projected points.
Reads command line arguments.
Returns: Parsed arguments.
*57.296 Main program try to fit a plane to the data first. make a partial function of plane_err by supplying the data plane is defined by ax+by+cz=0. make an initial guess minimize the error normalize p now p is the normal vector of the best-fit plane. lan_vec is a vector along the line of intersection of the plane and the x-y plane. if lan_vec is [0,0,0] it means that it is undefined and can take on any value. So we set it to [1,0,0] so that the rest of the calculation can proceed. inclination is the angle between p and the z axis. lan is the angle between the lan_vec and the x axis. now we try to convert the problem into a 2D problem. project all the points onto the plane. p_x and p_y are 2 orthogonal unit vectors on the plane. find coordinates of the points wrt the basis [x,y]. now try to fit an ellipse to these points. convert them into polar coordinates make an initial guess for the parametres initial guess make a partial function of ellipse_err with the data minimize the error output the parametres now plot the results generate 1000 points on the ellipse convert to cartesian convert to 3D plot The Pale Blue Dot | 2,989 | en | 0.798229 |
from os import environ
from os.path import join, dirname
# Third party imports
from flask_compress import Compress
class Config:
    """
    Common configurations shared by all environments.

    Values are read from the process environment at import time, so the
    required variables (SECRET_KEY, PROJECT_ID, CLOUDSQL_*, DB_HOST_*,
    CLOUD_STORAGE_BUCKET) must be set before this module is imported.
    """
    # private variable used by Flask to secure/encrypt session cookies
    SECRET_KEY = environ['SECRET_KEY']
    # get the root url and concatenate it with the client secrets file
    OIDC_CLIENT_SECRETS = join(dirname(__file__), "client_secrets.json")
    # test out login and registration in development without using SSL
    OIDC_COOKIE_SECURE = False
    # URL to handle user login
    OIDC_CALLBACK_ROUTE = "/oidc/callback"
    # what user data to request on log in
    OIDC_SCOPES = ["openid", "email", "profile"]
    OIDC_ID_TOKEN_COOKIE_NAME = "oidc_token"
    TESTING = False
    DEBUG = False
    CSRF_ENABLED = True  # protect against CSRF attacks
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Gzip compression allows to reduce the size of the response by 70-90%
    # Flask-Compress compresses the application's response with gzip
    COMPRESS_MIMETYPES = ['text/html', 'text/css', 'text/xml',
                          'application/json', 'application/javascript']
    COMPRESS_LEVEL = 6
    COMPRESS_MIN_SIZE = 500
    CACHE_TYPE = 'simple'  # compare with memcached, redis, filesystem, etc.
    # 'datastore' does not require any additional configuration.
    DATA_BACKEND = 'datastore'  # alternatively 'cloudsql' or 'mongodb'
    # Google Cloud Project ID
    PROJECT_ID = environ['PROJECT_ID']
    # CloudSQL & SQLAlchemy configuration
    CLOUDSQL_USER = environ['CLOUDSQL_USER']
    CLOUDSQL_PASSWORD = environ['CLOUDSQL_PASSWORD']
    CLOUDSQL_DATABASE = environ['CLOUDSQL_DATABASE']
    DB_HOST_IP = environ['DB_HOST_IP']
    DB_HOST_PORT = environ['DB_HOST_PORT']
    # The CloudSQL proxy is used locally to connect to the cloudsql instance.
    # To start the proxy, use:
    #
    #   $ cloud_sql_proxy -instances=your-connection-name=tcp:3306
    #
    # Port 3306 is the standard MySQL port. If you need to use a different port,
    # change the 3306 to a different port number.
    # Alternatively, use a local MySQL instance for testing.
    LOCAL_SQLALCHEMY_DATABASE_URI = (
        'postgresql://{}:{}@127.0.0.1:5432/{}').format(CLOUDSQL_USER, CLOUDSQL_PASSWORD, CLOUDSQL_DATABASE)
    # When running on App Engine, a unix socket is used to connect to the cloudsql instance.
    LIVE_SQLALCHEMY_DATABASE_URI = ('postgresql://{}:{}@{}:{}/{}').format(
        CLOUDSQL_USER, CLOUDSQL_PASSWORD, DB_HOST_IP, DB_HOST_PORT, CLOUDSQL_DATABASE)
    # Pick the live URI in production, the local proxy URI otherwise.
    SQLALCHEMY_DATABASE_URI = LIVE_SQLALCHEMY_DATABASE_URI if environ.get(
        'FLASK_ENV') == 'production' else LOCAL_SQLALCHEMY_DATABASE_URI
    # Mongo configuration
    # If using mongolab, the connection URI is available from the mongolab control
    # panel. If self-hosting on compute engine, replace the values below.
    # MONGO_URI = 'mongodb://user:password@host:27017/database'
    # Google Cloud Storage and upload settings.
    # You can adjust the max content length and allow extensions settings to allow
    # larger or more varied file types if desired.
    CLOUD_STORAGE_BUCKET = environ['CLOUD_STORAGE_BUCKET']
    MAX_CONTENT_LENGTH = 8 * 1024 * 1024  # 8 MiB upload cap
    ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
    # OAuth2 configuration.
    # This can be generated from the Google Developers Console at
    # https://console.developers.google.com/project/_/apiui/credential.
    #
    # * http://localhost:8080/oauth2callback
    # * https://flask-okta-1.appspot.com/oauth2callback.
    #
    # If you receive a invalid redirect URI error review you settings to ensure
    # that the current URI is allowed.
    # GOOGLE_OAUTH2_CLIENT_ID = \
    #     'your-client-id'
    # GOOGLE_OAUTH2_CLIENT_SECRET = 'your-client-secret'
class ProductionConfig(Config):
    """
    Production configurations: debugging and testing disabled.
    """
    DEBUG = False
    TESTING = False
class DevelopmentConfig(Config):
    """
    Development configurations: debug mode and SQL statement echoing on.
    """
    DEBUG = True
    SQLALCHEMY_ECHO = True  # log every SQL statement SQLAlchemy emits
class TestingConfig(Config):
    """
    Testing configurations: enables Flask's TESTING flag.
    """
    TESTING = True
# Map environment names (e.g. FLASK_ENV values) to configuration classes.
app_config = {
    'development': DevelopmentConfig,
    'production': ProductionConfig,
    'testing': TestingConfig
}
def configure_app(app):
    """Apply cross-cutting configuration to a Flask *app*.

    Currently this only enables response compression via Flask-Compress
    (using the COMPRESS_* settings defined on Config).
    """
    # Configure Compressing
    Compress(app)
| config.py | 4,396 | Common configurations
Development configurations
Production configurations
Testing configurations
Multiple app configurations
Third party imports private variable used by Flask to secure/encrypt session cookies get the root url and concantenate it with the client secrets file test out login and registration in development without using SSL URL to handle user login what user data to request on log in protect against CSRF attacks Gzip compression allows to reduce the size of the response by 70-90% Flask-Compress compresses the application’s response with gzip compare with memcached, redis, filesystem, etc. 'datastore' does not require any additional configuration. alternatively 'cloudsql' or 'mongodb' Google Cloud Project ID CloudSQL & SQLAlchemy configuration The CloudSQL proxy is used locally to connect to the cloudsql instance. To start the proxy, use: $ cloud_sql_proxy -instances=your-connection-name=tcp:3306 Port 3306 is the standard MySQL port. If you need to use a different port, change the 3306 to a different port number. Alternatively, use a local MySQL instance for testing. When running on App Engine, a unix socket is used to connect to the cloudsql instance. Mongo configuration If using mongolab, the connection URI is available from the mongolab control panel. If self-hosting on compute engine, replace the values below. MONGO_URI = 'mongodb://user:password@host:27017/database' Google Cloud Storage and upload settings. You can adjust the max content length and allow extensions settings to allow larger or more varied file types if desired. OAuth2 configuration. This can be generated from the Google Developers Console at https://console.developers.google.com/project/_/apiui/credential. * http://localhost:8080/oauth2callback * https://flask-okta-1.appspot.com/oauth2callback. If you receive a invalid redirect URI error review you settings to ensure that the current URI is allowed. GOOGLE_OAUTH2_CLIENT_ID = \ 'your-client-id' GOOGLE_OAUTH2_CLIENT_SECRET = 'your-client-secret' Configure Compressing | 2,045 | en | 0.73698 |
"""Module with functions around testing. You can run tests including doctest with generating coverage,
you can generate tests from readme or you can configure tests in conftest with single call."""
from mypythontools_cicd.tests.tests_internal import (
add_readme_tests,
deactivate_test_settings,
default_test_config,
run_tests,
setup_tests,
TestConfig,
)
__all__ = [
"add_readme_tests",
"deactivate_test_settings",
"default_test_config",
"run_tests",
"setup_tests",
"TestConfig",
]
| mypythontools_cicd/tests/__init__.py | 532 | Module with functions around testing. You can run tests including doctest with generating coverage,
you can generate tests from readme or you can configure tests in conftest with single call. | 191 | en | 0.92687 |
# -*- coding: utf-8 -*-
import tkinter as tk
from tkinter import messagebox
import ipaddress
from model import RecvTcpThread, RecvUdpThread, RecvMonitorThread, HachiUtil
from controller import LogController
# =================================
# == 定数
# =================================
DEF_PROTO = 1 # 0:TCP 1:UDP
DEF_DST_PROT = 12000
MAX_DATALEN = 9999
# =================================
# == 公開クラス
# =================================
class RecvParams(object):
    """Singleton holding the receive-side parameters (protocol, IP, port)."""

    _instance = None

    def __new__(cls, *args, **keys):
        # Lazily create the single instance; the Tk variables live on the
        # class so every construction site sees the same state.
        if cls._instance is not None:
            return cls._instance
        cls._instance = super().__new__(cls)
        cls.proto = tk.IntVar(value=DEF_PROTO)  # 0:TCP 1:UDP
        cls.ip = tk.StringVar()
        cls.port = tk.IntVar(value=DEF_DST_PROT)
        return cls._instance
class MonitorParams(object):
    """Singleton holding receive-monitor state (data length, bps/pps display vars)."""
    _instance = None
    def __new__(cls, *args, **keys):
        if cls._instance is None:
            cls._instance = object.__new__(cls)
            cls.datalen = tk.IntVar(value=0)
            cls.bps = tk.StringVar(value="0 bps")
            cls.pps = tk.IntVar(value=0)
            cls.recv_btn = tk.StringVar(value="受信開始")
            # Recompute the human-readable bps string whenever pps changes.
            cls.pps.trace_add('write', HachiUtil.UpdateBps(
                cls.datalen, cls.pps, cls.bps))
        return cls._instance
class RecvShareObject(object):
    """Mutable counters shared between the receive thread and the monitor thread."""

    def __init__(self):
        # Packets in the current monitoring interval, and overall total.
        self.count = self.total = 0
class RecvAction:
    """Callback bound to the receive start/stop button.

    Validates the user-entered parameters, then toggles between starting
    and stopping the receive/monitor threads, locking the parameter
    widgets while a reception is in progress.
    """

    def __init__(self, widgets):
        self.recvParams = RecvParams()
        self.monitorParams = MonitorParams()
        self.widgets = widgets      # parameter widgets to disable while receiving
        self.stat = 0               # 0: idle, 1: receiving
        self.shareObj = RecvShareObject()
        # Worker threads, created on demand.
        self.th_recv = None
        self.th_monitor = None

    def __call__(self, event=None):
        # Validate input parameters before acting on the button press.
        msg = _param_check()
        if len(msg) > 0:
            messagebox.showwarning(title="warning", message=msg)
            return
        if self.stat == 0:
            self._recv_start()
        else:
            self._recv_stop()

    def _recv_start(self):
        """Start the monitor thread and the protocol-specific receive thread."""
        logger = LogController.LogController()
        # Monitor thread first, so counters are live when packets arrive.
        self.monitor_start()
        # 0:TCP 1:UDP
        proto = RecvParams.proto.get()
        ip = RecvParams.ip.get()
        port = RecvParams.port.get()
        if proto == 0:
            logger.insert("TCPパケット受信を開始します({}:{})".format(ip, port))
            self.recv_tcp_start()
        elif proto == 1:
            logger.insert("UDPパケット受信を開始します({}:{})".format(ip, port))
            self.recv_udp_start()
        MonitorParams.recv_btn.set("受信停止")
        # Lock the parameter widgets while receiving.
        for widget in self.widgets.values():
            widget.state(['disabled'])
        self.stat = 1

    def recv_tcp_start(self):
        """Start the TCP packet receive thread."""
        self.th_recv = RecvTcpThread.RecvTcpThread(RecvParams(), self.shareObj)
        # .daemon replaces the setDaemon() call deprecated since Python 3.10.
        self.th_recv.daemon = True
        self.th_recv.start()

    def recv_udp_start(self):
        """Start the UDP packet receive thread."""
        self.th_recv = RecvUdpThread.RecvUdpThread(RecvParams(), self.shareObj)
        self.th_recv.daemon = True
        self.th_recv.start()

    def monitor_start(self):
        """Start the receive-monitoring thread."""
        self.th_monitor = RecvMonitorThread.RecvMonitorThread(
            MonitorParams(), self.shareObj)
        self.th_monitor.daemon = True
        self.th_monitor.start()

    def _recv_stop(self):
        """Stop the receive/monitor threads and re-enable the widgets."""
        LogController.LogController().insert("パケット受信を停止します")
        # Stop worker threads, if they were started.
        if self.th_recv is not None:
            self.th_recv.stop()
        if self.th_monitor is not None:
            self.th_monitor.stop()
        MonitorParams().recv_btn.set("受信開始")
        # Re-enable the parameter widgets.
        for widget in self.widgets.values():
            widget.state(['!disabled'])
        self.stat = 0
# =================================
# == ローカル関数
# =================================
def _param_check():
    """Validate the receive parameters; return an error message ('' when valid)."""
    errors = []
    # The listen address must belong to one of the local interfaces.
    if not HachiUtil.LocalAddress().is_localaddress(RecvParams.ip.get()):
        errors.append("・指定した待受IPアドレスがインターフェースにありません。\n")
    # Port must fit the 16-bit range.
    port = RecvParams.port.get()
    if port < 0 or port > 65535:
        errors.append("・ポート番号は 0~65535 の範囲で指定してください。\n")
    return "".join(errors)
| controller/RxController.py | 4,992 | 受信モニター情報クラス
受信パラメータ情報クラス
スレッド間で値を共有するためのクラス
受信パラメータチェック
パケット受信監視スレッド
TCPパケット送信スレッド
UDPパケット受信スレッド
-*- coding: utf-8 -*- ================================= == 定数 ================================= 0:TCP 1:UDP ================================= == 公開クラス ================================= "bps換算"動的更新 スレッド変数 入力パラメータチェック ログ出力インスタンス モニタースレッド開始 パケット受信スレッド開始 0:TCP 1:UDP ウィジェット非活性化 スレッド停止 設定ウィジェット活性化 ================================= == ローカル関数 ================================= IPアドレスチェック インタフェースなし ポート番号 0~65535 | 511 | ja | 0.919497 |
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
from behave import *
import numpy
import toyplot
import testing
@given(u'a sample plot, the plot can be rendered with a dashed line style.')
def step_impl(context):
    """Render a quadratic curve with a dashed stroke and compare against the reference canvas."""
    canvas, axes, mark = toyplot.plot(numpy.linspace(0, 1) ** 2, style={"stroke-dasharray":"5,5"})
    testing.assert_canvas_equal(canvas, "style-stroke-dasharray")
| features/steps/style.py | 508 | Copyright 2014, Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. | 167 | en | 0.84275 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Grappler Arithmetic Optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArithmeticOptimizerTest(test.TestCase):

  # See b/146524878.
  def testFunctionArgShapeInference(self):
    """Grappler should fold the transpose into the matmul for traced functions."""

    @def_function.function
    def f(x, y):
      return math_ops.matmul(
          x, array_ops.reshape(array_ops.transpose(y), [384, 1536]))

    with context.eager_mode():
      x = array_ops.ones((1, 384))
      y = array_ops.ones((1536, 384))
      with context.collect_graphs(optimized=True) as graphs:
        f(x, y).numpy()
      # The optimized graph must contain the fused transpose+matmul node.
      self.assertLen(graphs, 1)
      self.assertLen(graphs[0].node, 4)
      self.assertEqual(graphs[0].node[2].name,
                       'ArithmeticOptimizer/FoldTransposeIntoMatMul_MatMul')
if __name__ == '__main__':
test.main()
| tensorflow/python/grappler/arithmetic_optimizer_test.py | 1,757 | Tests for Grappler Arithmetic Optimizer.
Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== See b/146524878. | 720 | en | 0.82264 |
from .DtnAbstractParser import DtnAbstractParser
from pydantic import confloat, PositiveFloat
from typing import Optional
class DtnFileBroadcasterParser(DtnAbstractParser):
    """ Validator for a file generator """
    # Data type tag attached to the generated bundles
    data_type: str
    # Start time of the file transmission (in simulation time)
    tstart: confloat(gt=-1) = 0
    # Bundle size in bits
    bundle_size: PositiveFloat
    # File size in [bits]
    size: PositiveFloat
    # Bundle Time-to-live (TTL) in [sec]
    bundle_TTL: PositiveFloat
    # Data criticality. If True, then network will be flooded
    # with this data
    critical: bool = False
    # How many times to send the file
    repeat: int = 1
    # How long in [sec] to wait between sending the files again
    wait: float = 0.0
Data Tyoe Start time of the file transmission (in simulation time) Bundle size in bits File size in [bits] Bundle Time-to-live (TTL) in [sec] Data criticality. If True, then network will be flooded with this data How many times to send the file How long in [sec] to wait between sending the files again | 336 | en | 0.880581 |
# this file is only for presentation of the swallow fish game
# http://www.4399.com/flash/201247_4.htm
from pyautogui import press, keyDown, keyUp
from time import time
import serialPart
# u = 'up'
# d = 'down'
# l = 'left'
# r = 'right'
u = 'w'
d = 's'
l = 'a'
r = 'd'
def key_unit(key, period):
    """Hold *key* down for roughly *period* seconds, then release it.

    The keyDown/keyUp calls take ~0.2 s themselves, so that overhead is
    subtracted from the requested period; if the period is shorter than
    the overhead the key is just tapped.
    """
    cmd_time_cost = 0.21
    to_delay = period - cmd_time_cost
    if to_delay > 0:
        # start = time()
        keyDown(key)  # mind that these them selves take time also
        # NOTE(review): 0.203 disagrees with cmd_time_cost (0.21) above --
        # presumably both were meant to be the same constant; confirm.
        delay(period - 0.203)
        keyUp(key)
    else:
        keyDown(key)
        keyUp(key)
        # print('cannot be too short',time())
    # print(time()-start)
def delay(period):
    """Busy-wait for *period* seconds (spin loop, sub-millisecond precision)."""
    deadline = time() + period
    while time() < deadline:
        pass
def sim_key_by_time(x, y, period=0.0000000000000001, thresh=50):  # adjust this period(second) for better game control
    """Translate a joystick deflection (x, y) into timed key holds.

    Deflections inside the +/-thresh dead zone are ignored; beyond it the
    overshoot is mapped linearly onto a hold duration, giving each axis up
    to half of *period*.
    """
    half_period = period / 2
    key_x = key_y = None  # None means "no key for this axis"

    # Clamp the raw readings to the +/-100 working range.
    x = max(-100, min(100, x))
    y = max(-100, min(100, y))

    # Horizontal axis: pick direction, keep only the overshoot past the dead zone.
    if x > thresh or x < -thresh:
        key_x = r if x > 0 else l
        x = abs(x) - thresh
    else:
        x = 0
    # Vertical axis, same treatment.
    if y > thresh or y < -thresh:
        key_y = u if y > 0 else d
        y = abs(y) - thresh
    else:
        y = 0

    # Scale the remaining deflection into per-axis hold durations.
    tx = x / (100 - thresh) * half_period
    ty = y / (100 - thresh) * half_period

    if key_x:
        key_unit(key_x, tx)
    if key_y:
        key_unit(key_y, ty)
def sim_key_by_press(x, y, thresh=10, div=1):
    """Translate a joystick deflection (x, y) into repeated key taps.

    The deflection beyond the +/-thresh dead zone is divided by *div*;
    each remaining unit becomes one key press, interleaving the two axes
    with the larger remaining deflection pressed first.
    """
    key_x = key_y = None  # None means "no key for this axis"
    # Clamp the raw readings to the +/-100 working range.
    if x > 100:
        x = 100
    if y > 100:
        y = 100
    if x < -100:
        x = -100
    if y < -100:
        y = -100
    # Dead zone handling: pick key direction, keep only the overshoot.
    if (x < -thresh) or (x > thresh):
        if x > 0:
            key_x = r
        else:
            key_x = l
        x = abs(x) - thresh
    else:
        x = 0
    if (y < -thresh) or (y > thresh):
        if y > 0:
            key_y = u
        else:
            key_y = d
        y = abs(y) - thresh
    else:
        y = 0
    x = x // div
    y = y // div
    t1 = time()
    while x > 0 or y > 0:
        if x >= y:
            press(key_x)
            # BUGFIX: was `x -= x`, which zeroed the axis after a single
            # press and made `div` meaningless; decrement one press at a time.
            x -= 1
        else:
            press(key_y)
            # BUGFIX: was `y -= y` (same zeroing bug).
            y -= 1
    print(x + y, time() - t1)
def sim_key_by_shortest_hold(x, y, thresh=10, div=10):
    """Translate a joystick deflection (x, y) into repeated shortest key holds.

    Like sim_key_by_press, but each unit of deflection becomes a
    zero-length key_unit hold (press + immediate release).
    """
    key_x = key_y = None  # None means "no key for this axis"
    # Clamp the raw readings to the +/-100 working range.
    if x > 100:
        x = 100
    if y > 100:
        y = 100
    if x < -100:
        x = -100
    if y < -100:
        y = -100
    # Dead zone handling: pick key direction, keep only the overshoot.
    if (x < -thresh) or (x > thresh):
        if x > 0:
            key_x = r
        else:
            key_x = l
        x = abs(x) - thresh
    else:
        x = 0
    if (y < -thresh) or (y > thresh):
        if y > 0:
            key_y = u
        else:
            key_y = d
        y = abs(y) - thresh
    else:
        y = 0
    x = x // div
    y = y // div
    t1 = time()
    while x > 0 or y > 0:
        if x >= y:
            key_unit(key_x, 0)
            # BUGFIX: was `x -= x`, which zeroed the axis after one hold
            # and made `div` meaningless; decrement one hold at a time.
            x -= 1
        else:
            key_unit(key_y, 0)
            # BUGFIX: was `y -= y` (same zeroing bug).
            y -= 1
    print(x + y, time() - t1)
if __name__ == '__main__':
    period = 1  # control-loop interval, in seconds
    last_time = time()
    delay(1)  # wait for user to switch to game
    # x_stop_center = 235
    # y_stop_center = 74
    ser = serialPart.serial_open()
    # Calibrate the stick's resting position from a few samples.
    x_stop_center, y_stop_center = serialPart.get_avg_stop_point(ser)
    while True:
        xyz_read = serialPart.read_one_period(ser)
        z_read, y_read, x_read = xyz_read.values()  # order adjusted for the stick
        # print(x_read,y_read)
        # Within +/-100 the deflection is treated as linear rather than
        # solving the trig for the true relative angle (rough approximation).
        x = -(x_read - x_stop_center)
        y = y_read - y_stop_center
        now_time = time()
        delta_time = now_time - last_time
        # Only act once per control period.
        if delta_time > period:
            last_time = now_time
            # print(x,y)
            sim_key_by_time(x, y, thresh=5, period=1 * period)
            # sim_key_by_press(x, y, div=10)
            # sim_key_by_shortest_hold(x,y,div=30)
            # pyautogui.rightClick()
            # pyautogui.hotkey('ctrl', 'v')
| simKeyControlGame_temp2.py | 5,027 | this file is only for presentation of the swallow fish game http://www.4399.com/flash/201247_4.htm u = 'up' d = 'down' l = 'left' r = 'right' start = time() mind that these them selves take time also print('cannot be too short',time()) print(time()-start) in second adjust this period(second) for better game control todo use this None to invoke no y = -88 print(x, y, '000') print(x, y, '111') print(x, y, '222') tx = abs(x) * 0.01 * half_period ty = abs(y) * 0.01 * half_period print(key_x, key_y, tx, ty, period, release_period) t1 = time() delay(release_period) print(tx+ty,period,time()-t1) half_period = period / 2 todo use this None to invoke no y = -88 print(x, y, '000') print(x, y, '111') half_period = period / 2 todo use this None to invoke no y = -88 print(x, y, '000') print(x, y, '111') in second wait for user to switch to game x_stop_center = 235 y_stop_center = 74 order adjusted for the stick print(x_read,y_read) 在输出值在100以内不再解三角函数算相对角度增量了,虽然更为合理,粗略第当作线性近似吧 print(x,y) sim_key_by_press(x, y, div=10) sim_key_by_shortest_hold(x,y,div=30) pyautogui.rightClick() pyautogui.hotkey('ctrl', 'v') | 1,108 | en | 0.488682 |
#!/usr/bin/env python3
'''
A series of test for PyKMCFile class.
'''
import sys
import os
import subprocess
import kmer_utils
import init_sys_path
import py_kmc_api as pka
import pytest
if not init_sys_path.is_windows():
import resource
@pytest.fixture(scope="module", autouse=True)
def create_kmc_db():
    '''
    Set up tests and clean up after.

    Writes a small FASTQ input, counts its k-mers with the reference Python
    routine, runs the real kmc binary on the same input, and yields the
    expected results; teardown removes the input and database files.
    '''
    kmer_len = 17
    memory = 2 #GB
    cutoff_min = 1
    sig_len = 9
    reads_src = 'input.fastq'
    reads = (
        'GGCATTGCATGCAGTNNCAGTCATGCAGTCAGGCAGTCATGGCATGCAACGACGATCAGTCATGGTCGAG',
        'GGCATTGCATGCAGTNNCAGTCATGCAGTCAGGCAGTCATGGCATGCAACGACGATCAGTCATGGTCGAG',
        'GTCGATGCATCGATGCTGATGCTGCTGTGCTAGTAGCGTCTGAGGGCTA'
    )
    _save_reads_as_fastq(reads, reads_src)
    kmers = _cout_kmers(reads, kmer_len)
    absent_kmers = _generate_not_existing_kmers(kmers, kmer_len)
    _run_kmc(cutoff_min, kmer_len, memory, sig_len, reads_src)
    result = {
        'kmers': kmers,
        'kmer_len': kmer_len,
        'sig_len': sig_len,
        'absent_kmers': absent_kmers
    }
    yield result
    # Teardown: remove the FASTQ input and the kmc database files.
    os.remove(reads_src)
    os.remove('kmc_db.kmc_pre')
    os.remove('kmc_db.kmc_suf')
def _cout_kmers(reads, kmer_len):
    ''' Simple k-mer counting routine.

    Counts canonical k-mers (the lexicographically smaller of a k-mer and
    its reverse complement); windows containing 'N' are skipped.

    :reads: iterable of read strings
    :kmer_len: length of k-mers
    '''
    kmers = {}
    for read in reads:
        for start in range(0, len(read) - kmer_len + 1):
            kmer = read[start:start+kmer_len]
            if 'N' in kmer:
                continue
            # Canonicalize against the reverse complement.
            rev = kmer_utils.rev_comp(kmer)
            if rev < kmer:
                kmer = rev
            # dict.get replaces the `in kmers.keys()` membership test plus
            # second lookup with a single access.
            kmers[kmer] = kmers.get(kmer, 0) + 1
    return kmers
def _save_reads_as_fastq(reads, file_name):
''' Save reads from input to file named file_name. '''
file = open(file_name, 'w')
for read in reads:
file.write("@TEST\n")
file.write(read + "\n")
file.write("+TEST\n")
file.write("I"*len(read) + "\n")
file.close()
def _generate_not_existing_kmers(kmers, kmer_len):
''' Generate k-mers that are not present in the database.
:kmers: existing k-mers
:kmer_len: length of k-mers
'''
def increment_kmer(kmer, start):
''' Increments k-mer to next lexographical.
Start from pos :start: (from end, i.e. start = 0 means last k-mer symbol). '''
def replace_char(string, pos, new_char):
''' Create new string with character at :pos: changed to :new_char:. '''
if pos < 0:
pos = len(string) + pos
return string[:pos] + new_char + string[pos+1:]
for i in range(start, len(kmer)):
if kmer[-1-i] == 'A':
return replace_char(kmer, -1 - i, 'C')
if kmer[-1-i] == 'C':
return replace_char(kmer, -1 - i, 'G')
if kmer[-1-i] == 'T':
return replace_char(kmer, -1 - i, 'T')
kmer = replace_char(kmer, -1 - i, 'T')
return kmer
absent_kmers = []
for i in range(0, kmer_len):
for kmer_str in kmers.keys():
inc_kmer = increment_kmer(kmer_str, i)
if not inc_kmer in kmers.keys():
absent_kmers.append(inc_kmer)
return absent_kmers
def _run_kmc(cutoff_min, kmer_len, memory, sig_len, reads_src):
    ''' Runs kmc.

    Locates the platform-specific kmc binary relative to this file and
    counts k-mers from :reads_src: into the 'kmc_db' database.
    '''
    if init_sys_path.is_linux() or init_sys_path.is_mac():
        kmc_path = os.path.join(os.path.dirname(__file__), '../../bin/kmc')
    elif init_sys_path.is_windows():
        kmc_path = os.path.join(os.path.dirname(__file__), '../../x64/Release/kmer_counter.exe')
    if init_sys_path.is_mac():
        # kmc opens many files at once; raise the descriptor limit on macOS.
        resource.setrlimit(resource.RLIMIT_NOFILE, (2048, 2048))
    subprocess.call([kmc_path,
                     '-ci{}'.format(cutoff_min),
                     '-k{}'.format(kmer_len),
                     '-m{}'.format(memory),
                     '-p{}'.format(sig_len),
                     reads_src,
                     'kmc_db',
                     '.'
                     ])
def _open_for_listing():
    ''' Open the kmc database for listing and check that it opened successfully. '''
    kmc_file = pka.KMCFile()
    assert kmc_file.OpenForListing('kmc_db')
    return kmc_file
def _open_for_ra():
    ''' Open the kmc database for random access and check that it opened successfully. '''
    kmc_file = pka.KMCFile()
    assert kmc_file.OpenForRA('kmc_db')
    return kmc_file
def test_info(create_kmc_db):
    '''
    Test if some fields in object returned from Info are set properly.
    '''
    pattern = create_kmc_db
    kmc_file = _open_for_listing()
    info = kmc_file.Info()
    assert info.kmer_length == pattern['kmer_len']
    assert info.mode == 0 # no Quake mode (quake not supported anymore)
    assert info.counter_size == 1
    assert info.signature_len == pattern['sig_len']
    assert info.min_count == 1
    assert info.both_strands
    assert info.total_kmers == len(pattern['kmers'])
def test_kmc_file_next_kmer(create_kmc_db):
    ''' Test if all counted k-mers are returned by KMC API using NextKmer method. '''
    pattern = create_kmc_db['kmers']
    kmc_file = _open_for_listing()
    counter = pka.Count()
    kmer = pka.KmerAPI(create_kmc_db['kmer_len'])
    # Iterate the whole database and compare against the reference counts.
    res = {}
    while kmc_file.ReadNextKmer(kmer, counter):
        res[str(kmer)] = counter.value
    assert res == pattern
def test_get_counters_for_read(create_kmc_db):
    ''' Test case for GetCountersForRead method of KMCFile. '''
    kmers = create_kmc_db['kmers']
    read = "GGCATTGCATGCAGTNNCAGTCATGCAGTCAGGCAGTCATGGCATGCGTAAACGACGATCAGTCATGGTCGAG"
    # Build the expected per-window counter vector with the reference logic:
    # 0 for windows containing 'N' or absent canonical k-mers.
    pattern = []
    kmer_len = create_kmc_db['kmer_len']
    for i in range(0, len(read) - kmer_len + 1):
        kmer = read[i:i+kmer_len]
        if 'N' in kmer:
            pattern.append(0)
            continue
        rev = kmer_utils.rev_comp(kmer)
        if rev < kmer:
            kmer = rev
        if not kmer in kmers.keys():
            pattern.append(0)
        else:
            pattern.append(kmers[kmer])
    kmc_file = _open_for_ra()
    res = pka.CountVec()
    kmc_file.GetCountersForRead(read, res)
    assert res.value == pattern
def test_check_kmer(create_kmc_db):
    '''
    Test case for CheckKmer method.
    Check that k-mers from the input are present in the database and
    that k-mers absent from the input are reported as absent.
    '''
    kmers = create_kmc_db['kmers']
    kmer_len = create_kmc_db['kmer_len']
    kmer = pka.KmerAPI(kmer_len)
    counter = pka.Count()
    kmc_file = _open_for_ra()
    # Every counted k-mer must be found with the expected count.
    for kmer_str, count in kmers.items():
        kmer.from_string(kmer_str)
        assert kmc_file.CheckKmer(kmer, counter)
        assert counter.value == count
    # Known-absent k-mers must not be found.
    absent_kmers = create_kmc_db['absent_kmers']
    for kmer_str in absent_kmers:
        kmer.from_string(kmer_str)
        assert not kmc_file.CheckKmer(kmer, counter)
| tests/py_kmc_api/test_py_kmc_file.py | 6,881 | Simple k-mer counting routine.
Generate k-mers that are not present in the database.
:kmers: existing k-mers
:kmer_len: length of k-mers
Open kmc database for listing and check if opened sucessfully.
Open kmc database for random access and check if opened sucessfully.
Runs kmc.
Save reads from input to file named file_name.
Set up tests and clean up after.
Increments k-mer to next lexographical.
Start from pos :start: (from end, i.e. start = 0 means last k-mer symbol).
Create new string with character at :pos: changed to :new_char:.
Test case for CheckKmer method.
Check if are k-mers from input are present in the database and
if some not present in the input are absent in the output.
Test case for GetCountersForRead method of KMCFile.
Test if some fields in object returned from Info are set properly.
Test if all counted k-mers are returned by KMC API using NextKmer method.
A series of test for PyKMCFile class.
!/usr/bin/env python3GB no Quake mode (quake not supported anymore) | 1,004 | en | 0.830258 |
import tensorflow as tf
def leakyrelu(x, leak=0.01):
    """
    Leaky ReLU activation.

    Args:
        x (Tensor): input tensor
        leak (float): slope applied where x < 0

    Returns:
        Tensor: x where x >= 0, leak * x elsewhere
    """
    # Branch-free form: 0.5*(1+leak)*x + 0.5*(1-leak)*|x|
    # equals x for x >= 0 and leak*x for x < 0.
    pos_coeff = 0.5 * (1 + leak)
    neg_coeff = 0.5 * (1 - leak)
    return pos_coeff * x + neg_coeff * tf.abs(x)
| algorithm/BST/leakyrelu.py | 290 | leakyrelu激活函数
Args:
x (Tensor): input
leak (int): x<0时的斜率
Returns:
Tensor | 86 | ja | 0.214631 |
from unittest import TestCase
from generative_playground.codec.hypergraph_grammar import HypergraphGrammar
from generative_playground.molecules.models.conditional_probability_model import CondtionalProbabilityModel
from generative_playground.models.pg_runner import PolicyGradientRunner
class TestStart(TestCase):
    """Round-trip tests for getting/setting model coefficients on a runner."""

    def _make_runner(self):
        """Build a PolicyGradientRunner with the configuration shared by all tests.

        Extracted because both tests previously duplicated this 17-argument
        constructor call verbatim.
        """
        grammar_cache = 'hyper_grammar_guac_10k_with_clique_collapse.pickle'  # 'hyper_grammar.pickle'
        return PolicyGradientRunner('hypergraph:' + grammar_cache,
                                    BATCH_SIZE=10,
                                    reward_fun=lambda x: 0,
                                    max_steps=60,
                                    num_batches=2,
                                    lr=0.05,
                                    entropy_wgt=0.0,
                                    # lr_schedule=shifted_cosine_schedule,
                                    root_name='test',
                                    preload_file_root_name=None,
                                    plot_metrics=True,
                                    save_location='./data',
                                    metric_smooth=0.0,
                                    decoder_type='graph_conditional',  # 'rnn_graph',# 'attention',
                                    on_policy_loss_type='advantage_record',
                                    rule_temperature_schedule=None,
                                    # lambda x: toothy_exp_schedule(x, scale=num_batches),
                                    eps=0.0,
                                    priors='conditional',
                                    )

    def test_get_set_params_as_vector(self):
        """Mutating the coefficient vector and writing it back must persist."""
        first_runner = self._make_runner()
        coeffs = first_runner.get_model_coeff_vector()
        coeffs[0] = 1
        first_runner.set_model_coeff_vector(coeffs)
        coeffs2 = first_runner.get_model_coeff_vector()
        assert coeffs2[0] == coeffs[0]

    def test_get_set_params_as_property(self):
        """Same round trip, through the `params` property."""
        first_runner = self._make_runner()
        coeffs = first_runner.params
        coeffs[0] = 1
        first_runner.params = coeffs
        coeffs2 = first_runner.params
        assert coeffs2[0] == coeffs[0]
| src/tests/runner_tests.py | 3,766 | 'hyper_grammar.pickle' lr_schedule=shifted_cosine_schedule, 'rnn_graph', 'attention', lambda x: toothy_exp_schedule(x, scale=num_batches), 'hyper_grammar.pickle' lr_schedule=shifted_cosine_schedule, 'rnn_graph', 'attention', lambda x: toothy_exp_schedule(x, scale=num_batches), | 277 | en | 0.452611 |
import glob
# Debug helper: verify the SDK sources are visible from the repo root.
print(glob.glob("./src/ibmaemagic/sdk/*"))
# import sys
# sys.path.append("./src/ibmaemagic/magic/")
# from analytic_magic_client import AnalyticMagicClient
# import analytic_engine_client.AnalyticEngineClient
# from ibmaemagic.magic.analytic_magic_client import AnalyticMagicClient
# from ibmaemagic.sdk.analytic_engine_client import AnalyticEngineClient
# from ibmaemagic import AnalyticEngineClient
# Import the SDK client directly from its source directory.
import sys
sys.path.append("./src/ibmaemagic/sdk/")
from analytic_engine_client import AnalyticEngineClient
from pyinfra import host
from pyinfra.modules import git, pip, server
# Ensure the state of git repositories
git.repo(
    {'Clone pyinfra repository'},
    'git@github.com:Fizzadar/pyinfra',
    host.data.app_dir,
    branch='develop',
    ssh_keyscan=True,
    sudo=True,
    # Carry SSH agent details w/sudo
    preserve_sudo_env=True,
)

# Manage pip packages
did_install = pip.packages(
    {'Install virtualenv with pip'},
    ['virtualenv'],
    sudo=True,
)

# Use operation meta to affect the deploy: this shell command only runs
# when the pip operation above actually changed something.
if did_install.changed:
    server.shell(
        'echo "Clean package build/etc"',
    )

# Create a virtualenv (jinja-templated path, rendered per host)
server.shell(
    {'Setup the virtualenv'},
    'virtualenv {{ host.data.env_dir }}',
    sudo=True,
    sudo_user='pyinfra',
)

# and manage pip within it
pip.packages(
    {'Install Python packages with pip'},
    ['ElasticQuery', 'JsonTest'],
    virtualenv=host.data.env_dir,
    sudo=True,
    sudo_user='pyinfra',
)
| examples/python_app.py | 944 | Ensure the state of git repositories Carry SSH agent details w/sudo Manage pip packages Use operation meta to affect the deploy Create a virtualenv and manage pip within it | 172 | en | 0.639418 |
# Generated by Django 3.0.3 on 2021-03-05 03:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration: creates the custom User model."""

    initial = True

    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=25, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| app/core/migrations/0001_initial.py | 1,700 | Generated by Django 3.0.3 on 2021-03-05 03:35 | 45 | en | 0.70765 |
# -*- coding: utf-8 -*-
from dfa import *
"""
Created on Mon Apr 1 09:50:10 2019
@author: Savio
"""
# Example of a simple DFA.
# This DFA accepts exactly the odd-length strings over {0, 1}:
# the state toggles on every symbol, so state 1 <=> odd length so far.
states = {0, 1}
alphabet = {'0','1'}
transition = {
    (0, '0'): 1,
    (0, '1'): 1,
    (1, '0'): 0,
    (1, '1'): 0,
}
start_state = 0
accept_states = {1}
dfa1 = DFA(states, alphabet, transition, start_state, accept_states)
string = list('010') # Accept (length 3, odd)
# string= list('1010') # Reject (length 4, even)
print(dfa1.run(string))
| main.py | 493 | -*- coding: utf-8 -*-Example of a simple DFADFA only accepts odd-sized stringAcceptstring= list('1010') Reject | 110 | en | 0.516756 |
from amaranth import *
from amaranth.asserts import *
from amaranth.utils import log2_int
from amaranth_soc import wishbone
from amaranth_soc.memory import MemoryMap
from amaranth_soc.periph import ConstantMap
from . import Peripheral
from ..cores import litedram
__all__ = ["WritebackCache", "SDRAMPeripheral"]
class WritebackCache(Elaboratable):
    """Write-back cache.

    A write-back cache designed to bridge the SoC interconnect to LiteDRAM.

    Parameters
    ----------
    dram_port : :class:`litedram.NativePort`
        LiteDRAM user port.
    size : int
        Cache size.
    data_width : int
        Initiator bus data width.
    granularity : int
        Initiator bus granularity.
    dirty_init : bool
        Dirty initialization. Defaults to ``False``. May be useful for simulation.

    Attributes
    ----------
    intr_bus : :class:`amaranth_soc.wishbone.Interface`
        Initiator bus, with support for incremental bursts.
    """
    def __init__(self, dram_port, *, size, data_width, granularity=None, dirty_init=False):
        if not isinstance(dram_port, litedram.NativePort):
            raise TypeError("DRAM port must be an instance of lambdasoc.cores.litedram.NativePort, "
                            "not {!r}"
                            .format(dram_port))
        if not isinstance(size, int) or size <= 0 or size & size - 1:
            raise ValueError("Cache size must be a positive power of two integer, not {!r}"
                             .format(size))
        if not isinstance(data_width, int) or data_width <= 0 or data_width & data_width - 1:
            raise ValueError("Data width must be a positive power of two integer, not {!r}"
                             .format(data_width))
        if dram_port.data_width % data_width != 0:
            raise ValueError("DRAM port data width must be a multiple of data width, but {} is "
                             "not a multiple of {}"
                             .format(dram_port.data_width, data_width))
        # The initiator bus address space covers the DRAM port address space,
        # widened by the ratio between the two data widths.
        self.intr_bus = wishbone.Interface(
            addr_width = dram_port.addr_width + log2_int(dram_port.data_width // data_width),
            data_width = data_width,
            granularity = granularity,
            features = {"cti", "bte"},
        )
        intr_map = MemoryMap(
            addr_width = self.intr_bus.addr_width + log2_int(data_width // granularity),
            data_width = granularity,
        )
        try:
            intr_map.add_window(dram_port.memory_map)
        except AttributeError:
            # The DRAM port may not carry a memory map (e.g. in unit tests).
            pass
        self.intr_bus.memory_map = intr_map
        self.dram_port = dram_port
        self.size = size
        self.dirty_init = bool(dirty_init)
    def elaborate(self, platform):
        m = Module()
        # Words per cache line, and number of lines in the cache.
        ratio = self.dram_port.data_width // self.intr_bus.data_width
        nb_lines = (self.size * self.intr_bus.granularity) // self.dram_port.data_width
        # Initiator address split: word offset within a line, line index, tag.
        intr_adr = Record([
            ("offset", log2_int(ratio)),
            ("line", log2_int(nb_lines)),
            ("tag", len(self.intr_bus.adr) - log2_int(nb_lines) - log2_int(ratio)),
        ])
        m.d.comb += intr_adr.eq(self.intr_bus.adr),
        # Next burst address, following the Wishbone burst type extension
        # (linear increment, or wrap within 4/8/16-beat boundaries).
        intr_adr_next = Record.like(intr_adr)
        with m.Switch(self.intr_bus.bte):
            with m.Case(wishbone.BurstTypeExt.LINEAR):
                m.d.comb += intr_adr_next.eq(intr_adr + 1)
            with m.Case(wishbone.BurstTypeExt.WRAP_4):
                m.d.comb += intr_adr_next[:2].eq(intr_adr[:2] + 1)
                m.d.comb += intr_adr_next[2:].eq(intr_adr[2:])
            with m.Case(wishbone.BurstTypeExt.WRAP_8):
                m.d.comb += intr_adr_next[:3].eq(intr_adr[:3] + 1)
                m.d.comb += intr_adr_next[3:].eq(intr_adr[3:])
            with m.Case(wishbone.BurstTypeExt.WRAP_16):
                m.d.comb += intr_adr_next[:4].eq(intr_adr[:4] + 1)
                m.d.comb += intr_adr_next[4:].eq(intr_adr[4:])
        # One tag entry per cache line: the stored tag plus a dirty bit.
        tag_rp_data = Record([
            ("tag", intr_adr.tag.shape()),
            ("dirty", 1),
        ])
        tag_wp_data = Record.like(tag_rp_data)
        tag_mem = Memory(width=len(tag_rp_data), depth=nb_lines)
        if self.dirty_init:
            # Mark every line dirty at reset (forces evictions; simulation aid).
            tag_mem.init = [-1 for _ in range(nb_lines)]
        m.submodules.tag_rp = tag_rp = tag_mem.read_port(transparent=False)
        m.submodules.tag_wp = tag_wp = tag_mem.write_port()
        tag_rp.en.reset = 0
        m.d.comb += [
            tag_rp_data.eq(tag_rp.data),
            tag_wp.data.eq(tag_wp_data),
        ]
        # Data memory: one DRAM-port-wide word per cache line.
        dat_mem = Memory(width=self.dram_port.data_width, depth=nb_lines)
        m.submodules.dat_rp = dat_rp = dat_mem.read_port(transparent=False)
        m.submodules.dat_wp = dat_wp = dat_mem.write_port(granularity=self.intr_bus.granularity)
        dat_rp.en.reset = 0
        # Registered copy of the initiator request, used across FSM states.
        intr_bus_r = Record.like(self.intr_bus)
        intr_adr_r = Record.like(intr_adr)
        m.d.comb += intr_adr_r.eq(intr_bus_r.adr)
        # FSM: CHECK serves hits directly; on a miss, a dirty line is first
        # written back (EVICT) and the requested line is fetched (REFILL).
        with m.FSM() as fsm:
            with m.State("CHECK"):
                m.d.sync += [
                    intr_bus_r.cyc.eq(self.intr_bus.cyc),
                    intr_bus_r.stb.eq(self.intr_bus.stb),
                    intr_bus_r.adr.eq(self.intr_bus.adr),
                ]
                # Tag/Data memory read
                with m.If(self.intr_bus.cyc & self.intr_bus.stb):
                    # During an incremental burst, prefetch the next beat's line.
                    with m.If(self.intr_bus.ack & (self.intr_bus.cti == wishbone.CycleType.INCR_BURST)):
                        m.d.comb += [
                            tag_rp.addr.eq(intr_adr_next.line),
                            dat_rp.addr.eq(intr_adr_next.line),
                        ]
                    with m.Else():
                        m.d.comb += [
                            tag_rp.addr.eq(intr_adr.line),
                            dat_rp.addr.eq(intr_adr.line),
                        ]
                    with m.If(~intr_bus_r.cyc | ~intr_bus_r.stb | self.intr_bus.ack):
                        m.d.comb += [
                            tag_rp.en.eq(1),
                            dat_rp.en.eq(1),
                        ]
                m.d.comb += [
                    self.intr_bus.dat_r.eq(
                        dat_rp.data.word_select(intr_adr.offset, len(self.intr_bus.dat_r))
                    ),
                ]
                # Tag/Data memory write
                m.d.comb += [
                    tag_wp.addr .eq(intr_adr.line),
                    tag_wp_data.tag .eq(intr_adr.tag),
                    tag_wp_data.dirty.eq(1),
                    dat_wp.addr .eq(intr_adr.line),
                    dat_wp.data .eq(Repl(self.intr_bus.dat_w, ratio)),
                ]
                with m.If(self.intr_bus.cyc & self.intr_bus.stb):
                    # Hit: acknowledge, and on a write mark the line dirty.
                    with m.If(intr_adr.tag == tag_rp_data.tag):
                        m.d.comb += self.intr_bus.ack.eq(intr_bus_r.cyc & intr_bus_r.stb)
                        with m.If(self.intr_bus.we & self.intr_bus.ack):
                            m.d.comb += [
                                tag_wp.en.eq(1),
                                dat_wp.en.word_select(intr_adr.offset, len(self.intr_bus.sel)).eq(self.intr_bus.sel),
                            ]
                    # Miss: write back first if the victim line is dirty.
                    with m.Elif(intr_bus_r.cyc & intr_bus_r.stb):
                        m.d.sync += [
                            intr_bus_r.cyc.eq(0),
                            intr_bus_r.stb.eq(0),
                        ]
                        with m.If(tag_rp_data.dirty):
                            m.next = "EVICT"
                        with m.Else():
                            m.next = "REFILL"
            # Write the dirty victim line back to DRAM before refilling.
            with m.State("EVICT"):
                evict_done = Record([("cmd", 1), ("w", 1)])
                with m.If(evict_done.all()):
                    m.d.sync += evict_done.eq(0)
                    m.next = "REFILL"
                # Command
                m.d.comb += [
                    self.dram_port.cmd.valid.eq(~evict_done.cmd),
                    self.dram_port.cmd.last .eq(0),
                    self.dram_port.cmd.addr .eq(Cat(intr_adr_r.line, tag_rp_data.tag)),
                    self.dram_port.cmd.we .eq(1),
                ]
                with m.If(self.dram_port.cmd.valid & self.dram_port.cmd.ready):
                    m.d.sync += evict_done.cmd.eq(1)
                # Write
                m.d.comb += [
                    self.dram_port.w.valid.eq(~evict_done.w),
                    self.dram_port.w.we .eq(Repl(Const(1), self.dram_port.data_width // 8)),
                    self.dram_port.w.data .eq(dat_rp.data),
                ]
                with m.If(self.dram_port.w.valid & self.dram_port.w.ready):
                    m.d.sync += evict_done.w.eq(1)
            # Fetch the requested line from DRAM and mark it clean.
            with m.State("REFILL"):
                refill_done = Record([("cmd", 1), ("r", 1)])
                with m.If(refill_done.all()):
                    m.d.sync += refill_done.eq(0)
                    m.next = "CHECK"
                # Command
                m.d.comb += [
                    self.dram_port.cmd.valid.eq(~refill_done.cmd),
                    self.dram_port.cmd.last .eq(1),
                    self.dram_port.cmd.addr .eq(Cat(intr_adr_r.line, intr_adr_r.tag)),
                    self.dram_port.cmd.we .eq(0),
                ]
                with m.If(self.dram_port.cmd.valid & self.dram_port.cmd.ready):
                    m.d.sync += refill_done.cmd.eq(1)
                # Read
                m.d.comb += [
                    self.dram_port.r.ready.eq(~refill_done.r),
                    tag_wp.addr .eq(intr_adr_r.line),
                    tag_wp.en .eq((self.dram_port.r.valid & self.dram_port.r.ready)),
                    tag_wp_data.tag .eq(intr_adr_r.tag),
                    tag_wp_data.dirty.eq(0),
                    dat_wp.addr .eq(intr_adr_r.line),
                    dat_wp.en .eq(Repl((self.dram_port.r.valid & self.dram_port.r.ready), len(dat_wp.en))),
                    dat_wp.data .eq(self.dram_port.r.data),
                ]
                with m.If(self.dram_port.r.valid & self.dram_port.r.ready):
                    m.d.sync += refill_done.r.eq(1)
        # Formal-only assumptions pinning the reset state of the FSM.
        if platform == "formal":
            with m.If(Initial()):
                m.d.comb += [
                    Assume(fsm.ongoing("CHECK")),
                    Assume(~intr_bus_r.cyc),
                    Assume(~evict_done.any()),
                    Assume(~refill_done.any()),
                ]
        return m
class SDRAMPeripheral(Peripheral, Elaboratable):
    """SDRAM controller peripheral.

    Exposes two bus windows through the peripheral bridge: a data window that
    goes through a writeback cache to the LiteDRAM user port, and a control
    window mapped onto the LiteDRAM control bus (placed at offset ``core.size``).

    Parameters
    ----------
    core : :class:`litedram.Core`
        LiteDRAM core.
    cache_size : int
        Cache size, in bytes.
    cache_dirty_init : bool
        Initialize cache as dirty. Defaults to `False`.
    """
    def __init__(self, *, core, cache_size, cache_dirty_init=False):
        super().__init__()
        if not isinstance(core, litedram.Core):
            raise TypeError("LiteDRAM core must be an instance of lambdasoc.cores.litedram.Core, "
                            "not {!r}"
                            .format(core))
        self.core = core
        # Bus geometry is taken from the LiteDRAM control bus.
        data_width = core.ctrl_bus.data_width
        granularity = core.ctrl_bus.granularity
        granularity_bits = log2_int(data_width // granularity)
        # Data path : bridge -> cache -> LiteDRAM user port
        # The window address width covers the whole DRAM, expressed in
        # granularity-sized units.
        self._data_bus = self.window(
            addr_width = core.user_port.addr_width
                       + log2_int(core.user_port.data_width // 8)
                       - granularity_bits,
            data_width = data_width,
            granularity = granularity,
            features = {"cti", "bte"},
        )
        data_map = MemoryMap(
            addr_width = self._data_bus.addr_width + granularity_bits,
            data_width = granularity,
            alignment = 0,
        )
        self._cache = WritebackCache(
            core.user_port,
            size = cache_size,
            data_width = data_width,
            granularity = granularity,
            dirty_init = cache_dirty_init,
        )
        data_map.add_window(self._cache.intr_bus.memory_map)
        self._data_bus.memory_map = data_map
        # Control path : bridge -> LiteDRAM control port
        self._ctrl_bus = self.window(
            addr_width = core._ctrl_bus.addr_width,
            data_width = data_width,
            granularity = granularity,
            addr = core.size,
        )
        ctrl_map = MemoryMap(
            addr_width = self._ctrl_bus.addr_width + granularity_bits,
            data_width = granularity,
            alignment = 0,
        )
        ctrl_map.add_window(core.ctrl_bus.memory_map)
        self._ctrl_bus.memory_map = ctrl_map
        self._bridge = self.bridge(data_width=data_width, granularity=granularity)
        self.bus = self._bridge.bus
    @property
    def constant_map(self):
        # Constants exposed through the peripheral's ConstantMap:
        # total DRAM size and cache size, in bytes.
        return ConstantMap(
            SIZE = self.core.size,
            CACHE_SIZE = self._cache.size,
        )
    def elaborate(self, platform):
        m = Module()
        m.submodules.bridge = self._bridge
        m.submodules.cache = self._cache
        m.submodules.core = self.core
        # Combinationally wire the data window to the cache initiator bus,
        # and the control window straight to the LiteDRAM control bus.
        m.d.comb += [
            self._cache.intr_bus.adr .eq(self._data_bus.adr),
            self._cache.intr_bus.cyc .eq(self._data_bus.cyc),
            self._cache.intr_bus.stb .eq(self._data_bus.stb),
            self._cache.intr_bus.sel .eq(self._data_bus.sel),
            self._cache.intr_bus.we .eq(self._data_bus.we),
            self._cache.intr_bus.dat_w.eq(self._data_bus.dat_w),
            self._cache.intr_bus.cti .eq(self._data_bus.cti),
            self._cache.intr_bus.bte .eq(self._data_bus.bte),
            self._data_bus.ack .eq(self._cache.intr_bus.ack),
            self._data_bus.dat_r.eq(self._cache.intr_bus.dat_r),
            self.core.ctrl_bus.adr .eq(self._ctrl_bus.adr),
            self.core.ctrl_bus.cyc .eq(self._ctrl_bus.cyc),
            self.core.ctrl_bus.stb .eq(self._ctrl_bus.stb),
            self.core.ctrl_bus.sel .eq(self._ctrl_bus.sel),
            self.core.ctrl_bus.we .eq(self._ctrl_bus.we),
            self.core.ctrl_bus.dat_w.eq(self._ctrl_bus.dat_w),
            self._ctrl_bus.ack .eq(self.core.ctrl_bus.ack),
            self._ctrl_bus.dat_r.eq(self.core.ctrl_bus.dat_r),
        ]
        return m
| lambdasoc/periph/sdram.py | 14,493 | SDRAM controller peripheral.
Parameters
----------
core : :class:`litedram.Core`
LiteDRAM core.
cache_size : int
Cache size, in bytes.
cache_dirty_init : boot
Initialize cache as dirty. Defaults to `False`.
Write-back cache.
A write-back cache designed to bridge the SoC interconnect to LiteDRAM.
Parameters
----------
dram_port : :class:`litedram.NativePort`
LiteDRAM user port.
size : int
Cache size.
data_width : int
Initiator bus data width.
granularity : int
Initiator bus granularity.
dirty_init : bool
Dirty initialization. Defaults to ``False``. May be useful for simulation.
Attributes
----------
intr_bus : :class:`amaranth_soc.wishbone.Interface`
Initiator bus, with support for incremental bursts.
Tag/Data memory read Tag/Data memory write Command Write Command Read Data path : bridge -> cache -> LiteDRAM user port Control path : bridge -> LiteDRAM control port | 918 | en | 0.440515 |
# -*- coding: utf-8 -*-
import json
import logging
import re
from lncrawl.core.crawler import Crawler
logger = logging.getLogger(__name__)
search_url = 'https://www.novelall.com/search/?name=%s'
class NovelAllCrawler(Crawler):
    """Crawler for novelall.com: search, novel metadata and chapter bodies."""
    base_url = 'https://www.novelall.com/'

    def search_novel(self, query):
        """Search the site and return up to 20 {url, title} matches."""
        term = query.lower().replace(' ', '+')
        page = self.get_soup(search_url % term)
        anchors = page.select('.cover-info p.title a')[:20]
        return [
            {
                'url': self.absolute_url(anchor['href']),
                'title': anchor.text.strip(),
            }
            for anchor in anchors
        ]

    def read_novel_info(self):
        '''Get novel title, autor, cover etc'''
        logger.debug('Visiting %s', self.novel_url)
        page = self.get_soup(self.novel_url + '?waring=1')

        detail = page.find('div', {"class": "manga-detail"})
        self.novel_title = detail.find('h1').text
        logger.info('Novel title: %s', self.novel_title)

        self.novel_cover = self.absolute_url(detail.find('img')['src'])
        logger.info('Novel cover: %s', self.novel_cover)

        # Author field may hold "name,translator"; render it as "name (translator)".
        names = page.find('div', {"class": "detail-info"}).find('a').text.split(',')
        if len(names) == 2:
            self.novel_author = names[0] + ' (' + names[1] + ')'
        else:
            self.novel_author = ' '.join(names)
        logger.info('Novel author: %s', self.novel_author)

        links = page.find('div', {"class": "manga-detailchapter"}).findAll('a', title=True)
        links.reverse()  # site lists newest first; we want reading order
        for link in links:
            for span in link.findAll('span'):
                span.extract()  # drop decorative spans from the anchor text

        # Group chapters into volumes of 100.
        vol_id = 0
        for index, link in enumerate(links):
            serial = index + 1
            if index % 100 == 0:
                vol_id = serial // 100 + 1
                self.volumes.append({
                    'id': vol_id,
                    'title': 'Volume %d' % vol_id,
                })
            self.chapters.append({
                'id': serial,
                'volume': vol_id,
                'url': self.absolute_url(link['href']),
                'title': link['title'] or ('Chapter %d' % serial),
            })

    def download_chapter_body(self, chapter):
        '''Download body of a single chapter and return as clean html format.'''
        logger.info('Downloading %s', chapter['url'])
        page = self.get_soup(chapter['url'])
        body = page.find('div', {'class': 'reading-box'})
        self.clean_contents(body)
        return str(body)
| sources/novelall.py | 2,856 | Download body of a single chapter and return as clean html format.
Get novel title, autor, cover etc
-*- coding: utf-8 -*- end for end def end if end for end for end if end for end def end def end class | 204 | en | 0.766613 |
import requests
import ratelimit
from arcas.tools import Api
from .api_key import api_key
from arcas.tools import APIError
class Ieee(Api):
    """
    API argument is 'ieee'.

    Queries the IEEE Xplore articles endpoint and converts each raw result
    into the standardized arcas article format.
    """
    def __init__(self):
        self.standard = 'https://ieeexploreapi.ieee.org/api/v1/search/articles?'
        self.key_api = api_key

    def create_url_search(self, parameters):
        """Creates the search url, combining the standard url and various
        search parameters, and appending the API key."""
        url = self.standard
        url += parameters[0]
        for parameter in parameters[1:]:
            url += '&{}'.format(parameter)
        url += '&apikey={}'.format(self.key_api)
        return url

    @staticmethod
    @ratelimit.rate_limited(3)
    def make_request(url):
        """Request from an API and returns response.

        Raises ``APIError`` on any non-200 status code.
        """
        # NOTE(security): verify=False disables TLS certificate validation;
        # kept for backward compatibility, but consider removing it.
        response = requests.get(url, stream=True, verify=False)
        if response.status_code != 200:
            raise APIError(response.status_code)
        return response

    def to_dataframe(self, raw_article):
        """A function which takes a dictionary with structure of the IEEE
        results and transform it to a standardized format.
        """
        raw_article['url'] = raw_article.get('html_url', None)
        try:
            raw_article['author'] = [author['full_name'] for author in raw_article['authors']['authors']]
        except KeyError:
            raw_article['author'] = ['No authors found for this document.']
        raw_article['abstract'] = raw_article.get('abstract', None)
        # Conference records carry their date in a different field.
        if raw_article['content_type'] == 'Conferences':
            date = raw_article.get('conference_dates', None)
        else:
            date = raw_article.get('publication_date', None)
        if date is not None:
            # Keep only the year (last whitespace-separated token).
            date = int(date.split(' ')[-1])
        raw_article['date'] = date
        # Prefer author-supplied index terms, fall back to IEEE terms.
        category = raw_article.get('index_terms', None)
        if category is not None:
            try:
                category = category['author_terms']['terms']
            except KeyError:
                try:
                    category = category['ieee_terms']['terms']
                except KeyError:
                    category = None
        raw_article['doi'] = raw_article.get('doi', None)
        raw_article['category'] = category
        raw_article['journal'] = raw_article.get('publication_title', None)
        raw_article['provenance'] = 'IEEE'
        raw_article['key'], raw_article['unique_key'] = self.create_keys(raw_article)
        raw_article['open_access'] = raw_article['access_type'] == 'OPEN_ACCESS'
        raw_article['score'] = 'Not available'
        return self.dict_to_dataframe(raw_article)

    def parse(self, root):
        """Parse the decoded json response; return the article list, or
        False when the query matched nothing."""
        if root['total_records'] == 0:
            return False
        return root['articles']

    @staticmethod
    def parameters_fix(author=None, title=None, abstract=None, year=None,
                       records=None, start=None, category=None, journal=None,
                       keyword=None):
        """Map the generic arcas search arguments onto IEEE query fields.

        Returns a list of 'field=value' strings, skipping unset arguments,
        preserving the original ordering.
        """
        fields = [
            ('author', author),
            ('article_title', title),
            ('abstract', abstract),
            ('publication_year', year),
            ('index_terms', category),
            ('publication_title', journal),
            ('querytext', keyword),
            ('max_records', records),
            ('start_record', start),
        ]
        return ['{}={}'.format(name, value)
                for name, value in fields if value is not None]

    @staticmethod
    def get_root(response):
        """Decode the HTTP response body as json."""
        root = response.json()
        return root
| src/arcas/IEEE/main.py | 4,026 | API argument is 'ieee'.
Creates the search url, combining the standard url and various
search parameters.
Request from an API and returns response.
Parsing the xml file
A function which takes a dictionary with structure of the IEEE
results and transform it to a standardized format. | 282 | en | 0.735623 |
#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from builtins import object
from argparse import ArgumentParser
from copy import deepcopy
from httplib2 import Http
# BigQuery dataset names, keyed by deployment tier.
COHORT_DATASETS = {
    'prod': 'cloud_deployment_cohorts',
    'staging': 'cloud_deployment_cohorts',
    'dev': 'dev_deployment_cohorts'
}
# Cohort table names per deployment tier; the dev table name is taken from
# the command line instead.
COHORT_TABLES = {
    'prod': 'prod_cohorts',
    'staging': 'staging_cohorts'
}
from apiclient.discovery import build
from oauth2client.client import GoogleCredentials
from isb_cgc.settings import get_project_identifier
def authorize_and_get_bq_service():
    """Build an authorized BigQuery v2 service client using
    application-default credentials scoped to BigQuery."""
    scopes = ['https://www.googleapis.com/auth/bigquery']
    credentials = GoogleCredentials.get_application_default().create_scoped(scopes)
    authorized_http = credentials.authorize(Http())
    return build('bigquery', 'v2', http=authorized_http)
# TODO Use bq_data_access.BigQueryCohortSupport
class BigQueryCohortSupport(object):
    """Streams cohort rows into a BigQuery table via tabledata().insertAll."""

    # Fixed schema of the cohort table.
    cohort_schema = [
        {
            "name": "cohort_id",
            "type": "INTEGER",
            "mode": "REQUIRED"
        },
        {
            "name": "patient_barcode",
            "type": "STRING"
        },
        {
            "name": "sample_barcode",
            "type": "STRING"
        },
        {
            "name": "aliquot_barcode",
            "type": "STRING"
        }
    ]
    patient_type = 'patient'
    sample_type = 'sample'

    @classmethod
    def get_schema(cls):
        """Return a private copy of the cohort table schema."""
        return deepcopy(cls.cohort_schema)

    def __init__(self, service, project_id, dataset_id, table_id):
        self.service = service
        self.project_id = project_id
        self.dataset_id = dataset_id
        self.table_id = table_id

    def _build_request_body_from_rows(self, rows):
        """Wrap each row dict in the {'json': ...} envelope insertAll expects."""
        return {"rows": [{'json': row} for row in rows]}

    def _streaming_insert(self, rows):
        """Send rows to tabledata().insertAll and return the API response."""
        body = self._build_request_body_from_rows(rows)
        return self.service.tabledata().insertAll(projectId=self.project_id,
                                                  datasetId=self.dataset_id,
                                                  tableId=self.table_id,
                                                  body=body).execute()

    def _build_cohort_row(self, cohort_id,
                          patient_barcode=None, sample_barcode=None, aliquot_barcode=None):
        """Build one row dict with the schema's four columns."""
        return {
            'cohort_id': cohort_id,
            'patient_barcode': patient_barcode,
            'sample_barcode': sample_barcode,
            'aliquot_barcode': aliquot_barcode
        }

    def add_cohort_with_sample_barcodes(self, cohort_id, barcodes):
        """Insert one row per sample barcode; the patient barcode is the first
        12 characters of the sample barcode (TCGA convention — TODO confirm
        this holds for other programs)."""
        rows = [
            self._build_cohort_row(cohort_id, barcode[:12], barcode, None)
            for barcode in barcodes
        ]
        return self._streaming_insert(rows)
def create_table(dataset_id, table_id):
    """Create the cohort table dataset_id.table_id with the standard schema
    and return the BigQuery API response."""
    print("Creating table {0}.{1}".format(dataset_id, table_id))
    project_id = get_project_identifier()
    table_body = {
        'tableReference': {
            'tableId': table_id,
            'projectId': project_id,
            'datasetId': dataset_id
        },
        'schema': {
            'fields': BigQueryCohortSupport.get_schema()
        }
    }
    service = authorize_and_get_bq_service()
    return service.tables().insert(
        body=table_body,
        projectId=project_id,
        datasetId=dataset_id
    ).execute()
def prod_table(args):
    """Handle the 'prod' sub-command.

    NOTE(review): 'delete' is accepted on the CLI but not implemented; it is
    silently ignored here, matching the original behavior.
    """
    if args.cmd == 'create':
        create_table(COHORT_DATASETS['prod'], COHORT_TABLES['prod'])
def staging_table(args):
    """Handle the 'staging' sub-command.

    NOTE(review): 'delete' is accepted on the CLI but not implemented; it is
    silently ignored here, matching the original behavior.
    """
    if args.cmd == 'create':
        create_table(COHORT_DATASETS['staging'], COHORT_TABLES['staging'])
def dev_table(args):
    """Handle the 'dev' sub-command; the table name comes from the CLI."""
    if args.cmd == 'create':
        create_table(COHORT_DATASETS['dev'], args.table)
def main():
    """Parse the command line and dispatch to the chosen deployment handler."""
    parser = ArgumentParser(description="Cohort table utility")
    subparsers = parser.add_subparsers(help='commands')

    # Staging deployment
    staging = subparsers.add_parser('staging', help="Staging deployment")
    staging.add_argument('cmd', choices=['delete', 'create'])
    staging.set_defaults(func=staging_table)

    # Production deployment
    prod = subparsers.add_parser('prod', help="Production deployment")
    prod.add_argument('cmd', choices=['delete', 'create'])
    prod.set_defaults(func=prod_table)

    # Development deployment
    dev = subparsers.add_parser('dev', help="Local development deployment")
    dev.add_argument('cmd', choices=['delete', 'create'])
    dev.add_argument('table', type=str, help='Table name for local developer')
    dev.set_defaults(func=dev_table)

    args = parser.parse_args()
    args.func(args)
# Script entry point: dispatch to the sub-command chosen on the command line.
if __name__ == '__main__':
    main()
| scripts/bigquery/cohort_table_utils.py | 5,794 | Copyright 2015-2019, Institute for Systems Biology Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. TODO Use bq_data_access.BigQueryCohortSupport Staging deployment Production deployment Development deployment | 681 | en | 0.850626 |
#!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, TICI
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import dirty, get_git_commit, version, origin, branch, commit, \
terms_version, training_version, comma_remote, \
get_git_branch, get_git_remote
from selfdrive.hardware.eon.apk import system
def manager_init():
  """One-time startup: sync the clock, seed default params, register the
  device, and bind logging/crash metadata before any processes run."""
  # update system time from panda
  set_time(cloudlog)
  params = Params()
  params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
  default_params = [
    ("CompletedTrainingVersion", "0"),
    ("HasAcceptedTerms", "0"),
    ("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
    ("OpenpilotEnabledToggle", "1"),
    ("IsMetric", "1"),
    # HKG
    ("UseClusterSpeed", "1"),
    ("LongControlEnabled", "0"),
    ("MadModeEnabled", "1"),
    ("AutoLaneChangeEnabled", "0"),
    ("SccSmootherSlowOnCurves", "0"),
    ("SccSmootherSyncGasPressed", "0"),
    ("ShowDebugUI", "0")
  ]
  if TICI:
    default_params.append(("IsUploadRawEnabled", "1"))
  if params.get_bool("RecordFrontLock"):
    params.put_bool("RecordFront", True)
  # set unset params (only when the key has no value yet)
  for k, v in default_params:
    if params.get(k) is None:
      params.put(k, v)
  # is this dashcam?
  if os.getenv("PASSIVE") is not None:
    params.put_bool("Passive", bool(int(os.getenv("PASSIVE"))))
  if params.get("Passive") is None:
    raise Exception("Passive must be set to continue")
  os.umask(0)  # Make sure we can create files with 777 permissions
  # Create folders needed for msgq
  try:
    os.mkdir("/dev/shm")
  except FileExistsError:
    pass
  except PermissionError:
    print("WARNING: failed to make /dev/shm")
  # set version params
  params.put("Version", version)
  params.put("TermsVersion", terms_version)
  params.put("TrainingVersion", training_version)
  params.put("GitCommit", get_git_commit(default=""))
  params.put("GitBranch", get_git_branch(default=""))
  params.put("GitRemote", get_git_remote(default=""))
  # set dongle id; registration failure is fatal
  reg_res = register(show_spinner=True)
  if reg_res:
    dongle_id = reg_res
  else:
    serial = params.get("HardwareSerial")
    raise Exception(f"Registration failed for device {serial}")
  os.environ['DONGLE_ID'] = dongle_id  # Needed for swaglog
  if not dirty:
    os.environ['CLEAN'] = '1'
  cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty,
                       device=HARDWARE.get_device_type())
  # Crash reporting only on comma-remote builds, unless disabled via env.
  if comma_remote and not (os.getenv("NOLOG") or os.getenv("NOCRASH") or PC):
    crash.init()
    crash.bind_user(id=dongle_id)
    crash.bind_extra(dirty=dirty, origin=origin, branch=branch, commit=commit,
                     device=HARDWARE.get_device_type())
def manager_prepare():
  """Run every managed process's prepare step before starting any of them."""
  for proc in managed_processes.values():
    proc.prepare()
def manager_cleanup():
  """Stop every managed process and log completion."""
  for proc in managed_processes.values():
    proc.stop()
  cloudlog.info("everything is dead")
def manager_thread():
  """Main supervision loop: keep managed processes in the desired state,
  publish managerState, and exit when an uninstall is requested."""
  # Launch the shutdownd module in its own process (outside managed_processes).
  Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",)).start()
  # NOTE(review): fork-specific Android helper services — confirm package names.
  system("am startservice com.neokii.optool/.MainService")
  system("am startservice com.neokii.openpilot/.MainService")
  cloudlog.info("manager start")
  cloudlog.info({"environ": os.environ})
  # save boot log
  #subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
  params = Params()
  # Processes excluded from supervision, controlled via env vars.
  ignore = []
  if os.getenv("NOBOARD") is not None:
    ignore.append("pandad")
  if os.getenv("BLOCK") is not None:
    ignore += os.getenv("BLOCK").split(",")
  ensure_running(managed_processes.values(), started=False, not_run=ignore)
  started_prev = False
  sm = messaging.SubMaster(['deviceState'])
  pm = messaging.PubMaster(['managerState'])
  while True:
    sm.update()
    not_run = ignore[:]
    # Stop logging when disk space is nearly exhausted.
    if sm['deviceState'].freeSpacePercent < 5:
      not_run.append("loggerd")
    started = sm['deviceState'].started
    driverview = params.get_bool("IsDriverViewEnabled")
    ensure_running(managed_processes.values(), started, driverview, not_run)
    # trigger an update after going offroad
    if started_prev and not started and 'updated' in managed_processes:
      os.sync()
      managed_processes['updated'].signal(signal.SIGHUP)
    started_prev = started
    # Debug line: green names for alive processes, red for dead ones.
    running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
                    for p in managed_processes.values() if p.proc]
    cloudlog.debug(' '.join(running_list))
    # send managerState
    msg = messaging.new_message('managerState')
    msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
    pm.send('managerState', msg)
    # TODO: let UI handle this
    # Exit main loop when uninstall is needed
    if params.get_bool("DoUninstall"):
      break
def main():
  """Entry point: initialize, start processes, supervise until exit, then
  clean up (and uninstall if requested)."""
  prepare_only = os.getenv("PREPAREONLY") is not None
  manager_init()
  # Start UI early so prepare can happen in the background
  if not prepare_only:
    managed_processes['ui'].start()
  manager_prepare()
  if prepare_only:
    return
  # SystemExit on sigterm
  signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
  try:
    manager_thread()
  except Exception:
    traceback.print_exc()
    crash.capture_exception()
  finally:
    # Always stop child processes, even if the supervision loop crashed.
    manager_cleanup()
  if Params().get_bool("DoUninstall"):
    cloudlog.warning("uninstalling")
    HARDWARE.uninstall()
if __name__ == "__main__":
  unblock_stdout()
  try:
    main()
  except Exception:
    # Startup failed: log the exception and show it on-screen.
    add_file_handler(cloudlog)
    cloudlog.exception("Manager failed to start")
    # Show last 3 lines of traceback
    error = traceback.format_exc(-3)
    error = "Manager failed to start\n\n" + error
    with TextWindow(error) as t:
      t.wait_for_exit()
    raise
  # manual exit because we are forked
  sys.exit(0)
| selfdrive/manager/manager.py | 6,417 | !/usr/bin/env python3 update system time from panda HKG set unset params is this dashcam? Make sure we can create files with 777 permissions Create folders needed for msgq set version params set dongle id Needed for swaglog save boot logsubprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd")) trigger an update after going offroad send managerState TODO: let UI handle this Exit main loop when uninstall is needed Start UI early so prepare can happen in the background SystemExit on sigterm Show last 3 lines of traceback manual exit because we are forked | 576 | en | 0.740721 |
from __future__ import print_function, division
import _init_paths
import math
import os.path as osp
from shapely.geometry import Polygon
from gen_data import get_cent
from bbox_util import is_rect
import argparse
import sys
from model.config import cfg
def parse_args():
  """
  Parse input arguments
  """
  parser = argparse.ArgumentParser(description='Generate txt result file')
  parser.add_argument('--dir', dest='base_dir', help='result base dir',
                      default='/home/hezheqi/data/frame/result', type=str)
  parser.add_argument('--gt', dest='gt_dir', help='gt base dir',
                      default='/data/hezheqi/frame/test/gt', type=str)
  parser.add_argument('--name', dest='name', help='out name',
                      default=None, type=str)
  parser.add_argument('--list', dest='img_list_dir', help='image list',
                      default='/data/hezheqi/frame/test/img_list.txt', type=str)
  # With no arguments at all, print usage and exit instead of running.
  if len(sys.argv) == 1:
    parser.print_help()
    sys.exit(1)
  return parser.parse_args()
def read_txt(name, use_bound=False, rect_label=None, is_gt=False):
  """Read one result/gt txt file and return a list of shapely Polygons.

  Each usable line is "<id> x1 y1 x2 y2 x3 y3 x4 y4"; single-token lines are
  skipped, and for ground truth any line without exactly 9 fields is skipped.

  :param name: path of the txt file; a missing file yields an empty list
  :param use_bound: replace each polygon by its axis-aligned bounding box
  :param rect_label: optional output list; one bool appended per kept line
      telling whether the quad is an axis-aligned rectangle
  :param is_gt: the file holds ground-truth annotations
  """
  ret = []
  if not osp.exists(name):
    return ret
  with open(name) as fin:
    for line in fin:
      info = line.strip().split()
      if len(info) == 1:
        continue
      if is_gt and len(info) != 9:
        continue
      info = list(map(int, info))
      if rect_label is not None:  # caller wants rectangle labels collected
        rect_label.append(is_rect(info[1:]))
      # Sort the 4 corners by angle around the centroid so the polygon is
      # simple (non self-intersecting).
      pts = [(info[i], info[i + 1]) for i in range(1, len(info), 2)]
      cx, cy = get_cent(info[1:])
      pts.sort(key=lambda a: math.atan2(a[1] - cy, a[0] - cx))
      frame = Polygon(pts)
      if use_bound:
        x1, y1, x2, y2 = frame.bounds
        frame = Polygon([[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
      if not frame.is_valid:
        print(info[0])  # report the id of the degenerate polygon
        continue
      ret.append(frame)
  return ret
def calculate_iou(p1, p2):
  """Return the intersection-over-union of two polygons."""
  overlap = p1.intersection(p2).area
  union = p1.area + p2.area - overlap
  return overlap / union
def verify_point_distance(poly1, poly2):
  """True when every pair of corresponding vertices is within 50 px
  (squared distance <= 2500)."""
  coords1 = list(poly1.exterior.coords)
  coords2 = list(poly2.exterior.coords)
  return all(
      (x1 - x2) ** 2 + (y1 - y2) ** 2 <= 2500
      for (x1, y1), (x2, y2) in zip(coords1, coords2)
  )
def eval_one(results, gts, point_dis=False, rect_label=None):
  '''
  Greedily match detected polygons to ground-truth polygons for one image.
  :param results: detected polygons
  :param gts: ground-truth polygons
  :param point_dis: match by per-vertex distance instead of IoU
  :param rect_label: use rectangle or not; optional per-gt bools — matches to
                     non-rectangular gts count as "mid" instead of right
  :return right_num, error_num, mid_num
  '''
  m = len(gts)
  is_used = [False] * m  # each gt may be matched at most once
  right_num = 0
  err_num = 0
  mid_num = 0
  for res in results:
    if not point_dis:
      # Find the best still-unmatched gt by IoU.
      max_iou = -1
      max_index = -1
      for j, gt in enumerate(gts):
        if is_used[j]:
          continue
        iou = calculate_iou(res, gt)
        if max_iou < iou:
          max_iou = iou
          max_index = j
      if max_iou > th:  # th is the module-level IoU threshold
        is_used[max_index] = True
        if rect_label == None:
          right_num += 1
        elif rect_label[max_index]:
          right_num += 1
        elif not rect_label[max_index]:
          mid_num += 1  # matched a non-rectangular gt: excluded from counts
      else:
        err_num += 1
    else:
      # Vertex-distance matching: take the first unmatched gt that is close.
      flag = False
      for j, gt in enumerate(gts):
        if is_used[j]:
          continue
        if verify_point_distance(res, gt):
          is_used[j] = True
          right_num += 1
          flag = True
          break
      if not flag:
        err_num += 1
  assert (right_num <= m)
  assert (err_num <= len(results))
  return right_num, err_num, mid_num
def evaluate(mean_f=True, point_dis=False, rect_flag=False):
  """Evaluate every image in name_list_dir and print threshold, precision,
  recall, F1 and per-page accuracy; failing page names go to wrong.txt.

  :param mean_f: also average precision/recall per image (macro); when False,
                 pooled (micro) metrics are printed as a second line
  :param point_dis: use vertex-distance matching instead of IoU
  :param rect_flag: collect rectangle labels so non-rectangular gt matches
                    are excluded from the counts
  """
  name_list = open(name_list_dir).read().strip().split('\n')
  fout = open(osp.join(cfg.DATA_DIR, 'wrong.txt'), 'w')
  precision, recall, page_correct = 0, 0, 0
  right_all, error_all, gt_all, res_all = 0, 0, 0, 0
  for name in name_list:
    results = read_txt(osp.join(res_base_dir, name + '.txt'), use_bound=False)
    if rect_flag:
      rect_label = []
    else:
      rect_label = None
    gts = read_txt(osp.join(gt_base_dir, name + '.txt'), rect_label=rect_label,
                   is_gt=True, use_bound=False)
    right_num, error_num, mid_num = eval_one(results, gts, rect_label=rect_label, point_dis=point_dis)
    # right_num, error_num, mid_num = eval_one(results, gts)
    right_all += right_num
    error_all += error_num
    gt_all += len(gts) - mid_num
    res_all += len(results) - mid_num
    if len(results) - mid_num > 0:
      precision += right_num / (len(results) - mid_num)
    if len(gts) - mid_num > 0:
      recall += right_num / (len(gts) - mid_num)
    # A page is "correct" only if every gt is found and nothing spurious.
    if right_num == len(gts) and error_num == 0:
    # if right_num == len(gts):
      page_correct += 1
    else:
      fout.write('{}\n'.format(name))
  n = len(name_list)
  # Macro-averaged metrics (per image), always printed.
  precision /= n
  recall /= n
  page_correct /= n
  f1 = 2 * precision * recall / (precision + recall)
  print('{} {:.5f} {:.5f} {:.5f} {:.5f}'.format(th, precision, recall, f1, page_correct))
  if not mean_f:
    # Micro (pooled) metrics over all images.
    precision = right_all / res_all
    recall = right_all / gt_all
    f1 = 2 * precision * recall / (precision + recall)
    # print(th, precision, recall, f1, page_correct)
    print('{} {:.5f} {:.5f} {:.5f} {:.5f}'.format(th, precision, recall, f1, page_correct))
# Script entry point: set up the evaluation globals (gt/result dirs and the
# IoU threshold `th`) and run the pooled-metric evaluation.
if __name__ == '__main__':
  # gt_base_dir = '/data/datasets/frame/test_2000/gt'
  # res_base_dir = '/data/datasets/frame/result/result_all_0.8_th0.75'
  # res_base_dir = '/data3/dt'
  # res_base_dir = '/data/datasets/frame/result/result_ssd_th0.75'
  # res_base_dir = '/home/hezheqi/data/frame/result/faster_reg2_poly'
  # res_base_dir = '/home/hezheqi/Project/dpreg/net/results/pages_mult/txt'
  # res_base_dir = '/home/cpdp/Documents/yf-workspace/data/2000_res_txt'
  # res_base_dir = '/data3/20w_results/ly_crf_new'
  # res_base_dir = '/data3/20w_results/dt'
  # res_base_dir = '/home/cpdp/Documents/yf-workspace/data/29845_LD_DRR'
  # res_base_dir = '/data/datasets/frame/result/result_2000_0.8_th0.75'
  # name_list_dir = '/data/datasets/frame/test_2000/img_list.txt'
  args = parse_args()
  gt_base_dir = args.gt_dir
  res_base_dir = osp.join(args.base_dir, args.name)
  th = 0.9  # IoU threshold used by eval_one
  name_list_dir = args.img_list_dir
  evaluate(mean_f=False, point_dis=False)
  # evaluate(False, True)
| tools/eval_frame.py | 6,491 | :param results:
:param gts:
:param point_dis:
:param rect_label: use rectangle or not
:return right_num, error_num, mid_num
Parse input arguments
for i in range(len(info)): info[i] = max(0, info[i]) only use rectangle gt if is_gt: print(pts) print(x1, y1, x2, y2) frame = frame.convex_hull print(a1, a2) print(p1.is_valid, p2.is_valid) right_num, error_num, mid_num = eval_one(results, gts) if right_num == len(gts): print(th, precision, recall, f1, page_correct) gt_base_dir = '/data/datasets/frame/test_2000/gt' res_base_dir = '/data/datasets/frame/result/result_all_0.8_th0.75' res_base_dir = '/data3/dt' res_base_dir = '/data/datasets/frame/result/result_ssd_th0.75' res_base_dir = '/home/hezheqi/data/frame/result/faster_reg2_poly' res_base_dir = '/home/hezheqi/Project/dpreg/net/results/pages_mult/txt' res_base_dir = '/home/cpdp/Documents/yf-workspace/data/2000_res_txt' res_base_dir = '/data3/20w_results/ly_crf_new' res_base_dir = '/data3/20w_results/dt' res_base_dir = '/home/cpdp/Documents/yf-workspace/data/29845_LD_DRR' res_base_dir = '/data/datasets/frame/result/result_2000_0.8_th0.75' name_list_dir = '/data/datasets/frame/test_2000/img_list.txt' evaluate(False, True) | 1,194 | en | 0.316575 |
from twisted.protocols import stateful
from twisted.internet import reactor
from twisted.internet.protocol import Factory, Protocol
from twisted.internet.endpoints import TCP4ClientEndpoint
from datetime import datetime
import sys
import struct
import zlib
import time
import threading
import Queue
import uuid
import AmmoMessages_pb2
# Wire-protocol header constants: every message header starts with this magic
# number; priority and the reserved bytes default to zero.
MAGIC_NUMBER = 0xfeedbeef
DEFAULT_PRIORITY = 0
DEFAULT_RESERVED1 = 0
DEFAULT_RESERVED2 = 0
DEFAULT_RESERVED3 = 0
class AndroidProtocol(stateful.StatefulProtocol):
  '''
  This class implements the stateful Android <-> Gateway protocol.  Each
  message is a 20-byte little-endian header (magic number, message size,
  priority, error code, two reserved bytes, payload CRC32, and a CRC32 over
  the first 16 header bytes), followed by a protobuf MessageWrapper object
  of length messageSize.
  We use Twisted's StatefulProtocol to implement this protocol-- states are
  composed of a callback function and a size; Twisted calls the callback
  function when <size> data has been received.  The callback functions return
  the next state.
  '''
  _messageSize = 0                    # payload size taken from the last header
  _checksum = 0                       # expected CRC32 of the payload
  _onMessageAvailableCallback = None  # invoked with each decoded MessageWrapper
  def getInitialState(self):
    return (self.receiveHeader, 20) #initial state receives the header
  def receiveHeader(self, data):
    # Header layout (little-endian): uint32 magic, uint32 messageSize,
    # int8 priority, int8 error, int8 reserved2, int8 reserved3,
    # int32 payload checksum, int32 header checksum (CRC32 of bytes 0..15).
    (magicNumber, messageSize, priority, error, reserved2, reserved3, checksum, headerChecksum) = struct.unpack("<IIbbbbii", data)
    calculatedHeaderChecksum = zlib.crc32(data[:16])
    if magicNumber != MAGIC_NUMBER:
      print "Invalid magic number!"
    if calculatedHeaderChecksum != headerChecksum:
      print "Header checksum error!"
      print "Expected", headerChecksum
      print "Calculated", calculatedHeaderChecksum
    if error != 0 and messageSize == 0 and checksum == 0:
      # A header with an error code and no payload is an error report from
      # the gateway; log it and wait for the next header.
      print "Error received from gateway:"
      print "  ", error,
      if error == 1:
        print "Invalid magic number"
      elif error == 2:
        print "Invalid header checksum"
      elif error == 3:
        print "Invalid message checksum"
      elif error == 4:
        print "Message too large"
      else:
        print "Unknown error"
      return (self.receiveHeader, 20)
    else:
      # Normal message: remember size/checksum and wait for the payload.
      self._messageSize = messageSize
      self._checksum = checksum
      return (self.receiveData, self._messageSize)
  def receiveData(self, data):
    # Verify the payload CRC32, decode the protobuf, hand it to the callback,
    # then go back to waiting for the next header.
    calculatedChecksum = zlib.crc32(data)
    if calculatedChecksum != self._checksum:
      print "Checksum error!"
    msg = AmmoMessages_pb2.MessageWrapper()
    msg.ParseFromString(data)
    if self._onMessageAvailableCallback != None:
      self._onMessageAvailableCallback(msg)
    return (self.receiveHeader, 20)
  def sendMessageWrapper(self, msg):
    # Serialize the protobuf, prepend the 20-byte header (including both
    # the payload CRC32 and the header CRC32), and write it out.
    serializedMsg = msg.SerializeToString()
    messageHeader = struct.pack("<IIbbbbi", MAGIC_NUMBER, len(serializedMsg), DEFAULT_PRIORITY, DEFAULT_RESERVED1, DEFAULT_RESERVED2, DEFAULT_RESERVED3, zlib.crc32(serializedMsg))
    headerChecksum = zlib.crc32(messageHeader)
    messageHeader = messageHeader + struct.pack("i", headerChecksum)
    self.transport.write(messageHeader) #little-endian byte order for now
    self.transport.write(serializedMsg);
  def connectionMade(self):
    pass
  def connectionLost(self, reason):
    print "Connection lost:"
    reason.printTraceback()
    #TODO: signal the authentication loop so it knows we disconnected too
  def setOnMessageAvailableCallback(self, callback):
    self._onMessageAvailableCallback = callback
class AuthenticationFailure(Exception):
  """Exception type signalling a failed gateway authentication."""
  pass
class MessageScope:
  # Integer constants naming message delivery scopes.
  GLOBAL = 0
  LOCAL = 1
class MessagePriority:
  # Integer priority values for outgoing messages; larger values indicate
  # higher priority (AUTH/CTRL highest, BACKGROUND lowest).
  AUTH = 127
  CTRL = 126
  FLASH = 96
  URGENT = 64
  IMPORTANT = 32
  NORMAL = 0
  BACKGROUND = -32
class AndroidConnector(threading.Thread):
    '''
    Background thread that connects to an AMMO Android Gateway Plugin over
    TCP (via Twisted), authenticates with the supplied credentials, sends
    periodic heartbeats, and exchanges protobuf-encoded messages.  Incoming
    messages are placed on an internal queue and/or handed to a registered
    callback.
    '''
    # Class-level defaults; per-instance values are assigned in __init__.
    _address = ""
    _port = 0
    _deviceId = ""
    _userId = ""
    _userKey = ""
    _protocol = None             # AndroidProtocol instance, set once connected
    _authenticated = False
    _cancelled = False
    _authCondition = None        # guards _authenticated and _cancelled
    _messageQueueEnabled = True
    _messageQueue = None
    _messageCallback = None

    def __init__(self, address, port, deviceId, userId, userKey, heartbeatPeriod = 30):
        '''heartbeatPeriod is in seconds; a value <= 0 disables heartbeats.'''
        threading.Thread.__init__(self)
        self._address = address
        self._port = port
        self._deviceId = deviceId
        self._userId = userId
        self._userKey = userKey
        self._heartbeatNumber = 0      # monotonically increasing heartbeat sequence number
        self._heartbeatPeriod = heartbeatPeriod
        self._authenticated = False
        self._cancelled = False
        self._authCondition = threading.Condition()
        self._messageQueueEnabled = True
        self._messageQueue = Queue.Queue()
        self._messageCallback = None

    def _gotProtocol(self, p):
        # Deferred callback: TCP connection succeeded.
        self._protocol = p
        self._onConnect()

    def _onError(self, failure):
        # Deferred errback: connection failed.  Stop the reactor and wake any
        # thread blocked in waitForAuthentication().
        failure.printTraceback()
        reactor.stop()
        self._authCondition.acquire()
        self._cancelled = True
        self._authCondition.notifyAll()
        self._authCondition.release()

    def _connect(self):
        # Begin an asynchronous TCP connection attempt on the reactor.
        factory = Factory()
        factory.protocol = AndroidProtocol
        point = TCP4ClientEndpoint(reactor, self._address, self._port)
        d = point.connect(factory)
        d.addCallback(self._gotProtocol)
        d.addErrback(self._onError)

    def run(self):
        # Thread entry point: start the Twisted reactor, or reuse a running one.
        if reactor.running == False:
            self._connect()
            print "Running reactor"
            reactor.run(False) #Argument False tells the reactor that it's not on the
                               #main thread, so it doesn't attempt to register signal
                               #handlers (which doesn't work on other threads)
            print "Reactor stopped"
        else:
            reactor.callFromThread(self._connect)
            print "Reactor is already running... this background thread will exit."

    def _onConnect(self):
        # Called once the protocol is up: hook message delivery, then authenticate.
        self._protocol.setOnMessageAvailableCallback(self._onMessageAvailable)
        self._sendAuthMessage()

    def _onMessageAvailable(self, msg):
        # Runs on the reactor thread for every message parsed by the protocol.
        if self._authenticated == False:
            if msg.type == AmmoMessages_pb2.MessageWrapper.AUTHENTICATION_RESULT:
                if msg.authentication_result.result == AmmoMessages_pb2.AuthenticationResult.SUCCESS:
                    print "Authentication succeeded."
                    self._authCondition.acquire()
                    self._authenticated = True
                    self._authCondition.notifyAll()
                    self._authCondition.release()
                    if(self._heartbeatPeriod > 0):
                        self._sendAndScheduleHeartbeat()
                else:
                    print "Authentication failed."
                    raise AuthenticationFailure("Auth failed: " + msg.authentication_result.message)
        if msg.type == AmmoMessages_pb2.MessageWrapper.DATA_MESSAGE:
            # Auto-acknowledge pushes whose sender asked for device-delivered acks.
            if msg.data_message.thresholds.device_delivered == True:
                self.pushAcknowledgement(msg.data_message.uri, msg.data_message.origin_device, msg.data_message.user_id, self._deviceId, self._userId)
        time = datetime.now()  # NOTE(review): local name shadows the 'time' module within this method
        if self._messageCallback != None:
            self._messageCallback(self, msg)
        if self._messageQueueEnabled:
            self._messageQueue.put((msg, time))

    def _sendAuthMessage(self):
        # Build and send the AUTHENTICATION_MESSAGE with our credentials.
        m = AmmoMessages_pb2.MessageWrapper()
        m.type = AmmoMessages_pb2.MessageWrapper.AUTHENTICATION_MESSAGE
        m.message_priority = MessagePriority.AUTH
        m.authentication_message.device_id = self._deviceId
        m.authentication_message.user_id = self._userId
        m.authentication_message.user_key = self._userKey
        print "Sending auth message"
        self._protocol.sendMessageWrapper(m)

    def _sendAndScheduleHeartbeat(self):
        # Send one heartbeat now and re-arm a reactor timer for the next one.
        self.heartbeat()
        if(self._heartbeatPeriod > 0):
            reactor.callLater(self._heartbeatPeriod, self._sendAndScheduleHeartbeat)

    def dequeueMessage(self):
        '''
        Dequeues a message from the message queue and returns it. Returns 'none' if
        the queue is empty; otherwise, it returns a pair (message, timeReceived).
        '''
        item = None
        try:
            item = self._messageQueue.get(False) #don't block if queue is empty; raises Empty exception instead
        except Queue.Empty:
            item = None
            pass
        return item

    def isDataAvailable(self):
        '''
        Checks to see if data is available in the message queue. Note that, since
        the message queue is filled from a background thread (and could be emptied
        from a background thread), this method returning true/false does not
        necessarily mean that a message will or will not be present when
        dequeueMessage() is called.
        '''
        return not self._messageQueue.empty()

    def push(self, uri, mimeType, data, scope = MessageScope.GLOBAL, priority = MessagePriority.NORMAL, ackDeviceDelivered = False, ackPluginDelivered = False, ackAndroidPluginReceived = True):
        '''
        Sends a push message with the specified URI and MIME type to the gateway.
        '''
        m = AmmoMessages_pb2.MessageWrapper()
        m.type = AmmoMessages_pb2.MessageWrapper.DATA_MESSAGE
        m.message_priority = priority
        m.data_message.uri = uri
        m.data_message.mime_type = mimeType
        m.data_message.data = data
        m.data_message.origin_device = self._deviceId
        m.data_message.user_id = self._userId
        # The three ack* flags select which delivery thresholds trigger acks.
        m.data_message.thresholds.device_delivered = ackDeviceDelivered
        m.data_message.thresholds.plugin_delivered = ackPluginDelivered
        m.data_message.thresholds.android_plugin_received = ackAndroidPluginReceived
        if scope == MessageScope.GLOBAL:
            m.data_message.scope = AmmoMessages_pb2.GLOBAL
        else:
            m.data_message.scope = AmmoMessages_pb2.LOCAL
        # All sends are marshalled onto the reactor thread.
        reactor.callFromThread(self._protocol.sendMessageWrapper, m)

    def pushAcknowledgement(self, uid, destinationDevice, destinationUser, acknowledgingDevice, acknowledgingUser):
        '''
        Sends a push acknowledgement back to the specified device. The
        destinationDevice parameter should match the origin_device parameter from
        the push message which was received.
        Scripts shouldn't normally need to call this directly; AndroidConnector
        will automatically generate an acknowledgement if the message indicates
        that an acknowledgement is required.
        '''
        m = AmmoMessages_pb2.MessageWrapper()
        m.type = AmmoMessages_pb2.MessageWrapper.PUSH_ACKNOWLEDGEMENT
        m.message_priority = MessagePriority.CTRL
        m.push_acknowledgement.uri = uid
        m.push_acknowledgement.destination_device = destinationDevice
        m.push_acknowledgement.acknowledging_device = acknowledgingDevice
        m.push_acknowledgement.destination_user = destinationUser
        m.push_acknowledgement.acknowledging_user = acknowledgingUser
        m.push_acknowledgement.threshold.device_delivered = True
        m.push_acknowledgement.threshold.plugin_delivered = False
        m.push_acknowledgement.threshold.android_plugin_received = False
        m.push_acknowledgement.status = AmmoMessages_pb2.PushAcknowledgement.SUCCESS
        reactor.callFromThread(self._protocol.sendMessageWrapper, m)

    def subscribe(self, mimeType, scope = MessageScope.GLOBAL):
        '''
        Subscribes to push data with the specified MIME type.
        By default, data received will be placed in the message queue. The caller
        should periodically call dequeueMessage to receive the push messages that
        it subscribed to.
        '''
        m = AmmoMessages_pb2.MessageWrapper()
        m.type = AmmoMessages_pb2.MessageWrapper.SUBSCRIBE_MESSAGE
        m.message_priority = MessagePriority.CTRL
        m.subscribe_message.mime_type = mimeType
        if scope == MessageScope.GLOBAL:
            m.subscribe_message.scope = AmmoMessages_pb2.GLOBAL
        else:
            m.subscribe_message.scope = AmmoMessages_pb2.LOCAL
        reactor.callFromThread(self._protocol.sendMessageWrapper, m)

    def pullRequest(self, mimeType, query, projection, maxResults, startFromCount, liveQuery, priority = MessagePriority.NORMAL):
        '''
        Sends a pull request with the specified parameters. Note that the request
        UID and device ID are automatically set to the correct values (request UID
        is a generated UUID, and device ID is the device ID passed to the
        constructor of this AndroidConnector object).
        '''
        m = AmmoMessages_pb2.MessageWrapper()
        m.type = AmmoMessages_pb2.MessageWrapper.PULL_REQUEST
        m.message_priority = priority
        m.pull_request.request_uid = uuid.uuid1().hex
        m.pull_request.mime_type = mimeType
        m.pull_request.query = query
        m.pull_request.projection = projection
        m.pull_request.max_results = maxResults
        m.pull_request.start_from_count = startFromCount
        m.pull_request.live_query = liveQuery
        reactor.callFromThread(self._protocol.sendMessageWrapper, m)

    def heartbeat(self):
        '''Sends one HEARTBEAT message and increments the sequence number.'''
        m = AmmoMessages_pb2.MessageWrapper()
        m.type = AmmoMessages_pb2.MessageWrapper.HEARTBEAT
        m.message_priority = MessagePriority.NORMAL
        m.heartbeat.sequence_number = self._heartbeatNumber
        reactor.callFromThread(self._protocol.sendMessageWrapper, m)
        self._heartbeatNumber = self._heartbeatNumber + 1

    def waitForAuthentication(self):
        '''
        Waits for the AndroidConnector to connect to the Android Gateway Plugin, and
        waits for successful authentication.
        This method MUST be called after the AndroidConnector's background thread
        is started. Attempting to call any other member methods of this class
        before authentication is complete has undefined behavior.
        '''
        with self._authCondition:
            # Wake periodically so Ctrl-C can interrupt; exits on auth or cancel.
            while not (self._cancelled or self._authenticated):
                self._authCondition.wait(1)
        if self._authenticated == False:
            raise AuthenticationFailure("Connection failure or interrupt during waitForAuthentication")

    def registerMessageCallback(self, callback):
        '''
        Registers a callback method to be called when a message is received. Note
        that this callback is called on the *event loop's* thread-- which may not
        be the thread where the caller (of this method) is running. The caller is
        expected to handle any synchronization issues which might result.
        Also note that registering this callback does not disable the message queue--
        the consumer of AndroidConnector will want to either drain this queue or
        disable it with AndroidConnector.setMessageQueueEnabled(False) to avoid
        memory leaks.
        '''
        self._messageCallback = callback

    def setMessageQueueEnabled(self, enabled):
        '''
        Enables or disables the message queue. The message queue is enabled by
        default; you might want to disable it if, for example, you only want to
        print messages as they are received in a callback.
        setMessageQueueEnabled(false) should almost always be used in conjunction
        with registerMessageCallback, or you will lose any messages received while
        the message queue is disabled.
        '''
        self._messageQueueEnabled = enabled
# Main method for this class (not run when it's imported).
# This is a usage example for the AndroidConnector-- it subscribes to a data
# type, then prints out any data that it receives with that type.
if __name__ == "__main__":
    print "Android Gateway Tester"
    connector = AndroidConnector("localhost", 33289, "device:test/pythonTestDriver1", "user:user/testPythonUser1", "")
    try:
        connector.start()
        connector.waitForAuthentication()
        print "Subscribing to type text/plain"
        connector.subscribe("text/plain")
        # Poll the queue twice per second and print everything received.
        while True:
            while(connector.isDataAvailable()):
                result = connector.dequeueMessage()
                if(result != None):
                    (msg, receivedTime) = result
                    print "Message received at:", receivedTime
                    print msg
            time.sleep(0.5)
    except KeyboardInterrupt:
        print "Got ^C... Closing"
        reactor.callFromThread(reactor.stop)
        # re-raising the exception so we get a traceback (useful for debugging,
        # occasionally). Real "applications"/testdrivers shouldn't do this.
        raise
    except:
        print "Unexpected error... dying."
        reactor.callFromThread(reactor.stop)
        raise
| AndroidGatewayPlugin/Testdriver/AndroidConnector/ammo/AndroidConnector.py | 15,714 | initial state receives the headerlittle-endian byte order for nowTODO: signal the authentication loop so it knows we disconnected tooArgument False tells the reactor that it's not on themain thread, so it doesn't attempt to register signalhandlers (which doesn't work on other threads)don't block if queue is empty; raises Empty exception instead Main method for this class (not run when it's imported). This is a usage example for the AndroidConnector-- it subscribes to a data type, then prints out any data that it receives with that type. re-raising the exception so we get a traceback (useful for debugging, occasionally). Real "applications"/testdrivers shouldn't do this. | 680 | en | 0.865899 |
# Generated by Django 3.1.8 on 2021-05-20 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the ``organization`` CharField to CandidateUserModel."""

    dependencies = [
        ('users', '0020_auto_20210415_0831'),
    ]

    operations = [
        migrations.AddField(
            model_name='candidateusermodel',
            name='organization',
            field=models.CharField(default='not indicated', max_length=255, verbose_name='organization'),
        ),
    ]
| users/migrations/0021_candidateusermodel_organization.py | 455 | Generated by Django 3.1.8 on 2021-05-20 18:22 | 45 | en | 0.626734 |
import math
import numpy as np
import pytest
import tensorflow as tf
import kerastuner as kt
from kerastuner.engine import hyperparameters as hp_module
from kerastuner.engine import trial as trial_module
from kerastuner.tuners import bayesian as bo_module
@pytest.fixture(scope="function")
def tmp_dir(tmpdir_factory):
    """Provide a fresh, uniquely-numbered temporary directory for each test."""
    return tmpdir_factory.mktemp("bayesian_test", numbered=True)
def build_model(hp):
    """Build a small dense classifier whose layer widths and learning rate are hyperparameters."""
    layers = [tf.keras.layers.Flatten(input_shape=(2, 2))]
    for idx in range(3):
        width = hp.Int("units_" + str(idx), 2, 4, 2)
        layers.append(tf.keras.layers.Dense(units=width, activation="relu"))
    layers.append(tf.keras.layers.Dense(2, activation="softmax"))

    model = tf.keras.Sequential(layers)
    learning_rate = hp.Choice("learning_rate", [1e-2, 1e-3, 1e-4])
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate),
        loss="sparse_categorical_crossentropy",
        metrics=["accuracy"],
    )
    return model
def test_gpr_mse_is_small():
    """Fit the Gaussian-process regressor on a smooth target; predictions must be near-exact."""
    def target(points):
        return np.multiply(points, points).mean(axis=-1)

    x_train = np.random.rand(1000, 2)
    y_train = target(x_train)
    x_test = np.random.rand(1000, 2)
    y_test = target(x_test)

    gpr = bo_module.GaussianProcessRegressor(alpha=1e-4, seed=3)
    gpr.fit(x_train, y_train)
    mean_pred, std_pred = gpr.predict(x_test)

    assert ((mean_pred - y_test) ** 2).mean(axis=0) < 1e-8
    assert std_pred.shape == (1000,)
def test_bayesian_oracle(tmp_dir):
    """Smoke test: the oracle can run several trials end-to-end over a mixed search space."""
    hps = hp_module.HyperParameters()
    hps.Choice("a", [1, 2], default=1)
    hps.Int("b", 3, 10, default=3)
    hps.Float("c", 0, 1, 0.1, default=0)
    hps.Fixed("d", 7)
    hps.Choice("e", [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "max"),
        max_trials=20,
        num_initial_points=2,
        hyperparameters=hps,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    for i in range(5):
        trial = oracle.create_trial(str(i))
        oracle.update_trial(trial.trial_id, {"score": i})
        oracle.end_trial(trial.trial_id, "COMPLETED")
def test_bayesian_oracle_with_zero_y(tmp_dir):
    """Regression test: a constant-zero objective must not break the GP fit."""
    hps = hp_module.HyperParameters()
    hps.Choice("a", [1, 2], default=1)
    hps.Int("b", 3, 10, default=3)
    hps.Float("c", 0, 1, 0.1, default=0)
    hps.Fixed("d", 7)
    hps.Choice("e", [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "max"),
        max_trials=20,
        num_initial_points=2,
        hyperparameters=hps,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    for i in range(5):
        trial = oracle.create_trial(str(i))
        oracle.update_trial(trial.trial_id, {"score": 0})
        oracle.end_trial(trial.trial_id, "COMPLETED")
def test_bayesian_dynamic_space(tmp_dir):
    """Hyperparameters added after the oracle is created must appear in newly populated trials."""
    hps = hp_module.HyperParameters()
    hps.Choice("a", [1, 2], default=1)
    oracle = bo_module.BayesianOptimizationOracle(
        objective="val_acc", max_trials=20, num_initial_points=10
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    oracle.hyperparameters = hps
    # Burn through the random initial points first.
    for i in range(10):
        oracle._populate_space(str(i))
    hps.Int("b", 3, 10, default=3)
    assert "b" in oracle._populate_space("1_0")["values"]
    hps.Float("c", 0, 1, 0.1, default=0)
    assert "c" in oracle._populate_space("1_1")["values"]
    hps.Fixed("d", 7)
    assert "d" in oracle._populate_space("1_2")["values"]
    hps.Choice("e", [9, 0], default=9)
    assert "e" in oracle._populate_space("1_3")["values"]
def test_bayesian_save_reload(tmp_dir):
    """Oracle state must survive a save()/reload() round trip and keep accumulating trials."""
    hps = hp_module.HyperParameters()
    hps.Choice("a", [1, 2], default=1)
    hps.Choice("b", [3, 4], default=3)
    hps.Choice("c", [5, 6], default=5)
    hps.Choice("d", [7, 8], default=7)
    hps.Choice("e", [9, 0], default=9)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "max"), max_trials=20, hyperparameters=hps
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    for _ in range(3):
        trial = oracle.create_trial("tuner_id")
        oracle.update_trial(trial.trial_id, {"score": 1.0})
        oracle.end_trial(trial.trial_id, "COMPLETED")
    oracle.save()
    # A fresh oracle pointed at the same project dir should pick up the 3 trials.
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "max"), max_trials=20, hyperparameters=hps
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    oracle.reload()
    for trial_id in range(3):
        trial = oracle.create_trial("tuner_id")
        oracle.update_trial(trial.trial_id, {"score": 1.0})
        oracle.end_trial(trial.trial_id, "COMPLETED")
    assert len(oracle.trials) == 6
def test_bayesian_optimization_tuner(tmp_dir):
    """The BayesianOptimization tuner must wire itself to a BayesianOptimizationOracle."""
    tuner = bo_module.BayesianOptimization(
        build_model,
        objective="val_accuracy",
        max_trials=15,
        directory=tmp_dir,
    )
    oracle = tuner.oracle
    assert isinstance(oracle, bo_module.BayesianOptimizationOracle)
def test_bayesian_optimization_tuner_set_alpha_beta(tmp_dir):
    """The tuner must accept GP alpha/beta overrides and still build its oracle."""
    settings = dict(
        alpha=1e-4,
        beta=2.6,
        objective="val_accuracy",
        max_trials=15,
        directory=tmp_dir,
    )
    tuner = bo_module.BayesianOptimization(build_model, **settings)
    assert isinstance(tuner.oracle, bo_module.BayesianOptimizationOracle)
def test_save_before_result(tmp_dir):
    """Saving the oracle before any trial has reported a result must not raise."""
    space = hp_module.HyperParameters()
    space.Choice("a", [1, 2], default=1)
    space.Int("b", 3, 10, default=3)
    space.Float("c", 0, 1, 0.1, default=0)
    space.Fixed("d", 7)
    space.Choice("e", [9, 0], default=9)

    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "max"),
        max_trials=10,
        hyperparameters=space,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    oracle._populate_space(str(1))
    oracle.save()
def test_bayesian_oracle_maximize(tmp_dir):
    """After seeing that larger 'a' scores higher, the oracle should propose a positive 'a'."""
    hps = hp_module.HyperParameters()
    hps.Int("a", -100, 100)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", direction="max"),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    # Make examples with high 'a' and high score.
    for i in range(5):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values["a"] = 10 * i
        trial.score = i
        trial.status = "COMPLETED"
        oracle.trials[trial.trial_id] = trial
    # Make examples with low 'a' and low score
    for i in range(5):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values["a"] = -10 * i
        trial.score = -i
        trial.status = "COMPLETED"
        oracle.trials[trial.trial_id] = trial
    trial = oracle.create_trial("tuner0")
    assert trial.status == "RUNNING"
    # Assert that the oracle suggests hps it thinks will maximize.
    assert trial.hyperparameters.get("a") > 0
def test_hyperparameters_added(tmp_dir):
    """Parameters introduced via update_space must be present in subsequently created trials."""
    hps = hp_module.HyperParameters()
    hps.Int("a", -100, 100)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", direction="max"),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    # Populate initial trials.
    for i in range(10):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values["a"] = 10 * i
        trial.score = i
        trial.status = "COMPLETED"
        oracle.trials[trial.trial_id] = trial
    # Update the space.
    new_hps = hp_module.HyperParameters()
    new_hps.Float("b", 3.2, 6.4, step=0.2, default=3.6)
    new_hps.Boolean("c", default=True)
    oracle.update_space(new_hps)
    # Make a new trial, it should have b set.
    trial = oracle.create_trial("tuner0")
    assert trial.status == "RUNNING"
    assert "b" in trial.hyperparameters.values
    assert "c" in trial.hyperparameters.values
def test_step_respected(tmp_dir):
    """Values proposed for a stepped Float must land exactly on the step grid."""
    hps = hp_module.HyperParameters()
    hps.Float("c", 0, 10, step=3)
    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", direction="max"),
        max_trials=20,
        hyperparameters=hps,
        num_initial_points=2,
    )
    oracle._set_project_dir(tmp_dir, "untitled")
    # Populate initial trials.
    for i in range(10):
        trial = trial_module.Trial(hyperparameters=hps.copy())
        trial.hyperparameters.values["c"] = 3.0
        trial.score = i
        trial.status = "COMPLETED"
        oracle.trials[trial.trial_id] = trial
    trial = oracle.create_trial("tuner0")
    # Check that oracle respects the `step` param.
    assert trial.hyperparameters.get("c") in {0, 3, 6, 9}
def test_float_optimization(tmp_dir):
    """Optimizing a known cubic polynomial should land near its analytic maximum."""
    def build_model(hp):
        # Maximum at a=-1, b=1, c=1, d=0 with score=3
        return -1 * hp["a"] ** 3 + hp["b"] ** 3 + hp["c"] - abs(hp["d"])

    class PolynomialTuner(kt.engine.base_tuner.BaseTuner):
        # Minimal tuner: "building" the model just evaluates the polynomial.
        def run_trial(self, trial):
            hps = trial.hyperparameters
            score = self.hypermodel.build(hps)
            self.oracle.update_trial(trial.trial_id, {"score": score})

    hps = hp_module.HyperParameters()
    hps.Float("a", -1, 1)
    hps.Float("b", -1, 1)
    hps.Float("c", -1, 1)
    hps.Float("d", -1, 1)

    tuner = PolynomialTuner(
        hypermodel=build_model,
        oracle=kt.oracles.BayesianOptimization(
            objective=kt.Objective("score", "max"),
            hyperparameters=hps,
            max_trials=50,
        ),
        directory=tmp_dir,
    )
    tuner.search()

    # Loose tolerances: BO gets close to the optimum, not exactly on it.
    atol, rtol = 1e-1, 1e-1
    best_trial = tuner.oracle.get_best_trials()[0]
    best_hps = best_trial.hyperparameters
    assert np.isclose(best_trial.score, 3, atol=atol, rtol=rtol)
    assert np.isclose(best_hps["a"], -1, atol=atol, rtol=rtol)
    assert np.isclose(best_hps["b"], 1, atol=atol, rtol=rtol)
    assert np.isclose(best_hps["c"], 1, atol=atol, rtol=rtol)
    assert np.isclose(best_hps["d"], 0, atol=atol, rtol=rtol)
def test_distributed_optimization(tmp_dir):
    """Several concurrent 'tuners' sharing one oracle should still converge near the optimum."""
    hps = hp_module.HyperParameters()
    hps.Int("a", 0, 10)
    hps.Float("b", -1, 1, step=0.1)
    hps.Float("c", 1e-5, 1e-2, sampling="log")

    def evaluate(hp):
        # Minimum at a=4, b=1, c=1e-3 with score=-1
        return abs(hp["a"] - 4) - hp["b"] + 0.1 * abs(3 + math.log(hp["c"], 10))

    oracle = bo_module.BayesianOptimizationOracle(
        objective=kt.Objective("score", "min"), hyperparameters=hps, max_trials=60
    )
    oracle._set_project_dir(tmp_dir, "untitled")

    tuners = 4
    # Simulate 10 rounds of 4 tuners requesting trials before any reports back.
    for _ in range(10):
        trials = []
        for i in range(tuners):
            trial = oracle.create_trial("tuner_" + str(i))
            trials.append(trial)
        for trial in trials:
            oracle.update_trial(
                trial.trial_id, {"score": evaluate(trial.hyperparameters)}
            )
        for trial in trials:
            oracle.end_trial(trial.trial_id, "COMPLETED")

    atol, rtol = 1e-1, 1e-1
    best_trial = oracle.get_best_trials()[0]
    best_hps = best_trial.hyperparameters
    # The minimum is not always found but it is always close.
    assert best_trial.score < -0.8, best_hps.values
    assert np.isclose(best_hps["a"], 4, atol=atol, rtol=rtol)
    assert np.isclose(best_hps["b"], 1, atol=atol, rtol=rtol)
    # For log-scale param, just check that the order of magnitude is correct.
    log_best_c = math.log(best_hps["c"], 10)
    assert log_best_c > -4 and log_best_c < -2
| tests/kerastuner/tuners/bayesian_test.py | 11,504 | Make examples with high 'a' and high score. Make examples with low 'a' and low score Assert that the oracle suggests hps it thinks will maximize. Populate initial trials. Update the space. Make a new trial, it should have b set. Populate initial trials. Check that oracle respects the `step` param. Maximum at a=-1, b=1, c=1, d=0 with score=3 Minimum at a=4, b=1, c=1e-3 with score=-1 The minimum is not always found but it is always close. For log-scale param, just check that the order of magnitude is correct. | 512 | en | 0.898495 |
import abc
from ... import errors, utils
from ...tl import types
class ChatGetter(abc.ABC):
"""
Helper base class that introduces the `chat`, `input_chat`
and `chat_id` properties and `get_chat` and `get_input_chat`
methods.
Subclasses **must** have the following private members: `_chat`,
`_input_chat`, `_chat_peer`, `_broadcast` and `_client`. As an end
user, you should not worry about this.
"""
def __init__(self):
    # Start every cached attribute out as None; subclasses/mixin users
    # populate them once chat information becomes available.
    self._chat = None
    self._input_chat = None
    self._chat_peer = None
    self._client = None
    self._broadcast = None
@property
def chat(self):
    """
    Returns the :tl:`User`, :tl:`Chat` or :tl:`Channel` where this object
    belongs to. It may be ``None`` if Telegram didn't send the chat.
    If you're using `telethon.events`, use `get_chat` instead.
    """
    # Pure accessor: returns whatever is cached, never hits the network.
    return self._chat
async def get_chat(self):
    """
    Returns `chat`, but will make an API call to find the
    chat unless it's already cached.
    """
    # See `get_sender` for information about 'min'.
    # Refetch when nothing is cached OR the cached entity is a 'min' one.
    if (self._chat is None or getattr(self._chat, 'min', None))\
            and await self.get_input_chat():
        try:
            self._chat =\
                await self._client.get_entity(self._input_chat)
        except ValueError:
            # Entity lookup failed; fall back to other means.
            await self._refetch_chat()
    return self._chat
@property
def input_chat(self):
    """
    This :tl:`InputPeer` is the input version of the chat where the
    message was sent. Similarly to `input_sender`, this doesn't have
    things like username or similar, but still useful in some cases.
    Note that this might not be available if the library doesn't
    have enough information available.
    """
    if self._input_chat is None and self._chat_peer and self._client:
        # Lazily resolve from the client's entity cache; stay None on miss.
        try:
            self._input_chat = self._client._entity_cache[self._chat_peer]
        except KeyError:
            pass
    return self._input_chat
async def get_input_chat(self):
    """
    Returns `input_chat`, but will make an API call to find the
    input chat unless it's already cached.
    """
    if self.input_chat is None and self.chat_id and self._client:
        try:
            # The chat may be recent, look in dialogs
            # (only the first 100 dialogs are scanned).
            target = self.chat_id
            async for d in self._client.iter_dialogs(100):
                if d.id == target:
                    self._chat = d.entity
                    self._input_chat = d.input_entity
                    break
        except errors.RPCError:
            # Best-effort lookup; leave the cache untouched on failure.
            pass
    return self._input_chat
@property
def chat_id(self):
    """
    Returns the marked chat integer ID. Note that this value **will
    be different** from `to_id` for incoming private messages, since
    the chat *to* which the messages go is to your own person, but
    the *chat* itself is with the one who sent the message.
    TL;DR; this gets the ID that you expect.
    """
    return utils.get_peer_id(self._chat_peer) if self._chat_peer else None
@property
def is_private(self):
    """True if the message was sent as a private message."""
    return isinstance(self._chat_peer, types.PeerUser)
@property
def is_group(self):
    """True if the message was sent on a group or megagroup."""
    if self._broadcast is None and self.chat:
        # Cache the broadcast flag so a channel-vs-megagroup decision
        # doesn't re-read the chat entity every time.
        self._broadcast = getattr(self.chat, 'broadcast', None)
    return (
        isinstance(self._chat_peer, (types.PeerChat, types.PeerChannel))
        and not self._broadcast
    )
@property
def is_channel(self):
    """True if the message was sent on a megagroup or channel."""
    return isinstance(self._chat_peer, types.PeerChannel)
async def _refetch_chat(self):
"""
Re-fetches chat information through other means.
"""
| telethon/tl/custom/chatgetter.py | 4,047 | Helper base class that introduces the `chat`, `input_chat`
and `chat_id` properties and `get_chat` and `get_input_chat`
methods.
Subclasses **must** have the following private members: `_chat`,
`_input_chat`, `_chat_peer`, `_broadcast` and `_client`. As an end
user, you should not worry about this.
Returns the :tl:`User`, :tl:`Chat` or :tl:`Channel` where this object
belongs to. It may be ``None`` if Telegram didn't send the chat.
If you're using `telethon.events`, use `get_chat` instead.
Returns the marked chat integer ID. Note that this value **will
be different** from `to_id` for incoming private messages, since
the chat *to* which the messages go is to your own person, but
the *chat* itself is with the one who sent the message.
TL;DR; this gets the ID that you expect.
This :tl:`InputPeer` is the input version of the chat where the
message was sent. Similarly to `input_sender`, this doesn't have
things like username or similar, but still useful in some cases.
Note that this might not be available if the library doesn't
have enough information available.
True if the message was sent on a megagroup or channel.
True if the message was sent on a group or megagroup.
True if the message was sent as a private message.
See `get_sender` for information about 'min'. The chat may be recent, look in dialogs | 1,325 | en | 0.851309 |
# -*- coding: utf-8 -*-
import json
import os
import os.path as osp
import time
from pprint import pprint
import random
import glob
import numpy as np
import cv2
def generate_db(dataPath='./thor_DB.json', categoryPath='./categories.json', \
        newDataPath='./thor_DB_msdn.json'):
    # MSDN style:
    # Restructures the ai2thor DB produced by the GUI tool into the MSDN
    # form and saves it to newDataPath.
    print('-- generate thor_DB file --')
    with open(dataPath, 'r') as f:
        # DB = json.load(f)
        DB = json.load(f)
    with open(categoryPath, 'r') as f:
        categories = json.load(f)
    print('data read complete')
    print('DB len:', len(DB))
    new_DB = []
    for idx, data in enumerate(DB):
        if len(data['visible_object']) == 0:  # skip frames with no visible objects
            continue
        if idx % 10000 == 0:
            print('data preprocessing... ({}/{})'.format(idx, len(DB)))
        new_data = {}
        new_data['height'] = 600
        new_data['width'] = 600
        new_data['scene_name'] = data['scene']
        new_data['id'] = data['data_id']
        new_data['path'] = data['image_file']
        new_data['depth_path'] = data['depth_file']
        agent = {}
        # Quantize the agent's yaw (float degrees, never negative) to 0..3.
        global_rotation = data['agent']['global_rotation']['y']  # float [never negative]
        if global_rotation == 0:  # facing front
            global_rotation = 0  # now an int
        elif global_rotation == 90:  # right
            global_rotation = 1
        elif global_rotation == 180:  # back
            global_rotation = 2
        elif global_rotation == 270:  # left
            global_rotation = 3
        agent['global_rotation'] = global_rotation
        agent['global_position'] = [data['agent']['global_position']['x'], data['agent']['global_position']['y'],
                                    data['agent']['global_position']['z']]
        new_data['agent'] = agent
        objects = []
        obj_g2l = {}  # global idx -> local idx
        # NOTE(review): this inner loop reuses/shadows the outer loop variable `idx`.
        for idx, obj in enumerate(data['visible_object']):
            b_3d = obj['global_bounds3D']
            size_3d = [b_3d[3] - b_3d[0], b_3d[4] - b_3d[1], b_3d[5] - b_3d[2]]
            objects.append({'class': categories['object'][obj['obj_class'] - 1], \
                            'box': obj['bounding_box'], \
                            'global_position': [obj['global_position']['x'], obj['global_position']['y'],
                                                obj['global_position']['z']], \
                            'size_3d': size_3d, \
                            'distance': obj['distance'], \
                            'color': categories['color'][obj['color']], \
                            'open_state': categories['open_state'][obj['open_state']]})
            # 'off_state':categories['off_state'][obj['off_state']]})
            # integer class indices are replaced with string labels above
            # NOTE(review): the index mapping may be off by one -- verify
            obj_g2l[obj['id']] = idx
        new_data['objects'] = objects
        relationships = []
        for rel in data['relation']:  # per-frame (local) relations
            relationships.append({'sub_id': obj_g2l[rel['subject_id']], \
                                  'predicate': categories['predicate'][rel['rel_class'] - 1], \
                                  'obj_id': obj_g2l[rel['object_id']]})
        new_data['relationships'] = relationships
        global_relationships = []
        for rel in data['global_relation']:  # scene-wide (global) relations
            global_relationships.append({'sub_id': obj_g2l[rel['subject_id']], \
                                         'predicate': categories['predicate'][rel['rel_class'] - 1], \
                                         'obj_id': obj_g2l[rel['object_id']]})
        new_data['global_relationships'] = global_relationships
        new_DB.append(new_data)
    print('data preprocessing complete')
    print('new_DB len:', len(new_DB))
    print('data dumpping...')
    with open(newDataPath, 'w') as f:
        json.dump(new_DB, f, indent='\t')
    print('data dumpping complete')
    print('file path:', newDataPath)
def generate_categories_file(dataPath='./class', newDataPath='./categories.json'):
    """Merge the per-category class-definition text files into one categories.json.

    The GUI tool keeps one label file per category (objects, relationships,
    colors, open/off states) under ``dataPath``; the MSDN form expects a
    single combined categories file, which this writes to ``newDataPath``.
    """
    print('-- generating categories file --')

    def read_labels(filename):
        # One label per line; the trailing newline behaviour of split('\n')
        # matches the original implementation.
        with open(osp.join(dataPath, filename), 'r') as f:
            return f.read().split('\n')

    object_label = read_labels('objects.txt')
    relationship_label = read_labels('relationships.txt')
    color_label = read_labels('colors.txt')
    openState_label = read_labels('open_states.txt')
    offState_label = read_labels('off_states.txt')  # read but intentionally unused below

    categories = {
        'object': object_label[1:],           # drop the background class
        'predicate': relationship_label[1:],  # drop the background class
        'color': color_label,
        'open_state': openState_label,
        # 'off_state': offState_label,
    }

    with open(newDataPath, 'w') as f:
        json.dump(categories, f, indent='\t')
    print('data dumpping complete')
    print('file path:', newDataPath)
def get_image_info(dataPath='./thor_DB_msdn.json', imageDir='./images', depth_image=False):
    # Computes the per-channel mean and std of the images referenced by the
    # DB file (RGB by default, depth maps when depth_image=True), with pixel
    # values normalized into [0, 1] by max_value.
    # NOTE(review): this averages per-image means/stds rather than computing
    # true dataset-wide statistics -- an approximation.
    print('-- get image info --')
    print('image path : {}'.format(imageDir))
    with open(dataPath, 'r') as f:
        DB = json.load(f)
    print('data read complete')
    len_DB = len(DB)
    print('DB len:', len_DB)
    max_value = 255.
    mean = np.zeros(3)
    std = np.zeros(3)
    start_time = time.time()
    for idx, data in enumerate(DB):
        if idx % 1000 == 0 and idx != 0:
            # Progress report with a rough fps / time-remaining estimate.
            dt = time.time() - start_time
            print('data preprocessing... ({}/{}), {} fps, {}s left..'.format(idx, len(DB), \
                    round(1000. / dt, 2),
                    (len(DB) - idx) * round(dt / 1000., 4)))
            start_time = time.time()
        # Support both the MSDN key names ('path'/'depth_path') and the raw
        # GUI-tool key names ('image_file'/'depth_file').
        if depth_image:
            if 'path' in data:
                img_fn = data['depth_path']
            elif 'image_file' in data:
                img_fn = data['depth_file']
            else:
                raise Exception('where is the image path !!')
        else:
            if 'path' in data:
                img_fn = data['path']
            elif 'image_file' in data:
                img_fn = data['image_file']
            else:
                raise Exception('where is the image path !!')
        img = cv2.imread(osp.join(imageDir, img_fn))
        for i in range(3):
            mean[i] += img[:, :, i].mean() / max_value
            std[i] += img[:, :, i].std() / max_value
    mean /= len_DB
    std /= len_DB
    print('mean :', mean.round(3))
    print('std :', std.round(3))
    return mean, std
def divide_db_by_scene(dataPath='./thor_DB_msdn.json'):
    """Split an msdn-form DB into train/test sets by scene name.

    Records whose 'scene_name' is one of the hard-coded test scenes go to
    ./test.json; every other record goes to ./train.json.
    """
    print('-- divide DB --')
    with open(dataPath, 'r') as f:
        DB = json.load(f)
    test_scene = ['FloorPlan10', 'FloorPlan11']
    train_DB = []
    test_DB = []
    for idx, data in enumerate(DB):
        if idx % 10000 == 0:
            print('data preprocessing... ({}/{})'.format(idx, len(DB)))
        bucket = test_DB if data['scene_name'] in test_scene else train_DB
        bucket.append(data)
    print('train.json#:', len(train_DB))
    print('test.json#:', len(test_DB))
    for out_path, subset, label in (('./train.json', train_DB, 'train.json'),
                                    ('./test.json', test_DB, 'test.json')):
        with open(out_path, 'w') as f:
            json.dump(subset, f, indent='\t')
        print('generate ' + label)
def divide_db_by_random(dataPath='./thor_DB_msdn.json', test_rate=0.2):
    """Randomly split an msdn-form DB into train/test sets.

    Args:
        dataPath: path to the msdn-form JSON DB file.
        test_rate: fraction of records sampled (without replacement) for test.

    Writes ./train.json and ./test.json to the current working directory.
    """
    print('-- divide DB --')
    with open(dataPath, 'r') as f:
        DB = json.load(f)
    num_data = len(DB)
    # Use a set for membership: 'idx in test_idx' on a (sorted) list was
    # O(len(test_idx)) per record, making the split quadratic on large DBs.
    test_idx = set(random.sample(list(range(num_data)), int(num_data * test_rate)))
    train_DB = []
    test_DB = []
    for idx, data in enumerate(DB):
        if idx % 10000 == 0:
            print('data preprocessing... ({}/{})'.format(idx, len(DB)))
        if idx in test_idx:
            test_DB.append(data)
        else:
            train_DB.append(data)
    print('train.json#:', len(train_DB))
    print('test.json#:', len(test_DB))
    with open('./train.json', 'w') as f:
        json.dump(train_DB, f, indent='\t')
    print('generate train.json')
    with open('./test.json', 'w') as f:
        json.dump(test_DB, f, indent='\t')
    print('generate test.json')
def get_class_weights(dataPath='./train.json'):
    """Compute inverse-frequency class weights for objects/colors/open-states/relations.

    Counts every class occurrence in the DB, derives a weight of
    max_frequency / frequency per class (rarer classes get heavier weights),
    and writes the result to ./class_weights.json.
    """
    print('-- get_class_info --')
    with open(dataPath, 'r') as f:
        DB = json.load(f)
    obj_dict = {}
    color_dict = {}
    os_dict = {}
    # Relationship counts only consider positive pairs, plus a derived
    # 'background' count of unrelated ordered object pairs.
    rel_dict = {}
    for idx, data in enumerate(DB):
        for obj in data['objects']:
            obj_dict[obj['class']] = obj_dict.get(obj['class'], 0) + 1
            color_dict[obj['color']] = color_dict.get(obj['color'], 0) + 1
            os_dict[obj['open_state']] = os_dict.get(obj['open_state'], 0) + 1
        if len(data['objects']) > 0:
            # BUG FIX: accumulate across all records. The original plain
            # assignment kept only the LAST record's background pair count.
            background = len(data['objects']) * (len(data['objects']) - 1) \
                - len(data['relationships'])
            rel_dict['background'] = rel_dict.get('background', 0) + background
        for rel in data['relationships']:
            rel_dict[rel['predicate']] = rel_dict.get(rel['predicate'], 0) + 1
    output = [obj_dict, color_dict, os_dict, rel_dict]
    print('results')
    pprint(output)

    def get_weight(class_dict):
        """Weight each class by max_count / count (rarer -> heavier)."""
        counts = np.array(list(class_dict.values())).astype(float)
        print(counts)
        freqs = counts / counts.sum()
        weights = freqs.max() / freqs
        return dict(zip(class_dict.keys(), weights.tolist()))

    print('\nweights')
    all_weights = {}
    all_weights['object'] = get_weight(obj_dict)
    all_weights['color'] = get_weight(color_dict)
    all_weights['open_state'] = get_weight(os_dict)
    all_weights['relationship'] = get_weight(rel_dict)
    pprint(all_weights['object'])
    pprint(all_weights['color'])
    pprint(all_weights['open_state'])
    pprint(all_weights['relationship'])
    with open('./class_weights.json', 'w') as f:
        json.dump(all_weights, f, indent='\t')
    print('generate class_weights.json')
def make_color_balance(dataPath='./thor_DB_msdn.json'):
    """Report, per color class, the frames whose objects all share that color.

    A frame counts as "unique" for a color when every object in the frame has
    that single color; prints the total object count per such color class.
    """
    print('-- make_color_balance --')
    with open(dataPath, 'r') as f:
        DB = json.load(f)
    idx_for_unique_cls = {}
    len_for_unique_cls = {}
    for frame_idx, record in enumerate(DB):
        counts = {}
        for obj in record['objects']:
            counts[obj['color']] = counts.get(obj['color'], 0) + 1
        # Exactly one color class present in this frame?
        if len(counts) == 1:
            color_cls, n_objs = next(iter(counts.items()))
            if color_cls in idx_for_unique_cls:
                idx_for_unique_cls[color_cls].append(frame_idx)
                len_for_unique_cls[color_cls] += n_objs
            else:
                idx_for_unique_cls[color_cls] = [frame_idx]
                len_for_unique_cls[color_cls] = n_objs
    # pprint(idx_for_unique_cls)
    for k, v in len_for_unique_cls.items():
        print(k + ': ' + str(v))
def generate_prior_knowledge(dataPath='./thor_DB_msdn.json'):
    """Build prior knowledge (mean 3D size per object class) from the DB.

    Writes ./prior_knowledge.json shaped as
    {'objects': {class_name: {'size_3d': [x, y, z]}}}.
    """
    print('-- generate_prior_knowledge --')
    with open(dataPath) as f:
        db = json.load(f)
    prior_knowledge = {}
    obj_category = dict()
    for data in db:
        for obj in data['objects']:
            sx, sy, sz = obj['size_3d'][0], obj['size_3d'][1], obj['size_3d'][2]
            # Canonicalize rotated objects: keep the larger horizontal extent
            # first. (Hoisted once; the original duplicated this in both the
            # "class seen" and "class new" branches.)
            if sx >= sz:
                size = [sx, sy, sz]
            else:
                size = [sz, sy, sx]
            obj_category.setdefault(obj['class'], {}) \
                        .setdefault('size_3d', []).append(size)
    for k, v in obj_category.items():
        # Replace the collected per-instance sizes with their mean.
        obj_category[k]['size_3d'] = np.array(v['size_3d']).mean(0).tolist()
    prior_knowledge['objects'] = obj_category
    with open('./prior_knowledge.json', 'w') as f:
        json.dump(prior_knowledge, f, indent='\t')
    print('generate prior_knowledge.json')
def action_success_definition(action_scenario_dir, output_dir, success_rate=0.8):
    """Annotate every action in each scenario file with a random success flag.

    Args:
        action_scenario_dir: directory of input action-history *.json files.
        output_dir: directory for annotated copies (created if missing).
        success_rate: success probability, discretized to tenths.
    """
    print('input dir:', action_scenario_dir)
    print('output dir:', output_dir)
    ah_paths = glob.glob(f'{action_scenario_dir}/*.json')
    # Discretized roulette, e.g. 0.8 -> [True]*8 + [False]*2.
    roulette = [True]*round(success_rate*10) + [False]*round((1-success_rate)*10)
    print('success_rate :', success_rate)
    print('roulette :', roulette)
    os.makedirs(output_dir, exist_ok=True)
    for ah_path in ah_paths:
        with open(ah_path, 'r') as f:
            action_history = json.load(f)
        # osp.basename is portable; the original split('/') broke on Windows
        # paths returned by glob.
        file_name = osp.basename(ah_path)
        actions = []
        for i, action_info in enumerate(action_history['actions']):
            atom = {}
            atom['step'] = i
            atom['action'] = action_info[0]
            if len(action_info) > 1:
                atom['target_object'] = action_info[1]
            atom['success'] = random.choice(roulette)
            actions.append(atom)
        new_ah = {
            'actions': actions,
            'scene_name': action_history['scene_name'],
            'file_name': file_name
        }
        with open(osp.join(output_dir, file_name), 'w') as f:
            json.dump(new_ah, f, indent='\t')
    print('done')
if __name__ == '__main__':
    # One-off preprocessing driver: uncomment the step(s) to run.
    # generate_categories_file()
    # generate_db(dataPath='/media/ailab/D/ai2thor/thor_DB.json')
    # divide_db_by_scene(dataPath='./thor_DB_msdn.json')
    # divide_DB_by_random(dataPath='./thor_DB_msdn.json', test_rate=0.1)
    # get_image_info(imageDir='/media/ailab/D/ai2thor/images')
    # get_image_info(imageDir='/media/ailab/D/ai2thor/depth_images', depth_image=True)
    # get_class_weights(dataPath='./thor_DB_msdn.json')
    # make_color_balance(dataPath='./thor_DB_msdn.json')
    # generate_prior_knowledge(dataPath='./thor_DB_msdn.json')
    # Currently active step: add random success flags to action histories.
    action_success_definition(action_scenario_dir='/home/ailab/DH/ai2thor/datasets/thorDBv2_gsg_gt/thorDBv2_action_history',
                              output_dir='/home/ailab/DH/ai2thor/datasets/thorDBv2_gsg_gt/thorDBv2_sf_action_history')
| data_preprocessing.py | 15,992 | -*- coding: utf-8 -*- msdn style GUI 툴로 생성한 ai2thor DB를 msdn 폼으로 재구성하여 저장됨 DB = json.load(f) 물체가 없으면 넘김 float 형태 [마이너스는 없음] 정면 int 형태 오른쪽 뒤 왼쪽 global idx -> local idx 'off_state':categories['off_state'][obj['off_state']]}) index는 string label로 변경 index가 안맞을 수 있음. 확인요망 relation...!!!! global relation...!!!! object, rel, color 등의 클래스 별로 파일을 나누었는데, msdn의 폼에서는 categories 파일에 다 저장됨 이를 위해 여러 클래스 정의 파일들을 categories.json으로 통일시킴 background 제거 background 제거 categories['off_state'] = offState_label DB 파일에 속한 영상들의 평균과 표준 편차를 계산 msdn 폼의 DB를 train과 test 집합으로 나눔 [test scene을 설정하고 나눔] msdn 폼의 DB를 train과 test 집합으로 나눔 [test set 비율만 정하고 랜덤으로 나눔] 현재 negative는 아예 고려를 안하고 positive에서만 구함 (나머지도 마찬가지인데, 나머지는 상관없음) pprint(idx_for_unique_cls) 회전된 물체 통일하기 회전된 물체 통일하기 print(np.array(v['size_3d']).mean(0)) generate_categories_file() generate_db(dataPath='/media/ailab/D/ai2thor/thor_DB.json') divide_db_by_scene(dataPath='./thor_DB_msdn.json') divide_DB_by_random(dataPath='./thor_DB_msdn.json', test_rate=0.1) get_image_info(imageDir='/media/ailab/D/ai2thor/images') get_image_info(imageDir='/media/ailab/D/ai2thor/depth_images', depth_image=True) get_class_weights(dataPath='./thor_DB_msdn.json') make_color_balance(dataPath='./thor_DB_msdn.json') generate_prior_knowledge(dataPath='./thor_DB_msdn.json') | 1,288 | ko | 0.87825 |
#!/usr/bin/env python
# LED-wall visualizer: lights every strip with one color that cycles through
# a color wheel, with brightness modulated by low-frequency audio power.
import numpy as np
import sys
sys.path.append("../ar/")
import fastopc, time
import functionLib as lib
import micStream
nStrips = 16  # number of LED strips on the wall
lStrip = 64  # LEDs per strip
client = fastopc.FastOPC('localhost:7890')  # Open Pixel Control server
pixels = lib.Pixels(nStrips, lStrip, 0)
theoStrip = np.zeros([lStrip, 3])  # RGB values for one strip
stream = micStream.Stream(fps=40, nBuffers=4)  # microphone input
# Slow-moving average of bass power, used to normalize the display level.
powerSmooth = lib.ExpFilter(val=0.05, alpha_rise=0.05, alpha_decay=0.05)
nColorWheel = 1500  # frames per full trip around the color wheel
colorWheel = lib.getColorWheel(nColorWheel)
frameCount = 0
while True:
    success = stream.readAndCalc()  # returns falsy if no new audio frame
    if success:
        frameNumEff = np.mod(frameCount, nColorWheel)  # wrap into the wheel
        # Sum spectral power in the low band; presumably 5 Hz per bin given
        # the //5 scaling (roughly 10-300 Hz) — TODO confirm against micStream.
        power = np.sum(stream.freqSpectrum[10//5:300//5])
        powerSmooth.update(power)
        # Brightness relative to the smoothed baseline power.
        displayPower = int(122*power/powerSmooth.value)
        theoStrip = displayPower * colorWheel[frameNumEff]
        pixels.update(theoStrip, 0.9, 0.2)
        #print(displayPower * colorWheel[frameNumEff])
        client.putPixels(0, pixels.getArrayForDisplay())
        frameCount+=1
| deprecated/ledWall/ar_bassThumpAllSame.py | 991 | !/usr/bin/env pythonprint(displayPower * colorWheel[frameNumEff]) | 65 | en | 0.161254 |
# Polygraph (release 0.1)
# Signature generation algorithms for polymorphic worms
#
# Copyright (c) 2004-2005, Intel Corporation
# All Rights Reserved
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
import string
class SigGen(object):
    """Abstract base class for signature-generation factories.

    Subclasses derive one or more signatures from a suspicious pool of
    samples via train().
    """
    def __init__(self, pname="Pretty Name", fname="filename"):
        # Subclasses typically record the pretty/file names; the base keeps none.
        pass

    def train(self, pos_samples):
        """Generate signatures from pos_samples (suspicious pool).

        Returns a sequence of Sig objects. Must be overridden.
        """
        raise NotImplementedError
class Sig(object):
    """Abstract signature: subclasses decide whether a sample matches."""
    def match(self, sample):
        """Return whether this signature matches *sample*. Must be overridden."""
        raise NotImplementedError

    def __str__(self):
        # Subclasses provide a printable form of the signature.
        raise NotImplementedError
def regex_esc(s):
    """Escape *s* so every character matches literally in a regex.

    Alphanumerics pass through; whitespace becomes a readable escaped form;
    punctuation is backslash-escaped; anything else becomes a \\xNN escape.
    """
    whitespace_map = {' ': "\\ ", "\t": "\\t", "\n": "\\n", "\r": "\\r"}
    out = []
    for ch in s:
        if ch.isalnum():
            out.append(ch)
        elif ch in whitespace_map:
            out.append(whitespace_map[ch])
        elif ch in string.punctuation:
            out.append("\\%s" % ch)
        else:
            out.append("\\x%02x" % ord(ch))
    return ''.join(out)
estd_fpos_rate = {}  # memoize: {trace: {token: estimated rate}}
def est_fpos_rate(token, trace=None, stats=None):
    """
    Estimate false positive rate of a single-token signature.

    Estimates using the 'tokensplit' and trace-modeling methods,
    and returns the higher (most pessimistic of the two). Note that both
    of these estimates are strictly equal to or higher than the actual
    fraction of streams that 'token' occurs in within the trace.
    """
    global estd_fpos_rate
    # if we don't have it cached, figure it out
    # (dict.has_key was removed in Python 3; 'in' is equivalent on Python 2 too)
    if not (trace in estd_fpos_rate and token in estd_fpos_rate[trace]):
        # make sure there's a dictionary for this trace
        if trace not in estd_fpos_rate:
            estd_fpos_rate[trace] = {}
        # use most pessimistic (highest) estimate
        import polygraph.sigprob.tokensplit as tokensplit
        import polygraph.sigprob.sigprob as sigprob
        if trace:
            split_prob = tokensplit.mpp(token, trace, minlen=3)[0]
            stat_prob = tokensplit.maxcontextprob(token, trace)[0]
            estd_fpos_rate[trace][token] = max(split_prob, stat_prob)
        else:
            estd_fpos_rate[trace][token] = sigprob.token_prob(token, 1000, stats=stats)[-1]
    rv = estd_fpos_rate[trace][token]
    # conserve memory: evict long tokens immediately, and cap the per-trace cache
    if len(token) > 20:
        del estd_fpos_rate[trace][token]
    if len(estd_fpos_rate[trace]) > 200:
        estd_fpos_rate[trace].clear() # XXX should delete least recently accessed
    return rv
| polygraph/sig_gen/sig_gen.py | 3,080 | Abstract signature class.
Abstract class for signature generation factories.
Estimate false positive rate of a single-token signature.
Estimates using the 'tokensplit' and trace-modeling methods,
and returns the higher (most pessimistic of the two). Note that both
of these estimates are strictly equal to or higher than the actual
fraction of streams that 'token' occurs in within the trace.
Return whether current signature matches the sample
Generate one or more signatures from pos_samples (suspicious pool).
Returns a sequence of Sig objects.
Polygraph (release 0.1) Signature generation algorithms for polymorphic worms Copyright (c) 2004-2005, Intel Corporation All Rights Reserved This software is distributed under the terms of the Eclipse Public License, Version 1.0 which can be found in the file named LICENSE. ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT memoize if we don't have it cached, figure it out make sure there's a dictionary for this trace use most pessimistic (highest) estimate conserve memory XXX should delete least recently accessed | 1,153 | en | 0.842408 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.browser.server_groups.servers.tests import utils as server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as tables_utils
class TableUpdateTestCase(BaseTestGenerator):
    """This class will update an existing table under the schema node."""
    # (The previous docstring said "add new collation" — a copy-paste leftover;
    # these scenarios exercise the table PUT endpoint.)
    scenarios = [
        # Fetching default URL for table node.
        ('Update Table', dict(url='/browser/table/obj/')),
        ('Create partitions of existing range partitioned table',
         dict(url='/browser/table/obj/',
              server_min_version=100000,
              partition_type='range',
              mode='create'
              )
         ),
        ('Create partitions of existing list partitioned table',
         dict(url='/browser/table/obj/',
              server_min_version=100000,
              partition_type='list',
              mode='create'
              )
         ),
        ('Detach partition from existing range partitioned table',
         dict(url='/browser/table/obj/',
              server_min_version=100000,
              partition_type='range',
              mode='detach'
              )
         ),
        ('Detach partition from existing list partitioned table',
         dict(url='/browser/table/obj/',
              server_min_version=100000,
              partition_type='list',
              mode='detach'
              )
         ),
        ('Attach partition to existing range partitioned table',
         dict(url='/browser/table/obj/',
              server_min_version=100000,
              partition_type='range',
              mode='attach'
              )
         ),
        ('Attach partition to existing list partitioned table',
         dict(url='/browser/table/obj/',
              server_min_version=100000,
              partition_type='list',
              mode='attach'
              )
         )
    ]

    def setUp(self):
        """Connect to the test DB and create the table (partitioned when the
        scenario requires it) that runTest will update."""
        self.db_name = parent_node_dict["database"][-1]["db_name"]
        schema_info = parent_node_dict["schema"][-1]
        self.server_id = schema_info["server_id"]
        self.db_id = schema_info["db_id"]
        db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
                                                 self.server_id, self.db_id)
        if not db_con['data']["connected"]:
            raise Exception("Could not connect to database to add a table.")
        self.schema_id = schema_info["schema_id"]
        self.schema_name = schema_info["schema_name"]
        schema_response = schema_utils.verify_schemas(self.server,
                                                      self.db_name,
                                                      self.schema_name)
        if not schema_response:
            raise Exception("Could not find the schema to add a table.")
        self.table_name = "test_table_put_%s" % (str(uuid.uuid4())[1:8])
        self.is_partition = False
        if hasattr(self, 'server_min_version'):
            # Partition scenarios are only valid on PG/PPAS >= 10; skip otherwise.
            server_con = server_utils.connect_server(self, self.server_id)
            if not server_con["info"] == "Server connected.":
                raise Exception("Could not connect to server to add "
                                "partitioned table.")
            if server_con["data"]["version"] < self.server_min_version:
                message = "Partitioned table are not supported by " \
                          "PPAS/PG 10.0 and below."
                self.skipTest(message)
            else:
                self.is_partition = True
                self.table_id = tables_utils.create_table_for_partition(
                    self.server,
                    self.db_name,
                    self.schema_name,
                    self.table_name,
                    'partitioned',
                    self.partition_type)
        else:
            self.table_id = tables_utils.create_table(self.server, self.db_name,
                                                      self.schema_name,
                                                      self.table_name)

    def runTest(self):
        """Update the table created in setUp via a PUT request.

        Partition scenarios perform partition maintenance (create/detach/
        attach); the plain scenario updates the table's comment.
        """
        table_response = tables_utils.verify_table(self.server, self.db_name,
                                                   self.table_id)
        if not table_response:
            raise Exception("Could not find the table to update.")
        if self.is_partition:
            data = {"id": self.table_id}
            tables_utils.set_partition_data(
                self.server, self.db_name, self.schema_name, self.table_name,
                self.partition_type, data, self.mode)
        else:
            data = {
                "description": "This is test comment for table",
                "id": self.table_id
            }
        response = self.tester.put(
            self.url + str(utils.SERVER_GROUP) + '/' +
            str(self.server_id) + '/' + str(self.db_id) + '/' +
            str(self.schema_id) + '/' + str(self.table_id),
            data=json.dumps(data), follow_redirects=True)
        # assertEqual: assertEquals is a deprecated alias (removed in Py 3.12).
        self.assertEqual(response.status_code, 200)

    def tearDown(self):
        """Disconnect from the test database."""
        database_utils.disconnect_database(self, self.server_id, self.db_id)
| pgAdmin4/pgAdmin4/lib/python2.7/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tables/tests/test_table_put.py | 5,809 | This class will add new collation under schema node.
This function will fetch added table under schema node.
pgAdmin 4 - PostgreSQL Tools Copyright (C) 2013 - 2018, The pgAdmin Development Team This software is released under the PostgreSQL Licence Fetching default URL for table node. Disconnect the database | 311 | en | 0.589055 |
#!/usr/bin/env python3
import pytest
import shutil
import sys
import os
import logging
from datetime import datetime
from src.dependency import check_dependencies
from src.exif import Exif
from src.phockup import Phockup
os.chdir(os.path.dirname(__file__))
def test_check_dependencies(mocker):
    """check_dependencies passes silently when exiftool is on PATH."""
    mocker.patch('shutil.which', return_value='exiftool')
    mocker.patch('sys.exit')
    check_dependencies()
    assert not sys.exit.called
def test_check_dependencies_missing(mocker):
    """check_dependencies raises when exiftool cannot be found."""
    mocker.patch('shutil.which', return_value=None)
    mocker.patch('sys.exit')
    with pytest.raises(Exception, match="Exiftool is not installed. \
Visit http://www.sno.phy.queensu.ca/~phil/exiftool/"):
        check_dependencies()
def test_exception_if_missing_input_directory(mocker):
    """Constructing Phockup with a nonexistent input dir raises RuntimeError."""
    mocker.patch('os.makedirs')
    mocker.patch('sys.exit')
    with pytest.raises(RuntimeError, match="Input directory 'in' does not \
exist or cannot be accessed"):
        Phockup('in', 'out')
def test_removing_trailing_slash_for_input_output(mocker):
    """Trailing path separators are stripped from input/output dirs."""
    mocker.patch('os.makedirs')
    mocker.patch('sys.exit')
    mocker.patch.object(Phockup, 'check_directories')
    if sys.platform == 'win32':
        phockup = Phockup('in\\', 'out\\')
    else:
        phockup = Phockup('in/', 'out/')
    assert phockup.input_dir == 'in'
    assert phockup.output_dir == 'out'
def test_exception_for_no_write_access_when_creating_output_dir(mocker):
    """Creating the output dir in an unwritable location raises OSError."""
    mocker.patch.object(Phockup, 'walk_directory')
    with pytest.raises(OSError, match="Cannot create output '/root/phockup' \
directory. No write access!"):
        Phockup('input', '/root/phockup')
def test_walking_directory():
    """A full run sorts the fixture files into date directories plus 'unknown'."""
    shutil.rmtree('output', ignore_errors=True)
    Phockup('input', 'output')
    expected = {
        'output/2017/01/01': 3,
        'output/2017/10/06': 1,
        'output/unknown': 1,
        'output/2018/01/01/': 1,
    }
    for directory, count in expected.items():
        assert os.path.isdir(directory)
        files = [entry for entry in os.listdir(directory)
                 if os.path.isfile(os.path.join(directory, entry))]
        assert len(files) == count
    shutil.rmtree('output', ignore_errors=True)
def test_dry_run():
    """dry_run=True must not create the output tree at all."""
    shutil.rmtree('output', ignore_errors=True)
    Phockup('input', 'output', dry_run=True)
    assert not os.path.isdir('output')
    for directory in ('output/2017/01/01', 'output/2017/10/06',
                      'output/unknown', 'output/2018/01/01/'):
        assert not os.path.isdir(directory)
def test_get_file_type(mocker):
    """get_file_type accepts image/video MIME types and rejects others."""
    mocker.patch.object(Phockup, 'check_directories')
    for mime, accepted in (("image/jpeg", True),
                           ("video/mp4", True),
                           ("foo/bar", False)):
        assert bool(Phockup('in', '.').get_file_type(mime)) == accepted
def test_get_file_name(mocker):
    """File names are derived from the date dict as YYYYMMDD-HHMMSS<subsec>.ext."""
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    date_info = {
        "date": datetime(2017, 1, 1, 1, 1, 1),
        "subseconds": "20"
    }
    generated = Phockup('in', 'out').get_file_name("Bar/Foo.jpg", date_info)
    assert generated == "20170101-01010120.jpg"
def test_get_file_name_is_original_on_exception(mocker):
    """Without date info the original file name is kept."""
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    assert Phockup('in', 'out').get_file_name("Bar/Foo.jpg", None) == "Foo.jpg"
def test_process_file_with_filename_date(mocker):
    """A date embedded in the file name is used when EXIF has no date."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    mocker.patch.object(Exif, 'data')
    Exif.data.return_value = {
        "MIMEType": "image/jpeg"
    }
    Phockup('input', 'output').process_file("input/date_20170101_010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    shutil.rmtree('output', ignore_errors=True)
def test_process_link_to_file_with_filename_date(mocker):
    """A symlink to a dated file is processed like the file itself."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output').process_file(
        "input/link_to_date_20170101_010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    shutil.rmtree('output', ignore_errors=True)
def test_process_broken_link(mocker, caplog):
    """A dangling path is skipped with a warning instead of crashing."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    with caplog.at_level(logging.WARNING):
        Phockup('input', 'output').process_file("input/not_a_file.jpg")
    assert 'skipped, no such file or directory' in caplog.text
    shutil.rmtree('output', ignore_errors=True)
def test_process_broken_link_move(mocker, caplog):
    """A dangling path is skipped (with a warning) when move=True as well.

    The original test created the move=True instance but then asserted the
    warning of a fresh non-move Phockup, so the move variant's log was never
    checked; capture the warning from the move-configured instance instead.
    """
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    phockup = Phockup('input', 'output', move=True)
    with caplog.at_level(logging.WARNING):
        phockup.process_file("input/not_a_file.jpg")
    assert 'skipped, no such file or directory' in caplog.text
    shutil.rmtree('output', ignore_errors=True)
def test_process_image_exif_date(mocker):
    """An image with an EXIF date is filed under its date directory."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output').process_file("input/exif.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    shutil.rmtree('output', ignore_errors=True)
def test_process_image_xmp(mocker):
    """An image's .jpg.xmp sidecar is renamed along with the image."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output').process_file("input/xmp.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg.xmp")
    shutil.rmtree('output', ignore_errors=True)
def test_process_image_xmp_noext(mocker):
    """A sidecar named <base>.xmp (no image extension) is also carried over."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output').process_file("input/xmp_noext.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.xmp")
    shutil.rmtree('output', ignore_errors=True)
def test_process_image_xmp_ext_and_noext(mocker):
    """Both sidecar naming styles are carried over when both exist."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output').process_file("input/xmp_ext.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.xmp")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg.xmp")
    shutil.rmtree('output', ignore_errors=True)
def test_process_image_unknown(mocker):
    """An image with no usable date lands in 'unknown' (lower-cased by default)."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    mocker.patch.object(Exif, 'data')
    Exif.data.return_value = {
        "MIMEType": "image/jpeg"
    }
    Phockup('input', 'output').process_file("input/UNKNOWN.jpg")
    assert os.path.isfile("output/unknown/unknown.jpg")
    shutil.rmtree('output', ignore_errors=True)
def test_process_other(mocker):
    """Non-media files are moved to 'unknown' untouched."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output').process_file("input/other.txt")
    assert os.path.isfile("output/unknown/other.txt")
    shutil.rmtree('output', ignore_errors=True)
def test_process_move(mocker):
    """move=True relocates the source files instead of copying them."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    mocker.patch.object(Exif, 'data')
    Exif.data.return_value = {
        "MIMEType": "image/jpeg"
    }
    phockup = Phockup('input', 'output', move=True)
    sources = ["input/tmp_20170101_010101.jpg", "input/tmp_20170101_010101.xmp"]
    for src in sources:
        open(src, "w").close()
    for src in sources:
        phockup.process_file(src)
    for src in sources:
        assert not os.path.isfile(src)
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.xmp")
    shutil.rmtree('output', ignore_errors=True)
def test_process_link(mocker):
    """link=True leaves the sources in place and links them into output."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    mocker.patch.object(Exif, 'data')
    Exif.data.return_value = {
        "MIMEType": "image/jpeg"
    }
    phockup = Phockup('input', 'output', link=True)
    sources = ["input/tmp_20170101_010101.jpg", "input/tmp_20170101_010101.xmp"]
    for src in sources:
        open(src, "w").close()
    for src in sources:
        phockup.process_file(src)
    for src in sources:
        assert os.path.isfile(src)
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.xmp")
    shutil.rmtree('output', ignore_errors=True)
    for src in sources:
        os.remove(src)
def test_process_exists_same(mocker, caplog):
    """Processing the same file twice skips the duplicate with an info log."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    phockup = Phockup('input', 'output')
    phockup.process_file("input/exif.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    with caplog.at_level(logging.INFO):
        phockup.process_file("input/exif.jpg")
    assert 'skipped, duplicated file' in caplog.text
    shutil.rmtree('output', ignore_errors=True)
def test_process_same_date_different_files_rename(mocker):
    """Different files with the same timestamp get a '-2' suffix."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    phockup = Phockup('input', 'output')
    phockup.process_file("input/exif.jpg")
    mocker.patch.object(Exif, 'data')
    Exif.data.return_value = {
        "MIMEType": "image/jpeg",
        "CreateDate": "2017:01:01 01:01:01"
    }
    phockup.process_file("input/date_20170101_010101.jpg")
    assert os.path.isfile("output/2017/01/01/20170101-010101-2.jpg")
    shutil.rmtree('output', ignore_errors=True)
def test_process_skip_xmp(mocker):
    """Stand-alone XMP files are skipped (no error means it was skipped)."""
    # Assume no errors == skip XMP file
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    phockup = Phockup('input', 'output')
    phockup.process_file("skip.xmp")
def test_process_skip_ignored_file():
    """Ignored files such as .DS_Store are never copied to output."""
    shutil.rmtree('output', ignore_errors=True)
    shutil.rmtree('input_ignored', ignore_errors=True)
    os.mkdir('input_ignored')
    open("input_ignored/.DS_Store", "w").close()
    Phockup('input_ignored', 'output')
    assert not os.path.isfile("output/unknown/.DS_Store")
    shutil.rmtree('output', ignore_errors=True)
    shutil.rmtree('input_ignored', ignore_errors=True)
def test_keep_original_filenames(mocker):
    """original_filenames=True keeps the source file name instead of the date name."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output', original_filenames=True).process_file(
        "input/exif.jpg")
    assert os.path.isfile("output/2017/01/01/exif.jpg")
    assert not os.path.isfile("output/2017/01/01/20170101-010101.jpg")
    shutil.rmtree('output', ignore_errors=True)
def test_keep_original_filenames_and_filenames_case(mocker):
    """original_filenames=True also preserves the original letter case."""
    shutil.rmtree('output', ignore_errors=True)
    mocker.patch.object(Phockup, 'check_directories')
    mocker.patch.object(Phockup, 'walk_directory')
    Phockup('input', 'output', original_filenames=True).process_file(
        "input/UNKNOWN.jpg")
    assert os.path.isfile("output/2017/10/06/UNKNOWN.jpg")
    assert 'unknown.jpg' not in os.listdir("output/2017/10/06")
    shutil.rmtree('output', ignore_errors=True)
def test_maxdepth_zero():
    """maxdepth=0 only processes the top level; the nested 2018 file is skipped."""
    shutil.rmtree('output', ignore_errors=True)
    Phockup('input', 'output', maxdepth=0)
    expected = {
        'output/2017/01/01': 3,
        'output/2017/10/06': 1,
        'output/unknown': 1,
    }
    for directory, count in expected.items():
        assert os.path.isdir(directory)
        files = [entry for entry in os.listdir(directory)
                 if os.path.isfile(os.path.join(directory, entry))]
        assert len(files) == count
    shutil.rmtree('output', ignore_errors=True)
def test_maxdepth_one():
    """maxdepth=1 also descends one level, picking up the nested 2018 file."""
    shutil.rmtree('output', ignore_errors=True)
    Phockup('input', 'output', maxdepth=1)
    expected = {
        'output/2017/01/01': 3,
        'output/2017/10/06': 1,
        'output/unknown': 1,
        'output/2018/01/01/': 1,
    }
    for directory, count in expected.items():
        assert os.path.isdir(directory)
        files = [entry for entry in os.listdir(directory)
                 if os.path.isfile(os.path.join(directory, entry))]
        assert len(files) == count
    shutil.rmtree('output', ignore_errors=True)
| tests/test_phockup.py | 14,491 | !/usr/bin/env python3 Assume no errors == skip XMP file | 55 | en | 0.52252 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Efficient ImageNet input pipeline using tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import resnet_preprocessing
class ImageNetInput(object):
  """Generates ImageNet input_fn for training or evaluation.

  The training data is assumed to be in TFRecord format with keys as specified
  in the dataset_parser below, sharded across 1024 files, named sequentially:
      train-00000-of-01024
      train-00001-of-01024
      ...
      train-01023-of-01024

  The validation data is in the same format but sharded in 128 files.

  The format of the data required is created by the script at:
  https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py

  Args:
    is_training: `bool` for whether the input is for training
    data_dir: `str` for the directory of the training and validation data;
        if 'null' (the literal string 'null', not None), then construct a null
        pipeline, consisting of empty images.
    batch_size: The global batch size to use during training or evaluation.
    use_bfloat16: `bool`; when True the preprocessing function casts images
        to bfloat16 (e.g. for TPU training).
  """

  def __init__(self, is_training, data_dir, batch_size=1024,
               use_bfloat16=False):
    self.image_preprocessing_fn = resnet_preprocessing.preprocess_image
    self.is_training = is_training
    self.data_dir = data_dir
    # The literal string 'null' (or empty string) selects the synthetic
    # black-image pipeline; see input_fn / input_fn_null.
    if self.data_dir == 'null' or self.data_dir == '':
      self.data_dir = None
    self.batch_size = batch_size
    self.use_bfloat16 = use_bfloat16

  def dataset_parser(self, value):
    """Parse an ImageNet record from a serialized string Tensor.

    Args:
      value: scalar string Tensor holding one serialized `tf.Example`.

    Returns:
      An `(image, label)` tuple: the preprocessed image tensor and a float32
      label tensor of shape [1] with values in [0, 1000).
    """
    keys_to_features = {
        'image/encoded':
            tf.FixedLenFeature((), tf.string, ''),
        'image/format':
            tf.FixedLenFeature((), tf.string, 'jpeg'),
        'image/class/label':
            tf.FixedLenFeature([], tf.int64, -1),
        'image/class/text':
            tf.FixedLenFeature([], tf.string, ''),
        'image/object/bbox/xmin':
            tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymin':
            tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/xmax':
            tf.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymax':
            tf.VarLenFeature(dtype=tf.float32),
        'image/object/class/label':
            tf.VarLenFeature(dtype=tf.int64),
    }

    parsed = tf.parse_single_example(value, keys_to_features)
    image_bytes = tf.reshape(parsed['image/encoded'], shape=[])

    image = self.image_preprocessing_fn(
        image_bytes=image_bytes,
        is_training=self.is_training,
        use_bfloat16=self.use_bfloat16)

    # Subtract one so that labels are in [0, 1000), and cast to float32 for
    # Keras model.
    label = tf.cast(tf.cast(
        tf.reshape(parsed['image/class/label'], shape=[1]), dtype=tf.int32) - 1,
        dtype=tf.float32)

    return image, label

  def input_fn(self):
    """Input function which provides a single batch for train or eval.

    Returns:
      A `tf.data.Dataset` object.
    """
    if self.data_dir is None:
      tf.logging.info('Using fake input.')
      return self.input_fn_null()

    # Shuffle the filenames to ensure better randomization.
    file_pattern = os.path.join(
        self.data_dir, 'train-*' if self.is_training else 'validation-*')
    dataset = tf.data.Dataset.list_files(file_pattern, shuffle=self.is_training)

    # Training repeats forever; evaluation makes a single pass.
    if self.is_training:
      dataset = dataset.repeat()

    def fetch_dataset(filename):
      # Per-file read buffer; larger buffers amortize GCS/disk reads.
      buffer_size = 8 * 1024 * 1024  # 8 MiB per file
      dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
      return dataset

    # Read the data from disk in parallel (sloppy=True trades determinism
    # for throughput).
    dataset = dataset.apply(
        tf.contrib.data.parallel_interleave(
            fetch_dataset, cycle_length=16, sloppy=True))
    dataset = dataset.shuffle(1024)

    # Parse, pre-process, and batch the data in parallel
    dataset = dataset.apply(
        tf.contrib.data.map_and_batch(
            self.dataset_parser, batch_size=self.batch_size,
            num_parallel_batches=2,
            drop_remainder=True))

    # Prefetch overlaps in-feed with training
    dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)
    return dataset

  def input_fn_null(self):
    """Input function which provides null (black) images."""
    dataset = tf.data.Dataset.range(1).repeat().map(self._get_null_input)
    dataset = dataset.prefetch(self.batch_size)

    dataset = dataset.batch(self.batch_size, drop_remainder=True)
    dataset = dataset.prefetch(32)  # Prefetch overlaps in-feed with training
    tf.logging.info('Input dataset: %s', str(dataset))
    return dataset

  def _get_null_input(self, _):
    # All-zero 224x224 RGB image with a constant zero label.
    null_image = tf.zeros([224, 224, 3], tf.float32)
    return null_image, tf.constant(0, tf.float32)
| models/experimental/distribution_strategy/imagenet_input_keras.py | 5,548 | Generates ImageNet input_fn for training or evaluation.
The training data is assumed to be in TFRecord format with keys as specified
in the dataset_parser below, sharded across 1024 files, named sequentially:
train-00000-of-01024
train-00001-of-01024
...
train-01023-of-01024
The validation data is in the same format but sharded in 128 files.
The format of the data required is created by the script at:
https://github.com/tensorflow/tpu/blob/master/tools/datasets/imagenet_to_gcs.py
Args:
is_training: `bool` for whether the input is for training
data_dir: `str` for the directory of the training and validation data;
if 'null' (the literal string 'null', not None), then construct a null
pipeline, consisting of empty images.
batch_size: The global batch size to use during training or evaluation.
Parse an ImageNet record from a serialized string Tensor.
Input function which provides a single batch for train or eval.
Returns:
A `tf.data.Dataset` object.
Input function which provides null (black) images.
Efficient ImageNet input pipeline using tf.data.Dataset.
Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Subtract one so that labels are in [0, 1000), and cast to float32 for Keras model. Shuffle the filenames to ensure better randomization. 8 MiB per file Read the data from disk in parallel Parse, pre-process, and batch the data in parallel Prefetch overlaps in-feed with training Prefetch overlaps in-feed with training | 2,094 | en | 0.819791 |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
from torch import Tensor, tensor
from torchmetrics.utilities.checks import _check_retrieval_functional_inputs
def retrieval_fall_out(preds: Tensor, target: Tensor, k: Optional[int] = None) -> Tensor:
    """Compute the Fall-out for information retrieval, as explained in `IR Fall-out`_.

    Fall-out is the fraction of non-relevant documents retrieved among all
    the non-relevant documents. ``preds`` and ``target`` should be of the
    same shape and live on the same device; ``target`` must be `bool` or
    `integers` and ``preds`` must be `float`, otherwise an error is raised.
    If no document is non-relevant, ``0`` is returned. To measure
    Fall-out@K, pass a positive integer ``k``.

    Args:
        preds: estimated probabilities of each document to be relevant.
        target: ground truth about each document being relevant or not.
        k: consider only the top k elements (default: `None`, which considers them all)

    Returns:
        a single-value tensor with the fall-out (at ``k``) of the predictions
        ``preds`` w.r.t. the labels ``target``.

    Raises:
        ValueError:
            If ``k`` parameter is not `None` or an integer larger than 0

    Example:
        >>> from torchmetrics.functional import retrieval_fall_out
        >>> preds = tensor([0.2, 0.3, 0.5])
        >>> target = tensor([True, False, True])
        >>> retrieval_fall_out(preds, target, k=2)
        tensor(1.)
    """
    preds, target = _check_retrieval_functional_inputs(preds, target)

    top_k = preds.shape[-1] if k is None else k
    if not (isinstance(top_k, int) and top_k > 0):
        raise ValueError("`k` has to be a positive integer or None")

    # Invert relevance: 1 marks a non-relevant document.
    negatives = 1 - target
    total_negatives = negatives.sum()
    if not total_negatives:
        return tensor(0.0, device=preds.device)

    ranking = torch.argsort(preds, dim=-1, descending=True)
    retrieved_negatives = negatives[ranking][:top_k].sum().float()
    return retrieved_negatives / total_negatives
| torchmetrics/functional/retrieval/fall_out.py | 2,528 | Computes the Fall-out (for information retrieval), as explained in `IR Fall-out`_ Fall-out is the fraction
of non-relevant documents retrieved among all the non-relevant documents.
``preds`` and ``target`` should be of the same shape and live on the same device. If no ``target`` is ``True``,
``0`` is returned. ``target`` must be either `bool` or `integers` and ``preds`` must be `float`,
otherwise an error is raised. If you want to measure Fall-out@K, ``k`` must be a positive integer.
Args:
preds: estimated probabilities of each document to be relevant.
target: ground truth about each document being relevant or not.
k: consider only the top k elements (default: `None`, which considers them all)
Returns:
a single-value tensor with the fall-out (at ``k``) of the predictions ``preds`` w.r.t. the labels ``target``.
Raises:
ValueError:
If ``k`` parameter is not `None` or an integer larger than 0
Example:
>>> from torchmetrics.functional import retrieval_fall_out
>>> preds = tensor([0.2, 0.3, 0.5])
>>> target = tensor([True, False, True])
>>> retrieval_fall_out(preds, target, k=2)
tensor(1.)
Copyright The PyTorch Lightning team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1,717 | en | 0.772739 |
"""
Views for the web service
"""
import os
import json
import urllib.parse
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponseNotAllowed, HttpResponseBadRequest
from django.http import HttpResponse
from django.urls import reverse
import requests
from django.views.decorators.csrf import csrf_exempt
from webservice.github_util import parse_dependencies
from pkgpkr.settings import GITHUB_CLIENT_ID, GITHUB_CLIENT_SECRET, \
GITHUB_OATH_AUTH_PATH, GITHUB_OATH_ACCESS_TOKEN_PATH, JAVASCRIPT, PYTHON, SUPPORTED_LANGUAGES, \
DEFAULT_MAX_RECOMMENDATIONS
from . import github_util
from .recommender_service import RecommenderService
# Instantiate service class (module-level: shared across all requests)
RECOMMENDER_SERVICE = RecommenderService()

# Sentinel repo "name" that marks a manual-input demo run instead of a
# real GitHub repository.
DEMO_REPO_INPUT_NAME = 'DEMO'
def index(request):
    """Render the landing page.

    arguments:
        :request: GET HTTP request

    returns:
        Rendered home (index) page
    """
    display_languages = sorted(lang.capitalize() for lang in SUPPORTED_LANGUAGES.keys())
    context = {
        'demo_input_repo_name': DEMO_REPO_INPUT_NAME,
        'supported_languages': display_languages,
    }
    return render(request, "webservice/index.html", context)
def about(request):
    """Render the about page.

    arguments:
        :request: GET HTTP request

    returns:
        Rendered about page
    """
    return render(request, "webservice/about.html")
def login(request):
    """Log user in using GitHub OAuth.

    arguments:
        :request: GET HTTP request

    returns:
        Redirects to index
    """
    # Create keys if not yet there!
    if not request.session.get('github_token'):
        request.session['github_token'] = None  # To keep API token
        request.session['github_info'] = None  # To keep user info (e.g. name, avatar url)

    # For Selenium testing: bypass the OAuth round-trip with a pre-issued token.
    if os.environ.get('SELENIUM_TEST') == '1':
        assert os.environ.get('GH_TOKEN'), "GH_TOKEN not set"
        request.session['github_token'] = os.environ.get('GH_TOKEN')
        request.session['github_info'] = github_util.get_user_info(request.session['github_token'])
        return HttpResponseRedirect(reverse('index'))

    # Redirect to attempt Github Auth
    return HttpResponseRedirect(GITHUB_OATH_AUTH_PATH)
def callback(request):
    """GitHub redirects here after authorization; exchange the code for an API token.

    arguments:
        :request: GET HTTP request (carries the `code` query parameter set by GitHub)

    returns:
        Redirects to index
    """
    # Get code supplied by github
    code = request.GET.get('code')

    # Payload to fetch
    payload = {'client_id': GITHUB_CLIENT_ID,
               'client_secret': GITHUB_CLIENT_SECRET,
               'code': code}

    headers = {"accept": "application/json"}

    # Call github to get token
    res = requests.post(GITHUB_OATH_ACCESS_TOKEN_PATH,
                        data=payload,
                        headers=headers)

    # Set token
    request.session['github_token'] = res.json()['access_token']

    # Call for user info and store in sessions (to be used for UI)
    request.session['github_info'] = github_util.get_user_info(request.session['github_token'])

    return HttpResponseRedirect(reverse('index'))
def logout(request):
    """Log the user out but keep the GitHub OAuth authorization.

    arguments:
        :request: GET HTTP request

    returns:
        Redirects to index
    """
    # Clear the session-held credentials and cached user info.
    for session_key in ('github_token', 'github_info'):
        request.session[session_key] = None

    return HttpResponseRedirect(reverse("index"))
def repositories(request):
    """Get full list (up to 100) of repositories for the current user.

    arguments:
        :request: GET HTTP request

    returns:
        Rendered repositories page
    """
    # Assure login
    if not request.session.get('github_token'):
        return HttpResponseRedirect(reverse("index"))

    # Get all repos, grouped per language
    repos_per_language = github_util.get_repositories(request.session['github_token'])

    # Keyed by full repo name so a repo seen under several languages appears once.
    combined_repos = dict()
    for language, repos in repos_per_language.items():
        for repo in repos:
            # Skip if repo has no dependencies
            if not repo['object']:
                continue

            # Updated Date
            date_time = repo['updatedAt']

            # Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16
            date = date_time.split('T')[0]
            repo['date'] = date

            # Convert string to encoded URL e.g. hello/world -> hello%2Fworld
            repo['nameWithOwnerEscaped'] = urllib.parse.quote_plus(repo['nameWithOwner'])

            repo['language'] = language

            # Get dependencies if any, remember if at least some dependencies found
            if parse_dependencies(repo['object']['text'], language, True):
                combined_repos[repo['nameWithOwner']] = repo

    return render(request, "webservice/repositories.html", {
        'repos': combined_repos.values()
    })
def recommendations(request, name):
    """Get recommended packages for the repo.

    arguments:
        :request: GET/POST HTTP request
        :name: repo name (URL-encoded, e.g. hello%2Fworld)

    returns:
        Rendered recommendation page
    """
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
    repo_name = urllib.parse.unquote_plus(name)

    # Process for DEMO run: the dependencies are posted manually rather than
    # fetched from GitHub.
    if request.method == 'POST':
        language = request.POST.get('language')
        language = language.lower()

        dependencies = request.POST.get('dependencies')
        dependencies = dependencies.strip(',')

        if language not in SUPPORTED_LANGUAGES.keys():
            return HttpResponse(f'Demo language {language} not supported', status=404)

        # Stash the demo inputs in the session so recommendations_json can use them.
        request.session['dependencies'] = dependencies
        request.session['language'] = language

        branch_name = None
        branch_names = None

    # If GET it means it's not a DEMO POST call with manual dependencies inputs
    else:
        # Assure login
        if not request.session.get('github_token'):
            return HttpResponseRedirect(reverse("index"))

        # Fetch branch name out of HTTP GET Param
        branch_name = request.GET.get('branch', default='master')

        # Get branch names and language (ONLY) for the repo, no need for dependencies yet
        _, branch_names, language = github_util.get_dependencies(request.session['github_token'],
                                                                 repo_name,
                                                                 branch_name)

    return render(request, "webservice/recommendations.html", {
        'repository_name': repo_name,
        'recommendation_url': f"/recommendations/{urllib.parse.quote_plus(name)}?branch={branch_name}",
        'branch_names': branch_names,
        'current_branch': branch_name,
        'language': language
    })
def recommendations_json(request, name):
    """Get recommended packages for the repo in JSON format.

    arguments:
        :request: GET HTTP request
        :name: repo name (URL-encoded) or the DEMO sentinel

    returns:
        JSON object with recommendations
    """
    # Convert encoded URL back to string e.g. hello%2Fworld -> hello/world
    repo_name = urllib.parse.unquote_plus(name)

    if name == DEMO_REPO_INPUT_NAME:
        # Demo run: dependencies were stashed in the session by `recommendations`.
        dependencies = github_util.parse_dependencies(request.session.get('dependencies'),
                                                      request.session.get('language'))

        # Set to none (will also allow for not showing branch selector)
        branch_name = None
    else:
        if not request.session.get('github_token'):
            return HttpResponse('Unauthorized', status=401)

        # Fetch branch name out of HTTP GET Param
        branch_name = request.GET.get('branch', default='master')

        # Get dependencies for current repo, and branch names for the repo
        dependencies, _, _ = github_util.get_dependencies(request.session['github_token'],
                                                          repo_name,
                                                          branch_name)

    # Get predictions
    recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)

    # Setup data to be returned
    data = {
        'repository_name': repo_name,
        'current_branch': branch_name,
        'data': recommended_dependencies
    }

    return HttpResponse(json.dumps(data), content_type="application/json")
@csrf_exempt
def recommendations_service_api(request):
    """Return package recommendations for API POST call without authentication.

    arguments:
        :request: POST request of application/json type with required keys
            `dependencies` (non-empty list) and `language`, plus optional
            `max_recommendations` (int)

    returns:
        JSON response with the list of package recommendations, or an HTTP
        error response on malformed input
    """
    if request.method == 'POST':
        # Fetch JSON
        try:
            json_data = json.loads(request.body)  # request.raw_post_data w/ Django < 1.4
        except json.JSONDecodeError:
            return HttpResponseBadRequest("Could not parse JSON.")

        # Fetch non-optional keys
        try:
            dependencies = json_data['dependencies']
            language = json_data['language'].lower()
        except KeyError:
            return HttpResponseBadRequest('Required JSON keys: `dependencies`, `language`')
        except AttributeError as e:
            # `language` was not a string (no .lower()).
            return HttpResponseBadRequest(f'Error casting language to lower(): {e}')

        # Assure proper inputs
        if not isinstance(dependencies, list) or not dependencies:
            return HttpResponseBadRequest(f'{language.capitalize()} dependencies must be non-empty and of type LIST (i.e. [...]).')

        # Convert the list of dependencies into the format the parser expects
        if language == PYTHON:
            dependencies = '\n'.join(dependencies)
        elif language == JAVASCRIPT:
            # Converts e.g. ["lodash@4.17.15","react@16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"'
            formatted_dependencies_list = ['"' + dep.replace("@", '":"') + '"' for dep in dependencies]
            dependencies = ','.join(formatted_dependencies_list)
        else:
            return HttpResponseBadRequest(f"Language not supported: [{language}].")

        # Parse dependencies
        dependencies = github_util.parse_dependencies(dependencies, language)

        # Get recommendation all or cutoff if limit specified
        if 'max_recommendations' in json_data:
            recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies,
                                                                               json_data['max_recommendations'])
        else:
            recommended_dependencies = RECOMMENDER_SERVICE.get_recommendations(dependencies)

        # Convert the output tuples into list of dictionaries with names to return back
        output_recommended_dependencies = []
        for recommended_dependency in recommended_dependencies:
            d = dict()
            d['forPackage'] = recommended_dependency[0]
            d['recommendedPackage'] = recommended_dependency[1]
            d['url'] = recommended_dependency[2]
            d['pkgpkrScore'] = recommended_dependency[3]
            d['absoluteTrendScore'] = recommended_dependency[4]
            d['relativeTrendScore'] = recommended_dependency[5]
            d['boundedPopularityScore'] = recommended_dependency[6]
            d['boundedSimilarityScore'] = recommended_dependency[7]
            d['categories'] = recommended_dependency[8]
            d['displayDate'] = recommended_dependency[9]
            d['monthlyDownloadsLastMonth'] = recommended_dependency[10]
            output_recommended_dependencies.append(d)

        # Setup data to be returned
        data = {
            'language': language,
            'recommended_dependencies': output_recommended_dependencies
        }

        return HttpResponse(json.dumps(data), content_type="application/json")

    return HttpResponseNotAllowed(['POST'])
| webserver/pkgpkr/webservice/views.py | 11,933 | Return about info
arguments:
:request: GET HTTP request
returns:
Rendered about page
GitHub redirect here, then retrieves token for API
arguments:
:request: GET HTTP request
returns:
Redirects to index
Return landing page
arguments:
:request: GET HTTP request
returns:
Rendered home (index) page
Log user in using GitHub OAuth
arguments:
:request: GET HTTP request
returns:
Redirects to index
Logs user out but keep authorization ot OAuth GitHub
arguments:
:request: GET HTTP request
returns:
Redirects to index
Get recommended packages for the repo
arguments:
:request: GET/POST HTTP request
:name: repo name
returns:
Rendered recommendation page
Get recommended packages for the repo in JSON format
arguments:
:request: GET HTTP request
:name: repo name
returns:
JSON object with recommendations
Returns package recommendations for API POST call without authentication
arguments:
:request: POST request of application/json type
returns:
list of package recommendations
Get full list (up to 100) for the current user
arguments:
:request: GET HTTP request
returns:
Rendered repositories page
Views for the web service
Instantiate service class Create keys if not yet there! To keep API token To keep user infor (e.g. name, avatar url) For Selenium testing Redirect to attempt Github Auth Get code supplied by github Payload to fetch Call github to get token Set token Call for user info and store in sessions (to be used for UI) Flush the session Assure login Get all repos Skip if repo has no dependencies Updated Date Convert time format e.g. 2020-03-16T13:03:34Z -> 2020-03-16 Convert string to encoded URL e.g. hello/world -> hello%2world Get dependencies if any, remember if at least some dependencies found Convert encoded URL back to string e.g. hello%2world -> hello/world Process for DEMO run If GET it means it's not a DEMO POST call with manual dependencies inputs Assure login Fetch branch name out of HTTP GET Param Get branch names and language (ONLY) for the repo, no need for dependencies yet Convert encoded URL back to string e.g. hello%2world -> hello/world Set to none (will also allow for not showing branch selector Fetch branch name out of HTTP GET Param Get dependencies for current repo, and branch names for the repo Get predictions Setup data to be returned Fetch JSON request.raw_post_data w/ Django < 1.4 Fetch non-optional keys Assure proper inputs Convert comma separated dependencies into proper expected format Converts e.g ["lodash:4.17.15","react:16.13.1"] -> '"lodash":"4.17.15","react":"16.13.1"' Parse dependencies Get recommendation all or cutoff if limit specified Convert the output tuples into list of dictionaries with names to return back Setup data to be returned | 2,809 | en | 0.68716 |
# Alacritty config options
# Antonio Sarosi
# December 10, 2020
from typing import List, Dict, Any
from collections.abc import Mapping
from pathlib import Path
from sys import stderr
import yaml
import log
class ConfigError(Exception):
    """Signals a failure while applying an alacritty configuration change."""

    def __init__(self, message='Error applying configuration'):
        super().__init__(message)
class Alacritty:
    """Manages the alacritty.yml config plus the themes and fonts resources."""

    def __init__(self):
        # All managed files live under ~/.config/alacritty.
        self.base_path = Path().home() / '.config' / 'alacritty'
        if not self.base_path.exists():
            raise ConfigError(f'Config directory not found: {self.base_path}')

        self.config_file = self.base_path / 'alacritty.yml'
        if not self.config_file.is_file():
            # Create an empty config rather than failing.
            log.warn('Config file not found')
            self.config_file.touch()
            print('Created config file =>', end=' ', file=stderr)
            log.color_print(self.config_file, log.Color.BLUE, file=stderr)

        # Parsed YAML config; mutated in place by the change_* methods and
        # written back by save().
        self.config = self._load(self.config_file)
        if self.config is None:
            self.config = {}
            log.warn('Alacritty config file was empty')

        # Auxiliary resources created on demand; each descriptor knows how
        # to check for and create its own file/directory.
        self.resources = {
            'themes': {
                'type': 'Themes directory',
                'path': self.base_path / 'themes',
                'exists': lambda: self.resources['themes']['path'].is_dir(),
                'create': lambda: self.resources['themes']['path'].mkdir()
            },
            'fonts': {
                'type': 'Fonts file',
                'path': self.base_path / 'fonts.yaml',
                'exists': lambda: self.resources['fonts']['path'].is_file(),
                'create': lambda: self.resources['fonts']['path'].touch()
            }
        }

    def _load(self, yaml_file: Path) -> Dict[str, Any]:
        """Parse a YAML file, re-raising parse errors as ConfigError."""
        with open(yaml_file) as f:
            try:
                return yaml.load(f, Loader=yaml.FullLoader)
            except yaml.YAMLError as e:
                raise ConfigError((
                    'YAML error at parsing file "{0}", '
                    'at line {1.problem_mark.line}, '
                    'column {1.problem_mark.column}:\n'
                    '{1.problem} {1.context}'
                ).format(yaml_file.name, e))

    def _resource_path(self, resource: str) -> Path:
        """Return the path of a named resource, creating it first if missing."""
        if resource not in self.resources:
            raise ConfigError(f'Path for resource "{resource}" not set')

        # Rebind `resource` from the key to its descriptor dict.
        resource = self.resources[resource]
        if not resource['exists']():
            log.warn(f'{resource["type"]} not found')
            resource['create']()
            print('Created resource =>', end=' ', file=stderr)
            log.color_print(resource['path'], log.Color.BLUE, file=stderr)

        return resource['path']

    def save(self):
        """Write the in-memory config back to alacritty.yml."""
        with open(self.config_file, 'w') as f:
            yaml.dump(self.config, f)

    def apply(self, **config):
        """Dispatch each provided option to its handler, collecting errors.

        Raises:
            ConfigError: if no options were given, or summarizing how many
                handlers failed (each failure is also logged individually).
        """
        if config is None or len(config) < 1:
            raise ConfigError('No options provided')

        actions = {
            'theme': self.change_theme,
            'font': self.change_font,
            'size': self.change_font_size,
            'opacity': self.change_opacity,
            'padding': self.change_padding,
            'offset': self.change_font_offset,
            'list': self.list,
            'print': self.print,
        }

        # Keep applying the remaining options on error so the user sees all
        # problems in one run.
        errors_found = 0
        for param, action in actions.items():
            if param in config:
                try:
                    action(config[param])
                except ConfigError as e:
                    log.err(e)
                    errors_found += 1

        if errors_found > 0:
            raise ConfigError(f'\n{errors_found} error(s) found')

    def change_theme(self, theme: str):
        """Load `<themes dir>/<theme>.yaml` and copy its colors into the config."""
        themes_directory = self._resource_path('themes')
        theme_file = themes_directory / f'{theme}.yaml'
        if not theme_file.is_file():
            raise ConfigError(f'Theme "{theme}" not found')

        theme_yaml = self._load(theme_file)
        if theme_yaml is None:
            raise ConfigError(f'File {theme_file.name} is empty')
        if 'colors' not in theme_yaml:
            raise ConfigError(f'{theme_file} does not contain color config')

        expected_colors = [
            'black',
            'red',
            'green',
            'yellow',
            'blue',
            'magenta',
            'cyan',
            'white',
        ]

        expected_props = {
            'primary': ['background', 'foreground'],
            'normal': expected_colors,
            'bright': expected_colors,
        }

        # Missing color entries only warn; the theme is applied regardless.
        for k in expected_props:
            if k not in theme_yaml['colors']:
                log.warn(f'Missing "colors:{k}" for theme "{theme}"')
                continue
            for v in expected_props[k]:
                if v not in theme_yaml['colors'][k]:
                    log.warn(f'Missing "colors:{k}:{v}" for theme "{theme}"')

        self.config['colors'] = theme_yaml['colors']
        log.ok(f'Theme {theme} applied')

    def change_font_size(self, size: float):
        """Set the font size; must be strictly positive."""
        if size <= 0:
            raise ConfigError('Font size cannot be negative or zero')

        if 'font' not in self.config:
            self.config['font'] = {}
            log.warn('"font" prop config was not present in alacritty.yml')

        self.config['font']['size'] = size
        log.ok(f'Font size set to {size:.1f}')

    def change_font(self, font: str):
        """Apply a font from fonts.yaml to the normal, bold and italic faces."""
        if 'font' not in self.config:
            self.config['font'] = {}
            log.warn('"font" prop was not present in alacritty.yml')

        fonts_file = self._resource_path('fonts')
        fonts = self._load(fonts_file)
        if fonts is None:
            raise ConfigError(f'File "{fonts_file}" is empty')
        if 'fonts' not in fonts:
            raise ConfigError(f'No font config found in "{fonts_file}"')

        fonts = fonts['fonts']
        if font not in fonts:
            raise ConfigError(f'Config for font "{font}" not found')

        font_types = ['normal', 'bold', 'italic']

        # A plain string entry means: same family for all three faces.
        if isinstance(fonts[font], str):
            font_name = fonts[font]
            fonts[font] = {}
            for t in font_types:
                fonts[font][t] = font_name

        if not isinstance(fonts[font], Mapping):
            raise ConfigError(f'Font "{font}" has wrong format')

        for t in font_types:
            if t not in fonts[font]:
                raise ConfigError(f'Font "{font}" does not have "{t}" property')
            if t not in self.config['font']:
                # Placeholder family, immediately overwritten below.
                self.config['font'][t] = {'family': 'tmp'}
            self.config['font'][t]['family'] = fonts[font][t]

        log.ok(f'Font {font} applied')

    def change_opacity(self, opacity: float):
        """Set background opacity; valid range is [0.0, 1.0]."""
        if opacity < 0.0 or opacity > 1.0:
            raise ConfigError('Opacity should be between 0.0 and 1.0')

        self.config['background_opacity'] = opacity
        log.ok(f'Opacity set to {opacity:.2f}')

    def change_padding(self, padding: List[int]):
        """Set window padding from a two-element [x, y] list."""
        if len(padding) != 2:
            raise ConfigError('Padding should only have an x and y value')

        x, y = padding
        if 'window' not in self.config:
            self.config['window'] = {}
            log.warn('"window" prop was not present in config file')
        if 'padding' not in self.config['window']:
            self.config['window']['padding'] = {}
            log.warn('"padding" prop was not present in config file')

        self.config['window']['padding']['x'] = x
        self.config['window']['padding']['y'] = y
        log.ok(f'Padding set to x: {x}, y: {y}')

    def change_font_offset(self, offset: List[int]):
        """Set the font glyph offset from a two-element [x, y] list."""
        if len(offset) != 2:
            raise ConfigError('Wrong offset config, should be [x, y]')

        x, y = offset
        if 'font' not in self.config:
            self.config['font'] = {}
        if 'offset' not in self.config['font']:
            log.warn('"offset" prop was not set')
            self.config['font']['offset'] = {}

        self.config['font']['offset']['x'] = x
        self.config['font']['offset']['y'] = y
        log.ok(f'Offset set to x: {x}, y: {y}')

    def list(self, to_be_listed: str):
        """List available resources: 'themes', 'fonts' or 'all'."""
        def list_themes():
            # One theme per file in the themes directory; strip the extension.
            themes_dir = self._resource_path('themes')
            themes = [file.name.split('.')[0] for file in themes_dir.iterdir()]
            if len(themes) < 1:
                log.warn('Cannot list themes, themes directory is empty')
            else:
                log.color_print('Themes:', log.Color.BOLD)
                for theme in themes:
                    log.color_print(f'  {theme}', log.Color.BLUE)

        def list_fonts():
            fonts = self._load(self._resource_path('fonts'))
            if fonts is None or 'fonts' not in fonts:
                log.warn('Cannot list fonts, no fonts found')
            else:
                log.color_print('Fonts:', log.Color.BOLD)
                for font in fonts['fonts']:
                    log.color_print(f'  {font}', log.Color.PURPLE)

        options = {
            'themes': list_themes,
            'fonts': list_fonts,
        }

        if to_be_listed == 'all':
            for _, list_function in options.items():
                list_function()
        else:
            if to_be_listed not in options:
                raise ConfigError(f'Cannot list {to_be_listed}, unknown option')
            options[to_be_listed]()

    def print(self, to_be_printed: List[str]):
        """Dump requested YAML documents: 'config', 'fonts' or a theme name."""
        def print_config():
            log.color_print(self.config_file, log.Color.BOLD)
            print(yaml.dump(self.config))

        def print_fonts():
            fonts_file = self._resource_path('fonts')
            log.color_print(fonts_file, log.Color.BOLD)
            print(yaml.dump(self._load(fonts_file)))

        def print_theme(theme: str):
            themes_dir = self._resource_path('themes')
            theme_file = themes_dir / f'{theme}.yaml'
            if not theme_file.is_file():
                raise ConfigError(
                    f'Failed printing "{theme}" theme, "{theme_file}" not found'
                )
            log.color_print(theme_file, log.Color.BOLD)
            print(yaml.dump(self._load(theme_file)))

        options = {
            'fonts': print_fonts,
            'config': print_config,
        }

        # Default to printing the main config when nothing was requested.
        if len(to_be_printed) == 0:
            to_be_printed.append('config')

        # Anything that is not a known option is treated as a theme name.
        for param in to_be_printed:
            if param not in options:
                print_theme(param)
            else:
                options[param]()
| src/alacritty.py | 10,535 | Alacritty config options Antonio Sarosi December 10, 2020 | 57 | en | 0.193457 |
#!/usr/bin/python
"""
This script runs a convergence study for solid elements
"""
#from subprocess import call
import os
import pylab
import numpy
import re
# Calculix solid element types with their cgx counterparts.
#~ eltyps={"C3D8":"he8",
#~         "C3D4":"te4",
#~         "C3D10":"te10"}
eltyps={"C3D8":"he8",
        "C3D8R":"he8r",
        "C3D8I":"he8i",
        "C3D20":"he20",
        "C3D20R":"he20r",
        "C3D4":"te4",
        "C3D10":"te10"}

#elsizes=[500,250,100,50,25,10,5]
elsizes=[100,50,25,10,5]

# read the template fbd file
f = open("solid.fbd","r")
lines=f.readlines()
f.close()

# loop over element types
for elty in eltyps.keys():
    # open results summary file (one per element type)
    fdata=open(elty+".txt","w")
    fdata.write("# size NoN smax umax\n")
    # loop over element sizes
    for elsize in elsizes:
        print elty, elsize
        # modify solid.fbd and write output to solid-auto.fbd
        fout = open("solid_auto.fbd", "w")
        for line in lines:
            # set element type
            if line.startswith("valu Etyp"):
                line="valu Etyp "+eltyps[elty]+"\n"
            # set element size
            if line.startswith("div all auto"):
                line="div all auto "+str(elsize)+"\n"
                if elty.startswith("C3D8") or elty.startswith("C3D4"):
                    # increase the node distance for linear elements
                    line=line+"div all div 2\n"
                    # NOTE(review): rebinding the loop variable — the doubled
                    # elsize is what gets reported in the plot label and the
                    # summary file below; confirm this is intended.
                    elsize=elsize*2
                # annotate the plot with element type and effective size.
                # NOTE(review): placement of this write inside the
                # "div all auto" branch is reconstructed from context —
                # confirm against the original script.
                fout.write("ulin "+elty+" "+str(elsize)+"\n")
            fout.write(line)
        fout.write("quit\n")
        fout.close()
        # run solid_auto.fbd (preprocessing, solving and postprocessing)
        os.system("cgx -b solid_auto.fbd")
        # get number of nodes from solid.frd (2C record holds the node count)
        f=open("solid.frd")
        for line in f:
            if line.startswith(" 2C"):
                nnode=int(line.split()[1])
        f.close()
        print "Knotenzahl ", nnode
        # get smax from smax.txt
        smax=numpy.genfromtxt("smax.txt")[3]
        # get smin from smin.txt
        smin=numpy.genfromtxt("smin.txt")[3]
        # get umax from umax.txt
        umax=numpy.genfromtxt("umax.txt")[3]
        # rename the stress plot
        os.system("mv hcpy_1.png "+"solid_"+elty+"_"+str(elsize)+"_S.png")
        # write the values to the data file
        fdata.write(str(elsize)+" "+str(nnode)+" "+str((smax-smin)/2.)+" "+str(abs(umax))+"\n")
    fdata.close()
| Elements/Solid/solid-conv.py | 2,428 | !/usr/bin/pythonfrom subprocess import call Calculix solid element types with their cgx counterparts.~ eltyps={"C3D8":"he8",~ "C3D4":"te4",~ "C3D10":"te10"}elsizes=[500,250,100,50,25,10,5] read the template fbd file loop over element types open results summary file loop over element sizes modify solid.fbd and write output to solid-auto.fbd set element type set element size increase the node distance for linear elements run solid_auto.fbd (preprocessing, solving and postprocessing) get number of nodes from solid.frd get smax from smax.txt get umax from umax.txt get umax from umax.txt rename the stress plot write the values to the data file | 646 | en | 0.705268 |
from textattack.shared.utils import default_class_repr
from textattack.constraints.pre_transformation import PreTransformationConstraint
from textattack.shared.validators import transformation_consists_of_word_swaps
import nltk
class StopwordModification(PreTransformationConstraint):
    """A pre-transformation constraint that forbids modifying stopwords."""

    def __init__(self, stopwords=None):
        # Default to NLTK's English stopword list when none is supplied.
        if stopwords is None:
            self.stopwords = set(nltk.corpus.stopwords.words('english'))
        else:
            self.stopwords = set(stopwords)

    def _get_modifiable_indices(self, current_text):
        """Return the indices of words in ``current_text`` that may be modified."""
        return {
            idx
            for idx, token in enumerate(current_text.words)
            if token not in self.stopwords
        }

    def check_compatibility(self, transformation):
        """Return whether this constraint applies to ``transformation``.

        The stopword constraint only is concerned with word swaps since
        paraphrasing phrases containing stopwords is OK.

        Args:
            transformation: The ``Transformation`` to check compatibility with.
        """
        return transformation_consists_of_word_swaps(transformation)
| textattack/constraints/pre_transformation/stopword_modification.py | 1,317 | A constraint disallowing the modification of stopwords
Returns the word indices in ``current_text`` which are able to be modified.
The stopword constraint only is concerned with word swaps since paraphrasing phrases
containing stopwords is OK.
Args:
transformation: The ``Transformation`` to check compatibility with. | 322 | en | 0.862794 |
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
from eight import *
from bw2data import mapping, Database, databases
from ..units import normalize_units as normalize_units_function
from ..errors import StrategyError
from ..utils import activity_hash, DEFAULT_FIELDS
from copy import deepcopy
import numbers
import numpy as np
import pprint
def format_nonunique_key_error(obj, fields, others):
    """Build the error message for an ambiguous link target.

    ``obj`` is the source dataset whose key matched several targets;
    ``fields`` are the fields used for linking (``DEFAULT_FIELDS`` when
    falsey); ``others`` are the clashing candidate datasets. Returns a
    human-readable multi-line string.
    """
    template = """Object in source database can't be uniquely linked to target database.\nProblematic dataset is:\n{ds}\nPossible targets include (at least one not shown):\n{targets}"""
    shown_fields = list(fields or DEFAULT_FIELDS) + ['filename']

    def summarize(ds):
        # Show "(missing)" for absent fields so the message stays readable.
        return {field: ds.get(field, "(missing)") for field in shown_fields}

    return template.format(
        ds=pprint.pformat(summarize(obj)),
        targets=pprint.pformat([summarize(other) for other in others]),
    )
def link_iterable_by_fields(unlinked, other=None, fields=None, kind=None,
                            internal=False, relink=False):
    """Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.
    The database to be linked must have uniqueness for each object for the given ``fields``.
    If ``kind``, limit objects in ``unlinked`` of type ``kind``.
    If ``relink``, link to objects which already have an ``input``. Otherwise, skip already linked objects.
    If ``internal``, linked ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``.

    Raises ``StrategyError`` if a matched key is ambiguous (several targets
    share the same ``activity_hash``) or if a target dataset lacks
    ``database``/``code``.  Returns ``unlinked``, with each matched exchange
    given an ``input`` key of ``(database, code)``."""
    # Choose which exchanges are eligible for linking, based on ``kind``
    # (exchange-type filter) and ``relink`` (whether exchanges that already
    # have an ``input`` may be linked again).
    if kind:
        kind = {kind} if isinstance(kind, str) else kind
        if relink:
            filter_func = lambda x: x.get('type') in kind
        else:
            filter_func = lambda x: x.get('type') in kind and not x.get('input')
    else:
        if relink:
            filter_func = lambda x: True
        else:
            filter_func = lambda x: not x.get('input')
    if internal:
        # Internal linking: the link targets are the unlinked datasets themselves.
        other = unlinked
    duplicates, candidates = {}, {}
    try:
        # Other can be a generator, so a bit convoluted
        for ds in other:
            key = activity_hash(ds, fields)
            if key in candidates:
                # Key clash: remember every clashing dataset so the error
                # raised below (only when something actually links to this
                # key) can list the alternatives.
                duplicates.setdefault(key, []).append(ds)
            else:
                candidates[key] = (ds['database'], ds['code'])
    except KeyError:
        raise StrategyError("Not all datasets in database to be linked have "
                            "``database`` or ``code`` attributes")
    for container in unlinked:
        for obj in filter(filter_func, container.get('exchanges', [])):
            key = activity_hash(obj, fields)
            if key in duplicates:
                raise StrategyError(format_nonunique_key_error(obj, fields, duplicates[key]))
            elif key in candidates:
                obj['input'] = candidates[key]
    return unlinked
def assign_only_product_as_production(db):
    """Assign only product as reference product.

    Skips datasets that already have a reference product or no production
    exchanges. Production exchanges must have a ``name`` and an amount.

    Will replace the following activity fields, if not already specified:

    * 'name' - name of reference product
    * 'unit' - unit of reference product
    * 'production amount' - amount of reference product
    """
    for ds in db:
        if ds.get("reference product"):
            continue
        products = [exc for exc in ds.get('exchanges', [])
                    if exc.get('type') == 'production']
        if len(products) != 1:
            # Zero or several production exchanges: nothing unambiguous to assign.
            continue
        product, = products
        assert product['name']
        ds['reference product'] = product['name']
        ds['production amount'] = product['amount']
        if not ds.get('name'):
            ds['name'] = product['name']
        if not ds.get('unit'):
            ds['unit'] = product.get('unit') or 'Unknown'
    return db
def link_technosphere_by_activity_hash(db, external_db_name=None, fields=None):
    """Link technosphere exchanges using ``activity_hash`` function.

    If ``external_db_name``, link against a different database; otherwise
    link internally.  If ``fields``, link using only certain fields.
    """
    TECHNOSPHERE_TYPES = {"technosphere", "substitution", "production"}
    if external_db_name is None:
        # Internal linking: candidates come from ``db`` itself.
        other, internal = None, True
    else:
        if external_db_name not in databases:
            raise StrategyError("Can't find external database {}".format(
                external_db_name))
        # Only process datasets of the external database are valid targets.
        internal = False
        other = (ds for ds in Database(external_db_name)
                 if ds.get('type', 'process') == 'process')
    return link_iterable_by_fields(db, other, internal=internal, kind=TECHNOSPHERE_TYPES, fields=fields)
def set_code_by_activity_hash(db, overwrite=False):
    """Use ``activity_hash`` to set dataset code.

    By default, won't overwrite existing codes, but will if ``overwrite``
    is ``True``.
    """
    for ds in db:
        needs_code = overwrite or 'code' not in ds
        if needs_code:
            ds['code'] = activity_hash(ds)
    return db
def tupleize_categories(db):
    """Convert ``categories`` values on datasets and their exchanges to tuples."""
    for ds in db:
        cats = ds.get('categories')
        if cats:
            ds['categories'] = tuple(cats)
        for exc in ds.get('exchanges', []):
            exc_cats = exc.get('categories')
            if exc_cats:
                exc['categories'] = tuple(exc_cats)
    return db
def drop_unlinked(db):
    """Delete every unlinked exchange (one without an ``input`` key) from ``db``.

    This is the nuclear option - use at your own risk!
    """
    for ds in db:
        # ``.get`` keeps datasets without an ``exchanges`` key from raising
        # KeyError (consistent with ``normalize_units`` and the linking
        # functions in this module); such datasets get an empty list.
        ds['exchanges'] = [obj for obj in ds.get('exchanges', []) if obj.get('input')]
    return db
def normalize_units(db):
    """Normalize units in datasets and their exchanges"""
    def _fix(obj):
        # Rewrite the ``unit`` value in place, if present.
        if 'unit' in obj:
            obj['unit'] = normalize_units_function(obj['unit'])

    for ds in db:
        _fix(ds)
        for exc in ds.get('exchanges', []):
            _fix(exc)
        for param in ds.get('parameters', {}).values():
            _fix(param)
    return db
def add_database_name(db, name):
    """Set the ``database`` attribute of every dataset in ``db`` to ``name``."""
    for dataset in db:
        dataset['database'] = name
    return db
def convert_uncertainty_types_to_integers(db):
    """Coerce each exchange's ``uncertainty type`` back to an ``int``.

    Generic number conversion can turn uncertainty types into floats (or
    leave them as numeric strings); convert them back to integers.
    Exchanges whose value is missing or not convertible are left unchanged.
    """
    for ds in db:
        # ``.get`` keeps datasets without an ``exchanges`` key from raising,
        # consistent with the other strategies in this module.
        for exc in ds.get('exchanges', []):
            try:
                exc['uncertainty type'] = int(exc['uncertainty type'])
            # Narrow except: the old bare ``except:`` also swallowed
            # KeyboardInterrupt/SystemExit.
            except (KeyError, TypeError, ValueError):
                pass
    return db
def drop_falsey_uncertainty_fields_but_keep_zeros(db):
    """Drop falsey uncertainty fields (like ``''`` or ``None``) but keep zero and NaN.

    Zero and NaN are meaningful uncertainty values and must be preserved.
    Note that this doesn't strip `False`, which behaves *exactly* like 0.
    """
    uncertainty_fields = [
        'minimum',
        'maximum',
        'scale',
        'shape',
        'loc',
    ]
    def drop_if_appropriate(exc):
        for field in uncertainty_fields:
            if field not in exc or exc[field] == 0:
                # Absent fields need no work; zeros are kept on purpose.
                continue
            elif isinstance(exc[field], numbers.Number) and np.isnan(exc[field]):
                # NaN is a legitimate value here - keep it.
                continue
            elif not exc[field]:
                # Any other falsey value ('', None, empty container) is noise.
                del exc[field]
    for ds in db:
        # ``.get`` keeps datasets without an ``exchanges`` key from raising,
        # consistent with the other strategies in this module.
        for exc in ds.get('exchanges', []):
            drop_if_appropriate(exc)
    return db
def convert_activity_parameters_to_list(data):
    """Convert activity parameters from dictionary to list of dictionaries"""
    for ds in data:
        if 'parameters' not in ds:
            continue
        as_list = []
        for param_name, spec in ds['parameters'].items():
            # Deep-copy so the original parameter dicts are never mutated.
            entry = deepcopy(spec)
            entry['name'] = param_name
            as_list.append(entry)
        ds['parameters'] = as_list
    return data
| bw2io/strategies/generic.py | 7,612 | Add database name to datasets
Assign only product as reference product.
Skips datasets that already have a reference product or no production exchanges. Production exchanges must have a ``name`` and an amount.
Will replace the following activity fields, if not already specified:
* 'name' - name of reference product
* 'unit' - unit of reference product
* 'production amount' - amount of reference product
Convert activity parameters from dictionary to list of dictionaries
Generic number conversion function convert to floats. Return to integers.
Drop fields like '' but keep zero and NaN.
Note that this doesn't strip `False`, which behaves *exactly* like 0.
This is the nuclear option - use at your own risk!
Generic function to link objects in ``unlinked`` to objects in ``other`` using fields ``fields``.
The database to be linked must have uniqueness for each object for the given ``fields``.
If ``kind``, limit objects in ``unlinked`` of type ``kind``.
If ``relink``, link to objects which already have an ``input``. Otherwise, skip already linked objects.
If ``internal``, linked ``unlinked`` to other objects in ``unlinked``. Each object must have the attributes ``database`` and ``code``.
Link technosphere exchanges using ``activity_hash`` function.
If ``external_db_name``, link against a different database; otherwise link internally.
If ``fields``, link using only certain fields.
Normalize units in datasets and their exchanges
Use ``activity_hash`` to set dataset code.
By default, won't overwrite existing codes, but will if ``overwrite`` is ``True``.
-*- coding: utf-8 -*- Other can be a generator, so a bit convoluted | 1,649 | en | 0.772076 |
# Generated by h2py from \mssdk\include\winnt.h
APPLICATION_ERROR_MASK = 536870912
ERROR_SEVERITY_SUCCESS = 0
ERROR_SEVERITY_INFORMATIONAL = 1073741824
ERROR_SEVERITY_WARNING = -2147483648
ERROR_SEVERITY_ERROR = -1073741824
MINCHAR = 128
MAXCHAR = 127
MINSHORT = 32768
MAXSHORT = 32767
MINLONG = -2147483648
MAXLONG = 2147483647
MAXBYTE = 255
MAXWORD = 65535
MAXDWORD = -1
LANG_NEUTRAL = 0
LANG_AFRIKAANS = 54
LANG_ALBANIAN = 28
LANG_ARABIC = 1
LANG_BASQUE = 45
LANG_BELARUSIAN = 35
LANG_BULGARIAN = 2
LANG_CATALAN = 3
LANG_CHINESE = 4
LANG_CROATIAN = 26
LANG_CZECH = 5
LANG_DANISH = 6
LANG_DUTCH = 19
LANG_ENGLISH = 9
LANG_ESTONIAN = 37
LANG_FAEROESE = 56
LANG_FARSI = 41
LANG_FINNISH = 11
LANG_FRENCH = 12
LANG_GERMAN = 7
LANG_GREEK = 8
LANG_HEBREW = 13
LANG_HINDI = 57
LANG_HUNGARIAN = 14
LANG_ICELANDIC = 15
LANG_INDONESIAN = 33
LANG_ITALIAN = 16
LANG_JAPANESE = 17
LANG_KOREAN = 18
LANG_LATVIAN = 38
LANG_LITHUANIAN = 39
LANG_MACEDONIAN = 47
LANG_MALAY = 62
LANG_NORWEGIAN = 20
LANG_POLISH = 21
LANG_PORTUGUESE = 22
LANG_ROMANIAN = 24
LANG_RUSSIAN = 25
LANG_SERBIAN = 26
LANG_SLOVAK = 27
LANG_SLOVENIAN = 36
LANG_SPANISH = 10
LANG_SWAHILI = 65
LANG_SWEDISH = 29
LANG_THAI = 30
LANG_TURKISH = 31
LANG_UKRAINIAN = 34
LANG_VIETNAMESE = 42
SUBLANG_NEUTRAL = 0
SUBLANG_DEFAULT = 1
SUBLANG_SYS_DEFAULT = 2
SUBLANG_ARABIC_SAUDI_ARABIA = 1
SUBLANG_ARABIC_IRAQ = 2
SUBLANG_ARABIC_EGYPT = 3
SUBLANG_ARABIC_LIBYA = 4
SUBLANG_ARABIC_ALGERIA = 5
SUBLANG_ARABIC_MOROCCO = 6
SUBLANG_ARABIC_TUNISIA = 7
SUBLANG_ARABIC_OMAN = 8
SUBLANG_ARABIC_YEMEN = 9
SUBLANG_ARABIC_SYRIA = 10
SUBLANG_ARABIC_JORDAN = 11
SUBLANG_ARABIC_LEBANON = 12
SUBLANG_ARABIC_KUWAIT = 13
SUBLANG_ARABIC_UAE = 14
SUBLANG_ARABIC_BAHRAIN = 15
SUBLANG_ARABIC_QATAR = 16
SUBLANG_CHINESE_TRADITIONAL = 1
SUBLANG_CHINESE_SIMPLIFIED = 2
SUBLANG_CHINESE_HONGKONG = 3
SUBLANG_CHINESE_SINGAPORE = 4
SUBLANG_CHINESE_MACAU = 5
SUBLANG_DUTCH = 1
SUBLANG_DUTCH_BELGIAN = 2
SUBLANG_ENGLISH_US = 1
SUBLANG_ENGLISH_UK = 2
SUBLANG_ENGLISH_AUS = 3
SUBLANG_ENGLISH_CAN = 4
SUBLANG_ENGLISH_NZ = 5
SUBLANG_ENGLISH_EIRE = 6
SUBLANG_ENGLISH_SOUTH_AFRICA = 7
SUBLANG_ENGLISH_JAMAICA = 8
SUBLANG_ENGLISH_CARIBBEAN = 9
SUBLANG_ENGLISH_BELIZE = 10
SUBLANG_ENGLISH_TRINIDAD = 11
SUBLANG_ENGLISH_ZIMBABWE = 12
SUBLANG_ENGLISH_PHILIPPINES = 13
SUBLANG_FRENCH = 1
SUBLANG_FRENCH_BELGIAN = 2
SUBLANG_FRENCH_CANADIAN = 3
SUBLANG_FRENCH_SWISS = 4
SUBLANG_FRENCH_LUXEMBOURG = 5
SUBLANG_FRENCH_MONACO = 6
SUBLANG_GERMAN = 1
SUBLANG_GERMAN_SWISS = 2
SUBLANG_GERMAN_AUSTRIAN = 3
SUBLANG_GERMAN_LUXEMBOURG = 4
SUBLANG_GERMAN_LIECHTENSTEIN = 5
SUBLANG_ITALIAN = 1
SUBLANG_ITALIAN_SWISS = 2
SUBLANG_KOREAN = 1
SUBLANG_KOREAN_JOHAB = 2
SUBLANG_LITHUANIAN = 1
SUBLANG_LITHUANIAN_CLASSIC = 2
SUBLANG_MALAY_MALAYSIA = 1
SUBLANG_MALAY_BRUNEI_DARUSSALAM = 2
SUBLANG_NORWEGIAN_BOKMAL = 1
SUBLANG_NORWEGIAN_NYNORSK = 2
SUBLANG_PORTUGUESE = 2
SUBLANG_PORTUGUESE_BRAZILIAN = 1
SUBLANG_SERBIAN_LATIN = 2
SUBLANG_SERBIAN_CYRILLIC = 3
SUBLANG_SPANISH = 1
SUBLANG_SPANISH_MEXICAN = 2
SUBLANG_SPANISH_MODERN = 3
SUBLANG_SPANISH_GUATEMALA = 4
SUBLANG_SPANISH_COSTA_RICA = 5
SUBLANG_SPANISH_PANAMA = 6
SUBLANG_SPANISH_DOMINICAN_REPUBLIC = 7
SUBLANG_SPANISH_VENEZUELA = 8
SUBLANG_SPANISH_COLOMBIA = 9
SUBLANG_SPANISH_PERU = 10
SUBLANG_SPANISH_ARGENTINA = 11
SUBLANG_SPANISH_ECUADOR = 12
SUBLANG_SPANISH_CHILE = 13
SUBLANG_SPANISH_URUGUAY = 14
SUBLANG_SPANISH_PARAGUAY = 15
SUBLANG_SPANISH_BOLIVIA = 16
SUBLANG_SPANISH_EL_SALVADOR = 17
SUBLANG_SPANISH_HONDURAS = 18
SUBLANG_SPANISH_NICARAGUA = 19
SUBLANG_SPANISH_PUERTO_RICO = 20
SUBLANG_SWEDISH = 1
SUBLANG_SWEDISH_FINLAND = 2
SORT_DEFAULT = 0
SORT_JAPANESE_XJIS = 0
SORT_JAPANESE_UNICODE = 1
SORT_CHINESE_BIG5 = 0
SORT_CHINESE_PRCP = 0
SORT_CHINESE_UNICODE = 1
SORT_CHINESE_PRC = 2
SORT_KOREAN_KSC = 0
SORT_KOREAN_UNICODE = 1
SORT_GERMAN_PHONE_BOOK = 1
# h2py translations of the winnt.h LANGID/LCID helper macros.
# NOTE(review): ``(WORD)(...)`` is a C cast that h2py copied verbatim; in
# Python it parses as a *call* to a name ``WORD``, which does not appear
# to be defined in this module - confirm before calling these helpers.
# Per winnt.h, the primary language id is the low 10 bits of a LANGID and
# the sublanguage id is bits 10-15.
def PRIMARYLANGID(lgid): return ((WORD)(lgid) & 1023)
def SUBLANGID(lgid): return ((WORD)(lgid) >> 10)
# Mask selecting the locale-id portion of an LCID (low 20 bits).
NLS_VALID_LOCALE_MASK = 1048575
def LANGIDFROMLCID(lcid): return ((WORD)(lcid))
def SORTIDFROMLCID(lcid): return (
(WORD)((((DWORD)(lcid)) & NLS_VALID_LOCALE_MASK) >> 16))
# In C these macros merely suppress "unused variable" warnings; the h2py
# translation returns the argument unchanged.
def UNREFERENCED_PARAMETER(P): return (P)
def DBG_UNREFERENCED_PARAMETER(P): return (P)
def DBG_UNREFERENCED_LOCAL_VARIABLE(V): return (V)
# h2py emitted a second, truncated copy of the UNREFERENCED_* macros whose
# ``return \`` line-continuations glued the following ``def`` lines (and the
# MAXIMUM_WAIT_OBJECTS assignment) into one statement - a SyntaxError that
# prevented this module from importing at all.  Restore the identity form
# used by the first definitions of these names.
def UNREFERENCED_PARAMETER(P): return (P)
def DBG_UNREFERENCED_PARAMETER(P): return (P)
def DBG_UNREFERENCED_LOCAL_VARIABLE(V): return (V)
MAXIMUM_WAIT_OBJECTS = 64
MAXIMUM_SUSPEND_COUNT = MAXCHAR
EXCEPTION_NONCONTINUABLE = 1
EXCEPTION_MAXIMUM_PARAMETERS = 15
PROCESS_TERMINATE = (1)
PROCESS_CREATE_THREAD = (2)
PROCESS_VM_OPERATION = (8)
PROCESS_VM_READ = (16)
PROCESS_VM_WRITE = (32)
PROCESS_DUP_HANDLE = (64)
PROCESS_CREATE_PROCESS = (128)
PROCESS_SET_QUOTA = (256)
PROCESS_SET_INFORMATION = (512)
PROCESS_QUERY_INFORMATION = (1024)
MAXIMUM_PROCESSORS = 32
THREAD_TERMINATE = (1)
THREAD_SUSPEND_RESUME = (2)
THREAD_GET_CONTEXT = (8)
THREAD_SET_CONTEXT = (16)
THREAD_SET_INFORMATION = (32)
THREAD_QUERY_INFORMATION = (64)
THREAD_SET_THREAD_TOKEN = (128)
THREAD_IMPERSONATE = (256)
THREAD_DIRECT_IMPERSONATION = (512)
JOB_OBJECT_ASSIGN_PROCESS = (1)
JOB_OBJECT_SET_ATTRIBUTES = (2)
JOB_OBJECT_QUERY = (4)
JOB_OBJECT_TERMINATE = (8)
TLS_MINIMUM_AVAILABLE = 64
THREAD_BASE_PRIORITY_LOWRT = 15
THREAD_BASE_PRIORITY_MAX = 2
THREAD_BASE_PRIORITY_MIN = -2
THREAD_BASE_PRIORITY_IDLE = -15
JOB_OBJECT_LIMIT_WORKINGSET = 1
JOB_OBJECT_LIMIT_PROCESS_TIME = 2
JOB_OBJECT_LIMIT_JOB_TIME = 4
JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 8
JOB_OBJECT_LIMIT_AFFINITY = 16
JOB_OBJECT_LIMIT_PRIORITY_CLASS = 32
JOB_OBJECT_LIMIT_VALID_FLAGS = 63
EVENT_MODIFY_STATE = 2
MUTANT_QUERY_STATE = 1
SEMAPHORE_MODIFY_STATE = 2
TIME_ZONE_ID_UNKNOWN = 0
TIME_ZONE_ID_STANDARD = 1
TIME_ZONE_ID_DAYLIGHT = 2
PROCESSOR_INTEL_386 = 386
PROCESSOR_INTEL_486 = 486
PROCESSOR_INTEL_PENTIUM = 586
PROCESSOR_MIPS_R4000 = 4000
PROCESSOR_ALPHA_21064 = 21064
PROCESSOR_HITACHI_SH3 = 10003
PROCESSOR_HITACHI_SH3E = 10004
PROCESSOR_HITACHI_SH4 = 10005
PROCESSOR_MOTOROLA_821 = 821
PROCESSOR_ARM_7TDMI = 70001
PROCESSOR_ARCHITECTURE_INTEL = 0
PROCESSOR_ARCHITECTURE_MIPS = 1
PROCESSOR_ARCHITECTURE_ALPHA = 2
PROCESSOR_ARCHITECTURE_PPC = 3
PROCESSOR_ARCHITECTURE_SH = 4
PROCESSOR_ARCHITECTURE_ARM = 5
PROCESSOR_ARCHITECTURE_IA64 = 6
PROCESSOR_ARCHITECTURE_ALPHA64 = 7
PROCESSOR_ARCHITECTURE_MSIL = 8
PROCESSOR_ARCHITECTURE_AMD64 = 9
PROCESSOR_ARCHITECTURE_IA32_ON_WIN64 = 10
PROCESSOR_ARCHITECTURE_UNKNOWN = 65535
PF_FLOATING_POINT_PRECISION_ERRATA = 0
PF_FLOATING_POINT_EMULATED = 1
PF_COMPARE_EXCHANGE_DOUBLE = 2
PF_MMX_INSTRUCTIONS_AVAILABLE = 3
PF_PPC_MOVEMEM_64BIT_OK = 4
PF_ALPHA_BYTE_INSTRUCTIONS = 5
SECTION_QUERY = 1
SECTION_MAP_WRITE = 2
SECTION_MAP_READ = 4
SECTION_MAP_EXECUTE = 8
SECTION_EXTEND_SIZE = 16
PAGE_NOACCESS = 1
PAGE_READONLY = 2
PAGE_READWRITE = 4
PAGE_WRITECOPY = 8
PAGE_EXECUTE = 16
PAGE_EXECUTE_READ = 32
PAGE_EXECUTE_READWRITE = 64
PAGE_EXECUTE_WRITECOPY = 128
PAGE_GUARD = 256
PAGE_NOCACHE = 512
MEM_COMMIT = 4096
MEM_RESERVE = 8192
MEM_DECOMMIT = 16384
MEM_RELEASE = 32768
MEM_FREE = 65536
MEM_PRIVATE = 131072
MEM_MAPPED = 262144
MEM_RESET = 524288
MEM_TOP_DOWN = 1048576
MEM_4MB_PAGES = -2147483648
SEC_FILE = 8388608
SEC_IMAGE = 16777216
SEC_VLM = 33554432
SEC_RESERVE = 67108864
SEC_COMMIT = 134217728
SEC_NOCACHE = 268435456
MEM_IMAGE = SEC_IMAGE
FILE_READ_DATA = (1)
FILE_LIST_DIRECTORY = (1)
FILE_WRITE_DATA = (2)
FILE_ADD_FILE = (2)
FILE_APPEND_DATA = (4)
FILE_ADD_SUBDIRECTORY = (4)
FILE_CREATE_PIPE_INSTANCE = (4)
FILE_READ_EA = (8)
FILE_WRITE_EA = (16)
FILE_EXECUTE = (32)
FILE_TRAVERSE = (32)
FILE_DELETE_CHILD = (64)
FILE_READ_ATTRIBUTES = (128)
FILE_WRITE_ATTRIBUTES = (256)
FILE_SHARE_READ = 1
FILE_SHARE_WRITE = 2
FILE_SHARE_DELETE = 4
FILE_ATTRIBUTE_READONLY = 1
FILE_ATTRIBUTE_HIDDEN = 2
FILE_ATTRIBUTE_SYSTEM = 4
FILE_ATTRIBUTE_DIRECTORY = 16
FILE_ATTRIBUTE_ARCHIVE = 32
FILE_ATTRIBUTE_DEVICE = 64
FILE_ATTRIBUTE_NORMAL = 128
FILE_ATTRIBUTE_TEMPORARY = 256
FILE_ATTRIBUTE_SPARSE_FILE = 512
FILE_ATTRIBUTE_REPARSE_POINT = 1024
FILE_ATTRIBUTE_COMPRESSED = 2048
FILE_ATTRIBUTE_OFFLINE = 4096
FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 8192
FILE_ATTRIBUTE_ENCRYPTED = 16384
FILE_ATTRIBUTE_VIRTUAL = 65536
FILE_NOTIFY_CHANGE_FILE_NAME = 1
FILE_NOTIFY_CHANGE_DIR_NAME = 2
FILE_NOTIFY_CHANGE_ATTRIBUTES = 4
FILE_NOTIFY_CHANGE_SIZE = 8
FILE_NOTIFY_CHANGE_LAST_WRITE = 16
FILE_NOTIFY_CHANGE_LAST_ACCESS = 32
FILE_NOTIFY_CHANGE_CREATION = 64
FILE_NOTIFY_CHANGE_SECURITY = 256
FILE_ACTION_ADDED = 1
FILE_ACTION_REMOVED = 2
FILE_ACTION_MODIFIED = 3
FILE_ACTION_RENAMED_OLD_NAME = 4
FILE_ACTION_RENAMED_NEW_NAME = 5
FILE_CASE_SENSITIVE_SEARCH = 1
FILE_CASE_PRESERVED_NAMES = 2
FILE_UNICODE_ON_DISK = 4
FILE_PERSISTENT_ACLS = 8
FILE_FILE_COMPRESSION = 16
FILE_VOLUME_QUOTAS = 32
FILE_SUPPORTS_SPARSE_FILES = 64
FILE_SUPPORTS_REPARSE_POINTS = 128
FILE_SUPPORTS_REMOTE_STORAGE = 256
FILE_VOLUME_IS_COMPRESSED = 32768
FILE_SUPPORTS_OBJECT_IDS = 65536
FILE_SUPPORTS_ENCRYPTION = 131072
MAXIMUM_REPARSE_DATA_BUFFER_SIZE = (16 * 1024)
IO_REPARSE_TAG_RESERVED_ZERO = (0)
IO_REPARSE_TAG_RESERVED_ONE = (1)
IO_REPARSE_TAG_SYMBOLIC_LINK = (2)
IO_REPARSE_TAG_NSS = (5)
IO_REPARSE_TAG_FILTER_MANAGER = -2147483637
IO_REPARSE_TAG_DFS = -2147483638
IO_REPARSE_TAG_SIS = -2147483641
IO_REPARSE_TAG_MOUNT_POINT = -1610612733
IO_REPARSE_TAG_HSM = -1073741820
IO_REPARSE_TAG_NSSRECOVER = (8)
IO_REPARSE_TAG_RESERVED_MS_RANGE = (256)
IO_REPARSE_TAG_RESERVED_RANGE = IO_REPARSE_TAG_RESERVED_ONE
IO_COMPLETION_MODIFY_STATE = 2
DUPLICATE_CLOSE_SOURCE = 1
DUPLICATE_SAME_ACCESS = 2
DELETE = (65536)
READ_CONTROL = (131072)
WRITE_DAC = (262144)
WRITE_OWNER = (524288)
SYNCHRONIZE = (1048576)
STANDARD_RIGHTS_REQUIRED = (983040)
STANDARD_RIGHTS_READ = (READ_CONTROL)
STANDARD_RIGHTS_WRITE = (READ_CONTROL)
STANDARD_RIGHTS_EXECUTE = (READ_CONTROL)
STANDARD_RIGHTS_ALL = (2031616)
SPECIFIC_RIGHTS_ALL = (65535)
IO_COMPLETION_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3
ACCESS_SYSTEM_SECURITY = (16777216)
MAXIMUM_ALLOWED = (33554432)
GENERIC_READ = (-2147483648)
GENERIC_WRITE = (1073741824)
GENERIC_EXECUTE = (536870912)
GENERIC_ALL = (268435456)
# Included from pshpack4.h
# Included from poppack.h
SID_REVISION = (1)
SID_MAX_SUB_AUTHORITIES = (15)
SID_RECOMMENDED_SUB_AUTHORITIES = (1)
SidTypeUser = 1
SidTypeGroup = 2
SidTypeDomain = 3
SidTypeAlias = 4
SidTypeWellKnownGroup = 5
SidTypeDeletedAccount = 6
SidTypeInvalid = 7
SidTypeUnknown = 8
SECURITY_NULL_RID = (0)
SECURITY_WORLD_RID = (0)
SECURITY_LOCAL_RID = (0X00000000)
SECURITY_CREATOR_OWNER_RID = (0)
SECURITY_CREATOR_GROUP_RID = (1)
SECURITY_CREATOR_OWNER_SERVER_RID = (2)
SECURITY_CREATOR_GROUP_SERVER_RID = (3)
SECURITY_DIALUP_RID = (1)
SECURITY_NETWORK_RID = (2)
SECURITY_BATCH_RID = (3)
SECURITY_INTERACTIVE_RID = (4)
SECURITY_SERVICE_RID = (6)
SECURITY_ANONYMOUS_LOGON_RID = (7)
SECURITY_PROXY_RID = (8)
SECURITY_SERVER_LOGON_RID = (9)
SECURITY_PRINCIPAL_SELF_RID = (10)
SECURITY_AUTHENTICATED_USER_RID = (11)
SECURITY_LOGON_IDS_RID = (5)
SECURITY_LOGON_IDS_RID_COUNT = (3)
SECURITY_LOCAL_SYSTEM_RID = (18)
SECURITY_NT_NON_UNIQUE = (21)
SECURITY_BUILTIN_DOMAIN_RID = (32)
DOMAIN_USER_RID_ADMIN = (500)
DOMAIN_USER_RID_GUEST = (501)
DOMAIN_GROUP_RID_ADMINS = (512)
DOMAIN_GROUP_RID_USERS = (513)
DOMAIN_GROUP_RID_GUESTS = (514)
DOMAIN_ALIAS_RID_ADMINS = (544)
DOMAIN_ALIAS_RID_USERS = (545)
DOMAIN_ALIAS_RID_GUESTS = (546)
DOMAIN_ALIAS_RID_POWER_USERS = (547)
DOMAIN_ALIAS_RID_ACCOUNT_OPS = (548)
DOMAIN_ALIAS_RID_SYSTEM_OPS = (549)
DOMAIN_ALIAS_RID_PRINT_OPS = (550)
DOMAIN_ALIAS_RID_BACKUP_OPS = (551)
DOMAIN_ALIAS_RID_REPLICATOR = (552)
SE_GROUP_MANDATORY = (1)
SE_GROUP_ENABLED_BY_DEFAULT = (2)
SE_GROUP_ENABLED = (4)
SE_GROUP_OWNER = (8)
SE_GROUP_LOGON_ID = (-1073741824)
ACL_REVISION = (2)
ACL_REVISION_DS = (4)
ACL_REVISION1 = (1)
ACL_REVISION2 = (2)
ACL_REVISION3 = (3)
ACL_REVISION4 = (4)
MAX_ACL_REVISION = ACL_REVISION4
# ACE types
ACCESS_MIN_MS_ACE_TYPE = (0)
ACCESS_ALLOWED_ACE_TYPE = (0)
ACCESS_DENIED_ACE_TYPE = (1)
SYSTEM_AUDIT_ACE_TYPE = (2)
SYSTEM_ALARM_ACE_TYPE = (3)
ACCESS_MAX_MS_V2_ACE_TYPE = (3)
ACCESS_ALLOWED_COMPOUND_ACE_TYPE = (4)
ACCESS_MAX_MS_V3_ACE_TYPE = (4)
ACCESS_MIN_MS_OBJECT_ACE_TYPE = (5)
ACCESS_ALLOWED_OBJECT_ACE_TYPE = (5)
ACCESS_DENIED_OBJECT_ACE_TYPE = (6)
SYSTEM_AUDIT_OBJECT_ACE_TYPE = (7)
SYSTEM_ALARM_OBJECT_ACE_TYPE = (8)
ACCESS_MAX_MS_OBJECT_ACE_TYPE = (8)
ACCESS_MAX_MS_V4_ACE_TYPE = (8)
ACCESS_MAX_MS_ACE_TYPE = (8)
ACCESS_ALLOWED_CALLBACK_ACE_TYPE = 9
ACCESS_DENIED_CALLBACK_ACE_TYPE = 10
ACCESS_ALLOWED_CALLBACK_OBJECT_ACE_TYPE = 11
ACCESS_DENIED_CALLBACK_OBJECT_ACE_TYPE = 12
SYSTEM_AUDIT_CALLBACK_ACE_TYPE = 13
SYSTEM_ALARM_CALLBACK_ACE_TYPE = 14
SYSTEM_AUDIT_CALLBACK_OBJECT_ACE_TYPE = 15
SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE = 16
SYSTEM_MANDATORY_LABEL_ACE_TYPE = 17
ACCESS_MAX_MS_V5_ACE_TYPE = 17
# ACE inheritance flags
OBJECT_INHERIT_ACE = (1)
CONTAINER_INHERIT_ACE = (2)
NO_PROPAGATE_INHERIT_ACE = (4)
INHERIT_ONLY_ACE = (8)
INHERITED_ACE = (16)
VALID_INHERIT_FLAGS = (31)
SUCCESSFUL_ACCESS_ACE_FLAG = (64)
FAILED_ACCESS_ACE_FLAG = (128)
ACE_OBJECT_TYPE_PRESENT = 1
ACE_INHERITED_OBJECT_TYPE_PRESENT = 2
SECURITY_DESCRIPTOR_REVISION = (1)
SECURITY_DESCRIPTOR_REVISION1 = (1)
SECURITY_DESCRIPTOR_MIN_LENGTH = (20)
SE_OWNER_DEFAULTED = (1)
SE_GROUP_DEFAULTED = (2)
SE_DACL_PRESENT = (4)
SE_DACL_DEFAULTED = (8)
SE_SACL_PRESENT = (16)
SE_SACL_DEFAULTED = (32)
SE_DACL_AUTO_INHERIT_REQ = (256)
SE_SACL_AUTO_INHERIT_REQ = (512)
SE_DACL_AUTO_INHERITED = (1024)
SE_SACL_AUTO_INHERITED = (2048)
SE_DACL_PROTECTED = (4096)
SE_SACL_PROTECTED = (8192)
SE_SELF_RELATIVE = (32768)
ACCESS_OBJECT_GUID = 0
ACCESS_PROPERTY_SET_GUID = 1
ACCESS_PROPERTY_GUID = 2
ACCESS_MAX_LEVEL = 4
AUDIT_ALLOW_NO_PRIVILEGE = 1
ACCESS_DS_SOURCE_A = "Directory Service"
ACCESS_DS_OBJECT_TYPE_NAME_A = "Directory Service Object"
SE_PRIVILEGE_ENABLED_BY_DEFAULT = (1)
SE_PRIVILEGE_ENABLED = (2)
SE_PRIVILEGE_USED_FOR_ACCESS = (-2147483648)
PRIVILEGE_SET_ALL_NECESSARY = (1)
SE_CREATE_TOKEN_NAME = "SeCreateTokenPrivilege"
SE_ASSIGNPRIMARYTOKEN_NAME = "SeAssignPrimaryTokenPrivilege"
SE_LOCK_MEMORY_NAME = "SeLockMemoryPrivilege"
SE_INCREASE_QUOTA_NAME = "SeIncreaseQuotaPrivilege"
SE_UNSOLICITED_INPUT_NAME = "SeUnsolicitedInputPrivilege"
SE_MACHINE_ACCOUNT_NAME = "SeMachineAccountPrivilege"
SE_TCB_NAME = "SeTcbPrivilege"
SE_SECURITY_NAME = "SeSecurityPrivilege"
SE_TAKE_OWNERSHIP_NAME = "SeTakeOwnershipPrivilege"
SE_LOAD_DRIVER_NAME = "SeLoadDriverPrivilege"
SE_SYSTEM_PROFILE_NAME = "SeSystemProfilePrivilege"
SE_SYSTEMTIME_NAME = "SeSystemtimePrivilege"
SE_PROF_SINGLE_PROCESS_NAME = "SeProfileSingleProcessPrivilege"
SE_INC_BASE_PRIORITY_NAME = "SeIncreaseBasePriorityPrivilege"
SE_CREATE_PAGEFILE_NAME = "SeCreatePagefilePrivilege"
SE_CREATE_PERMANENT_NAME = "SeCreatePermanentPrivilege"
SE_BACKUP_NAME = "SeBackupPrivilege"
SE_RESTORE_NAME = "SeRestorePrivilege"
SE_SHUTDOWN_NAME = "SeShutdownPrivilege"
SE_DEBUG_NAME = "SeDebugPrivilege"
SE_AUDIT_NAME = "SeAuditPrivilege"
SE_SYSTEM_ENVIRONMENT_NAME = "SeSystemEnvironmentPrivilege"
SE_CHANGE_NOTIFY_NAME = "SeChangeNotifyPrivilege"
SE_REMOTE_SHUTDOWN_NAME = "SeRemoteShutdownPrivilege"
TOKEN_ASSIGN_PRIMARY = (1)
TOKEN_DUPLICATE = (2)
TOKEN_IMPERSONATE = (4)
TOKEN_QUERY = (8)
TOKEN_QUERY_SOURCE = (16)
TOKEN_ADJUST_PRIVILEGES = (32)
TOKEN_ADJUST_GROUPS = (64)
TOKEN_ADJUST_DEFAULT = (128)
TOKEN_ALL_ACCESS = (STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY |
TOKEN_DUPLICATE |
TOKEN_IMPERSONATE |
TOKEN_QUERY |
TOKEN_QUERY_SOURCE |
TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT)
TOKEN_READ = (STANDARD_RIGHTS_READ |
TOKEN_QUERY)
TOKEN_WRITE = (STANDARD_RIGHTS_WRITE |
TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT)
TOKEN_EXECUTE = (STANDARD_RIGHTS_EXECUTE)
TOKEN_SOURCE_LENGTH = 8
# Token types
TokenPrimary = 1
TokenImpersonation = 2
# TOKEN_INFORMATION_CLASS, used with Get/SetTokenInformation
TokenUser = 1
TokenGroups = 2
TokenPrivileges = 3
TokenOwner = 4
TokenPrimaryGroup = 5
TokenDefaultDacl = 6
TokenSource = 7
TokenType = 8
TokenImpersonationLevel = 9
TokenStatistics = 10
TokenRestrictedSids = 11
TokenSessionId = 12
TokenGroupsAndPrivileges = 13
TokenSessionReference = 14
TokenSandBoxInert = 15
TokenAuditPolicy = 16
TokenOrigin = 17
TokenElevationType = 18
TokenLinkedToken = 19
TokenElevation = 20
TokenHasRestrictions = 21
TokenAccessInformation = 22
TokenVirtualizationAllowed = 23
TokenVirtualizationEnabled = 24
TokenIntegrityLevel = 25
TokenUIAccess = 26
TokenMandatoryPolicy = 27
TokenLogonSid = 28
OWNER_SECURITY_INFORMATION = (0X00000001)
GROUP_SECURITY_INFORMATION = (0X00000002)
DACL_SECURITY_INFORMATION = (0X00000004)
SACL_SECURITY_INFORMATION = (0X00000008)
LABEL_SECURITY_INFORMATION = 0x00000010
IMAGE_DOS_SIGNATURE = 23117
IMAGE_OS2_SIGNATURE = 17742
IMAGE_OS2_SIGNATURE_LE = 17740
IMAGE_VXD_SIGNATURE = 17740
IMAGE_NT_SIGNATURE = 17744
IMAGE_SIZEOF_FILE_HEADER = 20
IMAGE_FILE_RELOCS_STRIPPED = 1
IMAGE_FILE_EXECUTABLE_IMAGE = 2
IMAGE_FILE_LINE_NUMS_STRIPPED = 4
IMAGE_FILE_LOCAL_SYMS_STRIPPED = 8
IMAGE_FILE_AGGRESIVE_WS_TRIM = 16
IMAGE_FILE_LARGE_ADDRESS_AWARE = 32
IMAGE_FILE_BYTES_REVERSED_LO = 128
IMAGE_FILE_32BIT_MACHINE = 256
IMAGE_FILE_DEBUG_STRIPPED = 512
IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP = 1024
IMAGE_FILE_NET_RUN_FROM_SWAP = 2048
IMAGE_FILE_SYSTEM = 4096
IMAGE_FILE_DLL = 8192
IMAGE_FILE_UP_SYSTEM_ONLY = 16384
IMAGE_FILE_BYTES_REVERSED_HI = 32768
IMAGE_FILE_MACHINE_UNKNOWN = 0
IMAGE_FILE_MACHINE_I386 = 332
IMAGE_FILE_MACHINE_R3000 = 354
IMAGE_FILE_MACHINE_R4000 = 358
IMAGE_FILE_MACHINE_R10000 = 360
IMAGE_FILE_MACHINE_WCEMIPSV2 = 361
IMAGE_FILE_MACHINE_ALPHA = 388
IMAGE_FILE_MACHINE_POWERPC = 496
IMAGE_FILE_MACHINE_SH3 = 418
IMAGE_FILE_MACHINE_SH3E = 420
IMAGE_FILE_MACHINE_SH4 = 422
IMAGE_FILE_MACHINE_ARM = 448
IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16
IMAGE_SIZEOF_ROM_OPTIONAL_HEADER = 56
IMAGE_SIZEOF_STD_OPTIONAL_HEADER = 28
IMAGE_SIZEOF_NT_OPTIONAL_HEADER = 224
IMAGE_NT_OPTIONAL_HDR_MAGIC = 267
IMAGE_ROM_OPTIONAL_HDR_MAGIC = 263
IMAGE_SUBSYSTEM_UNKNOWN = 0
IMAGE_SUBSYSTEM_NATIVE = 1
IMAGE_SUBSYSTEM_WINDOWS_GUI = 2
IMAGE_SUBSYSTEM_WINDOWS_CUI = 3
IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 4
IMAGE_SUBSYSTEM_OS2_CUI = 5
IMAGE_SUBSYSTEM_POSIX_CUI = 7
IMAGE_SUBSYSTEM_RESERVED8 = 8
IMAGE_DLLCHARACTERISTICS_WDM_DRIVER = 8192
IMAGE_DIRECTORY_ENTRY_EXPORT = 0
IMAGE_DIRECTORY_ENTRY_IMPORT = 1
IMAGE_DIRECTORY_ENTRY_RESOURCE = 2
IMAGE_DIRECTORY_ENTRY_EXCEPTION = 3
IMAGE_DIRECTORY_ENTRY_SECURITY = 4
IMAGE_DIRECTORY_ENTRY_BASERELOC = 5
IMAGE_DIRECTORY_ENTRY_DEBUG = 6
IMAGE_DIRECTORY_ENTRY_COPYRIGHT = 7
IMAGE_DIRECTORY_ENTRY_GLOBALPTR = 8
IMAGE_DIRECTORY_ENTRY_TLS = 9
IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG = 10
IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT = 11
IMAGE_DIRECTORY_ENTRY_IAT = 12
IMAGE_SIZEOF_SHORT_NAME = 8
IMAGE_SIZEOF_SECTION_HEADER = 40
IMAGE_SCN_TYPE_NO_PAD = 8
IMAGE_SCN_CNT_CODE = 32
IMAGE_SCN_CNT_INITIALIZED_DATA = 64
IMAGE_SCN_CNT_UNINITIALIZED_DATA = 128
IMAGE_SCN_LNK_OTHER = 256
IMAGE_SCN_LNK_INFO = 512
IMAGE_SCN_LNK_REMOVE = 2048
IMAGE_SCN_LNK_COMDAT = 4096
IMAGE_SCN_MEM_FARDATA = 32768
IMAGE_SCN_MEM_PURGEABLE = 131072
IMAGE_SCN_MEM_16BIT = 131072
IMAGE_SCN_MEM_LOCKED = 262144
IMAGE_SCN_MEM_PRELOAD = 524288
IMAGE_SCN_ALIGN_1BYTES = 1048576
IMAGE_SCN_ALIGN_2BYTES = 2097152
IMAGE_SCN_ALIGN_4BYTES = 3145728
IMAGE_SCN_ALIGN_8BYTES = 4194304
IMAGE_SCN_ALIGN_16BYTES = 5242880
IMAGE_SCN_ALIGN_32BYTES = 6291456
IMAGE_SCN_ALIGN_64BYTES = 7340032
IMAGE_SCN_LNK_NRELOC_OVFL = 16777216
IMAGE_SCN_MEM_DISCARDABLE = 33554432
IMAGE_SCN_MEM_NOT_CACHED = 67108864
IMAGE_SCN_MEM_NOT_PAGED = 134217728
IMAGE_SCN_MEM_SHARED = 268435456
IMAGE_SCN_MEM_EXECUTE = 536870912
IMAGE_SCN_MEM_READ = 1073741824
IMAGE_SCN_MEM_WRITE = -2147483648
IMAGE_SCN_SCALE_INDEX = 1
IMAGE_SIZEOF_SYMBOL = 18
IMAGE_SYM_TYPE_NULL = 0
IMAGE_SYM_TYPE_VOID = 1
IMAGE_SYM_TYPE_CHAR = 2
IMAGE_SYM_TYPE_SHORT = 3
IMAGE_SYM_TYPE_INT = 4
IMAGE_SYM_TYPE_LONG = 5
IMAGE_SYM_TYPE_FLOAT = 6
IMAGE_SYM_TYPE_DOUBLE = 7
IMAGE_SYM_TYPE_STRUCT = 8
IMAGE_SYM_TYPE_UNION = 9
IMAGE_SYM_TYPE_ENUM = 10
IMAGE_SYM_TYPE_MOE = 11
IMAGE_SYM_TYPE_BYTE = 12
IMAGE_SYM_TYPE_WORD = 13
IMAGE_SYM_TYPE_UINT = 14
IMAGE_SYM_TYPE_DWORD = 15
IMAGE_SYM_TYPE_PCODE = 32768
IMAGE_SYM_DTYPE_NULL = 0
IMAGE_SYM_DTYPE_POINTER = 1
IMAGE_SYM_DTYPE_FUNCTION = 2
IMAGE_SYM_DTYPE_ARRAY = 3
IMAGE_SYM_CLASS_NULL = 0
IMAGE_SYM_CLASS_AUTOMATIC = 1
IMAGE_SYM_CLASS_EXTERNAL = 2
IMAGE_SYM_CLASS_STATIC = 3
IMAGE_SYM_CLASS_REGISTER = 4
IMAGE_SYM_CLASS_EXTERNAL_DEF = 5
IMAGE_SYM_CLASS_LABEL = 6
IMAGE_SYM_CLASS_UNDEFINED_LABEL = 7
IMAGE_SYM_CLASS_MEMBER_OF_STRUCT = 8
IMAGE_SYM_CLASS_ARGUMENT = 9
IMAGE_SYM_CLASS_STRUCT_TAG = 10
IMAGE_SYM_CLASS_MEMBER_OF_UNION = 11
IMAGE_SYM_CLASS_UNION_TAG = 12
IMAGE_SYM_CLASS_TYPE_DEFINITION = 13
IMAGE_SYM_CLASS_UNDEFINED_STATIC = 14
IMAGE_SYM_CLASS_ENUM_TAG = 15
IMAGE_SYM_CLASS_MEMBER_OF_ENUM = 16
IMAGE_SYM_CLASS_REGISTER_PARAM = 17
IMAGE_SYM_CLASS_BIT_FIELD = 18
IMAGE_SYM_CLASS_FAR_EXTERNAL = 68
IMAGE_SYM_CLASS_BLOCK = 100
IMAGE_SYM_CLASS_FUNCTION = 101
IMAGE_SYM_CLASS_END_OF_STRUCT = 102
IMAGE_SYM_CLASS_FILE = 103
IMAGE_SYM_CLASS_SECTION = 104
IMAGE_SYM_CLASS_WEAK_EXTERNAL = 105
# COFF symbol-type packing (winnt.h): the low N_BTSHFT (4) bits of a symbol
# type hold the base type (selected by N_BTMASK); the bits above hold the
# derived type (pointer / function / array), N_TSHIFT (2) bits per level.
N_BTMASK = 15
N_TMASK = 48
N_TMASK1 = 192
N_TMASK2 = 240
N_BTSHFT = 4
N_TSHIFT = 2
# Extract the base-type bits of a COFF symbol type.
def BTYPE(x): return ((x) & N_BTMASK)
# True if the first derived-type field marks the symbol as a pointer.
def ISPTR(x): return (((x) & N_TMASK) == (IMAGE_SYM_DTYPE_POINTER << N_BTSHFT))
def ISFCN(x): return (((x) & N_TMASK) == (
IMAGE_SYM_DTYPE_FUNCTION << N_BTSHFT))
# True if the first derived-type field marks the symbol as an array.
def ISARY(x): return (((x) & N_TMASK) == (IMAGE_SYM_DTYPE_ARRAY << N_BTSHFT))
def INCREF(x): return (
(((x) & ~N_BTMASK) << N_TSHIFT) | (
IMAGE_SYM_DTYPE_POINTER << N_BTSHFT) | (
(x) & N_BTMASK))
def DECREF(x): return ((((x) >> N_TSHIFT) & ~N_BTMASK) | ((x) & N_BTMASK))
IMAGE_SIZEOF_AUX_SYMBOL = 18
# COMDAT section selection values (IMAGE_COMDAT_SELECT_*).
IMAGE_COMDAT_SELECT_NODUPLICATES = 1
IMAGE_COMDAT_SELECT_ANY = 2
IMAGE_COMDAT_SELECT_SAME_SIZE = 3
IMAGE_COMDAT_SELECT_EXACT_MATCH = 4
IMAGE_COMDAT_SELECT_ASSOCIATIVE = 5
IMAGE_COMDAT_SELECT_LARGEST = 6
IMAGE_COMDAT_SELECT_NEWEST = 7
IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY = 1
IMAGE_WEAK_EXTERN_SEARCH_LIBRARY = 2
IMAGE_WEAK_EXTERN_SEARCH_ALIAS = 3
IMAGE_SIZEOF_RELOCATION = 10
# COFF relocation types, one group per architecture.
IMAGE_REL_I386_ABSOLUTE = 0
IMAGE_REL_I386_DIR16 = 1
IMAGE_REL_I386_REL16 = 2
IMAGE_REL_I386_DIR32 = 6
IMAGE_REL_I386_DIR32NB = 7
IMAGE_REL_I386_SEG12 = 9
IMAGE_REL_I386_SECTION = 10
IMAGE_REL_I386_SECREL = 11
IMAGE_REL_I386_REL32 = 20
IMAGE_REL_MIPS_ABSOLUTE = 0
IMAGE_REL_MIPS_REFHALF = 1
IMAGE_REL_MIPS_REFWORD = 2
IMAGE_REL_MIPS_JMPADDR = 3
IMAGE_REL_MIPS_REFHI = 4
IMAGE_REL_MIPS_REFLO = 5
IMAGE_REL_MIPS_GPREL = 6
IMAGE_REL_MIPS_LITERAL = 7
IMAGE_REL_MIPS_SECTION = 10
IMAGE_REL_MIPS_SECREL = 11
IMAGE_REL_MIPS_SECRELLO = 12
IMAGE_REL_MIPS_SECRELHI = 13
IMAGE_REL_MIPS_REFWORDNB = 34
IMAGE_REL_MIPS_PAIR = 37
IMAGE_REL_ALPHA_ABSOLUTE = 0
IMAGE_REL_ALPHA_REFLONG = 1
IMAGE_REL_ALPHA_REFQUAD = 2
IMAGE_REL_ALPHA_GPREL32 = 3
IMAGE_REL_ALPHA_LITERAL = 4
IMAGE_REL_ALPHA_LITUSE = 5
IMAGE_REL_ALPHA_GPDISP = 6
IMAGE_REL_ALPHA_BRADDR = 7
IMAGE_REL_ALPHA_HINT = 8
IMAGE_REL_ALPHA_INLINE_REFLONG = 9
IMAGE_REL_ALPHA_REFHI = 10
IMAGE_REL_ALPHA_REFLO = 11
IMAGE_REL_ALPHA_PAIR = 12
IMAGE_REL_ALPHA_MATCH = 13
IMAGE_REL_ALPHA_SECTION = 14
IMAGE_REL_ALPHA_SECREL = 15
IMAGE_REL_ALPHA_REFLONGNB = 16
IMAGE_REL_ALPHA_SECRELLO = 17
IMAGE_REL_ALPHA_SECRELHI = 18
IMAGE_REL_PPC_ABSOLUTE = 0
IMAGE_REL_PPC_ADDR64 = 1
IMAGE_REL_PPC_ADDR32 = 2
IMAGE_REL_PPC_ADDR24 = 3
IMAGE_REL_PPC_ADDR16 = 4
IMAGE_REL_PPC_ADDR14 = 5
IMAGE_REL_PPC_REL24 = 6
IMAGE_REL_PPC_REL14 = 7
IMAGE_REL_PPC_TOCREL16 = 8
IMAGE_REL_PPC_TOCREL14 = 9
IMAGE_REL_PPC_ADDR32NB = 10
IMAGE_REL_PPC_SECREL = 11
IMAGE_REL_PPC_SECTION = 12
IMAGE_REL_PPC_IFGLUE = 13
IMAGE_REL_PPC_IMGLUE = 14
IMAGE_REL_PPC_SECREL16 = 15
IMAGE_REL_PPC_REFHI = 16
IMAGE_REL_PPC_REFLO = 17
IMAGE_REL_PPC_PAIR = 18
IMAGE_REL_PPC_SECRELLO = 19
IMAGE_REL_PPC_SECRELHI = 20
# PPC relocation modifiers: low byte is the type, upper bits are flags.
IMAGE_REL_PPC_TYPEMASK = 255
IMAGE_REL_PPC_NEG = 256
IMAGE_REL_PPC_BRTAKEN = 512
IMAGE_REL_PPC_BRNTAKEN = 1024
IMAGE_REL_PPC_TOCDEFN = 2048
IMAGE_REL_SH3_ABSOLUTE = 0
IMAGE_REL_SH3_DIRECT16 = 1
IMAGE_REL_SH3_DIRECT32 = 2
IMAGE_REL_SH3_DIRECT8 = 3
IMAGE_REL_SH3_DIRECT8_WORD = 4
IMAGE_REL_SH3_DIRECT8_LONG = 5
IMAGE_REL_SH3_DIRECT4 = 6
IMAGE_REL_SH3_DIRECT4_WORD = 7
IMAGE_REL_SH3_DIRECT4_LONG = 8
IMAGE_REL_SH3_PCREL8_WORD = 9
IMAGE_REL_SH3_PCREL8_LONG = 10
IMAGE_REL_SH3_PCREL12_WORD = 11
IMAGE_REL_SH3_STARTOF_SECTION = 12
IMAGE_REL_SH3_SIZEOF_SECTION = 13
IMAGE_REL_SH3_SECTION = 14
IMAGE_REL_SH3_SECREL = 15
IMAGE_REL_SH3_DIRECT32_NB = 16
IMAGE_SIZEOF_LINENUMBER = 6
IMAGE_SIZEOF_BASE_RELOCATION = 8
# Base relocation block entry types (IMAGE_REL_BASED_*).
IMAGE_REL_BASED_ABSOLUTE = 0
IMAGE_REL_BASED_HIGH = 1
IMAGE_REL_BASED_LOW = 2
IMAGE_REL_BASED_HIGHLOW = 3
IMAGE_REL_BASED_HIGHADJ = 4
IMAGE_REL_BASED_MIPS_JMPADDR = 5
IMAGE_REL_BASED_SECTION = 6
IMAGE_REL_BASED_REL32 = 7
# COFF archive (.lib) file format markers.
IMAGE_ARCHIVE_START_SIZE = 8
IMAGE_ARCHIVE_START = "!<arch>\n"
IMAGE_ARCHIVE_END = "`\n"
IMAGE_ARCHIVE_PAD = "\n"
IMAGE_ARCHIVE_LINKER_MEMBER = "/ "
IMAGE_SIZEOF_ARCHIVE_MEMBER_HDR = 60
# High bit of an import lookup table entry: set when the import is by
# ordinal rather than by name (h2py's signed rendering of 0x80000000).
IMAGE_ORDINAL_FLAG = -2147483648


def IMAGE_SNAP_BY_ORDINAL(Ordinal):
    """Return True if import-table entry *Ordinal* imports by ordinal.

    The parameter was previously misspelled ``Ordina`` while the body
    read ``Ordinal``, so every call raised NameError; fixed here.
    """
    return (Ordinal & IMAGE_ORDINAL_FLAG) != 0
def IMAGE_ORDINAL(Ordinal):
    """Return the 16-bit ordinal number encoded in an import-table entry.

    The parameter was previously misspelled ``Ordina`` while the body
    read ``Ordinal``, so every call raised NameError; fixed here.
    """
    return Ordinal & 65535
# Resource directory: the high bit of a name/offset field marks a string
# name / subdirectory (signed rendering of 0x80000000).
IMAGE_RESOURCE_NAME_IS_STRING = -2147483648
IMAGE_RESOURCE_DATA_IS_DIRECTORY = -2147483648
# Debug directory entry types (IMAGE_DEBUG_TYPE_*).
IMAGE_DEBUG_TYPE_UNKNOWN = 0
IMAGE_DEBUG_TYPE_COFF = 1
IMAGE_DEBUG_TYPE_CODEVIEW = 2
IMAGE_DEBUG_TYPE_FPO = 3
IMAGE_DEBUG_TYPE_MISC = 4
IMAGE_DEBUG_TYPE_EXCEPTION = 5
IMAGE_DEBUG_TYPE_FIXUP = 6
IMAGE_DEBUG_TYPE_OMAP_TO_SRC = 7
IMAGE_DEBUG_TYPE_OMAP_FROM_SRC = 8
IMAGE_DEBUG_TYPE_BORLAND = 9
# FPO (frame pointer omission) debug-record frame types.
FRAME_FPO = 0
FRAME_TRAP = 1
FRAME_TSS = 2
FRAME_NONFPO = 3
SIZEOF_RFPO_DATA = 16
IMAGE_DEBUG_MISC_EXENAME = 1
IMAGE_SEPARATE_DEBUG_SIGNATURE = 18756
IMAGE_SEPARATE_DEBUG_FLAGS_MASK = 32768
IMAGE_SEPARATE_DEBUG_MISMATCH = 32768
# Included from string.h
_NLSCMPERROR = 2147483647
NULL = 0
# Heap creation / allocation flags (HEAP_*).
HEAP_NO_SERIALIZE = 1
HEAP_GROWABLE = 2
HEAP_GENERATE_EXCEPTIONS = 4
HEAP_ZERO_MEMORY = 8
HEAP_REALLOC_IN_PLACE_ONLY = 16
HEAP_TAIL_CHECKING_ENABLED = 32
HEAP_FREE_CHECKING_ENABLED = 64
HEAP_DISABLE_COALESCE_ON_FREE = 128
HEAP_CREATE_ALIGN_16 = 65536
HEAP_CREATE_ENABLE_TRACING = 131072
HEAP_MAXIMUM_TAG = 4095
HEAP_PSEUDO_TAG_FLAG = 32768
HEAP_TAG_SHIFT = 16
# IsTextUnicode() test flags and result masks.
IS_TEXT_UNICODE_ASCII16 = 1
IS_TEXT_UNICODE_REVERSE_ASCII16 = 16
IS_TEXT_UNICODE_STATISTICS = 2
IS_TEXT_UNICODE_REVERSE_STATISTICS = 32
IS_TEXT_UNICODE_CONTROLS = 4
IS_TEXT_UNICODE_REVERSE_CONTROLS = 64
IS_TEXT_UNICODE_SIGNATURE = 8
IS_TEXT_UNICODE_REVERSE_SIGNATURE = 128
IS_TEXT_UNICODE_ILLEGAL_CHARS = 256
IS_TEXT_UNICODE_ODD_LENGTH = 512
IS_TEXT_UNICODE_DBCS_LEADBYTE = 1024
IS_TEXT_UNICODE_NULL_BYTES = 4096
IS_TEXT_UNICODE_UNICODE_MASK = 15
IS_TEXT_UNICODE_REVERSE_MASK = 240
IS_TEXT_UNICODE_NOT_UNICODE_MASK = 3840
IS_TEXT_UNICODE_NOT_ASCII_MASK = 61440
COMPRESSION_FORMAT_NONE = (0)
COMPRESSION_FORMAT_DEFAULT = (1)
COMPRESSION_FORMAT_LZNT1 = (2)
COMPRESSION_ENGINE_STANDARD = (0)
COMPRESSION_ENGINE_MAXIMUM = (256)
MESSAGE_RESOURCE_UNICODE = 1
RTL_CRITSECT_TYPE = 0
RTL_RESOURCE_TYPE = 1
# Security-descriptor inheritance flags (SEF_*).
SEF_DACL_AUTO_INHERIT = 1
SEF_SACL_AUTO_INHERIT = 2
SEF_DEFAULT_DESCRIPTOR_FOR_OBJECT = 4
SEF_AVOID_PRIVILEGE_CHECK = 8
# DllMain() reason codes.
DLL_PROCESS_ATTACH = 1
DLL_THREAD_ATTACH = 2
DLL_THREAD_DETACH = 3
DLL_PROCESS_DETACH = 0
# Event-log read modes and record types.
EVENTLOG_SEQUENTIAL_READ = 0X0001
EVENTLOG_SEEK_READ = 0X0002
EVENTLOG_FORWARDS_READ = 0X0004
EVENTLOG_BACKWARDS_READ = 0X0008
EVENTLOG_SUCCESS = 0X0000
EVENTLOG_ERROR_TYPE = 1
EVENTLOG_WARNING_TYPE = 2
EVENTLOG_INFORMATION_TYPE = 4
EVENTLOG_AUDIT_SUCCESS = 8
EVENTLOG_AUDIT_FAILURE = 16
EVENTLOG_START_PAIRED_EVENT = 1
EVENTLOG_END_PAIRED_EVENT = 2
EVENTLOG_END_ALL_PAIRED_EVENTS = 4
EVENTLOG_PAIRED_EVENT_ACTIVE = 8
EVENTLOG_PAIRED_EVENT_INACTIVE = 16
# Registry key access rights; the combined rights mask out SYNCHRONIZE.
KEY_QUERY_VALUE = (1)
KEY_SET_VALUE = (2)
KEY_CREATE_SUB_KEY = (4)
KEY_ENUMERATE_SUB_KEYS = (8)
KEY_NOTIFY = (16)
KEY_CREATE_LINK = (32)
KEY_READ = ((STANDARD_RIGHTS_READ |
             KEY_QUERY_VALUE |
             KEY_ENUMERATE_SUB_KEYS |
             KEY_NOTIFY)
            &
            (~SYNCHRONIZE))
KEY_WRITE = ((STANDARD_RIGHTS_WRITE |
              KEY_SET_VALUE |
              KEY_CREATE_SUB_KEY)
             &
             (~SYNCHRONIZE))
KEY_EXECUTE = ((KEY_READ)
               &
               (~SYNCHRONIZE))
KEY_ALL_ACCESS = ((STANDARD_RIGHTS_ALL |
                   KEY_QUERY_VALUE |
                   KEY_SET_VALUE |
                   KEY_CREATE_SUB_KEY |
                   KEY_ENUMERATE_SUB_KEYS |
                   KEY_NOTIFY |
                   KEY_CREATE_LINK)
                  &
                  (~SYNCHRONIZE))
# Registry key creation options (RegCreateKeyEx etc.).
REG_OPTION_RESERVED = (0)
REG_OPTION_NON_VOLATILE = (0)
REG_OPTION_VOLATILE = (1)
REG_OPTION_CREATE_LINK = (2)
REG_OPTION_BACKUP_RESTORE = (4)
REG_OPTION_OPEN_LINK = (8)
REG_LEGAL_OPTION = \
    (REG_OPTION_RESERVED |
     REG_OPTION_NON_VOLATILE |
     REG_OPTION_VOLATILE |
     REG_OPTION_CREATE_LINK |
     REG_OPTION_BACKUP_RESTORE |
     REG_OPTION_OPEN_LINK)
# dispositions returned from RegCreateKeyEx
REG_CREATED_NEW_KEY = 1
REG_OPENED_EXISTING_KEY = 2
# flags used with RegSaveKeyEx
REG_STANDARD_FORMAT = 1
REG_LATEST_FORMAT = 2
REG_NO_COMPRESSION = 4
# flags used with RegRestoreKey
REG_WHOLE_HIVE_VOLATILE = 1
REG_REFRESH_HIVE = 2
REG_NO_LAZY_FLUSH = 4
REG_FORCE_RESTORE = 8
# RegNotifyChangeKeyValue filter flags.
REG_NOTIFY_CHANGE_NAME = (1)
REG_NOTIFY_CHANGE_ATTRIBUTES = (2)
REG_NOTIFY_CHANGE_LAST_SET = (4)
REG_NOTIFY_CHANGE_SECURITY = (8)
REG_LEGAL_CHANGE_FILTER = \
    (REG_NOTIFY_CHANGE_NAME |
     REG_NOTIFY_CHANGE_ATTRIBUTES |
     REG_NOTIFY_CHANGE_LAST_SET |
     REG_NOTIFY_CHANGE_SECURITY)
# Registry value types (REG_*).
REG_NONE = (0)
REG_SZ = (1)
REG_EXPAND_SZ = (2)
REG_BINARY = (3)
REG_DWORD = (4)
REG_DWORD_LITTLE_ENDIAN = (4)
REG_DWORD_BIG_ENDIAN = (5)
REG_LINK = (6)
REG_MULTI_SZ = (7)
REG_RESOURCE_LIST = (8)
REG_FULL_RESOURCE_DESCRIPTOR = (9)
REG_RESOURCE_REQUIREMENTS_LIST = (10)
# Service types, start modes and error-control levels (SERVICE_*).
SERVICE_KERNEL_DRIVER = 1
SERVICE_FILE_SYSTEM_DRIVER = 2
SERVICE_ADAPTER = 4
SERVICE_RECOGNIZER_DRIVER = 8
SERVICE_DRIVER = (SERVICE_KERNEL_DRIVER |
                  SERVICE_FILE_SYSTEM_DRIVER |
                  SERVICE_RECOGNIZER_DRIVER)
SERVICE_WIN32_OWN_PROCESS = 16
SERVICE_WIN32_SHARE_PROCESS = 32
SERVICE_WIN32 = (SERVICE_WIN32_OWN_PROCESS |
                 SERVICE_WIN32_SHARE_PROCESS)
SERVICE_INTERACTIVE_PROCESS = 256
SERVICE_TYPE_ALL = (SERVICE_WIN32 |
                    SERVICE_ADAPTER |
                    SERVICE_DRIVER |
                    SERVICE_INTERACTIVE_PROCESS)
SERVICE_BOOT_START = 0
SERVICE_SYSTEM_START = 1
SERVICE_AUTO_START = 2
SERVICE_DEMAND_START = 3
SERVICE_DISABLED = 4
SERVICE_ERROR_IGNORE = 0
SERVICE_ERROR_NORMAL = 1
SERVICE_ERROR_SEVERE = 2
SERVICE_ERROR_CRITICAL = 3
# Tape device operations and capability flags (TAPE_*).
# NOTE(review): the negative TAPE_DRIVE_* values below are h2py's signed
# renderings of 0x8000xxxx-style high-bit flag combinations.
TAPE_ERASE_SHORT = 0
TAPE_ERASE_LONG = 1
TAPE_LOAD = 0
TAPE_UNLOAD = 1
TAPE_TENSION = 2
TAPE_LOCK = 3
TAPE_UNLOCK = 4
TAPE_FORMAT = 5
TAPE_SETMARKS = 0
TAPE_FILEMARKS = 1
TAPE_SHORT_FILEMARKS = 2
TAPE_LONG_FILEMARKS = 3
TAPE_ABSOLUTE_POSITION = 0
TAPE_LOGICAL_POSITION = 1
TAPE_PSEUDO_LOGICAL_POSITION = 2
TAPE_REWIND = 0
TAPE_ABSOLUTE_BLOCK = 1
TAPE_LOGICAL_BLOCK = 2
TAPE_PSEUDO_LOGICAL_BLOCK = 3
TAPE_SPACE_END_OF_DATA = 4
TAPE_SPACE_RELATIVE_BLOCKS = 5
TAPE_SPACE_FILEMARKS = 6
TAPE_SPACE_SEQUENTIAL_FMKS = 7
TAPE_SPACE_SETMARKS = 8
TAPE_SPACE_SEQUENTIAL_SMKS = 9
TAPE_DRIVE_FIXED = 1
TAPE_DRIVE_SELECT = 2
TAPE_DRIVE_INITIATOR = 4
TAPE_DRIVE_ERASE_SHORT = 16
TAPE_DRIVE_ERASE_LONG = 32
TAPE_DRIVE_ERASE_BOP_ONLY = 64
TAPE_DRIVE_ERASE_IMMEDIATE = 128
TAPE_DRIVE_TAPE_CAPACITY = 256
TAPE_DRIVE_TAPE_REMAINING = 512
TAPE_DRIVE_FIXED_BLOCK = 1024
TAPE_DRIVE_VARIABLE_BLOCK = 2048
TAPE_DRIVE_WRITE_PROTECT = 4096
TAPE_DRIVE_EOT_WZ_SIZE = 8192
TAPE_DRIVE_ECC = 65536
TAPE_DRIVE_COMPRESSION = 131072
TAPE_DRIVE_PADDING = 262144
TAPE_DRIVE_REPORT_SMKS = 524288
TAPE_DRIVE_GET_ABSOLUTE_BLK = 1048576
TAPE_DRIVE_GET_LOGICAL_BLK = 2097152
TAPE_DRIVE_SET_EOT_WZ_SIZE = 4194304
TAPE_DRIVE_EJECT_MEDIA = 16777216
TAPE_DRIVE_RESERVED_BIT = -2147483648
TAPE_DRIVE_LOAD_UNLOAD = -2147483647
TAPE_DRIVE_TENSION = -2147483646
TAPE_DRIVE_LOCK_UNLOCK = -2147483644
TAPE_DRIVE_REWIND_IMMEDIATE = -2147483640
TAPE_DRIVE_SET_BLOCK_SIZE = -2147483632
TAPE_DRIVE_LOAD_UNLD_IMMED = -2147483616
TAPE_DRIVE_TENSION_IMMED = -2147483584
TAPE_DRIVE_LOCK_UNLK_IMMED = -2147483520
TAPE_DRIVE_SET_ECC = -2147483392
TAPE_DRIVE_SET_COMPRESSION = -2147483136
TAPE_DRIVE_SET_PADDING = -2147482624
TAPE_DRIVE_SET_REPORT_SMKS = -2147481600
TAPE_DRIVE_ABSOLUTE_BLK = -2147479552
TAPE_DRIVE_ABS_BLK_IMMED = -2147475456
TAPE_DRIVE_LOGICAL_BLK = -2147467264
TAPE_DRIVE_LOG_BLK_IMMED = -2147450880
TAPE_DRIVE_END_OF_DATA = -2147418112
TAPE_DRIVE_RELATIVE_BLKS = -2147352576
TAPE_DRIVE_FILEMARKS = -2147221504
TAPE_DRIVE_SEQUENTIAL_FMKS = -2146959360
TAPE_DRIVE_SETMARKS = -2146435072
TAPE_DRIVE_SEQUENTIAL_SMKS = -2145386496
TAPE_DRIVE_REVERSE_POSITION = -2143289344
TAPE_DRIVE_SPACE_IMMEDIATE = -2139095040
TAPE_DRIVE_WRITE_SETMARKS = -2130706432
TAPE_DRIVE_WRITE_FILEMARKS = -2113929216
TAPE_DRIVE_WRITE_SHORT_FMKS = -2080374784
TAPE_DRIVE_WRITE_LONG_FMKS = -2013265920
TAPE_DRIVE_WRITE_MARK_IMMED = -1879048192
TAPE_DRIVE_FORMAT = -1610612736
TAPE_DRIVE_FORMAT_IMMEDIATE = -1073741824
TAPE_DRIVE_HIGH_FEATURES = -2147483648
TAPE_FIXED_PARTITIONS = 0
TAPE_SELECT_PARTITIONS = 1
TAPE_INITIATOR_PARTITIONS = 2
# Kernel Transaction Manager (KTM) access rights.
TRANSACTIONMANAGER_QUERY_INFORMATION = 0x0001
TRANSACTIONMANAGER_SET_INFORMATION = 0x0002
TRANSACTIONMANAGER_RECOVER = 0x0004
TRANSACTIONMANAGER_RENAME = 0x0008
TRANSACTIONMANAGER_CREATE_RM = 0x0010
TRANSACTIONMANAGER_BIND_TRANSACTION = 0x0020
TRANSACTIONMANAGER_GENERIC_READ = STANDARD_RIGHTS_READ | TRANSACTIONMANAGER_QUERY_INFORMATION
TRANSACTIONMANAGER_GENERIC_WRITE = STANDARD_RIGHTS_WRITE |\
                                   TRANSACTIONMANAGER_SET_INFORMATION |\
                                   TRANSACTIONMANAGER_RECOVER |\
                                   TRANSACTIONMANAGER_RENAME |\
                                   TRANSACTIONMANAGER_CREATE_RM
TRANSACTIONMANAGER_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE
TRANSACTIONMANAGER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |\
                                TRANSACTIONMANAGER_GENERIC_READ |\
                                TRANSACTIONMANAGER_GENERIC_WRITE |\
                                TRANSACTIONMANAGER_GENERIC_EXECUTE |\
                                TRANSACTIONMANAGER_BIND_TRANSACTION
TRANSACTION_QUERY_INFORMATION = 0x0001
TRANSACTION_SET_INFORMATION = 0x0002
TRANSACTION_ENLIST = 0x0004
TRANSACTION_COMMIT = 0x0008
TRANSACTION_ROLLBACK = 0x0010
TRANSACTION_PROPAGATE = 0x0020
TRANSACTION_SAVEPOINT = 0x0040
TRANSACTION_MARSHALL = TRANSACTION_QUERY_INFORMATION
TRANSACTION_GENERIC_READ = STANDARD_RIGHTS_READ |\
                           TRANSACTION_QUERY_INFORMATION |\
                           SYNCHRONIZE
TRANSACTION_GENERIC_WRITE = STANDARD_RIGHTS_WRITE |\
                            TRANSACTION_SET_INFORMATION |\
                            TRANSACTION_COMMIT |\
                            TRANSACTION_ENLIST |\
                            TRANSACTION_ROLLBACK |\
                            TRANSACTION_PROPAGATE |\
                            TRANSACTION_SAVEPOINT |\
                            SYNCHRONIZE
TRANSACTION_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE |\
                              TRANSACTION_COMMIT |\
                              TRANSACTION_ROLLBACK |\
                              SYNCHRONIZE
TRANSACTION_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |\
                         TRANSACTION_GENERIC_READ |\
                         TRANSACTION_GENERIC_WRITE |\
                         TRANSACTION_GENERIC_EXECUTE
TRANSACTION_RESOURCE_MANAGER_RIGHTS = TRANSACTION_GENERIC_READ |\
                                      STANDARD_RIGHTS_WRITE |\
                                      TRANSACTION_SET_INFORMATION |\
                                      TRANSACTION_ENLIST |\
                                      TRANSACTION_ROLLBACK |\
                                      TRANSACTION_PROPAGATE |\
                                      SYNCHRONIZE
RESOURCEMANAGER_QUERY_INFORMATION = 0x0001
RESOURCEMANAGER_SET_INFORMATION = 0x0002
RESOURCEMANAGER_RECOVER = 0x0004
RESOURCEMANAGER_ENLIST = 0x0008
RESOURCEMANAGER_GET_NOTIFICATION = 0x0010
RESOURCEMANAGER_REGISTER_PROTOCOL = 0x0020
RESOURCEMANAGER_COMPLETE_PROPAGATION = 0x0040
RESOURCEMANAGER_GENERIC_READ = STANDARD_RIGHTS_READ |\
                               RESOURCEMANAGER_QUERY_INFORMATION |\
                               SYNCHRONIZE
RESOURCEMANAGER_GENERIC_WRITE = STANDARD_RIGHTS_WRITE |\
                                RESOURCEMANAGER_SET_INFORMATION |\
                                RESOURCEMANAGER_RECOVER |\
                                RESOURCEMANAGER_ENLIST |\
                                RESOURCEMANAGER_GET_NOTIFICATION |\
                                RESOURCEMANAGER_REGISTER_PROTOCOL |\
                                RESOURCEMANAGER_COMPLETE_PROPAGATION |\
                                SYNCHRONIZE
RESOURCEMANAGER_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE |\
                                  RESOURCEMANAGER_RECOVER |\
                                  RESOURCEMANAGER_ENLIST |\
                                  RESOURCEMANAGER_GET_NOTIFICATION |\
                                  RESOURCEMANAGER_COMPLETE_PROPAGATION |\
                                  SYNCHRONIZE
RESOURCEMANAGER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |\
                             RESOURCEMANAGER_GENERIC_READ |\
                             RESOURCEMANAGER_GENERIC_WRITE |\
                             RESOURCEMANAGER_GENERIC_EXECUTE
ENLISTMENT_QUERY_INFORMATION = 0x0001
ENLISTMENT_SET_INFORMATION = 0x0002
ENLISTMENT_RECOVER = 0x0004
ENLISTMENT_SUBORDINATE_RIGHTS = 0x0008
ENLISTMENT_SUPERIOR_RIGHTS = 0x0010
ENLISTMENT_GENERIC_READ = STANDARD_RIGHTS_READ | ENLISTMENT_QUERY_INFORMATION
ENLISTMENT_GENERIC_WRITE = STANDARD_RIGHTS_WRITE |\
                           ENLISTMENT_SET_INFORMATION |\
                           ENLISTMENT_RECOVER |\
                           ENLISTMENT_SUBORDINATE_RIGHTS |\
                           ENLISTMENT_SUPERIOR_RIGHTS
ENLISTMENT_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE |\
                             ENLISTMENT_RECOVER |\
                             ENLISTMENT_SUBORDINATE_RIGHTS |\
                             ENLISTMENT_SUPERIOR_RIGHTS
ENLISTMENT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |\
                        ENLISTMENT_GENERIC_READ |\
                        ENLISTMENT_GENERIC_WRITE |\
                        ENLISTMENT_GENERIC_EXECUTE
# TRANSACTION_OUTCOME enum
TransactionOutcomeUndetermined = 1
TransactionOutcomeCommitted = 2
TransactionOutcomeAborted = 3
# TRANSACTION_STATE enum
TransactionStateNormal = 1
TransactionStateIndoubt = 2
TransactionStateCommittedNotify = 3
# TRANSACTION_INFORMATION_CLASS enum
TransactionBasicInformation = 0
TransactionPropertiesInformation = 1
TransactionEnlistmentInformation = 2
TransactionFullInformation = 3
# TRANSACTIONMANAGER_INFORMATION_CLASS enum
TransactionManagerBasicInformation = 0
TransactionManagerLogInformation = 1
TransactionManagerLogPathInformation = 2
TransactionManagerOnlineProbeInformation = 3
# RESOURCEMANAGER_INFORMATION_CLASS ENUM
ResourceManagerBasicInformation = 0
ResourceManagerCompletionInformation = 1
ResourceManagerFullInformation = 2
ResourceManagerNameInformation = 3
# ENLISTMENT_INFORMATION_CLASS enum
EnlistmentBasicInformation = 0
EnlistmentRecoveryInformation = 1
EnlistmentFullInformation = 2
EnlistmentNameInformation = 3
# KTMOBJECT_TYPE enum
KTMOBJECT_TRANSACTION = 0
KTMOBJECT_TRANSACTION_MANAGER = 1
KTMOBJECT_RESOURCE_MANAGER = 2
KTMOBJECT_ENLISTMENT = 3
KTMOBJECT_INVALID = 4
| env/Lib/site-packages/win32/winnt.py | 38,419 | Generated by h2py from \mssdk\include\winnt.h Included from pshpack4.h Included from poppack.h ACE types ACE inheritance flags Token types TOKEN_INFORMATION_CLASS, used with Get/SetTokenInformation Included from string.h dispositions returned from RegCreateKeyEx flags used with RegSaveKeyEx flags used with RegRestoreKey TRANSACTION_OUTCOME enum TRANSACTION_STATE enum TRANSACTION_INFORMATION_CLASS enum TRANSACTIONMANAGER_INFORMATION_CLASS enum RESOURCEMANAGER_INFORMATION_CLASS ENUM ENLISTMENT_INFORMATION_CLASS enum KTMOBJECT_TYPE enum | 539 | en | 0.69689 |
import os
from hexbytes import HexBytes
import web3_api
import csv
import numpy as np
import sys
import copy
# Default analysis range [block_start, block_end); override with
# set_block_interval() before calling init().
block_start = 12865000
block_end = 13135000
gasused = {}      # block number -> gas used in that block (from gas_csv.csv)
sibling_cnt = {}  # block number -> sibling/uncle count (from sibling_csv.csv)
timestamp = {}    # block number -> timestamp in seconds (from timestamp_csv.csv)
is_hotspot = {}   # block number -> bool, filled by indicate_hotspots()
avggas_per = {}   # window length (sec) -> {block number -> avg gas per second}
def set_block_interval(start, end):
    """Set the global [block_start, block_end) range that all loops iterate."""
    global block_start, block_end
    block_start, block_end = start, end
def _load_int_csv(path, target):
    """Load (block_number, value) integer pairs from a headered CSV into *target*."""
    with open(path, 'r') as f:
        reader = csv.reader(f)
        next(reader, None)  # skip the header row
        for row in reader:
            target[int(row[0])] = int(row[1])


def init():
    """Populate gasused, sibling_cnt and timestamp from the ./blockdata CSVs.

    Fixes a resource leak: the original opened each file without ever
    closing it; the files are now managed with context managers, and the
    three identical parsing loops are folded into one helper.
    """
    _load_int_csv('./blockdata/gas_csv.csv', gasused)
    _load_int_csv('./blockdata/sibling_csv.csv', sibling_cnt)
    _load_int_csv('./blockdata/timestamp_csv.csv', timestamp)
def write_csv():
    """Write the per-window average-gas series to ./spikedata/avggas.csv.

    Fixes a resource leak: the original created the output file via a bare
    open() that was never closed, so buffered rows could be lost; the file
    is now closed (and flushed) by a context manager.
    """
    with open('./spikedata/avggas.csv', 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(('block_number', '20 sec', '30 sec', '40 sec',
                         '60 sec', '90 sec', '120 sec'))
        for blockno in range(block_start, block_end):
            writer.writerow((blockno,
                             avggas_per[20][blockno], avggas_per[30][blockno],
                             avggas_per[40][blockno], avggas_per[60][blockno],
                             avggas_per[90][blockno], avggas_per[120][blockno]))
def indicate_hotspots(period: int, gaspersec: int):
    """Flag blocks whose trailing *period*-second window sustained high gas use.

    For each block in [block_start, block_end), sums gasused over all blocks
    whose timestamp lies within the trailing window and marks the block a
    hotspot when that total reaches period * gaspersec.

    Fix: the window accumulator shadowed the builtin ``sum``; renamed.
    """
    for blockno in range(block_start, block_end):
        is_hotspot[blockno] = False
    for blockno in range(block_start, block_end):
        # Start at -gasused[blockno]: the first loop iteration adds it back,
        # so the net total covers the window without double-counting blockno.
        window_gas = -gasused[blockno]
        bk = blockno
        while timestamp[bk] > timestamp[blockno] - period:
            bk -= 1
            window_gas += gasused[bk + 1]
        is_hotspot[blockno] = window_gas >= period * gaspersec
def calc_avggas_per(period: int):
    """Fill avggas_per[period] with the average gas per second over a trailing window.

    For each block in [block_start, block_end), sums gasused over all blocks
    whose timestamp falls within the trailing *period* seconds and divides
    by *period*. ``avggas_per[period]`` must already be a dict.

    Fix: the window accumulator shadowed the builtin ``sum``; renamed.
    """
    for blockno in range(block_start, block_end):
        # See indicate_hotspots(): the initial negative entry is cancelled by
        # the first iteration of the window walk.
        window_gas = -gasused[blockno]
        bk = blockno
        while timestamp[bk] > timestamp[blockno] - period:
            bk -= 1
            window_gas += gasused[bk + 1]
        window_gas /= period
        avggas_per[period][blockno] = window_gas
if __name__=='__main__':
    set_block_interval(12895000,13105000)
    #set_block_interval(13035000,13105000)
    init()
    # Seed gas/sibling data for the 15 blocks immediately before
    # block_start so the trailing-window loops can look back past the
    # start of the analysed range without KeyErrors.
    gasused[12894999]=14763353;sibling_cnt[12894999]=0
    gasused[12894998]=14984748;sibling_cnt[12894998]=0
    gasused[12894997]=14980637;sibling_cnt[12894997]=0
    gasused[12894996]=14965180;sibling_cnt[12894996]=0
    gasused[12894995]=14952940;sibling_cnt[12894995]=0
    gasused[12894994]=14958059;sibling_cnt[12894994]=0
    gasused[12894993]=14966093;sibling_cnt[12894993]=0
    gasused[12894992]=14727000;sibling_cnt[12894992]=0
    gasused[12894991]=14960561;sibling_cnt[12894991]=0
    gasused[12894990]=14946131;sibling_cnt[12894990]=0
    gasused[12894989]=14976050;sibling_cnt[12894989]=0
    gasused[12894988]=14970445;sibling_cnt[12894988]=0
    gasused[12894987]=14979409;sibling_cnt[12894987]=0
    gasused[12894986]=14988665;sibling_cnt[12894986]=0
    gasused[12894985]=14717667;sibling_cnt[12894985]=0
    for period in (20,30,40,60,90,120):
        avggas_per[period]={}
        calc_avggas_per(period)
        print('period = ',period)
        # For each threshold, print the percentage of blocks in the fixed
        # sub-range [13035000, 13105000) whose window average meets it,
        # joined with ' & ' — presumably formatted as a LaTeX table row.
        for threshold in (1000000,1200000,1400000,1600000,1800000,2000000,2200000,2400000):
            cnt = 0
            for blockno in range(13035000,13105000):
                if avggas_per[period][blockno]>=threshold:
                    cnt += 1
            print('%.2f'%(cnt/70000*100,),end=' & ')
        print('')
    write_csv()
| Data/spike.py | 3,728 | set_block_interval(13035000,13105000) | 37 | en | 0.21223 |
from nextcord.ext import commands
import requests
# the prefix is not used in this example
bot = commands.Bot(command_prefix='$')
# NOTE(review): commented-out example of a raw on_message listener,
# kept for reference.
# @bot.event
# async def on_message(message):
#     print(f'Message from {message.author}: {message.content}')
@bot.command()
async def ping(ctx):
    """Report the bot's websocket latency, rounded to whole milliseconds."""
    latency_ms = round(bot.latency * 1000)
    await ctx.send(f"The bot latency is {latency_ms}ms.")
@bot.command()
async def greet(ctx):
    """Greet the invoking user with a mention."""
    author = ctx.author
    await ctx.send(f"Hello Master {author.mention}")
bot.run('your_token')
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class SimInterfaceIPv6Config(Base):
    """Data associated with simulated IPv6 interface link configuration inside a Network Topology.
    The SimInterfaceIPv6Config class encapsulates a list of simInterfaceIPv6Config resources that are managed by the system.
    A list of resources can be retrieved from the server using the SimInterfaceIPv6Config.find() method.
    """
    # No per-instance __dict__; attribute access is resolved by Base.
    __slots__ = ()
    # REST resource name on the wire.
    _SDM_NAME = 'simInterfaceIPv6Config'
    # Maps Python-side attribute names to the REST attribute names.
    _SDM_ATT_MAP = {
        'Count': 'count',
        'DescriptiveName': 'descriptiveName',
        'EnableIp': 'enableIp',
        'FromIP': 'fromIP',
        'Name': 'name',
        'SubnetPrefixLength': 'subnetPrefixLength',
        'ToIP': 'toIP',
    }

    def __init__(self, parent):
        super(SimInterfaceIPv6Config, self).__init__(parent)

    @property
    def Ospfv3PseudoInterface(self):
        """
        Returns
        -------
        - obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv3pseudointerface_bbc932877888c8c8400661ec299754a8.Ospfv3PseudoInterface): An instance of the Ospfv3PseudoInterface class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Imported lazily — NOTE(review): presumably to avoid a circular
        # import between topology modules; verify before hoisting.
        from uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv3pseudointerface_bbc932877888c8c8400661ec299754a8 import Ospfv3PseudoInterface
        return Ospfv3PseudoInterface(self)

    @property
    def Count(self):
        """
        Returns
        -------
        - number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Count'])

    @property
    def DescriptiveName(self):
        """
        Returns
        -------
        - str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
        """
        return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])

    @property
    def EnableIp(self):
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Enable IPv6
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableIp']))

    @property
    def FromIP(self):
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): 128 Bits IPv6 address.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FromIP']))

    @property
    def Name(self):
        """
        Returns
        -------
        - str: Name of NGPF element, guaranteed to be unique in Scenario
        """
        return self._get_attribute(self._SDM_ATT_MAP['Name'])
    @Name.setter
    def Name(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)

    @property
    def SubnetPrefixLength(self):
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Subnet Prefix Length
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubnetPrefixLength']))

    @property
    def ToIP(self):
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): 128 Bits IPv6 address.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ToIP']))

    def update(self, Name=None):
        """Updates simInterfaceIPv6Config resource on the server.

        This method has some named parameters with a type: obj (Multivalue).
        The Multivalue class has documentation that details the possible values for those named parameters.

        Args
        ----
        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def find(self, Count=None, DescriptiveName=None, Name=None):
        """Finds and retrieves simInterfaceIPv6Config resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve simInterfaceIPv6Config resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all simInterfaceIPv6Config resources from the server.

        Args
        ----
        - Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
        - DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario

        Returns
        -------
        - self: This instance with matching simInterfaceIPv6Config resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of simInterfaceIPv6Config data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the simInterfaceIPv6Config resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)

    def get_device_ids(self, PortNames=None, EnableIp=None, FromIP=None, SubnetPrefixLength=None, ToIP=None):
        """Base class infrastructure that gets a list of simInterfaceIPv6Config device ids encapsulated by this object.

        Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.

        Args
        ----
        - PortNames (str): optional regex of port names
        - EnableIp (str): optional regex of enableIp
        - FromIP (str): optional regex of fromIP
        - SubnetPrefixLength (str): optional regex of subnetPrefixLength
        - ToIP (str): optional regex of toIP

        Returns
        -------
        - list(int): A list of device ids that meets the regex criteria provided in the method parameters

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._get_ngpf_device_ids(locals())

    def Abort(self):
        """Executes the abort operation on the server.

        Abort CPF control plane (equals to demote to kUnconfigured state).

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        # The server-side operation receives this resource instance as Arg1.
        payload = { "Arg1": self }
        return self._execute('abort', payload=payload, response_object=None)

    def Start(self):
        """Executes the start operation on the server.

        Start CPF control plane (equals to promote to negotiated state).

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self }
        return self._execute('start', payload=payload, response_object=None)

    def Stop(self):
        """Executes the stop operation on the server.

        Stop CPF control plane (equals to demote to PreValidated-DoDDone state).

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        payload = { "Arg1": self }
        return self._execute('stop', payload=payload, response_object=None)
The SimInterfaceIPv6Config class encapsulates a list of simInterfaceIPv6Config resources that are managed by the system.
A list of resources can be retrieved from the server using the SimInterfaceIPv6Config.find() method.
Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable IPv6
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): 128 Bits IPv6 address.
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv3pseudointerface_bbc932877888c8c8400661ec299754a8.Ospfv3PseudoInterface): An instance of the Ospfv3PseudoInterface class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Subnet Prefix Length
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): 128 Bits IPv6 address.
Finds and retrieves simInterfaceIPv6Config resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve simInterfaceIPv6Config resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all simInterfaceIPv6Config resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching simInterfaceIPv6Config resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
Base class infrastructure that gets a list of simInterfaceIPv6Config device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- EnableIp (str): optional regex of enableIp
- FromIP (str): optional regex of fromIP
- SubnetPrefixLength (str): optional regex of subnetPrefixLength
- ToIP (str): optional regex of toIP
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
Retrieves a single instance of simInterfaceIPv6Config data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the simInterfaceIPv6Config resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
Updates simInterfaceIPv6Config resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
MIT LICENSE Copyright 1997 - 2020 by IXIA Keysight Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 5,718 | en | 0.677957 |
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient import exc
from oslo_log import log as logging
from pycadf import cadftaxonomy as taxonomy
import six
from magnum.common import clients
from magnum.common import exception
from magnum.common import profiler
from magnum.conductor.handlers.common import cert_manager
from magnum.conductor.handlers.common import trust_manager
from magnum.conductor import scale_manager
from magnum.conductor import utils as conductor_utils
import magnum.conf
from magnum.drivers.common import driver
from magnum.i18n import _
from magnum import objects
from magnum.objects import fields
CONF = magnum.conf.CONF
LOG = logging.getLogger(__name__)
@profiler.trace_cls("rpc")
class Handler(object):
    """Conductor-side RPC handler for cluster lifecycle operations.

    Each operation keeps the cluster's status/status_reason fields in
    sync with the outcome, delegates the real work to the cluster's
    driver, and emits CADF audit notifications for the pending,
    success and failure outcomes.
    """
    def __init__(self):
        super(Handler, self).__init__()
    # Cluster Operations
    def cluster_create(self, context, cluster, create_timeout):
        """Create a new cluster.

        Persists the cluster with CREATE_IN_PROGRESS status, sets up the
        trustee/trust and certificates it needs, then asks the driver to
        create the actual cluster.

        :param context: request context
        :param cluster: objects.Cluster to create
        :param create_timeout: timeout handed to the driver's
            create_cluster call
        :returns: the (saved) cluster object
        :raises exception.InvalidParameterValue: when heat rejects the
            request with HTTPBadRequest; any other error is re-raised
            unchanged
        """
        LOG.debug('cluster_heat cluster_create')
        osc = clients.OpenStackClients(context)
        cluster.status = fields.ClusterStatus.CREATE_IN_PROGRESS
        cluster.status_reason = None
        cluster.create()
        try:
            # Create trustee/trust and set them to cluster
            trust_manager.create_trustee_and_trust(osc, cluster)
            # Generate certificate and set the cert reference to cluster
            cert_manager.generate_certificates_to_cluster(cluster,
                                                          context=context)
            conductor_utils.notify_about_cluster_operation(
                context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_PENDING)
            # Get driver
            cluster_driver = driver.Driver.get_driver_for_cluster(context,
                                                                  cluster)
            # Create cluster
            cluster_driver.create_cluster(context, cluster, create_timeout)
            cluster.save()
        except Exception as e:
            # Record the failure on the cluster before propagating it
            cluster.status = fields.ClusterStatus.CREATE_FAILED
            cluster.status_reason = six.text_type(e)
            cluster.save()
            conductor_utils.notify_about_cluster_operation(
                context, taxonomy.ACTION_CREATE, taxonomy.OUTCOME_FAILURE)
            # Translate heat's bad-request into a Magnum API exception so
            # callers see a meaningful error type
            if isinstance(e, exc.HTTPBadRequest):
                e = exception.InvalidParameterValue(message=six.text_type(e))
                raise e
            raise
        return cluster
    def cluster_update(self, context, cluster, rollback=False):
        """Update an existing cluster via its driver.

        Only clusters in one of the *_COMPLETE states may be updated; an
        empty change delta returns immediately without calling the driver.

        :param context: request context
        :param cluster: objects.Cluster carrying the pending changes
        :param rollback: whether the driver should roll back on failure
        :returns: the (saved) cluster object
        :raises exception.NotSupported: when the cluster is not in an
            updatable status
        """
        LOG.debug('cluster_heat cluster_update')
        osc = clients.OpenStackClients(context)
        allow_update_status = (
            fields.ClusterStatus.CREATE_COMPLETE,
            fields.ClusterStatus.UPDATE_COMPLETE,
            fields.ClusterStatus.RESUME_COMPLETE,
            fields.ClusterStatus.RESTORE_COMPLETE,
            fields.ClusterStatus.ROLLBACK_COMPLETE,
            fields.ClusterStatus.SNAPSHOT_COMPLETE,
            fields.ClusterStatus.CHECK_COMPLETE,
            fields.ClusterStatus.ADOPT_COMPLETE
        )
        if cluster.status not in allow_update_status:
            conductor_utils.notify_about_cluster_operation(
                context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE)
            operation = _('Updating a cluster when status is '
                          '"%s"') % cluster.status
            raise exception.NotSupported(operation=operation)
        # Nothing changed on the object - nothing to do
        delta = cluster.obj_what_changed()
        if not delta:
            return cluster
        manager = scale_manager.get_scale_manager(context, osc, cluster)
        # Get driver
        ct = conductor_utils.retrieve_cluster_template(context, cluster)
        cluster_driver = driver.Driver.get_driver(ct.server_type,
                                                  ct.cluster_distro,
                                                  ct.coe)
        # Update cluster
        try:
            conductor_utils.notify_about_cluster_operation(
                context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_PENDING)
            cluster_driver.update_cluster(context, cluster, manager, rollback)
            cluster.status = fields.ClusterStatus.UPDATE_IN_PROGRESS
            cluster.status_reason = None
        except Exception as e:
            cluster.status = fields.ClusterStatus.UPDATE_FAILED
            cluster.status_reason = six.text_type(e)
            cluster.save()
            conductor_utils.notify_about_cluster_operation(
                context, taxonomy.ACTION_UPDATE, taxonomy.OUTCOME_FAILURE)
            # Translate heat's bad-request into a Magnum API exception so
            # callers see a meaningful error type
            if isinstance(e, exc.HTTPBadRequest):
                e = exception.InvalidParameterValue(message=six.text_type(e))
                raise e
            raise
        cluster.save()
        return cluster
    def cluster_delete(self, context, uuid):
        """Delete a cluster and its associated trust and certificates.

        :param context: request context
        :param uuid: UUID of the cluster to delete
        :returns: None
        :raises exception.OperationInProgress: when the backend reports a
            conflicting operation already running on the cluster
        """
        LOG.debug('cluster_conductor cluster_delete')
        osc = clients.OpenStackClients(context)
        cluster = objects.Cluster.get_by_uuid(context, uuid)
        ct = conductor_utils.retrieve_cluster_template(context, cluster)
        cluster_driver = driver.Driver.get_driver(ct.server_type,
                                                  ct.cluster_distro,
                                                  ct.coe)
        try:
            conductor_utils.notify_about_cluster_operation(
                context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_PENDING)
            cluster_driver.delete_cluster(context, cluster)
            cluster.status = fields.ClusterStatus.DELETE_IN_PROGRESS
            cluster.status_reason = None
        except exc.HTTPNotFound:
            # The backing stack is already gone; clean up the remaining
            # Magnum-side resources and remove the cluster record.
            LOG.info('The cluster %s was not found during cluster'
                     ' deletion.' % cluster.id)
            try:
                trust_manager.delete_trustee_and_trust(osc, context, cluster)
                cert_manager.delete_certificates_from_cluster(cluster,
                                                              context=context)
                cluster.destroy()
            except exception.ClusterNotFound:
                # Another request deleted it concurrently; nothing to do
                LOG.info('The cluster %s has been deleted by others.' %
                         uuid)
            conductor_utils.notify_about_cluster_operation(
                context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_SUCCESS)
            return None
        except exc.HTTPConflict:
            conductor_utils.notify_about_cluster_operation(
                context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
            raise exception.OperationInProgress(cluster_name=cluster.name)
        except Exception as unexp:
            conductor_utils.notify_about_cluster_operation(
                context, taxonomy.ACTION_DELETE, taxonomy.OUTCOME_FAILURE)
            cluster.status = fields.ClusterStatus.DELETE_FAILED
            cluster.status_reason = six.text_type(unexp)
            cluster.save()
            raise
        cluster.save()
        return None
| magnum/conductor/handlers/cluster_conductor.py | 7,452 | Copyright 2014 NEC Corporation. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Cluster Operations Create trustee/trust and set them to cluster Generate certificate and set the cert reference to cluster Get driver Create cluster Get driver Update cluster | 751 | en | 0.862786 |
#!/usr/bin/env python
"""
This action will display a list of volumes for an account
"""
from libsf.apputil import PythonApp
from libsf.argutil import SFArgumentParser, GetFirstLine, SFArgFormatter
from libsf.logutil import GetLogger, logargs
from libsf.sfcluster import SFCluster
from libsf.util import ValidateAndDefault, NameOrID, IPv4AddressType, BoolType, StrType, OptionalValueType, SelectionType, SolidFireIDType
from libsf import sfdefaults
from libsf import SolidFireError, UnknownObjectError
import sys
import json
@logargs
@ValidateAndDefault({
    # "arg_name" : (arg_type, arg_default)
    "account_name" : (OptionalValueType(StrType), None),
    "account_id" : (OptionalValueType(SolidFireIDType), None),
    "by_id" : (BoolType, False),
    "mvip" : (IPv4AddressType, sfdefaults.mvip),
    "username" : (StrType, sfdefaults.username),
    "password" : (StrType, sfdefaults.password),
    "output_format" : (OptionalValueType(SelectionType(sfdefaults.all_output_formats)), None),
})
def AccountListVolumes(account_name,
                       account_id,
                       by_id,
                       mvip,
                       username,
                       password,
                       output_format):
    """
    Show the list of volumes for an account

    Args:
        account_name:   the name of the account
        account_id:     the ID of the account
        by_id:          show volume IDs instead of names
        mvip:           the management IP of the cluster
        username:       the admin user of the cluster
        password:       the admin password of the cluster
        output_format:  the format to display the information

    Returns:
        True on success, False on failure
    """
    log = GetLogger()
    NameOrID(account_name, account_id, "account")

    # Build the cluster connection once and reuse it for every query below
    cluster = SFCluster(mvip, username, password)

    log.info("Searching for accounts")
    try:
        account = cluster.FindAccount(accountName=account_name, accountID=account_id)
    except UnknownObjectError:
        log.error("Account does not exist")
        return False
    except SolidFireError as e:
        log.error("Could not search for accounts: {}".format(e))
        return False

    # Both active and deleted volumes may still belong to the account
    log.info("Searching for volumes")
    try:
        all_volumes = cluster.ListActiveVolumes()
        all_volumes += cluster.ListDeletedVolumes()
    except SolidFireError as e:
        log.error("Could not search for volumes: {}".format(e))
        return False

    # Index volumes by ID so the account's volume list can be resolved
    all_volumes = {vol["volumeID"] : vol for vol in all_volumes}
    attr = "volumeID" if by_id else "name"
    account_volumes = [all_volumes[vid][attr] for vid in account.volumes]

    # Display the list in the requested format
    if output_format == "bash":
        sys.stdout.write(" ".join([str(item) for item in account_volumes]) + "\n")
        sys.stdout.flush()
    elif output_format == "json":
        sys.stdout.write(json.dumps({"volumes" : account_volumes}) + "\n")
        sys.stdout.flush()
    else:
        log.info("{} volumes in account {}".format(len(account.volumes), account.username))
        if account.volumes:
            log.info(" {}".format(", ".join([str(item) for item in account_volumes])))
    return True
if __name__ == '__main__':
    # Build the standard CLI parser for this action from the docstring
    parser = SFArgumentParser(description=GetFirstLine(__doc__), formatter_class=SFArgFormatter)
    # Standard cluster/account selection options shared by these scripts
    parser.add_cluster_mvip_args()
    parser.add_account_selection_args()
    parser.add_argument("--byid", action="store_true", default=False, dest="by_id", help="display volume IDs instead of volume names")
    parser.add_console_format_args()
    args = parser.parse_args_to_dict()
    # Hand the parsed arguments to the action via the PythonApp wrapper
    app = PythonApp(AccountListVolumes, args)
    app.Run(**args)
| account_list_volumes.py | 3,772 | Show the list of volumes for an account
Args:
account_name: the name of the account
account_id: the ID of the account
by_id: show volume IDs instead of names
mvip: the management IP of the cluster
username: the admin user of the cluster
password: the admin password of the cluster
output_format: the format to display the information
This action will display a list of volumes for an account
!/usr/bin/env python "arg_name" : (arg_type, arg_default) Display the list in the requested format | 591 | en | 0.659891 |
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the related model class on first use and publish it into
    this module's namespace (deferring the import avoids circular-import
    problems between generated model modules)."""
    from intersight.model.storage_net_app_node_relationship import StorageNetAppNodeRelationship
    globals().update(StorageNetAppNodeRelationship=StorageNetAppNodeRelationship)
class StorageNetAppSensorAllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Enum constraints: both ClassId and ObjectType are fixed to the
    # concrete type name of this model.
    allowed_values = {
        ('class_id',): {
            'STORAGE.NETAPPSENSOR': "storage.NetAppSensor",
        },
        ('object_type',): {
            'STORAGE.NETAPPSENSOR': "storage.NetAppSensor",
        },
    }

    # No length/range/regex validations are defined for this model.
    validations = {
    }

    # Properties not declared in the schema are not accepted.
    additional_properties_type = None

    _nullable = False

    # NOTE: the generator declares these cached_property methods without
    # `self`; cached_property resolves them on first class-level access.
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'class_id': (str,), # noqa: E501
            'object_type': (str,), # noqa: E501
            'array_controller': (StorageNetAppNodeRelationship,), # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model is not polymorphic; no discriminator is defined.
        return None

    # Maps pythonic attribute names to the JSON property names on the wire.
    attribute_map = {
        'class_id': 'ClassId', # noqa: E501
        'object_type': 'ObjectType', # noqa: E501
        'array_controller': 'ArrayController', # noqa: E501
    }

    _composed_schemas = {}

    # Internal bookkeeping attributes assigned in __init__; these are not
    # part of the OpenAPI model itself.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs): # noqa: E501
        """StorageNetAppSensorAllOf - a model defined in OpenAPI
        Args:
        Keyword Args:
            class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "storage.NetAppSensor", must be one of ["storage.NetAppSensor", ]  # noqa: E501
            object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "storage.NetAppSensor", must be one of ["storage.NetAppSensor", ]  # noqa: E501
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            array_controller (StorageNetAppNodeRelationship): [optional]  # noqa: E501
        """

        # Enum-constrained properties default to the only allowed value.
        class_id = kwargs.get('class_id', "storage.NetAppSensor")
        object_type = kwargs.get('object_type', "storage.NetAppSensor")
        # Pop the framework-control kwargs so they are not treated as
        # model attributes below.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Generated models accept keyword arguments only.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.class_id = class_id
        self.object_type = object_type
        # Assign the remaining keyword args as model attributes, optionally
        # dropping unknown keys when the configuration requests it.
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| intersight/model/storage_net_app_sensor_all_of.py | 9,573 | NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
StorageNetAppSensorAllOf - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "storage.NetAppSensor", must be one of ["storage.NetAppSensor", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "storage.NetAppSensor", must be one of ["storage.NetAppSensor", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
array_controller (StorageNetAppNodeRelationship): [optional] # noqa: E501
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
noqa: F401 noqa: F401 noqa: F401 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 discard variable. | 5,850 | en | 0.842474 |
"""Read a float from the user and print its integer part."""
cores = {'limpa': '\033[m', 'azul': '\033[1;34m'}
print('{:-^40}'.format('PARTE INTEIRA DE UM VALOR'))
num = float(input('Digite um valor com ponto [Ex: 1.20]: '))
# int() truncates toward zero, which is the integer part of the value;
# round() was wrong here because it rounds to the nearest integer
# (e.g. round(1.7) == 2, but the integer part of 1.7 is 1).
print('{}{}{} - sua parte inteira é - {}{}{} '
      .format(cores['azul'], num, cores['limpa'], cores['azul'], int(num), cores['limpa']))
print('{:-^40}'.format('FIM'))
parte inteira | 66 | pt | 0.983155 |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
r"""
eventcluster module.
This module is intended to be used to summarize large numbers of events
into clusters of different patterns. High volume repeating events can
often make it difficult to see unique and interesting items.
The module contains functions to generate clusterable features from
string data. For example, an administration command that does some
maintenance on thousands of servers with a commandline such as:
``install-update -hostname {host.fqdn} -tmp:/tmp/{GUID}/rollback``\ can
be collapsed into a single cluster pattern by ignoring the character
values in the string and using delimiters or tokens to group the values.
This is an unsupervised learning module implemented using SciKit Learn
DBScan.
Contains:
dbcluster_events: generic clustering method using DBSCAN designed to summarize
process events and other similar data by grouping on common features.
add_process_features: derives numerical features from text features such as
commandline and process path.
"""
from binascii import crc32
from functools import lru_cache
from math import log10, floor
import re
from typing import List, Any, Tuple, Union
import numpy as np
import pandas as pd
from ..common.exceptions import MsticpyImportExtraError
from ..common.utility import export
from .._version import VERSION
try:
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import Normalizer
import matplotlib.pyplot as plt
from matplotlib import cm
except ImportError as imp_err:
raise MsticpyImportExtraError(
"Cannot use this feature without Sklearn and matplotlib installed",
title="Error importing Scikit Learn and matplotlib",
extra="ml",
) from imp_err
__version__ = VERSION
__author__ = "Ian Hellen"
# pylint: disable=too-many-arguments, too-many-locals
@export
def dbcluster_events(
    data: Any,
    cluster_columns: List[Any] = None,
    verbose: bool = False,
    normalize: bool = True,
    time_column: str = "TimeCreatedUtc",
    max_cluster_distance: float = 0.01,
    min_cluster_samples: int = 2,
    **kwargs,
) -> Tuple[pd.DataFrame, DBSCAN, np.ndarray]:
    """
    Cluster data set according to cluster_columns features.

    Parameters
    ----------
    data : Any
        Input data as a pandas DataFrame or numpy array
    cluster_columns : List[Any], optional
        List of columns to use for features
        - for DataFrame this is a list of column names
        - for numpy array this is a list of column indexes
    verbose : bool, optional
        Print additional information about clustering results (the default is False)
    normalize : bool, optional
        Normalize the input data (should probably always be True)
    time_column : str, optional
        If there is a time column the output data will be ordered by this
        (the default is 'TimeCreatedUtc')
    max_cluster_distance : float, optional
        DBSCAN eps (max cluster member distance) (the default is 0.01)
    min_cluster_samples : int, optional
        DBSCAN min_samples (the minimum cluster size) (the default is 2)

    Other Parameters
    ----------------
    kwargs: Other arguments are passed to DBSCAN constructor

    Returns
    -------
    Tuple[pd.DataFrame, DBSCAN, np.ndarray]
        Output dataframe with clustered rows
        DBSCAN model
        Normalized data set

    Raises
    ------
    ValueError
        If `data` is neither a pandas DataFrame nor a numpy array.
    """
    allowed_types = [np.ndarray, pd.DataFrame]
    x_input = None
    if isinstance(data, pd.DataFrame):
        if cluster_columns is None:
            x_input = data.values
        else:
            x_input = data[cluster_columns].values
    elif isinstance(data, np.ndarray):
        # BUGFIX: numpy arrays have no `.values` attribute (that is a
        # pandas API); column selection on an ndarray is plain indexing.
        x_input = data if cluster_columns is None else data[:, cluster_columns]
    if x_input is None:
        mssg = "Input data not in expected format.\n{} is not one of allowed types {}"
        type_list = ", ".join(str(t) for t in allowed_types)
        mssg = mssg.format(str(type(data)), type_list)
        raise ValueError(mssg)
    # Create DBSCAN cluster object
    db_cluster = DBSCAN(
        eps=max_cluster_distance, min_samples=min_cluster_samples, **kwargs
    )
    # Normalize the data (most clustering algorithms don't do well with
    # unnormalized data)
    x_norm = Normalizer().fit_transform(x_input) if normalize else x_input
    # fit the data set
    db_cluster.fit(x_norm)
    labels = db_cluster.labels_
    # Label -1 marks noise points; every other label is a cluster id
    cluster_set, counts = np.unique(labels, return_counts=True)
    if verbose:
        print(
            "Clustering for set size ",
            len(x_norm),
            " - ",
            len(cluster_set),
            " clusters",
        )
        print("Individual cluster sizes: ", ", ".join(str(c) for c in counts))
    # Collapse each cluster to one exemplar row; keep noise rows as-is
    clustered_events = _merge_clustered_items(
        cluster_set, labels, data, time_column, counts
    )
    if verbose:
        print("Cluster output rows: ", len(clustered_events))
    return clustered_events, db_cluster, x_norm
def _merge_clustered_items(
    cluster_set: np.array,
    labels: np.array,
    data: Union[pd.DataFrame, np.array],
    time_column: str,
    counts: np.array,
) -> pd.DataFrame:
    """
    Merge outliers and core clusters into single DataFrame.

    Parameters
    ----------
    cluster_set : np.array
        The set of clusters
    labels : np.array
        The cluster labels
    data : Union[pd.DataFrame, np.array]
        The source data
    time_column : str
        Name of the Time column
    counts : np.array
        The counts of members in each cluster

    Returns
    -------
    pd.DataFrame
        Merged dataframe

    Notes
    -----
    Noise points (cluster id -1) are kept individually with ClusterSize 1;
    each real cluster is represented by its first (earliest-sorted) member.
    """
    # Choose a tz-aware or naive timestamp dtype to match the input data
    tz_aware = data.iloc[0][time_column].tz
    ts_type = "datetime64[ns, UTC]" if tz_aware is not None else "datetime64[ns]"
    ts_dtypes = {
        "TimeGenerated": ts_type,
        "FirstEventTime": ts_type,
        "LastEventTime": ts_type,
    }

    merged_frames = []
    for member_idx, cluster_id in enumerate(cluster_set):
        in_cluster = labels == cluster_id

        # First/last event times are only derivable for DataFrame input
        if isinstance(data, pd.DataFrame):
            ordered = data[in_cluster].sort_values(time_column, ascending=True)
            first_time = ordered[0:][time_column].iat[0]
            last_time = ordered[-1:][time_column].iat[0]
        else:
            first_time = None
            last_time = None

        # 'Noise' items (-1) could not be assigned to any cluster, so each
        # one is kept as a unique, unclustered row.
        is_noise = cluster_id == -1
        members = data[in_cluster].assign(
            Clustered=not is_noise,
            ClusterId=cluster_id,
            ClusterSize=1 if is_noise else counts[member_idx],
            TimeGenerated=first_time,
            FirstEventTime=first_time,
            LastEventTime=last_time,
        )
        if not is_noise:
            # Keep only the first example as the cluster's exemplar
            members = members[0:1]
        merged_frames.append(members.astype(dtype=ts_dtypes))

    return pd.concat(merged_frames)
@export
def add_process_features(
    input_frame: pd.DataFrame, path_separator: str = None, force: bool = False
) -> pd.DataFrame:
    r"""
    Add numerical features based on patterns of command line and process name.

    Parameters
    ----------
    input_frame : pd.DataFrame
        The input dataframe
    path_separator : str, optional
        Path separator. If not supplied, try to determine
        from 'NewProcessName' column of first 10 rows
        (the default is None)
    force : bool, optional
        Forces re-calculation of feature columns even if they
        already exist (the default is False)

    Returns
    -------
    pd.DataFrame
        Copy of the dataframe with the additional numeric features

    Notes
    -----
    Features added:

    - processNameLen: length of process file name (inc path)
    - processNameTokens: the number of elements in the path
    - processName: the process file name (minus path)
    - commandlineTokens: number of space-separated tokens in the command line
    - commandlineLen: length of the command line
    - commandlineLogLen: log10 length of commandline
    - isSystemSession: 1 if session Id is 0x3e7 for Windows or -1 for Linux
    - commandlineTokensFull: counts number of token separators in commandline
      [\\s\-\\/\.,"\'\|&:;%$()]
    - pathScore: sum of ord() value of characters in path
    - pathLogScore: log10 of pathScore
    - commandlineScore: sum of ord() value of characters in commandline
    - commandlineLogScore: log10 of commandlineScore
    """
    feature_df = input_frame.copy()

    has_proc_name = "NewProcessName" in feature_df
    has_cmd_line = "CommandLine" in feature_df

    # Replace NaN with empty strings so the string features are computable
    if has_proc_name and has_cmd_line:
        text_cols = ["NewProcessName", "CommandLine"]
        feature_df[text_cols] = feature_df[text_cols].fillna(value="")

    # Infer the path separator from a small sample when not supplied
    if path_separator is None:
        sample_rows = feature_df.head(10)
        linux_style = len(sample_rows[sample_rows["NewProcessName"].str.contains("/")])
        path_separator = "/" if linux_style else "\\"

    # Derive features from process name and command line
    if has_proc_name:
        _add_processname_features(feature_df, force, path_separator)
    if has_cmd_line:
        _add_commandline_features(feature_df, force)

    if "SubjectLogonId" in feature_df and (
        "isSystemSession" not in feature_df or force
    ):
        feature_df["isSystemSession"] = feature_df["SubjectLogonId"].isin(
            ["0x3e7", "-1"]
        )

    return feature_df
def _add_processname_features(
    output_df: pd.DataFrame, force: bool, path_separator: str
):
    """
    Add process name default features.

    Parameters
    ----------
    output_df : pd.DataFrame
        The dataframe the derived columns are added to (modified in place)
    force : bool
        If True overwrite existing feature columns
    path_separator : str
        Path separator for OS
    """
    def _needs(col_name: str) -> bool:
        # A column is (re)computed when it is absent or when force is set.
        return force or col_name not in output_df

    if _needs("processName"):
        # File name is the last path element of the full process path.
        output_df["processName"] = output_df["NewProcessName"].apply(
            lambda proc_path: proc_path.split(path_separator)[-1]
        )
    if _needs("pathScore"):
        output_df["pathScore"] = output_df["NewProcessName"].apply(char_ord_score)
    if _needs("pathLogScore"):
        # Guard against log10(0) for empty paths.
        output_df["pathLogScore"] = output_df["pathScore"].apply(
            lambda score: log10(score) if score else 0
        )
    if _needs("pathHash"):
        output_df["pathHash"] = output_df["NewProcessName"].apply(crc32_hash)
def _add_commandline_features(output_df: pd.DataFrame, force: bool):
    """
    Add commandline default features.

    Parameters
    ----------
    output_df : pd.DataFrame
        The dataframe the derived columns are added to (modified in place)
    force : bool
        If True overwrite existing feature columns
    """
    cmd_lines = output_df["CommandLine"]
    if force or "commandlineLen" not in output_df:
        output_df["commandlineLen"] = cmd_lines.apply(len)
    if force or "commandlineLogLen" not in output_df:
        # Guard against log10(0) for empty command lines.
        output_df["commandlineLogLen"] = output_df["commandlineLen"].apply(
            lambda length: log10(length) if length else 0
        )
    if force or "commandlineTokensFull" not in output_df:
        output_df["commandlineTokensFull"] = cmd_lines.apply(delim_count)
    if force or "commandlineScore" not in output_df:
        output_df["commandlineScore"] = cmd_lines.apply(char_ord_score)
    if force or "commandlineTokensHash" not in output_df:
        output_df["commandlineTokensHash"] = cmd_lines.apply(delim_hash)
@export
@lru_cache(maxsize=1024)
def delim_count(value: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]') -> int:
    r"""
    Count the delimiter characters in a string.

    Parameters
    ----------
    value : str
        Data to process
    delim_list : str, optional
        Regular expression character class of delimiters to count
        (the default matches whitespace and common punctuation characters)

    Returns
    -------
    int
        Count of delimiters in the string.

    """
    # finditer avoids materializing the match list; the pattern has no
    # groups, so the match count equals len(re.findall(...)).
    return sum(1 for _ in re.finditer(delim_list, value))
@export
@lru_cache(maxsize=1024)
def delim_hash(value: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]') -> int:
    r"""
    Return a CRC32 hash of the delimiter characters found in a string.

    Parameters
    ----------
    value : str
        Data to process
    delim_list : str, optional
        Regular expression character class of delimiters to extract
        (the default matches whitespace and common punctuation characters)

    Returns
    -------
    int
        Hash of delimiter set in the string.

    """
    # Concatenate just the delimiter characters and hash their UTF-8 bytes.
    delimiters_only = "".join(re.findall(delim_list, value))
    return crc32(delimiters_only.encode("utf-8"))
@export
@lru_cache(maxsize=1024)
def char_ord_score(value: str, scale: int = 1) -> int:
    """
    Return the (scaled) sum of ord values of characters in a string.

    Parameters
    ----------
    value : str
        Data to process
    scale : int, optional
        reduce the scale of the feature (reducing the
        influence of variations in this feature on the
        clustering algorithm) (the default is 1)

    Returns
    -------
    int
        Floor of the summed ordinal values divided by `scale`.

    Notes
    -----
    This function sums the ordinal value of each character in the
    input string. Two strings with minor differences will result in
    a similar score. However, for strings with highly variable content
    (e.g. command lines or http requests containing GUIDs) this may result
    in too much variance to be useful when you are trying to detect
    similar patterns. You can use the scale parameter to reduce the
    influence of features using this function on clustering and anomaly
    algorithms.

    """
    return floor(sum(map(ord, value)) / scale)
@export
@lru_cache(maxsize=1024)
def token_count(value: str, delimiter: str = " ") -> int:
    """
    Return the number of delimiter-separated tokens in a string.

    Parameters
    ----------
    value : str
        Data to process
    delimiter : str, optional
        Delimiter used to split the string.
        (the default is ' ')

    Returns
    -------
    int
        count of tokens

    """
    tokens = value.split(delimiter)
    return len(tokens)
def _string_score(input_str):
"""Sum the ord(c) for characters in a string."""
return sum(ord(x) for x in input_str)
@export
@lru_cache(maxsize=1024)
def crc32_hash(value: str) -> int:
    """
    Return the CRC32 hash of the input string.

    Parameters
    ----------
    value : str
        Data to process

    Returns
    -------
    int
        CRC32 hash of the UTF-8 encoding of `value`.

    """
    # str.encode() already returns bytes; the original wrapped it in a
    # redundant bytes() call that just made an extra copy.
    return crc32(value.encode("utf-8"))
@export
def delim_count_df(
    data: pd.DataFrame, column: str, delim_list: str = r'[\s\-\\/\.,"\'|&:;%$()]'
) -> pd.Series:
    r"""
    Count delimiter characters in each value of a DataFrame column.

    Parameters
    ----------
    data : pd.DataFrame
        The DataFrame to process
    column : str
        The name of the column to process
    delim_list : str, optional
        Regular expression character class of delimiters to count
        (the default matches whitespace and common punctuation characters)

    Returns
    -------
    pd.Series
        Count of delimiters in the string in `column`.

    """
    # Vectorized regex count over the whole column.
    target_col = data[column]
    return target_col.str.count(delim_list)
@export
def char_ord_score_df(data: pd.DataFrame, column: str, scale: int = 1) -> pd.Series:
    """
    Return the (scaled) sum of ord values of characters per row.

    Parameters
    ----------
    data : pd.DataFrame
        The DataFrame to process
    column : str
        Column name to process
    scale : int, optional
        reduce the scale of the feature (reducing the
        influence of variations in this feature on the
        clustering algorithm) (the default is 1)

    Returns
    -------
    pd.Series
        The sum of the ordinal values of the characters
        in `column`.

    Notes
    -----
    This function sums the ordinal value of each character in the
    input string. Two strings with minor differences will result in
    a similar score. However, for strings with highly variable content
    (e.g. command lines or http requests containing GUIDs) this may result
    in too much variance to be useful when you are trying to detect
    similar patterns. You can use the scale parameter to reduce the
    influence of features using this function on clustering and anomaly
    algorithms.

    """
    # NOTE: unlike char_ord_score(), this deliberately keeps the original
    # behavior of returning the unfloored quotient.
    return data.apply(lambda row: sum(map(ord, row[column])) / scale, axis=1)
@export
def token_count_df(data: pd.DataFrame, column: str, delimiter: str = " ") -> pd.Series:
    """
    Return the number of delimiter-separated tokens per row of a column.

    Parameters
    ----------
    data : pd.DataFrame
        The DataFrame to process
    column : str
        Column name to process
    delimiter : str, optional
        Delimiter used to split the column string.
        (the default is ' ')

    Returns
    -------
    pd.Series
        count of tokens in strings in `column`

    """
    def _count_tokens(row):
        return len(row[column].split(delimiter))

    return data.apply(_count_tokens, axis=1)
@export
def crc32_hash_df(data: pd.DataFrame, column: str) -> pd.Series:
    """
    Return the CRC32 hash of each value in the input column.

    Parameters
    ----------
    data : pd.DataFrame
        The DataFrame to process
    column : str
        Column name to process

    Returns
    -------
    pd.Series
        CRC32 hash of input column

    """
    # str.encode() already returns bytes; the original wrapped it in a
    # redundant bytes() call that just made an extra copy per row.
    return data.apply(lambda x: crc32(x[column].encode("utf-8")), axis=1)
# pylint: disable=too-many-arguments, too-many-statements
@export  # noqa: C901, MC0001
def plot_cluster(
    db_cluster: DBSCAN,
    data: pd.DataFrame,
    x_predict: np.ndarray,
    plot_label: str = None,
    plot_features: Tuple[int, int] = (0, 1),
    verbose: bool = False,
    cut_off: int = 3,
    xlabel: str = None,
    ylabel: str = None,
):
    """
    Plot clustered data as scatter chart.

    Parameters
    ----------
    db_cluster : DBSCAN
        DBScan Cluster (from SkLearn DBSCAN).
    data : pd.DataFrame
        Dataframe containing original data.
    x_predict : np.ndarray
        The DBSCAN predict numpy array
    plot_label : str, optional
        If set the column to use to label data points
        (the default is None)
    plot_features : Tuple[int, int], optional
        Which two features in x_predict to plot (the default is (0, 1))
    verbose : bool, optional
        Verbose execution with some extra info
        (the default is False)
    cut_off : int, optional
        The cluster size below which items are considered outliers
        (the default is 3)
    xlabel : str, optional
        x-axis label (the default is None)
    ylabel : str, optional
        y-axis label (the default is None)
    """
    # Validate that the requested feature columns exist and are distinct.
    max_idx = x_predict.shape[1] - 1
    if plot_features[0] >= x_predict.shape[1]:
        raise ValueError(
            "plot_features[0] index must be a value from 0 to {}.".format(max_idx)
        )
    if plot_features[1] >= x_predict.shape[1]:
        raise ValueError(
            "plot_features[1] index must be a value from 0 to {}.".format(max_idx)
        )
    if plot_features[0] == plot_features[1]:
        mssg = "plot_features indexes must be 2 different values in range 0 to"
        raise ValueError(mssg + f" {max_idx}.")
    labels = db_cluster.labels_
    core_samples_mask = np.zeros_like(labels, dtype=bool)
    # pylint: disable=unsupported-assignment-operation
    # (assignment of numpy array is valid)
    core_samples_mask[db_cluster.core_sample_indices_] = True
    unique_labels = set(labels)
    # pylint: disable=no-member
    # Spectral color map does exist
    colors = [cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))]
    # Number of clusters in labels, ignoring noise if present.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    n_noise_ = list(labels).count(-1)
    label_values, counts = np.unique(labels, return_counts=True)
    # BUG FIX: the original indexed `counts` directly with the cluster id
    # (`counts[cluster_id]`). np.unique returns the sorted unique labels,
    # so when noise (-1) is present counts[0] is the noise count and every
    # cluster id's size was off by one (and cluster_id == -1 wrapped around
    # to the last cluster's count). Build an explicit label -> size map.
    cluster_sizes = dict(zip(label_values, counts))
    if verbose:
        print("Estimated number of clusters: %d" % n_clusters_)
        print("Estimated number of noise points: %d" % n_noise_)
        # print("Silhouette Coefficient: %0.3f"
        #       % metrics.silhouette_score(x_predict, labels))
    # Only use plot_label when data is a DataFrame that has that column.
    if (
        not isinstance(data, pd.DataFrame)
        or plot_label is not None
        and plot_label not in data
    ):
        plot_label = None
    p_label = None
    for cluster_id, color in zip(unique_labels, colors):
        if cluster_id == -1:
            # Black used for noise.
            color = [0, 0, 0, 1]
        class_member_mask = labels == cluster_id
        cluster_size = cluster_sizes[cluster_id]
        marker_size = cluster_size
        marker = "o"
        font_size = "small"
        alpha = 0.4
        if cluster_size < cut_off:
            # Outliers/small clusters: highlight with a large cross marker.
            marker = "+"
            marker_size = 10
            font_size = "large"
            alpha = 1.0
        xy_pos = x_predict[class_member_mask & core_samples_mask]
        plt.plot(
            xy_pos[:, plot_features[0]],
            xy_pos[:, plot_features[1]],
            marker,
            markerfacecolor=tuple(color),
            markersize=marker_size,
        )
        if plot_label:
            first_row = data[class_member_mask].iloc[0]
            if not first_row.empty and plot_label in first_row:
                p_label = first_row[plot_label]
                try:
                    plt.annotate(
                        p_label,
                        xy=(xy_pos[0, plot_features[0]], xy_pos[0, plot_features[1]]),
                        fontsize=font_size,
                        alpha=alpha,
                    )
                except IndexError:
                    # Noise has no core samples, so xy_pos may be empty.
                    pass
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.title("Estimated number of clusters: %d" % n_clusters_)
    plt.show()
    return plt
| msticpy/analysis/eventcluster.py | 23,231 | Add commandline default features.
Parameters
----------
output_df : pd.DataFrame
The dataframe to add features to
force : bool
If True overwrite existing feature columns
Add process name default features.
Parameters
----------
output_df : pd.DataFrame
The dataframe to add features to
force : bool
If True overwrite existing feature columns
path_separator : str
Path separator for OS
Merge outliers and core clusters into single DataFrame.
Parameters
----------
cluster_set : np.array
The set of clusters
labels : np.array
The cluster labels
data : Union[pd.DataFrame, np.array]
The source data
time_column : str
Name of the Time column
counts : np.array
The counts of members in each cluster
Returns
-------
pd.DataFrame
Merged dataframe
Sum the ord(c) for characters in a string.
Add numerical features based on patterns of command line and process name.
Parameters
----------
input_frame : pd.DataFrame
The input dataframe
path_separator : str, optional
Path separator. If not supplied, try to determine
from 'NewProcessName' column of first 10 rows
(the default is None)
force : bool, optional
Forces re-calculation of feature columns even if they
already exist (the default is False)
Returns
-------
pd.DataFrame
Copy of the dataframe with the additional numeric features
Notes
-----
Features added:
- processNameLen: length of process file name (inc path)
- processNameTokens: the number of elements in the path
- processName: the process file name (minus path)
- commandlineTokens: number of space-separated tokens in the command line
- commandlineLen: length of the command line
- commandlineLogLen: log10 length of commandline
- isSystemSession: 1 if session Id is 0x3e7 for Windows or -1 for Linux
- commandlineTokensFull: counts number of token separators in commandline
[\\s\-\\/\.,"\'\|&:;%$()]
- pathScore: sum of ord() value of characters in path
- pathLogScore: log10 of pathScore
- commandlineScore: sum of ord() value of characters in commandline
- commandlineLogScore: log10 of commandlineScore
Return sum of ord values of characters in string.
Parameters
----------
value : str
Data to process
scale : int, optional
reduce the scale of the feature (reducing the
influence of variations in this feature on the
clustering algorithm) (the default is 1)
Returns
-------
int
Sum of the ordinal values of the characters in `value`.
Notes
-----
This function sums the ordinal value of each character in the
input string. Two strings with minor differences will result in
a similar score. However, for strings with highly variable content
(e.g. command lines or http requests containing GUIDs) this may result
in too much variance to be useful when you are trying to detect
similar patterns. You can use the scale parameter to reduce the
influence of features using this function on clustering and anomaly
algorithms.
Return sum of ord values of characters in string.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
scale : int, optional
reduce the scale of the feature (reducing the
influence of variations in this feature on the
clustering algorithm) (the default is 1)
Returns
-------
pd.Series
The sum of the ordinal values of the characters
in `column`.
Notes
-----
This function sums the ordinal value of each character in the
input string. Two strings with minor differences will result in
a similar score. However, for strings with highly variable content
(e.g. command lines or http requests containing GUIDs) this may result
in too much variance to be useful when you are trying to detect
similar patterns. You can use the scale parameter to reduce the
influence of features using this function on clustering and anomaly
algorithms.
Return the CRC32 hash of the input column.
Parameters
----------
value : str
Data to process
Returns
-------
int
CRC32 hash
Return the CRC32 hash of the input column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
Returns
-------
pd.Series
CRC32 hash of input column
Cluster data set according to cluster_columns features.
Parameters
----------
data : Any
Input data as a pandas DataFrame or numpy array
cluster_columns : List[Any], optional
List of columns to use for features
- for DataFrame this is a list of column names
- for numpy array this is a list of column indexes
verbose : bool, optional
Print additional information about clustering results (the default is False)
normalize : bool, optional
Normalize the input data (should probably always be True)
time_column : str, optional
If there is a time column the output data will be ordered by this
(the default is 'TimeCreatedUtc')
max_cluster_distance : float, optional
DBSCAN eps (max cluster member distance) (the default is 0.01)
min_cluster_samples : int, optional
DBSCAN min_samples (the minimum cluster size) (the default is 2)
Other Parameters
----------------
kwargs: Other arguments are passed to DBSCAN constructor
Returns
-------
Tuple[pd.DataFrame, DBSCAN, np.ndarray]
Output dataframe with clustered rows
DBSCAN model
Normalized data set
Count the delimiters in input column.
Parameters
----------
value : str
Data to process
delim_list : str, optional
delimiters to use. (the default is r'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]')
Returns
-------
int
Count of delimiters in the string.
Count the delimiters in input column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
The name of the column to process
delim_list : str, optional
delimiters to use. (the default is r\'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]\')
Returns
-------
pd.Series
Count of delimiters in the string in `column`.
Return a hash (CRC32) of the delimiters from input column.
Parameters
----------
value : str
Data to process
delim_list : str, optional
delimiters to use. (the default is r'[\\s\\\\-\\\\\\\\/\.,"\\\\'|&:;%$()]')
Returns
-------
int
Hash of delimiter set in the string.
Plot clustered data as scatter chart.
Parameters
----------
db_cluster : DBSCAN
DBScan Cluster (from SkLearn DBSCAN).
data : pd.DataFrame
Dataframe containing original data.
x_predict : np.ndarray
The DBSCAN predict numpy array
plot_label : str, optional
If set the column to use to label data points
(the default is None)
plot_features : Tuple[int, int], optional
Which two features in x_predict to plot (the default is (0, 1))
verbose : bool, optional
Verbose execution with some extra info
(the default is False)
cut_off : int, optional
The cluster size below which items are considered outliers
(the default is 3)
xlabel : str, optional
x-axis label (the default is None)
ylabel : str, optional
y-axis label (the default is None)
Return count of delimiter-separated tokens pd.Series column.
Parameters
----------
value : str
Data to process
delimiter : str, optional
Delimiter used to split the column string.
(the default is ' ')
Returns
-------
int
count of tokens
Return count of delimiter-separated tokens pd.Series column.
Parameters
----------
data : pd.DataFrame
The DataFrame to process
column : str
Column name to process
delimiter : str, optional
Delimiter used to split the column string.
(the default is ' ')
Returns
-------
pd.Series
count of tokens in strings in `column`
eventcluster module.
This module is intended to be used to summarize large numbers of events
into clusters of different patterns. High volume repeating events can
often make it difficult to see unique and interesting items.
The module contains functions to generate clusterable features from
string data. For example, an administration command that does some
maintenance on thousands of servers with a commandline such as:
``install-update -hostname {host.fqdn} -tmp:/tmp/{GUID}/rollback``\ can
be collapsed into a single cluster pattern by ignoring the character
values in the string and using delimiters or tokens to group the values.
This is an unsupervised learning module implemented using SciKit Learn
DBScan.
Contains:
dbcluster_events: generic clustering method using DBSCAN designed to summarize
process events and other similar data by grouping on common features.
add_process_features: derives numerical features from text features such as
commandline and process path.
------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. -------------------------------------------------------------------------- pylint: disable=too-many-arguments, too-many-locals Create DBSCAN cluster object Normalize the data (most clustering algorithms don't do well with unnormalized data) fit the data set Iterate through clusters, adding exemplar to output frame pylint: disable=consider-using-enumerate we need to know the index of the item within the loop 'Noise' events are individual items that could not be assigned to a cluster and so are unique Otherwise, just choose the first example of the cluster set pylint: enable=consider-using-enumerate Set any NaN values to empty string try to determine the path separator Create features from process name and command line pylint: disable=too-many-arguments, too-many-statements noqa: C901, MC0001 pylint: disable=unsupported-assignment-operation (assignment of numpy array is valid) pylint: disable=no-member Spectral color map does exist Number of clusters in labels, ignoring noise if present. print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(x_predict, labels)) Black used for noise. | 9,842 | en | 0.629022 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
import argparse
import platform
import subprocess
import sys
import os
import errno
import stat
import gzip
import six.moves.urllib as urllib
from pkg_resources import parse_version
# Known artifact-index URL templates, keyed by scheme name. Each template is
# completed with the %(branch_name)s / %(arch_string)s / %(artifact_name)s
# substitutions in get_tc_url().
DEFAULT_SCHEMES = {
    'deepspeech': 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.deepspeech.native_client.%(branch_name)s.%(arch_string)s/artifacts/public/%(artifact_name)s',
    'tensorflow': 'https://community-tc.services.mozilla.com/api/index/v1/task/project.deepspeech.tensorflow.pip.%(branch_name)s.%(arch_string)s/artifacts/public/%(artifact_name)s'
}
# Active URL template: overridable via the TASKCLUSTER_SCHEME environment
# variable, and switched at runtime by the --source command-line option.
TASKCLUSTER_SCHEME = os.getenv('TASKCLUSTER_SCHEME', DEFAULT_SCHEMES['deepspeech'])
def get_tc_url(arch_string, artifact_name='native_client.tar.xz', branch_name='master'):
    """Build the TaskCluster artifact URL for the given arch/artifact/branch."""
    # All three substitution values must be present and non-empty.
    assert arch_string is not None
    assert artifact_name is not None
    assert artifact_name
    assert branch_name is not None
    assert branch_name
    substitutions = {
        'arch_string': arch_string,
        'artifact_name': artifact_name,
        'branch_name': branch_name,
    }
    return TASKCLUSTER_SCHEME % substitutions
def maybe_download_tc(target_dir, tc_url, progress=True):
    """Download tc_url into target_dir unless the file is already present.

    Returns the local path of the (possibly pre-existing) file. If the
    server responded with gzip Content-Encoding, the downloaded file is
    decompressed in place.
    """
    def report_progress(count, block_size, total_size):
        # urlretrieve reporthook: prints a single updating percentage line.
        percent = (count * block_size * 100) // total_size
        sys.stdout.write("\rDownloading: %d%%" % percent)
        sys.stdout.flush()
        if percent >= 100:
            print('\n')
    assert target_dir is not None
    target_dir = os.path.abspath(target_dir)
    try:
        os.makedirs(target_dir)
    except OSError as e:
        # An already-existing directory is fine; re-raise anything else.
        if e.errno != errno.EEXIST:
            raise e
    assert os.path.isdir(os.path.dirname(target_dir))
    tc_filename = os.path.basename(tc_url)
    target_file = os.path.join(target_dir, tc_filename)
    is_gzip = False
    if not os.path.isfile(target_file):
        print('Downloading %s ...' % tc_url)
        _, headers = urllib.request.urlretrieve(tc_url, target_file, reporthook=(report_progress if progress else None))
        # Some servers compress on the fly; remember so we can inflate below.
        is_gzip = headers.get('Content-Encoding') == 'gzip'
    else:
        print('File already exists: %s' % target_file)
    if is_gzip:
        # Decompress in place: read everything, rewind, overwrite, then
        # truncate any leftover compressed tail.
        with open(target_file, "r+b") as frw:
            decompressed = gzip.decompress(frw.read())
            frw.seek(0)
            frw.write(decompressed)
            frw.truncate()
    return target_file
def maybe_download_tc_bin(**kwargs):
    """Fetch a binary artifact via maybe_download_tc and set its exec bit."""
    local_path = maybe_download_tc(
        kwargs['target_dir'], kwargs['tc_url'], kwargs['progress']
    )
    current_mode = os.stat(local_path).st_mode
    os.chmod(local_path, current_mode | stat.S_IEXEC)
def read(fname):
    """Return the text contents of *fname*, resolved relative to this script.

    The original leaked the file handle (open() with no close); a context
    manager closes it deterministically.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as fobj:
        return fobj.read()
def main():
    """Parse command-line options and download/print TaskCluster artifacts."""
    parser = argparse.ArgumentParser(description='Tooling to ease downloading of components from TaskCluster.')
    parser.add_argument('--target', required=False,
                        help='Where to put the native client binary files')
    parser.add_argument('--arch', required=False,
                        help='Which architecture to download binaries for. "arm" for ARM 7 (32-bit), "arm64" for ARM64, "gpu" for CUDA enabled x86_64 binaries, "cpu" for CPU-only x86_64 binaries, "osx" for CPU-only x86_64 OSX binaries. Optional ("cpu" by default)')
    parser.add_argument('--artifact', required=False,
                        default='native_client.tar.xz',
                        help='Name of the artifact to download. Defaults to "native_client.tar.xz"')
    parser.add_argument('--source', required=False, default=None,
                        help='Name of the TaskCluster scheme to use.')
    parser.add_argument('--branch', required=False,
                        help='Branch name to use. Defaulting to current content of VERSION file.')
    parser.add_argument('--decoder', action='store_true',
                        help='Get URL to ds_ctcdecoder Python package.')
    args = parser.parse_args()
    # --decoder only prints a URL; every other mode needs a download target.
    if not args.target and not args.decoder:
        print('Pass either --target or --decoder.')
        exit(1)
    # Probe the host so a sensible default --arch can be chosen.
    is_arm = 'arm' in platform.machine()
    is_mac = 'darwin' in sys.platform
    is_64bit = sys.maxsize > (2**31 - 1)
    # Narrow (UCS-2) Python builds need the "mu" ABI tag for wheels.
    is_ucs2 = sys.maxunicode < 0x10ffff
    if not args.arch:
        if is_arm:
            args.arch = 'arm64' if is_64bit else 'arm'
        elif is_mac:
            args.arch = 'osx'
        else:
            args.arch = 'cpu'
    if not args.branch:
        # Default branch is "v<VERSION>" read from the repo's VERSION file.
        version_string = read('../VERSION').strip()
        ds_version = parse_version(version_string)
        args.branch = "v{}".format(version_string)
    else:
        ds_version = parse_version(args.branch)
    if args.decoder:
        # Assemble the wheel filename for ds_ctcdecoder matching this host's
        # platform / Python ABI, print its URL, and exit without downloading.
        plat = platform.system().lower()
        arch = platform.machine()
        if plat == 'linux' and arch == 'x86_64':
            plat = 'manylinux1'
        if plat == 'darwin':
            plat = 'macosx_10_10'
        m_or_mu = 'mu' if is_ucs2 else 'm'
        pyver = ''.join(map(str, sys.version_info[0:2]))
        artifact = "ds_ctcdecoder-{ds_version}-cp{pyver}-cp{pyver}{m_or_mu}-{platform}_{arch}.whl".format(
            ds_version=ds_version,
            pyver=pyver,
            m_or_mu=m_or_mu,
            platform=plat,
            arch=arch
        )
        ctc_arch = args.arch + '-ctc'
        print(get_tc_url(ctc_arch, artifact, args.branch))
        exit(0)
    if args.source is not None:
        # Swap the module-level URL template if a known scheme was named.
        if args.source in DEFAULT_SCHEMES:
            global TASKCLUSTER_SCHEME
            TASKCLUSTER_SCHEME = DEFAULT_SCHEMES[args.source]
        else:
            print('No such scheme: %s' % args.source)
            exit(1)
    maybe_download_tc(target_dir=args.target, tc_url=get_tc_url(args.arch, args.artifact, args.branch))
    if args.artifact == "convert_graphdef_memmapped_format":
        # This artifact is a bare executable: set its exec bit.
        convert_graph_file = os.path.join(args.target, args.artifact)
        final_stat = os.stat(convert_graph_file)
        os.chmod(convert_graph_file, final_stat.st_mode | stat.S_IEXEC)
    if '.tar.' in args.artifact:
        # Unpack tarball artifacts into the target directory.
        subprocess.check_call(['tar', 'xvf', os.path.join(args.target, args.artifact), '-C', args.target])
if __name__ == '__main__':
    main()
| oscarlm/taskcluster.py | 6,165 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
# -*- coding: utf-8 -*-
"""
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
    :license: Apache Software License, see LICENSE for details.
"""
import time
import webapp2
from webapp2_extras import security
from webapp2_extras import sessions
#: Default configuration values for this module. Keys are:
#:
#: user_model
#: User model which authenticates custom users and tokens.
#: Can also be a string in dotted notation to be lazily imported.
#: Default is :class:`webapp2_extras.appengine.auth.models.User`.
#:
#: session_backend
#: Name of the session backend to be used. Default is `securecookie`.
#:
#: cookie_name
#: Name of the cookie to save the auth session. Default is `auth`.
#:
#: token_max_age
#: Number of seconds of inactivity after which an auth token is
#: invalidated. The same value is used to set the ``max_age`` for
#: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).
#:
#: token_new_age
#: Number of seconds after which a new token is written to the database.
#: Use this to limit database writes; set to None to write on all requests.
#: Default is 86400 (1 day).
#:
#: token_cache_age
#: Number of seconds after which a token must be checked in the database.
#: Use this to limit database reads; set to None to read on all requests.
#: Default is 3600 (1 hour).
#:
#: user_attributes
#: A list of extra user attributes to be stored in the session.
#:     The user object must provide all of them as attributes.
#: Default is an empty list.
default_config = {
    'user_model': 'webapp2_extras.appengine.auth.models.User',
    'session_backend': 'securecookie',
    'cookie_name': 'auth',
    'token_max_age': 86400 * 7 * 3,  # 3 weeks
    'token_new_age': 86400,  # 1 day
    'token_cache_age': 3600,  # 1 hour
    'user_attributes': [],
}
#: Internal sentinel flag for anonymous users.
_anon = object()
class AuthError(Exception):
    """Base exception for authentication errors raised by this module."""
class InvalidAuthIdError(AuthError):
    """Raised when no user can be fetched for the given auth_id."""
class InvalidPasswordError(AuthError):
    """Raised when a user's password doesn't match the stored one."""
class AuthStore(object):
"""Provides common utilities and configuration for :class:`Auth`."""
#: Configuration key.
config_key = __name__
#: Required attributes stored in a session.
_session_attributes = ['user_id', 'remember',
'token', 'token_ts', 'cache_ts']
def __init__(self, app, config=None):
"""Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
"""
self.app = app
# Base configuration.
self.config = app.config.load_config(self.config_key,
default_values=default_config, user_values=config)
# User data we're interested in -------------------------------------------
@webapp2.cached_property
def session_attributes(self):
"""The list of attributes stored in a session.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self._session_attributes + self.user_attributes
return [a for a in attrs if a not in seen and not seen.add(a)]
@webapp2.cached_property
def user_attributes(self):
"""The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
"""
seen = set()
attrs = self.config['user_attributes']
return [a for a in attrs if a not in seen and not seen.add(a)]
# User model related ------------------------------------------------------
@webapp2.cached_property
def user_model(self):
"""Configured user model."""
cls = self.config['user_model']
if isinstance(cls, str):
cls = self.config['user_model'] = webapp2.import_string(cls)
return cls
def user_to_dict(self, user):
"""Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance the custom user model.
:returns:
A dictionary with user data.
"""
if not user:
return None
user_dict = dict((a, getattr(user, a)) for a in self.user_attributes)
user_dict['user_id'] = user.key.id()
return user_dict
def get_user_by_auth_password(self, auth_id, password, silent=False):
"""Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, raises an exception if auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
"""
try:
user = self.user_model.get_by_auth_password(auth_id, password)
return self.user_to_dict(user)
except (InvalidAuthIdError, InvalidPasswordError):
if not silent:
raise
return None
def get_user_by_auth_token(self, user_id, token):
"""Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
"""
user, ts = self.user_model.get_by_auth_token(user_id, token)
return self.user_to_dict(user), ts
def create_auth_token(self, user_id):
"""Creates a new authentication token.
:param user_id:
Authentication id.
:returns:
A new authentication token.
"""
return self.user_model.create_auth_token(user_id)
def delete_auth_token(self, user_id, token):
"""Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
"""
return self.user_model.delete_auth_token(user_id, token)
# Session related ---------------------------------------------------------
def get_session(self, request):
"""Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
"""
store = sessions.get_store(request=request)
return store.get_session(self.config['cookie_name'],
backend=self.config['session_backend'])
def serialize_session(self, data):
"""Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
"""
assert len(data) == len(self.session_attributes)
return [data.get(k) for k in self.session_attributes]
def deserialize_session(self, data):
"""Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
"""
assert len(data) == len(self.session_attributes)
return dict(list(zip(self.session_attributes, data)))
# Validators --------------------------------------------------------------
def set_password_validator(self, func):
"""Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
"""
self.validate_password = func.__get__(self, self.__class__)
def set_token_validator(self, func):
"""Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
"""
self.validate_token = func.__get__(self, self.__class__)
def default_password_validator(self, auth_id, password, silent=False):
    """Validates a password.

    Passwords are used to log-in using forms or to request auth tokens
    from services.

    :param auth_id:
        Authentication id.
    :param password:
        Password to be checked.
    :param silent:
        Forwarded to ``get_user_by_auth_password()``. NOTE(review): the
        previous doc said True *raises*; the parameter name suggests the
        opposite — that True suppresses the errors and returns None —
        confirm against the store's ``get_user_by_auth_password()``.
    :returns:
        user or None
    :raises:
        ``InvalidAuthIdError`` or ``InvalidPasswordError``.
    """
    # Pure delegation: all real work happens in get_user_by_auth_password().
    return self.get_user_by_auth_password(auth_id, password, silent=silent)
def default_token_validator(self, user_id, token, token_ts=None):
    """Validates a token.

    Tokens are random strings used to authenticate temporarily. They are
    used to validate sessions or service requests.

    :param user_id:
        User id.
    :param token:
        Token to be checked.
    :param token_ts:
        Optional token timestamp used to pre-validate the token age.
    :returns:
        A tuple ``(user_dict, token)``. Both are None when the token is
        expired; only ``token`` is None when the token is valid but due
        for renewal, or when no user matched it.
    """
    now = int(time.time())
    # Pre-validate using the caller-supplied timestamp, avoiding a fetch
    # when the token is already known to be too old.
    delete = token_ts and ((now - token_ts) > self.config['token_max_age'])
    create = False

    if not delete:
        # Try to fetch the user.
        user, ts = self.get_user_by_auth_token(user_id, token)
        if user:
            # Now validate the real timestamp.
            delete = (now - ts) > self.config['token_max_age']
            create = (now - ts) > self.config['token_new_age']

    # NOTE: when the pre-validation above set `delete`, `user` was never
    # fetched — the `or` short-circuits before `not user` is evaluated,
    # and `user` is assigned None inside the branch below.
    if delete or create or not user:
        if delete or create:
            # Delete token from db.
            self.delete_auth_token(user_id, token)

            if delete:
                user = None

        token = None

    return user, token
# Default validators. Instances may replace these at runtime via
# set_password_validator() / set_token_validator().
validate_password = default_password_validator
validate_token = default_token_validator
class Auth(object):
    """Authentication provider for a single request."""

    #: A :class:`webapp2.Request` instance.
    request = None
    #: An :class:`AuthStore` instance.
    store = None
    #: Cached user for the request. ``None`` means "not resolved yet";
    #: the module-level ``_anon`` sentinel means "resolved as anonymous".
    _user = None

    def __init__(self, request):
        """Initializes the auth provider for a request.

        :param request:
            A :class:`webapp2.Request` instance.
        """
        self.request = request
        self.store = get_store(app=request.app)

    # Retrieving a user -------------------------------------------------------

    def _user_or_none(self):
        # Map the internal ``_anon`` sentinel back to ``None`` for callers.
        return self._user if self._user is not _anon else None

    def get_user_by_session(self, save_session=True):
        """Returns a user based on the current session.

        :param save_session:
            If True, saves the user in the session if authentication succeeds.
        :returns:
            A user dict or None.
        """
        if self._user is None:
            # Pop the session data; on success get_user_by_token() below
            # re-saves it (when save_session is True).
            data = self.get_session_data(pop=True)
            if not data:
                self._user = _anon
            else:
                self._user = self.get_user_by_token(
                    user_id=data['user_id'], token=data['token'],
                    token_ts=data['token_ts'], cache=data,
                    cache_ts=data['cache_ts'], remember=data['remember'],
                    save_session=save_session)

        return self._user_or_none()

    def get_user_by_token(self, user_id, token, token_ts=None, cache=None,
                          cache_ts=None, remember=False, save_session=True):
        """Returns a user based on an authentication token.

        :param user_id:
            User id.
        :param token:
            Authentication token.
        :param token_ts:
            Token timestamp, used to perform pre-validation.
        :param cache:
            Cached user data (from the session).
        :param cache_ts:
            Cache timestamp.
        :param remember:
            If True, saves permanent sessions.
        :param save_session:
            If True, saves the user in the session if authentication succeeds.
        :returns:
            A user dict or None.
        """
        if self._user is not None:
            # Already resolved for this request; the cached result must
            # match the credentials being asked about.
            assert (self._user is not _anon and
                    self._user['user_id'] == user_id and
                    self._user['token'] == token)
            return self._user_or_none()

        if cache and cache_ts:
            # Check if we can use the cached info.
            now = int(time.time())
            valid = (now - cache_ts) < self.store.config['token_cache_age']

            if valid and token_ts:
                valid2 = (now - token_ts) < self.store.config['token_max_age']
                valid3 = (now - token_ts) < self.store.config['token_new_age']
                valid = valid2 and valid3

            if valid:
                self._user = cache
            else:
                # Cache is stale; force a fresh cache timestamp below.
                cache_ts = None

        if self._user is None:
            # Fetch and validate the token.
            self._user, token = self.store.validate_token(user_id, token,
                                                          token_ts=token_ts)

        if self._user is None:
            self._user = _anon
        elif save_session:
            if not token:
                # The validator dropped the token; set_session() will mint
                # a new token and timestamp.
                token_ts = None

            self.set_session(self._user, token=token, token_ts=token_ts,
                             cache_ts=cache_ts, remember=remember)

        return self._user_or_none()

    def get_user_by_password(self, auth_id, password, remember=False,
                             save_session=True, silent=False):
        """Returns a user based on password credentials.

        :param auth_id:
            Authentication id.
        :param password:
            User password.
        :param remember:
            If True, saves permanent sessions.
        :param save_session:
            If True, saves the user in the session if authentication succeeds.
        :param silent:
            Forwarded to the store's password validator. NOTE(review): the
            previous doc said True *raises*; the parameter name suggests True
            suppresses the errors instead — confirm against the store.
        :returns:
            A user dict or None.
        :raises:
            ``InvalidAuthIdError`` or ``InvalidPasswordError``.
        """
        if save_session:
            # During a login attempt, invalidate current session.
            self.unset_session()

        self._user = self.store.validate_password(auth_id, password,
                                                  silent=silent)
        if not self._user:
            self._user = _anon
        elif save_session:
            # This always creates a new token with new timestamp.
            self.set_session(self._user, remember=remember)

        return self._user_or_none()

    # Storing and removing user from session ----------------------------------

    @webapp2.cached_property
    def session(self):
        """Auth session (memoized per request)."""
        return self.store.get_session(self.request)

    def set_session(self, user, token=None, token_ts=None, cache_ts=None,
                    remember=False, **session_args):
        """Saves a user in the session.

        :param user:
            A dictionary with user data.
        :param token:
            A unique token to be persisted. If None, a new one is created.
        :param token_ts:
            Token timestamp. If None, a new one is created.
        :param cache_ts:
            Token cache timestamp. If None, a new one is created.
        :param remember:
            If True, session is set to be persisted.
        :param session_args:
            Keyword arguments to set the session arguments.
        """
        now = int(time.time())
        token = token or self.store.create_auth_token(user['user_id'])
        token_ts = token_ts or now
        cache_ts = cache_ts or now
        if remember:
            max_age = self.store.config['token_max_age']
        else:
            # max_age=None makes the session cookie non-persistent.
            max_age = None

        session_args.setdefault('max_age', max_age)

        # Create a new dict or just update user?
        # We are doing the latter, and so the user dict will always have
        # the session metadata (token, timestamps etc). This is easier to test.
        # But we could store only user_id and custom user attributes instead.
        user.update({
            'token': token,
            'token_ts': token_ts,
            'cache_ts': cache_ts,
            'remember': int(remember),
        })
        self.set_session_data(user, **session_args)
        self._user = user

    def unset_session(self):
        """Removes a user from the session and invalidates the auth token."""
        self._user = None
        data = self.get_session_data(pop=True)
        if data:
            # Invalidate current token.
            self.store.delete_auth_token(data['user_id'], data['token'])

    def get_session_data(self, pop=False):
        """Returns the session data as a dictionary.

        :param pop:
            If True, removes the session.
        :returns:
            A deserialized session, or None.
        """
        func = self.session.pop if pop else self.session.get
        rv = func('_user', None)
        if rv:
            return self.store.deserialize_session(rv)
        # Implicitly returns None when there is no stored session data.

    def set_session_data(self, data, **session_args):
        """Sets the session data as a list.

        :param data:
            Deserialized session data.
        :param session_args:
            Extra arguments for the session.
        """
        self.session['_user'] = self.store.serialize_session(data)
        self.session.container.session_args.update(session_args)
# Factories -------------------------------------------------------------------

#: Key used to store :class:`AuthStore` in the app registry.
_store_registry_key = 'webapp2_extras.auth.Auth'
#: Key used to store :class:`Auth` in the request registry.
#: NOTE: same string as the store key, but it indexes the *request*
#: registry (see get_auth()) while the store key indexes the *app*
#: registry (see get_store()), so the two never collide.
_auth_registry_key = 'webapp2_extras.auth.Auth'
def get_store(factory=AuthStore, key=_store_registry_key, app=None):
    """Returns the memoized :class:`AuthStore` for an app, creating it on
    first use.

    It'll try to get it from the current app registry, and if it is not
    registered it'll be instantiated and registered. A second call to this
    function will return the same instance.

    :param factory:
        The callable used to build and register the instance if it is not yet
        registered. The default is the class :class:`AuthStore` itself.
    :param key:
        The key used to store the instance in the registry. A default is used
        if it is not set.
    :param app:
        A :class:`webapp2.WSGIApplication` instance used to store the instance.
        The active app is used if it is not set.
    """
    app = app or webapp2.get_app()
    registry = app.registry
    instance = registry.get(key)
    if not instance:
        # First request for this key: build the store and memoize it.
        instance = factory(app)
        registry[key] = instance

    return instance
def set_store(store, key=_store_registry_key, app=None):
    """Registers a pre-built :class:`AuthStore` in the app registry.

    :param store:
        An instance of :class:`AuthStore`.
    :param key:
        The key used to retrieve the instance from the registry. A default
        is used if it is not set.
    :param app:
        A :class:`webapp2.WSGIApplication` instance used to store the
        instance. The active app is used if it is not set.
    """
    target = app or webapp2.get_app()
    target.registry[key] = store
def get_auth(factory=Auth, key=_auth_registry_key, request=None):
    """Returns the memoized :class:`Auth` for a request, creating it on
    first use.

    It'll try to get it from the current request registry, and if it is not
    registered it'll be instantiated and registered. A second call to this
    function will return the same instance.

    :param factory:
        The callable used to build and register the instance if it is not yet
        registered. The default is the class :class:`Auth` itself.
    :param key:
        The key used to store the instance in the registry. A default is used
        if it is not set.
    :param request:
        A :class:`webapp2.Request` instance used to store the instance. The
        active request is used if it is not set.
    """
    request = request or webapp2.get_request()
    registry = request.registry
    instance = registry.get(key)
    if not instance:
        # First request for this key: build the provider and memoize it.
        instance = factory(request)
        registry[key] = instance

    return instance
def set_auth(auth, key=_auth_registry_key, request=None):
    """Registers a pre-built :class:`Auth` in the request registry.

    :param auth:
        An instance of :class:`Auth`.
    :param key:
        The key used to retrieve the instance from the registry. A default
        is used if it is not set.
    :param request:
        A :class:`webapp2.Request` instance used to retrieve the instance.
        The active request is used if it is not set.
    """
    target = request or webapp2.get_request()
    target.registry[key] = auth
| Webapp2_samplesite/webapp2_extras/auth.py | 21,327 | Authentication provider for a single request.
Base auth exception.
Provides common utilities and configuration for :class:`Auth`.
Raised when a user can't be fetched given an auth_id.
Raised when a user password doesn't match.
Initializes the session store.
:param app:
A :class:`webapp2.WSGIApplication` instance.
:param config:
A dictionary of configuration values to be overridden. See
the available keys in :data:`default_config`.
Initializes the auth provider for a request.
:param request:
A :class:`webapp2.Request` instance.
Creates a new authentication token.
:param user_id:
Authentication id.
:returns:
A new authentication token.
Validates a password.
Passwords are used to log-in using forms or to request auth tokens
from services.
:param auth_id:
Authentication id.
:param password:
Password to be checked.
:param silent:
If True, raises an exception if auth_id or password are invalid.
:returns:
user or None
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
Validates a token.
Tokens are random strings used to authenticate temporarily. They are
used to validate sessions or service requests.
:param user_id:
User id.
:param token:
Token to be checked.
:param token_ts:
Optional token timestamp used to pre-validate the token age.
:returns:
A tuple ``(user_dict, token)``.
Deletes an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
Deserializes values for a session.
:param data:
A list with session data.
:returns:
A dict with session data.
Returns an instance of :class:`Auth` from the request registry.
It'll try to get it from the current request registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`Auth` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param request:
A :class:`webapp2.Request` instance used to store the instance. The
active request is used if it is not set.
Returns an auth session.
:param request:
A :class:`webapp2.Request` instance.
:returns:
A session dict.
Returns the session data as a dictionary.
:param pop:
If True, removes the session.
:returns:
A deserialized session, or None.
Returns an instance of :class:`AuthStore` from the app registry.
It'll try to get it from the current app registry, and if it is not
registered it'll be instantiated and registered. A second call to this
function will return the same instance.
:param factory:
The callable used to build and register the instance if it is not yet
registered. The default is the class :class:`AuthStore` itself.
:param key:
The key used to store the instance in the registry. A default is used
if it is not set.
:param app:
A :class:`webapp2.WSGIApplication` instance used to store the instance.
The active app is used if it is not set.
Returns a user dict based on auth_id and password.
:param auth_id:
Authentication id.
:param password:
User password.
:param silent:
If True, raises an exception if auth_id or password are invalid.
:returns:
A dictionary with user data.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
Returns a user dict based on user_id and auth token.
:param user_id:
User id.
:param token:
Authentication token.
:returns:
A tuple ``(user_dict, token_timestamp)``. Both values can be None.
The token timestamp will be None if the user is invalid or it
is valid but the token requires renewal.
Returns a user based on password credentials.
:param auth_id:
Authentication id.
:param password:
User password.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:param silent:
If True, raises an exception if auth_id or password are invalid.
:returns:
A user dict or None.
:raises:
``InvalidAuthIdError`` or ``InvalidPasswordError``.
Returns a user based on the current session.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
Returns a user based on an authentication token.
:param user_id:
User id.
:param token:
Authentication token.
:param token_ts:
Token timestamp, used to perform pre-validation.
:param cache:
Cached user data (from the session).
:param cache_ts:
Cache timestamp.
:param remember:
If True, saves permanent sessions.
:param save_session:
If True, saves the user in the session if authentication succeeds.
:returns:
A user dict or None.
Serializes values for a session.
:param data:
A dict with session data.
:returns:
A list with session data.
Auth session.
The list of attributes stored in a session.
This must be an ordered list of unique elements.
Sets an instance of :class:`Auth` in the request registry.
:param auth:
An instance of :class:`Auth`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.Request` instance used to retrieve the instance. The
active request is used if it is not set.
Sets the function used to perform password validation.
:param func:
A function that receives ``(store, auth_id, password)``
and returns a user dict or None.
Saves a user in the session.
:param user:
A dictionary with user data.
:param token:
A unique token to be persisted. If None, a new one is created.
:param token_ts:
Token timestamp. If None, a new one is created.
:param cache_ts:
Token cache timestamp. If None, a new one is created.
:remember:
If True, session is set to be persisted.
:param session_args:
Keyword arguments to set the session arguments.
Sets the session data as a list.
:param data:
Deserialized session data.
:param session_args:
Extra arguments for the session.
Sets an instance of :class:`AuthStore` in the app registry.
:param store:
An instance of :class:`AuthStore`.
:param key:
The key used to retrieve the instance from the registry. A default
is used if it is not set.
:param request:
A :class:`webapp2.WSGIApplication` instance used to retrieve the
instance. The active app is used if it is not set.
Sets the function used to perform token validation.
:param func:
A function that receives ``(store, user_id, token, token_ts)``
and returns a tuple ``(user_dict, token)``.
Removes a user from the session and invalidates the auth token.
The list of attributes retrieved from the user model.
This must be an ordered list of unique elements.
Configured user model.
Returns a dictionary based on a user object.
Extra attributes to be retrieved must be set in this module's
configuration.
:param user:
User object: an instance the custom user model.
:returns:
A dictionary with user data.
webapp2_extras.auth
===================
Utilities for authentication and authorization.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
-*- coding: utf-8 -*-: Default configuration values for this module. Keys are::: user_model: User model which authenticates custom users and tokens.: Can also be a string in dotted notation to be lazily imported.: Default is :class:`webapp2_extras.appengine.auth.models.User`.:: session_backend: Name of the session backend to be used. Default is `securecookie`.:: cookie_name: Name of the cookie to save the auth session. Default is `auth`.:: token_max_age: Number of seconds of inactivity after which an auth token is: invalidated. The same value is used to set the ``max_age`` for: persistent auth sessions. Default is 86400 * 7 * 3 (3 weeks).:: token_new_age: Number of seconds after which a new token is written to the database.: Use this to limit database writes; set to None to write on all requests.: Default is 86400 (1 day).:: token_cache_age: Number of seconds after which a token must be checked in the database.: Use this to limit database reads; set to None to read on all requests.: Default is 3600 (1 hour).:: user_attributes: A list of extra user attributes to be stored in the session. The user object must provide all of them as attributes.: Default is an empty list.: Internal flag for anonymous users.: Configuration key.: Required attributes stored in a session. Base configuration. User data we're interested in ------------------------------------------- User model related ------------------------------------------------------ Session related --------------------------------------------------------- Validators -------------------------------------------------------------- Try to fetch the user. Now validate the real timestamp. Delete token from db.: A :class:`webapp2.Request` instance.: An :class:`AuthStore` instance.: Cached user for the request. Retrieving a user ------------------------------------------------------- Check if we can use the cached info. Fetch and validate the token. During a login attempt, invalidate current session. 
This always creates a new token with new timestamp. Storing and removing user from session ---------------------------------- Create a new dict or just update user? We are doing the latter, and so the user dict will always have the session metadata (token, timestamps etc). This is easier to test. But we could store only user_id and custom user attributes instead. Invalidate current token. Factories -------------------------------------------------------------------: Key used to store :class:`AuthStore` in the app registry.: Key used to store :class:`Auth` in the request registry. | 9,947 | en | 0.671951 |
from tests.common import reboot, port_toggle
import os
import time
import random
import logging
import pprint
import pytest
import json
import ptf.testutils as testutils
import ptf.mask as mask
import ptf.packet as packet
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from tests.common import reboot, port_toggle
from tests.common.helpers.assertions import pytest_require
from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError
from tests.common.fixtures.duthost_utils import backup_and_restore_config_db_on_duts
from tests.common.fixtures.ptfhost_utils import copy_arp_responder_py, run_garp_service, change_mac_addresses
from tests.common.utilities import wait_until
from tests.common.dualtor.dual_tor_mock import mock_server_base_ip_addr
from tests.common.helpers.assertions import pytest_assert
logger = logging.getLogger(__name__)
# Markers applied to every test in this module.
pytestmark = [
    pytest.mark.acl,
    pytest.mark.disable_loganalyzer,  # Disable automatic loganalyzer, since we use it for the test
    pytest.mark.topology("any"),
    pytest.mark.usefixtures('backup_and_restore_config_db_on_duts')
]
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
DUT_TMP_DIR = "acl_test_dir"  # Keep it under home dir so it persists through reboot
FILES_DIR = os.path.join(BASE_DIR, "files")
TEMPLATE_DIR = os.path.join(BASE_DIR, "templates")

ACL_TABLE_TEMPLATE = "acltb_table.j2"
ACL_REMOVE_RULES_FILE = "acl_rules_del.json"

# TODO: We really shouldn't have two separate templates for v4 and v6, need to combine them somehow
ACL_RULES_FULL_TEMPLATE = {
    "ipv4": "acltb_test_rules.j2",
    "ipv6": "acltb_v6_test_rules.j2"
}
# NOTE: range() replaces the Python-2-only xrange(); the generated tuples
# are identical and the module now imports under Python 3.
ACL_RULES_PART_TEMPLATES = {
    "ipv4": tuple("acltb_test_rules_part_{}.j2".format(i) for i in range(1, 3)),
    "ipv6": tuple("acltb_v6_test_rules_part_{}.j2".format(i) for i in range(1, 3))
}

DEFAULT_SRC_IP = {
    "ipv4": "20.0.0.1",
    "ipv6": "60c0:a800::5"
}

# TODO: These routes don't match the VLAN interface from the T0 topology.
# This needs to be addressed before we can enable the v6 tests for T0
DOWNSTREAM_DST_IP = {
    "ipv4": "192.168.0.253",
    "ipv6": "20c0:a800::2"
}
DOWNSTREAM_IP_TO_ALLOW = {
    "ipv4": "192.168.0.252",
    "ipv6": "20c0:a800::4"
}
DOWNSTREAM_IP_TO_BLOCK = {
    "ipv4": "192.168.0.251",
    "ipv6": "20c0:a800::8"
}
# Filled at runtime by the populate_vlan_arp_entries fixture (addr -> PTF port).
DOWNSTREAM_IP_PORT_MAP = {}

UPSTREAM_DST_IP = {
    "ipv4": "192.168.128.1",
    "ipv6": "40c0:a800::2"
}
UPSTREAM_IP_TO_ALLOW = {
    "ipv4": "192.168.136.1",
    "ipv6": "40c0:a800::4"
}
UPSTREAM_IP_TO_BLOCK = {
    "ipv4": "192.168.144.1",
    "ipv6": "40c0:a800::8"
}

VLAN_BASE_MAC_PATTERN = "72060001{:04}"

# Syslog patterns matched by the LogAnalyzer fixtures below.
LOG_EXPECT_ACL_TABLE_CREATE_RE = ".*Created ACL table.*"
LOG_EXPECT_ACL_TABLE_REMOVE_RE = ".*Successfully deleted ACL table.*"
LOG_EXPECT_ACL_RULE_CREATE_RE = ".*Successfully created ACL rule.*"
LOG_EXPECT_ACL_RULE_REMOVE_RE = ".*Successfully deleted ACL rule.*"

PACKETS_COUNT = "packets_count"
BYTES_COUNT = "bytes_count"
@pytest.fixture(scope="module")
def setup(duthosts, ptfhost, rand_selected_dut, rand_unselected_dut, tbinfo, ptfadapter):
    """Gather all required test information from DUT and tbinfo.

    Args:
        duthosts: All DUTs belong to the testbed.
        ptfhost: PTF host object.
        rand_selected_dut: The randomly selected DUT under test.
        rand_unselected_dut: The peer ToR on dualtor testbeds, else unused.
        tbinfo: A fixture to gather information about the testbed.
        ptfadapter: PTF adapter fixture.

    Yields:
        A Dictionary with required test information.
    """
    mg_facts = rand_selected_dut.get_extended_minigraph_facts(tbinfo)
    topo = tbinfo["topo"]["type"]

    vlan_ports = []
    vlan_mac = None

    if topo == "t0":
        # dict.values() is a view in Python 3; materialize it before indexing.
        vlan_ports = [mg_facts["minigraph_ptf_indices"][ifname]
                      for ifname in list(mg_facts["minigraph_vlans"].values())[0]["members"]]

        config_facts = rand_selected_dut.get_running_config_facts()
        vlan_table = config_facts["VLAN"]
        vlan_name = list(vlan_table.keys())[0]
        if "mac" in vlan_table[vlan_name]:
            vlan_mac = vlan_table[vlan_name]["mac"]

    # Get the list of upstream/downstream ports
    downstream_ports = defaultdict(list)
    upstream_ports = defaultdict(list)
    downstream_port_ids = []
    upstream_port_ids = []
    upstream_port_id_to_router_mac_map = {}
    downstream_port_id_to_router_mac_map = {}

    # For T0/dual ToR testbeds, we need to use the VLAN MAC to interact with downstream ports
    # For T1 testbeds, no VLANs are present so using the router MAC is acceptable
    downlink_dst_mac = vlan_mac if vlan_mac is not None else rand_selected_dut.facts["router_mac"]

    for interface, neighbor in mg_facts["minigraph_neighbors"].items():
        port_id = mg_facts["minigraph_ptf_indices"][interface]
        if (topo == "t1" and "T0" in neighbor["name"]) or (topo == "t0" and "Server" in neighbor["name"]):
            downstream_ports[neighbor['namespace']].append(interface)
            downstream_port_ids.append(port_id)
            downstream_port_id_to_router_mac_map[port_id] = downlink_dst_mac
        elif (topo == "t1" and "T2" in neighbor["name"]) or (topo == "t0" and "T1" in neighbor["name"]):
            upstream_ports[neighbor['namespace']].append(interface)
            upstream_port_ids.append(port_id)
            upstream_port_id_to_router_mac_map[port_id] = rand_selected_dut.facts["router_mac"]

    # stop garp service for single tor
    if 'dualtor' not in tbinfo['topo']['name']:
        logging.info("Stopping GARP service on single tor")
        ptfhost.shell("supervisorctl stop garp_service", module_ignore_errors=True)

    # If running on a dual ToR testbed, any uplink for either ToR is an acceptable
    # source or destination port
    if 'dualtor' in tbinfo['topo']['name'] and rand_unselected_dut is not None:
        peer_mg_facts = rand_unselected_dut.get_extended_minigraph_facts(tbinfo)
        for interface, neighbor in peer_mg_facts['minigraph_neighbors'].items():
            if (topo == "t1" and "T2" in neighbor["name"]) or (topo == "t0" and "T1" in neighbor["name"]):
                port_id = peer_mg_facts["minigraph_ptf_indices"][interface]
                upstream_port_ids.append(port_id)
                upstream_port_id_to_router_mac_map[port_id] = rand_unselected_dut.facts["router_mac"]

    # Get the list of LAGs
    port_channels = mg_facts["minigraph_portchannels"]

    # TODO: We should make this more robust (i.e. bind all active front-panel ports)
    acl_table_ports = defaultdict(list)

    # NOTE: dict.iteritems() was removed in Python 3; items() works on both.
    if topo == "t0" or tbinfo["topo"]["name"] in ("t1", "t1-lag"):
        for namespace, port in downstream_ports.items():
            acl_table_ports[namespace] += port
            # In multi-asic we need config both in host and namespace.
            if namespace:
                acl_table_ports[''] += port

    if topo == "t0" or tbinfo["topo"]["name"] in ("t1-lag", "t1-64-lag", "t1-64-lag-clet"):
        for k, v in port_channels.items():
            acl_table_ports[v['namespace']].append(k)
            # In multi-asic we need config both in host and namespace.
            if v['namespace']:
                acl_table_ports[''].append(k)
    else:
        for namespace, port in upstream_ports.items():
            acl_table_ports[namespace] += port
            # In multi-asic we need config both in host and namespace.
            if namespace:
                acl_table_ports[''] += port

    dest_mac_mapping = {
        "downlink->uplink": downstream_port_id_to_router_mac_map,
        "uplink->downlink": upstream_port_id_to_router_mac_map
    }

    setup_information = {
        "destination_mac": dest_mac_mapping,
        "downstream_port_ids": downstream_port_ids,
        "upstream_port_ids": upstream_port_ids,
        "acl_table_ports": acl_table_ports,
        "vlan_ports": vlan_ports,
        "topo": topo,
        "vlan_mac": vlan_mac
    }

    logger.info("Gathered variables for ACL test:\n{}".format(pprint.pformat(setup_information)))

    logger.info("Creating temporary folder \"{}\" for ACL test".format(DUT_TMP_DIR))
    for duthost in duthosts:
        duthost.command("mkdir -p {}".format(DUT_TMP_DIR))

    yield setup_information

    logger.info("Removing temporary directory \"{}\"".format(DUT_TMP_DIR))
    for duthost in duthosts:
        duthost.command("rm -rf {}".format(DUT_TMP_DIR))
@pytest.fixture(scope="module", params=["ipv4", "ipv6"])
def ip_version(request, tbinfo, duthosts, rand_one_dut_hostname):
    """Parametrize tests over IP versions, skipping IPv6 on T0 topologies."""
    is_t0 = tbinfo["topo"]["type"] == "t0"
    if is_t0 and request.param == "ipv6":
        pytest.skip("IPV6 ACL test not currently supported on t0 testbeds")

    return request.param
@pytest.fixture(scope="module")
def populate_vlan_arp_entries(setup, ptfhost, duthosts, rand_one_dut_hostname, ip_version):
    """Set up the ARP responder utility in the PTF container.

    Yields a callable that (re)populates the DUT ARP/FDB tables; on
    non-T0 topologies the callable is a no-op.
    """
    duthost = duthosts[rand_one_dut_hostname]
    if setup["topo"] != "t0":
        def noop():
            pass

        yield noop

        return  # Don't fall through to t0 case

    addr_list = [DOWNSTREAM_DST_IP[ip_version], DOWNSTREAM_IP_TO_ALLOW[ip_version], DOWNSTREAM_IP_TO_BLOCK[ip_version]]

    vlan_host_map = defaultdict(dict)
    # enumerate() replaces the manual range(len(...)) indexing; the index is
    # only needed to derive a unique MAC per address.
    for i, addr in enumerate(addr_list):
        mac = VLAN_BASE_MAC_PATTERN.format(i)
        port = random.choice(setup["vlan_ports"])
        vlan_host_map[port][str(addr)] = mac
        DOWNSTREAM_IP_PORT_MAP[addr] = port

    arp_responder_conf = {}
    for port in vlan_host_map:
        arp_responder_conf['eth{}'.format(port)] = vlan_host_map[port]

    with open("/tmp/from_t1.json", "w") as ar_config:
        json.dump(arp_responder_conf, ar_config)

    ptfhost.copy(src="/tmp/from_t1.json", dest="/tmp/from_t1.json")
    ptfhost.host.options["variable_manager"].extra_vars.update({"arp_responder_args": "-e"})
    ptfhost.template(src="templates/arp_responder.conf.j2", dest="/etc/supervisor/conf.d/arp_responder.conf")

    ptfhost.shell("supervisorctl reread && supervisorctl update")
    ptfhost.shell("supervisorctl restart arp_responder")

    def populate_arp_table():
        # Clear stale entries, then ping each target so the ARP responder
        # repopulates ARP/FDB on every DUT.
        for dut in duthosts:
            dut.command("sonic-clear fdb all")
            dut.command("sonic-clear arp")
            # Wait some time to ensure the async call of clear is completed
            time.sleep(20)
            for addr in addr_list:
                dut.command("ping {} -c 3".format(addr), module_ignore_errors=True)

    populate_arp_table()

    yield populate_arp_table

    logging.info("Stopping ARP responder")
    ptfhost.shell("supervisorctl stop arp_responder")

    duthost.command("sonic-clear fdb all")
    duthost.command("sonic-clear arp")
@pytest.fixture(scope="module", params=["ingress", "egress"])
def stage(request, duthosts, rand_one_dut_hostname):
    """Parametrize tests for Ingress/Egress stage testing.

    Args:
        request: A fixture to interact with Pytest data.
        duthosts: All DUTs belong to the testbed.
        rand_one_dut_hostname: hostname of a random chosen dut to run test.

    Returns:
        str: The ACL stage to be tested.
    """
    duthost = duthosts[rand_one_dut_hostname]
    # BUGFIX: membership must be tested against a tuple. The previous
    # ("broadcom") is just a parenthesized string, so `asic_type not in
    # "broadcom"` performed a substring check instead of set membership.
    pytest_require(
        request.param == "ingress" or duthost.facts["asic_type"] not in ("broadcom",),
        "Egress ACLs are not currently supported on \"{}\" ASICs".format(duthost.facts["asic_type"])
    )

    return request.param
def create_or_remove_acl_table(duthost, acl_table_config, setup, op):
    """Add or delete an ACL table on every host/frontend-ASIC instance.

    Args:
        duthost: The DUT to configure.
        acl_table_config: Dict with table_name/table_type/table_stage keys.
        setup: Test setup info; supplies the per-namespace port bindings.
        op: "add" to create the table, anything else to remove it.
    """
    table_name = acl_table_config["table_name"]
    for host_or_asic in duthost.get_sonic_host_and_frontend_asic_instance():
        namespace = host_or_asic.namespace if hasattr(host_or_asic, 'namespace') else ''
        if op == "add":
            logger.info("Creating ACL table: \"{}\" in namespace {} on device {}".format(table_name, namespace, duthost))
            bound_ports = ",".join(setup["acl_table_ports"][namespace])
            host_or_asic.command(
                "config acl add table {} {} -s {} -p {}".format(
                    table_name,
                    acl_table_config["table_type"],
                    acl_table_config["table_stage"],
                    bound_ports,
                )
            )
        else:
            logger.info("Removing ACL table \"{}\" in namespace {} on device {}".format(table_name, namespace, duthost))
            host_or_asic.command("config acl remove table {}".format(table_name))
@pytest.fixture(scope="module")
def acl_table(duthosts, rand_one_dut_hostname, setup, stage, ip_version):
    """Apply ACL table configuration and remove after tests.

    Args:
        duthosts: All DUTs belong to the testbed.
        rand_one_dut_hostname: hostname of a random chosen dut to run test.
        setup: Parameters for the ACL tests.
        stage: The ACL stage under test.
        ip_version: The IP version under test.

    Yields:
        The ACL table configuration.
    """
    table_name = "DATA_{}_{}_TEST".format(stage.upper(), ip_version.upper())

    acl_table_config = {
        "table_name": table_name,
        "table_ports": ",".join(setup["acl_table_ports"]['']),
        "table_stage": stage,
        "table_type": "L3" if ip_version == "ipv4" else "L3V6"
    }
    logger.info("Generated ACL table configuration:\n{}".format(pprint.pformat(acl_table_config)))

    dut_to_analyzer_map = {}

    for duthost in duthosts:
        # One LogAnalyzer per DUT; each watches syslog for the create/remove
        # confirmation patterns.
        loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="acl")
        loganalyzer.load_common_config()
        dut_to_analyzer_map[duthost] = loganalyzer

        try:
            loganalyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_CREATE_RE]
            with loganalyzer:
                create_or_remove_acl_table(duthost, acl_table_config, setup, "add")
        except LogAnalyzerError as err:
            # Cleanup Config DB if table creation failed
            logger.error("ACL table creation failed, attempting to clean-up...")
            create_or_remove_acl_table(duthost, acl_table_config, setup, "remove")
            raise err

    try:
        yield acl_table_config
    finally:
        # Always remove the table, even if the tests using it failed.
        for duthost, loganalyzer in dut_to_analyzer_map.items():
            loganalyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_REMOVE_RE]
            with loganalyzer:
                create_or_remove_acl_table(duthost, acl_table_config, setup, "remove")
class BaseAclTest(object):
    """Base class for testing ACL rules.

    Subclasses must provide `setup_rules` method to prepare ACL rules for traffic testing.

    They can optionally override `teardown_rules`, which will otherwise remove the rules by
    applying an empty configuration file.
    """

    # NOTE(review): Python 2 style metaclass attachment; under Python 3 this has no
    # effect, so @abstractmethod below is not actually enforced there.
    __metaclass__ = ABCMeta

    # Seconds to wait after traffic so orchagent can push updated ACL counters to the DB.
    ACL_COUNTERS_UPDATE_INTERVAL_SECS = 10

    @abstractmethod
    def setup_rules(self, dut, acl_table, ip_version):
        """Setup ACL rules for testing.

        Args:
            dut: The DUT having ACLs applied.
            acl_table: Configuration info for the ACL table.
            ip_version: The IP version under test.
        """
        pass

    def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
        """Perform actions after rules have been applied.

        Default is a no-op; subclasses override this (e.g. to reboot or flap ports).

        Args:
            dut: The DUT having ACLs applied.
            localhost: The host from which tests are run.
            populate_vlan_arp_entries: A function to populate ARP/FDB tables for VLAN interfaces.
            tbinfo: Information about the running testbed.
        """
        pass

    def teardown_rules(self, dut):
        """Tear down ACL rules once the tests have completed.

        Args:
            dut: The DUT having ACLs applied.
        """
        logger.info("Finished with tests, removing all ACL rules...")

        # Copy empty rules configuration
        dut.copy(src=os.path.join(FILES_DIR, ACL_REMOVE_RULES_FILE), dest=DUT_TMP_DIR)
        remove_rules_dut_path = os.path.join(DUT_TMP_DIR, ACL_REMOVE_RULES_FILE)

        # Remove the rules: a "full" update with an empty rule set clears the table.
        logger.info("Applying \"{}\"".format(remove_rules_dut_path))
        dut.command("config acl update full {}".format(remove_rules_dut_path))

    @pytest.fixture(scope="class", autouse=True)
    def acl_rules(self, duthosts, localhost, setup, acl_table, populate_vlan_arp_entries, tbinfo, ip_version):
        """Setup/teardown ACL rules for the current set of tests.

        Args:
            duthosts: All DUTs belong to the testbed.
            localhost: The host from which tests are run.
            setup: Parameters for the ACL tests.
            acl_table: Configuration info for the ACL table.
            populate_vlan_arp_entries: A function to populate ARP/FDB tables for VLAN interfaces.
            tbinfo: Information about the running testbed.
            ip_version: The IP version under test.
        """
        dut_to_analyzer_map = {}

        for duthost in duthosts:
            loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="acl_rules")
            loganalyzer.load_common_config()
            dut_to_analyzer_map[duthost] = loganalyzer

            try:
                loganalyzer.expect_regex = [LOG_EXPECT_ACL_RULE_CREATE_RE]
                with loganalyzer:
                    self.setup_rules(duthost, acl_table, ip_version)

                # Hook runs outside the log-analyzer context (it may reboot the DUT).
                self.post_setup_hook(duthost, localhost, populate_vlan_arp_entries, tbinfo)

                assert self.check_rule_counters(duthost), "Rule counters should be ready!"
            except LogAnalyzerError as err:
                # Cleanup Config DB if rule creation failed
                logger.error("ACL rule application failed, attempting to clean-up...")
                self.teardown_rules(duthost)
                raise err

        try:
            yield
        finally:
            for duthost, loganalyzer in dut_to_analyzer_map.items():
                loganalyzer.expect_regex = [LOG_EXPECT_ACL_RULE_REMOVE_RE]
                with loganalyzer:
                    logger.info("Removing ACL rules")
                    self.teardown_rules(duthost)

    @pytest.yield_fixture(scope="class", autouse=True)
    def counters_sanity_check(self, duthosts, acl_rules, acl_table):
        """Validate that the counters for each rule in the rules list increased as expected.

        This fixture yields a list of rule IDs. The test case should add on to this list if
        it is required to check the rule for increased counters.

        After the test cases pass, the fixture will wait for the ACL counters to update and then
        check if the counters for each rule in the list were increased.

        Args:
            duthosts: All DUTs belong to the testbed.
            acl_rules: Fixture that sets up the ACL rules.
            acl_table: Fixture that sets up the ACL table.

        Yields:
            A list the tests append rule IDs to; each ID n maps to the "RULE_n" entry
            reported by acl_facts.
        """
        acl_facts = defaultdict(dict)
        table_name = acl_table["table_name"]
        for duthost in duthosts:
            acl_facts[duthost]['before'] = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"][table_name]["rules"]

        rule_list = []
        yield rule_list

        # Nothing to validate when no test appended a rule ID.
        if not rule_list:
            return

        # Wait for orchagent to update the ACL counters
        time.sleep(self.ACL_COUNTERS_UPDATE_INTERVAL_SECS)

        for duthost in duthosts:
            acl_facts[duthost]['after'] = duthost.acl_facts()["ansible_facts"]["ansible_acl_facts"][table_name]["rules"]

        for duthost in duthosts:
            assert len(acl_facts[duthost]['before']) == len(acl_facts[duthost]['after'])

        for rule in rule_list:
            rule = "RULE_{}".format(rule)

            # Counters are summed across all DUTs, so it does not matter which DUT
            # actually matched the traffic.
            counters_before = {
                PACKETS_COUNT: 0,
                BYTES_COUNT: 0
            }
            for duthost in duthosts:
                counters_before[PACKETS_COUNT] += acl_facts[duthost]['before'][rule][PACKETS_COUNT]
                counters_before[BYTES_COUNT] += acl_facts[duthost]['before'][rule][BYTES_COUNT]
            logger.info("Counters for ACL rule \"{}\" before traffic:\n{}"
                        .format(rule, pprint.pformat(counters_before)))

            counters_after = {
                PACKETS_COUNT: 0,
                BYTES_COUNT: 0
            }
            for duthost in duthosts:
                counters_after[PACKETS_COUNT] += acl_facts[duthost]['after'][rule][PACKETS_COUNT]
                counters_after[BYTES_COUNT] += acl_facts[duthost]['after'][rule][BYTES_COUNT]
            logger.info("Counters for ACL rule \"{}\" after traffic:\n{}"
                        .format(rule, pprint.pformat(counters_after)))

            assert counters_after[PACKETS_COUNT] > counters_before[PACKETS_COUNT]
            assert counters_after[BYTES_COUNT] > counters_before[BYTES_COUNT]

    @pytest.fixture(params=["downlink->uplink", "uplink->downlink"])
    def direction(self, request):
        """Parametrize test based on direction of traffic."""
        return request.param

    def check_rule_counters(self, duthost):
        """Poll until every ACL rule counter on `duthost` is populated.

        Returns:
            bool: True if counters became ready within the polling window.
        """
        logger.info('Wait all rule counters are ready')

        return wait_until(60, 2, 0, self.check_rule_counters_internal, duthost)

    def check_rule_counters_internal(self, duthost):
        """Single readiness probe: run `aclshow -a` on each frontend ASIC.

        Fewer than three output lines means no rule rows were printed; an 'N/A'
        token presumably means a counter has not been populated yet.
        """
        for asic_id in duthost.get_frontend_asic_ids():
            res = duthost.asic_instance(asic_id).command('aclshow -a')

            num_of_lines = len(res['stdout'].split('\n'))

            if num_of_lines <= 2 or 'N/A' in res['stdout']:
                return False

        return True

    @pytest.fixture(autouse=True)
    def get_src_port(self, setup, direction):
        """Get a source port for the current test."""
        src_ports = setup["downstream_port_ids"] if direction == "downlink->uplink" else setup["upstream_port_ids"]
        src_port = random.choice(src_ports)
        logger.info("Selected source port {}".format(src_port))
        # Stashed on the instance so packet builders and _verify_acl_traffic can use it.
        self.src_port = src_port

    def get_dst_ports(self, setup, direction):
        """Get the set of possible destination ports for the current test."""
        return setup["upstream_port_ids"] if direction == "downlink->uplink" else setup["downstream_port_ids"]

    def get_dst_ip(self, direction, ip_version):
        """Get the default destination IP for the current test."""
        return UPSTREAM_DST_IP[ip_version] if direction == "downlink->uplink" else DOWNSTREAM_DST_IP[ip_version]

    def tcp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, proto=None, sport=0x4321, dport=0x51, flags=None):
        """Generate a TCP packet for testing.

        Args:
            setup: Parameters for the ACL tests.
            direction: Traffic direction ("downlink->uplink" or "uplink->downlink").
            ptfadapter: PTF adapter used to resolve the source MAC.
            ip_version: "ipv4" or anything else for IPv6.
            src_ip/dst_ip: Optional overrides for the IP header addresses.
            proto: Optional IP protocol number override (for IP-protocol match tests).
            sport/dport: TCP source/destination ports.
            flags: Optional TCP flags override.
        """
        src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
        dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
        if ip_version == "ipv4":
            pkt = testutils.simple_tcp_packet(
                eth_dst=setup["destination_mac"][direction][self.src_port],
                eth_src=ptfadapter.dataplane.get_mac(0, 0),
                ip_dst=dst_ip,
                ip_src=src_ip,
                tcp_sport=sport,
                tcp_dport=dport,
                ip_ttl=64
            )

            # Overwrite the protocol number when the test matches on IP protocol.
            if proto:
                pkt["IP"].proto = proto
        else:
            pkt = testutils.simple_tcpv6_packet(
                eth_dst=setup["destination_mac"][direction][self.src_port],
                eth_src=ptfadapter.dataplane.get_mac(0, 0),
                ipv6_dst=dst_ip,
                ipv6_src=src_ip,
                tcp_sport=sport,
                tcp_dport=dport,
                ipv6_hlim=64
            )

            # IPv6 carries the protocol in the Next Header field.
            if proto:
                pkt["IPv6"].nh = proto

        if flags:
            pkt["TCP"].flags = flags

        return pkt

    def udp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, sport=1234, dport=80):
        """Generate a UDP packet for testing."""
        src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
        dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
        if ip_version == "ipv4":
            return testutils.simple_udp_packet(
                eth_dst=setup["destination_mac"][direction][self.src_port],
                eth_src=ptfadapter.dataplane.get_mac(0, 0),
                ip_dst=dst_ip,
                ip_src=src_ip,
                udp_sport=sport,
                udp_dport=dport,
                ip_ttl=64
            )
        else:
            return testutils.simple_udpv6_packet(
                eth_dst=setup["destination_mac"][direction][self.src_port],
                eth_src=ptfadapter.dataplane.get_mac(0, 0),
                ipv6_dst=dst_ip,
                ipv6_src=src_ip,
                udp_sport=sport,
                udp_dport=dport,
                ipv6_hlim=64
            )

    def icmp_packet(self, setup, direction, ptfadapter, ip_version, src_ip=None, dst_ip=None, icmp_type=8, icmp_code=0):
        """Generate an ICMP packet for testing."""
        src_ip = src_ip or DEFAULT_SRC_IP[ip_version]
        dst_ip = dst_ip or self.get_dst_ip(direction, ip_version)
        if ip_version == "ipv4":
            return testutils.simple_icmp_packet(
                eth_dst=setup["destination_mac"][direction][self.src_port],
                eth_src=ptfadapter.dataplane.get_mac(0, 0),
                ip_dst=dst_ip,
                ip_src=src_ip,
                icmp_type=icmp_type,
                icmp_code=icmp_code,
                ip_ttl=64,
            )
        else:
            return testutils.simple_icmpv6_packet(
                eth_dst=setup["destination_mac"][direction][self.src_port],
                eth_src=ptfadapter.dataplane.get_mac(0, 0),
                ipv6_dst=dst_ip,
                ipv6_src=src_ip,
                icmp_type=icmp_type,
                icmp_code=icmp_code,
                ipv6_hlim=64,
            )

    def expected_mask_routed_packet(self, pkt, ip_version):
        """Generate the expected mask for a routed packet.

        Routing rewrites the Ethernet addresses and decrements TTL/hop-limit, so
        those fields (plus the IPv4 checksum) are excluded from comparison.
        """
        exp_pkt = pkt.copy()
        exp_pkt = mask.Mask(exp_pkt)
        exp_pkt.set_do_not_care_scapy(packet.Ether, "dst")
        exp_pkt.set_do_not_care_scapy(packet.Ether, "src")
        if ip_version == "ipv4":
            exp_pkt.set_do_not_care_scapy(packet.IP, "chksum")
            # In multi-asic we cannot determine this so ignore.
            exp_pkt.set_do_not_care_scapy(packet.IP, 'ttl')
        else:
            # In multi-asic we cannot determine this so ignore.
            exp_pkt.set_do_not_care_scapy(packet.IPv6, 'hlim')

        return exp_pkt

    def test_ingress_unmatched_blocked(self, setup, direction, ptfadapter, ip_version, stage):
        """Verify that unmatched packets are dropped for ingress."""
        if stage == "egress":
            pytest.skip("Only run for ingress")

        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version)
        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)

    def test_egress_unmatched_forwarded(self, setup, direction, ptfadapter, ip_version, stage):
        """Verify that default egress rule allow all traffics"""
        if stage == "ingress":
            pytest.skip("Only run for egress")

        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version)
        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)

    def test_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and forward a packet on source IP."""
        src_ip = "20.0.0.2" if ip_version == "ipv4" else "60c0:a800::6"
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
        counters_sanity_check.append(1)

    def test_rules_priority_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we respect rule priorites in the forwarding case."""
        src_ip = "20.0.0.7" if ip_version == "ipv4" else "60c0:a800::7"
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
        counters_sanity_check.append(20)

    def test_rules_priority_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we respect rule priorites in the drop case."""
        src_ip = "20.0.0.3" if ip_version == "ipv4" else "60c0:a800::4"
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
        counters_sanity_check.append(7)

    def test_dest_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and forward a packet on destination IP."""
        dst_ip = DOWNSTREAM_IP_TO_ALLOW[ip_version] if direction == "uplink->downlink" else UPSTREAM_IP_TO_ALLOW[ip_version]
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
        counters_sanity_check.append(2 if direction == "uplink->downlink" else 3)

    def test_dest_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and drop a packet on destination IP."""
        dst_ip = DOWNSTREAM_IP_TO_BLOCK[ip_version] if direction == "uplink->downlink" else UPSTREAM_IP_TO_BLOCK[ip_version]
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dst_ip=dst_ip)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
        counters_sanity_check.append(15 if direction == "uplink->downlink" else 16)

    def test_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and drop a packet on source IP."""
        src_ip = "20.0.0.6" if ip_version == "ipv4" else "60c0:a800::3"
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
        counters_sanity_check.append(14)

    def test_udp_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and forward a UDP packet on source IP."""
        src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8"
        pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
        counters_sanity_check.append(13)

    def test_udp_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and drop a UDP packet on source IP."""
        src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2"
        pkt = self.udp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
        counters_sanity_check.append(26)

    def test_icmp_source_ip_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and drop an ICMP packet on source IP."""
        src_ip = "20.0.0.8" if ip_version == "ipv4" else "60c0:a800::2"
        pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
        counters_sanity_check.append(25)

    def test_icmp_source_ip_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and forward an ICMP packet on source IP."""
        src_ip = "20.0.0.4" if ip_version == "ipv4" else "60c0:a800::8"
        pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
        counters_sanity_check.append(12)

    def test_l4_dport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and forward on L4 destination port."""
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x1217)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
        counters_sanity_check.append(5)

    def test_l4_sport_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and forward on L4 source port."""
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x120D)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
        counters_sanity_check.append(4)

    def test_l4_dport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and forward on a range of L4 destination ports."""
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x123B)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
        counters_sanity_check.append(11)

    def test_l4_sport_range_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and forward on a range of L4 source ports."""
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x123A)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
        counters_sanity_check.append(10)

    def test_l4_dport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and drop on a range of L4 destination ports."""
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
        counters_sanity_check.append(22)

    def test_l4_sport_range_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and drop on a range of L4 source ports."""
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
        counters_sanity_check.append(17)

    def test_ip_proto_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and forward on the IP protocol."""
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7E)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
        # NOTE(review): rule ID 5 is also used by test_l4_dport_match_forwarded and
        # test_tcp_flags_match_dropped — verify against the rules template.
        counters_sanity_check.append(5)

    def test_tcp_flags_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and forward on the TCP flags."""
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x1B)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
        counters_sanity_check.append(6)

    def test_l4_dport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and drop on L4 destination port."""
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, dport=0x127B)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
        counters_sanity_check.append(22)

    def test_l4_sport_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and drop on L4 source port."""
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, sport=0x1271)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
        # NOTE(review): rule ID 10 duplicates test_l4_sport_range_match_forwarded's —
        # looks like a copy-paste; confirm against the rules template.
        counters_sanity_check.append(10)

    def test_ip_proto_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and drop on the IP protocol."""
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, proto=0x7F)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
        counters_sanity_check.append(18)

    def test_tcp_flags_match_dropped(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and drop on the TCP flags."""
        pkt = self.tcp_packet(setup, direction, ptfadapter, ip_version, flags=0x24)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, True, ip_version)
        counters_sanity_check.append(5)

    def test_icmp_match_forwarded(self, setup, direction, ptfadapter, counters_sanity_check, ip_version):
        """Verify that we can match and forward an ICMP packet on ICMP type and code."""
        src_ip = "20.0.0.10" if ip_version == "ipv4" else "60c0:a800::10"
        pkt = self.icmp_packet(setup, direction, ptfadapter, ip_version, src_ip=src_ip, icmp_type=3, icmp_code=1)

        self._verify_acl_traffic(setup, direction, ptfadapter, pkt, False, ip_version)
        counters_sanity_check.append(29)

    def _verify_acl_traffic(self, setup, direction, ptfadapter, pkt, dropped, ip_version):
        """Send `pkt` from self.src_port and verify it was forwarded or dropped.

        Args:
            setup: Parameters for the ACL tests.
            direction: Traffic direction under test.
            ptfadapter: PTF adapter used to send/verify traffic.
            pkt: The packet to send.
            dropped: True if the ACL is expected to drop the packet.
            ip_version: The IP version under test.
        """
        exp_pkt = self.expected_mask_routed_packet(pkt, ip_version)

        # If the destination IP maps to a specific downstream port, verify on exactly
        # that port; otherwise accept the packet on any candidate destination port.
        if ip_version == "ipv4":
            downstream_dst_port = DOWNSTREAM_IP_PORT_MAP.get(pkt[packet.IP].dst)
        else:
            downstream_dst_port = DOWNSTREAM_IP_PORT_MAP.get(pkt[packet.IPv6].dst)

        ptfadapter.dataplane.flush()
        testutils.send(ptfadapter, self.src_port, pkt)
        if direction == "uplink->downlink" and downstream_dst_port:
            if dropped:
                testutils.verify_no_packet(ptfadapter, exp_pkt, downstream_dst_port)
            else:
                testutils.verify_packet(ptfadapter, exp_pkt, downstream_dst_port)
        else:
            if dropped:
                testutils.verify_no_packet_any(ptfadapter, exp_pkt, ports=self.get_dst_ports(setup, direction))
            else:
                testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=self.get_dst_ports(setup, direction),
                                                 timeout=20)
class TestBasicAcl(BaseAclTest):
    """Test Basic functionality of ACL rules (i.e. setup with full update on a running device)."""

    def setup_rules(self, dut, acl_table, ip_version):
        """Render the full rules template on the DUT and apply it in a single update.

        Args:
            dut: The DUT having ACLs applied.
            acl_table: Configuration info for the ACL table.
            ip_version: The IP version under test.
        """
        name = acl_table["table_name"]
        dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": name})
        logger.info("Generating basic ACL rules config for ACL table \"{}\" on {}".format(name, dut))

        remote_config = os.path.join(DUT_TMP_DIR, "acl_rules_{}.json".format(name))
        template_src = os.path.join(TEMPLATE_DIR, ACL_RULES_FULL_TEMPLATE[ip_version])
        dut.template(src=template_src, dest=remote_config)

        logger.info("Applying ACL rules config \"{}\"".format(remote_config))
        dut.command("config acl update full {}".format(remote_config))
class TestIncrementalAcl(BaseAclTest):
    """Test ACL rule functionality with an incremental configuration.

    Verify that everything still works as expected when an ACL configuration is applied in
    multiple parts.
    """

    def setup_rules(self, dut, acl_table, ip_version):
        """Render each part of the rules template and apply them one at a time.

        Args:
            dut: The DUT having ACLs applied.
            acl_table: Configuration info for the ACL table.
            ip_version: The IP version under test.
        """
        name = acl_table["table_name"]
        dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": name})
        logger.info("Generating incremental ACL rules config for ACL table \"{}\"".format(name))

        for part, config_file in enumerate(ACL_RULES_PART_TEMPLATES[ip_version]):
            remote_config = os.path.join(DUT_TMP_DIR, "acl_rules_{}_part_{}.json".format(name, part))
            dut.template(src=os.path.join(TEMPLATE_DIR, config_file), dest=remote_config)

            logger.info("Applying ACL rules config \"{}\"".format(remote_config))
            dut.command("config acl update incremental {}".format(remote_config))
@pytest.mark.reboot
class TestAclWithReboot(TestBasicAcl):
    """Test ACL rule functionality with a reboot.

    Verify that configuration persists correctly after reboot and is applied properly
    upon startup.
    """

    def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
        """Save configuration and reboot after rules are applied.

        Args:
            dut: The DUT having ACLs applied.
            localhost: The host from which tests are run.
            populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.
            tbinfo: Information about the running testbed.
        """
        dut.command("config save -y")
        reboot(dut, localhost, wait=240)

        # We need some additional delay on e1031
        slow_platforms = ("x86_64-cel_e1031-r0",)
        if dut.facts["platform"] in slow_platforms:
            time.sleep(240)

        populate_vlan_arp_entries()
@pytest.mark.port_toggle
class TestAclWithPortToggle(TestBasicAcl):
    """Test ACL rule functionality after toggling ports.

    Verify that ACLs still function as expected after links flap.
    """

    def post_setup_hook(self, dut, localhost, populate_vlan_arp_entries, tbinfo):
        """Flap all ports, then repopulate ARP/FDB entries after rules are applied.

        Args:
            dut: The DUT having ACLs applied.
            localhost: The host from which tests are run.
            populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.
            tbinfo: Information about the running testbed.
        """
        port_toggle(dut, tbinfo)
        populate_vlan_arp_entries()
| tests/acl/test_acl.py | 42,091 | Base class for testing ACL rules.
Subclasses must provide `setup_rules` method to prepare ACL rules for traffic testing.
They can optionally override `teardown_rules`, which will otherwise remove the rules by
applying an empty configuration file.
Test ACL rule functionality after toggling ports.
Verify that ACLs still function as expected after links flap.
Test ACL rule functionality with a reboot.
Verify that configuration persists correctly after reboot and is applied properly
upon startup.
Test Basic functionality of ACL rules (i.e. setup with full update on a running device).
Test ACL rule functionality with an incremental configuration.
Verify that everything still works as expected when an ACL configuration is applied in
multiple parts.
Setup/teardown ACL rules for the current set of tests.
Args:
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a random chosen dut to run test.
localhost: The host from which tests are run.
setup: Parameters for the ACL tests.
acl_table: Configuration info for the ACL table.
populate_vlan_arp_entries: A function to populate ARP/FDB tables for VLAN interfaces.
Apply ACL table configuration and remove after tests.
Args:
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a random chosen dut to run test.
setup: Parameters for the ACL tests.
stage: The ACL stage under test.
ip_version: The IP version under test
Yields:
The ACL table configuration.
Validate that the counters for each rule in the rules list increased as expected.
This fixture yields a list of rule IDs. The test case should add on to this list if
it is required to check the rule for increased counters.
After the test cases pass, the fixture will wait for the ACL counters to update and then
check if the counters for each rule in the list were increased.
Args:
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a random chosen dut to run test.
acl_rules: Fixture that sets up the ACL rules.
acl_table: Fixture that sets up the ACL table.
Parametrize test based on direction of traffic.
Generate the expected mask for a routed packet.
Get the default destination IP for the current test.
Get the set of possible destination ports for the current test.
Get a source port for the current test.
Generate an ICMP packet for testing.
Set up the ARP responder utility in the PTF container.
Perform actions after rules have been applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A function to populate ARP/FDB tables for VLAN interfaces.
Save configuration and reboot after rules are applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.
Toggle ports after rules are applied.
Args:
dut: The DUT having ACLs applied.
localhost: The host from which tests are run.
populate_vlan_arp_entries: A fixture to populate ARP/FDB tables for VLAN interfaces.
Gather all required test information from DUT and tbinfo.
Args:
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a random chosen dut to run test.
tbinfo: A fixture to gather information about the testbed.
Yields:
A Dictionary with required test information.
Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
Setup ACL rules for testing.
Args:
dut: The DUT having ACLs applied.
acl_table: Configuration info for the ACL table.
Parametrize tests for Ingress/Egress stage testing.
Args:
request: A fixture to interact with Pytest data.
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a random chosen dut to run test.
Returns:
str: The ACL stage to be tested.
Generate a TCP packet for testing.
Tear down ACL rules once the tests have completed.
Args:
dut: The DUT having ACLs applied.
Verify that we can match and drop a packet on destination IP.
Verify that we can match and forward a packet on destination IP.
Verify that default egress rule allow all traffics
Verify that we can match and drop on the TCP flags.
Verify that we can match and drop an ICMP packet on source IP.
Verify that we can match and forward an ICMP packet on source IP.
Verify that unmatched packets are dropped for ingress.
Verify that we can match and drop on the IP protocol.
Verify that we can match and forward on the IP protocol.
Verify that we can match and drop on L4 destination port.
Verify that we can match and forward on L4 destination port.
Verify that we can match and drop on a range of L4 destination ports.
Verify that we can match and forward on a range of L4 destination ports.
Verify that we can match and drop on L4 source port.
Verify that we can match and forward on L4 source port.
Verify that we can match and drop on a range of L4 source ports.
Verify that we can match and forward on a range of L4 source ports.
Verify that we respect rule priorites in the drop case.
Verify that we respect rule priorites in the forwarding case.
Verify that we can match and drop a packet on source IP.
Verify that we can match and forward a packet on source IP.
Verify that we can match and drop on the TCP flags.
Verify that we can match and forward on the TCP flags.
Verify that we can match and drop a UDP packet on source IP.
Verify that we can match and forward a UDP packet on source IP.
Generate a UDP packet for testing.
Disable automatic loganalyzer, since we use it for the test Keep it under home dir so it persists through reboot TODO: We really shouldn't have two separate templates for v4 and v6, need to combine them somehow TODO: These routes don't match the VLAN interface from the T0 topology. This needs to be addressed before we can enable the v6 tests for T0 Get the list of upstream/downstream ports For T0/dual ToR testbeds, we need to use the VLAN MAC to interact with downstream ports For T1 testbeds, no VLANs are present so using the router MAC is acceptable stop garp service for single tor If running on a dual ToR testbed, any uplink for either ToR is an acceptable source or destination port Get the list of LAGs TODO: We should make this more robust (i.e. bind all active front-panel ports) In multi-asic we need config both in host and namespace. In multi-asic we need config both in host and namespace. In multi-asic we need config both in host and namespace. Don't fall through to t0 case Wait some time to ensure the async call of clear is completed Cleanup Config DB if table creation failed Copy empty rules configuration Remove the rules Cleanup Config DB if rule creation failed Wait for orchagent to update the ACL counters In multi-asic we cannot determine this so ignore. In multi-asic we cannot determine this so ignore. We need some additional delay on e1031 | 7,156 | en | 0.853936 |
"""
Django settings for ask project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'kdfeejt1$j*i&h$4zy4r6w0&(iznitggm%(h##9j$42-fpnch)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'qa',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
)
ROOT_URLCONF = 'ask.urls'
WSGI_APPLICATION = 'ask.wsgi.application'
TEMPLATE_DIRS = BASE_DIR + '/templates'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE' : 'django.db.backends.mysql',
'NAME' : 'qa',
'DATABASE_HOST' : '/var/run/mysql',
'USER' : 'qauser',
'PASSWORD' : 'qapass',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/' | ask/ask/settings.py | 2,121 | Django settings for ask project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/1.6/ref/settings/databases Internationalization https://docs.djangoproject.com/en/1.6/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/1.6/howto/static-files/ | 824 | en | 0.694034 |
# <Copyright 2022, Argo AI, LLC. Released under the MIT license.>
"""Utilities to evaluate motion forecasting predictions and compute metrics."""
import numpy as np
from av2.utils.typing import NDArrayBool, NDArrayFloat, NDArrayNumber
def compute_ade(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
    """Compute the average displacement error for a set of K predicted trajectories (for the same actor).

    Args:
        forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
        gt_trajectory: (N, 2) ground truth trajectory.

    Returns:
        (K,) Average displacement error for each of the predicted trajectories.
    """
    # Per-timestep Euclidean distances between each forecast and the truth: (K, N).
    per_step_errors = np.linalg.norm(forecasted_trajectories - gt_trajectory, axis=2)  # type: ignore
    # Average over the time axis to obtain one scalar per forecast.
    mean_errors: NDArrayFloat = per_step_errors.mean(axis=1)
    return mean_errors
def compute_fde(forecasted_trajectories: NDArrayNumber, gt_trajectory: NDArrayNumber) -> NDArrayFloat:
    """Compute the final displacement error for a set of K predicted trajectories (for the same actor).

    Args:
        forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
        gt_trajectory: (N, 2) ground truth trajectory, FDE will be evaluated against true position at index `N-1`.

    Returns:
        (K,) Final displacement error for each of the predicted trajectories.
    """
    # Only the last timestep matters for FDE: (K, 2) offsets from the true endpoint.
    final_offsets = forecasted_trajectories[:, -1] - gt_trajectory[-1]  # type: ignore
    final_errors: NDArrayFloat = np.linalg.norm(final_offsets, axis=-1)  # type: ignore
    return final_errors
def compute_is_missed_prediction(
    forecasted_trajectories: NDArrayNumber,
    gt_trajectory: NDArrayNumber,
    miss_threshold_m: float = 2.0,
) -> NDArrayBool:
    """Compute whether each of K predicted trajectories (for the same actor) missed by more than a distance threshold.

    Args:
        forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
        gt_trajectory: (N, 2) ground truth trajectory, miss will be evaluated against true position at index `N-1`.
        miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.

    Returns:
        (K,) Bools indicating whether prediction missed by more than specified threshold.
    """
    # A forecast is a miss iff its endpoint lies beyond the threshold.
    final_errors = compute_fde(forecasted_trajectories, gt_trajectory)
    misses: NDArrayBool = final_errors > miss_threshold_m  # type: ignore
    return misses
| src/av2/datasets/motion_forecasting/eval/metrics.py | 2,554 | Compute the average displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory.
Returns:
(K,) Average displacement error for each of the predicted trajectories.
Compute the final displacement error for a set of K predicted trajectories (for the same actor).
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, FDE will be evaluated against true position at index `N-1`.
Returns:
(K,) Final displacement error for each of the predicted trajectories.
Compute whether each of K predicted trajectories (for the same actor) missed by more than a distance threshold.
Args:
forecasted_trajectories: (K, N, 2) predicted trajectories, each N timestamps in length.
gt_trajectory: (N, 2) ground truth trajectory, miss will be evaluated against true position at index `N-1`.
miss_threshold_m: Minimum distance threshold for final displacement to be considered a miss.
Returns:
(K,) Bools indicating whether prediction missed by more than specified threshold.
Utilities to evaluate motion forecasting predictions and compute metrics.
<Copyright 2022, Argo AI, LLC. Released under the MIT license.> (K,N) type: ignore Compute final displacement error for all K trajectories type: ignore type: ignore type: ignore | 1,495 | en | 0.678142 |
from django.db import models
from mptt.models import MPTTModel, TreeForeignKey
class Factor(models.Model):
    """A factor belonging to a named group, with an optional free-text note."""
    group = models.TextField()
    factor = models.TextField()
    note = models.TextField(blank=True, null=True)

    class Meta:
        ordering = ('-group', 'factor',)

    def __str__(self):
        # Same text as 'Group ' + str(group) + ' ' + str(factor).
        return 'Group {} {}'.format(self.group, self.factor)
class Element(MPTTModel):
    """A tree node that assigns a value to a Factor; `parent` links form the hierarchy."""
    factor = models.ForeignKey(Factor, to_field='id', on_delete=models.CASCADE, related_name='element_factor')
    value = models.TextField()
    parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='element_child')
    note = models.TextField(blank=True, null=True)

    class Meta:
        ordering = ('-id', '-factor')

    def __str__(self):
        # Same text as str(factor) + ' ' + str(value).
        return '{} {}'.format(self.factor, self.value)
| geo/bms/element/models.py | 1,042 | class MPTTMeta: order_insertion_by = ['factor']def get_absolute_url(self): return(reverse('element:element_detail', args=[self.id])) | 138 | en | 0.073094 |
from typing import FrozenSet
from collections import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
    """Declare the (current, next) pair of constants `name` / next(`name`)."""
    assert not name.startswith("_"), name
    curr = msat_make_constant(menv, msat_declare_function(menv, name, c_type))
    nxt = msat_make_constant(menv,
                             msat_declare_function(menv, name_next(name), c_type))
    return curr, nxt
def make_enum(menv, v_name: str, enum_size: int):
    """Encode an enum with `enum_size` values over ceil(log2(enum_size)) booleans.

    Declares boolean constant pairs `<v_name>0`, `<v_name>1`, ... and returns
    (b_vars, vals, x_vals) where `b_vars` is the list of (curr, next) boolean
    pairs, and `vals[i]` / `x_vals[i]` are the current/next-state predicates
    asserting that the booleans spell out value `i` in binary.
    """
    bool_type = msat_get_bool_type(menv)
    num_bits = ceil(log(enum_size, 2))
    b_vars = []
    for idx in range(num_bits):
        c_name = "{}{}".format(v_name, idx)
        b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
    vals = []
    x_vals = []
    for enum_val in range(enum_size):
        # Binary string of the value, zero-padded to num_bits.
        bit_val = format(enum_val, '0{}b'.format(num_bits))
        assert len(bit_val) == num_bits
        assert all(c in {'0', '1'} for c in bit_val)
        # bit_val is reversed so index 0 is the least-significant bit,
        # matching b_vars[0]; a '0' bit contributes the negated pair.
        assign = [b_vars[idx] if c == '1' else
                  (msat_make_not(menv, b_vars[idx][0]),
                   msat_make_not(menv, b_vars[idx][1]))
                  for idx, c in enumerate(reversed(bit_val))]
        # Conjoin the per-bit literals into one predicate per state.
        pred = assign[0][0]
        x_pred = assign[0][1]
        for it in assign[1:]:
            pred = msat_make_and(menv, pred, it[0])
            x_pred = msat_make_and(menv, x_pred, it[1])
        vals.append(pred)
        x_vals.append(x_pred)
    assert len(vals) == enum_size
    assert len(x_vals) == enum_size
    return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 - arg1 as arg0 + (-1 * arg1)."""
    neg_arg1 = msat_make_times(menv, arg1, msat_make_number(menv, "-1"))
    return msat_make_plus(menv, arg0, neg_arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 < arg1 as not(arg0 >= arg1)."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 >= arg1 as arg1 <= arg0."""
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 > arg1 as not(arg0 <= arg1)."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 -> arg1 as not(arg0) | arg1."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
    """Return the time-elapse symbol (`delta`) that must diverge."""
    real_type = msat_get_rational_type(menv)
    delta = msat_make_constant(
        menv, msat_declare_function(menv, delta_name, real_type))
    return frozenset((delta,))
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                  msat_term, msat_term):
    """Build the sender/receiver timed transition system and its LTL property.

    Composes a Sender and a Receiver over shared channels r2s/s2r plus the
    time-elapse variable delta, and returns
    (TermMap(curr2next), init, trans, ltl) where ltl is
    (G F !s.stutter) -> G (s.wait_ack -> F s.send).
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    int_type = msat_get_integer_type(menv)
    real_type = msat_get_rational_type(menv)
    # Channels: receiver-to-sender and sender-to-receiver message ids.
    r2s, x_r2s = decl_consts(menv, "r2s", int_type)
    s2r, x_s2r = decl_consts(menv, "s2r", int_type)
    delta, x_delta = decl_consts(menv, delta_name, real_type)
    # Each component reads from its first channel and writes to its second.
    sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
    receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
    curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
    for comp in [sender, receiver]:
        for s, x_s in comp.symb2next.items():
            curr2next[s] = x_s
    zero = msat_make_number(menv, "0")
    init = msat_make_and(menv, receiver.init, sender.init)
    trans = msat_make_and(menv, receiver.trans, sender.trans)
    # invar delta >= 0
    init = msat_make_and(menv, init,
                         msat_make_geq(menv, delta, zero))
    trans = msat_make_and(menv, trans,
                          msat_make_geq(menv, x_delta, zero))
    # delta > 0 -> (r2s' = r2s & s2r' = s2r): channels are frozen while
    # time elapses.
    lhs = msat_make_gt(menv, delta, zero)
    rhs = msat_make_and(menv,
                        msat_make_equal(menv, x_r2s, r2s),
                        msat_make_equal(menv, x_s2r, s2r))
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, lhs, rhs))
    # (G F !s.stutter) -> G (s.wait_ack -> F s.send)
    lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
    rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
                                    enc.make_F(sender.send)))
    ltl = msat_make_impl(menv, lhs, rhs)
    return TermMap(curr2next), init, trans, ltl
class Module:
    """Base class for components of the timed transition system.

    Stores the component name, the MathSAT environment and LTL encoder,
    and the init/trans formulas (both initially `true`); subclasses
    strengthen them.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 *args, **kwargs):
        self.name = name
        self.menv = menv
        self.enc = enc
        self.symb2next = {}
        top = msat_make_true(menv)
        self.init = top
        self.trans = top

    def _symb(self, v_name, v_type):
        """Declare a (curr, next) constant pair prefixed with this module's name."""
        return decl_consts(self.menv, "{}_{}".format(self.name, v_name), v_type)

    def _enum(self, v_name: str, enum_size: int):
        """Declare a boolean-encoded enum prefixed with this module's name."""
        return make_enum(self.menv, "{}_{}".format(self.name, v_name), enum_size)
class Sender(Module):
    """Sender component: sends messages and waits for acknowledgements.

    Alternates between locations `send` and `wait_ack`; while waiting it
    compares the incoming channel `in_c` against the current `msg_id` and,
    on timeout, re-enters `wait_ack` with a strictly larger timeout.
    `evt` distinguishes a real move from a stutter step.
    """
    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        int_type = msat_get_integer_type(menv)
        real_type = msat_get_rational_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        evt, x_evt = self._symb("evt", bool_type)
        msg_id, x_msg_id = self._symb("msg_id", int_type)
        timeout, x_timeout = self._symb("timeout", real_type)
        c, x_c = self._symb("c", real_type)
        self.move = evt
        self.stutter = msat_make_not(menv, evt)
        self.x_move = x_evt
        self.x_stutter = msat_make_not(menv, x_evt)
        self.send = loc
        self.wait_ack = msat_make_not(menv, loc)
        self.x_send = x_loc
        self.x_wait_ack = msat_make_not(menv, x_loc)
        self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
                          timeout: x_timeout, c: x_c}
        zero = msat_make_number(menv, "0")
        one = msat_make_number(menv, "1")
        base_timeout = one
        # send & c = 0 & msg_id = 0
        self.init = msat_make_and(menv,
                                  msat_make_and(menv, self.send,
                                                msat_make_equal(menv, c,
                                                                zero)),
                                  msat_make_equal(menv, msg_id, zero))
        # invar: wait_ack -> c <= timeout
        self.init = msat_make_and(
            menv, self.init,
            msat_make_impl(menv, self.wait_ack,
                           msat_make_leq(menv, c, timeout)))
        self.trans = msat_make_impl(menv, self.x_wait_ack,
                                    msat_make_leq(menv, x_c, x_timeout))
        # delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
        # c' = c + delta & out_c' = out_c
        lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_iff(menv, x_loc, loc),
                          msat_make_equal(menv, x_msg_id, msg_id)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_timeout, timeout),
                          msat_make_equal(menv, x_c,
                                          msat_make_plus(menv, c, delta))))
        rhs = msat_make_and(menv, rhs,
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # Discrete transition: a real move taken with no time elapse.
        disc_t = msat_make_and(menv, self.move,
                               msat_make_equal(menv, delta, zero))
        # (send & send') ->
        # (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_send))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id, msg_id),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (send & wait_ack') ->
        # (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_wait_ack))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id,
                                          msat_make_plus(menv, msg_id, one)),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (c' = 0 & out_c' = out_c &
        # (wait_ack' <-> (in_c != msg_id & c >= timeout))
        # NOTE: msat_make_geq makes this c >= timeout (the original comment
        # said c > timeout, which did not match the code).
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs_iff = msat_make_and(menv,
                                msat_make_not(menv,
                                              msat_make_equal(menv, in_c,
                                                              msg_id)),
                                msat_make_geq(menv, c, timeout))
        rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
        rhs = msat_make_and(menv,
                            msat_make_and(menv,
                                          msat_make_equal(menv, x_c, zero),
                                          msat_make_equal(menv, x_out_c,
                                                          out_c)),
                            rhs_iff)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack & wait_ack') -> (timeout' > timeout): retries back off.
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack,
                                          self.x_wait_ack))
        rhs = msat_make_gt(menv, x_timeout, timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs = msat_make_iff(menv, self.x_send,
                            msat_make_and(menv,
                                          msat_make_equal(menv, in_c, msg_id),
                                          msat_make_lt(menv, c, timeout)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack & send') -> (timeout' = base_timeout)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack, self.x_send))
        rhs = msat_make_equal(menv, x_timeout, base_timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
    """Receiver component: acknowledges messages from the sender.

    Waits in `wait` while the input channel `in_c` equals the already
    acknowledged id on `out_c`; on a new id it moves to `work`, copying
    `in_c` to `out_c` as the acknowledgement.
    """
    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        self.wait = loc
        self.work = msat_make_not(menv, loc)
        self.x_wait = x_loc
        self.x_work = msat_make_not(menv, x_loc)
        self.symb2next = {loc: x_loc}
        zero = msat_make_number(menv, "0")
        # wait
        self.init = self.wait
        # delta > 0 -> loc' = loc & out_c' = out_c
        lhs = msat_make_gt(menv, delta, zero)
        rhs = msat_make_and(menv,
                            msat_make_iff(menv, x_loc, loc),
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_impl(menv, lhs, rhs)
        # Discrete transitions happen only when no time elapses.
        disc_t = msat_make_equal(menv, delta, zero)
        # wait -> (wait' <-> in_c = out_c)
        lhs = msat_make_and(menv, disc_t, self.wait)
        rhs = msat_make_iff(menv, self.x_wait,
                            msat_make_equal(menv, in_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & wait') -> (out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_wait))
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & work') -> out_c' = in_c
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_work))
        rhs = msat_make_equal(menv, x_out_c, in_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # work -> out_c' = out_c
        lhs = msat_make_and(menv, disc_t, self.work)
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Return candidate loop hints over the system's symbols.

    Declares the pysmt counterparts of the transition-system symbols and
    builds three single-location hints: s_c stays 0, s_l stays true, and
    s2r stays 0.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    delta = mgr.Symbol(delta_name, types.REAL)
    r2s = mgr.Symbol("r2s", types.INT)
    # Bug fix: this symbol was declared with the name "r2s", so `s2r`
    # aliased the r2s symbol and the h_s2r0 hint ranged over the wrong
    # variable (check_ltl declares a distinct "s2r" constant).
    s2r = mgr.Symbol("s2r", types.INT)
    s_l = mgr.Symbol("s_l", types.BOOL)
    s_evt = mgr.Symbol("s_evt", types.BOOL)
    s_msg_id = mgr.Symbol("s_msg_id", types.INT)
    s_timeout = mgr.Symbol("s_timeout", types.REAL)
    s_c = mgr.Symbol("s_c", types.REAL)
    r_l = mgr.Symbol("r_l", types.BOOL)
    symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
                       r_l])
    # Next-state counterparts (declared for completeness/side effects).
    x_delta = symb_to_next(mgr, delta)
    x_r2s = symb_to_next(mgr, r2s)
    x_s2r = symb_to_next(mgr, s2r)
    x_s_l = symb_to_next(mgr, s_l)
    x_s_evt = symb_to_next(mgr, s_evt)
    x_s_msg_id = symb_to_next(mgr, s_msg_id)
    x_s_timeout = symb_to_next(mgr, s_timeout)
    x_s_c = symb_to_next(mgr, s_c)
    x_r_l = symb_to_next(mgr, r_l)
    res = []
    r0 = mgr.Real(0)
    r1 = mgr.Real(1)
    i0 = mgr.Int(0)
    i1 = mgr.Int(1)
    # s_c = 0 stays s_c = 0.
    loc0 = Location(env, mgr.Equals(s_c, r0))
    loc0.set_progress(0, mgr.Equals(x_s_c, r0))
    hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s_l stays true.
    loc0 = Location(env, s_l)
    loc0.set_progress(0, x_s_l)
    hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s2r = 0 stays s2r = 0.
    loc0 = Location(env, mgr.Equals(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, i0))
    hint = Hint("h_s2r0", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    return frozenset(res)
| benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/3-sender_receiver_5.py | 15,990 | invar delta >= 0 delta > 0 -> (r2s' = r2s & s2r' = s2r) (G F !s.stutter) -> G (s.wait_ack -> F s.send) send & c = 0 & msg_id = 0 invar: wait_ack -> c <= timeout delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout & c' = c + delta & out_c' = out_c (send & send') -> (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c) (send & wait_ack') -> (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c) (wait_ack) -> (c' = 0 & out_c' = out_c & (wait_ack' <-> (in_c != msg_id & c > timeout)) (wait_ack & wait_ack') -> (timeout' > timeout) (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout)) (wait_ack & send') -> (timeout' = base_timeout) wait delta > 0 -> loc' = loc & out_c' = out_c wait -> (wait' <-> in_c = out_c) (wait & wait') -> (out_c' = out_c) (wait & work') -> out_c' = in_c work -> out_c' = out_c | 858 | en | 0.254738 |
import os
import numpy as np
import pandas as pd
from fedot.core.data.data import InputData
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.repository.dataset_types import DataTypesEnum
from fedot.core.repository.tasks import Task, TaskTypesEnum, TsForecastingParams
from fedot.core.utils import fedot_project_root
from test.unit.tasks.test_regression import get_synthetic_regression_data
def get_ts_pipeline(window_size):
    """Build a two-node pipeline: a lagged transformation feeding a ridge model."""
    lagged = PrimaryNode('lagged')
    lagged.custom_params = {'window_size': window_size}
    ridge = SecondaryNode('ridge', nodes_from=[lagged])
    return Pipeline(ridge)
def get_ransac_pipeline():
    """Build a two-node pipeline: RANSAC outlier filtering feeding a linear model."""
    ransac = PrimaryNode('ransac_lin_reg')
    final = SecondaryNode('linear', nodes_from=[ransac])
    return Pipeline(final)
def test_lagged_with_invalid_params_fit_correctly():
    """Fit a pipeline whose lagged transformation has an invalid 'window_size'.

    The requested window (600) is longer than the 500-element series, so
    the pipeline is expected to correct 'window_size' during fitting
    (to 439 for this series).
    """
    window_size = 600
    len_forecast = 50
    # The length of the time series is 500 elements
    project_root_path = str(fedot_project_root())
    file_path = os.path.join(project_root_path, 'test/data/short_time_series.csv')
    df = pd.read_csv(file_path)
    time_series = np.array(df['sea_height'])
    task = Task(TaskTypesEnum.ts_forecasting,
                TsForecastingParams(forecast_length=len_forecast))
    ts_input = InputData(idx=np.arange(0, len(time_series)), features=time_series,
                         target=time_series, task=task, data_type=DataTypesEnum.ts)
    # Get pipeline with lagged transformation in it
    pipeline = get_ts_pipeline(window_size)
    # Fit it
    pipeline.fit(ts_input)
    # Get lagged node and check that its window_size was adjusted.
    lagged_node = pipeline.nodes[1]
    fixed_params = lagged_node.custom_params
    assert pipeline.is_fitted
    assert fixed_params['window_size'] == 439
def test_ransac_with_invalid_params_fit_correctly():
    """Check that on a small dataset the RANSAC anomaly search algorithm can
    adjust the values of hyperparameters.

    As stated in the sklearn documentation, min_samples is determined by
    default based on how many features are in the dataset, so problems can
    arise when a dataset has more features (23) than objects (20), as here.
    """
    # More features than samples on purpose to trigger the adjustment.
    input_regression = get_synthetic_regression_data(n_samples=20, n_features=23)
    ransac_pipeline = get_ransac_pipeline()
    ransac_pipeline.fit(input_regression)
    predicted = ransac_pipeline.predict(input_regression)
    assert ransac_pipeline.is_fitted
    assert predicted is not None
| test/unit/data_operations/test_data_operation_params.py | 2,949 | Function return pipeline with lagged transformation in it
Function return pipeline with lagged transformation in it
The function define a pipeline with incorrect parameters in the lagged
transformation. During the training of the pipeline, the parameter 'window_size'
is corrected
Check that on a small dataset the RANSAC anomaly search algorithm can
adjust the values of hyperparameters
As stated in the sklearn documentation, min_samples is determined by default
based on how many features are in the dataset
Therefore, problems can arise when there are more attributes in a dataset
than the number of objects
The length of the time series is 500 elements Get pipeline with lagged transformation in it Fit it Get lagged node | 731 | en | 0.843883 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.swob import Request
from swift.common.middleware import domain_remap
class FakeApp(object):
    """Minimal WSGI app stub that echoes back the request path it received."""
    def __call__(self, environ, start_response):
        return environ['PATH_INFO']
def start_response(*args):
    """No-op WSGI start_response stub; ignores status/header arguments."""
    return None
class TestDomainRemap(unittest.TestCase):
    """Tests for the domain_remap middleware's host-to-path translation.

    Uses the deprecated `assertEquals` alias replaced with `assertEqual`
    (the alias is removed in Python 3.12).
    """

    def setUp(self):
        self.app = domain_remap.DomainRemapMiddleware(FakeApp(), {})

    def test_domain_remap_passthrough(self):
        # Hosts with no account component pass through unchanged.
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
                                          'SERVER_NAME': 'example.com'},
                            headers={'Host': None})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/')
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/')
        # A port in the Host header must not affect remapping.
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'example.com:8080'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/')

    def test_domain_remap_account(self):
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
                                          'SERVER_NAME': 'AUTH_a.example.com'},
                            headers={'Host': None})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/v1/AUTH_a')
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'AUTH_a.example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/v1/AUTH_a')
        # A '-' after the reseller prefix is normalized to '_'.
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'AUTH-uuid.example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/v1/AUTH_uuid')

    def test_domain_remap_account_container(self):
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'c.AUTH_a.example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/v1/AUTH_a/c')

    def test_domain_remap_extra_subdomains(self):
        # More than container.account is rejected.
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'x.y.c.AUTH_a.example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, ['Bad domain in host header'])

    def test_domain_remap_account_with_path_root(self):
        req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'AUTH_a.example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/v1/AUTH_a')

    def test_domain_remap_account_container_with_path_root(self):
        req = Request.blank('/v1', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'c.AUTH_a.example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/v1/AUTH_a/c')

    def test_domain_remap_account_container_with_path(self):
        req = Request.blank('/obj', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'c.AUTH_a.example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/v1/AUTH_a/c/obj')

    def test_domain_remap_account_container_with_path_root_and_path(self):
        req = Request.blank('/v1/obj', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'c.AUTH_a.example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/v1/AUTH_a/c/obj')

    def test_domain_remap_account_matching_ending_not_domain(self):
        # 'aexample.com' only *ends with* the storage domain; no remap.
        req = Request.blank('/dontchange', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'c.aexample.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/dontchange')

    def test_domain_remap_configured_with_empty_storage_domain(self):
        self.app = domain_remap.DomainRemapMiddleware(FakeApp(),
                                                      {'storage_domain': ''})
        req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'c.AUTH_a.example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/test')

    def test_domain_remap_configured_with_prefixes(self):
        conf = {'reseller_prefixes': 'PREFIX'}
        self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
        req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'c.prefix_uuid.example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/v1/PREFIX_uuid/c/test')

    def test_domain_remap_configured_with_bad_prefixes(self):
        conf = {'reseller_prefixes': 'UNKNOWN'}
        self.app = domain_remap.DomainRemapMiddleware(FakeApp(), conf)
        req = Request.blank('/test', environ={'REQUEST_METHOD': 'GET'},
                            headers={'Host': 'c.prefix_uuid.example.com'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, '/test')
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| test/unit/common/middleware/test_domain_remap.py | 6,065 | Copyright (c) 2010-2012 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 565 | en | 0.859753 |
from __future__ import absolute_import
from .base import BasePage
from .global_selection import GlobalSelectionPage
class IssueDetailsPage(BasePage):
    """Page object for the issue-details view: navigation, actions and waits."""

    def __init__(self, browser, client):
        super(IssueDetailsPage, self).__init__(browser)
        self.client = client
        self.global_selection = GlobalSelectionPage(browser)

    def visit_issue(self, org, groupid):
        """Open the issue page and wait until it has fully rendered."""
        self.browser.get(u"/organizations/{}/issues/{}/".format(org, groupid))
        self.wait_until_loaded()

    def visit_issue_in_environment(self, org, groupid, environment):
        """Open the issue page filtered to a single environment."""
        self.browser.get(
            u"/organizations/{}/issues/{}/?environment={}".format(org, groupid, environment)
        )
        self.browser.wait_until(".group-detail")

    def visit_tag_values(self, org, groupid, tag):
        """Open the tag-values listing for one tag of the issue."""
        self.browser.get(u"/organizations/{}/issues/{}/tags/{}".format(org, groupid, tag))
        self.browser.wait_until_not(".loading-indicator")

    def get_environment(self):
        """Return the currently selected environment label, lower-cased."""
        # Bug fix: the attribute selector was missing its closing ']'
        # ('[data-test-id="env-label"'), which is an invalid CSS selector.
        return self.browser.find_element_by_css_selector('[data-test-id="env-label"]').text.lower()

    def go_back_to_issues(self):
        self.global_selection.go_back_to_issues()

    def api_issue_get(self, groupid):
        """Fetch the issue via the REST API (returns the client response)."""
        return self.client.get(u"/api/0/issues/{}/".format(groupid))

    def go_to_subtab(self, name):
        """Click the named tab in the issue header and wait for it to load."""
        tabs = self.browser.find_element_by_css_selector(".group-detail .nav-tabs")
        tabs.find_element_by_partial_link_text(name).click()
        self.browser.wait_until_not(".loading-indicator")

    def open_issue_errors(self):
        self.browser.click(".errors-toggle")
        self.browser.wait_until(".entries > .errors ul")

    def open_curl(self):
        self.browser.find_element_by_xpath("//a//code[contains(text(), 'curl')]").click()

    def resolve_issue(self):
        self.browser.click('[data-test-id="action-link-resolve"]')
        # Resolve should become unresolve
        self.browser.wait_until('[data-test-id="button-unresolve"]')

    def ignore_issue(self):
        self.browser.click('[data-test-id="action-link-ignore"]')
        # Ignore should become unresolve
        self.browser.wait_until('[data-test-id="button-unresolve"]')

    def bookmark_issue(self):
        self.browser.click(".group-bookmark")
        self.browser.wait_until(".group-bookmark.active")

    def assign_to(self, user):
        """Assign the issue to the first member/team matching `user`."""
        assignee = self.browser.find_element_by_css_selector(".assigned-to")
        # Open the assignee picker
        assignee.find_element_by_css_selector('[role="button"]').click()
        assignee.find_element_by_tag_name("input").send_keys(user)
        # Click the member/team
        options = assignee.find_elements_by_css_selector('[data-test-id="assignee-option"]')
        assert len(options) > 0, "No assignees could be found."
        options[0].click()
        self.browser.wait_until_not(".loading-indicator")

    def find_comment_form(self):
        return self.browser.find_element_by_css_selector('[data-test-id="note-input-form"]')

    def has_comment(self, text):
        """Return True if an activity note containing `text` is present."""
        element = self.browser.element('[data-test-id="activity-note-body"]')
        return text in element.text

    def wait_until_loaded(self):
        """Block until every async-loaded panel of the page has rendered."""
        self.browser.wait_until_not(".loading-indicator")
        self.browser.wait_until_test_id("event-entries")
        self.browser.wait_until_test_id("linked-issues")
        self.browser.wait_until_test_id("loaded-device-name")
        if self.browser.element_exists("#grouping-info"):
            self.browser.wait_until_test_id("loaded-grouping-info")
| tests/acceptance/page_objects/issue_details.py | 3,534 | Resolve should become unresolve Ignore should become unresolve Open the assignee picker Click the member/team | 109 | en | 0.942679 |
#!/usr/bin/python
import livejournal
import os
import getpass
import urllib
lj = livejournal.LJ('evan', getpass.getpass(), 'evan_tech')
def dump_entries(dirname, response):
    """Given a getevents response, dump all the entries into files
    named by itemid.
    Return the set of itemids received."""
    all_itemids = set()
    # Group the flat prop_<i>_{itemid,name,value} triples by itemid.
    props = {}
    for i in range(1, int(response.get('prop_count', 0)) + 1):
        itemid = response['prop_%d_itemid' % i]
        name = response['prop_%d_name' % i]
        value = response['prop_%d_value' % i]
        if itemid not in props:
            props[itemid] = {}
        props[itemid][name] = value
    # One output file per event, written as "field: value" headers, the
    # entry's props, a blank line, then the (URL-unquoted) entry body.
    for i in range(1, int(response.get('events_count', 0)) + 1):
        itemid = response['events_%d_itemid' % i]
        all_itemids.add(itemid)
        with open('%s/%s' % (dirname, itemid), 'w') as outfile:
            fields = ('itemid',
                      'anum',
                      'eventtime',
                      'security',
                      'allowmask',
                      'poster',
                      'url',
                      'subject',)
            for field in fields:
                key = 'events_%d_%s' % (i, field)
                # Optional fields are simply absent from the response.
                if key in response:
                    print >>outfile, field + ':', response[key]
            if itemid in props:
                for key, val in props[itemid].items():
                    print >>outfile, key + ':', val
            print >>outfile
            key = 'events_%d_event' % i
            print >>outfile, urllib.unquote(response[key])
    return all_itemids
def get_syncitems():
    """Run the loop to get all 'syncitems' entries.

    Pages through the server's syncitems list, resuming each request from
    the newest timestamp seen so far.
    Returns a list of (syncitem, time) tuples."""
    syncitems = lj.run('syncitems')
    items = []
    total = int(syncitems['sync_total'])
    print '%d/%d syncitems' % (len(items), total)
    lastsync = None
    while len(items) < total:
        for i in range(1, int(syncitems['sync_count']) + 1):
            item = syncitems['sync_%d_item' % i]
            time = syncitems['sync_%d_time' % i]
            # Timestamps compare correctly as strings ('YYYY-MM-DD HH:MM:SS').
            lastsync = max(lastsync, time)
            items.append((item, time))
        if len(items) < total:
            syncitems = lj.run('syncitems', lastsync=lastsync)
    return items
def subtract_second(time):
    """Subtract a second from an LJ timestamp string ('... hh:mm:ss').

    Rolls over through minutes and hours; raises RuntimeError only at
    exactly midnight, where the date component itself would change.
    This syncing system is way too complicated."""
    sec = int(time[-2:])
    if sec > 0:
        return time[:-2] + '%02d' % (sec - 1)
    minute = int(time[-5:-3])
    if minute > 0:
        return time[:-5] + '%02d:59' % (minute - 1)
    hour = int(time[-8:-6])
    if hour > 0:
        return time[:-8] + '%02d:59:59' % (hour - 1)
    # `raise E, "msg"` was Python-2-only syntax; call form works everywhere.
    raise RuntimeError("Couldn't subtract second from " + time)
# Dump everything into a directory named after the journal being archived.
output_dir = lj.usejournal or lj.user
try:
    os.mkdir(output_dir)
except OSError:
    pass  # Assume it exists already.
# Fetch syncitems; convert to a map of itemid => time.
items = get_syncitems()
remaining = {}
for item, time in items:
    # Only log entries ('L-<itemid>'); skip comments and other item types.
    if item[0] != 'L':
        continue
    remaining[item[2:]] = time
# Download items, crossing them off as we get them.
while len(remaining) > 0:
    lastsync = min(remaining.values())
    # Parenthesized single-argument print works under Python 2 and 3.
    print('%d left, lastsync %s' % (len(remaining), lastsync))
    # getevents' syncitems mode returns entries strictly newer than
    # lastsync, so back up one second to make the oldest remaining
    # entry inclusive.
    lastsync = subtract_second(lastsync)
    entries = lj.run('getevents',
                     selecttype='syncitems',
                     lastsync=lastsync,
                     lineendings='unix')
    done = dump_entries(output_dir, entries)
    for itemid in done:
        if itemid in remaining:
            del remaining[itemid]
| dump.py | 3,500 | !/usr/bin/python Assume it exists already. Fetch syncitems; convert to a map of itemid => time. Download items, crossing them off as we get them. | 145 | en | 0.900052 |
import logging
import sys
import gym
# Module-level loggers: this module's own logger, the root logger (which
# receives the default handler below), and the 'requests' library logger.
logger = logging.getLogger(__name__)
root_logger = logging.getLogger()
requests_logger = logging.getLogger('requests')
# Set up the default handler: timestamped messages written to stderr.
formatter = logging.Formatter('[%(asctime)s] %(message)s')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
# We need to take in the gym logger explicitly since this is called
# at initialization time.
def logger_setup(gym_logger):
    """Attach the default stderr handler to the root logger and raise the
    given gym logger to INFO verbosity."""
    gym_logger.setLevel(logging.INFO)
    root_logger.addHandler(handler)
    # At INFO, the requests logger would print the hostname of every
    # connection it makes; it is deliberately left at its default level.
    # requests_logger.setLevel(logging.WARN)
def undo_logger_setup():
    """Revert the automatic logging setup performed by OpenAI Gym.

    Call this if you want to configure logging manually. A typical
    script would then start with something like:

        gym.undo_logger_setup()
        logger = logging.getLogger()
        logger.addHandler(logging.StreamHandler(sys.stderr))
    """
    # Detach Gym's default handler, then reset both touched loggers to
    # NOTSET so they defer to their parents again.
    root_logger.removeHandler(handler)
    requests_logger.setLevel(logging.NOTSET)
    gym.logger.setLevel(logging.NOTSET)
| gym/configuration.py | 1,186 | Undoes the automatic logging setup done by OpenAI Gym. You should call
this function if you want to manually configure logging
yourself. Typical usage would involve putting something like the
following at the top of your script:
gym.undo_logger_setup()
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stderr))
Set up the default handler We need to take in the gym logger explicitly since this is called at initialization time. When set to INFO, this will print out the hostname of every connection it makes. requests_logger.setLevel(logging.WARN) | 574 | en | 0.860058 |
"""Child worker process module."""
import os
import sys
import time
import signal
import socket
import shutil
import logging
import argparse
import platform
import threading
import subprocess
import traceback
def parse_cmdline():
    """Child worker command line parsing"""
    parser = argparse.ArgumentParser(description="Remote runner parser")
    # (flag, extra kwargs) pairs; every option uses action="store".
    options = (
        ("--address", {}),
        ("--index", {}),
        ("--wd", {}),
        ("--runpath", {"default": None}),
        ("--type", {}),
        ("--log-level", {"default": 0, "type": int}),
        ("--remote-pool-type", {"default": "thread"}),
        ("--remote-pool-size", {"default": 1}),
        ("--sys-path-file", {}),
    )
    for flag, extra in options:
        parser.add_argument(flag, action="store", **extra)
    return parser.parse_args()
class ChildLoop(object):
    """
    Child process loop that can be started in a process and starts a local
    thread pool to execute the tasks received.
    """
    def __init__(
        self,
        index,
        transport,
        pool_type,
        pool_size,
        worker_type,
        logger,
        runpath=None,
    ):
        # Identity of this worker as reported to the parent pool.
        self._metadata = {"index": index, "pid": os.getpid()}
        self._transport = transport
        self._pool_type = pool_type
        self._pool_size = int(pool_size)
        # Populated from the parent's ConfigSending response in
        # _pre_loop_setup; None until then.
        self._pool_cfg = None
        self._worker_type = worker_type
        self._to_heartbeat = float(0)
        self.runpath = runpath
        self.logger = logger
    @property
    def metadata(self):
        """Metadata information."""
        return self._metadata
    def _child_pool(self):
        """Create and remember the local pool that executes received tasks."""
        # Local thread pool will not cleanup the previous layer runpath.
        self._pool = self._pool_type(
            name="Pool_{}".format(self._metadata["pid"]),
            worker_type=self._worker_type,
            size=self._pool_size,
            runpath=self.runpath,
            allow_task_rerun=False,  # always return False
        )
        self._pool.parent = self
        self._pool.cfg.parent = self._pool_cfg
        return self._pool
    def _handle_abort(self, signum, frame):
        """Signal handler: abort the local pool, then SIGKILL this process."""
        self.logger.debug(
            "Signal handler called for signal {} from {}".format(
                signum, threading.current_thread()
            )
        )
        if self._pool:
            self._pool.abort()
            os.kill(os.getpid(), 9)
            # NOTE(review): os.kill(..., 9) above sends SIGKILL to this very
            # process, so this debug line likely never executes.
            self.logger.debug("Pool {} aborted.".format(self._pool))
    def _setup_logfiles(self):
        """Redirect this process' stdio into per-worker files under runpath."""
        from testplan.common.utils.logger import LOGFILE_FORMAT
        if not os.path.exists(self.runpath):
            os.makedirs(self.runpath)
        # Files are named "<index>_stderr" / "<index>_stdout".
        stderr_file = os.path.join(
            self.runpath, "{}_stderr".format(self._metadata["index"])
        )
        log_file = os.path.join(
            self.runpath, "{}_stdout".format(self._metadata["index"])
        )
        self.logger.info(
            "stdout file = %(file)s (log level = %(lvl)s)",
            {"file": log_file, "lvl": self.logger.level},
        )
        self.logger.info("stderr file = %s", stderr_file)
        self.logger.info(
            "Closing stdin, stdout and stderr file descriptors..."
        )
        # This closes stdin, stdout and stderr for this process.
        for fdesc in range(3):
            os.close(fdesc)
        mode = "w" if platform.python_version().startswith("3") else "wb"
        sys.stderr = open(stderr_file, mode)
        # Mirror this worker's log records into the stdout file.
        fhandler = logging.FileHandler(log_file, encoding="utf-8")
        formatter = logging.Formatter(LOGFILE_FORMAT)
        fhandler.setFormatter(formatter)
        fhandler.setLevel(self.logger.level)
        self.logger.addHandler(fhandler)
    def _send_and_expect(self, message, send, expect):
        """Send one message and wait for a response of the expected type(s)."""
        try:
            return self._transport.send_and_receive(
                message.make(send), expect=expect
            )
        except AttributeError:
            self.logger.critical("Pool seems dead, child exits2.")
            raise
    def _pre_loop_setup(self, message):
        """Pull pool config from the parent, install signal handlers and
        redirect log files before entering the worker loop."""
        response = self._send_and_expect(
            message,
            message.ConfigRequest,
            [message.ConfigSending, message.Stop],
        )
        # Process pool might be exiting after worker restarts and tries
        # to connect, at this time worker can gracefully exit.
        if response.cmd == message.Stop:
            print("Stop message received, child exits.")
            os._exit(0)
        # Response.data: [cfg, cfg.parent, cfg.parent.parent, ...]
        pool_cfg = response.data[0]
        # Re-link the flattened config chain back into parent references.
        for idx, cfg in enumerate(response.data):
            try:
                cfg.parent = response.data[idx + 1]
            except IndexError:
                break
        self._pool_cfg = pool_cfg
        for sig in self._pool_cfg.abort_signals:
            signal.signal(sig, self._handle_abort)
        pool_metadata = response.sender_metadata
        # Fall back to the parent pool's runpath if none was given.
        if self.runpath is None:
            if pool_metadata.get("runpath") is None:
                raise RuntimeError("runpath was not set in pool metadata")
            self.runpath = pool_metadata["runpath"]
        self._setup_logfiles()
    def worker_loop(self):
        """
        Child process worker loop. Manages an underlying thread pool, pulls and
        sends back results to the main pool.
        """
        from testplan.runners.pools.communication import Message
        message = Message(**self.metadata)
        try:
            self._pre_loop_setup(message)
        except Exception:
            # Report the failure upstream instead of dying silently.
            print("_pre_loop_setup failed")
            self._transport.send_and_receive(
                message.make(message.SetupFailed, data=traceback.format_exc()),
                expect=message.Ack,
            )
            return
        with self._child_pool():
            message = Message(**self.metadata)
            next_possible_request = time.time()
            next_heartbeat = time.time()
            request_delay = self._pool_cfg.active_loop_sleep
            while True:
                # TODO: SHALL CHECK CHILD POOL ALIVE HERE
                now = time.time()
                # 1) Periodic heartbeat; a missing response means the
                #    parent pool is gone and this child should exit.
                if self._pool_cfg.worker_heartbeat and now > next_heartbeat:
                    hb_resp = self._transport.send_and_receive(
                        message.make(message.Heartbeat, data=time.time())
                    )
                    if hb_resp is None:
                        self.logger.critical("Pool seems dead, child exits1.")
                        self.exit_loop()
                        break
                    else:
                        self.logger.debug(
                            "Pool heartbeat response:"
                            " {} at {} before {}s.".format(
                                hb_resp.cmd,
                                hb_resp.data,
                                time.time() - hb_resp.data,
                            )
                        )
                    next_heartbeat = now + self._pool_cfg.worker_heartbeat
                # Send back results
                if self._pool.results:
                    task_results = []
                    for uid in list(self._pool.results.keys()):
                        task_results.append(self._pool.results[uid])
                        self.logger.debug(
                            "Sending back result for {}".format(
                                self._pool.results[uid].task
                            )
                        )
                        del self._pool.results[uid]
                    self._transport.send_and_receive(
                        message.make(message.TaskResults, data=task_results),
                        expect=message.Ack,
                    )
                # Request new tasks
                demand = (
                    self._pool.workers_requests()
                    - self._pool.unassigned.qsize()
                )
                if demand > 0 and time.time() > next_possible_request:
                    received = self._transport.send_and_receive(
                        message.make(message.TaskPullRequest, data=demand)
                    )
                    if received is None or received.cmd == Message.Stop:
                        self.logger.critical(
                            "Pool seems dead or stopping, child exits."
                        )
                        self.exit_loop()
                        break
                    elif received.cmd == Message.TaskSending:
                        next_possible_request = time.time()
                        request_delay = 0
                        for task in received.data:
                            self.logger.debug(
                                "Added {} to local pool".format(task)
                            )
                            self._pool.add(task, task.uid())
                        # Reset workers request counters
                        for worker in self._pool._workers:
                            worker.requesting = 0
                    elif received.cmd == Message.Ack:
                        # No tasks available: back off exponentially, capped
                        # at max_active_loop_sleep.
                        request_delay = min(
                            (request_delay + 0.2) * 1.5,
                            self._pool_cfg.max_active_loop_sleep,
                        )
                        next_possible_request = time.time() + request_delay
                        pass
                time.sleep(self._pool_cfg.active_loop_sleep)
        self.logger.info("Local pool {} stopped.".format(self._pool))
    def exit_loop(self):
        """Abort the local pool; this ends the worker_loop pool context."""
        self._pool.abort()
class RemoteChildLoop(ChildLoop):
    """
    Child loop for remote workers.
    This involved exchange of metadata for additional functionality.
    """
    def __init__(self, *args, **kwargs):
        super(RemoteChildLoop, self).__init__(*args, **kwargs)
        # Populated by _pre_loop_setup; None until the metadata pull succeeds.
        self._setup_metadata = None
    def _pre_loop_setup(self, message):
        """Extend base setup with remote metadata: apply requested env vars
        and run the optional setup script.

        :raises RuntimeError: if the remote setup script exits non-zero.
        """
        super(RemoteChildLoop, self)._pre_loop_setup(message)
        self._setup_metadata = self._send_and_expect(
            message, message.MetadataPull, message.Metadata
        ).data
        if self._setup_metadata.env:
            # Apply environment overrides requested by the parent.
            for key, value in self._setup_metadata.env.items():
                os.environ[key] = value
        if self._setup_metadata.setup_script:
            if subprocess.call(
                self._setup_metadata.setup_script,
                stdout=sys.stdout,
                stderr=sys.stderr,
            ):
                raise RuntimeError("Setup script exited with non 0 code.")
    def exit_loop(self):
        """Clean up pushed files/dirs (if requested) before aborting the pool."""
        # Guard against exiting before _pre_loop_setup completed, in which
        # case there is no metadata and nothing was pushed to clean up.
        if (
            self._setup_metadata is not None
            and self._setup_metadata.delete_pushed
        ):
            for item in self._setup_metadata.push_dirs:
                self.logger.test_info("Removing directory: {}".format(item))
                shutil.rmtree(item, ignore_errors=True)
            for item in self._setup_metadata.push_files:
                self.logger.test_info("Removing file: {}".format(item))
                os.remove(item)
        super(RemoteChildLoop, self).exit_loop()
def child_logic(args):
    """Able to be imported child logic."""
    import psutil
    from testplan.runners.pools.base import Pool, Worker
    from testplan.runners.pools.process import ProcessPool, ProcessWorker
    from testplan.runners.pools.connection import ZMQClient
    # NOTE(review): TESTPLAN_LOGGER is only imported here when
    # args.log_level is truthy, but it is referenced unconditionally when
    # constructing the loops below. When run as a script the module-level
    # import in __main__ covers it; confirm for library use with
    # log_level=0.
    if args.log_level:
        from testplan.common.utils.logger import (
            TESTPLAN_LOGGER,
            STDOUT_HANDLER,
        )
        TESTPLAN_LOGGER.setLevel(args.log_level)
        TESTPLAN_LOGGER.removeHandler(STDOUT_HANDLER)
    print(
        "Starting child process worker on {}, {} with parent {}".format(
            socket.gethostname(),
            os.getpid(),
            psutil.Process(os.getpid()).ppid(),
        )
    )
    # Start from a clean runpath; the parent recreates what it needs.
    if args.runpath:
        print("Removing old runpath: {}".format(args.runpath))
        shutil.rmtree(args.runpath, ignore_errors=True)
    class NoRunpathPool(Pool):
        """
        Pool that creates no runpath directory.
        Has only one worker.
        Will use the one already created by parent process.
        """
        # To eliminate a not needed runpath layer.
        def make_runpath_dirs(self):
            self._runpath = self.cfg.runpath
    class NoRunpathThreadPool(Pool):
        """
        Pool that creates no runpath directory.
        Will use the one already created by parent process.
        Supports multiple thread workers.
        """
        # To eliminate a not needed runpath layer.
        def make_runpath_dirs(self):
            self._runpath = self.cfg.runpath
    class NoRunpathProcessPool(ProcessPool):
        """
        Pool that creates no runpath directory.
        Will use the one already created by parent process.
        Supports multiple process workers.
        """
        # To eliminate a not needed runpath layer.
        def make_runpath_dirs(self):
            self._runpath = self.cfg.runpath
    transport = ZMQClient(address=args.address, recv_timeout=30)
    # Dispatch on worker type: local process worker vs remote worker.
    if args.type == "process_worker":
        loop = ChildLoop(
            args.index, transport, NoRunpathPool, 1, Worker, TESTPLAN_LOGGER
        )
        loop.worker_loop()
    elif args.type == "remote_worker":
        if args.remote_pool_type == "process":
            pool_type = NoRunpathProcessPool
            worker_type = ProcessWorker
        else:
            pool_type = NoRunpathThreadPool
            worker_type = Worker
        loop = RemoteChildLoop(
            args.index,
            transport,
            pool_type,
            args.remote_pool_size,
            worker_type,
            TESTPLAN_LOGGER,
            runpath=args.runpath,
        )
        loop.worker_loop()
def parse_syspath_file(filename):
    """
    Read and parse the syspath file, which should contain each sys.path entry
    on a separate line.

    Blank lines (including the trailing newline most editors append) are
    skipped: an empty string on sys.path implicitly makes the current
    working directory importable, which is never intended here.
    """
    with open(filename) as f:
        # splitlines() avoids the trailing '' entry that split("\n")
        # produces for a file ending in a newline.
        new_syspath = [line for line in f.read().splitlines() if line]
    return new_syspath
if __name__ == "__main__":
"""
To start an external child process worker.
"""
ARGS = parse_cmdline()
if ARGS.wd:
os.chdir(ARGS.wd)
if ARGS.sys_path_file:
sys.path = parse_syspath_file(ARGS.sys_path_file)
# upfront import to speed-up execution
import testplan
import psutil
from testplan.runners.pools.communication import Message
from testplan.runners.pools.base import Pool, Worker
from testplan.runners.pools.process import ProcessPool, ProcessWorker
from testplan.runners.pools.connection import ZMQClient
from testplan.common.utils.logger import LOGFILE_FORMAT
from testplan.common.utils.logger import (
TESTPLAN_LOGGER,
STDOUT_HANDLER,
)
child_logic(ARGS)
print("child.py exiting")
os._exit(0)
| testplan/runners/pools/child.py | 14,836 | Child process loop that can be started in a process and starts a local
thread pool to execute the tasks received.
Pool that creates no runpath directory.
Has only one worker.
Will use the one already created by parent process.
Pool that creates no runpath directory.
Will use the one already created by parent process.
Supports multiple process workers.
Pool that creates no runpath directory.
Will use the one already created by parent process.
Supports multiple thread workers.
Child loop for remote workers.
This involved exchange of metadata for additional functionality.
Able to be imported child logic.
Metadata information.
Child worker command line parsing
Read and parse the syspath file, which should contain each sys.path entry
on a separate line.
Child process worker loop. Manages an underlying thread pool, pulls and
sends back results to the main pool.
Child worker process module.
Local thread pool will not cleanup the previous layer runpath. always return False This closes stdin, stdout and stderr for this process. Process pool might be exiting after worker restarts and tries to connect, at this time worker can gracefully exit. Response.data: [cfg, cfg.parent, cfg.parent.parent, ...] TODO: SHALL CHECK CHILD POOL ALIVE HERE Send back results Request new tasks Reset workers request counters To eliminate a not needed runpath layer. To eliminate a not needed runpath layer. To eliminate a not needed runpath layer. upfront import to speed-up execution | 1,475 | en | 0.905106 |
# Copyright (c) 2022 Mira Geoscience Ltd.
#
# This file is part of geoapps.
#
# geoapps is distributed under the terms and conditions of the MIT License
# (see LICENSE file at the root of this source code package).
import numpy as np
from geoh5py.io import H5Writer
from geoh5py.objects import Curve, Points, Surface
from ipywidgets import Checkbox, HBox, Label, Layout, Text, VBox, interactive_output
from scipy.interpolate import LinearNDInterpolator
from geoapps import PlotSelection2D
from geoapps.utils.formatters import string_name
class ContourValues(PlotSelection2D):
    """
    Application for 2D contouring of spatial data.

    Lets the user pick an object/data pair, define contour levels as a
    string, preview them, and export the result as a geoh5 Curve.
    """
    # Default workspace/object/data selection and contour definition.
    defaults = {
        "h5file": "../../assets/FlinFlon.geoh5",
        "objects": "{538a7eb1-2218-4bec-98cc-0a759aa0ef4f}",
        "data": "{44822654-b6ae-45b0-8886-2d845f80f422}",
        "contours": "-400:2000:100,-240",
        "resolution": 50,
        "ga_group_name": "Contours",
    }
    def __init__(self, **kwargs):
        self.defaults.update(**kwargs)
        self._contours = Text(
            value="", description="Contours", disabled=False, continuous_update=False
        )
        self._export_as = Text(value="Contours")
        self._z_value = Checkbox(
            value=False, indent=False, description="Assign Z from values"
        )
        # NOTE(review): self.data is referenced before super().__init__
        # runs — presumably created by the base application; confirm.
        self.data.observe(self.update_name, names="value")
        super().__init__(**self.defaults)
        # Re-render the preview whenever the contour string changes.
        self.selection = interactive_output(
            self.compute_plot,
            {
                "contour_values": self.contours,
            },
        )
        self.trigger.on_click(self.trigger_click)
        self.trigger.description = "Export"
        self.trigger.button_style = "danger"
    @property
    def contours(self):
        """
        :obj:`ipywidgets.Text`: String defining sets of contours.
        Contours can be defined over an interval `50:200:10` and/or at a fix value `215`.
        Any combination of the above can be used:
        50:200:10, 215 => Contours between values 50 and 200 every 10, with a contour at 215.
        """
        return self._contours
    @property
    def export(self):
        """
        :obj:`ipywidgets.ToggleButton`: Write contours to the target geoh5
        """
        # NOTE(review): returns self._export, which is not assigned in this
        # class — presumably set by a base class; confirm.
        return self._export
    @property
    def export_as(self):
        """
        :obj:`ipywidgets.Text`: Name given to the Curve object
        """
        return self._export_as
    @property
    def z_value(self):
        """
        :obj:`ipywidgets.Checkbox`: Assign z-coordinate based on contour values
        """
        return self._z_value
    @property
    def main(self):
        """
        :obj:`ipywidgets.VBox`: A box containing all widgets forming the application.
        """
        # Lazily build the widget layout on first access.
        if self._main is None:
            self._main = VBox(
                [
                    self.project_panel,
                    HBox(
                        [
                            VBox(
                                [
                                    Label("Input options:"),
                                    self.data_panel,
                                    self.contours,
                                    self.window_selection,
                                ]
                            ),
                            VBox(
                                [
                                    Label("Save as:"),
                                    self.export_as,
                                    self.z_value,
                                    self.output_panel,
                                ],
                                layout=Layout(width="50%"),
                            ),
                        ]
                    ),
                    self.selection,
                ]
            )
        return self._main
    def compute_plot(self, contour_values):
        """
        Get current selection and trigger update
        """
        entity, data = self.get_selected_entities()
        if data is None:
            return
        if contour_values is not None:
            self.contours.value = contour_values
    def update_contours(self):
        """
        Refresh the export name from the selected data and contour string.
        """
        if self.data.value is not None:
            self.export_as.value = (
                self.data.uid_name_map[self.data.value] + "_" + self.contours.value
            )
    def update_name(self, _):
        """Sync the export name with the currently selected data field."""
        if self.data.value is not None:
            self.export_as.value = self.data.uid_name_map[self.data.value]
        else:
            self.export_as.value = "Contours"
    def trigger_click(self, _):
        """Export the previewed contours as a geoh5 Curve with one data
        channel holding the contour level per vertex."""
        entity, _ = self.get_selected_entities()
        if getattr(self.contours, "contour_set", None) is not None:
            contour_set = self.contours.contour_set
            # Flatten matplotlib contour segments into one vertex array
            # plus line-segment cells indexing into it.
            vertices, cells, values = [], [], []
            count = 0
            for segs, level in zip(contour_set.allsegs, contour_set.levels):
                for poly in segs:
                    n_v = len(poly)
                    vertices.append(poly)
                    cells.append(
                        np.c_[
                            np.arange(count, count + n_v - 1),
                            np.arange(count + 1, count + n_v),
                        ]
                    )
                    values.append(np.ones(n_v) * level)
                    count += n_v
            if vertices:
                vertices = np.vstack(vertices)
                if self.z_value.value:
                    # Use the contour level itself as the z-coordinate.
                    vertices = np.c_[vertices, np.hstack(values)]
                else:
                    if isinstance(entity, (Points, Curve, Surface)):
                        # Interpolate z from the source entity's vertices.
                        z_interp = LinearNDInterpolator(
                            entity.vertices[:, :2], entity.vertices[:, 2]
                        )
                        vertices = np.c_[vertices, z_interp(vertices)]
                    else:
                        # Gridded entity: use its constant origin elevation.
                        vertices = np.c_[
                            vertices,
                            np.ones(vertices.shape[0]) * entity.origin["z"],
                        ]
                # Re-use an existing Curve of the same name, else create one.
                curves = [
                    child
                    for child in self.ga_group.children
                    if child.name == self.export_as.value
                ]
                if any(curves):
                    curve = curves[0]
                    for child in curve.children:
                        self.workspace.remove_entity(child)
                    curve.vertices = vertices
                    curve.cells = np.vstack(cells).astype("uint32")
                else:
                    curve = Curve.create(
                        self.workspace,
                        name=string_name(self.export_as.value),
                        vertices=vertices,
                        cells=np.vstack(cells).astype("uint32"),
                        parent=self.ga_group,
                    )
                curve.add_data({self.contours.value: {"values": np.hstack(values)}})
                if self.live_link.value:
                    self.live_link_output(
                        self.export_directory.selected_path, self.ga_group
                    )
                self.workspace.finalize()
| geoapps/contours/application.py | 7,219 | Application for 2D contouring of spatial data.
Get current selection and trigger update
:obj:`ipywidgets.Text`: String defining sets of contours.
Contours can be defined over an interval `50:200:10` and/or at a fix value `215`.
Any combination of the above can be used:
50:200:10, 215 => Contours between values 50 and 200 every 10, with a contour at 215.
:obj:`ipywidgets.ToggleButton`: Write contours to the target geoh5
:obj:`ipywidgets.Text`: Name given to the Curve object
:obj:`ipywidgets.VBox`: A box containing all widgets forming the application.
Assign
:obj:`ipywidgets.Checkbox`: Assign z-coordinate based on contour values
Copyright (c) 2022 Mira Geoscience Ltd. This file is part of geoapps. geoapps is distributed under the terms and conditions of the MIT License (see LICENSE file at the root of this source code package). | 843 | en | 0.672019 |
"""[directory]
cd /Users/brunoflaven/Documents/02_copy/_000_IA_bruno_light/article_bert_detecting_fake_news_1/fake_news_nlp_detection/
python fake_news_nlp_detection_2.py
"""
# This is a Python 3 environment
# Base level imports for data science work
import numpy as np
import pandas as pd
import re,string,unicodedata
import os
from os import path
# Visualization Libs
import seaborn as sns
import matplotlib.pyplot as plt
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
# NLP Libs
import nltk
from sklearn.preprocessing import LabelBinarizer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from wordcloud import WordCloud,STOPWORDS
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize,sent_tokenize
from bs4 import BeautifulSoup
from keras.preprocessing import text, sequence
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelBinarizer
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
# Additional Libs
from sklearn.metrics import classification_report,confusion_matrix,accuracy_score
from sklearn.model_selection import train_test_split
from string import punctuation
from nltk import pos_tag
from nltk.corpus import wordnet
# Deep Learning Libs
import keras
from keras.models import Sequential
from keras.layers import Dense,Embedding,LSTM,Dropout
from keras.callbacks import ReduceLROnPlateau
import tensorflow as tf
# Import our data
# an insignificant dataset with 3 records
# true = pd.read_csv("TrueSmallSample.csv")
# fake = pd.read_csv("FakeSmallSample.csv")
# a medium dataset with 90 records
# Load the medium-sized (90-record) real/fake article samples.
true = pd.read_csv("TrueMediumSample.csv")
fake = pd.read_csv("FakeMediumSample.csv")
# the complete dataset
# true = pd.read_csv("TrueBigSample.csv")
# fake = pd.read_csv("FakeBigSample.csv")
# Let's explore the data at a base level
sample_true = true.sample(20)
sample_fake = fake.sample(20)
print("\n--- sample_true only 20")
print (sample_true)
print("\n--- sample_fake only 20")
print(sample_fake)
# true.shape,fake.shape
print("\n--- true.shape with number of TRUE news")
print(true.shape)
print("\n--- fake.shape with number of FAKE news")
print(fake.shape)
# These dataframes do not currently have a category for whether they are true or fake.
# Let's create those before we combine the datasets
true['category'] = 1 # news that are true
fake['category'] = 0 # news that are fake
# Now we'll merge the two datasets into a single dataframe
df = pd.concat([true,fake])
# PICTURE_1 OUTPUT
""" Total Fake and Real News Articles """
plt.figure(figsize=(16, 9))
sns.countplot(df.category)
plt.title('Total Fake and Real News Articles', fontsize=24)
plt.ylabel('Total', fontsize=16)
plt.xlabel('')
plt.xticks([0, 1], ['Fake', 'Real'], fontsize=16)
# plt.show()
plt.savefig('picture_1_total_fake_and_real_news_articles.png')
print("\n--- # PICTURE_1 OUTPUT ")
# NOTE(review): the isna().sum() result below is discarded — it is never
# printed or assigned.
df.isna().sum()
df_info = df.info()
# Look in https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.info.html
# One last thing I'm interested in looking at at this point
# # OUTPUT_3
df_subject = df.subject.value_counts()
print("\n--- # OUTPUT_3 :: df_subject")
print(df_subject)
# PICTURE_2 OUTPUT
""" Total Number of Articles Per Subject """
plt.figure(figsize=(16, 9))
sns.countplot(x='subject', data=df)
plt.title('Total Number of Articles Per Subject', fontsize=24)
plt.ylabel('Total', fontsize=16)
plt.xlabel('')
plt.xticks(fontsize=12)
# plt.show()
plt.savefig('picture_2_total_number_of_articles_per_subject.png')
print("\n--- # PICTURE_2 OUTPUT ")
# PICTURE_3 OUTPUT
""" Article Subjects By Type """
plt.figure(figsize=(16, 9))
sns.countplot(x='subject', hue='category', data=df)
plt.title('Article Subjects By Type', fontsize=24)
plt.ylabel('Total', fontsize=16)
plt.xlabel('')
plt.xticks(fontsize=12)
plt.legend(['Fake', 'Real'])
# plt.show()
plt.savefig('picture_3_article_subjects_by_type.png')
print("\n--- # PICTURE_3 OUTPUT ")
# # OUTPUT_4
# df_head = df.head()
# print("\n--- df_head")
# print(df_head)
# Now we'll create the Corpus that will be used in our NLP model
# This will create a single column with all the relevant text
# OUTPUT_5
df['text'] = df['title'] + " " + df['text']
# print("\n--- OUTPUT_5")
# print(df['text'])
# This will delete all the other columns we do not need for the rest of the work.
del df['title'] # added to our text corpus
del df['subject'] # we determined it would affect our results
# this might be an interesting item to keep in a future analysis that spans more time, but we will delete it for now.
del df['date']
# STOPWORDS
# Stopword set used by remove_stopwords(): English stopwords + punctuation.
stop = set(stopwords.words('english'))
punctuation = list(string.punctuation)
stop.update(punctuation)
# FUNCTIONS
# Removing the html with BS4
def strip_html(text):
    """Return the text content of *text* with all HTML markup removed."""
    return BeautifulSoup(text, "html.parser").get_text()
# Removing the square brackets
def remove_between_square_brackets(text):
    """Drop bracketed spans such as '[1]' or '[citation needed]' from text."""
    # Raw string avoids the invalid '\[' escape warning under Python 3.6+.
    return re.sub(r'\[[^]]*\]', '', text)
# Removing URL's
def remove_urls(text):
    """Strip anything starting with 'http' up to the next whitespace."""
    url_pattern = re.compile(r'http\S+')
    return url_pattern.sub('', text)
# Removing the stopwords from text
def remove_stopwords(text):
    """Drop tokens found in the module-level `stop` set (stopwords and
    punctuation), comparing case-insensitively; rejoin with single spaces."""
    kept = [
        token.strip()
        for token in text.split()
        if token.strip().lower() not in stop
    ]
    return " ".join(kept)
# Final function to clean the text
def clean_text(text):
    """Run the full cleaning pipeline: HTML, [bracketed] spans, URLs,
    then stopwords."""
    # Order matters: strip markup before the pattern-based removals.
    pipeline = (
        strip_html,
        remove_between_square_brackets,
        remove_urls,
        remove_stopwords,
    )
    for step in pipeline:
        text = step(text)
    return text
#Apply function on review column
# OUTPUT_6
df['text'] = df['text'].apply(clean_text)
# NOTE(review): the df.head() result below is discarded — never printed.
df.head()
# print("\n--- OUTPUT_6")
# print(df['text'])
# print(df.head())
# PICTURE_4 OUTPUT
""" picture_4_wordcloud """
plt.figure(figsize=(20, 20)) # Text from the fake news articles
wc = WordCloud(max_words=2000, width=1600, height=800,
               stopwords=STOPWORDS).generate(" ".join(df[df.category == 0].text))
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
# do not show save it
# plt.show()
plt.savefig('picture_4_wordcloud_text_fake_news.png')
print("\n--- # PICTURE_4 OUTPUT ")
# FUNCTIONS TO PREPARE PICTURE_5 OUTPUT
def clean(text: str) -> list:
    """Normalize raw article text: drop non-ASCII, lowercase, remove
    punctuation, then return lemmatized non-stopword tokens."""
    wnl = nltk.stem.WordNetLemmatizer()
    stopwords = nltk.corpus.stopwords.words('english')
    normalized = text.encode('ascii', 'ignore').decode('utf-8', 'ignore').lower()
    tokens = re.sub(r'[^\w\s]', '', normalized).split()  # tokenization
    return [wnl.lemmatize(token) for token in tokens if token not in stopwords]
# Lemmatized token list built from all real-news article text.
corpus = clean(' '.join(df[df.category == 1].text))
def listToString(s):
    """Join a sequence of words into a single space-separated string."""
    return " ".join(s)
corpus_str = listToString(corpus)
d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()
# read the mask / color image taken from
# Require bg_model_real_news.jpg to be in the directory
coloring = np.array(Image.open(path.join(d, "bg_model_real_news.jpg")))
# NOTE(review): this rebinds the name `stopwords`, shadowing the
# `nltk.corpus.stopwords` module imported at the top of the file.
stopwords = set(STOPWORDS)
wc = WordCloud(background_color="white", max_words=2000, width=1600, height=800, mask=coloring,stopwords=stopwords, max_font_size=80, random_state=42)
# generate word cloud
wc.generate(corpus_str)
# create coloring from image
image_colors = ImageColorGenerator(coloring)
plt.imshow(wc, interpolation='bilinear')
plt.axis("off")
# PICTURE_4_A OUTPUT
# This bit of code is used to create a high quality exported image
fig1 = plt.gcf()
# plt.show()
plt.draw()
fig1.savefig("picture_4_a_real_news_nlp_word_cloud.png", bbox_inches='tight', dpi=600)
plt.figure(figsize=(20, 20))
# PICTURE_5 OUTPUT
""" Characters per article """
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9))
text_len = df[df['category'] == 1]['text'].str.len()
ax1.hist(text_len, color='tab:orange')
ax1.set_title('Real News', fontsize=14)
text_len = df[df['category'] == 0]['text'].str.len()
ax2.hist(text_len, color='tab:blue')
ax2.set_title('Fake News', fontsize=14)
fig.suptitle('Characters per article', fontsize=24)
# do not show save it
# plt.show()
plt.savefig('picture_5_output_characters_per_article.png')
print("\n--- # PICTURE_5 OUTPUT ")
# There seem to be differences in the characters per article between the two categories. It looks like
# 2500 or less characters in text is the most common in real news category, while around 5000 or less
# characters in text are most common in fake news category.
# PICTURE_6 OUTPUT
""" Words per article """
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9))
text_len = df[df['category'] == 1]['text'].str.split().map(lambda x: len(x))
ax1.hist(text_len, color='tab:orange')
ax1.set_title('Real News', fontsize=14)
text_len = df[df['category'] == 0]['text'].str.split().map(lambda x: len(x))
ax2.hist(text_len, color='tab:blue')
ax2.set_title('Fake News', fontsize=14)
fig.suptitle('Words per article', fontsize=24)
# do not show save it
# plt.show()
plt.savefig('picture_6_output_words_per_article.png')
print("\n--- # PICTURE_6 OUTPUT ")
# NEW PART :: Training and Modeling
""" [This part is traning and modeling not in use for images] """
# Now that we've done a bit of data exploration, let's get into modeling
# First up, we need to Train Test Split the data
X = df.text
y = df.category
X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=.8, stratify=y, random_state=19)
# NOTE(review): the four lines below repeat the split above verbatim —
# redundant (same random_state, so the result is identical).
X = df.text
y = df.category
X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=.8, stratify=y, random_state=19)
# Create of Count Vectorize object
# NOTE(review): max_df=1 as an *int* keeps only terms appearing in at most
# one document — 1.0 (a proportion) was likely intended; confirm.
cv = CountVectorizer(min_df=0, max_df=1, binary=False, ngram_range=(1, 3))
# Transform the train dataset
cv_train = cv.fit_transform(X_train)
# Transform the test dataset
cv_test = cv.transform(X_test)
print("\n--- NEW PART :: Training and Modeling")
print('Bag of words Train:', cv_train.shape)
print('Bag of words Test:', cv_test.shape)
# Create the TF-IDF object
tfidf = TfidfVectorizer(min_df=0, max_df=1, use_idf=True, ngram_range=(1, 3))
# Transform the train dataset
tfidf_train = tfidf.fit_transform(X_train)
# Transform the test dataset
tfidf_test = tfidf.transform(X_test)
print('TF-IDF Train:', tfidf_train.shape)
print('TF-IDF Test:', tfidf_test.shape)
# Create our model object
mnb = MultinomialNB()
# Fit the model to the Bag of words CountVectorizor
mnb_cv = mnb.fit(cv_train, y_train)
# Fit the model to the TF-IDF features
# NOTE(review): fit() returns the estimator itself, so mnb, mnb_cv and
# mnb_tfidf are the same object; the second fit overwrites the first, and
# the "bag of words" predictions below are made by the TF-IDF-fitted
# model. Two separate MultinomialNB instances were likely intended.
mnb_tfidf = mnb.fit(tfidf_train, y_train)
# Predicting the model for bag of words
mnb_cv_predict = mnb.predict(cv_test)
# Predicting the model for tf-idf features
mnb_tfidf_predict = mnb.predict(tfidf_test)
# Check the accuracy score for bag of words
mnb_cv_score = accuracy_score(y_test, mnb_cv_predict)
print("Naive Bayes Bag of words accuracy score:", mnb_cv_score)
# Check the accuracy score for tfidf features
mnb_tfidf_score = accuracy_score(y_test, mnb_tfidf_predict)
print("Naive Bayes TF-IDF accuracy score:", mnb_tfidf_score)
mnb_cv_report = classification_report(
    y_test, mnb_cv_predict, target_names=['0', '1'])
print(mnb_cv_report)
mnb_tfidf_report = classification_report(
    y_test, mnb_tfidf_predict, target_names=['0', '1'])
print(mnb_tfidf_report)
# At the end of some quick EDA and NLP modeling, we're left with a high level understanding of the two datasets,
# some charts and word clouds for a data product, and a model that currently has 94% accuracy with Bag of Words
# and 92% with TF-IDF.
[directory]
cd /Users/brunoflaven/Documents/02_copy/_000_IA_bruno_light/article_bert_detecting_fake_news_1/fake_news_nlp_detection/
python fake_news_nlp_detection_2.py
This is a Python 3 environment Base level imports for data science work Visualization Libs NLP Libs Additional Libs Deep Learning Libs Import our data an insignificant dataset with 3 records true = pd.read_csv("TrueSmallSample.csv") fake = pd.read_csv("FakeSmallSample.csv") a medium dataset with 90 records the complete dataset true = pd.read_csv("TrueBigSample.csv") fake = pd.read_csv("FakeBigSample.csv") Let's explore the data at a base level true.shape,fake.shape These dataframes do not currently have a category for whether they are true or fake. Let's create those before we combine the datasets news that are true news that are fake Now we'll merge the two datasets into a single dataframe PICTURE_1 OUTPUT plt.show() Look in https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.info.html One last thing I'm interested in looking at at this point OUTPUT_3 PICTURE_2 OUTPUT plt.show() PICTURE_3 OUTPUT plt.show() OUTPUT_4 df_head = df.head() print("\n--- df_head") print(df_head) Now we'll create the Corpus that will be used in our NLP model This will create a single column with all the relevant text OUTPUT_5 print("\n--- OUTPUT_5") print(df['text']) This will delete all the other columns we do not need for the rest of the work. added to our text corpus we determined it would affect our results this might be an interesting item to keep in a future analysis that spans more time, but we will delete it for now. 
STOPWORDS FUNCTIONS Removing the html with BS4 Removing the square brackets Removing URL's Removing the stopwords from text Final function to clean the textApply function on review column OUTPUT_6 print("\n--- OUTPUT_6") print(df['text']) print(df.head()) PICTURE_4 OUTPUT Text from the fake news articles do not show save it plt.show() FUNCTIONS TO PREPARE PICTURE_5 OUTPUT tokenization initialize an empty string return string read the mask / color image taken from Require bg_model_real_news.jpg to be in the directory generate word cloud create coloring from image PICTURE_4_A OUTPUT This bit of code is used to create a high quality exported image plt.show() PICTURE_5 OUTPUT do not show save it plt.show() There seem to be differences in the characters per article between the two categories. It looks like 2500 or less characters in text is the most common in real news category, while around 5000 or less characters in text are most common in fake news category. PICTURE_6 OUTPUT do not show save it plt.show() NEW PART :: Training and Modeling Now that we've done a bit of data exploration, let's get into modeling First up, we need to Train Test Split the data Create of Count Vectorize object Transform the train dataset Transform the test dataset Create the TF-IDF object Transform the train dataset Transform the test dataset Create our model object Fit the model to the Bag of words CountVectorizor Fit the model to the TF-IDF features Predicting the model for bag of words Predicting the model for tf-idf features Check the accuracy score for bag of words Check the accuracy score for tfidf features At the end of some quick EDA and NLP modeling, we're left with a high level understanding of the two datasets, some charts and word clouds for a data product, and a model that currently has 94% accuracy with Bag of Words and 92% with TF-IDF. | 3,522 | en | 0.78238 |
#! /usr/bin/env python2
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
LIBRARIES BUILD
"""
import sys
from time import time
from os.path import join, abspath, dirname
# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from tools.toolchains import TOOLCHAINS, TOOLCHAIN_CLASSES, TOOLCHAIN_PATHS
from tools.toolchains import mbedToolchain
from tools.targets import TARGET_NAMES, TARGET_MAP
from tools.options import get_default_options_parser
from tools.build_api import build_library, build_mbed_libs, build_lib
from tools.build_api import mcu_toolchain_matrix
from tools.build_api import static_analysis_scan, static_analysis_scan_lib, static_analysis_scan_library
from tools.build_api import print_build_results
from tools.settings import CPPCHECK_CMD, CPPCHECK_MSG_FORMAT
from utils import argparse_filestring_type, args_error
from tools.settings import CPPCHECK_CMD, CPPCHECK_MSG_FORMAT, CLI_COLOR_MAP
from utils import argparse_filestring_type, argparse_dir_not_parent
if __name__ == '__main__':
    start = time()

    # --- Command-line interface -------------------------------------------
    # Start from the shared mbed option parser and layer on the flags that
    # are specific to the library build.
    parser = get_default_options_parser()
    parser.add_argument(
        "--source", dest="source_dir", type=argparse_filestring_type,
        action="append", default=None, help="The source (input) directory")
    parser.add_argument(
        "--build", dest="build_dir", type=argparse_dir_not_parent(ROOT),
        default=None, help="The build (output) directory")
    parser.add_argument(
        "--no-archive", dest="no_archive", action="store_true", default=False,
        help="Do not produce archive (.ar) file, but rather .o")

    # Optional extra mbed libraries that can be compiled alongside the core.
    parser.add_argument("-r", "--rtos", action="store_true", dest="rtos",
                        default=False, help="Compile the rtos")
    parser.add_argument("--rpc", action="store_true", dest="rpc",
                        default=False, help="Compile the rpc library")
    parser.add_argument("-e", "--eth", action="store_true", dest="eth",
                        default=False, help="Compile the ethernet library")
    parser.add_argument("-U", "--usb_host", action="store_true", dest="usb_host",
                        default=False, help="Compile the USB Host library")
    parser.add_argument("-u", "--usb", action="store_true", dest="usb",
                        default=False, help="Compile the USB Device library")
    parser.add_argument("-d", "--dsp", action="store_true", dest="dsp",
                        default=False, help="Compile the DSP library")
    parser.add_argument("-F", "--fat", action="store_true", dest="fat",
                        default=False,
                        help="Compile FS and SD card file system library")
    parser.add_argument("-b", "--ublox", action="store_true", dest="ublox",
                        default=False, help="Compile the u-blox library")
    parser.add_argument("--cpputest", action="store_true", dest="cpputest_lib",
                        default=False,
                        help="Compiles 'cpputest' unit test library (library should be on the same directory level as mbed repository)")

    # Generic build controls.
    parser.add_argument("-D", action="append", dest="macros",
                        help="Add a macro definition")
    parser.add_argument("-S", "--supported-toolchains", action="store_true",
                        dest="supported_toolchains", default=False,
                        help="Displays supported matrix of MCUs and toolchains")
    parser.add_argument('-f', '--filter', dest='general_filter_regex',
                        default=None,
                        help='For some commands you can use filter to filter out results')
    parser.add_argument("--cppcheck", action="store_true",
                        dest="cppcheck_validation", default=False,
                        help="Forces 'cppcheck' static code analysis")
    parser.add_argument("-j", "--jobs", type=int, dest="jobs", default=0,
                        help="Number of concurrent jobs. Default: 0/auto (based on host machine's number of CPUs)")
    parser.add_argument("-N", "--artifact-name", dest="artifact_name",
                        default=None, help="The built project's name")

    # Diagnostic output controls.
    parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
                        default=False, help="Verbose diagnostic output")
    parser.add_argument("--silent", action="store_true", dest="silent",
                        default=False,
                        help="Silent diagnostic output (no copy, compile notification)")
    parser.add_argument("-x", "--extra-verbose-notifications",
                        action="store_true", dest="extra_verbose_notify",
                        default=False,
                        help="Makes compiler more verbose, CI friendly.")

    options = parser.parse_args()
# Only prints matrix of supported toolchains
if options.supported_toolchains:
print mcu_toolchain_matrix(platform_filter=options.general_filter_regex)
exit(0)
# Get target list
targets = options.mcu if options.mcu else TARGET_NAMES
# Get toolchains list
toolchains = options.tool if options.tool else TOOLCHAINS
if options.source_dir and not options.build_dir:
args_error(parser, "argument --build is required by argument --source")
if options.color:
# This import happens late to prevent initializing colorization when we don't need it
import colorize
if options.verbose:
notify = mbedToolchain.print_notify_verbose
else:
notify = mbedToolchain.print_notify
notify = colorize.print_in_color_notifier(CLI_COLOR_MAP, notify)
else:
notify = None
# Get libraries list
libraries = []
# Additional Libraries
if options.rtos:
libraries.extend(["rtx", "rtos"])
if options.rpc:
libraries.extend(["rpc"])
if options.eth:
libraries.append("eth")
if options.usb:
libraries.append("usb")
if options.usb_host:
libraries.append("usb_host")
if options.dsp:
libraries.extend(["dsp"])
if options.fat:
libraries.extend(["fat"])
if options.ublox:
libraries.extend(["rtx", "rtos", "usb_host", "ublox"])
if options.cpputest_lib:
libraries.extend(["cpputest"])
    # Build results, collected as "TOOLCHAIN::TARGET" identifiers.
    failures = []
    successes = []
    skipped = []

    # CPPCHECK code validation: run static analysis instead of building.
    if options.cppcheck_validation:
        for toolchain in toolchains:
            # Fail early if the toolchain executable cannot be located.
            if not TOOLCHAIN_CLASSES[toolchain].check_executable():
                search_path = TOOLCHAIN_PATHS[toolchain] or "No path set"
                args_error(parser, "Could not find executable for %s.\n"
                           "Currently set search path: %s"
                           % (toolchain, search_path))
            for target in targets:
                try:
                    mcu = TARGET_MAP[target]
                    # CMSIS and MBED libs analysis
                    static_analysis_scan(mcu, toolchain, CPPCHECK_CMD, CPPCHECK_MSG_FORMAT, verbose=options.verbose, jobs=options.jobs)
                    for lib_id in libraries:
                        # Static check for library
                        static_analysis_scan_lib(lib_id, mcu, toolchain, CPPCHECK_CMD, CPPCHECK_MSG_FORMAT,
                                  options=options.options,
                                  extra_verbose=options.extra_verbose_notify, verbose=options.verbose, jobs=options.jobs, clean=options.clean,
                                  macros=options.macros)
                        pass
                except Exception, e:
                    # Verbose mode prints the traceback and aborts on the
                    # first error; otherwise report and continue with the
                    # next target.
                    if options.verbose:
                        import traceback
                        traceback.print_exc(file=sys.stdout)
                        sys.exit(1)
                    print e
    else:
        # Build every requested toolchain/target combination.
        for toolchain in toolchains:
            for target in targets:
                tt_id = "%s::%s" % (toolchain, target)
                if toolchain not in TARGET_MAP[target].supported_toolchains:
                    # Log this later
                    print "%s skipped: toolchain not supported" % tt_id
                    skipped.append(tt_id)
                else:
                    try:
                        mcu = TARGET_MAP[target]
                        if options.source_dir:
                            # Build the user-supplied source tree.
                            lib_build_res = build_library(options.source_dir, options.build_dir, mcu, toolchain,
                                        options=options.options,
                                        extra_verbose=options.extra_verbose_notify,
                                        verbose=options.verbose,
                                        silent=options.silent,
                                        jobs=options.jobs,
                                        clean=options.clean,
                                        archive=(not options.no_archive),
                                        macros=options.macros,
                                        name=options.artifact_name)
                        else:
                            # Build the core mbed libraries.
                            lib_build_res = build_mbed_libs(mcu, toolchain,
                                        options=options.options,
                                        extra_verbose=options.extra_verbose_notify,
                                        verbose=options.verbose,
                                        silent=options.silent,
                                        jobs=options.jobs,
                                        clean=options.clean,
                                        macros=options.macros)
                        # Then build any requested extra libraries.
                        for lib_id in libraries:
                            build_lib(lib_id, mcu, toolchain,
                                    options=options.options,
                                    extra_verbose=options.extra_verbose_notify,
                                    verbose=options.verbose,
                                    silent=options.silent,
                                    clean=options.clean,
                                    macros=options.macros,
                                    jobs=options.jobs)
                        # A falsy build result means the combination was
                        # skipped by the build system, not a failure.
                        if lib_build_res:
                            successes.append(tt_id)
                        else:
                            skipped.append(tt_id)
                    except Exception, e:
                        # Same fail-fast-in-verbose policy as above; note
                        # that in verbose mode the summary below is never
                        # reached.
                        if options.verbose:
                            import traceback
                            traceback.print_exc(file=sys.stdout)
                            sys.exit(1)
                        failures.append(tt_id)
                        print e

    # Write summary of the builds
    print
    print "Completed in: (%.2f)s" % (time() - start)
    print

    for report, report_name in [(successes, "Build successes:"),
                                (skipped, "Build skipped:"),
                                (failures, "Build failures:"),
                               ]:
        if report:
            print print_build_results(report, report_name),

    # Non-zero exit code so CI can detect a failed build.
    if failures:
        sys.exit(1)
| tools/build.py | 12,768 | ! /usr/bin/env python2 Be sure that the tools directory is in the search path Parse Options Extra libraries Only prints matrix of supported toolchains Get target list Get toolchains list This import happens late to prevent initializing colorization when we don't need it Get libraries list Additional Libraries Build results CPPCHECK code validation CMSIS and MBED libs analysis Static check for library Build Log this later Write summary of the builds | 452 | en | 0.799252 |
# Copyright 2017-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
from itertools import product
def example_data_binomial():
    """
    Return an example dataframe with categorical features (country and
    test variation) and an ordinal feature (date), together with the
    number of successes and total observations for each combination.

    Totals are drawn from a Poisson distribution and successes from a
    binomial, so values differ between calls unless numpy's global RNG
    is seeded.
    """
    countries = ['ca', 'us']
    dates = pd.date_range('2018-01-01', '2018-02-01')
    variation_names = ['test', 'control', 'test2']
    # Rates/sizes are indexed in product(countries, variation_names) order:
    # ca/test, ca/control, ca/test2, us/test, us/control, us/test2
    success_rates = [.3, .32, .24, .22, .25, .42]
    n_observations = [50, 80, 30, 50, 40, 50]
    frames = []
    for i, (country, variation) in enumerate(
            product(countries, variation_names)):
        df = pd.DataFrame({'date': dates})
        df['country'] = country
        df['variation_name'] = variation
        df['total'] = np.random.poisson(n_observations[i], size=len(dates))
        # Draw each day's successes out of that day's total observations.
        df['success'] = df['total'].apply(
            lambda x: np.random.binomial(x, success_rates[i]))
        frames.append(df)
    # Concatenate once at the end: repeatedly concatenating inside the loop
    # copies the accumulated frame on every iteration (quadratic).
    return pd.concat(frames, axis=0)
def example_data_gaussian():
    """Return a small hard-coded example dataframe of per-day metrics for
    three test variations ('test', 'control', 'test2')."""
    # One tuple per (day, variation): variation name, item count, item
    # sum-of-squares, user count, and days since registration.
    rows = [
        ('test',    500, 2500, 1010, 1),
        ('control',   8,   12,   22, 1),
        ('test2',   100,  150,  150, 1),
        ('test',    510, 2510, 1000, 2),
        ('control',   8,   13,   20, 2),
        ('test2',   100,  140,  153, 2),
        ('test',    520, 2520, 1030, 3),
        ('control',   9,   14,   23, 3),
        ('test2',   104,  154,  154, 3),
        ('test',    530, 2530, 1000, 4),
        ('control',   7,   15,   20, 4),
        ('test2',   100,  160,  150, 4),
        ('test',    530, 2530, 1040, 5),
        ('control',   8,   16,   21, 5),
        ('test2',   103,  103,  155, 5),
    ]
    return pd.DataFrame(
        rows,
        columns=['variation_name', 'nr_of_items', 'nr_of_items_sumsq',
                 'users', 'days_since_reg'])
| spotify_confidence/examples.py | 3,070 | Returns an output dataframe with categorical
features (country and test variation), and ordinal features (date),
as well as number of successes and total observations for each combination
Copyright 2017-2020 Spotify AB Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. test ca, test us, control ca, control us, test2 ca, test2 us | 803 | en | 0.863634 |
# Copyright (c) 2019 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..types import XMLScalar
class Kd(XMLScalar):
    """Scalar SDF element ``<kd>`` (presumably the ODE contact damping
    coefficient -- confirm against the SDF surface/contact spec)."""

    _NAME = 'kd'    # XML tag name emitted/parsed for this element
    _TYPE = 'sdf'   # parser family this element belongs to

    def __init__(self, default=1):
        """Initialize the element with a scalar value (defaults to 1)."""
        XMLScalar.__init__(self, default)

    def _set_value(self, value):
        # Reject negative values before delegating to the base setter.
        assert value >= 0
        XMLScalar._set_value(self, value)
| pcg_libraries/src/pcg_gazebo/parsers/sdf/kd.py | 955 | Copyright (c) 2019 - The Procedural Generation for Gazebo authors For information on the respective copyright owner see the NOTICE file Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 657 | en | 0.862835 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.